# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase

import torch
from mmengine import Config
from mmengine.structures import InstanceData

from mmdet import *  # noqa
from mmdet.models.dense_heads import GFLHead, LDHead


class TestLDHead(TestCase):

    def test_ld_head_loss(self):
        """Tests ld head loss when truth is empty and non-empty."""
        s = 256
        img_metas = [{
            'img_shape': (s, s, 3),
            'pad_shape': (s, s, 3),
            'scale_factor': 1
        }]
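        # Shared training config: the ATSS assigner matches anchors to ground
        # truth, and ignore_iof_thr > 0 lets it drop anchors that overlap
        # boxes passed in via batch_gt_instances_ignore.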
        train_cfg = Config(
            dict(
                assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
                allowed_border=-1,
                pos_weight=-1,
                debug=False))
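        # Student head under test. LDHead extends GFLHead with a localization
        # distillation term, configured here via loss_ld.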
        ld_head = LDHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_ld=dict(
                type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]))
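
        # Teacher head: a plain GFLHead with the same anchor settings. Its
        # bbox distribution predictions serve as the soft targets for the LD
        # loss below.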
        teacher_model = GFLHead(
            num_classes=4,
            in_channels=1,
            train_cfg=train_cfg,
            loss_cls=dict(
                type='QualityFocalLoss',
                use_sigmoid=True,
                beta=2.0,
                loss_weight=1.0),
            loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
            anchor_generator=dict(
                type='AnchorGenerator',
                ratios=[1.0],
                octave_base_scale=8,
                scales_per_octave=1,
                strides=[8, 16, 32, 64, 128]))
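
        # Dummy single-channel feature maps, one per pyramid level, mimicking
        # multi-scale FPN outputs for a 256x256 input.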
        feat = [
            torch.rand(1, 1, s // feat_size, s // feat_size)
            for feat_size in [4, 8, 16, 32, 64]
        ]
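        # Both heads return per-level classification scores and bbox
        # distribution logits; the teacher's bbox output (index 1 of the
        # forward() tuple) is reused as a random soft target for distillation.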
        cls_scores, bbox_preds = ld_head.forward(feat)
        rand_soft_target = teacher_model.forward(feat)[1]

        # Test that empty ground truth encourages the network to predict
        # background
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.empty((0, 4))
        gt_instances.labels = torch.LongTensor([])
        batch_gt_instances_ignore = None

        empty_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                               [gt_instances], img_metas,
                                               rand_soft_target,
                                               batch_gt_instances_ignore)
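        # loss_by_feat returns a dict of per-level loss tensors, hence the
        # sum() over feature levels below.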
        # When there is no truth, the cls loss should be nonzero and the ld
        # loss non-negative, but there should be no box loss.
        empty_cls_loss = sum(empty_gt_losses['loss_cls'])
        empty_box_loss = sum(empty_gt_losses['loss_bbox'])
        empty_ld_loss = sum(empty_gt_losses['loss_ld'])
        self.assertGreater(empty_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(
            empty_box_loss.item(), 0,
            'there should be no box loss when there are no true boxes')
        self.assertGreaterEqual(empty_ld_loss.item(), 0,
                                'ld loss should be non-negative')

        # When truth is non-empty then both cls and box loss should be nonzero
        # for random inputs
        gt_instances = InstanceData()
        gt_instances.bboxes = torch.Tensor(
            [[23.6667, 23.8757, 238.6326, 151.8874]])
        gt_instances.labels = torch.LongTensor([2])
        batch_gt_instances_ignore = None

        one_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                             [gt_instances], img_metas,
                                             rand_soft_target,
                                             batch_gt_instances_ignore)
        onegt_cls_loss = sum(one_gt_losses['loss_cls'])
        onegt_box_loss = sum(one_gt_losses['loss_bbox'])
        self.assertGreater(onegt_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreater(onegt_box_loss.item(), 0,
                           'box loss should be non-zero')
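
        # Reuse the same GT boxes as the ignore set; with ignore_iof_thr set
        # in the assigner, anchors overlapping these regions are excluded
        # from box regression.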
        batch_gt_instances_ignore = gt_instances

        # When truth is non-empty but ignored then the cls loss should be
        # nonzero, but there should be no box loss.
        ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                                [gt_instances], img_metas,
                                                rand_soft_target,
                                                batch_gt_instances_ignore)
        ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
        ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
        self.assertGreater(ignore_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertEqual(ignore_box_loss.item(), 0,
                         'gt bbox ignored loss should be zero')

        # When truth is non-empty and the ignore boxes are random then the cls
        # loss should be nonzero and the box loss non-negative for random
        # inputs
        batch_gt_instances_ignore = InstanceData()
        batch_gt_instances_ignore.bboxes = torch.randn(1, 4)
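        # The randomly drawn ignore box may be degenerate and suppress no
        # anchors, which is why the box loss is not required to be strictly
        # positive here.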
        not_ignore_gt_losses = ld_head.loss_by_feat(cls_scores, bbox_preds,
                                                    [gt_instances], img_metas,
                                                    rand_soft_target,
                                                    batch_gt_instances_ignore)
        not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
        not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
        self.assertGreater(not_ignore_cls_loss.item(), 0,
                           'cls loss should be non-zero')
        self.assertGreaterEqual(not_ignore_box_loss.item(), 0,
                                'gt bbox not ignored loss should be '
                                'non-negative')