# test_mean_teacher_hook.py
  1. # Copyright (c) OpenMMLab. All rights reserved.
  2. import os.path as osp
  3. import tempfile
  4. from unittest import TestCase
  5. import torch
  6. import torch.nn as nn
  7. from mmengine.evaluator import BaseMetric
  8. from mmengine.model import BaseModel
  9. from mmengine.optim import OptimWrapper
  10. from mmengine.registry import MODEL_WRAPPERS
  11. from mmengine.runner import Runner
  12. from torch.utils.data import Dataset
  13. from mmdet.registry import DATASETS
  14. from mmdet.utils import register_all_modules
# Register all mmdet modules into the default registries up front so that
# string-typed configs below (e.g. dict(type='MeanTeacherHook'),
# type='DefaultSampler') can be resolved by the Runner.
register_all_modules()
  16. class ToyModel(nn.Module):
  17. def __init__(self):
  18. super().__init__()
  19. self.linear = nn.Linear(2, 1)
  20. def forward(self, inputs, data_samples, mode='tensor'):
  21. labels = torch.stack(data_samples)
  22. inputs = torch.stack(inputs)
  23. outputs = self.linear(inputs)
  24. if mode == 'tensor':
  25. return outputs
  26. elif mode == 'loss':
  27. loss = (labels - outputs).sum()
  28. outputs = dict(loss=loss)
  29. return outputs
  30. else:
  31. return outputs
  32. class ToyModel1(BaseModel, ToyModel):
  33. def __init__(self):
  34. super().__init__()
  35. def forward(self, *args, **kwargs):
  36. return super(BaseModel, self).forward(*args, **kwargs)
  37. class ToyModel2(BaseModel):
  38. def __init__(self):
  39. super().__init__()
  40. self.teacher = ToyModel1()
  41. self.student = ToyModel1()
  42. def forward(self, *args, **kwargs):
  43. return self.student(*args, **kwargs)
  44. @DATASETS.register_module(force=True)
  45. class DummyDataset(Dataset):
  46. METAINFO = dict() # type: ignore
  47. data = torch.randn(12, 2)
  48. label = torch.ones(12)
  49. @property
  50. def metainfo(self):
  51. return self.METAINFO
  52. def __len__(self):
  53. return self.data.size(0)
  54. def __getitem__(self, index):
  55. return dict(inputs=self.data[index], data_samples=self.label[index])
  56. class ToyMetric1(BaseMetric):
  57. def __init__(self, collect_device='cpu', dummy_metrics=None):
  58. super().__init__(collect_device=collect_device)
  59. self.dummy_metrics = dummy_metrics
  60. def process(self, data_batch, predictions):
  61. result = {'acc': 1}
  62. self.results.append(result)
  63. def compute_metrics(self, results):
  64. return dict(acc=1)
  65. class TestMeanTeacherHook(TestCase):
  66. def setUp(self):
  67. self.temp_dir = tempfile.TemporaryDirectory()
  68. def tearDown(self):
  69. self.temp_dir.cleanup()
  70. def test_mean_teacher_hook(self):
  71. device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
  72. model = ToyModel2().to(device)
  73. runner = Runner(
  74. model=model,
  75. train_dataloader=dict(
  76. dataset=DummyDataset(),
  77. sampler=dict(type='DefaultSampler', shuffle=True),
  78. batch_size=3,
  79. num_workers=0),
  80. val_dataloader=dict(
  81. dataset=DummyDataset(),
  82. sampler=dict(type='DefaultSampler', shuffle=False),
  83. batch_size=3,
  84. num_workers=0),
  85. val_evaluator=[ToyMetric1()],
  86. work_dir=self.temp_dir.name,
  87. default_scope='mmdet',
  88. optim_wrapper=OptimWrapper(
  89. torch.optim.Adam(ToyModel().parameters())),
  90. train_cfg=dict(by_epoch=True, max_epochs=2, val_interval=1),
  91. val_cfg=dict(),
  92. default_hooks=dict(logger=None),
  93. custom_hooks=[dict(type='MeanTeacherHook')],
  94. experiment_name='test1')
  95. runner.train()
  96. self.assertTrue(
  97. osp.exists(osp.join(self.temp_dir.name, 'epoch_2.pth')))
  98. # checkpoint = torch.load(osp.join(self.temp_dir.name, 'epoch_2.pth'))
  99. # load and testing
  100. runner = Runner(
  101. model=model,
  102. test_dataloader=dict(
  103. dataset=DummyDataset(),
  104. sampler=dict(type='DefaultSampler', shuffle=True),
  105. batch_size=3,
  106. num_workers=0),
  107. test_evaluator=[ToyMetric1()],
  108. test_cfg=dict(),
  109. work_dir=self.temp_dir.name,
  110. default_scope='mmdet',
  111. load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
  112. default_hooks=dict(logger=None),
  113. custom_hooks=[dict(type='MeanTeacherHook')],
  114. experiment_name='test2')
  115. runner.test()
  116. @MODEL_WRAPPERS.register_module()
  117. class DummyWrapper(BaseModel):
  118. def __init__(self, model):
  119. super().__init__()
  120. self.module = model
  121. def forward(self, *args, **kwargs):
  122. return self.module(*args, **kwargs)
  123. # with model wrapper
  124. runner = Runner(
  125. model=DummyWrapper(ToyModel2()),
  126. test_dataloader=dict(
  127. dataset=DummyDataset(),
  128. sampler=dict(type='DefaultSampler', shuffle=True),
  129. batch_size=3,
  130. num_workers=0),
  131. test_evaluator=[ToyMetric1()],
  132. test_cfg=dict(),
  133. work_dir=self.temp_dir.name,
  134. default_scope='mmdet',
  135. load_from=osp.join(self.temp_dir.name, 'epoch_2.pth'),
  136. default_hooks=dict(logger=None),
  137. custom_hooks=[dict(type='MeanTeacherHook')],
  138. experiment_name='test3')
  139. runner.test()