def test_run_resnet101(self):
    # ResNet-101 ships with BatchNorm layers, which the privacy engine
    # rejects; the raw model must raise, and the converted model (BatchNorm
    # swapped for GroupNorm) must run one batch cleanly.
    image_size = (3, 224, 224)
    with self.assertRaises(IncompatibleModuleException):
        self.runOneBatch(models.resnet101(), image_size)
    self.runOneBatch(mm.convert_batchnorm_modules(models.resnet101()), image_size)
 def test_run_basic_case(self):
     # A model containing BatchNorm must be rejected by the privacy engine;
     # after convert_batchnorm_modules swaps in GroupNorm it should run.
     input_shape = (3, 4, 5)
     with self.assertRaises(IncompatibleModuleException):
         self.runOneBatch(BasicModel(input_shape), input_shape)
     self.runOneBatch(mm.convert_batchnorm_modules(BasicModel(input_shape)), input_shape)
 def test_convert_batchnorm_modules_resnet50(self):
     # A fresh ResNet-50 contains BatchNorm2d layers out of the box.
     net = models.resnet50()
     self.checkModulePresent(net, nn.BatchNorm2d)
     # Conversion must replace every BatchNorm2d with a GroupNorm layer.
     net = mm.convert_batchnorm_modules(net)
     self.checkModuleNotPresent(net, nn.BatchNorm2d)
     self.checkModulePresent(net, nn.GroupNorm)
Example #4
0
    def test_module_modification_convert_example(self):
        """Mirror the usage example shown for convert_batchnorm_modules().

        IMPORTANT: when changing this code you also need to update the
        docstring for
        torchdp.utils.module_modification.convert_batchnorm_modules().
        """
        # pyre-fixme[21]: Could not find name `resnet50` in `torchvision.models`.
        from torchvision.models import resnet50

        model = resnet50()
        # assertIsInstance is the idiomatic unittest call and produces a
        # clearer failure message than assertTrue(isinstance(...)).
        self.assertIsInstance(model.layer1[0].bn1, nn.BatchNorm2d)

        model = convert_batchnorm_modules(model)
        self.assertIsInstance(model.layer1[0].bn1, nn.GroupNorm)
Example #5
0
 def test_convert_batchnorm(self):
     # After BatchNorm layers are converted, a ResNet-50 must pass the
     # differential-privacy model inspector's validation.
     checker = dp_inspector.DPModelInspector()
     converted = convert_batchnorm_modules(models.resnet50())
     self.assertTrue(checker.validate(converted))
Example #6
0
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        """Run the generator stack on `input`.

        When the tensor lives on a CUDA device and more than one GPU is
        configured, the work is spread across GPUs with data_parallel;
        otherwise the module stack is applied directly.
        """
        if input.is_cuda and self.ngpu > 1:
            return nn.parallel.data_parallel(
                self.main, input, range(self.ngpu)
            )
        return self.main(input)


# Build the generator; when differential privacy is enabled (opt.disable_dp
# unset), swap its BatchNorm layers for privacy-engine-compatible modules.
# NOTE(review): Generator, opt, device and weights_init are defined earlier
# in this script (outside this excerpt) — confirm against the full file.
netG = Generator(ngpu)
if not opt.disable_dp:
    netG = convert_batchnorm_modules(netG)
netG = netG.to(device)
# Apply the custom weight initialization function to every submodule.
netG.apply(weights_init)
# Optionally resume from a checkpoint path supplied via the CLI option.
if opt.netG != "":
    netG.load_state_dict(torch.load(opt.netG))


class Discriminator(nn.Module):
    def __init__(self, ngpu):
        super(Discriminator, self).__init__()
        self.ngpu = ngpu
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32