def test_simclr_collate_tuple_input_size(self):
    """SimCLRCollateFunction with a tuple input_size returns aligned outputs.

    Checks that the collate function accepts ``input_size=(32, 32)`` and that
    the collated samples, labels, and filenames all have the same length.
    """
    batch = self.create_batch()
    img_collate = SimCLRCollateFunction(input_size=(32, 32), )
    samples, labels, fnames = img_collate(batch)
    self.assertIsNotNone(img_collate)
    # BUG FIX: the original call was
    #   self.assertEqual(len(samples), len(labels), len(fnames))
    # but assertEqual(first, second, msg) treats the THIRD argument as the
    # failure message, so len(fnames) was never actually compared.
    # Compare all three lengths explicitly instead.
    self.assertEqual(len(samples), len(labels))
    self.assertEqual(len(labels), len(fnames))
# --- Model: SimCLR on a ResNet-18 trunk ---------------------------------
# Drop the final fully-connected classification layer; keep everything up
# to (and including) the global pooling as the feature backbone.
resnet = torchvision.models.resnet18()
backbone = nn.Sequential(*list(resnet.children())[:-1])
model = SimCLR(backbone)

# Prefer the GPU when one is present.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
model.to(device)

# --- Data: CIFAR-10 wrapped as a LightlyDataset -------------------------
cifar10 = torchvision.datasets.CIFAR10("datasets/cifar10", download=True)
dataset = LightlyDataset.from_torch_dataset(cifar10)
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder")

# SimCLR augmentation pipeline for 32x32 inputs; gaussian blur disabled
# (probability 0.) since CIFAR-10 images are tiny.
collate_fn = SimCLRCollateFunction(
    input_size=32,
    gaussian_blur=0.,
)

dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=256,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True,
    num_workers=8,
)

# --- Loss and optimizer --------------------------------------------------
criterion = NTXentLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.06)

print("Starting Training")
        # NOTE(review): this fragment belongs to a training_step method whose
        # `def` line falls outside this chunk. Symmetric loss: average the
        # criterion over both (projection, prediction) pairings of the two views.
        loss = 0.5 * (self.criterion(z0, p1) + self.criterion(z1, p0))
        return loss

    def configure_optimizers(self):
        """Return a plain SGD optimizer over all model parameters (lr=0.06)."""
        optim = torch.optim.SGD(self.parameters(), lr=0.06)
        return optim


# --- NNCLR example script: data, loader, and trainer setup ---------------
model = NNCLR()
cifar10 = torchvision.datasets.CIFAR10("datasets/cifar10", download=True)
dataset = LightlyDataset.from_torch_dataset(cifar10)
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder")
# SimCLR-style augmentations are reused for NNCLR here.
collate_fn = SimCLRCollateFunction(input_size=32)
dataloader = torch.utils.data.DataLoader(
    dataset,
    batch_size=256,
    collate_fn=collate_fn,
    shuffle=True,
    drop_last=True,
    num_workers=8,
)
# Number of visible GPUs; used below to configure the Lightning trainer.
gpus = torch.cuda.device_count()
# train with DDP and use Synchronized Batch Norm for a more accurate batch norm
# calculation
# NOTE(review): the pl.Trainer(...) call is truncated here — its arguments
# continue beyond this chunk.
trainer = pl.Trainer(