def test_fit_swa_cuda(self):
    """Smoke-test cnn.fit with a cosine schedule plus SWA averaging.

    NOTE(review): despite the "cuda" in the test name, this runs with
    device="cpu" — confirm whether device="cuda" was intended here.
    """
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
        criterion = nn.CrossEntropyLoss()
        cosine_sched = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=300)
        swa_sched = SWALR(optimizer, anneal_strategy="linear", anneal_epochs=20, swa_lr=0.05)
        history = cnn.fit(
            net, 3, train_loader, val_loader, criterion,
            device="cpu", optimizer=optimizer, scheduler=cosine_sched,
            num_batches=10, swa_start=2, swa_scheduler=swa_sched,
        )
        self.assertIsInstance(history, Dict)
        # The history must expose both splits, each with all three metrics.
        for split in ("train", "val"):
            self.assertTrue(split in history.keys())
        for metric in ("top1_acc", "top5_acc", "loss"):
            self.assertTrue(metric in history["train"].keys())
            self.assertTrue(metric in history["val"].keys())
def test_fit_cuda(self):
    """Smoke-test cnn.fit on CUDA with mixed precision enabled (fp16=True)."""
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
        criterion = nn.CrossEntropyLoss()
        history = cnn.fit(
            net, 1, train_loader, val_loader, criterion,
            device="cuda", optimizer=optimizer, num_batches=10, fp16=True,
        )
        self.assertIsInstance(history, Dict)
        # Both splits must be present, each carrying all three metrics.
        for split in ("train", "val"):
            self.assertTrue(split in history.keys())
        for metric in ("top1_acc", "top5_acc", "loss"):
            self.assertTrue(metric in history["train"].keys())
            self.assertTrue(metric in history["val"].keys())
def test_val_sanity_fit(self):
    """val_sanity_fit should return a truthy result for every supported model."""
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        criterion = nn.CrossEntropyLoss()
        result = cnn.val_sanity_fit(net, val_loader, criterion, "cpu", num_batches=10)
        self.assertTrue(result)
def test_infer(self):
    """Run a single-image forward pass through every supported model in eval mode."""
    image = Image.open("tests/assets/grace_hopper_517x606.jpg")
    tensor = im2tensor(image)
    # im2tensor must produce a batched input: (N, C, H, W).
    self.assertEqual(tensor.ndim, 4)
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None).eval()
        logits = net(tensor)
        # 10 classes were requested at construction time.
        self.assertEqual(logits.shape[1], 10)
        self.assertEqual(logits.ndim, 2)
def test_train(self):
    """Forward a PIL image through each supported model.

    NOTE(review): despite the name, this only exercises a forward pass in
    the default (training) mode — no optimizer step is taken.
    """
    image = Image.open("tests/assets/grace_hopper_517x606.jpg")
    tensor = im2tensor(image)
    # Expect a batched (N, C, H, W) tensor from im2tensor.
    self.assertEqual(tensor.ndim, 4)
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        logits = net(tensor)
        self.assertEqual(logits.shape[1], 10)
        self.assertEqual(logits.ndim, 2)
def test_train_sanity_fit(self):
    """train_sanity_fit should return a truthy result for every supported model.

    Fix: removed an unused Adam optimizer (`opt`) that was constructed on
    every iteration but never passed to train_sanity_fit.
    """
    for model_name in supported_tv_models:
        model = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        loss = nn.CrossEntropyLoss()
        res = cnn.train_sanity_fit(model, train_loader, loss, "cpu", num_batches=10)
        self.assertTrue(res)
def test_val_step(self):
    """cnn.val_step returns a dict exposing loss / top1 / top5 metrics."""
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        criterion = nn.CrossEntropyLoss()
        metrics = cnn.val_step(net, val_loader, criterion, "cpu", num_batches=10)
        self.assertIsInstance(metrics, Dict)
        for key in ("loss", "top1", "top5"):
            self.assertTrue(key in metrics.keys())
def test_train_step(self):
    """cnn.train_step (with gradient penalty) returns loss / top1 / top5 metrics."""
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
        criterion = nn.CrossEntropyLoss()
        metrics = cnn.train_step(
            net, train_loader, criterion, "cpu", optimizer,
            num_batches=10, grad_penalty=True,
        )
        self.assertIsInstance(metrics, Dict)
        for key in ("loss", "top1", "top5"):
            self.assertTrue(key in metrics.keys())
# Build the train / validation dataloaders.
# NOTE(review): `train_set` is passed as BOTH arguments, so the validation
# loader iterates the *training* data — confirm whether a separate
# validation set should be passed as the second argument.
train_loader, valid_loader = create_loaders(
    train_set,
    train_set,
    train_batch_size=32,
    valid_batch_size=32,
)
print("Train and Validation Dataloaders Created")

print("Creating Model")
# model = model_factory.create_timm_model(config.MODEL_NAME, num_classes=config.NUM_ClASSES,
#                                         in_channels=config.IN_CHANNELS, pretrained=config.PRETRAINED,)
model = cnn.create_vision_cnn(
    "resnet50",
    num_classes=10,
    pretrained="imagenet",
)

# Prefer CUDA when available; otherwise fall back to CPU.
if torch.cuda.is_available():
    print("Model Created. Moving it to CUDA")
    device = "cuda"
else:
    print("Model Created. Training on CPU only")
    device = "cpu"

optimizer = optim.Adam(model.parameters(), lr=config.LEARNING_RATE)
# Optionally a scheduler:
# scheduler = optim.lr_scheduler.CyclicLR(optimizer=optimizer, base_lr=1e-4, max_lr=1e-3, mode="min")
train_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])
valid_transforms = T.Compose([T.ToTensor(), T.Normalize((0.5,), (0.5,))])

# Create CIFAR10 Dataset and DataLoaders
train_dataset = torchvision.datasets.CIFAR10("./data", download=True, train=True, transform=train_transforms)
valid_dataset = torchvision.datasets.CIFAR10("./data", download=True, train=False, transform=valid_transforms)

TRAIN_BATCH_SIZE = 512  # Training Batch Size
VALID_BATCH_SIZE = 512  # Validation Batch Size

train_loader = torch.utils.data.DataLoader(train_dataset, TRAIN_BATCH_SIZE, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_dataset, VALID_BATCH_SIZE, shuffle=False)

# Create Quantization Aware Model
qat_model = cnn.create_vision_cnn("mobilenet_v2", pretrained="imagenet", num_classes=10)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(qat_model.parameters(), lr=1e-3)

# Set Quantization Configurations.
# FIX: torch.quantization.prepare_qat reads the model's `qconfig` attribute;
# the original code assigned to `config`, so no qconfig was ever attached and
# QAT observers/fake-quant modules would not be inserted as intended.
qat_model.qconfig = torch.quantization.get_default_qat_qconfig("fbgemm")
_ = torch.quantization.prepare_qat(qat_model, inplace=True)

# We can fine-tune / train the qat_model on GPU too.
for param in qat_model.parameters():
    param.requires_grad = True
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
utils.seed_everything(SEED) print(f"Setting Seed for the run, seed = {config.SEED}") print("Creating Train and Validation Dataset") train_set, valid_set = create_cifar10_dataset(train_transforms, valid_transforms) print("Train and Validation Datasets Created") print("Creating DataLoaders") train_loader, valid_loader = create_loaders(train_set, train_set) print("Train and Validation Dataloaders Created") print("Creating Model") model = cnn.create_vision_cnn(MODEL_NAME, num_classes=NUM_ClASSES, pretrained="imagenet") if torch.cuda.is_available(): print("Model Created. Moving it to CUDA") device = "cuda" else: print("Model Created. Training on CPU only") device = "cpu" optimizer = optim.Adam(model.parameters(), lr=1e-3) criterion = (nn.CrossEntropyLoss() ) # All classification problems we need Cross entropy loss early_stopper = utils.EarlyStopping(patience=7, verbose=True,
# (continuation of a dataset constructor that begins before this chunk)
download=True, train=False, transform=valid_transforms)

# Create data loaders
train_loader = torch.utils.data.DataLoader(train_set, batch_size=32, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_set, batch_size=32, shuffle=False)

"""Quickvision Provides simple functions to create models with pretrained weights."""

# To create model with imagenet pretrained weights
model = cnn.create_vision_cnn("wide_resnet50_2", num_classes=10, pretrained="imagenet")

# Alternatively if you don't need pretrained weights
# model_bare = cnn.create_vision_cnn("resnet50", num_classes=10, pretrained=None)

# It also supports other weights, do check a list which are supported !
# model_ssl = cnn.create_vision_cnn("resnet50", num_classes=10, pretrained="ssl")

"""Just like in torch we define the criterion and optimizer"""
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

"""# 💪🏻 Training the Model

Instead of doing something like
def test_create_vision_cnn(self):
    """Every supported torchvision model name should build an nn.Module."""
    for model_name in supported_tv_models:
        net = cnn.create_vision_cnn(model_name, 10, pretrained=None)
        self.assertTrue(isinstance(net, nn.Module))