Example #1
def test_list_entrypoints(self):
    entry_lists = hub.list('ailzhang/torchhub_example', force_reload=True)
    self.assertObjectIn('mnist', entry_lists)
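Note: assertObjectIn is not part of the standard unittest API; it is a custom assertion provided by the PyTorch-internal TestCase that the original test class inherits from. A rough standalone equivalent using plain unittest, assuming only torch and the standard library (the class name below is made up for illustration):

import unittest
from torch import hub

class TorchHubListTest(unittest.TestCase):
    def test_list_entrypoints(self):
        # hub.list fetches the repo's hubconf.py, so network access is required.
        entry_lists = hub.list('ailzhang/torchhub_example', force_reload=True)
        self.assertIn('mnist', entry_lists)

if __name__ == '__main__':
    unittest.main()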
Example #2
def test_list_entrypoints(self):
    entry_lists = hub.list('pytorch/vision', force_reload=True)
    self.assertObjectIn('resnet18', entry_lists)
Example #3
def test_hub_entrypoints(github):
    assert set(hub.list(github)) == set(ENTRY_POINTS)
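Note: Example #3 depends on a github fixture and an ENTRY_POINTS constant defined elsewhere in its test module, so it is not runnable on its own. A self-contained pytest sketch of the same idea, with an illustrative repo and entrypoint (and a subset check instead of the strict equality above, since pytorch/vision exposes many entrypoints):

import pytest
from torch import hub

# Illustrative stand-ins for the fixture and constant used in Example #3.
EXPECTED_ENTRY_POINTS = {'resnet18'}

@pytest.mark.parametrize('github', ['pytorch/vision'])
def test_hub_entrypoints(github):
    assert EXPECTED_ENTRY_POINTS <= set(hub.list(github))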
Example #4
def test_list_entrypoints(self):
    entry_lists = hub.list("pytorch/vision", force_reload=True)
    assert "resnet18" in entry_lists
Example #5
def test_list_entrypoints(self):
    entry_lists = hub.list('pytorch/vision', force_reload=True)
    assert 'resnet18' in entry_lists
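The check in Examples #2, #4 and #5 can also be run outside a test class. A minimal standalone sketch, assuming torch is installed and network access is available:

from torch import hub

# force_reload=True ignores any cached copy and re-downloads the repo.
entrypoints = hub.list('pytorch/vision', force_reload=True)
assert 'resnet18' in entrypoints
print(entrypoints)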
Example #6
    def train_model(
        self,
        train_csv,
        train_data_dir,
        val_csv,
        val_data_dir,
        num_epochs=2,
        batch_size=64,
        save_graph=True,
        graph_fname="train-val-loss.png",
        save_model=True,
        model_fname="resnest_model",
    ):
        train_dataset = ImageDataset(train_csv, train_data_dir,
                                     self.transform.training)
        validation_dataset = ImageDataset(val_csv, val_data_dir,
                                          self.transform.validation)

        train_loader = DataLoader(train_dataset,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  drop_last=True)

        validation_loader = DataLoader(
            validation_dataset,
            batch_size=batch_size,
            shuffle=False,
            drop_last=False,
        )

        # get list of models
        hub.list("zhanghang1989/ResNeSt", force_reload=True)
        # load pretrained models, using ResNeSt-50 as an example
        resnest = hub.load("zhanghang1989/ResNeSt",
                           "resnest50",
                           pretrained=True)
        # Freeze layers
        for param in resnest.parameters():
            param.requires_grad = False

        # Define the last layers to be retrained
        num_ftrs = resnest.fc.in_features
        # Redefine the last layer of the model
        resnest.fc = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(num_ftrs, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.ReLU(),
            nn.Linear(256, 17),
        )
        resnest = resnest.to(self.device)
        optimizer = optim.SGD(resnest.fc.parameters(),
                              lr=0.01,
                              momentum=0.9,
                              weight_decay=0.01)

        # Weights for imbalanced data
        pos_weights = DataStats.pos_weights(train_csv)
        pos_weights = log(as_tensor(pos_weights, dtype=float))
        pos_weights = pos_weights.to(self.device)

        training_losses, validation_losses = Trainer.train(
            resnest,
            optimizer,
            train_loader,
            validation_loader,
            num_epochs=num_epochs,
            # scheduler=scheduler,
            verbose=True,
            pos_weights=pos_weights,
            device=self.device,
        )

        if save_graph:
            PlotUtils.plot_losses(training_losses, validation_losses,
                                  graph_fname)

        if save_model:
            self._save_model(resnest, "", model_fname)

        if self.device == "cuda":
            cuda.empty_cache()
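The hub-related steps in Example #6 (list the repo's entrypoints, load a pretrained ResNeSt-50, freeze the backbone, and attach a new classification head) also work outside the train_model method. A minimal standalone sketch, keeping the 17-class output used above but with a single Linear head for brevity:

import torch
from torch import hub, nn

# Discover the entrypoints exposed by the ResNeSt hub repo (network access required).
print(hub.list("zhanghang1989/ResNeSt", force_reload=True))

# Load a pretrained ResNeSt-50 backbone.
model = hub.load("zhanghang1989/ResNeSt", "resnest50", pretrained=True)

# Freeze the backbone so only the new head receives gradients.
for param in model.parameters():
    param.requires_grad = False

# Replace the final fully connected layer; the new layer's parameters
# are trainable by default.
model.fc = nn.Linear(model.fc.in_features, 17)

# Optimize only the parameters of the new head.
optimizer = torch.optim.SGD(model.fc.parameters(), lr=0.01,
                            momentum=0.9, weight_decay=0.01)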