Example 1
def main(hparams):
    
    seed_everything(0)
    
    # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
    if isinstance(hparams.gpus, str):
        if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
            torch.cuda.set_device(int(hparams.gpus[0]))
    
    # Model
    classifier = CIFAR10_Module(hparams)
    
    # Trainer
    lr_logger = LearningRateMonitor()
    logger = TensorBoardLogger("logs", name=hparams.classifier)
    trainer = Trainer(callbacks=[lr_logger], gpus=hparams.gpus, max_epochs=hparams.max_epochs,
                      deterministic=True, logger=logger)
    trainer.fit(classifier)

    # Load best checkpoint
    checkpoint_path = os.path.join(os.getcwd(), 'logs', hparams.classifier,
                                   'version_' + str(classifier.logger.version),
                                   'checkpoints')
    classifier = CIFAR10_Module.load_from_checkpoint(
        os.path.join(checkpoint_path, os.listdir(checkpoint_path)[0]))
    
    # Save weights from checkpoint
    statedict_path = os.path.join(os.getcwd(), 'cifar10_models', 'state_dicts', hparams.classifier + '.pt')
    torch.save(classifier.model.state_dict(), statedict_path)
    
    # Test model
    trainer.test(classifier)
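The `hparams` object in these examples comes from an argparse namespace. A minimal sketch of a matching entry point; the flag names and defaults here are assumptions inferred from the attributes the examples read, not the original repo's parser:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Flag names and defaults are assumptions based on the hparams.* accesses above.
    parser.add_argument('--classifier', type=str, default='resnet18')
    parser.add_argument('--gpus', type=str, default='0,')  # '0,' selects GPU 0
    parser.add_argument('--max_epochs', type=int, default=100)
    args = parser.parse_args()
    main(args)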
Example 2
def main(hparams):

    seed_everything(0)

    # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
    if isinstance(hparams.gpus, str):
        if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
            torch.cuda.set_device(int(hparams.gpus[0]))

    # Model
    classifier = CIFAR10_Module(
        hparams, pretrained=hparams.pretrained
    )  # the target is passed via hparams so the module is built with the matching loss function
    # IMPORTANT: if pretrained=True, initialize the target models from full-dataset models yourself

    # Trainer
    lr_logger = LearningRateMonitor()
    logger = TensorBoardLogger("logs", name=hparams.classifier)
    trainer = Trainer(callbacks=[lr_logger],
                      gpus=hparams.gpus,
                      max_epochs=hparams.max_epochs,
                      deterministic=True,
                      logger=logger)
    trainer.fit(classifier)

    # Load best checkpoint
    checkpoint_path = os.path.join(os.getcwd(), 'logs', hparams.classifier,
                                   'version_' + str(classifier.logger.version),
                                   'checkpoints')
    classifier = CIFAR10_Module.load_from_checkpoint(
        os.path.join(checkpoint_path,
                     os.listdir(checkpoint_path)[0]))

    # Save weights from checkpoint
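    # A per-target run saves to <classifier>/<target>.pt; otherwise a single <classifier>.pt file is written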
    if hparams.target >= 0:
        statedict_path = [
            os.getcwd(), 'cifar10_models', 'state_dicts', hparams.classifier,
            str(hparams.target) + '.pt'
        ]
        os.makedirs(os.path.join(*statedict_path[:-1]), exist_ok=True)
    else:
        statedict_path = [
            os.getcwd(), 'cifar10_models', 'state_dicts',
            hparams.classifier + '.pt'
        ]
    statedict_path = os.path.join(*statedict_path)
    torch.save(classifier.model.state_dict(), statedict_path)

    # Test model
    trainer.test(classifier)
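Both examples above pick a checkpoint with `os.listdir(checkpoint_path)[0]`, which returns files in arbitrary order. A hedged sketch of a more robust variant using Lightning's `ModelCheckpoint` callback; the monitored metric name 'val_acc' is an assumption and should match whatever the module actually logs:

from pytorch_lightning.callbacks import ModelCheckpoint

checkpoint_callback = ModelCheckpoint(monitor='val_acc', mode='max')  # 'val_acc' is assumed
trainer = Trainer(callbacks=[lr_logger, checkpoint_callback],
                  gpus=hparams.gpus, max_epochs=hparams.max_epochs,
                  deterministic=True, logger=logger)
trainer.fit(classifier)
# Lightning records the best-scoring checkpoint path for us.
classifier = CIFAR10_Module.load_from_checkpoint(checkpoint_callback.best_model_path)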
Example 3
def main(hparams):
    cifar10_download.main()

    if not torch.cuda.is_available():
        hparams.cuda = False

    hparams.gpus = '0,' if hparams.cuda else None
    
    seed_everything(hparams.seed)

    # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
    if isinstance(hparams.gpus, str):
        if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
            torch.cuda.set_device(int(hparams.gpus[0]))
    
    # Model
    classifier = CIFAR10_Module(hparams)
    
    # Trainer
    lr_logger = LearningRateMonitor()
    logger = TensorBoardLogger("logs", name=hparams.classifier)
    trainer = Trainer(callbacks=[lr_logger], gpus=hparams.gpus, max_epochs=hparams.max_epochs,
                      deterministic=True, logger=logger, checkpoint_callback=False,
                      fast_dev_run=hparams.debug)
    if not hparams.eval:
        trainer.fit(classifier)
    else:
        trainer.test(classifier)
    if hparams.save_model:
        model = classifier.student_models[0] if hparams.num_students else classifier.teacher_model
        torch.save(model.state_dict(), 'logs/{}.pt'.format(hparams.classifier))
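The `teacher_model` and `student_models` attributes in Example 3 suggest knowledge distillation. Purely as an illustrative sketch (not this repo's actual objective), the standard soft-target loss combines KL divergence on temperature-softened logits with cross-entropy on the true labels:

import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, labels, T=4.0, alpha=0.9):
    # Soft targets: KL divergence between temperature-softened distributions,
    # scaled by T^2 to keep gradient magnitudes comparable across temperatures.
    soft = F.kl_div(F.log_softmax(student_logits / T, dim=1),
                    F.softmax(teacher_logits / T, dim=1),
                    reduction='batchmean') * (T * T)
    # Hard targets: standard cross-entropy against the true labels.
    hard = F.cross_entropy(student_logits, labels)
    return alpha * soft + (1.0 - alpha) * hard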
Example 4
def main(hparams):
    if not hparams.no_gpu:
        # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
        if isinstance(hparams.gpus, str):
            if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
                torch.cuda.set_device(int(hparams.gpus[0]))
    else:
        hparams.gpus = None

    model = CIFAR10_Module(hparams, pretrained=True)
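    # Print the module hierarchy to locate the layer being hooked below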
    print(model)
    for name, module in model.named_modules():
        print(name)
    trainer = Trainer(gpus=hparams.gpus,
                      default_root_dir=os.path.join(os.getcwd(), 'test_temp'))
    activation = {}

    def hook(model, input_, output):
        activation['output'] = output.detach()

    model.model.features[18][1].register_forward_hook(hook)
    trainer.test(model)
    shutil.rmtree(os.path.join(os.getcwd(), 'test_temp'))
    print(activation)
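Example 4 reads an intermediate activation with a forward hook. A self-contained sketch of the same mechanism on a toy network; the index `features[18][1]` above is specific to that repo's model and is not reproduced here:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
                    nn.Conv2d(8, 16, 3, padding=1))
activation = {}

def hook(module, input_, output):
    # Detach so the stored tensor does not keep the autograd graph alive.
    activation['conv2'] = output.detach()

handle = net[2].register_forward_hook(hook)
net(torch.randn(1, 3, 32, 32))
handle.remove()  # remove the hook once the activation has been captured
print(activation['conv2'].shape)  # torch.Size([1, 16, 32, 32])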
Example 5
def main(hparams):
    # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
    if isinstance(hparams.gpus, str):
        if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
            torch.cuda.set_device(int(hparams.gpus[0]))

    model = CIFAR10_Module(hparams, pretrained=True)
    trainer = Trainer(gpus=hparams.gpus,
                      default_root_dir=os.path.join(os.getcwd(), 'test_temp'))
    trainer.test(model)
    shutil.rmtree(os.path.join(os.getcwd(), 'test_temp'))
Example 6
def main(hparams):
    # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
    if isinstance(hparams.gpus, str):
        if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
            torch.cuda.set_device(int(hparams.gpus[0]))

    model = CIFAR10_Module(hparams, pretrained=False)
    models_path = os.path.expanduser(
        '~/models/cifar10/cifar10_models/state_dicts')
    load_fn = os.path.join(models_path, '{}.pt'.format(hparams.classifier))
    state_dict = torch.load(load_fn, map_location='cpu')
    model.model.load_state_dict(state_dict)
    trainer = Trainer(gpus=hparams.gpus,
                      default_root_dir=os.path.join(os.getcwd(), 'test_temp'))
    trainer.test(model)
    shutil.rmtree(os.path.join(os.getcwd(), 'test_temp'))
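If `load_state_dict` in Example 6 fails with missing or unexpected keys, a common cause is that the file was saved from the wrapping LightningModule rather than the bare model, leaving a 'model.' prefix on every key. A hedged sketch of the usual fix:

state_dict = torch.load(load_fn, map_location='cpu')
# Strip a 'model.' prefix left by saving the LightningModule instead of the bare model.
state_dict = {k[len('model.'):] if k.startswith('model.') else k: v
              for k, v in state_dict.items()}
model.model.load_state_dict(state_dict, strict=True)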
Example 7
def main(hparams):
    if not hparams.no_gpu:
        # If training on only one GPU, call set_device first; otherwise PyTorch stores the model on GPU 0 by default
        if isinstance(hparams.gpus, str):
            if len(hparams.gpus) == 2:  # GPU number and comma, e.g. '0,' or '1,'
                torch.cuda.set_device(int(hparams.gpus[0]))
    else:
        hparams.gpus = None

    module = CIFAR10_Module(hparams, pretrained=True)
    if not hparams.no_gpu:
        module.model.cuda()  # nn.Module.cuda() moves parameters in place; module.model is used below

    mean = [0.4914, 0.4822, 0.4465]
    std = [0.2023, 0.1994, 0.2010]
    transform_dataset = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])

    if hparams.all:
        hparams.train = True
        hparams.test = True

    if hparams.probabilities:
        folder = 'probabilities'
    else:
        folder = 'labels'
    # Create the output folder up front so a test-only run can also save results
    os.makedirs(folder, exist_ok=True)

    if hparams.train:
        train_dataset = CIFAR10(hparams.data_dir,
                                train=True,
                                download=False,
                                transform=transform_dataset)
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=hparams.batch_size,
                                      num_workers=4,
                                      shuffle=False,
                                      drop_last=False,
                                      pin_memory=True)
        print('Evaluating on the train dataset')
        labels = evaluate_for_dataset(module.model,
                                      train_dataloader,
                                      probabilities=hparams.probabilities,
                                      gpu=not hparams.no_gpu)
        file_path = os.path.join(
            folder, '{}_{}.npy'.format(hparams.classifier, 'train'))
        save_labels(file_path, labels, probabilities=hparams.probabilities)

    if hparams.test:
        test_dataset = CIFAR10(hparams.data_dir,
                               train=False,
                               download=False,
                               transform=transform_dataset)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=hparams.batch_size,
                                     num_workers=4,
                                     shuffle=False,
                                     drop_last=False,
                                     pin_memory=True)
        print('Evaluating on the test dataset')
        labels = evaluate_for_dataset(module.model,
                                      test_dataloader,
                                      probabilities=hparams.probabilities,
                                      gpu=not hparams.no_gpu)
        file_path = os.path.join(
            folder, '{}_{}.npy'.format(hparams.classifier, 'test'))
        save_labels(file_path, labels, probabilities=hparams.probabilities)
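`evaluate_for_dataset` and `save_labels` are the repo's own helpers and are not shown on this page. A hypothetical sketch of what they might do, assuming the goal is to dump either hard labels or softmax probabilities for every example in dataloader order:

import numpy as np
import torch
import torch.nn.functional as F

def evaluate_for_dataset(model, dataloader, probabilities=False, gpu=True):
    model.eval()
    outputs = []
    with torch.no_grad():
        for x, _ in dataloader:
            logits = model(x.cuda() if gpu else x)
            # Either full class distributions or hard argmax labels.
            out = F.softmax(logits, dim=1) if probabilities else logits.argmax(dim=1)
            outputs.append(out.cpu())
    return torch.cat(outputs).numpy()

def save_labels(path, labels, probabilities=False):
    # Probabilities stay float32; hard labels are stored as integers.
    np.save(path, labels.astype(np.float32 if probabilities else np.int64))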