import os

import torch

# Trainer is assumed to be inferno-pytorch's trainer class; it provides the
# load()/save() and training API used throughout the examples in this section.
from inferno.trainers.basic import Trainer


def _load_model(checkpoint_path, device, use_best=True, out_channels=1):
    # load the (best) checkpoint and keep only the wrapped model
    model = Trainer().load(checkpoint_path,
                           best=use_best,
                           map_location=torch.device(device)).model

    # monkey patch the model if it does not have an out_channels attribute
    if not hasattr(model, 'out_channels'):
        model.out_channels = out_channels

    model.eval()
    return model
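
A minimal usage sketch for reference; the checkpoint directory and device string below are placeholders, not taken from the original example:

model = _load_model('checkpoints', device='cpu', use_best=True, out_channels=2)
print(model.out_channels)  # 2 if the attribute had to be monkey patched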
Example #2
def save_best_model(project_directory):
    # load the best checkpoint from the project's "Weights" directory onto the CPU
    trainer = Trainer().load(from_directory=os.path.join(project_directory, "Weights"),
                             best=True,
                             map_location='cpu')

    # save the full model object
    model = trainer.model
    save_path = os.path.join(project_directory, "Weights", "best_model.nn")
    torch.save(model, save_path)

    # save the state dict
    save_path = os.path.join(project_directory, "Weights", "best_model.state")
    torch.save(model.state_dict(), save_path)
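
A short usage sketch; the project path below is a placeholder and assumes the directory already contains an inferno "Weights" checkpoint:

project_directory = '/path/to/project'  # placeholder
save_best_model(project_directory)

# both exported files can then be loaded without the Trainer wrapper
best_model = torch.load(os.path.join(project_directory, "Weights", "best_model.nn"))
state_dict = torch.load(os.path.join(project_directory, "Weights", "best_model.state"))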
Example #3
def train_model(model, loaders, **kwargs):
    trainer = Trainer(model)
    trainer.build_criterion('BCEWithLogitsLoss')
    trainer.build_optimizer('Adam', lr=kwargs.get('lr', 0.0001))
    # optional checkpointing/validation setup, left disabled in the original source
    # (ensure_dir is a helper from the original module):
    # trainer.validate_every((kwargs.get('validate_every', 10), 'epochs'))
    # trainer.save_every((kwargs.get('save_every', 10), 'epochs'))
    # trainer.save_to_directory(ensure_dir(kwargs.get('save_dir', 'save_dir')))
    trainer.set_max_num_epochs(kwargs.get('max_num_epochs', 200))

    # bind the training and validation loaders
    trainer.bind_loader('train', loaders[0])
    trainer.bind_loader('validate', loaders[1])

    # USE_CUDA is a module-level flag in the original source
    # (e.g. USE_CUDA = torch.cuda.is_available())
    if USE_CUDA:
        trainer.cuda()

    # do the training
    trainer.fit()

    return trainer
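
A hedged usage sketch: the toy data, loader settings, and stand-in model below are illustrative assumptions, not part of the original example, and USE_CUDA is defined here the way train_model expects it at module level.

import torch
from torch.utils.data import DataLoader, TensorDataset

USE_CUDA = torch.cuda.is_available()  # the module-level flag train_model reads

# toy single-channel inputs with binary targets, matching BCEWithLogitsLoss
inputs = torch.randn(32, 1, 64, 64)
targets = (torch.rand(32, 1, 64, 64) > 0.5).float()
dataset = TensorDataset(inputs, targets)

train_loader = DataLoader(dataset, batch_size=4, shuffle=True)
validate_loader = DataLoader(dataset, batch_size=4)

# a deliberately small stand-in model; any module producing matching output shapes works
toy_model = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1)

trainer = train_model(toy_model, (train_loader, validate_loader),
                      lr=1e-4, max_num_epochs=5)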