Example #1
 def test_none_attributes(self, saved_argus_model):
     optimizer_state, path, model = saved_argus_model
     assert load_model(path, optimizer=None).optimizer is None
     assert load_model(path, loss=None).loss is None
     assert load_model(
         path, prediction_transform=None).prediction_transform is None
     with pytest.raises(ValueError):
         assert load_model(path, nn_module=None)
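
Note: the test above suggests that individual components can be disabled at load time by passing None. A minimal sketch of loading a checkpoint for inference only, assuming a hypothetical file model.pth that was previously saved with model.save():

from argus import load_model

# 'model.pth' is a placeholder path; skip building the optimizer and loss,
# which are not needed for prediction.
inference_model = load_model('model.pth', optimizer=None, loss=None, device='cpu')
inference_model.nn_module.eval()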
Example #2
def train_fold(save_dir, train_folds, val_folds, model_path):
    depth_trns = SimpleDepthTransform()
    train_trns = SaltTransform(IMAGE_SIZE, True, 'crop')
    val_trns = SaltTransform(IMAGE_SIZE, False, 'crop')
    train_dataset = SaltDataset(TRAIN_FOLDS_PATH, train_folds, train_trns,
                                depth_trns)
    val_dataset = SaltDataset(TRAIN_FOLDS_PATH, val_folds, val_trns,
                              depth_trns)
    train_loader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True,
                              num_workers=8)
    val_loader = DataLoader(val_dataset,
                            batch_size=BATCH_SIZE,
                            shuffle=False,
                            num_workers=8)

    model = load_model(model_path)
    model.loss.lovasz_weight = 0.5
    model.loss.prob_weight = 0.5

    callbacks = [
        MonitorCheckpoint(save_dir,
                          monitor='val_crop_iout',
                          max_saves=3,
                          copy_last=False),
        LoggingToFile(os.path.join(save_dir, 'log.txt')), update_lr
    ]

    model.fit(train_loader,
              val_loader=val_loader,
              max_epochs=500,
              callbacks=callbacks,
              metrics=['crop_iout'])
Example #3
 def test_replace_optimizer_params(self, saved_argus_model):
     path, model = saved_argus_model
     optimizer_params = ('SGD', {'lr': 0.42})
     loaded_model = load_model(path, optimizer=optimizer_params)
     assert isinstance(loaded_model.optimizer, torch.optim.SGD)
     assert loaded_model.get_lr() == 0.42
     assert loaded_model.params['optimizer'] == optimizer_params
     assert not isinstance(model.optimizer, torch.optim.SGD)
Example #4
def check_checkpoint(path, engine, epoch, val_loss,
                     file_format='model-{epoch:03d}-{val_loss:.6f}.pth'):
    expected_path = path / file_format.format(epoch=epoch, val_loss=val_loss)
    assert expected_path.exists()
    loaded_model = load_model(expected_path)
    assert loaded_model.params == engine.state.model.params
    assert check_weights(engine.state.model, loaded_model)
    return True
Example #5
    def __init__(self, model_path):
        self.model = load_model(model_path)
        self.model.nn_module.final = torch.nn.Sigmoid()  # replace the final module with a sigmoid
        self.model.nn_module.eval()

        self.depth_trns = SimpleDepthTransform()
        self.crop_trns = CenterCrop(ORIG_IMAGE_SIZE)
        self.trns = SaltTransform(PRED_IMAGE_SIZE, False, TRANSFORM_MODE)
Example #6
    def test_save(self, tmpdir, linear_argus_model_instance):
        path = str(tmpdir.mkdir("experiment").join("model.pth"))
        linear_argus_model_instance.save(path)
        model = load_model(path, device='cpu')
        assert model.params == linear_argus_model_instance.params

        state = torch.load(path)
        assert set(state.keys()) == {'model_name', 'params', 'nn_state_dict'}
Example #7
    def __init__(self, model_path):
        self.model = load_model(model_path)
        self.model.nn_module.eval()

        self.depth_trns = SimpleDepthTransform()
        self.crop_trns = CenterCrop(ORIG_IMAGE_SIZE)
        self.trns = SaltTransform(PRED_IMAGE_SIZE, False, TRANSFORM_MODE)

        self.flip = HorizontalFlip()
Example #8
    def __init__(self, model_paths, bbox_transform, image_transform, device=None):
        assert model_paths
        self.models = []
        for model_path in model_paths:
            model = load_model(model_path, device)
            model.nn_module.eval()
            self.models.append(model)

        self.bbox_transform = bbox_transform
        self.image_transform = image_transform
Example #9
 def test_replace_nn_module_params(self, saved_argus_model):
     optimizer_state, path, model = saved_argus_model
     nn_module_params = ('LinearNet', {
         'in_features': 4,
         'out_features': 1,
         'sigmoid': True
     })
     loaded_model = load_model(path, nn_module=nn_module_params)
     assert loaded_model.nn_module.sigmoid
     assert loaded_model.params['nn_module'] == nn_module_params
     assert not model.nn_module.sigmoid
Example #10
 def test_replace_optimizer_params(self, saved_argus_model):
     optimizer_state, path, model = saved_argus_model
     optimizer_params = ('Adam', {'lr': 0.42})
     loaded_model = load_model(
         path,
         optimizer=optimizer_params,
         change_state_dict_func=lambda nn, optim: (nn, None))
     assert isinstance(loaded_model.optimizer, torch.optim.Adam)
     assert loaded_model.get_lr() == 0.42
     assert loaded_model.params['optimizer'] == optimizer_params
     assert not isinstance(model.optimizer, torch.optim.Adam)
Example #11
    def test_change_state_dict_func(self, saved_argus_model):
        path, model = saved_argus_model

        def change_state_dict_func(nn_state_dict):
            nn_state_dict['fc.weight'][0][0] = 0
            return nn_state_dict

        loaded_model = load_model(
            path, change_state_dict_func=change_state_dict_func)
        assert loaded_model.nn_module.state_dict()['fc.weight'][0][0] == 0
        assert model.nn_module.state_dict()['fc.weight'][0][0] != 0
Example #12
    def test_replace_model_name(self, saved_argus_model, linear_net_class,
                                vision_net_class):
        path, model = saved_argus_model

        class ArgusReplaceModel(Model):
            nn_module = {
                'LinearNet': linear_net_class,
                'VisionNet': vision_net_class
            }
            prediction_transform = {
                'Sigmoid': nn.Sigmoid,
                'Identity': Identity
            }

        loaded_model = load_model(path, model_name='ArgusReplaceModel')
        assert isinstance(loaded_model, ArgusReplaceModel)
        assert not isinstance(loaded_model, model.__class__)

        with pytest.raises(ImportError):
            load_model(path, model_name='Qwerty')
Example #13
 def test_replace_nn_module_params(self, saved_argus_model):
     path, model = saved_argus_model
     nn_module_params = ('VisionNet', {
         'n_channels': 3,
         'n_classes': 1,
         'p_dropout': 0.42
     })
     loaded_model = load_model(path, nn_module=nn_module_params)
     assert loaded_model.nn_module.p_dropout == 0.42
     assert loaded_model.params['nn_module'] == nn_module_params
     assert loaded_model.nn_module.p_dropout != model.nn_module.p_dropout
Example #14
 def __init__(self,
              model_path,
              transforms,
              batch_size,
              tile_size,
              tile_step,
              device='cuda'):
     self.model = load_model(str(model_path), device=device)
     self.transforms = transforms
     self.tile_size = tile_size
     self.tile_step = tile_step
     self.batch_size = batch_size
Example #15
 def __init__(self,
              model_path,
              batch_size,
              transform,
              device='cuda',
              logits=False,
              tta=False):
     self.model = load_model(model_path, device=device)
     if logits:
         self.model.prediction_transform = lambda x: x
     self.batch_size = batch_size
     self.transform = transform
     self.tta = tta
Example #16
    def test_load_model(self, saved_argus_model):
        path, model = saved_argus_model
        loaded_model = load_model(path, device='cpu')

        assert loaded_model.params == model.params
        assert check_weights(model, loaded_model)
        assert isinstance(loaded_model.loss, model.loss.__class__)
        assert isinstance(loaded_model.optimizer, model.optimizer.__class__)
        assert isinstance(loaded_model.prediction_transform,
                          model.prediction_transform.__class__)

        nn.init.xavier_uniform_(loaded_model.nn_module.fc.weight)
        with pytest.raises(AssertionError):
            assert check_weights(model, loaded_model)
Example #17
def train_fold(base_model_path, save_dir, train_folds, val_folds, folds_data,
               noisy_data):
    train_transfrom = get_transforms(train=True,
                                     size=CROP_SIZE,
                                     wrap_pad_prob=WRAP_PAD_PROB)

    mixer = RandomMixer([
        SigmoidConcatMixer(sigmoid_range=(3, 12)),
        AddMixer(alpha_dist='uniform')
    ],
                        p=[0.6, 0.4])
    mixer = UseMixerWithProb(mixer, prob=MIXER_PROB)

    curated_dataset = FreesoundDataset(folds_data,
                                       train_folds,
                                       transform=train_transfrom,
                                       mixer=mixer)
    noisy_dataset = FreesoundNoisyDataset(noisy_data,
                                          transform=train_transfrom,
                                          mixer=mixer)
    train_dataset = RandomDataset([noisy_dataset, curated_dataset],
                                  p=[NOISY_PROB, 1 - NOISY_PROB],
                                  size=DATASET_SIZE)

    val_dataset = FreesoundDataset(folds_data, val_folds,
                                   get_transforms(False, CROP_SIZE))
    train_loader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True,
                              num_workers=NUM_WORKERS)
    val_loader = DataLoader(val_dataset,
                            batch_size=BATCH_SIZE * 2,
                            shuffle=False,
                            num_workers=NUM_WORKERS)

    model = load_model(base_model_path, device=DEVICE)
    model.set_lr(BASE_LR)

    callbacks = [
        MonitorCheckpoint(save_dir, monitor='val_lwlrap', max_saves=3),
        CosineAnnealing(T_0=10, T_mult=2, eta_min=0.00001),
        LoggingToFile(save_dir / 'log.txt'),
    ]

    model.fit(train_loader,
              val_loader=val_loader,
              max_epochs=150,
              callbacks=callbacks,
              metrics=['multi_accuracy', 'lwlrap'])
Example #18
    def test_change_state_dict_func(self, saved_argus_model):
        optimizer_state, path, model = saved_argus_model

        def change_state_dict_func(nn_state_dict, optimizer_state_dict):
            nn_state_dict['fc.weight'][0][0] = 0
            if optimizer_state:
                optimizer_state_dict['param_groups'][0]['lr'] = 0.123
            return nn_state_dict, optimizer_state_dict

        loaded_model = load_model(
            path, change_state_dict_func=change_state_dict_func)
        assert loaded_model.nn_module.state_dict()['fc.weight'][0][0] == 0
        assert model.nn_module.state_dict()['fc.weight'][0][0] != 0
        if optimizer_state:
            assert loaded_model.get_lr() == 0.123
            assert model.get_lr() != 0.123
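
Note: the single-argument change_state_dict_func in Example #11 and the two-argument form here appear to come from different argus versions; the newer API passes both the nn_module and optimizer state dicts. A minimal sketch, assuming the two-argument API and a hypothetical checkpoint path, that discards the saved optimizer state so a replaced optimizer starts from scratch (the same pattern as the lambda in Example #10):

from argus import load_model

def drop_optimizer_state(nn_state_dict, optimizer_state_dict):
    # Keep the network weights, discard the saved optimizer state.
    return nn_state_dict, None

model = load_model('model.pth',
                   optimizer=('Adam', {'lr': 1e-3}),
                   change_state_dict_func=drop_optimizer_state)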
Example #19
    def test_state_load_func(self, saved_argus_model):
        def custom_state_load_func(file_path):
            file_path = Path(file_path) / 'model.pth'
            return torch.load(file_path)

        optimizer_state, path, model = saved_argus_model
        loaded_model = load_model(Path(path).parent,
                                  device='cpu',
                                  state_load_func=custom_state_load_func)

        assert loaded_model.params == model.params
        assert check_weights(model, loaded_model)
        assert isinstance(loaded_model.loss, model.loss.__class__)
        assert isinstance(loaded_model.optimizer, model.optimizer.__class__)
        assert isinstance(loaded_model.prediction_transform,
                          model.prediction_transform.__class__)
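
Note: state_load_func replaces the default loading of the checkpoint file, which is useful when the file lives in a non-standard location or needs remapping. A minimal sketch, assuming a hypothetical checkpoint saved on GPU that should be deserialized straight to CPU:

import torch
from argus import load_model

def cpu_state_load_func(file_path):
    # Force deserialization onto the CPU regardless of where it was saved.
    return torch.load(file_path, map_location='cpu')

model = load_model('checkpoint.pth', state_load_func=cpu_state_load_func)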
Example #20
    def test_load_model(self, saved_argus_model):
        optimizer_state, path, model = saved_argus_model
        loaded_model = load_model(path, device='cpu')

        assert loaded_model.params == model.params
        assert check_weights(model, loaded_model)
        assert isinstance(loaded_model.loss, model.loss.__class__)
        assert isinstance(loaded_model.optimizer, model.optimizer.__class__)
        assert isinstance(loaded_model.prediction_transform,
                          model.prediction_transform.__class__)

        nn.init.xavier_uniform_(loaded_model.nn_module.fc.weight)
        with pytest.raises(AssertionError):
            assert check_weights(model, loaded_model)

        if optimizer_state:
            assert torch.all(
                loaded_model.optimizer.state_dict()['state'][0]['momentum_buffer']
                == model.optimizer.state_dict()['state'][0]['momentum_buffer'])
        else:
            assert loaded_model.optimizer.state_dict()['state'] == {}
Example #21
def test_pipeline(tmpdir, get_batch_function, linear_argus_model_instance):
    model = linear_argus_model_instance
    experiment_dir = Path(tmpdir.join("path/to/pipeline_experiment/"))
    train_dataset = TensorDataset(*get_batch_function(batch_size=4096))
    val_dataset = TensorDataset(*get_batch_function(batch_size=512))
    train_loader = DataLoader(train_dataset,
                              shuffle=True,
                              drop_last=True,
                              batch_size=32)
    val_loader = DataLoader(val_dataset, shuffle=False, batch_size=64)

    monitor_checkpoint = MonitorCheckpoint(dir_path=experiment_dir,
                                           monitor='val_loss',
                                           max_saves=1)
    callbacks = [
        monitor_checkpoint,
        EarlyStopping(monitor='val_loss', patience=9),
        ReduceLROnPlateau(monitor='val_loss', factor=0.64, patience=3),
        LoggingToFile(experiment_dir / 'log.txt'),
        LoggingToCSV(experiment_dir / 'log.csv')
    ]

    model.fit(train_loader,
              val_loader=val_loader,
              num_epochs=100,
              callbacks=callbacks)

    val_loss = model.validate(val_loader)['val_loss']
    assert val_loss < 0.1

    model_paths = sorted(experiment_dir.glob('*.pth'))
    assert len(model_paths) == 1

    loaded_model = load_model(model_paths[0])
    loaded_val_loss = loaded_model.validate(val_loader)['val_loss']
    assert loaded_val_loss == monitor_checkpoint.best_value

    assert (experiment_dir / 'log.txt').exists()
    assert (experiment_dir / 'log.csv').exists()
Example #22
def check_checkpoint(path,
                     engine,
                     epoch,
                     val_loss,
                     file_format='model-{epoch:03d}-{val_loss:.6f}.pth',
                     optimizer_state=False):
    expected_path = path / file_format.format(epoch=epoch, val_loss=val_loss)
    assert expected_path.exists()
    loaded_model = load_model(expected_path)
    assert loaded_model.params == engine.state.model.params
    assert check_weights(engine.state.model, loaded_model)

    loaded_state = torch.load(expected_path)
    if optimizer_state:
        assert set(loaded_state.keys()) == {
            'model_name', 'params', 'nn_state_dict', 'optimizer_state_dict'
        }
    else:
        assert set(loaded_state.keys()) == {
            'model_name', 'params', 'nn_state_dict'
        }

    return True
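
Note: the helper above documents the layout of a saved checkpoint: a plain dict with 'model_name', 'params', 'nn_state_dict', and optionally 'optimizer_state_dict'. A short sketch for inspecting such a file directly, with a placeholder filename:

import torch

state = torch.load('model-001-0.123456.pth', map_location='cpu')
print(state['model_name'])
print(sorted(state.keys()))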
Example #23
 def test_file_not_found_error(self):
     with pytest.raises(FileNotFoundError):
         load_model('/fake/path/to/nothing.pth')
Example #24
 def test_save(self, tmpdir, linear_argus_model_instance):
     path = str(tmpdir.mkdir("experiment").join("model.pth"))
     linear_argus_model_instance.save(path)
     model = load_model(path, device='cpu')
     assert model.params == linear_argus_model_instance.params
Example #25
        },
        'optimizer': {
            'lr': args.lr
        },
        'device': args.device
    }
    model = MnistModel(params)

    callbacks = [
        MonitorCheckpoint(dir_path='mnist/',
                          monitor='val_accuracy',
                          max_saves=3),
        EarlyStopping(monitor='val_accuracy', patience=9),
        ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=3),
        LoggingToCSV('mnist/log.csv')
    ]

    model.fit(train_loader,
              val_loader=val_loader,
              num_epochs=args.epochs,
              metrics=['accuracy'],
              callbacks=callbacks,
              metrics_on_train=True)

    del model
    model_path = Path("mnist/").glob("*.pth")
    model_path = sorted(model_path)[-1]
    print(f"Load model: {model_path}")
    model = load_model(model_path)
    print(model.__dict__)
Example #26
 def test_replace_prediction_transform_params(self, saved_argus_model):
     optimizer_state, path, model = saved_argus_model
     loaded_model = load_model(path, prediction_transform='Sigmoid')
     assert isinstance(loaded_model.prediction_transform, nn.Sigmoid)
     assert loaded_model.params['prediction_transform'] == 'Sigmoid'
     assert not isinstance(model.prediction_transform, nn.Sigmoid)
Example #27
 def test_replace_loss_params(self, saved_argus_model):
     optimizer_state, path, model = saved_argus_model
     loaded_model = load_model(path, loss='BCEWithLogitsLoss')
     assert isinstance(loaded_model.loss, nn.BCEWithLogitsLoss)
     assert loaded_model.params['loss'] == 'BCEWithLogitsLoss'
     assert not isinstance(model.loss, nn.BCEWithLogitsLoss)
Example #28
    def __init__(self, model_path, draw_transform, image_transform):
        self.model = load_model(model_path)
        self.model.nn_module.eval()

        self.draw_transform = draw_transform
        self.image_transform = image_transform
Example #29
def train_fold(save_dir, train_folds, val_folds, folds_data):
    train_transfrom = get_transforms(train=True,
                                     size=CROP_SIZE,
                                     wrap_pad_prob=0.0,
                                     resize_scale=(0.8, 1.0),
                                     resize_ratio=(1.7, 2.3),
                                     resize_prob=0.0,
                                     spec_num_mask=2,
                                     spec_freq_masking=0.15,
                                     spec_time_masking=0.20,
                                     spec_prob=0.0)
    val_transform = get_transforms(train=False, size=CROP_SIZE)

    if MIXER_PROB:
        mixer = get_mixer(mixer_prob=MIXER_PROB,
                          sigmoid_range=(3, 12),
                          alpha_dist='uniform',
                          random_prob=(0.6, 0.4))
    else:
        mixer = None

    train_dataset = BirdsongDataset(folds_data,
                                    folds=train_folds,
                                    transform=train_transfrom,
                                    mixer=mixer)
    val_dataset = BirdsongDataset(folds_data,
                                  folds=val_folds,
                                  transform=val_transform)
    train_loader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              drop_last=True,
                              num_workers=NUM_WORKERS)
    val_loader = DataLoader(val_dataset,
                            batch_size=BATCH_SIZE * 2 // ITER_SIZE,
                            shuffle=False,
                            num_workers=NUM_WORKERS)

    model = BirdsongModel(PARAMS)
    if 'pretrained' in model.params['nn_module'][1]:
        model.params['nn_module'][1]['pretrained'] = False

    if USE_AMP:
        initialize_amp(model)

    model.set_device(DEVICES)

    num_iterations = (5 * len(train_dataset)) // BATCH_SIZE
    callbacks = [
        MonitorCheckpoint(save_dir, monitor='val_loss', max_saves=1),
        CosineAnnealingLR(T_max=num_iterations,
                          eta_min=0,
                          step_on_iteration=True),
        EarlyStopping(monitor='val_loss', patience=12),
        LoggingToFile(save_dir / 'log.txt'),
        LoggingToCSV(save_dir / 'log.csv')
    ]

    model.fit(train_loader,
              val_loader=val_loader,
              num_epochs=EPOCHS,
              callbacks=callbacks,
              metrics=['f1_score'])

    del model

    model_path = get_best_model_path(save_dir)
    model = load_model(model_path)
    val_dataset = BirdsongDataset(folds_data,
                                  folds=val_folds + [config.n_folds],
                                  transform=val_transform)
    val_loader = DataLoader(val_dataset,
                            batch_size=BATCH_SIZE * 2 // ITER_SIZE,
                            shuffle=False,
                            num_workers=NUM_WORKERS)
    model.set_device(DEVICES[0])
    model.validate(val_loader,
                   metrics=['f1_score'],
                   callbacks=[
                       LoggingToFile(save_dir / 'log.txt'),
                       LoggingToCSV(save_dir / 'log.csv')
                   ])
Example #30
 def test_replace_kwargs_params(self, saved_argus_model):
     optimizer_state, path, model = saved_argus_model
     loaded_model = load_model(path, new_param={"qwerty": 42})
     assert loaded_model.params['new_param'] == {"qwerty": 42}
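
Note: Example #30 shows that any extra keyword argument to load_model ends up in the loaded model's params. A closing sketch, with a hypothetical model.pth and assuming the overrides shown individually above compose when passed together:

from argus import load_model

model = load_model('model.pth',
                   device='cpu',                    # remap to CPU (Example #16)
                   loss='BCEWithLogitsLoss',        # replace the loss (Example #27)
                   prediction_transform='Sigmoid',  # replace the transform (Example #26)
                   new_param={'qwerty': 42})        # extra kwargs land in params (Example #30)
assert model.params['new_param'] == {'qwerty': 42}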