from datascience.data.loader import occurrence_loader
from datascience.data.datasets import EnvironmentalDataset
from datascience.ml.light_gbm import fit
from datascience.ml.metrics import ValidationAccuracy

# Load GLC18 occurrences as 1x1 environmental patches (capped at 100 records)
# and train a LightGBM model, scoring with top-1 validation accuracy.
train, val, test = occurrence_loader(
    EnvironmentalDataset, source='glc18', limit=100, size_patch=1
)

evaluation = {'metrics': (ValidationAccuracy(),)}
# NOTE(review): positional order here is (train, test, val) — presumably
# matching this fit() signature; verify against the light_gbm module.
fit(train, test, val, validation_params=evaluation)
# Example #2
from datascience.data.datasets import EnvironmentalDataset
from datascience.ml.neural.supervised import fit

# Configuration for an InceptionEnv model trained with a categorical
# Poisson loss on GLC18 environmental patches.
model_params = {
    'n_labels': 6823,        # number of output classes (species)
    'n_input': 77,           # number of environmental input variables
    'exp': True,             # poisson loss,
    'normalize_weight': 2.   # poisson loss
}

model = load_create_nn(model_class=InceptionEnv, model_params=model_params)


# loading dataset
train, val, test = occurrence_loader(
    EnvironmentalDataset,
    source='glc18',
    id_name='patch_id',
    label_name='species_glc_id',
    limit=1000
)

# FIX: the original assigned into `training_params` / `validation_params`
# without ever creating the dicts, which raises NameError at runtime.
# Build them as literals instead; the resulting contents are identical.
training_params = {
    'loss': CategoricalPoissonLoss(),
    'log_modulo': 1,     # logging frequency — semantics defined by the trainer
    'iterations': [10],  # presumably learning-rate schedule milestones — verify
    'lr': 0.01,
}

validation_params = {
    'metrics': (ValidationAccuracy(1),)  # let us just analyse convergence first
}

fit(model, train=train, val=val, test=test, training_params=training_params, validation_params=validation_params)
# Example #3
    # NOTE(review): fragment — the enclosing function's `def` line and the
    # bindings for `generator`, `painter_val`, `painter_test`,
    # `initialize_model` and `model_params` are outside this view.
    # One fold of the per-country dataset; the fourth element is discarded.
    train, val, test, _ = generator.country_dataset_one_fold(
        painter_val=painter_val, painter_test=painter_test)

    model = create_model(model_class=initialize_model,
                         model_params=model_params)
    # Unwrap DataParallel so the attribute is set on the underlying module.
    mmodel = model.module if type(model) is torch.nn.DataParallel else model
    # Disable the auxiliary classifier head — presumably an Inception-style
    # model; verify against `initialize_model`.
    mmodel.aux_logits = False

    training_params = {
        'iterations': [100, 130, 150, 160],  # presumably LR-decay milestones — verify
        'batch_size': 256,
    }

    optim_params = {'lr': 0.001}

    # Top-1 accuracy only.
    validation_params = {'metrics': (ValidationAccuracy(1), )}

    model_selection_params = {'cross_validation': True, 'min_epochs': 50}

    stats = fit(model,
                train=train,
                val=val,
                test=test,
                training_params=training_params,
                validation_params=validation_params,
                optim_params=optim_params,
                model_selection_params=model_selection_params)
    score = stats.final_metric().metric_score()
    # Flip the score when the first test sample's label is not 1.0 —
    # NOTE(review): presumably converts accuracy to a per-class score for
    # a binary task; confirm against the caller.
    score = score if test[0][1] == 1. else 1. - score

    # write the score in a csv
# Example #4
    # NOTE(review): fragment — the opening `model_params = {` of this dict
    # literal is outside this view.
    'im_shape': train[0][0].shape,  # input shape taken from the first training sample
    'conv_layers': (64,),  # (150, 150),
    'linear_layers': tuple(),  # (128, 128),
    'pooling': torch.nn.AvgPool2d,  # pooling layer class used between conv blocks
    'conv_size': 3  # convolution kernel size
}

# Build a small customizable CNN and train it, tracking filter variance
# during validation.
model = create_model(model_class=CustomizableCNN, model_params=model_params)

training_params = {
    'iterations': [120],  # iterations with learning rate decay
    'log_modulo': -1,     # print loss once per epoch
    'val_modulo': 1,      # run validation every val_modulo epochs (here: every epoch)
    'batch_size': 512,
}

optim_params = {'lr': 0.01, 'momentum': 0.0}

validation_params = {
    'metrics': (ValidationAccuracy(1),),
    'vcallback': (FilterVarianceCallback(averaged=False, window_size=10),)  # (AlignmentMetricCallback(),NewStatCallback(train),)
}

fit(
    model,
    train,
    test,
    training_params=training_params,
    validation_params=validation_params,
    optim_params=optim_params,
)