from datascience.ml.neural.models import load_create_nn, InceptionEnv
from datascience.data.loader import occurrence_loader
from datascience.data.datasets import EnvironmentalDataset
from datascience.ml.neural.supervised import fit
# FIX: the four Validation* metric classes below were referenced without any import
# (NameError at runtime). NOTE(review): assuming they live in datascience.ml.metrics,
# consistent with the project's datascience.ml.* layout — confirm the module path.
from datascience.ml.metrics import (
    ValidationAccuracyMultipleBySpecies,
    ValidationMRRBySpecies,
    ValidationAccuracyRangeBySpecies,
    ValidationAccuracyForAllSpecies,
)
from sklearn.model_selection import train_test_split
from projects.ecography.configs.inception import model_params, training_params

# Load a saved InceptionEnv model if one exists, otherwise create a fresh one
# from the project's configured hyper-parameters.
model = load_create_nn(model_class=InceptionEnv, model_params=model_params)

# Load the GBIF/TAXREF occurrence dataset as environmental patches and split it
# into train/validation/test using sklearn's splitter.
train, val, test = occurrence_loader(
    EnvironmentalDataset, source='gbif_taxref', splitter=train_test_split
)

# Metrics evaluated during training; the ones flagged final_validation=True are
# only computed on the last validation pass (they are the expensive ones).
validation_params = {
    'metrics': (
        ValidationAccuracyMultipleBySpecies([1, 10, 30]),
        ValidationMRRBySpecies(),
        ValidationAccuracyRangeBySpecies(max_top_k=100, final_validation=True),
        ValidationAccuracyForAllSpecies(train=train, final_validation=True),
    )
}

# Train and evaluate the model.
fit(
    model, train=train, val=val, test=test,
    training_params=training_params, validation_params=validation_params
)
# Training schedule: a single 120-epoch run, no intermediate logging
# (log_modulo=-1), validation every epoch, optimised against the Hebbian loss.
training_params = {
    'iterations': [120],
    'log_modulo': -1,
    'val_modulo': 1,
    'loss': HebbLoss()
}

# Plain SGD settings: no momentum, fixed learning rate.
optim_params = {
    'momentum': 0.0,
    'lr': 0.1,
}

# Track only top-1 accuracy during validation.
validation_params = {'metrics': (ValidationAccuracy(1), )}

fit(
    model,
    train=train,
    test=test,
    training_params=training_params,
    optim_params=optim_params,
    validation_params=validation_params
)

# Visualise the learned decision partitions over the training set
# (plot_db_partitions_gradients is the gradient-based alternative),
# overlay the true separator, then tidy up and save the figure.
ax = plot_db_partitions(train.dataset, train.labels, model)
plot_separator(train.separator, ax=ax)
remove_axis()
save_fig()
# Learning-rate schedule milestones and batch size for this run.
training_params = {
    'iterations': [100, 130, 150, 160],
    'batch_size': 256,
}
optim_params = {'lr': 0.001}

# Validate on top-1 accuracy only; select the model by cross-validation,
# training for at least 50 epochs before early stopping may kick in.
validation_params = {'metrics': (ValidationAccuracy(1), )}
model_selection_params = {'cross_validation': True, 'min_epochs': 50}

stats = fit(
    model,
    train=train,
    val=val,
    test=test,
    training_params=training_params,
    validation_params=validation_params,
    optim_params=optim_params,
    model_selection_params=model_selection_params
)

# Final metric score; when the test label is not the positive class (1.),
# flip the score so it always measures the correct side.
score = stats.final_metric().metric_score()
if test[0][1] != 1.:
    score = 1. - score

# Append this run's result to the results CSV
# (val painter, test painter, score, test label).
with open(export_result, 'a') as f:
    f.write('%s;%s;%.5f;%ld\n' % (painter_val, painter_test, score, test[0][1]))

# Free the run's large objects before the next iteration.
del stats
del model
# NOTE(review): this chunk starts mid-expression — the opening of the transform
# dict (presumably `transform = {'train': transforms.Compose([...` with
# Resize/Crop entries) lies before this view, so the fragment cannot be
# reformatted into valid stand-alone code here. The visible tail normalises with
# the standard ImageNet mean/std, builds the 'test' pipeline, loads CIFAR-10,
# and launches a cross-validated training run with top-1 accuracy validation.
transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), 'test': transforms.Compose([ transforms.Resize(input_size), transforms.CenterCrop(input_size), transforms.ToTensor(), transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) ]), } train, test = cifar10(transform) training_params = { 'iterations': [50, 75, 100], 'batch_size': 256, } optim_params = { 'lr': 0.001 } validation_params = { 'metrics': (ValidationAccuracy(1),) } fit( model, train=train, test=test, training_params=training_params, validation_params=validation_params, optim_params=optim_params, cross_validation=True )