Example No. 1
    def process_task(self, body, message):

        body = json.loads(body)
        env = body.get('env', {})
        args = body.get('args')
        kwargs = body.get('kwargs')

        for key, value in env.items():
            os.environ[key] = value
        print(" [x] Running model with %r" % args)

        parser = commandline_parser()
        args, unknown = parser.parse_known_args(args)

        RunLog = RunLogger(name='waterlp',
                           app_name=args.app_name,
                           run_name=args.run_name,
                           logs_dir=self.logs_dir,
                           username=args.hydra_username)

        try:
            RunLog.log_start()
            run_model(args, self.logs_dir, **kwargs)
            RunLog.log_finish()
        except Exception as err:
            RunLog.log_error(message=str(err))

        message.ack()
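
The (body, message) signature above matches Kombu's consumer-callback interface. Below is a minimal sketch of how such a handler might be registered, assuming a local AMQP broker and a hypothetical 'model_tasks' queue; neither appears in the example itself.

from kombu import Connection, Queue

# sketch only: queue name and broker URL are assumptions, not part of the example
queue = Queue('model_tasks')

with Connection('amqp://guest:guest@localhost//') as conn:
    # worker is an instance of the class that defines process_task (hypothetical name)
    with conn.Consumer(queue, callbacks=[worker.process_task]):
        while True:
            conn.drain_events()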
Example No. 2
def _get_validation_results(self):
    for model_name in self.models:
        if model_name.startswith('convs'):
            # 'convsN_...' names encode the number of convolutional layers
            convs = int(model_name.split('_')[0][-1])
            model = MyModel(convs)
            num_classes = 200
            dataname = 'TinyImagenet'
            if '_'.join(model_name.split('_')[:2]) in self.convolution_experiments:
                experiment = 1
            else:
                experiment = 2
        else:
            if 'imagenet' in model_name:
                num_classes = 200
                dataname = 'TinyImagenet'
            else:
                num_classes = 10
                dataname = 'SVHN'
            experiment = 3
            model = AlexNetFine(num_classes)
        print("Running evaluation for " + str(model_name))
        # restore the best checkpoint saved during training
        model.load_state_dict(
            load(os.path.join(self.models_dir, model_name,
                              'model_best.pth.tar'))['state_dict'])
        model = model.to(self.device).eval()
        dataloader = self.datasets[dataname]
        avg_test_loss, avg_test_top1_prec = run_model(
            0,
            model,
            self.criterion,
            None,
            dataloader['testing'],
            dataloader['testing_length'],
            train=False,
            device=self.device,
            num_classes=num_classes)
        self.results[model_name] = {
            'experiment': experiment,
            'Average Loss': avg_test_loss,
            'Average Accuracy': avg_test_top1_prec
        }
Example No. 3
                                                 10):  # 10 to 100
                    sex_str = 'male' if sex == constants.Sex.MALE else 'female'
                    res_name = str(
                        res_name_prefix /
                        f'times={times_path.stem}_hospitals={hospital_path.stem}_sex={sex_str}_age={age}_race={race}_symptom={time_since_symptoms}_nsim={s_default}_beAHA.csv'
                    )
                    kwargs = {}
                    kwargs['sex'] = sex
                    kwargs['age'] = age
                    kwargs['race'] = race
                    kwargs['time_since_symptoms'] = time_since_symptoms
                    main.run_model(
                        times_file=times_path,
                        hospitals_file=hospital_path,
                        fix_performance=False,
                        patient_count=1,
                        simulation_count=s_default,
                         cores=None,  # use multiple cores when None
                        res_name=res_name,
                        **kwargs)
                     # if in resume mode, check off that resuming is done once the loop has finished
                    if resume_parameters.symp_resume & (
                            time_since_symptoms == resume_parameters.symp):
                        resume_parameters.symp_resume = False
                if resume_parameters.race_resume & (race
                                                    == resume_parameters.race):
                    resume_parameters.race_resume = False
            if resume_parameters.age_resume & (age == resume_parameters.age):
                resume_parameters.age_resume = False
    resume = False
Example No. 4
import argparse as ap
from main import run_model, save_model

if __name__ == '__main__':

    parser = ap.ArgumentParser()

    parser.add_argument(
        '-s',
        '--save',
        help='Save the model to a file; pass the file name to use',
        dest='save',
        action='store')

    parser.add_argument('-e',
                        '--epochs',
                        help='Number of epochs to train the model',
                        dest='epochs',
                        action='store',
                        type=int)

    args = parser.parse_args()

    hist, model = run_model(args.epochs) if args.epochs else run_model()

    if args.save:
        save_model(model, args.save)
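
For reference, the same parser can be exercised without touching the real command line by passing an explicit argv list to parse_args; the file name and epoch count below are hypothetical values, not taken from the example.

import argparse as ap

parser = ap.ArgumentParser()
parser.add_argument('-s', '--save', dest='save', action='store')
parser.add_argument('-e', '--epochs', dest='epochs', action='store', type=int)

# equivalent to running the script as: python script.py --epochs 20 --save model.h5
args = parser.parse_args(['--epochs', '20', '--save', 'model.h5'])
print(args.epochs, args.save)  # 20 model.h5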
Example No. 5
import os

import pandas as pd
from tabulate import tabulate

# get_config, get_folds and run_model are assumed to come from the surrounding project


def run_models(config, subset_names, subset_indices):

    models = config['models']
    folder = config['dataset']['save_folder']

    for name, indices in zip(subset_names, subset_indices):

        total_mse = [0 for i in range(len(models))]
        total_rmse = [0 for i in range(len(models))]
        total_r2 = [0 for i in range(len(models))]
        total_adj_r2 = [0 for i in range(len(models))]

        total_accuracy = [0 for i in range(len(models))]
        total_balanced_accuracy = [0 for i in range(len(models))]

        for i, model in enumerate(models):

            temp_config = get_config({**config, 'model': model})
            temp_config['data_path'] = folder + '/subdata_' + name + '.pkl'
            temp_config['print'] = config['print']

            if model in ['NN', 'LSTM', 'BiLSTM']:
                temp_config['in_dim'] = indices[1]
            if model == "NN":
                if name in set(['pr_su_bf_ma_tsfp', 'pr_su_bf_ma_tsfp_tsfd']):
                    temp_config['lr'] = 0.0001

            all_folds, all_folds_baseline = get_folds(temp_config)

            for index, (fold, fold_base) in enumerate(
                    zip(all_folds, all_folds_baseline)):

                if model == 'baseline':
                    mse, rmse, r2, adj_r2, accuracy, balanced_accuracy = run_model(
                        temp_config, fold, fold_base)
                else:
                    mse, rmse, r2, adj_r2, accuracy, balanced_accuracy = run_model(
                        temp_config, fold)

                total_mse[i] += mse
                total_rmse[i] += rmse
                total_r2[i] += r2
                total_adj_r2[i] += adj_r2

                total_accuracy[i] += accuracy
                total_balanced_accuracy[i] += balanced_accuracy

        # Calculate the average over all runs
        mses = [mse / len(all_folds) for mse in total_mse]
        rmses = [rmse / len(all_folds) for rmse in total_rmse]
        r2s = [r2 / len(all_folds) for r2 in total_r2]
        adj_r2s = [adj_r2 / len(all_folds) for adj_r2 in total_adj_r2]

        accuracies = [accuracy / len(all_folds) for accuracy in total_accuracy]
        balanced_accuracies = [
            balanced_accuracy / len(all_folds)
            for balanced_accuracy in total_balanced_accuracy
        ]

        # Print the results in a table
        table = [['mse'] + mses, ['root_mse'] + rmses, ['r2_score'] + r2s,
                 ['adj_r2_score'] + adj_r2s, ['accuracy'] + accuracies,
                 ['bal_accuracy'] + balanced_accuracies]
        if not os.path.exists("results"):
            os.makedirs("results")
        pd.DataFrame(table, columns=["metrics"] +
                     models).to_csv("results/results_" + name + ".csv")
        # okay nice, so can I rerun everything from scratch now? Yes indeed, and that recent change also has to be reverted

        print('dataset: ' + name)
        print(
            tabulate(table,
                     headers=['metrics'] + models,
                     tablefmt="fancy_grid"))  # alternative: tablefmt="plain"
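
A hypothetical call to run_models, sketched only to show the config keys the function reads ('models', dataset 'save_folder', 'print'); the real config comes from the project's own get_config and likely contains more fields.

# sketch only: model names, paths, and the subset_indices layout are assumptions
config = {
    'models': ['baseline', 'NN', 'LSTM'],        # assumed model identifiers
    'dataset': {'save_folder': 'data/subsets'},  # folder holding subdata_<name>.pkl files
    'print': False,
}
subset_names = ['pr_su_bf_ma_tsfp']
subset_indices = [(0, 16)]  # assumed layout: indices[1] is used as in_dim for the neural models

run_models(config, subset_names, subset_indices)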