示例#1
0
        file_name_general_it = file_name_general + '_MC{}'.format(mcIter)

        # select parameters for toy lgssm
        kwargs = {"k_max_train": 2000, "k_max_val": 2000, "k_max_test": 5000}

        # Specifying datasets
        loaders = loader.load_dataset(
            dataset=options["dataset"],
            dataset_options=options["dataset_options"],
            train_batch_size=options["train_options"].batch_size,
            test_batch_size=options["test_options"].batch_size,
            **kwargs)

        # Compute normalizers
        if options["normalize"]:
            normalizer_input, normalizer_output = compute_normalizer(
                loaders['train'])
        else:
            normalizer_input = normalizer_output = None

        # Define model
        modelstate = ModelState(seed=options["seed"],
                                nu=loaders["train"].nu,
                                ny=loaders["train"].ny,
                                model=options["model"],
                                options=options,
                                normalizer_input=normalizer_input,
                                normalizer_output=normalizer_output)
        modelstate.model.to(options['device'])

        df = {}
        if options['do_train']:
        dataset=options["dataset"],
        dataset_options=options["dataset_options"],
        train_batch_size=options["train_options"].batch_size,
        test_batch_size=options["test_options"].batch_size,
        **kwargs)

    if options['do_test']:
        # %% test the model

        # ##### Loading the model
        # switch to cpu computations for testing
        options['device'] = 'cpu'

        # Compute normalizers
        if options["normalize"]:
            normalizer_input, normalizer_output = compute_normalizer(
                loaders['test'])
        else:
            normalizer_input = normalizer_output = None
        # Define model
        modelstate = ModelState(seed=options["seed"],
                                nu=loaders["train"].nu,
                                ny=loaders["train"].ny,
                                model=options["model"],
                                options=options,
                                normalizer_input=normalizer_input,
                                normalizer_output=normalizer_output)
        modelstate.model.to(options['device'])

        # load model
        path = path_general + 'model/'
        file_name = file_name_general + '_bestModel.ckpt'
示例#3
0
def run_main_single(options, path_general, file_name_general):
    """Train and/or test a single model configuration and save its metrics.

    Derives the remaining ``options`` entries (device, dataset/model/
    train/test options), loads the dataset, builds the model, optionally
    runs training and testing, and writes the collected metrics to
    ``<path_general>data/<file_name_general>.csv``.

    Args:
        options: experiment settings dict; mutated in place with the
            derived '*_options' entries and the selected 'device'.
        path_general: base output directory (expects trailing separator).
        file_name_general: base name for all files written by this run.
    """
    start_time = time.time()
    print('Run file: main_single.py')
    print(time.strftime("%c"))

    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    # encode the chosen architecture into the file name
    file_name_general = file_name_general + '_h{}_z{}_n{}'.format(
        options['model_options'].h_dim, options['model_options'].z_dim,
        options['model_options'].n_layers)
    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general)

    # Specifying datasets
    loaders = loader.load_dataset(
        dataset=options["dataset"],
        dataset_options=options["dataset_options"],
        train_batch_size=options["train_options"].batch_size,
        test_batch_size=options["test_options"].batch_size,
    )

    # Compute normalizers
    if options["normalize"]:
        normalizer_input, normalizer_output = compute_normalizer(
            loaders['train'])
    else:
        normalizer_input = normalizer_output = None

    # Define model
    modelstate = ModelState(seed=options["seed"],
                            nu=loaders["train"].nu,
                            ny=loaders["train"].ny,
                            model=options["model"],
                            options=options,
                            normalizer_input=normalizer_input,
                            normalizer_output=normalizer_output)
    modelstate.model.to(options['device'])

    # save the options
    save_options(options, path_general, 'options.txt')

    # allocation
    df = {}
    if options['do_train']:
        # train the model
        df = training.run_train(modelstate=modelstate,
                                loader_train=loaders['train'],
                                loader_valid=loaders['valid'],
                                options=options,
                                dataframe=df,
                                path_general=path_general,
                                file_name_general=file_name_general)

    if options['do_test']:
        # test the model
        df = testing.run_test(options, loaders, df, path_general,
                              file_name_general)

    # save collected metrics as CSV under <path_general>data/
    path = path_general + 'data/'
    if not os.path.exists(path):
        os.makedirs(path)
    df = pd.DataFrame(df)
    file_name = file_name_general + '.csv'
    df.to_csv(path + file_name)

    # elapsed-time output. divmod keeps every component an exact integer
    # (the previous '//' arithmetic produced floats, so hours printed as
    # e.g. '1.0') and avoids shadowing the builtin min().
    time_el = time.time() - start_time
    hours, rem = divmod(int(time_el), 3600)
    minutes, seconds = divmod(rem, 60)
    # fixed typo: 'Total ime' -> 'Total time'
    print('Total time of file execution: {}:{:02d}:{:02d} [h:min:sec]'.format(
        hours, minutes, seconds))
    print(time.strftime("%c"))
示例#4
0
def run_test(options, loaders, df, path_general, file_name_general, **kwargs):
    """Evaluate the best saved model on the test set and collect metrics.

    Rebuilds the model (normalizers here are only placeholders; the real
    values are restored from the checkpoint), loads
    ``<path_general>model/<file_name_general>_bestModel.ckpt``, samples
    model outputs on the test loader, plots predictions, and computes
    marginal likelihood, VAF and RMSE.

    Args:
        options: experiment settings dict (must contain the derived
            '*_options' entries and 'device').
        loaders: dict with at least 'train' and 'test' data loaders.
        df: metrics dict to update and return.
        path_general: base output directory (expects trailing separator).
        file_name_general: base name of the checkpoint / output files.
        **kwargs: optional 'file_name_add' prefix for the output files.

    Returns:
        ``df`` updated with the option values and test metrics.
    """
    # switch to cpu computations for testing
    # options['device'] = 'cpu'

    # %% load model

    # Compute normalizers (here just used for initialization, real values loaded below)
    if options["normalize"]:
        normalizer_input, normalizer_output = compute_normalizer(loaders['train'])
    else:
        normalizer_input = normalizer_output = None

    # Define model
    modelstate = ModelState(seed=options["seed"],
                            nu=loaders["train"].nu, ny=loaders["train"].ny,
                            model=options["model"],
                            options=options,
                            normalizer_input=normalizer_input,
                            normalizer_output=normalizer_output)
    modelstate.model.to(options['device'])

    # load model
    path = path_general + 'model/'
    file_name = file_name_general + '_bestModel.ckpt'
    modelstate.load_model(path, file_name)
    modelstate.model.to(options['device'])

    # %% plot and save the loss curve
    dv.plot_losscurve(df, options, path_general, file_name_general)

    # %% others

    # .get() tolerates callers passing other kwargs (the original indexing
    # raised KeyError whenever kwargs was non-empty but lacked this key)
    file_name_add = kwargs.get('file_name_add', '')
    file_name_general = file_name_add + file_name_general

    # get the number of model parameters
    num_model_param = get_n_params(modelstate.model)
    print('Model parameters: {}'.format(num_model_param))

    # %% RUN PERFORMANCE EVAL
    # %%

    # %% sample from the model
    # NOTE(review): only the last batch's results survive the loop —
    # presumably the test loader yields a single batch; confirm.
    for u_test, y_test in loaders['test']:
        # getting output distribution parameter only implemented for selected models
        u_test = u_test.to(options['device'])
        y_sample, y_sample_mu, y_sample_sigma = modelstate.model.generate(u_test)

        # convert to cpu and to numpy for evaluation
        # samples data
        y_sample_mu = y_sample_mu.cpu().detach().numpy()
        y_sample_sigma = y_sample_sigma.cpu().detach().numpy()
        # test data
        y_test = y_test.cpu().detach().numpy()
        y_sample = y_sample.cpu().detach().numpy()

    # get noisy test data for narendra_li
    if options['dataset'] == 'narendra_li':
        # original test set is unnoisy -> get noisy test set
        yshape = y_test.shape
        y_test_noisy = y_test + np.sqrt(0.1) * np.random.randn(yshape[0], yshape[1], yshape[2])
    elif options['dataset'] == 'toy_lgssm':
        # original test set is unnoisy -> get noisy test set
        yshape = y_test.shape
        y_test_noisy = y_test + np.sqrt(1) * np.random.randn(yshape[0], yshape[1], yshape[2])
    else:
        y_test_noisy = y_test

    # %% plot resulting predictions
    # raw strings below: '\m', '\p', '\s' are invalid escape sequences and
    # raise SyntaxWarning on modern Python; r'...' keeps the same text.
    if options['dataset'] == 'narendra_li':
        # for narendra_li problem show test data mean pm 3sigma as well
        data_y_true = [y_test, np.sqrt(0.1) * np.ones_like(y_test)]
        data_y_sample = [y_sample_mu, y_sample_sigma]
        label_y = [r'true, $\mu\pm3\sigma$', r'sample, $\mu\pm3\sigma$']
    elif options['dataset'] == 'toy_lgssm':
        # for lgssm problem show test data mean pm 3sigma as well
        data_y_true = [y_test, np.sqrt(1) * np.ones_like(y_test)]
        data_y_sample = [y_sample_mu, y_sample_sigma]
        label_y = [r'true, $\mu\pm3\sigma$', r'sample, $\mu\pm3\sigma$']
    else:
        data_y_true = [y_test_noisy]
        data_y_sample = [y_sample_mu, y_sample_sigma]
        label_y = ['true', r'sample, $\mu\pm3\sigma$']
    # number of time steps to show in the plot
    if options['dataset'] == 'cascaded_tank':
        temp = 1024
    elif options['dataset'] == 'wiener_hammerstein':
        temp = 4000
    else:
        temp = 200
    dv.plot_time_sequence_uncertainty(data_y_true,
                                      data_y_sample,
                                      label_y,
                                      options,
                                      batch_show=0,
                                      x_limit_show=[0, temp],
                                      path_general=path_general,
                                      file_name_general=file_name_general)

    # %% compute performance values

    # compute marginal likelihood (same as for predictive distribution loss in training)
    marginal_likeli = de.compute_marginalLikelihood(y_test_noisy, y_sample_mu, y_sample_sigma, doprint=True)

    # compute VAF
    vaf = de.compute_vaf(y_test_noisy, y_sample_mu, doprint=True)

    # compute RMSE
    rmse = de.compute_rmse(y_test_noisy, y_sample_mu, doprint=True)

    # %% Collect data

    # options_dict
    options_dict = {'h_dim': options['model_options'].h_dim,
                    'z_dim': options['model_options'].z_dim,
                    'n_layers': options['model_options'].n_layers,
                    'seq_len_train': options['dataset_options'].seq_len_train,
                    'batch_size': options['train_options'].batch_size,
                    'lr_scheduler_nepochs': options['train_options'].lr_scheduler_nepochs,
                    'lr_scheduler_factor': options['train_options'].lr_scheduler_factor,
                    'model_param': num_model_param, }
    # test_dict
    test_dict = {'marginal_likeli': marginal_likeli,
                 'vaf': vaf,
                 'rmse': rmse}
    # dataframe
    df.update(options_dict)
    df.update(test_dict)

    return df
示例#5
0
def get_perf_results(path_general, model_name):
    """Run the Monte-Carlo grid evaluation of a trained Wiener-Hammerstein model.

    For every MC sample and every (h, z, n) grid point, loads the multisine
    and sweptsine test sets, evaluates the corresponding trained model, and
    stores VAF / RMSE / marginal likelihood tensors to
    ``<path_general>data/wiener_hammerstein.pt``.

    Args:
        path_general: base directory containing 'model/' and 'data/'.
        model_name: name of the model type to evaluate.
    """
    options = {
        'dataset': 'wiener_hammerstein',
        'model': model_name,
        'logdir': 'final',
        'normalize': True,
        'seed': 1234,
        'optim': 'Adam',
        'showfig': False,
        'savefig': False,
        'MCsamples': 20,
        'gridvalues': {
            'h_values': [30, 40, 50, 60, 70],
            'z_values': [3],
            'n_values': [3],
        },
        'train_set': 'small',
    }
    h_values = options['gridvalues']['h_values']
    z_values = options['gridvalues']['z_values']
    n_values = options['gridvalues']['n_values']

    # get the options
    options['device'] = torch.device('cpu')
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # file name
    # BUGFIX: bare name 'dataset' was undefined here (NameError);
    # the dataset name lives in the options dict.
    file_name_general = options['dataset']

    # allocation: [MC sample, h index, z index, n index]
    vaf_all_multisine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    rmse_all_multisine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    likelihood_all_multisine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])

    vaf_all_sweptsine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    rmse_all_sweptsine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])
    likelihood_all_sweptsine = torch.zeros(
        [options['MCsamples'],
         len(h_values),
         len(z_values),
         len(n_values)])

    for mcIter in range(options['MCsamples']):
        print('\n#####################')
        print('MC ITERATION: {}/{}'.format(mcIter + 1, options['MCsamples']))
        print('#####################\n')

        for i1, h_sel in enumerate(h_values):
            for i2, z_sel in enumerate(z_values):
                for i3, n_sel in enumerate(n_values):

                    # output current choice
                    print('\nCurrent run: h={}, z={}, n={}\n'.format(
                        h_sel, z_sel, n_sel))

                    # get current file names
                    file_name = file_name_general + '_h{}_z{}_n{}_MC{}'.format(
                        h_sel, z_sel, n_sel, mcIter)

                    # set new values in options
                    options['model_options'].h_dim = h_sel
                    options['model_options'].z_dim = z_sel
                    options['model_options'].n_layers = n_sel

                    # Specifying datasets (only matters for testing)
                    kwargs = {
                        'test_set': 'multisine',
                        'MCiter': mcIter,
                        'train_set': options['train_set']
                    }
                    loaders_multisine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    kwargs = {'test_set': 'sweptsine', 'MCiter': mcIter}
                    loaders_sweptsine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    # Compute normalizers
                    if options["normalize"]:
                        normalizer_input, normalizer_output = compute_normalizer(
                            loaders_multisine['train'])
                    else:
                        normalizer_input = normalizer_output = None

                    # Define model
                    modelstate = ModelState(
                        seed=options["seed"],
                        nu=loaders_multisine["train"].nu,
                        ny=loaders_multisine["train"].ny,
                        model=options["model"],
                        options=options,
                        normalizer_input=normalizer_input,
                        normalizer_output=normalizer_output)
                    modelstate.model.to(options['device'])

                    # test the model on both test sets; each gets a fresh
                    # metrics dict (the old 'df_multisine = df' alias was
                    # just an empty-dict handoff)
                    print('\nTest: Multisine')
                    kwargs = {'file_name_add': 'Multisine_'}
                    df_multisine = testing.run_test(options, loaders_multisine,
                                                    {}, path_general,
                                                    file_name, **kwargs)
                    print('\nTest: Sweptsine')
                    kwargs = {'file_name_add': 'Sweptsine_'}
                    df_sweptsine = testing.run_test(options, loaders_sweptsine,
                                                    {}, path_general,
                                                    file_name, **kwargs)

                    # save performance values
                    vaf_all_multisine[mcIter, i1, i2, i3] = df_multisine['vaf']
                    rmse_all_multisine[mcIter, i1, i2,
                                       i3] = df_multisine['rmse'][0]
                    likelihood_all_multisine[
                        mcIter, i1, i2,
                        i3] = df_multisine['marginal_likeli'].item()

                    vaf_all_sweptsine[mcIter, i1, i2, i3] = df_sweptsine['vaf']
                    rmse_all_sweptsine[mcIter, i1, i2,
                                       i3] = df_sweptsine['rmse'][0]
                    likelihood_all_sweptsine[
                        mcIter, i1, i2,
                        i3] = df_sweptsine['marginal_likeli'].item()
    # save data
    datasaver = {
        'all_vaf_multisine': vaf_all_multisine,
        'all_rmse_multisine': rmse_all_multisine,
        'all_likelihood_multisine': likelihood_all_multisine,
        'all_vaf_sweptsine': vaf_all_sweptsine,
        'all_rmse_sweptsine': rmse_all_sweptsine,
        'all_likelihood_sweptsine': likelihood_all_sweptsine
    }
    # get saving path
    path = path_general + 'data/'
    # filename
    file_name = '{}.pt'.format(options['dataset'])
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # save data
    torch.save(datasaver, path + file_name)

    print('\n')
    print('# ' * 20)
    print('Performance computation for model {}: DONE'.format(model_name))
    print('# ' * 20)
    print('\n')
示例#6
0
def run_main_ndata(options, vary_data, path_general, file_name_general,
                   params):
    """Train/test the best model configuration for varying training-set sizes.

    For every entry of ``vary_data['k_max_train_values']`` (with matching
    validation/test sizes), loads the dataset, trains and/or tests the
    model, collects VAF / RMSE / likelihood, saves all results, and plots
    performance over the number of data points.

    Args:
        options: experiment settings dict; mutated in place with the
            derived '*_options' entries and the selected 'device'.
        vary_data: dict with 'k_max_train_values', 'k_max_val_values',
            'k_max_test_values' lists of equal length.
        path_general: base output directory (expects trailing separator).
        file_name_general: base name for all files written by this run.
        params: dict with the best hyperparameters 'h_best', 'z_best',
            'n_best'.
    """
    print('Run file: main_ndata.py')
    start_time = time.time()
    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(
        options['dataset'])
    options['model_options'] = model_params.get_model_options(
        options['model'], options['dataset'], options['dataset_options'])
    options['train_options'] = train_params.get_train_options(
        options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # set new values in options
    options['model_options'].h_dim = params['h_best']
    options['model_options'].z_dim = params['z_best']
    options['model_options'].n_layers = params['n_best']

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general + '_runlog')

    # values of evaluation
    k_max_train_values = vary_data['k_max_train_values']
    k_max_val_values = vary_data['k_max_val_values']
    k_max_test_values = vary_data['k_max_test_values']

    # print number of evaluations
    print('Total number of data point sets: {}'.format(
        len(k_max_train_values)))

    # allocation
    all_vaf = torch.zeros([len(k_max_train_values)])
    all_rmse = torch.zeros([len(k_max_train_values)])
    all_likelihood = torch.zeros([len(k_max_train_values)])
    all_df = {}

    for i, _ in enumerate(k_max_train_values):

        # output current choice
        print('\nCurrent run: k_max_train={}\n'.format(k_max_train_values[i]))

        # get current file name
        file_name = file_name_general + '_kmaxtrain_{}'.format(
            k_max_train_values[i])

        # select parameters
        kwargs = {
            "k_max_train": k_max_train_values[i],
            "k_max_val": k_max_val_values[i],
            "k_max_test": k_max_test_values[i]
        }

        # Specifying datasets
        loaders = loader.load_dataset(
            dataset=options["dataset"],
            dataset_options=options["dataset_options"],
            train_batch_size=options["train_options"].batch_size,
            test_batch_size=options["test_options"].batch_size,
            **kwargs)

        # Compute normalizers
        if options["normalize"]:
            normalizer_input, normalizer_output = compute_normalizer(
                loaders['train'])
        else:
            normalizer_input = normalizer_output = None

        # Define model
        modelstate = ModelState(seed=options["seed"],
                                nu=loaders["train"].nu,
                                ny=loaders["train"].ny,
                                model=options["model"],
                                options=options,
                                normalizer_input=normalizer_input,
                                normalizer_output=normalizer_output)
        modelstate.model.to(options['device'])

        # allocation
        df = {}

        if options['do_train']:
            # train the model
            df = training.run_train(
                modelstate=modelstate,
                loader_train=loaders['train'],
                loader_valid=loaders['valid'],
                options=options,
                dataframe=df,
                path_general=path_general,
                file_name_general=file_name,
            )

        if options['do_test']:
            # test the model
            df = testing.run_test(options, loaders, df, path_general,
                                  file_name)

        # store values
        all_df[i] = df

        # save performance values
        all_vaf[i] = df['vaf']
        all_rmse[i] = df['rmse'][0]
        all_likelihood[i] = df['marginal_likeli'].item()

    # save data
    # get saving path
    path = path_general + 'data/'
    # to pandas
    all_df = pd.DataFrame(all_df)
    # filename
    file_name = file_name_general + '_gridsearch.csv'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # save data
    # BUGFIX: write into the 'data/' directory prepared above; the
    # original saved to path_general, ignoring the path it just created
    # (inconsistent with run_main_single).
    all_df.to_csv(path + file_name)
    # save performance values
    torch.save(all_vaf, path_general + 'data/' + 'all_vaf.pt')
    torch.save(all_rmse, path_general + 'data/' + 'all_rmse.pt')
    torch.save(all_likelihood, path_general + 'data/' + 'all_likelihood.pt')

    # plot performance
    dv.plot_perf_ndata(k_max_train_values, all_vaf, all_rmse, all_likelihood,
                       options, path_general)

    # elapsed-time output. divmod keeps every component an exact integer
    # (the previous '//' arithmetic produced floats, so hours printed as
    # e.g. '1.0') and avoids shadowing the builtin min().
    time_el = time.time() - start_time
    hours, rem = divmod(int(time_el), 3600)
    minutes, seconds = divmod(rem, 60)
    # fixed typo: 'Total ime' -> 'Total time'
    print('Total time of file execution: {}:{:02d}:{:02d} [h:min:sec]'.format(
        hours, minutes, seconds))
示例#7
0
def run_main_gridsearch(options, kwargs, gridvalues, path_general, file_name_general):
    """Grid-search over (h, z, n) model hyperparameters; train/test each point.

    For every combination in ``gridvalues``, loads the dataset, trains
    and/or tests the model, collects VAF / RMSE / likelihood, saves all
    results, reports the best combination per metric, and plots the grid
    performance.

    Args:
        options: experiment settings dict; mutated in place with the
            derived '*_options' entries and the selected 'device'.
        kwargs: extra keyword arguments forwarded to the dataset loader.
        gridvalues: dict with 'h_values', 'z_values', 'n_values' lists.
        path_general: base output directory (expects trailing separator).
        file_name_general: base name for all files written by this run.
    """
    print('Run file: main_gridsearch.py')
    start_time = time.time()
    # get correct computing device
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')
    print('Device: {}'.format(device))

    # get the options
    options['device'] = device
    options['dataset_options'] = dynsys_params.get_dataset_options(options['dataset'])
    options['model_options'] = model_params.get_model_options(options['model'], options['dataset'],
                                                              options['dataset_options'])
    options['train_options'] = train_params.get_train_options(options['dataset'])
    options['test_options'] = train_params.get_test_options()

    # print model type and dynamic system type
    print('\n\tModel Type: {}'.format(options['model']))
    print('\tDynamic System: {}\n'.format(options['dataset']))

    path = path_general + 'data/'
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)
    # set logger
    set_redirects(path, file_name_general+'_runlog')

    h_values = gridvalues['h_values']
    z_values = gridvalues['z_values']
    n_values = gridvalues['n_values']

    # print number of searches
    temp = len(h_values) * len(z_values) * len(n_values)
    print('Total number of search points: {}'.format(temp))

    # allocation: [h index, z index, n index]
    all_vaf = torch.zeros([len(h_values), len(z_values), len(n_values)])
    all_rmse = torch.zeros([len(h_values), len(z_values), len(n_values)])
    all_likelihood = torch.zeros([len(h_values), len(z_values), len(n_values)])
    all_df = {}

    for i1, h_sel in enumerate(h_values):
        for i2, z_sel in enumerate(z_values):
            for i3, n_sel in enumerate(n_values):

                # output current choice
                print('\nCurrent run: h={}, z={}, n={}\n'.format(h_sel, z_sel, n_sel))

                # get current file names
                file_name = file_name_general + '_h{}_z{}_n{}'.format(h_sel, z_sel, n_sel)

                # set new values in options
                options['model_options'].h_dim = h_sel
                options['model_options'].z_dim = z_sel
                options['model_options'].n_layers = n_sel

                # Specifying datasets
                loaders = loader.load_dataset(dataset=options["dataset"],
                                              dataset_options=options["dataset_options"],
                                              train_batch_size=options["train_options"].batch_size,
                                              test_batch_size=options["test_options"].batch_size,
                                              **kwargs)

                # Compute normalizers
                if options["normalize"]:
                    normalizer_input, normalizer_output = compute_normalizer(loaders['train'])
                else:
                    normalizer_input = normalizer_output = None

                # Define model
                modelstate = ModelState(seed=options["seed"],
                                        nu=loaders["train"].nu, ny=loaders["train"].ny,
                                        model=options["model"],
                                        options=options,
                                        normalizer_input=normalizer_input,
                                        normalizer_output=normalizer_output)
                modelstate.model.to(options['device'])

                # allocation
                df = {}

                if options['do_train']:
                    # train the model
                    df = training.run_train(modelstate=modelstate,
                                            loader_train=loaders['train'],
                                            loader_valid=loaders['valid'],
                                            options=options,
                                            dataframe=df,
                                            path_general=path_general,
                                            file_name_general=file_name)

                if options['do_test']:
                    # test the model
                    df = testing.run_test(options, loaders, df, path_general, file_name)

                # store values
                all_df[(i1, i2, i3)] = df

                # save performance values
                all_vaf[i1, i2, i3] = df['vaf']
                all_rmse[i1, i2, i3] = df['rmse'][0]
                all_likelihood[i1, i2, i3] = df['marginal_likeli'].item()

    # save data
    # get saving path
    path = path_general + 'data/'
    # to pandas
    all_df = pd.DataFrame(all_df)
    # filename
    file_name = '{}_gridsearch.csv'.format(options['dataset'])
    # check if path exists and create otherwise
    if not os.path.exists(path):
        os.makedirs(path)

    # save data
    # BUGFIX: write into the 'data/' directory prepared above; the
    # original saved to path_general, ignoring the path it just created
    # (inconsistent with run_main_single).
    all_df.to_csv(path + file_name)
    # save performance values
    torch.save(all_vaf, path_general + 'data/' + 'all_vaf.pt')
    torch.save(all_rmse, path_general + 'data/' + 'all_rmse.pt')
    torch.save(all_likelihood, path_general + 'data/' + 'all_likelihood.pt')

    # output best parameters
    all_vaf = all_vaf.numpy()
    i, j, k = np.unravel_index(all_vaf.argmax(), all_vaf.shape)
    print('Best Parameters max vaf={}, h={}, z={}, n={}, ind(h,z,n)=({},{},{})'.format(all_vaf[i, j, k],
                                                                                       h_values[i],
                                                                                       z_values[j],
                                                                                       n_values[k], i, j, k))
    all_rmse = all_rmse.numpy()
    i, j, k = np.unravel_index(all_rmse.argmin(), all_rmse.shape)
    print('Best Parameters min rmse={}, h={}, z={}, n={}, ind(h,z,n)=({},{},{})'.format(all_rmse[i, j, k],
                                                                                        h_values[i],
                                                                                        z_values[j],
                                                                                        n_values[k], i, j, k))
    all_likelihood = all_likelihood.numpy()
    i, j, k = np.unravel_index(all_likelihood.argmax(), all_likelihood.shape)
    print('Best Parameters max likelihood={}, h={}, z={}, n={}, ind(h,z,n)=({},{},{})'.format(all_likelihood[i, j, k],
                                                                                              h_values[i],
                                                                                              z_values[j],
                                                                                              n_values[k], i, j, k))

    # plot results
    dv.plot_perf_gridsearch(all_vaf, all_rmse, all_likelihood, z_values, h_values, path_general, options)

    # elapsed-time output. divmod keeps every component an exact integer
    # and avoids shadowing the builtin min().
    time_el = time.time() - start_time
    hours, rem = divmod(int(time_el), 3600)
    minutes, seconds = divmod(rem, 60)
    # fixed typo: 'Total ime' -> 'Total time'
    print('Total time of file execution: {:2.0f}:{:02d}:{:02d} [h:min:sec]'.format(hours, minutes, seconds))
示例#8
0
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    kwargs = {'test_set': 'sweptsine', 'MCiter': mcIter}
                    loaders_sweptsine = loader.load_dataset(
                        dataset=options["dataset"],
                        dataset_options=options["dataset_options"],
                        train_batch_size=options["train_options"].batch_size,
                        test_batch_size=options["test_options"].batch_size,
                        **kwargs)

                    # Compute normalizers
                    if options["normalize"]:
                        normalizer_input, normalizer_output = compute_normalizer(
                            loaders_multisine['train'])
                    else:
                        normalizer_input = normalizer_output = None

                    # Define model
                    modelstate = ModelState(
                        seed=options["seed"],
                        nu=loaders_multisine["train"].nu,
                        ny=loaders_multisine["train"].ny,
                        model=options["model"],
                        options=options,
                        normalizer_input=normalizer_input,
                        normalizer_output=normalizer_output)
                    modelstate.model.to(options['device'])

                    # allocation