Example #1
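    # Context this fragment appears to assume (defined earlier in the script):
    # roughly `import os`, `import numpy as np`,
    # `from sklearn.model_selection import train_test_split`, the OMGP wrapper
    # imported from its project module, and X, y, sc_y, metadata, sc,
    # kernel_recipe, tr_ratio, r_state, trr_data, ads1 and ads2 already set up.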
    print('Shape of y: {}'.format(y.shape))
    print('Shape of sc_y: {}'.format(sc_y.shape))
    for trr in tr_ratio:
        for rs in r_state:
            print('    Working on random state: {}'.format(rs))
            X_train, X_test, y_train, y_test, scaling_y_train, \
                scaling_y_test, metadata_train, metadata_test = \
                train_test_split(X, y, sc_y, metadata,
                                 train_size=trr, random_state=rs)

            MLGP = OMGP(X_train=X_train,
                        X_test=X_test,
                        y_train=y_train,
                        y_test=y_test,
                        kernel_recipe=kernel_recipe,
                        scaling=True,
                        scaling_params={
                            'alpha': sc.slope,
                            'gamma': sc.intercept
                        },
                        scaling_y_train=scaling_y_train,
                        scaling_y_test=scaling_y_test)

            MLGP.run_GP()
            trr_data[trr][rs] = MLGP.__dict__
            trr_data[trr][rs]['metadata_train'] = metadata_train
            trr_data[trr][rs]['metadata_test'] = metadata_test

    if not os.path.exists('gp_scaling_{0}_{1}'.format(ads1, ads2)):
        os.mkdir('gp_scaling_{0}_{1}'.format(ads1, ads2))
    np.save('gp_scaling_{0}_{1}/{1}_rsdata.npy'.format(ads1, ads2), trr_data)
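
    # A minimal reload sketch (checks the file written just above): a dict saved
    # with np.save() is stored as a pickled 0-d object array, so reading it back
    # needs allow_pickle=True plus the [()] item lookup.
    reloaded = np.load('gp_scaling_{0}_{1}/{1}_rsdata.npy'.format(ads1, ads2),
                       allow_pickle=True)[()]
    print('Saved train ratios: {}'.format(sorted(reloaded.keys())))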
Example #2
    r_data = {rs: {} for rs in r_state}
    print('Shape of y: {}'.format(y.shape))
    print('Shape of sc_y: {}'.format(sc_y.shape))
    for rs in r_state:
        print('    Working on random state: {}'.format(rs))
        X_train, X_test, y_train, y_test, scaling_y_train, scaling_y_test, \
            metadata_train, metadata_test = \
            train_test_split(X, y, sc_y, metadata,
                             train_size=0.80, random_state=rs)

        MLGP = OMGP(X_train=X_train,
                    X_test=X_test,
                    y_train=y_train,
                    y_test=y_test,
                    kernel_recipe=kernel_recipe,
                    scaling=True,
                    scaling_params={
                        'alpha': sc.slope,
                        'gamma': sc.intercept
                    },
                    scaling_y_train=scaling_y_train,
                    scaling_y_test=scaling_y_test)

        MLGP.run_GP()
        r_data[rs] = MLGP.__dict__
        r_data[rs]['metadata_train'] = metadata_train
        r_data[rs]['metadata_test'] = metadata_test

    if not os.path.exists('gp_scaling_{0}_{1}'.format(ads1, ads2)):
        os.mkdir('gp_scaling_{0}_{1}'.format(ads1, ads2))
    np.save('gp_scaling_{0}_{1}/{1}_rsdata.npy'.format(ads1, ads2), r_data)
    #LC = MLGP.plot_learning_curve()
    #LC.savefig('gp_scaling_{0}_{1}/{1}_learning_curve.png'.format(ads1, ads2))
    PP = MLGP.parity_plot(data='train')
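    # Sketch (assumption: parity_plot() returns a matplotlib Figure, as the
    # commented-out plot_learning_curve() usage above suggests); if so, the
    # train-set parity plot could be saved next to the data, e.g.:
    # PP.savefig('gp_scaling_{0}_{1}/{1}_parity_train.png'.format(ads1, ads2))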
Example #3
            'noise_level': 0.1,
            'noise_level_bounds': (1e-5, 1e5)
        }
    }

    r_state = [10, 20, 42, 80, 150, 200, 300, 400]
    #r_state = [42]
    r_data = {rs: {} for rs in r_state}
    for rs in r_state:
        print('    Working on random state: {}'.format(rs))
        X_train, X_test, y_train, y_test, metadata_train, metadata_test = \
            train_test_split(X, y, metadata, train_size=0.80, random_state=rs)

        MLGP = OMGP(X_train=X_train,
                    X_test=X_test,
                    y_train=y_train,
                    y_test=y_test,
                    kernel_recipe=kernel_recipe)

        MLGP.run_GP()
        r_data[rs] = MLGP.__dict__
        r_data[rs]['metadata_train'] = metadata_train
        r_data[rs]['metadata_test'] = metadata_test

    if not os.path.exists('run_{}'.format(ads)):
        os.mkdir('run_{}'.format(ads))

    np.save('run_{0}/{0}_rsdata.npy'.format(ads), r_data)
    #LC = MLGP.plot_learning_curve()
    #LC.savefig('run_{0}/{0}_learning_curve.png'.format(ads))
    PP = MLGP.parity_plot(data='train')
Example #4
    'WhiteKernel': {
        'noise_level': 0.1,
        'noise_level_bounds': (1e-5, 1e5)
    }
}
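
# The 'WhiteKernel' entry above matches the parameters of scikit-learn's
# sklearn.gaussian_process.kernels.WhiteKernel; presumably the OMGP wrapper
# builds its kernel from kernel_recipe along these lines (a minimal sketch,
# assuming scikit-learn, not the wrapper's actual code):
#     from sklearn.gaussian_process.kernels import WhiteKernel
#     noise = WhiteKernel(noise_level=0.1, noise_level_bounds=(1e-5, 1e5))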

r_state = [15, 25, 45]
trr_data = {tr: {rs: {} for rs in r_state} for tr in tr_ratio}

for trr in tr_ratio:
    for rs in r_state:
        print('    Working on random state: {}'.format(rs))
        X_train, X_test, y_train, y_test, metadata_train, metadata_test = \
            train_test_split(X, y, metadata, train_size=trr, random_state=rs)

        MLGP = OMGP(X_train=X_train,
                    X_test=X_test,
                    y_train=y_train,
                    y_test=y_test,
                    kernel_recipe=kernel_recipe)

        MLGP.run_GP()
        trr_data[trr][rs] = MLGP.__dict__
        trr_data[trr][rs]['metadata_train'] = metadata_train
        trr_data[trr][rs]['metadata_test'] = metadata_test

prev_data = np.load('run_{0}/{0}_rsdata.npy'.format(adsorbate),
                    allow_pickle=True)[()]  # allow_pickle=True is needed to unpickle the saved dict
for trr in tr_ratio:
    prev_data[trr] = trr_data[trr]
np.save('run_{0}/{0}_rsdata.npy'.format(adsorbate), prev_data)
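
# Sanity-check sketch (assumption: verifying the merge just written): the file
# should now hold both the original random-state entries and the new
# train-ratio entries.
merged = np.load('run_{0}/{0}_rsdata.npy'.format(adsorbate), allow_pickle=True)[()]
print('Merged keys (random states + train ratios): {}'.format(list(merged.keys())))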