# Code example #1
def single_experiment(sigma, order, sigma_noise, experiment_type):
    """Train and evaluate one DeepSphere model for a given configuration.

    Parameters
    ----------
    sigma : smoothing parameter selecting which simulated maps to load.
    order : HEALPix order controlling how the sphere is split into samples.
    sigma_noise : noise level added progressively to the training data.
    experiment_type : tag appended to the experiment name and forwarded to
        the hyper-parameter selection.

    Returns
    -------
    The classification error on the test set.
    """
    suffix = '_' + experiment_type
    Nside = 1024

    EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma{}'.format(
        Nside, sigma_noise, order, sigma, suffix)

    # Load raw maps; the training standard deviation is reused to
    # normalize the test set consistently.
    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std)

    (features_train, labels_train, features_validation, labels_validation,
     features_test) = experiment_helper.data_preprossing(
         x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type=None)

    training = LabeledDatasetWithNoise(features_train, labels_train, end_level=sigma_noise)
    validation = LabeledDataset(features_validation, labels_validation)

    params = get_params(training.N, EXP_NAME, order, Nside, experiment_type)
    model = models.deepsphere(**params)

    # Wipe stale summaries/checkpoints so a rerun starts from scratch.
    for kind in ('summaries', 'checkpoints'):
        shutil.rmtree('{}/{}/'.format(kind, EXP_NAME), ignore_errors=True)

    model.fit(training, validation)

    error_validation = experiment_helper.model_error(model, features_validation, labels_validation)
    print('The validation error is {}%'.format(error_validation * 100), flush=True)

    error_test = experiment_helper.model_error(model, features_test, labels_test)
    print('The testing error is {}%'.format(error_test * 100), flush=True)

    return error_test
# Code example #2
def single_experiment(sigma, order, sigma_noise, experiment_type, new,
                      n_neighbors):
    """Train and evaluate one DeepSphere model, reporting timing as well.

    Parameters
    ----------
    sigma : smoothing parameter selecting which simulated maps to load.
    order : HEALPix order controlling how the sphere is split into samples.
    sigma_noise : noise level added progressively to the training data.
    experiment_type : tag appended to the experiment name and forwarded to
        the hyper-parameter selection.
    new : whether to use the new graph construction ('cosmo') instead of
        the old one ('oldgraph').
    n_neighbors : number of neighbors used to build the sphere graph.

    Returns
    -------
    Tuple of (test error, time per batch).
    """
    suffix = '_' + experiment_type
    Nside = 1024

    # Full-resolution data lives on the cluster scratch space; smaller
    # resolutions are read from the local data directory.
    if Nside == 1024:
        data_path = '/mnt/scratch/lts2/mdeff/deepsphere/data/same_psd/'
    else:
        data_path = 'data/same_psd/'

    prefix = 'cosmo' if new else 'oldgraph'
    EXP_NAME = prefix + '_{}sides_{}noise_{}order_{}sigma_{}neighbor{}_fold3'.format(
        Nside, sigma_noise, order, sigma, n_neighbors, suffix)

    # Load raw maps; the training standard deviation is reused to
    # normalize the test set consistently. The test data is read from the
    # parent directory (the trailing 'same_psd/' is stripped).
    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(
        sigma, order, data_path=data_path)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(
        sigma, order, sigma_noise, x_raw_std, data_path=data_path[:-9])

    (features_train, labels_train, features_validation, labels_validation,
     features_test) = experiment_helper.data_preprossing(
         x_raw_train, labels_raw_train, x_raw_test, sigma_noise,
         feature_type=None)

    training = LabeledDatasetWithNoise(features_train, labels_train,
                                       end_level=sigma_noise)
    validation = LabeledDataset(features_validation, labels_validation)

    # Wipe stale summaries/checkpoints so a rerun starts from scratch.
    for kind in ('summaries', 'checkpoints'):
        shutil.rmtree('{}/{}/'.format(kind, EXP_NAME), ignore_errors=True)

    params = hyperparameters.get_params(training.N, EXP_NAME, order, Nside,
                                        experiment_type)
    model = models.deepsphere(**params, new=new, n_neighbors=n_neighbors)

    accuracy_validation, loss_validation, loss_training, t_step, t_batch = model.fit(
        training, validation)
    print("inference time: ", t_batch / params["batch_size"])

    # model_error expects a trailing channel axis here.
    error_validation = experiment_helper.model_error(
        model, features_validation[:, :, np.newaxis], labels_validation)
    print('The validation error is {}%'.format(error_validation * 100),
          flush=True)

    error_test = experiment_helper.model_error(
        model, features_test[:, :, np.newaxis], labels_test)
    print('The testing error is {}%'.format(error_test * 100), flush=True)

    return error_test, t_batch
# Code example #3
def single_experiment(sigma, order, sigma_noise, experiment_type):
    """Train and evaluate a 2D-CNN baseline on re-indexed HEALPix samples.

    The HEALPix pixels are re-ordered with ``build_index`` so each sample
    can be treated as a 2D image by a conventional CNN.

    Parameters
    ----------
    sigma : smoothing parameter selecting which simulated maps to load.
    order : HEALPix order controlling how the sphere is split into samples.
    sigma_noise : noise level added progressively to the training data.
    experiment_type : tag appended to the experiment name and forwarded to
        the hyper-parameter selection.

    Returns
    -------
    The classification error on the test set.
    """
    ename = '_'+experiment_type

    Nside = 1024

    data_path = '../../data/same_psd/'

    EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma{}'.format(
        Nside, sigma_noise, order, sigma, ename)

    # Load raw maps; the training standard deviation is reused to normalize
    # the test set. Test data is read from the parent directory
    # (the trailing 'same_psd/' is stripped).
    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order, data_path=data_path)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std, data_path=data_path[:-9])

    ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type=None)
    features_train, labels_train, features_validation, labels_validation, features_test = ret

    # Side length (in pixels) of one partial-sky sample, and the number of
    # hierarchical levels needed to index it as a 2D image.
    # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the correct replacement.
    nx = Nside//order
    nlevels = int(np.round(np.log2(nx)))
    index = build_index(nlevels).astype(int)

    features_train = features_train[:, index]
    features_validation = features_validation[:, index]
    # Shuffle the test set (features and labels together) so evaluation
    # order carries no structure.
    shuffle = np.random.permutation(len(features_test))
    features_test = features_test[:, index]
    features_test = features_test[shuffle]
    labels_test = labels_test[shuffle]

    training = LabeledDatasetWithNoise(features_train, labels_train, end_level=sigma_noise)
    validation = LabeledDataset(features_validation, labels_validation)

    # Better implementation, but it doesn't work for some reason.
    # params = hyperparameters.get_params_CNN2D(training.N, EXP_NAME, order, Nside, experiment_type)
    # model = Healpix2CNN(**params)

    params = get_params(training.N, EXP_NAME, order, Nside, experiment_type)
    model = models.cnn2d(**params)

    # Cleanup before running again.
    shutil.rmtree('summaries/{}/'.format(EXP_NAME), ignore_errors=True)
    shutil.rmtree('checkpoints/{}/'.format(EXP_NAME), ignore_errors=True)

    model.fit(training, validation)

    error_validation = experiment_helper.model_error(model, features_validation, labels_validation)
    print('The validation error is {}%'.format(error_validation * 100), flush=True)

    error_test = experiment_helper.model_error(model, features_test, labels_test)
    print('The testing error is {}%'.format(error_test * 100), flush=True)

    return error_test
# Code example #4
# SVM baselines: split raw maps and their PSD features into train/test,
# then fit an SVM on each representation and report the errors.
# Test size keeps `ntrain` samples for training out of 2*nclass total.
ret = train_test_split(x_raw,
                       x_psd,
                       labels,
                       test_size=2 * nclass - ntrain,
                       shuffle=True)
x_raw_train, x_raw_test, x_psd_train, x_psd_test, labels_train, labels_test = ret

# Report the class balance of the split (binary labels 0 / 1).
print('Class 1 VS class 2')
print('  Training set: {} / {}'.format(np.sum(labels_train == 0),
                                       np.sum(labels_train == 1)))
print('  Test set: {} / {}'.format(np.sum(labels_test == 0),
                                   np.sum(labels_test == 1)))
# Baseline 1: RBF-kernel SVM on the raw pixel values.
clf = SVC(kernel='rbf')
clf.fit(x_raw_train, labels_train)

e_train = experiment_helper.model_error(clf, x_raw_train, labels_train)
e_test = experiment_helper.model_error(clf, x_raw_test, labels_test)
print('The training error is: {}%'.format(e_train * 100))
print('The testing error is: {}%'.format(e_test * 100))
# Baseline 2: linear SVM on the power-spectral-density features.
clf = SVC(kernel='linear')
clf.fit(x_psd_train, labels_train)

e_train = experiment_helper.model_error(clf, x_psd_train, labels_train)
e_test = experiment_helper.model_error(clf, x_psd_test, labels_test)
print('The training error is: {}%'.format(e_train * 100))
print('The testing error is: {}%'.format(e_test * 100))
# Hyper-parameters for the graph neural network.
# EXP_NAME is assumed to be defined earlier in the script.
params = dict()
params['dir_name'] = EXP_NAME

# Types of layers.
params['conv'] = 'chebyshev5'  # Graph convolution: chebyshev5 or monomials.