# Example #1
# Shared imports for the snippets below (an assumption based on the
# DeepSphere repository layout: the helpers live in the `deepsphere` package
# and `get_params` comes from the experiment scripts' `hyperparameters`
# module).
import shutil

import numpy as np

import hyperparameters
from hyperparameters import get_params
from deepsphere import experiment_helper, models
from deepsphere.data import LabeledDataset, LabeledDatasetWithNoise


def single_experiment(sigma, order, sigma_noise, experiment_type):

    ename = '_' + experiment_type

    Nside = 1024

    EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma{}'.format(
        Nside, sigma_noise, order, sigma, ename)

    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std)

    ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type=None)
    features_train, labels_train, features_validation, labels_validation, features_test = ret

    training = LabeledDatasetWithNoise(features_train, labels_train, end_level=sigma_noise)
    validation = LabeledDataset(features_validation, labels_validation)

    params = get_params(training.N, EXP_NAME, order, Nside, experiment_type)
    model = models.deepsphere(**params)

    # Cleanup before running again.
    shutil.rmtree('summaries/{}/'.format(EXP_NAME), ignore_errors=True)
    shutil.rmtree('checkpoints/{}/'.format(EXP_NAME), ignore_errors=True)

    model.fit(training, validation)

    error_validation = experiment_helper.model_error(model, features_validation, labels_validation)
    print('The validation error is {}%'.format(error_validation * 100), flush=True)

    error_test = experiment_helper.model_error(model, features_test, labels_test)
    print('The testing error is {}%'.format(error_test * 100), flush=True)

    return error_test
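
# A minimal usage sketch for the snippet above; the argument values are
# hypothetical. In the DeepSphere experiments, `experiment_type` selects the
# architecture variant forwarded to `get_params` (e.g. 'FCN' or 'CNN').
error = single_experiment(sigma=3, order=4, sigma_noise=2,
                          experiment_type='FCN')
print('Final test error: {:.2f}%'.format(error * 100))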
# Example #2
def single_experiment(sigma, order, sigma_noise, path):
    """Run as experiment.

    Check the notebook `part_sphere.ipynb` to get more insides about this code.
    """
    Nside = 1024
    print(
        'Solve the histogram problem for sigma {}, order {}, noise {}'.format(
            sigma, order, sigma_noise),
        flush=True)
    EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma'.format(
        Nside, sigma_noise, order, sigma)

    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(
        sigma, order)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(
        sigma, order, sigma_noise, x_raw_std)

    if order == 4:
        augmentation = 20
    else:
        augmentation = 40

    ret = experiment_helper.data_preprossing(x_raw_train,
                                             labels_raw_train,
                                             x_raw_test,
                                             sigma_noise,
                                             feature_type='histogram',
                                             augmentation=augmentation)
    features_train, labels_train, features_validation, labels_validation, features_test = ret
    ntrain = len(features_train) // augmentation

    # Learning-curve sample counts: a fine sweep over small training sets,
    # then a coarser sweep up to the full augmented set.
    nsamples = list(ntrain // 12 * np.linspace(1, 6, num=6).astype(int))
    nsamples += list(ntrain // 2 *
                     np.linspace(1, augmentation * 2, num=40).astype(int))

    err_train = np.full(len(nsamples), np.nan)
    err_validation = np.full(len(nsamples), np.nan)

    for i, n in enumerate(nsamples):
        print('{} Solve it for {} samples'.format(i, n), flush=True)
        err_train[i], err_validation[i], _ = experiment_helper.err_svc_linear(
            features_train[:n], labels_train[:n], features_validation,
            labels_validation)

    e_train, e_validation, C = experiment_helper.err_svc_linear(
        features_train, labels_train, features_validation, labels_validation)
    print('The validation error is {}%'.format(e_validation * 100), flush=True)

    # Cheating in favor of the SVM: reuse the regularization constant C
    # selected on the validation set when evaluating on the test set.
    e_train, e_test = experiment_helper.err_svc_linear_single(
        C, features_train, labels_train, features_test, labels_test)
    print('The test error is {}%'.format(e_test * 100), flush=True)

    # Save each array under its own key; a single ragged list would fail to
    # stack into one array.
    np.savez(path + EXP_NAME, nsamples=nsamples, err_train=err_train,
             err_validation=err_validation, e_test=e_test)

    return e_test
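
# A hypothetical driver for the learning-curve experiment above, sweeping the
# noise level at a fixed smoothing and order; the values and the output path
# are illustrative only.
for sigma_noise in [0.5, 1, 2]:
    e_test = single_experiment(sigma=3, order=4, sigma_noise=sigma_noise,
                               path='results/histogram/')
    print('sigma_noise={}: test error {:.2f}%'.format(sigma_noise,
                                                      e_test * 100))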
# Example #3
def single_experiment(sigma, order, sigma_noise, experiment_type, new,
                      n_neighbors):

    ename = '_' + experiment_type

    Nside = 1024

    if Nside == 1024:
        data_path = '/mnt/scratch/lts2/mdeff/deepsphere/data/same_psd/'
    else:
        data_path = 'data/same_psd/'

    EXP_NAME = 'cosmo' if new else 'oldgraph'
    EXP_NAME += '_{}sides_{}noise_{}order_{}sigma_{}neighbor{}_fold3'.format(
        Nside, sigma_noise, order, sigma, n_neighbors, ename)

    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(
        sigma, order, data_path=data_path)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(
        sigma, order, sigma_noise, x_raw_std,
        data_path=data_path[:-9])  # strip the trailing 'same_psd/'

    ret = experiment_helper.data_preprossing(x_raw_train,
                                             labels_raw_train,
                                             x_raw_test,
                                             sigma_noise,
                                             feature_type=None)
    features_train, labels_train, features_validation, labels_validation, features_test = ret

    training = LabeledDatasetWithNoise(features_train,
                                       labels_train,
                                       end_level=sigma_noise)
    validation = LabeledDataset(features_validation, labels_validation)

    # Cleanup before running again.

    shutil.rmtree('summaries/{}/'.format(EXP_NAME), ignore_errors=True)
    shutil.rmtree('checkpoints/{}/'.format(EXP_NAME), ignore_errors=True)

    params = hyperparameters.get_params(training.N, EXP_NAME, order, Nside,
                                        experiment_type)
    model = models.deepsphere(**params, new=new, n_neighbors=n_neighbors)

    accuracy_validation, loss_validation, loss_training, t_step, t_batch = model.fit(
        training, validation)
    print("inference time: ", t_batch / params["batch_size"])

    error_validation = experiment_helper.model_error(
        model, features_validation[:, :, np.newaxis], labels_validation)
    print('The validation error is {}%'.format(error_validation * 100),
          flush=True)

    error_test = experiment_helper.model_error(model,
                                               features_test[:, :, np.newaxis],
                                               labels_test)
    print('The testing error is {}%'.format(error_test * 100), flush=True)

    return error_test, t_batch
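
# A usage sketch for the variant above, comparing the old and the new graph
# construction; all argument values are hypothetical.
for new in [False, True]:
    err, t_batch = single_experiment(sigma=3, order=4, sigma_noise=2,
                                     experiment_type='FCN', new=new,
                                     n_neighbors=8)
    print('new graph={}: test error {:.2f}%, batch time {:.3f}s'.format(
        new, err * 100, t_batch))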
# Example #4
def single_experiment(sigma, order, sigma_noise, experiment_type):

    ename = '_' + experiment_type

    Nside = 1024

    data_path = '../../data/same_psd/'

    EXP_NAME = '40sim_{}sides_{}noise_{}order_{}sigma{}'.format(
        Nside, sigma_noise, order, sigma, ename)

    x_raw_train, labels_raw_train, x_raw_std = experiment_helper.get_training_data(sigma, order, data_path=data_path)
    x_raw_test, labels_test, _ = experiment_helper.get_testing_data(sigma, order, sigma_noise, x_raw_std, data_path=data_path[:-9])  # strip 'same_psd/'

    ret = experiment_helper.data_preprossing(x_raw_train, labels_raw_train, x_raw_test, sigma_noise, feature_type=None)
    features_train, labels_train, features_validation, labels_validation, features_test = ret

    nx = Nside // order
    nlevels = np.round(np.log2(nx)).astype(int)
    index = build_index(nlevels).astype(int)

    # Rearrange the HEALPix pixels into 2D images for the CNN baseline.
    features_train = features_train[:, index]
    features_validation = features_validation[:, index]
    # Shuffle the test set, keeping features and labels aligned.
    shuffle = np.random.permutation(len(features_test))
    features_test = features_test[:, index]
    features_test = features_test[shuffle]
    labels_test = labels_test[shuffle]

    training = LabeledDatasetWithNoise(features_train, labels_train, end_level=sigma_noise)
    validation = LabeledDataset(features_validation, labels_validation)

    # Better implementation, but it doesn't work for some reason.
    # params = hyperparameters.get_params_CNN2D(training.N, EXP_NAME, order, Nside, experiment_type)
    # model = Healpix2CNN(**params)

    params = get_params(training.N, EXP_NAME, order, Nside, experiment_type)
    model = models.cnn2d(**params)

    # Cleanup before running again.
    shutil.rmtree('summaries/{}/'.format(EXP_NAME), ignore_errors=True)
    shutil.rmtree('checkpoints/{}/'.format(EXP_NAME), ignore_errors=True)

    model.fit(training, validation)

    error_validation = experiment_helper.model_error(model, features_validation, labels_validation)
    print('The validation error is {}%'.format(error_validation * 100), flush=True)

    error_test = experiment_helper.model_error(model, features_test, labels_test)
    print('The testing error is {}%'.format(error_test * 100), flush=True)

    return error_test
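
# `build_index` is used above but defined elsewhere in the original script.
# A minimal sketch of what it could look like, assuming its job is to arrange
# the nested-ordered HEALPix pixels of one base face into a 2D grid for the
# 2D CNN baseline (the exact quadrant convention is an assumption):
def build_index(nlevels):
    """Recursively build a 2^nlevels x 2^nlevels grid of nested pixel ids."""
    if nlevels == 0:
        return np.array([[0]])
    sub = build_index(nlevels - 1)  # index grid for one quadrant
    n = 4 ** (nlevels - 1)          # number of pixels per quadrant
    # In nested ordering, each quadrant holds a contiguous block of ids.
    return np.block([[sub, sub + n],
                     [sub + 2 * n, sub + 3 * n]])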