Example 1
        # plot the error bound against the logged weight difference
        plt.plot(steps, error_bound)
        plt.plot(steps, diff_weight_log)
        plt.grid()
        plt.show()

    return True


# create a batch of test datasets from the same ground-truth model
w0 = np.expand_dims(np.array([1]), axis=1)
datasets = []
for _ in range(50):
    datasets.append(
        DataGeneration.linear_model(n_samples=300000,
                                    feature_type='random',
                                    noise_type='gaussian',
                                    x_spread=4,
                                    w0=w0))
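
# NOTE (sketch): DataGeneration.linear_model is a project-local generator.
# The function below is a minimal, hypothetical sketch of the assumed data
# model behind the call above (uniform 'random' features, 'gaussian' noise,
# linear ground truth w0); it is NOT the project's implementation.
def linear_model_sketch(n_samples, x_spread, w0, seed=0):
    rng = np.random.default_rng(seed)
    # 'random' features drawn uniformly from [-x_spread, x_spread]
    x = rng.uniform(-x_spread, x_spread, size=(n_samples, w0.shape[0]))
    # 'gaussian' observation noise on the linear ground-truth model
    y = x @ w0 + rng.standard_normal((n_samples, 1))
    return x, y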

param_UE = {'N': 3000, 'verbose': False}

hyperparam_LMS = {
    'N': param_UE['N'],
    'mu': 10,
    'gamma': 0.001,
    'model': datasets[0].gt_model,
    'alpha_factor': 1.1
}

hyperparam_QE_upper = {
    'N': param_UE['N'],
Example 2
import pickle

import numpy as np

# DataGeneration, UncertaintyEstimator, RUMIEstimator, GPREstimator and
# BLREstimator are project-local classes from the TUMFTM repository; their
# exact import paths are assumed and must match the surrounding package.


def vmc_benchmark():
    # parameters
    path = '../paper_results/output/'

    # The training data was generated with the vehicle dynamics simulation and
    # controller of the TUM Roborace project, which are available as open
    # source software at https://github.com/TUMFTM.

    # load dataset to be used for training
    data = np.loadtxt('../input/Modena_UncertaintyEstimation.csv',
                      delimiter=',')

    # extract the input (column 6) and disturbance (column 2) signals,
    # keeping every 5th sample in the index range [10000, 250000)
    ay_target_mps2 = np.expand_dims(data[10000:250000:5, 6], axis=1)
    ay_dist_mps2 = np.expand_dims(data[10000:250000:5, 2], axis=1)

    dataset = DataGeneration.dataset(ay_target_mps2, ay_dist_mps2)

    param_UE = {'N': 500, 'verbose': False}
    hyperparam_LMS = {
        'N': param_UE['N'],
        'mu': 10,
        'gamma': 0.001,
        'model': dataset.gt_model,
        'alpha_factor': 1.2,
    }
    hyperparam_QE_upper = {
        'N': param_UE['N'],
        'q_target': 0.99,
        'lambda': 6.0,
        'beta': 0.0,
        'r0_estimate': 6.0
    }
    hyperparam_QE_lower = {
        'N': param_UE['N'],
        'q_target': 0.01,
        'lambda': 6.0,
        'beta': 0.0,
        'r0_estimate': -6
    }
    hyperparam_RUMI = {
        'lms': hyperparam_LMS,
        'qe_upper': hyperparam_QE_upper,
        'qe_lower': hyperparam_QE_lower
    }
    hyperparam_GPR = {
        'N': param_UE['N'],
        'target_percentage': 0.98,
        'length_scale': 4
    }
    hyperparam_BLR = {
        'N': param_UE['N'],
        'sigma': 2.6,
        'tau': 0.01,
        'a0': 8000,
        'n0': 4000,
        'target_percentage': 0.98,
        'model': dataset.gt_model,
    }

    rumi_estimators = []
    gp_estimators = []
    blr_estimators = []

    # create and train RUMI estimator
    rumi_estimators.append(
        UncertaintyEstimator(RUMIEstimator(hyperparam_RUMI), dataset,
                             param_UE))
    rumi_estimators[-1].learn()
    # create and train GPR estimator
    gp_estimators.append(
        UncertaintyEstimator(GPREstimator(hyperparam_GPR), dataset, param_UE))
    gp_estimators[-1].learn()
    # create and train BLR estimator
    blr_estimators.append(
        UncertaintyEstimator(BLREstimator(hyperparam_BLR), dataset, param_UE))
    blr_estimators[-1].learn()

    full_data = [[gp_estimators, 'GPR'], [blr_estimators, 'BLR'],
                 [rumi_estimators, 'RUMI']]
    with open(path + "VMC.p", "wb") as f:
        pickle.dump(full_data, f)
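
# Reading the dumped results back: a minimal sketch (load_vmc_results is a
# hypothetical helper) that relies only on the [estimators, label] layout
# written by vmc_benchmark above. Unpickling requires the estimator classes
# to be importable.
def load_vmc_results(path='../paper_results/output/'):
    with open(path + 'VMC.p', 'rb') as f:
        full_data = pickle.load(f)
    for estimators, label in full_data:
        print(label, '->', len(estimators), 'trained estimator(s)')
    return full_data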
Example 3
    'q_target': 0.75,
    'lambda': 2,
    'r0_estimate': 5,
    'delta_r0': 0.2
}

# set RPI size for visualization
rpi_min = 0.5

# set seed for reproducibility
np.random.seed(0)
# create a zero-mean model (w0 = 0) for better visualization
w0 = np.expand_dims(np.zeros(5), axis=1)
data = DataGeneration.linear_model(n_samples=10000,
                                   feature_type='correlated',
                                   noise_type='uniform',
                                   x_spread=5,
                                   w0=w0)

# assess empirical estimator stability: compute windowed quantile estimates
# and track their upper and lower envelopes
x = np.linspace(-10, 10, 801)
alpha_est_max = np.zeros_like(x)
alpha_est_min = np.ones_like(x)
alpha_est_list = []
for idx in range(0, len(data.y_data) - hyperparam_QE['N'], 10):
    alpha_est = np.zeros_like(x)
    # empirical CDF of the current data window, evaluated on the grid x
    for x_idx in np.arange(len(x)):
        alpha_est[x_idx] = np.sum(data.y_data[idx:idx + hyperparam_QE['N']] <
                                  x[x_idx]) / hyperparam_QE['N']
    alpha_est_list.append(alpha_est)
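
# NOTE (sketch): the update of alpha_est_max / alpha_est_min is cut off in
# this excerpt; a plausible completion, assuming an elementwise envelope
# over the windowed estimates collected above:
for alpha_est in alpha_est_list:
    alpha_est_max = np.maximum(alpha_est_max, alpha_est)
    alpha_est_min = np.minimum(alpha_est_min, alpha_est)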
Example 4
import pickle

import numpy as np

# DataGeneration, UncertaintyEstimator, RUMIEstimator, GPREstimator and
# BLREstimator are again project-local classes; their import paths are
# assumed as in the examples above.


def dist_comparison():
    # parameters
    n_datasets = 20
    n_samples = 60000
    path = 'output/'

    # set disturbance models
    disturbances = ['gaussian', 'nongaussian']

    # set seed for reproducibility
    np.random.seed(0)

    # container for all generated datasets
    datasets_all = []

    # sample ground-truth parameters and normalize the parameter vector
    w0 = np.expand_dims(np.random.rand(5) - 0.5, axis=1)
    w0 = w0 / np.linalg.norm(w0)

    # generate a batch of datasets for each disturbance model
    for disturbance_type in disturbances:
        dataset_batch = []
        for _ in range(n_datasets):
            dataset_batch.append(
                DataGeneration.linear_model(n_samples=n_samples,
                                            feature_type='random',
                                            noise_type=disturbance_type,
                                            x_spread=10,
                                            w0=w0))
        datasets_all.append(dataset_batch)

    # specify algorithm parameters
    param_UE = {'N': 1000, 'verbose': False}
    hyperparam_LMS = {
        'N': param_UE['N'],
        'mu': 10,
        'gamma': 0.01,
        'model': datasets_all[0][0].gt_model
    }
    hyperparam_QE_upper = {
        'N': param_UE['N'],
        'q_target': 0.9,
        'lambda': 0.5,
        'beta': 0.0,
        'r0_estimate': 2
    }
    hyperparam_QE_lower = {
        'N': param_UE['N'],
        'q_target': 0.1,
        'lambda': 0.5,
        'beta': 0.0,
        'r0_estimate': -2
    }
    hyperparam_RUMI = {
        'lms': hyperparam_LMS,
        'qe_upper': hyperparam_QE_upper,
        'qe_lower': hyperparam_QE_lower
    }
    hyperparam_GPR = {
        'N': param_UE['N'],
        'q0': 0.9,
        'target_percentage': 0.8,
        'length_scale': 3
    }
    hyperparam_BLR = {
        'N': param_UE['N'],
        'sigma': 1.41,
        'tau': 0.1,
        'a0': 8000,
        'n0': 4000,
        'target_percentage': 0.8,
        'model': datasets_all[0][0].gt_model
    }

    counter = 0
    for dataset_batch, disturbance_type in zip(datasets_all, disturbances):
        rumi_estimators = []
        gpr_estimators = []
        blr_estimators = []

        for dataset in dataset_batch:
            # create and train RUMI estimator
            rumi_estimators.append(
                UncertaintyEstimator(RUMIEstimator(hyperparam_RUMI), dataset,
                                     param_UE))
            rumi_estimators[-1].learn()
            # create and train GPR estimator
            gpr_estimators.append(
                UncertaintyEstimator(GPREstimator(hyperparam_GPR), dataset,
                                     param_UE))
            gpr_estimators[-1].learn()
            # create and train BLR estimator
            blr_estimators.append(
                UncertaintyEstimator(BLREstimator(hyperparam_BLR), dataset,
                                     param_UE))
            blr_estimators[-1].learn()
            counter += 1
            print('Done with ' + str(counter) + ' datasets...')

        full_data = [[gpr_estimators, 'GPR - ' + disturbance_type],
                     [blr_estimators, 'BLR - ' + disturbance_type],
                     [rumi_estimators, 'RUMI - ' + disturbance_type]]
        with open(path + "disturbance_variation_" + disturbance_type + ".p",
                  "wb") as f:
            pickle.dump(full_data, f)