Example #1
def attack_rmse(attack, num_colluders):
    # RMSE of the robust aggregate when num_colluders sensors mount the given
    # attack against the remaining legitimate sensors.
    num_legit_sensors = total_sensors - num_colluders
    legitimate_readings = readings_generator.readings(biases[:num_legit_sensors],
                                                      variances[:num_legit_sensors],
                                                      num_times, true_value)
    attacked = attack(legitimate_readings, true_value, num_colluders, colluder_bias)
    return rms_error(robust_aggregate.estimate(attacked, reciprocal),
                     [true_value(t) for t in range(num_times)])
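
The readings_generator.readings helper isn't shown in any of these snippets. Judging from how the examples index its result (one row per sensor, one column per time step, and a .tolist() call in Example #5 that implies a NumPy array), it plausibly looks like the sketch below; this is a reconstruction under those assumptions, not the project's actual code.

import numpy as np

def readings(biases, variances, num_times, true_value):
    # Row s is sensor s's series: true_value(t) + bias_s + zero-mean Gaussian
    # noise with variance variances[s], drawn independently at each time step.
    truth = np.array([true_value(t) for t in range(num_times)], dtype=float)
    offsets = np.asarray(biases, dtype=float)[:, None]
    stddevs = np.sqrt(np.asarray(variances, dtype=float))[:, None]
    noise = np.random.standard_normal((len(variances), num_times)) * stddevs
    return truth + offsets + noise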
Example #2
    def attack_rmse(attack, num_colluders):
        # RMSE of the MLE aggregate under attack, with per-sensor bias and
        # variance re-estimated from the attacked readings themselves.
        num_legit_sensors = total_sensors - num_colluders
        legitimate_readings = readings_generator.readings(
            biases[:num_legit_sensors], variances[:num_legit_sensors],
            num_times, true_value)
        attack_result = attack(legitimate_readings, true_value, num_colluders,
                               colluder_bias)
        est_bias = bias_estimate(attack_result)
        est_var = variance_estimate(attack_result, est_bias)

        return rms_error(mle.estimate(attack_result, est_bias, est_var),
                         [true_value(t) for t in range(num_times)])
    # Bootstrap: first-pass bias and variance estimates from the raw readings
    # seed the MLE weights, which in turn seed the iterative filter.
    first_bias_est = bias_estimate(readings)
    first_variance_est = variance_estimate(readings, first_bias_est)

    weights = mle.weight_vector(first_bias_est, first_variance_est)
    return iterfilter(readings, exponential, weights)
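
mle.estimate and mle.weight_vector aren't shown either. For independent Gaussian sensors with known per-sensor bias and variance, the maximum-likelihood aggregate is the inverse-variance weighted mean of the de-biased readings, so a minimal sketch consistent with the call sites above might be the following (the bias argument to weight_vector is accepted only to match the call signature; the weights depend on the variances alone):

import numpy as np

def weight_vector(bias_est, var_est):
    # Inverse-variance weights, normalised to sum to one.
    weights = 1.0 / np.asarray(var_est, dtype=float)
    return weights / weights.sum()

def estimate(readings, bias_est, var_est):
    # Gaussian MLE: subtract each sensor's estimated bias, then take the
    # inverse-variance weighted mean across sensors at each time step.
    corrected = np.asarray(readings) - np.asarray(bias_est)[:, None]
    return weight_vector(bias_est, var_est) @ corrected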


Example #3
if __name__ == "__main__":
    num_legit_sensors = 20
    biases = [0] * num_legit_sensors
    variances = [1] * num_legit_sensors
    num_times = 800
    true_value = lambda t: 0
    num_colluders = 3
    colluder_bias = 5
    legitimate_readings = readings_generator.readings(biases, variances, num_times, true_value)
    sophisticated_attack_readings = attacks.readings_sophisticated_attack(legitimate_readings,
                                                                          true_value,
                                                                          num_colluders,
                                                                          colluder_bias)
    estimates = robust_first_estimate(sophisticated_attack_readings)
    colluders, ks_results, regularised_errors = find_colluders(sophisticated_attack_readings, estimates)
    print(ks_results)

    fig = pyplot.figure()

    axes = fig.add_subplot(2, 2, 1)
    axes.set_title('Legitimate Sensor')
    axes.hist(regularised_errors[0], bins=20)
    axes.set_ylim(top=150)
    axes.set_xlim(left=-4, right=4)
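
find_colluders isn't included in the snippet, but the ks_results name and the histograms of regularised (standardised) errors suggest a Kolmogorov-Smirnov test of each sensor's standardised residuals against the standard normal. A hypothetical sketch along those lines; the signature, the alpha threshold, and the per-sensor standardisation are all assumptions:

from scipy import stats

def find_colluders(readings, estimates, alpha=0.01):
    # Standardise each sensor's residuals against the aggregate estimate and
    # flag sensors whose residuals fail a KS test for standard normality.
    colluders, ks_results, regularised_errors = [], [], []
    for sensor, series in enumerate(readings):
        errors = [r - e for r, e in zip(series, estimates)]
        mu = sum(errors) / len(errors)
        sigma = (sum((e - mu) ** 2 for e in errors) / len(errors)) ** 0.5
        regularised = [(e - mu) / sigma for e in errors]
        statistic, p_value = stats.kstest(regularised, 'norm')
        ks_results.append((sensor, statistic, p_value))
        regularised_errors.append(regularised)
        if p_value < alpha:
            colluders.append(sensor)
    return colluders, ks_results, regularised_errors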
Example #4
        variances = [variance] * 20

        def truth(t):
            return 0

        cramer_rao += [math.sqrt(1 / sum([1 / v for v in variances]))]

        iter_recip = []
        iter_expo = []
        robust_agg_recip = []
        robust_agg_expo = []

        for i in range(repeats):
            print(i)
            readings = readings_generator.readings(biases, variances,
                                                   num_times, truth)
            # The filters estimate the signal at each time step, so the truth
            # series they are scored against must be num_times long.
            #iter_recip += [rms_error(iterfilter(readings, reciprocal), [0] * num_times)]
            iter_expo += [
                rms_error(iterfilter(readings, exponential), [0] * num_times)
            ]
            #robust_agg_recip += [rms_error(estimate(readings, reciprocal), [0] * num_times)]
            robust_agg_expo += [
                rms_error(estimate(readings, exponential), [0] * num_times)
            ]

        #iter_recip_mean = bayes_mvs(iter_recip)[0]
        iter_expo_mean = bayes_mvs(iter_expo)[0]
        #robust_agg_recip_mean = bayes_mvs(robust_agg_recip)[0]
        robust_agg_expo_mean = bayes_mvs(robust_agg_expo)[0]

        #iter_recip_means += [iter_recip_mean[0]]
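
For reference, the cramer_rao term accumulated at the top of this example is the Cramér-Rao lower bound sqrt(1 / sum_s (1 / v_s)) on the RMSE of any unbiased aggregate of independent Gaussian sensors; with 20 sensors of equal variance v it reduces to sqrt(v / 20). A quick self-contained check against the plain sample mean, which attains the bound:

import math
import random

variance, num_sensors, trials = 1.0, 20, 100000
bound = math.sqrt(1 / sum(1 / variance for _ in range(num_sensors)))  # sqrt(v / n) ~ 0.2236

squared_errors = []
for _ in range(trials):
    readings = [random.gauss(0, math.sqrt(variance)) for _ in range(num_sensors)]
    sample_mean = sum(readings) / num_sensors
    squared_errors.append(sample_mean ** 2)  # the truth is 0, so the error is the mean itself

print(bound, math.sqrt(sum(squared_errors) / trials))  # the two values agree closely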
Example #5
if __name__ == '__main__':
    truth = 0
    num_times = 100

    num_honest = 20
    honest_variance = 1
    honest_bias = 0
    honest_bias_variance = 0.1
    honest_variances = [honest_variance] * num_honest
    honest_biases = [
        random.gauss(honest_bias, honest_bias_variance)
        for i in range(num_honest)
    ]
    honest_readings = readings_generator.readings(honest_biases,
                                                  honest_variances, num_times,
                                                  lambda t: truth).tolist()

    num_influenced = 5
    influenced_value = 1000

    # Each influenced sensor reports the truth until the final time step, then
    # spikes to influenced_value.
    influenced_readings = num_influenced * [[truth] * (num_times - 1) +
                                            [influenced_value]]

    readings = honest_readings + influenced_readings

    estimates = robust_aggregate.estimate(readings,
                                          robust_aggregate.reciprocal)
    error = rms_error(estimates, [truth] * num_times) / len(readings)

    print(error)
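
rms_error is another helper the snippets assume; presumably it pairs the estimate series with an equally long truth series and returns the root-mean-square deviation, which is why the truth list above is num_times entries long. A minimal sketch under that assumption:

import math

def rms_error(estimates, true_values):
    # Root-mean-square deviation between two equal-length series.
    return math.sqrt(sum((e - t) ** 2
                         for e, t in zip(estimates, true_values)) / len(estimates))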
Example #6
if __name__ == '__main__':
    repeats = 1000
    variance = 1
    bias = 0
    truth = 0
    times = 10
    num_sensors = 10

    variances = [variance] * num_sensors
    biases = [bias] * num_sensors

    time_errors = [[] for t in range(times)]

    def truth_fn(t):
        return truth

    for i in range(repeats):
        print('{}/{}'.format(i, repeats))

        readings = readings_generator.readings(biases, variances, times, truth_fn)
        estimate = robust_aggregate.estimate(readings, exponential)

        for t in range(times):
            time_errors[t] += [estimate[t]]

    mvs = [bayes_mvs(errors) for errors in time_errors]

    pp.errorbar(range(times), [m[0][0] for m in mvs],
                yerr=[m[0][0] - m[0][1][0] for m in mvs])
    pp.show()
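
Neither iterfilter nor robust_aggregate.estimate appears in these snippets. In the iterative-filtering literature they alternate a weighted aggregate with trust weights recomputed from each sensor's distance to that aggregate, using a discriminant such as reciprocal or exponential. A self-contained sketch under that reading, with an optional initial-weights argument chosen to match both the two- and three-argument call sites above:

import numpy as np

def reciprocal(distance):
    # Trust falls off as the inverse of the distance from the aggregate.
    return 1.0 / distance

def exponential(distance):
    # Trust decays exponentially with the distance from the aggregate.
    return np.exp(-distance)

def iterfilter(readings, discriminant, weights=None, num_iters=20):
    # Alternate a weighted per-time aggregate with per-sensor trust weights
    # derived from each sensor's mean squared distance to that aggregate.
    readings = np.asarray(readings, dtype=float)
    num_sensors = readings.shape[0]
    if weights is None:
        weights = np.full(num_sensors, 1.0 / num_sensors)
    for _ in range(num_iters):
        estimate = weights @ readings                          # aggregate at each time step
        distances = ((readings - estimate) ** 2).mean(axis=1)  # per-sensor discrepancy
        weights = discriminant(distances)
        weights = weights / weights.sum()
    return weights @ readings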
Example #7
if __name__ == "__main__":
    num_sensors = 20
    num_t1_sensors = 15
    num_t2_sensors = num_sensors - num_t1_sensors
    t1_bias = 0
    t2_bias = 2
    biases = array([t1_bias] * num_t1_sensors + [t2_bias] * num_t2_sensors)
    bias_compensator = -mean(biases)
    compensated_biases = biases + bias_compensator  # broadcast: shifts the biases to zero mean
    variances = array([1 + 0.1 * t for t in range(num_sensors)])
    true_value = lambda t: 1 + 3 * t
    num_times = 10
    num_readings_samples = 1000
    readings_samples = [
        readings(biases, variances, num_times, true_value)
        for i in range(num_readings_samples)
    ]
    bias_estimates = array([bias_estimate(sample) for sample in readings_samples])
    alpha = 0.95
    confidence_intervals = [
        stats.bayes_mvs(v, alpha) for v in bias_estimates.transpose()
    ]

    means = array([ci[0][0] for ci in confidence_intervals])
    mean_lower_bounds = means - array(
        [ci[0][1][0] for ci in confidence_intervals])
    mean_upper_bounds = array([ci[0][1][1]
                               for ci in confidence_intervals]) - means

    stddev = array([ci[2][0] for ci in confidence_intervals])
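
bias_estimate is also assumed by Examples #2 and #7 without being shown. With the true signal unknown, only bias differences are identifiable, which is consistent with the zero-mean "compensated" biases this example constructs; a hypothetical sketch:

import numpy as np

def bias_estimate(readings):
    # Estimate each sensor's bias as its mean offset from the cross-sensor
    # average; the estimates sum to zero, so only relative bias is recovered.
    readings = np.asarray(readings, dtype=float)
    consensus = readings.mean(axis=0)           # average reading at each time step
    return (readings - consensus).mean(axis=1)  # per-sensor mean offset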
Example #8
             [[1] * num_sensors + [0]]

    return solve(matrix, target_vector)[:num_sensors]
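
The visible tail of this helper builds a bordered linear system: an n-by-n block augmented with a column of ones, a row of ones, and a zero corner. That is the Lagrange-multiplier form of a linear system constrained so that the n unknowns sum to a fixed value, matching the zero-mean compensated biases below. A generic sketch of solving such a system (the function name and API are illustrative, not the project's):

import numpy as np

def solve_with_sum_constraint(A, b, total=0.0):
    # Solve A x + lam * ones = b subject to sum(x) == total by bordering A
    # with a row and column of ones; the extra unknown lam is the Lagrange
    # multiplier, which is dropped before returning.
    n = len(b)
    bordered = np.block([[A, np.ones((n, 1))],
                         [np.ones((1, n)), np.zeros((1, 1))]])
    target = np.concatenate([b, [total]])
    return np.linalg.solve(bordered, target)[:n]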


if __name__ == "__main__":
    num_sensors = 20
    biases = array([0.3 * sensor for sensor in range(num_sensors)])
    compensated_biases = biases - array([mean(biases)] * num_sensors)
    variances = array([(num_sensors - sensor + 1) / 2
                       for sensor in range(num_sensors)])
    num_times = 10
    true_value = lambda t: (t - num_times / 2)**4
    num_readings = 500
    reading_sampling = [
        readings_generator.readings(compensated_biases, variances, num_times,
                                    true_value) for i in range(num_readings)
    ]
    bias_estimates = array(
        [linear_solution_bias_estimate(r) for r in reading_sampling])
    variance_estimates_with_estimated_biases = array([
        linear_solution_variance_estimate(r, b)
        for r, b in zip(reading_sampling, bias_estimates)
    ])
    alpha = 0.95
    #variance_estimates[i,s] gives the estimate of sensor s in reading i.
    #variance_estimates.transpose()[s, i] gives the same.
    variance_cis = [
        stats.bayes_mvs(v_est, alpha)
        for v_est in variance_estimates_with_estimated_biases.transpose()
    ]
    v_mean_cis, v_var_cis, v_std_cis = zip(*variance_cis)
Example #9
               for j in range(num_sensors)] + [1]
              for i in range(num_sensors)] + \
             [[1] * num_sensors + [0]]

    return solve(matrix, target_vector)[:num_sensors]


if __name__ == "__main__":
    num_sensors = 20
    biases = array([0.3 * sensor for sensor in range(num_sensors)])
    compensated_biases = biases - array([mean(biases)] * num_sensors)
    variances = array([(num_sensors - sensor + 1) / 2 for sensor in range(num_sensors)])
    num_times = 10
    true_value = lambda t: (t - num_times / 2) ** 4
    num_readings = 500
    reading_sampling = [readings_generator.readings(compensated_biases, variances, num_times, true_value) for i in
                        range(num_readings)]
    bias_estimates = array([linear_solution_bias_estimate(r) for r in reading_sampling])
    variance_estimates_with_estimated_biases = array(
        [linear_solution_variance_estimate(r, b) for r, b in zip(reading_sampling, bias_estimates)])
    alpha = 0.95
    #variance_estimates[i,s] gives the estimate of sensor s in reading i.
    #variance_estimates.transpose()[s, i] gives the same.
    variance_cis = [stats.bayes_mvs(v_est, alpha) for v_est in variance_estimates_with_estimated_biases.transpose()]
    v_mean_cis, v_var_cis, v_std_cis = zip(*variance_cis)
    v_mean_cntrs, v_mean_bounds = zip(*v_mean_cis)
    v_mean_lo, v_mean_hi = zip(*v_mean_bounds)
    v_mean_lo_diff = array(v_mean_cntrs) - array(v_mean_lo)
    v_mean_hi_diff = array(v_mean_hi) - array(v_mean_cntrs)

    fig = pyplot.figure()
Example #10
        print('variance: {}'.format(variance))

        variances = [variance] * 20
        def truth(t):
            return 0

        cramer_rao += [math.sqrt(1 / sum([1 / v for v in variances]))]

        iter_recip = []
        iter_expo = []
        robust_agg_recip = []
        robust_agg_expo = []

        for i in range(repeats):
            print(i)
            readings = readings_generator.readings(biases, variances, num_times, truth)
            #iter_recip += [rms_error(iterfilter(readings, reciprocal), [0] * num_times)]
            iter_expo += [rms_error(iterfilter(readings, exponential), [0] * num_times)]
            #robust_agg_recip += [rms_error(estimate(readings, reciprocal), [0] * num_times)]
            robust_agg_expo += [rms_error(estimate(readings, exponential), [0] * num_times)]

        #iter_recip_mean = bayes_mvs(iter_recip)[0]
        iter_expo_mean = bayes_mvs(iter_expo)[0]
        #robust_agg_recip_mean = bayes_mvs(robust_agg_recip)[0]
        robust_agg_expo_mean = bayes_mvs(robust_agg_expo)[0]

        #iter_recip_means += [iter_recip_mean[0]]
        iter_expo_means += [iter_expo_mean[0]]
        #robust_agg_recip_means += [robust_agg_recip_mean[0]]
        robust_agg_expo_means += [robust_agg_expo_mean[0]]