def estimate(readings, discriminant):
    """Robustly aggregate sensor readings in two passes.

    Pass 1 fits per-sensor bias/variance on all readings and derives MLE
    weights; those weights drive the iterative filter, whose output lets
    the collusion detector partition the readings into a trustworthy
    subset.  Pass 2 re-fits bias/variance on that subset only and returns
    its MLE estimate.
    """
    # --- Pass 1: fit on every reading ------------------------------
    bias_all = bias_estimate(readings)
    var_all = variance_estimate(readings, bias_all)
    # NOTE(review): this first-pass estimate was never used in the
    # original; the call is kept in case mle.estimate has side effects.
    mle.estimate(readings, bias_all, var_all)

    # MLE-derived weights feed the iterative filter.
    filter_weights = mle.weight_vector(bias_all, var_all)
    filter_out = iterfilter(readings, discriminant, filter_weights)

    # Keep only readings the collusion detector trusts.
    trusted = collusion_detection.partition(readings, filter_out)

    # --- Pass 2: re-fit on the trustworthy subset ------------------
    bias_trusted = bias_estimate(trusted)
    var_trusted = variance_estimate(trusted, bias_trusted)
    return mle.estimate(trusted, bias_trusted, var_trusted)
def estimate(readings, discriminant):
    """Two-pass robust estimate of the true values behind `readings`.

    NOTE(review): this is a second definition of ``estimate`` in the same
    file and shadows the earlier one — likely a scrape/merge artifact.
    """
    initial_bias = bias_estimate(readings)
    initial_variance = variance_estimate(readings, initial_bias)
    # First-pass estimate was unused in the original; call preserved in
    # case mle.estimate has side effects.
    _ = mle.estimate(readings, initial_bias, initial_variance)

    # Filter with MLE-derived weights, then keep only the readings the
    # collusion detector considers trustworthy.
    kept = collusion_detection.partition(
        readings,
        iterfilter(readings, discriminant,
                   mle.weight_vector(initial_bias, initial_variance)))

    # Second pass: re-fit on the trusted readings and estimate from them.
    refit_bias = bias_estimate(kept)
    refit_variance = variance_estimate(kept, refit_bias)
    return mle.estimate(kept, refit_bias, refit_variance)
    def attack_rmse(attack, num_colluders):
        """RMS error of the MLE estimate under `attack` with `num_colluders`.

        Reads closure state from the enclosing (unseen) scope:
        total_sensors, biases, variances, num_times, true_value,
        colluder_bias, plus the helpers readings_generator / rms_error.
        """
        honest = total_sensors - num_colluders
        # Generate readings for the honest sensors only; the attack
        # supplies the colluders' readings.
        clean = readings_generator.readings(
            biases[:honest], variances[:honest], num_times, true_value)
        attacked = attack(clean, true_value, num_colluders, colluder_bias)

        # Re-fit bias/variance on the attacked matrix before estimating.
        fitted_bias = bias_estimate(attacked)
        fitted_var = variance_estimate(attacked, fitted_bias)
        ground_truth = [true_value(t) for t in range(num_times)]
        return rms_error(mle.estimate(attacked, fitted_bias, fitted_var),
                         ground_truth)
Example #4
0
def readings_ks_attack(legitimate_readings,
                       true_value,
                       num_colluders, colluder_bias):
    """Append `num_colluders` colluding sensor rows to the reading matrix.

    ``readings[s, t]`` is the reading of sensor s at time t.  Each of the
    first ``num_colluders - 1`` colluders reports the current robust
    aggregate shifted by ``colluder_bias`` plus noise; the final colluder
    tracks the MLE estimate of the poisoned matrix (noise only, no bias).
    """

    def robust(readings):
        # Robust aggregate used as the colluders' shadowing target.
        return robust_aggregate.estimate(readings, robust_aggregate.reciprocal)

    shift = colluder_bias

    # First colluder shadows the aggregate of the honest readings only.
    forged = [v + shift + _noise() for v in robust(legitimate_readings)]
    poisoned = vstack((array(legitimate_readings), array(forged)))

    # Each further colluder (except the last) shadows the aggregate of
    # the matrix poisoned so far.
    for _ in range(num_colluders - 2):
        forged = [v + shift + _noise() for v in robust(poisoned)]
        poisoned = vstack((array(poisoned), array(forged)))

    # Last colluder: track the MLE estimate of the poisoned matrix.
    fitted_bias = bias_estimate(poisoned)
    fitted_var = variance_estimate(poisoned, fitted_bias)
    center = mle.estimate(poisoned, fitted_bias, fitted_var)
    final_row = [v + _noise() for v in center]
    return vstack((array(poisoned), array(final_row)))
Example #5
0
def em(X,
       n_component=3,
       w0=None,
       g0=None,
       m0=None,
       s0=None,
       estimator=lambda X, g: mle.estimate(X, g, name='standard'),
       max_iter=100,
       tol=1e-4,
       suppress=0.,
       local=None):
    """Fit a mixture of shifted (3-parameter) lognormals to X by EM.

    Parameters (assumed from usage — TODO confirm against callers):
      X           sample values (1-D array-like).
      n_component initial number of mixture components.
      w0, g0, m0, s0
                  initial weights, location shifts, log-scale means and
                  shape parameters, one per component.
      estimator   per-component weighted MLE; returns (g, m, s, ok_flag).
      max_iter    maximum number of EM iterations.
      tol         convergence threshold on the likelihood increase.
      suppress    responsibility cutoff: points with responsibility <=
                  suppress are excluded from a component's M-step.
      local       if not None, damp each M-step with a bounded line
                  search along the parameter update direction.

    Returns (w, g, m, s, dF) — the last accepted parameters and the
    final likelihood change.
    """
    n = len(X)
    # Working copies of the mixture parameters.
    w = np.array(w0)
    g = np.array(g0)
    m = np.array(m0)
    s = np.array(s0)
    # Responsibilities q[j, i] of component j for sample i.
    q = np.zeros((n_component, n))
    F_opt = -np.inf
    for iter in range(max_iter):  # NOTE(review): `iter` shadows the builtin
        # Snapshot the last accepted parameters; these are what we return.
        w0, g0, m0, s0 = w, g, m, s
        # E-step: unnormalized responsibilities under each component.
        q = w[:, np.newaxis] * lognorm.pdf(X,
                                           s[:, np.newaxis],
                                           loc=g[:, np.newaxis],
                                           scale=np.exp(m)[:, np.newaxis])
        z = q.sum(axis=0)
        idx = z > 0
        # Normalize responsibilities only where some component has support.
        q[:, idx] /= z[np.newaxis, idx]
        # M-step for the weights, then prune components with zero weight.
        w = q.sum(axis=1)
        w /= np.sum(w)
        idx = w != 0
        w = w[idx]
        g = g[idx]
        m = m[idx]
        s = s[idx]
        q = q[idx, :]
        n_component = sum(idx)
        # Per-component M-step on the points each component is
        # responsible for (responsibility above `suppress`).
        for j in range(n_component):
            z = q[j, :]
            #z[z < suppress] = 0
            # r flags whether the weighted MLE fit succeeded.
            gt, mt, st, r = estimator(X[z > suppress], z[z > suppress])
            if r:
                if local is not None:
                    # Damped update: bounded line search over alpha in
                    # [0, 1] along the direction old -> new parameters.
                    step = lambda alpha: -mle.likelihood_log(
                        g[j] + alpha * (gt - g[j]), m[j] + alpha *
                        (mt - m[j]), s[j] + alpha *
                        (st - s[j]), X[z > suppress], z[z > suppress])
                    o = opt.minimize_scalar(step,
                                            method='bounded',
                                            bounds=[0, 1])
                    alpha = o.x
                    gt, mt, st = g[j] + alpha * (gt - g[j]), m[j] + alpha * (
                        mt - m[j]), s[j] + alpha * (st - s[j])
                # Reject the update if it lowers the component's weighted
                # log-likelihood; otherwise commit it.
                if mle.likelihood_log(gt, mt, st, X[z > suppress], z[z > suppress]) < \
                mle.likelihood_log(g[j], m[j], s[j], X[z > suppress], z[z > suppress]):
                    gt, mt, st = g[j], m[j], s[j]
                g[j] = gt
                m[j] = mt
                s[j] = st
        F = F_opt
        # NOTE(review): bare `likelihood_log` here vs `mle.likelihood_log`
        # above — confirm both names resolve in this module.
        F_opt = likelihood_log(w, g, m, s, X)
        if 0 <= F_opt - F < tol:
            # Converged: accept the freshly updated parameters.
            w0, g0, m0, s0 = w, g, m, s
            break
        if F_opt - F < 0:
            # Likelihood decreased: return the previous snapshot instead.
            break
    return w0, g0, m0, s0, F_opt - F
    # NOTE(review): this fragment sits after the `return` above and is
    # unreachable as placed — it looks like the body of a separate
    # benchmark function whose `def` line is missing from this chunk
    # (it reads `repeats`, `exponential`, `pp`, `bayes_mvs` from an
    # enclosing or module scope — TODO confirm).
    variance = 1
    bias = 0
    truth = 0
    times = 10
    num_sensors = 10

    # Every sensor gets the same bias and variance.
    variances = [variance] * num_sensors
    biases = [bias] * num_sensors

    iter_rms_errors = []
    mle_rms_errors = []

    def truth_fn(t):
        # Constant ground truth at every time step.
        return truth

    for i in range(repeats):
        print ('{}/{}'.format(i, repeats))

        # Compare the iterative robust aggregate against the plain MLE
        # on freshly generated readings.
        readings = readings_generator.readings(biases, variances, times, truth_fn)
        estimate = robust_aggregate.estimate(readings, exponential)
        iter_rms_errors += [rms_error(estimate, [0]*num_sensors)]
   
        # NOTE(review): `mle_estiamte` is a typo for `mle_estimate`, and
        # the argument order (variances before biases) differs from the
        # `(readings, bias, variance)` order used elsewhere — confirm.
        mle_estiamte = mle.estimate(readings, variances, biases) 
        mle_rms_errors += [rms_error(mle_estiamte, [0]*num_sensors)]

    # Bayesian credible intervals (mean/var/std) over the repeats.
    iter_mvs = bayes_mvs(iter_rms_errors)
    mle_mvs = bayes_mvs(mle_rms_errors)

    # Bar chart of mean RMS error per method, with lower error bars.
    pp.bar([0, 1], [iter_mvs[0][0], mle_mvs[0][0]], yerr=[iter_mvs[0][0]-iter_mvs[0][1][0], mle_mvs[0][0]-mle_mvs[0][1][0]])
    pp.show()
Example #7
0
    # NOTE(review): autoformatted duplicate of the benchmark body above;
    # its enclosing `def` line (and the variance/bias/truth setup) is
    # missing from this chunk — `variance`, `bias`, `truth`, `times`,
    # `num_sensors`, `repeats`, `exponential`, `pp`, `bayes_mvs` must
    # come from an enclosing or module scope.
    variances = [variance] * num_sensors
    biases = [bias] * num_sensors

    iter_rms_errors = []
    mle_rms_errors = []

    def truth_fn(t):
        # Constant ground truth at every time step.
        return truth

    for i in range(repeats):
        print('{}/{}'.format(i, repeats))

        # Compare the iterative robust aggregate against the plain MLE.
        readings = readings_generator.readings(biases, variances, times,
                                               truth_fn)
        estimate = robust_aggregate.estimate(readings, exponential)
        iter_rms_errors += [rms_error(estimate, [0] * num_sensors)]

        # NOTE(review): `mle_estiamte` is a typo for `mle_estimate`, and
        # the argument order (variances before biases) differs from the
        # `(readings, bias, variance)` order used elsewhere — confirm.
        mle_estiamte = mle.estimate(readings, variances, biases)
        mle_rms_errors += [rms_error(mle_estiamte, [0] * num_sensors)]

    # Bayesian credible intervals (mean/var/std) over the repeats.
    iter_mvs = bayes_mvs(iter_rms_errors)
    mle_mvs = bayes_mvs(mle_rms_errors)

    # Bar chart of mean RMS error per method, with lower error bars.
    pp.bar([0, 1], [iter_mvs[0][0], mle_mvs[0][0]],
           yerr=[
               iter_mvs[0][0] - iter_mvs[0][1][0],
               mle_mvs[0][0] - mle_mvs[0][1][0]
           ])
    pp.show()