# plt.plot(error[i, :], label='exp ' + str(i))
plt.title('Accuracy of the Ensemble')
plt.xlabel('Number of meters per model ($m^d$)')
plt.ylabel(r'Prediction error MAE($y, \hat y$)')  # raw string avoids the invalid \h escape warning
plt.legend()
plt.savefig('accuracy_ensemble.pgf', bbox_inches='tight')  # save before show(), otherwise the saved figure is empty
plt.show()

# for i in range(models_len):
#     print('MAE exp ' + str(i) + '= ' + str(np.mean(error[i, :])))

k = 9
plt.figure(5)
plt.clf()
for i in range(models_len):
    # Earlier rescaling, kept for reference:
    # if i in [0, 1]:
    #     c = n_vec[i] - 1
    # else:
    #     c = 1
    c = 1
    pred_i = predictions[i][k].reshape(-1) * c
    mae_i = fl.MAE(fl.y_test, pred_i)

    plt.plot(pred_i, label='exp ' + str(i) + ', MAE=' + str(mae_i))
plt.plot(fl.y_test, '--', label='Test')
plt.legend()
plt.show()
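
fl.MAE is used throughout these examples but its implementation is not shown. A minimal sketch of what it is assumed to compute (a plain mean absolute error over two equal-length arrays; the real helper in the fl module may differ):

import numpy as np

def MAE(y_true, y_pred):
    # Mean absolute error between two 1-D arrays (assumed behaviour of fl.MAE)
    y_true = np.asarray(y_true).reshape(-1)
    y_pred = np.asarray(y_pred).reshape(-1)
    return np.mean(np.abs(y_true - y_pred))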
Example No. 2
        t0 = time.perf_counter()

        for k in range(reps):
            print('\t\tk=' + str(k))

            if strategic_attack:
                # Restrict the attacked meters to those assigned to the first stored model
                meters_model = np.load(dir_models + 'meters_' + str(k) +
                                       '.npy',
                                       allow_pickle=True)
                # random.sample needs a sequence, not a set, on Python 3.11+
                meters_a = random.sample(list(set(meters_model[0])), m_a)
            else:
                meters_a = random.sample(range(m), m_a)

            # Point the attack module at this experiment's prediction/gradient functions
            fa.get_prediction = get_prediction_a
            fa.get_grad = get_grad_a

            y_test, hat_y, hat_y_a, bias_opt = fl.find_attack(
                dir_models, max_num_models, 1, meters_a, unique_bias)

            impact[k, i, j] = fl.MAE(hat_y, hat_y_a)     # attack impact: clean vs. attacked forecast
            pred_error[k, i, j] = fl.MAE(hat_y, y_test)  # prediction error of the clean forecast

        t_f = time.perf_counter()
        print('\t***Train time: ' + str((t_f - t0) / 60.0))  # elapsed time in minutes

type_exp += '_nominal'

dir_results = './results/'
np.save(dir_results + 'impact' + type_exp + '.npy', impact)
np.save(dir_results + 'pred_error' + type_exp + '.npy', pred_error)
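
The saved arrays can be read back for analysis. A minimal sketch, assuming the same dir_results and type_exp values as above and the (reps, i, j) layout used when the arrays were filled:

import numpy as np

impact = np.load(dir_results + 'impact' + type_exp + '.npy')
pred_error = np.load(dir_results + 'pred_error' + type_exp + '.npy')
print('mean attack impact per setting:', impact.mean(axis=0))       # averaged over the reps axis
print('mean prediction error per setting:', pred_error.mean(axis=0))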
Example No. 3
    t0 = time.perf_counter()
    print('\t model: ' + str(i))

    for k in range(max_num_models):
        print('\tk=' + str(k))

        # Experiments 0 and 1 use the (a) prediction/gradient pair, 2 and 3 the (b) pair
        if i in [0, 1]:
            fa.get_prediction = get_prediction_a
            fa.get_grad = get_grad_a

        elif i in [2, 3]:
            fa.get_prediction = get_prediction_b
            fa.get_grad = get_grad_b

        y_test, hat_y = fa.get_forecast(dir_rep, k)
        predictions_i.append(hat_y)
        error[i, k] = fl.MAE(y_test, hat_y)

    all_predictions.append(predictions_i)

    t_f = time.perf_counter()
    print('\t***Train time: ' + str((t_f - t0) / 60.0))  # elapsed time in minutes

type_exp += '_test'

dir_results = './'
np.save(dir_results + 'MAE' + type_exp + '.npy', error)
np.save(dir_results + 'predictions' + type_exp + '.npy', all_predictions)
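
As a usage note, the per-experiment summary that is commented out in the first example can be reproduced from the saved file. A minimal sketch, assuming the same dir_results and type_exp values as above:

import numpy as np

error = np.load(dir_results + 'MAE' + type_exp + '.npy')  # one row of MAEs per experiment
for i in range(error.shape[0]):
    print('MAE exp ' + str(i) + ' = ' + str(np.mean(error[i, :])))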