def evaluate(A, C):
    # accelerator outputs on the module-level test set X
    acc = A.predict(X)
    # classification predicted by C (second score wins = "accept") and the true
    # classification derived from accept(); accept() returns a bool and True == 1,
    # so the two lists can be compared element-wise below
    cls_c = [1 if v[1] > v[0] else 0 for v in C.predict(X)]
    cls_t = [accept(Y[i], acc[i]) for i in range(N)]
    # relative error over all test data, and over the inputs C classifies as acceptable
    re = [error.relative_error(Y[i], acc[i]) for i in range(N)]
    re_c = [error.relative_error(Y[i], acc[i]) for i in range(N) if cls_c[i]]
    # accuracy and recall of C (the 1e-10 guards against division by zero)
    accuracy_of_C = sum([1.0 if cls_t[i] == cls_c[i] else 0 for i in range(N)]) / float(1e-10 + N)
    recall_of_C = sum([1.0 if cls_t[i] and cls_c[i] else 0 for i in range(N)]) / float(1e-10 + sum([1 if v else 0 for v in cls_t]))
    # invocation rate predicted by C, and the true invocation rate
    invocation_of_C = float(sum([1 if v else 0 for v in cls_c])) / float(1e-10 + N)
    invocation_truly = float(sum([1 if v else 0 for v in cls_t])) / float(1e-10 + N)
    # mean relative error of A, with and without C filtering the inputs
    mean_relative_error_of_A = sum(re) / float(1e-10 + len(re))
    mean_relative_error_of_A_with_C = sum(re_c) / (1e-10 + len(re_c))
    return {
        'accuracy_of_C': accuracy_of_C,
        'recall_of_C': recall_of_C,
        'invocation_of_C': invocation_of_C,
        'invocation_truly': invocation_truly,
        'mean_relative_error_of_A': mean_relative_error_of_A,
        'mean_relative_error_of_A_with_C': mean_relative_error_of_A_with_C
    }
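# --- Hypothetical usage sketch (not part of the original code) --------------
# evaluate() reads the module-level test set X, Y and its size N, and expects
# C.predict() to return two scores per sample (reject/accept). The classifier
# topology, the error bound value, and reusing AcceleratorModel for C are
# assumptions made only for illustration.
def _example_evaluate(benchmark, acc_weights_file, cls_weights_file, bound=0.1):
    global X, Y, N, re_bound
    _, _, X, Y = load_data(benchmark)       # use the held-out split as the test set
    N = len(X)
    re_bound = bound                        # error bound read by the accept() helper
    A = AcceleratorModel([6, 8, 8, 1])      # must match the trained accelerator topology
    A.load_weights(acc_weights_file)
    C = AcceleratorModel([6, 8, 8, 2])      # assumed: same model class, two outputs = reject/accept
    C.load_weights(cls_weights_file)
    return evaluate(A, C)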
def main(benchmark, weights_file):
    X0, Y0, X1, Y1 = load_data(benchmark)
    A = AcceleratorModel([1, 4, 4, 2])
    A.load_weights(weights_file)
    Y2 = A.predict(X1)
    print A.evaluate(X1, Y1)
    # histogram of relative error in 1% buckets (errors above 100% are clamped
    # into the last bucket to avoid an out-of-range index)
    re = [error.relative_error(Y1[i], Y2[i]) for i in xrange(len(X1))]
    count = [0] * 101
    for v in re:
        count[min(100, int(math.floor(v * 100)))] += 1
    s = sum(count)
    count = [float(v) / s for v in count]
    for i in range(101):
        print i, count[i]
    for i in xrange(len(X1)):
        print Y1[i], Y2[i], error.relative_error(Y1[i], Y2[i])
def main(benchmark, weights_file):
    X0, Y0, X1, Y1 = load_data(benchmark)
    A = AcceleratorModel([6, 8, 8, 1])  # change this to the topology of the trained network
    A.load_weights(weights_file)
    Y2 = A.predict(X1)
    print(A.evaluate(X1, Y1))
    # histogram of relative error in 1% buckets (errors above 100% are clamped
    # into the last bucket to avoid an out-of-range index)
    re = [error.relative_error(Y1[i], Y2[i]) for i in range(len(X1))]
    count = [0] * 101
    for v in re:
        count[min(100, int(math.floor(v * 100)))] += 1
    s = sum(count)
    count = [float(v) / s for v in count]
    for i in range(101):
        print(i, count[i])
    for i in range(len(X1)):
        print(Y1[i], Y2[i], error.relative_error(Y1[i], Y2[i]))
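# --- Optional helper (a sketch, not part of the original code) --------------
# main() above prints a per-percent histogram of relative error, while
# train_origin() reports, for each bound k%, the share of samples whose error
# stays within that bound. This sketch derives that cumulative curve from the
# normalized histogram `count` built in main().
def cumulative_error_curve(count):
    curve = []
    running = 0.0
    for frac in count:
        running += frac
        curve.append(running)  # fraction of samples with relative error below the next 1% bound
    return curve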
def train_origin(A, X0, Y0, X1, Y1, epoch, batch_size, output_name):
    A.fit(X0, Y0, nb_epoch=epoch, batch_size=batch_size)
    acc = A.predict(X1)
    # result_num[j] counts test samples whose relative error is <= (j + 1)%
    result_num = [0 for i in range(100)]
    result_sum = 0
    for i in range(len(X1)):
        rate = 0.01
        tmp_re = error.relative_error(Y1[i], acc[i])
        result_sum += tmp_re
        for j in range(100):
            if tmp_re <= rate:
                result_num[j] += 1
            rate += 0.01
    result_mre = result_sum / float(len(X1))
    result_re_list = [(100.0 * result_num[x] / float(len(X1))) for x in range(len(result_num))]
    print result_mre
    print result_re_list
    # write the mean relative error and the cumulative error distribution to CSV
    f_results = open('../results/origin/{}.csv'.format(output_name), 'w')
    f_results.write('mre,{}\n'.format(result_mre))
    f_results.write('re_bound,re_percent\n')
    for i in range(100):
        f_results.write('{}%,{:.3f}%\n'.format(i + 1, result_re_list[i]))
    f_results.flush()
    f_results.close()
    A.save_weights('../weights/origin/{}.weights'.format(output_name), overwrite=True)
    keras_to_fann(A, 'fann_sample.nn', '../fann_model/{}.nn'.format(output_name))
'''
def accept(v0, v1):
    # eb_type selects the error metric: 1 = relative error bound,
    # anything else = absolute error bound (both compared against re_bound)
    if eb_type == 1:
        return error.relative_error(v0, v1) <= re_bound
    return error.absolute_error(v0, v1) <= re_bound
def accept(v0, v1):
    return error.relative_error(v0, v1) <= re_bound
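# --- Assumed `error` helpers (sketch only; not the project's actual module) --
# accept() and evaluate() rely on error.relative_error() / error.absolute_error(),
# whose definitions are not shown here. A minimal version consistent with how
# they are used (scalar or single-element outputs compared against re_bound)
# might look like this:
def _absolute_error_sketch(expected, predicted):
    return abs(float(expected) - float(predicted))

def _relative_error_sketch(expected, predicted):
    # normalize by the magnitude of the expected value; the 1e-10 guard mirrors
    # the epsilon used elsewhere in this code to avoid division by zero
    return _absolute_error_sketch(expected, predicted) / (abs(float(expected)) + 1e-10)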