def experiment1(training_data, validation_data, all_data):
    # training_data, validation_data, all_data = load_test_data1()
    # Network initialization
    net = ANFIS([4, 1])
    net.initialization(training_data)
    init_net = copy.deepcopy(net)
    # Train the net
    evaluation_cost, evaluation_accuracy, \
        training_cost, training_accuracy = net.stochastic_gradient_descent(
            training_data, validation_data,
            eta=0.1, mini_batch_size=50, epoch=25,
            adapt_eta_mode=True,
            evaluation_track_flag=True,
            training_track_flag=True)
    return evaluation_cost, evaluation_accuracy, \
        training_cost, training_accuracy, net, init_net
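# A minimal usage sketch for experiment1. It assumes the load_test_data1()
# helper hinted at in the commented-out line above exists and returns the
# three data sets, and that the tracked costs come back as per-epoch lists.
if __name__ == '__main__':
    training_data, validation_data, all_data = load_test_data1()
    (eval_cost, eval_acc,
     train_cost, train_acc, net, init_net) = experiment1(
        training_data, validation_data, all_data)
    print("Final validation cost: {:.4f}".format(eval_cost[-1]))
    print("Final training cost:   {:.4f}".format(train_cost[-1]))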
# Assumes N (series length), T (delay) and D = 4 (number of lagged inputs)
# are already defined, as in the Mackey-Glass setup further below.
data = np.zeros((N - 500 - T - (D - 1) * T, D))
lbls = np.zeros((N - 500 - T - (D - 1) * T,))
for t in range((D - 1) * T, N - 500 - T):
    data[t - (D - 1) * T, :] = [mg_series[t - 3 * T], mg_series[t - 2 * T],
                                mg_series[t - T], mg_series[t]]
    lbls[t - (D - 1) * T] = mg_series[t + T]

# 70/30 train/validation split
trnData = data[:lbls.size - round(lbls.size * 0.3), :]
trnLbls = lbls[:lbls.size - round(lbls.size * 0.3)]
chkData = data[lbls.size - round(lbls.size * 0.3):, :]
chkLbls = lbls[lbls.size - round(lbls.size * 0.3):]

# ANFIS params and Tensorflow graph initialization
m = 16  # number of rules
alpha = 0.01  # learning rate

fis = ANFIS(n_inputs=D, n_rules=m, learning_rate=alpha)

# Training
num_epochs = 100

# Initialize session to make computations on the Tensorflow graph
with tf.Session() as sess:
    # Initialize model parameters
    sess.run(fis.init_variables)
    trn_costs = []
    val_costs = []
    time_start = time.time()
    for epoch in range(num_epochs):
        # Run an update step
        trn_loss, trn_pred = fis.train(sess, trnData, trnLbls)
        # Evaluate on validation set
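        # The original snippet is truncated here; a hedged continuation,
        # assuming fis.infer(sess, x, y) mirrors fis.train's (loss, pred)
        # return -- swap in the class's actual evaluation call if it differs.
        val_loss, val_pred = fis.infer(sess, chkData, chkLbls)
        trn_costs.append(trn_loss)
        val_costs.append(val_loss)
        if epoch % 10 == 0:
            print("Epoch {}: train loss {:.5f}, val loss {:.5f}"
                  .format(epoch, trn_loss, val_loss))
    print("Elapsed training time: {:.2f} s".format(time.time() - time_start))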
print( "\n" + "Consequent Parameters of All 3 Actions' Linguistic Labels" + "\n", conseqParams) # Print the first-order Sugeno model's coefficients for all 81 rules coeffParams = pd.DataFrame(np.reshape(list(trn_C.values()), (81, 5)), columns=[ "Seniority", "Purchase_Propensity", "Company_Size", "Contactable", "Bias" ]) print("\n" + "Coefficients of All 81 Rules" + "\n", coeffParams) # ANFIS params and Tensorflow graph initialization n = 4 # number of inputs f = 3 # number of linguistic labels per input variable m = f**n # number of rules = 81 alpha = 0.0007 # learning rate tf.reset_default_graph() fis = ANFIS(n_inputs=n, n_rules=m, n_fuzzy=f, learning_rate=alpha) # Print model summary model_summary() # Get data trn, chk, trnLbls, chkLbls, bias = train_testData("Data_combined.csv", n) # Train & plot the ANFIS model trainingNplotting(fis, trn, trnLbls, chk, chkLbls, bias, n, f)
def main():
    number_of_rules = int(sys.argv[1])
    learning_rate = float(sys.argv[2])
    type_of_backpropagation = sys.argv[3]
    input_variables = generate_input_variables()
    system = ANFIS(number_of_rules)
    index = 0
    error = float('inf')

    if type_of_backpropagation == 'stohastic':
        f1 = open("./errors/functions/stohastic_function" + str(number_of_rules) +
                  "." + str(learning_rate) + ".txt", "w+")
        f2 = open("./errors/functions/stohastic_error" + str(number_of_rules) +
                  "." + str(learning_rate) + ".txt", "w+")
        for i in range(1, 100001):
            print("Iteration " + str(i))
            point = (input_variables[index][0], input_variables[index][1])
            function_value = function.value(point)
            temp_error = system.calc_error(function, input_variables)
            if temp_error < 1e-5:
                break
            error = temp_error
            print("Error " + str(error))
            system.value(point)
            for j in range(0, system.get_number_of_rules()):
                current_rule = system.get_rule(j)
                first_antecedent = copy.deepcopy(current_rule.get_first_antecedent())
                second_antecedent = copy.deepcopy(current_rule.get_second_antecedent())
                consequence = copy.deepcopy(current_rule.get_consequence())
                # p update
                consequence.set_p(consequence.get_p() - learning_rate *
                                  p_derivative(point, system, function_value, current_rule))
                # q update
                consequence.set_q(consequence.get_q() - learning_rate *
                                  q_derivative(point, system, function_value, current_rule))
                # r update
                consequence.set_r(consequence.get_r() - learning_rate *
                                  r_derivative(point, system, function_value, current_rule))
                # a_x update
                first_antecedent.set_a(first_antecedent.get_a() - learning_rate *
                                       a_derivative(point, system, function_value,
                                                    current_rule, j,
                                                    first_antecedent.get_b(),
                                                    first_antecedent.value(point[0]),
                                                    second_antecedent.value(point[1])))
                # b_x update
                first_antecedent.set_b(first_antecedent.get_b() - learning_rate *
                                       b_derivative(point, system, function_value,
                                                    current_rule, j,
                                                    first_antecedent.get_a(), point[0],
                                                    first_antecedent.value(point[0]),
                                                    second_antecedent.value(point[1])))
                # a_y update
                second_antecedent.set_a(second_antecedent.get_a() - learning_rate *
                                        a_derivative(point, system, function_value,
                                                     current_rule, j,
                                                     second_antecedent.get_b(),
                                                     second_antecedent.value(point[1]),
                                                     first_antecedent.value(point[0])))
                # b_y update
                second_antecedent.set_b(second_antecedent.get_b() - learning_rate *
                                        b_derivative(point, system, function_value,
                                                     current_rule, j,
                                                     second_antecedent.get_a(), point[1],
                                                     second_antecedent.value(point[1]),
                                                     first_antecedent.value(point[0])))
                system.set_rule(j, Rule(first_antecedent, second_antecedent, consequence))
            index += 1
            if index > 80:
                index = 0
    else:
        f1 = open("./errors/functions/batch_function" + str(number_of_rules) +
                  "." + str(learning_rate) + ".txt", "w+")
        f2 = open("./errors/functions/batch_error" + str(number_of_rules) +
                  "." + str(learning_rate) + ".txt", "w+")
        for epoch in range(0, 5000):
            print("Epoch " + str(epoch))
            temp_error = system.calc_error(function, input_variables)
            if temp_error < 1e-5:
                break
            error = temp_error
            print("Error " + str(error))
            # Accumulate the seven per-rule gradients (p, q, r, a_x, b_x, a_y, b_y)
            # over the whole training set before applying a single update
            weight_errors = np.zeros((number_of_rules, 7))
            for y in range(0, len(input_variables)):
                point = (input_variables[y][0], input_variables[y][1])
                function_value = function.value(point)
                system.value(point)
                for j in range(0, system.get_number_of_rules()):
                    current_rule = system.get_rule(j)
                    first_antecedent = copy.deepcopy(current_rule.get_first_antecedent())
                    second_antecedent = copy.deepcopy(current_rule.get_second_antecedent())
                    consequence = copy.deepcopy(current_rule.get_consequence())
                    weight_errors[j][0] += p_derivative(point, system, function_value, current_rule)
                    weight_errors[j][1] += q_derivative(point, system, function_value, current_rule)
                    weight_errors[j][2] += r_derivative(point, system, function_value, current_rule)
                    weight_errors[j][3] += a_derivative(point, system, function_value,
                                                        current_rule, j,
                                                        first_antecedent.get_b(),
                                                        first_antecedent.value(point[0]),
                                                        second_antecedent.value(point[1]))
                    weight_errors[j][4] += b_derivative(point, system, function_value,
                                                        current_rule, j,
                                                        first_antecedent.get_a(), point[0],
                                                        first_antecedent.value(point[0]),
                                                        second_antecedent.value(point[1]))
                    weight_errors[j][5] += a_derivative(point, system, function_value,
                                                        current_rule, j,
                                                        second_antecedent.get_b(),
                                                        second_antecedent.value(point[1]),
                                                        first_antecedent.value(point[0]))
                    weight_errors[j][6] += b_derivative(point, system, function_value,
                                                        current_rule, j,
                                                        second_antecedent.get_a(), point[1],
                                                        second_antecedent.value(point[1]),
                                                        first_antecedent.value(point[0]))
            # Apply the accumulated batch updates
            for j in range(0, system.get_number_of_rules()):
                current_rule = system.get_rule(j)
                first_antecedent = copy.deepcopy(current_rule.get_first_antecedent())
                second_antecedent = copy.deepcopy(current_rule.get_second_antecedent())
                consequence = copy.deepcopy(current_rule.get_consequence())
                consequence.set_p(consequence.get_p() - learning_rate * weight_errors[j][0])
                consequence.set_q(consequence.get_q() - learning_rate * weight_errors[j][1])
                consequence.set_r(consequence.get_r() - learning_rate * weight_errors[j][2])
                first_antecedent.set_a(first_antecedent.get_a() - learning_rate * weight_errors[j][3])
                first_antecedent.set_b(first_antecedent.get_b() - learning_rate * weight_errors[j][4])
                second_antecedent.set_a(second_antecedent.get_a() - learning_rate * weight_errors[j][5])
                second_antecedent.set_b(second_antecedent.get_b() - learning_rate * weight_errors[j][6])
                system.set_rule(j, Rule(first_antecedent, second_antecedent, consequence))

    # Dump the learned surface and its pointwise error over the [-4, 4] grid
    f1.truncate(0)
    f2.truncate(0)
    b = np.arange(-4, 5, 1)  # grid axes (unused in the write loop below)
    d = np.arange(-4, 5, 1)
    for x in range(-4, 5):
        for y in range(-4, 5):
            f1.write(str(system.value((x, y))) + "\n")
            f2.write(str(system.value((x, y)) - function.value((x, y))) + "\n")
    f1.close()
    f2.close()
    # Visualize the learned function and the training curves
    draw_function()
    draw_learning_graphs()
    draw_function_graphs()
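# The script is driven from the command line, e.g.:
#   python main.py 4 0.001 stohastic
#
# The *_derivative helpers used above are defined elsewhere. As a worked
# illustration: for a first-order Sugeno rule z_j = p_j*x + q_j*y + r_j and
# squared error E = 1/2 * (f - z)^2, the gradient w.r.t. a consequent
# parameter p_j is (f - z) * wbar_j * x, where wbar_j is the rule's
# normalized firing strength. A sketch with *hypothetical* accessors
# (get_last_output and get_normalized_weight are assumptions, not this
# repo's API):
def p_derivative_sketch(point, system, function_value, rule):
    # dE/dp_j = (f - z) * wbar_j * x
    return ((system.get_last_output() - function_value)
            * rule.get_normalized_weight()
            * point[0])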
D = 4  # number of lagged inputs (implied by the four lags used below)
T = 1  # delay
N = 2000  # number of points to generate
mg_series = mackey(N)[499:]  # use the last 1500 points

data = np.zeros((N - 500 - T - (D - 1) * T, D))
lbls = np.zeros((N - 500 - T - (D - 1) * T,))
for t in range((D - 1) * T, N - 500 - T):
    data[t - (D - 1) * T, :] = [mg_series[t - 3 * T], mg_series[t - 2 * T],
                                mg_series[t - T], mg_series[t]]
    lbls[t - (D - 1) * T] = mg_series[t + T]

# Create the inference system
m = 16  # number of rules
fis = ANFIS(D, m)
# Total number of parameters (genome size): one mean and one standard
# deviation per membership function, plus one consequent singleton per rule
n_params = 2 * (m * D) + m


# Evaluate the objective function
def eval_objective(params):
    # Slice the parameter vector (genome) into means, standard deviations
    # and consequent singletons
    mus = params[0:fis.m * fis.n]
    sigmas = params[fis.m * fis.n:2 * fis.m * fis.n]
    y = params[2 * fis.m * fis.n:]
    # Set the FIS parameters to the ones on the genome
    fis.setmfs(mus, sigmas, y)
    pred = fis.infer(data)
    # Loss: 1 minus the Nash-Sutcliffe efficiency of the predictions
    loss = 1 - nse(pred, lbls)
    return loss
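# A hedged sketch of optimizing the genome with SciPy's differential
# evolution (any population-based optimizer would do); the parameter bounds
# below are illustrative assumptions, not values from the original.
from scipy.optimize import differential_evolution

bounds = ([(0.0, 1.5)] * (m * D)       # membership-function means
          + [(0.05, 1.0)] * (m * D)    # membership-function standard deviations
          + [(-2.0, 2.0)] * m)         # consequent singletons
result = differential_evolution(eval_objective, bounds, maxiter=50, seed=0)
print("Best loss (1 - NSE):", result.fun)
# Load the best genome back into the FIS
fis.setmfs(result.x[0:m * D], result.x[m * D:2 * m * D], result.x[2 * m * D:])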