def evaluate_circuit(n=208, eval_perceptron=True):
    test_idxs = np.random.choice(data.shape[0], n, replace=False)
    correct_circuit = 0
    accuracy_circuit = 0.0
    correct_perceptron = 0
    accuracy_perceptron = 0.0
    for i in test_idxs:
        trial_result = trial(weights, p_weights, ap_weights, i, plot=False, num=1)
        if trial_result[0] == data[i][-1]:
            correct_circuit += 1
        if eval_perceptron:
            # This uses multiple noisy training rows over n epochs; should we instead
            # train the perceptron on a single noisy row n times?
            perceptron_training_data = data[test_idxs, :-1]
            perceptron_noisy_data = perceptron_training_data + np.random.rand(n, 60)
            classes = data[test_idxs, -1].reshape(n, 1)
            perceptron_noisy_data = np.append(perceptron_noisy_data, classes, axis=1)
            # Train the perceptron on noisy data for as many epochs as the circuit took
            # to decide, then determine the perceptron's accuracy on the clean row.
            perceptron = p.gradient_descent(perceptron_noisy_data, 0.1, trial_result[1])
            if p.predict(data[i], perceptron) == data[i][-1]:
                correct_perceptron += 1
    accuracy_circuit = (correct_circuit / n) * 100
    accuracy_perceptron = (correct_perceptron / n) * 100
    print("circuit accuracy: ", accuracy_circuit)
    print("perceptron accuracy:", accuracy_perceptron)
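# A minimal sketch (not called above) of the alternative raised in the comment inside
# evaluate_circuit: train the comparison perceptron on a single noisy copy of test row i
# repeated n times, instead of noisy copies of every test row. _single_row_noisy_training
# is a hypothetical helper; it assumes p.gradient_descent keeps the
# (rows, learning_rate, epochs) interface used elsewhere in this file.
def _single_row_noisy_training(i, n, epochs):
    row = data[i, :-1]
    # n noisy copies of the same row, each with fresh uniform noise
    noisy_rows = np.tile(row, (n, 1)) + np.random.rand(n, row.shape[0])
    labels = np.full((n, 1), data[i, -1])
    noisy_with_labels = np.append(noisy_rows, labels, axis=1)
    return p.gradient_descent(noisy_with_labels, 0.1, epochs)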
    if running_accuracy < accuracy_threshold:
        left_off_index = i
        break

context_retrain = True
retrain_weights = np.empty((100, 3))
#cmap = colors.LinearSegmentedColormap('custom', cdict)
for i in range(left_off_index, test.shape[0]):
    context_2_timer += 1
    datum = data[i]
    if context_retrain:
        # Retrain a perceptron and an antiperceptron on the 50 rows after the context switch.
        p_weights = p.gradient_descent(data[left_off_index : left_off_index + 50], 0.1, 400)
        ap_weights = p.gradient_descent(data[left_off_index : left_off_index + 50], 0.1, 400, antiperceptron=True)
        circuit_steps = np.ones((1, 200))
        for j in range(0, 200):
            #circuit_values = diffusion_predict(p_weights, ap_weights, data2[i + j, :2], data[i + j, 3])
            #context_2_weights = p.gradient_descent(data[left_off_index : left_off_index + 50], 0.1, 200)
            #circuit_steps[0][j] = circuit_values[1]
            """
            If we use circuit_steps as
            """
        circuit_steps.fill(0.2)
        context_2_weights = gradient_descent_variable_eta(data2, circuit_steps, 400, context_1_weights, plot=True)
        print(context_2_weights)
        context_retrain = False
"""
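# Sketch only: gradient_descent_variable_eta is defined elsewhere in this project. The
# version below is an assumption about its interface based on the call above: eta_steps
# holds one learning rate per training row (here filled with a constant 0.2), and a plain
# perceptron update is applied with that row-specific rate, starting from init_weights.
# Whether the rates are indexed per row or per epoch is itself an assumption.
def _variable_eta_sketch(rows, eta_steps, n_epochs, init_weights):
    w = np.array(init_weights, dtype=float)
    for epoch in range(n_epochs):
        for idx, row in enumerate(rows):
            eta = eta_steps[0][idx % eta_steps.shape[1]]
            activation = w[0] + np.dot(w[1:], row[:-1])
            prediction = 1.0 if activation >= 0.0 else 0.0
            error = row[-1] - prediction
            w[0] += eta * error
            w[1:] += eta * error * row[:-1]
    return w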
# Balanced training split for the sonar data: 90 rows from each class
# (rows 0-96 are one class, rows 97-207 the other).
train_data_0 = data[np.random.choice(97, 90, replace=False), :]
train_data_1 = data[np.random.choice(np.arange(97, 208), 90, replace=False), :]
train_data = np.concatenate((train_data_0, train_data_1), axis=0)
np.random.shuffle(train_data)

#def main():
#rng = np.random.default_rng()
#train_data = rng.shuffle(data)
#train_data2 = data[np.random.choice(data.shape[0], 154, replace=False), :]

# Train perceptron/antiperceptron, store weights
#p_weights = p.gradient_descent(train_data, 0.005, 900)
ap_weights = p.gradient_descent(train_data, 0.005, 900, antiperceptron=True)
#p.gradient_descent(train_data, 0.005, 900, antiperceptron=True)
print(ap_weights)
#print(perceptron_accuracy(ap_weights, ap=True))
#print(p_weights)
#print(perceptron_accuracy(p_weights))
#print(ap_weights)
#return(ap_weights)
#main()

### 89.423% accuracy for perceptron
"""
[ 0.415      0.3567415  0.2803695 -0.3303615  0.1614965  0.280231
 -0.0106485 -0.1240805 -0.346292   0.288558  -0.0213655  0.196917
def perceptronval():
    # Sweep the learning rate downward from 0.2 in steps of 0.15.
    # Note: the rate goes negative once i >= 2, which is probably unintended.
    for i in range(0, 10):
        pp = p.gradient_descent(train_data, 0.2 - (0.15 * i), 700)
        print(perceptron_accuracy(pp))
        print(pp)
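# perceptron_accuracy is defined elsewhere in this project; the sketch below is only an
# assumption about its behaviour based on how it is used above (percentage of rows whose
# p.predict output matches the stored label). _perceptron_accuracy_sketch is hypothetical.
def _perceptron_accuracy_sketch(weight_vec, rows=None):
    rows = data if rows is None else rows
    correct = sum(1 for row in rows if p.predict(row, weight_vec) == row[-1])
    return (correct / len(rows)) * 100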
dataset = np.asarray(load_data("sonar1.all-data"))
ROW_LENGTH = len(dataset[0])
COLUMNS = len(dataset)  # note: len(dataset) is the number of rows, not columns
#noisy_data = p.noisyData(dataset)
#perceptron = p.perceptron
#antiperceptron = p.antiperceptron

# Take the first 110 rows in a random order as training data for the perceptron,
# and a separately shuffled copy for the antiperceptron.
p_random_indices = np.random.choice(110, size=110, replace=False)
p_train_data = dataset[p_random_indices, :110]
ap_random_indices = np.random.choice(110, size=110, replace=False)
ap_train_data = dataset[ap_random_indices, :110]
#test_data = np.delete(noisy_data, random_indices, 0)

p_weights = p.gradient_descent(p_train_data, 0.1, 200)
ap_weights = p.gradient_descent(ap_train_data, 0.1, 200, antiperceptron=True)
#scores = p.evaluate_algorithm(dataset, p.perceptron, 3, .01, 500)

# Unit-to-unit connection weights for the 4-node circuit
# (entry [i][j] is the weight from unit j+1 to unit i+1).
c_weights = np.array([[0,    -.5,   0,    -.5 ],   # 1->1  2->1  3->1  4->1
                      [-.5,   0,   -.5,    0  ],   # 1->2  2->2  3->2  4->2
                      [2.23,  0,    6,    -6  ],   # 1->3  2->3  3->3  4->3
                      [0,     2.23, 0,     2.5]])  # 1->4  2->4  3->4  4->4

weights = np.array([[0,    -2,     0,    -2],      # 1->1  2->1  3->1  4->1
                    [-2,    0,    -2,     0],      # 1->2  2->2  3->2  4->2
                    [1.23,  0,     2.087, 0],      # 1->3  2->3  3->3  4->3