def create_menu_bar(self, root):
    self.file_menu.add_command(label="Open audio file", command=self.open_audio_file)
    self.menu_bar.add_cascade(label="File", menu=self.file_menu)

    self.ds_menu.add_command(label="Generate graphics",
                             command=lambda: spectogram.create_data_set_graphs())
    self.ds_menu.add_command(label="Graphics augmentation",
                             command=lambda: ImageTransform.gen_dataset_augmens())
    self.menu_bar.add_cascade(label="Data-Set", menu=self.ds_menu)

    self.nn_menu.add_command(label="Train",
                             command=lambda: NeuralNetwork.create_and_train_nn())
    self.nn_menu.add_command(label="Load last model weights",
                             command=lambda: NeuralNetwork.load_model_weights())
    self.menu_bar.add_cascade(label="Neural Network", menu=self.nn_menu)

def main():
    learning_rate = 0.6
    beta = 0.7
    inner_size = 128
    topology = [INPUT_SIZE, inner_size, OUTPUT_SIZE]
    activation = lambda x: sigmoid(x, beta)
    activation_diff = lambda x: sigmoid_diff(x, beta)
    network = NeuralNetwork(topology, activation, activation_diff)
    for i in xrange(200):
        network.training_session(DATA, 100, learning_rate)
    network.test_effectiveness(DATA, 200)

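# Hedged sketch: the main() above imports INPUT_SIZE, OUTPUT_SIZE, DATA, sigmoid and
# sigmoid_diff from elsewhere in its project. Definitions consistent with how the two
# lambdas use them (beta as a steepness parameter of the logistic curve) could look
# like the following; the project's actual implementations may differ.
import math

def sigmoid(x, beta=1.0):
    # Logistic function with steepness beta: 1 / (1 + exp(-beta * x)).
    return 1.0 / (1.0 + math.exp(-beta * x))

def sigmoid_diff(x, beta=1.0):
    # Derivative of the logistic above: beta * s * (1 - s), with s = sigmoid(x, beta).
    s = sigmoid(x, beta)
    return beta * s * (1.0 - s)
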
def main():
    network = NeuralNetwork([
        LinearLayer(4, 8),
        ActivationLayer('sigmoid'),
        LinearLayer(8, 4),
        ActivationLayer('sigmoid'),
        LinearLayer(4, 2),
        ActivationLayer('tanh'),
    ])
    solver = SGDSolver(network, eta=0.1)

    X = [[1, 2, 3, 4]] * 10000
    Y = [[0.23148, -1]] * 10000

    print network.predict([1, 2, 3, 4])
    solver.solve(X, Y)
    print network.predict([1, 2, 3, 4])

def __init__(self):
    # Deniz: moved parameters to params.py
    self.time = 0
    self.neuralNetwork = NeuralNetwork(params.POS_NEURONS, params.POS_RANGE,
                                       params.VEL_NEURONS, params.VEL_RANGE,
                                       params.NB_OUTPUTS, params.ETA,
                                       params.GAMMA, params.LAMBDA)
    # store the last taken action, in order to reinforce the eligibility trace
    self.action_index = None

class Processor:
    def __init__(self, parent):
        self.steps = (
            self._step_grayscale,
            self._step_blur,
            self._step_tresholding,
            self._step_line_detection,
            self._step_boxing,
            self._step_features_extraction,
        )
        self.image = None

    def open(self, path):
        self.current_step = 0
        self.image = Image()
        self.image.load(path)
        self.network = NeuralNetwork('default')
        return self.image

    def is_done(self):
        if self.image is not None and self.current_step < len(self.steps):
            return False
        return True

    def step_process(self):
        step_function = self.steps[self.current_step]
        print 'step process: {0}'.format(step_function.__name__[len('_step_'):])
        step_function()
        self.current_step += 1

    def _step_grayscale(self):
        self.image.apply_grayscale()

    def _step_blur(self):
        self.image.apply_gaussian_blur(3, 3)

    def _step_tresholding(self):
        self.image.apply_thresholding()

    def _step_line_detection(self):
        pass  #self.image._detect_lines()

    def _step_boxing(self):
        pass  #self.image.apply_boxing()

    def _step_features_extraction(self):
        # for every character found
        for character in (self.image, ):
            projections = character.apply_projections()
            results = self.network.recognize(projections)
            print results

def main():
    trains, chs = read_trains()
    n_net = NeuralNetwork()
    if os.path.exists(TRAINED_FPATH):
        n_net.load_trained_file(TRAINED_FPATH)
    else:
        n_net.set_trains(trains)
        train(n_net, DESIRED_ACCURACY)
        n_net.save_trained_file(TRAINED_FPATH)
    test_inputs(n_net, chs)

def _right_interface(self, y):
    index_y = (self.frame_height - y) // self.spaces
    if index_y == 1:
        self.network = NeuralNetwork(self.network_tuple, epochs=self.epochs,
                                     save=self.save_net, scale=self.frame_height,
                                     max_error=self.max_net_error)
        self.training = self.training_frames
        # use fewer features per zone during training
        self.feature_params['maxCorners'] //= self.training_corners_mod
        self.app_params['tracks_number'] //= self.training_corners_mod
    elif index_y == 2:
        self.distance = 0

def create_result(self, root):
    frame_result = Frame(root)
    frame_result.pack(fill=BOTH)

    l_result = Label(frame_result, text="Recognized sound:")
    l_result.pack(pady=10, padx=5, side=LEFT)

    self.t_result_str_var.set("Output is in console..")
    t_result = Label(frame_result, height=1, width=20,
                     textvariable=self.t_result_str_var, bg="white")
    t_result.pack(pady=10, padx=5, side=LEFT)

    b_predict = Button(frame_result, text='Recognize',
                       command=lambda: NeuralNetwork.predict_results())
    b_predict.pack(pady=10, padx=5, side=LEFT)

    b_details = Button(frame_result, text='Details')
    b_details.pack(pady=10, padx=5, side=RIGHT)

def __init__(self, **kwargs):
    super(TrainingResult, self).__init__(**kwargs)
    contents = Builder.load_string(kv)
    self.add_widget(contents)
    self.ids = contents.ids

    # Bind the network
    self.network = NeuralNetwork(App.get_running_app())

    # Sample content
    # grid = self.ids.result_grid
    # for j in range(157):
    #     # First column: Actual image
    #     grid.add_widget(Image(source='face1.png'))
    #
    #     # Second column: Network's image reconstruction
    #     grid.add_widget(Image(source='face1.png'))
    #
    #     # Third column: Network Representation
    #     grid.add_widget(Label(text='mad', size_hint=(.5, .5)))
    #
    #     # Fourth column: Actual representation
    #     grid.add_widget(Label(text='happy', size_hint=(.5, .5)))
    #
    #     # Fifth column: correct/incorrect
    #     grid.add_widget(Label(text='1', size_hint=(.5, .5)))

    # Bind the buttons
    def back_pressed(instance):
        self.pause_training(instance)
        App.get_running_app().go_back()
    self.ids.back_button.bind(on_press=back_pressed)

    # Start the training thread when the screen is displayed
    self.bind(on_enter=self.start_training)

import numpy as np

from data_processor import DataProcessor
from neural_network import NeuralNetwork

if __name__ == '__main__':
    data_processor = DataProcessor()
    x_train, y_train = data_processor.get_train_set()
    x_test, y_test = data_processor.get_test_set()

    input_nodes = 2
    hidden_nodes = 3
    output_nodes = 1
    network = NeuralNetwork(input_nodes=input_nodes, hidden_nodes=hidden_nodes,
                            output_nodes=output_nodes, lr=0.01)
    network.train(x_train, y_train)
    score = network.evaluate(x_test, y_test)
    print(score)

    x = np.array([[1, 1], [10, 10], [100, 100], [2000, 1000]], dtype=float)
    y = network.predict(x)
    print(x)
    print(y)

# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, jsonify
from neural_network import NeuralNetwork
import numpy as np

app = Flask(__name__)
nn = NeuralNetwork(input_size=784, hidden_size=324, output_size=10)
nn.load("../data/neural_network.npz")


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/estimate", methods=["POST"])
def estimate():
    try:
        x = np.array(request.json["input"]) / 255.0
        y = int(nn.predicate(x))
        return jsonify({"estimated": y})
    except Exception as e:
        print(e)
        # jsonify cannot serialize an Exception object directly
        return jsonify({"error": str(e)})


if __name__ == '__main__':
    app.run(host='0.0.0.0')

    train_X_pca[i] = flda.transform(train_X[i])

test_X_pca = np.zeros((len(test_X), n_components))
for i in range(len(test_X)):
    test_X_pca[i] = flda.transform(test_X[i])

print "train_X_pca.shape", train_X_pca.shape

###############################################################################
# Train a neuralnet classification model
print("\nFitting the neural net to the training set")

nnet = NeuralNetwork(lr=0.1, sizes=[50, 25, 10], seed=1234, n_epochs=50)
nnet.initialize(n_components, len(animal_labels), classes_mapping=animal_labels)
i_iteration, i_loss, lr = nnet.train(train_X_pca, train_y)

###############################################################################
# Quantitative evaluation of the prediction accuracy on the test set
print("\nPredicting classes on the test set")

train_y_pred, train_y_prob = nnet.predict(train_X_pca)
test_y_pred, test_y_prob = nnet.predict(test_X_pca)

accuracy = 0.0
for i in range(len(train_y)):
    if train_y[i] == train_y_pred[i]:

from data_preprocessor import DataPreprocessor
from neural_network import NeuralNetwork
import numpy as np
from tkinter.filedialog import askopenfilename
from tkinter import *
import operator
import matplotlib.pyplot as plt

if __name__ == "__main__":
    dataset = DataPreprocessor().gather()

    Tk().withdraw()
    weight_file = askopenfilename()
    nn = NeuralNetwork(784, 64, 10, weight_file)

    if (weight_file == ""):
        for i in range(1000):
            normalize_image = dataset["train_images"][i] / 255
            nn.train(normalize_image, dataset["train_labels"][i])

    newimg = plt.imread("E:\\Projects\\ANN\\MNIST_Classifire\\testing\\9.png")
    normalize_image = newimg[:, :, 1]
    guess = nn.predict(normalize_image)
    index, value = max(enumerate(guess), key=operator.itemgetter(1))

    lable = ""
    if (index == 0):
        lable = "Zero"
    elif (index == 1):
        lable = "One"
    elif (index == 2):
        lable = "Two"
    elif (index == 3):
        lable = "Three"

def trainVision(self):
    net = NeuralNetwork("vision")
    net.makeNetwork(1024, 25, 2)
    if raw_input("Continue Previous Training? (y/n): ") == "y":
        print "loading weights..."
        net.loadWeights("weights/vision_2")
    else:
        print "initializing random weights..."
        net.initializeRandomWeights()
    print "generating input activations..."
    net.genInputActivations()
    try:
        print "training..."
        net.train()
    except KeyboardInterrupt:
        net.saveWeights()
        print "weights saved"

def open(self, path):
    self.current_step = 0
    self.image = Image()
    self.image.load(path)
    self.network = NeuralNetwork('default')
    return self.image

def create_neural_network():
    """
    Creates a new NeuralNetwork object with the given parameters, saving the
    serialized object to disk and creating a metadata entry in the db.
    The NeuralNetwork object is then loaded into the global cache for further
    operations.
    :return:
    """
    # TODO: This method should require a token or other authentication
    resp = {'status': APIResponseStatus.OK.value}
    status_code = 200
    required_args = {"name", "input", "hidden", "output", "learningrate"}
    update_request = 'update' in flask.request.url_rule.rule
    db_record_exists = perceptron_db.record_exists(
        {"network_id": flask.request.args['name']})

    if required_args.intersection(set(flask.request.args)) == required_args:
        if db_record_exists and not update_request:
            return {
                'status': APIResponseStatus.ERROR_DUPLICATE_ENTRY.value
            }, 400
        elif not db_record_exists and update_request:
            update_request = False  # treat as a new network to create if no record currently exists
        try:
            # create new network class object with provided params
            new_neural_network = NeuralNetwork(
                int(flask.request.args['input']),
                int(flask.request.args['hidden']),
                int(flask.request.args['output']),
                float(flask.request.args['learningrate']))
            # setup metadata entry for tracking serialized file location via the db
            network_storage_document = {
                "network_id": flask.request.args['name'],
                "saved_data": "{0}/{1}_{2}_perceptron_network.bin".format(
                    config_data['trained_networks_directory'],
                    uuid.uuid1(),
                    flask.request.args['name'])
            }
            # load the newly generated network into the global cache for fast access (i.e. for training operations)
            perceptron_cache.add(
                network_storage_document['network_id'],
                (new_neural_network, network_storage_document['saved_data']))
            # create a snapshot of the initialized class object and serialize it to the location given in the metadata
            io_helper.save_pretrained_network(
                new_neural_network, network_storage_document['saved_data'])
            # save metadata entry to db
            if update_request:
                # cleanup the old serialized object from disk, then replace its metadata entry as well
                os.remove(
                    perceptron_db.read_document(
                        {"network_id": flask.request.args['name']})["saved_data"])
                db_update_count = perceptron_db.update_document(
                    {"network_id": flask.request.args['name']},
                    network_storage_document)
                resp.update({'documents_updated': db_update_count})
            else:
                db_storage_id = perceptron_db.write_document(
                    network_storage_document)
                resp.update({'document_id': str(db_storage_id)})
        except ValueError:
            resp.update({'status': APIResponseStatus.VALUE_ERROR.value})
            status_code = 400
    else:
        resp.update({'status': APIResponseStatus.NO_ID_ERROR.value})
        status_code = 400
    return resp, status_code

def test_activation(self):
    network = NeuralNetwork(3, 2, 1, 0.5)
    # Test that the activation function is a sigmoid
    self.assertTrue(
        np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))

        self.assertTrue(np.allclose(network.run(inputs), 0.09998924))


suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)

# Training the network
import sys

####################
### Set the hyperparameters in your myanswers.py file ###
####################
from neural_network import iterations, learning_rate, hidden_nodes, output_nodes

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for ii in range(iterations):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt']

    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii / float(iterations)) +

def __init__(self):
    self.and_nn = NeuralNetwork([2, 1])
    self.layer = self.and_nn.getLayer(0)
    self.layer[:, :] = 0.0
    self.layer += torch.DoubleTensor([[10], [10], [-15]])

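# Worked check of the hand-set AND weights above (assuming getLayer(0) holds the two
# input weights first and the bias last): the unit computes sigmoid(10*x1 + 10*x2 - 15), so
#   (1, 1) -> sigmoid(+5)  ~ 0.993   (true)
#   (1, 0) -> sigmoid(-5)  ~ 0.007   (false)
#   (0, 0) -> sigmoid(-15) ~ 0.000   (false)
# which is why these particular values realize logical AND.
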
def __init__(self):
    self.not_nn = NeuralNetwork([1, 1])
    self.layer = self.not_nn.getLayer(0)
    self.layer[:, :] = 0.0
    self.layer += torch.DoubleTensor([[-20], [10]])

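# Same weight/bias layout assumed as in the AND example: sigmoid(-20*x + 10) gives
# sigmoid(+10) ~ 1 for x = 0 and sigmoid(-10) ~ 0 for x = 1, i.e. logical NOT.
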
for i in range(num_cases):
    if expected_outputs[i] >= 0.5:
        positives.append(inputs[i])
    else:
        negatives.append(inputs[i])

positives_array = np.zeros((2, len(positives)))
negatives_array = np.zeros((2, len(negatives)))
for i in range(len(positives)):
    positives_array[0, i] = positives[i][0, 0]
    positives_array[1, i] = positives[i][1, 0]
for i in range(len(negatives)):
    negatives_array[0, i] = negatives[i][0, 0]
    negatives_array[1, i] = negatives[i][1, 0]

# Creating and training the neural network
neural_network = NeuralNetwork(2, 10, 1, 6.0)
costs = np.zeros(num_epochs)
for i in range(num_epochs):
    neural_network.back_propagation(inputs, expected_outputs)
    costs[i] = neural_network.compute_cost(inputs, expected_outputs)
    print('epoch: %d; cost: %f' % (i + 1, costs[i]))

# Plotting cost function convergence
plt.plot(costs)
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.title('Cost Function Convergence')
plt.grid()
plt.savefig('cost_function_convergence.' + fig_format, format=fig_format)

# Plotting positive and negative samples

# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, jsonify
from neural_network import NeuralNetwork
import numpy as np

app = Flask(__name__)
nn = NeuralNetwork(input_size=784, hidden_size=324, output_size=10)
nn.load("../notebooks/neural_network.npz")


@app.route("/")
def index():
    return render_template("index.html")


@app.route("/estimate", methods=["POST"])
def estimate():
    try:
        x = np.array(request.json["input"]) / 255.0
        y = int(nn.predict(x))
        return jsonify({"estimated": y})
    except Exception as e:
        print(e)
        # jsonify cannot serialize an Exception object directly
        return jsonify({"error": str(e)})


if __name__ == '__main__':
    app.run(host='0.0.0.0')

    cur_line = line.strip().split(',')
    time = cur_line[-1]
    type = cur_line[3]
    if time in dict_time:
        dict_time[time][type] = cur_line[-2]
    else:
        dict_time[time] = {type: cur_line[-2]}

# build the new data set
x_arr = []
y_arr = []
for key in sorted(dict_time.keys()):
    type_value = dict_time[key]
    if len(type_value) == 4:
        type_value_arr = [float(type_value['"temp"']), float(type_value['"co2"']),
                          float(type_value['"hum"']), float(type_value['"lx"'])]
        x_arr.append(type_value_arr)
        y_arr.append(float(type_value['"temp"']))

nn = NeuralNetwork([4, 20, 1], 'logistic')
x_arr = np.array(x_arr)
y_arr = np.array(y_arr)
print x_arr, len(y_arr)

# data normalization
x_arr = np.where(x_arr > 0, x_arr, np.NAN)
mean_x = np.nanmean(x_arr, 0)
x_arr = np.where(x_arr > 0, x_arr, mean_x)
x_min_max = np.max(x_arr, 0) - np.min(x_arr, 0)
x_arr = (x_arr - np.min(x_arr, 0)) / x_min_max
y_arr = x_arr[:, 0]
print x_arr, y_arr

x_train, x_test, y_train, y_test = train_test_split(x_arr[:-1], y_arr[1:])
print "start fitting"
nn.fit(x_train, y_train)
errors = []
times = []

for i in range(check_if_int('How many hidden layers should be created? :')):
    print('\n---Hidden Layer ---', (i + 1))
    NumberOfNeuronsNInEveryLayer.append(
        check_if_int('Enter the number of neurons in this layer :'))
    activationFunctionInEveryLayer.append(check_activation_function())

NumberOfNeuronsNInEveryLayer.append(3)
print('\n---Output Layer')
activationFunctionInEveryLayer.append(check_activation_function())

if (activationFunctionInEveryLayer[-1] != 'logsig'):
    for stoxos in stoxoiInBinary:
        for stoixeio in range(3):
            if (stoxos[stoixeio] == 0):
                stoxos[stoixeio] = -1

nn = NeuralNetwork(NumberOfNeuronsNInEveryLayer, activationFunctionInEveryLayer)
number = check_correct_value_for_training()
if (number == 1 or number == 2):
    nn.BuildNN()
    if (number == 1):
        size = check_correct_value_for_training_size_of_training()
        if (size == 1):
            nn.inputsAndTargets(eisodoi, stoxoiInBinary, 1, 1)
            nn.Gradient_Descent_Training_No_Validating()
        elif (size == 2):
            nn.inputsAndTargets(eisodoi, stoxoiInBinary, 0.5, 1)
            nn.Gradient_Descent_Training_No_Validating()
            print('MSE = ', nn.anaklhsh())
        elif (size == 3):

def main():
    global X, y
    try:
        df = pd.read_csv("./datasets/digits.csv", header=None)
        X = df.values
        df = pd.read_csv("./datasets/labels.csv", header=None)
        y = df.values
    except:
        X = np.array([[]])
        y = np.array([[]])

    sel = np.random.permutation(X)
    sel = sel[0:100]
    if len(X[0]) != 0:
        display_data(sel)

    input_layer_size = 400
    hidden_layer_size = 25
    num_labels = 10
    shape = (input_layer_size, hidden_layer_size, num_labels)

    for i in range(1, 11):
        prevLen = len(y)
        print(f'Write example for {i if i < 10 else 0}')
        f = lambda x: addTrainingExample(x, [[i]])
        Paint(func=f)
        if len(y) == prevLen:
            break

    if len(X[0]) == 0:
        print("Not enough data... Exiting")
        return

    nn = NeuralNetwork(shape)
    nn.Lambda = 1

    initial_Theta1 = nn.rand_initialize_weights(input_layer_size, hidden_layer_size)
    initial_Theta2 = nn.rand_initialize_weights(hidden_layer_size, num_labels)
    initial_nn_params = np.array(initial_Theta1.flatten().tolist() +
                                 initial_Theta2.flatten().tolist())

    f = lambda p: nn.cost_function(p, X, y)[0]
    gradf = lambda p: nn.cost_function(p, X, y)[1]

    print('Training Neural Network... (This can take a while)')
    nn_params = optimize.fmin_cg(f, initial_nn_params, fprime=gradf, maxiter=250)

    shape = hidden_layer_size, (input_layer_size + 1)
    idx = hidden_layer_size * (input_layer_size + 1)
    Theta1 = np.reshape(nn_params[0:idx], shape)
    display_data(Theta1[:, 1:])

    pred = nn.predict(nn_params, X)
    print(f'Training Set Accuracy: {np.mean(pred == y.T) * 100}%')

    p = lambda x: nn.predict(nn_params, x)
    Paint(predict=p)

    file = open("./datasets/digits.csv", 'w+')
    for i in range(len(X)):
        file.write(f'{", ".join(str(x) for x in X[i])}\n')
    file.close()

    file = open("./datasets/labels.csv", 'w+')
    for l in y:
        file.write(f'{l[0]}\n')
    file.close()

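# Note on the unrolling above: optimize.fmin_cg works on a flat parameter vector, so
# Theta1 of shape (hidden_layer_size, input_layer_size + 1) = (25, 401), including the
# bias column, occupies the first 25 * 401 = 10025 entries of nn_params; that is the
# value `idx` computes before reshaping the weights for display_data.
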
def __init__(self):
    self.xornn = NeuralNetwork([2, 2, 1])
    self.layer10 = self.xornn.getLayer(0)
    #self.layer10.fill_(0)
    #self.layer10 += torch.FloatTensor([[-0.1, -0.1], [0.6, -0.6], [-0.6, 0.6]])
    self.layer20 = self.xornn.getLayer(1)

class TestNeuralNetwork():
    import numpy as np
    import scipy.io

    # set correct cost value as per Coursera Machine Learning Course (ex4)
    J_correct = 0.287629
    J_correct_reg = 0.383770

    # load sample data
    theta = scipy.io.loadmat('sample_data/ex4weights.mat')
    data = scipy.io.loadmat('sample_data/ex4data1.mat')
    data_array = np.concatenate((data['X'], data['y']), axis=1)
    X = y = data_array[:, :-1]
    y = data_array[:, -1]
    # minus each output by 1 as index starts at 0 (MATLAB starts at 1)
    for idx in np.unique(y):
        y[y == idx] = idx - 1

    # set NN theta
    sample_theta = {}
    sample_theta[1] = theta['Theta1']
    sample_theta[2] = theta['Theta2']

    def test_cost_func_without_reg(self):
        from neural_network import NeuralNetwork
        # create NN instance
        nn = NeuralNetwork(X=self.X, y=self.y, multi_class=True,
                           options={'lamb': 0.0,
                                    'num_layers': 3,
                                    'hidden_layer_size': 25,
                                    'maxiter': 2,
                                    'debug': False,
                                    'gradient_check': False})
        # set theta to sample data for testing
        nn.set_theta(self.sample_theta)
        # calculate outputs
        a, z = nn.feed_forward()
        J = nn.calculate_cost(a, nn.y_matrix, with_reg=False)
        assert float('{0:.6g}'.format(J)) == self.J_correct

    def test_cost_func_with_reg(self):
        a, z = self.nn.feed_forward()
        J = self.nn.calculate_cost(a, self.nn.y_matrix, with_reg=True)
        print self.nn.regularize_cost()
        assert float('{0:.6g}'.format(J)) == self.J_correct_reg

    def test_two(self):
        x = "hello"
        assert hasattr(x, 'check')

    def test_gradient_check(self):
        from neural_network import NeuralNetwork
        import numpy as np
        np.random.shuffle(self.data_array)
        SUBSET = 100
        X = self.data_array[:SUBSET, :-1]
        y = self.data_array[:SUBSET, -1]
        # minus each output by 1 as index starts at 0 (MATLAB starts at 1)
        for idx in np.unique(y):
            y[y == idx] = idx - 1
        # create NN instance
        nn = NeuralNetwork(X=X, y=y, multi_class=True,
                           options={'lamb': 0.0,
                                    'num_layers': 3,
                                    'hidden_layer_size': 5,
                                    'maxiter': 5,
                                    'debug': False,
                                    'gradient_check': True})
        nn.train()
        assert np.average(nn.gradient_diff) < 1e-8

    def test_train(self):
        from neural_network import NeuralNetwork
        import numpy as np
        np.random.shuffle(self.data_array)
        X = self.data_array[:, :-1]
        y = self.data_array[:, -1]
        # create NN instance
        self.nn = NeuralNetwork(X=X, y=y, multi_class=True,
                                options={'lamb': 1.0,
                                         'num_layers': 3,
                                         'hidden_layer_size': 25,
                                         'training_split': .6,
                                         'maxiter': 400,
                                         'debug': False,
                                         'gradient_check': False})
        self.nn.train()

def __init__(self):
    self.and_nn = NeuralNetwork([2, 1])
    self.layer0 = self.and_nn.getLayer(0)

def test_learn_or(self):
    data = make_multiple_outputs(OR)
    model = NeuralNetwork().learn(data)
    for x, (y,) in data:
        self.assertEqual(clip(model.predict(x)[0]), y,
                         msg='datum %s, %s' % (x, y))

def __init__(self):
    self.notnn = NeuralNetwork([1, 1])
    self.layer0 = self.notnn.getLayer(0)

# we don't want zeroes and ones in the labels either:
train_labels_one_hot[train_labels_one_hot == 0] = 0.01
train_labels_one_hot[train_labels_one_hot == 1] = 0.99
test_labels_one_hot[test_labels_one_hot == 0] = 0.01
test_labels_one_hot[test_labels_one_hot == 1] = 0.99

#**************************************************
# To get the model to work, adjust these values
epochs = 10
learning_rate = 0.001
#**************************************************

for i in range(5):
    dropout_rate = (i + 1) / 10
    ANN = NeuralNetwork(no_of_in_nodes=image_pixels,
                        no_of_out_nodes=10,
                        no_of_hidden_nodes=100,
                        learning_rate=learning_rate)
    print("-" * 100)
    print("Training for dropout rate", dropout_rate)
    print("-" * 100)
    weights = ANN.train(train_imgs,
                        train_labels_one_hot,
                        epochs=epochs,
                        dropout_rate=dropout_rate,
                        intermediate_results=True)
    print("-" * 100)
    print("Testing for dropout rate", dropout_rate)
    print("-" * 100)

import numpy as np

from neural_network import NeuralNetwork
from train import prepare_train_and_show

if __name__ == "__main__":
    train_data = np.array([[i] for i in np.linspace(-50, 50, 26)])
    train_labels = np.array(np.square(train_data))
    test_data = np.array([[i] for i in np.linspace(-50, 50, 101)])
    test_labels = np.array(np.square(test_data))

    neural_network = NeuralNetwork.init_from_scratch(
        0.25, [1, 10, 15, 15, 15, 1],
        ['tanh', 'sigmoid', 'sigmoid', 'sigmoid', 'sigmoid'])

    print("Better approximation in longer period of time")
    prepare_train_and_show(neural_network, train_data, train_labels,
                           test_data, test_labels)

import parameters


class TrainingExample():
    def __init__(self, inputs, output):
        self.inputs = inputs
        self.output = output


if __name__ == "__main__":
    # Seed the random number generator
    seed_random_number_generator()

    # Assemble a neural network, with 3 neurons in the first layer,
    # 4 neurons in the second layer and 1 neuron in the third layer
    network = NeuralNetwork([3, 4, 1])

    # Training set
    examples = [TrainingExample([0, 0, 1], 0),
                TrainingExample([0, 1, 1], 1),
                TrainingExample([1, 0, 1], 1),
                TrainingExample([0, 1, 0], 1),
                TrainingExample([1, 0, 0], 1),
                TrainingExample([1, 1, 1], 0),
                TrainingExample([0, 0, 0], 0)]

    # Create a video and image writer
    fig, writer = generate_writer()

    # Generate an image of the neural network before training
    print "Generating an image of a new neural network"

def get_accuracy(net, X, y):
    correct = 0
    for inputs, expected_outputs in zip(X, y):
        prediction = get_prediction(net, inputs)
        if prediction == one_hot_decode(expected_outputs):
            correct += 1
    return correct / len(X)


np.random.seed(1)
net = NeuralNetwork((28**2, 16, 16, 10))

print("Getting data...")
X_train, X_test, y_train, y_test = get_data()

print("Training...")
net.fit(X_train, y_train, minibatch_size=10, num_epochs=50000,
        learning_rate=.5, reporting=100)

print("Saving neural network...")

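# Hedged sketch: get_prediction and one_hot_decode are used by get_accuracy above but
# defined elsewhere. Implementations consistent with that usage (argmax of the network
# output and of the one-hot target) might look like this; `net.predict` is an assumed
# method name and the originals may differ.
import numpy as np

def one_hot_decode(one_hot_vector):
    # Index of the single "hot" entry, e.g. [0, 0, 1, 0] -> 2.
    return int(np.argmax(one_hot_vector))

def get_prediction(net, inputs):
    # Class whose output activation is largest for the given inputs.
    return int(np.argmax(net.predict(inputs)))
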
from neural_network import NeuralNetwork, sigmoid, softmax, ReLU
from mnist_loader import load_data_arrays
import numpy as np
from matplotlib import pyplot as plt

if __name__ == '__main__':
    activation_functions = [ReLU, softmax]
    layers_size_vector = [784, 200, 10]
    cost_function = 'euclidean_distance'
    dropout_probabilities = None
    learning_rate = .003
    number_of_iterations = 10000
    regularization_lambda = 0

    training_data_X, training_data_Y, validation_data_X, validation_data_Y, \
        test_data_X, test_data_Y = load_data_arrays()

    network = NeuralNetwork(layers_size_vector, activation_functions,
                            cost_function, dropout_probabilities)
    cost = network.fit(training_data_X, training_data_Y, learning_rate,
                       regularization_lambda, 1, number_of_iterations)
    # cost = network.fit(test_data_X, test_data_Y, learning_rate,
    #                    regularization_lambda, 1, number_of_iterations)

    plt.plot(cost, 'o')
    plt.show()

class TrainingExample():
    def __init__(self, inputs, output):
        self.inputs = inputs
        self.output = output


if __name__ == "__main__":
    # Seed the random number generator
    seed_random_number_generator()

    # Assemble a neural network, with 3 neurons in the first layer,
    # 4 neurons in the second layer and 1 neuron in the third layer
    network = NeuralNetwork([3, 4, 1])

    # Training set
    examples = [
        TrainingExample([0, 0, 1], 0),
        TrainingExample([0, 1, 1], 1),
        TrainingExample([1, 0, 1], 1),
        TrainingExample([0, 1, 0], 1),
        TrainingExample([1, 0, 0], 1),
        TrainingExample([1, 1, 1], 0),
        TrainingExample([0, 0, 0], 0)
    ]

    # Create a video and image writer
    fig, writer = generate_writer()

def test_learn_xor(self):
    data = make_multiple_outputs(XOR)
    model = NeuralNetwork().learn(data, num_hidden_layers=2, max_iterations=2000)
    for x, (y,) in data:
        self.assertEqual(clip(model.predict(x)[0]), y,
                         msg='datum %s, %s' % (x, y))

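# Hedged sketch: OR, XOR, make_multiple_outputs and clip come from the test module's
# helpers. Definitions consistent with how the tests above unpack `(y,)` and compare a
# clipped prediction might be the following; the actual helpers may differ.
OR = [([0, 0], 0), ([0, 1], 1), ([1, 0], 1), ([1, 1], 1)]
XOR = [([0, 0], 0), ([0, 1], 1), ([1, 0], 1), ([1, 1], 0)]

def make_multiple_outputs(data):
    # Wrap each scalar target in a 1-tuple so tests can unpack it as (y,).
    return [(x, (y,)) for x, y in data]

def clip(value):
    # Snap a real-valued prediction to the nearer of 0 and 1.
    return 0 if value < 0.5 else 1
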
from neural_network import NeuralNetwork
import numpy as np

nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))

def test_mnist_one_hot(num_train_examples=-1, num_test_examples=-1, hidden_layers=(100,),
                       sigmoid='tanh', learning_rate=0.01, layer_decay=1.0, momentum=0.0,
                       batch_size=100, num_epochs=100, csv_filename=None,
                       return_test_accuracies=True):
    # Collect and preprocess the data.
    if sigmoid == 'logistic':
        train_input = convert_mnist_images_logistic(mnist.train_images()[:num_train_examples])
        train_output = convert_mnist_labels_one_hot(
            mnist.train_labels()[:num_train_examples], positive=0.9, negative=0.1)
        test_input = convert_mnist_images_logistic(mnist.test_images()[:num_test_examples])
        test_output = convert_mnist_labels_one_hot(mnist.test_labels()[:num_test_examples],
                                                   positive=0.9, negative=0.1)
    elif sigmoid == 'tanh':
        train_input, mean_shift, std_scale = convert_mnist_images_train_tanh(
            mnist.train_images()[:num_train_examples])
        train_output = convert_mnist_labels_one_hot(
            mnist.train_labels()[:num_train_examples], positive=1.0, negative=-1.0)
        test_input = convert_mnist_images_test_tanh(mnist.test_images()[:num_test_examples],
                                                    mean_shift, std_scale)
        test_output = convert_mnist_labels_one_hot(mnist.test_labels()[:num_test_examples],
                                                   positive=1.0, negative=-1.0)
    else:
        raise ValueError('Invalid sigmoid function.')

    # Create and train the neural network.
    layer_sizes = (784,) + hidden_layers + (10,)
    weight_decay = 0.0
    nn = NeuralNetwork(layer_sizes, sigmoid=sigmoid, weight_decay=weight_decay)
    num_examples = train_input.shape[0]
    num_iterations = (num_examples // batch_size) * num_epochs

    rows = None
    if csv_filename is not None:
        rows = []
    test_accuracies = None
    if return_test_accuracies:
        test_accuracies = []

    def callback(iteration):
        if iteration % (num_examples // batch_size) == 0:
            epoch = iteration // (num_examples // batch_size)
            training_prediction_accuracy = get_prediction_accuracy(nn, train_input, train_output)
            test_prediction_accuracy = get_prediction_accuracy(nn, test_input, test_output)
            training_loss = nn.get_loss(train_input, train_output)
            test_loss = nn.get_loss(test_input, test_output)
            print('{},{:.6f},{:.6f},{:.6f},{:.6f}'.format(epoch, training_prediction_accuracy,
                                                          test_prediction_accuracy,
                                                          training_loss, test_loss))
            if csv_filename is not None:
                rows.append((epoch, training_prediction_accuracy, test_prediction_accuracy,
                             training_loss, test_loss))
            if return_test_accuracies:
                test_accuracies.append(test_prediction_accuracy)

    print('Network Parameters')
    print('layer_sizes: {}, sigmoid: {}, weight_decay: {}'.format(layer_sizes, sigmoid, weight_decay))
    print('Training Parameters')
    print('num_iterations: {}, learning_rate: {}, layer_decay: {}, momentum: {}, batch_size: {}'.format(
        num_iterations, learning_rate, layer_decay, momentum, batch_size))
    print('')
    header = 'epoch,training_accuracy,test_accuracy,training_loss,test_loss'
    print(header)

    stochastic_gradient_descent(nn, train_input, train_output, num_iterations=num_iterations,
                                learning_rate=learning_rate, layer_decay=layer_decay,
                                momentum=momentum, batch_size=batch_size, callback=callback)

    if csv_filename is not None:
        save_rows_to_csv(csv_filename, rows, header.split(','))
    if return_test_accuracies:
        return test_accuracies

class TrainingResult(Screen):
    def __init__(self, **kwargs):
        super(TrainingResult, self).__init__(**kwargs)
        contents = Builder.load_string(kv)
        self.add_widget(contents)
        self.ids = contents.ids

        # Bind the network
        self.network = NeuralNetwork(App.get_running_app())

        # Sample content
        # grid = self.ids.result_grid
        # for j in range(157):
        #     # First column: Actual image
        #     grid.add_widget(Image(source='face1.png'))
        #
        #     # Second column: Network's image reconstruction
        #     grid.add_widget(Image(source='face1.png'))
        #
        #     # Third column: Network Representation
        #     grid.add_widget(Label(text='mad', size_hint=(.5, .5)))
        #
        #     # Fourth column: Actual representation
        #     grid.add_widget(Label(text='happy', size_hint=(.5, .5)))
        #
        #     # Fifth column: correct/incorrect
        #     grid.add_widget(Label(text='1', size_hint=(.5, .5)))

        # Bind the buttons
        def back_pressed(instance):
            self.pause_training(instance)
            App.get_running_app().go_back()
        self.ids.back_button.bind(on_press=back_pressed)

        # Start the training thread when the screen is displayed
        self.bind(on_enter=self.start_training)

    # Manage network training
    def start_training(self, instance):
        Logger.info('Starting training')
        self.network.reset_training()
        self.resume_training(self)

    def resume_training(self, instance):
        Logger.info('Resuming training')
        self.clear_results()
        self.training_paused = False
        pause_button = self.ids.pause_button
        pause_button.text = 'Pause'
        pause_button.unbind(on_press=self.resume_training)
        pause_button.bind(on_press=self.pause_training)
        # Make sure the thread stops on application exit
        App.get_running_app().unbind(on_stop=self.pause_training)
        App.get_running_app().bind(on_stop=self.pause_training)
        threading.Thread(target=self._run_training).start()

    def pause_training(self, instance):
        self.training_paused = True
        pause_button = self.ids.pause_button
        pause_button.text = 'Resume'
        pause_button.unbind(on_press=self.pause_training)
        pause_button.bind(on_press=self.resume_training)
        self.display_results()

    def clear_results(self):
        '''Clears all children of the grid, except for the header labels.'''
        grid = self.ids.result_grid
        grid.clear_widgets()

    @mainthread
    def display_results(self):
        '''Shows the result of the training in the grid.'''
        grid = self.ids.result_grid
        predictions, reconstructions = self.network.predict_all()
        predictions_correct = predictions == self.network.targets
        for is_test, image, reconstruction, prediction, target, prediction_correct in \
                zip(self.network.idx_test,
                    self.network.app.dataset['images'],
                    reconstructions,
                    predictions,
                    self.network.targets,
                    predictions_correct):
            # First column: Actual image
            grid.add_widget(ResultImage(image))

            # Second column: Network's image reconstruction
            grid.add_widget(ResultImage(reconstruction))

            # Third column: Network Representation
            grid.add_widget(Label(text=self.network.target_names[prediction + 1],
                                  size_hint=(.5, .5)))

            # Fourth column: Actual representation
            grid.add_widget(Label(text=self.network.target_names[target + 1],
                                  size_hint=(.5, .5)))

            # Fifth column: correct/incorrect
            grid.add_widget(Label(text='%d' % prediction_correct, size_hint=(.5, .5)))

        self.ids.result_scrollview.disabled = False
        self.ids.table_header.disabled = False

    def _run_training(self):
        graph = self.ids.training_graph
        for epoch, epochs, rmse, cerr, is_last in self.network.resume_training():
            if (epoch % 10) == 1 or is_last:
                Logger.debug('epoch: %d, rmse shape: %s' % (epoch, str(rmse.shape)))
                graph.plot(epoch, epochs, rmse, cerr, self.network.minimum_rmse)
            if self.training_paused:
                break
        Logger.info('Exit training thread')
        self.display_results()

import random

from neural_network import NeuralNetwork

inputsize = 2
hiddensize = 4
outputsize = 1

nn = NeuralNetwork(inputsize, hiddensize, outputsize)
print(nn.weights_ih)
print(nn.weights_ho)
print(nn.bias_h)
print(nn.bias_o)

inp = [1] * inputsize
out = nn.predict(inp)
print(out)

inp = [[0, 0], [0, 1], [1, 0], [1, 1]]
out = [0, 1, 1, 0]

print('training...')
for _ in range(10000):
    i = random.choice([0, 1, 2, 3])
    nn.train(inp[i], out[i])

#~ out = nn.predict([1, 0])
#~ print(out)

class car:
    def __init__(self):
        # Deniz: moved parameters to params.py
        self.time = 0
        self.neuralNetwork = NeuralNetwork(params.POS_NEURONS, params.POS_RANGE,
                                           params.VEL_NEURONS, params.VEL_RANGE,
                                           params.NB_OUTPUTS, params.ETA,
                                           params.GAMMA, params.LAMBDA)
        # store the last taken action, in order to reinforce the eligibility trace
        self.action_index = None

    def reset(self):
        # reset values before each trial.
        self.time = 0
        self.neuralNetwork.reset()
        self.action_index = None

    def choose_action(self, position, velocity, R, learn=True):
        """This method must:
        - choose your action based on the current position and velocity.
        - update your parameters according to SARSA. This step can be turned off
          by the parameter 'learn=False'.

        GIVEN PARAMETERS ARE FOR THE NEXT STATE, based on the PREVIOUS ACTION.
        The [x,y] values of the position are always between 0 and 1.
        The [vx,vy] values of the velocity are always between -1 and 1.
        The reward from the last action R is a real number.
        """
        if self.time == 0:
            self.neuralNetwork.compute_network_output(position, velocity)
            self.action_index = self.policy()
            self.time += 1
            return self.action_index
            #return self.neuralNetwork.getActionDirection( self.action_index )

        if learn:
            # updating eligibility trails
            # do it before everything else, since eligibility traces are
            # reinforced with the input of the taken action (== previous action)
            # so before we change input to new position
            #print "I'm learning"
            self.neuralNetwork.decay_eligibility_trails()
            self.neuralNetwork.update_eligibility_trail(self.action_index)
            # pass

        # this copies the list by slicing, not a pointer
        Q_current = self.neuralNetwork.Q_outputs[self.action_index]

        # get new Q values after the transition Q(s',a')
        self.neuralNetwork.compute_network_output(position, velocity)

        # get action a', based on policy
        new_action = self.policy()
        Q_next = self.neuralNetwork.Q_outputs[new_action]

        if learn:
            if R > 0:
                R *= (200 / self.time)
            else:
                R /= 2.0
            delta = R + params.GAMMA * Q_next - Q_current

            # ivan: wrong place for updating etraces
            # self.neuralNetwork.decay_eligibility_trails()
            # self.neuralNetwork.update_eligibility_trail(self.action_index)

            # updating weights
            self.neuralNetwork.update_weights(delta, self.action_index)

            # ivan: same as putting it up before everything
            # self.neuralNetwork.decay_eligibility_trails()
            # self.neuralNetwork.update_eligibility_trail(new_action)

        self.time += 1
        self.action_index = new_action

        # actuate the action a'
        return self.action_index

    def get_action_direction(self, a):
        """computes the direction for action a
        @param a - integer, index into the Q value list
        """
        # return constant velocity (0,0) if a = 0
        if a == 0:
            return (0.0, 0.0)
        n_dir = params.NB_OUTPUTS - 1.0
        dir_x = np.cos(-2.0 * np.pi * a / n_dir + np.pi / 2.0)
        dir_y = np.sin(-2.0 * np.pi * a / n_dir + np.pi / 2.0)
        return (dir_x, dir_y)

    def policy(self):
        """this method returns the action index based on some policy

        NOTE: it is assumed that the underlying neural network has already
        computed Q values for the given position/velocity
        """
        #return self.decaying_e_greedy_policy()
        return self.e_greedy_policy()

    def decaying_e_greedy_policy(self):
        """this method returns the action index based on an epsilon-greedy policy
        with decaying epsilon

        NOTE: it is assumed that the underlying neural network has already
        computed Q values for the given position/velocity
        """
        Q = self.neuralNetwork.Q_outputs
        assert len(Q) == params.NB_OUTPUTS

        if self.time < params.STOP_DECAY:
            delta_eps = params.EPSILON_START - params.EPSILON
            eps = -delta_eps * self.time / params.STOP_DECAY + params.EPSILON_START
        else:
            eps = params.EPSILON

        threshold = (1 - eps)
        if np.random.random() < threshold:
            return Q.argmax()  # this returns an index
        else:
            return np.random.randint(0, len(Q) - 1)  # this returns a value!

    def e_greedy_policy(self):
        """this method returns the action index based on an epsilon-greedy policy

        NOTE: it is assumed that the underlying neural network has already
        computed Q values for the given position/velocity
        """
        Q = self.neuralNetwork.Q_outputs
        assert len(Q) == params.NB_OUTPUTS

        if (np.random.random() < 1 - params.EPSILON):
            argmx = Q.argmax()
            if abs(Q[argmx] - 0.0) <= 1e-6:
                return np.random.randint(0, len(Q) - 1)  # this returns a value!
            return Q.argmax()  # this returns an index
        else:
            return np.random.randint(0, len(Q))  # this returns a value!

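# Worked example of the schedule in decaying_e_greedy_policy above, using hypothetical
# parameter values (the params.* constants live elsewhere in the project): with
# EPSILON_START = 1.0, EPSILON = 0.1 and STOP_DECAY = 1000,
#   time = 0     -> eps = 1.0    (fully random exploration)
#   time = 500   -> eps = 0.55
#   time >= 1000 -> eps = 0.1    (mostly greedy)
# so the agent explores heavily early in a trial and becomes greedier over time.
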
# Load training and test data
train_data = np.loadtxt('data/train-small.csv', delimiter=',', skiprows=1)
# test_data = np.loadtxt('data/test.csv', delimiter=',', skiprows=1)

X_train = train_data[:, 1:]
y_train = train_data[0::, 0].astype(int)
# X_test = test_data

# Set up neural network parameters
reg_lambda = 30
epsilon = 0.12
hidden_layer_size = 25
maxiter = 50

# Train the neural network
nn = NeuralNetwork(reg_lambda=reg_lambda, hidden_layer_size=hidden_layer_size,
                   epsilon=epsilon, maxiter=500)
nn.fit(X_train, y_train)

# Make predictions
predictions = nn.predict(X_train)
accuracy = accuracy_score(y_train, predictions)

print(sum(predictions == y_train))
print(y_train.shape[0])
print
print("ACCURACY AFTER")
print("%.2f %% " % (accuracy * 100))
print

# predictions = np.zeros((X_test.shape[0] + 1, 2))

def learnAction(self, action):
    if raw_input("I have never done this before, should I attempt to learn it now? (y/n): ") == "y":
        actionSpecs = open("knowledgeBase/" + action.replace(" ", "_"), "w")
        inputLayerUnits = input("How many input units do I need? ")
        hiddenLayerUnits = input("How many hidden units do I need? ")
        outputLayerUnits = input("How many output units do I need? ")
        actionSpecs.write(str(inputLayerUnits))
        actionSpecs.write("\n")
        actionSpecs.write(str(hiddenLayerUnits))
        actionSpecs.write("\n")
        actionSpecs.write(str(outputLayerUnits))
        actionSpecs.close()

        net = NeuralNetwork(action)
        print "making neural network..."
        net.makeNetwork(inputLayerUnits, hiddenLayerUnits, outputLayerUnits)
        print "initializing random weights..."
        net.initializeRandomWeights()
        print "generating input activations..."
        net.genInputActivations()
        try:
            print "training..."
            net.train()
        except KeyboardInterrupt:
            net.saveWeights()
            print "weights saved"
            return
        except RuntimeError:
            print "Training failed."
            net.saveWeights()
            print "weights saved"
            return
        print "Success!"
        net.saveWeights()
        print "weights saved"
        self.knowledgeBase.append(action)
        print "testing..."
        net.test()
        return
    else:
        print "Ok, maybe later."
        return

import parameters


class TrainingExample():
    def __init__(self, inputs, output):
        self.inputs = inputs
        self.output = output


if __name__ == "__main__":
    # Seed the random number generator
    seed_random_number_generator()

    # Assemble a neural network, with 18 neurons in the first layer,
    # 32 neurons in the second layer and 2 neurons in the third layer
    network = NeuralNetwork([18, 32, 2])

    # Training set
    examples = [TrainingExample([-29.35549,-25.04468,-63.07045,-70.7681,11,10,10,2,-122.4435,-25.93066,-25.31638,-89.88123,-87.62306,6,13,11,0,-112.9629], 1.0),
                TrainingExample([-25.93066,-25.31638,-89.88123,-87.62306,6,13,11,0,-112.9629,-29.35549,-25.04468,-63.07045,-70.7681,11,10,10,2,-122.4435], 0.0),
                TrainingExample([-81.19243,-101.9763,-167.3187,-200.8598,47,23,31,10,-179.9829,-91.31429,-114.3718,-170.2887,-209.5479,51,24,32,10,-178.6222], 1.0),
                TrainingExample([-91.31429,-114.3718,-170.2887,-209.5479,51,24,32,10,-178.6222,-81.19243,-101.9763,-167.3187,-200.8598,47,23,31,10,-179.9829], 0.0),
                TrainingExample([-42.08201,-57.71526,-56.0271,-73.74921,30,16,18,4,-144.6631,-41.72422,-53.60718,-74.16974,-80.0265,30,15,18,5,-141.0959], 1.0),
                TrainingExample([-41.72422,-53.60718,-74.16974,-80.0265,30,15,18,5,-141.0959,-42.08201,-57.71526,-56.0271,-73.74921,30,16,18,4,-144.6631], 0.0),
                TrainingExample([-27.97045,-42.6892,-62.42385,-72.11889,22,13,13,2,-138.0798,-37.23659,-52.0332,-47.41673,-59.97505,29,13,15,4,-147.1644], 1.0),
                TrainingExample([-37.23659,-52.0332,-47.41673,-59.97505,29,13,15,4,-147.1644,-27.97045,-42.6892,-62.42385,-72.11889,22,13,13,2,-138.0798], 0.0),
                TrainingExample([-42.23076,-57.38282,-80.06391,-91.61021,24,15,19,6,-146.6433,-45.00954,-46.97235,-108.6906,-114.4459,18,19,20,3,-133.4822], 1.0),
                TrainingExample([-45.00954,-46.97235,-108.6906,-114.4459,18,19,20,3,-133.4822,-42.23076,-57.38282,-80.06391,-91.61021,24,15,19,6,-146.6433], 0.0)]

    # Create a video and image writer
    fig, writer = generate_writer()

#!/bin/python3
import sys
import random
import numpy as np

sys.path.append('..')
from neural_network import NeuralNetwork

inputLen = 2
hiddenLen = 8
outputLen = 1
learningRate = 0.1

n = NeuralNetwork(inputLen, hiddenLen, outputLen)
# n = NeuralNetwork(2, 2, 1)

training_data = {
    1: {'inputs': np.array([[0], [0]]), 'targets': np.array([[1]])},
    2: {'inputs': np.array([[0], [1]]), 'targets': np.array([[0]])},
    3: {'inputs': np.array([[1], [0]]), 'targets': np.array([[0]])},
    4: {'inputs': np.array([[1], [1]]), 'targets': np.array([[1]])},
}

print("\033[4m" + "\n### Training ###" + "\033[0m")
for i in range(10000):
    x = random.choice(list(training_data.values()))
    inputs = x.get('inputs')
    targets = x.get('targets')
    n.trainSVLearing(inputs, targets, learningRate)

print("\033[4m" + "\n### Testing Phase ###" + "\033[0m")

def train(csv_path, polynomial_degree):
    """
    Train the NN to model the points contained in the given dataset with a
    polynomial of a degree less than or equal to the given degree. Return the
    coefficients of the found polynomial.

    Args:
        csv_path (str): path to a .csv file which contains the dataset of points.
        polynomial_degree (int): maximal degree of the modeled polynomial.

    Returns:
        list: list of coefficients of the found polynomial. The most significant
            coefficient is first on this list.
    """
    x_mat, y_mat = load_dataset(csv_path)
    x_powers_mat = np.power(x_mat, range(polynomial_degree + 1))

    # Normalize only x^1 up to x^n because x^0 is always 1.
    x_powers_mat[:, 1:], x_powers_mean, x_powers_std = normalize_mat(
        x_powers_mat[:, 1:])
    x_mat, x_mean, x_std = normalize_mat(x_mat)
    y_mat, y_mean, y_std = normalize_mat(y_mat)

    # Collect normalization parameters in a dict as they will be needed in
    # estimation.
    normalization_params = {
        "x_mean": x_mean,
        "x_std": x_std,
        "y_mean": y_mean,
        "y_std": y_std,
        "x_powers_mean": x_powers_mean,
        "x_powers_std": x_powers_std
    }
    save_vars_dict(normalization_params, NORMALIZATION_PARAMS_PATH)

    net = NeuralNetwork(polynomial_degree)
    optimizer = AdamOptimizer(net)

    min_loss = None
    best_net_vars = {}
    best_coeffs = None
    batch_i = 0
    num_examples = x_mat.shape[0]
    for i in range(MAX_ITERATIONS):
        # Create a minibatch of examples and advance the batch iterator.
        batch_size = polynomial_degree * BATCH_SIZE_MULTIPLIER
        placeholders = {
            "x_mat": x_mat[batch_i:batch_i + batch_size],
            "y_mat": y_mat[batch_i:batch_i + batch_size],
            "x_powers_mat": x_powers_mat[batch_i:batch_i + batch_size,
                                         :polynomial_degree + 1]
        }
        batch_i += batch_size
        if batch_i >= num_examples:
            batch_i = 0
            shuffle_matrices([x_mat, y_mat, x_powers_mat])

        vals = net.forward_pass(placeholders)
        vals = net.loss_forward_pass(placeholders, vals)

        # Keep track of the best network approximation so far.
        if min_loss is None or vals["loss"] < min_loss:
            min_loss = vals["loss"]
            best_coeffs = vals["avg_coeffs"]
            net.backup(best_net_vars)
        if min_loss <= TARGET_LOSS:
            break

        net.backward_pass(placeholders, vals)

        # Attempt coefficients elimination after the loss hits a certain threshold.
        # The idea is to let the coefficients settle a bit to find out which
        # ones of them are truly unnecessary.
        if vals["loss"] < ELIMINATION_ATTEMPT_THRESHOLD:
            polynomial_degree = try_eliminating_coeffs(normalization_params,
                                                       vals, net, optimizer,
                                                       polynomial_degree)
            best_net_vars = {}
            net.backup(best_net_vars)

        optimizer.perform_update(net)

    save_vars_dict(best_net_vars, NET_PATH)
    coeffs = denormalize_coeffs(best_coeffs[np.newaxis, :], normalization_params,
                                polynomial_degree)
    return list(reversed(coeffs[0].tolist()))

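# Hedged sketch: normalize_mat is called above but defined elsewhere. A column-wise
# z-score normalization that returns (normalized matrix, mean, std), matching how its
# three return values are used, might look like this; the project's version may differ.
import numpy as np

def normalize_mat(mat):
    mean = np.mean(mat, axis=0)
    std = np.std(mat, axis=0)
    std[std == 0] = 1.0  # guard against division by zero for constant columns
    return (mat - mean) / std, mean, std
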
def _create_density_nn(self, nonzero_boost=1e-5):
    """ Creates the neural network for the density NN """
    self.quantile_ph = tf.placeholder(tf.float32, [None, 1])
    if self.density_parametric_form == "gaussian":
        sigma_offset = tf.Variable([0.0])
        self.density_nn = NeuralNetwork.create_full_nnet(
            self.density_layer_sizes + ["2"],
            self.x_concat_ph,
            act_func=getattr(tf.nn, self.act_func),
            output_act_func=None,
            dropout_rate=self.dropout_rate)
        self.mu = self.density_nn.layers[-1][:, 0:1]
        # Cannot let sigma get too close to zero -- allow an offset parameter
        self.sigma = tf.abs(
            self.density_nn.layers[-1][:, 1:2]) + tf.exp(sigma_offset)
        # Add offset parameter to the density NN intercepts so we will train over its value
        self.density_nn.intercepts += [sigma_offset]
        self.cond_dist = tf.distributions.Normal(loc=self.mu, scale=self.sigma)
        self.quantile = self.cond_dist.quantile(self.quantile_ph)
        self.log_prob = self.cond_dist.log_prob(self.y_concat_ph)
    elif self.density_parametric_form.startswith("shifted_exponential"):
        raise ValueError(
            "doesn't work well in tensorflow for some stupid reason")
        rate_offset = tf.Variable(-0.5)
        #raise ValueError("This doesnt work for some weird reason. think tensorflow has issues")
        self.min_y = float(
            self.density_parametric_form.replace("shifted_exponential", ""))
        self.density_nn = NeuralNetwork.create_full_nnet(
            self.density_layer_sizes + ["1"],
            self.x_concat_ph,
            act_func=getattr(tf.nn, self.act_func),
            output_act_func=None,
            dropout_rate=self.dropout_rate)
        self.rate = tf.exp(
            tf.minimum(self.density_nn.layers[-1][:, 0], 2.5)) + tf.exp(rate_offset)
        # Add offset parameter to the density NN intercepts so we will train over its value
        self.density_nn.intercepts += [rate_offset]
        self.cond_dist = tf.distributions.Exponential(rate=self.rate,
                                                      validate_args=True)
        self.log_prob = self.cond_dist.log_prob(self.y_concat_ph - self.min_y)
        #self.log_prob = self.cond_dist.log_prob(self.y_concat_ph)
    elif self.density_parametric_form == "bernoulli":
        self.density_nn = NeuralNetwork.create_full_nnet(
            self.density_layer_sizes + ["1"],
            self.x_concat_ph,
            act_func=getattr(tf.nn, self.act_func),
            output_act_func=None,
            dropout_rate=self.dropout_rate)
        self.logits = self.density_nn.layers[-1]
        self.mu = tf.nn.sigmoid(self.logits)
        self.cond_dist = tf.distributions.Bernoulli(logits=self.logits)
        self.log_prob = self.cond_dist.log_prob(self.y_concat_ph)
    elif self.density_parametric_form.startswith("multinomial"):
        self.density_nn = NeuralNetwork.create_full_nnet(
            self.density_layer_sizes + [str(self.label_size)],
            self.x_concat_ph,
            act_func=getattr(tf.nn, self.act_func),
            output_act_func=None,
            dropout_rate=self.dropout_rate)
        self.logits = self.density_nn.layers[-1]
        self.cond_dist = tf.distributions.Multinomial(total_count=1.,
                                                      logits=self.logits)
        self.mu = self.cond_dist.mean()
        # Weird.... it returns something the wrong shape. -- we do reshape to fix it
        self.log_prob = tf.reshape(
            self.cond_dist.log_prob(self.y_concat_ph), (-1, 1))
        #self.log_prob = tf.log(tf.reduce_sum(self.mu * self.y_concat_ph, axis=1, keepdims=True))
    else:
        raise ValueError("Dont know about this form")
    return self.density_nn

def __init__(self):
    layers = [1, 1]
    self.nn = NeuralNetwork(layers)
    th0 = self.nn.getLayer(0)
    th0[0] = 10
    th0[1] = -20

class App:
    window_name = 'Speedometer'
    spaces = 20
    training_corners_mod = 2

    def __init__(self, video_src=0, pos_x=0, pos_y=0, quality=0.3,
                 damping=20, speed_multi=2, save='', multiprocessed=False,
                 epochs=1, training_accuracy=20, training_length=40,
                 max_level=3, save_net='', load_net='', max_net_err=0):
        """
        :param video_src: video source
        :param pos_x: initial x translation of capture window
        :param pos_y: initial y translation of capture window
        :param quality: quality used by goodFeaturesToTrack
        :param damping: speed = speeds[-damping:]/damping
        :param speed_multi: speed value multiplier (only without neural net)
        :param save: if specified, the output file is saved under this name
        :param multiprocessed: if true, neural net training runs in a separate thread
        :param epochs: number of epochs for the neural network
        :param training_accuracy: number of frames used as the training set for the neural network
        :param training_length: number of frames during which training samples are collected
        :param max_level: maxLevel passed to calcOpticalFlowPyrLK
        :param save_net: if specified, the trained network is saved under this name
        :param load_net: if specified, a previously trained network is loaded from this file
        :param max_net_err: maximum acceptable training error for the neural network
        :return: App instance
        """
        # Tracks related attributes
        tracks_number = 100
        self.track_len = 10
        self.tracks_count = 0
        self.detect_interval = 5
        self.tracks = deque([deque(maxlen=tracks_number)])
        ###########################

        # Speed and distance measurement related attributes
        self.speed = 0
        self.last_speeds = deque([0], maxlen=damping)
        self.speed_multi = speed_multi
        self.distance = 0
        self.multiplier = 0.0001
        ###################################

        self.feature_params = {
            'maxCorners': 10,
            'qualityLevel': quality,
            'minDistance': 7,
            'blockSize': 7
        }
        self.lk_params = {
            'winSize': (21, 21),
            'maxLevel': max_level,
            'criteria': (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                         10, 0.03)}
        self.app_params = {
            'speed_multi': self.multiplier * speed_multi,
            'tracks_number': tracks_number,
        }

        # Frames related attributes
        self.frame_idx = 1
        self.prev_gray = None
        self.cam = create_capture(video_src)
        _, frame = self.cam.read()
        fps = self.cam.get(cv2.CAP_PROP_FPS)
        self.frame_duration = 1 / fps if fps else 0.04

        self.callbacks = defaultdict(lambda: lambda val, mod: val + mod, {
            'winSize': lambda val, mod: (val[0] + mod, val[1] + mod),
            'qualityLevel': lambda val, mod: np.abs(val + mod * 0.1),
            'speed_multi': lambda val, mod: np.abs(val + mod * self.multiplier),
            'criteria': lambda val, mod: (val[0], val[1] + mod,
                                          val[2] + mod * 0.01)
        })
        ############################

        # Interface position related attributes
        height, width, _ = frame.shape
        params_len = (len(self.feature_params) + len(self.lk_params) +
                      len(self.app_params))
        self.start_x = 4 * width / 10 + pos_x
        self.stop_x = 5 * width / 10 + pos_x
        self.start_y = 19 * height / 20 + pos_y
        self.stop_y = height + pos_y
        self.frame_height = height
        self.frame_width = width
        self.middle_x = width / 2
        self.interface = {
            'left': {
                'right_border': self.spaces * 3,
                'top_border': self.frame_height - self.spaces * params_len,
                'clicked': False,
                # Offset for interface rows that are not feature or lk params
                # (e.g. speed multi)
                'index_mod': 2
            },
            'right': {
                'left_border': self.frame_width - self.spaces * 5,
                'top_border': self.frame_height - self.spaces * 3,
                'clicked': False
            }
        }
        self.clicked = False
        ############################################

        # Neural network related attributes
        self.network_tuple = (2, 3, 1)
        self.training = None
        if load_net:
            self.network = NeuralNetwork(epochs=epochs, load=load_net,
                                         save=save_net, scale=width)
        else:
            self.network = None
        self.multiprocessed = multiprocessed
        self.epochs = epochs
        self.save_net = save_net
        self.load_net = load_net
        self.samples = training_accuracy
        self.training_frames = training_length
        self.max_net_error = max_net_err
        ####################################

        # Video capture related attributes
        self.out = None
        if save:
            self.out = create_writer(save, (frame.shape[1], frame.shape[0]),
                                     fps)
        ###################################

    def _on_mouse(self, event, x, y, *_):
        if (x < self.interface['left']['right_border'] and
                y > self.interface['left']['top_border']):
            self._interface('left', event, x, y)
        elif (x > self.interface['right']['left_border'] and
              y > self.interface['right']['top_border']):
            self._interface('right', event, y)
        elif event == cv2.EVENT_LBUTTONDOWN:
            # Begin capturing a new feature detection zone
            self.clicked = True
            self.start_x = self.stop_x = x
            self.start_y = self.stop_y = y
            self.tracks = deque(
                [deque(maxlen=self.app_params['tracks_number'])])
        elif event == cv2.EVENT_LBUTTONUP:
            # End capturing the new feature detection zone
            self.clicked = False
        elif self.clicked and event == cv2.EVENT_MOUSEMOVE:
            # Capturing the new feature detection zone
            self.stop_x = x
            self.stop_y = y

    def _interface(self, which, event, *args):
        if event == cv2.EVENT_LBUTTONDOWN:
            self.interface[which]['clicked'] = True
        elif (event == cv2.EVENT_LBUTTONUP and
              self.interface[which]['clicked']):
            method = getattr(self, '_%s_interface' % which)
            method(*args)

    def _left_interface(self, x, y):
        index_x = x // self.spaces
        index_y = ((self.frame_height - y) // self.spaces -
                   self.interface['left']['index_mod'])
        self.interface['left']['clicked'] = False
        key = (self.lk_params.keys() + self.feature_params.keys())[index_y]
        if index_y == -1:
            temp = self.app_params
            key = 'speed_multi'
        elif key in self.lk_params:
            temp = self.lk_params
        else:
            temp = self.feature_params
        if index_x == 1:
            temp[key] = self.callbacks[key](temp[key], 1)
        elif index_x == 2:
            temp[key] = self.callbacks[key](temp[key], -1)

    def _right_interface(self, y):
        index_y = (self.frame_height - y) // self.spaces
        if index_y == 1:
            self.network = NeuralNetwork(self.network_tuple,
                                         epochs=self.epochs,
                                         save=self.save_net,
                                         scale=self.frame_height,
                                         max_error=self.max_net_error)
            self.training = self.training_frames
            # use fewer features per zone during training
            self.feature_params['maxCorners'] //= self.training_corners_mod
            self.app_params['tracks_number'] //= self.training_corners_mod
        elif index_y == 2:
            self.distance = 0

    def run(self, skip=1, stop=None, tests=False):
        self.start = skip
        if stop is None:
            stop = self.cam.get(cv2.CAP_PROP_FRAME_COUNT)
        else:
            stop -= skip
        while skip:
            _ = self.cam.read()
            skip -= 1
        if not tests:
            cv2.namedWindow(self.window_name)
            cv2.setMouseCallback(self.window_name, self._on_mouse)
        while self.frame_idx < stop:
            _, frame = self.cam.read()
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vis = frame.copy()
            if self.tracks:
                sum_r = self._optical_flow(frame_gray, self.prev_gray)
                self._measure_speed(sum_r)
            if self.training:
                self.training -= 1
            elif not self.training and self.training is not None:
                if self.multiprocessed:
                    if self.network.is_done():
                        self.training = None
                        self._restore_limits_after_training()
                    elif not self.network.training.is_alive():
                        self.network.training.start()
                else:
                    self.training = None
                    self._restore_limits_after_training()
                    self.network.train()
            if not self.frame_idx % self.detect_interval:
                self._get_new_tracks(frame_gray)
            self.frame_idx += 1
            self.prev_gray = frame_gray
            if not tests:
                self._show_and_save(vis)
                key = cv2.waitKey(1)
                if 0xFF & key == 27:
                    # quit on esc key
                    self._clean_up()
                    break
                elif key == 32:
                    # pause on space
                    cv2.waitKey()
        self._clean_up()
        return self.distance

    def _restore_limits_after_training(self):
        self.feature_params['maxCorners'] *= self.training_corners_mod
        self.app_params['tracks_number'] *= self.training_corners_mod

    def _optical_flow(self, current_frame, previous_frame):
        total = 0
        expected_res = 0
        self.tracks_count = 0
        for i, track in enumerate(self.tracks):
            if track:
                p0 = np.reshape([tr[-1] for tr in track], (-1, 1, 2))
                p1 = cv2.calcOpticalFlowPyrLK(previous_frame, current_frame,
                                              p0, None, **self.lk_params)[0]
                p0r = cv2.calcOpticalFlowPyrLK(current_frame, previous_frame,
                                               p1, None, **self.lk_params)[0]
                d = abs(p0 - p0r).reshape(-1, 2).max(-1)
                good = d < 1
                new_tracks = deque(maxlen=self.app_params['tracks_number'])
                sum_r = 0
                for tr, (x, y), good_flag in zip(track, p1.reshape(-1, 2),
                                                 good):
                    if not good_flag:
                        continue
                    tr.append((x, y))
                    new_tracks.append(tr)
                    # pixel shift
                    radius = tr[-1][-1] - tr[-2][-1]
                    if self.training and not i:
                        self.network.add_sample(y=tr[-1][-1], dy=radius,
                                                result=radius)
                    elif self.training:
                        self.network.add_sample(y=tr[-1][-1], dy=radius,
                                                result=expected_res)
                    if self.training is None and self.network:
                        # If the neural network is ready, use it
                        sum_r += self.network.result(y=tr[-1][-1], dy=radius)
                    else:
                        sum_r += radius
                if self.training and not i:
                    # Result used to teach the neural network
                    expected_res = sum_r / len(track)
                self.tracks[i] = new_tracks
                self.tracks_count += len(new_tracks)
                if new_tracks:
                    total += (self.app_params['speed_multi'] * sum_r /
                              len(new_tracks))
        return total / len(self.tracks)

    def _measure_speed(self, sum_r):
        if self.tracks:
            # measure speed based on frame duration and pixel movement
            self.last_speeds.append(3.6 * sum_r / self.frame_duration)
            # speed is calculated as the arithmetic mean of the last speeds
            self.speed = sum(self.last_speeds) / len(self.last_speeds)
            # distance
            self.distance += np.abs(sum_r)

    def _get_new_tracks(self, frame_gray):
        if self.training:
            # If training is active, the feature capture zone is divided into
            # small sub-zones; the bottom one is treated as the teacher for
            # the rest
            self.tracks = deque(
                # one independent deque per sub-zone (a repeated/aliased
                # deque would mix features from all zones)
                [deque([], maxlen=self.app_params['tracks_number'])
                 for _ in range(self.samples)])
            masks = deque()
            counter = self.samples
            while counter:
                # Get sub capture zones
                step = (self.stop_y - self.start_y) // self.samples
                stop_y = self.start_y + counter * step
                start_y = self.start_y + (counter - 1) * step
                self.t_points = (start_y, stop_y)
                masks.append(np.zeros_like(frame_gray))
                masks[-1][start_y:stop_y, self.start_x:self.stop_x] = 1
                counter -= 1
        else:
            self.tracks = deque(
                [deque([], maxlen=self.app_params['tracks_number'])])
            masks = deque([np.zeros_like(frame_gray)])
            masks[0][self.start_y:self.stop_y,
                     self.start_x:self.stop_x] = 1
        for i, mask in enumerate(masks):
            # Don't pick up the same tracks again
            for x, y in [np.int32(tr[-1]) for tr in self.tracks[i]]:
                cv2.circle(mask, (x, y), 5, 0)
            # Get new features
            features = cv2.goodFeaturesToTrack(frame_gray, mask=mask,
                                               **self.feature_params)
            if features is not None:
                # If new features were detected, add them to the rest
                for x, y in features.reshape(-1, 2):
                    self.tracks[i].append(deque([(x, y)],
                                                maxlen=self.track_len))

    def _show_and_save(self, vis):
        # Output - upper left corner
        draw_str(vis, (self.spaces, self.spaces),
                 'frame number: %d' % (self.frame_idx + self.start))
        draw_str(vis, (self.spaces, self.spaces * 2),
                 'track count: %d' % self.tracks_count)
        draw_str(vis, (self.spaces, self.spaces * 3),
                 'speed: %.2f km/h' % np.abs(self.speed))
        draw_str(vis, (self.spaces, self.spaces * 4),
                 'speed without damping: %.2f km/h' %
                 np.abs(self.last_speeds[-1]))
        draw_str(vis, (self.spaces, self.spaces * 5),
                 'average speed: %.2f km/h' %
                 (3.6 * self.distance /
                  (self.frame_idx * self.frame_duration)))
        draw_str(vis, (self.spaces, self.spaces * 6),
                 'traveled distance: %.2f m' % self.distance)

        # Neural network training button - right bottom corner
        # Reset distance button - right bottom corner, above training
        draw_str(vis, (self.frame_width - self.spaces,
                       self.frame_height - self.spaces), 'train', 'r')
        draw_str(vis, (self.frame_width - self.spaces,
                       self.frame_height - self.spaces * 2),
                 'reset distance', 'r')

        # Program params - left bottom corner
        draw_str(vis, (self.spaces, self.frame_height - self.spaces),
                 '+ - speed multi = %s' % (self.app_params['speed_multi'] /
                                           self.multiplier))
        for i, (key, var) in enumerate(self.lk_params.items() +
                                       self.feature_params.items(),
                                       self.interface['left']['index_mod']):
            draw_str(vis, (self.spaces, self.frame_height - i * self.spaces),
                     '+ - %s = %s' % (key, var))

        # tracks
        if self.training is None:
            for track in self.tracks:
                for tr in track:
                    cv2.circle(vis, (tr[0][0], tr[0][-1]), 2, (255, 0, 0))
                    cv2.circle(vis, (tr[-1][0], tr[-1][-1]), 2, (0, 0, 255))
                    cv2.polylines(vis, [np.int32(tr)], False, (0, 255, 0))

        # rectangle in which features are tracked
        cv2.rectangle(vis, (self.start_x, self.start_y),
                      (self.stop_x, self.stop_y), (255, 0, 0))

        # Training progress indicator
        if self.training:
            percentage = 10 * float(
                self.training_frames - self.training) / self.training_frames
            draw_str(vis, (self.middle_x - self.spaces, self.spaces),
                     'Getting samples...', 'm')
            draw_str(vis, (self.middle_x - self.spaces, self.spaces * 2),
                     '%s %d%%' % ('#' * int(percentage), percentage * 10),
                     'm')
        elif self.training is not None:
            draw_str(vis, (self.middle_x - self.spaces, self.spaces),
                     'Training...', 'm')

        cv2.imshow(self.window_name, vis)
        if self.out is not None:
            self.out.write(vis)

    def _clean_up(self):
        if self.out is not None:
            self.out.release()
        self.cam.release()
        cv2.destroyAllWindows()
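# Minimal usage sketch (not part of the original module). The file names
# below are hypothetical, and the helpers used by App (create_capture,
# create_writer, draw_str) are assumed to be importable from the
# surrounding project.
if __name__ == '__main__':
    app = App(video_src='drive.mp4',      # hypothetical input clip
              quality=0.3, damping=20, speed_multi=2,
              save='annotated.avi',       # hypothetical output file
              epochs=5, training_accuracy=20, training_length=40)
    distance = app.run(skip=30)           # skip the first 30 frames
    print('traveled distance: %.2f m' % distance)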
from neural_network import NeuralNetwork
from input_layer import InputLayer
from output_layer import OutputLayer
from hidden_layer import HiddenLayer

# test: train the xor function
model = NeuralNetwork(InputLayer(2),
                      HiddenLayer(2, "sigmoid"),
                      OutputLayer(1, "sigmoid"))

train_input = [[1, 1], [1, 0], [0, 1], [0, 0]]
train_output = [0, 1, 1, 0]

model.train(train_input, train_output, 1, 0.1, 20)

model.predict([1, 1])
model.predict([1, 0])
model.predict([0, 1])
model.predict([0, 0])
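# Hedged follow-up (not in the original snippet): the predict calls above
# discard their return values, so this loop prints each XOR input next to
# its expected and predicted value, using only model.predict as shown.
for sample, expected in zip(train_input, train_output):
    print("input=%s expected=%s predicted=%s"
          % (sample, expected, model.predict(sample)))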
def performAction(self, action):
    actionSpecs = open("knowledgeBase/" + action.replace(" ", "_"), "r")
    inputLayerUnits = actionSpecs.readline()
    hiddenLayerUnits = actionSpecs.readline()
    outputLayerUnits = actionSpecs.readline()
    actionSpecs.close()
    net = NeuralNetwork(action)
    print "making neural network..."
    net.makeNetwork(int(inputLayerUnits), int(hiddenLayerUnits),
                    int(outputLayerUnits))
    print "generating input activations..."
    net.genInputActivations()
    print "loading weights..."
    net.loadWeights("weights/" + action.replace(" ", "_"))
    if raw_input("Continue training? (y/n): ") == "y":
        try:
            print "training..."
            net.train()
        except KeyboardInterrupt:
            net.saveWeights()
            print "weights saved"
        except RuntimeError:
            print "Training failed."
            net.saveWeights()
            print "weights saved"
]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    MLPClassifier(alpha=1),
    AdaBoostClassifier(),
    GaussianNB(),
    QuadraticDiscriminantAnalysis(),
    NeuralNetwork([
        NeuronLayer(10, 2, 'leaky_relu', True),
        NeuronLayer(1, 10, 'sigmoid', True)
    ], learning_rate=0.1, step_decay_factor=0.99)
]

X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [
    make_moons(noise=0.3, random_state=0),
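# Hedged sketch only -- the snippet above is cut off inside the datasets
# list and its evaluation loop is not included. This follows the usual
# scikit-learn comparison pattern instead, assuming the datasets list is
# completed and that the custom NeuralNetwork exposes the scikit-learn
# style fit(X, y) / score(X, y) interface like the other classifiers.
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

for X, y in datasets:
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.4, random_state=42)
    for clf in classifiers:
        clf.fit(X_train, y_train)              # train on this dataset
        score = clf.score(X_test, y_test)      # mean accuracy on held-out data
        print(type(clf).__name__, round(score, 3))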
def main():
    while True:
        data_set_name = input("Please provide the name of the data set you want to work with: ")

        # Load, Randomize, Normalize, Discretize Dataset
        data_set = Dataset()
        data_set.read_file_into_dataset(
            "C:\\Users\\Grant\\Documents\\School\\Winter 2016\\CS 450\\Neural\\" + data_set_name)
        data_set.randomize()
        data_set.data = normalize(data_set.data)
        #data_set.discretize()
        #print(data_set.data)
        data_set.set_missing_data()

        # Split Dataset
        split_percentage = 0.7
        data_sets = data_set.split_dataset(split_percentage)
        training_set = data_sets[0]
        testing_set = data_sets[1]

        # Create Custom Classifier, Train Dataset, Predict Target From Testing Set
        iterations = int(input("How many iterations do you want to do? "))
        layers = int(input("How many layers do you want in your neural network? "))
        num_nodes = []
        for i in range(layers):
            if i + 1 == layers:
                number = int(input("How many nodes on the output layer? "))
            else:
                number = int(input("How many nodes on the " + str(i) + " layer? "))
            num_nodes.append(number)

        neuralNetwork = NeuralNetwork(iterations)
        neuralNetwork.create_layered_network(num_nodes,
                                             len(training_set.feature_names))
        #neuralNetwork.display_network()
        neuralNetwork.train(training_set)
        predictions = neuralNetwork.predict(testing_set)

        # Check Results
        my_accuracy = get_accuracy(predictions, testing_set.target)
        print("Accuracy: " + str(my_accuracy) + "%")

        # Compare To Existing Implementations
        layers_objs = []
        for i in range(layers):
            if i + 1 == layers:
                layers_objs.append(Layer("Softmax", units=num_nodes[i]))
            else:
                layers_objs.append(Layer("Sigmoid", units=num_nodes[i]))

        mlp_nn = Classifier(layers=layers_objs, learning_rate=0.4,
                            n_iter=iterations)
        mlp_nn.fit(np.array(training_set.data), np.array(training_set.target))
        predictions = mlp_nn.predict(np.array(testing_set.data))
        mlp_nn_accuracy = get_accuracy(predictions, testing_set.target)
        print("NN Accuracy: " + str(mlp_nn_accuracy) + "%")

        create_csv_file(neuralNetwork.accuracies,
                        "C:\\Users\\Grant\\Documents\\School\\Winter 2016\\CS 450\\Neural\\" + data_set_name + ".csv")

        # Do another or not
        toContinue = False
        while True:
            another = input("Do you want to examine another dataset? (y / n) ")
            if another != 'y' and another != 'n':
                print("Please provide your answer in a 'y' or 'n' format.")
            elif another == 'y':
                toContinue = True
                break
            else:
                toContinue = False
                break
        if not toContinue:
            break
path_ts = DIR_DATA + TS_FILE

dim_in = 6
one_hot = 17
dim_out = 1

parser = Monks_parser(path_tr, path_ts)

X_train, Y_train, X_test, Y_test = parser.parse(dim_in, dim_out, one_hot)

Y_train = change_output_value(Y_train, 0, -1)
Y_test = change_output_value(Y_test, 0, -1)

dim_in = one_hot
dim_hid = 2

model = NeuralNetwork('mse', 'accuracy1-1')
model.add_layer(dim_hid, input_dim=dim_in, activation='sigmoid',
                kernel_initialization=RandomUniformInitialization())
model.add_layer(dim_out, activation='tanh',
                kernel_initialization=RandomUniformInitialization(-1, 1))

model.compile(optimizer=SGD(lr=0.5, mom=0.8))

history = model.fit(X_train, Y_train, 120, X_train.shape[0],
                    ts=(X_test, Y_test),
import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from neural_network import NeuralNetwork
from sklearn.cross_validation import train_test_split

digits = load_digits()
X = digits.data
y = digits.target
X -= X.min()
X /= X.max()

nn = NeuralNetwork([64, 10, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)

print "start fitting"
nn.fit(X_train, labels_train, epochs=3000)

predictions = []
for i in xrange(X_test.shape[0]):
    o = nn.predict(X_test[i])
    #print 'predict_value', np.argmax(o)
    #print 'true_value', y_test[i]; break
    predictions.append(np.argmax(o))

print confusion_matrix(y_test, predictions)
print classification_report(y_test, predictions)

import pylab as pl
#pl.matshow(digits.images[0])
# investigate convergence
n_hidden_layers = 5
n_hidden_nodes = 15
penalties = [0, 0.01, 0.1, 1]

n_minibatches = 5
n_epochs = int(2e2)

a1 = 2.0e-3
a2 = 1.2e0

std_W = 0.1
const_b = 0

for penalty in penalties:
    NN = NeuralNetwork(n_hidden_layers, n_hidden_nodes, penalty,
                       activation="tanh")
    NN.set_learning_params(a1, a2)
    NN.fit(X_train, Z_train, n_minibatches, n_epochs, std_W=std_W,
           const_b=const_b, track_cost=[X_test, Z_test])
    Z_pred = NN.predict(X_test)
    print(f"Neural Network with penalty lambda = {penalty}")
    print("  MSE score =", MSE(Z_test, Z_pred))
    print("  R2 score =", R2(Z_test, Z_pred))
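# Hedged extension (not in the original snippet): the loop above only prints
# the final MSE and R2 per penalty, so this variant collects the scores and
# plots them for the convergence comparison. Only calls already used above
# (fit, predict, MSE, R2) are assumed for the network.
import matplotlib.pyplot as plt

mse_scores, r2_scores = [], []
for penalty in penalties:
    NN = NeuralNetwork(n_hidden_layers, n_hidden_nodes, penalty,
                       activation="tanh")
    NN.set_learning_params(a1, a2)
    NN.fit(X_train, Z_train, n_minibatches, n_epochs, std_W=std_W,
           const_b=const_b, track_cost=[X_test, Z_test])
    Z_pred = NN.predict(X_test)
    mse_scores.append(MSE(Z_test, Z_pred))
    r2_scores.append(R2(Z_test, Z_pred))

plt.plot(penalties, mse_scores, marker="o", label="MSE")
plt.plot(penalties, r2_scores, marker="s", label="R2")
plt.xlabel("penalty lambda")
plt.ylabel("test score")
plt.legend()
plt.show()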