def analyzeSymbol():
    trainingData = getTrainingData()

    network = NeuralNetwork(inputNodes=3, hiddenNodes=5, outputNodes=1)
    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(0)
    returnPrice = network.test(predictionData)

    # de-normalize and return the predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])
    return predictedStockPrice
def test_neuralNetwork_adam():
    from sklearn.neural_network._stochastic_optimizers import AdamOptimizer

    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1, neurons=3, outputs=1,
                       activations='sigmoid', silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    learning_rate = 0.001
    yhat = nn.forward_pass(X)
    nn.backpropagation(yhat.T, target.T)
    nn.learning_rate = learning_rate
    nn.initializeAdam()
    nn.adam()

    skl_adam = AdamOptimizer(params=nn.param, learning_rate_init=learning_rate)
    upd = skl_adam._get_updates(nn.grad)

    for update_nn, update_skl in zip(nn.change, upd):
        assert update_nn == pytest.approx(update_skl)
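# For reference, a minimal NumPy sketch of the Adam update rule that the test
# above checks against scikit-learn's implementation (a hypothetical standalone
# helper, not part of the NeuralNetwork class; default beta/epsilon values assumed):
import numpy as np

def adam_update(param, grad, m, v, t, lr=0.001, beta1=0.9, beta2=0.999, eps=1e-8):
    # Update biased first- and second-moment estimates.
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad**2
    # Bias-correct the moments (t counts steps starting at 1), then step.
    m_hat = m / (1 - beta1**t)
    v_hat = v / (1 - beta2**t)
    return param - lr * m_hat / (np.sqrt(v_hat) + eps), m, v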
def init():
    # Read the data set
    print('Starting to read the database...')
    data = leitura_csv('data/XOR_Training.csv')
    training_data = get_training_data(data)
    test_data = get_test_data(data)

    # Training phase
    print('Starting training phase...')
    neuralNetwork = NeuralNetwork(5, len(training_data[0][0]), len(training_data[0][1]))
    for i in range(10000):
        training_inputs, training_outputs = random.choice(training_data)
        neuralNetwork.training(training_inputs, training_outputs)
    print('Training complete')

    # Test phase
    print('Starting test phase:')
    error_sum = 0
    for i in range(len(test_data)):
        test_inputs, test_outputs = test_data[i][:]
        calculated_output = neuralNetwork.feed_forward(test_inputs)
        if not is_valid_output(test_outputs, calculated_output):
            error_sum += 1
    print('Total items in the test set: ', len(test_data))
    print('Total correct: ', len(test_data) - error_sum)
    print('Errors in the test phase: ', error_sum)
def analyzeSymbol(stockSymbol):
    startTime = time.time()

    trainingData = getTrainingData(stockSymbol)

    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)
    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])

    # create return object, including the amount of time used to predict
    returnData = {}
    returnData['price'] = predictedStockPrice
    returnData['time'] = time.time() - startTime

    return returnData
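# denormalizePrice is referenced above but not defined in this listing. A
# minimal sketch of what it plausibly does, assuming the prices were min-max
# normalized (with predictionData[1] and predictionData[2] holding the min and max):
def denormalizePrice(price, minimum, maximum):
    # Invert x_norm = (x - min) / (max - min).
    return price * (maximum - minimum) + minimum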
def mutation(self):
    for i in range(0, self.nNotChanged):
        # Note: this aliases the stored network, so the source individual is
        # mutated in place rather than copied.
        mutatedNN = self.populationArray[i].nn

        for j in range(0, self.numberOfWeightsToMutate_1):
            _j = self.getRandomNumberInteger(mutatedNN.nHiddenLayer1, 0)
            _i = self.getRandomNumberInteger(mutatedNN.nInputLayer, 0)
            _mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
            mutatedNN.W1[_i][_j] = mutatedNN.W1[_i][_j] + _mutationValue
            mutatedNN.b1[0][_j] = mutatedNN.b1[0][_j] + _mutationValue

        for j in range(0, self.numberOfWeightsToMutate_2):
            _j = self.getRandomNumberInteger(mutatedNN.nHiddenLayer2, 0)
            _i = self.getRandomNumberInteger(mutatedNN.nHiddenLayer1, 0)
            _mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
            mutatedNN.W2[_i][_j] = mutatedNN.W2[_i][_j] + _mutationValue
            mutatedNN.b2[0][_j] = mutatedNN.b2[0][_j] + _mutationValue

        for j in range(0, self.numberOfWeightsToMutate_3):
            _j = self.getRandomNumberInteger(mutatedNN.nHiddenLayer3, 0)
            _i = self.getRandomNumberInteger(mutatedNN.nHiddenLayer2, 0)
            _mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
            mutatedNN.W3[_i][_j] = mutatedNN.W3[_i][_j] + _mutationValue
            mutatedNN.b3[0][_j] = mutatedNN.b3[0][_j] + _mutationValue

        # Overwrite a network in the lower half of the population.
        index = int(self.nPopulation - (i + 1))
        self.populationArray[index].nn = mutatedNN
def analyzeSymbol(stockSymbol):
    startTime = time.time()
    flag = 0

    trainingData = getTrainingData(stockSymbol)

    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)
    network.train(trainingData)

    for i in range(0, 5):
        # get rolling data for most recent day
        predictionData = getPredictionData(stockSymbol, flag)
        returnPrice = network.test(predictionData)

        # de-normalize and return predicted stock price
        predictedStockPrice = denormalizePrice(returnPrice, predictionData[1], predictionData[2])
        print(predictedStockPrice)
        flag += 1

    global new_value
    new_value = predictedStockPrice
    return predictedStockPrice
def test_neuralNetwork_sgd():
    from sklearn.neural_network._stochastic_optimizers import SGDOptimizer

    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1, neurons=3, outputs=1,
                       activations='sigmoid', silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    learning_rate = 0.001
    yhat = nn.forward_pass(X)
    nn.backpropagation(yhat.T, target.T)
    nn.learning_rate = learning_rate

    initial_params = copy.deepcopy(nn.weights + nn.biases)
    nn.sgd()

    grad = nn.d_weights + nn.d_biases
    params = nn.weights + nn.biases
    change = [p - i_p for p, i_p in zip(params, initial_params)]

    skl_sgd = SGDOptimizer(params=initial_params,
                           learning_rate_init=learning_rate,
                           nesterov=False,
                           momentum=1.0)
    upd = skl_sgd._get_updates(grad)

    for update_nn, update_skl in zip(change, upd):
        assert update_nn == pytest.approx(update_skl)
def crossDNA(self, parent1, parent2):
    child = NeuralNetwork()

    # ------------------------------------------------------------------ #
    # W1
    crossoverPoint = self.getRandomNumberInteger(parent1.nHiddenLayer1, 0)
    for i in range(0, parent1.nInputLayer):
        for j in range(0, crossoverPoint):
            child.W1[i][j] = parent1.W1[i][j]
            child.b1[0][j] = parent1.b1[0][j]
    for i in range(0, parent1.nInputLayer):
        for j in range(crossoverPoint, parent1.nHiddenLayer1):
            child.W1[i][j] = parent2.W1[i][j]
            child.b1[0][j] = parent2.b1[0][j]

    # ------------------------------------------------------------------ #
    # W2
    crossoverPoint = self.getRandomNumberInteger(parent1.nHiddenLayer2, 0)
    for i in range(0, parent1.nHiddenLayer1):
        for j in range(0, crossoverPoint):
            child.W2[i][j] = parent1.W2[i][j]
            child.b2[0][j] = parent1.b2[0][j]
    for i in range(0, parent1.nHiddenLayer1):
        for j in range(crossoverPoint, parent1.nHiddenLayer2):
            child.W2[i][j] = parent2.W2[i][j]
            child.b2[0][j] = parent2.b2[0][j]

    # ------------------------------------------------------------------ #
    # W3
    crossoverPoint = self.getRandomNumberInteger(parent1.nHiddenLayer3, 0)
    for i in range(0, parent1.nHiddenLayer2):
        for j in range(0, crossoverPoint):
            child.W3[i][j] = parent1.W3[i][j]
            child.b3[0][j] = parent1.b3[0][j]
    for i in range(0, parent1.nHiddenLayer2):
        for j in range(crossoverPoint, parent1.nHiddenLayer3):
            child.W3[i][j] = parent2.W3[i][j]
            child.b3[0][j] = parent2.b3[0][j]

    # ------------------------------------------------------------------ #
    # WOut
    crossoverPoint = self.getRandomNumberInteger(parent1.nOutputLayer, 0)
    for i in range(0, parent1.nHiddenLayer3):
        for j in range(0, crossoverPoint):
            child.WOut[i][j] = parent1.WOut[i][j]
            child.bOut[0][j] = parent1.bOut[0][j]
    for i in range(0, parent1.nHiddenLayer3):
        for j in range(crossoverPoint, parent1.nOutputLayer):
            child.WOut[i][j] = parent2.WOut[i][j]
            child.bOut[0][j] = parent2.bOut[0][j]

    return child
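# The layer-by-layer loops in crossDNA implement single-point crossover. A
# compact NumPy sketch of the same idea on a flat weight vector (an
# illustrative helper, not part of the original class):
import numpy as np

def single_point_crossover(w1, w2, rng=np.random.default_rng()):
    # Pick a cut point; genes before it come from parent 1, the rest from parent 2.
    point = rng.integers(0, w1.size)
    return np.concatenate([w1[:point], w2[point:]])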
def test_predict(self):
    target = NeuralNetwork()
    target.addLayer(10, 10, lambda x: x**2)

    prediction = target.predict(
        tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=tf.dtypes.float32),
        [tf.zeros([10, 10])],
        [tf.zeros([10])])

    tf.debugging.assert_equal(prediction, tf.zeros(10))
def main():
    splitRatio = 0.8

    dataset = df.loadCsv(r"dataset.csv")
    groupedDataset = df.groupDatasetByQuality(dataset)
    workingDataset = selectionAttributes(groupedDataset)
    workingDataset = normalizeData(workingDataset)
    trainingSet, testSet = df.splitDataset(workingDataset, splitRatio)

    naiveBayesCall(trainingSet, testSet)
    NeuralNetwork(trainingSet, testSet)
def load(self, path):
    if not isinstance(path, Path):
        path = Path(path)

    with open(path / "EvolutionStrategyProgram.txt") as json_file:
        data = json.load(json_file)

    self.generation = data["generation"]
    self.neuralNetworks = [
        NeuralNetwork(path=path / f"NeuralNetwork{number}.txt")
        for number in data["networks"]
    ]
def getAccuracy(var, begin, name, step, loop, path="./results/"):
    value = begin
    for i in range(loop):
        print(i)

        # create network
        if var == 'hid':
            nn = NeuralNetwork(hid=value, act=ACT)
        elif var == 'lay':
            nn = NeuralNetwork(lay=value, act=ACT)
        elif var == 'lr':
            nn = NeuralNetwork(lr=value, act=ACT)
        elif var == 'epoch':
            nn = NeuralNetwork(act=ACT)

        # train network
        if var == 'epoch':
            nn.train(ds.train_data, ds.train_labels_arr, value, DATA)
        else:
            nn.train(ds.train_data, ds.train_labels_arr, EPOCH, DATA)

        # counters (loop variable renamed to avoid shadowing the outer i)
        correct = 0
        false = 0
        for t in range(TEST):
            output = nn.feedforward(ds.test_data[t], isRound=False, isSoftmax=True)
            output_digit = np.where(output == np.amax(output))
            if output_digit[0][0] == ds.test_labels[t]:
                correct += 1
            else:
                false += 1

        # calc accuracy
        accuracy = (correct * 100) / (correct + false)

        # log accuracy
        logResults(path, name, value, accuracy)

        # increment value
        value += step
def __init__(self):
    # Set bird dimensions
    self.birdWidth = 34
    self.birdHeight = 24
    self.birdX = 50

    # Initialization variables for jump (standard jump)
    '''self.birdY = 200
    self.gravityAcceleration = 0
    #self.isJumping = 0
    self.jumpHeight = 20
    self.vy = 0'''

    # Initialization variables for jump (parabolic jump)
    self.birdY = 200
    self.gravityAcceleration = 1
    self.jumpHeight = -8
    self.vy = 0

    # Initialization variables for life
    self.dead = 0

    # Initialization variables for the neural network
    self.birdPositionX = 0
    self.birdPositionY = 0
    self.normPosX = 0
    self.normPosY = 0
    self.distanceTraveled = 0

    # Initialization variables for score
    self.score = 0
    self.flagUpdateScore = 0

    # Initialization for features
    self.iD = 0

    # Initialization: number of jumps made
    self.jumpMade = 0

    # Initialization NN
    self.nn = NeuralNetwork()

    # Initialization GUI bird
    self.bird = pygame.Rect(self.birdX, self.birdY, self.birdWidth, self.birdHeight)

    # Set GUI bird
    self.birdImgs = [
        pygame.image.load("img/blueBirdFlap0.png").convert_alpha()
    ]
def mutation(self):
    # self. prefixes assumed for nNotChanged, numberOfWeightsToMutate,
    # nPopulation, and populationArray, matching the three-layer variant
    # of this method.
    for i in range(0, self.nNotChanged):
        mutatedNN = self.populationArray[i].nn
        for j in range(0, self.numberOfWeightsToMutate):
            _j = self.getRandomNumberInteger(mutatedNN.nNeuronsLayer1, 0)
            _i = self.getRandomNumberInteger(mutatedNN.nInputs, 0)
            _mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
            mutatedNN.W1[_i][_j] = mutatedNN.W1[_i][_j] + _mutationValue
        index = int(self.nPopulation - (i + 1))
        self.populationArray[index].nn = mutatedNN
def test_neuralNetwork_network(silent=False):
    # Set up a scikit-learn neural network, copy its weights and biases over
    # to our network, and verify that the two give the exact same result.
    from sklearn.neural_network import MLPRegressor

    X = [[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]]
    y = [0, 2, 4, 6, 8, 10]

    mlp = MLPRegressor(solver='sgd',
                       alpha=0.0,
                       hidden_layer_sizes=(3, 3),
                       random_state=1,
                       activation='relu')
    mlp.fit(X, y)
    W_skl = mlp.coefs_
    b_skl = mlp.intercepts_

    nn = NeuralNetwork(inputs=1,
                       outputs=1,
                       layers=3,
                       neurons=3,
                       activations='relu',
                       silent=silent)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    W_nn = nn.weights
    b_nn = nn.biases
    for i in range(len(W_nn)):
        W_nn[i] = W_skl[i]
    for i in range(len(b_nn)):
        b_nn[i] = np.expand_dims(b_skl[i], axis=1)

    X_test = np.array([[1.2857], [9.2508255], [-5.25255], [3.251095]])

    output_skl = mlp.predict(X_test)
    output_nn = np.squeeze(nn(X_test.T))

    if not silent:
        print("%20.15f %20.15f %20.15f %20.15f" % (*output_skl, ))
        print("%20.15f %20.15f %20.15f %20.15f" % (*output_nn, ))

    assert output_nn == pytest.approx(output_skl)
    return nn, mlp
def __init__(self, canvas, colour, y):
    self.brain = NeuralNetwork(4, 4, 1)
    self.move = 0
    self.x = 70
    self.y = y
    self.isDead = False
    self.canvas = canvas
    self.colour = colour
    self.prevLoc = None
    self.radius = 10
    self.surface = pygame.Surface((self.radius * 2, self.radius * 2))
    self.surface.fill(BLUE)
    self.prevLoc = (self.x - self.radius, self.y - self.radius)
    self.fitness = 0
    # self.score = 0
    self.isChamp = False
    self.distance = 0
def test_neuralNetwork_init():
    # Ensure the sizing is correctly handled when creating a new instance
    # of the network class.
    inputs = 6
    outputs = 4
    layers = 3
    neurons = 87

    nn = NeuralNetwork(inputs=inputs,
                       outputs=outputs,
                       layers=layers,
                       neurons=neurons)

    assert nn.inputs == inputs
    assert nn.outputs == outputs
    assert nn.layers == layers
    assert nn.neurons == neurons
def main():
    nn = NeuralNetwork(config.numLayers, config.numClasses,
                       config.weightInitialisation, config.activationFn,
                       config.weightDecay)
    nn.initialiseParams(len(x_train[0]) * len(x_train[0]), config.numNeurons)

    # Integer division so np.random.randint receives an int bound.
    sample = np.random.randint(3 * len(x_train) // 4)
    nn.forwardPropagate(x_train[sample])
    nn.momentumGradDesc(x_train, y_train, config.maxIterations,
                        config.learningRate, config.batchSize, config.gamma)

    predictions = []
    predProbs = []
    test_acc = 0
    test_entropy = 0
    test_mse = 0
    for i in range(len(x_test)):
        nn.forwardPropagate(x_test[i])
        predictions.append(nn.predictedClass)
        predProbs.append(nn.output[nn.predictedClass])

    test_acc = accuracy(y_test, predictions)
    test_entropy = crossEntropyLoss(y_test, predProbs)
    test_mse = MSEloss(y_test, predictions)

    confusion_matrix = np.zeros((config.numClasses, config.numClasses))
    for i in range(len(y_test)):
        confusion_matrix[predictions[i]][y_test[i]] += 1

    df_cm = pd.DataFrame(confusion_matrix,
                         index=[i for i in "0123456789"],
                         columns=[i for i in "0123456789"])
    plt.figure(figsize=(10, 10))
    sn.heatmap(df_cm, annot=True)
    plt.title("Confusion Matrix")
    plt.xlabel("y_test")
    plt.ylabel("y_pred")
    wandb.log({"plot": wandb.Image(plt)})
    plt.show()

    # Log metrics in wandb
    metrics = {
        'test_acc': test_acc,
        # 'test_entropy': test_entropy,
        "test_mse": test_mse,
        # "confusion_matrix": confusion_matrix,
    }
    wandb.log(metrics)
    run.finish()
def crossDNA(self, parent1, parent2, crossoverPoint):
    child = NeuralNetwork()

    # W1: genes before the crossover point come from parent1, the rest from
    # parent2. (The second range presumably starts at the crossover point,
    # mirroring the W2 loops below; a range(0, crossoverPoint) here would
    # simply overwrite parent1's genes.)
    for i in range(0, parent1.nInputs):
        for j in range(0, crossoverPoint):
            child.W1[i][j] = parent1.W1[i][j]
    for i in range(0, parent1.nInputs):
        for j in range(crossoverPoint, parent1.nNeuronsLayer1):
            child.W1[i][j] = parent2.W1[i][j]

    # NB: the 0 here is because W2 is [_, _, _] (a single output column)
    for i in range(0, crossoverPoint):
        child.W2[i][0] = parent1.W2[i][0]
    for i in range(crossoverPoint, parent2.nNeuronsLayer1):
        child.W2[i][0] = parent2.W2[i][0]

    return child
def test_neuralNetwork_set():
    inputs = 6
    outputs = 4
    layers = 3
    neurons = 87

    nn = NeuralNetwork(inputs=inputs,
                       outputs=outputs,
                       layers=layers,
                       neurons=neurons)

    new_inputs = 35
    new_outputs = 23
    new_layers = 3
    new_neurons = 10

    # Only the inputs should change
    nn.set(inputs=new_inputs)
    assert nn.inputs == new_inputs
    assert nn.outputs == outputs
    assert nn.layers == layers
    assert nn.neurons == neurons

    # Only the inputs and the outputs should have changed
    nn.set(outputs=new_outputs)
    assert nn.inputs == new_inputs
    assert nn.outputs == new_outputs
    assert nn.layers == layers
    assert nn.neurons == neurons

    # Inputs, outputs, and the number of layers should have changed
    nn.set(layers=new_layers)
    assert nn.inputs == new_inputs
    assert nn.outputs == new_outputs
    assert nn.layers == new_layers
    assert nn.neurons == neurons

    # All the values should be new at this point
    nn.set(neurons=new_neurons)
    assert nn.inputs == new_inputs
    assert nn.outputs == new_outputs
    assert nn.layers == new_layers
    assert nn.neurons == new_neurons
def predict(self, data):
    nn = NeuralNetwork()
    l1 = Layer(56, 54)
    l2 = Layer(54, 25)

    nn.add(l1)
    nn.add(ActivationLayer(relu, relu_derivative))
    nn.add(l2)
    nn.add(ActivationLayer(sigmoid, sigmoid_derivative))

    # Load pre-trained weights and biases from disk.
    l1.weights = np.load('weights1.npy')
    l2.weights = np.load('weights2.npy')
    l1.bias = np.load('bias1.npy')
    l2.bias = np.load('bias2.npy')

    out = nn.predict(data)
    pred = np.argmax(out)
    return pred
def __init__(self, x, y):
    self.neuralNet = NeuralNetwork(7, 2, 1, 5)
    self.neuralNet.create()
    self.fitness = 0
    self.frontWidth = 20
    self.sideWidth = 40
    self.position = (x, y)
    self.direction = 0
    self.edgesPoints = [
        [self.position[0] - self.sideWidth // 2, self.position[1] - self.frontWidth // 2],
        [self.position[0] - self.sideWidth // 2, self.position[1] + self.frontWidth // 2],
        [self.position[0] + self.sideWidth // 2, self.position[1] + self.frontWidth // 2],
        [self.position[0] + self.sideWidth // 2, self.position[1] - self.frontWidth // 2],
        [self.position[0] - self.sideWidth // 2, self.position[1] - self.frontWidth // 2]
    ]
    self.edgesPointsAprox = self.edgesPoints
    self.speed = 10
    self.isAlive = True
    self.rayPoints = [[], [], [], [], [], [], []]
    self.inputs = [0, 0, 0, 0, 0, 0, 0]
    self.lastsCookies = []
    self.cookie = 0
def test_addLayer(self):
    activationFunction1 = lambda x: x
    activationFunction2 = lambda x: x**2

    target = NeuralNetwork()

    target.addLayer(10, 20, activationFunction1)
    self.assertEqual(len(target._layers), 1)
    self.assertEqual(len(target._layers[0]), 10)
    tf.debugging.assert_equal(target._layers[0][0]._weights, tf.zeros(20))
    tf.debugging.assert_equal(target._layers[0][0]._bias,
                              tf.Variable(0, dtype=tf.dtypes.float32))
    self.assertEqual(target._layers[0][0]._activationFunction, activationFunction1)

    target.addLayer(5, 15, activationFunction2)
    self.assertEqual(len(target._layers), 2)
    self.assertEqual(len(target._layers[1]), 5)
    tf.debugging.assert_equal(target._layers[1][0]._weights, tf.zeros(15))
    tf.debugging.assert_equal(target._layers[1][0]._bias,
                              tf.Variable(0, dtype=tf.dtypes.float32))
    self.assertEqual(target._layers[1][0]._activationFunction, activationFunction2)
def __init__(self, windowWidth, windowHeight, brain=False):
    self.width = windowWidth
    self.height = windowHeight
    self.pos = Vector2(windowWidth / 2, windowHeight / 2)
    self.vel = Vector2()
    self.acc = 0
    self.maxVel = 4
    self.thrust = 0.1
    self.dir = -90
    self.dirDelta = -90
    self.turnSpeed = 4
    self.damp = 0.01
    self.size = 14
    self.bullets = []
    self.shootDelta = 0
    self.brain = brain or NeuralNetwork(45, 60, 4)
def generateOffsprings(self):
    # The sigma (and variation) arrays contain, for each network, the
    # self-adaptive parameters for both weights and biases, with the biases
    # stored as the last column of each matrix.
    for network, index in zip(self.neuralNetworks[:NETWORKS_NUMBER // 2],
                              range(NETWORKS_NUMBER // 2, NETWORKS_NUMBER)):
        variatedSigmas = network.mutateSigmas() * [
            np.random.randn(FIRST_LAYER_LENGTH, INPUT_LAYER_LENGTH + 1),
            np.random.randn(SECOND_LAYER_LENGTH, FIRST_LAYER_LENGTH + 1),
            np.random.randn(OUTPUT_LAYER_LENGTH, SECOND_LAYER_LENGTH + 1)
        ]
        variatedWeights = (variatedSigmas[0][:, :-1],
                           variatedSigmas[1][:, :-1],
                           variatedSigmas[2][:, :-1])
        variatedBiases = (np.array(variatedSigmas[0][:, -1])[:, np.newaxis],
                          np.array(variatedSigmas[1][:, -1])[:, np.newaxis],
                          np.array(variatedSigmas[2][:, -1])[:, np.newaxis])

        self.neuralNetworks[index] = NeuralNetwork(
            weights=network.weights + variatedWeights,
            biases=network.biases + variatedBiases,
            number=self.generation * (NETWORKS_NUMBER // 2) + index + 1,
            parent=network.number,
            sigmas=variatedSigmas)

    self.generation += 1
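# A minimal sketch of the self-adaptive mutation scheme used above, shown for
# one flat parameter vector (illustrative; tau follows the common 1/sqrt(2n)
# evolution-strategy heuristic, which is an assumption, not this code's value):
import numpy as np

def self_adaptive_mutation(params, sigmas, rng=np.random.default_rng()):
    # Mutate the step sizes first, then use them to perturb the parameters.
    tau = 1.0 / np.sqrt(2.0 * params.size)
    new_sigmas = sigmas * np.exp(tau * rng.standard_normal(params.size))
    new_params = params + new_sigmas * rng.standard_normal(params.size)
    return new_params, new_sigmas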
def test_neuralNetwork_fit_sgd():
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1, neurons=3, outputs=1,
                       activations='sigmoid', silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=False,
           silent=True,
           epochs=100)
    loss_after_100 = nn.loss

    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=False,
           silent=True,
           epochs=100)
    loss_after_200 = nn.loss

    assert loss_after_200 < loss_after_100
def test_neuralNetwork_fit_adam():
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1, neurons=3, outputs=1,
                       activations='tanh', silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=True,
           silent=False,
           epochs=100,
           optimizer='adam')
    loss = nn.loss

    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=True,
           silent=False,
           epochs=100,
           optimizer='adam')

    assert loss > nn.loss
def __init__(self, path):
    # If no data is loaded, initializes the data from generation 0.
    if path is None or not os.path.isdir(path):
        self.generation = 0
        # The networks are initialized with the tanh activation function.
        # Space is also allocated for the offspring.
        self.neuralNetworks = [
            NeuralNetwork(number=number + 1,
                          sigmas=np.array([
                              np.full((FIRST_LAYER_LENGTH, INPUT_LAYER_LENGTH + 1), 0.05),
                              np.full((SECOND_LAYER_LENGTH, FIRST_LAYER_LENGTH + 1), 0.05),
                              np.full((OUTPUT_LAYER_LENGTH, SECOND_LAYER_LENGTH + 1), 0.05)
                          ]))
            for number in range(NETWORKS_NUMBER // 2 * self.generation,
                                NETWORKS_NUMBER // 2 * (self.generation + 1))
        ] + [None] * (NETWORKS_NUMBER // 2)
    else:
        # Load saved data
        self.load(path)
def sigmoid(sum):
    return 1 / (1 + math.exp(-sum))

def sigmoid_derivative(sum):
    sig = sigmoid(sum)
    return sig * (1 - sig)

def tanh_derivative(sum):
    # d/dx tanh(x) = 1 - tanh(x)**2
    return 1 - math.tanh(sum) ** 2

np.set_printoptions(precision=4)

# 4.3 A. NOR Gate
nn = NeuralNetwork(
    3, [], [NeuronInfo(partial(threshold, 0), weights=[-1, -1, -1, 0])])
print('NOR Gate:\n{}\n'.format(nn))
print('[0, 0, 0] -> {}'.format(nn.activate([0, 0, 0])))
print('[0, 0, 1] -> {}'.format(nn.activate([0, 0, 1])))
print('[0, 1, 0] -> {}'.format(nn.activate([0, 1, 0])))
print('[0, 1, 1] -> {}'.format(nn.activate([0, 1, 1])))
print('[1, 0, 0] -> {}'.format(nn.activate([1, 0, 0])))
print('[1, 0, 1] -> {}'.format(nn.activate([1, 0, 1])))
print('[1, 1, 0] -> {}'.format(nn.activate([1, 1, 0])))
print('[1, 1, 1] -> {}'.format(nn.activate([1, 1, 1])))
print()

# 4.3 A. Adder (the constructor call is truncated in the source)
nn = NeuralNetwork(
    2,
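# A quick finite-difference sanity check for the derivative helpers defined
# above (an illustrative addition, not part of the original exercise):
import math

def check_derivative(f, df, x, h=1e-6):
    # Central difference approximation of f'(x).
    numeric = (f(x + h) - f(x - h)) / (2 * h)
    assert abs(numeric - df(x)) < 1e-6, (numeric, df(x))

check_derivative(lambda x: 1 / (1 + math.exp(-x)), sigmoid_derivative, 0.5)
check_derivative(math.tanh, tanh_derivative, 0.5)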
def __init__(self):
    # ------------------------------------------------------------------ #
    # PARAMETERS:

    # Data parameters:
    batch_size = 250

    # Learning parameters:
    LEARNING_RATE = 0.001
    LEARNING_THRESHOLD = 0.0001
    N_SCREEN_ITERATIONS = 1

    # Network parameters:
    nInputs = 2
    nNeuronsLayer1 = 50
    nOutputs = 1

    # Network construction:
    x_ = tf.placeholder(tf.float32, shape=[None, nInputs])
    y_ = tf.placeholder(tf.float32, shape=[None, nOutputs])

    W1 = tf.Variable(tf.truncated_normal([nInputs, nNeuronsLayer1], stddev=0.1))
    W2 = tf.Variable(tf.truncated_normal([nNeuronsLayer1, nOutputs], stddev=0.1))
    b1 = tf.Variable(tf.truncated_normal([nNeuronsLayer1], stddev=0.1))
    b2 = tf.Variable(tf.truncated_normal([nOutputs], stddev=0.1))

    y1 = tf.sigmoid(tf.matmul(x_, W1) + b1)
    y2 = tf.sigmoid(tf.matmul(y1, W2) + b2)

    # ------------------------------------------------------------------ #
    # Define loss, optimizer, accuracy:
    cost = tf.reduce_mean(tf.square(y_ - y2))
    train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)
    correct = tf.equal(tf.rint(y2), y_)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # ------------------------------------------------------------------ #
    # Session initialization:
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    # ------------------------------------------------------------------ #
    # Prepare data:
    datasetFeatures, datasetTargets = self.getTrainingSet()
    batchesLeft = int(len(datasetTargets) / batch_size)

    # ------------------------------------------------------------------ #
    # Train:
    i = 0
    for k in range(1000000):
        if batchesLeft > 0:
            # Fetch a fresh batch every batch_size steps, train every step.
            if i % batch_size == 0:
                batch_x, batch_y, batchesLeft = self.getNextBatch(
                    batch_size, i, batchesLeft, datasetFeatures, datasetTargets)
            sess.run(train_step, feed_dict={x_: batch_x, y_: batch_y})
        else:
            # Epoch finished: reset the batch counter and start over.
            batchesLeft = int(len(datasetTargets) / batch_size)
            i = -1

        # Test:
        if k % 10000 == 0:
            out_batch, acc = sess.run((y2, accuracy),
                                      feed_dict={x_: batch_x, y_: batch_y})
            inx_ = 0
            print("Network: ", out_batch[inx_][0],
                  "Target: ", batch_y[inx_][0], "|",
                  "acc:", acc * 100, "%")

        i += 1

    # ------------------------------------------------------------------ #
    # Return trained neural network
    self.nnTrained = NeuralNetwork()
    self.nnTrained.W1 = sess.run(W1)
    self.nnTrained.W2 = sess.run(W2)
    self.nnTrained.b1 = sess.run(b1)
    self.nnTrained.b2 = sess.run(b2)
    self.returnTrainedNN()
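# Note: the placeholder/session API used in __init__ above is TensorFlow 1.x.
# If running under TensorFlow 2.x, the v1 compatibility layer restores it:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # re-enables graph mode, placeholders, and sessions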