Example #1
0
def init():
    # Read the data set
    print('Starting to read the data set...')
    data = leitura_csv('data/XOR_Training.csv')
    training_data = get_training_data(data)
    test_data = get_test_data(data)

    # Training phase
    print('Starting training phase...')
    neuralNetwork = NeuralNetwork(5, len(training_data[0][0]), len(training_data[0][1]))
    for i in range(10000):
        training_inputs, training_outputs = random.choice(training_data)
        neuralNetwork.training(training_inputs, training_outputs)
    print('Training complete')

    # Test phase
    print('Starting test phase:')
    error_sum = 0
    for i in range(len(test_data)):
        test_inputs, test_outputs = test_data[i][:]
        calculated_output = neuralNetwork.feed_forward(test_inputs)
        if not is_valid_output(test_outputs, calculated_output):
            error_sum += 1

    print('Total items in the test set: ', len(test_data))
    print('Total correct: ', len(test_data) - error_sum)
    print('Errors in the test phase: ', error_sum)
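
The helper is_valid_output is not shown with this example; a plausible minimal version for the XOR task would round each computed output and compare it with the expected binary target (a hypothetical sketch, not the original helper):

def is_valid_output(expected_outputs, calculated_outputs):
    # Hypothetical helper: count a prediction as correct when every
    # rounded network output matches the expected binary target
    return all(round(c) == e for e, c in zip(expected_outputs, calculated_outputs))
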
Example #2
0
def analyzeSymbol():
    trainingData = getTrainingData()

    network = NeuralNetwork(inputNodes=3, hiddenNodes=5, outputNodes=1)

    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(0)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                           predictionData[2])

    return predictedStockPrice
Example #3
0
def analyzeSymbol(stockSymbol):
    startTime = time.time()

    trainingData = getTrainingData(stockSymbol)

    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)

    network.train(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                           predictionData[2])

    # create return object, including the amount of time used to predict
    returnData = {}
    returnData['price'] = predictedStockPrice
    returnData['time'] = time.time() - startTime

    return returnData
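
denormalizePrice itself is not shown in these examples; given that it receives two extra values from predictionData, a plausible implementation inverts a simple min/max normalization (a hypothetical sketch, not the original helper):

def denormalizePrice(normalizedPrice, minimum, maximum):
    # Hypothetical inverse of a min/max normalization to [0, 1]
    return normalizedPrice * (maximum - minimum) + minimum
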
Example #4
0
    def test_predict(self):
        target = NeuralNetwork()
        target.addLayer(10, 10, lambda x: x**2)

        prediction = target.predict(
            tf.constant([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                        dtype=tf.dtypes.float32), [tf.zeros([10, 10])],
            [tf.zeros([10])])

        tf.debugging.assert_equal(prediction, tf.zeros(10))
Example #5
0
    def __init__(self):

        #Set Bird
        self.birdWidth = 34
        self.birdHeight = 24
        self.birdX = 50

        #Initialization Variables for Jump (Standard Jump), kept for reference:
        #self.birdY = 200
        #self.gravityAcceleration = 0  #self.isJumping = 0
        #self.jumpHeight = 20
        #self.vy = 0

        #Initialization Variables for Jump (Parabolic Jump)
        self.birdY = 200
        self.gravityAcceleration = 1
        self.jumpHeight = -8
        self.vy = 0

        #Initialization Variables for Life
        self.dead = 0

        #Initialization Variables for the neural Network
        self.birdPositionX = 0
        self.birdPositionY = 0
        self.normPosX = 0
        self.normPosY = 0
        self.distanceTraveled = 0

        #Initialization Variables for Score
        self.score = 0
        self.flagUpdateScore = 0

        #Initialization for Features
        self.iD = 0

        #Initialization number of jump made
        self.jumpMade = 0

        #Initialization NN
        self.nn = NeuralNetwork()

        #Initialization GUI Bird
        self.bird = pygame.Rect(self.birdX, self.birdY, self.birdWidth,
                                self.birdHeight)

        #Set GUI Bird
        self.birdImgs = [
            pygame.image.load("img/blueBirdFlap0.png").convert_alpha()
        ]
Example #6
0
	def mutation(self):
		for i in range(0, self.nNotChanged):
			# Note: this binds a reference to the existing network, so the
			# mutations below modify self.populationArray[i].nn in place
			mutatedNN = self.populationArray[i].nn
			for j in range(0, self.numberOfWeightsToMutate_1):
				_j = self.getRandomNumberInteger(mutatedNN.nHiddenLayer1,0)
				_i = self.getRandomNumberInteger(mutatedNN.nInputLayer,0)
				_mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
				mutatedNN.W1[_i][_j] = mutatedNN.W1[_i][_j] + _mutationValue
				mutatedNN.b1[0][_j] = mutatedNN.b1[0][_j] + _mutationValue

			for j in range(0, self.numberOfWeightsToMutate_2):
				_j = self.getRandomNumberInteger(mutatedNN.nHiddenLayer2,0)
				_i = self.getRandomNumberInteger(mutatedNN.nHiddenLayer1,0)
				_mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
				mutatedNN.W2[_i][_j] = mutatedNN.W2[_i][_j] + _mutationValue
				mutatedNN.b2[0][_j] = mutatedNN.b2[0][_j] + _mutationValue

			for j in range(0, self.numberOfWeightsToMutate_3):
				_j = self.getRandomNumberInteger(mutatedNN.nHiddenLayer3,0)
				_i = self.getRandomNumberInteger(mutatedNN.nHiddenLayer2,0)
				_mutationValue = self.getRandomNumber(self.mutationScale, -self.mutationScale)
				mutatedNN.W3[_i][_j] = mutatedNN.W3[_i][_j] + _mutationValue
				mutatedNN.b3[0][_j] = mutatedNN.b3[0][_j] + _mutationValue

			index = int(self.nPopulation - (i+1))
			#print("INDEX MUTATI:", index,self.nPopulation)
			self.populationArray[index].nn = mutatedNN
Example #7
0
def compute_numerical_gradient(nn, theta):
    """Compute an approximate numerical gradient."""
    numgrad = np.zeros((theta.size, 1))
    perturb = np.zeros((theta.size, 1))
    e = 1e-4
    # The parameters have been flattened into one long vector; check the gradient
    # for each parameter in turn, perturbing only that parameter and leaving the rest unchanged
    for p in range(theta.size):
        # Set the perturbation for this parameter
        perturb[p] = e
        loss1, _ = nn.cost_function(theta - perturb)
        loss2, _ = nn.cost_function(theta + perturb)
        # Compute the numerical gradient (central difference)
        numgrad[p] = (loss2 - loss1) / (2 * e)
        perturb[p] = 0  # Reset so the other parameters are not disturbed
    return numgrad
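
A minimal sketch of how such a helper is typically used for gradient checking, assuming nn.cost_function(theta) returns a (loss, gradient) pair as the code above implies:

def check_gradient(nn, theta):
    # Analytic gradient from backpropagation
    _, grad = nn.cost_function(theta)
    grad = np.asarray(grad).reshape(-1, 1)
    # Numerical approximation from central differences
    numgrad = compute_numerical_gradient(nn, theta)
    # The relative difference should be very small (e.g. below 1e-7)
    # when the backpropagated gradient is implemented correctly
    return np.linalg.norm(numgrad - grad) / np.linalg.norm(numgrad + grad)
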
Example #8
0
    def mutation(self):
        for i in range(0, self.nNotChanged):
            # Note: this binds a reference to the existing network, so the
            # mutations below modify self.populationArray[i].nn in place
            mutatedNN = self.populationArray[i].nn
            for j in range(0, self.numberOfWeightsToMutate):
                _j = self.getRandomNumberInteger(mutatedNN.nNeuronsLayer1, 0)
                _i = self.getRandomNumberInteger(mutatedNN.nInputs, 0)
                _mutationValue = self.getRandomNumber(self.mutationScale,
                                                      -self.mutationScale)
                #_mutationValue = 0

                mutatedNN.W1[_i][_j] = mutatedNN.W1[_i][_j] + _mutationValue

            index = int(self.nPopulation - (i + 1))
            #print("INDEX:", index, self.nPopulation)
            self.populationArray[index].nn = mutatedNN
Example #9
0
    def __init__(self, canvas, colour, y):
        self.brain = NeuralNetwork(4, 4, 1)
        self.move = 0
        self.x = 70
        self.y = y
        self.isDead = False
        self.canvas = canvas
        self.colour = colour
        self.radius = 10
        self.surface = pygame.Surface((self.radius * 2, self.radius * 2))
        self.surface.fill(BLUE)
        self.prevLoc = (self.x - self.radius, self.y - self.radius)
        self.fitness = 0
        # self.score = 0
        self.isChamp = False
        self.distance = 0
Example #10
0
def analyzeSymbol(stockSymbol):
    startTime = time.time()
    flag = 0
    trainingData = getTrainingData(stockSymbol)

    network = NeuralNetwork(inputNodes=3, hiddenNodes=3, outputNodes=1)

    network.train(trainingData)

    for i in range(0, 5):
        # get rolling data for most recent day
        predictionData = getPredictionData(stockSymbol, flag)
        returnPrice = network.test(predictionData)

        # de-normalize and return predicted stock price
        predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                               predictionData[2])

        print(predictedStockPrice)
        flag += 1
        global new_value
        new_value = predictedStockPrice
    return predictedStockPrice
Example #11
0
def main():

    nn = NeuralNetwork(config.numLayers, config.numClasses, config.weightInitialisation, config.activationFn, config.weightDecay)
    nn.initialiseParams(len(x_train[0])*len(x_train[0]), config.numNeurons)

    sample = np.random.randint(3 * len(x_train) // 4)
    nn.forwardPropagate(x_train[sample])
    nn.momentumGradDesc(x_train, y_train, config.maxIterations, config.learningRate, config.batchSize, config.gamma)

    predictions = []
    predProbs = []
    test_acc = 0
    test_entropy = 0
    test_mse = 0
    for i in range(len(x_test)):
        nn.forwardPropagate(x_test[i])
        predictions.append(nn.predictedClass)
        predProbs.append(nn.output[nn.predictedClass])


    test_acc = accuracy(y_test, predictions)
    test_entropy = crossEntropyLoss(y_test, predProbs)
    test_mse = MSEloss(y_test, predictions)

    confusion_matrix = np.zeros((config.numClasses, config.numClasses))
    for i in range(len(y_test)):
        confusion_matrix[predictions[i]][y_test[i]] += 1
    
    df_cm = pd.DataFrame(confusion_matrix, index=list("0123456789"),
                         columns=list("0123456789"))
    plt.figure(figsize = (10,10))
    sn.heatmap(df_cm, annot=True)
    plt.title("Confusion Matrix")
    plt.xlabel("y_test")
    plt.ylabel("y_pred")
    wandb.log({"plot":wandb.Image(plt)})
    plt.show()
    # Log metrics to wandb
    metrics = {
        'test_acc': test_acc, 
        # 'test_entropy': test_entropy,
        "test_mse": test_mse, 
        # "confusion_matrix": confusion_matrix,
    }
    wandb.log(metrics)
    run.finish()
Example #12
0
def test_neuralNetwork_set():
    inputs = 6
    outputs = 4
    layers = 3
    neurons = 87

    nn = NeuralNetwork(inputs=inputs,
                       outputs=outputs,
                       layers=layers,
                       neurons=neurons)
    new_inputs = 35
    new_outputs = 23
    new_layers = 3
    new_neurons = 10

    # Only the inputs should change
    nn.set(inputs=new_inputs)
    assert nn.inputs == new_inputs
    assert nn.outputs == outputs
    assert nn.layers == layers
    assert nn.neurons == neurons

    # Only the inputs and the outputs should have changed
    nn.set(outputs=new_outputs)
    assert nn.inputs == new_inputs
    assert nn.outputs == new_outputs
    assert nn.layers == layers
    assert nn.neurons == neurons

    # Inputs, outputs, and the number of layers should have changed
    nn.set(layers=new_layers)
    assert nn.inputs == new_inputs
    assert nn.outputs == new_outputs
    assert nn.layers == new_layers
    assert nn.neurons == neurons

    # All the values should be new at this point
    nn.set(neurons=new_neurons)
    assert nn.inputs == new_inputs
    assert nn.outputs == new_outputs
    assert nn.layers == new_layers
    assert nn.neurons == new_neurons
Example #13
0
def main():
    splitRatio = 0.8
    dataset = df.loadCsv(r"dataset.csv")

    groupedDataset = df.groupDatasetByQuality(dataset)

    workingDataset = selectionAttributes(groupedDataset)
    workingDataset = normalizeData(workingDataset)
    trainingSet, testSet = df.splitDataset(workingDataset, splitRatio)
    naiveBayesCall(trainingSet, testSet)
    NeuralNetwork(trainingSet, testSet)
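
normalizeData is not shown alongside this example; a typical implementation scales every attribute column to [0, 1] (a hypothetical sketch, not the original function):

def normalizeData(dataset):
    # Hypothetical min/max normalization of each attribute column
    columns = list(zip(*dataset))
    mins = [min(col) for col in columns]
    maxs = [max(col) for col in columns]
    return [[(value - lo) / (hi - lo) if hi > lo else 0.0
             for value, lo, hi in zip(row, mins, maxs)]
            for row in dataset]
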
Example #14
0
	def __init__(self, x, y):
		self.neuralNet = NeuralNetwork(7, 2, 1, 5)
		self.neuralNet.create()
		
		self.fitness = 0
		self.frontWidth = 20
		self.sideWidth = 40
		self.position = (x, y)
		self.direction = 0
		self.edgesPoints = 	[[self.position[0] - self.sideWidth//2, self.position[1] - self.frontWidth//2],
							[self.position[0] - self.sideWidth//2, self.position[1] + self.frontWidth//2],
							[self.position[0] + self.sideWidth//2, self.position[1] + self.frontWidth//2],
							[self.position[0] + self.sideWidth//2, self.position[1] - self.frontWidth//2],
							[self.position[0] - self.sideWidth//2, self.position[1] - self.frontWidth//2]]
		self.edgesPointsAprox = self.edgesPoints
		self.speed = 10
		self.isAlive = True
		self.rayPoints = [[], [], [], [], [], [], []]
		self.inputs = [0, 0, 0, 0, 0, 0, 0]
		self.lastsCookies = []
		self.cookie = 0
Example #15
0
    def load(self, path):
        if not isinstance(path, Path):
            path = Path(path)

        with open(path / "EvolutionStrategyProgram.txt") as json_file:
            data = json.load(json_file)

            self.generation = data["generation"]
            self.neuralNetworks = [
                NeuralNetwork(path=path / f"NeuralNetwork{number}.txt")
                for number in data["networks"]
            ]
Example #16
0
def test_neuralNetwork_fit_adam():
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1,
                       neurons=3,
                       outputs=1,
                       activations='tanh',
                       silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=True,
           silent=False,
           epochs=100,
           optimizer='adam')
    loss = nn.loss
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=True,
           silent=False,
           epochs=100,
           optimizer='adam')

    assert loss > nn.loss
Example #17
0
def test_neuralNetwork_fit_sgd():
    np.random.seed(2019)
    X = np.random.normal(size=(1, 500))
    target = 3.9285985 * X

    nn = NeuralNetwork(inputs=1,
                       neurons=3,
                       outputs=1,
                       activations='sigmoid',
                       silent=True)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=False,
           silent=True,
           epochs=100)

    loss_after_100 = nn.loss
    nn.fit(X,
           target,
           shuffle=True,
           batch_size=100,
           validation_fraction=0.2,
           learning_rate=0.05,
           verbose=False,
           silent=True,
           epochs=100)
    loss_after_200 = nn.loss

    assert loss_after_200 < loss_after_100
Example #18
0
def test_neuralNetwork_init():

    # Ensure the sizing is correctly handled when creating a new instance
    # of the network class.
    inputs = 6
    outputs = 4
    layers = 3
    neurons = 87

    nn = NeuralNetwork(inputs=inputs,
                       outputs=outputs,
                       layers=layers,
                       neurons=neurons)
    assert nn.inputs == inputs
    assert nn.outputs == outputs
    assert nn.layers == layers
    assert nn.neurons == neurons
Example #19
0
def test_neuralNetwork_network(silent=False):
    # Let's set up a scikit-learn neural network, copy its weights and
    # biases over to our network, and verify that the two give exactly
    # the same result.

    from sklearn.neural_network import MLPRegressor

    X = [[0.0], [1.0], [2.0], [3.0], [4.0], [5.0]]
    y = [0, 2, 4, 6, 8, 10]
    mlp = MLPRegressor(solver='sgd',
                       alpha=0.0,
                       hidden_layer_sizes=(3, 3),
                       random_state=1,
                       activation='relu')
    mlp.fit(X, y)
    W_skl = mlp.coefs_
    b_skl = mlp.intercepts_

    nn = NeuralNetwork(inputs=1,
                       outputs=1,
                       layers=3,
                       neurons=3,
                       activations='relu',
                       silent=silent)
    nn.addLayer()
    nn.addLayer()
    nn.addOutputLayer(activations='identity')

    W_nn = nn.weights
    b_nn = nn.biases

    for i in range(len(W_nn)):
        W_nn[i] = W_skl[i]
    for i in range(len(b_nn)):
        b_nn[i] = np.expand_dims(b_skl[i], axis=1)

    X_test = np.array([[1.2857], [9.2508255], [-5.25255], [3.251095]])

    output_skl = mlp.predict(X_test)
    output_nn = np.squeeze(nn(X_test.T))

    if not silent:
        print("%20.15f %20.15f %20.15f %20.15f" % (*output_skl, ))
        print("%20.15f %20.15f %20.15f %20.15f" % (*output_nn, ))
    assert output_nn == pytest.approx(output_skl)

    return nn, mlp
Example #20
0
    def __init__(self, windowWidth, windowHeight, brain=None):
        self.width = windowWidth
        self.height = windowHeight

        self.pos = Vector2(windowWidth/2, windowHeight/2)
        self.vel = Vector2()
        self.acc = 0
        self.maxVel = 4
        self.thrust = 0.1

        self.dir = -90
        self.dirDelta = -90
        self.turnSpeed = 4

        self.damp = 0.01

        self.size = 14

        self.bullets = []
        self.shootDelta = 0

        self.brain = brain or NeuralNetwork(45, 60, 4)
Example #21
0
    def generateOffsprings(self):
        # The sigma (and the variation) array contains, for each network, the
        # self-adaptive parameters for both weights and biases, with the latter
        # being the last column of each matrix.
        for network, index in zip(self.neuralNetworks[:NETWORKS_NUMBER // 2],
                                  range(NETWORKS_NUMBER // 2,
                                        NETWORKS_NUMBER)):
            variatedSigmas = network.mutateSigmas() * [
                np.random.randn(FIRST_LAYER_LENGTH, INPUT_LAYER_LENGTH + 1),
                np.random.randn(SECOND_LAYER_LENGTH, FIRST_LAYER_LENGTH + 1),
                np.random.randn(OUTPUT_LAYER_LENGTH, SECOND_LAYER_LENGTH + 1)
            ]
            variatedWeights = variatedSigmas[0][:, :-1], variatedSigmas[
                1][:, :-1], variatedSigmas[2][:, :-1]
            variatedBiases = np.array(variatedSigmas[0][:,-1])[:,np.newaxis],\
                np.array(variatedSigmas[1][:,-1])[:,np.newaxis],\
                np.array(variatedSigmas[2][:,-1])[:,np.newaxis]

            self.neuralNetworks[index] = NeuralNetwork(
                weights=network.weights + variatedWeights,
                biases=network.biases + variatedBiases,
                number=self.generation * (NETWORKS_NUMBER // 2) + index + 1,
                parent=network.number,
                sigmas=variatedSigmas)
        self.generation += 1
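
The update performed here is the standard self-adaptive evolution-strategy step: each parameter theta becomes theta + sigma' * N(0, 1), where sigma' is the mutated step size. A scalar sketch of the same rule, assuming a log-normal sigma update with learning rate tau:

import numpy as np

def self_adaptive_mutation(theta, sigma, tau=0.1):
    # Log-normal self-adaptation of the step size, then a Gaussian perturbation
    sigma_new = sigma * np.exp(tau * np.random.randn())
    theta_new = theta + sigma_new * np.random.randn()
    return theta_new, sigma_new
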
Example #22
0
    def __init__(self, path):
        # If no data is loaded, initializes the data from generation 0.
        if path is None or not os.path.isdir(path):
            self.generation = 0

            # The networks are initialized with the tanh activation function.
            # Space is also being allocated for the offsprings.
            self.neuralNetworks = [
                NeuralNetwork(number=number + 1,
                              sigmas=np.array([
                                  np.full((FIRST_LAYER_LENGTH,
                                           INPUT_LAYER_LENGTH + 1), 0.05),
                                  np.full((SECOND_LAYER_LENGTH,
                                           FIRST_LAYER_LENGTH + 1), 0.05),
                                  np.full((OUTPUT_LAYER_LENGTH,
                                           SECOND_LAYER_LENGTH + 1), 0.05)
                              ]))
                for number in range(
                    NETWORKS_NUMBER // 2 * self.generation, NETWORKS_NUMBER //
                    2 * (self.generation + 1))
            ] + [None] * (NETWORKS_NUMBER // 2)
        else:
            # Loads data
            self.load(path)
Example #23
0
    def crossDNA(self, parent1, parent2, crossoverPoint):
        child = NeuralNetwork()

        # Copy W1 columns up to the crossover point from parent1...
        for i in range(0, parent1.nInputs):
            for j in range(0, crossoverPoint):
                child.W1[i][j] = parent1.W1[i][j]

        # ...and the remaining columns from parent2
        for i in range(0, parent2.nInputs):
            for j in range(crossoverPoint, parent2.nNeuronsLayer1):
                child.W1[i][j] = parent2.W1[i][j]

        #NB: 0 here is because W2 [_,_,_]
        for i in range(0, crossoverPoint):
            child.W2[i][0] = parent1.W2[i][0]

        for i in range(crossoverPoint, parent2.nNeuronsLayer1):
            child.W2[i][0] = parent2.W2[i][0]

        return child
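
For reference, the same one-point crossover on a flat weight vector can be written much more compactly; a sketch assuming the weights are NumPy arrays:

import numpy as np

def cross_vectors(v1, v2, point):
    # One-point crossover: genes from v1 up to `point`, then from v2
    return np.concatenate([v1[:point], v2[point:]])
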
Example #24
0
    def test_addLayer(self):
        activationFunction1 = lambda x: x
        activationFunction2 = lambda x: x**2
        target = NeuralNetwork()

        target.addLayer(10, 20, activationFunction1)
        self.assertEqual(len(target._layers), 1)
        self.assertEqual(len(target._layers[0]), 10)
        tf.debugging.assert_equal(target._layers[0][0]._weights, tf.zeros(20))
        tf.debugging.assert_equal(target._layers[0][0]._bias,
                                  tf.Variable(0, dtype=tf.dtypes.float32))
        self.assertEqual(target._layers[0][0]._activationFunction,
                         activationFunction1)

        target.addLayer(5, 15, activationFunction2)
        self.assertEqual(len(target._layers), 2)
        self.assertEqual(len(target._layers[1]), 5)
        tf.debugging.assert_equal(target._layers[1][0]._weights, tf.zeros(15))
        tf.debugging.assert_equal(target._layers[1][0]._bias,
                                  tf.Variable(0, dtype=tf.dtypes.float32))
        self.assertEqual(target._layers[1][0]._activationFunction,
                         activationFunction2)
Example #25
0
def sigmoid(sum):
    return 1 / (1 + math.exp(-sum))


def sigmoid_derivative(sum):
    sig = sigmoid(sum)
    return sig * (1 - sig)


def tanh_derivative(sum):
    # d/dx tanh(x) = 1 - tanh(x)**2
    return 1 - math.tanh(sum) ** 2

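# A quick sanity check (not part of the original snippet): the derivatives
# above can be compared against a central-difference approximation.
def approx_derivative(f, x, eps=1e-6):
    return (f(x + eps) - f(x - eps)) / (2 * eps)

assert abs(sigmoid_derivative(0.5) - approx_derivative(sigmoid, 0.5)) < 1e-6
assert abs(tanh_derivative(0.5) - approx_derivative(math.tanh, 0.5)) < 1e-6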

np.set_printoptions(precision=4)

# 4.3 A. NOR Gate
nn = NeuralNetwork(
    3, [], [NeuronInfo(partial(threshold, 0), weights=[-1, -1, -1, 0])])
print('NOR Gate:\n{}\n'.format(nn))

print('[0, 0, 0] -> {}'.format(nn.activate([0, 0, 0])))
print('[0, 0, 1] -> {}'.format(nn.activate([0, 0, 1])))
print('[0, 1, 0] -> {}'.format(nn.activate([0, 1, 0])))
print('[0, 1, 1] -> {}'.format(nn.activate([0, 1, 1])))
print('[1, 0, 0] -> {}'.format(nn.activate([1, 0, 0])))
print('[1, 0, 1] -> {}'.format(nn.activate([1, 0, 1])))
print('[1, 1, 0] -> {}'.format(nn.activate([1, 1, 0])))
print('[1, 1, 1] -> {}'.format(nn.activate([1, 1, 1])))
print()

# 4.3 A. Adder
nn = NeuralNetwork(
    2,
Example #26
0
    def __init__(self):
        # ---------------------------------------------------------------- #
        # PARAMETERS:
        # Data Parameters:
        batch_size = 250

        # Learning Parameters:
        LEARNING_RATE = 0.001
        LEARNING_THRESHOLD = 0.0001
        N_SCREEN_ITERATIONS = 1

        # Network Parameters:
        nInputs = 2
        nNeuronsLayer1 = 50
        nOutputs = 1

        # Network Construction:
        x_ = tf.placeholder(tf.float32, shape=[None, nInputs])
        y_ = tf.placeholder(tf.float32, shape=[None, nOutputs])

        W1 = tf.Variable(
            tf.truncated_normal([nInputs, nNeuronsLayer1], stddev=0.1))
        W2 = tf.Variable(
            tf.truncated_normal([nNeuronsLayer1, nOutputs], stddev=0.1))

        b1 = tf.Variable(tf.truncated_normal([nNeuronsLayer1], stddev=0.1))
        b2 = tf.Variable(tf.truncated_normal([nOutputs], stddev=0.1))

        y1 = tf.sigmoid(tf.matmul(x_, W1) + b1)
        y2 = tf.sigmoid(tf.matmul(y1, W2) + b2)

        # ---------------------------------------------------------------- #
        # Define loss, optimizer, accuracy:
        #cost = tf.reduce_mean(tf.nn.l2_loss(y_ - y2))
        cost = tf.reduce_mean(tf.square(y_ - y2))
        train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(cost)

        #accuracy = tf.reduce_mean(tf.equal((tf.rint(y2), tf.int32), (tf.rint(y_), DType = tf.int32)), tf.float32)
        #accuracy = tf.reduce_mean()
        correct = tf.equal(tf.rint(y2), y_)
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

        # ---------------------------------------------------------------- #
        # Session Initialization:
        sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()

        # ---------------------------------------------------------------- #
        # Prepare Data:
        datasetFeatures, datasetTargets = self.getTrainingSet()
        batchesLeft = int(len(datasetTargets) / batch_size)

        # ---------------------------------------------------------------- #
        # Train:
        i = 0
        for k in range(1000000):
            if batchesLeft > 0:
                if i % batch_size == 0:
                    batch_x, batch_y, batchesLeft = self.getNextBatch(
                        batch_size, i, batchesLeft, datasetFeatures,
                        datasetTargets)
                    sess.run((train_step),
                             feed_dict={
                                 x_: batch_x,
                                 y_: batch_y
                             })
            else:
                batchesLeft = int(len(datasetTargets) / batch_size)
                i = -1

            # Test:
            if k % 10000 == 0:
                out_batch, acc = sess.run((y2, accuracy),
                                          feed_dict={
                                              x_: batch_x,
                                              y_: batch_y
                                          })
                inx_ = 0
                #print(batch_x[inx_][0], " + ", batch_x[inx_][1], " = ", out_batch[inx_][0], "|", batch_y[inx_], "cost:",cost_)
                print("Network: ", out_batch[inx_][0], "Target: ",
                      batch_y[inx_][0], "|", "acc:", acc * 100, "%")

            i += 1

        # ---------------------------------------------------------------- #
        # Return Trained Neural Network
        self.nnTrained = NeuralNetwork()
        self.nnTrained.W1 = sess.run(W1)
        self.nnTrained.W2 = sess.run(W2)
        self.nnTrained.b1 = sess.run(b1)
        self.nnTrained.b2 = sess.run(b2)
        self.returnTrainedNN()
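
For comparison, the same 2-50-1 sigmoid architecture could be written with the newer Keras API roughly as follows (a sketch, not part of the original TF1 code):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(50, activation='sigmoid', input_shape=(2,)),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss='mse')
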
Example #27
0
class CNN:

    nn = NeuralNetwork(0.001)

    def __init__(self):

        #
        # Layer zero, the input layer
        # Create neurons: the number of neurons is the same as the input
        # List of 29*29=841 pixels, and no weights/connections
        #
        layer0 = NNLayer("layer0")

        for i in range(0, 841):
            layer0.addNeuron()

        self.nn.addLayer(layer0)

        #
        # Layer 1: Convolutional layer
        # 6 feature maps. Each feature map is 13x13, and each unit in the feature map is a 5x5 convolutional kernel
        # from the input layer.
        # So there are 13x13x6 = 1014 neurons, (5x5+1)x6 weights
        #
        layer1 = NNLayer("layer1")
        layer1.setPrevLayer(layer0)

        # Add the neurons
        for i in range(0, 1014):
            layer1.addNeuron()

        # Add weights
        for i in range(0, 156):
            # Uniform random distribution
            initWeight = 0.05 * random.uniform(-1, 1)

            layer1.addWeight(initWeight)

        # interconnections with previous layer: this is difficult
        # The previous layer is a top-down bitmap
        # image that has been padded to size 29x29
        # Each neuron in this layer is connected
        # to a 5x5 kernel in its feature map, which
        # is also a top-down bitmap of size 13x13.
        # We move the kernel by TWO pixels, i.e., we
        # skip every other pixel in the input image

        kernelTemplate = [
            0, 1, 2, 3, 4, 29, 30, 31, 32, 33, 58, 59, 60, 61, 62, 87, 88, 89,
            90, 91, 116, 117, 118, 119, 120
        ]

        #Feature maps
        for fm in range(0, 6):

            for i in range(0, 13):

                for j in range(0, 13):

                    # 26 is the number of weights per feature map
                    iNumWeights = fm * 26

                    # Bias weight
                    layer1.neurons[fm * 169 + j + i * 13].addConnection(
                        -10000, iNumWeights)
                    iNumWeights += 1

                    for k in range(0, 25):

                        layer1.neurons[fm * 169 + j + i * 13].addConnection(
                            2 * j + 58 * i + kernelTemplate[k], iNumWeights)
                        iNumWeights += 1

        # Add layer to network
        self.nn.addLayer(layer1)

        #
        # Layer two: This layer is a convolutional layer
        # 50 feature maps. Each feature map is 5x5, and each unit in the feature maps is a 5x5 convolutional kernel of
        # corresponding areas of all 6 of the previous layers, each of which is a 13x13 feature map.
        # So, there are 5x5x50 = 1250 neurons, (5X5+1)x6x50 = 7800 weights

        layer2 = NNLayer("layer2")
        layer2.setPrevLayer(layer1)

        # Add the neurons
        for i in range(0, 1250):
            layer2.addNeuron()

        # Add weights
        for i in range(0, 7800):
            # Uniform random distribution
            initWeight = 0.05 * random.uniform(-1, 1)

            layer2.addWeight(initWeight)

        # Interconnections with previous layer: this is difficult
        # Each feature map in the previous layer
        # is a top-down bitmap image whose size
        # is 13x13, and there are 6 such feature maps.
        # Each neuron in one 5x5 feature map of this
        # layer is connected to a 5x5 kernel
        # positioned correspondingly in all 6 parent
        # feature maps, and there are individual
        # weights for the six different 5x5 kernels.  As
        # before, we move the kernel by TWO pixels, i.e., we
        # skip every other pixel in the input image.
        # The result is 50 different 5x5 top-down bitmap
        # feature maps

        kernelTemplate = [
            0, 1, 2, 3, 4, 13, 14, 15, 16, 17, 26, 27, 28, 29, 30, 39, 40, 41,
            42, 43, 52, 53, 54, 55, 56
        ]

        for fm in range(0, 50):

            for i in range(0, 5):

                for j in range(0, 5):

                    # 26 is the number of weights per feature map
                    iNumWeight = fm * 26

                    # Bias weight
                    layer2.neurons[fm * 25 + j + i * 5].addConnection(
                        -10000, iNumWeight)
                    iNumWeight += 1

                    for k in range(0, 25):

                        layer2.neurons[fm * 25 + j + i * 5].addConnection(
                            2 * j + 26 * i + kernelTemplate[k], iNumWeight)
                        iNumWeight += 1
                        layer2.neurons[fm * 25 + j + i * 5].addConnection(
                            169 + 2 * j + 26 * i + kernelTemplate[k],
                            iNumWeight)
                        iNumWeight += 1
                        layer2.neurons[fm * 25 + j + i * 5].addConnection(
                            338 + 2 * j + 26 * i + kernelTemplate[k],
                            iNumWeight)
                        iNumWeight += 1
                        layer2.neurons[fm * 25 + j + i * 5].addConnection(
                            507 + 2 * j + 26 * i + kernelTemplate[k],
                            iNumWeight)
                        iNumWeight += 1
                        layer2.neurons[fm * 25 + j + i * 5].addConnection(
                            676 + 2 * j + 26 * i + kernelTemplate[k],
                            iNumWeight)
                        iNumWeight += 1
                        layer2.neurons[fm * 25 + j + i * 5].addConnection(
                            845 + 2 * j + 26 * i + kernelTemplate[k],
                            iNumWeight)
                        iNumWeight += 1

        # add layer to network
        self.nn.addLayer(layer2)

        #
        # layer three:
        # This layer is a fully-connected layer
        # with 100 units.  Since it is fully-connected,
        # each of the 100 neurons in the
        # layer is connected to all 1250 neurons in
        # the previous layer.
        # So, there are 100 neurons and 100*(1250+1)=125100 weights
        #

        layer3 = NNLayer("layer3")
        layer3.setPrevLayer(layer2)

        # Add the neurons
        for i in range(0, 100):
            layer3.addNeuron()

        # Add weights
        for i in range(0, 125100):
            # Uniform random distribution
            initWeight = 0.05 * random.uniform(-1, 1)

            layer3.addWeight(initWeight)

        # Interconnections with previous layer: fully-connected

        iNumWeight = 0  # Weights are not shared in this layer

        for fm in range(0, 100):
            layer3.neurons[fm].addConnection(-10000, iNumWeight)  #bias
            iNumWeight += 1

            for i in range(0, 1250):

                layer3.neurons[fm].addConnection(i, iNumWeight)
                iNumWeight += 1

        # Add layer to network
        self.nn.addLayer(layer3)

        # layer four, the final (output) layer:
        # This layer is a fully-connected layer
        # with 10 units.  Since it is fully-connected,
        # each of the 10 neurons in the layer
        # is connected to all 100 neurons in
        # the previous layer.
        # So, there are 10 neurons and 10*(100+1)=1010 weights

        layer4 = NNLayer("layer4")

        layer4.setPrevLayer(layer3)

        # Add the neurons
        for i in range(0, 10):
            layer4.addNeuron()

        # Add weights
        for i in range(0, 1010):
            # Uniform random distribution
            initWeight = 0.05 * random.uniform(-1, 1)

            layer4.addWeight(initWeight)

        # Interconnections with previous layer: fully-connected

        iNumWeight = 0  # Weights are not shared in this layer

        for fm in range(0, 10):

            layer4.neurons[fm].addConnection(-10000, iNumWeight)  #bias
            iNumWeight += 1

            for i in range(0, 100):

                layer4.neurons[fm].addConnection(i, iNumWeight)
                iNumWeight += 1

        # Add layer to network
        self.nn.addLayer(layer4)

        print "NN structure:"
        print "Layer 0:", len(self.nn.layers[0].neurons)
        print "Layer 1:", len(self.nn.layers[1].neurons)
        print "Layer 2:", len(self.nn.layers[2].neurons)
        print "Layer 3:", len(self.nn.layers[3].neurons)
        print "Layer 4:", len(self.nn.layers[4].neurons)
        print "\n"

    def setWeights(self, numberOfSet):
        self.nn.layers[1].loadWeights(loadWeights("1", str(numberOfSet)))
        self.nn.layers[2].loadWeights(loadWeights("2", str(numberOfSet)))
        self.nn.layers[3].loadWeights(loadWeights("3", str(numberOfSet)))
        self.nn.layers[4].loadWeights(loadWeights("4", str(numberOfSet)))

    def traingNetwork(self, nn, numberOfSet):
        print "Training starting:"

        imageNumberList = loadData.getTrainingImageNumberList(numberOfSet)

        d, t = loadData.getImageAndTarget(imageNumberList[0])

        for i in range(1, len(imageNumberList)):
            #print "Forwardpass"

            nn.Calculate(d)

            if i % (numberOfSet // 10) == 0:
                print("Number of iterations:", i)
                nn.learningRate -= 0.000001

            nn.Backpropagate(nn.outputVector, t)

            d, t = loadData.getImageAndTarget(imageNumberList[i])

        saveWeights("1", str(numberOfSet), nn.layers[1].weights)
        saveWeights("2", str(numberOfSet), nn.layers[2].weights)
        saveWeights("3", str(numberOfSet), nn.layers[3].weights)
        saveWeights("4", str(numberOfSet), nn.layers[4].weights)
        print "Training completed. Weights are saved.\n"

        return nn

    def testNetwork(self, nn, numberOfSet, numberOfTest):

        print("Testing starting:")

        # Set weights from file
        self.setWeights(numberOfSet)

        imageNumberList = loadData.getTestImageNumberList(numberOfTest)

        correct = 0
        for i in range(0, len(imageNumberList)):

            # Get random picture
            d, t = loadData.getImageAndTarget(imageNumberList[i])

            # Forward-pass
            nn.Calculate(d)

            correctGuess = False
            # Check if the result is correct
            if nn.outputVector.index(max(nn.outputVector)) == t.index(max(t)):
                correct += 1
                correctGuess = True

            print "CNN:", nn.outputVector.index(max(
                nn.outputVector)), "Target:", t.index(max(t)), correctGuess

        print "\nNumber of correct:", correct
        print "Number of pictures", numberOfTest
        print "Percentage", (correct * 1.0 / numberOfTest) * 100

    def runCNN(self, image):

        d, t = loadData.getImageAndTarget(100)
        # Forward-pass
        self.nn.Calculate(d)

        return self.nn.outputVector.index(max(self.nn.outputVector))
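
The index arithmetic in the kernelTemplate sections above is easier to see in isolation: for a 29-pixel-wide, row-major image, the offsets 0..4, 29..33, 58..62, 87..91, 116..120 address a 5x5 patch, and the term 2*j + 58*i moves that patch two pixels per step (58 = 2*29). A standalone sketch of the same mapping:

# Reconstruct the layer-1 kernel template for a 29-pixel-wide, row-major image
width = 29
kernel_template = [r * width + c for r in range(5) for c in range(5)]

def patch_indices(i, j):
    # Top-left corner advances 2 pixels per step: 2*j horizontally,
    # 2*i rows (2*i*width flattened indices) vertically
    return [2 * j + 2 * i * width + offset for offset in kernel_template]
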
Example #28
0
for f in files:
    d = np.loadtxt(path + "/" + f)
    x.append(d)
    y_name = f.split("_")[0]
    y.append(wordList[y_name])
# print np.array(x), np.array(y)
x_data = np.array(x)
y_data = np.array(y)
l = LabelBinarizer()
y_data = l.fit_transform(y_data)
result = l.classes_
pickle.dump(result, open('result.pkl', 'wb'))
# x_train, x_test, y_train, y_test = train_test_split(x_data, y_data)
# labels_train = LabelBinarizer().fit_transform(y_train)
# labels_test = LabelBinarizer().fit_transform(y_test)

# print labels_test

nn = NeuralNetwork([960, 1500, 3], "logistic")
print "start"
nn.fit(x_data, y_data, epochs=1000)
pickle.dump(nn, open('nn.pkl', 'wb'))
predictions = []
for i in range(x_data.shape[0]):
    o = nn.predict(x_data[i])
    d = result[np.argmax(o)]
    predictions.append(d)

for i in predictions:
    print(i)
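
Since l is a fitted LabelBinarizer, the decoding loop above can also be written with scikit-learn's built-in inverse mapping, assuming nn.predict returns one score per class:

outputs = np.array([nn.predict(x_data[i]) for i in range(x_data.shape[0])])
predictions = l.inverse_transform(outputs)
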
Example #29
0
    def test_init(self):
        target = NeuralNetwork()

        self.assertEqual(target._layers, [])
Example #30
0
    def test_getBiasesForLayer(self):
        target = NeuralNetwork()
        target.addLayer(10, 20, lambda x: x**2)

        tf.debugging.assert_equal(target.getBiasesForLayer(0), tf.zeros([10]))