Example #1
    def createLocalizationNetwork(self):
        
        if self.localizationType == "Rotary":
            return RotaryLayer()
        
        if self.localizationType == "Scaled":
            return ScaledLayer()
        
        if self.localizationType == "ScaledUp":
            return ScaledUpLayer()
        
        if self.localizationType == "ScaledWithOffset":
            return ScaledWithOffsetLayer()
        
        if self.localizationType == "Unitary":
            return UnitaryLayer()
        
        if self.localizationType == "FullyConnected":
            network = NeuralNetwork()
            network.addLayer(FullyConnectedLayer(self.inputW * self.inputH * self.inputC, 32, 0, "ReLu"))
            network.addLayer(FullyConnectedLayer(32, 3*4, 1, "ReLu"))
            return network

        if self.localizationType == "ConvLayer":
            network = NeuralNetwork()
            network.addLayer(ConvLayer((self.inputW, self.inputH, self.inputC), (3, 3, self.inputC, self.inputC), 0, "ReLu"))
            network.addLayer(FullyConnectedLayer(self.inputW * self.inputH * self.inputC, 3*4, 1, "ReLu"))
            return network
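
createLocalizationNetwork above dispatches on a type string through a chain of ifs. A self-contained sketch of the same pattern using a lookup table (the stub classes below are placeholders, not the real layer classes):

class RotaryStub: pass
class ScaledStub: pass

LOCALIZATION_LAYERS = {
    "Rotary": RotaryStub,
    "Scaled": ScaledStub,
}

def create_localization_layer(localization_type):
    # dict dispatch replaces the if-chain and gives a clear error for unknown keys
    try:
        return LOCALIZATION_LAYERS[localization_type]()
    except KeyError:
        raise ValueError("unknown localization type: " + localization_type)

print(type(create_localization_layer("Rotary")).__name__)  # RotaryStub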
def test(base_directory, ignore_word_file, filtered, nb_hidden_neurons, nb_max_iteration):
    print("post reading...")
    pr = PostReader(base_directory, ignore_word_file, filtered)

    print("creating neural network...")
    nn = NeuralNetwork(pr.get_word_set(), nb_hidden_neurons, nb_max_iteration)

    print("training...")
    training_set = pr.get_training_set()
    t0 = time.perf_counter()
    nb_iteration = nn.train(training_set)
    training_time = time.perf_counter() - t0

    print("verification...")
    verification_set = pr.get_verification_set()
    t0 = time.perf_counter()
    nb_correct = 0
    for msg in verification_set:
        final = NeuralNetwork.threshold(nn.classify(msg[0]))
        if final == msg[1]:
            nb_correct += 1
    verification_time = time.perf_counter() - t0

    print("=======================")
    print("training set length    : %s" % len(training_set))
    print("nb hidden neurons      : %s" % nb_hidden_neurons)
    print("nb max iterations      : %s" % nb_max_iteration)
    print("nb iterations          : %s" % nb_iteration)
    print("verification set length: %s posts" % len(verification_set))
    print("nb correct classified  : %s posts" % nb_correct)
    print("rate                   : %i %%" % (nb_correct / len(verification_set) * 100))
    print("training time          : %i s" % training_time)
    print("verification time      : %i s" % verification_time)
    print("=======================")
    print("")
Example #3
def run_iris_comparison(num=25):
    """ Compare a few different test and
    training configurations
    """
    print("Running neural network {} times each for three different sets of training and testing files".format(num))
    test_files = ['iris_tes.txt', 'iris_tes50.txt', 'iris_tes30.txt']
    train_files = ['iris_tra.txt', 'iris_tra100.txt', 'iris_tra120.txt']

    for i in range(0, len(test_files)):
        print("trainfile = {}     testfile = {}".format(train_files[i], test_files[i]))

    config_obj = openJsonConfig('conf/annconfig_iris.json')
    summary = {}

    for i in range(0, len(test_files)):
        config_obj['testing_file'] = test_files[i]
        config_obj['training_file'] = train_files[i]
        config_obj['plot_error'] = False
        config_obj['test'] = False
        crates = []

        for j in range(0, num):
            nn = NeuralNetwork(config_obj)
            nn.back_propagation()
            cmat, crate, cout = nn.classification_test(nn.testing_data, nn.weights_best)
            crates.append(crate)
        summary[config_obj['testing_file']] = nn_stats(np.array(crates))
    print(print_stat_summary(summary))
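
nn_stats is not defined in this snippet; a plausible stand-in, assuming it just summarizes the collected classification rates (the field names here are made up):

import numpy as np

def nn_stats_sketch(crates):
    # hypothetical substitute for nn_stats(): basic summary statistics
    return {
        "mean": float(np.mean(crates)),
        "std": float(np.std(crates)),
        "min": float(np.min(crates)),
        "max": float(np.max(crates)),
    }

print(nn_stats_sketch(np.array([0.92, 0.95, 0.90])))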
    def accuracy(self, number_layers, numbers_neurons, learning_rate):
        """Returns the accuracy of a neural network associated with an Individual"""
        net = NeuralNetwork(number_layers, numbers_neurons, learning_rate,
                            X_train=self.dataset.X_train, Y_train=self.dataset.Y_train,
                            X_test=self.dataset.X_test, Y_test=self.dataset.Y_test)
        # train the neural network
        net.train()
        # compute the accuracy
        acc = net.classify()
        # store the AUC
        self.__auc = net.get_auc()
        return acc
def main():
    # sample inputs and expected outputs (unused until training is implemented)
    data = np.array([[1.0, 0.0, 0.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0, 0.0, 0.0]])
    result = np.array([[0.0, 0.0, 0.0, 0.0, 1.0],
                       [0.0, 0.0, 0.0, 1.0, 0.0]])

    Nn = NeuralNetwork([5, 5, 5])
    print(Nn.feedforward(np.array([[5], [5], [5], [5], [5]])))
    # TODO: training function
    print(Nn.feedforward(np.array([[5], [5], [5], [5], [5]])))
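
Since training is still a TODO above, here is a self-contained sketch of what the 5-5-5 feedforward pass could look like with random weights (biases omitted; this is an illustration, not the NeuralNetwork class used above):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
W1 = rng.standard_normal((5, 5))  # input -> hidden
W2 = rng.standard_normal((5, 5))  # hidden -> output

def feedforward_sketch(x):
    return sigmoid(W2 @ sigmoid(W1 @ x))

print(feedforward_sketch(np.array([[5.0], [5.0], [5.0], [5.0], [5.0]])))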
Example #6
File: MLP.py Project: dxmtb/nn
    def __init__(self, in_dim, hidden_dim, out_dim, activation, loss_type,
                 layer_num=0):
        NeuralNetwork.__init__(self, activation, loss_type)

        args = [self.activation, self.grad_activation]
        self.layers = []
        self.layers.append(FullyConnectedLayer(in_dim, hidden_dim, *args))
        for _ in xrange(layer_num):
            self.layers.append(FullyConnectedLayer(hidden_dim, hidden_dim, *args))
        if loss_type == 'mse':
            self.layers.append(FullyConnectedLayer(hidden_dim, out_dim, *args))
        else:
            from SoftmaxLayer import SoftmaxLayer
            self.layers.append(SoftmaxLayer(hidden_dim, out_dim, *args))
def main():

    print("Starting Support Vector Machine Simulations")

    # svr = SupportVectorMachine()
    # svr.simulate()

    # basicNeuralNetwork = NeuralNetwork()
    # basicNeuralNetwork.simulate()

    # svr1 = SupportVectorMachine(20, 10, 500, 60)
    # svr1.simulate()

    # neuralNetwork1 = NeuralNetwork(20, 10, 500, 60)
    # neuralNetwork1.simulate()

    # larger window
    # svr2 = SupportVectorMachine(48, 10, 200)
    # svr2.simulate()

    # neuralNetwork2 = NeuralNetwork(48, 10, 200)
    # neuralNetwork2.simulate()

    # large window
    # svr3 = SupportVectorMachine(32, 10, 200)
    # svr3.simulate()

    # neuralNetwork3 = NeuralNetwork(32, 10, 200)
    # neuralNetwork3.simulate()

    # day sized window
    # svr4 = SupportVectorMachine(24, 10, 200)
    # svr4.simulate()

    # neuralNetwork4 = NeuralNetwork(24, 10, 200)
    # neuralNetwork4.simulate()

    # half a day sized window
    svr5 = SupportVectorMachine(12, 10, 200)
    svr5.simulate()

    neuralNetwork5 = NeuralNetwork(12, 10, 200)
    neuralNetwork5.simulate()
    def eventListener(self):
        tickTime = pygame.time.Clock()
        holdTime = 0
        pygame.init()
        DISPLAYSURF = pygame.display.set_mode((900, 900))
        DISPLAYSURF.fill((255, 255, 255, 255))
        while True:
            for event in pygame.event.get():
                if event.type == pygame.MOUSEBUTTONDOWN:
                    holdTime = tickTime.tick(60)
                    print(self.alias, "DOWN: ", holdTime)
                if event.type == pygame.MOUSEBUTTONUP:
                    if holdTime < 3000:
                        print("----------------------------")
                        print(self.alias, "CLASSIFYING... ")
                        print("----------------------------")
                        pygame.mixer.music.load("perro.wav")
                        self.takePicture()
                        # Playback tests --- (does not work on my machine)
                        self.play("perro")
                        self.play("gato")
                        self.play("desconocido")
                        # -------------------------------
                        self.startClasification()
                        print(self.alias, "UP: ", holdTime)
                        holdTime = 0
                    else:
                        print(self.alias, ": ", holdTime, " milliseconds")
                        networkModel, netMean, prototype, classes = self.netHandler.getNextNet()
                        self.neuralNetwork = NeuralNetwork(networkModel, netMean, prototype, classes)
                        holdTime = 0
                if event.type == pygame.QUIT:
                    sys.exit(0)
Example #9
    def createNeuralNetwork(self, load = False):
        listHidden1 = [Neuron("1", 6, load), Neuron("2", 6, load), Neuron("3", 6, load)]
        listHidden2 = [Neuron("4", 3, load), Neuron("5", 3, load)]
        listHidden3 = [Neuron("6", 2, load)]
        listNetwork = [NeuronLayer(listHidden1), NeuronLayer(listHidden2), NeuronLayer(listHidden3)]

        self.neuralNetwork = NeuralNetwork(listNetwork)
Example #10
    def createFullyConnectedNetwork(parameters):
        logger.info("Creating a fully connected network")
        network = NeuralNetwork()

        for idx, (inputSize, outputSize) in enumerate(parameters):
            isLastLayer = (idx == (len(parameters) - 1))
            nonlinearity = "Null" if isLastLayer else "ReLu"
            network.addLayer(FullyConnectedLayer(inputSize, outputSize, idx, nonlinearity))

        return network
    def __init__(self):
        self.alias = "[CLASSIFIER]>> "
        print(self.alias, "Starting classifier...")
        self.netHandler = NeuralNetworksHandler()
        self.imageProcesor = ImagePreprocesor(wideSegment=150, highSegment=150,
                                              horizontalStride=50, verticalStride=50,
                                              withResizeImgOut=250, highResizeImgOut=250)
        networkModel, netMean, prototype, classes = self.netHandler.getNetworkByIndex(0)
        # argument order: model, mean, prototype, classes (matches the call in eventListener)
        self.neuralNetwork = NeuralNetwork(networkModel, netMean, prototype, classes)
        self.speaker = AudioPlayer()
        self.eventListener()
Example #12
    def process(self):
        print('[ Prepare input images ]')
        inputs = self.prepare_images()

        print('[ Init Network ]')
        network = NeuralNetwork(inputs, self.p, self.image_size, self.min_error)

        print('[ Start training ]')
        network.training()
        # network.load_weights()

        print('[ Start recovering picture ]')
        rec_images = network.process()

        rec_picture = self.recover_image(rec_images)

        print('[ Save recovered image to file ]')
        misc.imsave('images/rec_image.bmp', rec_picture)
    def __trainbAction(self):
        config = {'input_size': 30 * 30,  'hidden_size': 30 * 30, 'lambda': 1, 'num_labels': (len(self.learned))}
        self.nn = NeuralNetwork(config=config)

        cost_params_fscore = []
        for i in range(self._k):
            cost_params_fscore.append(self.nn.train(self.training_X[i], self.training_y[i], self.cross_validation_set[i], self.test_set, self.cross_validation_set_y[i], self.testing_y))

        best_model = max(cost_params_fscore, key=itemgetter(2))
        print(best_model[0], best_model[2])
Example #14
class Creature(Entity):
    BASE_SHAPE = [[10, 0], [0, -10], [-5, -5], [-5, 5], [0, 10]]
    MAX_HEALTH = 100

    def __init__(self, world, position, orientation, color):
        self.polygonshape = PolygonShape(self.BASE_SHAPE)
        self.position = position
        self.orientation = orientation
        self.color = color
        self.movespeed = MoveSpeed(0)
        self.turnspeed = TurnSpeed(0)

        self.neuralnetwork = NeuralNetwork(2, 7, 2)
        self.neuralnetwork.initialize_random_network()
        self.health = Health(self.MAX_HEALTH)
        self.foodseen = FoodSeen(0)

        bounding_square = get_bounding_square(self.BASE_SHAPE)
        self.collider = Collider(self, bounding_square, self.BASE_SHAPE)
class NeuralNetworkTestcase(unittest.TestCase):
    def setUp(self):
        self.nn = NeuralNetwork(['a', 'b'], 2)

        self.nn.hidden_neurons[0].input_weights['a'] = 0.25
        self.nn.hidden_neurons[0].input_weights['b'] = 0.50
        self.nn.hidden_neurons[0].bias = 0.0

        self.nn.hidden_neurons[1].input_weights['a'] = 0.75
        self.nn.hidden_neurons[1].input_weights['b'] = 0.75
        self.nn.hidden_neurons[1].bias = 0.0

        self.nn.final_neuron.input_weights[0] = 0.5
        self.nn.final_neuron.input_weights[1] = 0.5
        self.nn.final_neuron.bias = 0.0

    def test_calc(self):
        self.nn.classify({'a': 1.0, 'b': 0.0})
        self.assertAlmostEqual(self.nn.final_neuron.last_output, 0.650373, 5)
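
The expected value 0.650373 can be checked by hand, assuming the neurons apply the standard logistic sigmoid to the weighted sum of their inputs:

import math

def sigmoid(z):
    return 1.0 / (1.0 + math.exp(-z))

h0 = sigmoid(0.25 * 1.0 + 0.50 * 0.0)  # hidden neuron 0
h1 = sigmoid(0.75 * 1.0 + 0.75 * 0.0)  # hidden neuron 1
out = sigmoid(0.5 * h0 + 0.5 * h1)     # final neuron
print(round(out, 6))  # 0.650373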
Example #16
def exo8():
    print("\n\n>>EXERCISE 8 MNIST")
    Xtrain, ytrain, Xvalid, yvalid, Xtest, ytest = utils.readMNISTfile()
    default_h = 30
    maxIter = 1
    neuralNetwork = NeuralNetwork(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=100)
    neuralNetworkEfficient = NeuralNetworkEfficient(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=100)
    neuralNetworkEfficient._w1 = neuralNetwork._w1  # share the random initial weights
    neuralNetworkEfficient._w2 = neuralNetwork._w2
    print("--- Baseline network ---")
    t1 = datetime.now()
    neuralNetwork.train(Xtrain, ytrain, maxIter)
    t2 = datetime.now()
    delta = t2 - t1
    print("Took " + str(delta.total_seconds()) + " seconds")
    print("--- Optimized network ---")
    t1 = datetime.now()
    neuralNetworkEfficient.train(Xtrain, ytrain, maxIter)
    t2 = datetime.now()
    delta = t2 - t1
    print("Took " + str(delta.total_seconds()) + " seconds")
Example #17
    def createSpatialTransformerWithFullyConnectedNetwork(parameters, isVerbose):
        logger.info("Creating a fully connected network with a spatial transformer input layer")
        network = NeuralNetwork()

        for idx, (inputSize, outputSize) in enumerate(parameters):
            isLastLayer = (idx == (len(parameters) - 1))
            nonlinearity = "Null" if isLastLayer else "ReLu"

            if idx == 0:
                network.addLayer(SpatialTransformerLayer(inputSize[0], inputSize[1], inputSize[2],
                    outputSize[0], outputSize[1], outputSize[2], "ConvLayer"))
            else:
                network.addLayer(FullyConnectedLayer(inputSize, outputSize, idx, nonlinearity))

        return network
Example #18
def exo67():
    print("\n\n>>EXERCISES 6 and 7: matrix computation")
    print(" --- K=1 ---")
    #Xtrain, ytrain, Xvalid, yvalid, Xtest, ytest = utils.readMoonFile()
    Xtrain = [[30, 20, 40, 50], [25, 15, 35, 45]]
    ytrain = [0, 0]
    default_h = 2
    nn = NeuralNetwork(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=1, wd=0)
    nne = NeuralNetworkEfficient(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=1, wd=0)
    nne._w1 = nn._w1  # trick so both networks share the same random initialization
    nne._w2 = nn._w2
    nn.train(Xtrain, ytrain, 1)
    nne.train(Xtrain, ytrain, 1)
    utils.compareNN(nn, nne)
    print(" --- K=10 ---")
    Xtrain = [[30, 20, 40, 50], [25, 15, 35, 45], [30, 76, 45, 44], [89, 27, 42, 52], [30, 24, 44, 53],
              [89, 25, 45, 50], [30, 20, 40, 50], [30, 65, 47, 50], [30, 34, 40, 50], [39, 20, 29, 58]]
    ytrain = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    default_h = 2
    nn = NeuralNetwork(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=10, wd=0)
    nne = NeuralNetworkEfficient(len(Xtrain[0]), default_h, utils.getClassCount(ytrain), K=10, wd=0)
    nne._w1 = nn._w1  # same trick as above
    nne._w2 = nn._w2
    nn.train(Xtrain, ytrain, 1)
    nne.train(Xtrain, ytrain, 1)
    utils.compareNN(nn, nne, 10)
def train(method, learningRate, momentum, numEpochs, output, samples, layers=None, minibatchsize=None):
    trainImages, trainOutput, trainNumbers = loadData('training', samples)
    nn = NeuralNetwork(layerCount=len(layers),
                       layerSize=np.array(layers),
                       actFunctions=np.array([1, 1]))

    trainmethod = {
        TRAININGMETHODS[0]: nn.trainBatch,
        TRAININGMETHODS[1]: nn.trainStochastic
    }
    errors = trainmethod[method](
        trainImages, trainOutput, learningRate=learningRate, momentum=momentum, numEpochs=numEpochs)
    # If an error occurred, just return; the error message was already
    # printed by the train method.
    if not errors:
        return

    # print('Training done, errors:', errors)
    print('Saving output to', output)
    nn.save(output)
    print('Testing...')
    test(nn)
    def __load_learned(self):
        try:
            with open('learned.json') as learned_file:
                for line in learned_file:
                    learned = json.loads(line)
                    for key in learned.keys():
                        self._totrainlist.append(key)  # append each learned key
        except IOError:
            learned = {}

        config = {'input_size': 30 * 30,  'hidden_size': 30 * 30, 'lambda': 1, 'num_labels': (len(learned))}
        self.nn = NeuralNetwork(config=config)

        return learned
    def setUp(self):
        self.nn = NeuralNetwork(['a', 'b'], 2)

        self.nn.hidden_neurons[0].input_weights['a'] = 1.0
        self.nn.hidden_neurons[0].input_weights['b'] = 1.0
        self.nn.hidden_neurons[0].bias = 0.0

        self.nn.hidden_neurons[1].input_weights['a'] = 1.0
        self.nn.hidden_neurons[1].input_weights['b'] = 1.0
        self.nn.hidden_neurons[1].bias = 0.0

        self.nn.final_neuron.input_weights[0] = -1
        self.nn.final_neuron.input_weights[1] = 1
        self.nn.final_neuron.bias = 0.0
    def __init__(self):
        tkinter.Tk.__init__(self)
        self.nn = NeuralNetwork(784, 300, 10)

        self.background = tkinter.Canvas(self, width=308, height=308)
        self.background.config(background="black")
        self.input_canvas = InputCanvas(self, width=300, height=300)
        self.result_label = tkinter.Label(self, text='')
        self.recog_button = tkinter.Button(self, text='Recognize', command=self.recognize)
        self.clear_button = tkinter.Button(self, text='Clear', command=self.input_canvas.clear)

        self.background.pack()
        self.input_canvas.place(x=4, y=4)
        self.result_label.pack()
        self.recog_button.pack()
        self.clear_button.pack()
    def test_train(self):
        self.nn = NeuralNetwork(['a', 'b'], 2)
        # the same two training examples, repeated
        for _ in range(4):
            self.nn.train([[{'a': 1.0, 'b': 0.0}, 1.0]])
            self.nn.train([[{'a': 0.0, 'b': 1.0}, 1.0]])

        self.assertAlmostEqual(self.nn.classify({'a': 1.0, 'b': 0.0}), 1.0, 5)
        self.assertAlmostEqual(self.nn.classify({'a': 0.0, 'b': 1.0}), 1.0, 5)
        self.assertAlmostEqual(self.nn.classify({'a': 1.0, 'b': 1.0}), 0.0, 5)
        self.assertAlmostEqual(self.nn.classify({'a': 0.0, 'b': 0.0}), 0.0, 5)
Example #24
def main():
    print("Program Start")
    headers = [
        "Data set", "layers", "pop", "Beta", "CR", "generations", "loss1",
        "loss2"
    ]
    filename = 'VIDEORESULTS.csv'

    Per = Performance.Results()
    Per.PipeToFile([], headers, filename)

    data_sets = [
        "soybean", "glass", "abalone", "Cancer", "forestfires", "machine"
    ]

    regression_data_set = {
        "soybean": False,
        "Cancer": False,
        "glass": False,
        "forestfires": True,
        "machine": True,
        "abalone": True
    }
    categorical_attribute_indices = {
        "soybean": [],
        "Cancer": [],
        "glass": [],
        "forestfires": [],
        "machine": [],
        "abalone": []
    }

    tuned_0_hl = {
        "soybean": {
            "omega": .5,
            "c1": .1,
            "c2": 5,
            "hidden_layer": []
        },
        "Cancer": {
            "omega": .5,
            "c1": .5,
            "c2": 5,
            "hidden_layer": []
        },
        "glass": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": []
        },
        "forestfires": {
            "omega": .2,
            "c1": 5,
            "c2": .5,
            "hidden_layer": []
        },
        "machine": {
            "omega": .5,
            "c1": .9,
            "c2": 5,
            "hidden_layer": []
        },
        "abalone": {
            "omega": .2,
            "c1": 5,
            "c2": .9,
            "hidden_layer": []
        }
    }

    tuned_1_hl = {
        "soybean": {
            "omega": .5,
            "c1": .5,
            "c2": 1,
            "hidden_layer": [7]
        },
        "Cancer": {
            "omega": .2,
            "c1": .5,
            "c2": 5,
            "hidden_layer": [4]
        },
        "glass": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": [8]
        },
        "forestfires": {
            "omega": .2,
            "c1": 5,
            "c2": 5,
            "hidden_layer": [8]
        },
        "machine": {
            "omega": .5,
            "c1": 5,
            "c2": .5,
            "hidden_layer": [4]
        },
        "abalone": {
            "omega": .2,
            "c1": .1,
            "c2": 5,
            "hidden_layer": [8]
        }
    }

    tuned_2_hl = {
        "soybean": {
            "omega": .5,
            "c1": .9,
            "c2": .1,
            "hidden_layer": [7, 12]
        },
        "Cancer": {
            "omega": .2,
            "c1": .5,
            "c2": 5,
            "hidden_layer": [4, 4]
        },
        "glass": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": [8, 6]
        },
        "forestfires": {
            "omega": .2,
            "c1": .9,
            "c2": 5,
            "hidden_layer": [8, 8]
        },
        "machine": {
            "omega": .2,
            "c1": .9,
            "c2": .1,
            "hidden_layer": [7, 2]
        },
        "abalone": {
            "omega": .2,
            "c1": 5,
            "c2": 5,
            "hidden_layer": [6, 8]
        }
    }
    du = DataUtility.DataUtility(categorical_attribute_indices,
                                 regression_data_set)
    total_counter = 0
    for data_set in data_sets:
        if data_set != 'Cancer':
            continue
        data_set_counter = 0
        # ten fold data and labels is a list of [data, labels] pairs, where
        # data and labels are numpy arrays:
        tenfold_data_and_labels = du.Dataset_and_Labels(data_set)

        for j in range(10):
            test_data, test_labels = copy.deepcopy(tenfold_data_and_labels[j])
            #Append all data folds to the training data set
            remaining_data = [
                x[0] for i, x in enumerate(tenfold_data_and_labels) if i != j
            ]
            remaining_labels = [
                y[1] for i, y in enumerate(tenfold_data_and_labels) if i != j
            ]
            #Store off a set of the remaining dataset
            X = np.concatenate(remaining_data, axis=1)
            #Store the remaining data set labels
            labels = np.concatenate(remaining_labels, axis=1)
            print(data_set, "training data prepared")
            regression = regression_data_set[data_set]
            #If the data set is a regression dataset
            if regression:
                #The number of output nodes is 1
                output_size = 1
            #else it is a classification data set
            else:
                #Count the number of classes in the label data set
                output_size = du.CountClasses(labels)
                #Get the test data labels in one hot encoding
                test_labels = du.ConvertLabels(test_labels, output_size)
                #Get the Labels into a One hot encoding
                labels = du.ConvertLabels(labels, output_size)

            input_size = X.shape[0]

            data_set_size = X.shape[1] + test_data.shape[1]

            tuned_parameters = [
                tuned_0_hl[data_set], tuned_1_hl[data_set],
                tuned_2_hl[data_set]
            ]
            for z in range(1):
                hidden_layers = tuned_parameters[z]["hidden_layer"]

                layers = [input_size] + hidden_layers + [output_size]

                nn = NeuralNetwork(input_size, hidden_layers, regression,
                                   output_size)
                nn.set_input_data(X, labels)
                nn1 = NeuralNetwork(input_size, hidden_layers, regression,
                                    output_size)
                nn1.set_input_data(X, labels)
                nn2 = NeuralNetwork(input_size, hidden_layers, regression,
                                    output_size)
                nn2.set_input_data(X, labels)

                total_weights = 0
                for i in range(len(layers) - 1):
                    total_weights += layers[i] * layers[i + 1]

                de_hyperparameters = {
                    "population_size": 10 * total_weights,
                    "beta": .5,
                    "crossover_rate": .6,
                    "max_gen": 100
                }
                ga_hyperparameters = {
                    "maxGen": 100,
                    "pop_size": 100,
                    "mutation_rate": .5,
                    "mutation_range": 10,
                    "crossover_rate": .5
                }
                pso_hyperparameters = {
                    "position_range": 10,
                    "velocity_range": 1,
                    "omega": .1,
                    # tuned_parameters[z]["omega"],
                    "c1": .9,
                    # tuned_parameters[z]["c1"],
                    "c2": .1,
                    # tuned_parameters[z]["c2"],
                    "vmax": 1,
                    "pop_size": 1000,
                    "max_t": 50
                }
                de = DE.DE(de_hyperparameters, total_weights, nn)
                ga = GA.GA(ga_hyperparameters, total_weights, nn1)
                pso = PSO.PSO(layers, pso_hyperparameters, nn2)
                learning_rate = 3
                momentum = 0
                VNN = VideoNN.NeuralNetworks(input_size, hidden_layers,
                                             regression, output_size,
                                             learning_rate, momentum)
                VNN.set_input_data(X, labels)

                for gen in range(de.maxgens):
                    de.mutate_and_crossover()

                for gen in range(ga.maxGen):
                    ga.fitness()
                    ga.selection()
                    ga.crossover()

                counter = 0
                for epoch in range(pso.max_t):
                    pso.update_fitness()
                    pso.update_position_and_velocity()

                for epoch in range(100):
                    VNN.forward_pass()
                    VNN.backpropagation_pass()

                bestSolution = de.bestChromie.getchromie()
                bestWeights = de.nn.weight_transform(bestSolution)
                de.nn.weights = bestWeights

                Estimation_Values = de.nn.classify(test_data, test_labels)
                Estimation_Values1 = ga.nn.classify(test_data, test_labels)
                Estimation_Values2 = pso.NN.classify(test_data, test_labels)
                Estimation_Values3 = VNN.classify(test_data, test_labels)
                if not regression:
                    #Decode the One Hot encoding Value
                    Estimation_Values = de.nn.PickLargest(Estimation_Values)
                    test_labels_list = de.nn.PickLargest(test_labels)
                    Estimation_Values1 = ga.nn.PickLargest(Estimation_Values1)
                    Tll = ga.nn.PickLargest(test_labels)
                    Estimation_Values2 = pso.NN.PickLargest(Estimation_Values2)
                    tll1 = pso.NN.PickLargest(test_labels)
                    Estimation_Values3 = VNN.PickLargest(Estimation_Values3)
                    tll = VNN.PickLargest(test_labels)

                    # print("ESTiMATION VALUES BY GIVEN INDEX (CLASS GUESS) ")
                    # print(Estimation_Values)
                else:
                    Estimation_Values = Estimation_Values.tolist()
                    test_labels_list = test_labels.tolist()[0]
                    Estimation_Values = Estimation_Values[0]

                Estimat = Estimation_Values
                groun = test_labels_list

                meta = list()
                Nice = Per.ConvertResultsDataStructure(groun, Estimat)
                Nice1 = Per.ConvertResultsDataStructure(
                    Tll, Estimation_Values1)
                Nice2 = Per.ConvertResultsDataStructure(
                    tll1, Estimation_Values2)
                Nice3 = Per.ConvertResultsDataStructure(
                    tll, Estimation_Values3)
                DEss = Per.StartLossFunction(regression, Nice, meta)
                GAss = Per.StartLossFunction(regression, Nice1, meta)
                PSOSS = Per.StartLossFunction(regression, Nice2, meta)
                VNNS = Per.StartLossFunction(regression, Nice3, meta)
                print("DE")
                print(DEss)
                print("GA")
                print(GAss)
                print("PSO")
                print(PSOSS)
                print("NN Back prop.")
                print(VNNS)

                # print("THE GROUND VERSUS ESTIMATION:")
                # print(Nice)

                # headers = ["Data set", "layers", "pop", "Beta", "CR", "generations", "loss1", "loss2"]
                Meta = [
                    data_set,
                    len(hidden_layers), de_hyperparameters["population_size"],
                    de_hyperparameters["beta"], de_hyperparameters["crossover_rate"],
                    de_hyperparameters["max_gen"]
                ]

                Per.StartLossFunction(regression, Nice, Meta, filename)
                data_set_counter += 1
                total_counter += 1

    print("Program End ")
# Each image is 8x8; recognize the handwritten digits 0-9
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report

digits = load_digits()
x = digits.data
y = digits.target
# normalize the values to bring them into the range 0-1
x -= x.min()
x /= x.max()

nn = NeuralNetwork([64, 100, 10], "logistic")
x_train, x_test, y_train, y_test = train_test_split(x, y)
print(x_test.shape)
'''
For nominal data, preprocessing.LabelBinarizer is a handy tool for label
binarization: it can map yes/no to 0/1, or incident/normal to 0/1.
It also works for more than two classes.
'''
label_train = LabelBinarizer().fit_transform(y_train)
label_test = LabelBinarizer().fit_transform(y_test)
print("start fitting..")
predictions = []
nn.fit(x_train, label_train, epochs=10000)

# evaluate on the test set
for i in range(x_test.shape[0]):
    o = nn.predict(x_test[i])
    predictions.append(np.argmax(o))
Example #26
class Mnist:
    def __init__(self, hidden_nodes, learning_rate):
        input_nodes = 784  # input data has 28 * 28 pixel
        self.output_nodes = 10  # output data are the numbers from 0..9

        self.train_data_list = Mnist.get_train_data_list()
        self.test_data_list = Mnist.get_test_data_list()
        self.neural_network = NeuralNetwork(input_nodes, hidden_nodes,
                                            self.output_nodes, learning_rate)

    def train(self):
        for record in self.train_data_list:
            all_values = record.split(
                ',')  # split the record by the ',' commas

            inputs = (numpy.asfarray(all_values[1:]) / 255.0 *
                      0.99) + 0.01  # scale and shift the inputs

            targets = numpy.zeros(self.output_nodes) + 0.01

            targets[int(all_values[0])] = 0.99

            self.neural_network.train(inputs, targets)

    def test(self):
        scorecard = []

        for record in self.test_data_list:

            all_values = record.split(
                ',')  # split the record by the ',' commas

            correct_label = int(
                all_values[0])  # correct answer is the first label

            # scale and shift the inputs
            inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01

            # query the network
            outputs = self.neural_network.query(inputs)

            # the index of the highest value corresponds to the label
            label = numpy.argmax(outputs)

            # append correct or incorrect answer to list
            if label == correct_label:
                scorecard.append(1)
            else:
                scorecard.append(0)

        # calculates the performance score, the fraction of correct answers
        scorecard_array = numpy.asarray(scorecard)

        # return performance
        return scorecard_array.sum() / scorecard_array.size

    # run the network backwards, given a label, see what image it produces
    def backward(self, label):
        # create the output signals for this label
        targets = numpy.zeros(self.output_nodes) + 0.01

        # all_values[0] is the target label for this record
        targets[label] = 0.99

        # get image data
        image_data = self.neural_network.backquery(targets)

        # plot image data
        matplotlib.pyplot.imshow(image_data.reshape(28, 28),
                                 cmap='Greys',
                                 interpolation='None')
        matplotlib.pyplot.show()

    @staticmethod
    def get_train_data_list():
        optional_train_file_name = "data/mnist_train.csv"
        default_train_file_name = "data/mnist_train_100.csv"
        return FileReader.read_optional_file_or_default(
            optional_train_file_name, default_train_file_name)

    @staticmethod
    def get_test_data_list():
        optional_test_file_name = "data/mnist_test.csv"
        default_test_file_name = "data/mnist_test_10.csv"
        return FileReader.read_optional_file_or_default(
            optional_test_file_name, default_test_file_name)
Example #27
class AIPlayer:
    def __init__(self, gameModel, name):
        self.neuralNetwork = NeuralNetwork(16, [4, 4], 3)

        # print(self.neuralNetwork)
        # gameModel to get data from and send input to
        self.gameModel = gameModel

        # keep input and output to print them on screen
        self.inputVector = np.zeros(16)  # 8 distances + 8 relative speeds
        self.commands = np.zeros(3)
        self.name = name

    def DNA(self):
        return self.neuralNetwork.DNA()

    def setFromDNA(self, dna):
        self.neuralNetwork.setFromDNA(dna)

    def update(self, delta):
        # get environment data from gameModel
        nbQuadrants = 8  # number of quadrants in the front 180° arc
        distances = math.inf * np.ones(nbQuadrants)
        relativeSpeeds = np.zeros(nbQuadrants)

        # compute referential change matrix for the ship
        rotMatrix = np.array([[
            math.cos(self.gameModel.ship.theta),
            math.sin(self.gameModel.ship.theta)
        ],
                              [
                                  -math.sin(self.gameModel.ship.theta),
                                  math.cos(self.gameModel.ship.theta)
                              ]])

        # print("Ship angle:", self.gameModel.ship.theta, "Rotation Matrix:", rotMatrix.flatten())
        for a in self.gameModel.asteroidsGroup:
            # compute the asteroid coordinates in the ship's referential
            deltaPos = a.pos - self.gameModel.ship.pos
            deltaPos1 = np.matmul(rotMatrix, deltaPos)

            # find in which quadrant is the asteroid
            relativeAngle = np.arctan2(deltaPos1[1], deltaPos1[0])
            quadrantIndex = int(
                round(relativeAngle / (math.pi * 2 / nbQuadrants)) %
                nbQuadrants)
            # print("Angle:", relativeAngle, "Rounded:", round(relativeAngle / (math.pi * 2 / nbQuadrants)), "Quadrant:", quadrantIndex,"\n", end="", flush=True)

            # compute the distance and update NN inputs
            distance = max(1.0,
                           np.linalg.norm(deltaPos1) - a.radius - SHIP_SIZE)
            # print("Asteroid", a.name ,"in quadrant ",self.quadrantIndex, "at", distance)
            distances[quadrantIndex] = min(distances[quadrantIndex], distance)

            # compute the speed
            nextRelativePos = a.pos + delta * a.speed * np.array([
                math.cos(a.theta), math.sin(a.theta)
            ]) - self.gameModel.ship.pos
            relativeSpeed = distance - np.linalg.norm(nextRelativePos)
            if relativeSpeed > 0.0:
                # the asteroid is getting closer
                relativeSpeeds[quadrantIndex] = max(
                    relativeSpeeds[quadrantIndex], relativeSpeed)

        # compute commands
        distances = 100.0 / (distances + 100.0)
        relativeSpeeds = relativeSpeeds / ASTEROID_MAX_SPEED
        self.inputVector = np.concatenate([distances, relativeSpeeds])
        self.commands = self.neuralNetwork.compute(self.inputVector)
        self.commands = np.clip(self.commands, -1.0, 1.0)
        # print(self.inputVector, self.commands)

        # send self.commands to gameModel
        # rotation command
        self.gameModel.ship.thetaSpeed = SHIP_TURN_RATE * self.commands[0]

        # acceleration command
        self.commands[1] = np.round(self.commands[1])
        self.gameModel.ship.acceleration = (
            (self.commands[1] + 1.0) / 2.0) * SHIP_ACCELERATION

        # firing command
        self.gameModel.ship.toggleFire(self.commands[2] >= 0.0)
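
The rotation matrix above changes world coordinates into the ship's reference frame. A small standalone check: with the ship pointing along +y in world space (theta = pi/2), a point dead ahead should land on the ship's +x axis:

import math
import numpy as np

theta = math.pi / 2
rotMatrix = np.array([[math.cos(theta), math.sin(theta)],
                      [-math.sin(theta), math.cos(theta)]])
world_offset = np.array([0.0, 10.0])  # asteroid 10 units ahead of the ship
ship_frame = np.matmul(rotMatrix, world_offset)
print(np.round(ship_frame, 6))  # [10.  0.] -> relativeAngle 0, quadrant 0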
Example #28
def img_show(array):
    pixels = array.reshape((28, 28))
    plt.imshow(pixels, cmap='gray')
    plt.show()


sigmoid = lambda x: 1 / (1 + np.exp(-x))
dsigmoid = lambda x: x * (1 - x)       # derivative expressed in terms of the sigmoid *output*
tanh = np.tanh
dtanh = lambda x: 1 - (np.tanh(x)**2)  # derivative expressed in terms of the *input*

if __name__ == '__main__':
    T1 = time.time()
    n = NeuralNetwork([784, 1000, 600, 250, 10], 0.01,
                      [tanh, sigmoid, tanh, sigmoid],
                      [dtanh, dsigmoid, dtanh, dsigmoid])

    BATCH = 20
    BATCH_SIZE = 2
    losses = np.zeros(BATCH)
    losses2 = np.zeros(BATCH)

    for i in range(BATCH):
        t1 = time.time()
        for j in range(BATCH_SIZE):
            rn = np.random.randint(59999)
            n.train([x_train[rn]], [tjb_train[rn]])
        t2 = time.time()
        u = n.losses(x_test[:10], tjb_test[:10])
        losses[i] = u
Example #29
        pre_word = poem[i]
        next_word = poem[i + 1]
        input_idx.append(word2index[pre_word])
        output_idx.append(word2index[next_word])

# using the keras library to express words
# in the form of one-hot vector
oht_inputs = to_categorical(np.array(input_idx))
oht_outputs = to_categorical(np.array(output_idx))

vocab_size = len(oht_inputs[0])

# sending necessary parameters to create the
# Feed-Forward Neural Network Language model
model = NeuralNetwork(i2w=index2word,
                      inp_dim=vocab_size,
                      hid_dim=512,
                      out_dim=vocab_size)

# training the model with custom epoch size which is 50.
for i in range(50):
    print("ITER:", i)
    model.train(oht_inputs, oht_outputs)

model.save_model()
model.load_model()

# Task 2: Poem Generation


def calc_perplexity(probs):
    # perplexity = exp of the average negative log-probability
    log_sum = 0
    for p in probs:
        log_sum += np.log(p)
    return np.exp(-log_sum / len(probs))
Example #30
    },
    {
        'inputs': [1, 0, 1],
        'target': [1.]
    },
    {
        'inputs': [1, 1, 0],
        'target': [0.]
    },
    {
        'inputs': [1, 1, 1],
        'target': [0.]
    }
]

nn = NeuralNetwork(3, 4, 2, 2, 1)

print("Untrained Neural Network:\n")
for i in range(8):
    data = training_data[i]
    results = nn.predict(data['inputs'])
    print("results: ", round(results[0, 0], 3))

print("\nTrained Neural Network:\n")

for i in range(10000):
    r = np.random.randint(8)
    data = training_data[r]

    nn.train(data['inputs'], data['target'])
Example #31
train_label = img[:, 0:int(img.shape[1]/2), :]
predict_data = color.rgb2gray(img[:, int(img.shape[1]/2):img.shape[1], :])
predict_label = img[:, int(img.shape[1]/2):img.shape[1], :]

# # Use two pics, one for train and another for test
# img_train = color_normalize(io.imread('sun01.jpeg'))
# img_test = color_normalize(io.imread('beach01.jpg'))
# train_data = color.rgb2gray(img_train)
# train_label = img_train
# predict_data = color.rgb2gray(img_test)
# predict_label = img_test
# img_original = img_train


# Set up nn for R value
nn_r = NeuralNetwork(layers, 'tanh')
X = []
y_r = []

# Set up nn for G value
nn_g = NeuralNetwork(layers, 'tanh')
y_g = []

# Set up nn for B value
nn_b = NeuralNetwork(layers, 'tanh')
y_b = []

# Set train data and label
for i in range(1, train_data.shape[0]-1):
    for j in range(1, train_data.shape[1]-1):
        X.append(window(train_data, i, j))
Example #32
def test_nn(config):
    nn = NeuralNetwork(config)
    nn.back_propagation()
    cmat, crate, cout = nn.classification_test(nn.testing_data, nn.weights_best)
    print(cmat)
    print(crate)
Example #33
from NeuralNetwork import NeuralNetwork as NN
import numpy as np
import test_fixtures as tf

a = tf.t3_123

assert NN.pad_with_zeros(a, 10).shape[0] % 10 == 0
assert NN.pad_with_zeros(a, 3).shape[0] % 3 == 0
assert NN.pad_with_zeros(a, 5).shape[0] % 5 == 0
assert NN.pad_with_zeros(a, 1).shape[0] % 1 == 0
assert NN.pad_with_zeros(a, 2).shape[0] % 2 == 0

assert NN.pad_with_wrap(a, 10).shape[0] % 10 == 0
assert NN.pad_with_wrap(a, 3).shape[0] % 3 == 0
assert NN.pad_with_wrap(a, 5).shape[0] % 5 == 0
assert NN.pad_with_wrap(a, 1).shape[0] % 1 == 0
assert NN.pad_with_wrap(a, 2).shape[0] % 2 == 0

assert NN.DataSet.training.value == 0
assert NN.DataSet.validation.value == 1
assert NN.DataSet.testing.value == 2
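
pad_with_zeros and pad_with_wrap are not shown; a hypothetical re-implementation consistent with the assertions above, padding along axis 0 until the row count is a multiple of the given number:

import numpy as np

def pad_rows(a, multiple, mode):
    # mode="constant" pads with zeros, mode="wrap" repeats rows from the start
    extra = (-a.shape[0]) % multiple
    pad_width = [(0, extra)] + [(0, 0)] * (a.ndim - 1)
    return np.pad(a, pad_width, mode=mode)

a = np.arange(7).reshape(7, 1)
assert pad_rows(a, 3, "constant").shape[0] % 3 == 0  # like pad_with_zeros
assert pad_rows(a, 3, "wrap").shape[0] % 3 == 0      # like pad_with_wrap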
Example #34
# Each image is 8x8; recognize the handwritten digits 0-9

import numpy as np
from sklearn.datasets import load_digits
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.preprocessing import LabelBinarizer
from NeuralNetwork import NeuralNetwork
from sklearn.model_selection import train_test_split


digits = load_digits()
X = digits.data
y = digits.target
X -= X.min()  # normalize the values to bring them into the range 0-1
X /= X.max()

nn = NeuralNetwork([64, 100, 10], 'logistic')
X_train, X_test, y_train, y_test = train_test_split(X, y)
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
print("start fitting")
nn.fit(X_train, labels_train, epochs=3000)
predictions = []
for i in range(X_test.shape[0]):
    o = nn.predict(X_test[i])
    predictions.append(np.argmax(o))
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))

    def predict(self, x):
        x = np.array(x)
        temp = np.ones(x.shape[0] + 1)
        temp[0:-1] = x
        a = temp
        for w in self.weights:
            a = self.activation(np.dot(a, w))
        return a


from NeuralNetwork import NeuralNetwork


nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])

nn.fit(X, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))
Example #36
                        EXAMPLE ON USING THE NEURAL NETWORK
--------------------------------------------------------------------------------

The following is an example of how to use the neural network library in
NeuralNetwork.py.
In the following example, the neural network is trained and tested on the
XOR problem.
================================================================================
"""

import numpy as np
from NeuralNetwork import NeuralNetwork
import random

# Creating model
model = NeuralNetwork(2)  # Creates an input layer with 2 input nodes
model.add_layer(
    2, activation="sigmoid")  # Adds a hidden layer with 2 nodes
model.add_layer(
    1, activation="sigmoid")  # Adds an output layer with 1 node

# Constants
N_EPOCHS = 100
N_SAMPLES = 1000
LEARNING_RATE = 0.1

# Generating data
X = np.array([[0, 0]]).reshape((1, -1))
y = np.array([[0]]).reshape((1, -1))

for i in range(N_SAMPLES - 1):
Example #37
train_features, train_targets = features[:-60 * 24], targets[:-60 * 24]
val_features, val_targets = features[-60 * 24:], targets[-60 * 24:]


def MSE(y, Y):
    return np.mean((y - Y)**2)
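
# Quick sanity check of MSE (illustrative): predictions [1, 2] against targets
# [1, 4] give squared errors [0, 4], so the mean squared error is 2.0:
#   assert MSE(np.array([1.0, 2.0]), np.array([1.0, 4.0])) == 2.0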


### Set the hyperparameters here ###
epochs = 500
learning_rate = 0.01
hidden_nodes = 25
output_nodes = 1

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train': [], 'validation': []}
for e in range(epochs):
    # Go through a random batch of 128 records from the training data set
    batch = np.random.choice(train_features.index, size=128)
    for record, target in zip(train_features.loc[batch].values,
                              train_targets.loc[batch]['cnt']):
        network.train(record, target)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
    sys.stdout.write("\rProgress: " + str(100 * e / float(epochs))[:4] \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
Example #38
# coding:utf-8
__author__ = 'fz'
__date__ = '2017-05-18 11:51'

from NeuralNetwork import NeuralNetwork
import numpy as np

# layer sizes [2, 2, 1]: 2 input neurons, 2 hidden neurons, 1 output neuron
nn = NeuralNetwork([2, 2, 1], 'logistic')
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
    print(i, nn.predict(i))
Example #39
class NNPlayer(AbstractPlayer):
    def __init__(self):
        self.nn = NeuralNetwork(NN_SHAPE)

        self.log = ""

    def _write_log(self, string):
        """Log Writer for Debugging the class/neural network"""
        #print(string)
        self.log += string + "\n"

    def play(self, myState, oppState, myScore, oppScore, turn, length, nPips):
        """Overrides AbstractPlayer.play"""

        self._write_log(f"Turn Number: {turn}")
        self._write_log(f"Player Score: {myScore}")
        self._write_log(f"Oppenent Score: {oppScore}")

        # ######################
        # Inputs are:
        #   myState = 3x3x3 array of ints
        #   oppState = 3x3x3 array of ints
        #   myScore = int
        #   oppScore = int
        #   turn = int
        #   length = int
        #   nPips = int
        # ######################
        # Need to return an array of length == nPips
        # each element of the array is a sub array of length 3
        # each element of the sub array is an integer from the set (0, 1, 2)

        my_state_flat = []
        opp_state_flat = []

        for i in range(3):
            for j in range(3):
                my_state_flat += myState[i][j]

        for i in range(3):
            for j in range(3):
                opp_state_flat += oppState[i][j]

        input_data = my_state_flat + opp_state_flat

        input_data = list(map(float,input_data))

        self._write_log(f"NN Input = {input_data}")

        output_data = self.nn.forward(input_data).flatten().tolist()

        self._write_log(f"NN Output = {output_data}")

        d1 = output_data[0:3]
        d2 = output_data[3:6]
        d3 = output_data[6:9]
        decision = [np.argmax(d1), np.argmax(d2), np.argmax(d3)]

        self._write_log(f"Decision = {decision}")

        return decision
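
The decision step above slices the 9-element network output into three independent 3-way argmax choices. A standalone illustration with made-up scores:

import numpy as np

output_data = [0.1, 0.7, 0.2,   # scores for the first sub-decision
               0.5, 0.3, 0.2,   # scores for the second sub-decision
               0.1, 0.1, 0.8]   # scores for the third sub-decision
decision = [int(np.argmax(output_data[i:i + 3])) for i in (0, 3, 6)]
print(decision)  # [1, 0, 2]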
Example #40
    def test_activation(self):
        network = NeuralNetwork(3, 2, 1, 0.5)
        # Test that the activation function is a sigmoid
        self.assertTrue(
            np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))
Example #41
    np.save("chr1_IH1", sortedPop1[0].IHWeights)
    np.save("chr1_HO1", sortedPop1[0].HOWeights)
    np.save("chr1_IH2", sortedPop1[1].IHWeights)
    np.save("chr1_HO2", sortedPop1[1].HOWeights)

    np.save("chr2_IH1", sortedPop2[0].IHWeights)
    np.save("chr2_HO1", sortedPop2[0].HOWeights)
    np.save("chr2_IH2", sortedPop2[1].IHWeights)
    np.save("chr2_HO2", sortedPop2[1].HOWeights)


while True:
    curChromosone1 = pop1.chromosones[pop1.curChromosone]
    curChromosone2 = pop2.chromosones[pop2.curChromosone]
    NN1 = NN()
    NN2 = NN()

    game = CT()
    game_over = False
    score1 = 0
    score2 = 0

    while not game_over:
        if (iterationCounter % 20 == 0):
            print(score1)
            print(score2)
            game.print_board(game.board)

        if (iterationCounter % 1000 == 0):
            saveState(pop1, pop2)
import numpy as np
# library for plotting arrays
import matplotlib.pyplot as plt
from NeuralNetwork import NeuralNetwork

if __name__ == '__main__':
    # number of input, hidden and output nodes
    input_nodes = 784
    hidden_nodes = 200
    output_nodes = 10

    # learning rate
    learning_rate = 0.1

    # create instance of neural network
    n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

    # load the mnist training data CSV file into a list
    with open("mnist_dataset/mnist_train_100.csv", 'r') as training_data_file:
        training_data_list = training_data_file.readlines()

    # # train the neural network

    # epochs is the number of times the training data set is used for training
    epochs = 5

    for e in range(epochs):
        # go through all records in the training data set
        for record in training_data_list:
            # split the record by the ',' commas
            all_values = record.split(',')
Example #43
    def __init__(self):
        self.nn = NeuralNetwork(NN_SHAPE)

        self.log = ""
from NeuralNetwork import NeuralNetwork

nn = NeuralNetwork()

i1 = nn.create_new_input()
i2 = nn.create_new_input()
i3 = nn.create_new_input()

o1 = nn.create_new_output()

#nn.create_full_mesh([])

right = False
counter = 0
weights = []

i1.set_value(1)
i2.set_value(2)
i3.set_value(3)

nn.train(0)

value = o1.get_value()

while not right:
    value = o1.get_value()
    print(counter, ": ", value)
    right, weights = nn.train(counter=counter,
                              weights=weights,
                              value=value,
                              rate=0.07)
import pickle

# WARNING: it is better to download ./assets/train.txt from the assignment page, since the git copy seems to have CRLF line-ending problems
if __name__ == '__main__':
    '''
    test from lab (OK)
        myW = [np.array([0]), np.array([[0.3, -0.9, 1], [-1.2, 1, 1]]), np.array([[1, 0.8]])]
        nn = NeuralNetwork([3, 2, 1], sigmoid, sigmoidDeriv, myW)
        x = np.array([1, 0, 1])
        y = np.array([0])
        nn.forward(x)
        # print(nn.a)
        print(nn.backward(y, 0.3))
        print()
        print()
        print(nn.w)
    '''
    data = Data(conf.TRAIN_PATH)
    nn = NeuralNetwork(data.structure, conf.sigmoid, conf.sigmoidDeriv)
    epochs = int(input("Epochs: "))
    alpha = float(input("Learning rate: "))
    nn.learn(data.data, epochs, alpha)
    plt.plot(nn.errors)
    plt.show()

    params = [nn.w, [data.data.normFeatures, data.data.normLabels]]

    with open(conf.PARAMS_PATH, 'wb') as outfile:
        pickle.dump(params, outfile, pickle.HIGHEST_PROTOCOL)

    print(nn.w)
Example #46
np.random.seed(seed)
x = np.sort(np.random.uniform(0, 1, n))
y = np.sort(np.random.uniform(0, 1, n))
x, y = np.meshgrid(x, y)
z = np.ravel(f.FrankeFunction(x, y) + 0.1*np.random.randn(x.shape[0], x.shape[1]))
z = z.reshape(-1, 1)

# set up the design matrix
data = DataPrep()
X = data.design_matrix(x, y, degree=1)[:, 1:]

# split data in train and test and scale it
X_train, X_test, z_train, z_test = data.train_test_scale(X, z)

# set up the neural network
network = NeuralNetwork(X_train.shape[1], neurons, n_outputs, cost.MSE())
network.create_layers(hidden_act, output_act, seed)

# train the network
batch_size = len(X_train)//n_batches
index_array = np.arange(len(X_train))
for k in range(n_epochs):
    np.random.shuffle(index_array)
    X_minibatches = np.split(X_train[index_array], n_batches)
    z_minibatches = np.split(z_train[index_array], n_batches)

    for l in range(n_batches):
        network.backprop(X_minibatches[l], z_minibatches[l], eta, lmbda)

network.feedforward(X_test)
print(f"MSE test NN {f.MSE(z_test, network.layers[-1].a)}")
Example #47
n_epochs = 5                               # 20
batch_size = 1                            # 100
learning_rate = 0.035                         # 1
#regularisation = 0.6

trainingShare = 0.8
seed  = 42
training_input, test_input, training_labels, test_labels = train_test_split(
                                                                input_prepared,
                                                                labels,
                                                                train_size=trainingShare,
                                                                test_size = 1-trainingShare,
                                                                random_state=seed
                                                                )

network = NeuralNetwork(layers, Sigmoid())
network.train(training_input, training_labels, learning_rate, n_epochs, batch_size, \
            test_input, test_labels, test='accuracy')

pred_prob = network.predict_probabilities(test_input)
pred = network.predict(test_input)

figurepath = '../figures/'

#compute confusion matrix
true_negative, false_positive, false_negative, true_positive = confusion_matrix(test_labels, pred).ravel()
#normalize
'''
n_negative = true_negative+false_negative
true_negative /= n_negative
false_negative /= n_negative
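
For a binary problem, sklearn's confusion_matrix returns a 2x2 array whose ravel() yields (tn, fp, fn, tp), as used above. A tiny self-contained example:

from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 1]
y_pred = [0, 1, 1, 1, 0]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print(tn, fp, fn, tp)  # 1 1 1 2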
Example #48
from NeuralNetwork import NeuralNetwork
import pandas as pd
# Data 1
print('Starting training data 1')
data = pd.read_csv('dataset.csv').values
N, d = data.shape
X = data[:, 0:d - 1].reshape(-1, d - 1)
y = data[:, 2].reshape(-1, 1)
p = NeuralNetwork([X.shape[1], 5, 1], 0.1)
p.fit(X, y, 10000, 100)
# This will print the result = 0
print('Result for x = [5 0.1] ', p.predict([5, 0.1]))
# This will print the result = 1
print('Result for x = [10 0.8] ', p.predict([10, 0.8]))
Example #49
def build():
    lr = math.pow(5, -4)
    alpha = 4e-4
    optimizer = Optimizers.Adam(lr)
    optimizer.add_regularizer(Constraints.L2_Regularizer(alpha))
    net = NeuralNetwork(optimizer, Initializers.He(),
                        Initializers.Constant(0.1))

    net.loss_layer = Loss.CrossEntropyLoss()

    cl_1 = Conv.Conv((1, 1), (1, 5, 5), 6)
    net.append_trainable_layer(cl_1)
    net.layers.append(ReLU.ReLU())

    pl_2 = Pooling.Pooling((2, 2), (2, 2))
    net.layers.append(pl_2)

    cl_3 = Conv.Conv((1, 1), (1, 5, 5), 16)
    net.append_trainable_layer(cl_3)
    net.layers.append(ReLU.ReLU())

    pl_4 = Pooling.Pooling((2, 2), (2, 2))  #16*7*7
    net.layers.append(pl_4)

    cl_5 = Conv.Conv(
        (100, 100), (1, 15, 15), 120
    )  # stride and kernel shape are large enough that this acts like a valid convolution
    net.append_trainable_layer(cl_5)
    net.layers.append(ReLU.ReLU())

    net.layers.append(Flatten.Flatten())  #120

    fcl_1 = FullyConnected.FullyConnected(120, 84)
    net.append_trainable_layer(fcl_1)
    net.layers.append(ReLU.ReLU())

    fcl_2 = FullyConnected.FullyConnected(84, 10)
    net.append_trainable_layer(fcl_2)
    net.layers.append(SoftMax.SoftMax())

    return net
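
A minimal smoke test for build(), using only names the function itself defines (whether append_trainable_layer also appends to net.layers depends on the NeuralNetwork implementation, which is not shown):

net = build()
print(type(net).__name__, "created;", len(net.layers), "entries in net.layers")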
    random.seed(4)
    random.shuffle(X)
    random.seed(4)
    random.shuffle(y)

    return np.asarray(X), np.asarray(y)


episodes = range(0, 50)
#X, y = generate_training_examples()
X_train, X_test, y_train, y_test = get_mnist_dataset()
print(X_train[2])
print(y_train[2])
net = NeuralNetwork([784, 1024, 10],
                    LogisticFunction.function,
                    0.001,
                    batch_size=512)

for e in tqdm(episodes):
    net.train(X_train, y_train)

    error_total = net.calc_total_error(X_train[1], y_train[1])
    print("Total error: " + str(error_total))

# leftover XOR checks (the network above takes 784 inputs, so these would fail):
# print(net.calc_feed_forward([0, 0]))
# print(net.calc_feed_forward([0, 1]))
# print(net.calc_feed_forward([1, 0]))
# print(net.calc_feed_forward([1, 1]))

#print(net.calc_feed_forward(np.array([0.05, 0.1])))
#print(net.calc_total_error(np.array([0.05, 0.1]), np.array([0.01, 0.99])))
Example #51
    correct = 0
    for x in range(testData.shape[0]):
        inputs = norm(np.ndarray.flatten(testData[x]))
        guess = nn.guess(inputs)
        print("the number was: " + str(testLabels[x]))
        guess = whatIndex(guess) + 1
        print("it guessed it was :" + str(guess))
        if guess == testLabels[x]:
            correct += 1

    # print("it correctly predicted " + str(correct / len(data) * 100) + "%")
    return (correct / testData.shape[0] * 100)


nn = NeuralNetwork(784, [400], 26, 0.1)

if doSave:

    trainingData, trainingLabels = emnist.extract_training_samples('letters')

    trained = False
    bestResult = 0
    # remember to re-shuffle the training data before each additional epoch (see the sketch after this example)
    while not trained:
        oldPercent = 0
        for x in range(trainingData.shape[0]):
            inputs = norm(np.ndarray.flatten(trainingData[x]))
            targets = toTargets(trainingLabels[x])
            percent = int(x / trainingData.shape[0] * 100)
            if percent > oldPercent:
                print(str(percent) + "%")  # simple progress indicator
                oldPercent = percent
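The reshuffling that the comment above asks for can be done with one shared permutation per epoch; a minimal sketch, assuming trainingData and trainingLabels are numpy arrays of equal length:

perm = np.random.permutation(trainingData.shape[0])
trainingData = trainingData[perm]      # shuffle the images...
trainingLabels = trainingLabels[perm]  # ...and the labels in the same order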
Example #52
0
#!/usr/bin/env python3.4
#-*- coding: utf-8 -*-

import numpy as n
from speedtest import Speedtest as sp



from NeuralNetwork import NeuralNetwork

mysp = sp()


nn = NeuralNetwork([2,2,4,2,1]) # create a new neural network with 2 input neurons, three hidden layers (2, 4 and 2 neurons) and 1 output neuron.
# Also possible: NeuralNetwork([2,6,7,3,1]), i.e. with several hidden layers!

s_in = n.array([[0, 0], [0, 1], [1, 0], [1, 1]]) # training inputs
s_teach = n.array([[0], [1], [1], [0]])          # training targets (XOR)
#s_teach = n.array([[0,0], [1,1], [1,1], [0,0]]) # alternative two-output targets




mysp.record('start')
nn.teach(s_in, s_teach, 0.2, 50000)  # train the network
mysp.record('ende')

mysp.printRecords()
#s_in: input data as a numpy array
#s_teach: target data as a numpy array
# optional: epsilon=0.2 is the learning rate
Example #53
0
def NN4py():

    # load the mnist training data CSV file into a list
    training_data_file = open(
        "./code_from_book/makeyourownneuralnetwork/mnist_dataset/mnist_train.csv",
        "r")
    training_data_list = training_data_file.readlines()
    training_data_file.close()

    # load the mnist test data CSV file into a list
    testing_data_file = open(
        "./code_from_book/makeyourownneuralnetwork/mnist_dataset/mnist_test.csv",
        "r")
    testing_data_list = testing_data_file.readlines()
    testing_data_file.close()

    # initialise the neural network base
    # number of input, hidden and output nodes
    input_nodes = 784
    hidden_nodes = 300
    output_nodes = 10
    # learning rate
    learning_rate = 0.1
    # create instance of neural network
    nw = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

    # epochs is the number of times the training data set is used for training
    epochs = 5
    time_train_start = time.time()
    print("start training now")
    for e in range(epochs):
        # go through all records in the training data set
        for record in training_data_list:
            # split the record by the ',' commas
            all_values = record.split(',')
            # scale and shift the inputs
            '''
            The pixel values range from 0 to 255; the code below shifts the
            inputs into the range 0.01 to 1.0. The lower bound of 0.01 avoids
            zero-valued inputs, which would artificially stall the weight
            updates. An upper bound of 1.0 is fine for inputs; only the outputs
            must avoid 1.0, because the logistic activation can only approach
            0.0 and 1.0 asymptotically, never reach them. For the same reason
            the training targets use 0.01 to represent 0 and 0.99 to represent 1.
            '''

            inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
            # create the target output values (all 0.01, except the desired label which is 0.99)
            targets = np.zeros(output_nodes) + 0.01
            # all_values[0] is the target label for this record
            # the index is the target digit, i.e. the one of the 10 output nodes to activate
            targets[int(all_values[0])] = 0.99
            nw.train(inputs, targets)
            pass
        pass
    print("training done !!!!")
    time_train_end = time.time()
    time_train_final = time_train_end - time_train_start
    print('training time is', time_train_final, 's')

    # scorecard for how well the network performs, initially empty
    scorecard = []
    time_test_start = time.time()
    print("start testing now !!!!")
    # go through all the records in the test data set
    for record in testing_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')
        # correct answer is first value
        correct_label = int(all_values[0])
        # print(correct_label, 'correct label')
        # scale and shift the inputs
        inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # query the network
        outputs = nw.query(inputs)
        # the index of the highest value corresponds to the label
        label = np.argmax(outputs)
        # np.argmax returns the position of the largest value in the array
        # print(label, "network's answer")
        # append correct or incorrect to list
        if (label == correct_label):
            # network's answer matches correct answer, add 1 to scorecard
            scorecard.append(1)
        else:
            # network's answer doesn't match correct answer, add 0 to scorecard
            scorecard.append(0)
            pass
        pass
    print("testing done !!!!")
    time_test_end = time.time()
    time_test_final = time_test_end - time_test_start
    print('testing time is', time_test_final, 's')
    # calculate the performance score, the fraction of correct answers
    scorecard_array = np.asarray(scorecard)
    print(scorecard_array.sum())
    print(scorecard_array.size)
    n_correct = float(scorecard_array.sum())
    n_total = float(scorecard_array.size)
    accuracy = n_correct / n_total
    print('testing accuracy is', accuracy * 100, '%')
Example #54
0
def run_xor():
    nn = NeuralNetwork('xor.json')
    nn.back_propagation()
    plot_error(nn)
Example #55
0
"""
简单非线性关系数据集测试(XOR)
 X              Y
0 0             0
0 1             1
1 0             1
1 1             0
"""
from NeuralNetwork import NeuralNetwork
import numpy as np

print(__doc__)

XORArray = [[0, 0], [0, 1], [1, 0], [1, 1]]

nn = NeuralNetwork([2, 2, 1], 'tanh')
X = np.array(XORArray)
y = np.array([0, 1, 1, 0])
nn.fit(X, y)
for i in range(len(XORArray)):
    print("sample: %s, label: %s, prediction: %s" % (XORArray[i], y[i], nn.predict(XORArray[i])))
Example #56
0
def run_nn(config):
    nn = NeuralNetwork(config)
    nn.back_propagation()
Example #57
0
from NeuralNetwork import NeuralNetwork

nn = NeuralNetwork(2, 6, 1)
training_data = [{
    "input": [0, 0],
    "output": [0]
}, {
    "input": [1, 1],
    "output": [0]
}, {
    "input": [1, 0],
    "output": [1]
}, {
    "input": [0, 1],
    "output": [1]
}]

for i in range(2000):
    for data in training_data:
        nn.train(data["input"], data["output"])

print(nn.predict([0, 0]))
print(nn.predict([0, 1]))
print(nn.predict([1, 0]))
print(nn.predict([1, 1]))
Example #58
0
def run_reg_nn():
    nn = NeuralNetwork('fake_config.json')
    nn.back_propagation()
    #plot_error(nn)
    return nn
Example #59
0
test_sound_file = ""
frame_size = 512
frame_overlap = 0.5  # as a decimal
sampling_rate = 44100

# Create Feature Extraction object & begin
feature_extraction = FeatureExtraction()
if extract_features:
    feature_extraction.extract_features(framesize=frame_size,
                                        frameoverlap=frame_overlap,
                                        base_dir=base_dir,
                                        results_path=results_dir,
                                        sampling_rate=sampling_rate)

# Create NN
nn = NeuralNetwork()
nn.train(results_loc=results_dir)
nn.test()

input_sound = input("Would you like to test a single audio file? (Yes/No) ")

if input_sound == "Yes":
    test_sound_file = "../../Sounds/Speech_TIMIT/test/MJWT0/SA1.wav"
    test_csv_file = "../../RESULTS/test/MJWT0/SA1.csv"
    prediction = nn.predict_file(test_csv_file)
    if prediction == 0:
        prediction = "Female"
    else:
        prediction = "Male"
    print("Prediction: ", prediction)
    # display result