Beispiel #1
0
 def __init__(self,
              datafile,
              desired_error=0.0000000001,
              iterations_between_reports=1000):
     """Initialise the trainer from *datafile*.

     Loads ``<datafile>.train`` and ``<datafile>.test`` with libfann and
     parses the train-file header line, whose second and third fields are
     the input and output dimensions.
     """
     self.datafile = datafile
     self.desired_error = desired_error
     self.iterations_between_reports = iterations_between_reports
     # BUG FIX: the original did `f.close` without parentheses, which
     # never closed the file; a context manager closes it reliably.
     with open(datafile + ".train", 'r') as f:
         firstline = f.readline()
     # str.split replaces the Python-2-only string.split helper.
     l = firstline.split()
     self.num_input = int(l[1])
     self.num_output = int(l[2])
     self.breeding = False
     self.stage = 0
     self.netsTried = 0
     self.maxMutations = 18
     self.populationSize = 12
     self.trainingData = libfann.training_data()
     self.trainingData.read_train_from_file(datafile + ".train")
     self.testData = libfann.training_data()
     self.testData.read_train_from_file(datafile + ".test")
     # Candidate activation functions used by the mutation operators.
     self.flist = [
         libfann.LINEAR, libfann.SIGMOID, libfann.SIGMOID_STEPWISE,
         libfann.SIGMOID_SYMMETRIC, libfann.SIGMOID_SYMMETRIC_STEPWISE,
         libfann.GAUSSIAN, libfann.GAUSSIAN_SYMMETRIC, libfann.ELLIOT,
         libfann.ELLIOT_SYMMETRIC, libfann.LINEAR_PIECE,
         libfann.LINEAR_PIECE_SYMMETRIC, libfann.SIN_SYMMETRIC,
         libfann.COS_SYMMETRIC
     ]
     # Names of the general mutation operators.
     self.mutationlist = [
         "change_connection_rate", "change_learning_rate",
         "change_num_neurons_hidden", "change_num_layers_hidden",
         "change_max_iterations", "change_training_algorithm",
         "change_activation_function_hidden",
         "change_activation_function_output", "change_learning_momentum",
         "change_activation_steepness_hidden",
         "change_activation_steepness_output", "change_training_param"
     ]
     # Training-parameter mutations (rprop_delta_zero intentionally disabled).
     self.trmutlist = [
         "change_connection_type",
         "change_quickprop_decay",
         "change_quickprop_mu",
         "change_rprop_increase_factor",
         "change_rprop_decrease_factor",
         "change_rprop_delta_min",
         "change_rprop_delta_max",
         #                          "change_rprop_delta_zero"
     ]
Beispiel #2
0
    def train(self, inputs, outputs, params):
        """Train a cascade network mapping input rows to flattened output grids.

        inputs:  array of shape (samples, features)
        outputs: array of shape (samples, rows, cols)
        params:  unused here; kept for interface compatibility.
        Side effects: saves the trained network to 'nn.net' and stores the
        normalisation bounds on self.
        """
        self.p = inputs.shape[1]       #number of input features
        self.n_r = outputs.shape[1]    #size of output grid in rows
        self.n_c = outputs.shape[2]    #size of output grid in cols

        self.out_min = outputs.min()
        self.out_max = outputs.max()

        # Widen the observed range slightly so the normalised targets fall
        # strictly inside (0, 1), matching the SIGMOID output activation.
        # BUG FIX: the original subtracted d/98 from *both* bounds, which
        # shifted the range instead of widening it and produced normalised
        # values greater than 1.
        d = self.out_max - self.out_min
        self.out_min -= d / 98
        self.out_max += d / 98

        outputs = (outputs - self.out_min) / (self.out_max - self.out_min)

        assert inputs.shape[0] == outputs.shape[0]

        nn = libfann.neural_net()
        #nn.create_standard_array((self.p, 50, 50, self.n_r*self.n_c))
        nn.create_shortcut_array((self.p, self.n_r*self.n_c))
        nn.set_learning_rate(.7)
        nn.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
        nn.set_activation_function_output(libfann.SIGMOID)

        data = libfann.training_data()
        data.set_train_data(inputs, outputs.reshape((-1, self.n_r*self.n_c)))

        #nn.train_on_data(data, 500, 10, .001)
        nn.cascadetrain_on_data(data, 15, 1, .001)

        nn.save('nn.net')
        nn.destroy()
Beispiel #3
0
    def test(self, ann_file, test_file):
        """Test an artificial neural network.

        Loads the ANN from *ann_file*, reads labelled samples from
        *test_file*, and logs the resulting mean square error.

        Raises:
            IOError: if either file does not exist.
        """
        if not os.path.isfile(ann_file):
            raise IOError("Cannot open %s (no such file)" % ann_file)
        if not os.path.isfile(test_file):
            raise IOError("Cannot open %s (no such file)" % test_file)

        # Get the prefix for the classification columns.
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only a missing config attribute should
        # trigger the fallback to OUTPUT_PREFIX.
        try:
            dependent_prefix = self.config.data.dependent_prefix
        except AttributeError:
            dependent_prefix = OUTPUT_PREFIX

        self.ann = libfann.neural_net()
        self.ann.create_from_file(ann_file)

        self.test_data = TrainData()
        try:
            self.test_data.read_from_file(test_file, dependent_prefix)
        except IOError as e:
            logging.error("Failed to process the test data: %s" % e)
            exit(1)

        logging.info("Testing the neural network...")
        fann_test_data = libfann.training_data()
        fann_test_data.set_train_data(self.test_data.get_input(),
            self.test_data.get_output())

        self.ann.test_data(fann_test_data)

        mse = self.ann.get_MSE()
        logging.info("Mean Square Error on test data: %f" % mse)
Beispiel #4
0
    def train(self, train_data):
        """Build a sparse network from the configured layout, train it on
        *train_data*, store it on self.ann and return it."""
        self.set_train_data(train_data)

        # Layer sizes: inputs, configured hidden layers, outputs.
        layers = [self.train_data.num_input]
        layers += [self.hidden_neurons] * self.hidden_layers
        layers += [self.train_data.num_output]

        log = sys.stderr.write
        log("Network layout:\n")
        log("* Neuron layers: %s\n" % layers)
        log("* Connection rate: %s\n" % self.connection_rate)
        if self.training_algorithm not in ('TRAIN_RPROP',):
            # RPROP ignores the learning rate, so only report it otherwise.
            log("* Learning rate: %s\n" % self.learning_rate)
        log("* Activation function for the hidden layers: %s\n" % self.activation_function_hidden)
        log("* Activation function for the output layer: %s\n" % self.activation_function_output)
        log("* Training algorithm: %s\n" % self.training_algorithm)

        # The configuration stores libfann constants by name; resolve them.
        net = libfann.neural_net()
        net.create_sparse_array(self.connection_rate, layers)
        net.set_learning_rate(self.learning_rate)
        net.set_activation_function_hidden(getattr(libfann, self.activation_function_hidden))
        net.set_activation_function_output(getattr(libfann, self.activation_function_output))
        net.set_training_algorithm(getattr(libfann, self.training_algorithm))
        self.ann = net

        fann_train_data = libfann.training_data()
        fann_train_data.set_train_data(self.train_data.get_input(), self.train_data.get_output())

        self.ann.train_on_data(fann_train_data, self.epochs, self.iterations_between_reports, self.desired_error)
        return self.ann
Beispiel #5
0
def main():
    """Train a FANN on the time series in cw3.in and plot predicted vs
    actual values for the verification window."""
    # prediction parameters
    known_days = 7
    predict_days = 1
    verify_days = 30

    # network parameters
    connection_rate = 1
    learning_rate = 0.1
    num_input = known_days * 2
    num_hidden = 60
    num_output = predict_days

    # training parameters
    desired_error = 0.000040
    max_iterations = 10000
    iteration_between_reports = 100

    # set up the network
    net = libfann.neural_net()
    net.create_sparse_array(connection_rate, (num_input, num_hidden, num_output))
    net.set_learning_rate(learning_rate)
    net.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)

    # read the input file and normalise the two interleaved series.
    # BUG FIX: the file handle is now closed via a context manager.
    with open("cw3.in") as fin:
        lines = fin.readlines()
    rawdata = list(map(float, lines))[-1000:]
    datain0 = rawdata[0::2]
    datain1 = rawdata[1::2]
    # Scale by 1.4 * max so values stay well below 1 for the sigmoid.
    n0 = max(datain0) * 1.4
    n1 = max(datain1) * 1.4
    datain0 = [x / n0 for x in datain0]
    datain1 = [x / n1 for x in datain1]

    # train the network.
    # BUG FIX: the training rows are materialised as lists; under
    # Python 3 `map` is lazy and set_train_data received iterators.
    data = libfann.training_data()
    drange = range(len(datain0) - known_days - verify_days)
    data.set_train_data(
        [datain0[x:x + known_days] + datain1[x:x + known_days] for x in drange],
        [datain0[x + known_days:x + known_days + predict_days] for x in drange]
        )
    net.train_on_data(data, max_iterations, iteration_between_reports, desired_error)

    # predict each day of the verification window from the preceding
    # known_days of both series.
    result = []
    for i in range(verify_days):
        start = -known_days - verify_days + i
        dtest = datain0[start:][:known_days] + datain1[start:][:known_days]
        result += [net.run(dtest)[0] * n0]
    # BUG FIX: the original sliced [-verify_days:-verify_days], an empty
    # list, so the red curve was offset; the commented-out variant shows
    # the intended [-verify_days*2:-verify_days] window.  Lists are also
    # materialised for plotting (Python 3 map objects are lazy).
    plot.plot([x * n0 for x in datain0[-verify_days * 2: -verify_days]] + result, "r")
    plot.plot([x * n0 for x in datain0[-verify_days:]], "b")
    plot.show()

    print("hehe")
    return
Beispiel #6
0
    def test(self, ann_file, test_file):
        """Test an artificial neural network.

        Loads the ANN from *ann_file*, reads labelled samples from
        *test_file*, and logs the resulting mean square error.

        Raises:
            IOError: if either file does not exist.
        """
        if not os.path.isfile(ann_file):
            raise IOError("Cannot open %s (no such file)" % ann_file)
        if not os.path.isfile(test_file):
            raise IOError("Cannot open %s (no such file)" % test_file)

        # Get the prefix for the classification columns.
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only a missing config attribute should
        # trigger the fallback to OUTPUT_PREFIX.
        try:
            dependent_prefix = self.config.data.dependent_prefix
        except AttributeError:
            dependent_prefix = OUTPUT_PREFIX

        self.ann = libfann.neural_net()
        self.ann.create_from_file(ann_file)

        self.test_data = TrainData()
        try:
            self.test_data.read_from_file(test_file, dependent_prefix)
        except IOError as e:
            logging.error("Failed to process the test data: %s" % e)
            exit(1)

        logging.info("Testing the neural network...")
        fann_test_data = libfann.training_data()
        fann_test_data.set_train_data(self.test_data.get_input(),
            self.test_data.get_output())

        self.ann.test_data(fann_test_data)

        mse = self.ann.get_MSE()
        logging.info("Mean Square Error on test data: %f" % mse)
    def __init__(self,
                 datafile,
                 desired_error = 0.0000000001,
                 iterations_between_reports = 1000):
        self.datafile = datafile
        self.desired_error = desired_error
        self.iterations_between_reports  = iterations_between_reports
        f = open(datafile+".train", 'r')
        firstline = f.readline()
        f.close
        l = string.split(firstline)
        self.num_input = int(l[1])
        self.num_output = int(l[2])
        self.breeding = False
        self.stage = 0
        self.netsTried = 0
        self.maxMutations = 18
        self.populationSize = 12
        self.trainingData = libfann.training_data()
        self.trainingData.read_train_from_file(datafile+".train")
        self.testData = libfann.training_data()
        self.testData.read_train_from_file(datafile+".test")
        self.flist = [libfann.FANN_LINEAR,libfann.FANN_SIGMOID,libfann.FANN_SIGMOID_STEPWISE,libfann.FANN_SIGMOID_SYMMETRIC,libfann.FANN_SIGMOID_SYMMETRIC_STEPWISE,
                      libfann.FANN_GAUSSIAN,libfann.FANN_GAUSSIAN_SYMMETRIC,libfann.FANN_ELLIOT,libfann.FANN_ELLIOT_SYMMETRIC,libfann.FANN_LINEAR_PIECE,
                      libfann.FANN_LINEAR_PIECE_SYMMETRIC,libfann.FANN_SIN_SYMMETRIC,libfann.FANN_COS_SYMMETRIC]
        self.mutationlist = ["change_connection_rate",
                        "change_learning_rate",
                        "change_num_neurons_hidden",
                        "change_num_layers_hidden",
                        "change_max_iterations",
                        "change_training_algorithm",
                        "change_activation_function_hidden",
                        "change_activation_function_output",
                        "change_learning_momentum",
                        "change_activation_steepness_hidden",
                        "change_activation_steepness_output",
                        "change_training_param"]
        self.trmutlist = ["change_connection_type",
                          "change_quickprop_decay",
                          "change_quickprop_mu",
                          "change_rprop_increase_factor",
                          "change_rprop_decrease_factor",
                          "change_rprop_delta_min",
                          "change_rprop_delta_max",
#                          "change_rprop_delta_zero"
                           ]
Beispiel #8
0
def load_data_prefix(prefix):
    """Load <prefix>_i.txt / <prefix>_o.txt into a FANN training set."""
    inputs = check_matrix(numpy.loadtxt(prefix + "_i.txt"))
    targets = check_matrix(numpy.loadtxt(prefix + "_o.txt"))

    train_set = fann.training_data()
    train_set.set_train_data(inputs, targets)
    return train_set
Beispiel #9
0
def testNet():
    """Load the network from `nn_file` and report its MSE on `test_file`."""
    samples = libfann.training_data()
    samples.read_train_from_file(test_file)

    net = libfann.neural_net()
    net.create_from_file(nn_file)

    # Clear any accumulated error before scoring.
    net.reset_MSE()
    net.test_data(samples)
    print("Mean square error: {0}".format(net.get_MSE()))
def load_data_prefix(prefix):
    """Read the paired <prefix>_i.txt / <prefix>_o.txt files into FANN data."""
    in_matrix = check_matrix(numpy.loadtxt(prefix + "_i.txt"))
    out_matrix = check_matrix(numpy.loadtxt(prefix + "_o.txt"))

    data = fann.training_data()
    data.set_train_data(in_matrix, out_matrix)

    return data
Beispiel #11
0
    def test(self, test_data):
        """Score self.ann on *test_data* and return the mean square error."""
        self.set_test_data(test_data)

        data = libfann.training_data()
        data.set_train_data(self.test_data.get_input(),
                            self.test_data.get_output())

        # Reset the accumulated error so only this data set is measured.
        self.ann.reset_MSE()
        self.ann.test_data(data)
        return self.ann.get_MSE()
Beispiel #12
0
    def test(self):
        """Build and train a sparse network from module-level settings,
        then report its MSE on the held-out test file.

        Reads training samples from ``tfile`` and test samples from
        ``test_file``; all hyper-parameters (connection_rate,
        learning_rate, max_iterations, ...) come from names defined
        outside this method.
        """
        print "Creating network."
        train_data = libfann.training_data()
        train_data.read_train_from_file(tfile)
        ann = libfann.neural_net()
        # Input/output layer sizes are taken from the width of the
        # loaded training vectors.
        ann.create_sparse_array(
            connection_rate, (len(train_data.get_input()[0]), num_neurons_hidden, len(train_data.get_output()[0]))
        )
        ann.set_learning_rate(learning_rate)
        ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC_STEPWISE)
        ann.set_activation_function_output(libfann.SIGMOID_STEPWISE)
        ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
        ann.train_on_data(train_data, max_iterations, iterations_between_reports, desired_error)

        print "Testing network"
        test_data = libfann.training_data()
        test_data.read_train_from_file(test_file)
        # Clear the error accumulated during training before scoring.
        ann.reset_MSE()
        ann.test_data(test_data)
        print "MSE error on test data: %f" % ann.get_MSE()
def load_data(filename, in_outs):
    """Split the table in *filename* into a FANN training set.

    The first in_outs[0] columns are inputs, the following in_outs[1]
    columns are outputs.
    """
    table = numpy.loadtxt(filename)
    n_in = in_outs[0]
    n_out = in_outs[1]

    # Column masks: 1 selects the column, 0 drops it.
    in_mask = numpy.ones(n_in)
    out_mask = numpy.concatenate([numpy.zeros(n_in), numpy.ones(n_out)])
    inputs = check_matrix(numpy.compress(in_mask, table, axis=1))
    outputs = check_matrix(numpy.compress(out_mask, table, axis=1))

    data = fann.training_data()
    data.set_train_data(inputs, outputs)

    return data
Beispiel #14
0
    def test(self, test_data):
        """Return the MSE of self.ann evaluated on *test_data*."""
        self.set_test_data(test_data)

        inputs = self.test_data.get_input()
        targets = self.test_data.get_output()
        fann_data = libfann.training_data()
        fann_data.set_train_data(inputs, targets)

        # Start from zero accumulated error, then score the data set.
        self.ann.reset_MSE()
        self.ann.test_data(fann_data)
        return self.ann.get_MSE()
Beispiel #15
0
    def test(self):
        """Build and train a sparse network from module-level settings,
        then report its MSE on the held-out test file.

        Reads training samples from ``tfile`` and test samples from
        ``test_file``; all hyper-parameters come from names defined
        outside this method.
        """
        print "Creating network."
        train_data = libfann.training_data()
        train_data.read_train_from_file(tfile)
        ann = libfann.neural_net()
        # Input/output layer sizes are taken from the width of the
        # loaded training vectors.
        ann.create_sparse_array(
            connection_rate,
            (len(train_data.get_input()[0]), num_neurons_hidden,
             len(train_data.get_output()[0])))
        ann.set_learning_rate(learning_rate)
        ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC_STEPWISE)
        ann.set_activation_function_output(libfann.SIGMOID_STEPWISE)
        ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
        ann.train_on_data(train_data, max_iterations,
                          iterations_between_reports, desired_error)

        print "Testing network"
        test_data = libfann.training_data()
        test_data.read_train_from_file(test_file)
        # Clear the error accumulated during training before scoring.
        ann.reset_MSE()
        ann.test_data(test_data)
        print "MSE error on test data: %f" % ann.get_MSE()
Beispiel #16
0
    def load_data(self, data_file, val_file=None):
        """Load FANN training data (and optional validation data).

        Sets self.train_data, self.dim_input, self.dim_output, and
        self.val_data / self.do_validation.  When *val_file* is absent
        or missing on disk, the training set doubles as validation data
        and self.do_validation is False.
        """
        # create training data, and ann object
        print "Loading data"
        self.train_data = libfann.training_data()
        self.train_data.read_train_from_file(data_file)
        self.dim_input = self.train_data.num_input_train_data()
        self.dim_output = self.train_data.num_output_train_data()

        input = self.train_data.get_input()    # NOTE(review): shadows builtin `input`
        target = self.train_data.get_output()

        # Tally samples whose first target component is below/above 0.5;
        # a target of exactly 0.5 is counted in neither bucket.
        data_lo_hi = [0, 0]
        for i in range(len(input)):
            if target[i][0] < 0.5:
                data_lo_hi[0] += 1
            elif target[i][0] > 0.5:
                data_lo_hi[1] += 1

        print "\t Train data is %d low and %d high" % tuple(data_lo_hi)

        if (val_file and os.path.exists(val_file)):
            print "Loading validation data"
            self.do_validation = True
            self.val_data = libfann.training_data()
            self.val_data.read_train_from_file(val_file)
            input = self.val_data.get_input()
            target = self.val_data.get_output()
            # Same low/high tally, repeated for the validation set.
            data_lo_hi = [0, 0]
            for i in range(len(input)):
                if target[i][0] < 0.5:
                    data_lo_hi[0] += 1
                elif target[i][0] > 0.5:
                    data_lo_hi[1] += 1
            print "\t Validation data is %d low and %d high" % tuple(
                data_lo_hi)
        else:
            # No separate validation file: reuse the training data.
            self.val_data = self.train_data
            self.do_validation = False
Beispiel #17
0
    def load_data(self, data_file,val_file=None):
        """Load FANN training data (and optional validation data).

        Sets self.train_data, self.dim_input, self.dim_output, and
        self.val_data / self.do_validation.  When *val_file* is absent
        or missing on disk, the training set doubles as validation data
        and self.do_validation is False.
        """
        # create training data, and ann object
        print "Loading data"
        self.train_data = libfann.training_data()
        self.train_data.read_train_from_file(data_file)
        self.dim_input=self.train_data.num_input_train_data()
        self.dim_output=self.train_data.num_output_train_data()

        input=self.train_data.get_input()    # NOTE(review): shadows builtin `input`
        target=self.train_data.get_output()

        # Tally samples whose first target component is below/above 0.5;
        # a target of exactly 0.5 is counted in neither bucket.
        data_lo_hi=[0,0]
        for i in range(len(input)):
            if target[i][0]<0.5:
               data_lo_hi[0]+=1 
            elif target[i][0]>0.5:
               data_lo_hi[1]+=1

        print "\t Train data is %d low and %d high" % tuple(data_lo_hi)

        if (val_file and os.path.exists(val_file)):
            print "Loading validation data"
            self.do_validation=True
            self.val_data=libfann.training_data()
            self.val_data.read_train_from_file(val_file)
            input=self.val_data.get_input()
            target=self.val_data.get_output()
            # Same low/high tally, repeated for the validation set.
            data_lo_hi=[0,0]
            for i in range(len(input)):
                if target[i][0]<0.5:
                   data_lo_hi[0]+=1 
                elif target[i][0]>0.5:
                   data_lo_hi[1]+=1
            print "\t Validation data is %d low and %d high" % tuple(data_lo_hi)
        else:
            # No separate validation file: reuse the training data.
            self.val_data=self.train_data
            self.do_validation=False
Beispiel #18
0
def load_data(filename, in_outs):
    """Build a FANN training set from the table in *filename*.

    The first in_outs[0] columns form the inputs and the next
    in_outs[1] columns form the outputs.
    """
    table = numpy.loadtxt(filename)
    n_in = in_outs[0]
    n_out = in_outs[1]

    # 1 selects a column, 0 drops it.
    input_mask = numpy.ones(n_in)
    output_mask = numpy.concatenate([numpy.zeros(n_in), numpy.ones(n_out)])

    inputs = check_matrix(numpy.compress(input_mask, table, axis=1))
    outputs = check_matrix(numpy.compress(output_mask, table, axis=1))

    data = fann.training_data()
    data.set_train_data(inputs, outputs)

    return data
Beispiel #19
0
def mainLoop():
    """Iteratively train the global `ann` on freshly distorted segments.

    Each iteration clones `segments`, distorts them with spoil(), writes
    them out in FANN's text format, runs one training epoch, and scores
    the network on `test`.  The network is saved to `ann_file` whenever
    the test MSE improves; the loop stops after `max_iters_after_save`
    iterations without improvement, or on Ctrl-C.
    """
    n_iter = 0
    last_save = 0
    min_test_MSE = 1.0
    max_iters_after_save = 50

    try:
        while True:
            n_iter += 1
            print "Iteration: %5d " % (n_iter),
            # Copy so the source segments stay pristine, then distort
            # each copy and shuffle the sample order.
            seg_copy = map(lambda (c, seg): (c, cv.CloneImage(seg)), segments)
            seg_copy = map(lambda (c, seg): (c, spoil(seg)), seg_copy)
            shuffle(seg_copy)

            # FANN text format: "<pairs> <inputs> <outputs>" header,
            # then alternating input and target lines.
            f = open(train_file, "w")
            f.write("%d %d %d\n" % (len(segments), num_input, num_output))

            for c, image in seg_copy:
                image = adjustSize(image, (segW, segH))
                for y in range(image.height):
                    for x in range(image.width):
                        # Scale pixel intensity into roughly [-0.8, 0.8]
                        # (assumes 0..255 pixel values — TODO confirm).
                        n = image[y, x] / 159.375 - 0.8
                        f.write("%f " % n)
                f.write("\n")
                # One-hot (-1/1) target at the character's charset index.
                n = charset.index(c)
                f.write("-1 " * n + "1" + " -1" * (num_output - n - 1) + "\n")

            f.close()

            train = libfann.training_data()
            train.read_train_from_file(train_file)
            ann.train_epoch(train)
            train.destroy_train()
            print "Train MSE: %f " % (ann.get_MSE()),
            print "Train bit fail: %5d " % (ann.get_bit_fail()),
            ann.test_data(test)
            mse = ann.get_MSE()
            print "Test MSE: %f " % (mse),
            print "Test bit fail: %5d " % (ann.get_bit_fail()),
            # Checkpoint whenever the held-out error improves.
            if mse < min_test_MSE:
                min_test_MSE = mse
                ann.save(ann_file)
                last_save = n_iter
                print "saved",
            if n_iter - last_save > max_iters_after_save: break
            print
    except KeyboardInterrupt:
        print "Interrupted by user."
Beispiel #20
0
Datei: train.py Projekt: woto/EPC
def mainLoop():
    """Iteratively train the global `ann` on freshly distorted segments.

    Each iteration clones `segments`, distorts them with spoil(), writes
    them out in FANN's text format, runs one training epoch, and scores
    the network on `test`.  The network is saved to `ann_file` whenever
    the test MSE improves; the loop stops after `max_iters_after_save`
    iterations without improvement, or on Ctrl-C.
    """
    n_iter = 0
    last_save = 0
    min_test_MSE = 1.0
    max_iters_after_save = 50

    try:
        while True:
            n_iter += 1
            print "Iteration: %5d " % (n_iter),
            # Copy so the source segments stay pristine, then distort
            # each copy and shuffle the sample order.
            seg_copy = map(lambda (c, seg): (c, cv.CloneImage(seg)), segments)
            seg_copy = map(lambda (c, seg): (c, spoil(seg)), seg_copy)
            shuffle(seg_copy)

            # FANN text format: "<pairs> <inputs> <outputs>" header,
            # then alternating input and target lines.
            f = open(train_file, "w")
            f.write("%d %d %d\n" % (len(segments), num_input, num_output))

            for c, image in seg_copy:
                image = adjustSize(image, (segW, segH))
                for y in range(image.height):
                    for x in range(image.width):
                        # Scale pixel intensity into roughly [-0.8, 0.8]
                        # (assumes 0..255 pixel values — TODO confirm).
                        n = image[y, x] / 159.375 - 0.8
                        f.write("%f " % n)
                f.write("\n")
                # One-hot (-1/1) target at the character's charset index.
                n = charset.index(c)
                f.write("-1 " * n + "1" + " -1" * (num_output - n - 1) + "\n")

            f.close()

            train = libfann.training_data()
            train.read_train_from_file(train_file)
            ann.train_epoch(train)
            train.destroy_train()
            print "Train MSE: %f " % (ann.get_MSE()),
            print "Train bit fail: %5d " % (ann.get_bit_fail()),
            ann.test_data(test)
            mse = ann.get_MSE()
            print "Test MSE: %f " % (mse),
            print "Test bit fail: %5d " % (ann.get_bit_fail()),
            # Checkpoint whenever the held-out error improves.
            if mse < min_test_MSE:
                min_test_MSE = mse
                ann.save(ann_file)
                last_save = n_iter
                print "saved",
            if n_iter - last_save > max_iters_after_save: break
            print
    except KeyboardInterrupt: print "Interrupted by user."
def train_my_net(data_file, net=None):
    """Train *net* (or a freshly created network) on the FANN data file.

    Returns the trained network.
    """
    desired_error = 0.01
    max_iter = 100000
    report_time = 100

    # Fall back to a brand-new network when none was supplied.
    network = new_net() if net is None else net

    data = libfann.training_data()
    data.read_train_from_file(data_file)
    network.train_on_data(data, max_iter, report_time, desired_error)

    return network
Beispiel #22
0
def initNet():
    """Create the global network `ann` and seed its weights from the
    training data in `train_file`."""
    learning_rate = 0.3
    # BUG FIX: floor division keeps the hidden-layer size an int; under
    # Python 3 the original `/` produced a float, which the layer-size
    # tuple must not contain.  `//` behaves identically on Python 2 ints.
    num_neurons_hidden = num_input // 3

    #desired_error = 0.015
    #max_iterations = 10000
    #iterations_between_reports = 10

    global ann
    ann = libfann.neural_net()
    ann.create_standard_array((num_input, num_neurons_hidden, num_output))
    ann.set_learning_rate(learning_rate)
    ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC_STEPWISE)
    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)

    # Seed the initial weights from the training data, then release it.
    train = libfann.training_data()
    train.read_train_from_file(train_file)
    ann.init_weights(train)
    train.destroy_train()
Beispiel #23
0
def TestOnData(nn, testdata):
    """Evaluate the saved network *nn* against the FANN file *testdata*.

    With --full (args.full) prints a per-row table comparing expected and
    computed outputs using a 0.8 decision threshold, plus miss-rate
    summaries; otherwise prints FANN's aggregate bit-fail count and MSE.
    """
    ann = libfann.neural_net()
    ann.create_from_file(nn)

    testData = libfann.training_data()
    testData.read_train_from_file(testdata)
    ann.reset_MSE()

    if args.full:
        inputs = testData.get_input()
        outputs = testData.get_output()

        # Confusion counts: expected 1.0 = "bad buy", expected 0.0 =
        # "good buy"; the network output is thresholded at 0.8.
        missed_goodbuys = 0
        missed_badbuys = 0
        correct_goodbuys = 0
        correct_badbuys = 0

        print "#Row\tCorrect\tCalc\tFail"

        for i in xrange(len(inputs)):
            # NOTE(review): ann.run() is invoked again below for the
            # printout, so each row is evaluated twice.
            nn_out = ann.run(inputs[i])[0]
            c_out = outputs[i][0]
            s = ' '
            if c_out == 1.0 and nn_out < 0.8:
                s = 'B'
                missed_badbuys += 1
            if c_out == 0.0 and nn_out >= 0.8:
                s = 'G'
                missed_goodbuys += 1
            if c_out == 1.0 and nn_out >= 0.8:
                correct_badbuys += 1
            if c_out == 0.0 and nn_out < 0.8:
                correct_goodbuys += 1

            print "%5u\t%1.3f\t%1.3f\t%s" % (i+1, outputs[i][0], ann.run(inputs[i])[0], s)
        print "Missed %u bad buys of %u (%2.1f%%)" % (missed_badbuys, correct_badbuys+missed_badbuys,
                                                    float(missed_badbuys)/(correct_badbuys+missed_badbuys)*100)
        print "Missed %u good buys of %u (%2.1f%%)" % (missed_goodbuys, correct_goodbuys+missed_goodbuys,
                                                     float(missed_goodbuys)/(correct_goodbuys+missed_goodbuys)*100)
    else:
        # Aggregate scoring only.
        ann.test_data(testData)
        print "Bit Fail: " + str(ann.get_bit_fail())
        print "Mean Squared Error: " + str(ann.get_MSE())
Beispiel #24
0
Datei: train.py Projekt: woto/EPC
def initNet():
    """Create the global network `ann` and seed its weights from the
    training data in `train_file`."""
    learning_rate = 0.3
    # BUG FIX: floor division keeps the hidden-layer size an int; under
    # Python 3 the original `/` produced a float, which the layer-size
    # tuple must not contain.  `//` behaves identically on Python 2 ints.
    num_neurons_hidden = num_input // 3

    #desired_error = 0.015
    #max_iterations = 10000
    #iterations_between_reports = 10

    global ann
    ann = libfann.neural_net()
    ann.create_standard_array((num_input, num_neurons_hidden, num_output))
    ann.set_learning_rate(learning_rate)
    ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC_STEPWISE)
    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)

    # Seed the initial weights from the training data, then release it.
    train = libfann.training_data()
    train.read_train_from_file(train_file)
    ann.init_weights(train)
    train.destroy_train()
Beispiel #25
0
def XY_to_fann_train_data(X, Y):
    """Build a ``libfann.training_data`` object from paired sequences.

    X -- sequence of input rows (each a sequence of numbers)
    Y -- sequence of output rows, one per input row

    Returns a ``libfann.training_data`` (empty when X is empty).
    Raises ValueError when X and Y differ in length.

    FANN only loads training sets from its text file format, so the data
    is written to a temporary file and read back.
    """
    import os

    if len(X) != len(Y):
        raise ValueError("X and Y must have the same number of lines.")

    train_data = libfann.training_data()

    if len(X):
        dim_X, dim_Y = len(X[0]), len(Y[0])

        tmp = tempfile.NamedTemporaryFile(delete=False)
        try:
            with tmp:
                # FANN header: <num_pairs> <num_inputs> <num_outputs>
                tmp.write("%d %d %d\n" % (len(X), dim_X, dim_Y))
                for i in xrange(len(X)):
                    for line in [X[i], Y[i]]:
                        tmp.write("%s\n" % ' '.join(str(float(val)) for val in line))
            train_data.read_train_from_file(tmp.name)
        finally:
            # Always remove the temp file, even if parsing fails (the
            # original leaked it on error and relied on the non-public
            # `tmp.unlink` wrapper attribute).
            os.unlink(tmp.name)

    return train_data
Beispiel #26
0
    def train(self, train_data):
        """Train a freshly created FANN network on `train_data`.

        Logs the network layout to stderr, then builds a sparse network
        whose topology and training parameters come from this object's
        configuration attributes. Returns the trained FANN structure
        (also stored on `self.ann`).
        """
        self.set_train_data(train_data)

        # Layer sizes: inputs, N identical hidden layers, outputs.
        layers = ([self.train_data.num_input]
                  + [self.hidden_neurons] * self.hidden_layers
                  + [self.train_data.num_output])

        log = sys.stderr.write
        log("Network layout:\n")
        log("* Neuron layers: %s\n" % layers)
        log("* Connection rate: %s\n" % self.connection_rate)
        # The learning rate is not reported for RPROP.
        if self.training_algorithm not in ('TRAIN_RPROP', ):
            log("* Learning rate: %s\n" % self.learning_rate)
        log("* Activation function for the hidden layers: %s\n"
            % self.activation_function_hidden)
        log("* Activation function for the output layer: %s\n"
            % self.activation_function_output)
        log("* Training algorithm: %s\n" % self.training_algorithm)

        self.ann = libfann.neural_net()
        self.ann.create_sparse_array(self.connection_rate, layers)
        self.ann.set_learning_rate(self.learning_rate)
        # Configured names are resolved to libfann constants by name.
        self.ann.set_activation_function_hidden(
            getattr(libfann, self.activation_function_hidden))
        self.ann.set_activation_function_output(
            getattr(libfann, self.activation_function_output))
        self.ann.set_training_algorithm(
            getattr(libfann, self.training_algorithm))

        data = libfann.training_data()
        data.set_train_data(self.train_data.get_input(),
                            self.train_data.get_output())

        self.ann.train_on_data(data, self.epochs,
                               self.iterations_between_reports,
                               self.desired_error)
        return self.ann
Beispiel #27
0
    def __init__(self,xdat,ydat,idxs):
        if shape(xdat)[0] != shape(ydat)[0]:
            raise Exception('dimension mismatch b/w x, y')

        nt = len(xdat)
        
        ny = shape(ydat)[1]
        nx = shape(xdat)[1]

        num_input = nx;
        num_output = ny;
        num_layers = 3;
        num_neurons_hidden = 3;
        desired_error =  0.2;
        max_epochs =2000;
        epochs_between_reports = 1000;

        net = fann.neural_net()
        net.create_standard_array([num_layers, num_input, num_neurons_hidden, num_output]);

        net.set_activation_function_hidden( fann.SIGMOID_SYMMETRIC);
        net.set_activation_function_output( fann.SIGMOID_SYMMETRIC);
        
        t = fann.training_data()
        
        t.set_train_data(xdat,ydat)
        nt = net.train_on_data(t,max_epochs,epochs_between_reports,desired_error)
        out = net.save( "xor_float.net");

        print net.get_training_algorithm()
        raise Exception()

        fann.train_on_file( "xor.data", max_epochs, epochs_between_reports, desired_error);

        out = net.save( "xor_float.net");
        
        net.destroy();
Beispiel #28
0
    def test(self, data):
        """Return the mean square error of the trained network on `data`.

        `data` must be a :class:`~nbclassify.data.TrainData` instance whose
        input/output dimensions match the data the network was trained on.
        """
        # Guard clauses: validate the argument and the object state first.
        if not isinstance(data, TrainData):
            raise ValueError("Training data must be an instance of TrainData")
        if not self.ann:
            raise ValueError("No neural network was trained yet")
        if data.num_input != self.train_data.num_input:
            raise ValueError("Number of inputs of test data must be same as train data")
        if data.num_output != self.train_data.num_output:
            raise ValueError("Number of output of test data must be same as train data")

        test_set = libfann.training_data()
        test_set.set_train_data(data.get_input(), data.get_output())

        # Clear any accumulated error before measuring.
        self.ann.reset_MSE()
        self.ann.test_data(test_set)
        return self.ann.get_MSE()
Beispiel #29
0
    def test(self, data):
        """Test the trained neural network on `data`.

        `data` is a :class:`~nbclassify.data.TrainData` instance; the mean
        square error over that data set is returned.
        """
        if not isinstance(data, TrainData):
            raise ValueError("Training data must be an instance of TrainData")
        if not self.ann:
            raise ValueError("No neural network was trained yet")

        # The test set must be dimensioned like the training set.
        expected_in = self.train_data.num_input
        expected_out = self.train_data.num_output
        if data.num_input != expected_in:
            raise ValueError("Number of inputs of test data must be same as train data")
        if data.num_output != expected_out:
            raise ValueError("Number of output of test data must be same as train data")

        fann_data = libfann.training_data()
        fann_data.set_train_data(data.get_input(), data.get_output())

        # Reset, run the test pass, and report the resulting MSE.
        self.ann.reset_MSE()
        self.ann.test_data(fann_data)
        return self.ann.get_MSE()
Beispiel #30
0
    def train(self, data):
        """Train a neural network on training data `data`.

        Depending on `self.train_type`, either trains a fixed-topology
        network ('ordinary') or grows one neuron at a time ('cascade').
        Returns the trained FANN structure (also stored on `self.ann`).
        Raises ValueError for an unknown train type, or for a cascade run
        with an unsupported training algorithm.
        """
        self.set_train_data(data)

        # Check some values.
        if self.train_type not in ('ordinary', 'cascade'):
            raise ValueError("Unknown training type `%s`" % self.train_type)
        if self.train_type == 'cascade':
            # FANN cascade training supports only these two algorithms.
            if self.training_algorithm not in ('TRAIN_RPROP', 'TRAIN_QUICKPROP'):
                raise ValueError("Expected TRAIN_RPROP or TRAIN_QUICKPROP "\
                    "as the training algorithm")

        # Get FANN train data object.
        fann_train_data = libfann.training_data()
        fann_train_data.set_train_data(self.train_data.get_input(),
            self.train_data.get_output())

        if self.train_type == 'ordinary':
            # Topology: inputs, N identical hidden layers, outputs.
            hidden_layers = [self.hidden_neurons] * self.hidden_layers
            layers = [self.train_data.num_input]
            layers.extend(hidden_layers)
            layers.append(self.train_data.num_output)

            self.ann = libfann.neural_net()
            self.ann.create_sparse_array(self.connection_rate, layers)

            # Set training parameters (names resolved to libfann constants).
            self.ann.set_learning_rate(self.learning_rate)
            self.ann.set_activation_function_hidden(
                getattr(libfann, self.activation_function_hidden))
            self.ann.set_activation_function_output(
                getattr(libfann, self.activation_function_output))
            self.ann.set_training_algorithm(
                getattr(libfann, self.training_algorithm))

            sys.stderr.write("Ordinary training...\n")
            self.ann.print_parameters()

            # Ordinary training.
            self.ann.train_on_data(fann_train_data, self.epochs,
                self.iterations_between_reports, self.desired_error)
        elif self.train_type == 'cascade':
            # Cascade training adds neurons to the network while training,
            # so it starts from an ANN without any hidden layers.
            layers = [self.train_data.num_input, self.train_data.num_output]
            self.ann = libfann.neural_net()
            self.ann.create_shortcut_array(layers)

            # Set training parameters.
            self.ann.set_training_algorithm(
                getattr(libfann, self.training_algorithm))
            self.ann.set_activation_function_hidden(
                getattr(libfann, self.activation_function_hidden))
            self.ann.set_activation_function_output(
                getattr(libfann, self.activation_function_output))
            self.ann.set_cascade_activation_steepnesses(
                self.cascade_activation_steepnesses)
            self.ann.set_cascade_num_candidate_groups(
                self.cascade_num_candidate_groups)

            sys.stderr.write("Cascade training...\n")
            self.ann.print_parameters()

            # Cascade training.
            self.ann.cascadetrain_on_data(fann_train_data, self.max_neurons,
                self.neurons_between_reports, self.desired_error)

        return self.ann
	ann.set_activation_function_output(libfann.SIGMOID)	
					#Set the activation function for the output layer.

	#ann.set_activation_function_output(libfann.LINEAR)

 

	# start training the network
	print "Training network"
	ann.train_on_file("diabetes.train", max_iterations, iterations_between_reports, desired_error)
					#Trains on an entire dataset, for a period of time
					#from a file

	ann.save("diabetes.net")	#Save the entire network to a configuration file.


	# test outcome
	print "Testing network"
	ann_train = libfann.training_data()
	ann_train.read_train_from_file("diabetes.test")

	#ann.create_from_file("diabetes.net") #Constructs a backpropagation neural network
                                #from a configuration file, which have been
                                #saved by save.

	ann.reset_MSE()
	ann.test_data(ann_train)
	print "MSE error on test data: %f" % ann.get_MSE()
	ann.save("diabetes_net.net")
Beispiel #32
0
    def train(self, data):
        """Train a neural network on training data `data`.

        `data` is handed to :meth:`set_train_data`. Depending on
        `self.train_type`, either trains a fixed-topology network
        ('ordinary') or grows the net neuron by neuron ('cascade').

        Returns a FANN structure (also stored on `self.ann`).
        Raises ValueError for an unknown train type, or for a cascade
        run with an unsupported training algorithm.
        """
        self.set_train_data(data)

        # Check some values.
        if not self.train_type in ('ordinary','cascade'):
            raise ValueError("Unknown training type `%s`" % self.train_type)
        if self.train_type == 'cascade':
            # FANN cascade training supports only these two algorithms.
            if not self.training_algorithm in ('TRAIN_RPROP','TRAIN_QUICKPROP'):
                raise ValueError("Expected TRAIN_RPROP or TRAIN_QUICKPROP "\
                    "as the training algorithm")

        # Get FANN train data object.
        fann_train_data = libfann.training_data()
        fann_train_data.set_train_data(self.train_data.get_input(),
            self.train_data.get_output())

        if self.train_type == 'ordinary':
            # Topology: inputs, N identical hidden layers, outputs.
            hidden_layers = [self.hidden_neurons] * self.hidden_layers
            layers = [self.train_data.num_input]
            layers.extend(hidden_layers)
            layers.append(self.train_data.num_output)

            self.ann = libfann.neural_net()
            self.ann.create_sparse_array(self.connection_rate, layers)

            # Set training parameters (attribute names are resolved to
            # libfann constants via getattr).
            self.ann.set_learning_rate(self.learning_rate)
            self.ann.set_activation_function_hidden(
                getattr(libfann, self.activation_function_hidden))
            self.ann.set_activation_function_output(
                getattr(libfann, self.activation_function_output))
            self.ann.set_training_algorithm(
                getattr(libfann, self.training_algorithm))

            sys.stderr.write("Ordinary training...\n")
            self.ann.print_parameters()

            # Ordinary training.
            self.ann.train_on_data(fann_train_data, self.epochs,
                self.iterations_between_reports, self.desired_error)

        if self.train_type == 'cascade':
            # This algorithm adds neurons to the neural network while training,
            # which means that it needs to start with an ANN without any hidden
            # layers.
            layers = [self.train_data.num_input, self.train_data.num_output]
            self.ann = libfann.neural_net()
            self.ann.create_shortcut_array(layers)

            # Set training parameters.
            self.ann.set_training_algorithm(
                getattr(libfann, self.training_algorithm))
            self.ann.set_activation_function_hidden(
                getattr(libfann, self.activation_function_hidden))
            self.ann.set_activation_function_output(
                getattr(libfann, self.activation_function_output))
            self.ann.set_cascade_activation_steepnesses(
                self.cascade_activation_steepnesses)
            self.ann.set_cascade_num_candidate_groups(
                self.cascade_num_candidate_groups)

            sys.stderr.write("Cascade training...\n")
            self.ann.print_parameters()

            # Cascade training.
            self.ann.cascadetrain_on_data(fann_train_data, self.max_neurons,
                self.neurons_between_reports, self.desired_error)

        return self.ann
Beispiel #33
0
#!/usr/bin/python
from pyfann import libfann

# Hyper-parameters for cascade training on the two-spiral benchmark.
num_neurons_hidden = 4  # NOTE(review): not used in this excerpt
num_output = 1          # NOTE(review): not used in this excerpt

desired_error = 0.0001
max_neurons = 40
neurons_between_reports = 1
steepnesses = [0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1]

# Load the benchmark train/test sets from the FANN text format.
train_data = libfann.training_data()
train_data.read_train_from_file("../../benchmarks/datasets/two-spiral.train")
test_data = libfann.training_data()
test_data.read_train_from_file("../../benchmarks/datasets/two-spiral.test")

# Rescale both data sets into [0, 1].
train_data.scale_train_data(0, 1)
test_data.scale_train_data(0, 1)

# Cascade training starts from a shortcut network with no hidden layers;
# input/output sizes are taken from the data dimensions.
ann = libfann.neural_net()
ann.create_shortcut_array(
    [len(train_data.get_input()[0]),
     len(train_data.get_output()[0])])

ann.set_training_algorithm(libfann.TRAIN_RPROP)

ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
ann.set_activation_function_output(libfann.LINEAR_PIECE)
ann.set_activation_steepness_hidden(0.5)
ann.set_activation_steepness_output(0.5)
Beispiel #34
0
def fit_NN(tr_movies, tr_responses, val_movies, val_responses, n_lag):
    """Fit a sparse FANN regressor from movie frames to neural responses.

    tr_movies/val_movies -- iterables of 3-D arrays; assumed shape
        (LY, LX, T) per the unpacking of tr_movies[0].shape below.
    tr_responses/val_responses -- objects with a 4-D `.data` array of
        shape (S, N, T, R); axis meaning (e.g. sessions, neurons, time,
        trials) is assumed from the loop structure -- TODO confirm.
    n_lag -- number of past frames stacked into each input vector.

    NOTE(review): mutates tr_responses.data and val_responses.data in
    place (subtracts each unit's mean along axis 1).

    Prints the validation mean absolute error and returns the trained
    fann.neural_net.
    """
    # Zero-pad the time axis so a full n_lag window exists at t=0.
    p_movs = [
        np.pad(m, ((0, 0), (0, 0), (n_lag - 1, 0)), mode='constant')
        for m in tr_movies
    ]

    # Mean-centre each unit's training responses, remembering the means
    # so the validation data can be centred identically below.
    tr_data = tr_responses.data
    mean_rsps = []
    for i in range(tr_data.shape[1]):
        mean_rsps.append(tr_data[:, i, :, :].mean())
        tr_data[:, i, :, :] -= tr_data[:, i, :, :].mean()

    LY, LX, T = tr_movies[0].shape
    S, N, T, R = tr_data.shape

    # Network and training hyper-parameters.
    num_input = LY * LX * n_lag
    num_output = N
    num_hidden = 1000
    epochs = 50
    epochs_between_reports = 1
    desired_error = 0.25
    learning_rate = 0.7
    connection_rate = 0.1

    train_DS = []
    val_DS = []

    for i_s in range(S):
        # Training data: one (flattened n_lag window, response vector)
        # pair per (session, trial, time step).
        for i_tr in range(R):
            for i_t in range(T):
                inp_tr = p_movs[i_s][:, :, i_t:i_t + n_lag].flatten()
                out_tr = tr_data[i_s, :, i_t, i_tr]
                train_DS.append([inp_tr, out_tr])

    X_tr, Y_tr = zip(*train_DS)

    # Same windowing/centering for the validation movies.
    p_movs = [
        np.pad(m, ((0, 0), (0, 0), (n_lag - 1, 0)), mode='constant')
        for m in val_movies
    ]
    val_data = val_responses.data
    for i in range(N):
        val_data[:, i, :, :] -= mean_rsps[i]
    S, N, T, R = val_data.shape
    for i_s in range(S):
        # Validation data
        for i_tr in range(R):
            for i_t in range(T):
                inp_val = p_movs[i_s][:, :, i_t:i_t + n_lag].flatten()
                out_val = val_data[i_s, :, i_t, i_tr]
                val_DS.append([inp_val, out_val])
    X_val, Y_val = zip(*val_DS)

    train_data = fann.training_data()
    train_data.set_train_data(X_tr, Y_tr)

    # Sparse net: linear output (regression), stepwise sigmoid hidden.
    net = fann.neural_net()
    net.create_sparse_array(connection_rate,
                            (num_input, num_hidden, num_output))
    net.set_learning_rate(learning_rate)
    net.set_activation_function_output(fann.LINEAR)
    net.set_activation_function_hidden(fann.SIGMOID_SYMMETRIC_STEPWISE)
    net.train_on_data(train_data, epochs, epochs_between_reports,
                      desired_error)

    # Report validation MAE of the trained network.
    pred = np.zeros((len(Y_val), N))
    for i in range(len(Y_val)):
        pred[i, :] = net.run(X_val[i])
    print mean_absolute_error(np.array(Y_val), pred)

    return net
#!/usr/bin/python

from pyfann import libfann as fann
import numpy as np
from matplotlib import pyplot as plt

# Toy regression data set: y = x0 + x1 + 6 on 500 random 2-D points.
X = np.random.random((500, 2))
Y = (np.dot(X, [1,1]) + 6.0).reshape((500,1))

# 400/100 train/test split.
X_tr = X[:400,:]
Y_tr = Y[:400,:]
X_te = X[400:,:]
Y_te = Y[400:,:]

train_data = fann.training_data()
test_data = fann.training_data()
train_data.set_train_data(X_tr, Y_tr)
test_data.set_train_data(X_te, Y_te)

# Fully-connected 2-4-1 network and training parameters.
connection_rate = 1
learning_rate = 0.7
num_input = 2
num_output = 1
num_hidden = 4
desired_error = 0.0001
max_iterations = 100000          # NOTE(review): training itself happens past this excerpt
iterations_between_reports = 1000

nn = fann.neural_net()
nn.create_sparse_array(connection_rate, (num_input, num_hidden, num_output))
nn.set_learning_rate(learning_rate)
Beispiel #36
0
class AnnColorizer(object):
    """Colorize grayscale images with a FANN network.

    The network maps per-pixel features extracted from the lightness
    channel (3 inputs) to BGR color values (3 outputs).
    """

    def __init__(self,
                 learning_rate=0.9,
                 num_neurons_hidden=30,
                 hidden_layers=2,
                 max_iterations=5000,
                 iterations_between_reports=100,
                 train_pct=0.6,
                 desired_error=0.0006,
                 train_count=0,
                 window_size=10):
        """Build and configure the network.

        learning_rate -- FANN learning rate
        num_neurons_hidden -- neurons per hidden layer
        hidden_layers -- number of hidden layers
        max_iterations / iterations_between_reports -- training schedule
        train_pct -- fraction of pixels sampled when train_count == 0
        desired_error -- MSE at which training stops
        train_count -- absolute number of training pixels (0 = use train_pct)
        window_size -- stored for later use; not read in this excerpt
        """
        self.learning_rate = learning_rate
        self.num_input = 3
        self.num_neurons_hidden = num_neurons_hidden
        self.num_output = 3
        self.desired_error = desired_error
        self.train_pct = train_pct
        self.train_count = train_count
        self.max_iterations = max_iterations
        self.iterations_between_reports = iterations_between_reports
        self.ann = libfann.neural_net()
        self.hidden_layers = hidden_layers
        # Layer layout: 3 inputs, `hidden_layers` equal hidden layers,
        # 3 outputs (BGR).
        ann_layout = []
        ann_layout.append(self.num_input)
        for i in range(self.hidden_layers):
            ann_layout.append(self.num_neurons_hidden)
        ann_layout.append(self.num_output)
        self.ann.create_standard_array(ann_layout)
        self.ann.set_learning_rate(self.learning_rate)
        self.ann.set_training_algorithm(libfann.TRAIN_RPROP)
        self.ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
        # Linear output: color values are unbounded regression targets.
        self.ann.set_activation_function_output(libfann.LINEAR)
        self.ann.set_train_error_function(libfann.STOPFUNC_MSE)
        self.window_size = window_size

        #        print "ANN Setup with learning_rate:%0.1f neurons_hidden:%d hidden_layers:%d max_iterations:%d train_pct:%0.1f train_cnt:%d" % (learning_rate, num_neurons_hidden, hidden_layers, max_iterations, train_pct, train_count)
        print(
            "ANN Setup with learning_rate:%.1f neurons_hidden:%d hidden_layers:%d max_iterations:%d train_pct:%.1f train_cnt:%d window_size:%d"
            % (learning_rate, num_neurons_hidden, hidden_layers,
               max_iterations, train_pct, train_count, window_size))

        #self.ann.print_parameters()

    def __trainFannV2(self, img, image_name):
        """Train the network on randomly sampled pixels of `img`.

        Writes a FANN-format training file named `image_name`_train.dat,
        trains on it, and removes the file afterwards. `img` is assumed
        to be a BGR image (per the CV_BGR2Lab conversion) -- TODO confirm.
        """
        training_filename = image_name + "_train.dat"
        trainf = open(training_filename, 'w')
        # Lab channels; only the lightness channel `l` feeds the features.
        l, a, b = cv2.split(cv2.cvtColor(img, cv.CV_BGR2Lab))
        xdim, ydim = l.shape

        # Number of sampled pixels: explicit count, or a fraction of the image.
        if (self.train_count == 0):
            max_train_count = int(math.floor(xdim * ydim * self.train_pct))
        else:
            max_train_count = self.train_count
        print("Training pixels %d" % (max_train_count))
        #max_train_count=3000
        num_input = self.ann.get_num_input()
        num_output = self.ann.get_num_output()
        # FANN file header: <num_pairs> <num_inputs> <num_outputs>
        dims = [max_train_count, num_input, num_output]
        print(*dims, sep=' ', file=trainf)
        print("Image Dimensions " + str(l.shape))
        f = trainf
        count = 1

        for k in xrange(max_train_count):
            #choose random pixel in training image
            try:
                i = int(np.random.uniform(xdim))
                j = int(np.random.uniform(ydim))
                features = self.__get_features(l, xdim, ydim, i, j)
                print(*features, sep=' ', file=f)
                #BGR values
                output = [
                    float(img[i, j, 0]),
                    float(img[i, j, 1]),
                    float(img[i, j, 2])
                ]
                print(*output, sep=' ', file=f)
                count = count + 1
            except Exception, e:
                # Best-effort sampling: a failed pixel is skipped, which
                # leaves the file with fewer pairs than the header claims.
                print("Exception when training %s" % (e))
                continue

        #for i in range(xdim):
        #    for j in range(ydim):
        #        features = self.__get_features(l, xdim, ydim, i, j)
        #        print(*features, sep=' ', file=f)
        #        #BGR values
        #        output=[ float(img[i,j,0]), float(img[i,j,1]), float(img[i,j,2])]
        #        print(*output, sep=' ', file=f)
        #        count = count + 1
        trainf.close()

        data = libfann.training_data()
        data.read_train_from_file(training_filename)
        #data.shuffle_train_data()
        train_data = data
        # Scale inputs/outputs into roughly [-1, 1] before training.
        self.ann.set_scaling_params(train_data, -1.0, 1.01, -1.0, 1.01)
        self.ann.scale_train(train_data)
        self.ann.train_on_data(train_data, self.max_iterations,
                               self.iterations_between_reports,
                               self.desired_error)
        print("Training ANN done ")

        self.ann.reset_MSE()
        os.remove(training_filename)
Beispiel #37
0
    ann.set_learning_rate(learning_rate)  #Set the learning rate

    ann.set_activation_function_output(libfann.SIGMOID)
    #Set the activation function for the output layer.

    #ann.set_activation_function_output(libfann.LINEAR)

    # start training the network
    print "Training network"
    ann.train_on_file("diabetes.train", max_iterations,
                      iterations_between_reports, desired_error)
    #Trains on an entire dataset, for a period of time
    #from a file

    ann.save("diabetes.net")  #Save the entire network to a configuration file.

    # test outcome
    print "Testing network"
    ann_train = libfann.training_data()
    ann_train.read_train_from_file("diabetes.test")

    #ann.create_from_file("diabetes.net") #Constructs a backpropagation neural network
    #from a configuration file, which have been
    #saved by save.

    ann.reset_MSE()
    ann.test_data(ann_train)
    print "MSE error on test data: %f" % ann.get_MSE()
    ann.save("diabetes_net.net")
Beispiel #38
0
Datei: train.py Projekt: woto/EPC
def loadTest():
    """Read the test set from the module-level `test_file` into global `test`."""
    global test
    test = libfann.training_data()
    test.read_train_from_file(test_file)
                return False
        return True


if __name__ == "__main__":
    num = 25
    if len(sys.argv) > 1:
        num = int(sys.argv[1])

    print "Using %d ANNs" % num
    a = Ensemble(num)
    layers = [2, 4, 1]
    a.create_standard_array(layers)
    a.set_learning_rate(0.6)
    a.set_activation_function_hidden(libfann.SIGMOID)
    a.set_activation_function_output(libfann.SIGMOID)

    data = [([0, 0], [0]), ([0, 1], [1]), ([1, 0], [1]), ([1, 1], [0])]
    tdata = libfann.training_data()
    tdata.read_train_from_file('xor.data')

    for cur_in, cur_out in data:
        res = a.run(cur_in)
        print "(%d, %d) -> %d" % (cur_in[0], cur_in[1], res[0])

    a.train_on_data(tdata, 20, 0, 0.0001)

    for cur_in, cur_out in data:
        res = a.run(cur_in)
        print "(%.2f, %.2f) -> %.2f" % (cur_in[0], cur_in[1], res[0])
Beispiel #40
0
def run_fann( num_hidden = 4, fname = "ann_ws496.net", fname_data_prefix = '', n_iter = 100, disp = True, graph = True):
	print "num_hidden =", num_hidden    
	
	fname_data_train = fname_data_prefix + "train_in.data"
	fname_data_test = fname_data_prefix + "test_in.data"

	connection_rate = 1
	learning_rate = 0.7
	num_input = 1024
	#num_hidden = 40
	num_output = 1

	desired_error = 0.0001
	max_iterations = 1
	iterations_between_reports = 1

	ann = libfann.neural_net()
	ann.create_sparse_array(connection_rate, (num_input, num_hidden, num_output))
	ann.set_learning_rate(learning_rate)
	ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
	ann.set_activation_function_output(libfann.LINEAR)

	# train_data is loaded
	train_data = libfann.training_data()
	train_data.read_train_from_file( fname_data_train)

	# test_data is loaded
	test_data = libfann.training_data()
	test_data.read_train_from_file( fname_data_test)
	train_mse = list()
	test_mse = list()
	for ii in range(n_iter):
		# Training is performed with training data
		ann.reset_MSE()
		ann.train_on_data(train_data, max_iterations, iterations_between_reports, desired_error)

		# Testing is performed with test data
		ann.reset_MSE()
		ann.test_data(train_data)
		mse_train = ann.get_MSE(); train_mse.append( mse_train)

		# Testing is performed with test data
		ann.reset_MSE()
		ann.test_data(test_data)
		mse_test = ann.get_MSE(); test_mse.append( mse_test)

		if disp: 
			print ii, "MSE of train, test", mse_train, mse_test

	ann.save( fname)

	# We show the results of ANN training with validation. 
	if graph:
		plot( train_mse, label = 'train')
		plot( test_mse, label = 'test')
		legend( loc = 1)
		xlabel('iteration')
		ylabel('MSE')
		grid()
		show()
	
	return train_mse, test_mse
Beispiel #41
0
def create_net (group_id, num_neurons_hidden):
    from pyfann import libfann
    from network_functions import save_tests
    from network_functions import create_net_record
    import MySQLdb as mdb


    print "Create Net - " + str(group_id) + ", " + str(num_neurons_hidden)
    # To Do
    # Needs to querry Net_Group for training file name, test filename, ticker, input_nurons, output_neurons
    con = mdb.connect('localhost', 'root', 'fil1202job', 'network')
    sql = "select a.ticker, a.train_data, a.test_data, b.no_input, b.no_output from network.net_group a, network.net_type b where a.type = b.id and a.id =" + str(group_id)
    cursor = con.cursor()
    cursor.execute(sql)
    for row in cursor.fetchall():
        ticker = str(row[0])
        train_file = str(row[1])
        test_file = str(row[2])
        num_input = row[3]
        num_output = row[4]
    # disconnect from server
    #con.close()

    #Parameters that will be passed in
    #group_id = 191

    # create empty net record and get number
    net_id = create_net_record(group_id)
    print "Net ID = " + str(net_id)
    #train_file = "/home/user/Documents/TrainandTestdata/trainingdataFANN-ACN_out-train.dat"
    #test_file = "/home/user/Documents/TrainandTestdata/testdataFANN-ACN_out-train.dat"
    #ticker = "ACN"
    ###
    # create file name as ticker_netgroup_netnumber.net
    net_name = "/home/user/Documents/Networks/" + str(ticker) + "_" + str(group_id) + "_" + str(net_id) + ".net"

    sql2 = "UPDATE `network`.`network` SET `net_file` = \"" + net_name + "\" where id = " + str(net_id)
    #print sql2
    cursor2 = con.cursor()
    cursor2.execute(sql2)
    con.commit()

    connection_rate = 1
    learning_rate = 0.7
    #num_input = 7
    #num_neurons_hidden = 7
    #num_output = 1

    desired_error = 0.0001
    max_iterations = 100000
    iterations_between_reports = 10000

    ann = libfann.neural_net()
    #ann.create_sparse_array(connection_rate, (num_input, num_neurons_hidden, num_output))
    ann.create_standard_array([num_input, num_neurons_hidden, num_neurons_hidden, num_output])
    ann.set_learning_rate(learning_rate)
    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)


    train_data = libfann.training_data()
    train_data.read_train_from_file(train_file)
    test_data = libfann.training_data()
    test_data.read_train_from_file(test_file)

    ann.train_on_file(train_file, max_iterations, iterations_between_reports, desired_error)
    print "\nTrain error: %f, Test error: %f\n\n" %( ann.test_data(train_data),ann.test_data(test_data))

    #print "Testing network again"
    ann.reset_MSE()
    vinput = test_data.get_input()
    output = test_data.get_output()
    for i in range(len(vinput)):
        #print "###"
        #print ann.test(vinput[i], output[i])
        #print "###'"
        outval = ann.test(vinput[i], output[i])
        outval = str(outval)
        outval = outval.translate(None, '[]')

        targetout = str(output[i])
        #print "###"
        #print targetout
        targetout = targetout.translate(None, '[]')
        #print targetout


        # Store test output in net_test table as [netNumber, record(test row number), prediction, target]
        # Stillinserts into temp table net_tests2
        save_tests(net_id, i + 1, outval, targetout)
        #printingstring = 'Output number ' + str(i) + ' is '+ outval + 'Should have been: ' + targetout
        #print printingstring
    #print "MSE error on test data: %f" % ann.get_MSE()

    ann.save(net_name)
Beispiel #42
0
from pyfann import libfann
import pickle
import numpy
import sys
import gzip
import time
from pprint import pprint

from vision  import *

ph_corr = 4
mirror = True

# nacitame data set do pamate.
train_data_set = libfann.training_data()

train_input = []
train_output = []

num_input = 0
for f in sys.argv[1:]:
	with gzip.open(f, "r") as input_file:
		tmp_input = []
		tmp_output = []
		try:
			while True:
				in_values, out_values = pickle.load(input_file)

				tmp_input.append(in_values)
				tmp_output.append(out_values)
if not exists(output_dir):
    os.makedirs(output_dir)

# The states files may be given as a single glob pattern.
states_files = args.states_files
if len(states_files) == 1:
    states_files = glob(states_files[0])

# Convert the files and move them to the build path
# --fast caps the number of converted states for quick test runs.
if args.fast:
    n_max = 200
else:
    n_max = inf
convert_two_particle_hdf5_to_fann(states_files, output_dir, train_ratio=0.85, n_max=n_max, min_distance=args.min_distance, max_distance=args.max_distance)

# Load data
train_data = libfann.training_data()
validate_data = libfann.training_data()
test_data = libfann.training_data()

train_data_filename = str(join(output_dir, "train.fann"))
validate_data_filename = str(join(output_dir, "validate.fann"))
test_data_filename = str(join(output_dir, "test.fann"))

print "Loading data:\n", train_data_filename, "\n", validate_data_filename, "\n", test_data_filename

train_data.read_train_from_file(train_data_filename)
validate_data.read_train_from_file(validate_data_filename)
test_data.read_train_from_file(test_data_filename)

# Create and train networks
# Best score so far; presumably minimised by the loop below -- TODO confirm.
best_test_result = inf
def fit_NN(tr_movies, tr_responses, val_movies, val_responses, n_lag):
    """Fit a sparse FANN regressor from movie frames to neural responses.

    tr_movies/val_movies -- iterables of 3-D arrays; assumed shape
        (LY, LX, T) per the unpacking of tr_movies[0].shape below.
    tr_responses/val_responses -- objects with a 4-D `.data` array of
        shape (S, N, T, R); axis meaning assumed from the loops -- TODO
        confirm.
    n_lag -- number of past frames stacked into each input vector.

    NOTE(review): mutates tr_responses.data and val_responses.data in
    place (subtracts each unit's mean along axis 1).

    Prints the validation mean absolute error and returns the trained
    fann.neural_net.
    """
    # Zero-pad the time axis so a full n_lag window exists at t=0.
    p_movs = [np.pad(m, ((0,0), (0,0), (n_lag-1,0)), mode='constant')
              for m in tr_movies]
    
    # Mean-centre each unit's training responses, remembering the means
    # so the validation data can be centred identically below.
    tr_data = tr_responses.data
    mean_rsps = []
    for i in range(tr_data.shape[1]):
        mean_rsps.append(tr_data[:,i,:,:].mean())
        tr_data[:,i,:,:] -= tr_data[:,i,:,:].mean()

    LY, LX, T = tr_movies[0].shape
    S, N, T, R = tr_data.shape

    # Network and training hyper-parameters.
    num_input = LY*LX*n_lag
    num_output = N
    num_hidden = 1000
    epochs = 50
    epochs_between_reports = 1
    desired_error = 0.25
    learning_rate = 0.7
    connection_rate = 0.1

    train_DS = []
    val_DS = []

    for i_s in range(S):
        # Training data: one (flattened n_lag window, response vector)
        # pair per (session, trial, time step).
        for i_tr in range(R):
            for i_t in range(T):
                inp_tr = p_movs[i_s][:,:,i_t:i_t+n_lag].flatten()
                out_tr = tr_data[i_s,:,i_t,i_tr]
                train_DS.append([inp_tr, out_tr])

    X_tr, Y_tr = zip(*train_DS)

    # Same windowing/centering for the validation movies.
    p_movs = [np.pad(m, ((0,0), (0,0), (n_lag-1,0)), mode='constant')
              for m in val_movies]
    val_data = val_responses.data
    for i in range(N):
        val_data[:,i,:,:] -= mean_rsps[i]
    S, N, T, R = val_data.shape
    for i_s in range(S):
        # Validation data
        for i_tr in range(R):
            for i_t in range(T):
                inp_val = p_movs[i_s][:,:,i_t:i_t+n_lag].flatten()
                out_val = val_data[i_s,:,i_t,i_tr]
                val_DS.append([inp_val, out_val])
    X_val, Y_val = zip(*val_DS)
    
    train_data = fann.training_data()
    train_data.set_train_data(X_tr, Y_tr)

    # Sparse net: linear output (regression), stepwise sigmoid hidden.
    net = fann.neural_net()
    net.create_sparse_array(connection_rate, (num_input,num_hidden,num_output))
    net.set_learning_rate(learning_rate)
    net.set_activation_function_output(fann.LINEAR)
    net.set_activation_function_hidden(fann.SIGMOID_SYMMETRIC_STEPWISE)
    net.train_on_data(train_data, epochs, epochs_between_reports, desired_error)

    # Report validation MAE of the trained network.
    pred = np.zeros((len(Y_val), N))
    for i in range(len(Y_val)):
        pred[i,:] = net.run(X_val[i])
    print mean_absolute_error(np.array(Y_val), pred)

    return net
Beispiel #45
0
def test_ann(ann_path, test_data_path, output_path=None, conf_path=None, error=0.01):
    """Test an artificial neural network.

    Loads the FANN network from `ann_path`, evaluates it on the data in
    `test_data_path` and logs the mean square error.  When `output_path`
    is given (which requires `conf_path`), every sample is additionally
    classified and a tab-separated report is written.

    Returns 1 on failure, None on success.  Raises ValueError when
    `output_path` is set without `conf_path`.
    """
    for path in (ann_path, test_data_path, conf_path):
        if path and not os.path.exists(path):
            logging.error("Cannot open %s (no such file or directory)" % path)
            return 1

    if output_path and not conf_path:
        raise ValueError("Argument `conf_path` must be set when `output_path` is set")

    # Prefix marking the classification (output) columns in the data file.
    dependent_prefix = "OUT:"
    yml = None
    if conf_path:
        yml = open_yaml(conf_path)
        if not yml:
            return 1
        if 'classes' not in yml:
            logging.error("Classes are not set in the YAML file. Missing object 'classes'.")
            return 1
        # BUG FIX: only consult the YAML when a config file was actually
        # given; previously `yml` was referenced unconditionally and raised
        # NameError whenever conf_path was None.
        if 'data' in yml:
            dependent_prefix = getattr(yml.data, 'dependent_prefix', dependent_prefix)

    ann = libfann.neural_net()
    ann.create_from_file(ann_path)

    test_data = common.TrainData()
    try:
        test_data.read_from_file(test_data_path, dependent_prefix)
    except ValueError as e:
        logging.error("Failed to process the test data: %s" % e)
        # Was exit(1): a library helper should not kill the interpreter;
        # return an error code like the other failure paths do.
        return 1

    logging.info("Testing the neural network...")
    fann_test_data = libfann.training_data()
    fann_test_data.set_train_data(test_data.get_input(), test_data.get_output())

    ann.test_data(fann_test_data)

    mse = ann.get_MSE()
    logging.info("Mean Square Error on test data: %f" % mse)

    if not output_path:
        return

    # `with` guarantees the report file is closed on every exit path
    # (the original leaked it when the codeword check failed).
    with open(output_path, 'w') as out_file:
        out_file.write( "%s\n" % "\t".join(['ID','Class','Classification','Match']) )

        # Get codeword for each class.
        codewords = get_codewords(yml.classes)

        total = 0
        correct = 0
        for label, in_vec, output in test_data:
            total += 1
            row = []

            row.append(label if label else "")

            if len(codewords) != len(output):
                logging.error("Codeword length (%d) does not match the number of classes. "
                    "Please make sure the correct classes are set in %s" % (len(output), conf_path))
                return 1

            # Expected class, decoded from the data file's output columns.
            class_e = get_classification(codewords, output, error)
            assert len(class_e) == 1, "The codeword for a class can only have one positive value"
            row.append(class_e[0])

            # Predicted class(es), decoded from the network's output.
            codeword = ann.run(in_vec)
            try:
                class_f = get_classification(codewords, codeword, error)
            except ValueError as e:
                logging.error("Classification failed: %s" % e)
                return 1
            row.append(", ".join(class_f))

            # Check if the first items of the classifications match.
            if len(class_f) > 0 and class_f[0] == class_e[0]:
                row.append("+")
                correct += 1
            else:
                row.append("-")

            out_file.write( "%s\n" % "\t".join(row) )

        fraction = float(correct) / total
        out_file.write( "%s\n" % "\t".join(['','','',"%.3f" % fraction]) )

    logging.info("Correctly classified: %.1f%%" % (fraction*100))
    logging.info("Testing results written to %s" % output_path)
def get_training_data(data_file):
    """Load and return a FANN training set read from ``data_file``."""
    dataset = libfann.training_data()
    dataset.read_train_from_file(data_file)
    return dataset
Beispiel #47
0
def loadTest():
    """Load the FANN test set from the module-global ``test_file`` path
    into the module-global ``test`` training-data object."""
    global test
    # Note: ``test`` is (re)bound before the file is read, so on a read
    # failure it is left holding an empty training_data object.
    test = libfann.training_data()
    test.read_train_from_file(test_file)
#!/usr/bin/python
# Toy demo: fit the linear map y = x1 + x2 + 6 with a small FANN network.

from pyfann import libfann as fann
import numpy as np
from matplotlib import pyplot as plt

# 500 random 2-D samples; the target is an exact linear function of X,
# so the network only has to learn a plane.
X = np.random.random((500, 2))
Y = (np.dot(X, [1, 1]) + 6.0).reshape((500, 1))

# Simple 400/100 train/test split (rows are already i.i.d. random,
# so no shuffling is needed).
X_tr = X[:400, :]
Y_tr = Y[:400, :]
X_te = X[400:, :]
Y_te = Y[400:, :]

# Wrap the numpy arrays in FANN training-data containers.
train_data = fann.training_data()
test_data = fann.training_data()
train_data.set_train_data(X_tr, Y_tr)
test_data.set_train_data(X_te, Y_te)

# Network / training hyper-parameters.
connection_rate = 1
learning_rate = 0.7
num_input = 2
num_output = 1
num_hidden = 4
desired_error = 0.0001
max_iterations = 100000
iterations_between_reports = 1000

# Build a 2-4-1 net; with connection_rate 1 the "sparse" net is
# effectively fully connected.
nn = fann.neural_net()
nn.create_sparse_array(connection_rate, (num_input, num_hidden, num_output))
nn.set_learning_rate(learning_rate)
Beispiel #49
0
    def test(self):
        """Test's the BPN Network."""
        print "Creating network."
        db = dbm.open('config.dat', 'c')
        connection_rate = float(db['Connection Rate'])
        learning_rate = float(db['Learning Rate'])
        desired_error = float(db['Desired Error'])
        max_iterations = int(db['Maximum Iterations'])
        iterations_between_reports = int(db['Iteration Between Reports'])
        ol_act_fun = db['Output Layer Activation Function']
        db.close()
        hidden_neurons_list = [num_input]
        lay_neurons = tuple(
            num_neurons_hidden.split(","))  #Hidden Neurons in String
        for hid_neuron in lay_neurons:
            hidden_neurons_list.append(int(hid_neuron))
        hidden_neurons_list.append(num_output)
        hnt = tuple(hidden_neurons_list)
        hiddenn = num_neurons_hidden.split(",")
        train_data = libfann.training_data()
        train_data.read_train_from_file(tfile)
        ann = libfann.neural_net()
        if bpn_type == "SPR":
            ann.create_sparse_array(connection_rate, hnt)
        elif bpn_type == "STD":
            ann.create_standard_array(hnt)
        elif bpn_type == "SRT":
            ann.create_shortcut_array(hnt)
        ann.set_learning_rate(learning_rate)
        if talgo == "FANN_TRAIN_INCREMENTAL":
            self.ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
        elif talgo == "FANN_TRAIN_BATCH":
            self.ann.set_training_algorithm(libfann.TRAIN_BATCH)
        elif talgo == "FANN_TRAIN_RPROP":
            self.ann.set_training_algorithm(libfann.TRAIN_RPROP)
            try:
                db = dbm.open('config.dat', 'c')
                inc_factor = float(db['Increase Factor'])
                dec_factor = float(db['Decrease Factor'])
                delta_min = float(db['Delta Minimum'])
                delta_max = float(db['Delta Maximum'])
                delta_zero = float(db['Delta Zero'])
                db.close()
            except KeyError:
                pass
            else:
                self.ann.set_rprop_increase_factor(inc_factor)
                self.ann.set_rprop_decrease_factor(dec_factor)
                self.ann.set_rprop_delta_min(delta_min)
                self.ann.set_rprop_delta_max(delta_max)
        elif talgo == "FANN_TRAIN_QUICKPROP":
            self.ann.set_training_algorithm(libfann.TRAIN_QUICKPROP)
            try:
                db = dbm.open('config.dat', 'c')
                decay_val = float(db['Decay Value'])
                mu_val = float(db['Mu Value'])
                db.close()
            except KeyError:
                pass
            else:
                self.ann.set_quickprop_decay(decay_val)
                self.ann.set_quickprop_mu(mu_val)
        self.ann.set_learning_rate(learning_rate)
        if ol_act_fun == "LINEAR":
            self.ann.set_activation_function_output(libfann.LINEAR)
        elif ol_act_fun == "THRESHOLD":
            self.ann.set_activation_function_output(libfann.THRESHOLD)
        elif ol_act_fun == "THRESHOLD SYMMETRIC":
            self.ann.set_activation_function_output(
                libfann.THRESHOLD_SYMMETRIC)
        elif ol_act_fun == "SIGMOID":
            self.ann.set_activation_function_output(libfann.SIGMOID)
        elif ol_act_fun == "SIGMOID STEPWISE":
            self.ann.set_activation_function_output(libfann.SIGMOID_STEPWISE)
        elif ol_act_fun == "SIGMOID SYMMETRIC":
            self.ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC)
        elif ol_act_fun == "GAUSSIAN":
            self.ann.set_activation_function_output(libfann.GAUSSIAN)
        elif ol_act_fun == "GAUSSIAN SYMMETRIC":
            self.ann.set_activation_function_output(libfann.GAUSSIAN_SYMMETRIC)
        elif ol_act_fun == "ELLIOT":
            self.ann.set_activation_function_output(libfann.ELLIOT)
        elif ol_act_fun == "ELLIOT SYMMETRIC":
            self.ann.set_activation_function_output(libfann.ELLIOT_SYMMETRIC)
        elif ol_act_fun == "LINEAR PIECE":
            self.ann.set_activation_function_output(libfann.LINEAR_PIECE)
        elif ol_act_fun == "LINEAR PIECE SYMMETRIC":
            self.ann.set_activation_function_output(
                libfann.LINEAR_PIECE_SYMMETRIC)
        elif ol_act_fun == "SIN SYMMETRIC":
            self.ann.set_activation_function_output(libfann.SIN_SYMMETRIC)
        elif ol_act_fun == "COS SYMMETRIC":
            self.ann.set_activation_function_output(libfann.COS_SYMMETRIC)
        elif ol_act_fun == "SIN":
            self.ann.set_activation_function_output(libfann.SIN)
        elif ol_act_fun == "COS":
            self.ann.set_activation_function_output(libfann.COS)
        elif ol_act_fun == "SIGMOID SYMMETRIC STEPWISE":
            self.ann.set_activation_function_output(
                libfann.SIGMOID_SYMMETRIC_STEPWISE)
        #For Advanced Parameters related to Fixed Topology
        try:
            db = dbm.open('config.dat', 'c')
            lmomentum = float(db['Learning Momentum'])
            af_neuron_number = db['AF for Neuron']
            af_n = db['AF Neuron']
            af_layer_number = int(db['AF for layer'])
            af_l = db['AF Layer']
            asn = db['Activation Steepness for Neuron']
            asl = db['Activation Steepness for layer']
            tef = db['Train Error Function']
            tsf = db['Train Stop Function']
            bfl = float(db['Bit Fail Limit'])
            db.close()
        except KeyError:
            pass
        else:
            self.ann.set_learning_momentum(lmomentum)
            temp_list = af_neuron_number.split(",")
            layer_no = int(temp_list[0])
            neuron_no = int(temp_list[1])
            steepness_list = asn.split(",")
            svalue = float(steepness_list[0])
            layer = int(steepness_list[1])
            neuron = int(steepness_list[2])
            steep_layer_list = asl.split(",")
            vsteep = float(steep_layer_list[0])
            vslayer = int(steep_layer_list[1])
            if af_n == "LINEAR":
                self.ann.set_activation_function(libfann.LINEAR, layer_no,
                                                 neuron_no)
            elif af_n == "THRESHOLD":
                self.ann.set_activation_function(libfann.THRESHOLD, layer_no,
                                                 neuron_no)
            elif af_n == "THRESHOLD SYMMETRIC":
                self.ann.set_activation_function(libfann.THRESHOLD_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_n == "SIGMOID":
                self.ann.set_activation_function(libfann.SIGMOID, layer_no,
                                                 neuron_no)
            elif af_n == "SIGMOID STEPWISE":
                self.ann.set_activation_function(libfann.SIGMOID_STEPWISE,
                                                 layer_no, neuron_no)
            elif af_n == "SIGMOID SYMMETRIC":
                self.ann.set_activation_function(libfann.SIGMOID_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_n == "GAUSSIAN":
                self.ann.set_activation_function(libfann.GAUSSIAN, layer_no,
                                                 neuron_no)
            elif af_n == "GAUSSIAN SYMMETRIC":
                self.ann.set_activation_function(libfann.GAUSSIAN_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_n == "ELLIOT":
                self.ann.set_activation_function(libfann.ELLIOT, layer_no,
                                                 neuron_no)
            elif af_n == "ELLIOT SYMMETRIC":
                self.ann.set_activation_function(libfann.ELLIOT_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_n == "LINEAR PIECE":
                self.ann.set_activation_function(libfann.LINEAR_PIECE,
                                                 layer_no, neuron_no)
            elif af_n == "LINEAR PIECE SYMMETRIC":
                self.ann.set_activation_function(
                    libfann.LINEAR_PIECE_SYMMETRIC, layer_no, neuron_no)
            elif af_n == "SIN SYMMETRIC":
                self.ann.set_activation_function(libfann.SIN_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_n == "COS SYMMETRIC":
                self.ann.set_activation_function(libfann.COS_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_n == "SIN":
                self.ann.set_activation_function(libfann.SIN, layer_no,
                                                 neuron_no)
            elif af_n == "COS":
                self.ann.set_activation_function(libfann.COS, layer_no,
                                                 neuron_no)
            if af_l == "LINEAR":
                self.ann.set_activation_function_layer(libfann.LINEAR,
                                                       af_layer_number)
            elif af_l == "THRESHOLD":
                self.ann.set_activation_function(libfann.THRESHOLD, layer_no,
                                                 neuron_no)
            elif af_l == "THRESHOLD SYMMETRIC":
                self.ann.set_activation_function(libfann.THRESHOLD_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_l == "SIGMOID":
                self.ann.set_activation_function(libfann.SIGMOID, layer_no,
                                                 neuron_no)
            elif af_l == "SIGMOID STEPWISE":
                self.ann.set_activation_function(libfann.SIGMOID_STEPWISE,
                                                 layer_no, neuron_no)
            elif af_l == "SIGMOID SYMMETRIC":
                self.ann.set_activation_function(libfann.SIGMOID_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_l == "GAUSSIAN":
                self.ann.set_activation_function(libfann.GAUSSIAN, layer_no,
                                                 neuron_no)
            elif af_l == "GAUSSIAN SYMMETRIC":
                self.ann.set_activation_function(libfann.GAUSSIAN_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_l == "ELLIOT":
                self.ann.set_activation_function(libfann.ELLIOT, layer_no,
                                                 neuron_no)
            elif af_l == "ELLIOT SYMMETRIC":
                self.ann.set_activation_function(libfann.ELLIOT_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_l == "LINEAR PIECE":
                self.ann.set_activation_function(libfann.LINEAR_PIECE,
                                                 layer_no, neuron_no)
            elif af_l == "LINEAR PIECE SYMMETRIC":
                self.ann.set_activation_function(
                    libfann.LINEAR_PIECE_SYMMETRIC, layer_no, neuron_no)
            elif af_l == "SIN SYMMETRIC":
                self.ann.set_activation_function(libfann.SIN_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_l == "COS SYMMETRIC":
                self.ann.set_activation_function(libfann.COS_SYMMETRIC,
                                                 layer_no, neuron_no)
            elif af_l == "SIN":
                self.ann.set_activation_function(libfann.SIN, layer_no,
                                                 neuron_no)
            elif af_l == "COS":
                self.ann.set_activation_function(libfann.COS, layer_no,
                                                 neuron_no)
            self.ann.set_activation_steepness(svalue, layer, neuron)
            self.ann.set_activation_steepness_layer(vsteep, vslayer)
            if tef == "LINEAR":
                self.ann.set_train_error_function(libfann.ERRORFUNC_LINEAR)
            elif tef == "TANH ERROR FUNCTION":
                self.ann.set_train_error_function(libfann.ERRORFUNC_TANH)
            if tsf == "MSE":
                self.ann.set_train_stop_function(libfann.STOPFUNC_MSE)
            elif tsf == "BIT FAIL":
                self.ann.set_train_stop_function(libfann.FANN_STOPFUNC_BIT)
            self.ann.set_bit_fail_limit(bfl)
        finally:
            db.close()
        #Find Out Whether it is Evolving topology or Fixed Topology
        try:
            db = dbm.open('config.dat', 'c')
            max_neurons = db['Maximum Neurons']
            ncascade = True
            db.close()
        except KeyError:
            ncascade = False
        finally:
            db.close()
        if ncascade:
            db = dbm.open('config.dat', 'c')
            max_neurons = int(db['Maximum Neurons'])
            neurons_between_reports = int(db['Neurons Between Reports'])
            cdesired_error = float(db['Desired Error'])
            db.close()
        #For Advanced Cascade Parameters
        try:
            db = dbm.open('config.dat', 'c')
            ocf = db['Output Change Fraction']
            db.close()
            tcascade = True
        except KeyError:
            tcascade = False

        if tcascade:
            db = dbm.open('config.dat', 'c')
            ocf = float(db['Output Change Fraction'])
            ose = int(db['Output Stagnation Epochs'])
            ccf = float(db['Candidate Change Fraction'])
            cse = int(db['Candidate Stagnation Epochs'])
            wm = float(db['Weight Multiplier'])
            cl = float(db['Candidate Limit'])
            max_oe = int(db['Maximum Out Epochs'])
            min_oe = int(db['Minimum Out Epochs'])
            max_ce = int(db['Maximum Candidate Epochs'])
            min_ce = int(db['Minimum Candidate Epochs'])
            num_cgroup = int(db['Number Candidate Groups'])
            db.close()
            self.ann.set_cascade_output_change_fraction(ocf)
            self.ann.set_cascade_output_stagnation_epochs(ose)
            self.ann.set_cascade_candidate_change_fraction(ccf)
            self.ann.set_cascade_candidate_stagnation_epochs(cse)
            self.ann.set_cascade_weight_multiplier(wm)
            self.ann.set_cascade_candidate_limit(cl)
            self.ann.set_cascade_max_out_epochs(max_oe)
            #self.ann.set_cascade_min_out_epochs(min_oe)
            self.ann.set_cascade_max_cand_epochs(max_ce)
            #self.ann.set_cascade_min_cand_epochs(min_ce)
            self.ann.set_cascade_num_candidate_groups(num_cgroup)
        if ncascade:
            self.ann.cascadetrain_on_file(tfile, max_neurons,
                                          neurons_between_reports,
                                          cdesired_error)
        else:
            self.ann.train_on_file(tfile, max_iterations,
                                   iterations_between_reports, desired_error)
        print "Testing network"
        train_data = libfann.training_data()
        train_data.read_train_from_file(tfile)
        test_data = libfann.training_data()
        test_data.read_train_from_file(test_file)
        print "\nTrain error: %f \nTest error: %f\n\n" % (
            ann.test_data(train_data), ann.test_data(test_data))
Beispiel #50
0
# Select the output-layer activation function from the command-line option;
# anything unrecognised falls back to SIGMOID_STEPWISE.
if opts.output_activation == "SIGMOID_SYMMETRIC_STEPWISE":
	ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)
elif opts.output_activation == "GAUSSIAN":
	ann.set_activation_function_output(libfann.GAUSSIAN)
elif opts.output_activation == "GAUSSIAN_SYMMETRIC":
	ann.set_activation_function_output(libfann.GAUSSIAN_SYMMETRIC)
elif opts.output_activation == "SIGMOID":
	ann.set_activation_function_output(libfann.SIGMOID)
else:
	ann.set_activation_function_output(libfann.SIGMOID_STEPWISE)
ann.set_activation_steepness_output(opts.steep_out)


########## Import training data #####################
print "Getting training data : %s" % opts.training_file
train_data = libfann.training_data()
# NOTE(review): the '.pat' extension is swapped for '.ann' before loading --
# presumably a converted FANN-format copy of the pattern file; confirm.
train_data.read_train_from_file(opts.training_file.replace(".pat",".ann"))
#train_data.scale_train_data(0.0,1.0)

########## GA Training #####################
print "Setting GA training parameters"
# Genome: one gene per network connection, scored by the network's MSE.
genome = G1DConnections.G1DConnections()
genome.evaluator.set(GAnnEvaluators.evaluateMSE)

genome.setParams(rangemin=opts.range_min, rangemax=opts.range_max, layers=layers, bias=bias, gauss_mu=opts.gauss_mu, gauss_sigma=opts.gauss_sigma)
#genome.mutator.set(GAnnMutators.G1DConnMutateNodes)
# GA engine ties together the genome, the FANN net and the training data.
ga = GAnnGA.GAnnGA(genome, ann, train_data)
ga.setMutationRate(opts.mutation_rate)
ga.setPopulationSize(opts.population)
ga.setGenerations(opts.generations)
print "Start running GA"
Beispiel #51
0
def run_fann(num_hidden=4,
             fname="ann_ws496.net",
             fname_data_prefix='',
             n_iter=100,
             disp=True,
             graph=True):
    print "num_hidden =", num_hidden

    fname_data_train = fname_data_prefix + "train_in.data"
    fname_data_test = fname_data_prefix + "test_in.data"

    connection_rate = 1
    learning_rate = 0.7
    num_input = 1024
    #num_hidden = 40
    num_output = 1

    desired_error = 0.0001
    max_iterations = 1
    iterations_between_reports = 1

    ann = libfann.neural_net()
    ann.create_sparse_array(connection_rate,
                            (num_input, num_hidden, num_output))
    ann.set_learning_rate(learning_rate)
    ann.set_activation_function_hidden(libfann.SIGMOID_SYMMETRIC)
    ann.set_activation_function_output(libfann.LINEAR)

    # train_data is loaded
    train_data = libfann.training_data()
    train_data.read_train_from_file(fname_data_train)

    # test_data is loaded
    test_data = libfann.training_data()
    test_data.read_train_from_file(fname_data_test)
    train_mse = list()
    test_mse = list()
    for ii in range(n_iter):
        # Training is performed with training data
        ann.reset_MSE()
        ann.train_on_data(train_data, max_iterations,
                          iterations_between_reports, desired_error)

        # Testing is performed with test data
        ann.reset_MSE()
        ann.test_data(train_data)
        mse_train = ann.get_MSE()
        train_mse.append(mse_train)

        # Testing is performed with test data
        ann.reset_MSE()
        ann.test_data(test_data)
        mse_test = ann.get_MSE()
        test_mse.append(mse_test)

        if disp:
            print ii, "MSE of train, test", mse_train, mse_test

    ann.save(fname)

    # We show the results of ANN training with validation.
    if graph:
        plot(train_mse, label='train')
        plot(test_mse, label='test')
        legend(loc=1)
        xlabel('iteration')
        ylabel('MSE')
        grid()
        show()

    return train_mse, test_mse