Example #1
 def trainSingleNetwork(self, networkIndex, trainingInput, trainingOutput, testInput, testOutput):
     trainingData = libfann.training_data()
     
     trainingData.set_train_data(trainingInput, trainingOutput)
     
     testData = libfann.training_data()
     testData.set_train_data(testInput, testOutput)
     
     mses = []
     for i in range(0,300):
         for j in range(0,10):
             self.networks[networkIndex].train_epoch(trainingData)
         
         mses.append(self.networks[networkIndex].test_data(testData))
         
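         # Early stopping: keep a sliding window of the last 10 test MSEs and
         # stop once fewer than 2 of the 9 consecutive deltas are decreasing.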
         if len(mses) > 10:
             del mses[0]
             
             neg = 0
             for j in range(0,9):
                 if mses[j+1] - mses[j] < 0:
                     neg += 1
             
             if neg < 2:
                 break
             
     self.mses[networkIndex] = mses[9]
Example #2
def test(file):
    """
    expects that <filename>.net and <filename>_test.data exist
    Two inputs and one output are hardcoded into the printouts.

    :param file:  filename without extensions.
    :return:
    """
    net_file = file + '.net'
    data_file = file + '_test.data'

    testdata = libfann.training_data()  # Create the data object
    testdata.read_train_from_file(data_file)  # Read the test material, which should differ from the training material

    inputs = testdata.get_input()
    outputs = testdata.get_output()

    ann = libfann.neural_net()
    ann.create_from_file(net_file)  # Load the previously created network

    print("MSE ERROR : %.5f" %(ann.test_data(testdata))) # Ilmoittaa verkon virheen testidatalla

    for i in range(len(inputs)): # Tulostaa testidatan läpi
        result = ann.run(inputs[i])
        print("Input: %.2f %.2f, Output %.4f, Excepted %.4f" %(inputs[i][0], inputs[i][1], result[0], outputs[i][0] ))
Example #3
    def train(self, train_data):
        for sent in train_data.my_sents(self.name):
            self.ids.add_sent(sent)

        inputs = []
        outputs = []

        def add(vec, out):
            inputs.append(self.vectorize(vec))
            outputs.append([out])

        def pollute(sent, p):
            sent = sent[:]
            for _ in range(int((len(sent) + 2) / 3)):
                sent.insert(p, ':null:')
            add(sent, self.LENIENCE)

        def weight(sent):
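            # Weight each word by len(word)^3 (normalized over the sentence), so
            # longer, more distinctive words yield stronger single-word samples.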
            def calc_weight(w): return pow(len(w), 3.0)
            total_weight = 0.0
            for word in sent:
                total_weight += calc_weight(word)
            for word in sent:
                weight = 0 if word.startswith('{') else calc_weight(word)
                add([word], weight / total_weight)

        for sent in train_data.my_sents(self.name):
            add(sent, 1.0)
            weight(sent)

            # Generate samples with extra unknown tokens unless
            # the sentence is supposed to allow unknown tokens via the special :0
            if not any(word[0] == ':' and word != ':' for word in sent):
                pollute(sent, 0)
                pollute(sent, len(sent))

        for sent in train_data.other_sents(self.name):
            add(sent, 0.0)
        add([':null:'], 0.0)
        add([], 0.0)

        for sent in train_data.my_sents(self.name):
            without_entities = sent[:]
            for i, token in enumerate(without_entities):
                if token.startswith('{'):
                    without_entities[i] = ':null:'
            if without_entities != sent:
                add(without_entities, 0.0)

        inputs, outputs = resolve_conflicts(inputs, outputs)

        train_data = fann.training_data()
        train_data.set_train_data(inputs, outputs)

        for _ in range(10):
            self.configure_net()
            self.net.train_on_data(train_data, 1000, 0, 0)
            self.net.test_data(train_data)
            if self.net.get_bit_fail() == 0:
                break
Example #4
    def learn(self, episodes):
        state_size = len(episodes[0].states[0])

        # Create the model if needed
        if self._model is None:
            self._model = libfann.neural_net()
            self._model.create_sparse_array(
                1, (state_size, self.hidden_neurons, self.nb_actions))
            self._model.randomize_weights(-0.1, 0.1)
            self._model.set_activation_function_layer(libfann.GAUSSIAN, 1)
            self._model.set_activation_function_layer(libfann.LINEAR, 2)

        # Store the values of all the states encountered in all the episodes
        states = []
        values = []

        for episode in episodes:
            states.extend(episode.states)
            values.extend(episode.values)

        # Train for these values
        data = libfann.training_data()
        data.set_train_data(states, values)

        self._model.train_on_data(data, 150, 50, 1e-5)
Example #5
    def train(self, train_data):
        self.set_train_data(train_data)

        hidden_layers = [self.hidden_neurons] * self.hidden_layers
        layers = [self.train_data.num_input]
        layers.extend(hidden_layers)
        layers.append(self.train_data.num_output)

        sys.stderr.write("Network layout:\n")
        sys.stderr.write("* Neuron layers: %s\n" % layers)
        sys.stderr.write("* Connection rate: %s\n" % self.connection_rate)
        if self.training_algorithm not in ('TRAIN_RPROP',):
            sys.stderr.write("* Learning rate: %s\n" % self.learning_rate)
        sys.stderr.write("* Activation function for the hidden layers: %s\n" % self.activation_function_hidden)
        sys.stderr.write("* Activation function for the output layer: %s\n" % self.activation_function_output)
        sys.stderr.write("* Training algorithm: %s\n" % self.training_algorithm)

        self.ann = libfann.neural_net()
        self.ann.create_sparse_array(self.connection_rate, layers)
        self.ann.set_learning_rate(self.learning_rate)
        self.ann.set_activation_function_hidden(getattr(libfann, self.activation_function_hidden))
        self.ann.set_activation_function_output(getattr(libfann, self.activation_function_output))
        self.ann.set_training_algorithm(getattr(libfann, self.training_algorithm))

        fann_train_data = libfann.training_data()
        fann_train_data.set_train_data(self.train_data.get_input(), self.train_data.get_output())

        self.ann.train_on_data(fann_train_data, self.epochs, self.iterations_between_reports, self.desired_error)
        return self.ann
Example #7
 def fit(self, X, y):
     self._create_network()
     x_train = libfann.training_data()
     if len(y.shape) == 1 and y.shape[0] > 0:
         """ fann requires a row vector"""
         y = y[:, np.newaxis]
     x_train.set_train_data(X, y)
     self.ann.train_on_data(x_train, self.epoch, self.show,
                            self.desired_error)
Example #8
    def test(self, test_data):
        self.set_test_data(test_data)

        fann_test_data = libfann.training_data()
        fann_test_data.set_train_data(self.test_data.get_input(), self.test_data.get_output())

        self.ann.reset_MSE()
        self.ann.test_data(fann_test_data)

        return self.ann.get_MSE()
Example #10
 def fann_train(self, train_data, train_tar, net):
     data = libfann.training_data()
     data.set_train_data(train_data, train_tar)
     input_layer_num = data.num_input_train_data()
     output_layer_num = data.num_output_train_data()
     hidden_layer_num = int(float(input_layer_num + output_layer_num) / 2.0)
     print "Training Network With Params:\nInput: %s, Hidden: %s, Output: %s" % (
         input_layer_num, hidden_layer_num, output_layer_num)
     net.create_sparse_array(
         1, (input_layer_num, hidden_layer_num, output_layer_num))
     net.train_on_data(data, self.epocs, self.disp_round, self.error_rate)
     return net
Example #11
    def train(self, train_data):
        for sent in train_data.my_sents(self.name):
            self.ids.add_sent(sent)

        inputs = []
        outputs = []

        def add(vec, out):
            inputs.append(self.vectorize(vec))
            outputs.append([out])

        def pollute(sent, p):
            sent = sent[:]
            for _ in range(int((len(sent) + 2) / 3)):
                sent.insert(p, ':null:')
            add(sent, self.LENIENCE)

        def weight(sent):
            def calc_weight(w):
                return pow(len(w), 3.0)

            total_weight = 0.0
            for word in sent:
                total_weight += calc_weight(word)
            for word in sent:
                weight = 0 if word.startswith('{') else calc_weight(word)
                add([word], weight / total_weight)

        for sent in train_data.my_sents(self.name):
            add(sent, 1.0)
            weight(sent)
            if not any(word[0] == ':' for word in sent):
                pollute(sent, 0)
                pollute(sent, len(sent))

        for sent in train_data.other_sents(self.name):
            add(sent, 0.0)
        add([], 0.0)

        inputs, outputs = resolve_conflicts(inputs, outputs)

        train_data = fann.training_data()
        train_data.set_train_data(inputs, outputs)

        for _ in range(10):
            self.configure_net()
            self.net.train_on_data(train_data, 1000, 0, 0)
            self.net.test_data(train_data)
            if self.net.get_bit_fail() == 0:
                break
Example #12
    def train(self, train_data):
        for sent in train_data.my_sents(self.intent_name):
            if self.token in sent:
                for i in range(
                        sent.index(self.token) + self.dir, self.get_end(sent),
                        self.dir):
                    if sent[i][0] != '{':
                        self.ids.add_token(sent[i])

        inputs, outputs = [], []

        def pollute(sent, i, out_val):
            """Simulates multiple token words in adjacent entities"""
            for j, check_token in enumerate(sent):
                d = j - i
                if int(d > 0) - int(
                        d < 0) == self.dir and check_token.startswith('{'):
                    for pol_len in range(1, 4):
                        s = sent[:j] + [':0'] * pol_len + sent[j + 1:]
                        p = i + (pol_len - 1) * int(self.dir < 0)
                        inputs.append(self.vectorize(s, p))
                        outputs.append([out_val])

        def add_sents(sents, out_fn):
            for sent in sents:
                for i, token in enumerate(sent):
                    out_val = out_fn(token)
                    inputs.append(self.vectorize(sent, i))
                    outputs.append([out_val])
                    if out_val == 1.0:
                        pollute(sent, i, 1.0)

        add_sents(train_data.my_sents(self.intent_name),
                  lambda x: float(x == self.token))
        add_sents(train_data.other_sents(self.intent_name), lambda x: 0.0)
        inputs, outputs = resolve_conflicts(inputs, outputs)

        data = fann.training_data()
        data.set_train_data(inputs, outputs)

        for _ in range(10):
            self.configure_net()
            self.net.train_on_data(data, 1000, 0, 0)
            self.net.test_data(data)
            if self.net.get_bit_fail() == 0:
                break
Example #13
    def train_on_minibatch(self):
        n = min(300, len(self.learnbuffer))
        if n < 300: return
        minibatch = random.sample(self.learnbuffer, n)

        inputs = []
        outputs = []
        for oldstate, action, newstate, reward in minibatch:
            diff, val = self.value_update(oldstate, action, newstate, reward)
            val[action] += diff
            inputs += [[oldstate[0] / 7., oldstate[1] / 5.]]
            outputs += [val]

        #print("training minibatch of size %i:\n%s\n%s\n\n"%(n, str(inputs), str(outputs)))

        training_data = libfann.training_data()
        training_data.set_train_data(inputs, outputs)
        self.NN.train_epoch(training_data)
Example #15
def XY_to_fann_train_data(X, Y):
    if len(X) != len(Y):
        raise ValueError("X and Y must have the same number of lines.")

    train_data = libfann.training_data()

    if len(X):
        dim_X, dim_Y = len(X[0]), len(Y[0])

        # text mode so str writes work under Python 3
        tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
        with tmp:
            tmp.write("%d %d %d\n" % (len(X), dim_X, dim_Y))
            for i in range(len(X)):
                for line in [X[i], Y[i]]:
                    tmp.write("%s\n" % ' '.join(str(float(val)) for val in line))

        train_data.read_train_from_file(tmp.name)
        os.unlink(tmp.name)  # NamedTemporaryFile has no unlink(); os.unlink assumed imported

    return train_data
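
A short usage sketch for the helper above; the XOR-style values are made up for illustration:

    X = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    Y = [[0.0], [1.0], [1.0], [0.0]]
    data = XY_to_fann_train_data(X, Y)  # returns a populated libfann.training_data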
Example #16
def learn(train_file):
    """
    :param train_file: filename without extensions.
    :return:
    method will create nn called <filename>.net
    and except that <filename>.data exists

    Kaikki arvot on kovakoodattu tällä hetkellä. Voisi olla vapaaehtoisia parametrejä.
    Hidden layerien määrän hallintaan pitää keksiä jokin juttu.
    """
    net_file = train_file + '.net'
    data_file = train_file + '_train.data'

    connection_rate = 1
    learning_rate = 0.5  # must not be too large; on the other hand, too small a rate makes learning take a long time
    num_input = 2
    num_hidden = 4
    num_output = 1

    desired_error = 0.00005
    max_iterations = 100000
    iterations_between_reports = 10

    ann = libfann.neural_net()

    # Read the training file
    trainindata = libfann.training_data()
    trainindata.read_train_from_file(data_file)

    # Create the network
    ann.create_sparse_array(connection_rate, (num_input, num_hidden, num_hidden, num_output))
    ann.set_learning_rate(learning_rate)

    ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC_STEPWISE)  # activation function
    ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)  # learning algorithm
    ann.train_on_data(trainindata, max_iterations, iterations_between_reports, desired_error)

    ann.save(net_file)
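
learn() pairs with test() from Example #2 through the shared file-name convention; a minimal sketch with a hypothetical prefix:

    learn('distance')  # reads distance_train.data, writes distance.net
    test('distance')   # reads distance.net and distance_test.data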
Example #17
def createDistanceTestFile(filename, length, max=0.707, trainTest=False):
    """
    Esimerkki funktio datan luonnista. Tein ihan itse

    :param filename: desired filename without exteensions
    :param length: How many input, output pairs
    :return:
    """
    inputs = []
    outputs = []
    for i in range(length):
        if trainTest:
            p = random.randrange(2)  # fair coin flip; the original randrange(1) always returned 0
            if p == 0:
                a = random.uniform(0, 0.2)
            else:
                a = random.uniform(0.5, max)

            p = random.randrange(2)
            if p == 0:
                b = random.uniform(0, 0.2)
            else:
                b = random.uniform(0.5, max)

        else:
            a = random.uniform(0, max)
            b = random.uniform(0, max)
        c = math.sqrt(math.pow(a, 2) + math.pow(b, 2))
        inputs.append([a, b])
        outputs.append([c])

    data = libfann.training_data()
    data.set_train_data(inputs, outputs)
    #data.scale_input_train_data(-1.0, 1.0)
    #data.scale_output_train_data(-1.0, 1.0)

    data.save_train(filename)
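
Note that data.save_train(filename) writes to the name exactly as passed, so include the extension. A sketch generating matching files for the learn()/test() pair above (the prefix is hypothetical):

    createDistanceTestFile('distance_train.data', 1000)
    createDistanceTestFile('distance_test.data', 200, trainTest=True)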
Example #18
def trainANN(features, labels, connRate, hidNodes, error, binary):
    """
		Train the neural network using the given training data and 
		parameters. Returns a fully trained ANN.
	"""
    # Organize ANN parameters
    connection_rate = connRate
    num_input = 72
    num_hidden = hidNodes
    num_output = 3
    desired_error = error
    max_iterations = 100000

    # Print out two reports for every ANN
    iterations_between_reports = 50000

    # Binarize labels as it is necessary for ANN
    labels = binary.fit_transform(labels)

    # Cast numpy to python list
    annFeatures = features.tolist()
    annLabels = labels.tolist()

    # Create an ANN training data instance and set data
    training = libfann.training_data()
    training.set_train_data(annFeatures, annLabels)

    ann = libfann.neural_net()

    ann.create_sparse_array(connection_rate,
                            (num_input, num_hidden, num_output))

    # Train the ANN
    ann.train_on_data(training, max_iterations, iterations_between_reports,
                      desired_error)

    return ann
Example #19
def trainANN( features, labels, connRate, hidNodes, error, binary ):
	"""
		Train the neural network using the given training data and 
		parameters. Returns a fully trained ANN.
	"""
	# Organize ANN parameters
	connection_rate = connRate
	num_input = 72
	num_hidden = hidNodes
	num_output = 3
	desired_error = error
	max_iterations = 50000
	
	# Print out two reports for every ANN
	iterations_between_reports = 50000
	
	
	# Binarize labels as it is necessary for ANN
	labels = binary.fit_transform( labels )
	
	# Cast numpy to python list
	annFeatures = features.tolist()
	annLabels = labels.tolist()
	
	# Create an ANN training data instance and set data
	training = libfann.training_data()
	training.set_train_data( annFeatures, annLabels )
	
	ann = libfann.neural_net()
	
	ann.create_sparse_array( connection_rate, (num_input, num_hidden, num_output) )
	
	# Train the ANN
	ann.train_on_data( training, max_iterations, iterations_between_reports, desired_error )
	
	return ann
Example #21
import os
from fann2 import libfann as fann
import cv2 as cv

ENG_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
outputs = []
inputs = []
for letter in os.listdir('./letters/'):
    print(letter)
    for letter_img in os.listdir('./letters/' + letter):
        img = cv.imread(os.path.join('./letters', letter, letter_img))
        img = cv.resize(img, (35, 35))
        img = cv.threshold(img, 0, 255, cv.THRESH_BINARY)[1]
        img_bin = []
        for x in img:
            for y in x:
                if sum(y) == 0:
                    img_bin.append(1)
                else:
                    img_bin.append(0)
        result_array = ([0] * 26)
        result_array[ENG_ALPHABET.index(letter)] = 1
        outputs.append(result_array)
        inputs.append(img_bin)

td = fann.training_data()
td.set_train_data(inputs, outputs)
td.save_train('./train.dat')
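
The saved train.dat can then be used to train a network; a minimal sketch, with the hidden-layer size and training parameters chosen as assumptions:

    ann = fann.neural_net()
    ann.create_standard_array((35 * 35, 128, 26))  # 1225 binarized pixels in, 26 letters out; 128 hidden is an assumption
    ann.train_on_file('./train.dat', 500, 10, 0.001)  # max epochs, report interval, desired error (all assumed)
    ann.save('./letters.net')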
Example #22
    def train(self):
        print("Retraining the ANN using specified predictors.\n")

        # Collect the ratings by the predictors
        R_T = []

        for p in self.predictors:
            g = pandas.read_csv(
                ROOT + '/results/blend/' + p + '_probe',
                header=None,
                names=['rating']
            )['rating'].tolist()

            # Convert the list to a numpy array
            g = np.array(g, dtype=float)

            # Subtract out the mean rating of p
            # g -= np.sum(g) / len(g)
            g /= 5

            R_T.append(g)

        # Convert the predictor ratings into a matrix to transpose it
        R_T = np.matrix(R_T)
        R = R_T.T

        # Get it back in list of list form, each inner list is the set of
        # predictions for a single user/movie combination
        self.inputs = R.tolist()

        # Get a list of the actual probe ratings
        probe = pandas.read_csv(
            ROOT + '/data/parsed/probe_ratings.dta',
            header=None,
            names=['rating']
        )['rating'].tolist()

        # Convert it into a numpy array
        probe = np.array(probe, dtype=float)

        # Subtract out the mean rating of probe
        # probe -= np.sum(probe) / len(probe)
        probe /= 5

        # Create a list for each item of the list.
        outputs = [[x] for x in probe.tolist()]

        # Parameters for neural network
        # Originally, 5e-4 and 3e-7
        # 5e-5 and 1.5e-8 worked well and was still decreasing in RMSE
        # until 3000 epochs
        ANN_LEARNRATE = 0.0005
        ANN_LEARNRATE_STEP = 0.00000015
        NUM_INPUT = len(self.inputs[0])
        NUM_OUTPUT = len(outputs[0])  # Should be 1
        NUM_HIDDEN = 19

        # Create an artificial neural network.
        self.ann = libfann.neural_net()
        self.ann.set_learning_rate(ANN_LEARNRATE)
        self.ann.set_activation_function_output(libfann.SIGMOID_SYMMETRIC)
        # ann.set_activation_function_output(libfann.LINEAR)
        # ann.set_activation_function_output(libfann.SIGMOID)
        self.ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
        self.ann.create_standard_array((NUM_INPUT, NUM_HIDDEN, NUM_OUTPUT))

        # Set up the training data for the neural network.
        train_data = libfann.training_data()
        train_data.set_train_data(self.inputs, outputs)

        # Train the network
        MAX_ITER = 3000
        REPORT_SPACING = 25
        # DESIRED_E = 0.0001

        # Sanity check
        assert ANN_LEARNRATE - (MAX_ITER * ANN_LEARNRATE_STEP) > 0

        # ann.train_on_data(train_data, MAX_ITER, REPORT_SPACING, DESIRED_E)
        for i in range(MAX_ITER):
            learn_rate = ANN_LEARNRATE - (i * ANN_LEARNRATE_STEP)

            self.ann.set_learning_rate(learn_rate)
            self.ann.train_epoch(train_data)

            epoch = i+1

            if epoch == 1:
                rmse = np.sqrt(25 * self.ann.get_MSE())
                best_rmse = rmse
                best_epoch = 1

            if epoch % REPORT_SPACING == 0 or epoch == 1:
                rmse = np.sqrt(25 * self.ann.get_MSE())

                # Keep track of the best output so far
                if rmse < best_rmse:
                    best_rmse = rmse
                    best_epoch = epoch

                print("Finished epoch {0} with RMSE {1}.".format(epoch, rmse))

                # TODO: Also need to predict probe ratings if we want to use
                # these blends in a further linear blend.
                print("Predicting qual ratings for epoch {0}.".format(epoch))
                self.predict(suffix=str(epoch))
                print("  - Done predicting qual ratings.")

                print("Predicting probe ratings for epoch {0}.".format(epoch))
                self.probe_predict(suffix=str(epoch))
                print("  - Done predicting probe ratings.\n")

        # Save the network
        netfile = ROOT + '/results/ann.net'
        print("Neural network saved to {0}\n".format(netfile))
        self.ann.save(netfile)

        # Print best RMSE & epoch
        print("Best epoch RMSE was {0}.".format(best_rmse))
        print("  - Corresponds to epoch {0}".format(best_epoch))
Example #23
def train_and_test(results, iterations, output_dir, name):
    data = read_metrics(results)
    split = int(len(data) * 0.8)
    num_input = len(data[0]) - 1
    random.shuffle(data)

    # # Plot train data output distribution
    # outputs_train = sorted([row[num_input] for row in data[:split]])
    # outputs_test = sorted([row[num_input] for row in data[split:]])
    # fig = plt.figure(figsize=(6, 4))
    # plt.plot(outputs_train)
    # plt.ylim([0, 2])
    # plt.show()

    # fig = plt.figure(figsize=(6, 4))
    # plt.plot(outputs_test)
    # plt.ylim([0, 2])
    # plt.show()

    # return None

    train_data_file = _get_ann_data_file(data[:split])
    test_data_file = _get_ann_data_file(data[split:])

    ann = libfann.neural_net()
    ann.create_standard_array([num_input, 20, 5, 1])
    # ann.set_activation_function_layer(libfann.SIGMOID, 1)
    # ann.set_activation_function_layer(libfann.SIGMOID, 2)

    ann.set_activation_steepness_hidden(1.0)

    # Train
    # TEST TEST TEST
    # train_data = libfann.training_data()
    # train_data.read_train_from_file(train_data_file.name)
    # ann.init_weights(train_data)
    # ann.set_learning_rate(1.0)
    # ann.set_training_algorithm(libfann.TRAIN_INCREMENTAL)
    ann.train_on_file(train_data_file.name, iterations, 10, 0.0)

    # Save results
    ann.save(os.path.join(output_dir, name + '.net'))
    shutil.copy(train_data_file.name, os.path.join(output_dir, name + '_train.data'))
    shutil.copy(test_data_file.name, os.path.join(output_dir, name + '_test.data'))

    # Test
    print "train MSE: " + str(ann.get_MSE())
    test_data = libfann.training_data()
    test_data.read_train_from_file(test_data_file.name)
    ann.reset_MSE()
    ann.test_data(test_data)
    print "test MSE: " + str(ann.get_MSE())
    test_data.destroy_train()

    outputs = sorted([
        (ann.run(row[:num_input]), row[num_input])
        for row in data[split:]
    ], key=lambda p: p[1])

    fig = plt.figure(figsize=(6, 4))
    plt.plot([p[0] for p in outputs])
    plt.plot([p[1] for p in outputs])
    plt.ylim([0, 1.2])
    plt.show()

    train_data_file.close()
    test_data_file.close()
    return None
Beispiel #24
0
  #     	res.append([data[i+N+1]])

indvec = range(len(data))
random.shuffle(indvec)
#print(len(indvec))
#print("indvec",indvec)
vec_train = [data[i] for i in indvec[:int(len(data)*0.8)]]
res_train = [ext[i] for i in indvec[:int(len(data)*0.8)]]
vec_test = [data[i] for i in indvec[int(len(data)*0.8):]]
res_test = [ext[i] for i in indvec[int(len(data)*0.8):]]
#print(len(vec_train),len(res_train))"
#print vec_train

#print len(data)
#print len(ext)
tr_data = libfann.training_data()
tr_data.set_train_data(vec_train,res_train)
#print(vec_train)
ann.train_on_data(tr_data,max_iter,iter_betw_repor,error)
thr = 0.0
auc = 0.0
last_fpr = 0
last_tpr = 0
max_acc = 0.0
for p in range(0,1001):
	 #0.1....0.9 postrROP
	(fp,tp,fn,tn) = (0,0,0,0) # hypothises is this vector is from malicious process
#run on test sample
	for i in range(len(vec_test)):
		bbb = ann.run(vec_test[i])
		if res_test[i][0] == 1.0:
Example #25
from fann2 import libfann as pyfann

connection_rate = .5
learning_rate = 0.5

desired_error = 0.00000001
max_iterations = 3500
iterations_between_reports = 50


training = pyfann.training_data()
training.read_train_from_file("/home/ubuntu/svidur/data_train.data")

num_input = len(training.get_input()[0])
num_neurons_hidden_1 = 24
num_neurons_hidden_2 = 7
num_output = len(training.get_output()[0])

ann = pyfann.neural_net()
#ann.create_sparse_array(connection_rate, (num_input,num_neurons_hidden_1,num_neurons_hidden_2, num_output))

ann.create_standard_array((num_input,num_neurons_hidden_1,num_neurons_hidden_2,num_output))
ann.set_activation_function_hidden(pyfann.ELLIOT_SYMMETRIC)
ann.set_activation_function_output(pyfann.ELLIOT_SYMMETRIC)
ann.set_training_algorithm(pyfann.TRAIN_BATCH)
ann.set_train_error_function(pyfann.ERRORFUNC_LINEAR)

ann.train_on_data(training, max_iterations, iterations_between_reports, desired_error)

print "Testing network"
test_data = pyfann.training_data()
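
The listing ends right after creating test_data; a possible continuation, mirroring Example #8 (the test-file path is an assumption):

    test_data.read_train_from_file("/home/ubuntu/svidur/data_test.data")  # assumed path
    ann.reset_MSE()
    ann.test_data(test_data)
    print("test MSE: %f" % ann.get_MSE())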
Example #26
#     	res.append([data[i+N+1]])

indvec = list(range(len(data)))  # list() so random.shuffle works under Python 3
random.shuffle(indvec)
#print(len(indvec))
#print("indvec",indvec)
vec_train = [data[i] for i in indvec[:int(len(data) * 0.8)]]
res_train = [ext[i] for i in indvec[:int(len(data) * 0.8)]]
vec_test = [data[i] for i in indvec[int(len(data) * 0.8):]]
res_test = [ext[i] for i in indvec[int(len(data) * 0.8):]]
#print(len(vec_train), len(res_train))
#print vec_train

#print len(data)
#print len(ext)
tr_data = libfann.training_data()
tr_data.set_train_data(vec_train, res_train)
#print(vec_train)
ann.train_on_data(tr_data, max_iter, iter_betw_repor, error)
thr = 0.0
auc = 0.0
last_fpr = 0
last_tpr = 0
max_acc = 0.0
for p in range(0, 1001):
    #0.1....0.9 postrROP
    (fp, tp, fn, tn) = (0, 0, 0, 0)  # hypothesis: this vector is from a malicious process
    #run on test sample
    for i in range(len(vec_test)):
        bbb = ann.run(vec_test[i])
Example #27
 def get_train_data(self, trainfile):
     traindata = libfann.training_data()
     traindata.read_train_from_file(trainfile)
     return traindata
Example #28
connection_rate = 1
learning_rate = 0.7
num_neurons_hidden = 100

desired_error = 0.0001
max_iterations = 100
iterations_between_reports = 1

early_stopping_threshold = 3
break_on_early_stopping = False

ann = libfann.neural_net()


train_data = libfann.training_data()
train_data.read_train_from_file("data/dev-kmeans-10-pca.data")
# train_data.scale_input_train_data(0, 1)

# ann.create_from_file("minimal.net")
ann.create_sparse_array(connection_rate, (train_data.num_input_train_data(), 40, 20, 10, train_data.num_output_train_data()))

ann.set_learning_rate(learning_rate)
ann.set_activation_function_output(libfann.SIGMOID)


test_data = libfann.training_data()
test_data.read_train_from_file("data/test-kmeans-10-pca.data")
# test_data.scale_input_train_data(0, 1)

count = 0