Example No. 1
def prevertemperatura(params):
    # Assemble the inputs in the order the network was trained on.
    chaves = ['chuva', 'hora', 'tanterior', 'tatual',
              'ranterior', 'ratual', 'poanterior', 'poatual']
    inputs = [params[chave] for chave in chaves]

    temperaturas = []
    temperaturas.append(float(params['tatual']))

    with open('A702_normalizado.pickle', 'rb') as arquivo:
        arquivoinput = pickle.load(arquivo)

    inputNorm = nl.tool.Norm(arquivoinput)
    inputNormTarget = inputNorm(inputs)

    for i in range(1, 7):
        with open('A702_t+' + str(i) + '.pickle', 'rb') as arquivo:
            output = pickle.load(arquivo)
            net = nl.load('t+' + str(i) + '.net')
            out = net.sim(inputNormTarget)
            outputNorm = nl.tool.Norm(output)
            temperaturas.append(outputNorm.renorm(out)[0][0])
    return temperaturas
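A usage sketch for the function above, assuming the A702_normalizado.pickle file and the t+1.net through t+6.net models sit in the working directory; the parameter values below are made up for illustration:

params = {
    'chuva': 0.0, 'hora': 14.0,
    'tanterior': 24.1, 'tatual': 25.3,
    'ranterior': 310.0, 'ratual': 295.0,
    'poanterior': 1012.0, 'poatual': 1011.5,
}
temperaturas = prevertemperatura(params)  # current reading plus six hourly forecasts
print(temperaturas)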
Example No. 2
def neuro_predict(x0, y0, y, name):
    import neurolab as nl 
    import numpy as np 
    
    size = len(y)
    
    inp = np.concatenate((x0, y0), axis=1)
    tar = y.reshape(size, 1)
    
    # data preprocessing
    mean_tar = tar.mean(axis=0)[0]
    std_tar = tar.std(axis=0)[0] 
 
    import os
    cwd = os.getcwd() 
    sub_dir = cwd + '/training_models/neuro_network'
    filename = 'model_' + name   
    #net = nl.load( os.path.join(sub_dir, filename) )
    net = nl.load(filename)
    
    out = net.sim(inp).reshape(size)
    
    out = out * 2 * std_tar + mean_tar
    
    return out
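The last line inverts a target scaling of the form (tar - mean_tar) / (2 * std_tar), which the training code presumably applied. A minimal round-trip check of that convention, using only numpy:

import numpy as np

tar = np.array([[3.0], [5.0], [9.0]])
mean_tar = tar.mean(axis=0)[0]
std_tar = tar.std(axis=0)[0]

scaled = (tar - mean_tar) / (2 * std_tar)    # forward scaling used at training time
restored = scaled * 2 * std_tar + mean_tar   # inverse applied to net.sim() output
assert np.allclose(restored, tar)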
Example No. 3
def preverradiacao(params):
    # Assemble the inputs in the order the network was trained on.
    chaves = ['chuva', 'hora', 'tanterior', 'tatual',
              'ranterior', 'ratual', 'poanterior', 'poatual']
    inputs = [params[chave] for chave in chaves]

    radiacaos = []
    radiacaos.append(float(params['ratual']))

    with open('A702_normalizado.pickle', 'rb') as arquivo:
        arquivoinput = pickle.load(arquivo)

    inputNorm = nl.tool.Norm(arquivoinput)
    inputNormTarget = inputNorm(inputs)

    for i in range(1, 7):
        with open('A702_radiacao+' + str(i) + '.pickle', 'rb') as arquivo:
            output = pickle.load(arquivo)
            net = nl.load('radiacao+' + str(i) + '.net')
            out = net.sim(inputNormTarget)
            outputNorm = nl.tool.Norm(output)
            saida = outputNorm.renorm(out)[0][0]
            if saida < 1:
                saida = 0
            radiacaos.append(saida)
    return radiacaos
Example No. 4
def setNN(input, output, path, neurons, epochs):
    print("\tResilient backpropagation network with {0} neurons in hidden layer and {1} epochs"\
          .format(neurons, epochs))
    XInput, YOutput, XTest, YTest = splitData(input, output)
    if os.path.isfile(path):
        print("\n\tLoading network from " + path)
        return nl.load(path)

    net = nl.net.newff([[-1, 1]] * len(XInput[0]), [neurons, 16])
    net.trainf = nl.net.train.train_rprop
    net.init()
    result = net.train(XInput, YOutput, epochs=epochs, show=10, goal=0.0001)
    print("\tSaving network in " + path)
    net.save(path)
    res = net.sim(XInput)
    plot.plot(XInput, YOutput)

    # count correct classifications (computed but not returned)
    true = 0
    for i in range(len(res)):
        for j in range(16):
            if res[i][j] == max(res[i]) and YOutput[i][j] == 1.0:
                true += 1
    return net
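The counting loop above compares each row's maximum against the one-hot target; with numpy the same tally is one line. A sketch with toy arrays (names mirror the function's, data is illustrative):

import numpy as np

res = np.array([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])      # simulated outputs
YOutput = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])  # one-hot targets
true = int(np.sum(np.argmax(res, axis=1) == np.argmax(YOutput, axis=1)))
print(true)  # 1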
Example No. 5
def neuro_predict(x0, y0, y, name):
    import neurolab as nl
    import numpy as np

    size = len(y)

    inp = np.concatenate((x0, y0), axis=1)
    tar = y.reshape(size, 1)

    # data preprocessing
    mean_tar = tar.mean(axis=0)[0]
    std_tar = tar.std(axis=0)[0]

    import os
    cwd = os.getcwd()
    sub_dir = cwd + '/training_models/neuro_network'
    filename = 'model_' + name
    #net = nl.load( os.path.join(sub_dir, filename) )
    net = nl.load(filename)

    out = net.sim(inp).reshape(size)

    out = out * 2 * std_tar + mean_tar

    return out
Example No. 6
def red_neuronal():  # input parameters for the board size
    if not os.path.isfile('../Pentominos/redes/red-50k-onlymax.net'):
        funcion_activacion = trans.LogSig()
        red = net.newff(minmax=[[0, 1]] * 63,
                        size=[7, 7, 63],
                        transf=[funcion_activacion] * 3)
        red.reset()
        entrada, objetivo, prueba = get_entrada_objetivo()

        # initial biases and weights
        np.random.seed(3287426346)
        #     red.reset()
        for capa in red.layers:
            capa.initf = init.init_zeros

        red.init()

        red.trainf = train.train_gd
        red.errorf = error.MAE()

        print("Comienza el entrenamiento")
        print("Net.ci: " + str(red.ci))

        red.train(entrada, objetivo, lr=0.1, epochs=50000, show=500, goal=0.01)

        red.sim(prueba)
        red.save('../Pentominos/redes/red-50k-onlymax.net')
    #     return red.sim(prueba), prueba
    else:
        red = nl.load('../Pentominos/redes/red-50k-onlymax.net')
    return red
Example No. 7
def create_nn(input_data,
              output_data,
              path,
              hidden_layers=8,
              epochs=500,
              goal=0.5):

    if os.path.isfile(path):
        print("\n\tLoading network from " + path)
        net = nl.load(path)
        res = net.sim(input_data)
        return net, res

    # init the neural network
    net = nl.net.newff([[-100, 100]] * len(input_data[0]),
                       [hidden_layers, len(output_data[0])])
    net.trainf = nl.net.train.train_rprop
    net.init()
    # train the neural network
    net.train(input_data, output_data, epochs=epochs, show=10, goal=goal)

    # save network to a file
    print("\tSaving network in " + path)
    net.save(path)
    # get the results
    res = net.sim(input_data)

    return net, res
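A usage sketch for create_nn with a toy two-feature dataset; the file name toy.net and the data are hypothetical, and neurolab plus numpy are assumed installed:

import numpy as np

input_data = np.random.uniform(-100, 100, (50, 2))
output_data = (input_data.sum(axis=1, keepdims=True) > 0).astype(float)

net, res = create_nn(input_data, output_data, 'toy.net', epochs=100, goal=0.5)
print(res.shape)  # (50, 1)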
Example No. 8
    def pegarRedeSalva(self, widget):
        try:
            dialog = Gtk.FileChooserDialog("Por favor, escolha um arquivo .net",
                                None,
                                Gtk.FileChooserAction.OPEN,
                                (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
                                Gtk.STOCK_OPEN, Gtk.ResponseType.OK))

            response = dialog.run()
            if response == Gtk.ResponseType.OK:
                # load the file
                self.net = neurolab.load(dialog.get_filename())
                # extract the targets and place them into the liststores
                numTargets = [float(self.targets[i][j])
                        for i in xrange(len(self.targets)) for j in xrange(1)]
                self._setListStore(self.inputs, numTargets, True)
                self.storeRA.append([str(self.net.trainf)])
                # update the status bar with the file name
                contexto = self.statusFile.get_context_id("fileAbrir")
                self.statusFile.push(contexto,str(dialog.get_filename()))

        except:
            self.feedStatus.gerarStatus(self.feedStatus.contexto_load)
            # capture the raised exception
            trace = traceback.format_exc()
            # print it
            #print "Ocorreu um erro: \n",trace
            # append it to a log file
            file("trace.log","a").write(trace)

        finally:
            dialog.destroy()
Example No. 9
def testFANN(paths, network):
    net = nl.load(network)
    predict = [row[0] for row in net.sim(np.array(getImages(paths))).tolist()]
    real = getReal(paths)
    return [real, predict]
Example No. 10
    def __init__(self, index):
        threading.Thread.__init__(self)
        self.index = index
        self.c = Converter()
        target = scipy.io.loadmat('./algorithms/t3_output.mat')
        self.net = nl.load('./algorithms/t3_estimator.net')
        t = target['t3_output']
        self.norm_t = nl.tool.Norm(t)
Example No. 11
def print_weights_of_saved_net(path):
    try:
        net = nl.load(path)
        for layer in net.layers:
            pprint(layer.np['w'])
    except IOError as e:
        logging.error("Can not open file to print weights of net")
        logging.error(e)
Example No. 12
    def __init__(self, index):
        threading.Thread.__init__(self)
        self.index = index
        self.c = Converter()
        target = scipy.io.loadmat('./algorithms/t3_output.mat')
        self.net = nl.load('./algorithms/t3_estimator.net')
        t = target['t3_output']
        self.norm_t = nl.tool.Norm(t)
Example No. 13

def main(args, argv):

    # PARSING ARGUMENTS <-- TEST_FOLDER, INPUT_NET, OUTPUT_FILE

    test_input_data = list()
    results = str()

    if (args != 3):
        exit(1)

    try:
        # Testing data
        w_files_test = os.listdir(argv[0])
        if not (argv[1].endswith("NET")):
            exit(2)
        w_file_output = open(argv[2], "w")
        net = nl.load(argv[1])

    except:
        exit(3)


    # LOADING TESTING DATA

    print("\nLOADING TEST DATA\n")

    w_files_test = wav_to_mfcc(argv[0], w_files_test, test_input_data)

    # TESTING

    print("\n\tTESTING with test data\n")

    for index in range(len(test_input_data)):
        score = net.sim([test_input_data[index]])[0][0]

        file_name = w_files_test[index].split('/')
        file_name = file_name[-1].split('.')
        file_name = file_name[-2]

        if (score > 0.92):
            decision = 1
        else:
            decision = 0

        if (score > 1):
            score = 1
        elif (score < 0):
            score = 0

        print(file_name + " " + str(score) + " " + str(decision))
        results += file_name + " " + str(score) + " " + str(decision) + "\n"

    w_file_output.write(results)
    w_file_output.close()

    print("\nPROCESSED\n")

    return 0
Example No. 14
    def test_res(self, path_for_testing):
        """
        Provide test of created ann for examples.

        Parameters
        ----------
        :param path_for_testing: str or unicode
            Path to files for testing.

        Returns
        -------
        :return:
            Return statistic of tests.
        """
        prc_sum, nm_sum = 0.0, 0.0
        for fls in os.listdir(path_for_testing):
            self.logger.info(u"Word %s" % fls)
            nm_sum += 1
            pron = extractor.MelExtractor(glob_path=path_for_testing+u"%s" % fls)
            ext_res = pron.viewer()

            prc_n, nm_n, prc_b, nm_b = 0.0, 0.0, 0.0, 0.0
            for i in self.lst_of_commands:
                try:
                    tmp = open(u'networks/%s_brain' % i)
                    nt = pickle.load(tmp)
                    tmp.close()
                    net = nl.load(u'networks/%s_neurolab' % i)

                    nm_n += 1
                    nm_b += 1
                    example = self.ext_t(ext_res)
                    if fls.startswith(i):
                        if round(net.sim([example])[0][0]) == 1.0:
                            prc_n += 1
                        if round(nt.activate(example)) == 1.0:
                            prc_b += 1
                    else:
                        if round(net.sim([example])[0][0]) == 0.0:
                            prc_n += 1
                        if round(nt.activate(example)) == 0.0:
                            prc_b += 1

                except IOError:
                    self.logger.critical(u"no created networks for %s" % i)
            if prc_n/nm_n*100 == 100.0:
                self.logger.info(u'Word was recognized by Neurolab')
                prc_sum += 1
            else:
                self.logger.debug(u'Neurolab %s' % unicode(prc_n/nm_n*100))
            if prc_b/nm_b*100 == 100.0:
                self.logger.info(u'Word was recognized by PyBrain')
            else:
                self.logger.debug(u'PyBrain %s' % unicode(prc_b/nm_b*100))

            self.logger.info(u'Result is %s' % unicode(prc_sum/nm_sum*100))
        print u"Result of recognition by Neurolab is %s percents" % unicode(prc_sum/nm_sum*100)
Example No. 15

def test(data):

    global net
    net = nl.load('part2_16.txt')
    count = 0
    for i in data:
        #print len(i[0]),type(i[0])
        out = net.sim([i[0]])[0]
        val = i[1]
        #print out, val
        if np.argmax(out) == val.index(max(val)):
            count += 1

    print "accuracy : ", (float(count) / len(data)) * 100
Example No. 16

def test(data):

    global net
    net = nl.load('autoencoder2_4_second_weights_2.txt')
    count = 0
    for i in data:
        #print len(i[0]),type(i[0])
        out = i[0]
        val = i[1]
        #print out, val
        if np.argmax(out) == val.index(max(val)):
            count += 1

    print "accuracy : ", (float(count) / len(data)) * 100
Example No. 17
def run(from_file=False, to_file=False):
    functions = [(f'damped_sine_wave_{i}', get_damped_sine_wave_fun(),
                  (0, 10 * math.pi, 10**i + 1)) for i in range(1, 3)]
    functions.extend([
        ('rosenbrock_1', get_rosenbrock_fun(), (-2, 2, 11), (-1, 3, 11)),
        # ('rosenbrock_2', get_rosenbrock_fun(), (-2, 2, 21), (-1, 3, 21)),
        ('3d_sine_1', get_3d_sine_fun(), (0, 10 * math.pi, 11),
         (0, 10 * math.pi, 11)),
    ])
    for f_name, f, *ranges in functions:
        file_prefix = f'output/nl_{f_name}'
        print(f"### Using neurolab package for training neural network to "
              f"approximate {f_name.replace('_', ' ')} function on ranges: "
              f"{ranges}. Output files prefix: {file_prefix}")

        X, y = generate_fun_samples(f, *ranges)
        X_scaler = preprocessing.StandardScaler()
        X_scaled = X_scaler.fit_transform(X)
        y_scaler = preprocessing.MaxAbsScaler()
        y_scaled = y_scaler.fit_transform(y)

        if from_file:
            net = nl.load(file_prefix + NET)
            with open(file_prefix + HIS, 'rb') as file:
                training_history = pickle.load(file)
        else:
            layers = [NEURONS_BY_LAYER] * LAYERS_COUNT + [1]
            net = nl.net.newff([[-2, 2] for _ in ranges], layers)

            @timeit
            def train():
                return nl.train.train_gdm(net,
                                          X_scaled,
                                          y_scaled,
                                          epochs=EPOCHS,
                                          show=100)

            training_history = train()
        if to_file:
            net.save(file_prefix + NET)
            with open(file_prefix + HIS, 'wb') as file:
                pickle.dump(training_history, file)

        def simulate(X_sim):
            return y_scaler.inverse_transform(
                net.sim(X_scaler.transform(X_sim)))

        plot_experiment_results(f, file_prefix, simulate, ranges, X, y,
                                training_history)
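The snippet leans on several module-level names (NET, HIS, NEURONS_BY_LAYER, LAYERS_COUNT, EPOCHS) and a timeit decorator, none of which are shown. A minimal timeit sketch consistent with how it is used here, i.e. it times the call and passes the return value through:

import functools
import time

def timeit(func):
    # Assumed decorator: report wall-clock time, return the wrapped result.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        print(f'{func.__name__} took {time.perf_counter() - start:.2f}s')
        return result
    return wrapper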
Example No. 18

def some_func():
    try:
        net = nl.load(Conf.NET_FILE)
    except IOError as e:
        print e

    songs_info_loader = SongsInfoLoader(Conf.SONG_INFO_WITH_ANNOTATIONS_PATH)
    songs_list = songs_info_loader.get_songs_list()

    # split song for development list and evaluate list
    dev_songs_list, eval_song_list = split_songs_for_purpose(songs_list)

    dev_input, dev_targets = change_to_proper_format(dev_songs_list)
    eval_input, eval_targets = change_to_proper_format(eval_song_list)
    result = net.sim(dev_targets)
    pprint(result)
Example No. 19

def get_activations(data, index):
    net = nl.load('autoencoder2_16_second_weights.txt')
    a = data[index]
    # for i in range(len(net.layers)):
    weights = []
    biases = []
    biases.append(net.layers[0].np['b'])
    weights.append(net.layers[0].np['w'])
    biases = np.array(biases)
    weights = np.array(weights)
    print "hello:", weights[0].shape, (np.transpose(np.dot(weights[0], a)) +
                                       biases).shape
    #print len(weights),len(weights[0]),len(weights[0][0]),weights[0].shape,np.transpose(a).shape,biases.shape
    a = sigmoid_vec(np.add(np.transpose(np.dot(weights[0], a)), biases))
    print a.shape
    return a[0].tolist()
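get_activations relies on a sigmoid_vec helper that is not shown; a minimal stand-in consistent with its element-wise use on numpy arrays:

import numpy as np

def sigmoid_vec(x):
    # Assumed helper: element-wise logistic sigmoid over an array.
    return 1.0 / (1.0 + np.exp(-np.asarray(x, dtype=float)))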
Example No. 20
def test_neuro():
    Ndata, Pdata = main()

    p_data = []
    x = len(Ndata)     # N file count
    y = len(Ndata[0])  # channel count, here 8
    o = len(Pdata)     # P file count
    p = len(Pdata[0])  # channel count, here 8

    bpnet = nl.load('relief_hurst.net')
    answer = bpnet.sim(Ndata[7])
    print answer
    for i in range(o):
        n_answer = bpnet.sim(Pdata[i])
        print n_answer
Example No. 21
    def neuronas(self, filedir):
        imagen = cv2.imread(filedir)
        imagen = ecnontrar_tomate(imagen)
        cv2.imwrite("tomate-recortado.jpg", imagen)
        cadena = sacar_pixels("tomate-recortado.jpg")
        if (os.path.exists("datos-tomate.csv") == True):
            os.remove("datos-tomate.csv")
        archivo_entrenamiento = open("datos-tomate.csv", "a")
        archivo_entrenamiento.write(cadena)
        archivo_entrenamiento.close()
        datos = np.matrix(sp.genfromtxt("datos-tomate.csv", delimiter=" "))
        rna = nl.load("red-neuronal-artificial.tmt")
        salida = rna.sim(datos)
        self.salida1(str(salida[0][0]))
        self.salida2(str(salida[0][1]))
        self.salida3(str(salida[0][2]))
        podrido = salida[0][0] * 100
        maduro = salida[0][1] * 100
        verde = salida[0][2] * 100

        if (podrido > 80.):
            if (maduro > 40.):
                resultado = "el tomate esta a punto de podrirse"
                self.estado(resultado)
            else:
                resultado = "el tomate esta podrido"
                self.estado(resultado)
        elif (maduro > 80.):
            if (podrido > 40.):
                resultado = "el tomate esta pasandose de su madurez"
                self.estado(resultado)
            elif (verde > 40.):
                resultado = "El tomate esta a punto de llegar a su madurez"
                self.estado(resultado)
            else:
                resultado = "El tomate esta en su mejor punto"
                self.estado(resultado)
        elif (verde > 80.):
            if (maduro > 40.):
                resultado = "el tomate esta madurando"
                self.estado(resultado)
            else:
                resultado = "el tomate esta verde"
                self.estado(resultado)
Example No. 22
def neuro_validate(x, y, timeseries, name):
    import neurolab as nl
    import numpy as np
    
    size = len(x)
    
    inp = x.reshape(size, 3)
    tar = y.reshape(size, 1)
    
    # data preprocessing
    mean_tar = tar.mean(axis=0)[0]
    std_tar = tar.std(axis=0)[0]
    
    tar = (tar - mean_tar) / (2 * std_tar)
    
    # add the previous (scaled) target as an extra feature
    inp2 = np.zeros(size)
    for i, k in enumerate(y):
        inp2[i] = tar[i - 1][0]
        
    inp2 = inp2.reshape(size, 1)

    inp = np.concatenate((inp, inp2), axis=1)

    import os
    cwd = os.getcwd() 
    sub_dir = cwd + '/training_models/neuro_network'
    filename = 'model_' + name 
    #net = nl.load( os.path.join(sub_dir, filename) )
    net = nl.load(filename)
    
    # Simulate network
    out = net.sim(inp).reshape(size)
    
    out = out * 2 * std_tar + mean_tar
    
    # Plot result
    import pylab as pl
    
    pl.subplot(111)
    pl.plot(timeseries, y, '.-r', timeseries, out, 'p-b')
    pl.legend(['train target', 'net output'])
    pl.show()
Example No. 23
def neuro_validate(x, y, timeseries, name):
    import neurolab as nl
    import numpy as np

    size = len(x)

    inp = x.reshape(size, 3)
    tar = y.reshape(size, 1)

    # data preprocessing
    mean_tar = tar.mean(axis=0)[0]
    std_tar = tar.std(axis=0)[0]

    tar = (tar - mean_tar) / (2 * std_tar)

    # add the previous (scaled) target as an extra feature
    inp2 = np.zeros(size)
    for i, k in enumerate(y):
        inp2[i] = tar[i - 1][0]

    inp2 = inp2.reshape(size, 1)

    inp = np.concatenate((inp, inp2), axis=1)

    import os
    cwd = os.getcwd()
    sub_dir = cwd + '/training_models/neuro_network'
    filename = 'model_' + name
    #net = nl.load( os.path.join(sub_dir, filename) )
    net = nl.load(filename)

    # Simulate network
    out = net.sim(inp).reshape(size)

    out = out * 2 * std_tar + mean_tar

    # Plot result
    import pylab as pl

    pl.subplot(111)
    pl.plot(timeseries, y, '.-r', timeseries, out, 'p-b')
    pl.legend(['train target', 'net output'])
    pl.show()
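In the loop that builds inp2 above, i = 0 reads tar[-1], so the last element wraps around to the front. The same lagged feature can be built in one step with np.roll; a sketch with toy data (not the author's code):

import numpy as np

tar = np.array([[0.1], [0.2], [0.3], [0.4]])
inp2 = np.roll(tar[:, 0], 1).reshape(-1, 1)  # inp2[0] == tar[-1], matching the loop
print(inp2.ravel())  # [0.4 0.1 0.2 0.3]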
Example No. 24
def testNL():
    net = nl.load('test_tretia.net')
    sct = mss()

    while True:
        screen_np = np.array(sct.grab({'top': 0, 'left': 0, 'width': 600, 'height': 200}))
        inputs = inputs_from_process_img(screen_np)

        # # drawing of line ROI
        # cv2.line(screen_np, (90, 155), (400, 155), (0, 0, 0), 1)
        # cv2.line(screen_np, (90, 175), (400, 175), (0, 0, 0), 1)
        # cv2.imshow("Screencapture:", screen_np)

        # print("inputs: {}".format(inputs))
        outputs = net.sim(inputs)
        # print(inputs)
        # print("outputs: {}".format(outputs))
        th.start_new_thread(decide, (outputs,))

        if cv2.waitKey(15) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Example No. 25
import Image
import json
import neurolab as nl
import matplotlib.pyplot as pl
entry = []
with open("input.txt", "r") as input_file:
	entry = json.load(input_file)
target = []
input = []
for i in range(0, 50):
	if i % 5 == 0:
		target.append(entry[1][i])
		input.append(entry[0][i])
structure = []
net = nl.load("train.data")

result = net.sim(input)
print result
#print euclidean_distance(target, result)
Example No. 26
import neurolab as nl
import numpy as np
import svm
import functools
from multiprocessing import Pool, cpu_count
import random


def kernel(xm, xn):
    return (1 + np.dot(xm, xn)) ** 8

random.seed()
train_num = 500
net = nl.load("../features/ann80.net")
print("Loaded NN")

all_data = np.loadtxt("../combinedData.txt")
#index_list = range(0, all_data.shape[0])
#train_list = random.sample(index_list, train_num)
#test_list = [x for x in index_list if x not in train_list]
target = all_data[:, 0]
pixel_info = all_data[:, 1:]

train_info = pixel_info[:train_num]
print train_info.shape

test_info = pixel_info[train_num:]
print("Loaded info")

# cut the last layer off the ANN, leaving the net that produces the features
inputParams = [[-1, 1]] * pixel_info.shape[1]
Example No. 27
cadena = sacar_pixels("tomate-recortado.jpg")

if(os.path.exists("datos-tomate.csv")== True):
    os.remove("datos-tomate.csv")

archivo_entrenamiento = open("datos-tomate.csv", "a")

archivo_entrenamiento.write(cadena)
archivo_entrenamiento.close()

datos = np.matrix(sp.genfromtxt("datos-tomate.csv", delimiter=" "))

print datos.shape

rna = nl.load("red-neuronal-artificial.tmt")

salida = rna.sim(datos)

podrido = salida[0][0] * 100
maduro = salida[0][1] * 100
verde = salida[0][2] * 100

resultado = ""

if (podrido > 80.):
    if (maduro > 40.):
        resultado = "el tomate esta a punto de podrirse"
    else:
        resultado = "el tomate esta podrido"
elif (maduro > 80.):
Example No. 28
with open("npq_training_dataset.p", "rb") as f:
	inp, tar = pickle.load(f)

num_input_units = len(inp[0])
num_output_units = len(tar[0])
minmax = [[0, 1]] * num_input_units
# A common rule of thumb sets nh = 2/3 * (ni + no); this code uses 3/5 instead
size = [(num_input_units + num_output_units) * 3 / 5, num_output_units]

inp = inp.reshape(len(inp), num_input_units)
tar = tar.reshape(len(tar), num_output_units)

trans = [nl.trans.TanSig()] * (len(size) - 1) + [nl.trans.LogSig()]
# Create network with n layers
#net = nl.net.newff(minmax, size, transf=trans)
net = nl.load(raw_input('Model name: '))

# Change training func, by default uses train_bfgs
#net.trainf = nl.train.train_gdx  # Gradient descent with momentum backpropagation and adaptive lr

# Change error func, by default uses SSE()
#net.errorf = nl.error.MSE()
goal = 0.01

print "*" * 50
print "     #Samples: ", len(inp)
print "      #Epochs: ", epochs
print " #Input Units: ", net.ci
print "#Hidden Units: ", size[0]
print "#Output Units: ", net.co
print "        #Goal: ", goal
Example No. 29
def fix_transf(filename, sz):
    net = nl.load(filename)
    new_net = net_from_layers([[-1.0, 1.0]] * 38, net.layers[:-1], [sz], last_lin=True)
    new_net.save(filename)
Example No. 30
# Create train samples
input = np.array(input.data)
target = np.asfarray(target.data)
input = input[: target.shape[0]]

# Create network with 2 layers and random initialized
#norm = Norm(input)
#input = norm(input)
print input.shape
print target.shape
print '----------', minmax(input)
net = nl.net.newff(minmax(input), [12, 4], transf=[nl.trans.TanSig(), nl.trans.LogSig()])
net.trainf = nl.train.train_bfgs
error = net.train(input, target, epochs=1000, show=10, goal=0.02)
print '-------', error[-1]
net.save('net.net')
# Simulate network
print '\nprinting the simulated output'
net = nl.load('net.net')
output = net.sim(input)
out = output
for i in range(len(output)):
	m = max(output[i])
	print '[', m, ']',
	for j in range(4):
		if output[i, j] == m:
			print j + 1,
	print ';;', target[i]
net.save('net.net')
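The winner search in the loop above can also be written with numpy's argmax (the first maximum wins on ties); a sketch with illustrative data:

import numpy as np

output = np.array([[0.1, 0.9, 0.3, 0.2]])
winners = np.argmax(output, axis=1) + 1  # classes are reported 1-based above
print(winners)  # [2]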
Example No. 31
#!/usr/bin/python
import neurolab as nl
import numpy as np
import input
import target
from neurolab.tool import minmax

# Create train samples
input = np.array(input.data)
target = np.asfarray(target.data)
input = input[:target.shape[0]]
net = nl.load('net.net')
output = net.sim(input)
err = 0
for i in range(len(output)):
    m = max(output[i])
    for j in range(4):
        if output[i, j] == m:
            if target[i][j] == 0:
                err += 1
print len(target), err
Example No. 32
# print i.shape
# print target.shape
# sys.exit()

norm_t = nl.tool.Norm(t)
t = norm_t(t)

# train and save the neural network
"""
net = nl.net.newff(nl.tool.minmax(i), [10, 1])
err = net.train(i, t, epochs=1000, show=1)
net.save('t3_estimator.net')
"""

# load the neural network for transcoding time estimation
net = nl.load("t3_estimator.net")

# test the input using the neural network
out = net.sim(i)
out = norm_t.renorm(out)
t = norm_t.renorm(t)

# print target['t'] - out
e = t - out
# print type(e)
# print e.shape
# print len(e)

j = 0
while j < len(e):
    print t[j], "   ", e[j]
    j += 1
Example No. 33
    def loadnet(self):
        return nl.load(self.netconfig)
Example No. 34
def loadnetwork(file):
	return nl.load(file)
Example No. 35
    @classmethod  # cls as first parameter implies a classmethod
    def load(cls, agent):
        dir = join(agent, cls.name)
        comp = cls(dir)
        comp.nn = nl.load(join(dir, "utility.net"))
        return comp
Example No. 36

for y in xrange(15):
    for x in xrange(15):
        datos[ubicacion] = matriz_imagen[x, y][0]
        datos[ubicacion + 1] = matriz_imagen[x, y][1]
        datos[ubicacion + 2] = matriz_imagen[x, y][2]
        ubicacion = ubicacion + 3
datos = datos / 255
# a will hold the data to feed through the neural network
a = datos
# calidad is the best-trained network, calidad2 comes next, calidad3 last
# quality networks: calidad.net, calidad2.net, calidad3.net.
# red4 is the best for ripeness, followed by red3, then red, and red2 last
# ripeness networks: red4.net, red3.net, red.net, red2.net
# load the neural network to use; whether quality or ripeness is being
# measured is detected by the program, so either kind of net works here
net = nl.load("red4.net")
# resultado1 flags whether the search found anything at all
resultado1 = 0
# position of the closest match
posicion = 0
# run the network on the data in a
out = net.sim([a])
# maximo is a helper variable for tracking the closest match
maximo = out[0][0]
for x in range(len(out[0])):
    resultado1 = 1
    # keep the position of the largest output seen so far
    if out[0][x] > maximo:
        maximo = out[0][x]
        posicion = x
Example No. 37

import pandas as pd
import numpy as np
import neurolab as nl
from neurolab import trans

# Read Data
I_valid = pd.read_csv('../train.csv')
X_labels = (I_valid.ix[:, 0].values).astype('int')
X_valid = (I_valid.ix[:, 1:].values).astype('int')

# Pre-processing: binarize pixels brighter than 200
Y_valid = (X_valid > 200).astype(np.float64)
del I_valid, X_valid

# Test
net = nl.load('newff.net')

out = net.sim(Y_valid)

# Validate
j = 0
for i in range(42000):
    if np.argmax(out[i]) == X_labels[i]:
        j = j + 1
print('Accuracy: ')
print(j / 42000.0)
Example No. 38

training = open('nnTraining.txt', 'r').readlines()
trainingIN = []
trainingOUT = []
for line in training:
    tokens = line.rstrip().split()
    nextIn = [float(x) for x in tokens[3:]]
    trainingIN.append(nextIn)
    nextOut = [float(x) for x in tokens[:3]]
    trainingOUT.append(nextOut)
#inputs = [ [-1.0, 1.0] for j in range( 0, 3 ) ]
#net = neurolab.net.newff( inputs, [10,18,3] )
#net.trainf = neurolab.train.train_rprop
#net.train( trainingIN, trainingOUT, show=10 )

#net = nl.load('final_network')# topology[10,3]
net = nl.load('final_network2')  #topology[10,18,3]
Validation = open('nnvalidation.txt', 'r').readlines()
ValidationIN = []
ValidationOUT = []
for line in Validation:
    tokens = line.rstrip().split()
    nextIn = [float(x) for x in tokens[3:]]
    ValidationIN.append(nextIn)
    nextOut = [float(x) for x in tokens[:3]]
    ValidationOUT.append(nextOut)
res = net.sim(ValidationIN)
predicted = []
for j in range(len(res)):
    predicted.append([1.0 if x == res[j].max() else -1.0 for x in res[j]])
Example No. 39
    os.remove("dato-prueba.csv")

archivo = open("dato-prueba.csv", "a")

archivo.write(cadena)
archivo.close()

datos = np.matrix(sp.genfromtxt("dato-prueba.csv", delimiter=" "))
matiz = 0
i = 0
#while(i != 1200):
 #   matiz = matiz + datos[0][i]
 #   i = i + 3
#print (datos.shape)

rna = nl.load("red-entrenada.tmt")

salida = rna.sim(datos)
verde = salida[0][0] * 100
maduro = salida[0][1] * 100
podrido = salida[0][2] * 100

resultado = ""
if(gradoMadurez <= 30):
    print("Su tiempo estimado de vida es: 7 días")
if(gradoMadurez >= 31 and gradoMadurez <= 60):
    print("Su tiempo estimado de vida es: 4 días")
if(gradoMadurez >= 61 and gradoMadurez <= 85):
    print("Su tiempo estimado de vida es : 2 días")
if(gradoMadurez >= 86 and gradoMadurez <= 100):
    print("La naranja ya caducó")
Example No. 40
import neurolab as nl, cPickle as pickle, sys, numpy as np, time
start_time = time.time()

def discretize(alist):
	return np.array([0 if x < 0.5 else 1 for x in alist])

if len(sys.argv) != 1:
	model = sys.argv[1]
else:
	print "Command line argument missing! Input model's filename..."
	sys.exit()

with open("keypair_data.p", "rb") as inptarfile:
	data, target = pickle.load(inptarfile)

net = nl.load(model)
test = net.sim(data)

exp = target[0]
gen = discretize(test[0])

print "*" * 50
print " Expected: ", exp
print "Generated: ", gen

if (exp == gen).all():
	print "\nSame!!!"
else:
	print "\nNot same!!!"

print "*" * 50
Example No. 41
import neurolab as nl
import numpy as np
import svm
import functools
from multiprocessing import Pool, cpu_count
import random


def kernel(xm, xn):
    return (1 + np.dot(xm, xn))**8


random.seed()
train_num = 500
net = nl.load("../features/ann80.net")
print("Loaded NN")

all_data = np.loadtxt("../combinedData.txt")
#index_list = range(0, all_data.shape[0])
#train_list = random.sample(index_list, train_num)
#test_list = [x for x in index_list if x not in train_list]
target = all_data[:, 0]
pixel_info = all_data[:, 1:]

train_info = pixel_info[:train_num]
print train_info.shape

test_info = pixel_info[train_num:]
print("Loaded info")

# cut the last layer off the ANN, leaving the net that produces the features
Example No. 42
#print i.shape
#print target.shape
#sys.exit()

norm_t = nl.tool.Norm(t)
t = norm_t(t)

#train and save the neural network
'''
net = nl.net.newff(nl.tool.minmax(i), [10, 1])
err = net.train(i, t, epochs=1000, show=1)
net.save('t3_estimator.net')
'''

#load the neural network for transcoding time estimation
net = nl.load('t3_estimator.net')

#test the input using the neural network
out = net.sim(i)
out = norm_t.renorm(out)
t = norm_t.renorm(t)

#print target['t'] - out
e = t - out
#print type(e)
#print e.shape
#print len(e)

j = 0
while j < len(e):
    print t[j], '   ', e[j]
    j += 1
Example No. 43

training = open('nnTraining.txt', 'r').readlines()
trainingIN = []
trainingOUT = []
for line in training:
	tokens = line.rstrip().split()
	nextIn = [float(x) for x in tokens[3:]]
	trainingIN.append(nextIn)
	nextOut = [float(x) for x in tokens[:3]]
	trainingOUT.append(nextOut)
#inputs = [ [-1.0, 1.0] for j in range( 0, 3 ) ]
#net = neurolab.net.newff( inputs, [10,18,3] )
#net.trainf = neurolab.train.train_rprop
#net.train( trainingIN, trainingOUT, show=10 )

#net = nl.load('final_network')# topology[10,3]
net = nl.load('final_network2') #topology[10,18,3]
Validation = open('nnvalidation.txt', 'r').readlines()
ValidationIN = []
ValidationOUT = []
for line in Validation:
	tokens = line.rstrip().split()
	nextIn = [float(x) for x in tokens[3:]]
	ValidationIN.append(nextIn)
	nextOut = [float(x) for x in tokens[:3]]
	ValidationOUT.append(nextOut)
res = net.sim(ValidationIN)
predicted = []
for j in range(len(res)):
	predicted.append([1.0 if x == res[j].max() else -1.0 for x in res[j]])
Example No. 44
# In[7]:

# get the data in two arrays: classifications and the actual data

#classificationTrain,trainData = getData("ZipDigits.train.txt")
#classificationTest,testData = getData("ZipDigits.test.txt")
# combined set, since we will use our own separation technique
classification, data = getData('combinedData.txt')


# In[8]:

# Load the network: 256 inputs, 80 neurons in the hidden layer
# and 256 in the output layer

ann = nl.load('features/ann80.net')
size = 500
subData = data[:size]
#inputParams = [[-1, 1]] * len(subData[0])
#ann = nl.net.newff(inputParams, [80,256])
#ann.trainf = nl.train.train_rprop


# In[8]:

# cut the last layer off the ANN, leaving the net that produces the features
inputParams = [[-1, 1]] * len(subData[0])
featureNet = nl.net.newff(inputParams, [80])
featureNet.layers[0].np['w'][:] = ann.layers[0].np['w']
featureNet.layers[0].np['b'][:] = ann.layers[0].np['b']
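With the first layer's weights and biases copied over, featureNet.sim should map each image to the 80 hidden-layer activations of the full network; a short sketch of the presumed next step:

import numpy as np

features = featureNet.sim(np.array(subData))  # expected shape: (500, 80)
print(features.shape)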
Example No. 45
# -*- coding: utf-8 -*-
"""
Created on Fri Jul  1 21:01:18 2016

@author: meza
"""

import neurolab as nl
import numpy as np

net = nl.net.newff([[0, 1]] * 3, [4, 2])

net.save("test.net")

net = nl.load("test.net")
# show layer weights and biases
for i in range(len(net.layers)):
    print "Net layer", i
    print net.layers[i].np['w']
    print "Net bias", i
    print net.layers[i].np['b']

# try setting layer weights
net.layers[0].np['w'][:] = np.array([[0, 1, 2],
                                     [3, 4, 5],
                                     [4, 5, 6],
                                     [6, 7, 8]])


# show layer weights and biases 
Example No. 46
def main():
    featureFile = open(
        'C:/Users/Abhi/workspace/MalwareClassification/asmTestFeatures.csv',
        'r')
    inp = featureFile.readlines()
    inp1 = []
    for line in inp:
        inp1.append(line.split(","))
    #print inp1[10868]
    inp1 = inp1[1:]
    #print inp1[0]
    labels = []
    for i in range(len(inp1)):
        labels.append(inp1[i][-1][:-1])
        inp1[i] = inp1[i][0:-1]
    print len(labels[0])
    print len(inp1)
    for i in range(len(inp1)):
        for j in range(len(inp1[0])):
            inp1[i][j] = float(inp1[i][j])

    for i in range(len(inp1)):
        nConstant = sum(inp1[i])
        for j in range(len(inp1[0])):
            inp1[i][j] = inp1[i][j] / nConstant

    #print len(inp1)
    #print out

    inp2 = np.array(inp1)
    #out2 = np.array(out)
    #tar = out2.reshape(len(out2),1)

    #print inp2
    #print len(inp2[0])
    #print tar
    #print len(inp2)
    #print len(tar)'''
    '''okCount=0
    notokCount=0
    for item in inp2:
        if sum(item)> 0.99:
            okCount += 1
        else:
            notokCount +=1
    print okCount
    print notokCount'''

    newnet = nl.load('classify4.net')
    print "fine"
    roundans = []
    ans = newnet.sim(inp2)
    for i in range(len(ans)):
        if round(ans[i][0], 3) <= 0.15:
            roundans.append(0.1)
        elif round(ans[i][0], 3) <= 0.25:
            roundans.append(0.2)
        elif round(ans[i][0], 3) <= 0.35:
            roundans.append(0.3)
        elif round(ans[i][0], 3) <= 0.45:
            roundans.append(0.4)
        elif round(ans[i][0], 3) <= 0.55:
            roundans.append(0.5)
        elif round(ans[i][0], 3) <= 0.65:
            roundans.append(0.6)
        elif round(ans[i][0], 3) <= 0.75:
            roundans.append(0.7)
        elif round(ans[i][0], 3) <= 0.85:
            roundans.append(0.8)
        elif round(ans[i][0], 3) <= 0.95:
            roundans.append(0.9)
        else:
            roundans.append(0.0)

    roundans = np.array(roundans)
    roundans = roundans.reshape(len(roundans), 1)
    #print roundans
    #calculateError(roundans,tar)'''
    printprobs(labels, roundans)
Example No. 47

def main():
    featureFile = open('C:/Users/Abhi/workspace/MalwareClassification/asmTestFeatures.csv', 'r')
    inp = featureFile.readlines()
    inp1=[]
    for line in inp:
        inp1.append(line.split(","))
    #print inp1[10868]
    inp1 = inp1[1:]
    #print inp1[0]
    labels = []
    for i in range(len(inp1)):
        labels.append(inp1[i][-1][:-1])
        inp1[i] = inp1[i][0:-1]
    print len(labels[0])
    print len(inp1)            
    for i in range(len(inp1)):
        for j in range (len(inp1[0])):
            inp1[i][j] = float(inp1[i][j])
     
    for i in range(len(inp1)):
        nConstant = sum(inp1[i])
        for j in range (len(inp1[0])):
            inp1[i][j] = inp1[i][j]/nConstant
    
    #print len(inp1)
    #print out
    
    inp2 = np.array(inp1)
    #out2 = np.array(out)
    #tar = out2.reshape(len(out2),1)
    
    #print inp2
    #print len(inp2[0])
    #print tar
    #print len(inp2)
    #print len(tar)'''
    '''okCount=0
    notokCount=0
    for item in inp2:
        if sum(item)> 0.99:
            okCount += 1
        else:
            notokCount +=1
    print okCount
    print notokCount'''

    newnet = nl.load('classify4.net')
    print "fine"
    roundans=[]
    ans = newnet.sim(inp2)
    for i in range(len(ans)):
        if round(ans[i][0],3) <= 0.15:
            roundans.append(0.1)
        elif round(ans[i][0],3) <= 0.25:
            roundans.append(0.2)
        elif round(ans[i][0],3) <= 0.35:
            roundans.append(0.3)
        elif round(ans[i][0],3) <= 0.45:
            roundans.append(0.4)
        elif round(ans[i][0],3) <= 0.55:
            roundans.append(0.5)
        elif round(ans[i][0],3) <= 0.65:
            roundans.append(0.6)
        elif round(ans[i][0],3) <= 0.75:
            roundans.append(0.7)
        elif round(ans[i][0],3) <= 0.85:
            roundans.append(0.8)
        elif round(ans[i][0],3) <= 0.95:
            roundans.append(0.9)
        else:
            roundans.append(0.0)

    roundans = np.array(roundans)
    roundans = roundans.reshape(len(roundans),1)
    #print roundans
    #calculateError(roundans,tar)'''
    printprobs(labels,roundans)
Example No. 48
def predictSingleImage(image, network="semhistograma.net"):
    net = nl.load(network)
    predict = net.sim(np.array([getValues(image)]))
    return round(predict[0][0])
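getValues is not shown anywhere in these examples; it presumably flattens an image into the feature vector the network was trained on. A hypothetical stand-in using OpenCV (the real helper may differ in size and scaling):

import cv2

def getValues(image):
    # Hypothetical helper: grayscale, downscale, and flatten to [0, 1] floats.
    img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (32, 32))
    return (img.flatten() / 255.0).tolist()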
Example No. 49

from matplotlib import pylab as pl
from pprint import pprint


def some_func():
    try:
        net = nl.load(Conf.NET_FILE)
    except IOError as e:
        print e

    songs_info_loader = SongsInfoLoader(Conf.SONG_INFO_WITH_ANNOTATIONS_PATH)
    songs_list = songs_info_loader.get_songs_list()

    # split song for development list and evaluate list
    dev_songs_list, eval_song_list = split_songs_for_purpose(songs_list)

    dev_input, dev_targets = change_to_proper_format(dev_songs_list)
    eval_input, eval_targets = change_to_proper_format(eval_song_list)
    result = net.sim(dev_targets)
    pprint(result)


if __name__ == "__main__":
    err = open_pickle(Conf.PICKLED_ERR_PATH)
    processed_songs = open_pickle(Conf.PICKLED_SONGS_PATH)
    net = nl.load(Conf.NET_FILE)

    # ev = net.sim(eval_data)
    pl.plot(err)
    pl.show()
Example No. 50
    def importW(self, fin):
        if check.isfile(fin):
            self.nn = nl.load(fin)
Example No. 51
    def reg_net(self, reg_data):
        net = nl.load("test.net")
        inp = np.array(reg_data)
        out = net.sim(inp)

        return out
Example No. 52
import matplotlib
import matplotlib.pyplot as plt
#img=matplotlib.image.imread('./test.png')
#print img
import neurolab as nl
net = nl.load('autoencoder2_16_second_weights.txt')
img = list(map(lambda x: x * 255, net.layers[0].np['w']))
img1 = [img[i:i + 2] for i in range(len(img) - 2)]      # unused reshaping attempt
img2 = [img1[i:i + 16] for i in range(len(img1) - 16)]  # unused reshaping attempt
print img
plt.imshow(img, cmap='gray', vmin=0, vmax=255)  #,origin='lower')
plt.show()
Example No. 53
with open("m2c_rsa_generate.p", "rb") as f:
	inp, tar = pickle.load(f)

num_input_units = len(inp[0])
num_output_units = len(tar[0])
minmax = [[0, 1]] * num_input_units

size = [3 * num_input_units * 3 / 5, 2 * num_input_units, num_output_units]

inp = inp.reshape(len(inp), num_input_units)
tar = tar.reshape(len(tar), num_output_units)

trans = [nl.trans.TanSig()] * (len(size) - 1) + [nl.trans.LogSig()]
# Create network with n layers
#net = nl.net.newff(minmax, size, transf=trans)
net = nl.load(raw_input('Model name: '))

# Change training func, by default uses train_bfgs
#net.trainf = nl.train.train_gdx  # Gradient descent with momentum backpropagation and adaptive lr

npq_net = nl.load(raw_input('NPQ Model name: '))
net.layers[0].np['w'][:] = npq_net.layers[0].np['w'][:]
net.layers[0].np['b'][:] = npq_net.layers[0].np['b'][:]

net.layers[1].np['w'][:] = npq_net.layers[1].np['w'][:]
net.layers[1].np['b'][:] = npq_net.layers[1].np['b'][:]

goal = 0.01

print "*" * 50
print "#Training Samples: ", len(inp)