Example #1
def cycle():
    lerns = SupervisedDataSet(1024, 2)
    do_R = input_image.analyse()  # input_image comes from the elided surrounding context
    for i in range(num_images):  # num_images (the sample count) is not defined in this snippet
        adress = "learning_data/folder" + str(i)
        data = do_R.transform_in_data(adress)
        lerns.addSample(data, (1, 0))
    return lerns
Example #2

def makeMnistDataSets(path):
    """Return a pair consisting of two datasets, the first being the training
    and the second being the test dataset."""
    test = SupervisedDataSet(28 * 28, 10)
    test_image_file = os.path.join(path, 't10k-images-idx3-ubyte')
    test_label_file = os.path.join(path, 't10k-labels-idx1-ubyte')
    test_images = images(test_image_file)
    test_labels = (flaggedArrayByIndex(l, 10) for l in labels(test_label_file))

    for image, label in zip(test_images, test_labels):
        test.addSample(image, label)

    train = SupervisedDataSet(28 * 28, 10)
    train_image_file = os.path.join(path, 'train-images-idx3-ubyte')
    train_label_file = os.path.join(path, 'train-labels-idx1-ubyte')
    train_images = images(train_image_file)
    train_labels = (flaggedArrayByIndex(l, 10)
                    for l in labels(train_label_file))
    for image, label in zip(train_images, train_labels):
        train.addSample(image, label)

    return train, test
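A dataset pair built this way plugs straight into a trainer. A minimal training sketch, assuming pybrain3's usual shortcuts and a hypothetical local MNIST directory:

from pybrain3.tools.shortcuts import buildNetwork
from pybrain3.supervised.trainers import BackpropTrainer

train, test = makeMnistDataSets('mnist/')  # 'mnist/' is a hypothetical data directory
net = buildNetwork(28 * 28, 100, 10, bias=True)  # 100 hidden units is an arbitrary choice
trainer = BackpropTrainer(net, train)
for epoch in range(10):
    print(trainer.train())  # per-epoch training error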
Example #3
import os

import numpy as np
from PIL import Image
from pybrain3.datasets import SupervisedDataSet  # assuming the pybrain3 fork used elsewhere on this page


class download_data_to_learn():
	
	def __init__(self):
		self.learnig_data_placement = os.path.dirname(os.path.realpath(__file__))+'/learnig_data/Folder'
		self.all_files = []
		self.imgs_arrays = [[],[],[],[]]
		self.weights = SupervisedDataSet(1024, 2)
		
	def Found_Learnig_Filles(self):
		for i in range(4):
			self.all_files.append(os.listdir(self.learnig_data_placement+str(i)))

	def add_learning_information(self,resultat_nember=0):
		img_adresse = self.all_files[resultat_nember]
		for image_name in img_adresse:
			img = self.binnarizing_img(self.learnig_data_placement+str(resultat_nember)+'/'+image_name)
			self.imgs_arrays[resultat_nember].append(np.asarray(img))

		for img_nember in range(len(self.all_files[resultat_nember])):  # one flattened sample per image
			end_response = self.flatting_arrays(self.imgs_arrays[resultat_nember][img_nember].tolist())
			self.weights.addSample((end_response), self.tranlater(resultat_nember))

	def binnarizing_img(self, img_adresse):
		file = Image.open(img_adresse)
		img_convert = file.convert("L")
		data = np.asarray(img_convert)
		resultat = (data < 200) * 1
		return resultat
	
	def flatting_arrays(self, array):
		flatten = lambda l: [item for sublist in l for item in sublist]
		return flatten(array)

	def tranlater(self, x):
		binar_number = '{0:02b}'.format(x)
		return [int(i) for i in list(binar_number)]
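Driving the class above would look roughly like this (a sketch; it assumes the four folders Folder0..Folder3 that __init__ and Found_Learnig_Filles expect already contain images):

loader = download_data_to_learn()
loader.Found_Learnig_Filles()
for class_index in range(4):
    loader.add_learning_information(class_index)
# loader.weights now holds one flattened 1024-value sample per image, each with a 2-bit class target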
Example #4
def cycle():
    lerns = SupervisedDataSet(1024, 2)
    do_R = lb.analyse()
    for i in range(1, 104):
        adress = "folder 1/" + str(i) + ".jpg"
        data = do_R.transform_in_data(adress)
        lerns.addSample((data), (1, 0))

    for i in range(1, 106):
        adress = "folder 2/" + str(i) + ".jpg"
        data = do_R.transform_in_data(adress)
        lerns.addSample((data), (0, 0))

    for i in range(1, 75):
        adress = "folder 3/" + str(i) + ".jpg"
        data = do_R.transform_in_data(adress)
        lerns.addSample((data), (0, 1))

    for i in range(1, 66):
        adress = "folder 4/" + str(i) + ".jpg"
        data = do_R.transform_in_data(adress)
        lerns.addSample((data), (1, 1))
    return lerns
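The four targets (1, 0), (0, 0), (0, 1) and (1, 1) are the 2-bit codes for the four folders, matching the dataset's two output units. Training on the result might look like this (a sketch, assuming the usual pybrain3 shortcuts):

from pybrain3.tools.shortcuts import buildNetwork
from pybrain3.supervised.trainers import BackpropTrainer

net = buildNetwork(1024, 64, 2, bias=True)  # 64 hidden units is an arbitrary choice
trainer = BackpropTrainer(net, cycle())
trainer.trainUntilConvergence(maxEpochs=50, verbose=True)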
Example #5
archivo2 = 'caracteristicas sin lata.csv'
datos2 =  pd.read_csv(archivo2, header=0)

# neural network configuration (features, network layers, outputs)
net = buildNetwork(7, 40, 2, bias=True)
ds = SupervisedDataSet(7, 2)

# read each data point of the image
for j in range(0,len(datos)):
    data.clear()
    data2.clear()
    for i in range(0,7):
        data.append(datos.iloc[j,i])
        data2.append(datos2.iloc[j,i])
    #print('paso: ', data)
    ds.addSample((data), (1,))  # assign features and label
    ds.addSample((data2), (0,))
    #print('paso: ', j)
    #print(ds)
    trainer = BackpropTrainer(net, ds)  # train the network by comparing against the expected error
    er = round(trainer.train(), 3)
    while er > 0.112:  # expected error
        er = round(trainer.train(), 3)
        #print(er)

filename = "NNa{0}b{1}.pk1".format(2,3)
pickle.dump(net, open(filename,'wb'))
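The pickled network can be restored later and queried directly; a minimal sketch, assuming the same 7 features used above:

import pickle

net = pickle.load(open('NNa2b3.pk1', 'rb'))
print(net.activate((0, 0, 0, 0, 0, 0, 0)))  # placeholder 7-feature vector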

def preprocesamiento_img(img):
    kernel = np.ones((3, 3), np.uint8)
    hsv_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
Example #6

from pybrain3.datasets import SupervisedDataSet

inputDataSet = SupervisedDataSet(35, 20)  # Creating new DataSet

# A
inputDataSet.addSample(
    (-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1,
     -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
    (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

# B
inputDataSet.addSample(
    (1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1,
     -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
    (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

# C
inputDataSet.addSample(
    (-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
     1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),
    (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

# D
inputDataSet.addSample(
    (1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1,
     -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
    (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

# F
inputDataSet.addSample(
    (1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1,
Example #7

#!/usr/bin/python

from pybrain3.datasets import SupervisedDataSet

inputLettersDataSet = SupervisedDataSet(35, 1)

#A
inputLettersDataSet.addSample(
    (-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1,
     -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1), 1)

#B
inputLettersDataSet.addSample(
    (1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1,
     -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), 1)

#C
inputLettersDataSet.addSample(
    (-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
     1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1), 1)

#D
inputLettersDataSet.addSample(
    (1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1,
     -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1), 1)

#F
inputLettersDataSet.addSample(
    (1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, -1, 1,
     -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1), 1)
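Since every sample here carries the target 1, a network trained on this set only learns to answer 1 for these letter bitmaps. The training step itself would look something like this (a sketch, assuming the usual pybrain3 shortcuts):

from pybrain3.tools.shortcuts import buildNetwork
from pybrain3.supervised.trainers import BackpropTrainer

net = buildNetwork(35, 10, 1, bias=True)  # 10 hidden units is an arbitrary choice
trainer = BackpropTrainer(net, inputLettersDataSet)
for _ in range(100):
    trainer.train()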
Example #8
    [3.21061321e+02, 8.96041364e-01, 2.04813023e-04, 0.00000000e+00]
]
testFeaturesArray = np.array(testFeaturesArray, dtype=np.float64)
confusionMatrix = np.array(([[0, 0, 0], [0, 0, 0], [0, 0, 0]]), dtype=np.uint8)

height, width = trainFeaturesArray.shape

print(trainFeaturesArray[0, 0])
print((trainFeaturesArray[0, 0], trainFeaturesArray[0, 1],
       trainFeaturesArray[0, 2]), (0))
print((0.8, 0.4), (0.7))
ds = SupervisedDataSet(2, 1)
for i in range(height):
    if i < 25:
        ds.addSample(
            (float(trainFeaturesArray[i, 0]), float(trainFeaturesArray[i, 1])),
            (0))
    elif 25 <= i < 50:
        ds.addSample(
            (float(trainFeaturesArray[i, 0]), float(trainFeaturesArray[i, 1])),
            (1))
    else:
        ds.addSample(
            (float(trainFeaturesArray[i, 0]), float(trainFeaturesArray[i, 1])),
            (2))
# for i in range(height):
#     if i < 25:
#         ds.addSample((float(trainFeaturesArray[i, 0]), float(trainFeaturesArray[i, 1]), float(trainFeaturesArray[i, 2])), (0))
#     elif i >= 25 and i < 50:
#         ds.addSample((float(trainFeaturesArray[i, 0]), float(trainFeaturesArray[i, 1]), float(trainFeaturesArray[i, 2])), (1))
#     else:
Example #9
           [403, 524, 47], [557, 772, 37]]
puntos = benigno
for i in range(0, 2):
    if i == 1:
        puntos = maligno
    for j in range(1, Ninput[i]):
        #print(str(i)+':'+str(j))
        #image = cv2.imread("all-mias/mdb076.pgm")
        image = cv2.imread('all-mias/' + str(i) + '/(' + str(j) + ').pgm', 0)
        if image is None:
            print("no se encontro")
        else:
            momentos = procesado(image, puntos[j - 1][0], puntos[j - 1][1],
                                 puntos[j - 1][2])
            #momentos = cal_momentos(img_procesada)
            ds.addSample(momentos, output[i])

trainer = BackpropTrainer(net, ds)

er = round(trainer.train(), 3)
#print(er)
while er > 0.113:  # keep training until the error drops to the expected threshold
    er = round(trainer.train(), 3)
    print(er)

puntos = benigno
for i in range(17, 27):
    img_test = cv2.imread(
        'C:/Users/David VM/Downloads/all-mias/' + str(0) + '/(' + str(i) +
        ').pgm', 0)
    carat = procesado(img_test, puntos[i - 1][0], puntos[i - 1][1],
Example #10

if __name__ == '__main__':

    from pylab import figure, show

    # --- example on how to use the GP in 1 dimension
    ds = SupervisedDataSet(1, 1)
    gp = GaussianProcess(indim=1, start=-3, stop=3, step=0.05)
    figure()

    x = mgrid[-3:3:0.2]
    y = 0.1 * x**2 + x + 1
    z = sin(x) + 0.5 * cos(y)

    ds.addSample(-2.5, -1)
    ds.addSample(-1.0, 3)
    gp.mean = 0

    # new feature "autonoise" adds uncertainty to data depending on
    # it's distance to other points in the dataset. not tested much yet.
    # gp.autonoise = True

    gp.trainOnDataset(ds)
    gp.plotCurves(showSamples=True)

    # You can also test the GP on single points, but this deletes the
    # original testing grid. It can be restored with a call to _buildGrid().
    print(gp.testOnArray(array([[0.4]])))

    # --- example on how to use the GP in 2 dimensions
Example #11
    max_ = max(map(lambda t: len(authors[t]), authors))
    max_len = max(
        map(lambda t: max(map(lambda t1: t1[1], authors[t])), authors))
    classes = [_ for _ in authors]
    L = len(classes)
    ds = SupervisedDataSet(33 * 33, L)
    for author in authors:
        for text in authors[author]:
            arr = np.zeros(33 * 33)
            for j in text[0]:
                arr[int(j)] = text[0][j]
            for _ in range(
                    math.ceil(max_ / len(authors[author]) * 10 *
                              (text[1] / max_len))):
                arr2 = np.zeros(L)
                arr2[classes.index(author)] = 1
                ds.addSample(arr, arr2)

    net = buildNetwork(33 * 33, 10, 10, L, bias=True)
    trainer = RPropMinusTrainer(net, dataset=ds)
    trainer.trainEpochs(100)
    print(trainer.testOnData())
    NetworkWriter.writeToFile(net, 'filename_2.xml')
    s = 0
    time3 = time.time()
    print(time3 - time2)

    # for inp, tar in ds:
    #     print(tar[0], net.activate(inp)[0] - tar[0])
    #     s += (net.activate(inp)[0] - tar[0])**2
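The network written to XML with NetworkWriter can be loaded back for later runs; a minimal sketch, assuming pybrain3's matching reader:

from pybrain3.tools.customxml import NetworkReader

net = NetworkReader.readFrom('filename_2.xml')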
Example #12

from pybrain3.tools.shortcuts import buildNetwork  # to create the neural network
from pybrain3.datasets import SupervisedDataSet  # datasets / data sets
from pybrain3.supervised.trainers import BackpropTrainer  # training algorithm

datasets = SupervisedDataSet(2, 1)  # inputs and outputs

datasets.addSample(
    (0.8, 0.4), 0.7
)  # hours slept and hours studied, with the resulting exam grade
datasets.addSample((0.5, 0.7), 0.5)
datasets.addSample((0.1, 0.7), 0.95)

rede_neural = buildNetwork(
    2, 4, 1, bias=True
)  # network architecture: 2 neurons in the input layer +
# 4 neurons in the hidden layer + 1 neuron in the output layer

trainer = BackpropTrainer(rede_neural, datasets)  # trainer

for i in range(2000):  # train the neural network 2000 times
    print(trainer.train())

while True:
    dormiu = float(input('How long did you sleep? '))
    estudou = float(input('How long did you study? '))
    z = rede_neural.activate((dormiu, estudou))[0] * 10
    print(f'Predicted grade: {z}')
Example #13
from pybrain3.tools.shortcuts import buildNetwork
from pybrain3.datasets import SupervisedDataSet
from pybrain3.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 1)
# initial_value, installment_count, final_value
ds.addSample((100, 1), (100))
ds.addSample((100, 2), (52.63))
ds.addSample((100, 3), (35.69))
ds.addSample((100, 4), (27.22))
ds.addSample((100, 5), (22.14))
ds.addSample((100, 6), (18.76))
ds.addSample((100, 7), (16.35))
ds.addSample((100, 8), (14.54))
ds.addSample((100, 9), (13.14))
ds.addSample((100, 10), (12.02))
ds.addSample((100, 11), (11.10))
ds.addSample((100, 12), (10.34))
'''
ds.addSample((200, 12), (20.68))
ds.addSample((6518.78, 1), (6518.78))
ds.addSample((6518.78, 3), (2326.33))
ds.addSample((6518.78, 5), (1443.38))
ds.addSample((6518.78, 8), (947.93))
ds.addSample((6518.78, 9), (856.47))
ds.addSample((6518.78, 10), (783.43))
ds.addSample((6518.78, 11), (723.79))
ds.addSample((6518.78, 12), (674.19))'''

nn = buildNetwork(2, 4, 1)
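The snippet stops where training would begin; presumably it continues with the same backprop pattern as the other examples (a sketch):

trainer = BackpropTrainer(nn, ds)
for _ in range(1000):
    trainer.train()
print(nn.activate((100, 6)))  # should come out near 18.76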
Example #14
from pybrain3.tools.shortcuts import buildNetwork
from pybrain3.datasets import SupervisedDataSet
from pybrain3.supervised.trainers import BackpropTrainer

ds = SupervisedDataSet(2, 1)

ds.addSample((0.8, 0.4), (0.7))
ds.addSample((0.5, 0.7), (0.5))
ds.addSample((1.0, 0.8), (0.95))

#Utilizando "bias" para o algoritmo treinar mais rápido
nn = buildNetwork(2, 16, 1, bias=True)

# training the algorithm
trainer = BackpropTrainer(nn, ds)

# watching the algorithm's progress
for i in range(2000):
    print(trainer.train())

# with buildNetwork using 4 neurons, the error margin is 0.013496550372475475
# with buildNetwork using 16 neurons, the error margin is 0.0009338957668739359
# test 1 with 512 neurons: error margin of 1.6434602192104412e-32
# test 2 with 512 neurons: error margin of 1.3702349577667055e-30
# this algorithm will use only 16 neurons

# a few tests out of curiosity
while True:
    sleep = float(input("How many hours did you sleep? "))
    study = float(input("How many hours did you study? "))
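The loop is cut off before the prediction step; by analogy with the sleep/study example in #12, it presumably ends by activating the network (a sketch):

    print(nn.activate((sleep, study))[0])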
Example #15
from pybrain3.datasets import SupervisedDataSet

daneWejsciowe = SupervisedDataSet(35, 20)

daneWejsciowe.addSample(
    (-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1,
     -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1),
    (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

daneWejsciowe.addSample(
    (1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, -1,
     -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
    (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

daneWejsciowe.addSample(
    (-1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1,
     1, -1, -1, -1, -1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1),
    (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

daneWejsciowe.addSample(
    (1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, -1, -1, -1, 1, 1,
     -1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1),
    (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

daneWejsciowe.addSample(
    (1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, -1,
     -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, 1, 1),
    (0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))

daneWejsciowe.addSample(
    (1, 1, 1, 1, 1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
Example #16
# Neural networks using PyBrain

from pybrain3.datasets import SupervisedDataSet
from pybrain3.tools.shortcuts import buildNetwork
from pybrain3.supervised import BackpropTrainer

# dimensions of the input and target vectors
dataset = SupervisedDataSet(2, 1)

dataset.addSample([1, 1], [0])
dataset.addSample([1, 0], [1])
dataset.addSample([0, 1], [1])
dataset.addSample([0, 0], [0])

network = buildNetwork(dataset.indim, 4, dataset.outdim, bias=True)
trainer = BackpropTrainer(network, dataset, learningrate=0.01, momentum=0.99)
'''
for epoch in range(1000):
    trainer.train()
'''

trainer.trainEpochs(1000)
'''
    to train until convergence: trainer.trainUntilConvergence
'''

test_data = SupervisedDataSet(2, 1)

test_data.addSample([1, 1], [0])
test_data.addSample([1, 0], [1])
test_data.addSample([0, 1], [1])
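The test set is cut off here; presumably the last XOR case is added and the network is scored on it (a sketch, using the trainer's testOnData):

test_data.addSample([0, 0], [0])
trainer.testOnData(test_data, verbose=True)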
Example #17
from pybrain3.datasets import SupervisedDataSet

inputDataSet = SupervisedDataSet(35, 1)         #Creating new DataSet

#A
inputDataSet.addSample((                        #Adding first sample to dataset
    -1, 1, 1, 1, -1,
    1, -1, -1, -1, 1,
    1, -1, -1, -1, 1,
    1, 1, 1, 1, 1,
    1, -1, -1, -1, 1,
    1, -1, -1, -1, 1,
    1, -1, -1, -1, 1
    ),
    1)

#B
inputDataSet.addSample((
    1, 1, 1, 1, -1,
    1, -1, -1, -1, 1,
    1, -1, -1, -1, 1,
    1, 1, 1, 1, -1,
    1, -1, -1, -1, 1,
    1, -1, -1, -1, 1,
    1, 1, 1, 1, -1
    ),
    1)

#C
inputDataSet.addSample((
    -1, 1, 1, 1, -1,
Example #18

ds = SupervisedDataSet(2, 1)

# here goes our learning base
base = (
    # 8 hours slept, 2 hours studied, 7.1 exam grade
    ((8, 2), (7.1,)),
    ((10, 1), (2.3,)),
    ((7.5, 3), (8.0,)),
    ((3.5, 10), (2.5,)),
)

# here we feed in the numbers used as the learning base above
for example in base:
    ds.addSample(example[0], example[1])


# here we create the neural network for the learning base above
nn = buildNetwork(2, 4, 1)  # 2 input neurons, 4 hidden neurons, 1 output


# here we define the trainer, passing in the neural network and the learning base
trainer = BackpropTrainer(nn, ds)

# now we train the neural network; the more training passes, the lower the error
for i in range(10000):  # improving the learning base is far more effective than raising this number
    print(trainer.train())

# now we ask the user for time slept and hours studied and predict their grade
while True:
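The interactive loop is truncated; following the other sleep/study examples, it presumably reads the two values and activates the network (a sketch):

    slept = float(input('How many hours did you sleep? '))
    studied = float(input('How many hours did you study? '))
    print(nn.activate((slept, studied))[0])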
Example #19
    print(time2 - time1)
    max_ = max(map(lambda t: len(authors[t]), authors))
    max_len = max(
        map(lambda t: max(map(lambda t1: t1[1], authors[t])), authors))
    classes = [_ for _ in authors]
    L = len(classes)

    for author in authors:
        data = SupervisedDataSet(33 * 33 * 33, L)
        for text in authors[author]:
            arr = np.zeros(33 * 33 * 33, dtype='int8')
            for j in text[0]:
                arr[int(j)] = text[0][j]
            arr2 = np.zeros(L, dtype='int8')
            arr2[classes.index(author)] = 1
            data.addSample(arr, arr2)
            del arr, arr2
            print('!')
        data.saveToFile('cashes/data_3_' + str(author) + '.mod')
        del data
        print(author, 'constructed')
    print('data constructed')
    author_names = list(authors)  # keep the author keys; the dict itself is freed next
    del authors
    net = buildNetwork(33 * 33 * 33, 10, 10, L, bias=True)
    trainer = RPropMinusTrainer(net)
    print('training started')
    for i in range(20):
        for author in author_names:
            data = SupervisedDataSet.loadFromFile('cashes/data_3_' +
                                                  str(author) + '.mod')
            for j in range(5):
Example #20

        for i in range(0, len(hist)):
            oblicuidad = oblicuidad + ((hist[i] - media)**3)
        oblicuidad = oblicuidad / (len(hist) * (desv_estandar**3))
        print(oblicuidad)
        # areaInteres = regionInteres(img_limpia)
        # momentos=moments(areaInteres)
        #print(momentos)

        # ----------------------------- Training ------------------------------
        net = buildNetwork(
            4, 10, 1,
            bias=True)  # neural network: number of inputs, hidden units, and outputs
        ds = SupervisedDataSet(4, 1)
        if j == 1:
            print("normales")
            ds.addSample((media, desv_estandar, suavidad, oblicuidad), (0, ))
        if j == 2:
            print("tumor")
            ds.addSample((media, desv_estandar, suavidad, oblicuidad), (1, ))

        trainer = BackpropTrainer(net, ds)

        error = round(trainer.train(), 7)

        while error > 0.15:  # minimum accepted error
            error = round(trainer.train(), 7)
            #print(error)

        cv2.waitKey(3000)
        #if cv2.waitKey(33) == ord("d"):
        cv2.destroyAllWindows()
Example #21
class NeuralNetwork:
    def __init__(self, unique_words, total_comments, hidden=400):
        self._max_value = 0.9
        self._min_value = 0.1
        self.__unique_words = unique_words
        self.__total_comments = total_comments
        self.__conversion_rate = 0.5
        print("Total de Comentários: ", self.__total_comments)
        print("Total de Palavras Únicas: ", len(self.__unique_words))

        unique_words_length = len(self.__unique_words)
        # Build the network: unique_words_length inputs, `hidden` hidden units, and one output
        self.__network = buildNetwork(unique_words_length, hidden, 1)
        # Dataset with unique_words_length predictor attributes and one class
        self.__base = SupervisedDataSet(unique_words_length, 1)
        '''
        self.__network = buildNetwork(2, 3, 1, outclass = SoftmaxLayer,
                            hiddenclass = SigmoidLayer, bias = False)
        print(self.__network['in'])
        print(self.__network['hidden0'])
        print(self.__network['out'])
        print(self.__network['bias'])
        '''

    def float_round(self, number, close_to):
        """import math
        return math.isclose(float(number), close_to, abs_tol=0.45)"""
        if float(number) >= 0.5:
            return self._max_value == close_to
        else:
            return self._min_value == close_to

    def __add_training_set(self, training_base):

        # Add the (input), (class) data for training
        for index in range(0, self.__total_comments):
            training_set_length = 2500  # int(self.__total_comments * 0.8)
            if index < training_set_length:
                array = []
                # ********************** REPLACE WITH THE INPUT ARRAY *******************
                entry_array = training_base[index][0]

                if training_base[index][1] >= 3.5:
                    comment_class = self._max_value
                else:
                    comment_class = self._min_value

                    # print(entry_array, comment_class)
                for key in entry_array:
                    # print (entry_array[key])
                    array.append(entry_array[key])

                self.__base.addSample(array, comment_class)

        # Print the supervised input and class
        # print(base['input'])
        # print(base['target'])

    def training_network(self, training_base, number_of_trainings=20):

        print("Start Training")
        self.__add_training_set(training_base)
        training = BackpropTrainer(self.__network,
                                   dataset=self.__base,
                                   learningrate=0.01,
                                   momentum=0.06)

        # Run the training number_of_trainings times and show the error
        '''for count in range(0, number_of_trainings):
                print("Training Number %d" % (count + 1))
                print("Error %s" % training.train())'''
        # instead, train with validation:
        training.trainUntilConvergence(maxEpochs=number_of_trainings,
                                       verbose=True,
                                       validationProportion=0.25)

    def test_network(self, test_base):
        # self.__network = NetworkReader.readFrom('filename.xml')
        test_base_length = len(test_base)
        corrects = 0
        errors = 0
        # ******************* PASS THE COMMENTS IN FOR TESTING *********************
        for index in range(0, test_base_length):
            array = []
            # ********************** TROCAR PELO ARRAY DE ENTRADAS *******************
            entry_array = test_base[index][0]

            if test_base[index][1] >= 3.5:
                comment_class = self._max_value
            else:
                comment_class = self._min_value

                # print(entry_array, comment_class)
            for key in entry_array:
                array.append(entry_array[key])
            try:
                found = self.__network.activate(array)
                if self.float_round(found, comment_class):
                    corrects += 1
                else:
                    errors += 1
                print(found, comment_class,
                      self.float_round(found, comment_class))
            except (AssertionError, IndexError) as error:
                print("Have an error message: %s" % error)
        print("%f%%" % ((corrects * 100) / (errors + corrects)))

    def save_network(self, location):
        NetworkWriter.writeToFile(self.__network, location)

    def load_network(self, location):
        self.__network = NetworkReader.readFrom(location)
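Putting the class to work would look roughly like this (a sketch; training_base and test_base are lists of (word-count-dict, rating) pairs, as the methods above assume, and the file name is hypothetical):

nn = NeuralNetwork(unique_words, total_comments)
nn.training_network(training_base, number_of_trainings=20)
nn.test_network(test_base)
nn.save_network('sentiment_net.xml')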