Example #1
import mlp

def run(num_hidden, datasets):
    # Unpack the train/validation/test splits and their targets
    train = datasets['train']
    train_targets = datasets['train_targets']
    valid = datasets['valid']
    valid_targets = datasets['valid_targets']
    test = datasets['test']
    test_targets = datasets['test_targets']

    # Build the network, train with early stopping on the validation split,
    # and return the confusion matrix on the held-out test split
    net = mlp.Mlp(train, train_targets, num_hidden)
    net.earlystopping(train, train_targets, valid, valid_targets)
    return net.confusion(test, test_targets)
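
# A minimal calling sketch (not from the source): the dict keys mirror the
# ones run() reads; the shapes, one-hot target encoding, and num_hidden=6
# are placeholders rather than anything the excerpt confirms.
import numpy as np

datasets = {
    'train': np.random.rand(100, 8),
    'train_targets': np.eye(4)[np.random.randint(0, 4, 100)],
    'valid': np.random.rand(20, 8),
    'valid_targets': np.eye(4)[np.random.randint(0, 4, 20)],
    'test': np.random.rand(20, 8),
    'test_targets': np.eye(4)[np.random.randint(0, 4, 20)],
}
confusion = run(num_hidden=6, datasets=datasets)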
Example #2
        X_valid.append(getSPData[1])  # SPdata_noise
        y_valid.append(param)
    it_split += 1

# Training data
X_train = np.array(X_train)
y_train = np.array(y_train)

# Report the shapes of X_train and y_train
print('size X_train: ', X_train.shape)
print('size y_train: ', y_train.shape)

# === Create MLP object
# input: 101 | hidden 1: 2 | hidden 2: 5 | output: 4
mlp_regression = mlp.Mlp(size_layers=[101, 2, 5, 4],
                         act_funct='relu',  # alternative: 'tanh'
                         reg_lambda=0.1,
                         bias_flag=True)
print(mlp_regression)

# === Training MLP object
# Training with backpropagation and 150 iterations
iterations = 150    # epochs
loss = np.zeros([iterations, 1])

for ix in range(iterations):
    mlp_regression.train(X_train, y_train, 1)
    Y_hat = mlp_regression.predict(X_train)
    y_tmp = np.argmax(Y_hat, axis=1)
    # y_hat = labels[y_tmp]
    y_hat = Y_hat   # regression: use the raw network outputs, no label mapping
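    # The loss array allocated above is never filled in the visible part of
    # this loop; one assumed way to record a per-iteration error (MSE is a
    # guess, not confirmed by the source):
    loss[ix] = np.mean((Y_hat - y_train) ** 2)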
Example #3
import torch
import torch.nn.functional as F
import torch.autograd as autograd
import mlp
import json
import random
import os

baselrate = 20
lrate = baselrate / 100000.0    # 2e-4
print("Learning Rate:", lrate)
epochnum = 200
idx = list(range(2342))         # indices 0..2341

model = mlp.Mlp()
model.cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=lrate, weight_decay=1e-5)

with open('traindata.json', 'r') as file:
    data = json.load(file)

with open('trainlabel.json', 'r') as file:
    label = json.load(file)

print("Training Set Loaded")

with open('validdata.json', 'r') as file:
    validdata = json.load(file)
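
# The excerpt ends during data loading; a sketch of a training loop under
# stated assumptions: mlp.Mlp is an nn.Module whose forward takes a float
# feature tensor, and the labels are integer class indices (neither is
# confirmed by the source).
data_t = torch.tensor(data, dtype=torch.float32).cuda()
label_t = torch.tensor(label, dtype=torch.long).cuda()

for epoch in range(epochnum):
    random.shuffle(idx)                      # fresh sample order each epoch
    optimizer.zero_grad()
    output = model(data_t[idx])
    loss = F.cross_entropy(output, label_t[idx])
    loss.backward()
    optimizer.step()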
Example #4
    # (tail of a loop that one-hot encodes test_y into the matrix test_Y)
    ix_tmp = np.where(test_y == labels[ix_label])[0]
    test_Y[ix_tmp, ix_label] = 1

# ## 2. Parameters of MLP
#  * __Number of layers__ : 4 (input, hidden1, hidden2, output)
#  * __Elements in layers__ : [784, 25, 10, 10]
#  * __Activation function__ : Rectified Linear function (ReLU)
#  * __Regularization parameter__ : 0

# ## 3. Creating MLP object

# Creating the MLP object initializes the weights
mlp_classifier = mlp.Mlp(size_layers=[784, 25, 10, 10],
                         act_funct='relu',
                         reg_lambda=0,
                         bias_flag=True)
print(mlp_classifier)

# ## 4. Training MLP object

# Training with Backpropagation and 400 iterations
iterations = 400
loss = np.zeros([iterations, 1])

for ix in range(iterations):
    mlp_classifier.train(train_X, train_Y, 1)
    Y_hat = mlp_classifier.predict(train_X)
    y_tmp = np.argmax(Y_hat, axis=1)
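    # The excerpt cuts off here and the loss array above is never filled in
    # the visible body; a hedged continuation (train_Y one-hot as built above,
    # error-rate metric assumed, not from the source):
    y_true = np.argmax(train_Y, axis=1)
    loss[ix] = np.mean(y_tmp != y_true)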
Example #5
import sys

import tensorflow as tf
from sklearn.decomposition import LatentDirichletAllocation

# `config` and `cn` (the module providing Mlp) are project-local imports
# that fall above the excerpted region.

env = sys.argv[1]
config.dictionary_size = 1014
config.vocabulary_size = 1
# print(labels)
print("Total labels: ", len(config.labels))
print(config.vocabulary_size)

path = ""
if env == "local":
    path = "data/reuters/"
elif env == "server":
    path = "data/reuters/"   # currently identical to the "local" path

cnn = cn.Mlp()
# Construct model
pred = cnn.network(cnn.x, cnn.weights, cnn.biases, cnn.dropout)

# Define loss (summed binary cross-entropy with a 1e-9 clamp, plus L2 weight
# penalty) and optimizer
#cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred, labels=cnn.y))
#cost = tf.reduce_mean(bpmll_out_module.bp_mll(pred, cnn.y))
cost = -tf.reduce_sum(
    ((cnn.y * tf.log(pred + 1e-9)) + ((1 - cnn.y) * tf.log(1 - pred + 1e-9))),
    name='xentropy') + 0.01 * (tf.nn.l2_loss(cnn.weights['wd1']) +
                               tf.nn.l2_loss(cnn.weights['out']))
optimizer = tf.train.AdamOptimizer(
    learning_rate=cnn.learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(cnn.y, 1))
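
# The excerpt stops after correct_pred; the customary TF1 continuation
# (standard boilerplate, not taken from this source) reduces it to a scalar
# accuracy and runs the graph in a session:
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # the real feed values come from a batch pipeline outside this excerpt:
    # _, c = sess.run([optimizer, cost], feed_dict={cnn.x: batch_x, cnn.y: batch_y})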
Example #6
import pickle
import sys

import tensorflow as tf
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer

import config
import utils
#utils.read_labels("rcv")
import class_DatasetAgN as ds
import mlp as ml
import lemma_tokenizer as lt
from stop_words import get_stop_words

env = sys.argv[1]

mlp = ml.Mlp()
# Construct model
pred = mlp.network(mlp.x, mlp.weights, mlp.biases, mlp.dropout)

# Define loss (summed binary cross-entropy with a 1e-9 clamp, plus L2 weight
# penalty) and optimizer
cost = -tf.reduce_sum(
    ((mlp.y * tf.log(pred + 1e-9)) + ((1 - mlp.y) * tf.log(1 - pred + 1e-9))),
    name='entropy') + 0.01 * (tf.nn.l2_loss(mlp.weights['wd1']) +
                              tf.nn.l2_loss(mlp.weights['out']))
#cost = tf.reduce_mean(bpmll_out_module.bp_mll(pred, mlp.y))# + 0.01 * (tf.nn.l2_loss(mlp.weights['wd1']) + tf.nn.l2_loss(mlp.weights['out']))
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=mlp.y))
optimizer = tf.train.AdamOptimizer(learning_rate=mlp.learning_rate).minimize(cost)

#collator = icu.Collator.createInstance(icu.Locale('UTF-8'))
stop_words = get_stop_words('en')

#vectorizer = CountVectorizer(min_df=1, stop_words = stop_words) #bag of words
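
# The bag-of-words vectorizer above is left commented out; a TF-IDF sketch the
# imports suggest (only TfidfVectorizer's own parameters are certain here; the
# documents are placeholders, and plugging in lt's lemma tokenizer would be an
# assumption about its API):
vectorizer = TfidfVectorizer(min_df=1, stop_words=stop_words)
X_tfidf = vectorizer.fit_transform(["first placeholder document",
                                    "second placeholder document"])
print(X_tfidf.shape)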
Example #7
    acertos = 0
    for i in range(0, len(matTest)):
        saida = m.test(matTest[i][:2])
        if saida == matTest[i][2]:
            acertos += 1
    lstAcertos.append(acertos)

print(lstEpocas)
print(lstAcertos)
resu = getEstatisticas(lstEpocas, lstAcertos)
print("Epochs", resu[0], "Hits", resu[1], "Non-converged cases", resu[2])'''

# XOR truth table: [input1, input2, expected output]
matTrain = [[0, 1, 1], [0, 0, 0], [1, 0, 1], [1, 1, 0]]
#matTrain = [[1,1,0],[0,1,1],[0,0,0],[1,0,1]]
m = mlp.Mlp(TAXA_APRENDIZAGEM, 4, MAX_EPOCAS)
epo = m.train(matTrain)

print("Terminou treinamento", epo)

print("test", m.test([0, 1]))
print("test", m.test([1, 1]))
print("test", m.test([1, 0]))
print("test", m.test([0, 0]))

#print vect[0]
'''MLP network initialization'''
#m = mlp.Mlp(TAXA_APRENDIZAGEM,OPT_FUNCAO)
'''Training the network'''
#epocas = m.train(matTrain)