def train_regression_predictor(train_x, train_y, learning_rule='sgd', learning_rate=0.002, n_iter=20, units=4):
    """Train a one-hidden-layer MLP regressor and report its training score.

    Parameters
    ----------
    train_x, train_y : array-like
        Training inputs and targets, passed straight through to ``fit``.
    learning_rule : str
        Optimizer name forwarded to ``Regressor`` (default ``'sgd'``).
    learning_rate : float
        Optimizer step size.
    n_iter : int
        Number of training iterations.
    units : int
        Width of the hidden 'Rectifier' layer.

    Returns
    -------
    The fitted ``Regressor`` instance.
    """
    mlp = Regressor(layers=[Layer('Rectifier', units=units),
                            Layer('Linear')],
                    learning_rule=learning_rule,
                    learning_rate=learning_rate,
                    n_iter=n_iter)
    mlp.fit(train_x, train_y)
    # Single-argument print() call form is valid under both Python 2 and 3
    # (the original `print expr` statement is Python-2-only syntax).
    print(mlp.score(train_x, train_y))
    return mlp
Ejemplo n.º 2
0
def CreateNetwork(data, predicates):
    """Build, train and persist a Regressor network.

    Parameters
    ----------
    data : sequence of rows
        Each row holds an input object at index 0 followed by one or more
        numeric target columns (``row[1:]``).
    predicates : sequence
        Predicates evaluated on each input via ``CalcPredicates`` to form
        the feature vector.

    Returns
    -------
    The trained ``Regressor`` (possibly partially trained if the user
    interrupted training with Ctrl-C).

    Side effects: logs progress and pickles the trained network to a file
    named after the current timestamp.
    """
    # network configuration
    neural_network = Regressor(
        layers=[
            Layer("Rectifier", units=50),
            Layer("Linear")],
        learning_rate=0.001,
        n_iter=5000)
    # build the training set
    x_train = np.array([CalcPredicates(row[0], predicates) for row in data])
    # BUG FIX: the original used apply(float, row[1:]), which calls
    # float(*row[1:]) and raises TypeError whenever a row has more than one
    # target column; convert each target value individually instead.
    y_train = np.array([[float(v) for v in row[1:]] for row in data])
    # training
    logging.info('Start training')
    logging.info('\n'+str(x_train))
    logging.info('\n'+str(y_train))
    try:
        neural_network.fit(x_train, y_train)
    except KeyboardInterrupt:
        # A user break is a deliberate early stop: keep the partially
        # trained network instead of propagating the interrupt.
        logging.info('User break')
    logging.info('Network created successfully')
    logging.info('score = '+str(neural_network.score(x_train, y_train)))
    # persist the trained network; `with` guarantees the file handle is
    # closed even if pickling fails (the original leaked the handle)
    with open(datetime.datetime.now().isoformat()+'.pkl', 'wb') as f:
        pickle.dump(neural_network, f)
    return neural_network
Ejemplo n.º 3
0
# Build the held-out evaluation window from column 4 of raw_data: for each
# time step t in [289*400, 289*410) the feature vector is the preceding
# (n_input - 1) values, and the target is the value at t + 1.
print("Generate X_test and y_test")
n_input = 11
print("X_test...")
n_sample2 = np.asarray([[raw_data.ix[t-i][4] for i in range(1, n_input)]
                        for t in np.arange(289*400, 289*410)])
print("y_test...")
n_test2 = np.asarray([raw_data.ix[t][4] for t in np.arange(289*400+1, 289*410+1)])
# <codecell>
print("Training time!!!!")
nn.fit(X_training, y_training)
#
# # <codecell>
#
# NOTE(review): the original recomputed n_sample2/n_test2 here with
# expressions identical to the ones above; the redundant second pass has
# been removed and the arrays built above are scored directly.
print(nn.score(n_sample2, n_test2))
#
# # <codecell>
#
# pred = np.asarray(nn.predict(n_sample2))
# ax = pl.subplot()
# ax.set_color_cycle(['blue','red'])
# pl.plot(n_test2)
# pl.plot(pred)
# pl.show()
#
# # <codecell>
#
# pd.DataFrame(zip(pred,n_test2),columns=["Prediction","Real"])
#
# # <codecell>
Ejemplo n.º 4
0
#TODO: add autoencoder-pretrain


######################
# PREDICTION         #
######################
y_predicted = nn.predict(X_test)  # predict


#################
#  EVALUATION   # (to evaluate how well the REGRESSION did). For now we evaluate in the TRAINING DATA
#################
#TEST DATA
# R^2 measure
R2 = nn.score(X_test, y_test)
print('R^2_test= ',R2) #evaluating predictions with R^2
# My EVALUATION metric (mean cosine similarity):
# average over test rows of dot(pred, truth) / (|truth| * |pred|).
# BUG FIX: the original loop started at index 1, silently skipping the
# first test sample, and divided the sum by y_train.shape[0] instead of
# y_test.shape[0] — so the "mean" used the wrong denominator.
cos = 0
for i in range(y_test.shape[0]):
    cos = cos + np.dot(y_predicted[i,], y_test[i,]) / (np.linalg.norm(y_test[i,]) * np.linalg.norm(y_predicted[i,]))
meanCos = cos / y_test.shape[0]
print('mean cos similarity_test= ', meanCos)

#TRAIN DATA:
y_predicted_train = nn.predict(X_train)  # predict
# R^2 measure
R2_train = nn.score(X_train, y_train)
print('R^2_train= ',R2_train) #evaluating predictions with R^2
# My EVALUATION metric (mean cosine similarity)