import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import dbn   # project-local DBN implementation (provides dbn.DBN)
import dwt   # project-local wavelet-transform helpers (provides dwt.dwt / dwt.idwt)
# generate_data and generateData are project-local helpers assumed to be in scope.

def direct(dataset, testnum, featurenum):
    # Direct multi-step strategy: dbn1 predicts one step ahead and dbn2 two steps
    # ahead from the same inputs; their outputs are interleaved at the end.
    dataset = dataset[np.newaxis, :]
    x_train_one, x_test, y_train_one, y_test = generate_data(
        dataset, testnum, featurenum)
    # dbn2 is trained to predict two steps ahead: the same inputs as dbn1 with the
    # targets shifted forward by one position.
    x_train_two = x_train_one[0:x_train_one.shape[0] - 1, :]
    y_train_two = y_train_one[1:y_train_one.shape[0]]
    # Both models see every second test input; dbn1 predicts the first value of
    # each test pair and dbn2 the second.
    x_test_two = x_test[::2, :]
    y_test_two = y_test[1::2]
    x_test_one = x_test[::2, :]
    y_test_one = y_test[::2]
    min_max_scaler1 = MinMaxScaler()
    x_train_one = min_max_scaler1.fit_transform(x_train_one)
    x_test_one = min_max_scaler1.transform(x_test_one)
    min_max_scaler2 = MinMaxScaler()
    x_train_two = min_max_scaler2.fit_transform(x_train_two)
    x_test_two = min_max_scaler2.transform(x_test_two)
    dbn1 = dbn.DBN(x_train=x_train_one,
                   y_train=y_train_one,
                   x_test=x_test_one,
                   y_test=y_test_one,
                   hidden_layer=[250],
                   learning_rate_rbm=0.0005,
                   batch_size_rbm=150,
                   n_epochs_rbm=200,
                   verbose_rbm=1,
                   random_seed_rbm=500,
                   activation_function_nn='tanh',
                   learning_rate_nn=0.005,
                   batch_size_nn=200,
                   n_epochs_nn=300,
                   verbose_nn=1,
                   decay_rate=0)
    dbn1.pretraining()
    dbn1.finetuning()
    dataset_pred_one = dbn1.result[:, 0]
    dbn2 = dbn.DBN(x_train=x_train_two,
                   y_train=y_train_two,
                   x_test=x_test_two,
                   y_test=y_test_two,
                   hidden_layer=[250],
                   learning_rate_rbm=0.0005,
                   batch_size_rbm=150,
                   n_epochs_rbm=200,
                   verbose_rbm=1,
                   random_seed_rbm=500,
                   activation_function_nn='tanh',
                   learning_rate_nn=0.005,
                   batch_size_nn=200,
                   n_epochs_nn=300,
                   verbose_nn=1,
                   decay_rate=0)
    dbn2.pretraining()
    dbn2.finetuning()
    dataset_pred_two = dbn2.result[:, 0]
    dataset_pred = []
    for i in range(len(dbn2.result[:, 0])):
        dataset_pred.append(dataset_pred_one[i])
        dataset_pred.append(dataset_pred_two[i])
    return dataset_pred
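# The loop at the end of direct() interleaves the one-step and two-step predictions.
# A vectorized sketch of the same merge (assumes both arrays have equal length):
def _interleave_sketch(pred_one, pred_two):
    # Builds [a0, b0, a1, b1, ...] from [a0, a1, ...] and [b0, b1, ...].
    return np.ravel(np.column_stack((pred_one, pred_two))).tolist()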
def predict_with_dwt(dataset, testnum, featurenum):
    # Decompose the series, forecast the lowpass (approximation) coefficients with a
    # DBN and the highpass (detail) coefficients statistically, then reconstruct the
    # forecast with the inverse transform.
    ca, cd = dwt.dwt(dataset)
    ca_matrix = ca[np.newaxis, :]
    print('DWT finish.')
    x_train, x_test, y_train, y_test = generate_data(ca_matrix,
                                                     int(testnum / 2),
                                                     featurenum)
    min_max_scaler = MinMaxScaler()
    x_train = min_max_scaler.fit_transform(x_train)
    x_test = min_max_scaler.transform(x_test)
    dbn1 = dbn.DBN(x_train=x_train,
                   y_train=y_train,
                   x_test=x_test,
                   y_test=y_test,
                   hidden_layer=[250],
                   learning_rate_rbm=0.0005,
                   batch_size_rbm=150,
                   n_epochs_rbm=200,
                   verbose_rbm=1,
                   random_seed_rbm=500,
                   activation_function_nn='tanh',
                   learning_rate_nn=0.005,
                   batch_size_nn=150,
                   n_epochs_nn=1500,
                   verbose_nn=1,
                   decay_rate=0)
    dbn1.pretraining()
    dbn1.finetuning()
    ca_pred = dbn1.result[:, 0]
    print('Lowpass coefficient estimation finish.')
    mu, sigma_2, cd_pred = generateData(cd[0:len(cd) - int(testnum / 2)],
                                        outputnum=int(testnum / 2))
    print('Highpass coefficient estimation finish.')
    dataset_pred = dwt.idwt(ca_pred, cd_pred)
    print('IDWT finish.')
    dataset_test = dataset[len(dataset) - testnum:len(dataset)]
    ca_test, cd_test = dwt.dwt(dataset_test)
    plt.figure(figsize=(12, 9), dpi=100)
    plt.subplot(3, 1, 1)
    plt.plot(ca_test)
    plt.plot(ca_pred)
    plt.legend(['lowpass_real', 'lowpass_prediction'], loc='upper right')
    plt.title('lowpass coefficient prediction result', fontsize=16)
    plt.subplot(3, 1, 2)
    plt.plot(cd_test)
    plt.plot(cd_pred)
    plt.legend(['highpass_real', 'highpass_prediction'], loc='upper right')
    plt.title('highpass coefficient prediction result', fontsize=16)
    plt.subplot(3, 1, 3)
    mse = mean_squared_error(dataset_pred, dataset_test)
    plt.plot(dataset_test)
    plt.plot(dataset_pred)
    plt.legend(['dataset_real', 'dataset_prediction'], loc='upper right')
    plt.title('sequence prediction result', fontsize=16)
    plt.xlabel('MSE = %f' % mse)
    plt.draw()
    #plt.show()
    return dataset_pred, mse
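# dwt is a project-local module; a minimal sketch of what its dwt/idwt pair
# presumably does, assuming a single-level Haar transform via PyWavelets
# (the real implementation may differ):
import pywt

def _dwt_sketch(signal, wavelet='haar'):
    ca, cd = pywt.dwt(signal, wavelet)   # lowpass (approximation) / highpass (detail)
    return ca, cd

def _idwt_sketch(ca, cd, wavelet='haar'):
    return pywt.idwt(ca, cd, wavelet)    # reconstruct a signal of the original length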
def recursive(dataset, testnum, featurenum):
    # Recursive strategy: dbn1 forecasts one step ahead, and its predictions are fed
    # back in as the newest lag feature to forecast the following step.
    dataset = dataset[np.newaxis, :]
    x_train, x_test, y_train, y_test = generate_data(dataset, testnum,
                                                     featurenum)
    x_test_one = x_test[::2, :]
    y_test_one = y_test[::2]
    y_test_two = y_test[1::2]
    min_max_scaler = MinMaxScaler()
    x_train = min_max_scaler.fit_transform(x_train)
    x_test_one = min_max_scaler.transform(x_test_one)
    dataset_pred = []
    dbn1 = dbn.DBN(x_train=x_train,
                   y_train=y_train,
                   x_test=x_test_one,
                   y_test=y_test_one,
                   hidden_layer=[250],
                   learning_rate_rbm=0.0005,
                   batch_size_rbm=150,
                   n_epochs_rbm=200,
                   verbose_rbm=1,
                   random_seed_rbm=500,
                   activation_function_nn='tanh',
                   learning_rate_nn=0.005,
                   batch_size_nn=200,
                   n_epochs_nn=300,
                   verbose_nn=1,
                   decay_rate=0)
    dbn1.pretraining()
    dbn1.finetuning()
    dataset_pred_one = dbn1.result[:, 0]
    # Feed dbn1's predictions back in: drop the oldest lag column and append the
    # fresh predictions as the newest lag before predicting the next step.
    x_test_two = np.delete(x_test[::2, :], 0, axis=1)
    x_test_two = np.hstack(
        (x_test_two, dbn1.result.reshape(len(dbn1.result), 1)))
    x_test_two = min_max_scaler.transform(x_test_two)
    dataset_pred_two = dbn1.predict(x_test_two)
    for i in range(len(dataset_pred_one)):
        dataset_pred.append(dataset_pred_one[i])
        dataset_pred.append(dataset_pred_two[i])
    return dataset_pred
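# The same recursive idea with a generic regressor in place of the DBN
# (LinearRegression is only a stand-in to make the feedback step explicit):
def _recursive_two_step_sketch(x_train, y_train, x_test_one):
    from sklearn.linear_model import LinearRegression
    model = LinearRegression().fit(x_train, y_train)
    step_one = model.predict(x_test_one)                          # forecast t+1
    # Drop the oldest lag and append the fresh forecast as the newest lag.
    x_test_two = np.hstack((x_test_one[:, 1:], step_one.reshape(-1, 1)))
    step_two = model.predict(x_test_two)                          # forecast t+2
    return np.ravel(np.column_stack((step_one, step_two)))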
def predict_without_dwt(dataset, testnum, featurenum):
    # Baseline: train a single DBN directly on the raw series, with no wavelet step.
    dataset = dataset[np.newaxis, :]
    x_train, x_test, y_train, y_test = generate_data(dataset, testnum,
                                                     featurenum)
    min_max_scaler = MinMaxScaler()
    x_train = min_max_scaler.fit_transform(x_train)
    x_test = min_max_scaler.transform(x_test)
    dbn1 = dbn.DBN(x_train=x_train,
                   y_train=y_train,
                   x_test=x_test,
                   y_test=y_test,
                   hidden_layer=[250],
                   learning_rate_rbm=0.0005,
                   batch_size_rbm=150,
                   n_epochs_rbm=200,
                   verbose_rbm=1,
                   random_seed_rbm=500,
                   activation_function_nn='tanh',
                   learning_rate_nn=0.005,
                   batch_size_nn=150,
                   n_epochs_nn=1500,
                   verbose_nn=1,
                   decay_rate=0)
    dbn1.pretraining()
    dbn1.finetuning()
    dataset_pred = dbn1.result[:, 0]
    dataset_test = dataset[0, dataset.shape[1] - testnum:dataset.shape[1]]
    mse = mean_squared_error(dataset_pred, dataset_test)
    plt.figure(figsize=(12, 9), dpi=100)
    plt.plot(dataset_test)
    plt.plot(dataset_pred)
    plt.legend(['dataset_real', 'dataset_prediction'], loc='upper right')
    plt.title('sequence prediction result', fontsize=16)
    plt.xlabel('MSE = %f' % mse)
    plt.draw()
    #plt.show()
    return dataset_pred, mse
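# A hypothetical driver for the functions above; the series and the
# testnum/featurenum values are illustrative only:
if __name__ == '__main__':
    series = np.sin(np.arange(3000) / 30.0)
    pred_dwt, mse_dwt = predict_with_dwt(series, testnum=500, featurenum=50)
    pred_raw, mse_raw = predict_without_dwt(series, testnum=500, featurenum=50)
    print('MSE with DWT: %f, without DWT: %f' % (mse_dwt, mse_raw))
    plt.show()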
Example #5
import pickle
import preprocessing

preprocessing.generate_data()
print("done.")

with open(b'network.p', 'rb') as f:
    data = pickle.load(f)

for item in data.publications:
    print(item.title)

for item in data.authors:
    print(item.name)

for item in data.institute:
    print(item.name)
Example #6
            }
            setup.update(setup2)

            model_eval, history = neural_network(**setup)
            grid.append([i, j, model_eval])
            histories.append(history)

            print(i, ' done out of ', i_max)

    save(histories, 'histories2')
    print(grid)
    save(grid, 'grid-search2')

    X_train, X_dev, X_test, y_train, y_dev, y_test = load('splited')
    np.random.seed(5)
    X_train = np.vstack([X_train, prep.generate_data(X_train)])
    y_train = np.vstack([y_train, y_train])

    hyper = [[0.0005, 8], [0.001, 32], [0.01, 256]]

    histories = []
    grid = []

    make_keras_picklable()  # enables to pickle keras models

    i_max = 3
    for i, (learning_rate, batch_size) in enumerate(hyper):
        setup = {
            'batch_size': batch_size,
            'learning_rate': learning_rate,
        }
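        # Presumably the loop body continues as in the fragment above: merge these
        # per-trial settings with the fixed arguments (setup2 is assumed to hold the
        # data and architecture options), run the training, and record the result.
        setup.update(setup2)
        model_eval, history = neural_network(**setup)
        grid.append([i, model_eval])
        histories.append(history)
        print(i, ' done out of ', i_max)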
Example #7
model = Sequential()
model.add(Merge([context_branch, question_branch], mode='concat'))
model.add(Dense(200, activation="relu"))
model.add(Dense(2, activation="relu"))

model.compile(optimizer='adam', loss='mse')
model.summary()
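
# Merge is Keras 1 API; under Keras 2 the same model is usually written with the
# functional API and Concatenate. A sketch (not used below), assuming
# context_branch and question_branch are single-output Model instances:
def build_model_keras2(context_branch, question_branch):
    from keras.layers import Concatenate, Dense
    from keras.models import Model
    merged = Concatenate()([context_branch.output, question_branch.output])
    hidden = Dense(200, activation='relu')(merged)
    output = Dense(2, activation='relu')(hidden)
    m = Model(inputs=[context_branch.input, question_branch.input], outputs=output)
    m.compile(optimizer='adam', loss='mse')
    return m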


TRAIN_OR_TEST = "train"
WEIGHTS = "model_weights.h5"
PREDICTION_OUTPUT = "squad/custom-mse-pred.txt"

if TRAIN_OR_TEST == "train":
    # Define the datasets
    validation_dataset = next(generate_data("squad/dev-v1.1.json", batch_memory=1))
    X, y = next(generate_data("squad/train-v1.1.json", batch_memory=8))

    # Define the metrics
    mdelaf_metric = MdelafMetric(model, validation_dataset)
    checkpoint = ModelCheckpoint(filepath=WEIGHTS, save_weights_only=True)

    # Train the model
    history = model.fit(X, y, epochs=15, batch_size=128, validation_split=0.2,
                        shuffle=True, callbacks=[checkpoint, mdelaf_metric])

    with open("history.json", "wt") as fp:
        json.dump([history.history, mdelaf_metric.history], fp)

else:
    #model.load_weights(WEIGHTS)
Example #8
import os
import h5py
from mxnet import nd, gluon, autograd
from model import SrCnn
from mxnet.gluon import loss as gloss
import time
import random

train_data = '../data/srcnn/Train/'
lr = 1e-4
epoch = 10
batch_size = 128

if __name__ == "__main__":
    if not os.path.exists("train.h5"):
        generate_data(train_data, "train.h5")
    with h5py.File("train.h5", 'r') as hf:
        train_input = nd.array(hf.get('input'))
        train_label = nd.array(hf.get('label'))
    net = SrCnn()
    net.initialize(ctx=try_gpu())
    if os.path.exists("srcnn.params"):
        net.load_parameters("srcnn.params")
    ctx = try_gpu()
    trainer = gluon.Trainer(net.collect_params(),
                            'sgd', {'learning_rate': lr})
    print('training on', ctx)
    loss = gloss.L2Loss()
    for ep in range(epoch):
        train_l_sum, n, start = 0.0, 0, time.time()
        # batch_idxs = len(train_input) // batch_size
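        # A sketch of how the truncated loop presumably continues, following the
        # usual gluon pattern (the batching details are assumptions):
        batch_idxs = len(train_input) // batch_size
        for idx in range(batch_idxs):
            X = train_input[idx * batch_size:(idx + 1) * batch_size].as_in_context(ctx)
            y = train_label[idx * batch_size:(idx + 1) * batch_size].as_in_context(ctx)
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
            train_l_sum += l.sum().asscalar()
            n += y.shape[0]
        print('epoch %d, loss %.6f, time %.1f sec'
              % (ep + 1, train_l_sum / n, time.time() - start))
    net.save_parameters('srcnn.params')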
Example #9
    import pickle

    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--strength',
                        type=float,
                        default=1000.0,
                        help='how much to weigh tree-regularization term.')
    args = parser.parse_args()

    window_length = 10

    # format is (num sequences, timesteps, features)
    # We need (features, timesteps, num sequences)

    X, Y = generate_data(9, 'small_df_labeled.csv')
    obs_set, out_set = X.T, Y.T
    proportion = obs_set.shape[2] // 2  # integer index, required for slicing
    obs_train, out_train = obs_set[:, :, :proportion], out_set[:, :, :proportion]
    obs_test, out_test = obs_set[:, :, proportion:], out_set[:, :, proportion:]

    obs_train, fcpt_train, out_train = map_3d_to_2d(obs_train, out_train)
    obs_test, fcpt_test, out_test = map_3d_to_2d(obs_test, out_test)

    X_train = obs_train
    F_train = fcpt_train
    y_train = out_train

    print(X_train.shape)

# Third-party imports used below; GRU, train, predict and generate_data are
# project-local helpers assumed to be importable.
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as opt

input_length = 50
output_length = 1
batch_size = 256
hidden_size = 128
num_layers = 1
dropout = 0
testnum = 500
# interval is the number of samples between the last input step and the first output step.
interval = 0

epoch = 100
device = 'cuda'

# Generate a sine-wave dataset for training and testing.
dataset = np.sin([i / 50 * 2 * np.pi for i in range(2000)])
x_train, y_train, x_test, y_test, normalizer = generate_data(
    dataset, 'minmax', input_length, output_length, testnum, interval)

# Build, train and predict.
model = GRU(1, hidden_size, num_layers, 1, dropout)
optimizer = opt.Adam(model.parameters())
loss = nn.MSELoss()
batch_train_loss, batch_val_loss = train(model, x_train, y_train, epoch,
                                         batch_size, optimizer, loss, device)
y_predict, y_real, _ = predict(model, x_test, y_test, loss, device, normalizer,
                               batch_size)

# Draw result
plt.plot(y_predict, label='prediction')
plt.plot(y_real, label='real')
plt.legend()
plt.show()
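
# GRU, train and predict above are project-local; a minimal wrapper compatible with
# the constructor call GRU(1, hidden_size, num_layers, 1, dropout) used earlier,
# assuming batch-first (batch, seq, feature) inputs (the real class may differ):
import torch
from torch import nn

class GRU(nn.Module):
    def __init__(self, input_size, hidden_size, num_layers, output_size, dropout):
        super().__init__()
        self.gru = nn.GRU(input_size, hidden_size, num_layers,
                          batch_first=True, dropout=dropout)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        out, _ = self.gru(x)            # out: (batch, seq_len, hidden_size)
        return self.fc(out[:, -1, :])   # predict from the last time step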