Example #1
    def m(self):
        import os
        import keras_model

        if len(self.model_names) == 0:
            print('Creating keras model from scratch...')
            return keras_model.create_model()
        else:
            path = os.path.join(self.directory, self.model_names[-1] + '.hdf5')
            print('Creating keras model from file {}'.format(path))
            return self.keras.models.load_model(path)
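Example #1 assumes a sibling keras_model module that exposes create_model(). The sketch below is a hypothetical version of that module, not the original author's code; the input shape, layer sizes, and loss are placeholders.

# keras_model.py -- hypothetical module assumed by Example #1
from tensorflow import keras


def create_model():
    # Small fully connected network; shapes and sizes are illustrative only.
    model = keras.Sequential([
        keras.layers.Dense(64, activation='relu', input_shape=(10,)),
        keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model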
Example #2
def main():
    # load data
    x_train_pre, y_train, x_val_pre, y_val = load_data("FM_dataset.dat")

    # Search for best hyperparameters
    params = parameter_search(x_train_pre, y_train)
    print("Best parameters: {p}".format(p=params))
    #params = {"neurons": 500, "activation": "sigmoid", "epochs": 300, "batch_size": 50}

    # Create a model using best params
    model = create_model(neurons=params['neurons'],
                         activation=params['activation'])
    model.fit(x_train_pre,
              y_train,
              epochs=params['epochs'],
              batch_size=params['batch_size'],
              verbose=0)

    # Perform final evaluation on model
    evaluate_architecture(model, x_val_pre, y_val)

    # Save model
    save_model(model)
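Example #2 depends on load_data, parameter_search, create_model, evaluate_architecture, and save_model defined elsewhere in the repository. A minimal sketch of what parameter_search might look like, assuming create_model(neurons, activation) returns a compiled Keras model; the grid values are purely illustrative, not the original search space.

from itertools import product


def parameter_search(x_train, y_train):
    # Hypothetical grid search over a small hyperparameter grid.
    grid = {
        'neurons': [100, 500],
        'activation': ['relu', 'sigmoid'],
        'epochs': [100, 300],
        'batch_size': [50],
    }
    best_params, best_loss = None, float('inf')
    for neurons, activation, epochs, batch_size in product(*grid.values()):
        model = create_model(neurons=neurons, activation=activation)
        history = model.fit(x_train, y_train,
                            epochs=epochs,
                            batch_size=batch_size,
                            validation_split=0.2,
                            verbose=0)
        # Keep the configuration with the lowest final validation loss.
        loss = history.history['val_loss'][-1]
        if loss < best_loss:
            best_loss = loss
            best_params = {'neurons': neurons, 'activation': activation,
                           'epochs': epochs, 'batch_size': batch_size}
    return best_params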
Example #3
train_data_generator = BatchGenerator(train_data,
                                      CONFIG.number_of_words,
                                      CONFIG.batch_size,
                                      total_words,
                                      skip_step=CONFIG.number_of_words)
valid_data_generator = BatchGenerator(valid_data,
                                      CONFIG.number_of_words,
                                      CONFIG.batch_size,
                                      total_words,
                                      skip_step=CONFIG.number_of_words)

optimizer = tf.keras.optimizers.Adam(learning_rate=CONFIG.learning_rate,
                                     decay=CONFIG.learning_rate_decay)

model = create_model(total_words=total_words,
                     hidden_size=CONFIG.hidden_size,
                     num_steps=CONFIG.number_of_words,
                     optimizer=optimizer)

model.summary()

checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(
    os.getcwd(), 'model', 'checkpoint', 'model-{epoch:02d}.h5'),
                                                  verbose=1)

save_json(stringToIndex, os.path.join(os.getcwd(), 'data',
                                      'stringToIndex.json'))

save_json(indexToString, os.path.join(os.getcwd(), 'data',
                                      'indexToString.json'))

# Reconstructed call using the generators and checkpoint callback defined
# above; add steps_per_epoch / validation_steps if BatchGenerator is not a
# keras Sequence.
model.fit_generator(train_data_generator,
                    validation_data=valid_data_generator,
                    callbacks=[checkpointer])
Example #4
params = {
    'batch_size': 64,
    'n_classes': 2,
    'max_len': 100,
    'n_words': 1000,
    'shuffle': False
}

learning_rate = 0.5

df_train = pd.read_csv(DATA_TRAIN, chunksize=100)
df_valid = pd.read_csv(DATA_TEST, chunksize=100)

# Generators

training_generator = DataGenerator(df_train, **params)
validation_generator = DataGenerator(df_valid, **params)

print("training_generator", isinstance(training_generator, Sequence))
print("validation_generator", isinstance(validation_generator, Sequence))
# Design model
model = create_model(total_words=1000,
                     hidden_size=128,
                     num_steps=100,
                     optimizer='adam')

# Train model on dataset
model.fit_generator(generator=training_generator,
                    validation_data=validation_generator,
                    use_multiprocessing=True,
                    workers=6)
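Examples #3 to #5 all call a create_model helper that is not shown. A minimal sketch of a word-level language model with that signature, assuming an Embedding + LSTM + TimeDistributed Dense stack; the architecture is an assumption, not the original code, and the wordlist argument passed in Example #5 is omitted here.

import tensorflow as tf


def create_model(total_words, hidden_size, num_steps, optimizer):
    # Word-level language model: embed tokens, run an LSTM over num_steps
    # positions, and predict the next word at every position.
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(total_words, hidden_size,
                                  input_length=num_steps),
        tf.keras.layers.LSTM(hidden_size, return_sequences=True),
        tf.keras.layers.TimeDistributed(
            tf.keras.layers.Dense(total_words, activation='softmax')),
    ])
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model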
Example #5
train_data_generator = BatchGenerator(train_data,
                                      CONFIG.num_steps,
                                      CONFIG.batch_size,
                                      total_words,
                                      skip_step=CONFIG.num_steps)
valid_data_generator = BatchGenerator(valid_data,
                                      CONFIG.num_steps,
                                      CONFIG.batch_size,
                                      total_words,
                                      skip_step=CONFIG.num_steps)

optimizer = Adam(lr=CONFIG.learning_rate, decay=CONFIG.learning_rate_decay)

model = create_model(total_words=total_words,
                     hidden_size=CONFIG.hidden_size,
                     num_steps=CONFIG.num_steps,
                     optimizer=optimizer,
                     wordlist=wordlist)

model.summary()

checkpointer = ModelCheckpoint(filepath=os.path.join(os.getcwd(), 'model',
                                                     'checkpoint',
                                                     'model-{epoch:02d}.h5'),
                               verbose=1)

save_json(dictionary,
          os.path.join(os.getcwd(), 'web', 'web_model', 'dictionary.json'))

# Assumed target path, mirroring dictionary.json above.
save_json(reversed_dictionary,
          os.path.join(os.getcwd(), 'web', 'web_model',
                       'reversed_dictionary.json'))
Example #6
env = make(ENV_NAME)

input_file = 'data/MSFT_1d_test.csv'
output_file = 'data/MSFT_1d_test_fe.csv'
w_file_name = 'data/MSFT_1d.h5f'

feature_extractor = FeatureExtractor(input_file, output_file)
feature_extractor.extract()

feature_list = feature_extractor.get_feature_names()

trade_cost = 0.03
env.init_file(output_file, feature_list, trade_cost, False)

model = create_model(env)
memory = SequentialMemory(limit=5000, window_length=1)
policy = GreedyQPolicy()
dqn = DQNAgent(model=model,
               nb_actions=env.action_size,
               memory=memory,
               nb_steps_warmup=50,
               target_model_update=1e-2,
               policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mse'])

dqn.load_weights(w_file_name)

dqn.test(env,
         nb_episodes=1,
         action_repetition=1)
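Example #6 builds the Q-network with create_model(env). A minimal sketch, assuming the environment exposes an observation_size attribute alongside the action_size used above; the attribute name and layer sizes are assumptions, not the original implementation.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten


def create_model(env):
    # Simple feed-forward Q-network: flatten the (window_length, features)
    # observation and map it to one Q-value per discrete action.
    model = Sequential([
        Flatten(input_shape=(1, env.observation_size)),
        Dense(32, activation='relu'),
        Dense(32, activation='relu'),
        Dense(env.action_size, activation='linear'),
    ])
    return model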
Example #7
    labels_cat = to_categorical(labels)

    kfold = StratifiedKFold(n_splits=30, shuffle=True, random_state=12)
    cvscores = []
    models = []
    test_data = []
    """ Ready to train """
    print(" data shape {}".format(data.shape))
    print(" train shape {}".format(labels.shape))
    for train, test in kfold.split(data, labels):
        # Keras' Sequential API does not support multiple convolution filter
        # sizes over the same embedding output, so we proceed with one CNN
        # layer and a single filter size (see the functional-API sketch after
        # this example).
        Y = labels_cat[train]
        Y_test = labels_cat[test]
        model = create_model(vocab_size,
                             100,
                             50,
                             0.3,
                             embedding_matrix=glove.embedding_matrix)
        model.fit(data[train], Y, epochs=10, batch_size=64)
        scores = model.evaluate(data[test], Y_test, verbose=1)
        print("{} {}".format(model.metrics, scores))
        cvscores.append(scores[2])
        models.append(model)
        test_data.append(test)
    print(cvscores)
    max_index = np.array(cvscores).argmax()
    model = models[max_index]
    t_data = test_data[max_index]
    predicted = model.predict(data[t_data])
    print(np.round(predicted))
    print(labels_cat[t_data])
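The comment in Example #7 keeps a single CNN layer because multiple filter sizes cannot be stacked over the same embedding output in a Sequential model. With the Keras functional API, parallel Conv1D branches over one embedding can be concatenated; the sketch below illustrates that idea with placeholder sizes and is not the original model.

from tensorflow.keras import layers, Model


def create_multi_filter_model(vocab_size, embed_dim, max_len, n_classes,
                              filter_sizes=(3, 4, 5)):
    # Parallel Conv1D branches with different kernel sizes over the same
    # embedding output, concatenated before classification.
    inputs = layers.Input(shape=(max_len,))
    embedded = layers.Embedding(vocab_size, embed_dim)(inputs)
    branches = []
    for size in filter_sizes:
        conv = layers.Conv1D(64, size, activation='relu')(embedded)
        branches.append(layers.GlobalMaxPooling1D()(conv))
    merged = layers.Concatenate()(branches)
    outputs = layers.Dense(n_classes, activation='softmax')(merged)
    model = Model(inputs, outputs)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model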