def _get_model(is_train):
    """Instantiate the model type selected by ``FLAGS.model``.

    Args:
        is_train: Forwarded to the chosen model constructor as ``is_train``.

    Returns:
        A newly constructed model instance.

    Raises:
        AttributeError: If ``FLAGS.model`` does not name a known model.
    """
    # Dispatch table: flag value -> model class.
    constructors = {
        'lstm': model.LSTMModel,
        'cnn': model.SimpleCNNModel,
        'conv-lstm': model.ConvLSTMModel,
    }
    model_cls = constructors.get(FLAGS.model)
    if model_cls is None:
        raise AttributeError("Model unimplemented: " + FLAGS.model)
    return model_cls(is_train=is_train)
def main(args):
    """Train an LSTM model on crypto market data and report per-epoch losses.

    Builds a fresh TF graph/session, runs ``max_epoches`` epochs of mini-batch
    training with an exponentially decayed learning rate, and prints the
    average training loss and validation loss after each epoch.

    Args:
        args: Unused; kept for the ``tf.app.run()``-style entry-point signature.
    """
    # TODO: Create an independent function for training & validation to improve readability
    # TODO: Fix gradient vanishing problem after a certain epochs
    model_config = ModelConfig()
    data = CryptoData(path="../data/market_data.csv", model_config=model_config)
    with tf.Graph().as_default(), tf.Session() as sess:
        m = model.LSTMModel(is_training=True, model_config=model_config)
        tf.global_variables_initializer().run()
        global_step = 0
        for epoch in range(model_config.max_epoches):
            # Exponential LR decay, held flat for the first `decay_range` epochs.
            lr_decay = model_config.lr_decay_rate**max(
                epoch - model_config.decay_range, 0)
            sess.run(m.update_lr,
                     feed_dict={m._new_lr: lr_decay * model_config.initial_lr})
            data.reset()
            train_loss = 0
            batch_count = 0
            while not data.is_finished():
                trainX, trainY = data.next_training_batch(
                    batch_size=model_config.batch_size,
                    num_steps=model_config.num_steps,
                    time_lap=model_config.time_lap)
                # Skip the final partial batch: the graph expects a fixed batch_size.
                if len(trainX) == model_config.batch_size:
                    _, _loss = sess.run([m.train_op, m.loss],
                                        feed_dict={
                                            m.input: trainX,
                                            m.target: trainY
                                        })
                    global_step += 1
                    train_loss += _loss
                    batch_count += 1
            valX, valY = data.validation_set(model_config.batch_size)
            val_loss = sess.run(m.loss, feed_dict={
                m.input: valX,
                m.target: valY
            })
            # Fix: guard against ZeroDivisionError when no full batch was seen
            # this epoch (previously `train_loss / batch_count` could crash).
            avg_train_loss = (train_loss / batch_count
                              if batch_count else float('nan'))
            print(
                "Epoch: {:d}, Global steps: {:d}\n Training Loss: {:.1f}, Validation Loss: {:.1f}"
                .format(epoch, global_step, avg_train_loss, val_loss))
# Fit the scaler on the full dataset and normalize every column in place.
scaler.fit(csv_data)
csv_data.iloc[:, :] = scaler.transform(csv_data)[:, :]

# Hyperparameters for this LSTM run.
INPUT_DIM = len(csv_data.columns)
OUTPUT_DIM = INPUT_DIM  # the model predicts every input feature
HIDDEN_DIM = 15
BATCH_SIZE = 1
NUM_LAYERS = 1
NUM_EPOCHS = 1
SEQ_SIZE = 60
SKIP_SIZE = 1

# NOTE(review): this rebinds the name `model` from the module to the model
# instance, shadowing the `model` module for the rest of the script —
# consider renaming the instance (e.g. `net`).
model = model.LSTMModel(
    input_dim=INPUT_DIM,
    output_dim=OUTPUT_DIM,
    hidden_dim=HIDDEN_DIM,
    batch_size=BATCH_SIZE,
    num_layers=NUM_LAYERS,
)
model.to(device=DEVICE)
# Summed (not mean) squared error; NOTE(review): lr=1e-1 is unusually high
# for Adam — confirm this is intentional.
loss_fn = torch.nn.MSELoss(reduction="sum")
optimiser = torch.optim.Adam(model.parameters(), lr=1e-1)

training_data = make_training_data()
print(training_data[0][0].shape[0])
# Last column of the (scaled) frame — presumably the target series; verify
# against the data layout.
real = csv_data.iloc[:, -1]

# NOTE(review): the triple-quoted string below starts here and continues past
# this view — it appears to comment out the training loop via a string literal.
"""
loss = 0
for epoch in range(NUM_EPOCHS):