Example #1
from time import ctime, time

from pandas import Series

# logger, StockHelper, the model wrappers (SVM, ARIMA, LSTM), the scalers,
# get_next_n_trading_days and save_predictions are project-local imports.
def run_service():
    stocks = StockHelper.get_stock_symbol_mapping()
    for stock, symbol in stocks.items():
        logger.info(f"Starting training for {stock} [{symbol}] at {ctime()}")
        models = {
            "SVM": SVM(symbol, scaler=StandardScaler),
            "ARIMA": ARIMA(symbol, scaler=LogScaler),
            "LSTM": LSTM(symbol, scaler=MinMaxScaler, is_keras=True),
        }

        for model_name, model in models.items():
            logger.info(f"\tTraining {model_name} for {stock}")
            start_time = time()
            train_data = model.train_data

            n_days = 300
            test_data = Series(index=get_next_n_trading_days(n_days), dtype=float)

            predictions = model.fit_predict(n_days)
            predictions = Series(data=predictions, index=test_data.index[: len(predictions)])

            save_predictions(predictions, type(model).__name__, symbol, train_data.index.max().to_pydatetime())
            logger.info(f"\tTrained {model_name} for {stock} in {time() - start_time:.3f} seconds")

        logger.info(f"Finished training for {stock} [{symbol}] at {ctime()}")
Example #2
from glob import glob

# preprocess and save_predictions are project-local helpers.
def predict(img_shape, model):
    prediction_file = 'predictions.csv'
    predict_folder = './data/test_stg1/*.jpg'
    image_paths = list(glob(predict_folder))

    features, _ = preprocess(img_shape, image_paths)
    predictions = model.predict(features)

    save_predictions(
        prediction_file, image_paths,
        ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT'],
        predictions)
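save_predictions itself is not shown here. A plausible sketch matching this call signature, assuming it writes a Kaggle-style submission CSV with one probability column per class (the actual helper may differ):

import csv
import os


def save_predictions(prediction_file, image_paths, labels, predictions):
    # One row per image: the file name followed by one probability per label.
    with open(prediction_file, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["image"] + list(labels))
        for path, probs in zip(image_paths, predictions):
            writer.writerow([os.path.basename(path)] + list(probs))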
Example #3
from glob import glob

import numpy as np
import tensorflow as tf
from tqdm import tqdm

# create_graph and save_predictions are project-local helpers.
def run():
    model_path = '/tmp/output_graph.pb'
    prediction_file = 'predictions.csv'
    image_paths = list(glob('./data/test_stg1/*.jpg'))
    image_predictions = []

    with open('/tmp/output_labels.txt', 'rb') as labels_file:
        labels = [
            line.decode("utf-8").upper()
            for line in labels_file.read().splitlines()
        ]

    with tf.Session() as sess:
        create_graph(model_path)
        for image_path in tqdm(image_paths):
            image_data = tf.gfile.FastGFile(image_path, 'rb').read()

            softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
            predictions = sess.run(softmax_tensor,
                                   {'DecodeJpeg/contents:0': image_data})
            predictions = np.squeeze(predictions)
            image_predictions.append(predictions)

    save_predictions(prediction_file, image_paths, labels, image_predictions)
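This example uses TensorFlow 1.x APIs (tf.Session, tf.gfile.FastGFile). Under TensorFlow 2.x it would need the v1 compatibility shims, roughly:

import tensorflow.compat.v1 as tf

# TF2 executes eagerly by default; tf.Session requires graph mode.
tf.disable_eager_execution()

With these two lines in place of the plain TensorFlow import, the rest of the snippet (including tf.gfile.FastGFile) runs through the compat.v1 aliases.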
Example #4
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(train_data, c="red", label="Training Data")
ax.plot(test_data, c="black", label="Testing Data")
ax.legend()
plt.show()

# %%
predictions = model.fit_predict(n_days)
predictions
# %%
predictions = Series(data=predictions,
                     index=test_data.index[:len(predictions)])

# %%
if to_save_predictions:
    save_predictions(predictions,
                     type(model).__name__, symbol,
                     train_data.index.max().to_pydatetime())

# %%
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(train_data, c="red", label="Training Data")
ax.plot(test_data, c="black", label="Testing Data")
ax.plot(predictions, c="green", label="Predicted Price")
ax.grid(True)
ax.legend()
plt.show()

# %%
if testing:
    mae_score = mean_absolute_error(test_data, predictions)
    print(f"MAE: {mae_score}")
Example #5
import numpy as np
import torch

# device, MSEloss, helper, save_folder and used_params are module-level
# names defined elsewhere in the source repository.
def evaluate_forecasting(model, loader, args, isTest=False, getLoss=False):
    with torch.no_grad():
        model.eval()

        # Diagnostic: print the L2 norms of the staged transition-model
        # weight matrices, split at column 40.
        for name, param in model.named_parameters():
            if "transition_model" in name and "weight" in name and "stage" in name:
                print(name)
                print(torch.norm(param[:, :40], p=2, keepdim=True))
                print(torch.norm(param[:, 40:], p=2, keepdim=True))

        state_predictions = []
        state_groundTruth = []

        loss_total = 0
        objwise_mse = 0
        total_len = 0
        pred_states = []
        next_states = []
        # If True, roll out in input space by re-encoding decoded
        # predictions at every step; otherwise stay in the latent space.
        input_space = False
        for batch_idx, data_batch in enumerate(loader):
            data_batch = [tensor.to(device) for tensor in data_batch]
            statePast = data_batch[0].float()
            contPast = data_batch[1].float()
            action = data_batch[2].float()
            nextState = data_batch[3].float()

            # Multi-step rollout: accumulate the loss over the action
            # horizon, feeding each prediction back in as the next input.
            loss = 0
            message_loss = 0
            per_obj_loss = 0
            l1_term = 0

            state_pred = []
            state_gt = []
            for i in range(action.shape[2]):
                if input_space:
                    state_encoding, cont_encoding, action_encoding = model.getEncodings(
                        torch.cat([
                            statePast[:, :, i:], nextState[:, :, :i] if i == 0
                            else predicted[:, :,
                                           max(0, i - args.window_size):i]
                        ],
                                  dim=-1),
                        torch.cat([
                            contPast[:, :, i:],
                            action[:, :, max(0, i - args.window_size):i]
                        ],
                                  dim=-1), action[:, :, i].unsqueeze(2))
                    if args.full:
                        pred = model.getTransition(state_encoding,
                                                   cont_encoding,
                                                   action_encoding)
                    else:
                        pred = state_encoding + model.getTransition(
                            state_encoding if i == 0 else pred,
                            cont_encoding, action_encoding)
                    if i == 0:
                        predicted = model.decode(pred)
                    else:
                        predicted = torch.cat(
                            [predicted, model.decode(pred)], dim=-1)
                else:
                    state_encoding, cont_encoding, action_encoding = model.getEncodings(
                        torch.cat([statePast[:, :, i:], nextState[:, :, :i]],
                                  dim=-1),
                        torch.cat([
                            contPast[:, :, i:],
                            action[:, :, max(0, i - args.window_size):i]
                        ],
                                  dim=-1), action[:, :, i].unsqueeze(2))
                    if args.full:
                        pred = model.getTransition(
                            state_encoding if i == 0 else pred,
                            cont_encoding, action_encoding)
                    else:
                        pred = state_encoding + model.getTransition(
                            state_encoding if i == 0 else pred,
                            cont_encoding, action_encoding)
                if args.message_pass:
                    message_loss += model.get_l1_Message()
                mse_loss = MSEloss(model.decode(pred),
                                   nextState[:, :, i].unsqueeze(-1))
                objwise = helper.per_obj_mse(model.decode(pred),
                                             nextState[:, :, i].unsqueeze(-1))
                loss += mse_loss
                per_obj_loss += objwise

                if args.save_predictions:
                    state_gt.append(nextState[:, :, i].unsqueeze(-1))
                    state_pred.append(model.decode(pred))
                    if i == action.shape[2] - 1:
                        state_groundTruth.append(torch.cat(state_gt, dim=-1))
                        state_predictions.append(torch.cat(state_pred, dim=-1))

            if args.hierarchical_ls and args.layer_l1:
                l1_term = args.l1 * helper.getStages_norm(model)
            elif args.per_node_MLP and args.layer_l1:
                l1_term = args.l1 * helper.getTM_norm(model)

            if args.soft_decoder_l1:
                # Mean L1 penalty over the first decoder parameter tensor.
                decoder_params = [
                    x.view(-1) for x in model.decoder.parameters()
                ][0]
                l1_term = args.decoder_l1 * torch.norm(
                    decoder_params, 1) / decoder_params.size()[0]

            if args.message_pass and getLoss:
                message_loss = args.message_l1 * message_loss
                loss += message_loss

            if getLoss:
                loss_total += loss.item() * len(pred) + l1_term
            else:
                loss_total += loss.item() * len(pred)
            objwise_mse += per_obj_loss * len(pred)

            total_len += len(pred)

        if getLoss:
            model.train()
            return loss_total / float(total_len)

        if args.save_predictions:
            state_groundTruth = torch.cat(state_groundTruth, dim=0)
            state_predictions = torch.cat(state_predictions, dim=0)
            helper.save_predictions(save_folder, used_params,
                                    state_groundTruth, state_predictions, 0)

        objwise_mse_list = (objwise_mse / float(total_len)).tolist()
        dump_object_wise = [str(per_object) for per_object in objwise_mse_list]
        save_name = 'result_M3.txt' if not args.sepCTRL else 'results_M4.txt'
        re_loss = loss_total / float(total_len)
        objwise_mse_list.append(re_loss)
        results = np.expand_dims(np.array(objwise_mse_list), axis=0)

        print('Reconstruction Loss {}'.format(loss_total / float(total_len)))
        print('per_obj_mse: ' + str(dump_object_wise))
        return results[0]
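A hypothetical invocation, for orientation only; the real args namespace, model and DataLoader are built elsewhere in the repository and may carry more fields:

from argparse import Namespace

# Only the flags read inside evaluate_forecasting are set here; all of
# these names and defaults are assumptions about the surrounding code.
args = Namespace(window_size=5, full=False, message_pass=False,
                 save_predictions=False, hierarchical_ls=False,
                 per_node_MLP=False, layer_l1=False, soft_decoder_l1=False,
                 sepCTRL=False, l1=0.0, decoder_l1=0.0, message_l1=0.0)

val_loss = evaluate_forecasting(model, val_loader, args, getLoss=True)
print(f"validation loss: {val_loss:.6f}")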