Example #1
def sentiment_analysis(load_model, label_type, embs_convert_type,
                       label_type_folder, target_data_folder, save_folder):
    check_type(label_type,
               types_list=['tonality', 'toxicity'],
               type_name='label')
    check_type(embs_convert_type,
               types_list=['mean', 'length_64'],
               type_name='embeddings convert')

    x_target, y_target = load_target_data(label_type=label_type,
                                          convert_type=embs_convert_type,
                                          data_folder=target_data_folder)

    # Either train a new LSTM on the source data or load a previously saved one
    if not load_model:
        x_source, x_source_test, y_source, y_source_test = load_source_data(
            label_type=label_type,
            label_data_folder=label_type_folder,
            convert_type=embs_convert_type)
        model = train_lstm(x_source=x_source,
                           y_source=y_source,
                           label_type=label_type,
                           convert_type=embs_convert_type,
                           save_folder=save_folder,
                           epochs=5)
        predict(model=model, x=x_source_test, y=y_source_test, title='Source')
    else:
        model = load_lstm(label_type=label_type,
                          convert_type=embs_convert_type,
                          folder=save_folder)

    # Evaluate the model on the target data and return its predictions
    y_pred = predict(model=model, x=x_target, y=y_target, title='Target')

    return y_pred
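A minimal call sketch, assuming the project's helper functions are importable; the folder paths and argument values below are illustrative only:

# Hypothetical paths and options for sentiment_analysis
y_pred = sentiment_analysis(load_model=False,
                            label_type='tonality',
                            embs_convert_type='mean',
                            label_type_folder='data/labels',
                            target_data_folder='data/target',
                            save_folder='models')
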
Example #2
def stocks_prediction(prediction_days, stock):
    print(f"Executing prediction for {stock}")
    yf.pdr_override()
    # Download price history from the configured start date up to tomorrow
    now = time.strftime('%Y-%m-%d', time.localtime(time.time() + 86400))
    data = pdr.get_data_yahoo(stock, start=config.HISTORY_START_DATE, end=now)
    # Store the raw price history in the "taurus" database
    db = database.Database()
    db.use("taurus")
    db.panda_write("taurus", data, stock)

    # Save Simple Prediction to DB
    prediction_results = prediction.simple_prediction(prediction_days, data)
    simple_data = _save("Prediction", prediction_results, stock)

    model = lstm.model(prediction_days, data)
    # Save LSTM Prediction to DB
    lstm_results = lstm.predict(model[0], model[1], model[2], model[3],
                                model[4])
    lstm_data = _save("LSTMPrediction", lstm_results, stock)

    # Save Root Deviation to DB
    rmse_results = lstm.root_deviation(lstm_results, model[0])
    # The arguments are written the other way round so a single InfluxDB query can return the deviations for every stock.
    _save(stock, rmse_results, "Deviation")

    return simple_data, lstm_data
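A possible invocation, assuming the surrounding module already imports yfinance as yf, pandas_datareader as pdr, and the project's config, database, prediction and lstm modules; the ticker and horizon are placeholders:

# Predict 30 days ahead for a placeholder ticker
simple_data, lstm_data = stocks_prediction(prediction_days=30, stock="AAPL")
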
Example #3
def train_score(network={}):
    print(network)

    global seq_len
    global num_features
    # initialize model according to the given values of the network
    model = lstm.build_model(input_shape=[seq_len, num_features],
                             lstm_size=network['lstm_size'],
                             num_lstm=network['num_lstm'],
                             dropout=network['dropout'],
                             activation=network['activation'],
                             recurrent_activation=network['recurrent_activation'],
                             optimizer=network['optimizer'])

    # model = lstm.build_model(input_shape=[seq_len, num_features],
    #                          lstm_size=30,
    #                          num_lstm=1,
    #                          dropout=0.2,
    #                          activation='tanh',
    #                          recurrent_activation='selu',
    #                          optimizer='rmsprop')

    model.fit(
        dataset[0],
        dataset[1],
        validation_split=0.2)

    print('Training duration (s) : ', time.time() - global_start_time)
    # model.save('model.h5')

    predictions = lstm.predict(model, dataset[2])
    global scaler
    try:
        # Undo the scaling so the errors are reported in the original units
        predicted_load = lstm.inverse_transform(dataset[2], predictions, scaler)
        true_load = lstm.inverse_transform(dataset[2], dataset[3], scaler)

        rmse = sqrt(mean_squared_error(true_load, predicted_load))
        mape = np.mean(np.abs((true_load - predicted_load) / true_load)) * 100

        pyplot.plot(true_load, label='True')
        pyplot.plot(predicted_load, '--', label='Predicted')
        pyplot.legend()
        pyplot.show()
    except Exception as e:
        # Penalise configurations whose predictions cannot be evaluated
        print(e)
        rmse = 100.0
        mape = 100.0
    print('Test RMSE: %.3f' % rmse)
    print('Test MAPE: %.3f' % mape)
    return rmse, mape
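The network argument is a plain dict of hyperparameters read by name inside train_score; a sketch of its expected shape, with illustrative values, assuming the module-level seq_len, num_features, dataset and scaler globals have already been set:

# Example hyperparameter set; key names must match those read in train_score
network = {
    'lstm_size': 64,                  # units per LSTM layer
    'num_lstm': 2,                    # number of stacked LSTM layers
    'dropout': 0.2,
    'activation': 'tanh',
    'recurrent_activation': 'sigmoid',
    'optimizer': 'adam',
}
rmse, mape = train_score(network)
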
Example #4
def predictions_to_csv():
    for i in range(num_users):
        model = load_model('lstm_model' + str(i) + '.h5')
        X_train, y_train, X_test, y_test, scaler = lstm.load_data(
            'fuzzy_out' + str(i) + '.csv', seq_len, validation_percentage=0)
        predictions = positive_values(lstm.predict(model, X_train))
        # First timestep of every input window, still in scaled space
        norm_data = X_train[:, 0, :]
        data = scaler.inverse_transform(norm_data)
        # Swap the last feature column for the predictions before un-scaling
        norm_data = np.append(norm_data[:, :-1], predictions, axis=1)
        new_data = scaler.inverse_transform(norm_data)
        data = np.append(new_data,
                         np.reshape(data[:, -1], [data.shape[0], 1]),
                         axis=1)
        np.savetxt("user" + str(i) + ".csv", np.array(data), delimiter=",")
        print('Saved as: user' + str(i) + '.csv')
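positive_values is not shown in this example; a plausible one-line implementation, offered only as an assumption, would clip negative predictions to zero:

import numpy as np

def positive_values(predictions):
    # Assumed helper: forecasted loads cannot be negative, so clip at zero
    return np.clip(predictions, 0, None)
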
Example #5
File: app.py  Project: TomTheHuman/capstone
def query():
    try:
        json_data = request.json
        connection = db.get_db()
        connection.connect()
        if connection.is_connected():
            print("Fetching data frame for " + json_data['brand'] + ', ' + json_data['size'], file=sys.stdout)
            query = str("SELECT * FROM orders WHERE brand = '" + json_data['brand'] + "' AND package_size = '" + json_data['size']) +"'"
            data_table = pd.read_sql(query, con=connection)
            predicted_data = lstm.predict(data_table)
            connection.close()

            # response_data = pd.DataFrame(predicted_data, columns = ['invoice_date', 'cases_sold', 'cases_pred'])
            response = predicted_data.to_json(orient="records", date_format="iso")
            parsed = json.loads(response)
            return {'predict': parsed }


    except Error as e:
        return "Error fetching table..."
Example #6
import compute
import data
import lstm
import parameters
import plot

# Assuming NumPy's seed() and TensorFlow 1.x's set_random_seed() are intended
from numpy.random import seed
from tensorflow import set_random_seed


%load_ext autoreload
%autoreload 2

# Initialization of seeds
set_random_seed(2)
seed(2)

(params, _, yraw, y, yhat, num_errors) =\
    lstm.predict('params_3y_1L256_09i.yaml')
plot.prediction(y, yhat,
                yraw, num_errors, params,
                inv_scale=False, inv_diff=False, inv_log=False)

#
# --  Averaging predictions.
#
# y_avg = ((yhat1 + yhat2 + yhat3) / 3.0)
# rmse, num_errors_avg = compute.error(y1, y_avg)
# plot.prediction(y1, y_avg, num_errors_avg, params1)


#
# -- Single value prediction.
#