Example #1
def Predict(share, company, category, seqlen):
    #category = 'High' # 'Low', 'Close'
    # Load Data

    Xtrain, Xtest, ytrain, ytest, XtrainNorm, XtestNorm = split(share,
                                                                category,
                                                                window=0.01,
                                                                train=0.60,
                                                                normalize=True)
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))

    # Train
    layers = [1, 100, 100, 1]
    nbatch = 512
    epochs = 5
    nvalidation = 0.05

    rnn = RNN(layers, cell=RNNCell.LSTM)

    rnn.fit(Xtrain,
            ytrain,
            batch_size=nbatch,
            nb_epoch=epochs,
            validation_split=nvalidation)

    # Test
    predicted = rnn.predict(Xtest)
    mse = mean_squared_error(ytest, predicted)
    print("Test MSE: {}".format(mse))

    #print("Show trend")
    #pplt.plot(ytest)
    #pplt.plot(predicted)
    #pplt.show()

    #print("Show exact values")
    #pplt.plot(XtestNorm[:,1])
    #pplt.plot(XtestNorm[:,0]*(predicted[:,0] + 1))
    #pplt.show()

    #print("Show predicted sequences")
    #seq = (Xtest[len(Xtest)-1]+1)*XtestNorm[len(XtestNorm)-1,0]
    #predictednew, predictednewNorm = rnn.sequence(seq, 100)
    #pplt.plot(predictednew)
    #pplt.plot(predictednewNorm)
    #pplt.show()

    seq = (Xtest[len(Xtest) - 1] + 1) * XtestNorm[len(XtestNorm) - 1, 0]
    predictednew, predictednewNorm = rnn.sequence(seq, seqlen)
    testNorm = XtestNorm[:, 0] * (predicted[:, 0] + 1)
    dataNorm = np.array([*testNorm, *predictednewNorm])

    #fig = pplt.figure()
    pplt.plot(XtestNorm[:, 1])
    pplt.plot(dataNorm)
    title = "{}{}(min={},max={})".format(company, category,
                                         np.amin(predictednewNorm),
                                         np.amax(predictednewNorm))
    pplt.title(title)
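
A minimal usage sketch for Predict (an addition, not part of the original example): the data source, ticker, and argument values below are assumptions, and the figure built inside the function is displayed explicitly at the end.

# Hedged usage sketch; source, ticker, and seqlen are illustrative values.
import matplotlib.pyplot as pplt
import bulbea as bb

share = bb.Share(source='WIKI', ticker='AAPL')       # hypothetical source and ticker
Predict(share, company='AAPL', category='Close', seqlen=50)
pplt.show()                                           # Predict builds the plot but does not display it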
Example #2
def predict_next_from_current_share(var_share, model):
    _, Xtest, _, ytest = split(var_share, 'Close', normalize=True, train=0.0)
    _, ori_Xtest, _, ori_ytest = split(var_share,
                                       'Close',
                                       normalize=False,
                                       train=0.0)
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))

    # Format the Xtest
    last_Xtest = Xtest[-1:]
    last_Xtest = np.reshape(last_Xtest,
                            (last_Xtest.shape[0], last_Xtest.shape[1], 1))

    # Format the ori_ytest
    last_ori_ytest = ori_ytest[-1]

    # Get the prediction
    predict = model.predict(last_Xtest)

    # convert it back
    new_pre = rever_back([last_ori_ytest], [predict])[0][0][0]
    return new_pre
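
A short usage sketch for predict_next_from_current_share (an addition, not from the original code): it assumes a Share object and an already-trained model such as the rnn fitted in the other examples; split and rever_back are helpers from the surrounding project and are taken as given.

# Hedged usage sketch; source and ticker are illustrative.
import bulbea as bb

share = bb.Share(source='WIKI', ticker='AAPL')              # hypothetical source and ticker
next_close = predict_next_from_current_share(share, rnn)    # rnn: a model trained as in the other examples
print("Predicted next close:", next_close)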
Example #3
def predict(request):
    # print(request.body)
    # s = json.dumps(request.body)
    # data = json.loads(s)
    # print(data['years'])
    # print(request.body[len(request.body)-1])
    os.environ["BULBEA_QUANDL_API_KEY"] = 'AENaz-R8uBmUxQsYrLzD'
    quandl.ApiConfig.api_key = 'AENaz-R8uBmUxQsYrLzD'
    now = datetime.datetime.now()
    my_json = request.body.decode('utf8').replace("'", '"')
    data = json.loads(my_json)
    # dataFrameJSON = json.dumps(data["data"], indent=4, sort_keys=True)
    print(data["CompanyName"])
    df = pd.read_json(data["data"], orient='split')
    # print(dataFrameJSON)
    # print(type(df))
    dates = []
    prices = []
    lst_for_result = []
    predictionYear = 0
    predictionType = data["pred_type"]
    if predictionType == 'Years':
        predictionYear = now.year + int(data["years"])
        # print(df.index)
        for i in df.index:
            # print(str(i).split()[0].split('-')[2])
            dates.append(int(str(i).split()[0].split('-')[0]))
        for j in df['Close']:
            prices.append(float(str(j)))

    if predictionType == 'Months':
        predictionYear = int(data["months"])
        # print(df.index)
        for i in df.index:
            # print(str(i).split()[0].split('-')[2])
            dates.append(int(str(i).split()[0].split('-')[1]))
        for j in df['Close']:
            prices.append(float(str(j)))
    # predictionYear = now.year + int(data["years"])
    # for i in df.index:
    #     # print(str(i).split()[0].split('-')[2])
    #     dates.append(int(str(i).split()[0].split('-')[0]))
    # for j in df['Close']:
    #     prices.append(float(str(j)))
    dates = np.reshape(dates, (len(dates), 1))
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    svr_rbf.fit(dates, prices)
    predictedValue = svr_rbf.predict([[predictionYear]])  # predict() expects a 2-D array of samples

    share = bb.Share(source='NSE', ticker=data["CompanyName"])
    Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize=True)
    Xtrain = pd.DataFrame(Xtrain)
    Xtest = pd.DataFrame(Xtest)
    ytrain = pd.DataFrame(ytrain)
    ytest = pd.DataFrame(ytest)
    Xtrain = Xtrain.dropna(axis=0, how='any')
    Xtest = Xtest.dropna(axis=0, how='any')
    ytrain = ytrain.dropna(axis=0, how='any')
    ytest = ytest.dropna(axis=0, how='any')
    # .values replaces the deprecated DataFrame.as_matrix()
    Xtrain = Xtrain.values
    Xtest = Xtest.values
    ytrain = ytrain.values
    ytest = ytest.values
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))
    rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
    rnn.fit(Xtrain, ytrain)
    p = rnn.predict(Xtest)
    mse = mean_squared_error(ytest, p)
    print(mse)
    fig, ax = plt.subplots()
    ax.plot(ytest)
    ax.plot(p)
    # ax.scatter(dates, prices, color='black', label='Data')
    # ax.plot(dates, svr_rbf.predict(dates), color='red', label='RBF Model')
    # ax.set_xticklabels(dates, rotation=90, ha='left', fontsize=10)
    dictRepPredictionGraph = mpld3.fig_to_dict(fig)  # mpld3 dict representation of the figure (currently unused)
    plt.savefig("/Users/rjmac/Desktop/prediction.png")
    result = {}
    result['predictedValue'] = predictedValue[0]
    result['yearPredicted'] = predictionYear
    with open("/Users/rjmac/Desktop/prediction.png", "rb") as img:
        st = base64.b64encode(img.read())
        print(type(st))
        strng = st.decode("utf-8")
        result['predictionGraph'] = strng
    # lst_for_result.append(result)
    json_data = json.dumps(result)
    return HttpResponse(json_data, content_type='application/json')
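
The view expects a JSON body carrying CompanyName, pred_type, a years/months horizon, and a DataFrame serialized with orient='split' under data. Below is a minimal sketch of exercising it with Django's test client; the /predict/ route and all payload values are assumptions, and the request is slow because the view fetches data from Quandl and trains an RNN.

# Hedged sketch of calling the view (route and payload values are illustrative).
import json
import pandas as pd
from django.test import Client

df = pd.DataFrame({'Close': [100.0, 101.5, 99.8]},
                  index=pd.to_datetime(['2017-01-02', '2017-01-03', '2017-01-04']))
payload = {
    "CompanyName": "INFY",
    "pred_type": "Years",
    "years": "1",
    "data": df.to_json(orient='split', date_format='iso'),
}
client = Client()
response = client.post('/predict/', data=json.dumps(payload),
                       content_type='application/json')
print(response.json()['predictedValue'])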
Example #4
    def save(self, file):
        return self.model.save(file)

    def load(self, file):
        # Returns the loaded Keras model; note that self.model is not replaced here.
        return load_model(file)


# Saving a file in python

# file = "f.txt"
# path = os.path.join("test/", file)
# file = open(path, "w")

stock_list = [
    'ADM', 'FLS', 'ADI', 'ADBE', 'CB', 'FAST', 'ABBV', 'SJM', 'DHI', 'ACN',
    'AAP', 'ZTS', 'SIG', 'CME', 'XOM', 'CMCSA', 'ABC', 'ABT', 'JBHT', 'DHR',
    'GOOGL', 'AAL', 'XLNX', 'MMC', 'RRC', 'ROST', 'GPC', 'AAPL', 'DLTR', 'WM'
]

for i in stock_list:
    stock = bb.Share('Wiki', i)

    Xtrain, Xtest, ytrain, ytest = split(stock, 'Close', normalize=True)
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))
    rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
    rnn.fit(Xtrain, ytrain)

    rnn.save("models/" + i + ".h5")
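
The saved .h5 files can be restored later without retraining. A brief sketch, assuming rnn.save wrote a standard Keras HDF5 model (as the load() method above suggests); the ticker in the path is illustrative.

# Hedged sketch: reload one of the saved networks with Keras.
from keras.models import load_model

model = load_model("models/AAPL.h5")
# predictions = model.predict(Xtest)   # Xtest reshaped to (samples, window, 1) as above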
Example #5
import os

os.environ["BULBEA_QUANDL_API_KEY"] = 'VGjjpcct_DscJ4Fa8DCP'
import bulbea as bb

share = bb.Share(source='SSE', ticker='MSFT', provider='alphavantage')
# share = bb.Share(source='SSE', ticker='AMZ', provider='quandl')
share.groupDataByAttribute()
# print(share.data)
from bulbea.learn.evaluation import split

import numpy as np

Xtrain, Xtest, ytrain, ytest = split(share, '4. close', normalize=True)
# Xtrain, Xtest, ytrain, ytest = split(share, 'Last', normalize=True)

Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))

from bulbea.learn.models import RNN

rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
rnn.fit(Xtrain, ytrain)

from sklearn.metrics import mean_squared_error

p = rnn.predict(Xtest)
mean_squared_error(ytest, p)
import matplotlib.pyplot as pplt

pplt.plot(ytest)
pplt.plot(p)
pplt.show()
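
The plot above compares normalized targets with normalized predictions. To read the predictions as price levels, the convention from Example #1 (window base price times predicted return plus one) can be applied; this sketch is an addition and assumes split() with normalize=False returns the raw price windows, as in Example #2.

# Hedged sketch: map normalized predictions back to prices.
_, raw_Xtest, _, raw_ytest = split(share, '4. close', normalize=False)
raw_Xtest = np.asarray(raw_Xtest)
predicted_prices = raw_Xtest[:, 0] * (np.asarray(p).ravel() + 1)
pplt.plot(np.asarray(raw_ytest))
pplt.plot(predicted_prices)
pplt.show()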
Example #6
from bulbea.learn.models import RNN

# ### Training & Testing

from bulbea.learn.evaluation import split
import numpy as np

rnn_arr = []
for index, share in enumerate(share_array):
    print("{} hour. {}".format(index + 1, len(share.data)))
    Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize=True)
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))

    # Training
    rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
    rnn.fit(Xtrain, ytrain)
    rnn_arr.append(rnn)

# #### TESTING

from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as pplt
from bulbea.entity.share import _reverse_cummulative_return
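
Example #6 stops right after importing the evaluation tools, so the testing loop they lead into is not shown. Below is a sketch of that step, following the evaluation pattern of Examples #1 and #5; it recomputes each share's test split rather than reusing the loop variables, and leaves _reverse_cummulative_return aside since its signature is project-specific.

# Hedged sketch of the testing step (an addition, not from the original code).
for index, (share, rnn) in enumerate(zip(share_array, rnn_arr)):
    _, Xtest, _, ytest = split(share, 'Close', normalize=True)
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))
    p = rnn.predict(Xtest)
    print("Share {} MSE: {}".format(index + 1, mean_squared_error(ytest, p)))
    pplt.plot(ytest)
    pplt.plot(p)
    pplt.show()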