def main():
    """CLI entry point: parse arguments and construct the requested Share.

    Relies on a module-level argparse ``parser`` defining SOURCE, SYMBOL
    and --alias.
    """
    args = parser.parse_args()
    # Inline the argument plumbing; bb.Share is the only consumer.
    share = bb.Share(args.SOURCE, args.SYMBOL, alias=args.alias)
def predict(request):
    """Django view: predict a future price for the company in the request body.

    Expects a JSON body with keys:
      - ``CompanyName``: ticker used for the bulbea share lookup,
      - ``data``: a split-oriented DataFrame JSON of historical prices,
      - ``pred_type``: ``'Years'`` or ``'Months'``,
      - ``years`` / ``months``: horizon for the SVR prediction.

    Returns an HttpResponse with JSON: predicted value, predicted year,
    and a base64-encoded PNG of the RNN test-set fit.
    """
    # NOTE(security): hard-coded API key — move to settings / environment config.
    os.environ["BULBEA_QUANDL_API_KEY"] = 'AENaz-R8uBmUxQsYrLzD'
    quandl.ApiConfig.api_key = 'AENaz-R8uBmUxQsYrLzD'
    now = datetime.datetime.now()

    # Request body uses single quotes; normalize to valid JSON before parsing.
    my_json = request.body.decode('utf8').replace("'", '"')
    data = json.loads(my_json)
    print(data["CompanyName"])
    df = pd.read_json(data["data"], orient='split')

    dates = []
    prices = []
    predictionYear = 0
    predictionType = data["pred_type"]
    if predictionType == 'Years':
        predictionYear = now.year + int(data["years"])
        # Index entries look like 'YYYY-MM-DD ...'; take the year component.
        for i in df.index:
            dates.append(int(str(i).split()[0].split('-')[0]))
        for j in df['Close']:
            prices.append(float(str(j)))
    if predictionType == 'Months':
        predictionYear = int(data["months"])
        # Same parse, but take the month component.
        for i in df.index:
            dates.append(int(str(i).split()[0].split('-')[1]))
        for j in df['Close']:
            prices.append(float(str(j)))

    dates = np.reshape(dates, (len(dates), 1))
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    svr_rbf.fit(dates, prices)
    # FIX: sklearn's predict() requires a 2-D array-like; passing a bare
    # scalar raises ValueError on current scikit-learn.
    predictedValue = svr_rbf.predict([[predictionYear]])

    share = bb.Share(source='NSE', ticker=data["CompanyName"])
    Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize=True)
    Xtrain = pd.DataFrame(Xtrain).dropna(axis=0, how='any')
    Xtest = pd.DataFrame(Xtest).dropna(axis=0, how='any')
    ytrain = pd.DataFrame(ytrain).dropna(axis=0, how='any')
    ytest = pd.DataFrame(ytest).dropna(axis=0, how='any')
    # FIX: DataFrame.as_matrix() was removed in pandas 1.0; .values is the
    # drop-in equivalent that works on both old and new pandas.
    Xtrain = Xtrain.values
    Xtest = Xtest.values
    ytrain = ytrain.values
    ytest = ytest.values
    # Add a trailing feature axis for the RNN: (samples, timesteps, 1).
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))

    rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
    rnn.fit(Xtrain, ytrain)
    p = rnn.predict(Xtest)
    mse = mean_squared_error(ytest, p)
    print(mse)

    fig, ax = plt.subplots()
    ax.plot(ytest)
    ax.plot(p)
    # Retained from the original even though currently unused downstream.
    dictRepPredictionGraph = mpld3.fig_to_dict(fig)
    # NOTE(review): hard-coded user path — should come from configuration.
    plt.savefig("/Users/rjmac/Desktop/prediction.png")

    result = {}
    result['predictedValue'] = predictedValue[0]
    result['yearPredicted'] = predictionYear
    with open("/Users/rjmac/Desktop/prediction.png", "rb") as img:
        st = base64.b64encode(img.read())
    strng = st.decode("utf-8")
    result['predictionGraph'] = strng
    json_data = json.dumps(result)
    return HttpResponse(json_data, content_type='application/json')
def save(self, file):
    """Persist the wrapped model to *file* via its own save()."""
    return self.model.save(file)


def load(self, file):
    """Load a model from *file* and return it (does not rebind self.model)."""
    return load_model(file)


# Saving a file in python
# file = "f.txt"
# path = os.path.join("test/", file)
# file = open(path, "w")

# Tickers for which a model is trained and written to models/<ticker>.h5.
stock_list = [
    'ADM', 'FLS', 'ADI', 'ADBE', 'CB', 'FAST', 'ABBV', 'SJM', 'DHI', 'ACN',
    'AAP', 'ZTS', 'SIG', 'CME', 'XOM', 'CMCSA', 'ABC', 'ABT', 'JBHT', 'DHR',
    'GOOGL', 'AAL', 'XLNX', 'MMC', 'RRC', 'ROST', 'GPC', 'AAPL', 'DLTR', 'WM',
]

for ticker in stock_list:
    stock = bb.Share('Wiki', ticker)
    Xtrain, Xtest, ytrain, ytest = split(stock, 'Close', normalize=True)
    # Trailing feature axis: (samples, timesteps) -> (samples, timesteps, 1).
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))
    rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
    rnn.fit(Xtrain, ytrain)
    rnn.save("models/" + ticker + ".h5")
import bulbea as bb

# Minimal smoke test: fetch GOOGL from the YAHOO source.
share = bb.Share('YAHOO', 'GOOGL')

a = 1  # kept from original — likely a breakpoint/inspection anchor
def on_click_search(self):
    """UI handler: read database/symbol from the entry widgets and plot Close."""
    entries = self.frame.entry
    database = entries[App.Frame.ENTRY_DATABASE].get()
    symbol = entries[App.Frame.ENTRY_SYMBOL].get()
    axes = bb.Share(database, symbol).plot(['Close'])
import os

# The Quandl key must be in the environment before bulbea is imported.
os.environ["BULBEA_QUANDL_API_KEY"] = 'VGjjpcct_DscJ4Fa8DCP'

import bulbea as bb

share = bb.Share(source='SSE', ticker='MSFT', provider='alphavantage')
# share = bb.Share(source='SSE', ticker='AMZ', provider='quandl')
share.groupDataByAttribute()
'''print(share.data)'''

from bulbea.learn.evaluation import split
import numpy as np

# Alphavantage names the close column '4. close'.
Xtrain, Xtest, ytrain, ytest = split(share, '4. close', normalize=True)
# Xtrain, Xtest, ytrain, ytest = split(share, 'Last', normalize=True)

# Trailing feature axis: (samples, timesteps) -> (samples, timesteps, 1).
Xtrain = Xtrain[:, :, np.newaxis]
Xtest = Xtest[:, :, np.newaxis]

from bulbea.learn.models import RNN

rnn = RNN([1, 100, 100, 1])  # number of neurons in each layer
rnn.fit(Xtrain, ytrain)

from sklearn.metrics import mean_squared_error

p = rnn.predict(Xtest)
mean_squared_error(ytest, p)

import matplotlib.pyplot as pplt

pplt.plot(ytest)
def __openStock(self, symbol, source='WIKI'):
    """Fetch *symbol* from *source* and store the Share on ``self.stock``.

    FIX: the ``source`` parameter was previously ignored — the call
    hard-coded ``source='WIKI'``, so passing any other source had no
    effect. Default value is unchanged, so existing callers behave
    identically.
    """
    self.stock = bb.Share(source=source, ticker=symbol)
# --- Backtest configuration --------------------------------------------------
features = ["SMA", "RSI"]     # Financial indicators of choice
trainStart = 0                # Start of training period
trainEnd = 1000               # End of training period
testStart = 1001              # Start of testing period
testEnd = 5000                # End of testing period
buyThreshold = 0.1            # Confidence threshold for predicting buy (default = 0.65)
sellThreshold = 0.65          # Confidence threshold for predicting sell (default = 0.65)
continuedTraining = False     # Continue training during testing period? (default = false)

# Initialize backtester
backtest = Backtest(features, trainStart, trainEnd, testStart, testEnd,
                    buyThreshold, sellThreshold, continuedTraining)

# --- Fetch share data and shape it into an OHLC frame ------------------------
provider = 'alphavantage'
share = bb.Share(source='SSE', ticker='MSFT', provider=provider)

df = pd.DataFrame(share.data).transpose()
df.columns = ['high', 'low', 'open', 'dropme1', 'close',
              'dropme2', 'dropme3', 'dropme4']
df = df.drop(columns=['dropme1', 'dropme2', 'dropme3', 'dropme4'])
df['date'] = pd.to_datetime(df.index, infer_datetime_format=True)
for _col in ('high', 'low', 'open', 'close'):
    df[_col] = pd.to_numeric(df[_col])

# Move the trailing 'date' column to the front.
cols = df.columns.tolist()
cols = cols[-1:] + cols[:-1]
df = df[cols]

# Indicator columns consumed by the backtester.
df['SMA'] = talib.SMA(df["close"])
df['RSI'] = talib.RSI(df["close"])
# df['MACD'] = talib.MACD(df["close"])[0]
models = load_model_helper(coin_name) # In[4]: from coinmarketcap_draw import coinmarketcap_data # In[5]: data = coinmarketcap_data(coin_name) # In[6]: import bulbea as bb figsize = (20, 15) get_ipython().magic('matplotlib inline') share = bb.Share("123", '123', data=data) # In[7]: share.plot(figsize=figsize) # ## Convert the data to hourly # In[8]: from datetime import timedelta import pandas as pd def convert_with_n_hour_gap(data, n): times = data.index.copy()
import bulbea as bb
from bulbea.learn.evaluation import split

# Pull GOOGL from the WIKI dataset and build normalized train/test splits
# over the Close column.
share = bb.Share('WIKI', 'GOOGL')
Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize=True)
import sys

import numpy as np
import bulbea as bb
#import matplotlib.pyplot as pplt
from bulbea.learn.evaluation import split
from bulbea.learn.models import RNN
from sklearn.metrics import mean_squared_error

if __name__ == '__main__':
    # FIX: `sys` was used below (sys.argv / sys.exit) but never imported,
    # so the script crashed with NameError before doing anything.
    if len(sys.argv) != 5:
        print(
            "Usage : python update.py [currency_1] [currency_2] [start_date] [end_date], date format : YYYY-MM-DD"
        )
        sys.exit(0)

    # Build the share from CLI args: argv[1]=source, argv[2]=ticker,
    # argv[3]=start date, argv[4]=end date.
    share = bb.Share(source=sys.argv[1], ticker=sys.argv[2],
                     start=sys.argv[3], end=sys.argv[4])

    # Refresh the data and persist it to a CSV named <source><ticker>_<start>_<end>.csv.
    share.update()
    print(share.data.head())
    share.save(filename='{sym}_{start}_{end}.csv'.format(
        sym='{cur1}{cur2}'.format(cur1=sys.argv[1], cur2=sys.argv[2]),
        start=sys.argv[3], end=sys.argv[4]))

    # Pre-process: normalized train/test split plus a trailing feature axis
    # (samples, timesteps) -> (samples, timesteps, 1) for the RNN.
    Xtrain, Xtest, ytrain, ytest = split(share, 'Close', normalize=True)
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], 1))
    Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], 1))