def predictionGraph(self):
    # Selecting prices and dates
    prices = self.stock[:, 0].reshape(-1, 1)
    dates = self.stock[:, 1].reshape(-1, 1)
    # Train and fit
    input = 1
    output = 1
    layers = [('F', self.hidden), ('AF', 'tanh'), ('F', self.hidden), ('AF', 'tanh'),
              ('F', self.hidden), ('AF', 'tanh'), ('F', output)]
    mlpr = ANNR([input], layers, batchSize=256, maxIter=self.iterations,
                tol=self.tolerance, reg=1e-4, verbose=False)
    holdDays = 5
    totalDays = len(dates)
    mlpr.fit(dates[0:(totalDays - holdDays)], prices[0:(totalDays - holdDays)])
    # Predict
    pricePredict = mlpr.predict(dates)
    # Plot the graph
    fig = Figure()
    axis = fig.add_subplot()
    axis.plot(dates, prices)
    axis.plot(dates, pricePredict, c='#5aa9ab')
    return fig
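# A minimal usage sketch for predictionGraph above. Since it builds a bare
# matplotlib Figure (not a pyplot-managed one), one way to render it is to attach
# an Agg canvas and save to a file; the predictor instance is a hypothetical
# instance of the owning class:
from matplotlib.backends.backend_agg import FigureCanvasAgg

fig = predictor.predictionGraph()  # hypothetical instance of the owning class
FigureCanvasAgg(fig)               # attach a canvas so the figure can be drawn
fig.savefig('prediction.png')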
def T13():
    '''
    Tests restoring a model from file
    '''
    m1 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16, name = 't12ann1')
    rv = m1.RestoreModel('./', 't12ann1')
    return rv
def page2(request):
    stock_data = np.loadtxt(
        os.path.join(BASE_DIR, f"prediction/project_api/{file_names[context['selected_stock']]}"),
        delimiter=",", skiprows=1, usecols=(1, 4))
    stock_data = scale(stock_data)
    prices = stock_data[:, 1].reshape(-1, 1)
    dates = stock_data[:, 0].reshape(-1, 1)
    input = 1
    output = 1
    hidden = 50
    layers = [('F', hidden), ('AF', 'tanh'), ('F', hidden), ('AF', 'tanh'),
              ('F', hidden), ('AF', 'tanh'), ('F', output)]
    mlpr = ANNR([input], layers, batchSize=256, maxIter=20000, tol=0.2, reg=1e-4, verbose=True)
    holdDays = 5
    totalDays = len(dates)
    mlpr.fit(dates[0:(totalDays - holdDays)], prices[0:(totalDays - holdDays)])
    pricePredict = mlpr.predict(dates)
    fig = plt.figure()
    plt.plot(dates, prices)
    plt.plot(dates, pricePredict, c='#5aa9ab')
    context['graph1'] = mpld3.fig_to_html(fig)
    plt.close()
    return render(request, 'prediction/graphs.html', context)
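# The view above assumes module-level imports and globals that are not shown in
# this snippet. A minimal sketch of what they could look like; the file_names
# mapping and the shared context dict are assumptions inferred from the view body:
import os
import numpy as np
import matplotlib.pyplot as plt
import mpld3
from sklearn.preprocessing import scale
from django.conf import settings
from django.shortcuts import render
from TFANN import ANNR

BASE_DIR = settings.BASE_DIR
file_names = {'AAPL': 'aapl.csv'}     # hypothetical stock-name-to-CSV mapping
context = {'selected_stock': 'AAPL'}  # hypothetical shared view context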
def T12():
    '''
    Tests saving a model to file
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis = 1) ** 2).reshape(-1, 1)
    m1 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16, name = 't12ann1')
    m1.fit(A, Y)
    m1.SaveModel('./t12ann1')
    return True
def generator():
    #%%Path to store cached currency data
    datPath = 'CurDat/'
    if not os.path.exists(datPath):
        os.mkdir(datPath)
    cl = ['BTC', 'LTC', 'ETH', 'XMR']
    #Columns of price data to use
    CN = ['close', 'high', 'low', 'open', 'volume']
    #Store data frames for each of above types
    D = []
    for ci in cl:
        dfp = os.path.join(datPath, ci + '.csv')
        try:
            df = pd.read_csv(dfp, sep=',')
        except FileNotFoundError:
            df = GetCurDF(ci, dfp)
        D.append(df)
    #%%Only keep range of data that is common to all currency types
    cr = min(Di.shape[0] for Di in D)
    for i in range(len(cl)):
        D[i] = D[i][(D[i].shape[0] - cr):]
    #%%Features are channels
    C = np.hstack([Di[CN] for Di in D])[:, None, :]
    HP = 16                 #Holdout period
    A = C[0:-HP]
    SV = A.mean(axis=0)     #Scale vector
    C /= SV                 #Basic scaling of data
    #%%Make samples of temporal sequences of pricing data (channel)
    NPS, NFS = 256, 16      #Number of past and future samples
    ps = PastSampler(NPS, NFS)
    B, Y = ps.transform(A)
    NC = B.shape[2]
    #2 1-D conv layers with relu followed by 1-d conv output layer
    ns = [('C1d', [8, NC, NC * 2], 4), ('AF', 'relu'),
          ('C1d', [8, NC * 2, NC * 2], 2), ('AF', 'relu'),
          ('C1d', [8, NC * 2, NC], 2)]
    #Create the neural network in TensorFlow
    cnnr = ANNR(B[0].shape, ns, batchSize=32, learnRate=2e-5, maxIter=64,
                reg=1e-5, tol=1e-2, verbose=True)
    cnnr.fit(B, Y)
    #Repeatedly predict future sequences from the most recent window
    PTS = []                #Predicted time sequences
    P, YH = B[[-1]], Y[[-1]]
    for i in range(HP // NFS):
        P = np.concatenate([P[:, NFS:], YH], axis=1)
        YH = cnnr.predict(P)
        PTS.append(YH)
    PTS = np.hstack(PTS).transpose((1, 0, 2))
    A = np.vstack([A, PTS]) #Combine predictions with original data
    A = np.squeeze(A) * SV  #Remove unit time dimension and rescale
    C = np.squeeze(C) * SV
    CI = list(range(C.shape[0]))
    AI = list(range(C.shape[0] + PTS.shape[0] - HP))
    NDP = PTS.shape[0]      #Number of days predicted
    for i, cli in enumerate(cl):
        fig, ax = mpl.subplots(figsize=(16 / 1.5, 10 / 1.5))
        hind = i * len(CN) + CN.index('high')
        ax.plot(CI[-4 * HP:], C[-4 * HP:, hind], label='Actual')
        ax.plot(AI[-(NDP + 1):], A[-(NDP + 1):, hind], '--', label='Prediction')
        ax.legend(loc='upper left')
        ax.set_title(cli + ' (High)')
        ax.set_ylabel('USD')
        ax.set_xlabel('Time')
        ax.axes.xaxis.set_ticklabels([])
        mpl.show()
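# GetCurDF is called above but not defined in these snippets. A minimal sketch of
# its contract, assuming it returns an OHLCV DataFrame for a currency code and
# caches it to CSV; the synthetic random-walk data below is only a stand-in, since
# the real data source is not shown here:
import numpy as np
import pandas as pd

def GetCurDF(ci, dfp):
    n = 1024  # hypothetical number of samples
    rng = np.random.default_rng(0)
    close = 100 + rng.standard_normal(n).cumsum()
    df = pd.DataFrame({
        'close': close,
        'high': close + rng.random(n),
        'low': close - rng.random(n),
        'open': close + rng.standard_normal(n) * 0.5,
        'volume': rng.random(n) * 1e4,
    })
    df.to_csv(dfp, sep=',', index=False)  # cache for subsequent runs
    return df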
def T8():
    '''
    Tests if multiple ANNRs can be created without affecting each other
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis = 1) ** 2).reshape(-1, 1)
    m1 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16)
    m1.fit(A, Y)
    R1 = m1.GetWeightMatrix(0)
    m2 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16)
    m2.fit(A, Y)
    #Fitting m2 should not have changed the weights of m1
    R2 = m1.GetWeightMatrix(0)
    if (R1 != R2).any():
        return False
    return True
def T9():
    '''
    Tests if multiple ANNRs can be created without affecting each other
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis = 1) ** 2).reshape(-1, 1)
    m1 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16)
    m1.fit(A, Y)
    s1 = m1.score(A, Y)
    m2 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16)
    m2.fit(A, Y)
    #Fitting m2 should not have changed the score of m1
    s2 = m1.score(A, Y)
    if s1 != s2:
        return False
    return True
def T14():
    '''
    Tests saving and restoring a model
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis = 1) ** 2).reshape(-1, 1)
    m1 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16, name = 't12ann1')
    m1.fit(A, Y)
    m1.SaveModel('./t12ann1')
    R1 = m1.GetWeightMatrix(0)
    ANN.Reset()
    m1 = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16, name = 't12ann2')
    m1.RestoreModel('./', 't12ann1')
    R2 = m1.GetWeightMatrix(0)
    if (R1 != R2).any():
        return False
    return True
def regression(year, predyear, consumption, tolerance):
    input = 1
    output = 1
    hidden = 50
    layers = [('F', hidden), ('AF', 'tanh'), ('F', hidden), ('AF', 'tanh'),
              ('F', hidden), ('AF', 'tanh'), ('F', hidden), ('AF', 'tanh'),
              ('F', output)]
    mlpr = ANNR([input], layers, batchSize=64, maxIter=25000, tol=tolerance,
                reg=1e-4, verbose=True)
    holdYears = 5
    totalYears = len(year)
    mlpr.fit(year[0:(totalYears - holdYears)], consumption[0:(totalYears - holdYears)])
    predicted = mlpr.predict(predyear)
    return predicted
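# A minimal usage sketch for regression() above; the yearly consumption numbers
# are hypothetical, and, as elsewhere in these snippets, the inputs are assumed to
# be scaled column vectors:
import numpy as np
from sklearn.preprocessing import scale

year = scale(np.arange(2000, 2020, dtype=float)).reshape(-1, 1)
consumption = scale(np.linspace(300.0, 420.0, 20)).reshape(-1, 1)
predyear = year[-5:]  # predict over the held-out years
predicted = regression(year, predyear, consumption, tolerance=0.2)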
def TrainCNN(self, epochs=500, learning_rate=2e-5):
    #creates the model, trains it for the given number of epochs, saves it
    print('Training CNN...')
    #2 1-D conv layers with relu followed by 1-d conv output layer
    networkArchitecture = [
        ('C1d', [8, self.number_of_features, self.number_of_features * 2], 4),
        ('AF', 'relu'),
        ('C1d', [8, self.number_of_features * 2, self.number_of_features * 2], 2),
        ('AF', 'relu'),
        ('C1d', [8, self.number_of_features * 2, self.number_of_features], 2)
    ]
    cnnr = ANNR(self.X_train[0].shape, networkArchitecture, batchSize=self.batch_size,
                learnRate=learning_rate, maxIter=epochs, reg=1e-5, tol=1e-5, verbose=True)
    cnnr.fit(self.X_train, self.y_train)
    f = self._dataFolderTensorFlowModels + self.modelName + '.meta'
    #The trailing 'or True' makes this condition always true, so the model is always saved
    if epochs > 5 or not os.path.isfile(f) or True:
        print('Saving model...')
        saver = tf.train.Saver()
        saver.save(cnnr.GetSes(), self._dataFolderTensorFlowModels + self.modelName)
    print('Logits: ', cnnr.O[-1])
    print('Inputs: ', cnnr.X)
    #print('All TF: ', tf.trainable_variables)
    self.PredictCNN(cnnr)
    cnnr.Reset()
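# A minimal sketch of reloading the checkpoint written by TrainCNN, assuming the
# TensorFlow 1.x Saver API used above; cnnr is assumed to be a freshly constructed
# ANNR with the same architecture, so the same graph variables exist:
import tensorflow as tf

def RestoreCNN(cnnr, model_dir, model_name):
    saver = tf.train.Saver()
    #Restore variable values into the session backing the TFANN model
    saver.restore(cnnr.GetSes(), model_dir + model_name)
    return cnnr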
def nn_train_OLD(self, dates, prices):
    input = 1
    output = 1
    hidden = 50
    #array of layers, 3 hidden and 1 output, along with the tanh activation function
    layers = [('F', hidden), ('AF', 'tanh'), ('F', hidden), ('AF', 'tanh'),
              ('F', hidden), ('AF', 'tanh'), ('F', output)]
    #construct the model and dictate params
    mlpr = ANNR([input], layers, batchSize=256, maxIter=20000, tol=0.2, reg=1e-4, verbose=True)
    #number of days for the hold-out period used to assess progress
    holdDays = 5
    totalDays = len(dates)
    #fit the model to the data ("learning")
    mlpr.fit(dates[0:(totalDays - holdDays)], prices[0:(totalDays - holdDays)])
def T1():
    '''
    Tests basic functionality of ANNR
    '''
    A = np.random.rand(32, 4)
    Y = np.random.rand(32, 1)
    a = ANNR([4], [('F', 4), ('AF', 'tanh'), ('F', 1)], maxIter = 16, name = 'mlpr1')
    a.fit(A, Y)
    S = a.score(A, Y)
    if np.isnan(S):
        return False
    YH = a.predict(A)
    if Y.shape != YH.shape:
        return False
    return True
def T7():
    '''
    Tests basic functionality of a convolutional ANNR
    '''
    A = np.random.rand(32, 9, 9, 3)
    Y = np.random.rand(32, 1)
    ws = [('C', [3, 3, 3, 4], [1, 1, 1, 1]), ('AF', 'relu'),
          ('P', [1, 4, 4, 1], [1, 2, 2, 1]), ('F', 16), ('AF', 'tanh'), ('F', 1)]
    a = ANNR([9, 9, 3], ws, maxIter = 12, name = "cnnr1")
    a.fit(A, Y)
    S = a.score(A, Y)
    if np.isnan(S):
        return False
    YH = a.predict(A)
    if Y.shape != YH.shape:
        return False
    return True
while int(h / k) > 1:
    layers.append(('F', int(h / k)))
    layers.append(('AF', 'relu6'))
    k *= 2
layers.append(('F', o))
#"""
layers = [('F', int(h)), ('AF', 'tanh'), ('F', int(h / 2)), ('AF', 'tanh'),
          ('F', int(h / 4)), ('AF', 'tanh'), ('F', int(h / 8)), ('AF', 'tanh'),
          ('F', int(h / 16)), ('AF', 'tanh'), ('F', int(h / 16)), ('AF', 'tanh'),
          ('F', int(h / 32)), ('AF', 'tanh'), ('F', int(h / 64)), ('AF', 'tanh'),
          ('F', o)]
# """
mlpr = ANNR([i], layers, batchSize=256, maxIter=100000, tol=0.05, reg=1e-4,
            verbose=True, name='Stocker')
mlpr.RestoreModel('model/', mlpr.name)
#Begin prediction
yHat = mlpr.predict(A)
#Undo the scaling before plotting
y = scaler.inverse_transform(y)
A = scaler.inverse_transform(A)
yHat = scaler.inverse_transform(yHat)
#Plot the results
mpl.plot(A[-20:-future_n], y[-(20 - future_n):], c='#b0403f', label='Stock value')
#%%Architecture of the neural network
from TFANN import ANNR

NC = B.shape[2]
#2 1-D conv layers with relu followed by 1-d conv output layer
ns = [('C1d', [8, NC, NC * 2], 4), ('AF', 'relu'),
      ('C1d', [8, NC * 2, NC * 2], 2), ('AF', 'relu'),
      ('C1d', [8, NC * 2, NC], 2)]
#Create the neural network in TensorFlow
cnnr = ANNR(B[0].shape, ns, batchSize=32, learnRate=2e-5, maxIter=64,
            reg=1e-5, tol=1e-2, verbose=True)
cnnr.fit(B, Y)
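# The strides in the architecture above (4, 2, 2) shrink the time dimension by a
# factor of 16, which is what maps the 256-step input windows onto the 16-step
# output windows used in the related snippets. A small check, assuming SAME
# padding so each layer emits ceil(n / stride) steps:
from math import ceil

def conv1d_out_len(n, strides=(4, 2, 2)):
    for s in strides:
        n = ceil(n / s)
    return n

assert conv1d_out_len(256) == 16  # NPS = 256 past samples -> NFS = 16 future samples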
dates = stock_data[:, 0].reshape(-1, 1)
#creates a plot of the data and then displays it
mpl.plot(dates[:, 0], prices[:, 0])
mpl.show()
#Number of neurons in the input, output, and hidden layers
input = 1
output = 1
hidden = 50
#array of layers, 3 hidden and 1 output, along with the tanh activation function
layers = [('F', hidden), ('AF', 'tanh'), ('F', hidden), ('AF', 'tanh'),
          ('F', hidden), ('AF', 'tanh'), ('F', output)]
#construct the model and dictate params
mlpr = ANNR([input], layers, batchSize = 256, maxIter = 20000, tol = 0.2, reg = 1e-4, verbose = True)
#number of days for the hold-out period used to assess progress
holdDays = 5
totalDays = len(dates)
#fit the model to the data ("learning")
mlpr.fit(dates[0:(totalDays - holdDays)], prices[0:(totalDays - holdDays)])
#Predict the stock price using the model
pricePredict = mlpr.predict(dates)
#Display the predicted results against the actual data
mpl.plot(dates, prices)
mpl.plot(dates, pricePredict, c='#5aa9ab')
mpl.show()
print(dates)
plt.plot(dates[:, 0], prices[:, 0])
plt.show()
#Number of neurons in the input, output, and hidden layers
input2 = 1
output2 = 1
hidden2 = 50
#array of layers, 3 hidden and 1 output, along with the tanh activation function
layers = [('F', hidden2), ('AF', 'tanh'), ('F', hidden2), ('AF', 'tanh'),
          ('F', hidden2), ('AF', 'tanh'), ('F', output2)]
#construct the model and dictate params
mlpr = ANNR([input2], layers, batchSize=256, maxIter=20000, tol=0.1, reg=1e-4, verbose=True)
#number of days for the hold-out period used to assess progress
holdDays = 5
totalDays = len(dates)
#fit the model to the data ("learning")
mlpr.fit(dates[0:(totalDays - holdDays)], prices[0:(totalDays - holdDays)])
#Predict the stock price using the model
pricePredict = mlpr.predict(dates)
#Display the predicted results against the actual data
plt.plot(dates, prices)
plt.plot(dates, pricePredict, c='#5aa9ab')
plt.show()
def Main():
    if len(sys.argv) <= 1:
        return
    A, Y = GenerateData(ns=2048)
    #Create layer sizes; make 6 layers of nf neurons followed by a single output neuron
    L = [A.shape[1]] * 6 + [1]
    print('Layer Sizes: ' + str(L))
    if sys.argv[1] == 'theano':
        print('Running theano benchmark.')
        from TheanoANN import TheanoMLPR
        #Create the Theano MLP
        tmlp = TheanoMLPR(L, batchSize=128, learnRate=1e-5, maxIter=100, tol=1e-3, verbose=True)
        MakeBenchDataSample(tmlp, A, Y, 16, 'TheanoSampDat.csv')
        print('Done. Data written to TheanoSampDat.csv.')
    if sys.argv[1] == 'theanogpu':
        print('Running theano GPU benchmark.')
        #Set optional flags for the GPU
        #Environment flags need to be set before importing theano
        os.environ["THEANO_FLAGS"] = "device=gpu"
        from TheanoANN import TheanoMLPR
        #Create the Theano MLP
        tmlp = TheanoMLPR(L, batchSize=128, learnRate=1e-5, maxIter=100, tol=1e-3, verbose=True)
        MakeBenchDataSample(tmlp, A, Y, 16, 'TheanoGPUSampDat.csv')
        print('Done. Data written to TheanoGPUSampDat.csv.')
    if sys.argv[1] == 'tensorflow':
        print('Running tensorflow benchmark.')
        #Create the Tensorflow model
        NA = [('F', A.shape[1]), ('AF', 'tanh')] * 6 + [('F', 1)]
        mlpr = ANNR([A.shape[1]], NA, batchSize=128, learnRate=1e-5, maxIter=100, tol=1e-3, verbose=True)
        MakeBenchDataSample(mlpr, A, Y, 16, 'TfSampDat.csv')
        print('Done. Data written to TfSampDat.csv.')
    if sys.argv[1] == 'plot':
        print('Displaying results.')
        try:
            T1 = np.loadtxt('TheanoSampDat.csv', delimiter=',', skiprows=1)
        except OSError:
            T1 = None
        try:
            T2 = np.loadtxt('TfSampDat.csv', delimiter=',', skiprows=1)
        except OSError:
            T2 = None
        try:
            T3 = np.loadtxt('TheanoGPUSampDat.csv', delimiter=',', skiprows=1)
        except OSError:
            T3 = None
        fig, ax = mpl.subplots(1, 2)
        if T1 is not None:
            PlotBenchmark(T1[:, 0], T1[:, 1], ax[0], '# Samples', 'Train', 'Theano')
            PlotBenchmark(T1[:, 0], T1[:, 2], ax[1], '# Samples', 'Test', 'Theano')
        if T2 is not None:
            PlotBenchmark(T2[:, 0], T2[:, 1], ax[0], '# Samples', 'Train', 'Tensorflow')
            PlotBenchmark(T2[:, 0], T2[:, 2], ax[1], '# Samples', 'Test', 'Tensorflow')
        if T3 is not None:
            PlotBenchmark(T3[:, 0], T3[:, 1], ax[0], '# Samples', 'Train', 'Theano GPU')
            PlotBenchmark(T3[:, 0], T3[:, 2], ax[1], '# Samples', 'Test', 'Theano GPU')
        mpl.show()
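# GenerateData is called by Main but not defined in this snippet. A minimal sketch
# consistent with how A and Y are used above (ns samples of a nonlinear regression
# target); the feature count nf = 4 is an assumption:
import numpy as np

def GenerateData(ns=2048, nf=4):
    A = np.random.rand(ns, nf)
    Y = (A.sum(axis=1) ** 2).reshape(-1, 1)  # same target used in the T* tests
    return A, Y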
while int(h / k) > 1:
    layers.append(('F', int(h / k)))
    layers.append(('AF', 'relu6'))
    k *= 2
layers.append(('F', o))
#"""
layers = [('F', int(h)), ('AF', 'tanh'), ('F', int(h / 2)), ('AF', 'tanh'),
          ('F', int(h / 4)), ('AF', 'tanh'), ('F', int(h / 8)), ('AF', 'tanh'),
          ('F', int(h / 16)), ('AF', 'tanh'), ('F', int(h / 16)), ('AF', 'tanh'),
          ('F', int(h / 32)), ('AF', 'tanh'), ('F', int(h / 64)), ('AF', 'tanh'),
          ('F', o)]
# """
mlpr = ANNR([i], layers, batchSize=256, maxIter=100000, tol=0.05, reg=1e-4,
            verbose=True, name='Stocker')
#Learn the data
mlpr.fit(A[0:(n - nDays)], y[0:(n - nDays)])
#save the model
mlpr.SaveModel('model/' + mlpr.name)
#Begin prediction
yHat = mlpr.predict(A)
#Plot the results
mpl.plot(A[-20:], y[-20:], c='#b0403f')
mpl.plot(A[-20:], yHat[-20:], c='#5aa9ab')
mpl.show()
def T15():
    '''
    Tests setting weight matrices and bias vectors
    '''
    m1 = ANNR([4], [('F', 4), ('F', 2)], maxIter = 16, name = 't12ann1')
    m1.SetWeightMatrix(0, np.zeros((4, 4)))
    m1.SetBias(0, np.zeros(4))
    m1.SetWeightMatrix(1, np.zeros((4, 2)))
    m1.SetBias(1, np.zeros(2))
    YH = m1.predict(np.random.rand(16, 4))
    if (YH != 0).any():
        return False
    m1.SetWeightMatrix(0, np.ones((4, 4)))
    m1.SetBias(0, np.ones(4))
    m1.SetWeightMatrix(1, np.ones((4, 2)))
    m1.SetBias(1, np.ones(2))
    #With all-ones input, weights, and biases: layer 0 gives 4 * 1 + 1 = 5 per unit,
    #so layer 1 gives 5 * 4 + 1 = 21
    YH = m1.predict(np.ones((16, 4)))
    if np.abs(YH - 21).max() > 1e-5:
        print(np.abs(YH - 21).max())
        return False
    return True
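# A small harness for running the T* tests above; the list of test functions is an
# assumption based on the tests present in these snippets (T12 is placed before
# T13 because T13 restores the model T12 saves):
def RunTests():
    tests = [T1, T7, T8, T9, T12, T13, T14, T15]
    for Ti in tests:
        print('{:4s}: {}'.format(Ti.__name__, 'PASS' if Ti() else 'FAIL'))

if __name__ == '__main__':
    RunTests()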
def get_fund_expected_growth(fund_name):
    #reads data from the file and creates a matrix with only the dates and the prices
    base_path = r'C:\Users\aadmohan\Desktop\Howathon'
    final_path = os.path.join(base_path, fund_name + ".csv")
    stock_data = np.loadtxt(final_path, delimiter=",", skiprows=1, usecols=(1, 4))
    list_prices = stock_data[:, 1]
    list_date = stock_data[:, 0]
    last_recorded_price, mean_prices, std_dv_prices = calculate_mean_std_dv_n_last_value(list_prices)
    last_recorded_date, mean_date, std_dv_date = calculate_mean_std_dv_n_last_value(list_date)
    distance_from_mean_of_future_date = get_distance_from_mean_of_future_date(
        last_recorded_date, mean_date, std_dv_date)
    #scales the data to smaller values
    stock_data = scale(stock_data)
    #gets the prices and dates from the matrix
    prices = stock_data[:, 1].reshape(-1, 1)
    dates = stock_data[:, 0].reshape(-1, 1)
    # #creates a plot of the data and then displays it
    # mpl.plot(dates[:, 0], prices[:, 0])
    # mpl.show()
    #Number of neurons in the input, output, and hidden layers
    input = 1
    output = 1
    hidden = 50
    #array of layers, 3 hidden and 1 output, along with the tanh activation function
    layers = [('F', hidden), ('AF', 'tanh'), ('F', hidden), ('AF', 'tanh'),
              ('F', hidden), ('AF', 'tanh'), ('F', output)]
    #construct the model and dictate params
    mlpr = ANNR([input], layers, batchSize=256, maxIter=2000, tol=0.2, reg=1e-4, verbose=True)
    #number of days for the hold-out period used to assess progress
    holdDays = 5
    totalDays = len(dates)
    #fit the model to the data ("learning")
    mlpr.fit(dates[0:(totalDays - holdDays)], prices[0:(totalDays - holdDays)])
    data = {'Predicted_date': [distance_from_mean_of_future_date]}
    df = pd.DataFrame(data)
    #Predict the scaled price on the future date using the model
    #pricePredict = mlpr.predict(dates)
    predicted_price_in_distance_from_mean = mlpr.predict(df)
    #Undo the scaling to recover the price in original units
    predicted_price = mean_prices + predicted_price_in_distance_from_mean[0] * std_dv_prices
    expected_growth_on_predicted_date = get_expected_growth_on_predicted_date(
        predicted_price, last_recorded_price)
    print("expected_growth_on_predicted_date", expected_growth_on_predicted_date)
    #Display the predicted results against the actual data
    # mpl.plot(dates, prices)
    # mpl.plot(dates, pricePredict, c='#5aa9ab')
    # mpl.show()
    return expected_growth_on_predicted_date
def Main(args):
    if(len(args) != 3 and len(args) != 4):
        PrintUsage()
        return
    #Test if file exists
    try:
        open(args[0])
    except Exception as e:
        print('Error opening file: ' + args[0])
        print(str(e))
        PrintUsage()
        return
    #Test validity of start date string
    try:
        datetime.strptime(args[1], '%m/%d/%Y').timestamp()
    except Exception as e:
        print(e)
        print('Error parsing date: ' + args[1])
        PrintUsage()
        return
    #Test validity of end date string
    try:
        datetime.strptime(args[2], '%m/%d/%Y').timestamp()
    except Exception as e:
        print('Error parsing date: ' + args[2])
        PrintUsage()
        return
    #Test validity of final optional argument
    if(len(args) == 4):
        predPrd = args[3].upper()
        if(predPrd == 'D'):
            predPrd = 'daily'
        elif(predPrd == 'W'):
            predPrd = 'weekly'
        elif(predPrd == 'M'):
            predPrd = 'monthly'
        else:
            PrintUsage()
            return
    else:
        predPrd = 'daily'
    #Everything looks okay; proceed with program
    #Grab the data frame
    D = ParseData(args[0])
    #The number of previous days of data used when making a prediction
    numPastDays = 16
    PlotData(D)
    #Number of neurons in the input layer
    i = numPastDays * 7 + 1
    #Number of neurons in the output layer
    o = D.shape[1] - 1
    #Number of neurons in the hidden layers
    h = int((i + o) / 2)
    #The list of layer sizes
    layers = [('F', h), ('AF', 'tanh'), ('F', h), ('AF', 'tanh'), ('F', o)]
    R = ANNR([i], layers, maxIter = 1000, tol = 0.01, reg = 0.001, verbose = True)
    #R = KNeighborsRegressor(n_neighbors = 5)
    sp = StockPredictor(R, nPastDays = numPastDays)
    #Learn the dataset and then display performance statistics
    sp.Learn(D)
    sp.TestPerformance()
    #Perform prediction for a specified date range
    P = sp.PredictDate(args[1], args[2], predPrd)
    #Keep track of number of predicted results for plot
    n = P.shape[0]
    #Append the predicted results to the actual results
    D = P.append(D)
    #Predicted results are the first n rows
    PlotData(D, range(n + 1))
    return (P, n)
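# PrintUsage is referenced above but not defined in this snippet. A minimal sketch
# matching the arguments Main validates (CSV path, start date, end date, optional
# period flag); the script name is hypothetical:
def PrintUsage():
    print('Usage: python predict.py <data.csv> <start MM/DD/YYYY> <end MM/DD/YYYY> [D|W|M]')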
def tfann_type_1():
    #%%Path to store cached currency data
    datPath = "CurDat/"
    if not os.path.exists(datPath):
        os.mkdir(datPath)
    #Different cryptocurrency types
    cl = ["BTC", "LTC", "ETH", "XMR"]
    #Columns of price data to use
    CN = ["close", "high", "low", "open", "volume"]
    #Store data frames for each of above types
    D = []
    for ci in cl:
        dfp = os.path.join(datPath, ci + ".csv")
        try:
            df = pd.read_csv(dfp, sep=",")
        except FileNotFoundError:
            df = GetCurDF(ci, dfp)
        D.append(df)
    #%%Only keep range of data that is common to all currency types
    cr = min(Di.shape[0] for Di in D)
    for i in range(len(cl)):
        D[i] = D[i][(D[i].shape[0] - cr):]
    #%%Features are channels
    C = np.hstack([Di[CN] for Di in D])[:, None, :]
    HP = 16                 #Holdout period
    A = C[0:-HP]
    SV = A.mean(axis=0)     #Scale vector
    C /= SV                 #Basic scaling of data
    #%%Make samples of temporal sequences of pricing data (channel)
    NPS, NFS = 256, 16      #Number of past and future samples
    ps = PastSampler(NPS, NFS)
    B, Y = ps.transform(A)
    #%%Architecture of the neural network
    NC = B.shape[2]
    #2 1-D conv layers with relu followed by 1-d conv output layer
    ns = [("C1d", [8, NC, NC * 2], 4), ("AF", "relu"),
          ("C1d", [8, NC * 2, NC * 2], 2), ("AF", "relu"),
          ("C1d", [8, NC * 2, NC], 2)]
    #Create the neural network in TensorFlow
    cnnr = ANNR(B[0].shape, ns, batchSize=32, learnRate=2e-5, maxIter=64,
                reg=1e-5, tol=1e-2, verbose=True)
    cnnr.fit(B, Y)
    PTS = []                #Predicted time sequences
    P, YH = B[[-1]], Y[[-1]]  #Most recent time sequence
    for i in range(HP // NFS):  #Repeat prediction
        P = np.concatenate([P[:, NFS:], YH], axis=1)
        YH = cnnr.predict(P)
        PTS.append(YH)
    PTS = np.hstack(PTS).transpose((1, 0, 2))
    A = np.vstack([A, PTS]) #Combine predictions with original data
    A = np.squeeze(A) * SV  #Remove unit time dimension and rescale
    C = np.squeeze(C) * SV
    #Plot the intermediate activations for a few samples
    nt = 4
    PF = cnnr.PredictFull(B[:nt])
    for i in range(nt):
        fig, ax = mpl.subplots(1, 4, figsize=(16 / 1.24, 10 / 1.25))
        ax[0].plot(PF[0][i])
        ax[0].set_title("Input")
        ax[1].plot(PF[2][i])
        ax[1].set_title("Layer 1")
        ax[2].plot(PF[4][i])
        ax[2].set_title("Layer 2")
        ax[3].plot(PF[5][i])
        ax[3].set_title("Output")
        fig.text(0.5, 0.06, "Time", ha="center")
        fig.text(0.06, 0.5, "Activation", va="center", rotation="vertical")
        mpl.show()
    CI = list(range(C.shape[0]))
    AI = list(range(C.shape[0] + PTS.shape[0] - HP))
    NDP = PTS.shape[0]      #Number of days predicted
    for i, cli in enumerate(cl):
        fig, ax = mpl.subplots(figsize=(16 / 1.5, 10 / 1.5))
        hind = i * len(CN) + CN.index("high")
        ax.plot(CI[-4 * HP:], C[-4 * HP:, hind], label="Actual")
        ax.plot(AI[-(NDP + 1):], A[-(NDP + 1):, hind], "--", label="Prediction")
        ax.legend(loc="upper left")
        ax.set_title(cli + " (High)")
        ax.set_ylabel("USD")
        ax.set_xlabel("Time")
        ax.axes.xaxis.set_ticklabels([])
        mpl.show()
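# PastSampler is used above but not defined in these snippets. A minimal sketch of
# a sliding-window sampler consistent with how B and Y are consumed (each sample
# pairs NPS past time steps with the NFS steps that follow); the implementation
# details are assumptions:
import numpy as np

class PastSampler:
    def __init__(self, N, K):
        self.N = N  #Number of past time steps per sample
        self.K = K  #Number of future time steps to predict

    def transform(self, A):
        M = self.N + self.K  #Total window length
        #Row i holds the indices of the window starting at time step i
        I = np.arange(M) + np.arange(A.shape[0] - M + 1).reshape(-1, 1)
        #Collapse the unit time dimension so samples are (window, channels)
        B = A[I].reshape(-1, M, A.shape[-1])
        return B[:, :self.N], B[:, self.N:]  #Past windows, future targets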
ps = PastSampler(NPS, NFS)
B, Y = ps.transform(A)
#%%Architecture of the neural network
from TFANN import ANNR

NC = B.shape[2]
#2 1-D conv layers with relu followed by 1-d conv output layer
ns = [('C1d', [8, NC, NC * 2], 4), ('AF', 'relu'),
      ('C1d', [8, NC * 2, NC * 2], 2), ('AF', 'relu'),
      ('C1d', [8, NC * 2, NC], 2)]
#Create the neural network in TensorFlow
cnnr = ANNR(B[0].shape, ns, batchSize=16, learnRate=2e-5, maxIter=64,
            reg=1e-5, tol=1e-2, verbose=True)
cnnr.fit(B, Y)
#prediction
PTS = []                    #Predicted time sequences
P = B[[-1]]                 #Most recent time sequence
for i in range(HP // NFS + 1):  #Repeat prediction
    YH = cnnr.predict(P)
    P = np.concatenate([P[:, NFS:], YH], axis=1)
    PTS.append(YH)
PTS = np.hstack(PTS).transpose((1, 0, 2))
A = np.vstack([A, PTS])     #Combine predictions with original data
A = np.squeeze(A) * SV      #Remove unit time dimension and rescale