def T13():
    '''
    Tests restoring a model from file
    '''
    m1 = MLPR([4, 4, 1], maxIter=16, name='t12ann1')
    rv = m1.RestoreModel('./', 't12ann1')
    return rv
def T12():
    '''
    Tests saving a model to file
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis=1) ** 2).reshape(-1, 1)
    m1 = MLPR([4, 4, 1], maxIter=16, name='t12ann1')
    m1.fit(A, Y)
    m1.SaveModel('./t12ann1')
    return True
def T9():
    '''
    Tests if multiple MLPRs can be created without affecting each other
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis=1) ** 2).reshape(-1, 1)
    m1 = MLPR([4, 4, 1], maxIter=16)
    m1.fit(A, Y)
    s1 = m1.score(A, Y)
    m2 = MLPR([4, 4, 1], maxIter=16)
    m2.fit(A, Y)
    #Score m1 again; fitting m2 should not have changed m1
    s2 = m1.score(A, Y)
    if s1 != s2:
        return False
    return True
def T8():
    '''
    Tests if multiple MLPRs can be created without affecting each other
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis=1) ** 2).reshape(-1, 1)
    m1 = MLPR([4, 4, 1], maxIter=16)
    m1.fit(A, Y)
    R1 = m1.GetWeightMatrix(0)
    m2 = MLPR([4, 4, 1], maxIter=16)
    m2.fit(A, Y)
    #Read m1's weights again; fitting m2 should not have changed them
    R2 = m1.GetWeightMatrix(0)
    if (R1 != R2).any():
        return False
    return True
def T14():
    '''
    Tests saving and restoring a model
    '''
    A = np.random.rand(32, 4)
    Y = (A.sum(axis=1) ** 2).reshape(-1, 1)
    m1 = MLPR([4, 4, 1], maxIter=16, name='t12ann1')
    m1.fit(A, Y)
    m1.SaveModel('./t12ann1')
    R1 = m1.GetWeightMatrix(0)
    ANN.Reset()
    m1 = MLPR([4, 4, 1], maxIter=16, name='t12ann2')
    m1.RestoreModel('./', 't12ann1')
    R2 = m1.GetWeightMatrix(0)
    if (R1 != R2).any():
        return False
    return True
def T1():
    '''
    Tests basic functionality of MLPR
    '''
    A = np.random.rand(32, 4)
    Y = np.random.rand(32, 1)
    a = MLPR([4, 4, 1], maxIter=16, name='mlpr1')
    a.fit(A, Y)
    a.score(A, Y)
    a.predict(A)
    return True
def Main():
    if len(sys.argv) <= 1:
        return
    A, Y = GenerateData(ns = 2048)
    #Create layer sizes; make 6 layers of A.shape[1] neurons followed by a single output neuron
    L = [A.shape[1]] * 6 + [1]
    print('Layer Sizes: ' + str(L))
    if sys.argv[1] == 'theano':
        print('Running theano benchmark.')
        from TheanoANN import TheanoMLPR
        #Create the Theano MLP
        tmlp = TheanoMLPR(L, batchSize = 128, learnRate = 1e-5, maxIter = 100, tol = 1e-3, verbose = True)
        MakeBenchDataSample(tmlp, A, Y, 16, 'TheanoSampDat.csv')
        print('Done. Data written to TheanoSampDat.csv.')
    if sys.argv[1] == 'theanogpu':
        print('Running theano GPU benchmark.')
        #Set optional flags for the GPU
        #Environment flags need to be set before importing theano
        os.environ["THEANO_FLAGS"] = "device=gpu"
        from TheanoANN import TheanoMLPR
        #Create the Theano MLP
        tmlp = TheanoMLPR(L, batchSize = 128, learnRate = 1e-5, maxIter = 100, tol = 1e-3, verbose = True)
        MakeBenchDataSample(tmlp, A, Y, 16, 'TheanoGPUSampDat.csv')
        print('Done. Data written to TheanoGPUSampDat.csv.')
    if sys.argv[1] == 'tensorflow':
        print('Running tensorflow benchmark.')
        from TFANN import MLPR
        #Create the Tensorflow model
        mlpr = MLPR(L, batchSize = 128, learnRate = 1e-5, maxIter = 100, tol = 1e-3, verbose = True)
        MakeBenchDataSample(mlpr, A, Y, 16, 'TfSampDat.csv')
        print('Done. Data written to TfSampDat.csv.')
    if sys.argv[1] == 'plot':
        print('Displaying results.')
        try:
            T1 = np.loadtxt('TheanoSampDat.csv', delimiter = ',', skiprows = 1)
        except OSError:
            T1 = None
        try:
            T2 = np.loadtxt('TfSampDat.csv', delimiter = ',', skiprows = 1)
        except OSError:
            T2 = None
        try:
            T3 = np.loadtxt('TheanoGPUSampDat.csv', delimiter = ',', skiprows = 1)
        except OSError:
            T3 = None
        fig, ax = mpl.subplots(1, 2)
        if T1 is not None:
            PlotBenchmark(T1[:, 0], T1[:, 1], ax[0], '# Samples', 'Train', 'Theano')
            PlotBenchmark(T1[:, 0], T1[:, 2], ax[1], '# Samples', 'Test', 'Theano')
        if T2 is not None:
            PlotBenchmark(T2[:, 0], T2[:, 1], ax[0], '# Samples', 'Train', 'Tensorflow')
            PlotBenchmark(T2[:, 0], T2[:, 2], ax[1], '# Samples', 'Test', 'Tensorflow')
        if T3 is not None:
            PlotBenchmark(T3[:, 0], T3[:, 1], ax[0], '# Samples', 'Train', 'Theano GPU')
            PlotBenchmark(T3[:, 0], T3[:, 2], ax[1], '# Samples', 'Test', 'Theano GPU')
        mpl.show()
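#Main() above relies on MakeBenchDataSample and PlotBenchmark, which are defined elsewhere
#in the benchmark script. The helper below is only a minimal sketch of what the sampling
#step could look like, assuming the CSV layout implied by the 'plot' branch (a header row,
#then rows of number of samples, train time, test time); the real helper may differ.
import time

def MakeBenchDataSampleSketch(model, A, Y, nSteps, csvPath):
    rows = []
    for ns in np.linspace(A.shape[0] // nSteps, A.shape[0], nSteps).astype(int):
        st = time.time()
        model.fit(A[:ns], Y[:ns])               #Time the training pass on the first ns samples
        trainTime = time.time() - st
        st = time.time()
        model.predict(A[:ns])                   #Time the prediction pass
        testTime = time.time() - st
        rows.append((ns, trainTime, testTime))
    np.savetxt(csvPath, np.array(rows), delimiter = ',', header = 'nSamples,train,test', comments = '')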
#Configuration of a deep MLPR: a single input, a single output, and stacked hidden layers of 32 neurons
#Number of neurons in the input layer
i = 1
#Number of neurons in the output layer
o = 1
#Number of neurons in the hidden layers
h = 32
#The list of layer sizes
layers = [i, h, h, h, h, h, h, h, h, h, o]
mlpr = MLPR(layers, maxIter = 1000, tol = 0.40, reg = 0.001, verbose = True)
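#Illustration only (not part of the original snippet): exercising the MLPR configured
#above on synthetic 1-D data to show the usual fit/score/predict cycle.
Ax = np.linspace(0, 2 * np.pi, 256).reshape(-1, 1)      #Single input feature
Yx = np.sin(Ax)                                         #Single regression target
mlpr.fit(Ax, Yx)
print('Score: {:f}'.format(mlpr.score(Ax, Yx)))
yHatx = mlpr.predict(Ax)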
class TargetingSystem:

    def __init__(self, m, n, ss, sb, cp, train = False):
        '''
        m:  Number of rows
        n:  Number of cols
        ss: Screen size (x, y)
        sb: Screen border (left, top, right, bottom) (images passed are already cropped using this border)
        cp: Character position (x, y)
        '''
        self.S = None           #Good screen cells
        self.SC = np.array([    #(row, col) indices of the good screen cells
            [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6], [0, 7], [0, 8],
            [1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6], [1, 7], [1, 8],
            [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7], [2, 8],
            [3, 0], [3, 1], [3, 2], [3, 3], [3, 4], [3, 5], [3, 6], [3, 7], [3, 8],
            [4, 0], [4, 1], [4, 2], [4, 3], [4, 4], [4, 5], [4, 6], [4, 7], [4, 8],
            [5, 0], [5, 1], [5, 2], [5, 3], [5, 4], [5, 5], [5, 6], [5, 7], [5, 8],
            [6, 3], [6, 4], [6, 5]])
        #self.GCLU = dict(zip(self.SC, range(len(self.SC)))) #Lookup for good cells to indices
        self.GCLU = np.array([  #Lookup from flattened cell index to good-cell index (-1: not a good cell)
            -1,  0,  1,  2,  3,  4,  5,  6,  7,
             8,  9, 10, 11, 12, 13, 14, 15, 16,
            17, 18, 19, 20, 21, 22, 23, 24, 25,
            26, 27, 28, 29, 30, 31, 32, 33, 34,
            35, 36, 37, 38, 39, 40, 41, 42, 43,
            44, 45, 46, 47, 48, 49, 50, 51, 52,
            -1, -1, -1, 53, 54, 55, -1, -1, -1])
        self.GSC = np.array([   #Flattened indices of the good cells in the screen grid
             1,  2,  3,  4,  5,  6,  7,  8,
             9, 10, 11, 12, 13, 14, 15, 16, 17,
            18, 19, 20, 21, 22, 23, 24, 25, 26,
            27, 28, 29, 30, 31, 32, 33, 34, 35,
            36, 37, 38, 39, 40, 41, 42, 43, 44,
            45, 46, 47, 48, 49, 50, 51, 52, 53,
            57, 58, 59])
        self.NSS = self.GSC.shape[0]
        self.YH = None              #Latest predictions for screen input
        self.m, self.n = m, n       #Number of rows/columns in screen division for CNN prediction
        self.ss = (ss[0] - sb[0] - sb[2], ss[1] - sb[1] - sb[3])    #Actual screen size is original size minus borders
        self.sb = sb                #Screen border (left, top, right, bottom)
        self.cs = (self.ss[0] // self.n, self.ss[1] // self.m)      #Cell size in pixels (x, y)
        self.cp = cp                #Character position in pixels (x, y)
        self.cc = np.zeros([self.m * self.n, 2])                    #Center of prediction cell (i, j) in pixels (x, y)
        for i in range(self.m):
            for j in range(self.n):
                self.cc[i * self.n + j] = (sb[0] + (self.cs[0] // 2) * (2 * j + 1), sb[1] + (self.cs[1] // 2) * (2 * i + 1))
        self.train = train          #Force train will train the model further even if a saved one exists
        if train:
            self.CreateTFGraphTrain()
        else:
            self.CreateTFGraphTest()

    def CellCorners(self):
        '''
        Gets the top left corners of the CNN prediction cells in pixels (x, y)
        '''
        return np.mgrid[self.sb[0]:(self.ss[0] + self.sb[0] + 1):self.cs[0], self.sb[1]:(self.ss[1] + self.sb[1] + 1):self.cs[1]].reshape(2, -1).T

    def CellLookup(self, c):
        ci = self.GCLU[np.multiply(c, np.array([self.n, 1])).sum(axis = 1)]
        nnci = np.nonzero(ci >= 0)[0]
        return self.YH[ci[nnci]], nnci

    def CellRectangle(self, c):
        '''
        Gets the pixel values of the rectangle of the cell at index (i, j)
        Return (left, top, right, bottom)
        '''
        return (self.cs[0] * c[1] + self.sb[0], self.cs[1] * c[0] + self.sb[1],
                self.cs[0] * (c[1] + 1) + self.sb[0], self.cs[1] * (c[0] + 1) + self.sb[1])

    def CharPos(self):
        '''
        Gets the character's position on the screen
        '''
        return self.cp

    def CreateTFGraphTest(self):
        #TensorFlow graph with 4 CNN classifiers and 2 MLP regressors
        #Architecture of the CNN classifiers
        ws = [('C', [3, 3, 3, 10], [1, 1, 1, 1]), ('P', [1, 4, 4, 1], [1, 2, 2, 1]),
              ('C', [3, 3, 10, 5], [1, 1, 1, 1]), ('P', [1, 4, 4, 1], [1, 2, 2, 1]),
              ('F', 32), ('F', 16), ('F', 2)]
        ims = [self.ss[1] // self.m, self.ss[0] // self.n, 3]   #Image size for CNN model
        hmcIms = 30 * 100 * 3                                   #Number of pixels in health/mana checker images
        self.I1 = tf.placeholder("float", [self.NSS] + ims, name = 'S_I1')  #Previous image placeholder
        self.I2 = tf.placeholder("float", [self.NSS] + ims, name = 'S_I2')  #Current image placeholder
        self.TV = tf.placeholder("float", [self.NSS, 2], name = 'S_TV')     #Target values for binary classifiers
        self.LWI = tf.placeholder("float", [2] + ims, name = 'S_LWI')
        self.LWTV = tf.placeholder("float", [2, 2], name = 'S_LWTV')
        self.HRI = tf.placeholder("float", [1, hmcIms], name = 'S_HRI')
        self.MRI = tf.placeholder("float", [1, hmcIms], name = 'S_MRI')
        self.RTV = tf.placeholder("float", [1, 1], name = 'S_RTV')
        Z = tf.zeros([self.NSS] + ims, name = "S_Z")            #Completely black grid of image cells
        wcnd = tf.abs(self.I1 - self.I2) > 16                   #Where condition
        ID = tf.where(wcnd, self.I2, Z, name = 'S_ID')          #Difference between images
        carg = {'batchSize': self.NSS, 'learnRate': 1e-3, 'maxIter': 2, 'reg': 6e-4, 'tol': 1e-2, 'verbose': True}
        #Used to detect obstacles
        self.OC = CNNC(ims, ws, name = 'obcnn', X = self.I2, Y = self.TV, **carg)
        self.OC.RestoreClasses(['C', 'O'])
        #Used to detect enemies
        self.EC = CNNC(ims, ws, name = 'encnn', X = self.I2, Y = self.TV, **carg)
        self.EC.RestoreClasses(['N', 'E', 'I'])
        #CNN for detecting movement
        self.MC = CNNC(ims, ws, name = 'mvcnn', X = ID, Y = self.TV, **carg)
        self.MC.RestoreClasses(['Y', 'N'])
        #Classifier for lightning-warp
        self.LC = CNNC(ims, ws, name = 'lwcnn', X = self.LWI, Y = self.LWTV, **carg)
        self.LC.RestoreClasses(['Y', 'N'])
        #Regressors for the health and mana bar checkers
        self.HR = MLPR([hmcIms, 1], name = 'hrmlp', X = self.HRI, Y = self.RTV, **carg)
        self.MR = MLPR([hmcIms, 1], name = 'mrmlp', X = self.MRI, Y = self.RTV, **carg)
        if not self.Restore():
            print('Model could not be loaded.')
        self.TFS = self.LC.GetSes()

    def CreateTFGraphTrain(self):
        #TensorFlow graph with 4 CNN classifiers and 2 MLP regressors
        #Architecture of the CNN classifiers
        ws = [('C', [3, 3, 3, 10], [1, 1, 1, 1]), ('P', [1, 4, 4, 1], [1, 2, 2, 1]),
              ('C', [3, 3, 10, 5], [1, 1, 1, 1]), ('P', [1, 4, 4, 1], [1, 2, 2, 1]),
              ('F', 32), ('F', 16), ('F', 2)]
        ims = [self.ss[1] // self.m, self.ss[0] // self.n, 3]   #Images from skimage are of shape (height, width, 3)
        hmcIms = 30 * 100 * 3                                   #Number of pixels in health/mana checker images
        carg = {'batchSize': 40, 'learnRate': 1e-3, 'maxIter': 40, 'reg': 6e-4, 'tol': 25e-3, 'verbose': True}
        #Used to detect obstacles
        self.OC = CNNC(ims, ws, name = 'obcnn', **carg)
        self.OC.RestoreClasses(['C', 'O'])
        #Used to detect enemies
        self.EC = CNNC(ims, ws, name = 'encnn', **carg)
        self.EC.RestoreClasses(['N', 'E', 'I'])
        #CNN for detecting movement
        self.MC = CNNC(ims, ws, name = 'mvcnn', **carg)
        self.MC.RestoreClasses(['Y', 'N'])
        #Classifier for lightning-warp
        self.LC = CNNC(ims, ws, name = 'lwcnn', **carg)
        self.LC.RestoreClasses(['Y', 'N'])
        #Regressors for the health and mana bar checkers
        self.HR = MLPR([hmcIms, 1], maxIter = 0, name = 'hrmlp')
        self.MR = MLPR([hmcIms, 1], maxIter = 0, name = 'mrmlp')
        if not self.Restore():
            print('Model could not be loaded.')
        self.TFS = self.LC.GetSes()
        self.DIM = LoadDataset()
        self.FitCModel(self.OC, {'Closed/Dried Lake': 'C', 'Closed/Oasis': 'C', 'Open/Dried Lake': 'O', 'Open/Oasis': 'O', 'Enemy/Dried Lake': 'O', 'Enemy/Oasis': 'O'})
        self.FitCModel(self.EC, {'Open/Dried Lake': 'N', 'Open/Oasis': 'N', 'Enemy/Dried Lake': 'Y', 'Enemy/Oasis': 'Y', 'Item/Dried Lake': 'N'})
        self.FitCModel(self.MC, {'Move/Dried Lake': 'Y', 'NoMove/Dried Lake': 'N'})
        #self.LC.Reinitialize()
        self.FitCModel(self.LC, {'LW/Dried Lake': 'Y', 'LW/Oasis': 'Y', 'NLW/Dried Lake': 'N', 'NLW/Oasis': 'N'})
        self.FitRModel(self.HR, 'HR')
        self.FitRModel(self.MR, 'MR')
        self.Save()

    def DivideIntoSubimages(self, A):
        '''
        Divides 1 large image into rectangular sub-images
        The screen is chopped into self.m rows and self.n columns
        '''
        return A.reshape(self.m, self.cs[1], self.n, self.cs[0], 3).swapaxes(1, 2).reshape(self.m * self.n, self.cs[1], self.cs[0], 3)

    def EnemyPositionsToTargets(self):
        '''
        Given past prediction, identify places to target to hit enemies.
        Targets are cells predicted to have enemies AND movement
        '''
        return self.cc[self.GSC[(self.YHD & self.CM).astype(np.bool)]]

    def FitCModel(self, C, DM):
        '''
        Fits a classification model and displays the accuracy
        C:  The classifier model to fit
        DM: The mapping of directories to labels
        '''
        A = np.concatenate([self.DIM[Di] for Di in DM])
        Y = np.concatenate([np.repeat(Li, len(self.DIM[Di])) for Di, Li in DM.items()])
        self.Train(C, A, Y, Acc)

    def FitRModel(self, R, D):
        '''
        Fits a regression model and displays the MSE
        R: The regression model to fit
        D: The directory name
        '''
        from sklearn.linear_model import LinearRegression
        A = self.DIM[D]     #Last column is target value
        lr = LinearRegression()
        lr.fit(A[:, :-1], A[:, [-1]])
        A1 = R.W[0].assign(lr.coef_.reshape(-1, 1))     #Warm-start the single-layer MLP with the linear fit
        A2 = R.B[0].assign(lr.intercept_.reshape(-1))
        self.TFS.run([A1, A2])
        self.Train(R, A[:, :-1], A[:, [-1]], MSE)

    def GetCellIJ(self, k):
        return self.SC[k]

    def GetItemLocations(self):
        '''
        Given past prediction, locates items on the screen
        '''
        if len(self.CM) == 0:
            return np.array([])
        ICP = self.GSC[self.CM[self.YHD == 'I']]
        return [(ipi[0] + self.SC[icpi][0] * self.cs[0], ipi[1] + self.SC[icpi][1] * self.cs[1]) for icpi in ICP for ipi in self.GetItemPixels(self.S[icpi])]

    def GetItemPixels(self, I):
        '''
        Locates items that should be picked up on the screen
        '''
        ws = [8, 14]
        D1 = np.abs(I - np.array([10.8721, 12.8995, 13.9932])).sum(axis = 2) < 15
        D2 = np.abs(I - np.array([118.1302, 116.0938, 106.9063])).sum(axis = 2) < 76
        R1 = view_as_windows(D1, ws, ws).sum(axis = (2, 3))
        R2 = view_as_windows(D2, ws, ws).sum(axis = (2, 3))
        FR = ((R1 + R2 / np.prod(ws)) >= 1.0) & (R1 > 10) & (R2 > 10)
        PL = np.transpose(np.nonzero(FR)) * np.array(ws)
        if len(PL) <= 0:
            return []
        bc = Birch(threshold = 50, n_clusters = None)
        bc.fit(PL)
        return bc.subcluster_centers_

    def IsEdgeCell(self, ci, cj):
        ci = np.array([[ci - 1, ci, ci + 1, ci], [cj, cj - 1, cj, cj + 1]])
        if (ci < 0).any():
            return True
        try:
            return (self.GCLU.reshape(self.m, self.n)[ci[0], ci[1]] == -1).any()
        except IndexError:
            return True
        return False

    def PixelToCell(self, p):
        '''
        Determine cell into which a pixel coordinate falls (thresholds values)
        '''
        return (np.maximum(np.minimum(p - self.sb[0:2], self.ss), 0) / self.cs)[:, ::-1].astype(np.int)

    def ProcessScreen(self, I1, I2):
        CI1 = self.DivideIntoSubimages(I1)
        CI2 = self.DivideIntoSubimages(I2)
        CNNYH = [self.OC.YHL, self.EC.YHL, self.MC.YHL, self.LC.YHL, self.HR.YH, self.MR.YH]
        MBIM = I2[488:, 719:749].reshape(1, -1)     #Mana bar image
        HBIM = I2[488:, 52:82].reshape(1, -1)       #Health bar image
        FD = {self.I1: CI1[self.GSC], self.I2: CI2[self.GSC], self.LWI: CI2[[22, 31]], self.HRI: HBIM, self.MRI: MBIM}
        self.YH, self.YHD, self.CM, LW, HL, ML = self.TFS.run(CNNYH, feed_dict = FD)
        return self.YH, self.YHD, self.CM, LW, HL, ML

    def Restore(self):
        return self.MR.RestoreModel(os.path.join('TFModel', ''), 'targsys')

    def Save(self):
        try:    #Create directory if it doesn't exist
            os.makedirs(os.path.join('TFModel'))
        except OSError:
            pass
        self.MR.SaveModel(os.path.join('TFModel', 'targsys'))

    def Train(self, C, A, Y, SF):
        '''
        Trains the model using the sample matrix A and target matrix Y; SF is the scoring function
        '''
        C.fit(A, Y)
        YH = np.zeros(Y.shape, dtype = np.object)
        for i in np.array_split(np.arange(A.shape[0]), 32):     #Split up verification into chunks to prevent out of memory
            YH[i] = C.predict(A[i])
        s1 = SF(Y, YH)
        print('All:{:8.6f}'.format(s1))
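#A small, self-contained check (not from the original project) of the reshape/swapaxes
#tiling used by DivideIntoSubimages: an (m*ch, n*cw, 3) image becomes m*n cells of
#shape (ch, cw, 3), ordered row by row. The sizes below are illustrative only.
import numpy as np

gm, gn, ch, cw = 2, 3, 4, 5                     #2x3 grid of 4x5 cells
IMG = np.arange(gm * ch * gn * cw * 3).reshape(gm * ch, gn * cw, 3)
CELLS = IMG.reshape(gm, ch, gn, cw, 3).swapaxes(1, 2).reshape(gm * gn, ch, cw, 3)
gi, gj = 1, 2                                   #Cell (gi, gj) equals the matching block of the image
assert (CELLS[gi * gn + gj] == IMG[gi * ch:(gi + 1) * ch, gj * cw:(gj + 1) * cw]).all()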
#Leftover debugging output from the original script; stockData2 and stockDataASC are defined elsewhere
#print(stockData2)
#print(scale(stockDataASC))
#To get only specific columns of data from an array use:
#data[:, [1, 9]] where data is the array and you want columns 1 and 9 (indices start from 0)
#Scale the stock data and volume to ease the calculations and fit within the data range
#Number of neurons in the input layer:
#  4 neurons to indicate the candlestick doji patterns
#  4 neurons to indicate the previously most-tested highs which are greater than the previous day's closing price
#  4 neurons to indicate the previously most-tested lows which are less than the previous day's closing price
#  5 neurons to indicate the previous day's open, close, high, and low prices, and volume
i = 17
#Number of neurons in the output layer:
#  5 neurons to indicate the current day's open, close, high, and low prices, and volume
o = 5
#Number of neurons in the hidden layers
h = 17
#The list of layer sizes
layers = [i, h, h, h, h, h, h, h, h, h, o]
mlpr = MLPR(layers, maxIter=1000, tol=0.40, reg=0.001, verbose=True)
mlpr.fit(A, Y)      #A: training samples, Y: target values (built earlier in the original script)
#Begin prediction
yHat = mlpr.predict(A)
#Plot the results
mpl.plot(A, Y, c='#b0403f')
mpl.plot(A, yHat, c='#5aa9ab')
mpl.show()
stockData = numpy.loadtxt(path, delimiter=",", skiprows=1, usecols=(1, 2, 3, 4))
#stockData2 = numpy.genfromtxt(path, delimiter=",", dtype=None, skip_header=1, usecols=(1, 2, 3, 4, 5))
#Scale down the data and reverse the array
A = scale(stockData)[::-1]      #A is the input data
Y = A                           #Y is the expected output
#Number of neurons in the input layer
i = 4
#Number of neurons in the output layer
o = 4
#Number of neurons in the hidden layers
h = 4
#The list of layer sizes
layers = [i, h, o]
mlpr = MLPR(layers, maxIter = 10000, tol = 0.0010, reg = 0.001, verbose = True)
n = len(A)
#Length of the hold-out period
nDays = int(round(n * .3))
#Learn the data: inputs are one day behind the targets, so the model predicts the next day's values
mlpr.fit(A[0:(n - nDays)], Y[1:(n - nDays + 1)])
#Begin prediction
yHat = mlpr.predict(A[0:(n - 1)])
#Plot the results
#mPlotLib.plot(list(range(nDays - 1)), Y[(n - nDays + 1):, (0)].reshape(-1, 1), c='#b04fff')
#mPlotLib.plot(list(range(nDays - 1)), yHat[:, (0)].reshape(-1, 1), c='#000000')
#mPlotLib.plot(A[(n - nDays):(n - 1)], yHat, c='#5aa9ab')
#mPlotLib.show()
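#A minimal sketch (not part of the original script) of scoring the hold-out period:
#the model maps each day's scaled values to the next day's, so predictions made from
#days n-nDays-1 .. n-2 line up with the actual values on days n-nDays .. n-1.
yHatHold = mlpr.predict(A[(n - nDays - 1):(n - 1)])     #Predictions for the hold-out days
yHold = Y[(n - nDays):n]                                #Actual values for the hold-out days
print('Hold-out MSE: {:f}'.format(((yHatHold - yHold) ** 2).mean()))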