def main():
    """Program entry point: load config, import Excel data, convert, output."""
    # Load configuration
    cfg = cfgLoader.get_cfg(CFG_FILE_PATH)
    if cfg.read_me_mode:
        show_readme()
    # Import the raw data from Excel
    raw = xlLoader.get_data(cfg)
    # Convert the data
    converted = dataConverter.convert(raw, cfg)
    # Produce the output
    dataParser.parse(converted, cfg)
def reParse(self):
    """Re-parse the data set (used when 'reParse=True').

    If a stored copy already exists, does nothing.  Otherwise fills the
    frame's 'book_title'/'book_id' columns, then for each (first, second)
    entry of the directory tree concatenates the parsed text of every file
    under it (space-separated, with a leading space as in the original
    accumulation) into column 1 of that row, and finally stores the file.
    """
    if self._check_exist():
        print("It is already stored. Loading dataset...")
        return
    self._dataFrame['book_title'] = self._catalog
    self._dataFrame['book_id'] = self._bookId
    for index, (first, second) in enumerate(self._dirTree.items()):
        print("Parsing...", self._catalog[index])
        # Join once instead of quadratic '+' accumulation in the loop.
        parts = [dataParser.parse(self.dirPath + first + '/' + item)
                 for item in second]
        temp_book = ' ' + ' '.join(parts) if parts else ''
        # BUG FIX: the original `self._dataFrame.iloc[index][1] = temp_book`
        # is chained indexing — it assigns into a temporary row copy
        # (pandas SettingWithCopy), so the write may never reach the frame.
        # Scalar assignment with a (row, col) pair writes through reliably.
        self._dataFrame.iloc[index, 1] = temp_book
    self._storeFile()
update_momentum=0.9, regression=True, # flag to indicate we're dealing with regression problem max_epochs=1500, # we want to train this many epochs verbose=1, ) #if (loadFile != ""): #net1.load_params_from(loadFile) net1.max_epochs = 50 net1.update_learning_rate = ln; return net1 generations, generationsToInputs, generationsToOutputs = dataParser.parse(fname = "whole_population_0.txt") iters = 150 saveFile = "LasagneWeights400_2Layer" trainingInputs, trainingOutputs, testInputs, testOutputs = dataParser.makeSets(generationsToInputs, generationsToOutputs, generations[0:200], 1, 0.25) ln = 0.01 X = Normalizers.gaussNormalize(trainingInputs) Xtest = Normalizers.gaussNormalize(testInputs) # Y = Normalizers.gaussNormalize(trainingOutputs) Ytest = Normalizers.gaussNormalize(testOutputs) X = np.asarray(X, np.float32) Y = np.asarray(Y, np.float32) Xtest= np.asarray(Xtest, np.float32) Ytest = np.asarray(Ytest, np.float32) net = createNet(X, Y, ln, saveFile)
def on_data(self, data):
    """Callback for an incoming payload; delegates parsing to dataParser."""
    parsed = dataParser.parse(data)
    return parsed