def main():
    """Run the full pivot-translation pipeline end to end.

    Stages: tokenize the raw Europarl corpora, cleanse them, split into
    train/tune/test, match test sentences across corpora, build language
    models, train both translation legs (es->en and en->fr), tune on held-out
    data, then run interactive and scored evaluations.

    NOTE(review): Parser/Train/Tune/Test are project classes not visible
    here; the `True` argument presumably enables verbose output (see the
    sibling main() which documents it that way) — confirm.
    """
    parser = Parser(True)
    # Tokenize the data
    parser.tokenize("src/europarl-v7.es-en.es")
    parser.tokenize("src/europarl-v7.es-en.en")
    parser.tokenize("src/europarl-v7.fr-en.en")
    parser.tokenize("src/europarl-v7.fr-en.fr")
    # Normalize the data
    parser.cleanse("data/europarl-v7.es-en.es.tok", "data/europarl-v7.es-en.en.tok")
    parser.cleanse("data/europarl-v7.fr-en.en.tok", "data/europarl-v7.fr-en.fr.tok")
    # Split data into train, tune, test sets (60% train, 20% tune, 20% test)
    parser.split_train_tune_test("data/europarl-v7.es-en.es.tok.cleansed",
                                 "data/europarl-v7.es-en.en.tok.cleansed",
                                 "data/europarl-v7.fr-en.en.tok.cleansed",
                                 "data/europarl-v7.fr-en.fr.tok.cleansed", .6, .2)
    # Match test sentences across the two corpora so the full pivot can be scored
    parser.match("data/test/europarl-v7.es-en.es.tok.cleansed.test",
                 "data/test/europarl-v7.es-en.en.tok.cleansed.test",
                 "data/test/europarl-v7.fr-en.en.tok.cleansed.test",
                 "data/test/europarl-v7.fr-en.fr.tok.cleansed.test")
    trainer = Train(True)
    # Build target language models (pivot 'en' for leg 1, target 'fr' for leg 2)
    trainer.build_language_models("data/train/europarl-v7.es-en.en.tok.cleansed.train")
    trainer.build_language_models("data/train/europarl-v7.fr-en.fr.tok.cleansed.train")
    # Train each leg of the translation system
    trainer.train("data/train/europarl-v7.es-en.es.tok.cleansed.train",
                  "data/train/europarl-v7.es-en.en.tok.cleansed.train", "es-en.working")
    trainer.train("data/train/europarl-v7.fr-en.en.tok.cleansed.train",
                  "data/train/europarl-v7.fr-en.fr.tok.cleansed.train", "en-fr.working")
    # Tune the system on held out data
    tuner = Tune(True)
    tuner.tune("data/tune/europarl-v7.es-en.es.tok.cleansed.tune",
               "data/tune/europarl-v7.es-en.en.tok.cleansed.tune", "es-en.working")
    tuner.tune("data/tune/europarl-v7.fr-en.en.tok.cleansed.tune",
               "data/tune/europarl-v7.fr-en.fr.tok.cleansed.tune", "en-fr.working")
    test = Test(True)
    # Run interactive translator server
    test.test_translator_interactive("es-en.working")
    test.test_translator_interactive("en-fr.working")
    # Score translation quality between pivot translations using held out test data
    test.test_translation_quality("data/test/europarl-v7.es-en.es.tok.cleansed.test",
                                  "data/test/europarl-v7.es-en.en.tok.cleansed.test", "es-en.working")
    test.test_translation_quality("data/test/europarl-v7.fr-en.en.tok.cleansed.test",
                                  "data/test/europarl-v7.fr-en.fr.tok.cleansed.test", "en-fr.working")
    # Run interactive translator on pivoting system
    test.test_pivoting_interactive("es-en.working", "en-fr.working")
    # Score translation quality on entire translation using matched test data
    test.test_pivoting_quality("data/test/europarl-v7.es-en.es.tok.cleansed.test.matched",
                               "es-en.working",
                               "data/test/europarl-v7.fr-en.fr.tok.cleansed.test.matched",
                               "en-fr.working")
def main():
    """Training-only entry point: build the two target language models and
    train both legs (es->en and en->fr) of the pivot translation system."""
    # Create Train instance, set verbose to True to see what's happening
    trainer = Train(True)
    # Build target language models for only the target languages. In this
    # scenario, the desired target languages are the pivot language in the
    # source to pivot leg of the translation and the target language in the
    # pivot to target leg of the scenario. Language models are saved in the
    # lm directory
    trainer.build_language_models("data/train/europarl-v7.es-en.en.tok.cleansed.train")
    trainer.build_language_models("data/train/europarl-v7.fr-en.fr.tok.cleansed.train")
    # Train each leg of the translation system separately. The first
    # parameter must be the path to the source training data, the second
    # will be the path to the pivot training data, and the third is the
    # name for the directory which will store the system's results.
    trainer.train("data/train/europarl-v7.es-en.es.tok.cleansed.train",
                  "data/train/europarl-v7.es-en.en.tok.cleansed.train", "es-en.working")
    trainer.train("data/train/europarl-v7.fr-en.en.tok.cleansed.train",
                  "data/train/europarl-v7.fr-en.fr.tok.cleansed.train", "en-fr.working")
from Train import Train
import json, sys, requests

# Interactive lookup loop: ask for a train number, fetch its details through
# the Train class (network-backed), and cache them as "<trainNo>.json".
# When the network is unavailable, fall back to the cached file.
while True:
    trainNo = input("Enter the Train Number: ")
    try:
        train = Train(trainNo)
        trainData = {
            'trainName': train.getName(),
            'trainOrigin': train.getOrigin(),
            'trainDestination': train.getDestination(),
            'trainWeekDays': train.getWeekDays(),
            'trainType': train.getType(),
            'trainSchedule': train.getSchedule()
        }
        # Write the cache file directly. The original code re-pointed
        # sys.stdout at the file and never closed it, which leaked one file
        # handle per lookup and left stdout redirected if anything raised
        # between the swap and the restore.
        with open(trainNo + '.json', 'w') as cache:
            cache.write(json.dumps(trainData) + '\n')
    except IndexError:
        # Raised by Train when the number matches nothing — TODO confirm.
        print("No such Train found.")
        continue
    except requests.ConnectionError:
        print("Connection Error occurred. Looking for data in local storage..")
        try:
            with open(trainNo + '.json', 'r') as cache:
                trainData = json.load(cache)
        except FileNotFoundError:
            print("Internet Connection is required to fetch Train Info.")
from Train import Train

# Minimal driver: construct a Train and invoke its Train() method.
# NOTE(review): calling `t.Train()` on an instance is unusual — confirm the
# class really defines a method named `Train` (as opposed to this being a
# mistaken re-invocation of the constructor).
t = Train()
t.Train()
section[s_in["section"]].addSensor(sensor[s_in["id"]]) # Setup signals and add these to sections for s_in in data["signals"]: signal[s_in["id"]] = Signal(s_in["id"], s_in["section"], s_in["placement"], s_in["aspects"], redis, "signal_action") try: s_in["color"] != "" signal[s_in["id"]].setColor(s_in["color"]) except: signal[s_in["id"]].setColor("red") section[s_in["section"]].addSignal(signal[s_in["id"]]) # Setup train locations, adding sections as references for t_in in data["trains"]: train[t_in["id"]] = Train(t_in["id"], t_in["sensor_count"], t_in["speeds"], redis, "traincon") for sec_in in t_in["sections"]: train[t_in["id"]].addSection(section[sec_in]) section[sec_in].setTrain(train[t_in["id"]]) ## TRAINING / DEBUG CODE ## Performs some very basic operations ## logging.debug(section[1].getCurrentDirection()); ## Get Train #3 #t_3 = train[3]; ## Get the sections that Train #3 is currently in #tsections = t_3.getSections(); #for section in tsections: ## Get the signals in this section # logging.debug("Current Section = " + str(section.getId()) + " direction = " + section.getCurrentDirection()) # nextSection = section.getNextSection()
class Bayes:
    """Naive-Bayes style scoring over 'strategic' models and 'statistics'
    events, backed by prior/likelihood tables produced by Train."""

    def __init__(self, conf=None):
        """Load prior/likelihood data from conf["BAYES_DATA"], or train it.

        Uses ``conf=None`` instead of a mutable ``{}`` default so the default
        dict cannot be shared across instances.
        """
        conf = {} if conf is None else conf
        self.data = conf.get("BAYES_DATA", None)
        if not self.data:
            # No precomputed tables supplied — train them now.
            self.data = Train(conf=conf)
            self.data.train()
        self.conf = conf

    def posterior(self, prior=None, likelihood=None):
        """Return posterior[strategic][statistic] = prior[statistic] *
        likelihood[strategic][statistic]; defaults come from self.data."""
        prior = prior or self.prior()
        likelihood = likelihood or self.likelihood()
        posterior = {}
        for strategicName in likelihood:
            for statisticsName in likelihood[strategicName]:
                posterior.setdefault(strategicName, {})[statisticsName] = (
                    prior[statisticsName] * likelihood[strategicName][statisticsName])
        return posterior

    @classmethod
    def combining(cls, *posteriors):
        """Combine independent posterior probabilities.

                         P1 * P2 ... P15
            P = ---------------------------------------
                P1 * P2 ... P15 + (1-P1)(1-P2)...(1-P15)

        Accepts either varargs or a single list. An empty input now returns
        0.5 (neutral) instead of raising UnboundLocalError as before.

        >>> P1 = 0.9
        >>> P2 = 0.8
        >>> P3 = 0.7
        >>> Bayes.combining(P1,P2,P3)
        0.9882352941176471
        """
        if len(posteriors) == 1 and isinstance(posteriors[0], list):
            posteriors = posteriors[0]
        pm = 1.0
        pcm = 1.0
        for posterior in posteriors:
            pm *= float(posterior)
            pcm *= 1.0 - float(posterior)
        return pm / (pm + pcm)

    def prior(self):
        """Prior probability table."""
        return self.data['prior']

    def likelihood(self):
        """Likelihood table."""
        return self.data['likelihood']

    def likelihood2(self, priorCount, pbaCount, strategicCount, total):
        """Compute the likelihood factor for each (strategic, statistic) pair.

        Let B be a strategic model and A a statistics event:
            P(B|A)  = pbaCount / priorCount          (strategy rate within the event)
            P(B|A') = (len(B) - pbaCount) / (total - priorCount)
            P(A)    = priorCount / total

                                   P(B|A)
            likelihood = -------------------------------
                         P(B|A) * P(A) + P(B|A') * P(A')

        When an event never occurred (priorCount == 0) the factor is 0.

        :param priorCount: per-statistic occurrence counts
        :param pbaCount: per-strategic, per-statistic joint counts
        :param strategicCount: per-strategic occurrence counts
        :param total: total number of observations
        :return: dict likelihood[strategic][statistic] -> float

        BUG FIX: the original rebound the *result dict* itself
        (``likelihood = pba / ...``) and then tried to index into the float,
        which raised TypeError on the next assignment. The per-pair value now
        goes into a separate local.
        """
        likelihood = {}
        for strategicName in pbaCount:
            for statisticsName in pbaCount[strategicName]:
                if strategicName not in likelihood:
                    likelihood[strategicName] = {}
                if priorCount[statisticsName]:
                    subPbaCount = float(pbaCount[strategicName][statisticsName])
                    pba = subPbaCount / priorCount[statisticsName]
                    pbca = float(strategicCount[strategicName] - subPbaCount) \
                        / float(total - priorCount[statisticsName])
                    prior = float(priorCount[statisticsName]) / total
                    value = pba / (pba * prior + pbca * (1 - prior))
                else:
                    value = 0
                likelihood[strategicName][statisticsName] = value
        return likelihood
def initializeRL(self, filename):
    """Set up logging, build one Train (agent) per index plus its TensorFlow
    graph, then run the main RL training loop: parallel per-agent rollouts,
    baseline fitting, policy updates, and CSV/TensorBoard logging.

    Python 2 code: uses ``xrange`` and unbuffered text ``open(..., 'w', 0)``.

    :param filename: base name for the CSV logs, the TensorBoard run
        directory and the model checkpoint files.
    """
    # Unbuffered CSV logs: overall rewards, baseline losses, policy losses.
    f = open(self.config.workDir + 'Logs/' + filename + '.csv', 'w', 0)
    f.write(
        'Iteration,AvgMDPReward,AvgEventReward,AvgSystemReward,KMDPReward,KEventReward,KSystemReward,Time\n'
    )
    b = open(self.config.workDir + 'Logs/Baseline_' + filename + '.csv', 'w', 0)
    b.write('Iteration,')
    for j in xrange(0, self.num_agents):
        b.write('Agent_' + str(j) + '_Initial Loss,' + 'Agent_' + str(j) + '_Final Loss, ,')
    b.write('\n')
    pol = open(self.config.workDir + 'Logs/Policy_' + filename + '.csv', 'w', 0)
    pol.write('Iteration,')
    for j in xrange(0, self.num_agents):
        pol.write('Agent_' + str(j) + '_Initial Loss,' + 'Agent_' + str(j) + '_Final Loss, ,')
    pol.write('\n')
    trains = []
    event_maps = []
    test_maps = []
    files = []  # NOTE(review): never used below
    placeholders = []
    # One Train instance per agent; collect their event maps.
    for i in xrange(0, self.num_agents):
        train_instance = Train(i, self.config)
        event_maps.append(train_instance.eventMap)
        trains.append(train_instance)
    # Build every agent's computational graph inside one shared tf.Graph.
    g_1 = tf.Graph()
    with g_1.as_default():
        for i in xrange(0, self.num_agents):
            # NOTE(review): '/device:CPU:1' + str(i) yields CPU:10, CPU:11, ...
            # — probably meant '/device:CPU:' + str(i); confirm.
            with tf.device('/device:CPU:1' + str(i)):
                pl = PlaceholderClass(agent=i, c=self.config, t=trains[i])
                pl = pl.computationalGraphs()
                trains[i].policy = pl.policy
                trains[i].baseline = pl.baseline
                assert pl.t.policy != None
                assert pl.t.baseline != None
                placeholders.append(pl)
    with tf.Session(graph=g_1,
                    config=tf.ConfigProto(
                        allow_soft_placement=True,
                        log_device_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        if self.config.runWithSavedModel:
            saver.restore(sess, "../models/model_" + filename + ".ckpt")
            print("Model restored.")
        avg_arr = []
        ev_avg_arr = []
        mdp_avg_arr = []
        # Process pool for parallel per-agent work (amap suggests pathos —
        # TODO confirm which Pool this is).
        p = Pool(processes=(self.num_agents + 2))
        stopwatch = Stopwatch()
        # Grab the per-agent loss / learning-step ops once, up front; the
        # feed dicts built each iteration supply the data.
        orig_base_loss = [
            placeholders[j].loss_baseline for j in xrange(0, self.num_agents)
        ]
        learn_baseline = [
            placeholders[j].learning_step_baseline
            for j in xrange(0, self.num_agents)
        ]
        new_base_loss = [
            placeholders[j].loss_baseline for j in xrange(0, self.num_agents)
        ]
        orig_policy_loss = [
            placeholders[j].loss for j in xrange(0, self.num_agents)
        ]
        learn_policy = [
            placeholders[j].learning_step for j in xrange(0, self.num_agents)
        ]
        new_policy_loss = [
            placeholders[j].loss for j in xrange(0, self.num_agents)
        ]
        writer = tf.summary.FileWriter('../tflogs/' + filename, sess.graph)
        for curr_iter in xrange(1, self.config.numIterations + 1):
            # print "---------Current Iteration----------: ", curr_iter
            event_maps = []
            test_maps = []
            # Roll out every agent in parallel, then refresh our handles
            # (amap returns new Train objects).
            res = p.amap(self._instance_method_alias_run_iteration, trains,
                         [sess] * self.num_agents)
            trains = res.get()
            for j in xrange(0, self.num_agents):
                event_maps.append(trains[j].eventMap)
            # Broadcast all event maps back to every agent.
            res = p.amap(self._instance_method_alias_update_eventArrays,
                         trains, [event_maps] * self.num_agents)
            trains = res.get()
            feeding_dicts = []
            baseline_dicts = []
            test_maps = []
            for j in xrange(0, self.num_agents):
                feeding_d, baseline_d, testMap_d = trains[j].giveMeEverything(
                    placeholders[j])
                feeding_dicts.append(feeding_d)
                baseline_dicts.append(baseline_d)
                test_maps.append(testMap_d)
            # Merge the per-agent feed dicts into a single dict per run call.
            assert self.num_agents >= 2
            merged_dicts_baseline = self.merge_two_dicts(
                baseline_dicts[0], baseline_dicts[1])
            merged_dicts_feeding = self.merge_two_dicts(
                feeding_dicts[0], feeding_dicts[1])
            for j in xrange(2, self.num_agents):
                merged_dicts_baseline = self.merge_two_dicts(
                    merged_dicts_baseline, baseline_dicts[j])
                merged_dicts_feeding = self.merge_two_dicts(
                    merged_dicts_feeding, feeding_dicts[j])
            #-----------------------
            # Baseline training
            #-----------------------
            for j in xrange(0, self.num_agents):
                # Set training mode to False for evaluating loss.
                merged_dicts_baseline[
                    placeholders[j].train_mode_baseline] = False
            orig_base_loss_vals = sess.run(orig_base_loss,
                                           feed_dict=merged_dicts_baseline)
            for j in xrange(0, self.num_agents):
                # Set training mode to True for learning step.
                merged_dicts_baseline[
                    placeholders[j].train_mode_baseline] = True
            sess.run(learn_baseline, feed_dict=merged_dicts_baseline)
            for j in xrange(0, self.num_agents):
                # Set training mode to False for evaluating loss after training.
                merged_dicts_baseline[
                    placeholders[j].train_mode_baseline] = False
            new_base_loss_vals = sess.run(new_base_loss,
                                          feed_dict=merged_dicts_baseline)
            #-----------------------
            # Policy training
            #-----------------------
            for j in xrange(0, self.num_agents):
                # Set training mode to False for evaluating loss.
                merged_dicts_feeding[
                    placeholders[j].policy.train_mode] = False
            orig_policy_loss_vals = sess.run(orig_policy_loss,
                                             feed_dict=merged_dicts_feeding)
            for j in xrange(0, self.num_agents):
                # Set training mode to True for learning step.
                merged_dicts_feeding[
                    placeholders[j].policy.train_mode] = True
            sess.run(learn_policy, feed_dict=merged_dicts_feeding)
            for j in xrange(0, self.num_agents):
                # Set training mode to False for evaluating loss after training.
                merged_dicts_feeding[
                    placeholders[j].policy.train_mode] = False
            new_policy_loss_vals = sess.run(new_policy_loss,
                                            feed_dict=merged_dicts_feeding)
            # Aggregate this iteration's rewards plus 100-iteration
            # rolling means.
            mdp_avg_val, event_avg_val, system_avg_val, all_mdp_values = self.getAvgRewardFromEvents(
                test_maps)
            mdp_avg_arr.append(mdp_avg_val)
            mdp_avg_k_val = np.mean(np.array(mdp_avg_arr[-100:]))
            ev_avg_arr.append(event_avg_val)
            ev_avg_k_val = np.mean(np.array(ev_avg_arr[-100:]))
            avg_arr.append(system_avg_val)
            avg_arr_k_val = np.mean(np.array(avg_arr[-100:]))
            elapTime = stopwatch.elapsedTime()
            avg_time_per_iter = elapTime / curr_iter
            if curr_iter % self.config.savingThreshold == 0:
                save_path = saver.save(
                    sess, "../models/model_" + filename + ".ckpt")
                print("Model saved in path: %s" % save_path)
            # NOTE(review): all TensorBoard and CSV logging is skipped until
            # curr_iter reaches loggingThreshold — confirm that is intended.
            if curr_iter >= self.config.loggingThreshold:
                summary = tf.Summary()
                summary.value.add(tag='Average/mdp_avg',
                                  simple_value=mdp_avg_val)
                summary.value.add(tag='Average/event_avg',
                                  simple_value=event_avg_val)
                summary.value.add(tag='Average/system_avg',
                                  simple_value=system_avg_val)
                summary.value.add(tag='KAverage/mdp_avg_k',
                                  simple_value=mdp_avg_k_val)
                summary.value.add(tag='KAverage/event_avg_k',
                                  simple_value=ev_avg_k_val)
                summary.value.add(tag='KAverage/system_avg_k',
                                  simple_value=avg_arr_k_val)
                summary.value.add(tag='Timing/elapsed_time_var',
                                  simple_value=elapTime)
                summary.value.add(tag='Timing/avg_time_per_iteration',
                                  simple_value=avg_time_per_iter)
                writer.add_summary(summary, curr_iter)
                writer.flush()
                # Mirror the same numbers into the three CSV logs.
                f.write(
                    str(curr_iter) + ',' + str(mdp_avg_val) + ',' +
                    str(event_avg_val) + ',' + str(system_avg_val) + ',')
                f.write(str(mdp_avg_k_val) + ',')
                f.write(str(ev_avg_k_val) + ',')
                f.write(str(avg_arr_k_val) + ',')
                f.write(str(elapTime) + '\n')
                b.write(str(curr_iter) + ",")
                pol.write(str(curr_iter) + ",")
                for j in xrange(0, self.num_agents):
                    b.write(
                        str(orig_base_loss_vals[j]) + "," +
                        str(new_base_loss_vals[j]) + ", ,")
                    pol.write(
                        str(orig_policy_loss_vals[j]) + "," +
                        str(new_policy_loss_vals[j]) + ", ,")
                b.write("\n")
                pol.write("\n")
        p.close()
        f.close()
        b.close()
        pol.close()
def setup(self):
    """Initialize/reset the game state: obstacles, train path, train,
    background, player, guards, bullets, sprite lists and timers."""
    # Obstacles: random obstacle textures placed at fractional screen coords.
    self.blocks = arcade.SpriteList(use_spatial_hash=True)
    for block in sp_coordinates_obstacles:
        number = randint(1, 2)
        self.blocks.append(
            arcade.Sprite("images/obstacles/2-%d.png" % number,
                          1.5,
                          center_x=self.permission[0] * block[0],
                          center_y=self.permission[1] * block[1],
                          hit_box_algorithm="Detailed"))
    # Train path (loaded from a text file of waypoints).
    self.paint_reils_way_flag = False
    self.way_file = 'train_way.txt'
    self.way_list = []
    self.read_train_way()
    # The train itself.
    self.train = Train(0, 150, 12)
    # Background texture (file name is Russian for "ground" — keep as is).
    self.background = arcade.load_texture("images/земля.png")
    # Player character.
    self.people_list = arcade.SpriteList()
    self.player_sprite = Player()
    self.player_sprite.center_x = 400
    self.player_sprite.center_y = 50
    self.people_list.append(self.player_sprite)
    # Guards, each with an A* barrier list over the obstacles.
    self.guards_list = arcade.SpriteList()
    for i in range(len(sp_coordinates_guards)):
        x, y = sp_coordinates_guards[i]
        self.guards_sprite = Guard(x, y)
        self.guards_sprite.barrier_list = arcade.AStarBarrierList(
            self.guards_sprite, self.blocks, 20, 0,
            window.get_size()[0], 0, window.get_size()[1])
        self.guards_list.append(self.guards_sprite)
        self.people_list.append(self.guards_sprite)
    # Bullets.
    self.bullet_list = arcade.SpriteList()
    hero = self.player_sprite
    self.mouse_pos = {
        'x': hero.center_x,
        'y': hero.center_y,
        'dx': 0,
        'dy': 0,
        'button': 0
    }  # initial mouse coordinates/state
    # Combined sprite list (people + bullets + train cars).
    self.all_sprites = arcade.SpriteList()
    self.all_sprites.extend(self.people_list)
    self.all_sprites.extend(self.bullet_list)
    self.train.append_all(self.all_sprites)
    # Timers.
    self.time = 0
    self.start_time = time.time()
    self.time_for_collision = time.time()
    # Used by the reload (recharge) mechanic.
    self.start_recharge = self.time
    # Toggles which hit box is active:
    # 0 - lower quarter of the sprite
    # 1 - the whole sprite
    self.index = 0
def setUp(self):
    """Build a fresh fixture before each test: a Train (speed=1,
    capacity=2, name='tA'), a stub station, and a cargo bound for it."""
    self.train = Train(1, 2, 'tA')
    self.mockStation = MockStation()
    # Cargo's destination is the mock station.
    self.cargo = Cargo(self.mockStation)
__author__ = 'panjinbo'

import sys
import cPickle

from PreProcess import PreProcess
from Train import Train

if __name__ == "__main__":
    # argv: <raw_corpus> <clean_corpus> <num_topics> <num_results_to_show>
    clean_path = sys.argv[2]
    PreProcess(sys.argv[1]).ToCleanText(clean_path)

    # Train the model on the cleaned text, display results, then persist it.
    model = Train(clean_path)
    model.train(int(sys.argv[3]))
    model.show_result(int(sys.argv[4]))
    cPickle.dump(model, open('model', 'wb'))
"********************************* LOAD PARAMETERS *********************************" ) # load pre-trained parameters load_dir = "./Results_dir/2020-10-09-14-42-10000" policy.load_parameters(load_dir) value.load_parameters(load_dir) if TRAIN_FLAG == 1: print_iters = 10 print( "********************************** START TRAINING **********************************" ) print("************************** PRINT LOSS EVERY " + str(print_iters) + "iterations ***************************") # train the network by policy iteration train = Train() # train.agent_batch = vehicleDynamics.initialize_state() if LOAD_PARA_FLAG == 1: train.agent_batch = torch.load( os.path.join(load_dir, 'agent_buffer.pth')) train.setInitState() else: # train.agent_batch = vehicleDynamics.initialize_state() train.initialize_state() while True: train.update_state(policy, vehicleDynamics) value_loss = train.policy_evaluation(policy, value, vehicleDynamics) policy_loss = train.policy_improvement(policy, value) writer.add_scalar("policy_loss", policy_loss,
class TrainTest(unittest.TestCase):
    """Unit tests for Train(speed, capacity, name).

    Timing assertions assume travel/load/unload block for a real duration
    (apparently ~3 seconds each — TODO confirm against Train's implementation),
    so this suite is slow by design.
    """

    def setUp(self):
        # Fresh train (speed=1, capacity=2, id='tA'), stub station, and a
        # cargo destined for that station, before every test.
        self.train = Train(1, 2, 'tA')
        self.mockStation = MockStation()
        self.cargo = Cargo(self.mockStation)

    def testSpeed(self):
        self.assertEqual(1, self.train.speed, 'train has speed')

    def testCapacity(self):
        self.assertEqual(2, self.train.available_capacity, 'train has capacity')

    def testId(self):
        self.assertEqual('tA', self.train.name, 'train has id')

    def testTravelTime(self):
        # Travel must take at least distance/speed seconds (distance
        # presumably 3 — confirm).
        startTime = time.time()
        self.train.travel()
        self.assertGreaterEqual(time.time() - startTime, float(3/self.train.speed))

    def testLoadCargoLoadingTime(self):
        # Loading blocks for at least 3 seconds.
        startTime = time.time()
        self.train.load_cargo(self.cargo)
        self.assertGreaterEqual(time.time() - startTime, 3)

    def testLoadCargoAdditionToTrainCargo(self):
        self.train.load_cargo(self.cargo)
        self.assertEqual(1, self.train.num_of_train_cargo())

    def testUnloadCargoUnloadingTime(self):
        # Unloading blocks for at least 3 seconds.
        self.train.load_cargo(self.cargo)
        startTime = time.time()
        self.train.unload_cargo(self.mockStation)
        self.assertGreaterEqual(time.time() - startTime, 3)

    def testUnlodCargoRemoveFromTrainCargo(self):
        # (sic: "Unlod" — renaming would change test discovery, left as is.)
        self.train.load_cargo(self.cargo)
        self.train.unload_cargo(self.mockStation)
        self.assertEqual(0, self.train.num_of_train_cargo())

    def testUnlodCargoMultipleCargoDestinations(self):
        # Only cargo destined for the given station is unloaded; cargo for
        # other stations stays on board.
        new_station = MockStation()
        new_cargo = Cargo(new_station)
        self.train.load_cargo(self.cargo)
        self.train.load_cargo(new_cargo)
        self.train.unload_cargo(self.mockStation)
        self.assertEqual(1, self.train.num_of_train_cargo())
import argparse
import sys


def create_parser():
    """Build the CLI parser: a corpus file path and a model selector."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-file_path', '--file_path', default='src/cheys.txt')
    cli.add_argument('-model', '--model', default='bigrams')
    return cli


if __name__ == '__main__':
    args = create_parser().parse_args(sys.argv[1:])
    print(args)
    if args.model == 'bigrams':
        # Bigram model: train on the given corpus, then emit five samples.
        bigram_model = Train(args.file_path).get_model()
        for _ in range(5):
            print(Generate(bigram_model).generate_text())
    elif args.model == 'probabilistic_model':
        # Probabilistic model: trained on the bundled corpus, five samples.
        prob_model = Train('src/cheys.txt').auto_refactoring_for_bigrams()
        for _ in range(5):
            print(Generate(prob_model).get_text_from_second_model())
    else:
        print('ошибка')
def setNewTrain(self):
    """Spawn a replacement train once the current one is due
    (``getNextTime() == 0``); otherwise leave it alone."""
    if self.train.getNextTime() != 0:
        return
    # Two random picks in [1, total] — presumably origin/destination indices.
    first_pick = random.randint(1, self.total)
    second_pick = random.randint(1, self.total)
    self.train = Train(self.position, self, first_pick, second_pick)
#!/usr/local/bin/python3
import sys, os
sys.path.append(os.path.join(os.path.dirname(sys.path[0])))
from Train import Train
from Protocol import Message, MsgTypes

# Smoke-test driver: feed a hand-built pickup request plus an election round
# into a single Train instance and step it forward.
start_pos = (0, 0)
demo_train = Train(1, start_pos, "none.txt", log=True)

# Pre-encode every message up front, mirroring the original construction order.
request = Message(MsgTypes.req, sender=-1, pickup='Point 1', dropoff='Point 2').encode()
election = Message(MsgTypes.elec, sender=3, distance=10, client=-1).encode()
ack_for_3 = Message(MsgTypes.elec_ack, sender=2, client=-1, receiver=3).encode()
# NOTE: this ack (receiver=1) is built but never delivered, as in the original.
ack_for_1 = Message(MsgTypes.elec_ack, sender=2, client=-1, receiver=1).encode()

demo_train.receive_message(request)
demo_train.step()
demo_train.step()
demo_train.receive_message(election)
demo_train.step()
demo_train.receive_message(ack_for_3)
demo_train.step()
demo_train.step()
demo_train.step()
# Experiment configuration for a CIFAR-100 feature-training run; the run is
# named after this script's filename. `factor` and `f` come from earlier in
# the file (not visible here).
name = os.path.basename(__file__).split('.')[0]
data_pth = '/home/hxianglong' + '/Data/cifar-100-python-cnn-features-transform-order-no-neg'
batch_size = 256 * factor
epochs = 100
num_net = 10 / f
# train_net = 10 / f
train_net = 10
out_cls = 10 * f
train_pr_net = 5000 * f
test_pr_net = 1000 * f
print_freq = 5
val_freq = 20
# NOTE(review): 10e1 == 100.0 and 10e2 == 1000.0 — confirm these were not
# meant to be 1e1 / 1e2.
a = 10e1
b = 10e1
c = 10e2
lr = 7e-2
decay = -0.5
# (learning_rate, decay) pairs to sweep.
para_ls = [(7e-2, -0.5), (7e-2, -0.4), (7e-2, -0.6), (7e-2, -0.7), (7e-2, -0.8), (5e-2, -0.55), (4e-2, -0.55), (4e-2, -0.45)]
# para_ls = [(1e-2, -0.4), (1e-2, -0.6), (1e-2, -0.7), (7e-2, -0.8), (5e-2, -0.55), (4e-2, -0.55), (4e-2, -0.45)]
train = Train(name=name, data_pth=data_pth, batch_size=batch_size, epochs=epochs,
              num_net=num_net, train_net=train_net, test_pr_net=test_pr_net,
              train_pr_net=train_pr_net, out_cls=out_cls, print_freq=print_freq,
              para_ls=para_ls, a=a, b=b, c=c, learning_rate=lr, decay=decay)
try:
    train.train(val_freq=val_freq, dir_txt=currentdir + '/')
except Exception:
    # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit
    # and would e-mail an "error" report on a manual Ctrl-C.
    e = traceback.format_exc()
    traceback.print_exc()
    info = 'ERROR in your code!!!\n\n %s' % e
    mail(subject=name, info=info)
(400, 900), ] for i in range(numStations): stations.append(Station(t_pos[i], screen)) routes = [] t_stations = [[0, 1, 2, 3], [1, 2, 4, 5]] t_circular = [True, False] t_colors = [(0, 0, 255), (255, 0, 0)] for i in range(numRoutes := 2): s = [stations[x] for x in t_stations[i]] routes.append(Route(s, t_circular[i], screen, t_colors[i])) trains = [] for i in range(numTrains): trains.append(Train(routes[i], screen)) return stations, routes, trains pygame.init() bg_image = pygame.image.load("bg.png") screen = pygame.display.set_mode((1000, 1000)) pygame.display.set_caption("minimetro") screen.blit(bg_image, (0, 0)) stations, routes, trains = setup(6, 2, screen) running = True
# sys.path.insert(0,parentdir) from Train import Train factor = 1 f = 5 name = 'test50-150-nc' data_pth = '/home/hxianglong' + '/Data/cifar-100-python-cnn-features-transform-order-no-neg' batch_size = 256 * factor epochs = 150 num_net = 10 / f train_net = 10 / f out_cls = 10 * f train_pr_net = 5000 * f test_pr_net = 1000 * f print_freq = 5 b = 0 c = 0 train = Train(name, data_pth, batch_size, epochs, num_net, train_net, test_pr_net, train_pr_net, out_cls, print_freq=print_freq, b=b, c=c) train.train(out_cls=out_cls)
from Train import Train

# Driver kept as a scratchpad: uncomment exactly one of the calls below to
# run the corresponding training mode.
t = Train()
# t.train_1_class()
# t.train_n_classes()
# t.train()
# -*- coding: utf-8 -*- import os, sys, inspect # currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) # parentdir = os.path.dirname(currentdir) # sys.path.insert(0,parentdir) from Train import Train factor = 1 name = 'test-150-0.45' data_pth = '/home/hxianglong' + '/Data/cifar-100-python-cnn-features-transform-order-no-neg' batch_size = 256 * factor epochs = 150 num_net = 10 train_net = 10 train_pr_net = 5000 test_pr_net = 1000 print_freq = 5 decay = -0.45 train = Train(name, data_pth, batch_size, epochs, num_net, train_net, test_pr_net, train_pr_net, print_freq=print_freq, decay=decay) train.train()
# NOTE(review): this def takes `self` — it appears to be a method of a class
# whose header is outside this view; confirm its enclosing class.
def doGetBestAction(self, _data, _state_list):
    # Greedy action selection: arg-max over axis 1, one action per row.
    return np.argmax(_data, 1).tolist()


class Shared(Chain):
    """Shared trunk of the network: a single 1 -> 5 linear layer (chainer)."""

    def __init__(self):
        super(Shared, self).__init__(
            linear=L.Linear(1, 5)
        )

    def __call__(self, _x, _is_train):
        # `_is_train` is accepted but unused here.
        y = self.linear(_x)
        return y


class Head(Chain):
    """Output head: a 5 -> 2 linear layer mapping trunk features to actions."""

    def __init__(self):
        super(Head, self).__init__(
            linear=L.Linear(5, 2)
        )

    def __call__(self, _x, _is_train):
        # `_is_train` is accepted but unused here.
        y = self.linear(_x)
        return y


# Wire the agent (shared trunk + head, demo environment, replay buffer)
# into the training loop and run it.
agent = Agent(Shared, Head, DemoEnv(), Replay())
train = Train(agent)
train.run()
class GameView(arcade.View):  # the game screen
    """Main gameplay view: player, guards, bullets, a train and obstacles.

    NOTE(review): relies on module-level names defined elsewhere in the file
    (window, sp_coordinates_obstacles, sp_coordinates_guards, Player, Guard,
    Bullet, Train, MenuView, draw_hp, change_hit_box, SCREEN_WIDTH,
    SCREEN_HEIGHT and the math/time imports).
    """

    def __init__(self):
        super().__init__()
        self.field = window.get_size()
        self.background = None
        self.player_sprite = None
        self.people_list = None
        self.guards_sprite = None
        self.blocks = None
        self.land = None
        self.recharge = False  # True while the magazine is reloading
        self.textures = []
        # Window size snapshot; compared against window.get_size() each
        # frame in on_draw to react to resizes.
        self.permission = window.get_size()

    def setup(self):  # game presets
        # Obstacles: one sprite per relative coordinate, random texture 1-2.
        self.blocks = arcade.SpriteList(use_spatial_hash=True)
        for block in sp_coordinates_obstacles:
            number = randint(1, 2)
            self.blocks.append(
                arcade.Sprite("images/obstacles/2-%d.png" % number, 1.5,
                              center_x=self.permission[0] * block[0],
                              center_y=self.permission[1] * block[1],
                              hit_box_algorithm="Detailed"))
        # Train path (list of [x, y] points loaded from a text file).
        self.paint_reils_way_flag = False
        self.way_file = 'train_way.txt'
        self.way_list = []
        self.read_train_way()
        # The train itself.
        self.train = Train(0, 150, 12)
        # Background field texture.
        self.background = arcade.load_texture("images/земля.png")
        # Player character.
        self.people_list = arcade.SpriteList()
        self.player_sprite = Player()
        self.player_sprite.center_x = 400
        self.player_sprite.center_y = 50
        self.people_list.append(self.player_sprite)
        # Guards, each with an A* barrier list for path-finding.
        self.guards_list = arcade.SpriteList()
        for i in range(len(sp_coordinates_guards)):
            x, y = sp_coordinates_guards[i]
            self.guards_sprite = Guard(x, y)
            self.guards_sprite.barrier_list = arcade.AStarBarrierList(
                self.guards_sprite, self.blocks, 20, 0,
                window.get_size()[0], 0,
                window.get_size()[1])
            self.guards_list.append(self.guards_sprite)
            self.people_list.append(self.guards_sprite)
        # Bullets.
        self.bullet_list = arcade.SpriteList()
        hero = self.player_sprite
        # Initial mouse coordinates (start at the player's position).
        self.mouse_pos = {
            'x': hero.center_x,
            'y': hero.center_y,
            'dx': 0,
            'dy': 0,
            'button': 0
        }
        # Combined list of everything drawn/collided.
        self.all_sprites = arcade.SpriteList()
        self.all_sprites.extend(self.people_list)
        self.all_sprites.extend(self.bullet_list)
        self.train.append_all(self.all_sprites)
        # Timers.
        self.time = 0
        self.start_time = time.time()
        self.time_for_collision = time.time()
        # For the reload mechanic.
        self.start_recharge = self.time
        # This variable alternates the hit boxes every few ticks:
        # 0 - bottom quarter of the sprite
        # 1 - the whole sprite
        self.index = 0

    def paint_reils_way(
            self, update=False, end=False
    ):  # long train-path authoring routine - we sketch an approximate path
        if update:
            list1 = self.way_list
            if len(list1) == 0:
                self.way_list.append(
                    [self.mouse_pos['x'], self.mouse_pos['y']])
            delta_x = (self.mouse_pos['x'] - list1[-1][0])
            delta_y = (self.mouse_pos['y'] - list1[-1][1])
            # Only append a new point once the mouse moved far enough;
            # points are spaced exactly 25 px apart along the drag direction.
            if sqrt((delta_x)**2 + (delta_y)**2) > 25:
                angle = atan2(delta_y, delta_x)
                self.way_list.append([
                    25 * cos(angle) + list1[-1][0],
                    25 * sin(angle) + list1[-1][1]
                ])
        if end:
            # Snap the sketched points to a 25-px grid, drop duplicates and
            # diagonal kinks, then persist the result to self.way_file.
            list_map = []
            file = open(self.way_file, 'w')
            for x, y in self.way_list:
                coord = (12.5 + 25 * int(x / 25), 12.5 + 25 * int(y / 25))
                if coord not in list_map:
                    if len(list_map) > 1:
                        if abs(list_map[-2][0] -
                               coord[0]) == 25 and abs(list_map[-2][1] -
                                                       coord[1]) == 25:
                            print(
                                F'между {list_map[-2]} {coord} del list_map[-1]'
                            )
                            del list_map[-1]
                    list_map.append(coord)
            for coord in list_map:
                file.write(F'{coord[0]} {coord[1]}\n')
            file.close()
            print('Завершено создание карты')
            list_map.clear()
            self.way_list = []

    def read_train_way(self):  # reads the train path from the file
        try:
            way_file = open(self.way_file, 'r')
            for line in way_file:
                self.way_list.append(list(map(float, line.split())))
            print('good read')
        except:
            print('ошибка чтения')

    def on_draw(self):  # draw everything
        arcade.start_render()  # this call starts the render pass
        # Train path.
        arcade.draw_line_strip(self.way_list, (255, 0, 0))
        # Draw everything bottom layer to top layer.
        arcade.draw_lrwh_rectangle_textured(0, 0,
                                            window.get_size()[0],
                                            window.get_size()[1],
                                            self.background)
        self.bullet_list.draw()
        self.people_list.draw()
        # On window resize, reposition the obstacles to the new size.
        if window.get_size() != self.permission:
            self.permission = window.get_size()
            for i in range(len(self.blocks)):
                self.blocks[i].center_x = self.permission[
                    0] * sp_coordinates_obstacles[i][0]
                self.blocks[i].center_y = self.permission[
                    1] * sp_coordinates_obstacles[i][1]
            self.blocks.draw()
        else:
            self.blocks.draw()
        #for i in range(1, len(self.way_list)):
        #    arcade.draw_line(self.way_list[i-1][0], self.way_list[i-1][1], self.way_list[i][0], self.way_list[i][1], (255, 0, 255), 2)
        # Elapsed time and remaining bullets in the magazine. TODO: move
        # this into a dedicated interface class.
        arcade.draw_text(
            F'{int(self.time-self.start_time)//60}:{int(self.time-self.start_time)%60}',
            SCREEN_WIDTH - 100, 20, arcade.color.WHITE, 16)
        arcade.draw_text(F':{self.player_sprite.bullet_now}', 40, 20,
                         arcade.color.WHITE, 16)
        # Health bars.
        hero = self.player_sprite
        draw_hp(hero.center_x, hero.center_y, hero._height, hero.max_hp,
                hero.hp)
        for guard in self.people_list:
            draw_hp(guard.center_x, guard.center_y, guard._height,
                    guard.max_hp, guard.hp)
        for thing in self.all_sprites:
            thing.draw_hit_box()
        # Guards' computed paths to the player.
        for guard in self.guards_list:
            if guard.path:
                arcade.draw_line_strip(guard.path, arcade.color.BLUE, 2)
        self.train.draw_all()

    def shot(self, shooter_sprite):  # shooting
        if shooter_sprite == self.player_sprite:
            hero = shooter_sprite
            if hero.bullet_now > 0:
                self.recharge = False  # firing cancels an ongoing reload
                hero.bullet_now -= 1
                one_bullet = Bullet(shooter_sprite, {
                    'x': hero.center_x,
                    'y': hero.center_y
                }, self.mouse_pos)
                self.bullet_list.append(one_bullet)
                self.all_sprites.append(one_bullet)
            elif hero.bullet_now == 0:
                # Firing on an empty magazine starts a reload.
                self.start_recharge = self.time
                self.recharge = True
            else:
                pass

    def recharge_move(self):  # reloading
        time_recharge = 1.0  # time to load one bullet
        if self.time - self.start_recharge > time_recharge + 0.03:
            self.start_recharge = self.time
        if self.player_sprite.bullet_now < 6 and self.recharge:
            # One bullet per `time_recharge` seconds until the magazine (6)
            # is full.
            if self.time - self.start_recharge >= time_recharge:
                self.player_sprite.bullet_now += 1
                self.start_recharge = self.time
        else:
            self.recharge = False

    def collision(self):  # collisions
        # Mutual push-apart between people, only for hit-box index 0.
        if self.index == 0:
            for people1 in self.all_sprites:  # Temporarily all sprites; later replace with people_list
                people_with_people = arcade.check_for_collision_with_list(
                    people1, self.people_list)
                for people2 in people_with_people:
                    rad = atan2(people1.center_y - people2.center_y,
                                people1.center_x - people2.center_x)
                    people1.hitbox_y = people1.center_y - 3 * people1.height // 8
                    people2.hitbox_y = people2.center_y - 3 * people2.height // 8
                    # Push apart horizontally and vertically separately. On
                    # an off-center hit, horizontal drift can continue until
                    # the overlap is resolved.
                    if abs(people1.hitbox_y - people2.hitbox_y
                           ) >= abs(people1.height - people2.height) // 2:
                        people2.center_y += -sin(rad) / abs(
                            sin(rad)) * people1.speed
                        people1.center_y += sin(rad) / abs(
                            sin(rad)) * people2.speed
                    elif abs(people1.center_x - people2.center_x
                             ) >= abs(people1.width - people1.width) // 2:
                        people2.center_x += -cos(rad) / abs(
                            cos(rad)) * people1.speed
                        people1.center_x += cos(rad) / abs(
                            cos(rad)) * people2.speed
        if self.index == 1:
            # Bullet interactions, checked against full sprites (index 1).
            # A bullet disappears on impact; hitting a living sprite also
            # subtracts health.
            for bullet in self.bullet_list:
                all_shot_list = arcade.check_for_collision_with_list(
                    bullet, self.all_sprites)
                guards_shot_list = arcade.check_for_collision_with_list(
                    bullet, self.people_list)
                for item in all_shot_list:
                    if item != bullet.shooter_sprite:
                        self.bullet_list.remove(bullet)
                        self.all_sprites.remove(bullet)
                        if item in guards_shot_list:
                            guard = item
                            guard.hp -= bullet.atk
                            if guard.hp < 1:
                                self.people_list.remove(guard)
                                self.all_sprites.remove(guard)
                        break
                # Cull bullets that left the screen (with a 10-px margin).
                if bullet.center_x > SCREEN_WIDTH + 10 or bullet.center_x < -10 or \
                   bullet.center_y > SCREEN_HEIGHT + 10 or bullet.center_y < -10:
                    self.bullet_list.remove(bullet)
                    self.all_sprites.remove(bullet)

    def on_update(self, delta_time):  # main game loop tick
        self.train.update()
        self.people_list.update()
        # Guards chase the player via A* when he is 100-150 px away and in
        # line of sight.
        for guard in self.guards_list:
            distant = int(
                sqrt((guard.center_x - self.player_sprite.center_x)**2 +
                     (guard.center_y - self.player_sprite.center_y)**2))
            if (distant > 100 and distant < 150) and arcade.has_line_of_sight(
                    guard.position, self.player_sprite.position, self.blocks,
                    200):
                guard.path = arcade.astar_calculate_path(
                    guard.position,
                    self.player_sprite.position,
                    guard.barrier_list,
                    diagonal_movement=False)
            else:
                guard.path = []
        #self.player_sprite.update_angle(self.mouse_pos)  # pass mouse coords to the player if needed
        # Fire on left mouse button.
        if self.mouse_pos['button'] == 1:
            self.shot(self.player_sprite)
        self.bullet_list.update()
        if self.paint_reils_way_flag:
            self.paint_reils_way(update=True)
        # Collisions proper.
        self.collision()
        # Reloading.
        self.recharge_move()
        self.time = time.time()
        self.mouse_pos['button'] = 0
        # Swap the hit box every n*0.016 s, where n is the tick count.
        if time.time() - self.time_for_collision >= 0.048:
            self.index = (self.index + 1) % 2
            for thing in self.people_list:
                change_hit_box(thing, self.index)
            self.time_for_collision = time.time()

    def on_key_press(
            self, key,
            modifiers):  # player movement with WASD or arrow keys
        if key == arcade.key.UP or key == arcade.key.W:
            self.player_sprite.y_sp.append(1)
        if key == arcade.key.DOWN or key == arcade.key.S:
            self.player_sprite.y_sp.append(-1)
        if key == arcade.key.LEFT or key == arcade.key.A:
            self.player_sprite.x_sp.append(-1)
        if key == arcade.key.RIGHT or key == arcade.key.D:
            self.player_sprite.x_sp.append(1)
        # M toggles the path-authoring mode (see on_key_release).
        if key == arcade.key.M:
            self.paint_reils_way_flag = True
            self.way_list = []
        if key == arcade.key.SPACE:
            game_over_view = MenuView()
            self.window.show_view(game_over_view)

    def on_key_release(self, key, modifiers):  # handle key release
        player = self.player_sprite
        if key == arcade.key.UP or key == arcade.key.W:
            del player.y_sp[player.y_sp.index(1)]
        if key == arcade.key.DOWN or key == arcade.key.S:
            del player.y_sp[player.y_sp.index(-1)]
        if key == arcade.key.LEFT or key == arcade.key.A:
            del player.x_sp[player.x_sp.index(-1)]
        if key == arcade.key.RIGHT or key == arcade.key.D:
            del player.x_sp[player.x_sp.index(1)]
        if key == arcade.key.M:
            self.paint_reils_way_flag = False
            self.paint_reils_way(end=True)
        # R starts a manual reload.
        if key == arcade.key.R:
            self.start_recharge = self.time
            self.recharge = True

    def on_mouse_motion(
            self, x, y, dx,
            dy):  # remember mouse coordinates whenever it moves
        self.mouse_pos = {'x': x, 'y': y, 'dx': dx, 'dy': dy, 'button': 0}

    def on_mouse_press(
            self, x, y, button,
            modifiers):  # record mouse clicks; we fire on press
        self.mouse_pos['button'] = button
# NOTE(review): this chunk starts mid-script — `f`, `name`, `data_pth`,
# `batch_size`, `epochs`, `num_net`, `parentdir`, `currentdir`, `traceback`
# and `mail` must be defined earlier in the file.
train_net = 10
out_cls = 10 * f
train_pr_net = 5000 * f
test_pr_net = 1000 * f
print_freq = 5
val_freq = 100
# Coefficients forwarded to Train (semantics defined in Train itself).
a = 10e1
b = 10e1
c = 10e2
lr_func = 1
# Earlier hyper-parameter sweeps, kept for reference:
# para_ls = [(7e-2, -0.55), (7e-2, -0.54), (7e-2, -0.56), (7e-2, -0.525), (7e-2, -0.5), (7e-2, -0.575), (7e-2, -0.6), (6e-2, -0.55), (6e-2, -0.525), (6e-2, -0.5), (6e-2, -0.575), (6e-2, -0.6)]
# para_ls = [(7e-2, -0.54), (7e-2, -0.56), (7e-2, -0.525), (7e-2, -0.5), (7e-2, -0.575), (7e-2, -0.6)]
# para_ls = [(1.5e-2, 0.1), (1.25e-2, 0.1), (1.75e-2, 0.1), (2e-2, 0.1)]
para_ls = [(1e-3, 0.1), (1.25e-3, 0.1), (1.5e-3, 0.1), (1.75e-2, 0.1),
           (2e-2, 0.1)]
# para_ls = [(1e-2, -0.4), (1e-2, -0.6), (1e-2, -0.7), (7e-2, -0.8), (5e-2, -0.55), (4e-2, -0.55), (4e-2, -0.45)]
# Checkpoint to resume from, and which nets to resume.
model_pth = parentdir + '/runs/test-150/model_best_class_9.pth.tar'
tp1 = 50
net_ls = [8, 9, 3, 7]
train = Train(name=name,
              data_pth=data_pth,
              batch_size=batch_size,
              epochs=epochs,
              num_net=num_net,
              train_net=train_net,
              test_pr_net=test_pr_net,
              train_pr_net=train_pr_net,
              out_cls=out_cls,
              print_freq=print_freq,
              para_ls=para_ls,
              a=a,
              b=b,
              c=c,
              lr_func = lr_func,
              tp1=tp1)
try:
    # train.train(val_freq=val_freq, dir_txt=currentdir+'/')
    train.resume(pth=model_pth,
                 net_ls=net_ls,
                 dir_txt=currentdir + '/',
                 val_freq=val_freq)
except:
    # Bare except is deliberate best-effort here: any failure during the
    # long-running job is reported by e-mail rather than crashing silently.
    e = traceback.format_exc()
    traceback.print_exc()
    info = 'ERROR in your code!!!\n\n %s'%e
    mail(subject=name + ' Exception', info=info)
# NOTE(review): mid-script chunk — `map_size`, `client_range`, `args`,
# `vert_pos`, `nVertices`, `mapPath`, `availability` and
# `stoppingPointsPos` are defined earlier in the file.
sim = Simulation()
net = Network(sim, log=False)
# Radio ranges: trains reach three times as far as clients.
sim.clientRange = int(map_size * client_range)
sim.trainRange = 3 * sim.clientRange

# ------------------------------
# Creating train objects
nTrains = args.number_of_trains
v_step = args.step_speed
for i in range(nTrains):
    # Each train spawns at a random map vertex.
    pos = vert_pos[randint(0, nVertices - 1)]
    tr = Train(i, pos, v_step, mapPath, availability, net, log=True)
    sim.devices += [tr]

# ------------------------------
# Creating initial client object
nClients = 0
currCli = 0.5
clientList = []
# Pick distinct pickup/dropoff stopping points; if they collide, shift the
# dropoff to the next point (wrapping past the end).
init = randint(0, len(stoppingPointsPos) - 1)
fin = randint(0, len(stoppingPointsPos) - 1)
if fin == init:
    fin += 1
    if fin == len(stoppingPointsPos):
        fin = 0
def __init__(self, modelClass):
    """Store the model class and create the Train helper for this runner.

    Args:
        modelClass: class (not instance) of the model to run; kept on the
            instance for later construction.
    """
    super(TensorflowFuncRun, self).__init__()
    self.modelClass = modelClass
    self.train = Train()
def getHint(self, Train):
    """Return the next scheduled time of the given train.

    Args:
        Train: a train object exposing getNextTime().
            NOTE(review): this parameter name shadows the Train class
            imported elsewhere in the file; renaming it would change the
            keyword interface, so it is only flagged here.
    """
    return Train.getNextTime()
positive = set() data = open("../../data/" + str(year) + "/author_paper_topic") for line in data: a_i = int(line.split()[0]) t_i = int(line.split()[1]) if t_i == topic and a_i in author_set: positive.add(a_i) print "-.-.-.-.-.-.-.-.-.-.-.-.", topic, topic_match[topic][ 2], year, "-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-" Train_Data, weight = Ftr_Ext(year, out_folder, topic, author_set) try: beta = Train(Train_Data, year, out_folder, weight) fout.write(str(topic) + "\t" + str(year)) for b in beta: fout.write("\t" + str(b)) fout.write("\n") except: pass continue beta_0 = 0 ground_truth = 0 result_arr = {} result_arr[1], result_arr[2], result_arr[3], result_arr[4] = Test( year + 1, out_folder, beta, Train_Data, weight, topic,
def __init__(self, conf=None):
    """Initialise the Bayes wrapper from a configuration mapping.

    Args:
        conf: optional configuration dict. If it carries "BAYES_DATA",
            that object is used as the trained data source; otherwise a
            Train is built from the config and trained immediately.
    """
    # Bug fix: the original default was a mutable dict (`conf={}`), which
    # is evaluated once and shared by every call — and it is even stored
    # on `self.conf`, so one instance mutating it would leak into others.
    # Use None as the default and create a fresh dict per call.
    if conf is None:
        conf = {}
    self.data = conf.get("BAYES_DATA", None)
    if not self.data:
        self.data = Train(conf=conf)
        self.data.train()
    self.conf = conf
def train(train_gen, valid_gen, df_train, df_val, batch_size, target_size):
    """Preview the training data, build a U-Net, and launch training.

    Args:
        train_gen, valid_gen: data generators for training/validation.
        df_train, df_val: dataframes describing the two splits.
        batch_size: batch size forwarded to the trainer.
        target_size: spatial size (H, W) of the model input.
    """
    n_epochs = 150  # fixed epoch budget for this pipeline
    # Show a sample of the training data before the run starts.
    Drawer.draw_data_samples(df_train)
    # Three colour channels are appended to the spatial target size.
    input_shape = target_size + (3,)
    unet = Model.create_model(input_size=input_shape)
    Train.run_train(unet,
                    train_gen=train_gen,
                    valid_gen=valid_gen,
                    batch_size=batch_size,
                    df_train=df_train,
                    df_val=df_val,
                    epochs=n_epochs)
GLOBAL_RUNNING_R[key].append(ep_r[key]) print( t[key], '|Ep_r' + key + ': %.2f' % ep_r[key], ) if __name__ == '__main__': UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event() GLOBAL_PPO = DPPO_class() QUEUE = queue.Queue() UPDATE_EVENT.clear() # not update now ROLLING_EVENT.set() # start to roll out COORD = tf.train.Coordinator() SAVER = tfv.train.Saver() if os.path.isfile(SAVE_PATH + '.meta'): SAVER.restore(GLOBAL_PPO.sess, SAVE_PATH) GLOBAL_UPDATE_COUNTER = 0 GLOBAL_RUNNING_R = {} train = Train(GLOBAL_PPO) thread_train = threading.Thread(target=Train.learn, ) thread_train.start() thread = threading.Thread(target=GLOBAL_PPO.update, ) thread.start() app.run(host='0.0.0.0', port=8080, debug=True) adjust_get.run(host='0.0.0.0', port=8080, debug=True) COORD.join(thread)
'''
Created on Jan 23, 2017

@author: camilo
'''
from Train import Train
from Test import Test

if __name__ == '__main__':
    # Earlier experiment configurations, kept for reference:
    #Train("CDR-train.xml",180,"CDR")
    #Test("DDI-test.xml",20,"CDR")
    # Train("DDI-train-*.xml",242,"DDI")
    # Test("DDI-test.xml",30,"DDI")
    # Train on the DDI corpus with explicit SVM options, then evaluate.
    Train("DDI-train-*.xml",120,"DDI",mode="-t 5 -C T+V ")
    Test("DDI-test.xml",60,"DDI")
"""Configure and run a full train/test cycle for the 85-class model."""
from Train import Train

# Hyper-parameters and data locations for this run.
params = {
    'EPOCHS': 15,
    'BATCH_SIZE': 64,
    'LEARNING_RATE': 0.0001,  # Starting Learning rate
    'N_CLASS': 85,
    'DIVIDE_LEARNING_RATE_AT': [5],  # Which epochs, learning rate should be divided. Starts from 0.
    'IMAGE_SIZE': (224, 224),
    'TRAIN_PATH': './Data/train',
    'TEST_PATH': './Data/test',
    'TRAIN_VAL_RATIO': 0.98,  # Keeping a small percent for validation data
                              # We are not doing stratified sampling
    'DATA_LABELS': './Data/meta-data/train.csv',
}

trainer = Train(params)
trainer.train()
trainer.test()
from Test import Test
from Train import Train
from tfHelper import tfHelper
import model
import os

te = Test()
tr = Train()

# Resume from a saved model if one exists, otherwise build a fresh one.
# NOTE(review): the variable `model` below rebinds the imported `model`
# module — intentional here, but the module is unreachable afterwards.
if os.path.exists("model.h5"):
    model = tfHelper.load_model("model")
else:
    model = model.model()

# Endless alternation: evaluate the current model, then train it further.
while True:
    te.test(model)
    model = tr.train(model)
if line_count > i: edge_count += 1 line_count += 1 if nEdges != edge_count: raise Exception( "Wrong input file format. Number of edges given doesn't match the specified number" ) print("\t - Read over %d edges in graph" % edge_count) # Creating Network sim = Simulation() net = Network(sim, log=True) # Creating train object pos = list(vert_pos[1]) tr = Train(0, pos, mapPath, availability, net, log=True) sim.devices += [tr] tr.path = [(0, 15), (10, 15), (10, 5), (20, 5), (20, 0), (30, 0), (30, 5), (30, 15), (20, 15), (20, 25)] simTime = 0 fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(1, 1, 1) fig.suptitle("TR.AI.NS Simulation", fontweight='bold', fontsize=17) plt.show(block=False) while tr.path: print("Simulation counter: {}".format(simTime))
from Test import Test from Train import Train TRAIN = True df = pd.read_csv(file) trains = [(), (), (), ()] if TRAIN: for epo, bat in trains: train = Train(data_file=df, epochs=epo, batch_size=bat, H1=128, H2=256, save=True, NAME='AAPL') train.run() train.run_test() else: testing = Test( data_file=df, model_file='Outputs/AAPL_1min_10shift_979585.pth', H1=128, H2=256, fx_pair='AAPL', round_to=4,
"""Entry point: train the pacman agent, then evaluate it on success."""
import tensorflow as tf
import numpy as np
from Train import Train
from Test import Test
from Config import config_pacman as config

# install requirements with:
# pip install -r requirements.txt

if __name__ == "__main__":
    # Global numeric settings: double precision plus fixed seeds so runs
    # are reproducible. (42 is the answer to everything.)
    tf.keras.backend.set_floatx('float64')
    seed = 42
    np.random.seed(seed)
    tf.random.set_seed(seed)

    # Train first; only run the evaluation when training reports success.
    if Train(config).start():
        Test(config)
from Train import Train
from Client import Client
from Network import Network
# import simulation...
from Protocol import Message, MsgTypes


class Simulation:
    """Minimal device container with fixed radio ranges for trains/clients."""

    def __init__(self):
        self.devices = []       # all devices participating in the network
        self.trainRange = 15    # train radio range
        self.clientRange = 5    # client radio range


# Demo scenario: three trains and one client on the same map.
tr1 = Train(1, (-1, 0), "map.txt")
tr2 = Train(2, (2, 3), "map.txt")
tr3 = Train(3, (0, 7), "map.txt")
cl1 = Client(-1, (0, 0), "map.txt")

sim = Simulation()
net = Network(sim, log=True)
sim.devices = [tr1, cl1, tr2, tr3]

# The client requests a ride; an election message is also built
# (NOTE(review): `mE` is constructed but never broadcast here).
m = Message(MsgTypes.req, sender=cl1.id, pickup='Point 1', dropoff='Point 2')
mE = Message(MsgTypes.elec, sender=tr1.id, distance=10, client=-1)
net.broadcast(m.encode(), cl1)
print("Train 1: %s" % tr1.messageBuffer)
# NOTE(review): mid-script chunk — `train_mode`, `model_name`, `dataset`,
# `VGGNetNthLayer`, `LoadDataSet`, `Load_BigModel`, `LoadModel`, `Train`
# and `save_model` come from earlier in the file.
if train_mode=='SCRATCH':
    big_model_name =None
if train_mode=='WEIGHTS':
    # Layer indices differ between batch-norm ('_bn') and plain VGG variants.
    if model_name[-3:]=='_bn':
        ensembleModel=VGGNetNthLayer(dataset,[13,26,39,52],model_name)
    else:
        ensembleModel=VGGNetNthLayer(dataset,[9,18,27,36],model_name)
print('Train mode='+train_mode)
Trainloader,Testloader=LoadDataSet(dataset).data
# NOTE(review): `ensemblModel` (sic) returned here is what is actually
# passed to Train below — the `ensembleModel` built above is never used,
# and `big_model_name` is overwritten regardless of train_mode. Confirm
# whether the spelling mismatch is intentional.
big_model_name,ensemblModel=Load_BigModel(model_name,train_mode,dataset)
model=LoadModel(model_name,dataset,train_mode).model
optimizer=torch.optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.999),
                           eps=1e-08,weight_decay=0.0005)
# Scheduler steps on a quantity to maximise (accuracy), not minimise.
scheduler=ReduceLROnPlateau(optimizer, 'max',verbose=True,patience=5,eps=1e-9)
#%%
trainAcc_to_file,testAcc_to_file,trainloss_to_file,testloss_to_file,Parameters=Train(model,
    optimizer,
    Trainloader,
    Testloader,
    Model_name=model_name,
    dataset= dataset,
    epochs=None,
    Train_mode=train_mode,
    scheduler=scheduler,
    big_model_name=big_model_name,
    ensembleModel=ensemblModel)
save_model(model,trainAcc_to_file,testAcc_to_file,trainloss_to_file,testloss_to_file,Parameters,
           model_name,train_mode,dataset,plot=False)
#!/usr/bin/env python # -*- coding: utf-8 -*- # 测试Train类 import sys sys.path.append("../") from Train import Train from Station import Station train = Train("K920", "2015-01-27") print train.getAllNumbers() for item in train.getStationList(): print item.name, item.date, item.arrive_time, item.leave_time, item.distance print train.getArriveMoment(u"许昌") print train.getArriveMoment(u"驻马店") print train.getArriveMoment(u"西平")