def createTrains(self):
    # initialize the trains
    x1 = self.line3.get_length_fromobj(0) - self.trains_length[0] / 2
    self.train1 = train.Train(0, x1, 1, self.kalman_trains[0],
                              self.trainsLine[0], self.trains_length[0])
    self.train1.setColor(Qt.darkGreen)
    self.train1.setReverse(False)

    x2 = self.line3.get_x_fromobj(len(self.line3.map_object) - 2) + self.trains_length[1] / 2
    self.train2 = train.Train(0, x2, 2, self.kalman_trains[1],
                              self.trainsLine[1], self.trains_length[1], -2)
    self.train2.setColor(Qt.darkBlue)
    self.train2.setUp(True)
    self.train2.setReverse(True)

    x3 = self.line1.get_length_fromobj(0) - self.trains_length[2] / 2
    self.train3 = train.Train(0, x3, 5, self.kalman_trains[2],
                              self.trainsLine[2], self.trains_length[2])
    self.train3.setColor(Qt.darkRed)
    self.train3.setReverse(False)

    x4 = self.line1.get_x_fromobj(len(self.line1.map_object) - 1) + self.trains_length[3] / 2 + 12
    self.train4 = train.Train(0, x4, 6, self.kalman_trains[3],
                              self.trainsLine[3], self.trains_length[3], -2)
    self.train4.setColor(Qt.darkMagenta)
    self.train4.setUp(True)
    self.train4.setReverse(True)
def Train(model):
    reader_tr_xs = []  # training data coming from the other models
    for other in others:
        reader = data.PathReader(join(other, 'train'), cfg.names_tr)
        reader_tr_xs.append(reader)
    # the original training data
    reader_tr_x_origin = data.PathReader(cfg.path_train, cfg.names_tr)
    reader_tr_xs.append(reader_tr_x_origin)
    reader_tr_y = data.PathReader(cfg.path_label, cfg.names_tr)
    gen_tr = data.DataGenerator(reader_tr_xs, reader_tr_y).GetGenerator()

    reader_val_xs = []  # validation data coming from the other models
    for other in others:
        reader = data.PathReader(join(other, 'train'), cfg.names_val)
        reader_val_xs.append(reader)
    # the original validation data
    reader_val_x_origin = data.PathReader(cfg.path_train, cfg.names_val)
    reader_val_xs.append(reader_val_x_origin)
    reader_val_y = data.PathReader(cfg.path_label, cfg.names_val)
    gen_val = data.DataGenerator(reader_val_xs, reader_val_y).GetGenerator()

    train.Train(model, gen_tr, gen_val)
    model.save_weights(GetModelPath())
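The PathReader calls above imply a particular directory layout: each entry in others appears to hold that model's saved outputs under a train/ subfolder, keyed by the same file names (cfg.names_tr / cfg.names_val) as the originals. A sketch of the assumed layout, inferred from join(other, 'train') rather than confirmed by the source:

    model_a/train/<name>     # model_a's saved outputs for each sample
    model_b/train/<name>
    <cfg.path_train>/<name>  # the original inputs
    <cfg.path_label>/<name>  # the labels

So the model being trained sees the other models' outputs stacked alongside the original input, a form of stacked ensembling, assuming DataGenerator combines the readers per sample.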
def Run(output, test, gold, rule, dicts):
    print("Running...")
    train.Train(output, rule, dicts)
    analyze.Analyze(output, test, output + '.fore', rule)
    synthesize.Synthesize(output, output + '.phon', output + '.back', rule)
    clean.Clean(output, output + '.raw', rule)
    evaluate.Evaluate(output, output + '.parsed', gold)
def perform_training(self, pts, ret, num_day):
    path = ("./training_history/" + str(pts) + "_" + str(ret)
            + "_" + str(num_day) + "/")
    if not os.path.isdir(path):
        os.mkdir(path)
    if not self.datafiles:
        print("datafiles empty")
        return
    for i in self.datafiles:
        folder = path + self.datafiles[i][:11] + "/"
        if not os.path.isdir(folder):
            os.mkdir(folder)
        try:
            t = train.Train(folder, i)
            t.training("trend_following_weights", pts, ret, num_day)
        except Exception:
            d = {
                "path": path,
                "pts_val": pts,
                "min_ret": ret,
                "num_days": num_day
            }
            s = store_results.Store_model_result()
            s.record_error(d)
            print("error recorded")
def Train(db):
    curHead = request.json['face']
    train.AddTrain(curHead)
    print('Added to train')
    train.ReadHeads()
    train.Train()
    tags.TagThis(db, curHead)
    return {'result': True}
def train(self):
    self.progress_bar()
    # os.system("C:\\Users\\acer\\AppData\\Local\\Temp\\cnn.py\\train.py")
    obj = train.Train()
    messagebox.showinfo("Successfully trained", "Successfully trained", parent=root)
    self.TProgressbar1['value'] = 0
    root.update_idletasks()
def main():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    sess = tf.InteractiveSession(config=config)
    model = train.Train(sess, args)
    if args.trainable:
        model.train()
    else:
        print(model.test())
def _train():
    logger.info('*' * 100)
    logger.info('Initializing the training environment')
    training = train.Train()
    logger.info('Training environment initialized successfully')
    logger.info('*' * 100)
    return training.run()
def main():
    Train = train.Train(trial=OPTIONS.trial, step=OPTIONS.step,
                        size=[HEIGHT, WIDTH, CHANNEL],
                        batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE,
                        max_epoch=EPOCH, tfrecord_path=TF_RECORD_PATH,
                        checkpoint_dir=CHECK_POINT_DIR,
                        num_of_data=NUM_OF_DATA, conf=conf)
    Train()
def main():
    if args.is_train:
        data_generator = dataGenerator.dataGenerator(
            output_shape=[HEIGHT, WIDTH, CHANNEL],
            meta_batch_size=META_BATCH_SIZE,
            task_batch_size=TASK_BATCH_SIZE,
            tfrecord_path=TFRECORD_PATH)
        Trainer = train.Train(trial=args.trial, step=args.step,
                              size=[HEIGHT, WIDTH, CHANNEL],
                              scale_list=SCALE_LIST,
                              meta_batch_size=META_BATCH_SIZE, meta_lr=META_LR,
                              meta_iter=META_ITER,
                              task_batch_size=TASK_BATCH_SIZE, task_lr=TASK_LR,
                              task_iter=TASK_ITER,
                              data_generator=data_generator,
                              checkpoint_dir=CHECKPOINT_DIR, conf=conf)
        Trainer()
    else:
        if args.model == 0:
            model_path = 'SR/Rep-10000-MLR4-TLR2-TI10-TBS8/model-10000'
        img_path = sorted(glob.glob(os.path.join(args.inputpath, '*.png')))
        gt_path = sorted(glob.glob(os.path.join(args.gtpath, '*.png')))
        scale = 2.0
        try:
            kernel = scipy.io.loadmat(args.kernelpath)['kernel']
        except Exception:
            kernel = 'cubic'  # fall back to bicubic when no kernel file is given
        Tester = test.Test(model_path, args.savepath, kernel, scale, conf,
                           args.model, args.num_of_adaptation)
        P = []
        for i in range(len(img_path)):
            img = imread(img_path[i])
            gt = imread(gt_path[i])
            _, pp = Tester(img, gt, img_path[i])
            P.append(pp)
        avg_PSNR = np.mean(P, 0)
        print('[*] Average PSNR ** Initial: %.4f, Final: %.4f' % tuple(avg_PSNR))
def train_one_model(path, datafile, model_type, pts_val, min_ret, num_days):
    path += "/"
    if not os.path.isdir(path):
        os.mkdir(path)
    t = train.Train(path, datafile)
    if model_type == "trend_following_weights":
        t.training("trend_following_weights")
    elif model_type == "trend_following":
        t.training("trend_following")
    elif model_type == "mean_reverting":
        t.training("mean_reverting", pts_val, min_ret, num_days)
    else:
        print("error")
def generate():
    # data1 = dataset.read_data_sets(DATA1_DIR, DATA2_DIR, reshape=False, one_hot=True, noise=1,
    #                                num_train=NUM_TRAIN, num_test=NUM_TEST, data_index=data1_index)
    # data2 = dataset.read_data_sets(DATA1_DIR, DATA2_DIR, reshape=False, one_hot=True, noise=1,
    #                                num_train=NUM_TRAIN, num_test=NUM_TEST, data_index=data2_index)
    data3 = dataset.read_data_sets(DATA1_DIR, DATA2_DIR, reshape=False, one_hot=True, noise=0,
                                   num_train=NUM_TRAIN, num_test=NUM_TEST, data_index=data3_index)
    for i in range(num_sample):
        print("---------- Iteration " + str(i) + " ----------")
        # train.Train(MODEL1_DIR + str(i), data1)
        # train.Train(MODEL2_DIR + str(i), data2)
        train.Train(MODEL3_DIR + str(i), data3)
def train_one_model(path, datafile, model_type, pts_val, min_ret):
    f = path + "_" + model_type
    if not os.path.isdir(f):
        os.mkdir(f)
    t = train.Train(f, datafile)
    if model_type == "trend_following_weights":
        t.training("trend_following_weights")
    elif model_type == "trend_following":
        t.training("trend_following")
    elif model_type == "mean_reverting":
        t.training("mean_reverting")
    else:
        print("error")
def Train(model):
    reader_tr_x = data.PathReader(cfg.path_train, cfg.names_tr)
    reader_tr_y = data.PathReader(cfg.path_label, cfg.names_tr)
    gen_tr = data.DataGenerator([reader_tr_x], reader_tr_y).GetGenerator()
    # if cfg.debug: data.DebugGenerator(gen_tr)

    reader_val_x = data.PathReader(cfg.path_train, cfg.names_val)
    reader_val_y = data.PathReader(cfg.path_label, cfg.names_val)
    gen_val = data.DataGenerator([reader_val_x], reader_val_y).GetGenerator()
    # if cfg.debug: data.DebugGenerator(gen_val)

    train.Train(model, gen_tr, gen_val)
    model.save_weights(GetModelPath())
def __init__(self):
    """Initialization of all variables needed."""
    # 480p 2.39:1 -> 720x302
    # 2048x2048 needs more than 7.3 GB of vRAM for the master DISC model

    # Load the preprocessed data
    preprocessVars = Preprocess()

    # Training and display of the trained models
    self.modelTrain = train.Train(preprocessVars)
    self.disp = display.Display(preprocessVars)
def train(self):
    self.hide_pane()
    # note: assigning to self.train shadows this method on the instance
    self.train = train.Train()
    self.lower_frame = Frame(self.root, bg='#42c2f4', bd=10)
    self.lower_frame.place(relx=0.5, rely=0.4625, relwidth=0.8,
                           relheight=0.1, anchor='n')
    self.label1 = Label(self.lower_frame, font=60, bg='#42c2f4', fg='black',
                        text="Train Selesai !!")  # Indonesian: "Training finished!!"
    self.label1.pack()
def __init__(self):
    self.sensor_collector = SensorDataCollector()
    self.apliance_controller = AplianceController()
    self.sitting_history = deque(maxlen=10)
    self.motor = Motor()
    self.leave = True
    self.come = False
    self.sit_time = 0
    self.sleeping_time = 2
    self.exercise_time = 40
    self.model = train.Train('train')
    self.model.run()
    print("Start training model!")
    self.model.train()
    print("Model training complete!")
def main():
    errorRate = 0.00001
    allNodes = read.Read()
    weight = weightPath.CreateWeightPath(allNodes[0])
    print(allNodes[0])
    allNodes[0], result, difference = tryNetwork.Try(allNodes[0], weight, True)
    delta = cloneNode.CloneNodeValue(weight, 0)  # the first delta must be 0
    delta, nodeS = calculate.CalculateDelta(allNodes[0], weight, result, delta)
    print("NewNodes=", allNodes[0])
    print("Results=", result)
    print("Differences=", difference)

    end = False
    iteration = 0
    firstTime = time.time()
    while not end:
        best = True
        for nodeWeight in allNodes:
            iteration += 1
            weight = train.Train(weight, delta)
            nodeWeight, result, difference = tryNetwork.Try(nodeWeight, weight, False)
            delta, nodeS = calculate.CalculateDelta(nodeWeight, weight, result, delta)
            for i in range(len(difference)):
                if difference[i] >= errorRate:
                    best = False
        if best:
            end = True
            print("\n\nTraining Succeeded!\n")
            print("Iteration=", iteration)
            print("Time=", round(float(time.time() - firstTime), 4), "\n")
            break

    inputLen = len(allNodes[0][0]) - 1
    outputLen = len(allNodes[0][-1])
    end = input("Press Enter to try the network ('e' then Enter to exit)")
    while end != 'e':
        for i in range(inputLen):
            nodeWeight[0][i] = float(input("Please enter an input"))
        nodeWeight, result, difference = tryNetwork.Try(nodeWeight, weight, False)
        print("Results=", result)
        end = input("Press Enter to try the network ('e' then Enter to exit)")
def main():
    # Limit how much GPU memory is used; otherwise all of it is claimed by default
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    # With allow_growth=True the allocator would instead grow GPU memory on demand
    # rather than reserving it all up front
    config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    # Build the session first, then define the operations
    sess = tf.InteractiveSession(config=config)
    # train.Train arguments (via args):
    #   1st: DCGAN vs. WGAN-GP mode; they differ only in the loss function
    #        and the optimizer learning rate, everything else is identical
    #   2nd: args.trainable, True for training, False for testing
    #   3rd: whether to load the pretrained weights
    #   4th: the number of labeled samples
    model = train.Train(sess, args)
    if args.trainable:
        model.train()
    else:
        print(model.test())
def read_input_data(self, input_filename):
    """
    Read the metro network from the file.

    :param input_filename: name of a file containing the metro network
        information in the expected format
    :return: None
    """
    current_line = None
    for one_line in open(input_filename, "r"):
        one_line = one_line.rstrip('\n')
        if "#" in one_line:
            current_line = line.Line(one_line.split("#")[1].rstrip())
            current_line.stations = []
            self.lines.append(current_line)
        if "START" in one_line:
            current_line = None
            new_start = one_line.split(':')
            self.start_station = station.Station(
                new_start[0].split("=")[1], int(new_start[1]), "", "S")
        if "END" in one_line:
            current_line = None
            new_end = one_line.split(':')
            self.end_station = station.Station(
                new_end[0].split("=")[1], int(new_end[1]), "", "E")
        if 'TRAINS' in one_line:
            current_line = None
            train_num = int(one_line.split('=')[1])
            for one_train in range(1, train_num + 1):
                new_train = train.Train("T." + str(one_train),
                                        self.start_station.code)
                self.trains.append(new_train)
            # update the train statistics
            self.total_train = len(self.trains)
        if ":" in one_line and current_line is not None:
            station_infos = one_line.split(':')
            if len(station_infos) > 1:
                new_station = station.Station(current_line.name,
                                              int(station_infos[0]),
                                              station_infos[1])
                current_line.stations.append(new_station)
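For reference, a minimal input file this parser would accept; the keywords (#, START=, END=, TRAINS=) come straight from the branches above, but the overall layout is an inference and the station names are made up:

    #Line A
    1:Station One
    2:Station Two
    #Line B
    2:Central
    START=Line A:1
    END=Line B:2
    TRAINS=3

Note that the START line must appear before the TRAINS line, since creating trains reads self.start_station.code.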
def main():
    parser = build_parser()
    options = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = options.gpu_num

    NUM_OF_DATA = 640000
    TF_RECORD_PATH = ['../train_SR_bicubic_X2.tfrecord']

    Trainer = train.Train(trial=options.trial, step=options.global_step,
                          size=[HEIGHT, WIDTH, CHANNEL],
                          batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE,
                          max_epoch=EPOCH, tfrecord_path=TF_RECORD_PATH,
                          checkpoint_dir=CHECK_POINT_DIR, scale=SCALE,
                          num_of_data=NUM_OF_DATA, conf=conf)
    Trainer.run()
def demo(size=100, alpha=0.1, cost=logcost):
    ins = t.getImgs(0, size)
    stds = t.getStds(0, size)
    ins, stds = deal_data()(ins, stds)

    nets = ListNet()
    momentum = 0.99999
    l2c = 0.0001
    net = nets.push(fullnet(28 * 28, 10 * 10, l2c, momentum))
    net = nets.push(belta_net(net, None, momentum))
    net = nets.push(loss_net(net))
    net = nets.push(relunet(net))
    # net = nets.push(batch_normal_net(net, keep_mean=0.01))
    # net = nets.push(linear_net(net, None, l2c, momentum))
    # net = nets.push(belta_net(net, None, momentum))
    # net = nets.push(batch_normal_net_weights(net, l2c, momentum, keep_mean=0.01))
    net = nets.push(fullnet(net, 10, l2c, momentum))
    net = nets.push(belta_net(net, None, momentum))
    net = nets.push(sigmodnet(net))
    net = nets.push(cost(net))

    # pass the alpha parameter through instead of the hard-coded 0.1
    tr = train.Train(nets, alpha, ins, stds)
    return tr
def addTrain(
    self,
    track,
    trackIndex,
    goalIndex,
    pathToImage,
    trainName,
    velocity=constants.BASE_VELOCITY,
):
    newTrain = train.Train(track, pathToImage, trainName, velocity)
    newTrain.place(trackIndex)
    newTrain.setGoal(goalIndex)
    newTrain.trains = self.trainList
    # notify all existing trains about the new train
    for knownTrain in self.trainList:
        knownTrain.onTrainAdded(newTrain)
    self.trainList.add(newTrain)
    return newTrain
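A call might look like the following; manager, track, and the image path are illustrative placeholders rather than names from the project:

    newTrain = manager.addTrain(track, trackIndex=0, goalIndex=3,
                                pathToImage='img/train.png', trainName='T1')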
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), args.lr)
best_loss = sys.maxsize

param_dict = helper.count_parameters(model)
print('number of trainable parameters = ', numpy.sum(list(param_dict.values())))

if args.cuda:
    model = model.cuda()

if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = helper.load_checkpoint(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_loss = checkpoint['best_loss']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

# ###############################################################################
# Train the model
# ###############################################################################

# note: rebinding the name 'train' shadows the train module from here on
train = train.Train(model, optimizer, dictionary, args, best_loss)
train.train_epochs(train_corpus, dev_corpus, args.start_epoch, args.epochs)
debug = False

input_df = pd.read_csv('dados/input.csv')
cols = [
    'Sexo', 'COVID19 IgG BOOL', 'COVID19 IgG NUM', 'Monócitos NUM',
    'Neutrófilos NUM', 'Eosinófilos NUM', 'Basófilos NUM'
]
igg_df = input_df[cols]
igg_df = igg_df.dropna(how='any')
igg_df = igg_df.reset_index(drop=True)
igg_df['Sexo'].replace({'F': 1, 'M': 0}, inplace=True)

if debug:
    sns.set(style="darkgrid")
    sns.countplot(x='COVID19 IgG BOOL', data=igg_df)
    plt.show()

X = [
    'Sexo', 'COVID19 IgG NUM', 'Monócitos NUM', 'Neutrófilos NUM',
    'Eosinófilos NUM', 'Basófilos NUM'
]
Y = ['COVID19 IgG BOOL']
input_size = len(X)

igg_net = my_nn.Net_2hl(input_size, 128, 128)
treinamento = train.Train(igg_net, lr=0.01, X=X, Y=Y, epochs=1000, df=igg_df)
treinamento.train()
# -*- coding: utf-8 -*-
import train

if __name__ == "__main__":
    train.Train("R", 128).main()
optimizer = optim_fn(filter(lambda p: p.requires_grad, model.parameters()), **optim_params)
best_acc = 0

# for training on multiple GPUs, use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
if 'CUDA_VISIBLE_DEVICES' in os.environ:
    cuda_visible_devices = [int(x) for x in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
    if len(cuda_visible_devices) > 1:
        model = torch.nn.DataParallel(model, device_ids=cuda_visible_devices)
if args.cuda:
    model = model.cuda()

if args.resume:
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        checkpoint = torch.load(args.resume)
        args.start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(args.resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

# ###############################################################################
# Train the model
# ###############################################################################

train = train.Train(model, optimizer, dictionary, embeddings_index, args, best_acc)
train.train_epochs(train_corpus, dev_corpus, args.start_epoch, args.epochs)
        metavar='level',
        type=str,
        help='Which level to run, e.g. 1-1',
        default='1-1',
        nargs='?')
args = parser.parse_args()

if (args.mode.upper() == "TRAIN" or args.mode.upper() == "CONT_TRAIN") and args.gen is None:
    parser.error("Please specify the number of generations!")
if args.mode.upper() == "CONT_TRAIN" and args.file is None:
    parser.error(
        "Please specify a checkpoint file ("
        "./Files/neat-checkpoint-2492 can be used to start from generation 2492)!"
    )

if args.mode.upper() == "TRAIN":
    t = t.Train(args.gen, args.parallel, args.level)
    t.main(config_file=args.config)
elif args.mode.upper() == "CONT_TRAIN":
    c = ct.Train(args.gen, args.file, args.parallel, args.level)
    c.main(config_file=args.config)
elif args.mode.upper() == "RUN":
    args.file = "finisher.pkl" if args.file is None else args.file
    r.main(args.config, args.file, args.level)
else:
    print("Please enter 'train', 'run', or 'cont_train'")
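For context, invocations of this dispatcher might look like the following; only the level argument's definition is visible above, so the other flag spellings are assumptions, not confirmed from the project:

    python main.py --mode train --gen 50 1-1
    python main.py --mode cont_train --gen 50 --file ./Files/neat-checkpoint-2492
    python main.py --mode run 1-1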
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or from the last checkpoint epoch
acc_record = list([])
loss_train_record = list([])
loss_test_record = list([])

# Init SNN
snn = SCNN()
# snn.load_state_dict(torch.load('./checkpoint/ckpt' + names + '.t7'))
snn.eval()
snn.to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(snn.parameters(), lr=learning_rate)

# Init YOLO
yolo = yolo.Train()

for epoch in range(num_epochs):  # - EPOCH -
    print('Epoch [%d/%d]' % (epoch + 1, num_epochs))
    # Train
    running_loss = 0  # only used for printing
    start_time = time.time()
    bb_tracker = 99999
    bb_index = 0
    outputs = torch.zeros((255, 3, 1, 255), device=device)
    while not video.done:
        # Perform a prediction every ls time steps
        if video.current_time >= bb_tracker:  # check outputs every ls microseconds
            labels = []  # this will need to be formatted for YOLO
            while bb[bb_index][0] == bb_tracker:
print('NN init==============================')
rhwd = lm.RecognizeDigits()
rhwd.initNN()
if useCache:
    # Swift original, not yet ported:
    #   let result = (useTheanoWeight && rhwd.NN.loadFromTheano()) || rhwd.NN.loadFromFile()
    #   if (!result)
    #   {
    #       print("Cannot load cache")
    #       return;
    #   }
    raise Exception('useCache is not implemented yet!')
else:
    train = ta.Train()
    train.run(mnist, rhwd)
    # rhwd.NN.saveToFile()

# note: the "\(NSDate())" below is leftover Swift string interpolation
print("NN Recall============================== \(NSDate())")
penalty = 0
total = 0
for ins in mnist.iTestInstances:
    nnInput = ins.iImage
    output = rhwd.NN.forward(nnInput)
    outputLabel = cm.getOutputLabel(output)
    print('Test {} > {}'.format(ins.iLabel, outputLabel))
    if ins.iLabel != outputLabel: