def main(args):
    init_logger()
    set_seeds()
    tokenizer = load_tokenizer(args)

    if args.do_train:
        if args.model_4:
            first = TrainerFirst(args, tokenizer)
            first.train()
            second = SecondClassifier(args, tokenizer)
            second.classifier()
            third = Trainermmd(args, tokenizer)
            third.train()
        elif args.new_model_4:
            first = FirstTrainer(args, tokenizer)
            first.train()
            second = SecondClassifier_n(args, tokenizer)
            second.classifier()
            third = Trainermmd_n(args, tokenizer)
            third.train()
        else:
            trainer = Trainer(args, tokenizer)
            trainer.train()
    elif args.do_test:
        if args.model_4 or args.new_model_4:
            tester = Predictor(args, tokenizer)
            tester.predict()
        else:
            tester = Tester(args, tokenizer)
            tester.test()
    elif args.do_interactive:
        interactive_predict(args)
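# --- Hedged sketch (not from the original source): the argparse setup this
# main() appears to expect. The flag names mirror the attributes read above
# (do_train, do_test, do_interactive, model_4, new_model_4); the help strings
# and store_true actions are assumptions.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--do_train", action="store_true", help="run training")
parser.add_argument("--do_test", action="store_true", help="run evaluation")
parser.add_argument("--do_interactive", action="store_true", help="interactive prediction")
parser.add_argument("--model_4", action="store_true", help="use the three-stage pipeline variant")
parser.add_argument("--new_model_4", action="store_true", help="use the alternative three-stage variant")

if __name__ == "__main__":
    main(parser.parse_args())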
def getListOfDotCoords(self, imagePath, dotImagesPath):
    t = Trainer()
    e = Extractor()
    v = Vectors()
    imageTemplateList = t.createListOfImageTemplates("dot", dotImagesPath)
    pts = sorted(e.getMatchingPoints(imagePath, imageTemplateList, 0.9)[0])
    # Collapse clusters of near-duplicate template matches: keep a point only
    # if it is at least 10 px away from every point already kept; building a
    # new list avoids mutating pts while iterating over it.
    deduped = []
    for pt in pts:
        if all(v.distance(pt, kept) >= 10 for kept in deduped):
            deduped.append(pt)
    return deduped
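# --- Hedged sketch (assumption, not from the original source): the dedup
# loop above only requires Vectors.distance to be a planar metric over
# 2-D (x, y) match coordinates; a minimal Euclidean version might look so.
import math

class Vectors:
    def distance(self, p, q):
        # straight-line distance between two match points
        return math.hypot(p[0] - q[0], p[1] - q[1])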
def __init__(self, videoPaths, objectNames):
    #self.imagePath = imagePath
    self.objectNames = objectNames
    e = Extractor()
    t = Trainer()
    # Count how many videos of each object have already been dissected so
    # repeated objects get distinct frame-image suffixes.
    numTimesObjectVidDissected = {}
    for path, name in zip(videoPaths, objectNames):
        count = numTimesObjectVidDissected.get(name, 0)
        t.createIntervalFrameImages(path, "training_images_from_videos",
                                    name, str(count))
        numTimesObjectVidDissected[name] = count + 1
    # Attach one template list per object name, e.g. self.cubeTemplateList;
    # setattr is safe for arbitrary object names, unlike a string-built exec().
    for o in objectNames:
        setattr(self, o + "TemplateList",
                t.createListOfImageTemplates(o, "training_images_from_videos"))
def main(args):
    # Load data
    x, y = load_data(args.dataset)
    n_clusters = len(np.unique(y))

    # Set hyperparameters
    if args.dataset == 'mnist' or args.dataset == 'fmnist':
        args.update_interval = 140
        args.pretrain_epochs = 301
        ae_weights_init = tf.variance_scaling_initializer(scale=1. / 3.,
                                                          mode='fan_in',
                                                          distribution='uniform')

    # Prepend the feature dimension to the encoder layer sizes
    feature_dim = x.shape[1]
    args.encoder_dims = [feature_dim] + args.encoder_dims
    print(args.encoder_dims)

    if args.pretrain:
        # Pretraining
        print('Begin Pretraining')
        t0 = time()
        pretrainer = Pretrainer(args, ae_weights_init)
        saver = pretrainer(x, y)
        # print(saver)
        print('Pretraining time: %ds' % round(time() - t0))
        # Clear the computation graph
        tf.reset_default_graph()

    # Model training
    print('Begin Model training')
    t1 = time()
    trainer = Trainer(args, ae_weights_init, n_clusters)
    trainer(x, y)
    print('Model training time: %ds' % round(time() - t1))
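# --- Worked example (values assumed, not from the original source): the
# encoder_dims bookkeeping above, for a flattened 28x28 input and a typical
# stacked-autoencoder layout.
feature_dim = 784                    # x.shape[1] for flattened MNIST images
encoder_dims = [500, 500, 2000, 10]  # hidden layer sizes passed on the CLI
encoder_dims = [feature_dim] + encoder_dims
print(encoder_dims)                  # [784, 500, 500, 2000, 10]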
isAudio = False
if len(sys.argv) == 3:
    isAudio = True
    NLU.Helpers.logger.info("Audio mode")

if sys.argv[1] == "Train":
    """
    Training mode

    Trains GeniSys.
    """
    NLU.Helpers.logger.info("Training mode")
    Train = Trainer()
    Train.trainModel()

elif sys.argv[1] == "Server":
    """
    Server mode

    Allows communication with GeniSys via HTTP requests.
    """
    NLU.Helpers.logger.info("Server mode")
    NLU.engine(isAudio)
    NLU.iotJumpWayConn()
    NLU.threading()
    NLU.Helpers.logger.info("Inference Started In SERVER Mode")
parser.add_argument('-l', '--label_smoothing', type=bool_type_check, default=False,
                    help="\napply label smoothing\n" +
                         "default : False\n\n")
parser.add_argument('-p', '--ckpt_path', type=str, default=None,
                    help="\ncheckpoint path - default : None\n" +
                         "the argument is a folder or a checkpoint file name, as used in Train.py\n" +
                         "ex1) -p ./foo/results/2019-04-18__004330\n" +
                         "ex2) -p ./foo/results/2019-04-18__004330/ckpt.file\n\n")
parser.add_argument('-E', '--ckpt_epoch', type=int, default=None,
                    help="\nepoch of the checkpoint to load when ckpt_path is a folder\n" +
                         "if ckpt_path is a folder and ckpt_epoch is not set,\n" +
                         "the most recent checkpoint is loaded\n\n")

args = parser.parse_args()
kwargs = vars(args)

transformer = Trainer(**kwargs)
transformer.start()
"ex) -S 256 256 128\n" + "default : 256\n\n") parser.add_argument('-l', '--label_smoothing', type=bool_type_check, default=False, help="\nlabel smoothing 적용\n" + "default : False\n\n") parser.add_argument('-p', '--ckpt_path', type=str, default=None, help="\ncheckpoint path - default : None\n" + "argument는 Train.py에서 folder 값 또는 checkpoint file name\n" + "ex1) -c ./foo/results/2019-04-18__004330\n" + "ex2) -c ./foo/results/2019-04-18__004330/ckpt.file\n\n") parser.add_argument( '-E', '--ckpt_epoch', type=int, default=None, help="\ncheckpoint path가 folder일 경우 불러올 checkpoint의 epoch\n" + "만약 checkpoint의 path가 folder일 때, checkpoint_epoch를 설정하지 않으면\n" + "가장 최근의 checkpoint를 불러옴\n\n") args = parser.parse_args() kwargs = vars(args) translator = Trainer(**kwargs) translator.start()
from __future__ import print_function
from argparse import ArgumentParser, RawTextHelpFormatter
from Train import Trainer

parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
parser.add_argument('-f', '--result_folder', type=str, default=None,
                    help="\nfolder in which to save the model's progress\n" +
                         "default : create a Result folder in the current directory\n\n")
parser.add_argument('-P', '--ckpt_path', type=str, default=None,
                    help="\ncheckpoint path - default : None\n" +
                         "the argument is a folder or a checkpoint file name, as used in Train.py\n" +
                         "ex1) -P ./foo/results/2019-04-18__004330\n" +
                         "ex2) -P ./foo/results/2019-04-18__004330/ckpt.file\n\n")
parser.add_argument('-E', '--ckpt_epoch', type=int, default=None,
                    help="\nepoch of the checkpoint to load when ckpt_path is a folder\n" +
                         "if ckpt_path is a folder and ckpt_epoch is not set,\n" +
                         "the most recent checkpoint is loaded\n\n")

args = parser.parse_args()
kwargs = vars(args)

model = Trainer(**kwargs)
model.start()
        sort_keys=True), status=200, mimetype="application/json")


if __name__ == "__main__":

    if sys.argv[1] == "TRAIN":
        ###############################################################
        # Triggered when the first command-line argument is TRAIN
        ###############################################################
        Train = Trainer(NLU.jumpWayClient)
        Train.trainModel()

    elif sys.argv[1] == "SERVER":
        ###############################################################
        # Triggered when the first command-line argument is SERVER
        ###############################################################
        NLU.initNLU()
        NLU.Helpers.logMessage(NLU.LogFile, "Inference", "INFO",
                               "Inference Started In SERVER Mode")
        'epochs': 50,
        'batch_size': 128,
        'z_latent': 20,
        'eta1': 10.0,   # eta1 = dx
        'eta2': 1e-3,   # eta2 = dz
        'eta3': 10e-5,  # eta3 = regularization (note: 10e-5 == 1e-4)
        'x_dim': 159
    }
elif model_name == 'IV':
    hyper_params = {
        'lr': 0.001,
        'epochs': 110,
        'batch_size': 128,
        'z_latent': 20,
        'eta1': 10.0,
        'eta2': 1e-4,
        'eta3': 1e-5,
        'alpha': 0.050,
        'steps_inner': 10
    }
else:
    raise NameError('Wrong model name')

model = get_model(model_name, hyper_params, rng)
step_sample = 50
trainer = Trainer(model, hyper_params, step_sample, shuffle=False)
z_pred, x_pred = trainer.fit(X, dX, z_ref, rng_batch)
pdb.set_trace()  # debugger breakpoint for inspecting the predictions
from Train import Trainer

a = Trainer()
a.run()
import sys
sys.path.insert(0, '../src/modules/Feature_Extractor')

from Train import Trainer
from Loc_Extractor import Extractor

e = Extractor()
t = Trainer()

t.createIntervalFrameImages("../data/object_videos/cube1.mov", "out_dir", "cube", "1")
#t.createIntervalFrameImages("../data/object_videos/sphere1.mov", "out_dir", "sphere", "1")
#t.createIntervalFrameImages("../data/object_videos/sphere2.mov", "out_dir", "sphere", "2")

cubeTemplateList = t.createListOfImageTemplates("cube", "out_dir")
#templateListList = [cubeTemplateList, sphereTemplateList]

print(e.getObjectLoc("../data/image.png", cubeTemplateList, 0.7))
#e.getObjectLoc("../data/cube.png", cubeTemplateList, 0.8)
#print(e.findAllObjects("../data/both_alt_env.png", templateListList, 0.8))
def train_run(**kwargs):
    cost = kwargs['loss_info']
    optimizer = kwargs['optimizer_info']
    learning_rate = kwargs['learning_rate']
    drop_out_rate = kwargs['drop_out_rate']
    act_func = kwargs['act_func']
    layer_cnt = int(kwargs['layer_cnt'])
    model_id = int(kwargs['tr_model_id'])
    normal_data = kwargs['normal_data']
    abnormal_data = kwargs['abnormal_data']
    validation = kwargs['tr_validation']

    k_fold_list = []
    # Map the validation percentage to a k-fold count
    if int(validation) == 2:
        print(validation, type(validation))
        k_fold_list.append(50)
    elif int(validation) == 5:
        print(validation, type(validation))
        k_fold_list.append(20)
    elif int(validation) == 10:
        print(validation, type(validation))
        k_fold_list.append(10)
    elif int(validation) == 15:
        print(validation, type(validation))
        k_fold_list.append(18)
    elif int(validation) == 20:
        print(validation, type(validation))
        k_fold_list.append(5)
    # print('dl_option : ', cost, optimizer, learning_rate, drop_out_rate, act_func, layer_cnt, model_id, validation)
    k_fold = k_fold_list[0]

    def _return_loop_number(string):
        # Parse a "start_end" range string, e.g. "3_12" -> (3, 12)
        if string != '_':
            under_Bar = string.find('_')
            first_data_n = int(string[:under_Bar])
            last_data_n = int(string[under_Bar + 1:])
            return first_data_n, last_data_n

    # path = '/home/obsk/Javis_dl_system/data/I66'
    # /opt/home/data/I66/DICOM/train/abnormal/I66_mri_AB_00000029
    # /home/obsk/v_nas2/I66/DICOM/train/abnormal/I66_mri_AB_00000029
    # path = 'D:/data/I66'

    # Fetch normal data paths
    normal_file_path_list = []
    normal_data_chklist = []
    if normal_data != '_':
        normal_start, normal_fin = _return_loop_number(normal_data)
        # normal_length = normal_fin - normal_start + 1
        # client = MongoClient('mongodb://172.16.52.79:27017')
        # db = client.ohif_deployment
        # cursor = db.study_normtraining.find().sort('normtrain_id', pymongo.ASCENDING)
        cur = OraDB.prepareCursor()
        cur.execute(
            "select del_yn, dataid, file_path, normtrain_id from study_normtraining order by normtrain_id asc"
        )
        for row in cur:
            del_yn, dataid, file_path, normtrain_id = list(row)
            if del_yn != 'y' and 'I66' in dataid:
                normal_data_chklist.append(file_path)
        print(normal_data_chklist)
        for idx in range(normal_start - 1, normal_fin):
            normal_file_path_list.append(normal_data_chklist[idx])
        print(normal_file_path_list)
        OraDB.releaseConn()

    # Fetch abnormal data paths
    abnormal_file_path_list = []
    abnormal_data_chklist = []
    if abnormal_data != '_':
        abnormal_start, abnormal_fin = _return_loop_number(abnormal_data)
        # abnormal_length = abnormal_fin - abnormal_start + 1
        # client = MongoClient('mongodb://172.16.52.79:27017')
        # db = client.ohif_deployment
        # cursor = db.study_abnormtraining.find().sort('abnormtrain_id', pymongo.ASCENDING)
        cur = OraDB.prepareCursor()
        cur.execute(
            "select del_yn, dataid, file_path, normtrain_id from study_abnormtraining order by abnormtrain_id asc"
        )
        for row in cur:
            del_yn, dataid, file_path, normtrain_id = list(row)
            if del_yn != 'y' and 'I66' in dataid:
                abnormal_data_chklist.append(file_path)
        print(abnormal_data_chklist)
        for idx in range(abnormal_start - 1, abnormal_fin):
            abnormal_file_path_list.append(abnormal_data_chklist[idx])
        print(abnormal_file_path_list)
        OraDB.releaseConn()

    # Copy NAS normal data to a local temp directory
    local_n_datapath_list = []
    for nas_path in normal_file_path_list:
        temp_path = nas_path.replace('/medimg/', '/home/user01/Javis_dl_system/data_temp/')
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)
        data_mover.nas_to_dlserver(nas_path, temp_path)
        local_n_datapath_list.append(temp_path)

    # Copy NAS abnormal data to a local temp directory
    local_ab_datapath_list = []
    for nas_path in abnormal_file_path_list:
        temp_path = nas_path.replace('/medimg/', '/home/user01/Javis_dl_system/data_temp/')
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)
        data_mover.nas_to_dlserver(nas_path, temp_path)
        local_ab_datapath_list.append(temp_path)

    tot_datapath_list = local_n_datapath_list + local_ab_datapath_list
    # print('tot_data_id_list : ', tot_datapath_list)
    if len(tot_datapath_list) <= 1:
        print('CANNOT RUN WITH 0, 1 DATA SET')
        raise FileNotFoundError
    shuffle(tot_datapath_list)

    # Keep only studies whose label images match their input images
    # path example: /home/bjh/obsk/v_nas2/I66/DICOM/train/abnormal/I66_mri_AB_00000039
    fin_datapath_list = []
    for path in tot_datapath_list:
        datasaver = LabelDataSaver.DataSaver(path)
        datasaver.saveYdata_labeled()
        x_path = path + '/img/x'
        y_path = path + '/img/y'
        if os.path.isdir(y_path):
            if len(os.listdir(y_path)) == len(os.listdir(x_path)):
                fin_datapath_list.append(path)

    dataset_cnt = len(fin_datapath_list)  # n
    # b_size = 1
    # k = k_fold
    data_loader = TrainDataLoader.DataLoader(data_path_list=fin_datapath_list,
                                             k_fold=k_fold,
                                             c_size=256,
                                             i_channel=1,
                                             n_class=2)
    trainer = Trainer(data_loader=data_loader,
                      model_id=model_id,
                      optimizer=optimizer,
                      learning_rate=learning_rate,
                      cost_name=cost,
                      act_func=act_func,
                      layer_n=layer_cnt)
    trainer.train(n_epochs=1,
                  n_t_iters=(math.ceil(dataset_cnt / k_fold * (k_fold - 1)) - 1) * 8,
                  n_v_iters=math.ceil(dataset_cnt / k_fold) * 8,
                  b_size=1,
                  keep_prob=drop_out_rate)
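# --- Hedged illustration (values made up): normal_data / abnormal_data above
# are "start_end" range strings parsed by _return_loop_number; a bare "_"
# means "no data of this kind". The helper below is a standalone stand-in.
def _demo_return_loop_number(string):
    if string != '_':
        first, last = string.split('_')
        return int(first), int(last)

print(_demo_return_loop_number("3_12"))  # (3, 12): rows 3..12 (1-based) of the checklist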