    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(os.path.join(
                log_dir, '%s-%d.log' % (args.model, time.time()))),
            logging.StreamHandler()
        ]
    )
    logger = logging.getLogger()

    if hp.data.train == '' or hp.data.val == '':
        logger.error("hp.data.train, hp.data.val cannot be empty")
        raise Exception("Please specify directories of train data.")

    if hp.model.graph0 == '' or hp.model.graph1 == '' or hp.model.graph2 == '':
        logger.error("hp.model.graph0, graph1, graph2 cannot be empty")
        raise Exception("Please specify random DAG architecture.")

    graphs = [
        read_graph(hp.model.graph0),
        read_graph(hp.model.graph1),
        read_graph(hp.model.graph2),
    ]

    writer = MyWriter(log_dir)

    trainset = KMNIST_dataloader(hp, args, True)
    valset = KMNIST_dataloader(hp, args, False)

    train(out_dir, chkpt_path, trainset, valset, writer, logger, hp, hp_str,
          graphs, in_channels=1)
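
# --- Illustration (not part of the training code) ---------------------------
# `read_graph` above is assumed to parse a random-DAG description from a text
# file. The sketch below is a minimal, hypothetical reader, assuming a format
# of one "num_nodes" line followed by "src dst" edge lines; the actual format
# used by this repo's read_graph may differ.
from collections import namedtuple

GraphSketch = namedtuple('GraphSketch', ['num_nodes', 'edges'])

def read_graph_sketch(path):
    # First non-empty line: node count; remaining lines: one edge pair each.
    with open(path) as f:
        lines = [ln.split() for ln in f if ln.strip()]
    num_nodes = int(lines[0][0])
    edges = [(int(src), int(dst)) for src, dst in lines[1:]]
    return GraphSketch(num_nodes, edges)
# -----------------------------------------------------------------------------
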
def ga_trainer(args, index_list, f_path, f_name):
    # CLI arguments (-c/--config, -p/--checkpoint_path, -m/--model) are parsed
    # by the caller and passed in via `args`.
    individual_model_name = args.model + "_{}_{}_{}".format(
        index_list[0], index_list[1], index_list[2])

    hp = HParam(args.config)
    with open(args.config, 'r') as f:
        hp_str = ''.join(f.readlines())

    # Directory where PyTorch checkpoints are saved.
    pt_path = os.path.join('.', hp.log.chkpt_dir)
    # Save the model under the per-individual model name defined above.
    out_dir = os.path.join(pt_path, individual_model_name)
    os.makedirs(out_dir, exist_ok=True)

    log_dir = os.path.join('.', hp.log.log_dir)
    log_dir = os.path.join(log_dir, individual_model_name)
    os.makedirs(log_dir, exist_ok=True)

    if args.checkpoint_path is not None:
        chkpt_path = args.checkpoint_path
    else:
        chkpt_path = None

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=[
            logging.FileHandler(os.path.join(
                log_dir, '%s-%d.log' % (args.model, time.time()))),
            logging.StreamHandler()
        ]
    )
    logger = logging.getLogger()

    if hp.data.train == '' or hp.data.val == '':
        logger.error("hp.data.train, hp.data.val cannot be empty")
        raise Exception("Please specify directories of train data.")

    if hp.model.graph0 == '' or hp.model.graph1 == '' or hp.model.graph2 == '':
        logger.error("hp.model.graph0, graph1, graph2 cannot be empty")
        raise Exception("Please specify random DAG architecture.")

    # Read the three stage graphs generated for this individual (indexed by
    # `index_list`) from the newly created file location, instead of the fixed
    # graph paths in the config.
    graphs = [
        read_graph(os.path.join(f_path, args.model + '_' + str(idx) + '.txt'))
        for idx in index_list
    ]

    writer = MyWriter(log_dir)

    # Pick the dataloader matching the configured dataset type.
    dataset = hp.data.type
    switcher = {
        'MNIST': MNIST_dataloader,
        'CIFAR10': CIFAR10_dataloader,
        'ImageNet': create_dataloader,
    }
    assert dataset in switcher, 'Dataset type currently not supported'
    dl_func = switcher[dataset]
    trainset = dl_func(hp, args, True)
    valset = dl_func(hp, args, False)

    val_acc = ga_train(out_dir, chkpt_path, trainset, valset, writer, logger,
                       hp, hp_str, graphs)
    return val_acc
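
# --- Illustration (not part of the training code) ---------------------------
# Hypothetical usage: a GA outer loop evaluates an individual by training it
# and using the returned validation accuracy as its fitness. `GRAPH_DIR` and
# `individual` below are assumptions for this sketch, not names from the repo;
# the graph files args.model + '_<idx>.txt' are assumed to already exist under
# GRAPH_DIR (e.g. produced by a prior generation step).
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', type=str, required=True,
                        help="yaml file for configuration")
    parser.add_argument('-p', '--checkpoint_path', type=str, default=None,
                        help="path of checkpoint pt file")
    parser.add_argument('-m', '--model', type=str, required=True,
                        help="name of the model, used for logging/saving")
    args = parser.parse_args()

    # One individual = three graph indices, one per network stage.
    GRAPH_DIR = './graphs'
    individual = [0, 1, 2]
    fitness = ga_trainer(args, individual, GRAPH_DIR, None)
    print('validation accuracy (fitness):', fitness)
# -----------------------------------------------------------------------------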