def main():
    """Entry point: parse CLI args, build the TF training stack, and train.

    Captures the config path from the run arguments, processes the JSON
    configuration file, then wires session, model, data generator, logger
    and trainer together.
    """
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed; exit non-zero to signal failure to the shell.
        print("missing or invalid arguments")
        exit(1)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create instance of the model you want
    model = ExampleModel(config)
    # load model if it exists (restores the latest checkpoint)
    model.load(sess)
    # create your data generator
    data = DataGenerator(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)

    # here you train your model
    trainer.train()
Ejemplo n.º 2
0
def main():
    """Train/restore the MLP model, then report test accuracy.

    Prints the percentage of test samples whose predicted PPA is within
    10% relative error of the ground truth.
    """
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception:
        # Narrowed from a bare `except:`; non-zero exit signals the failure.
        print("missing or invalid arguments")
        exit(1)

    create_dirs([config.summary_dir, config.checkpoint_dir])

    sess = tf.Session()

    data = DataGenerator(config)

    model = mlp(config)

    logger = Logger(sess, config)

    trainer = ExampleTrainer(sess, model, data, config, logger)

    model.load(sess)
    # trainer.test() mirrors trainer.train(); see the trainer module for
    # the difference.  y holds the ground-truth PPA of the test data;
    # result holds the model's estimates as a [1][n_samples] 2-D list,
    # so result[0][i] is the prediction for sample i.
    y, result = trainer.test()
    cnt = 0
    print(result[0])
    for i in range(len(y)):
        # relative error |truth - prediction| / truth; count samples
        # within the 10% tolerance.  NOTE(review): assumes y[i] != 0,
        # otherwise this raises ZeroDivisionError — confirm upstream.
        if (abs(y[i] - float(result[0][i])) / y[i] <= 0.1):
            cnt += 1
    # Multiply by 100 so the printed value matches its '%' label
    # (the original printed a 0..1 fraction labelled as a percentage).
    print('10% 내외로 예측한 데이터는 ', cnt / len(y) * 100, '% 이다')
Ejemplo n.º 3
0
class ImageClassificationPytorch:
    """Wires model, data loaders, logger and trainer together for an
    image-classification experiment and exposes run()/close()."""

    def __init__(self, config):
        # Pin the experiment to the GPU requested in the config before
        # any CUDA context is created.
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = config['gpu_id']
        print(config)
        self.config = config
        self.init()

    def init(self):
        """Build every component of the experiment."""
        cfg = self.config
        # network
        self.model = ExampleModel(cfg)
        # restore weights if a checkpoint exists
        self.model.load()
        # data pipeline
        self.train_loader, self.test_loader = get_data_loader(cfg)
        # logging
        self.logger = ExampleLogger(cfg)
        # trainer receives every previously built component
        self.trainer = ExampleTrainer(self.model, self.train_loader,
                                      self.test_loader, cfg, self.logger)

    def run(self):
        """Train the model."""
        self.trainer.train()

    def close(self):
        """Close the logger."""
        self.logger.close()
def main():
    """Run the multi_output experiment end to end and write plots."""
    # read and process the fixed JSON configuration file
    config = process_config("configs/multi_output.json")

    # make sure the experiment directories exist
    create_dirs([config.summary_dir, config.checkpoint_dir])

    # tensorflow session shared by every component
    sess = tf.Session()

    # data pipeline
    data = DataGenerator(config)

    # model under test
    model = ExampleModel(config)
    # tensorboard logging
    logger = Logger(sess, config)
    # trainer gets every previously built component
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # restore weights when a checkpoint exists
    model.load(sess)

    # time the training run
    start = time.time()
    trainer.train()
    elapsed = time.time() - start
    print("".join(["Elapsed time: ", str(elapsed)]))

    # dump figures and a JSON summary of the results
    plotmaker = PlotMaker(trainer, model, data)
    plotmaker.write_all_plots("figures/multi_output")
    plotmaker.write_results_json("figures/multi_output")
Ejemplo n.º 5
0
def main():
    """Build, train, and evaluate the example model.

    The config path is currently hard-coded to ../configs/example.json;
    the parsed command-line args are not used for the config location.
    """
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()  # still parsed so bad CLI usage fails early
        json_file = '../configs/example.json'
        config = process_config(json_file)

    except Exception:
        # Narrowed from a bare `except:`; non-zero exit signals failure.
        print("missing or invalid arguments")
        exit(1)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session (TF1-style graph/session API)
    sess = tf.compat.v1.Session()
    # create your data generator and materialize the data set
    data = DataGenerator(config)
    data.generate_data()

    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # load model if it exists (restores the latest checkpoint)
    model.load(sess)
    # here you train your model
    trainer.train()
    # here you evaluate your model
    evaluator = Evaluator(trainer.sess, trainer.model, data, config, logger)
    evaluator.evaluate()
    evaluator.analysis_results()
def main():
    """Standard training entry point: parse args, wire components, train."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception:
        # Narrowed from a bare `except:`; non-zero exit signals failure.
        print("missing or invalid arguments")
        exit(1)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data = DataGenerator(config)

    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # load model if it exists
    model.load(sess)
    # here you train your model
    trainer.train()
Ejemplo n.º 7
0
def main():
    """Parse `-c <json path>`, build the MLP training stack, and train."""
    # store the json path passed via -c into a config object
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception:
        # Narrowed from a bare `except:`; non-zero exit signals failure.
        print("missing or invalid arguments")
        exit(1)

    # directories for the training results and checkpoints
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create the tensorflow session
    sess = tf.Session()
    # load the data; config supplies the batch size used to split it
    data = DataGenerator(config)
    # build the model skeleton.  This project only uses the input size,
    # but config can also select depth/shape/loss/optimizer variants.
    model = mlp(config)
    # logger tracks training progress and persists summaries
    logger = Logger(sess, config)
    # hand session, model, data set, config and logger to the trainer
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # resume from an existing checkpoint of the same model, if any
    model.load(sess)
    # here you train your model
    trainer.train()
Ejemplo n.º 8
0
 def init(self):
     """Build model, data loaders, logger and trainer for this experiment."""
     # create net
     self.model = ExampleModel(self.config)
     # create your data generator
     self.train_loader, self.test_loader = get_data_loader(self.config)
     # create logger
     self.logger = ExampleLogger(self.config)
     # create trainer and pass all previous components to it
     self.trainer = ExampleTrainer(self.model, self.train_loader, self.test_loader, self.config, self.logger)
def main():
    """Train the example image model from the config given on the CLI."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception:
        # Narrowed from a bare `except:`; non-zero exit signals failure.
        print("missing or invalid arguments")
        exit(1)

    # NOTE(review): presumably disables the epoch limit so the trainer
    # controls stopping — confirm against ImageDataLoader/ExampleTrainer.
    config.num_epochs = None
    image_loader = ImageDataLoader(config, True)

    model = ExampleModel(config, image_loader)
    model.init_train_model()
    # the context manager guarantees the session is closed after training
    with tf.Session() as sess:
        trainer = ExampleTrainer(sess, model, config)
        trainer.train()
Ejemplo n.º 10
0
def main():
    """Run every configured experiment sequentially, each in a fresh graph."""
    # set up logging
    configure_logging()
    logger = logging.getLogger(__name__)
    # Capture the config path from the run arguments then process the json
    # configuration file
    config_handler = ConfigHandler()
    try:
        config_handler.parse_args()
    except Exception:
        # Narrowed from a bare `except:`; non-zero exit signals failure.
        logger.exception("Missing or invalid arguments")
        exit(1)
    # Read config file(s)
    config_handler.process_config()
    # Create the experiments dirs
    config_handler.create_config_dirs()
    # Run experiments
    for exp_name in config_handler.experiment_names:
        config = config_handler.config[exp_name]
        logger.info('Start running experiment {}'.format(exp_name))
        # fresh graph per experiment so variables don't collide across runs
        tf.reset_default_graph()
        sess = tf.Session()
        try:
            # Create your data generator
            data = DataGenerator(config)
            # Create an instance of the model you want
            model = ExampleModel(config)
            # Create tensorboard logger
            summary_logger = SummaryLogger(sess, config)
            # Create test instance
            tester = ExampleTest(sess, model, data, config, summary_logger)
            # Create trainer and pass all the previous components to it
            trainer = ExampleTrainer(sess, model, data, config, summary_logger,
                                     tester)
            # Load model
            model.load(sess)
            # Start training
            trainer.train()
        except Exception:
            # Log and continue with the next experiment.  The session is
            # closed in `finally` below, so no close() here — the original
            # closed it twice on the failure path.
            logger.exception('An exception occured during training')
        finally:
            sess.close()
    logger.info('Finished all experiments')
Ejemplo n.º 11
0
def main(is_pretrain,
         tfrecords_files,
         kernal_channel=64,
         fc_num=32,
         count_stop=10):
    """Train an SAResnet model once per sub-directory of *tfrecords_files*.

    Args:
        is_pretrain: when falsy, each run's checkpoint dir is seeded from
            the shared "basic" model files before training starts.
        tfrecords_files: directory whose sub-directories each hold one
            training data set; plain files in it are skipped.
        kernal_channel: channel width forwarded to SAResnetModel.
        fc_num: fully-connected layer size forwarded to SAResnetModel.
        count_stop: stored on the config — presumably an early-stopping
            counter consumed by the trainer; confirm there.
    """

    config = process_config(os.path.join(rootPath, "configs/example.json"))

    for train_data_file in sorted(os.listdir(tfrecords_files)):

        _path = os.path.join(tfrecords_files, train_data_file)
        # only sub-directories count as data sets
        if os.path.isdir(_path):
            # point every output path of this run at a per-data-set tree
            config.train_data_file = train_data_file
            config.summary_dir = os.path.join(rootPath, "result", "saresnet",
                                              train_data_file, "summary")
            config.checkpoint_dir = os.path.join(rootPath, "result",
                                                 "saresnet", train_data_file,
                                                 "checkpoint")
            config.basicmodel_dir = os.path.join(rootPath, "result",
                                                 "basic_model")
            config.pred_result_dir = os.path.join(rootPath, "result",
                                                  "pred_result")
            config.result_csv_dir = os.path.join(rootPath, "result",
                                                 "result_csv")
            config.is_pretrain = is_pretrain
            if not os.path.exists(config.pred_result_dir):
                os.makedirs(config.pred_result_dir)
            if not os.path.exists(config.result_csv_dir):
                os.makedirs(config.result_csv_dir)
            config._path = _path
            config.train_file_name = train_data_file
            config.count_stop = count_stop
            create_dirs([
                config.summary_dir, config.checkpoint_dir,
                config.basicmodel_dir
            ])

            # TF checkpoint index files for this run and for the shared
            # basic model
            file_name = os.path.join(config.checkpoint_dir, "checkpoint")
            file_name_before = os.path.join(config.basicmodel_dir,
                                            "checkpoint")

            if not is_pretrain:
                # First run for this data set: copy the shared basic-model
                # checkpoint triple (data/index/meta) into the run's
                # checkpoint dir so training resumes from it.
                if not os.path.exists(file_name):
                    copyfile(
                        os.path.join(config.basicmodel_dir,
                                     "saresnet_basic.data-00000-of-00001"),
                        os.path.join(config.checkpoint_dir,
                                     "saresnet_basic.data-00000-of-00001"))
                    copyfile(
                        os.path.join(config.basicmodel_dir,
                                     "saresnet_basic.index"),
                        os.path.join(config.checkpoint_dir,
                                     "saresnet_basic.index"))
                    copyfile(
                        os.path.join(config.basicmodel_dir,
                                     "saresnet_basic.meta"),
                        os.path.join(config.checkpoint_dir,
                                     "saresnet_basic.meta"))

                    # Rewrite the checkpoint index, substituting the data-set
                    # name for the "%s" placeholder in each line.
                    with open(file_name_before) as f_before, open(
                            file_name, "w") as f:
                        lines = f_before.readlines()
                        for line in lines:
                            # NOTE(review): readlines() keeps the trailing
                            # "\n", so appending another "\n" writes a blank
                            # line after every entry — confirm this matches
                            # the intended checkpoint-file format.
                            line_ = line.replace("%s", train_data_file) + "\n"
                            f.write(line_)

            # cap this process to a fixed fraction of GPU memory
            # gpu_options = tf.GPUOptions(allow_growth=True)
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.28)
            tf_config = tf.ConfigProto(gpu_options=gpu_options)

            # NOTE(review): sess is never closed per iteration — possible
            # resource growth across data sets; only the graph is reset below.
            sess = tf.Session(config=tf_config)
            data = DataGenerator(config)
            model = SAResnetModel(config,
                                  kernal_channel=kernal_channel,
                                  fc_num=fc_num)

            logger = Logger(sess, config)
            trainer = ExampleTrainer(sess, model, data, config, logger)
            model.load(sess)
            trainer.train()

            # fresh default graph for the next data set
            tf.reset_default_graph()
Ejemplo n.º 12
0
def main():
    """Interactively start a new, or resume an old, action-recognition run."""
    answer = input("Is this a new experiment? [Y/N]")
    if answer == 'Y':
        # fresh experiment: use the default parameter file
        cfg_path = '/home/ADAMGE/action_recognition/action_recognition_v1/configs/params.json'
    elif answer == 'N':
        # resumed experiment: ask for the config stored with the old run
        cfg_path = input(
            "Enter the full path of the config file in the old experiment folder"
        )
    else:
        print("Wrong input")
        exit()

    paths_file = '/home/ADAMGE/action_recognition/action_recognition_v1/configs/paths.json'
    config = process_config(cfg_path, paths_file, answer)

    # create the experiment dirs and write the JSON file into them
    create_dirs([config.summary_dir, config.checkpoint_dir], cfg_path)

    # cap GPU memory usage for this process
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
    session_cfg = tf.ConfigProto(gpu_options=gpu_options)

    # tensorflow session shared by every component
    sess = tf.Session(config=session_cfg)

    # the model comes first: the data generators take it as an argument
    model = ExampleModel(config)

    # training data is shuffled; validation data is not
    data_train = DataGenerator(model, config, sess, 'train',
                               shuffle=True, augment=False)
    data_validate = DataGenerator(model, config, sess, 'validate',
                                  shuffle=False, augment=False)

    # tensorboard logger
    logger = Logger(sess, config)

    # trainer receives every previously built component
    trainer = ExampleTrainer(sess, model, data_train, data_validate, config,
                             logger)

    # always restore the pretrained MobileNet backbone
    model.restore_mobile_net(sess)

    # when resuming an old experiment, also restore the LSTM weights
    if answer == 'N':
        model.load(sess)

    # training
    trainer.train()