Example #1
def main():

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])

    print("Create the data generator")
    data_generator = DataGenerator(config)

    print("Create the model.")
    model = CNNModel(config, data_generator.get_word_index())

    print("Trainer initiatise")
    trainer = ModelTrainer(model.model, data_generator.get_train_data(),
                           config)

    print("Training Start")
    trainer.train()

    print("Visualization of loss and accuracy")
    trainer.visualize("FastText +CNN")
Example #2
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])

    print('Create the data generator.')
    data_generator = DataGenerator(config)

    print('Create the model.')
    model = MultiLabelConvModel(config, data_generator.get_word_index())

    print('Create the trainer')
    trainer = MultiLabelConvModelTrainer(model.model,
                                         data_generator.get_train_data(),
                                         config)

    print('Start training the model.')
    trainer.train()

    print('Visualize the losses')
    trainer.visualize()
Example #3
def main():
    # try:
    #     args = get_args()
    #     config = process_config(args.config)
    #
    # except:
    #     print("missing or invalid arguments")
    #     exit(0)
    config_file = "configs/config.json"
    config = process_config(config_file)
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])

    # embed_path = "data/glove.trimmed.{}.npz".format(config.embedding_size)
    # config.embed_path = embed_path
    # vocab_path = "data/vocab.dat"
    # config.vocab_path = vocab_path

    # create tensorflow session
    sess = tf.Session()

    # create data generator
    data = DataGenerator(config)
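    # query the sequence length and vocabulary size the model needs from the data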
    sequence_length = data.sequence_length()
    vocab_size = data.get_vocab_size()
    # create an instance of the model
    model = CNNClassifier(config, sequence_length, vocab_size)
    # load model if exists
    model.load(sess)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config, logger)

    trainer.train()
Example #4
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data = DataGenerator(config)
    # use mnist dataset
    data.load_mnist()

    # create an instance of the cnn model
    model = CnnMnistModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = CnnMnistTrainer(sess, model, data, config, logger)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
Example #5
def run_trail(config):
    # capture the config path from the run arguments
    # then process the json configuration file
    # try:
    # except:
    #     print("missing or invalid arguments")
    #     exit(0)

    if config.gpu_mode and not torch.cuda.is_available():  # GPU mode is enabled, but no GPU was found
        raise Exception("No GPU found, please run with --gpu_mode=False")

    # create an instance of the model you want
    # model = Net(config)
    # model = torch.nn.DataParallel(Net(config), device_ids=[0, 1])
    model = torch.nn.DataParallel(Net(config))

    # set the logger
    log_dir = os.path.join(config.save_dir, 'logs_'+config.exp_name)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # logger = Logger(log_dir)
    logger = None

    # shuffle the sample indices into train and test splits
    train_indices, test_indices = shuffle()
    # create your data generator
    data_train = DataGenerator(config, 'train').load_dataset()
    # create your data generator
    data_test = DataGenerator(config, 'test').load_dataset()

    # create trainer and pass all the previous components to it
    trainer = Trainer(model, config, data_train, logger, data_test)
    trainer.train_test()
Example #6
    def __init__(self, config):
        super(GEO1dDataLoader, self).__init__(config)

        # Create the training data generator
        print('Creating the training data generator')
        tr_ids = os.listdir(self.config.data_loader.train_dir)
        tr_labels_str = list(map(self.label_image, tr_ids))
        tr_ids = list(map(self.png_to_id, tr_ids))
        tr_labels = dict(zip(tr_ids, tr_labels_str))
        self.train_generator = DataGenerator(tr_ids, tr_labels,
                                self.config.data_loader.train_dir,
                                batch_size=self.config.trainer.batch_size,
                                dim=[self.config.model.input_shape[0], self.config.model.input_shape[1]],
                                n_channels=1,
                                n_classes=self.config.model.output_shape,
                                shuffle=False,
                                class_mode=self.config.model.loss)

        # Create the testing data generator
        print('Creating the testing data generator')
        te_ids = os.listdir(self.config.data_loader.test_dir)
        te_labels_str = list(map(self.label_image, te_ids))
        te_ids = list(map(self.png_to_id, te_ids))
        te_labels = dict(zip(te_ids, te_labels_str))
        self.test_generator = DataGenerator(te_ids, te_labels,
                                self.config.data_loader.test_dir,
                                batch_size=self.config.trainer.batch_size,
                                dim=[self.config.model.input_shape[0], self.config.model.input_shape[1]],
                                n_channels=1,
                                n_classes=self.config.model.output_shape,
                                shuffle=False,
                                class_mode=self.config.model.loss)
Example #7
def train():
    def monkey_patched_train(self):
        """
        Train from current epoch to number of epochs in the config.
            Call train_epoch for each epoch, and increase cur_epoch_tensor.
        """
        tf.logging.info('Training...')
        begin_epoch = self.model.cur_epoch_tensor.eval(sess)
        # initialize training data set
        self.sess.run([self.data_loader.data_set_init_ops['train']])
        for cur_epoch in range(begin_epoch, begin_epoch + self.config.epochs):
            self.train_epoch()
            self.sess.run(self.model.increment_cur_epoch_tensor)
            # validation
            if cur_epoch % self.config.epochs_per_validation == 0:
                validater.validate()
                # restore the training input pipeline after validation
                self.sess.run([self.data_loader.data_set_init_ops['train']])

    train_config = process_config("configs/train_with_validation.json")
    validate_config = process_config("configs/validate.json")
    g = tf.Graph()
    with g.as_default():
        # train data
        train_data_gen = DataGenerator()
        # validate data
        validate_data_gen = DataGenerator()
        data_loader = DataSetLoader(train_config, {
            'train': train_data_gen,
            'validate': validate_data_gen
        },
                                    default_set_name='train')
        next_data = data_loader.next_data
        # TODO
        # create an instance of the model you want
        model = FCNetModel(train_config, next_data)
        with tf.Session() as sess:
            # create tensorboard logger
            train_logger = Logger(sess, train_config)
            # create trainer and pass all the previous components to it
            trainer = ExampleTrainer(sess, model, data_loader, train_config,
                                     train_logger)
            # create tensorboard logger
            validate_logger = Logger(sess, validate_config)
            # create validater and pass all the previous components to it
            validater = ExampleValidater(sess, model, data_loader,
                                         validate_config, validate_logger)
            # load model if exists
            model.load(sess)
            # make a monkey patch to model
            trainer.train = monkey_patched_train.__get__(
                trainer, ExampleTrainer)
            # here you train your model
            trainer.train()
            # save model
            model.save(sess)
Example #8
def test_data():
    config_file = 'configs/config.json'
    config = process_config(config_file)
    generator = DataGenerator(config)
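    # fetch a small batch, then spot-check the token ids assigned to common punctuation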
    x, y = generator.generateBatch(2)
    print(x[0].shape)
    # print(generator.dictionary[''])
    print(generator.dictionary[' '])
    print(generator.dictionary['['])
    print(generator.dictionary[']'])
    print(generator.dictionary[','])
    print(generator.dictionary['。'])
Example #9
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("An error occurred during processing the configuration file")
        print(e)
        exit(0)

    # create the experiments dirs
    create_dirs([
        config.summary_dir, config.checkpoint_dir, config.config_file_dir
    ])

    # create tensorflow session
    sess = tf.Session()

    # create your data generator
    train_data = DataGenerator(config,
                               sess,
                               old_tfrecords=config.old_tfrecords,
                               train=True)
    test_data = DataGenerator(config,
                              sess,
                              old_tfrecords=config.old_tfrecords,
                              train=False)

    model = EncodeProcessDecode_v2(config)

    # create tensorboard logger
    logger = Logger(sess, config)

    # create trainer and pass all the previous components to it
    trainer = MPCTrainer(sess,
                         model,
                         train_data,
                         test_data,
                         config,
                         logger,
                         N=10)

    # load model if exists
    model.load(sess)

    results = trainer.mpc()
Example #10
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    except Exception as err:
        print("Missing or invalid arguments {}".format(err))
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data_loader = DataGenerator(config)

    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, config, logger, data_loader)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
Example #11
    def summarize(self):
        """Creating summaries for feature importance and other attributes of the graph 
        
        :return: graph summaries of the weights and mse error for training and testing process
        :rtype: a scalar Tensor of type string.
        """

        features_map = DataGenerator.get_features_names()

        # getting the first weights of shape (filter_size, filter_size, filter_size, input_channel, out_channel)
        w_zeroconv = self.sess.graph.get_tensor_by_name(
            'convolution/conv_layer_0/w:0')

        # just splitting the weights by each feature, axis points to the input_channel split
        feature_weights = tf.split(w_zeroconv,
                                   w_zeroconv.shape[-2].value,
                                   axis=3)
        # features_importance = tf.reduce_sum(tf.abs(w_zeroconv), reduction_indices = [1,2,4], name = "feature_importance")

        summaries = tf.summary.merge(
            (tf.summary.histogram('weights', w_zeroconv),
             *(tf.summary.histogram(f'weights_{feature_name}', value)
               for feature_name, value in zip(features_map, feature_weights)),
             tf.summary.scalar(
                 'mse', self.sess.graph.get_tensor_by_name('training/mse:0')),
             tf.summary.scalar(
                 'loss',
                 self.sess.graph.get_tensor_by_name('training/loss:0'))))

        return summaries
Example #12
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create an instance of the model you want
    model = MyModel(config)
    # create your data generator
    data = DataGenerator(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = SimpleTrainer(sess, model, data, config, logger)
    saverExternal = tf.train.Saver(var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='external'))
    saverExternal.restore(sess, "experiments/model_8/model8.ckpt_2")

    # here you train your model
    trainer.train()
    trainer.validate()
Example #13
def main():
    tmp = FLAGS.params  # necessary to get updated FLAGS values
    config = process_config("../configs/config.json", FLAGS.flag_values_dict())
    create_dirs([config.summary_dir, config.checkpoint_dir])
    print(config)
    sess = tf.Session()
    # Data process and generator
    print("Data Start Loading!", datetime.now())
    data = DataGenerator(config)
    print("Data Loaded!", datetime.now())
    # Create model
    # model_name = FLAGS.model_name
    model = S2S_TAtt(data.handle, config, data.next_batch)
    print("Model Initialized!", datetime.now())
    if config.tensorboard == 1:
        # Tensorboard logger
        logger = Logger(sess, config)
        print("Logger Initialized!", datetime.now())
        # Model trainer
        trainer = S2STrainer(sess, model, data, config, logger)
    else:
        trainer = S2STrainer(sess, model, data, config)
    print("Trainer Initialized!", datetime.now())
    # Training process
    if FLAGS.train:
        trainer.train()
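    # dump predictions, ground truth, and weights to HDF5 with LZF compression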
    with h5py.File("./result.h5", "w") as f:
        f.create_dataset("predictions",
                         data=trainer.predictions,
                         compression="lzf")
        f.create_dataset("groundtruth",
                         data=trainer.ground_truth,
                         compression="lzf")
        f.create_dataset("weights", data=trainer.weights, compression="lzf")
Example #14
def main():
    # store the JSON path passed via -c '<json file path>' in the config object
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    # set the directories where training summaries and model weights will be saved
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create the TensorFlow session
    sess = tf.Session()
    # load the data; the config object is used to split the data into batches
    data = DataGenerator(config)
    # load the model architecture. This project only uses the input size from the config,
    # but config values such as depth, shape, loss function, and optimizer could select different models
    model = mlp(config)
    # create the logger object that tracks and saves training progress
    logger = Logger(sess, config)
    # pass the session, model, dataset, config, and logger created above to prepare for training
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # if the same model was already being trained, resume training from its checkpoint
    model.load(sess)
    # here you train your model
    trainer.train()
Example #15
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create instance of the model you want
    model = ExampleModel(config)
    # create your data generator
    data = DataGenerator(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)

    # here you train your model
    trainer.train()
Example #16
def eval():
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # Get Model
    model_types = import_module('models.' + config.architecture + '_model')
    Model = getattr(model_types, 'Model')

    # create tensorflow session
    sess = tf.Session()

    # create your data generator
    data_loader = DataGenerator(config, eval_phase=True, eval_on_test_data=True)

    # create instance of the model you want
    model = Model(data_loader, config)

    # create trainer and pass all previous components to it
    trainer = Trainer(sess, model, config, None, data_loader, load_best=True)

    # here we evaluate on the test dataset
    test_loss, test_cer = trainer.test(tqdm_enable=True)
    print('\nTest set Loss:', test_loss)
    print('Test set CER:', round(test_cer*100, 2), '%')
Example #17
def main():
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(0)

    # config = process_config('configs/imc.json')
    create_dirs([config.summary_dir, config.checkpoint_dir])
    sess = tf.Session()
    # create instance of the model you want
    model = IMCModel(config)
    # load model if it exists
    model.load(sess)
    # create your data generator
    imc_data = DataGenerator(config)

    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all previous components to it
    trainer = IMCTrainer(sess, model, imc_data, config, logger)
    # here you train your model
    trainer.train()

    # trainer.recognize()
    recogflag = input("Recognize your image now? Images in {} [Y/n] ".format(
        config.dir_af))
    if recogflag in ("Y", "y", ""):
        recognize_image(config.dir_af, config.checkpoint_dir, config.graph_dir,
                        config.image_size)
    else:
        print("Process end!")
Example #18
    def train(self, max_iters, restore=False):
        """Network training loop."""
        data_layer = DataGenerator(self.roidb, self.imdb.nrof_classes,
                                   self.data)
        total_loss, model_loss, rpn_cross_entropy, rpn_loss_box = self.model.build_loss(
            ohem=cfg.TRAIN.OHEM)
        summary_op, log_image, log_image_data, log_image_name = self.logger.init_summary(
            rpn_reg_loss=rpn_loss_box,
            rpn_cls_loss=rpn_cross_entropy,
            model_loss=model_loss,
            total_loss=total_loss)
        train_op, lr = self.get_train_op(total_loss)
        # initialize variables
        self.sess.run(tf.global_variables_initializer())
        restore_iter = self.load_model(restore)
        fetch_list = [
            total_loss, model_loss, rpn_cross_entropy, rpn_loss_box,
            summary_op, train_op
        ]

        print(restore_iter, max_iters)
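        # step through training in epoch-sized chunks, logging and checkpointing after each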
        for _iter in range(restore_iter, max_iters, cfg.TRAIN.EPOCH_SIZE):
            losses = self.train_epoch(_iter, lr, data_layer, fetch_list)

            print('iter: %d / %d, total loss: %.4f, model loss: %.4f, rpn_loss_cls: %.4f, rpn_loss_box: %.4f, lr: %f' % \
                  (_iter+cfg.TRAIN.EPOCH_SIZE, max_iters, losses[0], losses[1], losses[2], losses[3], losses[5].eval()))
            self.logger.summarize(losses[4], self.global_step.eval())
            self.save(_iter + cfg.TRAIN.EPOCH_SIZE)
Example #19
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        if os.path.isfile("configs/example.json"):
            config = process_config("configs/example.json")
        else:
            config = process_config("../configs/example.json")

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data = DataGenerator(config)

    # create an instance of the model you want
    model = ExampleModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = ExampleTrainer(sess, model, data, config, logger)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
Example #20
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)
    os.environ['CUDA_VISIBLE_DEVICES'] = config.CUDA_VISIBLE_DEVICES
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create your data generator
    data = DataGenerator(config)

    # create an instance of the model you want
    model = AutoEncodingConv2dBNModel(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = MyModelTrainer(sess, model, data, config, logger)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
Example #21
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception:
        print("missing or invalid arguments")
        raise
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    configSess = tf.ConfigProto(allow_soft_placement=True,
                                log_device_placement=False)
    configSess.gpu_options.allow_growth = True
    sess = tf.Session(config=configSess)
    # create your data generator
    data = DataGenerator(config)
    # create an instance of the model you want
    try:
        ModelInit = all_models[config.model]
        model = ModelInit(config)
    except AttributeError:
        raise
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the previous components to it
    trainer = NetworkTrainer(sess, model, data, config, logger)
    # load model if exists
    model.load(sess, args.checkpoint_nb)
    # here you train your model
    trainer.train()
Example #22
def main(argv=sys.argv):
    """
    main function
    :param argv:    Incoming parameters
    :return:
    """
    # parse and load parameters
    parameters = load_parameters('parameters.json')
    arguments = parse_arguments(argv[1:])
    parameters = utils.parse_params(arguments, parameters)
    utils.print_parametes('parameters', parameters)
    # get model parameters
    model_parametes = get_model_parametes(parameters)
    # log file
    log_file = os.path.join(parameters["output_dir"], "log_%d" % time.time())
    log_f = utils.get_log_f(log_file)
    # data generator
    data_generator = DataGenerator(parameters)
    # create, train and infer model
    with tf.Session() as sess:
        model = ExampleModel(model_parametes)
        trainer = ExampleTrainer(sess, model, data_generator, parameters,
                                 log_f)
        trainer.train()

        # inference
        inference = ExampleInference(sess, model, data_generator, parameters,
                                     log_f)
        inference.infer()
Example #23
def main():
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    create_dirs([config.summary_dir, config.checkpoint_dir])

    sess = tf.Session()

    data = DataGenerator(config)

    model = mlp(config)

    logger = Logger(sess, config)

    trainer = ExampleTrainer(sess, model, data, config, logger)

    model.load(sess)
    # see the trainer file for the difference between trainer.train() and the newly written trainer.test()
    # y holds the actual PPA values of the test data; result holds the model's estimated PPA values as a list
    # the session returns result as a 2-D list of shape [1][num_samples], so result[0][i] is the prediction for each input
    y, result = trainer.test()
    cnt = 0
    print(result[0])
    for i in range(len(y)):
        # divide |actual - predicted| by the actual value and count samples within 10% error
        if abs(y[i] - float(result[0][i])) / y[i] <= 0.1:
            cnt += 1
    print('Samples predicted within 10% error:', cnt / len(y) * 100, '%')
Example #24
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except:
        print("missing or invalid arguments")
        exit(0)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create your data generator
    data = DataGenerator(config)
    # create tensorflow session
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)
    K.set_session(sess)
    # create an instance of the model you want
    model = GolfBallModel(config, data)
    # load model if exists
    model.load(sess)
    # create tensorboard logger
    logger = Logger(sess, config)
    if config.do_training:
        # create trainer and pass all the previous components to it
        trainer = GolfBallTrainer(sess, model, data, config, logger)
        # here you train your model
        trainer.train()
    if config.do_predict:
        GolfBallPrediction(sess, model, data, config)
Example #25
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    os.environ["CUDA_VISIBLE_devices"] = config.gpu
    import tensorflow as tf

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)
    gpuconfig.gpu_options.visible_device_list = config.gpu
    sess = tf.Session(config=gpuconfig)
    # create your data generator
    data = DataGenerator(config)

    # create an instance of the model you want
    model = invariant_basic(config, data)
    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
Example #26
def main():
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

        config.old_tfrecords = args.old_tfrecords
        config.normalize_data = False

    except Exception as e:
        print("An error occurred during processing the configuration file")
        print(e)
        exit(0)

    # create the experiments dirs
    create_dirs(
        [config.summary_dir, config.checkpoint_dir, config.config_file_dir])

    # create tensorflow session
    sess = tf.Session()

    # create your data generator
    #train_data = DataGenerator(config, sess, train=True)

    test_data = DataGenerator(config, sess, train=False)
    next_element = test_data.get_next_batch()
    dir_name = "test"

    # iterate over the test set, exporting summary images until the iterator is exhausted
    while True:
        try:
            features = sess.run(next_element)
            features = convert_dict_to_list_subdicts(features,
                                                     config.test_batch_size)
            features = features[0]
            summaries = create_target_summary_dicts(features)

            dir_path = check_exp_folder_exists_and_create(features, dir_name)
            if dir_path is not None:
                export_summary_images(config=config,
                                      summaries_dict_images=summaries,
                                      dir_path=dir_path)

        except tf.errors.OutOfRangeError:
            print("done exporting")
            break
Example #27
def main():
    # capture the config path from the run arguments
    # then process the json configuration file

    data_loader = DataLoader(data_dir, config)
    data_loader.load_directory('.tif')
    data_loader.create_np_arrays()
    data_loader.create_data_label_pairs()

    preptt = PrepTrainTest(config, data_loader)

    for data_label_pair in data_loader.data_label_pairs:
        x_data = data_label_pair[0]
        y_true = data_label_pair[1][:, :, 0]

        preptt.add_data(x_data, y_true)

    # Create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir, config.input_dir])

    # Create tensorflow session
    sess = tf.Session()

    # Create instance of the model you want
    model = PopModel(config)

    # Load model if exist
    model.load(sess)

    # Create Tensorboard logger
    logger = Logger(sess, config)
    logger.log_config()

    # Create your data generator
    data = DataGenerator(config, preptraintest=preptt)

    data.create_traintest_data()

    # Create trainer and pass all previous components to it
    trainer = PopTrainer(sess, model, data, config, logger)

    # Train model
    trainer.train()
Example #28
    def train_and_eval(self, config):

        # serving_feature_spec = tf.feature_column.make_parse_example_spec(self._get_feature_columns())
        # serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(serving_feature_spec)

        # exporter = tf.estimator.BestExporter(
        #     name="best_exporter",
        #     serving_input_receiver_fn=serving_input_receiver_fn,
        #     exports_to_keep=5)
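        # run training with interleaved evaluation: eval for 1000 steps, throttled to at most once per second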
        tf.estimator.train_and_evaluate(
            self.model,
            train_spec=tf.estimator.TrainSpec(
                input_fn=DataGenerator('train', config).get_dataset),
            eval_spec=tf.estimator.EvalSpec(input_fn=DataGenerator(
                'validation', config).get_dataset,
                                            steps=1000,
                                            start_delay_secs=0,
                                            throttle_secs=1))
Example #29
    def test(self, config):
        # run prediction over the dataset split named by config.mode
        predictions = self.model.predict(
            DataGenerator(config.mode, config).get_dataset)
        cls = [p['classes'] for p in predictions]
        cls_pred = np.array(cls, dtype='int').squeeze()
        # write the predicted classes into the submission file
        df = pd.read_csv(config.submission_file)
        df['AdoptionSpeed'] = cls_pred
        df.to_csv(config.submission_file, index=False)
        return cls_pred
Example #30
def main():
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("missing or invalid arguments")
        exit(-1)

    sess = tf.Session()

    # pick the GAN variant from the command line and wire up its data, model, logger, and trainer
    if args.model == 'dcgan':
        from models.dcgan_model import DCGANModel
        from trainers.dcgan_trainer import DCGANTrain

        create_dirs([config.summary_dir, config.checkpoint_dir, config.resized_data_dir])
        data = DataGenerator(config.data_dir, config.resized_data_dir, config.t_size, config.batch_size)
        model = DCGANModel(config)
        logger = Logger(sess, config)
        trainer = DCGANTrain(sess, model, data, config, logger)
        model.load(sess)
        trainer.train()

    elif args.model == 'cyclegan':
        from models.cyclegan_model import CycleGANModel
        from trainers.cyclegan_trainer import CycleGANTrain

        create_dirs([config.summary_dir, config.checkpoint_dir, config.resized_data_dir_a, config.resized_data_dir_b])
        dataA = DataGenerator(config.data_dir_a, config.resized_data_dir_a, config.t_size, config.batch_size)
        dataB = DataGenerator(config.data_dir_b, config.resized_data_dir_b, config.t_size, config.batch_size)
        model = CycleGANModel(config)
        logger = Logger(sess, config)
        trainer = CycleGANTrain(sess, model, [dataA, dataB], config, logger)
        model.load(sess)
        trainer.train()

    else:
        print("model doesn't exist")
        exit(-1)