Example #1
    def __init__(self,
                 cfg,
                 multi_gpu=False,
                 version=None,
                 load_last_ckp=True,
                 during_training=False,
                 epoch_train=None,
                 step_train=None,
                 model_arch_info=None):

        # Config
        self.cfg = cfg
        self.multi_gpu = multi_gpu
        self.version = version
        self.load_last_ckp = load_last_ckp
        self.during_training = during_training
        self.epoch_train = epoch_train
        self.step_train = step_train
        self.append_info = self.info[0]

        # Use encode transfer learning
        if self.cfg.TRANSFER_LEARNING == 'encode':
            self.tl_encode = True
        else:
            self.tl_encode = False

        # Get paths for testing
        self.checkpoint_path, self.test_log_path, self.test_image_path = \
            self._get_paths()

        # Save config
        utils.save_config_log(self.test_log_path, self.cfg, model_arch_info)

        # Load data
        self.x_test, self.y_test, self.imgs_test = self._load_data()
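
`_get_paths()` and `_load_data()` are called here but not shown in this excerpt. Judging from the path logic in Example #3 below, `_get_paths()` probably resembles the following sketch (the config fields `TEST_VERSION` and `TEST_CKP_IDX` are taken from that example; the real helper likely also takes `version`, `during_training` and the other constructor arguments into account):

    def _get_paths(self):
        """Sketch only: derive checkpoint, test-log and test-image paths."""
        from os.path import join, isdir

        # Checkpoint to restore: <CHECKPOINT_PATH>/<version>/models.ckpt-<idx>
        checkpoint_path = join(
            self.cfg.CHECKPOINT_PATH,
            '{}/models.ckpt-{}'.format(self.cfg.TEST_VERSION,
                                       self.cfg.TEST_CKP_IDX))

        # Test log directory; append '(n)' if it already exists
        test_log_path_ = join(
            self.cfg.TEST_LOG_PATH,
            '{}-{}'.format(self.cfg.TEST_VERSION, self.cfg.TEST_CKP_IDX))
        test_log_path = test_log_path_
        i_append_info = 0
        while isdir(test_log_path):
            i_append_info += 1
            test_log_path = test_log_path_ + '({})'.format(i_append_info)

        # Directory for images saved during testing
        test_image_path = join(test_log_path, 'images')

        return checkpoint_path, test_log_path, test_image_path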
Example #2
    def __init__(self, cfg, model_arch, mode='normal'):
        """Load data and initialize models."""
        # Global start time
        self.start_time = time.time()

        # Config
        self.cfg = cfg

        # Select the model variant according to the running mode
        if mode == 'multi-tasks':
            self.multi_gpu = True
            model = CapsNetMultiTasks(cfg, model_arch)
        elif mode == 'multi-gpu':
            self.multi_gpu = True
            model = CapsNetDistribute(cfg, model_arch)
        else:
            self.multi_gpu = False
            model = CapsNet(cfg, model_arch)

        # Use encode transfer learning
        if self.cfg.TRANSFER_LEARNING == 'encode':
            self.tl_encode = True
        else:
            self.tl_encode = False

        # Get paths from configuration
        self.preprocessed_path, self.train_log_path, \
            self.summary_path, self.checkpoint_path, \
            self.train_image_path = self._get_paths()

        # Load data
        self.x_train, self.y_train, self.imgs_train, \
            self.x_valid, self.y_valid, self.imgs_valid = self._load_data()

        # Calculate number of batches
        self.n_batch_train = len(self.y_train) // cfg.BATCH_SIZE
        self.n_batch_valid = len(self.y_valid) // cfg.BATCH_SIZE

        # Build graph
        utils.thick_line()
        print('Building graph...')
        tf.reset_default_graph()
        self.step, self.train_graph, self.inputs, self.labels, self.input_imgs,\
            self.is_training, self.optimizer, self.saver, self.summary, \
            self.loss, self.accuracy, self.clf_loss, self.rec_loss, \
            self.rec_images, self.preds = model.build_graph(
                input_size=self.x_train.shape[1:],
                image_size=self.imgs_train.shape[1:],
                num_class=self.y_train.shape[1])

        # Save config
        self.clf_arch_info = model.clf_arch_info
        self.rec_arch_info = model.rec_arch_info
        utils.save_config_log(self.train_log_path, cfg, self.clf_arch_info,
                              self.rec_arch_info)
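
The handles returned by `model.build_graph()` are usually consumed later by a training method. A minimal, hypothetical sketch of how they might be wired into a TF 1.x session (`EPOCHS` is an assumed config field; the repository's actual training loop will also log summaries, run validation, and so on):

    def train(self):
        """Sketch only: feed mini-batches through the graph built above."""
        with self.train_graph.as_default():
            init_op = tf.global_variables_initializer()

        with tf.Session(graph=self.train_graph) as sess:
            sess.run(init_op)
            for _ in range(self.cfg.EPOCHS):
                for i in range(self.n_batch_train):
                    start = i * self.cfg.BATCH_SIZE
                    end = start + self.cfg.BATCH_SIZE
                    sess.run(self.optimizer,
                             feed_dict={self.inputs: self.x_train[start:end],
                                        self.labels: self.y_train[start:end],
                                        self.input_imgs: self.imgs_train[start:end],
                                        self.is_training: True})
            self.saver.save(sess, self.checkpoint_path + '/models.ckpt')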
Example #3
    def __init__(self, cfg):

        # Config
        self.cfg = cfg

        # Get checkpoint path
        self.checkpoint_path = join(
            cfg.CHECKPOINT_PATH,
            '{}/models.ckpt-{}'.format(self.cfg.TEST_VERSION,
                                       self.cfg.TEST_CKP_IDX))

        # Get log path; append a suffix if the directory already exists.
        test_log_path_ = join(
            self.cfg.TEST_LOG_PATH, '{}-{}'.format(self.cfg.TEST_VERSION,
                                                   self.cfg.TEST_CKP_IDX))
        self.test_log_path = test_log_path_
        i_append_info = 0
        while isdir(self.test_log_path):
            i_append_info += 1
            self.test_log_path = test_log_path_ + '({})'.format(i_append_info)

        # Path for saving images
        self.test_image_path = join(self.test_log_path, 'images')

        # Check directory of paths
        utils.check_dir([self.test_log_path])
        if self.cfg.TEST_WITH_RECONSTRUCTION:
            if self.cfg.TEST_SAVE_IMAGE_STEP is not None:
                utils.check_dir([self.test_image_path])

        # Save config
        utils.save_config_log(self.test_log_path, self.cfg)

        # Load data
        utils.thick_line()
        print('Loading data...')
        utils.thin_line()
        preprocessed_path_ = join(cfg.DPP_DATA_PATH, cfg.DATABASE_NAME)
        self.x_test = utils.load_data_from_pkl(
            join(preprocessed_path_, 'x_test.p'))
        self.y_test = utils.load_data_from_pkl(
            join(preprocessed_path_, 'y_test.p'))

        # Calculate number of batches
        self.n_batch_test = len(self.y_test) // self.cfg.TEST_BATCH_SIZE
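
The small `utils` helpers used throughout these examples are not shown. Their likely behavior, inferred from how they are called, is sketched below; these are assumptions, not the repository's actual implementations:

import os
import pickle


def check_dir(path_list):
    """Assumed: create each directory in the list if it does not exist yet."""
    for dir_path in path_list:
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)


def load_data_from_pkl(data_path):
    """Assumed: load a pickled array from disk."""
    with open(data_path, 'rb') as f:
        return pickle.load(f)


def thick_line():
    """Assumed: print a visual separator."""
    print('=' * 55)


def thin_line():
    """Assumed: print a thinner visual separator."""
    print('-' * 55)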
Example #4
    def __init__(self, model, cfg):
        """
    Load data and initialize models.

    Args:
      model: the models which will be trained
    """
        # Global start time
        self.start_time = time.time()

        # Config
        self.cfg = cfg

        # Get paths from configuration
        train_log_path_ = join(cfg.TRAIN_LOG_PATH, cfg.VERSION)
        test_log_path_ = join(cfg.TEST_LOG_PATH, cfg.VERSION)
        summary_path_ = join(cfg.SUMMARY_PATH, cfg.VERSION)
        checkpoint_path_ = join(cfg.CHECKPOINT_PATH, cfg.VERSION)
        self.preprocessed_path = join(cfg.DPP_DATA_PATH, cfg.DATABASE_NAME)

        # Get log paths; append a suffix if the directory already exists.
        self.train_log_path = train_log_path_
        i_append_info = 0
        while isdir(self.train_log_path):
            i_append_info += 1
            self.train_log_path = train_log_path_ + '({})'.format(
                i_append_info)

        if i_append_info > 0:
            self.summary_path = summary_path_ + '({})'.format(i_append_info)
            self.checkpoint_path = checkpoint_path_ + '({})'.format(
                i_append_info)
            self.test_log_path = test_log_path_ + '({})'.format(i_append_info)
        else:
            self.summary_path = summary_path_
            self.checkpoint_path = checkpoint_path_
            self.test_log_path = test_log_path_

        # Paths for saving images
        self.train_image_path = join(self.train_log_path, 'images')
        self.test_image_path = join(self.test_log_path, 'images')

        # Check directory of paths
        utils.check_dir([self.train_log_path, self.checkpoint_path])
        if cfg.WITH_RECONSTRUCTION:
            if cfg.SAVE_IMAGE_STEP is not None:
                utils.check_dir([self.train_image_path])

        # Load data
        utils.thick_line()
        print('Loading data...')
        utils.thin_line()
        self.x_train = utils.load_data_from_pkl(
            join(self.preprocessed_path, 'x_train.p'))
        self.y_train = utils.load_data_from_pkl(
            join(self.preprocessed_path, 'y_train.p'))
        self.x_valid = utils.load_data_from_pkl(
            join(self.preprocessed_path, 'x_valid.p'))
        self.y_valid = utils.load_data_from_pkl(
            join(self.preprocessed_path, 'y_valid.p'))

        # Calculate number of batches
        self.n_batch_train = len(self.y_train) // cfg.BATCH_SIZE
        self.n_batch_valid = len(self.y_valid) // cfg.BATCH_SIZE

        # Build graph
        utils.thick_line()
        print('Building graph...')
        tf.reset_default_graph()
        self.step, self.train_graph, self.inputs, self.labels, self.is_training, \
            self.optimizer, self.saver, self.summary, self.loss, self.accuracy,\
            self.clf_loss, self.rec_loss, self.rec_images = model.build_graph(
                image_size=self.x_train.shape[1:],
                num_class=self.y_train.shape[1])

        # Save config
        utils.save_config_log(self.train_log_path, cfg, model.clf_arch_info,
                              model.rec_arch_info)
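
Both training examples compute `len(y) // BATCH_SIZE`, so any trailing partial batch is dropped. A small, hypothetical generator that matches this arithmetic:

def get_batches(x, y, batch_size):
    """Yield full (x, y) batches; the trailing partial batch is dropped,
    matching n_batch = len(y) // batch_size in the examples above."""
    n_batch = len(y) // batch_size
    for i in range(n_batch):
        start = i * batch_size
        yield x[start:start + batch_size], y[start:start + batch_size]


# Worked example: with 1050 training labels and BATCH_SIZE = 100,
# n_batch_train == 10, so the last 50 samples are skipped in each epoch.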