Example No. 1
  def __init__(self):
    # output and dataset locations
    self._OUTPUT = filesystem.mkdir('../_outputs/')
    self._DATASET = '../_datasets'

    # logger config
    self._LOG_DATE = True
    self._LOG_SYS = False
    self._LOG_TRAIN = True
    self._LOG_TEST = True
    self._LOG_VAL = True
    self._LOG_NET = True
    self._LOG_WARN = True
    self._LOG_INFO = True
    self._LOG_ERR = True

    # compile config - processing *.py files
    self._COMPILE_DIR_BIN = '../_bin'  # output dir of binary file
    self._COMPILE_DIR_SRC = '../_src'  # output dir of source code
    self._COMPILE_EXCLUDE_ROOT = ['_', '.git', '.vscode']  # skip folders
    self._COMPILE_EXCLUDE_FILE = ['compile.py']  # skip files

    # summary settings
    self._SUMMARY_GRAD_STAT = False
    self._SUMMARY_GRAD_HIST = False
    self._SUMMARY_WEIGHT_STAT = False
    self._SUMMARY_WEIGHT_HIST = False
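
A minimal, self-contained sketch (hypothetical names, not from this repo) of how boolean flags like these typically gate logger output:

class Env:
    # trimmed stand-in for the config above
    _LOG_TRAIN = True
    _LOG_SYS = False

env = Env()

def log_train(msg):
    # emit train logs only when the flag is enabled
    if env._LOG_TRAIN:
        print('[TRAIN]', msg)

def log_sys(msg):
    if env._LOG_SYS:
        print('[SYS]', msg)

log_train('loss=0.42')  # printed
log_sys('cpu=80%')      # suppressed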
Example No. 2
    def test(self):
        """
    """
        # save current context
        self._enter_('test')

        # create a folder to save
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')

        # get data pipeline
        data, label, path = loads(self.config)

        # get network
        logit, net = self._net(data)
        loss, mae, rmse = self._loss(logit, label)

        # prepare
        total_num = self.data.total_num
        batchsize = self.data.batchsize
        num_iter = int(total_num / batchsize)
        info = string.concat(batchsize, [path, label, logit * self.data.range])
        mean_loss, mean_mae, mean_rmse = 0, 0, 0

        # get saver
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # get latest checkpoint
            global_step = self.snapshot.restore(sess, saver)
            with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
                with context.QueueContext(sess):
                    for _ in range(num_iter):
                        # running session to acquire values
                        _loss, _mae, _rmse, _info = sess.run(
                            [loss, mae, rmse, info])
                        mean_loss += _loss
                        mean_mae += _mae
                        mean_rmse += _rmse
                        # save tensor info to text file
                        for _line in _info:
                            fw.write(_line + b'\r\n')

                    # average accumulated metrics over all batches
                    mean_loss = 1.0 * mean_loss / num_iter
                    mean_mae = 1.0 * mean_mae / num_iter
                    mean_rmse = 1.0 * mean_rmse / num_iter

            # display results on screen
            keys = ['total sample', 'num batch', 'loss', 'mae', 'rmse']
            vals = [total_num, num_iter, mean_loss, mean_mae, mean_rmse]
            logger.test(logger.iters(int(global_step), keys, vals))

            # write to summary
            self.summary.adds(global_step=global_step,
                              tags=['test/loss', 'test/mae', 'test/rmse'],
                              values=[mean_loss, mean_mae, mean_rmse])

            self._exit_()
            return mean_mae
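
The loop above accumulates per-batch metrics and divides once at the end; note that num_iter = int(total_num / batchsize) silently drops any final partial batch. A plain-Python sketch of the same accumulate-then-divide pattern (made-up numbers):

batches = [(0.9, 0.30), (0.7, 0.25), (0.8, 0.28)]  # (loss, mae) per batch
num_iter = len(batches)

mean_loss, mean_mae = 0.0, 0.0
for _loss, _mae in batches:
    mean_loss += _loss
    mean_mae += _mae

# average once at the end, as the example does after its session loop
mean_loss /= num_iter
mean_mae /= num_iter
print('loss=%.4f mae=%.4f' % (mean_loss, mean_mae))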
Example No. 3
    def test(self):
        """ we need acquire threshold from validation first """
        with tf.Graph().as_default():
            self._enter_('val')
            val_dir = filesystem.mkdir(self.config.output_dir + '/val/')
            self._val_or_test(val_dir)
            self._exit_()

        with tf.Graph().as_default():
            self._enter_('test')
            test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
            step = self._val_or_test(test_dir)
            val_err, val_thed, test_err = similarity.get_all_result(
                self.val_x, self.val_y, self.val_l, self.test_x, self.test_y,
                self.test_l, False)
            keys = ['val_error', 'threshold', 'test_error']
            vals = [val_err, val_thed, test_err]
            logger.test(logger.iters(int(step) - 1, keys, vals))
            self._exit_()
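
similarity.get_all_result evidently picks a decision threshold on the validation pairs and reports test error at that fixed threshold. A rough, self-contained NumPy sketch of that protocol (made-up scores; the repo's helper may differ in detail):

import numpy as np

val_scores = np.array([0.9, 0.8, 0.3, 0.2])
val_labels = np.array([1, 1, 0, 0])            # 1 = same identity
test_scores = np.array([0.85, 0.4, 0.6, 0.1])
test_labels = np.array([1, 0, 1, 0])

def error_at(scores, labels, thr):
    # classification error when pairs scoring >= thr are called "same"
    pred = (scores >= thr).astype(int)
    return float(np.mean(pred != labels))

# sweep candidate thresholds taken from the validation scores
thresholds = np.unique(val_scores)
val_errs = [error_at(val_scores, val_labels, t) for t in thresholds]
best = thresholds[int(np.argmin(val_errs))]

print('val_error=%.2f thr=%.2f test_error=%.2f' %
      (min(val_errs), best, error_at(test_scores, test_labels, best)))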
Example No. 4
  def _initialize(self):
    pid = datetime.strftime(datetime.now(), '%y%m%d%H%M%S')
    # 1. setting output dir
    if self.config.output_dir is None:
      self.config.output_dir = filesystem.mkdir(
          env._OUTPUT + self.config.name + '.' +
          self.config.target + '.' + pid)

    # 2. setting logger location
    logger.init(self.config.name + '.' + pid, self.config.output_dir)
    logger.info('Initialized logger successfully.')
    logger.info('Current model output dir: %s' % self.config.output_dir)
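
The pid timestamp gives every run a unique output directory. A minimal standard-library sketch of the same naming scheme (the name and target values are illustrative):

from datetime import datetime
import os

pid = datetime.now().strftime('%y%m%d%H%M%S')
name, target = 'mnist', 'cnn.classify'   # illustrative values
output_dir = os.path.join('../_outputs', '%s.%s.%s' % (name, target, pid))
os.makedirs(output_dir, exist_ok=True)   # like filesystem.mkdir
print(output_dir)  # e.g. ../_outputs/mnist.cnn.classify.240101120000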
Example No. 5
    def test(self):
        """
    """
        # save current context
        self._enter_('test')

        # create a folder to save
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')

        # get data pipeline
        data, label, path = loads(self.config)

        # total_num
        total_num = self.data.total_num
        batchsize = self.data.batchsize

        # network
        logit, net = self._net(data)
        loss, error, pred = self._loss(logit, label)

        # prepare
        info = string.concat(batchsize, [path, label, pred])
        num_iter = int(total_num / batchsize)
        mean_err, mean_loss = 0, 0

        # get saver
        saver = tf.train.Saver()
        with context.DefaultSession() as sess:
            global_step = self.snapshot.restore(sess, saver)
            with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
                with context.QueueContext(sess):
                    for _ in range(num_iter):
                        _loss, _err, _info = sess.run([loss, error, info])
                        mean_loss += _loss
                        mean_err += _err
                        # save per-sample info lines to the text file
                        for _line in _info:
                            fw.write(_line + b'\r\n')

                    # average accumulated metrics over all batches
                    mean_loss = 1.0 * mean_loss / num_iter
                    mean_err = 1.0 * mean_err / num_iter

            # display results on screen
            keys = ['total sample', 'num batch', 'loss', 'error']
            vals = [total_num, num_iter, mean_loss, mean_err]
            logger.test(logger.iters(int(global_step), keys, vals))

            # write to summary
            self.summary.adds(global_step=global_step,
                              tags=['test/error', 'test/loss'],
                              values=[mean_err, mean_loss])

            self._exit_()
            return mean_err
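
string.concat appears to build one byte string per sample from the listed tensors, which the writer loop then emits line by line. A hypothetical sketch of that behavior (the repo's actual field formatting may differ):

paths = [b'img_0.png', b'img_1.png']
labels = [3, 7]
preds = [2.95, 7.10]

# one byte line per sample: "<path> <label> <pred>"
info = [b' '.join([paths[i],
                   str(labels[i]).encode(),
                   ('%.4f' % preds[i]).encode()])
        for i in range(len(paths))]

with open('test_output.txt', 'wb') as fw:
    for _line in info:
        fw.write(_line + b'\r\n')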
Example No. 6
    def test(self):
        """ random test a group of image
    """
        self._enter_('test')
        test_dir = filesystem.mkdir(self.config['output_dir'] + '/test/')

        # condition labels: ten rows, each enumerating classes 0-9
        y = [[i for i in range(10)] for i in range(10)]
        logit_G, net_G = self._generator(y, 'test')

        saver = tf.train.Saver(name='restore_all')
        with tf.Session() as sess:
            step = self.snapshot.restore(sess, saver)
            imgs = sess.run(logit_G)
            image.save_multi_to_one(imgs, [10, 10],
                                    path.join_step(test_dir, step, 'png'))

        self._exit_()
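
image.save_multi_to_one presumably tiles the 10x10 batch of generated samples into a single canvas. A NumPy sketch of that tiling (row-major layout assumed):

import numpy as np

def tile(imgs, rows, cols):
    # place image i at grid cell (i // cols, i % cols)
    n, h, w, c = imgs.shape
    assert n == rows * cols
    canvas = np.zeros((rows * h, cols * w, c), imgs.dtype)
    for i in range(n):
        r, k = divmod(i, cols)
        canvas[r * h:(r + 1) * h, k * w:(k + 1) * w] = imgs[i]
    return canvas

imgs = np.random.rand(100, 28, 28, 1)  # e.g. ten samples per class label
print(tile(imgs, 10, 10).shape)        # (280, 280, 1)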
Example No. 7
    def _val_or_test(self, dstdir):
        """ COMMON FOR TRAIN AND VAL """
        # considering output train image
        data, info, path = loads(self.config)
        c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
        c1_path, p1_path, c2_path, p2_path = tf.unstack(path, axis=1)
        label, cond = tf.unstack(info, axis=1)

        # encode image to a vector
        c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
        p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)

        c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
        c1_z = self._generator(c1_z, cond)
        c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)

        p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
        p2_z = self._generator(p2_z, cond, True)
        p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)

        R_loss, loss = self._loss_metric(feat_c1, feat_p2, None)

        saver = tf.train.Saver()
        c1_zs, p2_zs = 0, 0

        with context.DefaultSession() as sess:
            step = self.snapshot.restore(sess, saver)
            filesystem.mkdir(dstdir + '/' + step + '_c1')
            filesystem.mkdir(dstdir + '/' + step + '_p2')

            info = string.concat(
                self.data.batchsize,
                [c1_path, p1_path, c2_path, p2_path, label, loss])

            output = [
                c1_fake, p2_fake, c1_path, p2_path, c1_real, c2_real, p1_real,
                p2_real, info, feat_c1, feat_p2, label
            ]

            fw = open(dstdir + '%s.txt' % step, 'wb')
            with context.QueueContext(sess):
                for i in range(self.epoch_iter):
                    _c1, _p2, _c1p, _p2p, _c1r, _c2r, _p1r, _p2r, _info, _x, _y, _label = sess.run(
                        output)
                    self._write_feat_to_npy(i, _x, _y, _label)
                    for _line in _info:
                        fw.write(_line + b'\r\n')

                    if self.is_save_all_images:
                        image.save_multi(dstdir + '/' + step + '_c1', _c1,
                                         _c1p)
                        image.save_multi(dstdir + '/' + step + '_p2', _p2,
                                         _p2p)

                    if self.is_save_batch_images:
                        image.save_batchs_to_one(
                            imgs_list=[_c1, _p2, _c1r, _c2r, _p1r, _p2r],
                            batchsize=self.data.batchsize,
                            dst=dstdir,
                            step=step,
                            name_list=[
                                '_c1', '_p2', '_c1r', '_c2r', '_p1r', '_p2r'
                            ])

                    if self.is_save_feats:
                        # grow the feature matrices; 0 is the "empty" sentinel
                        c1_zs = (_x if isinstance(c1_zs, int)
                                 else np.row_stack((c1_zs, _x)))
                        p2_zs = (_y if isinstance(p2_zs, int)
                                 else np.row_stack((p2_zs, _y)))

            fw.close()
            if self.is_save_feats:
                np.save(dstdir + '/' + step + '_c1.npy', c1_zs)
                np.save(dstdir + '/' + step + '_p2.npy', p2_zs)

            return step
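
The integer-sentinel stacking above works, but each np.row_stack call copies the whole accumulated array; collecting batches in a list and concatenating once is the more common idiom:

import numpy as np

feats = []
for _ in range(3):                  # stands in for the session loop
    _x = np.random.rand(8, 128)     # one batch of features
    feats.append(_x)
c1_zs = np.concatenate(feats, axis=0)
print(c1_zs.shape)                  # (24, 128)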