Example #1
    def _val_or_test(self, dst):
        """ common component for val and run
    dst: save record to dst filename
    config: config file to running
    """
        # get data pipeline
        data, label, path = loads(self.config)
        x1, x2 = tf.unstack(data, axis=1)
        path1, path2 = tf.unstack(path, axis=1)

        # get loss
        x1, x2 = self._network(x1, x2)
        loss, loss_batch = cosine.get_loss(x1, x2, label, self.batchsize,
                                           False)
        # write records
        info = string.concat(self.batchsize,
                             [path1, path2, label, loss_batch])

        # setting session running info
        output = [x1, x2, label, info]
        saver = tf.train.Saver()
        with context.DefaultSession() as sess:
            step = self.snapshot.restore(sess, saver)
            fw = open('%s/%s.txt' % (dst, step), 'wb')
            with context.QueueContext(sess):
                for i in range(self.epoch_iter):
                    _f1, _f2, _label, _info = sess.run(output)
                    self._write_feat_to_npy(i, _f1, _f2, _label)
                    for _line in _info:
                        fw.write(_line + b'\r\n')
            fw.close()
            return step
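
The helper string.concat is never shown in these snippets; judging from the call sites, it packs the per-sample fields into one byte string per row so each row can be written out as a text line. A minimal sketch of such a helper, assuming TF 1.x string ops (the name and signature are taken from the calls above, the body is a guess):

    import tensorflow as tf

    def concat(batchsize, tensor_list, connector=' '):
        """Hypothetical sketch: one joined byte string per sample."""
        columns = []
        for t in tensor_list:
            t = tf.reshape(t, [batchsize, -1])   # one row per sample
            if t.dtype != tf.string:
                t = tf.as_string(t)              # numeric fields -> strings
            columns.append(tf.reduce_join(t, axis=1, separator=connector))
        return tf.string_join(columns, separator=connector)
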
Example #2
    def train(self):
        # set phase
        self._enter_('train')

        # get data pipeline
        data, info, _ = loads(self.config)
        c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
        label, cond = tf.unstack(info, axis=1)

        # encode image to a vector
        c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
        p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)

        # children to parent
        c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
        c1_z = self._generator(c1_z, cond)
        c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)

        # parent to children
        p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
        p2_z = self._generator(p2_z, cond, True)
        p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)

        # loss for generator
        E1_loss = self._loss_vae(c1_real, c1_fake, c1_mu, c1_sigma)
        E2_loss = self._loss_vae(p2_real, p2_fake, p2_mu, p2_sigma)
        E_loss = E1_loss + E2_loss

        loss = E_loss

        # allocate two optimizers
        global_step = tf.train.create_global_step()

        var_e = variables.select_vars('encoder')
        var_g = variables.select_vars('generator')

        op1 = updater.default(self.config, loss, global_step, var_e, 0)
        op2 = updater.default(self.config, loss, None, var_g, 1)
        train_op = tf.group(op1, op2)

        # for storage
        saver = tf.train.Saver(var_list=variables.all())

        # hooks
        self.add_hook(self.snapshot.init())
        self.add_hook(self.summary.init())
        self.add_hook(
            context.Running_Hook(config=self.config.log,
                                 step=global_step,
                                 keys=['E'],
                                 values=[E_loss],
                                 func_test=self.test,
                                 func_val=None))

        with context.DefaultSession(self.hooks) as sess:
            self.snapshot.restore(sess, saver)
            while not sess.should_stop():
                sess.run(train_op)
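
The sampling step above, z = mu + sigma * eps with eps ~ N(0, I), is the standard VAE reparameterization trick. The body of _loss_vae is not included in the listing; a hedged sketch of what it plausibly computes, assuming a diagonal Gaussian posterior and a Bernoulli reconstruction term (the clipping of the fakes into (1e-8, 1 - 1e-8) suggests cross-entropy):

    import tensorflow as tf

    def _loss_vae(real, fake, mu, sigma, epsilon=1e-8):
        """Hypothetical sketch: Bernoulli reconstruction + Gaussian KL."""
        # reconstruction: pixel-wise cross-entropy (fake is pre-clipped)
        recon = -tf.reduce_sum(
            real * tf.log(fake + epsilon) +
            (1. - real) * tf.log(1. - fake + epsilon))
        # KL(N(mu, sigma^2) || N(0, 1)) for a diagonal Gaussian
        kl = 0.5 * tf.reduce_sum(
            tf.square(mu) + tf.square(sigma)
            - tf.log(tf.square(sigma) + epsilon) - 1.)
        return recon + kl
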
Example #3
    def train(self):
        """
    """
        # set phase
        self._enter_('train')

        # get data pipeline
        data, label, _ = loads(self.config)

        # generate fake images
        logit_G, net_G = self._generator(label)

        # discriminate fake images
        logit_F, net_F, out_F = self._discriminator(logit_G,
                                                    label,
                                                    reuse=False)
        # discriminate real images
        logit_R, net_R, out_R = self._discriminator(data, label, reuse=True)

        # classify fake
        c_F = self._classifier(out_F, reuse=False)
        # classify real
        c_R = self._classifier(out_R, reuse=True)

        # loss
        D_loss, G_loss, C_loss = self._loss(label, logit_F, logit_R, c_F, c_R)

        # t_vars = tf.trainable_variables()
        d_vars = variables.select_vars('discriminator')
        g_vars = variables.select_vars('generator')
        c_vars = variables.select_vars('classifier')

        # update
        step = tf.train.create_global_step()
        d_op = updater.default(self.config, D_loss, step, d_vars, 0)
        g_op = updater.default(self.config, G_loss, step, g_vars, 0)
        c_op = updater.default(self.config, C_loss, step, c_vars, 0)

        # assemble
        train_op = [[d_op, g_op, c_op]]
        saver = tf.train.Saver(variables.all())

        # hooks
        self.add_hook(self.snapshot.init())
        self.add_hook(self.summary.init())
        self.add_hook(
            context.Running_Hook(config=self.config.log,
                                 step=step,
                                 keys=['D_loss', 'G_loss', 'C_loss'],
                                 values=[D_loss, G_loss, C_loss],
                                 func_test=self.test,
                                 func_val=None))

        # monitor session
        with context.DefaultSession(self.hooks) as sess:
            self.snapshot.restore(sess, saver)
            while not sess.should_stop():
                sess.run(train_op)
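
updater.default appears in every example with the signature (config, loss, global_step, var_list, index), but its body is not part of the listing. A minimal sketch under the assumption that index selects one of several optimizer settings in the config (the attribute names on config are invented):

    import tensorflow as tf

    def default(config, loss, global_step=None, var_list=None, index=0):
        """Hypothetical sketch: build a minimize op for one sub-network."""
        lr = config.lr[index].learning_rate  # assumed config layout
        optimizer = tf.train.AdamOptimizer(learning_rate=lr)
        return optimizer.minimize(
            loss, global_step=global_step, var_list=var_list)
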
Example #4
    def test(self):
        """
    """
        # save current context
        self._enter_('test')

        # create a folder to save
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')

        # get data pipeline
        data, label, path = loads(self.config)

        # get network
        logit, net = self._net(data)
        loss, mae, rmse = self._loss(logit, label)

        # prepare
        total_num = self.data.total_num
        batchsize = self.data.batchsize
        num_iter = int(total_num / batchsize)
        info = string.concat(batchsize, [path, label, logit * self.data.range])
        mean_loss, mean_mae, mean_rmse = 0, 0, 0

        # get saver
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # get latest checkpoint
            global_step = self.snapshot.restore(sess, saver)
            with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
                with context.QueueContext(sess):
                    for _ in range(num_iter):
                        # running session to acquire values
                        _loss, _mae, _rmse, _info = sess.run(
                            [loss, mae, rmse, info])
                        mean_loss += _loss
                        mean_mae += _mae
                        mean_rmse += _rmse
                        # save tensor info to text file
                        for _line in _info:
                            fw.write(_line + b'\r\n')

                    # statistics: average over all batches
                    mean_loss = 1.0 * mean_loss / num_iter
                    mean_mae = 1.0 * mean_mae / num_iter
                    mean_rmse = 1.0 * mean_rmse / num_iter

            # display results on screen
            keys = ['total sample', 'num batch', 'loss', 'mae', 'rmse']
            vals = [total_num, num_iter, mean_loss, mean_mae, mean_rmse]
            logger.test(logger.iters(int(global_step), keys, vals))

            # write to summary
            self.summary.adds(global_step=global_step,
                              tags=['test/loss', 'test/mae', 'test/rmse'],
                              values=[mean_loss, mean_mae, mean_rmse])

            self._exit_()
            return mean_mae
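
Here _loss returns (loss, mae, rmse), so it is presumably a regression objective with two monitoring metrics. A sketch consistent with those return values; the exact training loss the authors use is not shown:

    import tensorflow as tf

    def _loss(logit, label):
        """Hypothetical sketch: MSE training loss with MAE/RMSE metrics."""
        label = tf.reshape(tf.to_float(label), tf.shape(logit))
        residual = logit - label
        loss = tf.reduce_mean(tf.square(residual))
        mae = tf.reduce_mean(tf.abs(residual))
        rmse = tf.sqrt(tf.reduce_mean(tf.square(residual)))
        return loss, mae, rmse
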
Example #5
    def test(self):
        """
    """
        # save current context
        self._enter_('test')

        # create a folder to save
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')

        # get data pipeline
        data, label, path = loads(self.config)

        # total_num
        total_num = self.data.total_num
        batchsize = self.data.batchsize

        # network
        logit, net = self._net(data)
        loss, error, pred = self._loss(logit, label)

        # prepare
        info = string.concat(batchsize, [path, label, pred])
        num_iter = int(total_num / batchsize)
        mean_err, mean_loss = 0, 0

        # get saver
        saver = tf.train.Saver()
        with context.DefaultSession() as sess:
            global_step = self.snapshot.restore(sess, saver)
            with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
                with context.QueueContext(sess):
                    for _ in range(num_iter):
                        _loss, _err, _info = sess.run([loss, error, info])
                        mean_loss += _loss
                        mean_err += _err
                        for _line in _info:
                            fw.write(_line + b'\r\n')

                    # statistics: average over all batches
                    mean_loss = 1.0 * mean_loss / num_iter
                    mean_err = 1.0 * mean_err / num_iter

            # display results on screen
            keys = ['total sample', 'num batch', 'loss', 'error']
            vals = [total_num, num_iter, mean_loss, mean_err]
            logger.test(logger.iters(int(global_step), keys, vals))

            # write to summary
            self.summary.adds(global_step=global_step,
                              tags=['test/error', 'test/loss'],
                              values=[mean_err, mean_loss])

            self._exit_()
            return mean_err
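
In this variant _loss returns (loss, error, pred), which points at classification rather than regression. A hedged sketch matching that interface, assuming integer class labels:

    import tensorflow as tf

    def _loss(logit, label):
        """Hypothetical sketch: softmax cross-entropy with top-1 error."""
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=label, logits=logit))
        pred = tf.argmax(logit, axis=1)
        correct = tf.to_float(tf.equal(pred, tf.to_int64(label)))
        error = 1. - tf.reduce_mean(correct)
        return loss, error, pred
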
Example #6
    def train(self):
        """
    """
        # set phase
        self._enter_('train')

        # get data pipeline
        data, labels, path = loads(self.config)
        # data = tf.reduce_sum(data, axis=3)
        # data = tf.expand_dims(data, axis=3)
        # data = tf.sign(data)

        label, cond = tf.unstack(labels, axis=1)
        # get network
        logit, net = self._net(data)
        cond = tf.reshape(cond, [self.data.batchsize, 1])
        label = tf.reshape(label, [self.data.batchsize, 1])
        label = label * cond
        # label = label * (cond - 234.) / (689. - 234.)
        # get loss
        loss, mae, rmse = self._loss(logit, label)

        # update
        global_step = tf.train.create_global_step()
        train_op = updater.default(self.config, loss, global_step)

        # for storage
        saver = tf.train.Saver(var_list=variables.all())

        # hooks
        snapshot_hook = self.snapshot.init()
        summary_hook = self.summary.init()
        running_hook = context.Running_Hook(config=self.config.log,
                                            step=global_step,
                                            keys=['loss', 'mae', 'rmse'],
                                            values=[loss, mae, rmse],
                                            func_test=self.test,
                                            func_val=None)

        # monitor session
        with tf.train.MonitoredTrainingSession(
                hooks=[
                    running_hook, snapshot_hook, summary_hook,
                    tf.train.NanTensorHook(loss)
                ],
                save_checkpoint_secs=None,
                save_summaries_steps=None) as sess:

            self.snapshot.restore(sess, saver)
            while not sess.should_stop():
                sess.run(train_op)
    def _val_or_test(self, dstdir):
        """ COMMON FOR TRAIN AND VAL """
        # build the data pipeline; paths are kept to label the output records
        data, info, path = loads(self.config)
        c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
        c1_path, p1_path, c2_path, p2_path = tf.unstack(path, axis=1)
        label, cond = tf.unstack(info, axis=1)

        # encode image to a vector
        feat_c1, feat_p2 = self._network(c1_real, p2_real)
        R_loss, loss = self._loss_metric(feat_c1, feat_p2, None)

        saver = tf.train.Saver()
        c1_zs, p2_zs = 0, 0

        with context.DefaultSession() as sess:
            step = self.snapshot.restore(sess, saver)

            info = string.concat(
                self.batchsize,
                [c1_path, p1_path, c2_path, p2_path, label, loss])

            output = [info, feat_c1, feat_p2, label]
            fw = open(dstdir + '%s.txt' % step, 'wb')

            with context.QueueContext(sess):
                for i in range(self.epoch_iter):
                    _info, _x, _y, _label = sess.run(output)
                    self._write_feat_to_npy(i, _x, _y, _label)
                    for _line in _info:
                        fw.write(_line + b'\r\n')
                    c1_zs = _x if isinstance(c1_zs, int) else np.row_stack(
                        (c1_zs, _x))
                    p2_zs = _y if isinstance(p2_zs, int) else np.row_stack(
                        (p2_zs, _y))

            fw.close()
            np.save(dstdir + '/' + step + '_c1.npy', c1_zs)
            np.save(dstdir + '/' + step + '_p2.npy', p2_zs)

            return step
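
The loop above grows c1_zs and p2_zs with np.row_stack on every batch, which reallocates the accumulated array each iteration. An equivalent pattern, shown here as a substitution rather than the authors' code, collects the batches in a list and concatenates once at the end:

    import numpy as np

    epoch_iter = 10                          # stand-in for self.epoch_iter
    c1_list = []
    for _ in range(epoch_iter):
        _x = np.random.rand(32, 128)         # stand-in for sess.run(feat_c1)
        c1_list.append(_x)
    c1_zs = np.concatenate(c1_list, axis=0)  # one allocation instead of many
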
Example #8
    def train(self):
        """
    """
        """ """
        # set phase
        self._enter_('train')

        # get data pipeline
        data, label, path = loads(self.config)
        x1, x2 = tf.unstack(data, axis=1)
        path1, path2 = tf.unstack(path, axis=1)

        # encode image to a vector
        x1, x2 = self._network(x1, x2)
        loss, loss_batch = cosine.get_loss(x1, x2, label, self.batchsize, True)

        # update
        global_step = tf.train.create_global_step()
        train_op = updater.default(self.config, loss, global_step)

        # for storage
        saver = tf.train.Saver(var_list=variables.all())

        # hooks
        self.add_hook(self.snapshot.init())
        self.add_hook(self.summary.init())
        self.add_hook(
            context.Running_Hook(config=self.config.log,
                                 step=global_step,
                                 keys=['R'],
                                 values=[loss],
                                 func_test=self.test,
                                 func_val=None))

        with context.DefaultSession(self.hooks) as sess:
            self.snapshot.restore(sess, saver)
            while not sess.should_stop():
                sess.run(train_op)
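
cosine.get_loss receives the two embeddings, the pair label, the batch size, and a training flag, and returns a scalar loss plus a per-pair loss_batch. A sketch of a cosine-similarity verification loss with that interface; the 1/0 label encoding and the squared-error form are assumptions:

    import tensorflow as tf

    def get_loss(x1, x2, label, batchsize, is_training):
        """Hypothetical sketch: regress cosine similarity onto pair labels."""
        # is_training could toggle regularization; unused in this sketch
        x1 = tf.nn.l2_normalize(x1, axis=1)
        x2 = tf.nn.l2_normalize(x2, axis=1)
        similarity = tf.reduce_sum(x1 * x2, axis=1)    # cosine per pair
        label = tf.to_float(tf.reshape(label, [batchsize]))
        loss_batch = tf.square(similarity - label)     # per-pair record
        loss = tf.reduce_mean(loss_batch)
        return loss, loss_batch
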
Example #9
    def train(self):
        """
    """
        # set phase
        self._enter_('train')

        # get data pipeline
        data, label, path = loads(self.config)
        # get network
        logit, net = self._net(data)
        # get loss
        loss, error, pred = self._loss(logit, label)

        # update
        global_step = tf.train.create_global_step()
        train_op = updater.default(self.config, loss, global_step)

        # for storage
        saver = tf.train.Saver(var_list=variables.all())

        # hooks
        self.add_hook(self.snapshot.init())
        self.add_hook(self.summary.init())
        self.add_hook(
            context.Running_Hook(config=self.config.log,
                                 step=global_step,
                                 keys=['loss', 'error'],
                                 values=[loss, error],
                                 func_test=self.test,
                                 func_val=None))

        # monitor session
        with context.DefaultSession(self.hooks) as sess:
            self.snapshot.restore(sess, saver)
            # Profiler.time_memory(self.config['output_dir'], sess, train_op)
            while not sess.should_stop():
                sess.run(train_op)
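
context.Running_Hook takes a step tensor plus parallel keys/values lists, so it is presumably a tf.train.SessionRunHook that fetches the monitored tensors on each run and logs them at some interval. A minimal sketch of that idea; the interval and formatting are invented:

    import tensorflow as tf

    class RunningHook(tf.train.SessionRunHook):
        """Hypothetical sketch of a Running_Hook-style loss logger."""

        def __init__(self, step, keys, values, interval=50):
            self._step = step
            self._keys = keys
            self._values = values
            self._interval = interval

        def before_run(self, run_context):
            # also fetch the step and the monitored tensors on each run
            return tf.train.SessionRunArgs([self._step] + self._values)

        def after_run(self, run_context, run_values):
            step, vals = run_values.results[0], run_values.results[1:]
            if step % self._interval == 0:
                pairs = ', '.join(
                    '%s=%.4f' % kv for kv in zip(self._keys, vals))
                print('step %d: %s' % (step, pairs))
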
Example #10
  def train(self):
    """ """
    # set phase
    self._enter_('train')

    # get data pipeline
    data, info, _ = loads(self.config)
    c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
    label, cond = tf.unstack(info, axis=1)

    # encode image to a vector
    feat_c1, feat_p2 = self._network(c1_real, p2_real)
    loss, loss_batch = self._loss_metric(feat_c1, feat_p2, label)

    # update
    global_step = tf.train.create_global_step()
    train_op = updater.default(self.config, loss, global_step)

    # for storage
    saver = tf.train.Saver(var_list=variables.all())

    # hooks
    self.add_hook(self.snapshot.init())
    self.add_hook(self.summary.init())
    self.add_hook(context.Running_Hook(
        config=self.config.log,
        step=global_step,
        keys=['R'],
        values=[loss],
        func_test=self.test,
        func_val=None))

    with context.DefaultSession(self.hooks) as sess:
      self.snapshot.restore(sess, saver)
      while not sess.should_stop():
        sess.run(train_op)
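
_loss_metric is called with a label during training and with None during evaluation (see the _val_or_test variants), so it likely falls back to plain distances when no label is given. A contrastive-style sketch consistent with both call sites; the margin and the distance form are assumptions:

    import tensorflow as tf

    def _loss_metric(feat_x, feat_y, label, margin=1.0):
        """Hypothetical sketch: contrastive loss over feature pairs."""
        dist = tf.reduce_sum(tf.square(feat_x - feat_y), axis=1)
        if label is None:
            # evaluation: mean distance plus per-pair distances
            return tf.reduce_mean(dist), dist
        label = tf.to_float(label)  # 1 = genuine pair, 0 = impostor pair
        loss_batch = label * dist + (1. - label) * tf.square(
            tf.maximum(margin - tf.sqrt(dist + 1e-12), 0.))
        return tf.reduce_mean(loss_batch), loss_batch
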
Example #11
    def train(self):
        """
    """
        # set phase
        self._enter_('train')

        # get data pipeline
        data, label, path = loads(self.config)
        print(data, label, path)
        # get network
        logit, net = self._net(data)
        # get loss
        loss, mae, rmse = self._loss(logit, label)

        # update
        global_step = tf.train.create_global_step()
        train_op = updater.default(self.config, loss, global_step)

        # for storage
        saver = tf.train.Saver(var_list=variables.all())

        # hooks
        self.add_hook(self.snapshot.init())
        self.add_hook(self.summary.init())
        self.add_hook(
            context.Running_Hook(config=self.config.log,
                                 step=global_step,
                                 keys=['loss', 'mae', 'rmse'],
                                 values=[loss, mae, rmse],
                                 func_test=self.test,
                                 func_val=None))

        with context.DefaultSession(self.hooks) as sess:
            self.snapshot.restore(sess, saver)
            while not sess.should_stop():
                sess.run(train_op)
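
Every example starts from loads(self.config), which builds the queue-based (data, label, path) tensors that context.QueueContext later drives. A much-simplified sketch of such a TF 1.x input pipeline; the entry-file format and the config fields are guesses:

    import tensorflow as tf

    def loads(config):
        """Hypothetical sketch: queue-based (image, label, path) batches."""
        # each line of the entry file: "<image path> <int label>"
        queue = tf.train.string_input_producer([config.data.entry_path])
        _, line = tf.TextLineReader().read(queue)
        path, label = tf.decode_csv(
            line, record_defaults=[[''], [0]], field_delim=' ')
        image = tf.image.decode_jpeg(tf.read_file(path), channels=3)
        image = tf.image.resize_images(image, [config.data.h, config.data.w])
        return tf.train.batch(
            [image, label, path], batch_size=config.data.batchsize)
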
Example #12
    def _val_or_test(self, dstdir):
        """ COMMON FOR TRAIN AND VAL """
        # build the data pipeline; paths are kept to label the output records
        data, info, path = loads(self.config)
        c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
        c1_path, p1_path, c2_path, p2_path = tf.unstack(path, axis=1)
        label, cond = tf.unstack(info, axis=1)

        # encode image to a vector
        c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
        p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)

        c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
        c1_z = self._generator(c1_z, cond)
        c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)

        p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
        p2_z = self._generator(p2_z, cond, True)
        p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)

        R_loss, loss = self._loss_metric(feat_c1, feat_p2, None)

        saver = tf.train.Saver()
        c1_zs, p2_zs = 0, 0

        with context.DefaultSession() as sess:
            step = self.snapshot.restore(sess, saver)
            filesystem.mkdir(dstdir + '/' + step + '_c1')
            filesystem.mkdir(dstdir + '/' + step + '_p2')

            info = string.concat(
                self.data.batchsize,
                [c1_path, p1_path, c2_path, p2_path, label, loss])

            output = [
                c1_fake, p2_fake, c1_path, p2_path, c1_real, c2_real, p1_real,
                p2_real, info, feat_c1, feat_p2, label
            ]

            fw = open(dstdir + '%s.txt' % step, 'wb')
            with context.QueueContext(sess):
                for i in range(self.epoch_iter):
                    _c1, _p2, _c1p, _p2p, _c1r, _c2r, _p1r, _p2r, _info, _x, _y, _label = sess.run(
                        output)
                    self._write_feat_to_npy(i, _x, _y, _label)
                    for _line in _info:
                        fw.write(_line + b'\r\n')

                    if self.is_save_all_images:
                        image.save_multi(dstdir + '/' + step + '_c1', _c1,
                                         _c1p)
                        image.save_multi(dstdir + '/' + step + '_p2', _p2,
                                         _p2p)

                    if self.is_save_batch_images:
                        image.save_batchs_to_one(
                            imgs_list=[_c1, _p2, _c1r, _c2r, _p1r, _p2r],
                            batchsize=self.data.batchsize,
                            dst=dstdir,
                            step=step,
                            name_list=[
                                '_c1', '_p2', '_c1r', '_c2r', '_p1r', '_p2r'
                            ])

                    if self.is_save_feats:
                        c1_zs = _x if isinstance(c1_zs, int) else np.row_stack(
                            (c1_zs, _x))
                        p2_zs = _y if isinstance(p2_zs, int) else np.row_stack(
                            (p2_zs, _y))

            fw.close()
            if self.is_save_feats:
                np.save(dstdir + '/' + step + '_c1.npy', c1_zs)
                np.save(dstdir + '/' + step + '_p2.npy', p2_zs)

            return step
    def train(self):
        """ """
        # set phase
        self._enter_('train')

        # get data pipeline
        data, info, _ = loads(self.config)
        c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
        label, cond = tf.unstack(info, axis=1)

        # encode image to a vector
        c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
        p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)

        # children to parent
        with tf.variable_scope('net1'):
            c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
            c1_z = self._generator(c1_z, cond)
            c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)

        # parent to children
        with tf.variable_scope('net2'):
            p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
            p2_z = self._generator(p2_z, cond)
            p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)

        # discriminator
        D_c1_fake = self._discriminator(c1_fake, cond, reuse=False)
        D_p1_real = self._discriminator(p1_real, cond, reuse=True)

        D_p2_fake = self._discriminator(p2_fake, cond, reuse=True)
        D_c2_real = self._discriminator(c2_real, cond, reuse=True)

        # loss for encoder
        R_loss, _ = self._loss_metric(feat_c1, feat_p2, label)

        # loss for generator
        E1_loss = self._loss_vae(p1_real, c1_fake, c1_mu, c1_sigma)
        E2_loss = self._loss_vae(c2_real, p2_fake, p2_mu, p2_sigma)
        E_loss = E1_loss + E2_loss

        # loss for discriminator
        D1_loss, G1_loss = self._loss_gan(D_c1_fake, D_p1_real)
        D2_loss, G2_loss = self._loss_gan(D_p2_fake, D_c2_real)
        D_loss = D1_loss + D2_loss
        G_loss = G1_loss + G2_loss

        loss = E_loss + D_loss + G_loss + R_loss

        # allocate separate optimizers for encoder, generator, and discriminator
        global_step = tf.train.create_global_step()

        var_e = variables.select_vars('encoder')
        var_g = variables.select_vars('generator')
        var_d = variables.select_vars('discriminator')

        op1 = updater.default(self.config, loss, global_step, var_e, 0)
        op2 = updater.default(self.config, loss, None, var_g, 1)
        op3 = updater.default(self.config, loss, None, var_d, 0)
        train_op = tf.group(op1, op2, op3)

        # for storage
        saver = tf.train.Saver(var_list=variables.all())

        # hooks
        self.add_hook(self.snapshot.init())
        self.add_hook(self.summary.init())
        self.add_hook(
            context.Running_Hook(config=self.config.log,
                                 step=global_step,
                                 keys=['E', 'D', 'G', 'R'],
                                 values=[E_loss, D_loss, G_loss, R_loss],
                                 func_test=self.test,
                                 func_val=None))

        with context.DefaultSession(self.hooks) as sess:
            self.snapshot.restore(sess, saver)
            while not sess.should_stop():
                sess.run(train_op)
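
_loss_gan(D_fake, D_real) returns one (D, G) loss pair per real/fake pairing. A sketch of the standard sigmoid cross-entropy GAN losses with that interface, assuming the discriminator returns raw logits:

    import tensorflow as tf

    def _loss_gan(d_fake, d_real):
        """Hypothetical sketch: standard sigmoid cross-entropy GAN losses."""
        d_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_real, labels=tf.ones_like(d_real)) +
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake, labels=tf.zeros_like(d_fake)))
        g_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake, labels=tf.ones_like(d_fake)))
        return d_loss, g_loss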