def _val_or_test(self, dst):
  """Common routine for val and test.

  Args:
    dst: directory to save the record file.
  """
  # get data pipeline
  data, label, path = loads(self.config)
  x1, x2 = tf.unstack(data, axis=1)
  path1, path2 = tf.unstack(path, axis=1)
  # get loss
  x1, x2 = self._network(x1, x2)
  loss, loss_batch = cosine.get_loss(x1, x2, label, self.batchsize, False)
  # write records: one text line per sample
  info = string.concat(self.batchsize, [path1, path2, label, loss_batch])
  # tensors to fetch in each session run
  output = [x1, x2, label, info]
  saver = tf.train.Saver()
  with context.DefaultSession() as sess:
    step = self.snapshot.restore(sess, saver)
    with open('%s/%s.txt' % (dst, step), 'wb') as fw:
      with context.QueueContext(sess):
        for i in range(self.epoch_iter):
          _f1, _f2, _label, _info = sess.run(output)
          self._write_feat_to_npy(i, _f1, _f2, _label)
          for _line in _info:
            fw.write(_line + b'\r\n')
    return step
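# A minimal sketch of what `string.concat` presumably produces above: one
# byte string per sample, joining each per-sample field with a space so a
# batch entry becomes a single writable text line. The helper name and the
# separator are assumptions; only tf.as_string/tf.string_join are standard
# TF 1.x ops.
def _concat_sketch(fields, separator=' '):
  """Join per-sample tensors into a [batchsize] tensor of byte strings."""
  import tensorflow as tf
  strings = [f if f.dtype == tf.string else tf.as_string(f) for f in fields]
  return tf.string_join(strings, separator=separator)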
def test(self):
  """Test the regression model and report loss/mae/rmse."""
  # save current context
  self._enter_('test')
  # create a folder to save test results
  test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
  # get data pipeline
  data, label, path = loads(self.config)
  # get network
  logit, net = self._net(data)
  loss, mae, rmse = self._loss(logit, label)
  # prepare
  total_num = self.data.total_num
  batchsize = self.data.batchsize
  num_iter = int(total_num / batchsize)
  info = string.concat(batchsize, [path, label, logit * self.data.range])
  mean_loss, mean_mae, mean_rmse = 0, 0, 0
  # get saver
  saver = tf.train.Saver()
  with tf.Session() as sess:
    # restore from the latest checkpoint
    global_step = self.snapshot.restore(sess, saver)
    with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
      with context.QueueContext(sess):
        for _ in range(num_iter):
          # run session to acquire values
          _loss, _mae, _rmse, _info = sess.run([loss, mae, rmse, info])
          mean_loss += _loss
          mean_mae += _mae
          mean_rmse += _rmse
          # save tensor info to the text file
          for _line in _info:
            fw.write(_line + b'\r\n')
    # statistics
    mean_loss = 1.0 * mean_loss / num_iter
    mean_mae = 1.0 * mean_mae / num_iter
    mean_rmse = 1.0 * mean_rmse / num_iter
    # display results on screen
    keys = ['total sample', 'num batch', 'loss', 'mae', 'rmse']
    vals = [total_num, num_iter, mean_loss, mean_mae, mean_rmse]
    logger.test(logger.iters(int(global_step), keys, vals))
    # write to summary
    self.summary.adds(global_step=global_step,
                      tags=['test/loss', 'test/mae', 'test/rmse'],
                      values=[mean_loss, mean_mae, mean_rmse])
    self._exit_()
    return mean_mae
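# A minimal sketch of the regression metrics this test loop averages,
# assuming `_loss` computes an L2 objective on normalized values and that
# mae/rmse are reported on labels scaled back by `data_range`; the actual
# `_loss` helper in this repo may differ.
def _regression_metrics_sketch(logit, label, data_range):
  """Return (l2 loss, mae, rmse) with errors rescaled to the data range."""
  import tensorflow as tf
  residual = (logit - label) * data_range          # undo label normalization
  loss = tf.reduce_mean(tf.square(logit - label))  # training objective
  mae = tf.reduce_mean(tf.abs(residual))           # mean absolute error
  rmse = tf.sqrt(tf.reduce_mean(tf.square(residual)))  # root mean square error
  return loss, mae, rmse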
def test(self):
  """Test the classification model and report loss/error."""
  # save current context
  self._enter_('test')
  # create a folder to save test results
  test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
  # get data pipeline
  data, label, path = loads(self.config)
  # total number of samples and batch size
  total_num = self.data.total_num
  batchsize = self.data.batchsize
  # network
  logit, net = self._net(data)
  loss, error, pred = self._loss(logit, label)
  # prepare
  info = string.concat(batchsize, [path, label, pred])
  num_iter = int(total_num / batchsize)
  mean_err, mean_loss = 0, 0
  # get saver
  saver = tf.train.Saver()
  with context.DefaultSession() as sess:
    global_step = self.snapshot.restore(sess, saver)
    with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
      with context.QueueContext(sess):
        for _ in range(num_iter):
          _loss, _err, _info = sess.run([loss, error, info])
          mean_loss += _loss
          mean_err += _err
          for _line in _info:
            fw.write(_line + b'\r\n')
    # statistics
    mean_loss = 1.0 * mean_loss / num_iter
    mean_err = 1.0 * mean_err / num_iter
    # display results on screen
    keys = ['total sample', 'num batch', 'loss', 'error']
    vals = [total_num, num_iter, mean_loss, mean_err]
    logger.test(logger.iters(int(global_step), keys, vals))
    # write to summary
    self.summary.adds(global_step=global_step,
                      tags=['test/error', 'test/loss'],
                      values=[mean_err, mean_loss])
    self._exit_()
    return mean_err
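# A minimal sketch of a `_loss` variant for this classification test loop,
# assuming `error` is the top-1 error rate and `pred` the argmax class with
# int32 labels; the real helper in this repo may compute these differently.
def _classification_metrics_sketch(logit, label):
  """Return (cross-entropy loss, top-1 error rate, predicted class)."""
  import tensorflow as tf
  loss = tf.reduce_mean(
      tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=label, logits=logit))
  pred = tf.argmax(logit, axis=1, output_type=tf.int32)
  error = 1.0 - tf.reduce_mean(tf.cast(tf.equal(pred, label), tf.float32))
  return loss, error, pred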
def _val_or_test(self, dstdir):
  """Common routine for val and test."""
  # build the data pipeline for paired inputs
  data, info, path = loads(self.config)
  c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
  c1_path, p1_path, c2_path, p2_path = tf.unstack(path, axis=1)
  label, cond = tf.unstack(info, axis=1)
  # encode the image pair to feature vectors
  feat_c1, feat_p2 = self._network(c1_real, p2_real)
  R_loss, loss = self._loss_metric(feat_c1, feat_p2, None)
  saver = tf.train.Saver()
  # 0 acts as an "empty" sentinel before the first batch arrives
  c1_zs, p2_zs = 0, 0
  with context.DefaultSession() as sess:
    step = self.snapshot.restore(sess, saver)
    info = string.concat(
        self.batchsize,
        [c1_path, p1_path, c2_path, p2_path, label, loss])
    output = [info, feat_c1, feat_p2, label]
    with open(dstdir + '%s.txt' % step, 'wb') as fw:
      with context.QueueContext(sess):
        for i in range(self.epoch_iter):
          _info, _x, _y, _label = sess.run(output)
          self._write_feat_to_npy(i, _x, _y, _label)
          for _line in _info:
            fw.write(_line + b'\r\n')
          # accumulate features over batches
          c1_zs = _x if isinstance(c1_zs, int) else np.row_stack((c1_zs, _x))
          p2_zs = _y if isinstance(p2_zs, int) else np.row_stack((p2_zs, _y))
    np.save(dstdir + '/' + step + '_c1.npy', c1_zs)
    np.save(dstdir + '/' + step + '_p2.npy', p2_zs)
    return step
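# The feature dump above grows `c1_zs`/`p2_zs` with np.row_stack on every
# batch, which re-copies the whole array each iteration (quadratic in the
# number of batches). A sketch of the usual linear-time alternative:
# collect the per-batch arrays in a list and stack once at the end.
def _accumulate_feats_sketch(batches):
  """Stack a sequence of [batchsize, dim] arrays into one [N, dim] array."""
  import numpy as np
  feats = []
  for batch in batches:
    feats.append(batch)
  return np.concatenate(feats, axis=0)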
def _val_or_test(self, dstdir):
  """Common routine for val and test."""
  # build the data pipeline; this variant also decodes images for output
  data, info, path = loads(self.config)
  c1_real, p1_real, c2_real, p2_real = tf.unstack(data, axis=1)
  c1_path, p1_path, c2_path, p2_path = tf.unstack(path, axis=1)
  label, cond = tf.unstack(info, axis=1)
  # encode each image to latent statistics and a feature vector
  c1_mu, c1_sigma, feat_c1 = self._encoder(c1_real)
  p2_mu, p2_sigma, feat_p2 = self._encoder(p2_real, True)
  # reparameterize (z = mu + sigma * eps) and decode to fake images
  c1_z = c1_mu + c1_sigma * tf.random_normal(tf.shape(c1_mu))
  c1_z = self._generator(c1_z, cond)
  c1_fake = tf.clip_by_value(c1_z, 1e-8, 1 - 1e-8)
  p2_z = p2_mu + p2_sigma * tf.random_normal(tf.shape(p2_mu))
  p2_z = self._generator(p2_z, cond, True)
  p2_fake = tf.clip_by_value(p2_z, 1e-8, 1 - 1e-8)
  R_loss, loss = self._loss_metric(feat_c1, feat_p2, None)
  saver = tf.train.Saver()
  # 0 acts as an "empty" sentinel before the first batch arrives
  c1_zs, p2_zs = 0, 0
  with context.DefaultSession() as sess:
    step = self.snapshot.restore(sess, saver)
    filesystem.mkdir(dstdir + '/' + step + '_c1')
    filesystem.mkdir(dstdir + '/' + step + '_p2')
    info = string.concat(
        self.data.batchsize,
        [c1_path, p1_path, c2_path, p2_path, label, loss])
    output = [c1_fake, p2_fake, c1_path, p2_path,
              c1_real, c2_real, p1_real, p2_real,
              info, feat_c1, feat_p2, label]
    with open(dstdir + '%s.txt' % step, 'wb') as fw:
      with context.QueueContext(sess):
        for i in range(self.epoch_iter):
          (_c1, _p2, _c1p, _p2p, _c1r, _c2r, _p1r, _p2r,
           _info, _x, _y, _label) = sess.run(output)
          self._write_feat_to_npy(i, _x, _y, _label)
          for _line in _info:
            fw.write(_line + b'\r\n')
          # save every generated image to its own file
          if self.is_save_all_images:
            image.save_multi(dstdir + '/' + step + '_c1', _c1, _c1p)
            image.save_multi(dstdir + '/' + step + '_p2', _p2, _p2p)
          # save real/fake batches into one grid for quick inspection
          if self.is_save_batch_images:
            image.save_batchs_to_one(
                imgs_list=[_c1, _p2, _c1r, _c2r, _p1r, _p2r],
                batchsize=self.data.batchsize,
                dst=dstdir, step=step,
                name_list=['_c1', '_p2', '_c1r', '_c2r', '_p1r', '_p2r'])
          # accumulate features over batches
          if self.is_save_feats:
            c1_zs = _x if isinstance(c1_zs, int) else np.row_stack((c1_zs, _x))
            p2_zs = _y if isinstance(p2_zs, int) else np.row_stack((p2_zs, _y))
    if self.is_save_feats:
      np.save(dstdir + '/' + step + '_c1.npy', c1_zs)
      np.save(dstdir + '/' + step + '_p2.npy', p2_zs)
    return step
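# The generator path above samples z = mu + sigma * eps with eps ~ N(0, I):
# the VAE reparameterization trick, which keeps sampling differentiable
# with respect to mu and sigma. A self-contained numpy sketch of the same
# computation (illustrative only, not part of this repo):
def _reparameterize_sketch(mu, sigma):
  """Draw z ~ N(mu, sigma^2) via a deterministic transform of unit noise."""
  import numpy as np
  eps = np.random.normal(size=mu.shape)  # eps ~ N(0, I)
  return mu + sigma * eps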