def apply(self, queue): ''' Run Entry ''' # The model shape is updated here params.MNROWS.value = 64 params.MNCOLS.value = 64 params.MNCNLS.value = 3 model = Model() if params.RESTART: init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) # gpu_options = tf.GPUOptions(allow_growth=True) # sess_config = tf.ConfigProto(gpu_options=gpu_options) with tf.Session() as sess: summary_writer = tf.summary.FileWriter(str(params.SUMMARY_PATH), graph=sess.graph, max_queue=32, flush_secs=300) saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=12, pad_step_number=True, save_relative_paths=True) if params.RESTART: sess.run(init) else: model_path = tf.train.latest_checkpoint( str(params.CKPT_PATH), latest_filename=params.CKPT_FILE) saver.restore(sess, model_path) iqueue = queue['prep'] oqueue = queue[self.name] rt = ModelRuntime(sess, summary_writer, saver) while not (params.SHOULD_FINISH.value == b'prep' and iqueue.empty()): try: msg = iqueue.get(timeout=params.QUEUE_TIMEOUT) except Q.Empty: continue utils.msg_ud(msg, 'queue_info|run', oqueue.qsize()) msg = model.apply(msg, rt) oqueue.put(msg) oqueue.close() oqueue.join_thread() params.SHOULD_FINISH.value = self.bname utils.eprint('runner: exit')
def apply(self, queue):
    """Forward messages from the 'run' stage to this stage's output queue.

    Loops until the runner stage signals completion and its queue is fully
    drained, then closes the output queue and propagates the finish signal.
    """
    src = queue['run']
    dst = queue[self.name]
    while True:
        # Stop only once the upstream stage has finished AND nothing is left.
        if params.SHOULD_FINISH.value == b'run' and src.empty():
            break
        try:
            msg = src.get(timeout=params.QUEUE_TIMEOUT)
        except Q.Empty:
            continue
        utils.msg_ud(msg, 'queue_info|post', dst.qsize())
        dst.put(msg)
    dst.close()
    dst.join_thread()
    # Tell the next stage that the postprocessor is done.
    params.SHOULD_FINISH.value = self.bname
    utils.eprint('postprocessor: exit')
def update_runtime_meta(self, msg):
    """Persist the current progress counters so a restarted run can resume.

    Copies the gidx/tidx/vidx counters from *msg* into a fresh runtime-meta
    record and writes it as JSON under the logging path.
    """
    meta_path = params.LOGGING_PATH / params.RT_META_FILE
    meta = self.msgfactory.create_runtime_meta()
    # Mirror each progress counter from the message into the meta record.
    for key in ('gidx', 'tidx', 'vidx'):
        utils.msg_ud(meta, key, utils.msg_gt(msg, 'message_info|' + key))
    with meta_path.open('w') as f:
        json.dump(meta, f)
def apply(self, queue):
    """Attach the original cover/hide images to each generated message.

    Pulls messages from the 'generate' stage, resolves the mode-specific
    image pair into the 'image|orig_covr'/'image|orig_hide' slots, and
    forwards the message downstream until the generator stage finishes.
    """
    # The image shape maybe updated before preprocess.
    # NOTE(review): these self-assignments look like no-ops; presumably they
    # re-publish the shared values — confirm whether they can be dropped.
    params.INROWS.value = params.INROWS.value
    params.INCOLS.value = params.INCOLS.value
    params.INCNLS.value = params.INCNLS.value
    src = queue['generate']
    dst = queue[self.name]
    while not (params.SHOULD_FINISH.value == b'generate' and src.empty()):
        try:
            msg = src.get(timeout=params.QUEUE_TIMEOUT)
        except Q.Empty:
            continue
        utils.msg_ud(msg, 'queue_info|prep', dst.qsize())
        mode = utils.msg_gt(msg, 'message_info|mode')
        if mode not in ('train', 'valid'):
            raise RuntimeError('Invalid mode: %s' % mode)
        # Keys resolve to 'image|covr/train', 'image|hide/valid', etc.
        covr = utils.msg_gt(msg, 'image|covr/' + mode)
        hide = utils.msg_gt(msg, 'image|hide/' + mode)
        utils.msg_ud(msg, 'image|orig_covr', covr)
        utils.msg_ud(msg, 'image|orig_hide', hide)
        dst.put(msg)
    dst.close()
    dst.join_thread()
    params.SHOULD_FINISH.value = self.bname
    utils.eprint('preprocessor: exit')
def inference(self, msg, runtime):
    ''' Inference model.

    Runs the (already-built) graph over a batch of full-size images by
    slicing each image into model-sized tiles, feeding tiles one at a time,
    and reassembling the stego/decrypted outputs. Losses are averaged over
    all tiles and stored on the message for the post stage.
    '''
    covr_img_v = utils.msg_gt(msg, 'image|orig_covr')
    hide_img_v = utils.msg_gt(msg, 'image|orig_hide')
    batch_size = params.BATCH_SIZE
    image_shape = [
        params.INROWS.value, params.INCOLS.value, params.INCNLS.value
    ]
    model_shape = [
        params.MNROWS.value, params.MNCOLS.value, params.MNCNLS.value
    ]
    # NOTE(review): the names look swapped — `mnrows` receives image_shape
    # and `inrows` receives model_shape, the opposite of the IN*/MN* params
    # they are named after. The same pattern appears in train_once, so the
    # tiling only covers the image if ImageSlicer and the loop bounds expect
    # this convention — verify against utils.ImageSlicer before changing.
    mnrows, mncols, _ = image_shape
    inrows, incols, _ = model_shape
    slicer = utils.ImageSlicer(inrows, incols, mnrows, mncols)
    t_beg = timeit.default_timer()
    # Output buffers for the reassembled full-size images.
    steg_img_v = np.zeros(shape=(batch_size, *image_shape))
    dcpt_img_v = np.zeros(shape=(batch_size, *image_shape))
    loss_va = []
    rcst_loss_va, rcst_vars_va, dcpt_loss_va, dcpt_vars_va = [], [], [], []
    for row_idx in range(inrows // mnrows):
        for col_idx in range(incols // mncols):
            # slice an image fragment at (row_idx, col_idx)
            covr_img_vs = slicer.slice(covr_img_v, row_idx, col_idx)
            hide_img_vs = slicer.slice(hide_img_v, row_idx, col_idx)
            # Single forward pass on this tile; no optimizer op here.
            loss_v, \
            rcst_loss_v, rcst_vars_v, dcpt_loss_v, dcpt_vars_v, \
            steg_img_vs, dcpt_img_vs = runtime.sess.run([
                self.loss,
                self.rcst_loss, self.rcst_vars,
                self.dcpt_loss, self.dcpt_vars,
                self.steg_img, self.dcpt_img
            ], feed_dict={self.covr_img: covr_img_vs,
                          self.hide_img: hide_img_vs})
            # Write the tile outputs back into the full-size buffers.
            slicer.slice_assign(steg_img_v, row_idx, col_idx, steg_img_vs)
            slicer.slice_assign(dcpt_img_v, row_idx, col_idx, dcpt_img_vs)
            loss_va.append(loss_v)
            rcst_loss_va.append(rcst_loss_v)
            rcst_vars_va.append(rcst_vars_v)
            dcpt_loss_va.append(dcpt_loss_v)
            dcpt_vars_va.append(dcpt_vars_v)
    t_end = timeit.default_timer()
    t_diff = t_end - t_beg
    run_time = t_diff
    utils.msg_ud(msg, 'running|timing', run_time)
    # Inference performs no training cycle, hence None.
    utils.msg_ud(msg, 'running|train_cycle_timing', None)
    utils.msg_st(msg, 'image|steg', steg_img_v)
    utils.msg_st(msg, 'image|dcpt_covr', None)
    utils.msg_st(msg, 'image|dcpt_hide', dcpt_img_v)
    # Tile-averaged scalar metrics for the post stage.
    utils.msg_st(msg, 'post_info|loss', np.average(loss_va))
    utils.msg_st(msg, 'post_info|rcst_loss', np.average(rcst_loss_va))
    utils.msg_st(msg, 'post_info|rcst_vars', np.average(rcst_vars_va))
    utils.msg_st(msg, 'post_info|dcpt_loss', np.average(dcpt_loss_va))
    utils.msg_st(msg, 'post_info|dcpt_vars', np.average(dcpt_vars_va))
    return msg
def train_once(self, msg, runtime):
    ''' Train once.

    Runs one training (or evaluation) cycle over a batch of full-size
    images, tile by tile. The optimizer op is only run when both the global
    mode (params.GMODE) and the message mode are 'train'; otherwise a dummy
    op is substituted so the same sess.run fetch list can be reused. Heavy
    or light summaries are selected per message.
    '''
    utils.msg_ud(msg, 'running|task', 'train_once')
    covr_img_v = utils.msg_gt(msg, 'image|orig_covr')
    hide_img_v = utils.msg_gt(msg, 'image|orig_hide')
    gmode = params.GMODE
    mode = utils.msg_gt(msg, 'message_info|mode')
    heavy_logging = utils.msg_gt(msg, 'message_info|heavy_logging')
    batch_size = params.BATCH_SIZE
    image_shape = [
        params.INROWS.value, params.INCOLS.value, params.INCNLS.value
    ]
    model_shape = [
        params.MNROWS.value, params.MNCOLS.value, params.MNCNLS.value
    ]
    # NOTE(review): as in inference(), `mnrows` receives image_shape and
    # `inrows` receives model_shape — opposite of the IN*/MN* params they
    # are named after. Verify against utils.ImageSlicer before changing.
    mnrows, mncols, _ = image_shape
    inrows, incols, _ = model_shape
    slicer = utils.ImageSlicer(inrows, incols, mnrows, mncols)
    t_beg = timeit.default_timer()
    # Output buffers for the reassembled full-size images.
    steg_img_v = np.zeros(shape=(batch_size, *image_shape))
    dcpt_img_v = np.zeros(shape=(batch_size, *image_shape))
    loss_va = []
    rcst_loss_va, rcst_vars_va, dcpt_loss_va, dcpt_vars_va = [], [], [], []
    for row_idx in range(inrows // mnrows):
        for col_idx in range(incols // mncols):
            # slice an image fragment at (row_idx, col_idx)
            covr_img_vs = slicer.slice(covr_img_v, row_idx, col_idx)
            hide_img_vs = slicer.slice(hide_img_v, row_idx, col_idx)
            # Only actually optimize when globally AND per-message training.
            if gmode == 'train' and mode == 'train':
                optm = self.optm
            else:
                optm = self.dummy
            if heavy_logging:
                smry = self.smry_hv
            else:
                smry = self.smry_lt
            _, smry_v, \
            loss_v, \
            rcst_loss_v, rcst_vars_v, dcpt_loss_v, dcpt_vars_v, \
            steg_img_vs, dcpt_img_vs = runtime.sess.run([
                optm, smry,
                self.loss,
                self.rcst_loss, self.rcst_vars,
                self.dcpt_loss, self.dcpt_vars,
                self.steg_img, self.dcpt_img
            ], feed_dict={self.covr_img: covr_img_vs,
                          self.hide_img: hide_img_vs})
            # Write the tile outputs back into the full-size buffers.
            slicer.slice_assign(steg_img_v, row_idx, col_idx, steg_img_vs)
            slicer.slice_assign(dcpt_img_v, row_idx, col_idx, dcpt_img_vs)
            loss_va.append(loss_v)
            rcst_loss_va.append(rcst_loss_v)
            rcst_vars_va.append(rcst_vars_v)
            dcpt_loss_va.append(dcpt_loss_v)
            dcpt_vars_va.append(dcpt_vars_v)
    t_end = timeit.default_timer()
    t_diff = t_end - t_beg
    run_time = t_diff
    utils.msg_ud(msg, 'running|timing', run_time)
    utils.msg_ud(msg, 'running|train_cycle_timing', run_time)
    utils.msg_st(msg, 'image|steg', steg_img_v)
    utils.msg_st(msg, 'image|dcpt_covr', None)
    utils.msg_st(msg, 'image|dcpt_hide', dcpt_img_v)
    # Tile-averaged scalar metrics for the post stage.
    utils.msg_st(msg, 'post_info|loss', np.average(loss_va))
    utils.msg_st(msg, 'post_info|rcst_loss', np.average(rcst_loss_va))
    utils.msg_st(msg, 'post_info|rcst_vars', np.average(rcst_vars_va))
    utils.msg_st(msg, 'post_info|dcpt_loss', np.average(dcpt_loss_va))
    utils.msg_st(msg, 'post_info|dcpt_vars', np.average(dcpt_vars_va))
    if gmode == 'train' and mode == 'train':
        # Log only the LAST tile's summary for this step, then advance the
        # global step counter.
        # NOTE(review): smry_v is undefined if the tiling loop ran zero
        # times (inrows // mnrows == 0) — confirm shapes guarantee >= 1 tile.
        step_v = runtime.sess.run(self.g_step)
        runtime.summary_writer.add_summary(smry_v, step_v)
        runtime.sess.run(self.g_next_step)
    return msg
def generator_valid(self, queue):
    ''' Default valid generator for one step.

    Pulls one cover/hide validation pair (non-blocking; raises Q.Empty when
    a feed queue is empty) and wraps it in a message stamped with the
    current progress counters. Advances gidx/lidx/vidx but NOT tidx, since
    no training step is consumed.
    '''
    covr_valid = queue['covr/valid'].get_nowait()
    hide_valid = queue['hide/valid'].get_nowait()
    # Fix: epoch/batch are derived from tidx, the count of *training steps*,
    # so they must use steps-per-epoch (DATASET_TRAIN_SIZE // BATCH_SIZE),
    # exactly as generator_train does. The previous code divided by the raw
    # dataset size, stamping wrong epoch/batch metadata on valid messages.
    steps_per_epoch = params.DATASET_TRAIN_SIZE // params.BATCH_SIZE
    epoch = self.tidx // steps_per_epoch
    batch = self.tidx % steps_per_epoch
    msg = self.message.create_message()
    utils.msg_ud(msg, 'queue_info|generator', queue[self.name].qsize())
    utils.msg_ud(msg, 'message_info|gidx', self.gidx)
    utils.msg_ud(msg, 'message_info|lidx', self.lidx)
    utils.msg_ud(msg, 'message_info|tidx', self.tidx)
    utils.msg_ud(msg, 'message_info|vidx', self.vidx)
    utils.msg_ud(msg, 'message_info|epoch', epoch)
    utils.msg_ud(msg, 'message_info|batch', batch)
    utils.msg_ud(msg, 'message_info|mode', 'valid')
    # Validation messages always get full (heavy) logging.
    utils.msg_ud(msg, 'message_info|heavy_logging', True)
    utils.msg_ud(msg, 'image|covr/valid', covr_valid)
    utils.msg_ud(msg, 'image|hide/valid', hide_valid)
    self.gidx += 1
    self.lidx += 1
    self.vidx += 1
    return msg
def generator_train(self, queue):
    """Default train generator for one step.

    Pulls one cover/hide training pair (non-blocking; raises Q.Empty when a
    feed queue is empty) and wraps it in a message stamped with the current
    progress counters. Advances gidx/lidx/tidx.
    """
    covr_train = queue['covr/train'].get_nowait()
    hide_train = queue['hide/train'].get_nowait()
    # tidx counts training steps; one epoch is DATASET_TRAIN_SIZE // BATCH_SIZE.
    steps_per_epoch = params.DATASET_TRAIN_SIZE // params.BATCH_SIZE
    epoch, batch = divmod(self.tidx, steps_per_epoch)
    # Heavy summaries only every HEAVY_LOGGING_INTERVAL steps.
    heavy_logging = self.tidx % params.HEAVY_LOGGING_INTERVAL == 0
    msg = self.message.create_message()
    for key, value in (
        ('queue_info|generator', queue[self.name].qsize()),
        ('message_info|gidx', self.gidx),
        ('message_info|lidx', self.lidx),
        ('message_info|tidx', self.tidx),
        ('message_info|vidx', self.vidx),
        ('message_info|epoch', epoch),
        ('message_info|batch', batch),
        ('message_info|mode', 'train'),
        ('message_info|heavy_logging', heavy_logging),
        ('image|covr/train', covr_train),
        ('image|hide/train', hide_train),
    ):
        utils.msg_ud(msg, key, value)
    self.gidx += 1
    self.lidx += 1
    self.tidx += 1
    return msg