# Script 1: ensemble translation inference with Transformer models.
# Project-local helpers (default_parameters, merge_parameters, import_params,
# override_parameters, set_variables, shard_features, session_config, and the
# dataset/parallel/beamsearch/transformer modules) are assumed to be imported
# elsewhere in this file.
import itertools

import six
import tensorflow as tf


def main(args):
    tf.logging.set_verbosity(tf.logging.INFO)

    # Load configs
    model_cls_list = [transformer.Transformer for _ in args.models]
    params_list = [default_parameters() for _ in range(len(model_cls_list))]
    params_list = [
        merge_parameters(params, model_cls.get_parameters())
        for params, model_cls in zip(params_list, model_cls_list)
    ]
    params_list = [
        import_params(args.models[i], model_cls_list[i].get_name(),
                      params_list[i])
        for i in range(len(args.models))
    ]
    params_list = [
        override_parameters(params_list[i], args)
        for i in range(len(model_cls_list))
    ]

    # Build Graph
    with tf.Graph().as_default():
        model_var_lists = []

        # Load checkpoints
        for i, checkpoint in enumerate(args.models):
            tf.logging.info("Loading %s" % checkpoint)
            var_list = tf.train.list_variables(checkpoint)
            values = {}
            reader = tf.train.load_checkpoint(checkpoint)

            for (name, shape) in var_list:
                # Ignore variables outside the model scope (e.g. global_step)
                if not name.startswith(model_cls_list[i].get_name()):
                    continue
                values[name] = reader.get_tensor(name)

            model_var_lists.append(values)

        # Build models, one scoped copy per checkpoint
        model_fns = []

        for i in range(len(args.models)):
            name = model_cls_list[i].get_name()
            model = model_cls_list[i](params_list[i], name + "_%d" % i)
            model_fns.append(model.get_inference_func())

        params = params_list[0]

        # Read input file
        sorted_keys, sorted_inputs = dataset.sort_input_file(args.input)

        # Build input queue
        features = dataset.get_inference_input(sorted_inputs, params)

        # Create one set of placeholders per device
        placeholders = []

        for i in range(len(params.device_list)):
            placeholders.append({
                "source": tf.placeholder(tf.int32, [None, None],
                                         "source_%d" % i),
                "source_length": tf.placeholder(tf.int32, [None],
                                                "source_length_%d" % i)
            })

        predictions = parallel.data_parallelism(
            params.device_list,
            lambda f: beamsearch.create_inference_graph(model_fns, f, params),
            placeholders)

        # Create assign ops
        assign_ops_all = []
        assign_placeholders_all = []
        assign_values_all = []
        all_var_list = tf.trainable_variables()

        for i in range(len(args.models)):
            un_init_var_list = []
            name = model_cls_list[i].get_name()

            for v in all_var_list:
                if v.name.startswith(name + "_%d" % i):
                    un_init_var_list.append(v)

            assign_placeholders, assign_ops, assign_values = set_variables(
                un_init_var_list, model_var_lists[i], name + "_%d" % i)
            assign_placeholders_all.append(assign_placeholders)
            assign_ops_all.append(assign_ops)
            assign_values_all.append(assign_values)

        results = []

        # Create session
        with tf.Session(config=session_config(params)) as sess:
            # Restore variables one assign op at a time, feeding the values
            # through placeholders so they are not baked into the graph
            for i in range(len(args.models)):
                for p, assign_op, v in zip(assign_placeholders_all[i],
                                           assign_ops_all[i],
                                           assign_values_all[i]):
                    sess.run(assign_op, {p: v})

            sess.run(tf.tables_initializer())

            while True:
                try:
                    feats = sess.run(features)
                    ops, feed_dict = shard_features(feats, placeholders,
                                                    predictions)
                    results.append(sess.run(ops, feed_dict=feed_dict))
                    tf.logging.log(tf.logging.INFO,
                                   "Finished batch %d" % len(results))
                except tf.errors.OutOfRangeError:
                    break

        # Convert to plain text
        vocab = params.vocabulary["target"]
        outputs = []
        scores = []

        for result in results:
            for item in result[0]:
                outputs.append(item.tolist())
            for item in result[1]:
                scores.append(item.tolist())

        outputs = list(itertools.chain(*outputs))
        scores = list(itertools.chain(*scores))

        # Undo the length-based sorting of the input file
        restored_inputs = []
        restored_outputs = []
        restored_scores = []

        for index in range(len(sorted_inputs)):
            restored_inputs.append(sorted_inputs[sorted_keys[index]])
            restored_outputs.append(outputs[sorted_keys[index]])
            restored_scores.append(scores[sorted_keys[index]])

        # Write to file
        with open(args.output, "w") as outfile:
            count = 0

            for beam_outputs, beam_scores in zip(restored_outputs,
                                                 restored_scores):
                for output, score in zip(beam_outputs, beam_scores):
                    decoded = []

                    for idx in output:
                        if isinstance(idx, six.integer_types):
                            symbol = vocab[idx]
                        else:
                            symbol = idx
                        if symbol == params.eos:
                            break
                        decoded.append(symbol)

                    decoded = " ".join(decoded)

                    if not args.log:
                        # Keep only the top beam hypothesis
                        outfile.write("%s\n" % decoded)
                        break
                    else:
                        pattern = "src[%d]: %s \n trans[%.4f]: %s \n"
                        source = restored_inputs[count]
                        outfile.write(pattern % (count, source, score,
                                                 decoded))

                count += 1
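
# NOTE: `set_variables` and `shard_features` are called above but not defined
# in this excerpt. The sketches below are minimal reconstructions consistent
# with the call sites, not the project's actual implementation.
# `set_variables` assumes checkpoint tensors keep the model's original scope,
# so only the leading scope component has to be remapped to the per-copy
# "name_%d" scope; it returns aligned placeholder/op/value lists so weights
# can be fed in at session time rather than embedded as graph constants.
# (The second script below uses a variant that returns only the assign ops.)
# `shard_features` assumes a fetched batch is split evenly across the
# per-device placeholder dicts.
def set_variables(var_list, value_dict, prefix):
    placeholders = []
    ops = []
    values = []

    for var in var_list:
        for name in value_dict:
            # Remap "model/..." checkpoint names to "model_i/..." variables
            var_name = "/".join([prefix] + name.split("/")[1:])

            if var.name[:-2] == var_name:  # strip the ":0" suffix
                value = value_dict[name]
                p = tf.placeholder(var.dtype.base_dtype, value.shape)
                placeholders.append(p)
                ops.append(tf.assign(var, p))
                values.append(value)
                break

    return placeholders, ops, values


def shard_features(features, placeholders, predictions):
    num_shards = len(placeholders)
    feed_dict = {}
    n = 0

    for name in features:
        feat = features[name]
        batch = feat.shape[0]
        shard_size = (batch + num_shards - 1) // num_shards

        for i in range(num_shards):
            shard_feat = feat[i * shard_size:(i + 1) * shard_size]

            if shard_feat.shape[0] == 0:
                break

            feed_dict[placeholders[i][name]] = shard_feat
            n = i + 1

    # Only run the ops of shards that actually received data
    if isinstance(predictions, (list, tuple)):
        predictions = predictions[:n]

    return predictions, feed_dict
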
# Script 2: PixelLink text-detection evaluation. Project-local helpers
# (default_parameters, import_params, override_parameters, set_variables,
# shard_features, session_config, reconstruct, evaluate, and the
# dataset/parallel/pixellink modules) are assumed to be imported elsewhere
# in this file.
import os

import cv2
import numpy as np
import tensorflow as tf


def main(args):
    tf.logging.set_verbosity(tf.logging.INFO)

    # Load configs
    params_list = [default_parameters() for _ in range(len(args.checkpoints))]
    params_list = [
        import_params(args.checkpoints[i], params_list[i])
        for i in range(len(args.checkpoints))
    ]
    params_list = [
        override_parameters(params_list[i], args)
        for i in range(len(args.checkpoints))
    ]

    # Build Graph
    with tf.Graph().as_default():
        model_var_lists = []

        # Load checkpoints
        for i, checkpoint in enumerate(args.checkpoints):
            tf.logging.info("Loading %s" % checkpoint)
            var_list = tf.train.list_variables(checkpoint)
            values = {}
            reader = tf.train.load_checkpoint(checkpoint)

            for (name, shape) in var_list:
                # Skip the moving-average loss bookkeeping variables
                if name.find("losses_avg") >= 0:
                    continue
                values[name] = reader.get_tensor(name)

            model_var_lists.append(values)

        # Build models, one scoped copy per checkpoint
        model_fns = []

        for i in range(len(args.checkpoints)):
            model = pixellink.PixelLinkNetwork(params_list[i],
                                               "PixelLinkNetwork_%d" % i)
            model_fns.append(model.get_inference_func())

        params = params_list[0]

        # Build input queue
        features = dataset.get_inference_input(params)

        # Create one set of placeholders per device
        placeholders = []

        for i in range(len(params.device_list)):
            placeholders.append({
                "input_img": tf.placeholder(tf.float32, [None, None, None, 3],
                                            "input_img_%d" % i),
                "lens": tf.placeholder(tf.float32, [None], "lens_%d" % i),
                "cnts": tf.placeholder(tf.float32, [None, None, None, None],
                                       "cnts_%d" % i),
                "care": tf.placeholder(tf.float32, [None], "care_%d" % i)
            })

        # A list of output dicts, one per device
        predictions_dict = parallel.data_parallelism(params.device_list,
                                                     model_fns, placeholders)

        # Create assign ops
        assign_ops = []
        all_var_list = tf.trainable_variables()

        for i in range(len(args.checkpoints)):
            un_init_var_list = []

            for v in all_var_list:
                if v.name.startswith("PixelLinkNetwork_%d" % i):
                    un_init_var_list.append(v)

            ops = set_variables(un_init_var_list, model_var_lists[i],
                                "PixelLinkNetwork_%d" % i)
            assign_ops.extend(ops)

        assign_op = tf.group(*assign_ops)

        # Create session
        with tf.Session(config=session_config(params)) as sess:
            # Restore variables
            sess.run(assign_op)
            sess.run(tf.tables_initializer())

            time = 0
            recall_sum, precise_sum, gt_n_sum, pred_n_sum = 0, 0, 0, 0

            while True:
                try:
                    feats = sess.run(features)
                    op, feed_dict = shard_features(feats, placeholders,
                                                   predictions_dict)
                    results = [sess.run(predictions_dict,
                                        feed_dict=feed_dict)]
                    tf.logging.log(tf.logging.INFO,
                                   "Finished batch %d" % len(results))

                    # TODO: save and reconstruct
                    for res in results:
                        outputs = res[0]
                        img = outputs["input_img"]
                        prediction = outputs["prediction"]
                        lens = outputs["lens"]
                        cnts = outputs["cnts"]
                        # Ground-truth contours are annotated at twice the
                        # prediction resolution
                        cnts = [(x / 2).astype(np.int32) for x in cnts]
                        cnts = _depad(cnts, lens)
                        care = outputs["care"]

                        for i in range(img.shape[0]):
                            re_cnts = reconstruct(img[i], prediction[i])
                            TR, TP, T_gt_n, T_pred_n, PR, PP, P_gt_n, \
                                P_pred_n = evaluate(img[i], cnts, re_cnts,
                                                    care)
                            tf.logging.info("recall: %s; precise: %s" %
                                            (TR, TP))
                            recall_sum += TR * T_gt_n
                            precise_sum += TP * T_pred_n
                            gt_n_sum += T_gt_n
                            pred_n_sum += T_pred_n

                            # Visualization: input image (top right) above the
                            # text-probability map (bottom right), with
                            # ground-truth contours in red and reconstructed
                            # contours in green
                            height, width = prediction.shape[1:3]
                            imgoutput = np.zeros((height * 2, width * 2, 3),
                                                 dtype=np.uint8)
                            imgoutput[0:height, width:width * 2, :] = \
                                cv2.resize(img[i], (width, height))
                            imgoutput[height:height * 2,
                                      width:width * 2, :] = (
                                _softmax(prediction[i, :, :, 0:2]) * 255
                            ).astype(np.uint8)
                            cv2.drawContours(imgoutput, cnts, -1,
                                             (0, 0, 255))
                            cv2.drawContours(imgoutput, re_cnts, -1,
                                             (0, 255, 0))
                            cv2.imwrite(
                                os.path.join(
                                    params.output,
                                    "output_{:03d}_r{}_p{}.png".format(
                                        time, TR, TP)),
                                imgoutput)
                            time += 1
                except tf.errors.OutOfRangeError:
                    # Micro-averaged recall and precision, plus their
                    # harmonic mean (F-score)
                    ave_r = (recall_sum / gt_n_sum
                             if int(gt_n_sum) != 0 else 0.0)
                    ave_p = (precise_sum / pred_n_sum
                             if int(pred_n_sum) != 0 else 0.0)
                    if ave_r != 0.0 and ave_p != 0.0:
                        ave_f = 2 / (1 / ave_r + 1 / ave_p)
                    else:
                        ave_f = 0.0
                    tf.logging.info("ave recall:{}, precise:{}, f:{}".format(
                        ave_r, ave_p, ave_f))
                    tf.logging.info("end evaluation")
                    break
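
# NOTE: `_depad` and `_softmax` are used above but not defined in this
# excerpt. The sketches below are one plausible reading of the call sites,
# not the project's actual implementation. `_depad` assumes contours are
# zero-padded along the first axis to a common count and that `lens` carries
# each image's true contour count. `_softmax` assumes channels 0:2 of the
# prediction hold (non-text, text) logits and returns the text probability
# with a trailing singleton axis, so that `probs * 255` broadcasts into the
# 3-channel visualization slice it is written to.
def _depad(cnts, lens):
    # Keep only the first lens[i] (real) contours of image i.
    return [cnt[:int(n)] for cnt, n in zip(cnts, lens)]


def _softmax(logits):
    # Numerically stable softmax over the channel axis.
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs = e / e.sum(axis=-1, keepdims=True)
    return probs[..., 1:2]  # text-class probability, shape (H, W, 1)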