def predict(i_ckpt):
    tf.reset_default_graph()

    if FLAGS.data_type == 16:
        data_type = tf.float16
    else:
        data_type = tf.float32

    # PSPNet takes fixed-size inputs whose side length must be divisible by 48.
    image_size = FLAGS.test_image_size
    assert FLAGS.test_image_size % 48 == 0

    num_classes = 2
    IMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32)  # BGR
    # IMG_MEAN = [123.680000305, 116.778999329, 103.939002991]  # RGB mean from official PSPNet

    with tf.device('/cpu:0'):
        coord = tf.train.Coordinator()
        reader = ImageReader('./infer', 'test.txt', '480,480', 'False', 'False', 255, IMG_MEAN, coord)

    images_pl = [tf.placeholder(tf.float32, [None, image_size, image_size, 3])]
    labels_pl = [tf.placeholder(tf.int32, [None, image_size, image_size, 1])]
    with tf.variable_scope('resnet_v1_50'):
        model = pspnet_mg.PSPNetMG(
            num_classes, None, None, None, mode=FLAGS.mode, bn_epsilon=FLAGS.epsilon,
            resnet='resnet_v1_50', norm_only=FLAGS.norm_only, float_type=data_type,
            has_aux_loss=False, structure_in_paper=FLAGS.structure_in_paper,
            resize_images_method=FLAGS.resize_images_method)
        l = model.inference(images_pl)  # builds the graph; model.probabilities is used below.
    # ========================= end of building model ================================

    gpu_options = tf.GPUOptions(allow_growth=False)
    config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options,
                            allow_soft_placement=True)
    sess = tf.Session(config=config)
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    if i_ckpt is not None:
        loader = tf.train.Saver(max_to_keep=0)
        loader.restore(sess, i_ckpt)
        eval_step = i_ckpt.split('-')[-1]
        print('Successfully loaded model from %s at step=%s.' % (i_ckpt, eval_step))

    print('======================= eval process begins =========================')
    if FLAGS.save_prediction == 0 and FLAGS.mode != 'test':
        print('not saving prediction ... ')

    average_loss = 0.0
    confusion_matrix = np.zeros((num_classes, num_classes), dtype=np.int64)

    if FLAGS.save_prediction == 1 or FLAGS.mode == 'test':
        try:
            os.mkdir('./' + FLAGS.mode + '_set')
        except OSError:
            pass
        prefix = './' + FLAGS.mode + '_set'
        try:
            os.mkdir(os.path.join(prefix, FLAGS.weights_ckpt.split('/')[-2]))
        except OSError:
            pass
        prefix = os.path.join(prefix, FLAGS.weights_ckpt.split('/')[-2])

    if FLAGS.ms == 1:
        scales = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    else:
        scales = [1.0]

    images_filenames = reader.image_list
    # labels_filenames = reader.label_list  # inference only: no labels here.
    if FLAGS.test_max_iter is None:
        max_iter = len(images_filenames)
    else:
        max_iter = FLAGS.test_max_iter

    step = 0
    while step < max_iter:
        image = cv2.imread(images_filenames[step], 1)
        image_height, image_width = image.shape[0], image.shape[1]
        total_logits = np.zeros((image_height, image_width, num_classes), np.float32)
        for scale in scales:
            imgsplitter = ImageSplitter(image, scale, FLAGS.color_switch, image_size, IMG_MEAN)
            crops = imgsplitter.get_split_crops()
            # Suboptimal: larger batches per run would be faster, but the safe
            # batch-size limit is unknown.
            # TODO: find a more efficient way.
            if crops.shape[0] > 10:
                half = crops.shape[0] // 2
                feed_dict = {images_pl[0]: crops[0:half]}
                [logits_0] = sess.run([model.probabilities], feed_dict=feed_dict)
                feed_dict = {images_pl[0]: crops[half:]}
                [logits_1] = sess.run([model.probabilities], feed_dict=feed_dict)
                logits = np.concatenate((logits_0, logits_1), axis=0)
            else:
                feed_dict = {images_pl[0]: crops}
                [logits] = sess.run([model.probabilities], feed_dict=feed_dict)
            scale_logits = imgsplitter.reassemble_crops(logits)

            if FLAGS.mirror == 1:
                image_mirror = image[:, ::-1]
                imgsplitter_mirror = ImageSplitter(image_mirror, scale, FLAGS.color_switch,
                                                   image_size, IMG_MEAN)
                crops_m = imgsplitter_mirror.get_split_crops()
                if crops_m.shape[0] > 10:
                    half = crops_m.shape[0] // 2
                    feed_dict = {images_pl[0]: crops_m[0:half]}
                    [logits_0] = sess.run([model.probabilities], feed_dict=feed_dict)
                    feed_dict = {images_pl[0]: crops_m[half:]}
                    [logits_1] = sess.run([model.probabilities], feed_dict=feed_dict)
                    logits_m = np.concatenate((logits_0, logits_1), axis=0)
                else:
                    feed_dict = {images_pl[0]: crops_m}
                    [logits_m] = sess.run([model.probabilities], feed_dict=feed_dict)
                logits_m = imgsplitter_mirror.reassemble_crops(logits_m)
                scale_logits += logits_m[:, ::-1]  # flip the mirrored prediction back.

            if scale != 1.0:
                scale_logits = cv2.resize(scale_logits, (image_width, image_height),
                                          interpolation=cv2.INTER_LINEAR)
            total_logits += scale_logits

        prediction = np.argmax(total_logits, axis=-1)

        if FLAGS.database == 'Cityscapes' and (FLAGS.save_prediction == 1 or FLAGS.mode == 'test'):
            image_prefix = images_filenames[step].split('/')[-1].split('_leftImg8bit.png')[0] \
                + '_' + FLAGS.weights_ckpt.split('/')[-2]
            cv2.imwrite(os.path.join(prefix, image_prefix + '_prediction.png'),
                        trainid_to_labelid(prediction))
            if FLAGS.coloring == 1:
                color_prediction = coloring(prediction)
                cv2.imwrite(os.path.join(prefix, image_prefix + '_coloring.png'),
                            cv2.cvtColor(color_prediction, cv2.COLOR_BGR2RGB))
        elif FLAGS.database == 'sonardata' and (FLAGS.save_prediction == 1 or FLAGS.mode == 'test'):
            image_prefix = images_filenames[step].split('/')[-1].split('.png')[0]
            cv2.imwrite(os.path.join(prefix, image_prefix + '.png'), prediction)
        else:
            pass

        step += 1

    coord.request_stop()
    coord.join(threads)
    return average_loss / max_iter
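
# For reference, a minimal sketch of what trainid_to_labelid presumably does in
# the Cityscapes branch above: map the 19 train IDs back to the official label
# IDs expected by the evaluation server. The lookup table follows the standard
# Cityscapes definition; this is an illustration, not the repo's actual helper.
_TRAINID_TO_LABELID = np.array(
    [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33],
    dtype=np.uint8)

def trainid_to_labelid_sketch(prediction):
    # prediction holds train IDs in [0, 18]; indexing the table yields label IDs.
    return _TRAINID_TO_LABELID[prediction]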
def inf_one_image(image_path):
    # Relies on enclosing-scope names: sess, images_pl, probas_op, scales,
    # num_classes, image_size, img_mean and prefix.
    t0 = datetime.datetime.now()
    image = cv2.imread(image_path, 1)
    image_height, image_width = image.shape[0], image.shape[1]
    total_logits = np.zeros((image_height, image_width, num_classes), np.float32)
    for scale in scales:
        imgsplitter = ImageSplitter(image, scale, FLAGS.color_switch, image_size, img_mean)
        crops = imgsplitter.get_split_crops()
        # Suboptimal: larger batches per run would be faster, but the safe
        # batch-size limit is unknown.
        # TODO: find a more efficient way.
        if crops.shape[0] > 10 and FLAGS.database == 'Cityscapes':
            half = crops.shape[0] // 2
            feed_dict = {images_pl[0]: crops[0:half]}
            [logits_0] = sess.run([probas_op], feed_dict=feed_dict)
            feed_dict = {images_pl[0]: crops[half:]}
            [logits_1] = sess.run([probas_op], feed_dict=feed_dict)
            logits = np.concatenate((logits_0, logits_1), axis=0)
        else:
            feed_dict = {images_pl[0]: crops}
            [logits] = sess.run([probas_op], feed_dict=feed_dict)
        scale_logits = imgsplitter.reassemble_crops(logits)

        if FLAGS.mirror == 1:
            image_mirror = image[:, ::-1]
            imgsplitter_mirror = ImageSplitter(image_mirror, scale, FLAGS.color_switch,
                                               image_size, img_mean)
            crops_m = imgsplitter_mirror.get_split_crops()
            if crops_m.shape[0] > 10:
                half = crops_m.shape[0] // 2
                feed_dict = {images_pl[0]: crops_m[0:half]}
                [logits_0] = sess.run([probas_op], feed_dict=feed_dict)
                feed_dict = {images_pl[0]: crops_m[half:]}
                [logits_1] = sess.run([probas_op], feed_dict=feed_dict)
                logits_m = np.concatenate((logits_0, logits_1), axis=0)
            else:
                feed_dict = {images_pl[0]: crops_m}
                [logits_m] = sess.run([probas_op], feed_dict=feed_dict)
            logits_m = imgsplitter_mirror.reassemble_crops(logits_m)
            scale_logits += logits_m[:, ::-1]

        if scale != 1.0:
            scale_logits = cv2.resize(scale_logits, (image_width, image_height),
                                      interpolation=cv2.INTER_LINEAR)
        total_logits += scale_logits

    prediction = np.argmax(total_logits, axis=-1)
    image_prefix = image_path.split('/')[-1].split('.')[0] \
        + '_' + FLAGS.weights_ckpt.split('/')[-2]
    if FLAGS.database == 'Cityscapes':
        cv2.imwrite(os.path.join(prefix, image_prefix + '_prediction.png'),
                    trainid_to_labelid(prediction))
        cv2.imwrite(os.path.join(prefix, image_prefix + '_coloring.png'),
                    cv2.cvtColor(coloring(prediction), cv2.COLOR_BGR2RGB))
    else:
        cv2.imwrite(os.path.join(prefix, image_prefix + '_prediction.png'), prediction)
        # TODO: add coloring for databases other than Cityscapes.

    delta_t = (datetime.datetime.now() - t0).total_seconds()
    print('\n[info]\t saved!', delta_t, 'seconds.')
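
# A hypothetical driver for inf_one_image: sweep every PNG in a folder. The
# './infer' default matches the ImageReader directory used in the first
# predict() above; the session and placeholders must already be built.
import glob

def inf_folder(folder='./infer'):
    for path in sorted(glob.glob(os.path.join(folder, '*.png'))):
        inf_one_image(path)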
def predict(i_ckpt):
    assert i_ckpt is not None

    if FLAGS.float_type == 16:
        print('\n< using tf.float16 >\n')
        float_type = tf.float16
    else:
        print('\n< using tf.float32 >\n')
        float_type = tf.float32

    image_size = FLAGS.test_image_size
    assert FLAGS.test_image_size % 48 == 0

    with tf.device('/cpu:0'):
        reader = SegmentationImageReader(
            FLAGS.database, FLAGS.mode, (image_size, image_size),
            random_scale=False, random_mirror=False, random_blur=False,
            random_rotate=False, color_switch=FLAGS.color_switch)

    images_pl = [tf.placeholder(tf.float32, [None, image_size, image_size, 3])]
    model = pspnet_mg.PSPNetMG(reader.num_classes, mode='val', resnet=FLAGS.network,
                               data_format=FLAGS.data_format, float_type=float_type,
                               has_aux_loss=False,
                               structure_in_paper=FLAGS.structure_in_paper)
    logits = model.inference(images_pl)
    probas_op = tf.nn.softmax(logits[0], dim=1 if FLAGS.data_format == 'NCHW' else 3)
    # ========================= end of building model ================================

    gpu_options = tf.GPUOptions(allow_growth=False)
    config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options,
                            allow_soft_placement=True)
    sess = tf.Session(config=config)
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    loader = tf.train.Saver(max_to_keep=0)
    loader.restore(sess, i_ckpt)
    print('Successfully loaded model from %s.' % i_ckpt)

    print('======================= eval process begins =========================')
    if FLAGS.save_prediction == 0 and FLAGS.mode != 'test':
        print('not saving prediction ... ')

    average_loss = 0.0
    confusion_matrix = np.zeros((reader.num_classes, reader.num_classes), dtype=np.int64)

    if FLAGS.save_prediction == 1 or FLAGS.mode == 'test':
        try:
            os.mkdir('./' + FLAGS.mode + '_set')
        except OSError:
            pass
        prefix = './' + FLAGS.mode + '_set'
        try:
            os.mkdir(os.path.join(prefix, FLAGS.weights_ckpt.split('/')[-2]))
        except OSError:
            pass
        prefix = os.path.join(prefix, FLAGS.weights_ckpt.split('/')[-2])

    if FLAGS.ms == 1:
        scales = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]
    else:
        scales = [1.0]

    images_filenames = reader.image_list
    labels_filenames = reader.label_list
    img_mean = reader.img_mean
    if FLAGS.test_max_iter is None:
        max_iter = len(images_filenames)
    else:
        max_iter = FLAGS.test_max_iter

    step = 0
    while step < max_iter:
        image = cv2.imread(images_filenames[step], 1)
        label = cv2.imread(labels_filenames[step], 0)
        label = np.reshape(label, [1, label.shape[0], label.shape[1], 1])
        image_height, image_width = image.shape[0], image.shape[1]
        total_logits = np.zeros((image_height, image_width, reader.num_classes), np.float32)
        for scale in scales:
            imgsplitter = ImageSplitter(image, scale, FLAGS.color_switch, image_size, img_mean)
            crops = imgsplitter.get_split_crops()
            # Suboptimal: larger batches per run would be faster, but the safe
            # batch-size limit is unknown.
            # TODO: find a more efficient way.
            if crops.shape[0] > 10 and FLAGS.database == 'Cityscapes':
                half = crops.shape[0] // 2
                feed_dict = {images_pl[0]: crops[0:half]}
                [logits_0] = sess.run([probas_op], feed_dict=feed_dict)
                feed_dict = {images_pl[0]: crops[half:]}
                [logits_1] = sess.run([probas_op], feed_dict=feed_dict)
                logits = np.concatenate((logits_0, logits_1), axis=0)
            else:
                feed_dict = {images_pl[0]: crops}
                [logits] = sess.run([probas_op], feed_dict=feed_dict)
            scale_logits = imgsplitter.reassemble_crops(logits)

            if FLAGS.mirror == 1:
                image_mirror = image[:, ::-1]
                imgsplitter_mirror = ImageSplitter(image_mirror, scale, FLAGS.color_switch,
                                                   image_size, img_mean)
                crops_m = imgsplitter_mirror.get_split_crops()
                if crops_m.shape[0] > 10:
                    half = crops_m.shape[0] // 2
                    feed_dict = {images_pl[0]: crops_m[0:half]}
                    [logits_0] = sess.run([probas_op], feed_dict=feed_dict)
                    feed_dict = {images_pl[0]: crops_m[half:]}
                    [logits_1] = sess.run([probas_op], feed_dict=feed_dict)
                    logits_m = np.concatenate((logits_0, logits_1), axis=0)
                else:
                    feed_dict = {images_pl[0]: crops_m}
                    [logits_m] = sess.run([probas_op], feed_dict=feed_dict)
                logits_m = imgsplitter_mirror.reassemble_crops(logits_m)
                scale_logits += logits_m[:, ::-1]

            if scale != 1.0:
                scale_logits = cv2.resize(scale_logits, (image_width, image_height),
                                          interpolation=cv2.INTER_LINEAR)
            total_logits += scale_logits

        prediction = np.argmax(total_logits, axis=-1)

        if FLAGS.save_prediction == 1 or FLAGS.mode == 'test':  # prefix only exists then.
            image_prefix = images_filenames[step].split('/')[-1].split('.')[0] \
                + '_' + FLAGS.weights_ckpt.split('/')[-2]
            if FLAGS.database == 'Cityscapes':
                cv2.imwrite(os.path.join(prefix, image_prefix + '_prediction.png'),
                            trainid_to_labelid(prediction))
                if FLAGS.coloring == 1:
                    cv2.imwrite(os.path.join(prefix, image_prefix + '_coloring.png'),
                                cv2.cvtColor(coloring(prediction), cv2.COLOR_BGR2RGB))
            else:
                cv2.imwrite(os.path.join(prefix, image_prefix + '_prediction.png'), prediction)
                # TODO: add coloring for databases other than Cityscapes.

        step += 1
        compute_confusion_matrix(label, prediction, confusion_matrix)
        if step % 20 == 0:
            print('%s %s] %d / %d. iou updating'
                  % (str(datetime.datetime.now()), str(os.getpid()), step, max_iter))
            compute_iou(confusion_matrix)
            print(average_loss / step)

    precision = compute_iou(confusion_matrix)
    coord.request_stop()
    coord.join(threads)
    return average_loss / max_iter, precision
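
# Minimal sketches of the metric helpers called above (compute_confusion_matrix,
# compute_iou). Their real implementations live elsewhere in the repo; these
# assume the usual semantics: accumulate a num_classes x num_classes count
# matrix while ignoring void labels (e.g. 255), and report mean IoU from it.
def compute_confusion_matrix_sketch(label, prediction, confusion_matrix):
    num_classes = confusion_matrix.shape[0]
    gt = label.flatten()
    pred = prediction.flatten()
    valid = gt < num_classes  # drop ignored labels such as 255.
    idx = gt[valid].astype(np.int64) * num_classes + pred[valid]
    confusion_matrix += np.bincount(idx, minlength=num_classes ** 2) \
        .reshape(num_classes, num_classes)

def compute_iou_sketch(confusion_matrix):
    inter = np.diag(confusion_matrix).astype(np.float64)
    union = confusion_matrix.sum(axis=0) + confusion_matrix.sum(axis=1) \
        - np.diag(confusion_matrix)
    ious = inter / np.maximum(union, 1)
    print('per-class IoU:', ious, 'mIoU:', ious.mean())
    return ious.mean()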
def predict(i_ckpt):
    # < single gpu version >
    # < use FLAGS.batch_size as batch size >
    # < use FLAGS.weight_ckpt as i_ckpt >
    reader_init = []
    with tf.device('/cpu:0'):
        if FLAGS.reader_method == 'queue':
            eval_image_reader = reader.QueueBasedImageReader(FLAGS.database, FLAGS.test_subset)
            eval_image, eval_label, eval_image_filename = \
                eval_image_reader.get_eval_batch(FLAGS.color_switch)
        else:
            eval_image_reader = reader.ImageReader(FLAGS.database, FLAGS.test_subset)
            eval_reader_iterator = eval_image_reader.get_eval_iterator(FLAGS.color_switch)
            eval_image, eval_label, eval_image_filename = eval_reader_iterator.get_next()  # one image.
            reader_init.append(eval_reader_iterator.initializer)

    crop_size = FLAGS.test_image_size

    # < network >
    model = pspnet_mg.PSPNetMG(eval_image_reader.num_classes, FLAGS.network, gpu_num(),
                               FLAGS.initializer, FLAGS.weight_decay_mode,
                               FLAGS.fine_tune_filename, FLAGS.optimizer, FLAGS.momentum,
                               FLAGS.train_like_in_caffe, FLAGS.three_convs_beginning,
                               FLAGS.new_layer_names, consider_dilated=FLAGS.consider_dilated)
    images_pl = [tf.placeholder(tf.float32, [None, crop_size, crop_size, 3])]
    eval_probas_op = model.build_forward_ops(images_pl)

    gpu_options = tf.GPUOptions(allow_growth=False)
    config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options,
                            allow_soft_placement=True)
    sess = tf.Session(config=config)

    if FLAGS.reader_method == 'queue':
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    init = [tf.global_variables_initializer(), tf.local_variables_initializer()] + reader_init
    sess.run(init)

    loader = tf.train.Saver(max_to_keep=0)
    loader.restore(sess, i_ckpt)

    prefix = i_ckpt.split('model.ckpt')[0] + FLAGS.test_subset + '_set/'
    if not os.path.exists(prefix) and 'test' in FLAGS.test_subset:
        os.mkdir(prefix)
        print('saving predictions to', prefix)

    confusion_matrix = np.zeros((eval_image_reader.num_classes, eval_image_reader.num_classes),
                                dtype=np.int64)
    scales = [1.0]
    if FLAGS.ms == 1:
        scales = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]

    def run_once(input_image):
        H, W, channel = input_image.shape

        # < in case that input_image is smaller than crop_size >
        dif_height = H - crop_size
        dif_width = W - crop_size
        if dif_height < 0 or dif_width < 0:
            input_image = helper.numpy_pad_image(input_image, dif_height, dif_width)
            H, W, channel = input_image.shape

        # < split this image into crops >
        split_crops = []
        heights = helper.decide_intersection(H, crop_size)
        widths = helper.decide_intersection(W, crop_size)
        for height in heights:
            for width in widths:
                image_crop = input_image[height:height + crop_size, width:width + crop_size]
                split_crops.append(image_crop[np.newaxis, :])

        # < run the crops through the network in chunks of FLAGS.batch_size >
        num_chunks = (len(split_crops) - 1) // FLAGS.batch_size + 1
        proba_crops_list = []
        for chunk_i in range(num_chunks):
            start = chunk_i * FLAGS.batch_size
            end = min((chunk_i + 1) * FLAGS.batch_size, len(split_crops))
            feed_dict = {images_pl[0]: np.concatenate(split_crops[start:end])}
            proba_crops_part = sess.run(eval_probas_op, feed_dict=feed_dict)
            proba_crops_list.append(proba_crops_part[0])
        proba_crops = np.concatenate(proba_crops_list)

        # < reassemble >
        reassemble = np.zeros((H, W, eval_image_reader.num_classes), np.float32)
        index = 0
        for height in heights:
            for width in widths:
                reassemble[height:height + crop_size, width:width + crop_size] += proba_crops[index]
                index += 1

        # < crop back to the original image size >
        if dif_height < 0 or dif_width < 0:
            reassemble = helper.numpy_crop_image(reassemble, dif_height, dif_width)

        return reassemble

    for i in range(len(eval_image_reader.image_list)):
        orig_one_image, one_label, image_filename = sess.run(
            [eval_image, eval_label, eval_image_filename])
        orig_height, orig_width, channel = orig_one_image.shape
        total_proba = np.zeros((orig_height, orig_width, eval_image_reader.num_classes),
                               dtype=np.float32)
        for scale in scales:
            if scale != 1.0:
                one_image = cv2.resize(orig_one_image, dsize=(0, 0), fx=scale, fy=scale)
            else:
                one_image = np.copy(orig_one_image)
            proba = run_once(one_image)
            if FLAGS.mirror == 1:
                proba_mirror = run_once(one_image[:, ::-1])
                proba += proba_mirror[:, ::-1]  # flip the mirrored prediction back.
            if scale != 1.0:
                proba = cv2.resize(proba, (orig_width, orig_height))
            total_proba += proba
        prediction = np.argmax(total_proba, axis=-1)
        helper.compute_confusion_matrix(one_label, prediction, confusion_matrix)

        if 'test' in FLAGS.test_subset:
            if FLAGS.database == 'Cityscapes':
                cv2.imwrite(prefix + prediction_image_create(image_filename),
                            helper_cityscapes.trainid_to_labelid(prediction))
                if FLAGS.coloring == 1:
                    cv2.imwrite(prefix + coloring_image_create(image_filename),
                                cv2.cvtColor(helper_cityscapes.coloring(prediction),
                                             cv2.COLOR_BGR2RGB))
            else:
                cv2.imwrite(prefix + prediction_image_create(image_filename, key_word='.'),
                            prediction)

        if i % 100 == 0:
            print('%s %s] %d / %d. iou updating'
                  % (str(datetime.datetime.now()), str(os.getpid()),
                     i, len(eval_image_reader.image_list)))
            helper.compute_iou(confusion_matrix)

    print('%s %s] %d / %d. iou updating'
          % (str(datetime.datetime.now()), str(os.getpid()),
             len(eval_image_reader.image_list), len(eval_image_reader.image_list)))
    miou = helper.compute_iou(confusion_matrix)

    log_file = i_ckpt.split('model.ckpt')[0] + 'predict-ms' + str(FLAGS.ms) \
        + '-mirror' + str(FLAGS.mirror) + '.txt'
    f_log = open(log_file, 'w')
    f_log.write(sorted_str_dict(FLAGS.__dict__) + '\n')
    ious = helper.compute_iou_each_class(confusion_matrix)
    f_log.write(str(ious) + '\n')
    for i in range(confusion_matrix.shape[0]):
        f_log.write(str(ious[i]) + '\n')
    f_log.write(str(miou) + '\n')
    f_log.close()

    if FLAGS.reader_method == 'queue':
        coord.request_stop()
        coord.join(threads)
    return
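
# Assumed implementations of the geometry helpers used by run_once above
# (helper.decide_intersection, helper.numpy_pad_image, helper.numpy_crop_image).
# These are sketches of the presumed behaviour, not the repo's actual code.
def decide_intersection_sketch(total_length, crop_length):
    # Evenly spaced crop offsets covering total_length; assumes a stride of
    # 2/3 crop (i.e. 1/3 overlap) and a final crop flush with the border.
    stride = crop_length * 2 // 3
    starts = [i * stride for i in range((total_length - crop_length) // stride + 1)]
    if starts[-1] + crop_length < total_length:
        starts.append(total_length - crop_length)
    return starts

def numpy_pad_image_sketch(image, dif_height, dif_width):
    # Zero-pad symmetrically up to crop size; assumes inputs are already
    # mean-subtracted, so zero padding is neutral.
    pad_h, pad_w = max(-dif_height, 0), max(-dif_width, 0)
    return np.pad(image, ((pad_h // 2, pad_h - pad_h // 2),
                          (pad_w // 2, pad_w - pad_w // 2), (0, 0)),
                  mode='constant')

def numpy_crop_image_sketch(image, dif_height, dif_width):
    # Undo numpy_pad_image_sketch: cut the padded margins back off.
    pad_h, pad_w = max(-dif_height, 0), max(-dif_width, 0)
    return image[pad_h // 2: image.shape[0] - (pad_h - pad_h // 2),
                 pad_w // 2: image.shape[1] - (pad_w - pad_w // 2)]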