def __init__(self, image_dir, label_dir, batch_size, width, height,
             no_use_skip_connections, base_filter_size, no_use_batch_norm):
    """Build the test-time pipeline: data reader, inference model, loss ops.

    Args:
      image_dir: directory of input images.
      label_dir: directory of corresponding label data.
      batch_size: examples per batch for the reader.
      width, height: full input resolution (no patching at test time).
      no_use_skip_connections: if True, disable skip connections in the model.
      base_filter_size: base number of conv filters for the model.
      no_use_batch_norm: if True, disable batch norm in the model.
    """
    # Test data reader: full resolution (patch_width_height=None, i.e. no
    # patches), no augmentation, a single pass over the data (repeat=False),
    # and a re-initialisable iterator (one_shot=False) exposed via
    # self.iter_init_op.
    self.iter_init_op, (self.test_imgs, self.test_xys_bitmaps) = (
        data.img_xys_iterator(
            image_dir=image_dir,
            label_dir=label_dir,
            batch_size=batch_size,
            patch_width_height=None,
            distort_rgb=False,
            flip_left_right=False,
            random_rotation=False,
            repeat=False,
            width=width,
            height=height,
            one_shot=False))

    # Build the model in inference mode. The CLI-style "no_*" flags are
    # negated here to produce the positive "use_*" model options.
    self.model = model.Model(
        self.test_imgs,
        is_training=False,
        use_skip_connections=not no_use_skip_connections,
        base_filter_size=base_filter_size,
        use_batch_norm=not no_use_batch_norm)

    # Define loss ops for calculating xent against the label bitmaps.
    self.model.calculate_losses_wrt(labels=self.test_xys_bitmaps)
# NOTE(review): whitespace-mangled paste — everything after the first '#' on this
# physical line is swallowed as comment text, so this chunk is not valid Python
# as written. From the visible tokens it appears to: create ckpt_dir if missing,
# dump vars(opts) as JSON to "<ckpt_dir>/opts.json", then build a training data
# iterator (patch-based OR full resolution depending on opts.patch_width_height,
# with rgb distortion and optional flip/rotation augmentation, repeating) and
# start building a full-resolution, non-augmented test iterator. The final
# data.img_xys_iterator call is truncated mid-argument-list (ends at
# "random_rotation=False,"); the remaining kwargs are outside this view, so no
# reconstruction is attempted here — restore formatting from the original file.
if not os.path.exists(ckpt_dir): os.makedirs(ckpt_dir) with open("%s/opts.json" % ckpt_dir, "w") as f: f.write(json.dumps(vars(opts))) #from tensorflow.python import debug as tf_debug #tf.keras.backend.set_session(tf_debug.LocalCLIDebugWrapperSession(tf.Session())) # Build readers / model for training # training can be either patch based, or full resolution train_imgs_xys_bitmaps = data.img_xys_iterator( image_dir=opts.train_image_dir, label_dir=opts.label_dir, batch_size=opts.batch_size, patch_width_height=opts.patch_width_height, distort_rgb=True, flip_left_right=opts.flip_left_right, random_rotation=opts.random_rotate, repeat=True, width=None if opts.patch_width_height else opts.width, height=None if opts.patch_width_height else opts.height) # TODO: could we do all these calcs in test.pr_stats (rather than iterating twice) ?? # test images are always full res test_imgs_xys_bitmaps = data.img_xys_iterator(image_dir=opts.test_image_dir, label_dir=opts.label_dir, batch_size=opts.batch_size, patch_width_height=None, distort_rgb=False, flip_left_right=False, random_rotation=False,
# NOTE(review): whitespace-mangled paste, and incomplete at the start — it opens
# mid-way through a parser.add_argument(...) call whose beginning is outside this
# view, so no reconstruction is attempted here; restore formatting from the
# original file. The visible tokens then parse CLI args, configure numpy print
# options, and build two data readers: an augmented, repeating training reader
# (patch_fraction=opts.patch_fraction) and a batch-size-1, full-image
# (patch_fraction=1), non-augmented test reader, printing the test tensor shapes.
# NOTE(review): this chunk uses the older patch_fraction reader API, unlike the
# patch_width_height API used in other chunks of this file — presumably these
# fragments come from different revisions; confirm against the repository.
default=None, help='If set, max number of seconds to run.') opts = parser.parse_args() print("opts %s" % opts, file=sys.stderr) np.set_printoptions(precision=2, threshold=10000, suppress=True, linewidth=10000) # Build readers for train and test data. train_imgs, train_xys_bitmaps = data.img_xys_iterator( image_dir=opts.train_image_dir, label_dir=opts.label_dir, batch_size=opts.batch_size, patch_fraction=opts.patch_fraction, distort_rgb=True, flip_left_right=opts.flip_left_right, random_rotation=opts.random_rotate, repeat=True) test_imgs, test_xys_bitmaps = data.img_xys_iterator( image_dir=opts.test_image_dir, label_dir=opts.label_dir, batch_size=1, patch_fraction=1, distort_rgb=False, flip_left_right=False, random_rotation=False, repeat=True) print(test_imgs.get_shape()) print(test_xys_bitmaps.get_shape())
# NOTE(review): whitespace-mangled paste, and incomplete at the start — it opens
# mid-way through an argparse.ArgumentParser(...) call whose beginning is outside
# this view, so no reconstruction is attempted here; restore formatting from the
# original file. The visible tokens then define the CLI flags, parse them, build
# a non-augmented full-image test reader (older patch_fraction=1 API), construct
# the model in inference mode inside a "train_test_model" variable scope
# (the inline comment admits this is "clumsy"), wire up loss ops, open a TF1
# session, and restore weights from "ckpts/<run>".
# NOTE(review): `model = model.Model(...)` rebinds the local name `model`,
# shadowing the imported module — intentional in the original, but fragile if
# model.Model were needed again later.
formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--image-dir', type=str, required=True) parser.add_argument('--label-dir', type=str, required=True) parser.add_argument('--run', type=str, required=True, help='model') parser.add_argument('--batch-size', type=int, default=1) parser.add_argument('--no-use-skip-connections', action='store_true') parser.add_argument('--no-use-batch-norm', action='store_true') parser.add_argument('--base-filter-size', type=int, default=16) opts = parser.parse_args() print(opts) # test data reader test_imgs, test_xys_bitmaps = data.img_xys_iterator(image_dir=opts.image_dir, label_dir=opts.label_dir, batch_size=opts.batch_size, patch_fraction=1, distort_rgb=False, flip_left_right=False, random_rotation=False, repeat=False) with tf.variable_scope("train_test_model") as scope: # clumsy :/ model = model.Model(test_imgs, is_training=False, use_skip_connections=not opts.no_use_skip_connections, base_filter_size=opts.base_filter_size, use_batch_norm=not opts.no_use_batch_norm) model.calculate_losses_wrt(labels=test_xys_bitmaps, batch_size=opts.batch_size) sess = tf.Session() model.restore(sess, "ckpts/%s" % opts.run)
# Remaining CLI flags (the ArgumentParser itself is constructed above).
parser.add_argument('--base-filter-size', type=int, default=8)
parser.add_argument('--width', type=int, default=768, help='input image width')
parser.add_argument('--height', type=int, default=1024, help='input image height')
opts = parser.parse_args()
print(opts)

# Test data reader: full resolution (patch_width_height=None, i.e. no patches),
# no augmentation, a single pass over the data.
test_imgs, test_xys_bitmaps = data.img_xys_iterator(
    image_dir=opts.image_dir,
    label_dir=opts.label_dir,
    batch_size=opts.batch_size,
    patch_width_height=None,
    distort_rgb=False,
    flip_left_right=False,
    random_rotation=False,
    repeat=False,
    width=opts.width,
    height=opts.height)

# Build the model in inference mode; the "no_*" CLI flags are negated into the
# positive "use_*" model options. Note: this rebinds `model` over the imported
# module (preserved from the original code).
model = model.Model(
    test_imgs,
    is_training=False,
    use_skip_connections=not opts.no_use_skip_connections,
    base_filter_size=opts.base_filter_size,
    use_batch_norm=not opts.no_use_batch_norm)
model.calculate_losses_wrt(labels=test_xys_bitmaps)

# Open a TF1 session and restore the weights for the requested run.
sess = tf.Session()
model.restore(sess, "ckpts/%s" % opts.run)