default='datasets/D1/artificial/D001', help='Path to the input document.'
)
parser.add_argument(
    '-i', '--i', action='store', dest='i', required=False, type=int,
    default=2, help='Index of strip si.'
)
parser.add_argument(
    '-j', '--j', action='store', dest='j', required=False, type=int,
    default=3, help='Index of strip sj.'
)
args = parser.parse_args()

# model
images_ph = tf.placeholder(tf.float32, name='images_ph', shape=(None, 3, input_size, input_size))  # channels first (NCHW)
images_adjust_op = tf.image.convert_image_dtype(images_ph, tf.float32)  # no-op for float32 inputs, kept for dtype safety
logits_op = squeezenet(images_adjust_op, 'val', 2, channels_first=True)
probs_op = tf.nn.softmax(logits_op)
predictions_op = tf.argmax(logits_op, axis=1)

# pair
i, j = args.i, args.j
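# load the document strips (blank strips are filtered out)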
strips = Strips(path=args.doc, filter_blanks=True)
si, sj = strips.strips[i], strips.strips[j]
hi, wi, _ = si.image.shape
hj, wj, _ = sj.image.shape
# valid vertical range for sampling: keep the search and feature windows inside both strips
min_y = radius_search + radius_feat
max_y = min(hi, hj) - 1 - radius_search - radius_feat
# smooth the strips' boundary offsets with a small symmetric averaging kernel
smi = np.correlate(si.offsets_r, [0.05, 0.1, 0.7, 0.1, 0.05], mode='same')
smj = np.correlate(sj.offsets_l, [0.05, 0.1, 0.7, 0.1, 0.05], mode='same')
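# place the two filled strip images side by side to form the support image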
support = np.hstack([si.filled_image(), sj.filled_image()])
hs, ws, _ = support.shape
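    # collect the (left, right) crop pair for later CNN scoring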
    features.append((left, right))
tfeat = time.time() - t0
print(':: elapsed time={:.2f} sec.'.format(tfeat))

# model
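# dummy all-ones image with the network's expected (channels-first) input shape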
input_image = np.ones((3, input_size_h, input_size_w), dtype=np.float32)
images_ph = tf.placeholder(tf.float32, name='images_ph', shape=(None, 3, input_size_h, input_size_w)) # channels first
logits_op, conv10_op = squeezenet(images_ph, 'test', NUM_CLASSES, channels_first=True)
probs_op = tf.nn.softmax(logits_op)
predictions_op = tf.argmax(logits_op, 1)

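# inference session: restore the trained weights and assemble the input batches from the extracted crops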
with tf.Session() as sess:
    # preparing model
    sess.run(tf.global_variables_initializer())
    # read the path to the best trained parameters and restore them into the SqueezeNet scope
    with open('best_model.txt') as fh:
        params_fname = fh.read().strip()
    load(params_fname, sess, model_scope='SqueezeNet')

    t0_global = time.time()
    # widths of the two halves of the composite input image (wl + wr == input_size_w)
    wl = math.ceil(input_size_w / 2)
    wr = input_size_w // 2
    # one image per candidate vertical shift in the search window (2 * radius_search + 1 in total)
    batch = np.ones((2 * radius_search + 1, 3, input_size_h, input_size_w), dtype=np.float32)
    for i in range(N):
        batch[:, :, :, : wr] = features[i][1]  # the 'right' crop of feature pair i fills the first wr columns