Example 1
import tensorflow as tf

# LabImageRecordReader, loss_with_metrics and dir_tfrecord are project-local
# names assumed to be in scope
def evaluation_pipeline(col, fwd_col, ref, number_of_images):
    # Set up validation (input queues, graph)
    irr = LabImageRecordReader('val_lab_images_*.tfrecord', dir_tfrecord)
    read_batched_examples = irr.read_batch(number_of_images, shuffle=False)
    imgs_l_val = read_batched_examples['image_l']
    imgs_true_ab_val = read_batched_examples['image_ab']
    imgs_emb_val = read_batched_examples['image_embedding']
    imgs_ab_val = col.build(imgs_l_val, imgs_emb_val)
    imgs_fwd_ab_val = fwd_col.build(imgs_l_val)
    # Concatenate the L, feed-forward ab and colorization ab channels into a
    # Lab image to evaluate the Refinement Network
    imgs_lab_val = tf.concat([imgs_l_val, imgs_fwd_ab_val, imgs_ab_val], axis=3)
    imgs_ref_ab_val = ref.build(imgs_lab_val)
    cost, summary = loss_with_metrics(imgs_ab_val, imgs_true_ab_val,
                                      'validation_col')
    cost_fwd, summary_fwd = loss_with_metrics(imgs_fwd_ab_val, imgs_true_ab_val,
                                              'validation_fwd')
    cost_ref, summary_ref = loss_with_metrics(imgs_ref_ab_val, imgs_true_ab_val,
                                              'validation_ref')
    return {
        'imgs_l': imgs_l_val,
        'imgs_ab': imgs_ab_val,
        'imgs_fwd_ab': imgs_fwd_ab_val,
        'imgs_true_ab': imgs_true_ab_val,
        'imgs_emb': imgs_emb_val,
        'imgs_lab': imgs_lab_val,
        'imgs_ref_ab': imgs_ref_ab_val,
        'cost': cost,
        'cost_fwd': cost_fwd,
        'cost_ref': cost_ref,
        'summary': summary,
        'summary_fwd': summary_fwd,
        'summary_ref': summary_ref,
    }
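The returned tensors are meant to be fetched together in a single session run.
A minimal consumption sketch, assuming col, fwd_col and ref are already
constructed and that checkpoints/model.ckpt (a hypothetical path) holds
trained weights:

import tensorflow as tf

evals = evaluation_pipeline(col, fwd_col, ref, number_of_images=50)
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver.restore(sess, 'checkpoints/model.ckpt')  # hypothetical checkpoint
    # Queue runners must be started or the batched read would block forever
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # Fetch the three validation costs in one run
    res = sess.run({k: evals[k] for k in ('cost', 'cost_fwd', 'cost_ref')})
    print(res)
    coord.request_stop()
    coord.join(threads)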
Example 2
def evaluation_pipeline(col, number_of_images):
    # Set up validation (input queues, graph)
    irr = LabImageRecordReader('val_lab_images_*.tfrecord', dir_tfrecord)
    read_batched_examples = irr.read_batch(number_of_images, shuffle=False)
    imgs_l_val = read_batched_examples['image_l']
    imgs_true_ab_val = read_batched_examples['image_ab']
    imgs_emb_val = read_batched_examples['image_embedding']
    imgs_ab_val = col.build(imgs_l_val, imgs_emb_val)
    cost, summary = loss_with_metrics(imgs_ab_val, imgs_true_ab_val,
                                      'validation')
    return {
        'imgs_l': imgs_l_val,
        'imgs_ab': imgs_ab_val,
        'imgs_true_ab': imgs_true_ab_val,
        'imgs_emb': imgs_emb_val,
        'cost': cost,
        'summary': summary
    }
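loss_with_metrics is a project-local helper that every example here calls but
none shows. A plausible sketch of its shape, assuming a plain mean-squared-error
objective with a TensorBoard summary (the project's actual loss may differ):

import tensorflow as tf

def loss_with_metrics(img_ab_out, img_ab_true, name=''):
    with tf.name_scope(name):
        # L2 distance between predicted and ground-truth ab channels
        cost = tf.reduce_mean(
            tf.squared_difference(img_ab_out, img_ab_true), name='mse')
        summary = tf.summary.scalar('cost', cost)
    return cost, summary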
Example 3
    # Assumes tensorflow as tf, matplotlib.pyplot as plt and os.path.basename
    # are imported, plus the project helpers lab_to_rgb and l_to_rgb
    def _lab_image_read(self):
        # Important: read_batch MUST be called before start_queue_runners,
        # otherwise the internal shuffle queue gets created but its
        # threads won't start
        irr = LabImageRecordReader('test_lab_images.tfrecord', dir_tfrecord)
        read_one_example = irr.read_operation
        read_batched_examples = irr.read_batch(20)

        with tf.Session() as sess:
            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            # Coordinate the loading of image files.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            # Reading images sequentially one by one
            for i in range(0, 12, 2):
                res = sess.run(read_one_example)
                img = lab_to_rgb(res['image_l'], res['image_ab'])
                img_gray = l_to_rgb(res['image_l'])
                plt.subplot(3, 4, i + 1)
                plt.imshow(img_gray)
                plt.axis('off')
                plt.subplot(3, 4, i + 2)
                plt.imshow(img)
                plt.axis('off')
                print('Read', basename(res['image_name']))
            plt.show()

            # Reading images in batch
            res = sess.run(read_batched_examples)
            print(res['image_name'],
                  res['image_l'].shape,
                  res['image_ab'].shape,
                  res['image_embedding'].shape,
                  sep='\n')

            # Finish off the filename queue coordinator.
            coord.request_stop()
            coord.join(threads)
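lab_to_rgb and l_to_rgb turn the Lab tensors read above back into displayable
RGB arrays. A plausible implementation on top of scikit-image, assuming L in
[0, 100] and ab roughly in [-128, 127] per the CIE Lab convention (the
project's own helpers may scale differently):

import numpy as np
from skimage import color

def l_to_rgb(img_l):
    # Zero ab channels yield a grayscale rendering of the lightness channel
    lab = np.concatenate(
        [img_l, np.zeros_like(img_l), np.zeros_like(img_l)], axis=-1)
    return color.lab2rgb(lab)

def lab_to_rgb(img_l, img_ab):
    # Stack L and ab channels, then convert CIE Lab -> RGB
    lab = np.concatenate([img_l, img_ab], axis=-1)
    return color.lab2rgb(lab)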
Example 4
import tensorflow as tf

def training_pipeline(col, learning_rate, batch_size):
    # Set up training (input queues, graph, optimizer)
    irr = LabImageRecordReader('lab_images_*.tfrecord', dir_tfrecord)
    read_batched_examples = irr.read_batch(batch_size, shuffle=True)
    imgs_l = read_batched_examples['image_l']
    imgs_true_ab = read_batched_examples['image_ab']
    imgs_emb = read_batched_examples['image_embedding']
    imgs_ab = col.build(imgs_l, imgs_emb)
    cost, summary = loss_with_metrics(imgs_ab, imgs_true_ab, 'training')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
        cost, global_step=global_step)
    return {
        'global_step': global_step,
        'optimizer': optimizer,
        'cost': cost,
        'summary': summary
    }, irr, read_batched_examples
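A minimal training loop over the returned handles, assuming col is already
built and that logs (a hypothetical directory) is where summaries should go:

import tensorflow as tf

opt_ops, irr, read_batched_examples = training_pipeline(
    col, learning_rate=1e-3, batch_size=64)
writer = tf.summary.FileWriter('logs')  # hypothetical summary directory
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for _ in range(1000):
        # One optimization step; global_step tracks total iterations
        _, step, summary = sess.run(
            [opt_ops['optimizer'], opt_ops['global_step'], opt_ops['summary']])
        writer.add_summary(summary, step)
    coord.request_stop()
    coord.join(threads)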
Example 5
import tensorflow as tf

def training_pipeline(col, fwd_col, ref, learning_rate, batch_size):
    # Set up training (input queues, graph, optimizer)
    irr = LabImageRecordReader('lab_images_*.tfrecord', dir_tfrecord)
    read_batched_examples = irr.read_batch(batch_size, shuffle=True)
    imgs_l = read_batched_examples['image_l']
    imgs_true_ab = read_batched_examples['image_ab']
    imgs_emb = read_batched_examples['image_embedding']
    imgs_ab = col.build(imgs_l, imgs_emb)
    imgs_fwd_ab = fwd_col.build(imgs_l)
    # Concatenate the L, feed-forward ab and ground-truth ab channels into a
    # Lab image to train the Refinement Network
    imgs_lab = tf.concat([imgs_l, imgs_fwd_ab, imgs_true_ab], axis=3)
    imgs_ref_ab = ref.build(imgs_lab)
    cost, summary = loss_with_metrics(imgs_ab, imgs_true_ab, 'training_col')
    cost_fwd, summary_fwd = loss_with_metrics(imgs_fwd_ab, imgs_true_ab, 'training_fwd')
    cost_ref, summary_ref = loss_with_metrics(imgs_ref_ab, imgs_true_ab, 'training_ref')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Only the first optimizer advances global_step; passing it to all three
    # would increment the step three times per training iteration
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
        cost, global_step=global_step)
    optimizer_fwd = tf.train.AdamOptimizer(learning_rate).minimize(cost_fwd)
    optimizer_ref = tf.train.AdamOptimizer(learning_rate).minimize(cost_ref)
    return {
        'global_step': global_step,
        'optimizer': optimizer,
        'optimizer_fwd': optimizer_fwd,
        'optimizer_ref': optimizer_ref,
        'cost': cost,
        'cost_fwd': cost_fwd,
        'cost_ref': cost_ref,
        'summary': summary,
        'summary_fwd': summary_fwd,
        'summary_ref': summary_ref,
    }
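Each minimize call above updates every trainable variable its loss reaches, so
the col, fwd and ref networks would typically be isolated via var_list when
trained jointly. A sketch assuming the networks create their variables under
the scopes 'col', 'fwd' and 'ref' (hypothetical scope names):

import tensorflow as tf

# Restrict the refinement optimizer to the Refinement Network's variables
ref_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='ref')
optimizer_ref = tf.train.AdamOptimizer(learning_rate).minimize(
    cost_ref, var_list=ref_vars)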