Example #1
0
def main(_):
    """Evaluate FlowNet for the FlyingChairs test set.

    Builds a graph that replicates a single image/flow triplet into a
    batch, runs the dropout FlowNet model on it, computes the mean flow
    and per-pixel confidence over the dropout samples, refines the mean
    with a bilateral solver, and reports streaming AEE metrics via the
    slim evaluation loop.
    """

    with tf.Graph().as_default():
        # Generate tensors from numpy images and flows (one triplet).
        var_num = 1
        img_0, img_1, flow = flownet_tools.get_data_flow_s(
            FLAGS.datadir, False, var_num)

        # Stack the identical triplet batchsize times so each batch
        # element is an independent dropout inference of the same input.
        imgs_0 = tf.squeeze(tf.stack([img_0 for _ in range(FLAGS.batchsize)]))
        imgs_1 = tf.squeeze(tf.stack([img_1 for _ in range(FLAGS.batchsize)]))
        flows = tf.squeeze(tf.stack([flow for _ in range(FLAGS.batchsize)]))
        # img summary after loading
        flownet.image_summary(imgs_0, imgs_1, "A_input", flows)

        # Get flow tensor from the dropout FlowNet model.
        calc_flows = architectures.flownet_dropout(imgs_0, imgs_1, flows)

        # Mean flow, confidence and confidence image over dropout samples.
        flow_mean, confidence, conf_img = var_mean(calc_flows)

        # calc EPE / AEE = ((x1-x2)^2 + (y1-y2)^2)^1/2
        # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3478865/
        aee = aee_f(flow, flow_mean, var_num)

        # Bilateral solver refinement of the mean flow.
        img_0 = tf.squeeze(tf.stack(img_0))
        flow_s = tf.squeeze(tf.stack(flow))
        solved_flow = flownet.bil_solv_var(img_0, flow_mean, confidence,
                                           flow_s)
        aee_bs = aee_f(flow, solved_flow, var_num)

        metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map(
            {
                "AEE": slim.metrics.streaming_mean(aee),
                "AEE_BS": slim.metrics.streaming_mean(aee_bs),
            })

        # FIX: dict.iteritems() is Python 2 only; items() works on 2 and 3.
        for name, value in metrics_to_values.items():
            tf.summary.scalar(name, value)
        # Define the summaries to write:
        flownet.image_summary(None, None, "FlowNetS_no_mean", calc_flows)
        solved_flows = tf.squeeze(
            tf.stack([solved_flow for _ in range(FLAGS.batchsize)]))
        flow_means = tf.squeeze(
            tf.stack([flow_mean for _ in range(FLAGS.batchsize)]))
        conf_imgs = tf.squeeze(
            tf.stack([conf_img for _ in range(FLAGS.batchsize)]))
        flownet.image_summary(None, None, "FlowNetS BS", solved_flows)
        flownet.image_summary(None, None, "FlowNetS Mean", flow_means)
        flownet.image_summary(conf_imgs, conf_imgs, "Confidence", None)

        # Run the actual evaluation loop.
        # FIX: num_evals must be an int; math.ceil returns a float on Py2.
        num_batches = int(math.ceil(FLAGS.testsize))

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.logdir + '/train',
            logdir=FLAGS.logdir + '/eval_var_flownet_s',
            num_evals=num_batches,
            # FIX: pass a list, not a Py3 dict view, as the eval op.
            eval_op=list(metrics_to_updates.values()),
            eval_interval_secs=FLAGS.eval_interval_secs,
            summary_op=tf.summary.merge_all(),
            session_config=config,
            timeout=60 * 60)
Example #2
0
def main(_):
    """Evaluate FlowNet for the KITTI 2012 training set.

    Loads one (img_0, img_1, flow) triplet at a time, decodes the KITTI
    uint16 flow encoding, resizes inputs to the network shape, runs the
    model (optionally with MC dropout), and reports streaming AEE
    metrics — including a bilateral-solver-refined variant — via the
    slim evaluation loop.
    """
    with tf.Graph().as_default():

        # just get one triplet at a time
        var_num = 1
        img_0, img_1, flow_3 = flownet_tools.get_data_kitti(
            FLAGS.datadir, False, var_num)
        # KITTI stores (u, v, valid-mask) as one 3-channel image.
        u, v, b = tf.split(flow_3, 3, axis=3)

        # KITTI conversion: flow components are stored as uint16 with an
        # offset of 2^15 and a scale factor of 64.
        u = (u - 2**15) / 64.0
        v = (v - 2**15) / 64.0
        flow = tf.stack([tf.squeeze(u), tf.squeeze(v)], 2)
        b = tf.squeeze(b)

        # Resize to the network input shape.
        img_0_rs = tf.squeeze(tf.image.resize_images(
            img_0, FLAGS.d_shape_img[:2]))
        img_1_rs = tf.squeeze(tf.image.resize_images(
            img_1, FLAGS.d_shape_img[:2]))
        flow_rs = tf.squeeze(tf.image.resize_images(
            flow, FLAGS.d_shape_img[:2]))

        # Stack the same triplet for simple multiple (dropout) inference.
        imgs_0_rs = tf.squeeze(
            tf.stack([img_0_rs for _ in range(FLAGS.batchsize)]))
        imgs_1_rs = tf.squeeze(
            tf.stack([img_1_rs for _ in range(FLAGS.batchsize)]))
        flows_rs = tf.squeeze(
            tf.stack([flow_rs for _ in range(FLAGS.batchsize)]))

        # img summary after loading
        flownet.image_summary(imgs_0_rs, imgs_1_rs, "A_input", flows_rs)
        calc_flows = model(imgs_0_rs, imgs_1_rs, flows_rs)
        calc_flows = tf.image.resize_images(calc_flows, FLAGS.img_shape[:2])

        if FLAGS.dropout and FLAGS.is_training:
            flow_split = tf.split(calc_flows, FLAGS.batchsize, axis=0)
            # AEE of the mean flow for a growing number of dropout samples;
            # mean_di[i] is computed from i + 1 samples.
            mean_di = {}
            for i in range(1, FLAGS.batchsize):
                to_mean = tf.squeeze(tf.stack([flow_split[:i + 1]]))
                mean_flow, conf_x, conf_y, conf_img = var_mean(to_mean)
                mean_di[i] = add_gt(flow, mean_flow, b)

            # Mean / confidence over the full batch of samples.
            mean_flow, conf_x, conf_y, conf_img = var_mean(calc_flows)
            # start bilateral solver
            img_0 = tf.squeeze(tf.stack(img_0))
            flow_s = tf.squeeze(tf.stack(flow))
            solved_flow = flownet.bil_solv_var(
                img_0, mean_flow, conf_x, conf_y, flow_s)
            # calc aee for solver
            aee_bs = add_gt(flow, solved_flow, b)
            # FIX: "AEE_20" and "AEE_40" previously reused mean_di[9]
            # (copy-paste); they now report 20- and 40-sample means.
            # NOTE(review): assumes FLAGS.batchsize > 39 — confirm.
            metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
                "AEE_2": slim.metrics.streaming_mean(mean_di[1]),
                "AEE_10": slim.metrics.streaming_mean(mean_di[9]),
                "AEE_20": slim.metrics.streaming_mean(mean_di[19]),
                "AEE_40": slim.metrics.streaming_mean(mean_di[39]),
                "AEE_bs": slim.metrics.streaming_mean(aee_bs),
            })
        else:
            # Weight scaling / no dropout: a single forward pass suffices.
            calc_flow = tf.squeeze(
                tf.split(calc_flows, FLAGS.batchsize, axis=0)[0])

            aee = add_gt(flow, calc_flow, b)
            metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
                "AEE": slim.metrics.streaming_mean(aee),
            })

        # write summary
        # FIX: dict.iteritems() is Python 2 only; items() works on 2 and 3.
        for name, value in metrics_to_values.items():
            tf.summary.scalar(name, value)

        # summary images
        flownet.image_summary(None, None, "FlowNetS", calc_flows)
        if FLAGS.dropout and FLAGS.is_training:
            solved_flows = tf.squeeze(
                tf.stack([solved_flow for _ in range(FLAGS.batchsize)]))
            mean_flows = tf.squeeze(
                tf.stack([mean_flow for _ in range(FLAGS.batchsize)]))
            conf_imgs = tf.squeeze(
                tf.stack([conf_img for _ in range(FLAGS.batchsize)]))
            flownet.image_summary(None, None, "FlowNetS BS", solved_flows)
            flownet.image_summary(None, None, "FlowNetS Mean", mean_flows)
            flownet.image_summary(None, None, "Confidence", conf_imgs)

        # Run the actual evaluation loop.
        # FIX: num_evals must be an int; math.ceil returns a float on Py2.
        num_batches = int(math.ceil(FLAGS.testsize)) - 1
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.logdir + '/train',
            logdir=FLAGS.logdir + '/eval_var_kitti_no_drop',
            num_evals=num_batches,
            # FIX: pass a list, not a Py3 dict view, as the eval op.
            eval_op=list(metrics_to_updates.values()),
            eval_interval_secs=FLAGS.eval_interval_secs,
            summary_op=tf.summary.merge_all(),
            session_config=config,
            timeout=60 * 60
        )
Example #3
0
def main(_):
    """Evaluate FlowNet for the FlyingChairs test set.

    Replicates a single triplet into a batch for MC-dropout inference,
    tracks AEE of the sample mean for growing sample counts, refines
    the mean with a bilateral solver, and reports streaming metrics via
    the slim evaluation loop.
    """

    with tf.Graph().as_default():
        # just get one triplet at a time
        var_num = 1
        img_0, img_1, flow = flownet_tools.get_data_flow_s(
            FLAGS.datadir, False, var_num)

        # stack same image for simple multiple inference
        imgs_0 = tf.squeeze(tf.stack([img_0 for _ in range(FLAGS.batchsize)]))
        imgs_1 = tf.squeeze(tf.stack([img_1 for _ in range(FLAGS.batchsize)]))
        flows = tf.squeeze(tf.stack([flow for _ in range(FLAGS.batchsize)]))

        # img summary after loading
        flownet.image_summary(imgs_0, imgs_1, "A_input", flows)

        # calc flow
        calc_flows = model(imgs_0, imgs_1, flows)

        # dropout and we want mean (no weight scaling)
        if FLAGS.dropout and FLAGS.is_training:
            flow_split = tf.split(calc_flows, FLAGS.batchsize, axis=0)
            # AEE of the mean flow for a growing number of dropout
            # samples; mean_di[i] is computed from i + 1 samples.
            # FIX: use a separate loop variable instead of shadowing
            # calc_flows (the model output used in the summary below).
            mean_di = {}
            for i in range(1, FLAGS.batchsize):
                sub_flows = tf.squeeze(tf.stack([flow_split[:i + 1]]))
                mean_flow, conf_x, conf_y, conf_img = var_mean(sub_flows)
                mean_di[i] = aee_f(flow, mean_flow)
            # start bilateral solver
            img_0 = tf.squeeze(tf.stack(img_0))
            flow_s = tf.squeeze(tf.stack(flow))
            solved_flow = flownet.bil_solv_var(img_0, mean_flow, conf_x,
                                               conf_y, flow_s)
            # calc aee for solver
            aee_bs = aee_f(flow, solved_flow)
            # NOTE(review): assumes FLAGS.batchsize > 44 — confirm.
            metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map(
                {
                    "AEE_2": slim.metrics.streaming_mean(mean_di[1]),
                    "AEE_10": slim.metrics.streaming_mean(mean_di[9]),
                    "AEE_25": slim.metrics.streaming_mean(mean_di[24]),
                    "AEE_45": slim.metrics.streaming_mean(mean_di[44]),
                    "AEE_bs": slim.metrics.streaming_mean(aee_bs),
                })
        else:
            # Weight scaling / no dropout: a single forward pass suffices.
            calc_flow = tf.squeeze(
                tf.split(calc_flows, FLAGS.batchsize, axis=0)[0])
            aae = aee_f(flow, calc_flow)
            metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map(
                {
                    "AEE": slim.metrics.streaming_mean(aae),
                })

        # write summary
        # FIX: dict.iteritems() is Python 2 only; items() works on 2 and 3.
        for name, value in metrics_to_values.items():
            tf.summary.scalar(name, value)

        # summary images
        flownet.image_summary(None, None, "FlowNetS", calc_flows)
        if FLAGS.dropout and FLAGS.is_training:
            solved_flows = tf.squeeze(
                tf.stack([solved_flow for _ in range(FLAGS.batchsize)]))
            mean_flows = tf.squeeze(
                tf.stack([mean_flow for _ in range(FLAGS.batchsize)]))
            conf_imgs = tf.squeeze(
                tf.stack([conf_img for _ in range(FLAGS.batchsize)]))
            flownet.image_summary(None, None, "FlowNetS BS", solved_flows)
            flownet.image_summary(None, None, "FlowNetS Mean", mean_flows)
            flownet.image_summary(None, None, "Confidence", conf_imgs)

        # Run the actual evaluation loop.
        # FIX: num_evals must be an int; math.ceil returns a float on Py2.
        num_batches = int(math.ceil(FLAGS.testsize)) - 1
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.logdir + '/train',
            logdir=FLAGS.logdir + '/eval_flownet_drop',
            num_evals=num_batches,
            # FIX: pass a list, not a Py3 dict view, as the eval op.
            eval_op=list(metrics_to_updates.values()),
            eval_interval_secs=FLAGS.eval_interval_secs,
            summary_op=tf.summary.merge_all(),
            session_config=config,
            timeout=60 * 60)
def main(_):
    """Evaluate FlowNet for the Sintel test set.

    Resizes Sintel frames from [436, 1024] to the network shape,
    rescales the flow to match, replicates the triplet for MC-dropout
    inference, refines the mean flow with a bilateral solver, and
    reports streaming AEE metrics via the slim evaluation loop.
    """

    with tf.Graph().as_default():

        # just get one triplet at a time
        var_num = 1
        img_0, img_1, flow = flownet_tools.get_data_sintel(
            FLAGS.datadir, False, var_num)

        # Resize from [436, 1024] to [448, 1024] since 436 / 2**3 is not
        # even -> stride problems. This changes the height by ~3%, so the
        # vertical flow component must be scaled accordingly below.
        img_0_rs = tf.squeeze(
            tf.image.resize_images(img_0, FLAGS.d_shape_img[:2]))
        img_1_rs = tf.squeeze(
            tf.image.resize_images(img_1, FLAGS.d_shape_img[:2]))
        flow_rs = tf.squeeze(
            tf.image.resize_images(flow, FLAGS.d_shape_img[:2]))

        # Scale flow components by the resize ratios.
        ratio_h = tf.cast(FLAGS.d_shape_img[0], tf.float32) / FLAGS.img_shape[0]
        ratio_w = tf.cast(FLAGS.d_shape_img[1], tf.float32) / FLAGS.img_shape[1]
        flow_rs = tf.squeeze(tf.stack([tf.split(flow_rs, 2, axis=-1)[0] * ratio_w,
                                       tf.split(flow_rs, 2, axis=-1)[1] * ratio_h], -1))

        # stack for simple multiple inference
        imgs_0_rs = tf.squeeze(
            tf.stack([img_0_rs for _ in range(FLAGS.batchsize)]))
        imgs_1_rs = tf.squeeze(
            tf.stack([img_1_rs for _ in range(FLAGS.batchsize)]))
        flows_rs = tf.squeeze(
            tf.stack([flow_rs for _ in range(FLAGS.batchsize)]))

        # img summary after loading
        flownet.image_summary(imgs_0_rs, imgs_1_rs, "A_input", flows_rs)

        calc_flows = model(imgs_0_rs, imgs_1_rs, flows_rs)

        if FLAGS.dropout and FLAGS.is_training:
            flow_split = tf.split(calc_flows, FLAGS.batchsize, axis=0)
            # AEE of the mean flow for a growing number of dropout
            # samples; mean_di[i] is computed from i + 1 samples.
            # FIX: use a separate loop variable instead of shadowing
            # calc_flows (the model output used in the summary below).
            mean_di = {}
            for i in range(1, FLAGS.batchsize):
                sub_flows = tf.squeeze(tf.stack([flow_split[:i + 1]]))
                mean_flow, conf_x, conf_y, conf_img = var_mean(sub_flows)
                mean_di[i] = aee_f(flow_rs, mean_flow)
            # start bilateral solver
            flow_s = tf.squeeze(tf.stack(flow_rs))
            solved_flow = flownet.bil_solv_var(
                img_0_rs, mean_flow, conf_x, conf_y, flow_s)
            # calc aee for solver
            aee_bs = aee_f(flow_rs, solved_flow)

            # MC dropout / mean for different numbers of samples.
            # NOTE(review): assumes FLAGS.batchsize > 39 — confirm.
            metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
                "AEE_2": slim.metrics.streaming_mean(mean_di[1]),
                "AEE_10": slim.metrics.streaming_mean(mean_di[9]),
                "AEE_25": slim.metrics.streaming_mean(mean_di[24]),
                "AEE_40": slim.metrics.streaming_mean(mean_di[39]),
                "AEE_bs": slim.metrics.streaming_mean(aee_bs),
            })
        else:
            # this is for weight scaling - is_training=False
            # or when dropout is off
            calc_flow = tf.squeeze(
                tf.split(calc_flows, FLAGS.batchsize, axis=0)[0])
            aae = aee_f(flow_rs, calc_flow)
            metrics_to_values, metrics_to_updates = slim.metrics.aggregate_metric_map({
                "AEE": slim.metrics.streaming_mean(aae),
            })

        # write summary
        # FIX: dict.iteritems() is Python 2 only; items() works on 2 and 3.
        # Also repaired the mixed tab/space indentation of the loop body.
        for name, value in metrics_to_values.items():
            tf.summary.scalar(name, value)

        # summary images
        flownet.image_summary(None, None, "FlowNetS", calc_flows)
        if FLAGS.dropout and FLAGS.is_training:
            solved_flows = tf.squeeze(
                tf.stack([solved_flow for _ in range(FLAGS.batchsize)]))
            mean_flows = tf.squeeze(
                tf.stack([mean_flow for _ in range(FLAGS.batchsize)]))
            conf_imgs = tf.squeeze(
                tf.stack([conf_img for _ in range(FLAGS.batchsize)]))
            flownet.image_summary(None, None, "FlowNetS BS", solved_flows)
            flownet.image_summary(None, None, "FlowNetS Mean", mean_flows)
            flownet.image_summary(None, None, "Confidence", conf_imgs)

        # Run the actual evaluation loop.
        # FIX: num_evals must be an int; math.ceil returns a float on Py2.
        num_batches = int(math.ceil(FLAGS.testsize)) - 1
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.logdir + '/train',
            logdir=FLAGS.logdir + '/eval_sintel_clean_scale',
            num_evals=num_batches,
            # FIX: pass a list, not a Py3 dict view, as the eval op.
            eval_op=list(metrics_to_updates.values()),
            eval_interval_secs=FLAGS.eval_interval_secs,
            summary_op=tf.summary.merge_all(),
            session_config=config,
            timeout=60 * 60
        )