Example #1
                        default=None,
                        help='test steps')
    parser.add_argument("--corr_type",
                        dest="corr_type",
                        type=str,
                        default="tf",
                        help="correlation layer realization - 'tf' or 'cuda'")

    args = parser.parse_args()

    ft3d_dataset = ft3d_filenames(args.dataset_path)

    tf.logging.set_verbosity(tf.logging.ERROR)
    dispnet = DispNet(mode="test",
                      ckpt_path=args.checkpoint_path,
                      dataset=ft3d_dataset,
                      input_size=INPUT_SIZE,
                      batch_size=args.batch_size,
                      corr_type=args.corr_type)

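    # abort early if the checkpoint directory does not contain trained weights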
    ckpt = tf.train.latest_checkpoint(args.checkpoint_path)
    if not ckpt:
        logging.error("no checkpoint found in provided path!")
        sys.exit()
    init_logger(args.checkpoint_path)
    log_step = args.log_step
    if args.n_steps is None:
        N_test = len(ft3d_dataset["TEST"])
    else:
        N_test = args.n_steps

    gpu_options = tf.GPUOptions(allow_growth=True)
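    # --- Hedged sketch, not part of the original excerpt ---------------------
    # A minimal continuation of the test setup above: open a session, restore
    # the checkpoint and average a per-batch metric over N_test steps.
    # `dispnet.error` is a hypothetical attribute name, and the queue-runner
    # calls assume the dataset is fed through TF 1.x input queues.
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(dispnet.init)
        dispnet.saver.restore(sess=sess, save_path=ckpt)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        total_error = 0.0
        for step in range(N_test):
            total_error += sess.run(dispnet.error)  # hypothetical metric tensor
            if step > 0 and step % log_step == 0:
                logging.info("step %d, mean error %.4f", step, total_error / (step + 1))
        logging.info("mean error over %d steps: %.4f", N_test, total_error / N_test)
        coord.request_stop()
        coord.join(threads)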
Example #2
    right_placeholder = tf.placeholder(dtype=tf.float32, shape=[None, None, 3])

    left_input = tf.expand_dims(left_placeholder, axis=0)
    right_input = tf.expand_dims(right_placeholder, axis=0)

    #left_img = tf.placeholder(dtype=tf.float32, shape=[1, None, None, 3])
    #right_img  = tf.placeholder(dtype=tf.float32,)

    # build input batch
    #left_img_batch, right_img_batch, name_batch, resolution_batch = tf.train.batch([left_img, right_img, left_fn, original_resolution], args.batch_size, num_threads=4, capacity=args.batch_size * 100, allow_smaller_final_batch=True)

    # build model
    is_corr = args.corr_type != 'none'
    dispnet = DispNet(mode="inference",
                      ckpt_path=args.checkpoint_path,
                      batch_size=1,
                      is_corr=is_corr,
                      corr_type=args.corr_type,
                      image_ops=[left_input, right_input])
    raw_prediction = dispnet.predictions_test[0]
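    # the raw prediction comes out at the network input resolution; resize it
    # back to the original image size, then crop/pad to the requested target shape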
    rescaled_prediction = tf.image.resize_images(
        raw_prediction,
        tf.shape(left_placeholder)[0:2],
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    cropped_prediction = tf.image.resize_image_with_crop_or_pad(
        rescaled_prediction, target_shape[0], target_shape[1])

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(dispnet.init)
        print("initialized")
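        # --- Hedged sketch, not part of the original excerpt -----------------
        # Restore the latest checkpoint and push one stereo pair through the
        # placeholders defined above. The --left_image/--right_image arguments,
        # the imageio/numpy imports and the 1/255 scaling are assumptions;
        # match the preprocessing actually used during training.
        ckpt = tf.train.latest_checkpoint(args.checkpoint_path)
        dispnet.saver.restore(sess=sess, save_path=ckpt)
        left = imageio.imread(args.left_image).astype(np.float32) / 255.0
        right = imageio.imread(args.right_image).astype(np.float32) / 255.0
        disparity = sess.run(cropped_prediction,
                             feed_dict={left_placeholder: left,
                                        right_placeholder: right})
        print("predicted disparity with shape", disparity.shape)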
Example #3
                        help='batch size')
    parser.add_argument("-l", "--log_step", dest="log_step", type=int, default=100,
                        help='log step size')
    parser.add_argument("-s", "--save_step", dest="save_step", type=int, default=5000,
                        help='save checkpoint step size')
    parser.add_argument("-n", "--n_steps", dest="n_steps", type=int, default=500000,
                        help='test steps')
    parser.add_argument('--use_corr', action='store_true', default=False)
    parser.add_argument('--weight_schedule', action='store_true', default=False)

    args = parser.parse_args()

    ft3d_dataset = ft3d_filenames(args.dataset_path)

    tf.logging.set_verbosity(tf.logging.ERROR)
    dispnet = DispNet(mode="traintest", ckpt_path=args.checkpoint_path, dataset=ft3d_dataset,
                      batch_size=args.batch_size, is_corr=args.use_corr)

    ckpt = tf.train.latest_checkpoint(args.checkpoint_path)
    if not ckpt:
        if not os.path.exists(args.checkpoint_path):
            os.makedirs(args.checkpoint_path)
    model_name = "DispNet"
    if args.use_corr:
        model_name += "Corr1D"

    init_logger(args.checkpoint_path, name=model_name)
    writer = tf.summary.FileWriter(args.checkpoint_path)

    schedule_step = 50000
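    # per-scale loss weights; the nonzero entries are shifted across scales
    # every schedule_step iterations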
    if args.weight_schedule:
        weights_schedule = [[0., 0., 0., 0., .2, 1.],
Example #4
                        type=str,
                        metavar="FILE",
                        help='model checkpoint path')
    parser.add_argument("-o",
                        "--output",
                        dest="output_path",
                        required=True,
                        type=str,
                        metavar="FILE",
                        help='path to output frozen model')
    parser.add_argument('--use_corr', action='store_true', default=False)

    args = parser.parse_args()

    dispnet = DispNet(mode="inference",
                      ckpt_path=args.checkpoint_path,
                      input_size=INPUT_SIZE,
                      is_corr=args.use_corr)

    ckpt = tf.train.latest_checkpoint(args.checkpoint_path)

    gpu_options = tf.GPUOptions(allow_growth=True)
    with tf.Session(graph=dispnet.graph,
                    config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(dispnet.init)
        dispnet.saver.restore(sess=sess, save_path=ckpt)
        print("Restoring from %s" % ckpt)

        freeze_graph_def = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), ['prediction/conv/BiasAdd'])
        if not os.path.exists(args.output_path):
            os.makedirs(args.output_path)
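        # --- Hedged sketch, not part of the original excerpt -----------------
        # Serialize the frozen GraphDef with the standard TF 1.x helper; the
        # file name "frozen_dispnet.pb" is an assumption.
        tf.train.write_graph(freeze_graph_def,
                             args.output_path,
                             "frozen_dispnet.pb",
                             as_text=False)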
Example #5
                        help='batch size')
    parser.add_argument("-l", "--log_step", dest="log_step", type=int, default=100,
                        help='log step size')
    parser.add_argument("-s", "--save_step", dest="save_step", type=int, default=5000,
                        help='save checkpoint step size')
    parser.add_argument("-n", "--n_steps", dest="n_steps", type=int, default=None,
                        help='test steps')
    parser.add_argument("--corr_type", dest="corr_type", type=str, default="tf",
                        help="correlation layer realization - 'tf' or 'cuda'")

    args = parser.parse_args()
    
    ft3d_dataset = ft3d_filenames(args.dataset_path)

    tf.logging.set_verbosity(tf.logging.ERROR)
    dispnet = DispNet(mode="traintest", ckpt_path=args.checkpoint_path, dataset=ft3d_dataset,
                      batch_size=args.batch_size, is_corr=CORR, corr_type=args.corr_type)

    ckpt = tf.train.latest_checkpoint(args.checkpoint_path)
    if not ckpt:
        if not os.path.exists(args.checkpoint_path):
            os.mkdir(args.checkpoint_path)
    model_name = "DispNet"
    if CORR:
        model_name += "Corr1D"
    init_logger(args.checkpoint_path, name=model_name)
    writer = tf.summary.FileWriter(args.checkpoint_path)

    schedule_step = 50000
    weights_schedule = [[0., 0., 0., 0., .2, 1.],
                        [0., 0., 0., .2, 1., .5],
                        [0., 0., .2, 1., .5, 0.],
Example #6
                        help="flag to read confidence as a 16 bit png",
                        action='store_true')

    args = parser.parse_args()

    dataset = trainingLists_conf(args.training,
                                 args.testing,
                                 kittiGt=args.kittigt,
                                 doublePrecisionConf=args.doubleConf)

    tf.logging.set_verbosity(tf.logging.ERROR)
    is_corr = args.corr_type != 'none'
    dispnet = DispNet(mode="traintest",
                      ckpt_path=args.checkpoint_path,
                      dataset=dataset,
                      batch_size=args.batch_size,
                      is_corr=is_corr,
                      corr_type=args.corr_type,
                      smoothness_lambda=args.smooth,
                      confidence_th=args.confidence_th)

    if not os.path.exists(args.checkpoint_path):
        os.mkdir(args.checkpoint_path)
    init_logger(args.checkpoint_path)
    writer = tf.summary.FileWriter(args.checkpoint_path)

    #Flying Things train
    # schedule_step = 100000  # ORIGINAL
    # weights_schedule = [[0., 0., 0., 0., .2, 1.],
    #                     [0., 0., 0., .2, 1., .5],
    #                     [0., 0., .2, 1., .5, 0.],
    #                     [0., .2, 1., .5, 0., 0.],