Example #1
0
    # NOTE(review): truncated code-listing fragment — the enclosing function
    # header and the body of the final loop are not shown here.
    # Pull precomputed detection constants out of the frozen TF graph.
    inplanes = sess.run(sess.graph.get_tensor_by_name('inplanes:0'))
    priors = sess.run(sess.graph.get_tensor_by_name('priors:0'))
    variances = sess.run(sess.graph.get_tensor_by_name('variances:0'))
    # Append the variances as extra columns so each prior row carries both.
    priors = np.concatenate((priors, variances), axis=1)

    # Get tensor handles
    tensor_in = sess.graph.get_tensor_by_name('input:0')  # network input image
    tensor_loc = sess.graph.get_tensor_by_name('locations:0')  # box regression output
    tensor_cla = sess.graph.get_tensor_by_name('class_probs:0')  # class confidences
    tensor_view = sess.graph.get_tensor_by_name('view_probs:0')  # viewpoint confidences
    tensor_inpl = sess.graph.get_tensor_by_name('inplane_probs:0')  # in-plane rotation confidences

    if len(models) == 1:  # If single-object network
        models = ['obj_{:02d}'.format(sequence)]  # Overwrite model name

    # Load the benchmark sequence (frames, camera, 3D models).
    bench = load_sixd(sixd_base, nr_frames=nr_frames, seq=sequence)

    input_shape = (1, 299, 299, 3)  # batch of one 299x299 RGB image
    print('Models:', models)
    print('Views:', len(views))
    print('Inplanes:', len(inplanes))
    print('Priors:', priors.shape)

    print('Precomputing projections for each used model...')
    model_map = bench.models  # Mapping from name to model3D instance
    for model_name in models:
        m = model_map[model_name]
        m.projections = precompute_projections(views, inplanes, bench.cam, m)

    # Process each frame separately
    for f in bench.frames:
Example #2
0
File: run.py  Project: billow06/ssd-6d
    # NOTE(review): truncated code-listing fragment (duplicate of the setup in
    # Example #1) — the enclosing function header is not shown here.
    # Pull precomputed detection constants out of the frozen TF graph.
    inplanes = sess.run(sess.graph.get_tensor_by_name('inplanes:0'))
    priors = sess.run(sess.graph.get_tensor_by_name('priors:0'))
    variances = sess.run(sess.graph.get_tensor_by_name('variances:0'))
    # Append the variances as extra columns so each prior row carries both.
    priors = np.concatenate((priors, variances), axis=1)

    # Get tensor handles
    tensor_in = sess.graph.get_tensor_by_name('input:0')  # network input image
    tensor_loc = sess.graph.get_tensor_by_name('locations:0')  # box regression output
    tensor_cla = sess.graph.get_tensor_by_name('class_probs:0')  # class confidences
    tensor_view = sess.graph.get_tensor_by_name('view_probs:0')  # viewpoint confidences
    tensor_inpl = sess.graph.get_tensor_by_name('inplane_probs:0')  # in-plane rotation confidences

    if len(models) == 1:  # If single-object network
        models = ['obj_{:02d}'.format(sequence)]  # Overwrite model name

    # Load the benchmark sequence (frames, camera, 3D models).
    bench = load_sixd(sixd_base, nr_frames=nr_frames, seq=sequence)

    input_shape = (1, 299, 299, 3)  # batch of one 299x299 RGB image
    print('Models:', models)
    print('Views:', len(views))
    print('Inplanes:', len(inplanes))
    print('Priors:', priors.shape)

    print('Precomputing projections for each used model...')
    model_map = bench.models  # Mapping from name to model3D instance
    for model_name in models:
        m = model_map[model_name]
        m.projections = precompute_projections(views, inplanes, bench.cam, m)

    # Process each frame separately
    for f in bench.frames:
        # NOTE(review): this print appears spliced in from a different
        # example by the scraper; it mentions a --vis_fast flag that the
        # surrounding code never defines.
        print(
            '===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).'
        )
    # NOTE(review): truncated code-listing fragment — `writer`, `args`,
    # `sixd_base` and `obj_id` are defined outside this excerpt.
    # Spin until the asynchronous result writer has drained its queue.
    # NOTE(review): busy-wait; a join()/event would avoid burning CPU.
    while (writer.running()):
        pass
    writer.stop()
    final_result = writer.results()

    # Till now all the results from detections are in final_result.
    # # Output results in json file
    write_json(final_result, args.outputpath)
    ''' 
        Evaluate final_result*******************************************************
    '''
    print("Loading ground truth benchmark dataset...")
    # nr_frames=0 — presumably "load all frames"; verify against load_sixd.
    bench_info = load_sixd(sixd_base, seq=obj_id, nr_frames=0)
    diameter = bench_info.diameter[obj_id]
    frames_of_ground_truth = bench_info.frames
    # Metrics Initialization
    add_errs = []
    adds = []
    proj_2d_errs = []
    ious = []
    np.set_printoptions(suppress=True)
    # for f in tqdm(final_result, ncols=80, ascii=True):
    for idx, f in enumerate(final_result):
        imgname = f['imgname']
        imgname = int(imgname[0:-4])  # throw '.png'
        # Frame index doubles as the key into the ground-truth list.
        gt_frame = frames_of_ground_truth[imgname]
        assert imgname == gt_frame.nr
        # embed()
from refiner.refiner import Refiner, Refinable
from rendering.utils import perturb_pose, trans_rot_err

from timeit import default_timer as timer
from docopt import docopt

# Command-line options are declared in the module docstring (docopt style).
args = docopt(__doc__)

sixd_base = args["--dataset"]
obj = args["--object"]
network = args["--network"]
max_rot_pert = float(args["--max_rot_pert"]) / 180. * np.pi  # degrees -> radians
max_trans_pert = float(args["--max_trans_pert"])
iterations = int(args["--iterations"])

# Load a single frame (nr_frames=1) of the requested sequence.
bench = load_sixd(sixd_base, nr_frames=1, seq=obj)
# NOTE(review): yaml.load without an explicit Loader is deprecated and unsafe
# on untrusted input, and the file handle is never closed — consider
# yaml.safe_load inside a `with open(...)` block.
croppings = yaml.load(open('config/croppings.yaml', 'r'))

# Infer which dataset the network was trained on from its filename.
if 'linemod' in network:
    dataset_name = 'linemod'
elif 'tejani' in network:
    dataset_name = 'tejani'
else:
    raise Exception('Could not determine dataset')

with tf.Session() as session:
    architecture = Architecture(network_file=network, sess=session)
    ren = Renderer((640, 480), bench.cam)  # renderer at VGA resolution
    refiner = Refiner(architecture=architecture, ren=ren, session=session)

    # NOTE(review): loop body is missing — the listing is truncated here.
    for frame in bench.frames:
Example #5
0
        # NOTE(review): truncated code-listing fragment — the enclosing loop
        # and the definitions of `writer`, `args`, `sixd_base`, `obj_id`
        # are outside this excerpt.
        print(
            '===========================> If this step takes too long, you can enable the --vis_fast flag to use fast rendering (real-time).'
        )
    # Spin until the asynchronous result writer has drained its queue.
    while (writer.running()):
        pass
    writer.stop()
    final_result = writer.results()

    # Till now all the results from detections are in final_result.
    # # Output results in json file
    write_json(final_result, args.outputpath)
    ''' 
        Evaluate final_result*******************************************************
    '''
    print("Loading ground truth of OCCLUSION dataset...")
    # seq=2 is hard-coded for the OCCLUSION benchmark sequence.
    bench_info = load_sixd(sixd_base, seq=2, nr_frames=0)
    diameter = bench_info.diameter[obj_id]
    frames_of_ground_truth = bench_info.frames
    # Metrics Initialization
    add_errs = []
    adds = []
    proj_2d_errs = []
    ious = []
    # for f in tqdm(final_result, ncols=80, ascii=True):
    for idx, f in enumerate(final_result):
        imgname = f['imgname']
        imgname = int(imgname[0:-4])  # throw '.png'
        gt_frame = frames_of_ground_truth[imgname]
        assert imgname == gt_frame.nr
        # Compare against every ground-truth annotation in this frame.
        for ground_truth in gt_frame.gt:
            gt_obj_id = ground_truth[0]
Example #6
0
"""

from docopt import docopt
import numpy as np
from rendering.utils import precompute_projections
import glob
from utils.sixd import load_sixd
import os
import math


def calc_azimuth(x, y):
    """Return the azimuth of point (x, y) in radians, normalized to [0, 2*pi)."""
    full_turn = 2.0 * math.pi
    angle = math.atan2(y, x) + full_turn
    return angle % full_turn


# Command-line options are declared in the module docstring (docopt style).
args = docopt(__doc__)
sixd_base = 'dataset/' + args["--dataset"]
sequences = args["--sequence"].split()
# Viewpoint samples precomputed offline and stored as a numpy array.
pts = np.load("numpy_files/viewpoints.npy")

# NOTE(review): this hard-coded range clobbers the --sequence CLI value
# parsed above, making that argument dead — looks like debug leftover;
# confirm intent before removing either line.
sequences = range(1, 21)
models = ['obj_{:06d}'.format(int(x))
          for x in sequences]  # Overwrite model name
bench = load_sixd(sixd_base, sequences=sequences)
print('Models:', models)
print('Precomputing projections for each used model...')
# image_files = glob.glob(os.path.join(os.path.expanduser("~"), 'data', "images/*.jpg"))    # put coco images as background in this folder
model_map = bench.models  # Mapping from name to model3D instance
precompute_projections(args["--dataset"], pts, bench.cam, model_map, models)