예제 #1
0
def localise_all(model, img_paths):
    """Localise every image in *img_paths* with the given model.

    Each image is read, normalised, centre-cropped and resized to
    ``args.size`` before being passed to the localiser.

    Args:
        model: model path/handle forwarded to ``Localiser``.
        img_paths: iterable of image file paths.

    Returns:
        Tuple ``(x, q)`` of numpy arrays — predicted positions and
        orientations (axis representation), one row per input image.
    """
    x, q = [], []
    # Uncertainty is disabled, so the localiser returns point estimates
    # only; the previously commented-out std_x/std_q collection (dead
    # code and unused locals) has been removed.
    with Localiser(model,
                   uncertainty=False,
                   output_type='axis',
                   dropout=args.dropout) as localiser:
        for path in img_paths:
            img = read_image(path, expand_dims=False, normalise=True)
            img = resize_image(centre_crop(img), args.size)
            pred = localiser.localise(img, samples=args.samples)
            x.append(pred['x'])
            q.append(pred['q'])
    return np.array(x), np.array(q)
예제 #2
0
else:
    # Fallback branch of an `if` that starts before this chunk: no images.
    imgs = []

n_images = len(imgs)
# Writing more than one output requires a destination directory.
if n_images > 1 and not args.output:
    print('--output argument required')
    sys.exit(1)
if n_images == 0:
    print('No images found')
    sys.exit(1)

# Create the output directory on demand.
if args.output and not os.path.isdir(args.output):
    os.makedirs(args.output)

# Localise each image and compute its saliency map.
with Localiser(args.model,
               output_type='axis' if args.axis else 'quat') as localiser:
    for i in range(n_images):
        # Load normalised image
        image = read_image(imgs[i], normalise=True)
        image = resize_image(centre_crop(image), args.size)

        # Compute the saliency map
        grad = localiser.saliency(image)
        # Median filter over a radius-3 disk to suppress pixel-level noise.
        grad = median(grad, disk(3))
        if args.scale:
            # User-supplied fixed scale; clamp the result to [0, 1].
            grad = np.clip(1.0 * grad / args.scale, 0, 1)
        else:
            # No scale given: normalise by the map's own maximum.
            # NOTE(review): divides by zero if the map is all zeros — confirm.
            grad = 1.0 * grad / np.max(grad)

        if args.red:
            grad = np.stack([
예제 #3
0
if args.model:
    # Evaluate the model on every image in the dataset, collecting the
    # predicted positions/orientations (and std-devs when requested).
    input_size = 256
    test_reader = ImageReader(args.dataset,
                              batch_size=1,
                              image_size=[input_size, input_size],
                              randomise=False)
    n_images = test_reader.total_images()

    # Accumulate per-image predictions in plain lists and convert once at
    # the end: the previous np.concatenate on every iteration copied the
    # whole array each time (accidental O(n^2)). This also matches the
    # list-append pattern used by the other evaluation loop in this file.
    positions = []
    orientations = []
    if args.uncertainty:
        std_x = []
        std_q = []

    with Localiser(args.model, uncertainty=args.uncertainty) as localiser:
        for i in range(n_images):
            images_feed, labels_feed = test_reader.next_batch()

            # Make prediction
            predicted = localiser.localise(images_feed)
            positions.append(predicted['x'])
            orientations.append(predicted['q'])
            if args.uncertainty:
                std_x.append(predicted['std_x'])
                std_q.append(predicted['std_q'])

            progress_bar(1.0 * (i + 1) / n_images, 30, text='Localising')
        print('')

    # Same final shapes as before: (n_images, 3) and (n_images, 4),
    # including the empty case (reshape keeps a (0, k) array).
    positions = np.asarray(positions).reshape(-1, 3)
    orientations = np.asarray(orientations).reshape(-1, 4)
예제 #4
0

n_images = len(imgs)
# Writing more than one output requires a destination directory.
if n_images > 1 and not args.output:
    print('--output argument required')
    sys.exit(1)
if n_images == 0:
    print('No images found')
    sys.exit(1)

# Create the output directory on demand.
if args.output and not os.path.isdir(args.output):
    os.makedirs(args.output)

# Localise each image and compute its saliency map.
input_size = 256
with Localiser(args.model) as localiser:
    for i in range(n_images):
        # Load normalised image
        image = read_image(imgs[i], normalise=True, size=[input_size, input_size])

        # Compute the saliency map
        grad = localiser.saliency(image)
        # Median filter over a radius-3 disk to suppress pixel-level noise.
        grad = median(grad, disk(3))
        # Normalise to [0, 1] by the map's own maximum.
        # NOTE(review): divides by zero if the map is all zeros — confirm.
        grad = 1.0*grad/np.max(grad)

        if args.output:
            # Save the map under the input image's basename.
            fname = os.path.join(args.output, os.path.basename(imgs[i]))
            # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 —
            # this needs an old SciPy (or a switch to imageio.imwrite).
            scipy.misc.imsave(fname, grad)
            progress_bar(1.0*(i+1)/n_images, 30, text='Computing')
        else:
            # Display image
예제 #5
0
import matplotlib.pyplot as plt

input_size = 224
test_reader = ImageReader(args.dataset,
                          batch_size=1,
                          image_size=[input_size, input_size],
                          random_crop=False,
                          randomise=False)
n_images = test_reader.total_images()

# Per-image errors and raw predictions accumulated over the dataset.
pos_errors = []
orient_errors = []
positions = np.empty([0, 3])
orientations = np.empty([0, 4])

# NOTE(review): this Localiser takes input_size as its first positional
# argument, unlike other call sites in this file — confirm intended API.
with Localiser(input_size, args.model) as localiser:
    for i in range(n_images):
        images_feed, labels_feed = test_reader.next_batch()
        # Ground truth label layout: [0:3] position, [3:7] quaternion.
        gt = {'x': labels_feed[0][0:3], 'q': labels_feed[0][3:7]}

        # Make prediction
        predicted = localiser.localise(images_feed)
        # Unpacked components — presumably used below; this view is truncated.
        x, y, z = predicted['x']
        q1, q2, q3, q4 = predicted['q']

        pos_error = l2_distance(gt['x'], predicted['x'])
        # Quaternion distance converted from radians to degrees.
        orient_error = quaternion_distance(gt['q'],
                                           predicted['q']) * 180 / np.pi
        pos_errors.append(pos_error)
        orient_errors.append(orient_error)
예제 #6
0
#x_min, x_max = 0.5, 5.997787

img_paths, labels = read_label_file(dataset,
                                    full_paths=True,
                                    convert_to_axis=True)
n_images = len(img_paths)

# Evenly spaced positions across [x_min, x_max] (defined before this chunk).
x_scale = np.linspace(x_min, x_max, n_images)
# List comprehensions instead of map(): identical result on Python 2, and
# unlike a lazy map object they remain indexable/array-convertible on
# Python 3 (np.array(map(...)) yields a useless 0-d object array there).
x_gt = [l[0:3] for l in labels]
q_gt = [l[3:] for l in labels]
x = []
q = []
std_x = []
std_q = []

with Localiser(model, uncertainty=False, output_type='axis',
               dropout=0.5) as localiser:
    for i, path in enumerate(img_paths):
        # Read, normalise, centre-crop and resize each image.
        img = read_image(path, expand_dims=False, normalise=True)
        img = resize_image(centre_crop(img), [input_size, input_size])
        pred = localiser.localise(img, samples=40)
        x.append(pred['x'])
        q.append(pred['q'])
        # Per-sample std-devs are only present when the model exposes them.
        if 'std_x_all' in pred:
            std_x.append(pred['std_x_all'])
            std_q.append(pred['std_q_all'])
        progress_bar(1.0 * (i + 1) / n_images, 30, text='Localising')
# print('') works on both Python 2 and 3, unlike the bare Python-2
# `print ''` statement used previously.
print('')

x_gt = np.array(x_gt)
q_gt = np.array(q_gt)
x = np.array(x)
예제 #7
0
args = parser.parse_args()

input_size = 224
test_reader = ImageReader(args.dataset,
                          batch_size=1,
                          image_size=[input_size, input_size],
                          random_crop=False,
                          randomise=False)
n_images = test_reader.total_images()

# Read the definition file to get file names
paths, _ = read_label_file(args.dataset)
# NOTE(review): paths[i] is indexed below, so map() must yield a list —
# true on Python 2 only; wrap in list() for Python 3.
paths = map(lambda x: os.path.basename(x), paths)

# Localise
with Localiser(input_size, args.model,
               uncertainty=args.uncertainty) as localiser:
    if args.output:
        # NOTE(review): file handle is opened without a `with`/close in
        # this view — confirm it is closed after the loop.
        f = open(args.output, 'w')
        f.write('\n\n\n')  # Hack for now

    for i in range(n_images):
        images_feed, labels_feed = test_reader.next_batch()

        # Make prediction
        predicted = localiser.localise(images_feed)
        # Round to 6 decimal places for the output file.
        x = [round(v, 6) for v in predicted['x']]
        q = [round(v, 6) for v in predicted['q']]

        if args.output:
            # Line format: <name> x y z q1 q2 q3 q4 (continues past this view).
            f.write('{} {} {} {} {} {} {} {}\n'.format(paths[i], x[0], x[1],
                                                       x[2], q[0], q[1], q[2],
예제 #8
0
    # Deterministic reader that centre-crops each image to crop_size.
    test_reader = ImageReader(args.dataset,
                              batch_size=1,
                              image_size=input_size,
                              crop_size=crop_size,
                              centre_crop=True,
                              randomise=False)
    n_images = test_reader.total_images()

    # Per-image predictions accumulated over the dataset.
    positions = []
    orientations = []
    if args.uncertainty:
        std_x = []
        std_q = []

    with Localiser(args.model,
                   uncertainty=args.uncertainty,
                   output_type=output_type,
                   dropout=0.3) as localiser:
        for i in range(n_images):
            images_feed, labels_feed = test_reader.next_batch()

            # Make prediction
            predicted = localiser.localise(images_feed)
            positions.append(predicted['x'])
            orientations.append(predicted['q'])
            # std outputs only exist when uncertainty estimation is on.
            if args.uncertainty:
                std_x.append(predicted['std_x'])
                std_q.append(predicted['std_q'])

            progress_bar(1.0 * (i + 1) / n_images, 30, text='Localising')
        print('')
    positions = np.array(positions)