Example #1
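        # Load the image, centre crop it and resize to the requested size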
        image = read_image(imgs[i], normalise=True)
        image = resize_image(centre_crop(image), args.size)

        # Compute the saliency map
        grad = localiser.saliency(image)
        grad = median(grad, disk(3))
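        # Normalise the saliency map to [0, 1]: by a fixed scale if given, otherwise by its maximum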
        if args.scale:
            grad = np.clip(1.0 * grad / args.scale, 0, 1)
        else:
            grad = 1.0 * grad / np.max(grad)

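        # Optionally render the map in red, using the saliency values as the alpha channel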
        if args.red:
            grad = np.stack([
                np.ones(grad.shape),
                np.zeros(grad.shape),
                np.zeros(grad.shape),
                grad,
            ], axis=2)

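        # Save to the output directory if given, otherwise display the map on screen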
        if args.output:
            fname = os.path.join(args.output, os.path.basename(imgs[i]))
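            # Note: scipy.misc.toimage was removed in SciPy 1.2, so this line needs an older SciPy (with Pillow installed)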
            scipy.misc.toimage(grad, cmin=0, cmax=1).save(fname)
            progress_bar(1.0 * (i + 1) / n_images, 30, text='Computing')
        else:
            # Display image
            plt.imshow(grad, cmap='gray')
            plt.show()

    if n_images > 1:
        print('')
Example #2
            results = sess.run(
                [train_loss]+train_summaries, feed_dict={x: train_images_feed, y: train_labels_feed})
            for res in results[1:]:
                summary_writer.add_summary(res, i)
            if args.verbose:
                print('{} (training): Loss = {:.6f}'.format(i, results[0]))

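        # Periodically evaluate the loss and summaries on a validation batch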
        if args.validate and (i % n_disp_validation == 0):
            val_images_feed, val_labels_feed = validation_reader.next_batch()
            results = sess.run(
                [validation_loss]+validation_summaries, feed_dict={x: val_images_feed, y: val_labels_feed})
            for res in results[1:]:
                summary_writer.add_summary(res, i)
            if args.verbose:
                print('{} (validation): Loss = {:.6f}'.format(i, results[0]))

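        # When not verbose, report progress as a bar instead of per-iteration losses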
        if not args.verbose:
            progress_bar(1.0*(i+1-args.iter_start)/args.n_iters, 30, text='Training', 
                         epilog='iter {}'.format(i))
        
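        # Save intermediate checkpoints at the requested iterations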
        if i+1 in args.save_iters:
            save_path = os.path.join(args.save_dir, args.name + '_iter{}.ckpt'.format(i+1))
            saver.save(sess, save_path)

    print('')
    
    # Save the model
    save_path = os.path.join(args.save_dir, args.name + '.ckpt')
    saver.save(sess, save_path)
    print("Final model saved in file: %s" % save_path)

Example #3
    with Localiser(args.model, uncertainty=args.uncertainty) as localiser:
        for i in range(n_images):
            images_feed, labels_feed = test_reader.next_batch()

            # Make prediction
            predicted = localiser.localise(images_feed)
            positions = np.concatenate(
                (positions, np.asarray([predicted['x']])))
            orientations = np.concatenate(
                (orientations, np.asarray([predicted['q']])))
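            # Record the predicted uncertainties when the model estimates them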
            if args.uncertainty:
                std_x.append(predicted['std_x'])
                std_q.append(predicted['std_q'])

            progress_bar(1.0 * (i + 1) / n_images, 30, text='Localising')
        print('')
else:
    positions = positions_gt
    orientations = orientations_gt


def draw_segment(start, end, color='black', lw=1):
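    """Draw a 3D line segment from start to end (assumes the current axes is a 3D Axes)."""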
    plt.plot([start[0], end[0]], [start[1], end[1]], [start[2], end[2]],
             linestyle='-',
             color=color,
             lw=lw)


def draw_sphere(pos, r, color='black', lw=1):
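    """Draw a sphere of radius r centred at pos (assumes the current axes is a 3D Axes)."""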
    u = np.linspace(0, 2 * np.pi, 100)