# NOTE(review): original line breaks were lost in this file; the layout below
# is a reconstruction. The first statement references `ax` and most likely
# belongs to a plotting helper defined above this chunk — confirm.
ax.axhline(0, color='grey', ls='dashed')
#ax.set_title('q{}'.format(i))


def plot_position(ax, i):
    """Plot ground-truth (grey) and test traces of position component i on ax."""
    ax.plot(positions_gt[:, i], color='grey')
    ax.plot(positions_test[:, i])
    #ax.yaxis.set_ticks(np.arange(min_pos, max_pos, 1))
    # Lower y-limit: at most 0, and at least 1 below the smallest value in
    # either trace; the upper limit is left to matplotlib's autoscaling.
    ax.set_ylim(
        min(0,
            math.floor(min(min(positions_gt[:, i]),
                           min(positions_test[:, i]))) - 1))


_, labels_gt = read_label_file(def_file, convert_to_axis=True)
_, labels_test = read_label_file(test_file, convert_to_axis=False)
# FIX: np.array(map(...)) yields a 0-d object array on Python 3 (map returns a
# lazy iterator); use list comprehensions, consistent with the rest of the file.
# Each label is [x, y, z, <orientation...>]: first 3 position, rest orientation.
positions_gt = np.array([l[0:3] for l in labels_gt])
positions_test = np.array([l[0:3] for l in labels_test])
orientations_gt = np.array([l[3:] for l in labels_gt])
orientations_test = np.array([l[3:] for l in labels_test])

if orientations_gt.shape[1] == 3:
    # Axis representation (3 components): no sign ambiguity to resolve.
    pass
elif flip == 'auto':
    # Quaternions q and -q encode the same rotation; flip each ground-truth
    # quaternion to whichever sign is closer (L1 distance) to the prediction.
    for i, x in enumerate(orientations_gt):
        if sum(abs(-x - orientations_test[i])) < sum(
                abs(x - orientations_test[i])):
            orientations_gt[i] = -x
# --- Remaining CLI flags for the plotting script ---
parser.add_argument('--arrow_len', action='store', type=float,
                    required=False, default=1)
parser.add_argument('--connect', action='store_true',
                    help='''Connect consecutive camera positions with lines''')
parser.add_argument('--rings', action='store', nargs='*', required=False)
parser.add_argument('--plot_gt', action='store_true')
parser.add_argument('--plot_diff', action='store_true')
parser.add_argument('-u', '--uncertainty', action='store_true')
args = parser.parse_args()

# Ground truth is needed when there is no model to evaluate, or when it is to
# be plotted / compared against explicitly.
if not args.model or args.plot_gt or args.plot_diff:
    _, labels = read_label_file(args.dataset)
    # Each label is [x, y, z, qw, qx, qy, qz]: 3 position + 4 quaternion values.
    positions_gt = np.array([l[0:3] for l in labels])
    orientations_gt = np.array([l[3:7] for l in labels])

if args.model:
    input_size = 256
    test_reader = ImageReader(args.dataset, batch_size=1,
                              image_size=[input_size, input_size],
                              randomise=False)
    n_images = test_reader.total_images()
    # Accumulators for predicted poses, appended to in the (out-of-view)
    # inference loop below.
    positions = np.empty([0, 3])
    orientations = np.empty([0, 4])
    if args.uncertainty:
        # presumably followed by std_y / std_z out of view — TODO confirm
        std_x = []
# NOTE(review): this first call references `pos`, `radius`, `t`, `color` and
# most likely closes a ring-drawing helper defined above this chunk — confirm.
plt.plot(pos[0] + radius * np.cos(t),
         pos[1] + radius * np.sin(t),
         color, marker='.', lw=0, ms=10)

# --- Configuration for this run ---
def_file = 'results_new/david_6a/test2_localised.txt'
david = True
side = True
extrapolation = False
radius = 3.5 #*np.sin(1.2)
lim = 4.5

# Load poses: first three label values are position, the rest orientation.
_, labels = read_label_file(def_file, convert_to_axis=False)
positions = np.array([lbl[0:3] for lbl in labels])
orientations = np.array([lbl[3:] for lbl in labels])

# Project 3-D poses down to 2-D for plotting.
if side and david:
    # Side view of the david scene: radial distance vs height, with every
    # camera assumed to face inwards.
    positions_2d = [(np.sqrt(q[0] ** 2 + q[1] ** 2), q[2]) for q in positions]
    orientations_2d = [(-1, 0)] * len(orientations)
elif side:
    # Generic side view: project onto the x=y diagonal plane.
    positions_2d = [((q[0] + q[1]) / np.sqrt(2), q[2]) for q in positions]
    orientations_2d = [((v[0] + v[1]) / np.sqrt(2), v[2])
                       for v in orientations]
else:
    # Top-down view: simply drop the z component.
    positions_2d = [(q[0], q[1]) for q in positions]
    orientations_2d = [(v[0], v[1]) for v in orientations]
from posenet.utils import progress_bar

parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', action='store', required=True,
                    help='''Path to a trained Tensorflow model (.ckpt file)''')
parser.add_argument('-d', '--dataset', action='store', required=True,
                    help='''Path to a text file listing images and camera poses''')
parser.add_argument('-o', '--output', action='store', required=False)
args = parser.parse_args()

# FIX: this import previously sat between the `elif` body and the `else`
# clause, which is a SyntaxError; hoisted out of the conditional chain.
from matplotlib import pyplot as plt

# Resolve --dataset into a list of image paths: a directory of PNGs, a label
# definition file, or a single image.
if os.path.isdir(args.dataset):
    imgs = glob.glob('{}/*.png'.format(args.dataset))
elif args.dataset.endswith('.txt'):
    imgs, _ = read_label_file(args.dataset, full_paths=True)
elif args.dataset.endswith('.png'):
    imgs = [args.dataset]
else:
    imgs = []

n_images = len(imgs)
if n_images > 1 and not args.output:
    # Multiple outputs cannot go to the screen; an output path is mandatory.
    print('--output argument required')
    sys.exit(1)
if n_images == 0:
    print('No images found')
    sys.exit(1)
required=True, help='''Path to a text file listing images and camera poses''') parser.add_argument('-o', '--output', action='store', required=False) parser.add_argument('-u', '--uncertainty', action='store_true') args = parser.parse_args() input_size = 224 test_reader = ImageReader(args.dataset, batch_size=1, image_size=[input_size, input_size], random_crop=False, randomise=False) n_images = test_reader.total_images() # Read the definition file to get file names paths, _ = read_label_file(args.dataset) paths = map(lambda x: os.path.basename(x), paths) # Localise with Localiser(input_size, args.model, uncertainty=args.uncertainty) as localiser: if args.output: f = open(args.output, 'w') f.write('\n\n\n') # Hack for now for i in range(n_images): images_feed, labels_feed = test_reader.next_batch() # Make prediction predicted = localiser.localise(images_feed) x = [round(v, 6) for v in predicted['x']]
# Select the non-interactive Agg backend when requested (headless runs).
# This must happen BEFORE pyplot is imported, hence the mid-file imports below.
if args.agg:
    matplotlib.use('Agg')
import matplotlib.colors as colors
import matplotlib.cm as cmx
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

from posenet.core.image_reader import ImageReader, read_label_file
from posenet.core.localiser import Localiser
from posenet.utils import progress_bar
from posenet.utils import rotate_by_quaternion

# Orientation output representation: axis or quaternion.
output_type = 'axis' if args.axis else 'quat'

# Ground truth is needed when there is no model, or when it is to be
# plotted / compared against explicitly.
if not args.model or args.plot_gt or args.plot_diff:
    _, labels = read_label_file(args.dataset,
                                convert_to_axis=args.convert_to_axis)
    # Each label: first 3 values position, the rest orientation.
    positions_gt = np.array([l[0:3] for l in labels])
    orientations_gt = np.array([l[3:] for l in labels])

if args.model:
    # Images are resized to 256x455, then a centred 256x256 crop is taken.
    input_size = [256, 455]
    crop_size = [256, 256]
    test_reader = ImageReader(args.dataset, batch_size=1,
                              image_size=input_size, crop_size=crop_size,
                              centre_crop=True, randomise=False)
    n_images = test_reader.total_images()
    # Accumulator for predicted positions (filled below, out of view).
    positions = []