def mkdir_if_missing(output_dir):
  """Create output_dir if it does not already exist.

  Tolerates a concurrent creation race: if another process creates the
  directory between the existence check and makedirs, the error is logged
  and execution continues (best-effort semantics, as before).

  Args:
    output_dir: path of the directory to create.
  """
  if not fu.exists(output_dir):
    try:
      fu.makedirs(output_dir)
    # Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
    # narrowed to Exception while keeping the original best-effort behavior.
    except Exception:
      logging.error(
          "Something went wrong in mkdir_if_missing. "
          "Probably some other process created the directory already.")
def _launcher(config_name, logdir):
  """Build args for the given config and run the enabled phases.

  Ensures the log directory exists, then runs training and/or testing
  depending on the control flags carried by the parsed args.
  """
  run_args = _setup_args(config_name, logdir)
  fu.makedirs(run_args.logdir)
  control = run_args.control
  if control.train:
    _train(run_args)
  if control.test:
    _test(run_args)
def plot_trajectory(dt, hardness, orig_maps, out_dir):
  """Saves per-episode trajectory plots (agent path + goal) over the map.

  Writes all variables in `dt` (plus `hardness`) to a pickle, then renders
  one PNG per episode showing start location, goal, and the traversed path
  on top of `orig_maps`.

  Args:
    dt: dict of arrays; uses 'all_goal_locs' and 'all_locs', both indexed
        as [episode, step, xy] — assumed shape (episodes, T, 2), TODO confirm.
    hardness: stored into dt['hardness'] and saved alongside the other vars.
    orig_maps: 2D map image rendered as the background (gray colormap).
    out_dir: base output directory; a config/imset-specific subdir is used.
  """
  out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
                         FLAGS.imset)
  fu.makedirs(out_dir)
  out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')
  dt['hardness'] = hardness
  # NOTE(review): relies on dt.values()/dt.keys() enumerating in matching
  # order — true per-dict in CPython, but fragile if dt is mutated between.
  utils.save_variables(out_file, dt.values(), dt.keys(), overwrite=True)

  #Plot trajectories onto the maps
  plt.set_cmap('gray')
  # NOTE(review): hard-coded episode count; assumes at least 4000 episodes
  # are present in dt — TODO confirm against caller.
  for i in range(4000):
    goal_loc = dt['all_goal_locs'][i, :, :]
    # NOTE(review): concatenates the same array with itself — looks like the
    # second argument was meant to be goal_loc; verify before changing.
    locs = np.concatenate((dt['all_locs'][i,:,:],
                           dt['all_locs'][i,:,:]), axis=0)
    # Square viewport centered on the trajectory, at least 24 units wide.
    xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
    xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
    xy1 = (xymax+xymin)/2. - 1.*np.maximum(np.max(xymax-xymin), 24)
    xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24)

    fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6))
    ax.set_axis_on()
    ax.patch.set_facecolor((0.333, 0.333, 0.333))
    ax.set_xticks([])
    ax.set_yticks([])

    # Deduplicate consecutive repeated locations, always keeping step 0,
    # so the scatter colors progress along distinct waypoints only.
    all_locs = dt['all_locs'][i,:,:]*1
    uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1
    uniq = np.sort(uniq).tolist()
    uniq.insert(0,0)
    uniq = np.array(uniq)
    all_locs = all_locs[uniq, :]

    # Start (blue dot), goal (green star), path (red line + graded scatter).
    ax.plot(dt['all_locs'][i, 0, 0], dt['all_locs'][i, 0, 1],
            'b.', markersize=24)
    ax.plot(dt['all_goal_locs'][i, 0, 0], dt['all_goal_locs'][i, 0, 1],
            'g*', markersize=19)
    ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)
    ax.scatter(all_locs[:,0], all_locs[:,1],
               c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
               cmap='Reds', s=30, linewidth=0)
    ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')
    ax.set_xlim([xy1[0], xy2[0]])
    ax.set_ylim([xy1[1], xy2[1]])

    file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))
    print(file_name)
    with fu.fopen(file_name, 'w') as f:
      plt.savefig(f)
    plt.close(fig)
def mkdir_if_missing(output_dir):
  """Create output_dir if it does not already exist.

  Guards against the check-then-create race (another process creating the
  directory between `fu.exists` and `fu.makedirs`): a failure to create is
  logged rather than raised, matching the guarded variant of this helper
  elsewhere in this source.

  Args:
    output_dir: path of the directory to create.
  """
  if not fu.exists(output_dir):
    try:
      fu.makedirs(output_dir)
    except Exception:
      # Most likely another process created the directory concurrently.
      logging.error(
          "Something went wrong in mkdir_if_missing. "
          "Probably some other process created the directory already.")
def plot_trajectory_first_person(dt, orig_maps, out_dir):
  """Renders first-person trajectory videos (mencoder mp4) for a few episodes.

  For each episode index in `inds_to_do`, re-renders the agent's first-person
  RGB view at interpolated poses along the recorded node sequence, overlays
  distance-to-goal and the taken action, shows a top-view map inset, and
  saves the animation to `<out_dir>/vis_<i>.mp4`.

  Args:
    dt: dict of recorded episode arrays; uses 'all_node_ids', 'all_d_at_t',
        'all_locs', 'all_goal_locs' — shapes assumed [episode, step, ...],
        TODO confirm.
    orig_maps: 2D top-view map image for the inset panel.
    out_dir: base output directory; a config/imset-specific subdir is used.
  """
  out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
                         FLAGS.imset)
  fu.makedirs(out_dir)

  # Load the model so that we can render.
  plt.set_cmap('gray')
  # samples_per_action: interpolated frames between consecutive nodes;
  # wait_at_action: extra hold frames before each move (currently disabled).
  samples_per_action = 8; wait_at_action = 0;

  Writer = animation.writers['mencoder']
  writer = Writer(fps=3*(samples_per_action+wait_at_action),
                  metadata=dict(artist='anonymous'), bitrate=1800)

  # Build a render-only environment at 512x512 RGB for this config/imset.
  args = sna.get_args_for_config(FLAGS.config_name + '+bench_'+FLAGS.imset)
  args.navtask.logdir = None
  navtask_ = copy.deepcopy(args.navtask)
  navtask_.camera_param.modalities = ['rgb']
  navtask_.task_params.modalities = ['rgb']
  sz = 512
  navtask_.camera_param.height = sz
  navtask_.camera_param.width = sz
  navtask_.task_params.img_height = sz
  navtask_.task_params.img_width = sz
  R = lambda: nav_env.get_multiplexer_class(navtask_, 0)
  R = R()
  b = R.buildings[0]

  # Per-step interpolation fractions in [0, 1): hold frames then a linear ramp.
  f = [0 for _ in range(wait_at_action)] + \
      [float(_)/samples_per_action for _ in range(samples_per_action)];

  # Generate things for it to render.
  inds_to_do = []
  inds_to_do += [1, 4, 10] #1291, 1268, 1273, 1289, 1302, 1426, 1413, 1449, 1399, 1390]

  for i in inds_to_do:
    # Layout: large first-person panel on the left, three stacked insets on
    # the right (only the first and last axes are actually drawn into).
    fig = plt.figure(figsize=(10,8))
    gs = GridSpec(3,4)
    gs.update(wspace=0.05, hspace=0.05, left=0.0, top=0.97,
              right=1.0, bottom=0.)
    ax = fig.add_subplot(gs[:,:-1])
    ax1 = fig.add_subplot(gs[0,-1])
    ax2 = fig.add_subplot(gs[1,-1])
    ax3 = fig.add_subplot(gs[2,-1])
    axes = [ax, ax1, ax2, ax3]
    # ax = fig.add_subplot(gs[:,:])
    # axes = [ax]
    for ax in axes:
      ax.set_axis_off()

    node_ids = dt['all_node_ids'][i, :, 0]*1
    # Prune so that last node is not repeated more than 3 times?
    # NOTE(review): assumes the trajectory has at least 4 entries; the while
    # loop can index node_ids[-4] repeatedly as the array shrinks — TODO
    # confirm trajectories are long enough that this never underflows.
    if np.all(node_ids[-4:] == node_ids[-1]):
      while node_ids[-4] == node_ids[-1]:
        node_ids = node_ids[:-1]
    num_steps = np.minimum(FLAGS.num_steps, len(node_ids))

    # Per-step pose deltas; heading delta is wrapped to {-1, 0, 1, 2} with
    # 3 mapped to -1 so turns interpolate the short way around.
    xyt = b.to_actual_xyt_vec(b.task.nodes[node_ids])
    xyt_diff = xyt[1:,:] - xyt[:-1:,:]
    xyt_diff[:,2] = np.mod(xyt_diff[:,2], 4)
    ind = np.where(xyt_diff[:,2] == 3)[0]
    xyt_diff[ind, 2] = -1
    xyt_diff = np.expand_dims(xyt_diff, axis=1)
    # Scale each delta by the interpolation fractions to get per-frame
    # perturbations; a zero fourth column is appended for the renderer.
    to_cat = [xyt_diff*_ for _ in f]
    perturbs_all = np.concatenate(to_cat, axis=1)
    perturbs_all = np.concatenate(
        [perturbs_all, np.zeros_like(perturbs_all[:,:,:1])], axis=2)
    # Repeat each node id once per frame, drop the final node's frames, and
    # flatten so render_nodes sees one (node, perturb) pair per video frame.
    node_ids_all = np.expand_dims(node_ids, axis=1)*1
    node_ids_all = np.concatenate([node_ids_all for _ in f], axis=1)
    node_ids_all = np.reshape(node_ids_all[:-1,:], -1)
    perturbs_all = np.reshape(perturbs_all, [-1, 4])
    imgs = b.render_nodes(b.task.nodes[node_ids_all,:], perturb=perturbs_all)

    # Get action at each node.
    actions = []
    _, action_to_nodes = b.get_feasible_actions(node_ids)
    for j in range(num_steps-1):
      action_to_node = action_to_nodes[j]
      # Invert the action->node map to look up which action reached the
      # next node in the recorded trajectory.
      node_to_action = dict(zip(action_to_node.values(),
                                action_to_node.keys()))
      actions.append(node_to_action[node_ids[j+1]])

    def init_fn():
      # Blit init: nothing to pre-draw; return the artists tuple.
      return fig,
    gt_dist_to_goal = []

    # Render trajectories.
    def worker(j):
      # Frame callback for FuncAnimation; j is the global frame index.
      # NOTE(review): integer division is relied upon here — under Python 3
      # `/` yields a float and the later indexing would fail; this code
      # appears to target Python 2. TODO confirm before porting.
      # Plot the image.
      step_number = j/(samples_per_action + wait_at_action)
      img = imgs[j]; ax = axes[0]; ax.clear(); ax.set_axis_off();
      img = img.astype(np.uint8); ax.imshow(img);
      tt = ax.set_title(
          "First Person View\n" +
          "Top corners show diagnostics (distance, agents' action) not input to agent.",
          fontsize=12)
      plt.setp(tt, color='white')

      # Distance to goal.
      t = 'Dist to Goal:\n{:2d} steps'.format(
          int(dt['all_d_at_t'][i, step_number]))
      t = ax.text(0.01, 0.99, t,
                  horizontalalignment='left',
                  verticalalignment='top',
                  fontsize=20, color='red',
                  transform=ax.transAxes, alpha=1.0)
      t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))

      # Action to take.
      # Glyphs: stay, rotate right, rotate left, move forward (presumed
      # ordering; verify against the environment's action encoding).
      action_latex = ['$\odot$ ', '$\curvearrowright$ ',
                      '$\curvearrowleft$ ', r'$\Uparrow$ ']
      t = ax.text(0.99, 0.99, action_latex[actions[step_number]],
                  horizontalalignment='right',
                  verticalalignment='top',
                  fontsize=40, color='green',
                  transform=ax.transAxes, alpha=1.0)
      t.set_bbox(dict(color='white', alpha=0.85, pad=-0.1))

      # Plot the map top view.
      ax = axes[-1]
      if j == 0:
        # Plot the map
        locs = dt['all_locs'][i,:num_steps,:]
        goal_loc = dt['all_goal_locs'][i,:,:]
        xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
        xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
        xy1 = (xymax+xymin)/2. - 0.7*np.maximum(np.max(xymax-xymin), 24)
        xy2 = (xymax+xymin)/2. + 0.7*np.maximum(np.max(xymax-xymin), 24)

        ax.set_axis_on()
        ax.patch.set_facecolor((0.333, 0.333, 0.333))
        ax.set_xticks([]); ax.set_yticks([]);
        ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0)
        ax.plot(goal_loc[:,0], goal_loc[:,1], 'g*', markersize=12)

        locs = dt['all_locs'][i,:1,:]
        ax.plot(locs[:,0], locs[:,1], 'b.', markersize=12)

      # Current agent position (accumulates across frames; axes are only
      # cleared for the first-person panel, not this inset).
      locs = dt['all_locs'][i,step_number,:]
      locs = np.expand_dims(locs, axis=0)
      ax.plot(locs[:,0], locs[:,1], 'r.', alpha=1.0, linewidth=0,
              markersize=4)
      tt = ax.set_title('Trajectory in topview', fontsize=14)
      plt.setp(tt, color='white')
      return fig,

    line_ani = animation.FuncAnimation(
        fig, worker, (num_steps-1)*(wait_at_action+samples_per_action),
        interval=500, blit=True, init_func=init_fn)
    # Write locally, then copy into place; mencoder writes to a plain path.
    tmp_file_name = 'tmp.mp4'
    line_ani.save(tmp_file_name, writer=writer,
                  savefig_kwargs={'facecolor':'black'})
    out_file_name = os.path.join(out_dir, 'vis_{:04d}.mp4'.format(i))
    print(out_file_name)

    if fu.exists(out_file_name):
      gfile.Remove(out_file_name)
    gfile.Copy(tmp_file_name, out_file_name)
    gfile.Remove(tmp_file_name)
    plt.close(fig)
def main(_):
  """Distillation driver ('train_bkp' variant): trains and/or evaluates.

  Builds args from FLAGS, constructs the mesh-mapper environment, then —
  depending on args.control — runs slim training and/or a loop that
  evaluates every new checkpoint until max_steps is reached.

  Args:
    _: unused (app.run passes argv).
  """
  args = config_distill.get_args_for_config(FLAGS.config_name)
  args.logdir = FLAGS.logdir
  args.solver.num_workers = FLAGS.num_workers
  args.solver.task = FLAGS.task
  args.solver.ps_tasks = FLAGS.ps_tasks
  args.solver.master = FLAGS.master
  args.buildinger.env_class = nav_env.MeshMapper
  fu.makedirs(args.logdir)
  args.buildinger.logdir = args.logdir
  R = nav_env.get_multiplexor_class(args.buildinger, args.solver.task)

  # Disabled profiling/debug path: samples one building, renders its images
  # to ./tmp, and prints cProfile stats. Flip to True to debug rendering.
  if False:
    pr = cProfile.Profile()
    pr.enable()
    rng = np.random.RandomState(0)
    for i in range(1):
      b, instances_perturbs = R.sample_building(rng)
      inputs = b.worker(*(instances_perturbs))
      for j in range(inputs['imgs'].shape[0]):
        p = os.path.join('tmp', '{:d}.png'.format(j))
        img = inputs['imgs'][j, 0, :, :, :3] * 1
        img = (img).astype(np.uint8)
        fu.write_image(p, img)
      print(inputs['imgs'].shape)
      inputs = R.pre(inputs)
    pr.disable()
    pr.print_stats(2)

  if args.control.train:
    if not gfile.Exists(args.logdir):
      gfile.MakeDirs(args.logdir)

    m = utils.Foo()
    m.tf_graph = tf.Graph()

    # Single GPU, grow memory on demand but cap at 80% of the device.
    config = tf.ConfigProto()
    config.device_count['GPU'] = 1
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.8

    with m.tf_graph.as_default():
      with tf.device(tf.train.replica_device_setter(
          args.solver.ps_tasks)):
        m = distill.setup_to_run(m, args, is_training=True,
                                 batch_norm_is_training=True)
        # Train-phase step kwargs; summaries go under 'train_bkp'.
        train_step_kwargs = distill.setup_train_step_kwargs_mesh(
            m, R, os.path.join(args.logdir, 'train_bkp'),
            rng_seed=args.solver.task,
            is_chief=args.solver.task == 0, iters=1,
            train_display_interval=args.summary.display_interval)

        final_loss = slim.learning.train(
            train_op=m.train_op,
            logdir=args.logdir,
            master=args.solver.master,
            is_chief=args.solver.task == 0,
            number_of_steps=args.solver.max_steps,
            train_step_fn=tf_utils.train_step_custom,
            train_step_kwargs=train_step_kwargs,
            global_step=m.global_step_op,
            init_op=m.init_op,
            init_fn=m.init_fn,
            sync_optimizer=m.sync_optimizer,
            saver=m.saver_op,
            summary_op=None, session_config=config)

  if args.control.test:
    m = utils.Foo()
    m.tf_graph = tf.Graph()
    # NOTE(review): format() on a path is a no-op for str; presumably
    # left over from an earlier formatting call — confirm intent.
    checkpoint_dir = os.path.join(format(args.logdir))
    with m.tf_graph.as_default():
      m = distill.setup_to_run(
          m, args, is_training=False,
          batch_norm_is_training=args.control.
          force_batchnorm_is_training_at_test)

      # Eval-phase step kwargs; distinct rng seed from the train phase.
      train_step_kwargs = distill.setup_train_step_kwargs_mesh(
          m, R, os.path.join(args.logdir, args.control.test_name),
          rng_seed=args.solver.task + 1,
          is_chief=args.solver.task == 0,
          iters=args.summary.test_iters, train_display_interval=None)

      # Supervisor used only for checkpoint restore / queue runners;
      # standard services (summaries, saving) are not started.
      sv = slim.learning.supervisor.Supervisor(
          graph=ops.get_default_graph(), logdir=None, init_op=m.init_op,
          summary_op=None, summary_writer=None, global_step=None,
          saver=m.saver_op)

      last_checkpoint = None
      while True:
        # Block until the trainer writes a newer checkpoint, then eval it.
        last_checkpoint = slim.evaluation.wait_for_new_checkpoint(
            checkpoint_dir, last_checkpoint)
        # Checkpoint basenames look like 'model.ckpt-<iter>'.
        checkpoint_iter = int(
            os.path.basename(last_checkpoint).split('-')[1])
        start = time.time()
        logging.info(
            'Starting evaluation at %s using checkpoint %s.',
            time.strftime('%Y-%m-%d-%H:%M:%S',
                          time.localtime()), last_checkpoint)

        config = tf.ConfigProto()
        config.device_count['GPU'] = 1
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.8

        with sv.managed_session(args.solver.master, config=config,
                                start_standard_services=False) as sess:
          sess.run(m.init_op)
          sv.saver.restore(sess, last_checkpoint)
          sv.start_queue_runners(sess)
          vals, _ = tf_utils.train_step_custom(sess, None, m.global_step_op,
                                               train_step_kwargs, mode='val')
          # Stop once the trainer has reached its final step.
          if checkpoint_iter >= args.solver.max_steps:
            break
def main(_):
  """Distillation driver ('train' variant): trains and/or evaluates.

  Near-duplicate of the earlier main() in this source (that one logs train
  summaries under 'train_bkp'; this one uses 'train'). If both appear in the
  same module, this later definition shadows the earlier one.

  Args:
    _: unused (app.run passes argv).
  """
  args = config_distill.get_args_for_config(FLAGS.config_name)
  args.logdir = FLAGS.logdir
  args.solver.num_workers = FLAGS.num_workers
  args.solver.task = FLAGS.task
  args.solver.ps_tasks = FLAGS.ps_tasks
  args.solver.master = FLAGS.master
  args.buildinger.env_class = nav_env.MeshMapper
  fu.makedirs(args.logdir)
  args.buildinger.logdir = args.logdir
  R = nav_env.get_multiplexor_class(args.buildinger, args.solver.task)

  # Disabled profiling/debug path: samples one building, renders its images
  # to ./tmp, and prints cProfile stats. Flip to True to debug rendering.
  if False:
    pr = cProfile.Profile()
    pr.enable()
    rng = np.random.RandomState(0)
    for i in range(1):
      b, instances_perturbs = R.sample_building(rng)
      inputs = b.worker(*(instances_perturbs))
      for j in range(inputs['imgs'].shape[0]):
        p = os.path.join('tmp', '{:d}.png'.format(j))
        img = inputs['imgs'][j,0,:,:,:3]*1
        img = (img).astype(np.uint8)
        fu.write_image(p, img)
      print(inputs['imgs'].shape)
      inputs = R.pre(inputs)
    pr.disable()
    pr.print_stats(2)

  if args.control.train:
    if not gfile.Exists(args.logdir):
      gfile.MakeDirs(args.logdir)

    m = utils.Foo()
    m.tf_graph = tf.Graph()

    # Single GPU, grow memory on demand but cap at 80% of the device.
    config = tf.ConfigProto()
    config.device_count['GPU'] = 1
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.8

    with m.tf_graph.as_default():
      with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks)):
        m = distill.setup_to_run(m, args, is_training=True,
                                 batch_norm_is_training=True)
        # Train-phase step kwargs; summaries go under 'train'.
        train_step_kwargs = distill.setup_train_step_kwargs_mesh(
            m, R, os.path.join(args.logdir, 'train'),
            rng_seed=args.solver.task,
            is_chief=args.solver.task==0, iters=1,
            train_display_interval=args.summary.display_interval)

        final_loss = slim.learning.train(
            train_op=m.train_op,
            logdir=args.logdir,
            master=args.solver.master,
            is_chief=args.solver.task == 0,
            number_of_steps=args.solver.max_steps,
            train_step_fn=tf_utils.train_step_custom,
            train_step_kwargs=train_step_kwargs,
            global_step=m.global_step_op,
            init_op=m.init_op,
            init_fn=m.init_fn,
            sync_optimizer=m.sync_optimizer,
            saver=m.saver_op,
            summary_op=None, session_config=config)

  if args.control.test:
    m = utils.Foo()
    m.tf_graph = tf.Graph()
    # NOTE(review): format() on a path is a no-op for str; presumably
    # left over from an earlier formatting call — confirm intent.
    checkpoint_dir = os.path.join(format(args.logdir))
    with m.tf_graph.as_default():
      m = distill.setup_to_run(
          m, args, is_training=False,
          batch_norm_is_training=args.control.force_batchnorm_is_training_at_test)

      # Eval-phase step kwargs; distinct rng seed from the train phase.
      train_step_kwargs = distill.setup_train_step_kwargs_mesh(
          m, R, os.path.join(args.logdir, args.control.test_name),
          rng_seed=args.solver.task+1, is_chief=args.solver.task==0,
          iters=args.summary.test_iters, train_display_interval=None)

      # Supervisor used only for checkpoint restore / queue runners;
      # standard services (summaries, saving) are not started.
      sv = slim.learning.supervisor.Supervisor(
          graph=ops.get_default_graph(), logdir=None, init_op=m.init_op,
          summary_op=None, summary_writer=None, global_step=None,
          saver=m.saver_op)

      last_checkpoint = None
      while True:
        # Block until the trainer writes a newer checkpoint, then eval it.
        last_checkpoint = slim.evaluation.wait_for_new_checkpoint(
            checkpoint_dir, last_checkpoint)
        # Checkpoint basenames look like 'model.ckpt-<iter>'.
        checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])
        start = time.time()
        logging.info('Starting evaluation at %s using checkpoint %s.',
                     time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()),
                     last_checkpoint)

        config = tf.ConfigProto()
        config.device_count['GPU'] = 1
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.8

        with sv.managed_session(args.solver.master,config=config,
                                start_standard_services=False) as sess:
          sess.run(m.init_op)
          sv.saver.restore(sess, last_checkpoint)
          sv.start_queue_runners(sess)
          vals, _ = tf_utils.train_step_custom(
              sess, None, m.global_step_op, train_step_kwargs, mode='val')
          # Stop once the trainer has reached its final step.
          if checkpoint_iter >= args.solver.max_steps:
            break