def save_d_at_t(outputs, global_step, output_dir, metric_summary, N):
  """Save distance to goal at all time steps.

  Plots the mean (over episodes) distance-to-goal at each time step and
  dumps both the figure (.png) and the raw array (.pkl) to output_dir.

  Args:
    outputs        : list whose elements are [gt_dist_to_goal] tuples;
                     gt_dist_to_goal is indexed as [episode, time, 0].
    global_step    : number of iterations (used in output file names).
    metric_summary : to append scalars to summary (unused here; kept for a
                     uniform interface with the other eval functions).
    output_dir     : output directory.
    N              : number of outputs to process (unused here).

  Returns:
    None.
  """
  # A list comprehension is required: on Python 3, map() returns a lazy
  # iterator and np.concatenate needs a sequence.
  d_at_t = np.concatenate([x[0][:, :, 0] * 1 for x in outputs], axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)
  return None
def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N):
  """Plot ground-truth readout maps next to predicted readout maps.

  For each output, shows one (gt, pred) image pair per map channel, taken
  at roughly the midpoint of the trajectory, and saves the grid of images
  as a single png in output_dir.

  Args:
    outputs        : list of [gt_map, pred_map] pairs; maps are indexed as
                     [batch, time, H, W, channel].
    global_step    : number of iterations (used in the output file name).
    output_dir     : output directory.
    metric_summary : unused here; kept for a uniform interface with the
                     other eval functions.
    N              : number of outputs to process; negative means all.
  """
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)

  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4] * 2), (5, 5))
  # Reverse and pop() so axes are consumed in row-major display order.
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    gt_map, pred_map = outputs[i]
    for j in [0]:
      for k in range(gt_map.shape[4]):
        # Display something like the midpoint of the trajectory.
        # Use the builtin int: np.int was deprecated in NumPy 1.20 and
        # removed in 1.24; also avoids shadowing the builtin id().
        mid_t = int(gt_map.shape[1] / 2)
        ax = axes.pop()
        ax.imshow(gt_map[j, mid_t, :, :, k], origin='lower',
                  interpolation='none', vmin=0., vmax=1.)
        ax.set_axis_off()
        if i == 0: ax.set_title('gt_map')

        ax = axes.pop()
        ax.imshow(pred_map[j, mid_t, :, :, k], origin='lower',
                  interpolation='none', vmin=0., vmax=1.)
        ax.set_axis_off()
        if i == 0: ax.set_title('pred_map')

  file_name = os.path.join(output_dir,
                           'readout_map_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
def plot_trajectories(outputs, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs during validation to plot the
  trajectories in the top view.

  Args:
    outputs        : [locs, orig_maps, goal_loc].
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to (unused here; kept
                     for a uniform interface with the other eval functions).
    N              : number of outputs to process; negative means all.

  Returns:
    None.
  """
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)

  plt.set_cmap('gray')
  fig, axes = utils.subplot(plt, (N, outputs[0][1].shape[0]), (5, 5))
  # Reverse and pop() so axes are consumed in row-major display order.
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    locs, orig_maps, goal_loc = outputs[i]
    # A NaN goal coordinate marks a semantic-goal task, which has no fixed
    # goal location to draw.
    is_semantic = np.isnan(goal_loc[0, 0, 1])
    for j in range(orig_maps.shape[0]):
      ax = axes.pop()
      # Starting location as a yellow square.
      ax.plot(locs[j, 0, 0], locs[j, 0, 1], 'ys')
      # Plot one by one, so that they come in different colors.
      for k in range(goal_loc.shape[1]):
        if not is_semantic:
          ax.plot(goal_loc[j, k, 0], goal_loc[j, k, 1], 's')
      # Color the trajectory by time step so the direction of travel is
      # visible. (A dead `if False:` branch that drew plain red dots was
      # removed here.)
      ax.scatter(locs[j, :, 0], locs[j, :, 1], c=np.arange(locs.shape[1]),
                 cmap='jet', s=10, lw=0)
      ax.imshow(orig_maps[j, 0, :, :, 0], origin='lower', vmin=-1.0, vmax=2.0)
      # Zoom in around the trajectory (and goals), enforcing a minimum
      # half-extent of 12 so tiny trajectories are not over-magnified.
      if not is_semantic:
        xymin = np.minimum(np.min(goal_loc[j, :, :], axis=0),
                           np.min(locs[j, :, :], axis=0))
        xymax = np.maximum(np.max(goal_loc[j, :, :], axis=0),
                           np.max(locs[j, :, :], axis=0))
      else:
        xymin = np.min(locs[j, :, :], axis=0)
        xymax = np.max(locs[j, :, :], axis=0)
      xy1 = (xymax + xymin) / 2. - np.maximum(np.max(xymax - xymin), 12)
      xy2 = (xymax + xymin) / 2. + np.maximum(np.max(xymax - xymin), 12)
      ax.set_xlim([xy1[0], xy2[0]])
      ax.set_ylim([xy1[1], xy2[1]])
      ax.set_axis_off()
  file_name = os.path.join(output_dir, 'trajectory_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
  return None
def _vis(outputs, global_step, output_dir, metric_summary, N):
  """Visualize value maps, goals, occupancy and confidence.

  Plot the value map, goal for various maps to see if the model is
  learning anything useful. For each output and each map scale, shows the
  goal, occupancy, confidence, value and incremental-map images at roughly
  the midpoint of the trajectory, and saves the grid as one png.

  Args:
    outputs        : list of [values, goals, maps, occupancy, conf] tuples;
                     each array is indexed as [batch, time, H, W, scale].
    global_step    : number of iterations (used in the output file name).
    output_dir     : output directory.
    metric_summary : unused here; kept for a uniform interface with the
                     other eval functions.
    N              : number of outputs to process; negative means all.
  """
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)

  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4] * 5), (5, 5))
  # Reverse and pop() so axes are consumed in row-major display order.
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    values, goals, maps, occupancy, conf = outputs[i]
    for j in [0]:
      for k in range(values.shape[4]):
        # Display something like the midpoint of the trajectory.
        # Use the builtin int: np.int was deprecated in NumPy 1.20 and
        # removed in 1.24; also avoids shadowing the builtin id().
        mid_t = int(values.shape[1] / 2)
        ax = axes.pop()
        ax.imshow(goals[j, mid_t, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('goal')

        ax = axes.pop()
        ax.imshow(occupancy[j, mid_t, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('occupancy')

        ax = axes.pop()
        ax.imshow(conf[j, mid_t, :, :, k], origin='lower',
                  interpolation='none', vmin=0., vmax=1.)
        ax.set_axis_off()
        if i == 0: ax.set_title('conf')

        ax = axes.pop()
        ax.imshow(values[j, mid_t, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('value')

        ax = axes.pop()
        ax.imshow(maps[j, mid_t, :, :, k], origin='lower',
                  interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('incr map')

  file_name = os.path.join(output_dir, 'value_vis_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
def save_d_at_t(outputs, global_step, output_dir, metric_summary, N):
  """Save distance to goal at all time steps.

  NOTE(review): this is a duplicate of an earlier save_d_at_t definition in
  this module and shadows it; consider deleting one of the two.

  Args:
    outputs        : list whose elements are [gt_dist_to_goal] tuples;
                     gt_dist_to_goal is indexed as [episode, time, 0].
    global_step    : number of iterations (used in output file names).
    output_dir     : output directory.
    metric_summary : to append scalars to summary (unused here; kept for a
                     uniform interface with the other eval functions).
    N              : number of outputs to process (unused here).

  Returns:
    None.
  """
  # A list comprehension is required: on Python 3, map() returns a lazy
  # iterator and np.concatenate needs a sequence.
  d_at_t = np.concatenate([x[0][:, :, 0] * 1 for x in outputs], axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)
  return None
def eval_dist(outputs, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs during validation to
  1. Plot the distance over time curve.
  2. Compute mean and median distances.
  3. Plots histogram of end distances.

  Args:
    outputs        : [locs, goal_loc, gt_dist_to_goal].
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process; negative means all.

  Returns:
    Tuple of (median init dist, median end dist, mean init dist,
    mean end dist, 75th-pct init dist, 75th-pct end dist,
    init success rate %, end success rate %).
  """
  # An episode counts as a success when its distance is <= this threshold.
  SUCCESS_THRESH = 3
  if N >= 0:
    outputs = outputs[:N]

  # Plot distance to goal as a function of time step, averaged over episodes.
  d_at_t = np.concatenate(
      [gt_dist_to_goal[:, :, 0] * 1 for _, _, gt_dist_to_goal in outputs],
      axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)

  # Scatter initial vs final distance, with +/-0.5 uniform jitter so the
  # (near-integer) distances do not over-plot on top of each other.
  d_inits = np.concatenate(
      [gt_dist_to_goal[:, 0, 0] * 1 for _, _, gt_dist_to_goal in outputs],
      axis=0)
  d_ends = np.concatenate(
      [gt_dist_to_goal[:, -1, 0] * 1 for _, _, gt_dist_to_goal in outputs],
      axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(d_inits + np.random.rand(*(d_inits.shape)) - 0.5,
            d_ends + np.random.rand(*(d_ends.shape)) - 0.5,
            '.', mec='red', mew=1.0)
  axes.set_xlabel('init dist')
  axes.set_ylabel('final dist')
  axes.grid('on')
  axes.axis('equal')
  title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
  title_str = title_str.format(
      np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
      100 * (np.mean(d_ends <= SUCCESS_THRESH)))
  axes.set_title(title_str)
  file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'],
                       overwrite=True)
  plt.close(fig)

  # Plot empirical CDFs of the initial and final distances.
  # 'seaborn-white' was removed in matplotlib 3.6; fall back to the default
  # style when it is unavailable so evaluation does not crash.
  style = ('seaborn-white' if 'seaborn-white' in plt.style.available
           else 'default')
  with plt.style.context(style):
    d_ends_ = np.sort(d_ends)
    d_inits_ = np.sort(d_inits)
    leg = []
    fig, ax = utils.subplot(plt, (1, 1), (5, 5))
    ax.grid('on')
    ax.set_xlabel('Distance from goal')
    ax.xaxis.label.set_fontsize(16)
    ax.set_ylabel('Fraction of data')
    ax.yaxis.label.set_fontsize(16)
    ax.plot(d_ends_, np.arange(d_ends_.size) * 1. / d_ends_.size, 'r')
    ax.plot(d_inits_, np.arange(d_inits_.size) * 1. / d_inits_.size, 'k')
    leg.append('Final')
    leg.append('Init')
    ax.legend(leg, fontsize='x-large')
    ax.set_axis_on()
    title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
    title_str = title_str.format(
        np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
        100 * (np.mean(d_ends <= SUCCESS_THRESH)))
    ax.set_title(title_str)
    file_name = os.path.join(output_dir,
                             'dist_hist_{:d}.png'.format(global_step))
    with fu.fopen(file_name, 'w') as f:
      fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
    # Close the CDF figure too; the original leaked it.
    plt.close(fig)

  # Log distance metrics.
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ',
                                100 * (np.mean(d_inits <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ',
                                100 * (np.mean(d_ends <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ',
                                np.percentile(d_inits, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ',
                                np.percentile(d_ends, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ',
                                np.median(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ',
                                np.median(d_ends))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ',
                                np.mean(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ',
                                np.mean(d_ends))
  # Success rates are the fraction of episodes within SUCCESS_THRESH
  # (mean of the boolean mask), matching the summary values above. The
  # original returned 100*(mean distance <= thresh), which collapses the
  # rate to either 0 or 100.
  return (np.median(d_inits), np.median(d_ends),
          np.mean(d_inits), np.mean(d_ends),
          np.percentile(d_inits, q=75), np.percentile(d_ends, q=75),
          100 * (np.mean(d_inits <= SUCCESS_THRESH)),
          100 * (np.mean(d_ends <= SUCCESS_THRESH)))
def eval_dist(outputs, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs during validation to
  1. Plot the distance over time curve.
  2. Compute mean and median distances.
  3. Plots histogram of end distances.

  NOTE(review): this is a duplicate of an earlier eval_dist definition in
  this module and shadows it; consider deleting one of the two.

  Args:
    outputs        : [locs, goal_loc, gt_dist_to_goal].
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process; negative means all.

  Returns:
    Tuple of (median init dist, median end dist, mean init dist,
    mean end dist, 75th-pct init dist, 75th-pct end dist,
    init success rate %, end success rate %).
  """
  # An episode counts as a success when its distance is <= this threshold.
  SUCCESS_THRESH = 3
  if N >= 0:
    outputs = outputs[:N]

  # Plot distance to goal as a function of time step, averaged over episodes.
  d_at_t = np.concatenate(
      [gt_dist_to_goal[:, :, 0] * 1 for _, _, gt_dist_to_goal in outputs],
      axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)

  # Scatter initial vs final distance, with +/-0.5 uniform jitter so the
  # (near-integer) distances do not over-plot on top of each other.
  d_inits = np.concatenate(
      [gt_dist_to_goal[:, 0, 0] * 1 for _, _, gt_dist_to_goal in outputs],
      axis=0)
  d_ends = np.concatenate(
      [gt_dist_to_goal[:, -1, 0] * 1 for _, _, gt_dist_to_goal in outputs],
      axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(d_inits + np.random.rand(*(d_inits.shape)) - 0.5,
            d_ends + np.random.rand(*(d_ends.shape)) - 0.5,
            '.', mec='red', mew=1.0)
  axes.set_xlabel('init dist')
  axes.set_ylabel('final dist')
  axes.grid('on')
  axes.axis('equal')
  title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
  title_str = title_str.format(
      np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
      100 * (np.mean(d_ends <= SUCCESS_THRESH)))
  axes.set_title(title_str)
  file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'],
                       overwrite=True)
  plt.close(fig)

  # Plot empirical CDFs of the initial and final distances.
  # 'seaborn-white' was removed in matplotlib 3.6; fall back to the default
  # style when it is unavailable so evaluation does not crash.
  style = ('seaborn-white' if 'seaborn-white' in plt.style.available
           else 'default')
  with plt.style.context(style):
    d_ends_ = np.sort(d_ends)
    d_inits_ = np.sort(d_inits)
    leg = []
    fig, ax = utils.subplot(plt, (1, 1), (5, 5))
    ax.grid('on')
    ax.set_xlabel('Distance from goal')
    ax.xaxis.label.set_fontsize(16)
    ax.set_ylabel('Fraction of data')
    ax.yaxis.label.set_fontsize(16)
    ax.plot(d_ends_, np.arange(d_ends_.size) * 1. / d_ends_.size, 'r')
    ax.plot(d_inits_, np.arange(d_inits_.size) * 1. / d_inits_.size, 'k')
    leg.append('Final')
    leg.append('Init')
    ax.legend(leg, fontsize='x-large')
    ax.set_axis_on()
    title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
    title_str = title_str.format(
        np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
        100 * (np.mean(d_ends <= SUCCESS_THRESH)))
    ax.set_title(title_str)
    file_name = os.path.join(output_dir,
                             'dist_hist_{:d}.png'.format(global_step))
    with fu.fopen(file_name, 'w') as f:
      fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
    # Close the CDF figure too; the original leaked it.
    plt.close(fig)

  # Log distance metrics.
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ',
                                100 * (np.mean(d_inits <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ',
                                100 * (np.mean(d_ends <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ',
                                np.percentile(d_inits, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ',
                                np.percentile(d_ends, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ',
                                np.median(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ',
                                np.median(d_ends))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ',
                                np.mean(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ',
                                np.mean(d_ends))
  # Success rates are the fraction of episodes within SUCCESS_THRESH
  # (mean of the boolean mask), matching the summary values above. The
  # original returned 100*(mean distance <= thresh), which collapses the
  # rate to either 0 or 100.
  return (np.median(d_inits), np.median(d_ends),
          np.mean(d_inits), np.mean(d_ends),
          np.percentile(d_inits, q=75), np.percentile(d_ends, q=75),
          100 * (np.mean(d_inits <= SUCCESS_THRESH)),
          100 * (np.mean(d_ends <= SUCCESS_THRESH)))