def save_d_at_t(outputs, global_step, output_dir, metric_summary, N):
  """Save distance to goal at all time steps.

  Args:
    outputs        : [gt_dist_to_goal].
    global_step    : number of iterations.
    output_dir     : output directory.
    metric_summary : to append scalars to summary.
    N              : number of outputs to process.
  """
  # Use a list comprehension rather than map(): on Python 3 map() returns an
  # iterator, which np.concatenate does not accept as a sequence of arrays.
  d_at_t = np.concatenate([x[0][:, :, 0] * 1 for x in outputs], axis=0)
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  # Also dump the raw distances so they can be re-plotted later.
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)
  return None
def load_variables(pickle_file_name):
  """Load and return the dictionary stored by save_variables.

  Args:
    pickle_file_name : path of the pickle file to read.
  Returns:
    The unpickled dictionary.
  Raises:
    Exception: if the file does not exist.
  """
  if fu.exists(pickle_file_name):
    # Open in binary mode: the companion save_variables writes with a binary
    # pickle protocol, and text-mode reads corrupt it on Python 3 / Windows.
    with fu.fopen(pickle_file_name, 'rb') as f:
      d = cPickle.load(f)
    return d
  else:
    # Fixed grammar of the error message ("does not exists" -> "does not exist").
    raise Exception('{:s} does not exist.'.format(pickle_file_name))
def _vis_readout_maps(outputs, global_step, output_dir, metric_summary, N):
  """Render ground-truth and predicted readout maps side by side to a PNG.

  Args:
    outputs        : list of [gt_map, pred_map] pairs.
    global_step    : number of iterations (used in the output file name).
    output_dir     : output directory.
    metric_summary : unused by this function; kept for a uniform signature.
    N              : number of outputs to process (all if negative).
  """
  # outputs is [gt_map, pred_map]:
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)
  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*2), (5,5))
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    gt_map, pred_map = outputs[i]
    for j in [0]:
      for k in range(gt_map.shape[4]):
        # Display something like the midpoint of the trajectory.
        # Floor division replaces np.int(...), which was removed in NumPy
        # 1.24; also avoids shadowing the builtin `id`.
        t_mid = gt_map.shape[1] // 2
        ax = axes.pop()
        ax.imshow(gt_map[j,t_mid,:,:,k], origin='lower', interpolation='none',
                  vmin=0., vmax=1.)
        ax.set_axis_off()
        if i == 0: ax.set_title('gt_map')

        ax = axes.pop()
        ax.imshow(pred_map[j,t_mid,:,:,k], origin='lower', interpolation='none',
                  vmin=0., vmax=1.)
        ax.set_axis_off()
        if i == 0: ax.set_title('pred_map')

  file_name = os.path.join(output_dir, 'readout_map_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
def save_variables(pickle_file_name, var, info, overwrite = False):
  """Pickle the values in `var` under the names in `info` as a single dict.

  Args:
    pickle_file_name : destination path.
    var       : list of values to save.
    info      : list of names, parallel to `var` (info[i] keys var[i]).
    overwrite : if False, refuse to clobber an existing file.
  Raises:
    Exception: if the file exists and overwrite is False.
  """
  if fu.exists(pickle_file_name) and overwrite == False:
    raise Exception('{:s} exists and over write is false.'.format(pickle_file_name))
  # Construct the dictionary
  assert(type(var) == list); assert(type(info) == list);
  d = {}
  # range() replaces the Python-2-only xrange().
  for i in range(len(var)):
    d[info[i]] = var[i]
  # 'wb': HIGHEST_PROTOCOL is a binary format; writing it through a
  # text-mode handle corrupts it on Python 3 / Windows.
  with fu.fopen(pickle_file_name, 'wb') as f:
    cPickle.dump(d, f, cPickle.HIGHEST_PROTOCOL)
def save_variables(pickle_file_name, var, info, overwrite = False):
  """Write the values in `var`, keyed by the names in `info`, to one pickle.

  Refuses to overwrite an existing file unless `overwrite` is True.
  """
  already_there = fu.exists(pickle_file_name)
  if already_there and overwrite == False:
    raise Exception('{:s} exists and over write is false.'.format(pickle_file_name))
  # Inputs must be parallel lists: info[i] names var[i].
  assert(type(var) == list)
  assert(type(info) == list)
  payload = {}
  for idx, value in enumerate(var):
    payload[info[idx]] = value
  with fu.fopen(pickle_file_name, 'wb') as f:
    pickle.dump(payload, f, pickle.HIGHEST_PROTOCOL)
def plot_trajectories(outputs, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs during validation to plot the
  trajectories in the top view.

  Args:
    outputs        : [locs, orig_maps, goal_loc].
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process.
  """
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)
  plt.set_cmap('gray')
  fig, axes = utils.subplot(plt, (N, outputs[0][1].shape[0]), (5,5))
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    locs, orig_maps, goal_loc = outputs[i]
    # A NaN goal coordinate marks a semantic-goal episode (no point goal).
    is_semantic = np.isnan(goal_loc[0,0,1])
    for j in range(orig_maps.shape[0]):
      ax = axes.pop()
      ax.plot(locs[j,0,0], locs[j,0,1], 'ys')
      # Plot one by one, so that they come in different colors.
      for k in range(goal_loc.shape[1]):
        if not is_semantic:
          ax.plot(goal_loc[j,k,0], goal_loc[j,k,1], 's')
      # Removed a dead `if False:` branch (red-dot rendering); the scatter
      # below was the only reachable rendering path.
      ax.scatter(locs[j,:,0], locs[j,:,1],
                 c=np.arange(locs.shape[1]), cmap='jet', s=10, lw=0)
      ax.imshow(orig_maps[j,0,:,:,0], origin='lower', vmin=-1.0, vmax=2.0)
      # Zoom the view to a box around the trajectory (and goals, if any).
      if not is_semantic:
        xymin = np.minimum(np.min(goal_loc[j,:,:], axis=0),
                           np.min(locs[j,:,:], axis=0))
        xymax = np.maximum(np.max(goal_loc[j,:,:], axis=0),
                           np.max(locs[j,:,:], axis=0))
      else:
        xymin = np.min(locs[j,:,:], axis=0)
        xymax = np.max(locs[j,:,:], axis=0)
      xy1 = (xymax+xymin)/2. - np.maximum(np.max(xymax-xymin), 12)
      xy2 = (xymax+xymin)/2. + np.maximum(np.max(xymax-xymin), 12)
      ax.set_xlim([xy1[0], xy2[0]])
      ax.set_ylim([xy1[1], xy2[1]])
      ax.set_axis_off()
  file_name = os.path.join(output_dir, 'trajectory_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
  return None
def save_variables(pickle_file_name, var, info, overwrite=False):
  """Pickle the values in `var` as a dict keyed by the names in `info`.

  Raises if the destination exists and `overwrite` is False; names in
  `info` must all be strings.
  """
  destination_taken = fu.exists(pickle_file_name)
  if destination_taken and overwrite == False:
    raise Exception(
        '{:s} exists and over write is false.'.format(pickle_file_name))
  # Validate inputs: two parallel lists, with string names.
  assert (type(var) == list)
  assert (type(info) == list)
  assert all(type(t) == str for t in info), 'variable names are not strings'
  to_dump = {info[idx]: value for idx, value in enumerate(var)}
  with fu.fopen(pickle_file_name, 'wb') as f:
    cPickle.dump(to_dump, f)
def _vis(outputs, global_step, output_dir, metric_summary, N):
  """Render goal, occupancy, confidence, value and incremental maps to a PNG.

  Args:
    outputs        : list of [values, goals, maps, occupancy, conf] tuples.
    global_step    : number of iterations (used in the output file name).
    output_dir     : output directory.
    metric_summary : unused by this function; kept for a uniform signature.
    N              : number of outputs to process (all if negative).
  """
  # Plot the value map, goal for various maps to see what if the model is
  # learning anything useful.
  #
  # outputs is [values, goals, maps, occupancy, conf].
  #
  if N >= 0:
    outputs = outputs[:N]
  N = len(outputs)
  plt.set_cmap('jet')
  fig, axes = utils.subplot(plt, (N, outputs[0][0].shape[4]*5), (5,5))
  axes = axes.ravel()[::-1].tolist()
  for i in range(N):
    values, goals, maps, occupancy, conf = outputs[i]
    for j in [0]:
      for k in range(values.shape[4]):
        # Display something like the midpoint of the trajectory.
        # Floor division replaces np.int(...), which was removed in NumPy
        # 1.24; also avoids shadowing the builtin `id`.
        t_mid = values.shape[1] // 2
        ax = axes.pop()
        ax.imshow(goals[j,t_mid,:,:,k], origin='lower', interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('goal')

        ax = axes.pop()
        ax.imshow(occupancy[j,t_mid,:,:,k], origin='lower', interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('occupancy')

        ax = axes.pop()
        ax.imshow(conf[j,t_mid,:,:,k], origin='lower', interpolation='none',
                  vmin=0., vmax=1.)
        ax.set_axis_off()
        if i == 0: ax.set_title('conf')

        ax = axes.pop()
        ax.imshow(values[j,t_mid,:,:,k], origin='lower', interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('value')

        ax = axes.pop()
        ax.imshow(maps[j,t_mid,:,:,k], origin='lower', interpolation='none')
        ax.set_axis_off()
        if i == 0: ax.set_title('incr map')

  file_name = os.path.join(output_dir, 'value_vis_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  plt.close(fig)
def plot_trajectory(dt, hardness, orig_maps, out_dir):
  """Dump per-episode arrays to a pickle and render each trajectory on the map.

  Args:
    dt        : dict with (at least) 'all_locs' and 'all_goal_locs' arrays.
    hardness  : per-episode hardness values; stored into dt before saving.
    orig_maps : top-view map image to draw trajectories over.
    out_dir   : base output directory; a config/imset subdirectory is created.
  """
  out_dir = os.path.join(out_dir, FLAGS.config_name+_get_suffix_str(),
                         FLAGS.imset)
  fu.makedirs(out_dir)
  out_file = os.path.join(out_dir, 'all_locs_at_t.pkl')
  dt['hardness'] = hardness
  # save_variables asserts its arguments are plain lists; on Python 3
  # dict.values()/dict.keys() are views, so materialize them explicitly
  # (both iterate in the same dict order, keeping names aligned to values).
  utils.save_variables(out_file, list(dt.values()), list(dt.keys()),
                       overwrite=True)

  # Plot trajectories onto the maps
  plt.set_cmap('gray')
  for i in range(4000):  # NOTE(review): hard-coded episode count — confirm.
    goal_loc = dt['all_goal_locs'][i, :, :]
    locs = np.concatenate((dt['all_locs'][i,:,:],
                           dt['all_locs'][i,:,:]), axis=0)
    # View box around the trajectory, at least 24 units half-width.
    xymin = np.minimum(np.min(goal_loc, axis=0), np.min(locs, axis=0))
    xymax = np.maximum(np.max(goal_loc, axis=0), np.max(locs, axis=0))
    xy1 = (xymax+xymin)/2. - 1.*np.maximum(np.max(xymax-xymin), 24)
    xy2 = (xymax+xymin)/2. + 1.*np.maximum(np.max(xymax-xymin), 24)

    fig, ax = utils.tight_imshow_figure(plt, figsize=(6,6))
    ax.set_axis_on()
    ax.patch.set_facecolor((0.333, 0.333, 0.333))
    ax.set_xticks([])
    ax.set_yticks([])

    all_locs = dt['all_locs'][i,:,:]*1
    # Keep only the time steps where the position changed (plus the start).
    uniq = np.where(np.any(all_locs[1:,:] != all_locs[:-1,:], axis=1))[0]+1
    uniq = np.sort(uniq).tolist()
    uniq.insert(0, 0)
    uniq = np.array(uniq)
    all_locs = all_locs[uniq, :]

    # Start (blue dot), goal (green star), then the trajectory itself.
    ax.plot(dt['all_locs'][i, 0, 0], dt['all_locs'][i, 0, 1],
            'b.', markersize=24)
    ax.plot(dt['all_goal_locs'][i, 0, 0], dt['all_goal_locs'][i, 0, 1],
            'g*', markersize=19)
    ax.plot(all_locs[:,0], all_locs[:,1], 'r', alpha=0.4, linewidth=2)
    ax.scatter(all_locs[:,0], all_locs[:,1],
               c=5+np.arange(all_locs.shape[0])*1./all_locs.shape[0],
               cmap='Reds', s=30, linewidth=0)
    ax.imshow(orig_maps, origin='lower', vmin=-1.0, vmax=2.0, aspect='equal')
    ax.set_xlim([xy1[0], xy2[0]])
    ax.set_ylim([xy1[1], xy2[1]])

    file_name = os.path.join(out_dir, 'trajectory_{:04d}.png'.format(i))
    print(file_name)
    with fu.fopen(file_name, 'w') as f:
      plt.savefig(f)
    plt.close(fig)
def get_meta_data(self, file_name, data_dir=None):
  """Load a metadata file from the dataset's 'meta' directory.

  Args:
    file_name : name of the file inside the 'meta' subdirectory.
    data_dir  : base data directory; defaults to self.get_data_dir().
  Returns:
    A list of stripped lines for '.txt' files, or the unpickled contents
    for '.pkl' files.
  Raises:
    ValueError: if the file extension is neither '.txt' nor '.pkl'.
  """
  if data_dir is None:
    data_dir = self.get_data_dir()
  full_file_name = os.path.join(data_dir, 'meta', file_name)
  assert(fu.exists(full_file_name)), \
    '{:s} does not exist'.format(full_file_name)
  ext = os.path.splitext(full_file_name)[1]
  if ext == '.txt':
    ls = []
    with fu.fopen(full_file_name, 'r') as f:
      for l in f:
        ls.append(l.rstrip())
  elif ext == '.pkl':
    ls = utils.load_variables(full_file_name)
  else:
    # Previously an unknown extension fell through and crashed with an
    # UnboundLocalError on `ls`; fail explicitly instead.
    raise ValueError(
        'Unsupported extension {:s} for {:s}.'.format(ext, full_file_name))
  return ls
def save_d_at_t(outputs, global_step, output_dir, metric_summary, N):
  """Save distance to goal at all time steps.

  Args:
    outputs        : [gt_dist_to_goal].
    global_step    : number of iterations.
    output_dir     : output directory.
    metric_summary : to append scalars to summary.
    N              : number of outputs to process.
  """
  # Use a list comprehension rather than map(): on Python 3 map() returns an
  # iterator, which np.concatenate does not accept as a sequence of arrays.
  d_at_t = np.concatenate([x[0][:,:,0]*1 for x in outputs], axis=0)
  fig, axes = utils.subplot(plt, (1,1), (5,5))
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  # Also dump the raw distances so they can be re-plotted later.
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)
  return None
def eval_dist(outputs, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs during validation to
  1. Plot the distance over time curve.
  2. Compute mean and median distances.
  3. Plots histogram of end distances.

  Args:
    outputs        : [locs, goal_loc, gt_dist_to_goal].
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process.

  Returns:
    Tuple of (median, mean, 75th-percentile, success-rate) statistics for
    initial and final distances, interleaved init/end.
  """
  # An episode counts as a success when the final distance is at or below
  # this threshold.
  SUCCESS_THRESH = 3
  if N >= 0:
    outputs = outputs[:N]

  # Plot distance at time t.
  # `* 1` forces a copy so the collected outputs are not aliased.
  d_at_t = []
  for i in range(len(outputs)):
    locs, goal_loc, gt_dist_to_goal = outputs[i]
    d_at_t.append(gt_dist_to_goal[:, :, 0] * 1)

  # Plot the distance.
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  d_at_t = np.concatenate(d_at_t, axis=0)
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  # Dump raw distances alongside the figure for later re-plotting.
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)

  # Plot the trajectories and the init_distance and final distance.
  d_inits = []
  d_ends = []
  for i in range(len(outputs)):
    locs, goal_loc, gt_dist_to_goal = outputs[i]
    d_inits.append(gt_dist_to_goal[:, 0, 0] * 1)
    d_ends.append(gt_dist_to_goal[:, -1, 0] * 1)

  # Plot the distance.
  fig, axes = utils.subplot(plt, (1, 1), (5, 5))
  d_inits = np.concatenate(d_inits, axis=0)
  d_ends = np.concatenate(d_ends, axis=0)
  # Add +/-0.5 jitter so overlapping integer distances remain visible.
  axes.plot(d_inits + np.random.rand(*(d_inits.shape)) - 0.5,
            d_ends + np.random.rand(*(d_ends.shape)) - 0.5,
            '.', mec='red', mew=1.0)
  axes.set_xlabel('init dist')
  axes.set_ylabel('final dist')
  axes.grid('on')
  axes.axis('equal')
  title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
  title_str = title_str.format(np.mean(d_ends), np.median(d_ends),
                               np.percentile(d_ends, q=75),
                               100 * (np.mean(d_ends <= SUCCESS_THRESH)))
  axes.set_title(title_str)
  file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'],
                       overwrite=True)
  plt.close(fig)

  # Plot the histogram of the end_distance.
  # (Actually a CDF: sorted distances against cumulative fraction of data.)
  with plt.style.context('seaborn-white'):
    d_ends_ = np.sort(d_ends)
    d_inits_ = np.sort(d_inits)
    leg = []
    fig, ax = utils.subplot(plt, (1, 1), (5, 5))
    ax.grid('on')
    ax.set_xlabel('Distance from goal')
    ax.xaxis.label.set_fontsize(16)
    ax.set_ylabel('Fraction of data')
    ax.yaxis.label.set_fontsize(16)
    ax.plot(d_ends_, np.arange(d_ends_.size) * 1. / d_ends_.size, 'r')
    ax.plot(d_inits_, np.arange(d_inits_.size) * 1. / d_inits_.size, 'k')
    leg.append('Final')
    leg.append('Init')
    ax.legend(leg, fontsize='x-large')
    ax.set_axis_on()
    title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
    title_str = title_str.format(np.mean(d_ends), np.median(d_ends),
                                 np.percentile(d_ends, q=75),
                                 100 * (np.mean(d_ends <= SUCCESS_THRESH)))
    ax.set_title(title_str)
    file_name = os.path.join(output_dir,
                             'dist_hist_{:d}.png'.format(global_step))
    with fu.fopen(file_name, 'w') as f:
      fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)

  # Log distance metrics.
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ',
                                100 * (np.mean(d_inits <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ',
                                100 * (np.mean(d_ends <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ',
                                np.percentile(d_inits, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ',
                                np.percentile(d_ends, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ',
                                np.median(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ',
                                np.median(d_ends))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ',
                                np.mean(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ',
                                np.mean(d_ends))
  # NOTE(review): the last two values apply the threshold to the mean
  # distance, unlike the per-episode success logged above — confirm intended.
  return np.median(d_inits), np.median(d_ends), np.mean(d_inits), np.mean(d_ends), \
      np.percentile(d_inits, q=75), np.percentile(d_ends, q=75), \
      100*(np.mean(d_inits) <= SUCCESS_THRESH), 100*(np.mean(d_ends) <= SUCCESS_THRESH)
def eval_dist(outputs, global_step, output_dir, metric_summary, N):
  """Processes the collected outputs during validation to
  1. Plot the distance over time curve.
  2. Compute mean and median distances.
  3. Plots histogram of end distances.

  Args:
    outputs        : [locs, goal_loc, gt_dist_to_goal].
    global_step    : global_step.
    output_dir     : where to store results.
    metric_summary : summary object to add summaries to.
    N              : number of outputs to process.

  Returns:
    Tuple of (median, mean, 75th-percentile, success-rate) statistics for
    initial and final distances, interleaved init/end.
  """
  # An episode counts as a success when the final distance is at or below
  # this threshold.
  SUCCESS_THRESH = 3
  if N >= 0:
    outputs = outputs[:N]

  # Plot distance at time t.
  # `*1` forces a copy so the collected outputs are not aliased.
  d_at_t = []
  for i in range(len(outputs)):
    locs, goal_loc, gt_dist_to_goal = outputs[i]
    d_at_t.append(gt_dist_to_goal[:,:,0]*1)

  # Plot the distance.
  fig, axes = utils.subplot(plt, (1,1), (5,5))
  d_at_t = np.concatenate(d_at_t, axis=0)
  axes.plot(np.arange(d_at_t.shape[1]), np.mean(d_at_t, axis=0), 'r.')
  axes.set_xlabel('time step')
  axes.set_ylabel('dist to next goal')
  axes.grid('on')
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  # Dump raw distances alongside the figure for later re-plotting.
  file_name = os.path.join(output_dir, 'dist_at_t_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_at_t], ['d_at_t'], overwrite=True)
  plt.close(fig)

  # Plot the trajectories and the init_distance and final distance.
  d_inits = []
  d_ends = []
  for i in range(len(outputs)):
    locs, goal_loc, gt_dist_to_goal = outputs[i]
    d_inits.append(gt_dist_to_goal[:,0,0]*1)
    d_ends.append(gt_dist_to_goal[:,-1,0]*1)

  # Plot the distance.
  fig, axes = utils.subplot(plt, (1,1), (5,5))
  d_inits = np.concatenate(d_inits, axis=0)
  d_ends = np.concatenate(d_ends, axis=0)
  # Add +/-0.5 jitter so overlapping integer distances remain visible.
  axes.plot(d_inits+np.random.rand(*(d_inits.shape))-0.5,
            d_ends+np.random.rand(*(d_ends.shape))-0.5, '.', mec='red', mew=1.0)
  axes.set_xlabel('init dist'); axes.set_ylabel('final dist');
  axes.grid('on'); axes.axis('equal');
  title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
  title_str = title_str.format(
      np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
      100*(np.mean(d_ends <= SUCCESS_THRESH)))
  axes.set_title(title_str)
  file_name = os.path.join(output_dir, 'dist_{:d}.png'.format(global_step))
  with fu.fopen(file_name, 'w') as f:
    fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)
  file_name = os.path.join(output_dir, 'dist_{:d}.pkl'.format(global_step))
  utils.save_variables(file_name, [d_inits, d_ends], ['d_inits', 'd_ends'],
                       overwrite=True)
  plt.close(fig)

  # Plot the histogram of the end_distance.
  # (Actually a CDF: sorted distances against cumulative fraction of data.)
  with plt.style.context('seaborn-white'):
    d_ends_ = np.sort(d_ends)
    d_inits_ = np.sort(d_inits)
    leg = [];
    fig, ax = utils.subplot(plt, (1,1), (5,5))
    ax.grid('on')
    ax.set_xlabel('Distance from goal'); ax.xaxis.label.set_fontsize(16);
    ax.set_ylabel('Fraction of data'); ax.yaxis.label.set_fontsize(16);
    ax.plot(d_ends_, np.arange(d_ends_.size)*1./d_ends_.size, 'r')
    ax.plot(d_inits_, np.arange(d_inits_.size)*1./d_inits_.size, 'k')
    leg.append('Final'); leg.append('Init');
    ax.legend(leg, fontsize='x-large');
    ax.set_axis_on()
    title_str = 'mean: {:0.1f}, 50: {:0.1f}, 75: {:0.2f}, s: {:0.1f}'
    title_str = title_str.format(
        np.mean(d_ends), np.median(d_ends), np.percentile(d_ends, q=75),
        100*(np.mean(d_ends <= SUCCESS_THRESH)))
    ax.set_title(title_str)
    file_name = os.path.join(output_dir, 'dist_hist_{:d}.png'.format(global_step))
    with fu.fopen(file_name, 'w') as f:
      fig.savefig(f, bbox_inches='tight', transparent=True, pad_inches=0)

  # Log distance metrics.
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_init: ',
                                100*(np.mean(d_inits <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/success_end: ',
                                100*(np.mean(d_ends <= SUCCESS_THRESH)))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (75): ',
                                np.percentile(d_inits, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (75): ',
                                np.percentile(d_ends, q=75))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (median): ',
                                np.median(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (median): ',
                                np.median(d_ends))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_init (mean): ',
                                np.mean(d_inits))
  tf_utils.add_value_to_summary(metric_summary, 'dists/dist_end (mean): ',
                                np.mean(d_ends))
  # NOTE(review): the last two values apply the threshold to the mean
  # distance, unlike the per-episode success logged above — confirm intended.
  return np.median(d_inits), np.median(d_ends), np.mean(d_inits), np.mean(d_ends), \
      np.percentile(d_inits, q=75), np.percentile(d_ends, q=75), \
      100*(np.mean(d_inits) <= SUCCESS_THRESH), 100*(np.mean(d_ends) <= SUCCESS_THRESH)