def show(self, filename=None, dpi=100):
    """Show the current figure, or save it under the logging directory.

    Parameters
    ----------
    filename : str
        Name of the output file, relative to the logging directory.
        Only used when a logging directory is configured; must not be
        `None` in that case.
    dpi : int
        Resolution in dots per inch used when saving the figure.
    """
    if self._logging_dir is None:
        # No logging directory configured: display interactively.
        vis.show()
    else:
        # BUGFIX: the guard above checks `_logging_dir`, but the path
        # was previously joined against `_policy_dir` — wrong (or a
        # crash) whenever the two differ or `_policy_dir` is unset.
        # Save under the directory that was actually checked.
        filename = os.path.join(self._logging_dir, filename)
        vis.savefig(filename, dpi=dpi)
def _visualize_affordance_map(self,
                              preds,
                              depth_im,
                              scale,
                              plot_max=True,
                              output_dir=None):
    """Overlay the network's grasp-affordance predictions on the depth image.

    Parameters
    ----------
    preds : array-like
        Raw network predictions; channel 0 of the first batch element is
        used as the affordance map.
    depth_im : image
        Depth image the predictions were computed from.
    scale : float
        Scale factor for the argmax marker size.
    plot_max : bool
        If True, mark the pixel with the highest affordance.
    output_dir : str or None
        If given, save the figure there; otherwise show it interactively.
    """
    self._logger.info('Visualizing affordance map...')

    heat_map = preds[0, ..., 0]

    # Crop away the receptive-field border, then downsample by the
    # network stride so the map and the image line up pixel-for-pixel.
    crop_h = depth_im.shape[0] - self._gqcnn_recep_h
    crop_w = depth_im.shape[1] - self._gqcnn_recep_w
    aligned_depth = depth_im.crop(crop_h,
                                  crop_w).resize(1.0 / self._gqcnn_stride)

    # Render depth image with the affordance heat map blended on top.
    vis.figure()
    vis.imshow(aligned_depth)
    plt.imshow(heat_map,
               cmap=plt.cm.RdYlGn,
               alpha=0.3,
               vmin=0.0,
               vmax=1.0)
    if plot_max:
        best_px = np.unravel_index(np.argmax(heat_map), heat_map.shape)
        # (row, col) -> scatter wants x=col, y=row.
        plt.scatter(best_px[1],
                    best_px[0],
                    c='black',
                    marker='.',
                    s=scale * 25)
    vis.title('Grasp Affordance Map')

    if output_dir is None:
        vis.show()
    else:
        vis.savefig(os.path.join(output_dir, 'grasp_affordance_map.png'))
def visualization(self, action, rgbd_im, offset, scale):
    """Render the planned grasp over the depth image and save it.

    Does nothing unless the `final_grasp` visualization flag is enabled
    in the policy config. The title reports the grasp depth rescaled by
    `scale` and `offset`, together with the predicted grasp quality.
    """
    vis_cfg = self.policy_config["vis"]
    if not vis_cfg["final_grasp"]:
        return

    vis.figure(size=(10, 10))
    vis.imshow(rgbd_im.depth,
               vmin=vis_cfg["vmin"],
               vmax=vis_cfg["vmax"])
    vis.grasp(action.grasp, scale=2.5, show_center=True, show_axis=True)
    depth_m = action.grasp.depth * scale + offset
    vis.title("Planned grasp at depth {0:.3f}m with Q={1:.3f}".format(
        depth_m, action.q_value))
    # NOTE(review): hard-coded relative output path — assumes the
    # process CWD contains `test_dataset/`; confirm with callers.
    vis.savefig('test_dataset/grasp.png')
def _visualize_2d(self,
                  actions,
                  preds,
                  wrapped_depth_im,
                  num_actions,
                  scale,
                  show_axis,
                  output_dir=None):
    """Draw the top grasp candidates in 2D over the depth image.

    Each of the first `num_actions` entries of `actions` is drawn,
    colored by its predicted quality (`q_value`) on the RdYlGn scale.
    The figure is saved to `output_dir` when given, otherwise shown
    interactively. (`preds` is accepted for interface parity but not
    used here.)
    """
    self._logger.info("Visualizing actions in 2d...")

    vis.figure()
    vis.imshow(wrapped_depth_im)
    for rank in range(num_actions):
        candidate = actions[rank]
        vis.grasp(candidate.grasp,
                  scale=scale,
                  show_axis=show_axis,
                  color=plt.cm.RdYlGn(candidate.q_value))
    vis.title("Top {} Grasps".format(num_actions))

    if output_dir is None:
        vis.show()
    else:
        vis.savefig(os.path.join(output_dir, "top_grasps.png"))
policy = CrossEntropyRobustGraspingPolicy(policy_config) else: raise ValueError('Invalid policy type: {}'.format(policy_type)) pathDataset = "/home/lvianell/Desktop/Lorenzo_report/datasets/dexnet_maskcnn_human/" #creo dataset: pngCounter = 0 #len(glob.glob1(pathDataset,"*.txt")) # save depth image in /home/lvianell/Desktop/gqcnn/data/dataset_human_demostration vis.figure(size=(10, 10)) vis.imshow(rgbd_im.depth) #, #vmin=policy_config['vis']['vmin'], #vmax=policy_config['vis']['vmax']) #vis.savefig(pathDataset+"depth_im_"+str(pngCounter)) vis.savefig(path_depth_png) vis.title("DepthImage") vis.show() print path_grasp_position text_file = open( path_grasp_position, "w") #open(pathDataset+"grasps_"+str(pngCounter)+".txt", "w") text_file.write("[center_x, center_y]" + "\t" + "angle" + "\t" + "depth" + "\t" + "width" + "\t" + "Qvalue" + "\t" + "human_evaluation" + "\n") flag_human = "y" #raw_input("Do you want to use human contribute? [y/N]") # query policy policy_start = time.time()
def _plot(self, model_dir, model_output_dir, train_result, val_result):
    """Plot analysis curves.

    Writes precision-recall/ROC curves and prediction-error histograms
    for the given train/val classification results, and — when the
    training logs exist under `model_dir` — error-rate and loss curves
    over training iterations.

    Parameters
    ----------
    model_dir : str
        Directory holding the trained model's logged error/loss arrays.
    model_output_dir : str
        Directory all figures are written to.
    train_result, val_result :
        Classification results (presumably `BinaryClassificationResult`
        instances — they expose `precision_recall_curve`, `roc_curve`,
        `labels`, `pred_probs`, `num_datapoints`, `error_rate`).

    Returns
    -------
    tuple or None
        `(init_train_error, final_train_error, init_train_loss,
        final_train_loss, pct_pos_val, final_val_error,
        norm_final_val_error)`, or None if plotting the training curves
        failed (the exception is logged, not raised).
    """
    self.logger.info("Plotting")

    # NOTE(review): `model_name` is computed but never used below.
    _, model_name = os.path.split(model_output_dir)

    # Set params.
    colors = ["g", "b", "c", "y", "m", "r"]
    styles = ["-", "--", "-.", ":", "-"]

    # PR, ROC.
    vis2d.clf()
    train_result.precision_recall_curve(plot=True,
                                        line_width=self.line_width,
                                        color=colors[0],
                                        style=styles[0],
                                        label="TRAIN")
    val_result.precision_recall_curve(plot=True,
                                      line_width=self.line_width,
                                      color=colors[1],
                                      style=styles[1],
                                      label="VAL")
    vis2d.title("Precision Recall Curves", fontsize=self.font_size)
    handles, labels = vis2d.gca().get_legend_handles_labels()
    vis2d.legend(handles, labels, loc="best")
    figname = os.path.join(model_output_dir, "precision_recall.png")
    vis2d.savefig(figname, dpi=self.dpi)

    vis2d.clf()
    train_result.roc_curve(plot=True,
                           line_width=self.line_width,
                           color=colors[0],
                           style=styles[0],
                           label="TRAIN")
    val_result.roc_curve(plot=True,
                         line_width=self.line_width,
                         color=colors[1],
                         style=styles[1],
                         label="VAL")
    # (Title misspells "Receiver" — kept as-is; it is a runtime string.)
    vis2d.title("Reciever Operating Characteristic",
                fontsize=self.font_size)
    handles, labels = vis2d.gca().get_legend_handles_labels()
    vis2d.legend(handles, labels, loc="best")
    figname = os.path.join(model_output_dir, "roc.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Plot histogram of prediction errors.
    num_bins = min(self.num_bins, train_result.num_datapoints)

    # Train positives.
    pos_ind = np.where(train_result.labels == 1)[0]
    diffs = np.abs(train_result.labels[pos_ind] -
                   train_result.pred_probs[pos_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0, 1),
                    normalized=False,
                    plot=True)
    vis2d.title("Error on Positive Training Examples",
                fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           "pos_train_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Train negatives.
    neg_ind = np.where(train_result.labels == 0)[0]
    diffs = np.abs(train_result.labels[neg_ind] -
                   train_result.pred_probs[neg_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0, 1),
                    normalized=False,
                    plot=True)
    vis2d.title("Error on Negative Training Examples",
                fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           "neg_train_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Histogram of validation errors.
    num_bins = min(self.num_bins, val_result.num_datapoints)

    # Val positives.
    pos_ind = np.where(val_result.labels == 1)[0]
    diffs = np.abs(val_result.labels[pos_ind] -
                   val_result.pred_probs[pos_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0, 1),
                    normalized=False,
                    plot=True)
    vis2d.title("Error on Positive Validation Examples",
                fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           "pos_val_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Val negatives.
    neg_ind = np.where(val_result.labels == 0)[0]
    diffs = np.abs(val_result.labels[neg_ind] -
                   val_result.pred_probs[neg_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0, 1),
                    normalized=False,
                    plot=True)
    vis2d.title("Error on Negative Validation Examples",
                fontsize=self.font_size)
    vis2d.xlabel("Abs Prediction Error", fontsize=self.font_size)
    vis2d.ylabel("Count", fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           "neg_val_errors_histogram.png")
    vis2d.savefig(figname, dpi=self.dpi)

    # Losses.
    try:
        train_errors_filename = os.path.join(model_dir,
                                             GQCNNFilenames.TRAIN_ERRORS)
        val_errors_filename = os.path.join(model_dir,
                                           GQCNNFilenames.VAL_ERRORS)
        val_iters_filename = os.path.join(model_dir,
                                          GQCNNFilenames.VAL_ITERS)
        pct_pos_val_filename = os.path.join(model_dir,
                                            GQCNNFilenames.PCT_POS_VAL)
        train_losses_filename = os.path.join(model_dir,
                                             GQCNNFilenames.TRAIN_LOSSES)
        raw_train_errors = np.load(train_errors_filename)
        val_errors = np.load(val_errors_filename)
        val_iters = np.load(val_iters_filename)
        # Fall back to the first logged val error when the pct-positive
        # file is missing.
        pct_pos_val = float(val_errors[0])
        if os.path.exists(pct_pos_val_filename):
            pct_pos_val = 100.0 * np.load(pct_pos_val_filename)
        raw_train_losses = np.load(train_losses_filename)

        # Prepend iteration 0 with the baseline error (pct positives).
        val_errors = np.r_[pct_pos_val, val_errors]
        val_iters = np.r_[0, val_iters]

        # Window the training error: average errors/losses over
        # consecutive chunks of WINDOW iterations to smooth the curves.
        i = 0
        train_errors = []
        train_losses = []
        train_iters = []
        while i < raw_train_errors.shape[0]:
            train_errors.append(np.mean(raw_train_errors[i:i + WINDOW]))
            train_losses.append(np.mean(raw_train_losses[i:i + WINDOW]))
            train_iters.append(i)
            i += WINDOW
        train_errors = np.array(train_errors)
        train_losses = np.array(train_losses)
        train_iters = np.array(train_iters)

        # Normalize error curves by the initial validation error.
        init_val_error = val_errors[0]
        norm_train_errors = train_errors / init_val_error
        norm_val_errors = val_errors / init_val_error
        norm_final_val_error = val_result.error_rate / val_errors[0]
        if pct_pos_val > 0:
            norm_final_val_error = val_result.error_rate / pct_pos_val

        # Raw error-rate curves.
        vis2d.clf()
        vis2d.plot(train_iters,
                   train_errors,
                   linewidth=self.line_width,
                   color="b")
        vis2d.plot(val_iters,
                   val_errors,
                   linewidth=self.line_width,
                   color="g")
        vis2d.ylim(0, 100)
        vis2d.legend(("TRAIN (Minibatch)", "VAL"),
                     fontsize=self.font_size,
                     loc="best")
        vis2d.xlabel("Iteration", fontsize=self.font_size)
        vis2d.ylabel("Error Rate", fontsize=self.font_size)
        vis2d.title("Error Rate vs Training Iteration",
                    fontsize=self.font_size)
        figname = os.path.join(model_output_dir,
                               "training_error_rates.png")
        vis2d.savefig(figname, dpi=self.dpi)

        # Normalized error-rate curves.
        vis2d.clf()
        vis2d.plot(train_iters, norm_train_errors, linewidth=4, color="b")
        vis2d.plot(val_iters, norm_val_errors, linewidth=4, color="g")
        vis2d.ylim(0, 2.0)
        vis2d.legend(("TRAIN (Minibatch)", "VAL"),
                     fontsize=self.font_size,
                     loc="best")
        vis2d.xlabel("Iteration", fontsize=self.font_size)
        vis2d.ylabel("Normalized Error Rate", fontsize=self.font_size)
        vis2d.title("Normalized Error Rate vs Training Iteration",
                    fontsize=self.font_size)
        figname = os.path.join(model_output_dir,
                               "training_norm_error_rates.png")
        vis2d.savefig(figname, dpi=self.dpi)

        # Loss curve (clipped at MAX_LOSS so outliers don't flatten it).
        train_losses[train_losses > MAX_LOSS] = MAX_LOSS  # CAP LOSSES.
        vis2d.clf()
        vis2d.plot(train_iters,
                   train_losses,
                   linewidth=self.line_width,
                   color="b")
        vis2d.ylim(0, 2.0)
        vis2d.xlabel("Iteration", fontsize=self.font_size)
        vis2d.ylabel("Loss", fontsize=self.font_size)
        vis2d.title("Training Loss vs Iteration", fontsize=self.font_size)
        figname = os.path.join(model_output_dir, "training_losses.png")
        vis2d.savefig(figname, dpi=self.dpi)

        # Log.
        self.logger.info("TRAIN")
        self.logger.info("Original error: %.3f" % (train_errors[0]))
        self.logger.info("Final error: %.3f" % (train_result.error_rate))
        self.logger.info("Orig loss: %.3f" % (train_losses[0]))
        self.logger.info("Final loss: %.3f" % (train_losses[-1]))

        self.logger.info("VAL")
        self.logger.info("Original error: %.3f" % (pct_pos_val))
        self.logger.info("Final error: %.3f" % (val_result.error_rate))
        self.logger.info("Normalized error: %.3f" % (norm_final_val_error))

        return (train_errors[0], train_result.error_rate, train_losses[0],
                train_losses[-1], pct_pos_val, val_result.error_rate,
                norm_final_val_error)
    except Exception as e:
        # NOTE(review): swallows all failures (missing log files etc.)
        # and implicitly returns None; callers must tolerate that.
        self.logger.error("Failed to plot training curves!\n" + str(e))
def _run_prediction_single_model(self, model_dir, model_output_dir,
                                 dataset_config):
    """Analyze the performance of a single model.

    Loads a trained GQ-CNN from `model_dir`, runs it over every tensor
    of its (or the override) dataset, builds train/val binary
    classification results, and writes result files, example images,
    and summary-statistic JSON files under `model_output_dir`.

    Parameters
    ----------
    model_dir : str
        Directory containing the saved model and its config JSON.
    model_output_dir : str
        Directory analysis artifacts are written to.
    dataset_config : dict or None
        If None, dataset parameters are read from the model's own saved
        config; otherwise they are taken from this dict (which must also
        supply `gripper_mode`).

    Returns
    -------
    (train_result, val_result)
        `BinaryClassificationResult` instances for the train and val
        splits.
    """
    # Read in model config.
    model_config_filename = os.path.join(model_dir,
                                         GQCNNFilenames.SAVED_CFG)
    with open(model_config_filename) as data_file:
        model_config = json.load(data_file)

    # Load model.
    self.logger.info("Loading model %s" % (model_dir))
    # Reuse this logger's file handler (if any) so the model logs to
    # the same file.
    log_file = None
    for handler in self.logger.handlers:
        if isinstance(handler, logging.FileHandler):
            log_file = handler.baseFilename
    gqcnn = get_gqcnn_model(verbose=self.verbose).load(
        model_dir, verbose=self.verbose, log_file=log_file)
    gqcnn.open_session()
    gripper_mode = gqcnn.gripper_mode
    angular_bins = gqcnn.angular_bins

    # Read params from the config.
    if dataset_config is None:
        dataset_dir = model_config["dataset_dir"]
        split_name = model_config["split_name"]
        image_field_name = model_config["image_field_name"]
        pose_field_name = model_config["pose_field_name"]
        metric_name = model_config["target_metric_name"]
        metric_thresh = model_config["metric_thresh"]
    else:
        dataset_dir = dataset_config["dataset_dir"]
        split_name = dataset_config["split_name"]
        image_field_name = dataset_config["image_field_name"]
        pose_field_name = dataset_config["pose_field_name"]
        metric_name = dataset_config["target_metric_name"]
        metric_thresh = dataset_config["metric_thresh"]
        # Dataset config overrides the gripper mode read from the model.
        gripper_mode = dataset_config["gripper_mode"]

    self.logger.info("Loading dataset %s" % (dataset_dir))
    dataset = TensorDataset.open(dataset_dir)
    train_indices, val_indices, _ = dataset.split(split_name)

    # Visualize conv filters: one subplot per first-layer filter, laid
    # out on a near-square grid.
    conv1_filters = gqcnn.filters
    num_filt = conv1_filters.shape[3]
    d = utils.sqrt_ceil(num_filt)
    vis2d.clf()
    for k in range(num_filt):
        filt = conv1_filters[:, :, 0, k]
        vis2d.subplot(d, d, k + 1)
        vis2d.imshow(DepthImage(filt))
    figname = os.path.join(model_output_dir, "conv1_filters.pdf")
    vis2d.savefig(figname, dpi=self.dpi)

    # Aggregate training and validation true labels and predicted
    # probabilities.
    all_predictions = []
    if angular_bins > 0:
        all_predictions_raw = []
    all_labels = []
    for i in range(dataset.num_tensors):
        # Log progress.
        if i % self.log_rate == 0:
            self.logger.info("Predicting tensor %d of %d" %
                             (i + 1, dataset.num_tensors))

        # Read in data; labels are metric values thresholded to {0, 1}.
        image_arr = dataset.tensor(image_field_name, i).arr
        pose_arr = read_pose_data(
            dataset.tensor(pose_field_name, i).arr, gripper_mode)
        metric_arr = dataset.tensor(metric_name, i).arr
        label_arr = 1 * (metric_arr > metric_thresh)
        label_arr = label_arr.astype(np.uint8)
        if angular_bins > 0:
            # Form mask to extract predictions from ground-truth angular
            # bins: wrap each grasp angle into (-pi/2, pi/2], then mark
            # the (failure, success) prediction pair of its bin.
            raw_poses = dataset.tensor(pose_field_name, i).arr
            angles = raw_poses[:, 3]
            neg_ind = np.where(angles < 0)
            # TODO(vsatish): These should use the max angle instead.
            angles = np.abs(angles) % GeneralConstants.PI
            angles[neg_ind] *= -1
            g_90 = np.where(angles > (GeneralConstants.PI / 2))
            l_neg_90 = np.where(angles < (-1 *
                                          (GeneralConstants.PI / 2)))
            angles[g_90] -= GeneralConstants.PI
            angles[l_neg_90] += GeneralConstants.PI
            # TODO(vsatish): Fix this along with the others.
            angles *= -1  # Hack to fix reverse angle convention.
            angles += (GeneralConstants.PI / 2)
            pred_mask = np.zeros((raw_poses.shape[0], angular_bins * 2),
                                 dtype=bool)
            bin_width = GeneralConstants.PI / angular_bins
            # NOTE(review): this inner loop shadows the outer tensor
            # index `i`. Benign here — all outer-loop uses of `i` happen
            # before this point and the outer `for` rebinds it — but
            # fragile if code is ever added below.
            for i in range(angles.shape[0]):
                pred_mask[i, int((angles[i] // bin_width) * 2)] = True
                pred_mask[i, int((angles[i] // bin_width) * 2 + 1)] = True

        # Predict with GQ-CNN.
        predictions = gqcnn.predict(image_arr, pose_arr)
        if angular_bins > 0:
            # Keep raw per-bin outputs; reduce masked outputs to one
            # (failure, success) pair per datapoint.
            raw_predictions = np.array(predictions)
            predictions = predictions[pred_mask].reshape((-1, 2))

        # Aggregate (column 1 is the success probability).
        all_predictions.extend(predictions[:, 1].tolist())
        if angular_bins > 0:
            all_predictions_raw.extend(raw_predictions.tolist())
        all_labels.extend(label_arr.tolist())

    # Close session.
    gqcnn.close_session()

    # Create arrays and carve out the train/val splits.
    all_predictions = np.array(all_predictions)
    all_labels = np.array(all_labels)
    train_predictions = all_predictions[train_indices]
    val_predictions = all_predictions[val_indices]
    train_labels = all_labels[train_indices]
    val_labels = all_labels[val_indices]
    if angular_bins > 0:
        all_predictions_raw = np.array(all_predictions_raw)
        train_predictions_raw = all_predictions_raw[train_indices]
        val_predictions_raw = all_predictions_raw[val_indices]

    # Aggregate results.
    train_result = BinaryClassificationResult(train_predictions,
                                              train_labels)
    val_result = BinaryClassificationResult(val_predictions, val_labels)
    train_result.save(os.path.join(model_output_dir, "train_result.cres"))
    val_result.save(os.path.join(model_output_dir, "val_result.cres"))

    # Get stats, plot curves.
    self.logger.info("Model %s training error rate: %.3f" %
                     (model_dir, train_result.error_rate))
    self.logger.info("Model %s validation error rate: %.3f" %
                     (model_dir, val_result.error_rate))

    self.logger.info("Model %s training loss: %.3f" %
                     (model_dir, train_result.cross_entropy_loss))
    self.logger.info("Model %s validation loss: %.3f" %
                     (model_dir, val_result.cross_entropy_loss))

    # Save images: example grasps for each confusion-matrix cell, up to
    # `self.num_vis` randomly-chosen examples per cell.
    vis2d.figure()
    example_dir = os.path.join(model_output_dir, "examples")
    if not os.path.exists(example_dir):
        os.mkdir(example_dir)

    # Train.
    self.logger.info("Saving training examples")
    train_example_dir = os.path.join(example_dir, "train")
    if not os.path.exists(train_example_dir):
        os.mkdir(train_example_dir)

    # Train TP.
    true_positive_indices = train_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        # `j` indexes the split result; `k` is the dataset index.
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title(
            "Datapoint %d: Pred: %.3f Label: %.3f" %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         "true_positive_%03d.png" % (i)))

    # Train FP.
    false_positive_indices = train_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title(
            "Datapoint %d: Pred: %.3f Label: %.3f" %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         "false_positive_%03d.png" % (i)))

    # Train TN.
    true_negative_indices = train_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title(
            "Datapoint %d: Pred: %.3f Label: %.3f" %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         "true_negative_%03d.png" % (i)))

    # Train FN.
    false_negative_indices = train_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title(
            "Datapoint %d: Pred: %.3f Label: %.3f" %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         "false_negative_%03d.png" % (i)))

    # Val.
    self.logger.info("Saving validation examples")
    val_example_dir = os.path.join(example_dir, "val")
    if not os.path.exists(val_example_dir):
        os.mkdir(val_example_dir)

    # Val TP.
    true_positive_indices = val_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir, "true_positive_%03d.png" % (i)))

    # Val FP.
    false_positive_indices = val_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir,
                         "false_positive_%03d.png" % (i)))

    # Val TN.
    true_negative_indices = val_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir, "true_negative_%03d.png" % (i)))

    # Val FN.
    false_negative_indices = val_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint,
                             image_field_name,
                             pose_field_name,
                             gripper_mode,
                             angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
        vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir,
                         "false_negative_%03d.png" % (i)))

    # Save summary stats.
    # NOTE(review): the `open(..., "w")` handles passed to json.dump are
    # never explicitly closed; a `with` block would be safer.
    train_summary_stats = {
        "error_rate": train_result.error_rate,
        "ap_score": train_result.ap_score,
        "auc_score": train_result.auc_score,
        "loss": train_result.cross_entropy_loss
    }
    train_stats_filename = os.path.join(model_output_dir,
                                        "train_stats.json")
    json.dump(train_summary_stats,
              open(train_stats_filename, "w"),
              indent=JSON_INDENT,
              sort_keys=True)

    val_summary_stats = {
        "error_rate": val_result.error_rate,
        "ap_score": val_result.ap_score,
        "auc_score": val_result.auc_score,
        "loss": val_result.cross_entropy_loss
    }
    val_stats_filename = os.path.join(model_output_dir, "val_stats.json")
    json.dump(val_summary_stats,
              open(val_stats_filename, "w"),
              indent=JSON_INDENT,
              sort_keys=True)

    return train_result, val_result
def _plot(self, model_dir, model_output_dir, train_result, val_result):
    """Plot analysis curves.

    Older variant of the analysis plotting routine (single-quoted style,
    module-level *_FILENAME constants instead of `GQCNNFilenames`).
    Writes PR/ROC curves, prediction-error histograms, and — when the
    training logs exist under `model_dir` — error-rate and loss curves.

    Returns the tuple `(init_train_error, final_train_error,
    init_train_loss, final_train_loss, pct_pos_val, final_val_error,
    norm_final_val_error)`, or None if plotting the training curves
    failed (the exception is logged, not raised).
    """
    self.logger.info('Plotting')

    # NOTE(review): `model_name`, `num_colors`, and `num_styles` are
    # computed but never used below.
    _, model_name = os.path.split(model_output_dir)

    # set params
    colors = ['g', 'b', 'c', 'y', 'm', 'r']
    styles = ['-', '--', '-.', ':', '-']
    num_colors = len(colors)
    num_styles = len(styles)

    # PR, ROC
    vis2d.clf()
    train_result.precision_recall_curve(plot=True,
                                        line_width=self.line_width,
                                        color=colors[0],
                                        style=styles[0],
                                        label='TRAIN')
    val_result.precision_recall_curve(plot=True,
                                      line_width=self.line_width,
                                      color=colors[1],
                                      style=styles[1],
                                      label='VAL')
    vis2d.title('Precision Recall Curves', fontsize=self.font_size)
    handles, labels = vis2d.gca().get_legend_handles_labels()
    vis2d.legend(handles, labels, loc='best')
    figname = os.path.join(model_output_dir, 'precision_recall.png')
    vis2d.savefig(figname, dpi=self.dpi)

    vis2d.clf()
    train_result.roc_curve(plot=True,
                           line_width=self.line_width,
                           color=colors[0],
                           style=styles[0],
                           label='TRAIN')
    val_result.roc_curve(plot=True,
                         line_width=self.line_width,
                         color=colors[1],
                         style=styles[1],
                         label='VAL')
    # (Title misspells "Receiver" — kept as-is; it is a runtime string.)
    vis2d.title('Reciever Operating Characteristic',
                fontsize=self.font_size)
    handles, labels = vis2d.gca().get_legend_handles_labels()
    vis2d.legend(handles, labels, loc='best')
    figname = os.path.join(model_output_dir, 'roc.png')
    vis2d.savefig(figname, dpi=self.dpi)

    # plot histogram of prediction errors
    num_bins = min(self.num_bins, train_result.num_datapoints)

    # train positives
    pos_ind = np.where(train_result.labels == 1)[0]
    diffs = np.abs(train_result.labels[pos_ind] -
                   train_result.pred_probs[pos_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0,1),
                    normalized=False,
                    plot=True)
    vis2d.title('Error on Positive Training Examples',
                fontsize=self.font_size)
    vis2d.xlabel('Abs Prediction Error', fontsize=self.font_size)
    vis2d.ylabel('Count', fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           'pos_train_errors_histogram.png')
    vis2d.savefig(figname, dpi=self.dpi)

    # train negatives
    neg_ind = np.where(train_result.labels == 0)[0]
    diffs = np.abs(train_result.labels[neg_ind] -
                   train_result.pred_probs[neg_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0,1),
                    normalized=False,
                    plot=True)
    vis2d.title('Error on Negative Training Examples',
                fontsize=self.font_size)
    vis2d.xlabel('Abs Prediction Error', fontsize=self.font_size)
    vis2d.ylabel('Count', fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           'neg_train_errors_histogram.png')
    vis2d.savefig(figname, dpi=self.dpi)

    # histogram of validation errors
    num_bins = min(self.num_bins, val_result.num_datapoints)

    # val positives
    pos_ind = np.where(val_result.labels == 1)[0]
    diffs = np.abs(val_result.labels[pos_ind] -
                   val_result.pred_probs[pos_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0,1),
                    normalized=False,
                    plot=True)
    vis2d.title('Error on Positive Validation Examples',
                fontsize=self.font_size)
    vis2d.xlabel('Abs Prediction Error', fontsize=self.font_size)
    vis2d.ylabel('Count', fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           'pos_val_errors_histogram.png')
    vis2d.savefig(figname, dpi=self.dpi)

    # val negatives
    neg_ind = np.where(val_result.labels == 0)[0]
    diffs = np.abs(val_result.labels[neg_ind] -
                   val_result.pred_probs[neg_ind])
    vis2d.figure()
    utils.histogram(diffs,
                    num_bins,
                    bounds=(0,1),
                    normalized=False,
                    plot=True)
    vis2d.title('Error on Negative Validation Examples',
                fontsize=self.font_size)
    vis2d.xlabel('Abs Prediction Error', fontsize=self.font_size)
    vis2d.ylabel('Count', fontsize=self.font_size)
    figname = os.path.join(model_output_dir,
                           'neg_val_errors_histogram.png')
    vis2d.savefig(figname, dpi=self.dpi)

    # losses
    try:
        train_errors_filename = os.path.join(model_dir,
                                             TRAIN_ERRORS_FILENAME)
        val_errors_filename = os.path.join(model_dir,
                                           VAL_ERRORS_FILENAME)
        train_iters_filename = os.path.join(model_dir,
                                            TRAIN_ITERS_FILENAME)
        val_iters_filename = os.path.join(model_dir,
                                          VAL_ITERS_FILENAME)
        pct_pos_val_filename = os.path.join(model_dir,
                                            PCT_POS_VAL_FILENAME)
        train_losses_filename = os.path.join(model_dir,
                                             TRAIN_LOSS_FILENAME)
        raw_train_errors = np.load(train_errors_filename)
        val_errors = np.load(val_errors_filename)
        # NOTE(review): `raw_train_iters` is loaded but never used;
        # train iterations are reconstructed from WINDOW below.
        raw_train_iters = np.load(train_iters_filename)
        val_iters = np.load(val_iters_filename)
        # Fall back to the first logged val error when the pct-positive
        # file is missing.
        pct_pos_val = float(val_errors[0])
        if os.path.exists(pct_pos_val_filename):
            pct_pos_val = 100.0 * np.load(pct_pos_val_filename)
        raw_train_losses = np.load(train_losses_filename)

        # Prepend iteration 0 with the baseline error (pct positives).
        val_errors = np.r_[pct_pos_val, val_errors]
        val_iters = np.r_[0, val_iters]

        # window the training error: average errors/losses over
        # consecutive chunks of WINDOW iterations to smooth the curves
        i = 0
        train_errors = []
        train_losses = []
        train_iters = []
        while i < raw_train_errors.shape[0]:
            train_errors.append(np.mean(raw_train_errors[i:i+WINDOW]))
            train_losses.append(np.mean(raw_train_losses[i:i+WINDOW]))
            train_iters.append(i)
            i += WINDOW
        train_errors = np.array(train_errors)
        train_losses = np.array(train_losses)
        train_iters = np.array(train_iters)

        # Normalize error curves by the initial validation error.
        init_val_error = val_errors[0]
        norm_train_errors = train_errors / init_val_error
        norm_val_errors = val_errors / init_val_error
        norm_final_val_error = val_result.error_rate / val_errors[0]
        if pct_pos_val > 0:
            norm_final_val_error = val_result.error_rate / pct_pos_val

        # Raw error-rate curves.
        vis2d.clf()
        vis2d.plot(train_iters,
                   train_errors,
                   linewidth=self.line_width,
                   color='b')
        vis2d.plot(val_iters,
                   val_errors,
                   linewidth=self.line_width,
                   color='g')
        vis2d.ylim(0, 100)
        vis2d.legend(('TRAIN (Minibatch)', 'VAL'),
                     fontsize=self.font_size,
                     loc='best')
        vis2d.xlabel('Iteration', fontsize=self.font_size)
        vis2d.ylabel('Error Rate', fontsize=self.font_size)
        vis2d.title('Error Rate vs Training Iteration',
                    fontsize=self.font_size)
        figname = os.path.join(model_output_dir,
                               'training_error_rates.png')
        vis2d.savefig(figname, dpi=self.dpi)

        # Normalized error-rate curves.
        vis2d.clf()
        vis2d.plot(train_iters, norm_train_errors, linewidth=4, color='b')
        vis2d.plot(val_iters, norm_val_errors, linewidth=4, color='g')
        vis2d.ylim(0, 2.0)
        vis2d.legend(('TRAIN (Minibatch)', 'VAL'),
                     fontsize=self.font_size,
                     loc='best')
        vis2d.xlabel('Iteration', fontsize=self.font_size)
        vis2d.ylabel('Normalized Error Rate', fontsize=self.font_size)
        vis2d.title('Normalized Error Rate vs Training Iteration',
                    fontsize=self.font_size)
        figname = os.path.join(model_output_dir,
                               'training_norm_error_rates.png')
        vis2d.savefig(figname, dpi=self.dpi)

        # Loss curve (clipped at MAX_LOSS so outliers don't flatten it).
        train_losses[train_losses > MAX_LOSS] = MAX_LOSS # CAP LOSSES
        vis2d.clf()
        vis2d.plot(train_iters,
                   train_losses,
                   linewidth=self.line_width,
                   color='b')
        vis2d.ylim(0, 2.0)
        vis2d.xlabel('Iteration', fontsize=self.font_size)
        vis2d.ylabel('Loss', fontsize=self.font_size)
        vis2d.title('Training Loss vs Iteration', fontsize=self.font_size)
        figname = os.path.join(model_output_dir, 'training_losses.png')
        vis2d.savefig(figname, dpi=self.dpi)

        # log
        self.logger.info('TRAIN')
        self.logger.info('Original error: %.3f' %(train_errors[0]))
        self.logger.info('Final error: %.3f' %(train_result.error_rate))
        self.logger.info('Orig loss: %.3f' %(train_losses[0]))
        self.logger.info('Final loss: %.3f' %(train_losses[-1]))

        self.logger.info('VAL')
        self.logger.info('Original error: %.3f' %(pct_pos_val))
        self.logger.info('Final error: %.3f' %(val_result.error_rate))
        self.logger.info('Normalized error: %.3f' %(norm_final_val_error))

        return train_errors[0], train_result.error_rate, train_losses[0], train_losses[-1], pct_pos_val, val_result.error_rate, norm_final_val_error
    except Exception as e:
        # NOTE(review): swallows all failures (missing log files etc.)
        # and implicitly returns None; callers must tolerate that.
        self.logger.error('Failed to plot training curves!\n' + str(e))
def _run_prediction_single_model(self, model_dir, model_output_dir, dataset_config):
    """ Analyze the performance of a single model.

    Runs the GQ-CNN stored in `model_dir` over its training dataset (or over
    the dataset described by `dataset_config`, when given), then writes
    classification results, example grasp visualizations for each
    confusion-matrix category, and summary statistics to `model_output_dir`.

    NOTE(review): another method with this exact name appears later in this
    file; whichever definition executes last shadows the other at
    class-creation time -- confirm which version is intended to survive.

    Parameters
    ----------
    model_dir : str
        Directory containing the trained model and its `config.json`.
    model_output_dir : str
        Directory to write results, figures, and stats into.
    dataset_config : dict or None
        Optional dataset parameters that override those stored in the
        model config (including the gripper mode).

    Returns
    -------
    tuple
        (train_result, val_result) as `BinaryClassificationResult`s.
    """
    # read in model config
    model_config_filename = os.path.join(model_dir, 'config.json')
    with open(model_config_filename) as data_file:
        model_config = json.load(data_file)

    # load model, reusing any FileHandler already attached to our logger so
    # the model's own logging goes to the same log file
    self.logger.info('Loading model %s' %(model_dir))
    log_file = None
    for handler in self.logger.handlers:
        if isinstance(handler, logging.FileHandler):
            log_file = handler.baseFilename
    gqcnn = get_gqcnn_model(verbose=self.verbose).load(model_dir, verbose=self.verbose, log_file=log_file)
    gqcnn.open_session()
    gripper_mode = gqcnn.gripper_mode
    angular_bins = gqcnn.angular_bins

    # read params from the config; a provided dataset_config overrides
    # everything, including the gripper mode taken from the model
    if dataset_config is None:
        dataset_dir = model_config['dataset_dir']
        split_name = model_config['split_name']
        image_field_name = model_config['image_field_name']
        pose_field_name = model_config['pose_field_name']
        metric_name = model_config['target_metric_name']
        metric_thresh = model_config['metric_thresh']
    else:
        dataset_dir = dataset_config['dataset_dir']
        split_name = dataset_config['split_name']
        image_field_name = dataset_config['image_field_name']
        pose_field_name = dataset_config['pose_field_name']
        metric_name = dataset_config['target_metric_name']
        metric_thresh = dataset_config['metric_thresh']
        gripper_mode = dataset_config['gripper_mode']

    self.logger.info('Loading dataset %s' %(dataset_dir))
    dataset = TensorDataset.open(dataset_dir)
    train_indices, val_indices, _ = dataset.split(split_name)

    # visualize conv filters in a d x d subplot grid
    conv1_filters = gqcnn.filters
    num_filt = conv1_filters.shape[3]
    d = utils.sqrt_ceil(num_filt)
    vis2d.clf()
    for k in range(num_filt):
        filt = conv1_filters[:,:,0,k]
        vis2d.subplot(d,d,k+1)
        vis2d.imshow(DepthImage(filt))
    figname = os.path.join(model_output_dir, 'conv1_filters.pdf')
    vis2d.savefig(figname, dpi=self.dpi)

    # aggregate training and validation true labels and predicted probabilities
    all_predictions = []
    if angular_bins > 0:
        all_predictions_raw = []
    all_labels = []
    for i in range(dataset.num_tensors):
        # log progress
        if i % self.log_rate == 0:
            self.logger.info('Predicting tensor %d of %d' %(i+1, dataset.num_tensors))

        # read in data; binarize the grasp metric against metric_thresh
        image_arr = dataset.tensor(image_field_name, i).arr
        pose_arr = read_pose_data(dataset.tensor(pose_field_name, i).arr, gripper_mode)
        metric_arr = dataset.tensor(metric_name, i).arr
        label_arr = 1 * (metric_arr > metric_thresh)
        label_arr = label_arr.astype(np.uint8)
        if angular_bins > 0:
            # form mask to extract predictions from ground-truth angular bins:
            # wrap each angle into (-pi/2, pi/2], then shift into [0, pi)
            # so it indexes a bin
            raw_poses = dataset.tensor(pose_field_name, i).arr
            angles = raw_poses[:, 3]
            neg_ind = np.where(angles < 0)
            angles = np.abs(angles) % GeneralConstants.PI
            angles[neg_ind] *= -1
            g_90 = np.where(angles > (GeneralConstants.PI / 2))
            l_neg_90 = np.where(angles < (-1 * (GeneralConstants.PI / 2)))
            angles[g_90] -= GeneralConstants.PI
            angles[l_neg_90] += GeneralConstants.PI
            angles *= -1 # hack to fix reverse angle convention
            angles += (GeneralConstants.PI / 2)
            # two mask columns per bin (the bin's two output logits)
            pred_mask = np.zeros((raw_poses.shape[0], angular_bins*2), dtype=bool)
            bin_width = GeneralConstants.PI / angular_bins
            # NOTE(review): this inner loop reuses the outer loop variable
            # `i`; harmless because `i` is not read again before the next
            # outer iteration, but worth renaming for clarity
            for i in range(angles.shape[0]):
                pred_mask[i, int((angles[i] // bin_width)*2)] = True
                pred_mask[i, int((angles[i] // bin_width)*2 + 1)] = True

        # predict with GQ-CNN
        predictions = gqcnn.predict(image_arr, pose_arr)
        if angular_bins > 0:
            # keep only the two logits in each datapoint's ground-truth bin
            raw_predictions = np.array(predictions)
            predictions = predictions[pred_mask].reshape((-1, 2))

        # aggregate positive-class probabilities and labels
        all_predictions.extend(predictions[:,1].tolist())
        if angular_bins > 0:
            all_predictions_raw.extend(raw_predictions.tolist())
        all_labels.extend(label_arr.tolist())

    # close session
    gqcnn.close_session()

    # create arrays and split them by the dataset's train/val indices
    all_predictions = np.array(all_predictions)
    all_labels = np.array(all_labels)
    train_predictions = all_predictions[train_indices]
    val_predictions = all_predictions[val_indices]
    train_labels = all_labels[train_indices]
    val_labels = all_labels[val_indices]
    if angular_bins > 0:
        all_predictions_raw = np.array(all_predictions_raw)
        train_predictions_raw = all_predictions_raw[train_indices]
        val_predictions_raw = all_predictions_raw[val_indices]

    # aggregate results
    train_result = BinaryClassificationResult(train_predictions, train_labels)
    val_result = BinaryClassificationResult(val_predictions, val_labels)
    train_result.save(os.path.join(model_output_dir, 'train_result.cres'))
    val_result.save(os.path.join(model_output_dir, 'val_result.cres'))

    # get stats, plot curves
    self.logger.info('Model %s training error rate: %.3f' %(model_dir, train_result.error_rate))
    self.logger.info('Model %s validation error rate: %.3f' %(model_dir, val_result.error_rate))
    self.logger.info('Model %s training loss: %.3f' %(model_dir, train_result.cross_entropy_loss))
    self.logger.info('Model %s validation loss: %.3f' %(model_dir, val_result.cross_entropy_loss))

    # save images of up to num_vis randomly-chosen examples per
    # confusion-matrix category (TP/FP/TN/FN) for each split
    vis2d.figure()
    example_dir = os.path.join(model_output_dir, 'examples')
    if not os.path.exists(example_dir):
        os.mkdir(example_dir)

    # train
    self.logger.info('Saving training examples')
    train_example_dir = os.path.join(example_dir, 'train')
    if not os.path.exists(train_example_dir):
        os.mkdir(train_example_dir)

    # train TP
    true_positive_indices = train_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, train_result.pred_probs[j], train_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, 'true_positive_%03d.png' %(i)))

    # train FP
    false_positive_indices = train_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, train_result.pred_probs[j], train_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, 'false_positive_%03d.png' %(i)))

    # train TN
    true_negative_indices = train_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, train_result.pred_probs[j], train_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, 'true_negative_%03d.png' %(i)))

    # train FN (original comment mislabeled this section "train TP")
    false_negative_indices = train_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, train_result.pred_probs[j], train_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(train_example_dir, 'false_negative_%03d.png' %(i)))

    # val
    self.logger.info('Saving validation examples')
    val_example_dir = os.path.join(example_dir, 'val')
    if not os.path.exists(val_example_dir):
        os.mkdir(val_example_dir)

    # val TP
    true_positive_indices = val_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, val_result.pred_probs[j], val_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, 'true_positive_%03d.png' %(i)))

    # val FP
    false_positive_indices = val_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, val_result.pred_probs[j], val_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, 'false_positive_%03d.png' %(i)))

    # val TN
    true_negative_indices = val_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, val_result.pred_probs[j], val_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, 'true_negative_%03d.png' %(i)))

    # val FN (original comment mislabeled this section "val TP")
    false_negative_indices = val_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        if angular_bins > 0:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
        else:
            self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k, val_result.pred_probs[j], val_result.labels[j]), fontsize=self.font_size)
        vis2d.savefig(os.path.join(val_example_dir, 'false_negative_%03d.png' %(i)))

    # save summary stats
    # NOTE(review): the open() handles passed to json.dump are never closed
    # explicitly; a `with` block would be safer
    train_summary_stats = {
        'error_rate': train_result.error_rate,
        'ap_score': train_result.ap_score,
        'auc_score': train_result.auc_score,
        'loss': train_result.cross_entropy_loss
    }
    train_stats_filename = os.path.join(model_output_dir, 'train_stats.json')
    json.dump(train_summary_stats, open(train_stats_filename, 'w'), indent=JSON_INDENT, sort_keys=True)

    val_summary_stats = {
        'error_rate': val_result.error_rate,
        'ap_score': val_result.ap_score,
        'auc_score': val_result.auc_score,
        'loss': val_result.cross_entropy_loss
    }
    val_stats_filename = os.path.join(model_output_dir, 'val_stats.json')
    json.dump(val_summary_stats, open(val_stats_filename, 'w'), indent=JSON_INDENT, sort_keys=True)

    return train_result, val_result
def _run_prediction_single_model(self, model_dir, model_output_dir,
                                 dataset_config):
    """ Analyze the performance of a single model.

    Runs the GQ-CNN stored in `model_dir` over its training dataset (or over
    the dataset described by `dataset_config`, when given), then writes
    classification results, example grasp visualizations for each
    confusion-matrix category, and summary statistics to `model_output_dir`.

    NOTE(review): this file contains another method with this exact name
    (the variant that supports angular bins and logs cross-entropy loss);
    whichever definition executes last shadows the other at class-creation
    time -- confirm which version is intended to survive. This variant uses
    the root `logging` module and `GQCNN.load` directly.

    Parameters
    ----------
    model_dir : str
        Directory containing the trained model and its `config.json`.
    model_output_dir : str
        Directory to write results, figures, and stats into.
    dataset_config : dict or None
        Optional dataset parameters that override those stored in the
        model config (including the gripper mode).

    Returns
    -------
    tuple
        (train_result, val_result) as `BinaryClassificationResult`s.
    """
    # read in model config
    model_config_filename = os.path.join(model_dir, 'config.json')
    with open(model_config_filename) as data_file:
        model_config = json.load(data_file)

    # load model
    logging.info('Loading model %s' % (model_dir))
    gqcnn = GQCNN.load(model_dir)
    gqcnn.open_session()
    gripper_mode = gqcnn.gripper_mode

    # read params from the config; a provided dataset_config overrides
    # everything, including the gripper mode taken from the model
    if dataset_config is None:
        dataset_dir = model_config['dataset_dir']
        split_name = model_config['split_name']
        image_field_name = model_config['image_field_name']
        pose_field_name = model_config['pose_field_name']
        metric_name = model_config['target_metric_name']
        metric_thresh = model_config['metric_thresh']
    else:
        dataset_dir = dataset_config['dataset_dir']
        split_name = dataset_config['split_name']
        image_field_name = dataset_config['image_field_name']
        pose_field_name = dataset_config['pose_field_name']
        metric_name = dataset_config['target_metric_name']
        metric_thresh = dataset_config['metric_thresh']
        gripper_mode = dataset_config['gripper_mode']

    logging.info('Loading dataset %s' % (dataset_dir))
    dataset = TensorDataset.open(dataset_dir)
    train_indices, val_indices, _ = dataset.split(split_name)

    # visualize conv filters in a d x d subplot grid
    conv1_filters = gqcnn.filters
    num_filt = conv1_filters.shape[3]
    d = utils.sqrt_ceil(num_filt)
    vis2d.clf()
    for k in range(num_filt):
        filt = conv1_filters[:, :, 0, k]
        vis2d.subplot(d, d, k + 1)
        vis2d.imshow(DepthImage(filt))
    figname = os.path.join(model_output_dir, 'conv1_filters.pdf')
    vis2d.savefig(figname, dpi=self.dpi)

    # aggregate training and validation true labels and predicted probabilities
    all_predictions = []
    all_labels = []
    for i in range(dataset.num_tensors):
        # log progress
        if i % self.log_rate == 0:
            logging.info('Predicting tensor %d of %d' %
                         (i + 1, dataset.num_tensors))

        # read in data; binarize the grasp metric against metric_thresh
        image_arr = dataset.tensor(image_field_name, i).arr
        pose_arr = read_pose_data(
            dataset.tensor(pose_field_name, i).arr, gripper_mode)
        metric_arr = dataset.tensor(metric_name, i).arr
        label_arr = 1 * (metric_arr > metric_thresh)
        label_arr = label_arr.astype(np.uint8)

        # predict with GQ-CNN
        predictions = gqcnn.predict(image_arr, pose_arr)

        # aggregate positive-class probabilities and labels
        all_predictions.extend(predictions[:, 1].tolist())
        all_labels.extend(label_arr.tolist())

    # close session
    gqcnn.close_session()

    # create arrays and split them by the dataset's train/val indices
    all_predictions = np.array(all_predictions)
    all_labels = np.array(all_labels)
    train_predictions = all_predictions[train_indices]
    val_predictions = all_predictions[val_indices]
    train_labels = all_labels[train_indices]
    val_labels = all_labels[val_indices]

    # aggregate results
    train_result = BinaryClassificationResult(train_predictions,
                                              train_labels)
    val_result = BinaryClassificationResult(val_predictions, val_labels)
    train_result.save(os.path.join(model_output_dir, 'train_result.cres'))
    val_result.save(os.path.join(model_output_dir, 'val_result.cres'))

    # get stats, plot curves
    logging.info('Model %s training error rate: %.3f' %
                 (model_dir, train_result.error_rate))
    logging.info('Model %s validation error rate: %.3f' %
                 (model_dir, val_result.error_rate))

    # save images of up to num_vis randomly-chosen examples per
    # confusion-matrix category (TP/FP/TN/FN) for each split
    vis2d.figure()
    example_dir = os.path.join(model_output_dir, 'examples')
    if not os.path.exists(example_dir):
        os.mkdir(example_dir)

    # train
    logging.info('Saving training examples')
    train_example_dir = os.path.join(example_dir, 'train')
    if not os.path.exists(train_example_dir):
        os.mkdir(train_example_dir)

    # train TP
    true_positive_indices = train_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title(
            'Datapoint %d: Pred: %.3f Label: %.3f' %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         'true_positive_%03d.png' % (i)))

    # train FP
    false_positive_indices = train_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title(
            'Datapoint %d: Pred: %.3f Label: %.3f' %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         'false_positive_%03d.png' % (i)))

    # train TN
    true_negative_indices = train_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title(
            'Datapoint %d: Pred: %.3f Label: %.3f' %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         'true_negative_%03d.png' % (i)))

    # train FN (original comment mislabeled this section "train TP")
    false_negative_indices = train_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = train_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title(
            'Datapoint %d: Pred: %.3f Label: %.3f' %
            (k, train_result.pred_probs[j], train_result.labels[j]),
            fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(train_example_dir,
                         'false_negative_%03d.png' % (i)))

    # val
    logging.info('Saving validation examples')
    val_example_dir = os.path.join(example_dir, 'val')
    if not os.path.exists(val_example_dir):
        os.mkdir(val_example_dir)

    # val TP
    true_positive_indices = val_result.true_positive_indices
    np.random.shuffle(true_positive_indices)
    true_positive_indices = true_positive_indices[:self.num_vis]
    for i, j in enumerate(true_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir,
                         'true_positive_%03d.png' % (i)))

    # val FP
    false_positive_indices = val_result.false_positive_indices
    np.random.shuffle(false_positive_indices)
    false_positive_indices = false_positive_indices[:self.num_vis]
    for i, j in enumerate(false_positive_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir,
                         'false_positive_%03d.png' % (i)))

    # val TN
    true_negative_indices = val_result.true_negative_indices
    np.random.shuffle(true_negative_indices)
    true_negative_indices = true_negative_indices[:self.num_vis]
    for i, j in enumerate(true_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir,
                         'true_negative_%03d.png' % (i)))

    # val FN (original comment mislabeled this section "val TP")
    false_negative_indices = val_result.false_negative_indices
    np.random.shuffle(false_negative_indices)
    false_negative_indices = false_negative_indices[:self.num_vis]
    for i, j in enumerate(false_negative_indices):
        k = val_indices[j]
        datapoint = dataset.datapoint(
            k, field_names=[image_field_name, pose_field_name])
        vis2d.clf()
        self._plot_grasp(datapoint, image_field_name, pose_field_name,
                         gripper_mode)
        vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                    (k, val_result.pred_probs[j], val_result.labels[j]),
                    fontsize=self.font_size)
        vis2d.savefig(
            os.path.join(val_example_dir,
                         'false_negative_%03d.png' % (i)))

    # save summary stats
    # NOTE(review): the open() handles passed to json.dump are never closed
    # explicitly; a `with` block would be safer
    train_summary_stats = {
        'error_rate': train_result.error_rate,
        'ap_score': train_result.ap_score,
        'auc_score': train_result.auc_score
    }
    train_stats_filename = os.path.join(model_output_dir,
                                        'train_stats.json')
    json.dump(train_summary_stats,
              open(train_stats_filename, 'w'),
              indent=JSON_INDENT,
              sort_keys=True)
    val_summary_stats = {
        'error_rate': val_result.error_rate,
        'ap_score': val_result.ap_score,
        'auc_score': val_result.auc_score
    }
    val_stats_filename = os.path.join(model_output_dir, 'val_stats.json')
    json.dump(val_summary_stats,
              open(val_stats_filename, 'w'),
              indent=JSON_INDENT,
              sort_keys=True)

    return train_result, val_result
policy_config["type"])) else: policy_type = "cem" if "type" in policy_config: policy_type = policy_config["type"] if policy_type == "ranking": policy = RobustGraspingPolicy(policy_config) elif policy_type == "cem": policy = CrossEntropyRobustGraspingPolicy(policy_config) else: raise ValueError("Invalid policy type: {}".format(policy_type)) # Query policy. policy_start = time.time() action = policy(state) logger.info("Planning took %.3f sec" % (time.time() - policy_start)) # Vis final grasp. if policy_config["vis"]["final_grasp"]: vis.figure(size=(10, 10)) vis.imshow(rgbd_im.depth, vmin=policy_config["vis"]["vmin"], vmax=policy_config["vis"]["vmax"]) vis.grasp(action.grasp, scale=2.5, show_center=False, show_axis=True) vis.title("Planned grasp at depth {0:.3f}m with Q={1:.3f}".format( action.grasp.depth, action.q_value)) if save_path is None: vis.show() else: vis.savefig(save_path)