def _read_model(self, model_dir, checkpoints):
    """Reads in the final model parameters from model_dir. If
    self.analyse_checkpoints is True, the checkpoint models are read in as
    well.

    Parameters
    ----------
    model_dir (str): Path to the model.
    checkpoints (list): List of the checkpoint strings.

    Returns
    -------
    models (list): List of the loaded GQCNN models.
    """
    # Determine the model name.
    model_root, model_name = os.path.split(model_dir)
    self.logger.info("Analyzing model %s" % model_name)

    # Load the model(s).
    self.logger.info("Loading model %s" % model_dir)
    models = []
    if self.analyse_checkpoints:
        # Load all checkpoint models.
        for checkpoint in checkpoints:
            if checkpoint == 'final':
                model = get_gqcnn_model(verbose=self.verbose).load(
                    model_dir, verbose=self.verbose)
            else:
                model = get_gqcnn_model(verbose=self.verbose).load(
                    model_dir,
                    verbose=self.verbose,
                    checkpoint_step=checkpoint)
            model.open_session()
            models.append(model)
    else:
        model = get_gqcnn_model(verbose=self.verbose).load(
            model_dir, verbose=self.verbose)
        model.open_session()
        models.append(model)
    return models
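# A minimal usage sketch (not part of the original module; the analyzer config
# and paths are assumed): whether checkpoints are loaded is controlled by the
# analyse_checkpoints flag, and every session opened by _read_model has to be
# closed by the caller once the predictions are done.
#
#   analyzer = GQCNNAnalyzer(analysis_config, verbose=True)
#   models = analyzer._read_model("/path/to/model", ["5000", "10000", "final"])
#   for model in models:
#       probabilities = model.predict(image_arr, pose_arr)
#       model.close_session()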
def _run_trial(self,
               analysis_config,
               train_config,
               dataset_dir,
               split_name,
               output_dir,
               model_name,
               train_progress_dict,
               trial_progress_dict,
               hyperparam_summary,
               gpu_avail="",
               cpu_cores_avail=[],
               backend='tf'):
    """Train and analyze a single hyperparameter-search trial."""
    trial_progress_dict['status'] = TrialStatus.RUNNING
    try:
        # Pin this process to the available CPU cores and GPU.
        os.system("taskset -pc {} {}".format(
            ",".join(str(i) for i in cpu_cores_avail), os.getpid()))
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_avail

        # Build and train the GQ-CNN.
        gqcnn = get_gqcnn_model(backend, verbose=False)(
            train_config['gqcnn'], verbose=False)
        trainer = get_gqcnn_trainer(backend)(gqcnn,
                                             dataset_dir,
                                             split_name,
                                             output_dir,
                                             train_config,
                                             name=model_name,
                                             progress_dict=train_progress_dict,
                                             verbose=False)
        self._run(trainer)

        # Save the hyperparameter summary for this trial (text mode, since
        # json.dump writes str in Python 3).
        with open(
                os.path.join(output_dir, model_name,
                             'hyperparam_summary.json'), 'w') as fhandle:
            json.dump(hyperparam_summary,
                      fhandle,
                      indent=GeneralConstants.JSON_INDENT)

        # Analyze the trained model.
        train_progress_dict['training_status'] = 'analyzing'
        analyzer = GQCNNAnalyzer(analysis_config, verbose=False)
        _, _, init_train_error, final_train_error, init_train_loss, \
            final_train_loss, init_val_error, final_val_error, \
            norm_final_val_error = analyzer.analyze(
                os.path.join(output_dir, model_name), output_dir)

        analysis_dict = {}
        analysis_dict['init_train_error'] = init_train_error
        analysis_dict['final_train_error'] = final_train_error
        analysis_dict['init_train_loss'] = init_train_loss
        analysis_dict['final_train_loss'] = final_train_loss
        analysis_dict['init_val_error'] = init_val_error
        analysis_dict['final_val_error'] = final_val_error
        analysis_dict['norm_final_val_error'] = norm_final_val_error
        train_progress_dict['analysis'] = analysis_dict

        train_progress_dict['training_status'] = 'finished'
        trial_progress_dict['status'] = TrialStatus.FINISHED
        sys.exit(0)
    except Exception as e:
        trial_progress_dict['status'] = TrialStatus.EXCEPTION
        trial_progress_dict['error_msg'] = str(e)
        sys.exit(0)
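# A hypothetical launch sketch (assumed, not shown in the original code): each
# trial is meant to run in its own process so that the taskset CPU pinning,
# the CUDA_VISIBLE_DEVICES setting, and the sys.exit() calls only affect that
# trial. Shared dicts from a multiprocessing.Manager let the parent process
# poll the training and trial status.
#
#   manager = multiprocessing.Manager()
#   train_progress = manager.dict()
#   trial_progress = manager.dict()
#   p = multiprocessing.Process(target=self._run_trial,
#                               args=(analysis_config, train_config,
#                                     dataset_dir, split_name, output_dir,
#                                     model_name, train_progress,
#                                     trial_progress, hyperparam_summary),
#                               kwargs={"gpu_avail": "0",
#                                       "cpu_cores_avail": [0, 1, 2, 3]})
#   p.start()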
def _run_prediction(self, model_dir, model_output_dir, data_dir,
                    noise_analysis, depth_analysis, perturb_analysis,
                    single_analysis):
    """Run predictions on the given data with a single model and log the
    results."""
    # Read in the model config.
    model_config_filename = os.path.join(model_dir, "config.json")
    with open(model_config_filename) as data_file:
        model_config = json.load(data_file)

    # Load the model.
    self.logger.info("Loading model %s" % (model_dir))
    log_file = None
    for handler in self.logger.handlers:
        if isinstance(handler, logging.FileHandler):
            log_file = handler.baseFilename
    gqcnn = get_gqcnn_model(verbose=self.verbose).load(
        model_dir, verbose=self.verbose, log_file=log_file)
    gqcnn.open_session()
    gripper_mode = gqcnn.gripper_mode
    angular_bins = gqcnn.angular_bins

    # Load the data.
    if noise_analysis:
        image_arr, pose_arr, labels, width_arr, file_arr, noise_arr = \
            self._read_data(data_dir, noise=True)
    elif depth_analysis:
        image_arr, pose_arr, labels, width_arr, file_arr, depth_arr = \
            self._read_data(data_dir, depth=True)
    elif perturb_analysis:
        image_arr, pose_arr, labels, width_arr, file_arr, perturb_arr = \
            self._read_data(data_dir, perturb=True)
    elif single_analysis:
        image_arr, pose_arr, labels, width_arr, file_arr, perturb_arr = \
            self._read_data(data_dir, perturb=True)
    else:
        image_arr, pose_arr, labels, width_arr, file_arr, obj_arr = \
            self._read_data(data_dir)

    # Predict the outcomes.
    predictions = gqcnn.predict(image_arr, pose_arr)
    gqcnn.close_session()
    results = BinaryClassificationResult(predictions[:, 1], labels)

    # Log the results.
    if noise_analysis:
        # Analyse the error rates with regard to the noise levels of the
        # images.
        noise_levels = np.unique(noise_arr)
        levels = len(noise_levels)
        for current_noise in noise_levels:
            pred = predictions[noise_arr[:, 0] == current_noise]
            lab = labels[noise_arr[:, 0] == current_noise]
            res = BinaryClassificationResult(pred[:, 1], lab)
            self._plot_histograms(pred[:, 1], lab, str(current_noise),
                                  model_output_dir)
            self.logger.info("Noise: %.4f Model %s error rate: %.3f" %
                             (current_noise, model_dir, res.error_rate))
            self.logger.info("Noise: %.4f Model %s loss: %.3f" %
                             (current_noise, model_dir,
                              res.cross_entropy_loss))
    elif depth_analysis:
        # Analyse the error rates with regard to the grasping depth in the
        # images.
        depth_levels = np.unique(depth_arr)
        levels = len(depth_levels)
        for current_depth in depth_levels:
            if current_depth == -1:
                depth_mode = 'original'
            else:
                depth_mode = 'relative %.2f' % (current_depth)
            pred = predictions[depth_arr == current_depth]
            lab = labels[depth_arr == current_depth]
            res = BinaryClassificationResult(pred[:, 1], lab)
            self._plot_histograms(pred[:, 1], lab, depth_mode,
                                  model_output_dir)
            self.logger.info("Depth %s Model %s error rate: %.3f" %
                             (depth_mode, model_dir, res.error_rate))
            self.logger.info("Depth: %s Model %s loss: %.3f" %
                             (depth_mode, model_dir, res.cross_entropy_loss))
    elif perturb_analysis:
        # Analyse the error rates with regard to the grasp perturbations in
        # the images.
        perturb_levels = np.unique(perturb_arr)
        print("Perturb levels: ", perturb_levels)
        _rot = len(np.unique(perturb_arr[:, 0]))
        _trans = len(np.unique(perturb_arr[:, 1]))
        try:
            _transy = len(np.unique(perturb_arr[:, 2]))
        except IndexError:
            _transy = 0
            print("No translation in y included")
        if _rot >= 2 and _trans <= 1 and _transy <= 1:
            perturbation = 'rotation'
            perturb_unit = 'deg'
            index = 0
        elif _rot <= 1 and _trans >= 2 and _transy <= 1:
            perturbation = 'translation'
            perturb_unit = 'pixel'
            index = 1
        elif _rot <= 1 and _trans <= 1 and _transy >= 2:
            perturbation = 'translationy'
            perturb_unit = 'pixel'
            index = 2
        else:
            raise ValueError(
                "Perturbation array includes at least two different "
                "perturbation types. Can't be handled. Abort.")
        levels = len(perturb_levels)
        accuracies = []
        for current_perturb in perturb_levels:
            pred = predictions[perturb_arr[:, index] == current_perturb]
            lab = labels[perturb_arr[:, index] == current_perturb]
            res = BinaryClassificationResult(pred[:, 1], lab)
            perturb_mode = perturbation + ' %.0f ' % (
                current_perturb) + perturb_unit
            self._plot_histograms(
                pred[:, 1], lab,
                perturbation + '_%.0f_' % (current_perturb) + perturb_unit,
                model_output_dir)
            self.logger.info("Grasp %s Model %s error rate: %.3f" %
                             (perturb_mode, model_dir, res.error_rate))
            accuracies.append(100 - res.error_rate)
            self.logger.info("Grasp %s Model %s loss: %.3f" %
                             (perturb_mode, model_dir,
                              res.cross_entropy_loss))
        self._plot_grasp_perturbations(perturb_levels, accuracies,
                                       model_output_dir, perturbation)
    elif single_analysis:
        # Analyse the error rates of a single grasp with regard to its
        # perturbations.
        perturb_levels = np.unique(perturb_arr)
        _rot = np.count_nonzero(perturb_arr[:, 0])
        _trans = np.count_nonzero(perturb_arr[:, 1])
        _transy = np.count_nonzero(perturb_arr[:, 2])
        _scalez = np.count_nonzero(perturb_arr[:, 3])
        _scalex = np.count_nonzero(perturb_arr[:, 4])
        if _rot >= 1 and _trans == 0 and _transy == 0 and _scalez == 0 \
                and _scalex == 0:
            perturbation = 'rotation'
            index = 0
        elif _rot == 0 and _trans >= 1 and _transy == 0 and _scalez == 0 \
                and _scalex == 0:
            perturbation = 'translation'
            index = 1
        elif _rot == 0 and _trans == 0 and _transy >= 1 and _scalez == 0 \
                and _scalex == 0:
            perturbation = 'translationy'
            index = 2
        elif _rot == 0 and _trans == 0 and _transy == 0 and _scalez >= 1 \
                and _scalex == 0:
            perturbation = 'scale_height'
            index = 3
        elif _rot == 0 and _trans == 0 and _transy == 0 and _scalez == 0 \
                and _scalex >= 1:
            perturbation = 'scalex'
            index = 4
        else:
            perturbation = 'mixed'
            index = 5

        # Create a new output dir for the single file and perturbation mode.
        print(len(perturb_arr))
        if len(perturb_arr) == 1:
            print("New output direction is: ", model_output_dir)
        else:
            model_output_dir = os.path.join(
                model_output_dir,
                str(file_arr[0][0]) + '_' + str(file_arr[0][1]) + '_' +
                perturbation)
            print("New output direction is: ", model_output_dir)
        if not os.path.exists(model_output_dir):
            os.mkdir(model_output_dir)

        # Set up a new logger.
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            model_output_dir,
                                            "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)
        levels = len(perturb_arr)
        abs_pred_errors = []
        if levels == 1:
            self.logger.info(
                "Mixed perturbation. Translationx %.1f, Translationy %.1f, "
                "Rotation %.1f, Scale_height %.1f, Scale x %.1f" %
                (perturb_arr[0][1], perturb_arr[0][2], perturb_arr[0][0],
                 perturb_arr[0][3], perturb_arr[0][4]))
            pred = predictions
            lab = labels
            res = BinaryClassificationResult(pred[:, 1], lab)
            self.logger.info("Grasp %s Model %s prediction: %.3f" %
                             (perturbation, model_dir, pred[:, 1]))
            self.logger.info("Grasp %s Model %s error rate: %.3f" %
                             (perturbation, model_dir, res.error_rate))
            self.logger.info("Grasp %s Model %s loss: %.3f" %
                             (perturbation, model_dir,
                              res.cross_entropy_loss))
        else:
            for current_perturb in perturb_levels:
                pred = predictions[perturb_arr[:, index] == current_perturb]
                lab = labels[perturb_arr[:, index] == current_perturb]
                res = BinaryClassificationResult(pred[:, 1], lab)
                if perturbation == 'rotation':
                    perturb_mode = 'rotation %.0f deg' % (current_perturb)
                elif perturbation == 'translation':
                    perturb_mode = 'translation in x %.0f pixel' % (
                        current_perturb)
                elif perturbation == 'translationy':
                    perturb_mode = 'translation in y %.0f pixel' % (
                        current_perturb)
                elif perturbation == 'scale_height':
                    perturb_mode = 'scaling depth by %.0f' % (
                        current_perturb)
                elif perturbation == 'scalex':
                    perturb_mode = 'scaling x by %.0f' % (current_perturb)
                pos_errors, neg_errors = self._calculate_prediction_errors(
                    pred[:, 1], lab)
                # Only append positive errors if the grasp was positive.
                if pos_errors:
                    abs_pred_errors.append(pos_errors)
                self.logger.info("Grasp %s Model %s prediction: %.3f" %
                                 (perturb_mode, model_dir, pred[:, 1]))
                self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                 (perturb_mode, model_dir, res.error_rate))
                self.logger.info("Grasp %s Model %s loss: %.3f" %
                                 (perturb_mode, model_dir,
                                  res.cross_entropy_loss))
            if pos_errors:
                self._plot_single_grasp_perturbations(perturb_levels,
                                                      abs_pred_errors,
                                                      model_output_dir,
                                                      perturbation)
    else:
        levels = 1
        self._plot_histograms(predictions[:, 1], labels, '',
                              model_output_dir)
        self.logger.info("Model %s error rate: %.3f" %
                         (model_dir, results.error_rate))
        self.logger.info("Model %s loss: %.3f" %
                         (model_dir, results.cross_entropy_loss))
        if obj_arr is not None and 'Cornell' in data_dir:
            # Count the per-object classification results for the Cornell
            # dataset.
            unique = np.unique(obj_arr).tolist()
            object_label = pd.read_csv(
                DATA_PATH + "Cornell/original/z.txt",
                sep=" ",
                header=None,
                usecols=[1, 2]).drop_duplicates().to_numpy()
            true_pos = dict()
            false_neg = dict()
            false_pos = dict()
            true_neg = dict()
            for obj in unique:
                obj = int(obj)
                true_pos[object_label[obj, 1]] = 0
                false_pos[object_label[obj, 1]] = 0
                true_neg[object_label[obj, 1]] = 0
                false_neg[object_label[obj, 1]] = 0
            for obj, pred, label in zip(obj_arr, predictions[:, 1], labels):
                if label == 1 and pred >= 0.5:
                    true_pos[object_label[obj, 1]] += 1
                elif label == 1 and pred < 0.5:
                    false_neg[object_label[obj, 1]] += 1
                elif label == 0 and pred >= 0.5:
                    false_pos[object_label[obj, 1]] += 1
                elif label == 0 and pred < 0.5:
                    true_neg[object_label[obj, 1]] += 1
            print(true_pos)
            self._export_object_analysis(true_pos, false_neg, false_pos,
                                         true_neg, model_output_dir)

    # Log the ratios.
    pos_lab = len(labels[labels == 1])
    neg_lab = len(labels[labels == 0])
    true_pos = len(results.true_positive_indices)
    true_neg = len(results.true_negative_indices)
    false_pos = neg_lab - true_neg
    false_neg = pos_lab - true_pos
    self.logger.info("%d samples, %d grasps" %
                     (len(labels), len(labels) / levels))
    self.logger.info("%d positive grasps, %d negative grasps" %
                     (pos_lab / levels, neg_lab / levels))
    self.logger.info("Model overall accuracy %.2f %%" %
                     (100 * results.accuracy))
    self.logger.info("Accuracy on positive grasps: %.2f %%" %
                     (true_pos / pos_lab * 100))
    self.logger.info("Accuracy on negative grasps: %.2f %%" %
                     (true_neg / neg_lab * 100))
    self.logger.info("True positive samples: %d" % true_pos)
    self.logger.info("True negative samples: %d" % true_neg)
    self.logger.info("Correct predictions: %d" % (true_pos + true_neg))
    self.logger.info("False positive samples: %d" % false_pos)
    self.logger.info("False negative samples: %d" % false_neg)
    self.logger.info("False predictions: %d" % (false_pos + false_neg))

    # Counter for grouping the same images with different noise/depth levels.
    cnt = 0
    if self.num_images is None or self.num_images > len(width_arr):
        self.num_images = len(width_arr)
    steps = int(len(width_arr) / self.num_images)
    for j in range(0, len(width_arr), steps):
        try:
            if file_arr[j][1] != file_arr[j - 1][1]:
                cnt = 0
            else:
                cnt += 1
        except Exception:
            cnt += 1
        if noise_analysis:
            image = self._plot_grasp(image_arr[j], width_arr[j], results, j,
                                     noise_arr=noise_arr)
        elif depth_analysis:
            image = self._plot_grasp(image_arr[j], width_arr[j], results, j,
                                     depth_arr=depth_arr)
        elif perturb_analysis or single_analysis:
            print("Plot grasp")
            image = self._plot_grasp(image_arr[j], width_arr[j], results, j,
                                     perturb_arr=perturb_arr)
        else:
            image = self._plot_grasp(image_arr[j], width_arr[j], results, j,
                                     plt_results=False)
        try:
            if noise_analysis or depth_analysis or perturb_analysis \
                    or single_analysis:
                image.save(
                    os.path.join(
                        model_output_dir, "%05d_%03d_example_%03d.png" %
                        (file_arr[j][0], file_arr[j][1], cnt)))
            else:
                image.save(
                    os.path.join(
                        model_output_dir,
                        "%05d_%03d.png" % (file_arr[j][0], file_arr[j][1])))
            # data = self.scale(image_arr[j][:, :, 0])
            # image = Image.fromarray(data).convert('RGB').resize(
            #     (300, 300), resample=Image.NEAREST)
            # image.save(os.path.join(
            #     model_output_dir,
            #     "%05d_%03d_orig.png" % (file_arr[j][0], file_arr[j][1])))
        except Exception:
            image.save(
                os.path.join(model_output_dir, "Example_%03d.png" % (cnt)))
    if single_analysis:
        print("Plotting depth image")
        j = int(len(image_arr) / 2)
        # Plot the pure depth image without prediction labeling.
        image = self._plot_grasp(image_arr[j], width_arr[j], results, j,
                                 plt_results=False)
        image.save(os.path.join(model_output_dir, "Depth_image.png"))
    return results
def visualise(self, model_dir, output_dir, data_dir):
    """Visualise the network predictions with t-SNE and PCA embeddings."""
    # Determine the model name.
    model_name = ""
    model_root = model_dir
    while model_name == "" and model_root != "":
        model_root, model_name = os.path.split(model_root)

    output_dir = os.path.join(output_dir, "Visualisation/")
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # Read in the model config.
    model_config_filename = os.path.join(model_dir, "config.json")
    with open(model_config_filename) as data_file:
        model_config = json.load(data_file)

    # Set up the logger.
    self.logger = Logger.get_logger(self.__class__.__name__,
                                    log_file=os.path.join(
                                        output_dir, "analysis.log"),
                                    silence=(not self.verbose),
                                    global_log_file=self.verbose)
    self.logger.info("Analyzing model %s" % (model_name))
    self.logger.info("Saving output to %s" % (output_dir))

    # Determine which model and which dataset are used.
    mixture = False
    if "Cornell" in model_dir:
        model_name = "Cornell"
    elif "DexNet" in model_dir:
        model_name = "DexNet"
    if "Cornell" in data_dir:
        data_name = "Cornell"
    elif "DexNet" in data_dir:
        data_name = "DexNet"
    elif "Both" in data_dir:
        data_name = "mixed"
        mixture = True

    # Load the model.
    self.logger.info("Loading model %s" % (model_dir))
    log_file = None
    for handler in self.logger.handlers:
        if isinstance(handler, logging.FileHandler):
            log_file = handler.baseFilename
    gqcnn = get_gqcnn_model(verbose=self.verbose).load(
        model_dir, verbose=self.verbose, log_file=log_file)
    gqcnn.open_session()
    gripper_mode = gqcnn.gripper_mode
    angular_bins = gqcnn.angular_bins

    # Load the data.
    if mixture:
        image_arr, pose_arr, labels, width_arr, file_arr, obj_arr, \
            identity_arr = self._read_data(data_dir, mixture=True)
    else:
        image_arr, pose_arr, labels, width_arr, file_arr, obj_arr = \
            self._read_data(data_dir)
    print("Object arr: ", obj_arr)

    # Predict the outcomes.
    predictions = gqcnn.predict(image_arr, pose_arr)
    if predictions.shape[1] == 1:
        print("Only 1 image given. No t-SNE analysis of network possible")
    else:
        # Set the colors and legend labels.
        color = []
        monotone = False
        if mixture:
            for label, identity in zip(labels, identity_arr):
                if identity == 0:
                    # Cornell
                    if label == 0:  # negative
                        color.append('#FF8000')
                    else:  # positive
                        color.append('#2D702F')
                else:
                    # DexNet
                    if label == 0:  # negative
                        color.append('#FF0404')
                    else:  # positive
                        color.append('#23C328')
            if len(np.unique(labels)) == 1:
                monotone = True
                if labels[0] == 0:
                    data_name += " negatives"
                    pop_a = mpatches.Patch(color='#FF8000',
                                           label='Negative Cornell')
                    pop_b = mpatches.Patch(color='#FF0404',
                                           label='Negative DexNet')
                else:
                    data_name += " positives"
                    pop_a = mpatches.Patch(color='#2D702F',
                                           label='Positive Cornell')
                    pop_b = mpatches.Patch(color='#23C328',
                                           label='Positive DexNet')
            else:
                pop_a = mpatches.Patch(color='#FF8000',
                                       label='Negative Cornell')
                pop_b = mpatches.Patch(color='#FF0404',
                                       label='Negative DexNet')
                pop_c = mpatches.Patch(color='#2D702F',
                                       label='Positive Cornell')
                pop_d = mpatches.Patch(color='#23C328',
                                       label='Positive DexNet')
        else:
            color = ['r' if truth == 0 else 'g' for truth in labels]
            pop_a = mpatches.Patch(color='r', label='Negative grasp')
            pop_b = mpatches.Patch(color='g', label='Positive grasp')

        # t-SNE
        tsne_out = sklearn.manifold.TSNE(
            n_components=2).fit_transform(predictions)
        plt.scatter(tsne_out[:, 0], tsne_out[:, 1], marker='o', c=color)
        if mixture and not monotone:
            plt.legend(handles=[pop_a, pop_b, pop_c, pop_d])
        else:
            plt.legend(handles=[pop_a, pop_b])
        plt.title("TSNE output of %s data on a GQCNN trained on %s" %
                  (data_name, model_name))
        plt.savefig(output_dir + "/" + model_name + "_model_" + data_name +
                    "_data_TSNE.png")
        plt.close()

        # PCA
        pca_out = sklearn.decomposition.PCA(
            n_components=2).fit_transform(predictions)
        plt.scatter(pca_out[:, 0], pca_out[:, 1], marker='o', c=color)
        plt.title("PCA output of %s data on a GQCNN trained on %s" %
                  (data_name, model_name))
        if mixture and not monotone:
            plt.legend(handles=[pop_a, pop_b, pop_c, pop_d])
        else:
            plt.legend(handles=[pop_a, pop_b])
        plt.savefig(output_dir + "/" + model_name + "_model_" + data_name +
                    "_data_PCA.png")
        plt.close()
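# A hypothetical invocation sketch (paths are placeholders, not from the
# original code): the analyzer is pointed at a trained model and a dataset
# directory, and visualise writes the t-SNE and PCA scatter plots into
# <output_dir>/Visualisation/. The substrings "Cornell", "DexNet", or "Both"
# in the paths select the plot labels and the mixture handling.
#
#   analyzer = GQCNNAnalyzer(analysis_config, verbose=True)
#   analyzer.visualise("/path/to/models/GQCNN-Cornell",
#                      "/path/to/analysis_output",
#                      "/path/to/data/Both")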
            full_var_names[15])).numpy()
        self.b_p1 = tf.Variable(reader.get_tensor(
            full_var_names[16])).numpy()
        self.w_4 = np.hstack((self.w_41.T, self.w_42.T)).T

    @staticmethod
    def _leaky_relu(x, alpha=.1):
        return tf.maximum(alpha * x, x)


# Replicated GQCNN model in tensorflow.keras style from a checkpoint.
# We need to do this because the loaded GQCNN does not expose the outputs of
# specific layers, which are required to calculate the gradients.
if tf.__version__ != '2.4.1':
    gqcnn = get_gqcnn_model(verbose=True).load(GQCNN_MODEL, verbose=True)
    gqcnn.open_session()

if '4.0' in GQCNN_MODEL:
    model = GQCNN4().build()
else:
    model = GQCNN2().build()
model.compile(run_eagerly=True,
              loss=tf.keras.losses.SparseCategoricalCrossentropy())

n_examples = np.load(DATA_DIR + depth_str)['arr_0'].shape[0]

# The GQCNN outputs two predictions: the first is the probability of the grasp
# being bad, the second the probability of it being good. They sum to one, so
# the second one is always chosen. Compare gqcnn/analysis/analyzer.py line 292.
for y_c in [0, 1]: