Example #1
    def quality(self, state, actions, params):
        """Evaluate the quality of a set of actions according to a GQ-CNN.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            State of the world described by an RGB-D image.
        actions: :obj:`object`
            Set of grasping actions to evaluate.
        params: dict
            Optional parameters for quality evaluation.

        Returns
        -------
        :obj:`list` of float
            Real-valued grasp quality predictions for each action, between 0
            and 1.
        """
        # Form tensors.
        image_tensor, pose_tensor = self.grasps_to_tensors(actions, state)
        if params is not None and params["vis"]["tf_images"]:
            # Read vis params.
            k = params["vis"]["k"]
            d = utils.sqrt_ceil(k)

            # Display grasp transformed images.
            from visualization import Visualizer2D as vis2d
            vis2d.figure(size=(GeneralConstants.FIGSIZE,
                               GeneralConstants.FIGSIZE))
            for i, image_tf in enumerate(image_tensor[:k, ...]):
                depth = pose_tensor[i][0]
                vis2d.subplot(d, d, i + 1)
                vis2d.imshow(DepthImage(image_tf))
                vis2d.title("Image %d: d=%.3f" % (i, depth))
            vis2d.show()

        # Predict grasps.
        num_actions = len(actions)
        null_arr = -1 * np.ones(self._batch_size)
        predict_start = time()
        output_arr = np.zeros([num_actions, 2])
        cur_i = 0
        end_i = cur_i + min(self._batch_size, num_actions - cur_i)
        while cur_i < num_actions:
            output_arr[cur_i:end_i, :] = self.gqcnn(
                image_tensor[cur_i:end_i, :, :, 0],
                pose_tensor[cur_i:end_i, 0], null_arr)[0]
            cur_i = end_i
            end_i = cur_i + min(self._batch_size, num_actions - cur_i)
        q_values = output_arr[:, -1]
        self._logger.debug("Prediction took %.3f sec" %
                           (time() - predict_start))
        return q_values.tolist()
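
The loop above evaluates the network in fixed-size chunks so that no single
call exceeds `self._batch_size` inputs. The same chunking pattern as a
minimal standalone sketch (pure NumPy; `batch_fn` and `predict_in_batches`
are hypothetical names, not part of the source):

import numpy as np

def predict_in_batches(inputs, batch_fn, batch_size, num_outputs=2):
    """Apply `batch_fn` to `inputs` in chunks of at most `batch_size` rows."""
    outputs = np.zeros([len(inputs), num_outputs])
    cur_i = 0
    while cur_i < len(inputs):
        end_i = cur_i + min(batch_size, len(inputs) - cur_i)
        outputs[cur_i:end_i, :] = batch_fn(inputs[cur_i:end_i])
        cur_i = end_i
    return outputs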
Example #2
    def quality(self, state, actions, params):
        """ Evaluate the quality of a set of actions according to a GQ-CNN.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            state of the world described by an RGB-D image
        actions: :obj:`object`
            set of grasping actions to evaluate
        params: dict
            optional parameters for quality evaluation

        Returns
        -------
        :obj:`list` of float
            real-valued grasp quality predictions for each action, between 0 and 1
        """
        # form tensors
        image_tensor, pose_tensor = self.grasps_to_tensors(actions, state)
        if params is not None and params['vis']['tf_images']:
            # read vis params
            k = params['vis']['k']
            d = utils.sqrt_ceil(k)

            # display grasp transformed images
            from visualization import Visualizer2D as vis2d
            vis2d.figure(size=(FIGSIZE, FIGSIZE))
            for i, image_tf in enumerate(image_tensor[:k, ...]):
                depth = pose_tensor[i][0]
                vis2d.subplot(d, d, i + 1)
                vis2d.imshow(GrayscaleImage(image_tf))
                vis2d.title('Image %d: d=%.3f' % (i, depth))
            vis2d.show()

        # predict grasps
        num_actions = len(actions)
        null_arr = -1 * np.ones(self._batch_size)
        predict_start = time()
        output_arr = np.zeros([num_actions, 2])
        cur_i = 0
        end_i = cur_i + min(self._batch_size, num_actions - cur_i)
        while cur_i < num_actions:
            output_arr[cur_i:end_i, :] = self.gqcnn(
                image_tensor[cur_i:end_i, :, :, 0],
                pose_tensor[cur_i:end_i, 0], null_arr)[0]
            cur_i = end_i
            end_i = cur_i + min(self._batch_size, num_actions - cur_i)
        q_values = output_arr[:, -1]
        logging.debug('Prediction took %.3f sec' % (time() - predict_start))
        return q_values.tolist()
Example #3
    def quality(self, state, actions, params):
        """Evaluate the quality of a set of actions according to a GQ-CNN.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            State of the world described by an RGB-D image.
        actions: :obj:`object`
            Set of grasping actions to evaluate.
        params: dict
            Optional parameters for quality evaluation.

        Returns
        -------
        :obj:`list` of float
            Real-valued grasp quality predictions for each
            action, between 0 and 1.
        """
        # Form tensors.
        tensor_start = time()
        image_tensor, pose_tensor = self.grasps_to_tensors(actions, state)
        self._logger.info("Image transformation took %.3f sec" %
                          (time() - tensor_start))
        if params is not None and params["vis"]["tf_images"]:
            # Read vis params.
            k = params["vis"]["k"]
            d = utils.sqrt_ceil(k)

            # Display grasp transformed images.
            from visualization import Visualizer2D as vis2d
            vis2d.figure(size=(GeneralConstants.FIGSIZE,
                               GeneralConstants.FIGSIZE))
            for i, image_tf in enumerate(image_tensor[:k, ...]):
                depth = pose_tensor[i][0]
                vis2d.subplot(d, d, i + 1)
                vis2d.imshow(DepthImage(image_tf))
                vis2d.title("Image %d: d=%.3f" % (i, depth))
            vis2d.show()

        # Predict grasps.
        predict_start = time()
        output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
        q_values = output_arr[:, -1]
        self._logger.info("Inference took %.3f sec" % (time() - predict_start))
        return q_values.tolist()
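
For context, a hypothetical call site for this method (`policy`, `state`, and
`candidate_grasps` are illustrative names assumed here, not part of the
source):

qualities = policy.quality(state, candidate_grasps,
                           params={"vis": {"tf_images": False, "k": 16}})
best_grasp = candidate_grasps[int(np.argmax(qualities))]  # highest predicted quality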
Example #4
    def quality(self, state, actions, params):
        """ Evaluate the quality of a set of actions according to a GQ-CNN.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            state of the world described by an RGB-D image
        actions: :obj:`object`
            set of grasping actions to evaluate
        params: dict
            optional parameters for quality evaluation

        Returns
        -------
        :obj:`list` of float
            real-valued grasp quality predictions for each action, between 0 and 1
        """
        # form tensors
        tensor_start = time()
        image_tensor, pose_tensor = self.grasps_to_tensors(actions, state)
        logging.info('Image transformation took %.3f sec' %
                     (time() - tensor_start))
        if params is not None and params['vis']['tf_images']:
            # read vis params
            k = params['vis']['k']
            d = utils.sqrt_ceil(k)

            # display grasp transformed images
            from visualization import Visualizer2D as vis2d
            vis2d.figure(size=(FIGSIZE, FIGSIZE))
            for i, image_tf in enumerate(image_tensor[:k, ...]):
                depth = pose_tensor[i][0]
                vis2d.subplot(d, d, i + 1)
                vis2d.imshow(DepthImage(image_tf))
                vis2d.title('Image %d: d=%.3f' % (i, depth))
            vis2d.show()

        # predict grasps
        predict_start = time()
        output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
        q_values = output_arr[:, -1]
        logging.info('Inference took %.3f sec' % (time() - predict_start))
        return q_values.tolist()
Example #5
def analyze_classification_performance(model_dir, config, dataset_path=None):
    # read params
    predict_batch_size = config['batch_size']
    randomize = config['randomize']
    
    plotting_config = config['plotting']
    figsize = plotting_config['figsize']
    font_size = plotting_config['font_size']
    legend_font_size = plotting_config['legend_font_size']
    line_width = plotting_config['line_width']
    colors = plotting_config['colors']
    dpi = plotting_config['dpi']
    style = '-'

    class_remapping = None
    if 'class_remapping' in config.keys():
        class_remapping = config['class_remapping']

    # read training config
    training_config_filename = os.path.join(model_dir, 'training_config.yaml')
    training_config = YamlConfig(training_config_filename)

    # read training params
    indices_filename = None
    if dataset_path is None:
        dataset_path = training_config['dataset']
        indices_filename = os.path.join(model_dir, 'splits.npz')
    dataset_prefix, dataset_name = os.path.split(dataset_path)
    if dataset_name == '':
        _, dataset_name = os.path.split(dataset_prefix)
    x_names = training_config['x_names']
    y_name = training_config['y_name']
    batch_size = training_config['training']['batch_size']
    iterator_config = training_config['data_iteration']
    x_name = x_names[0]

    # set analysis dir
    analysis_dir = os.path.join(model_dir, 'analysis')
    if not os.path.exists(analysis_dir):
        os.mkdir(analysis_dir)

    # setup log file
    experiment_log_filename = os.path.join(analysis_dir, '%s_analysis.log' %(dataset_name))
    if os.path.exists(experiment_log_filename):
        os.remove(experiment_log_filename)
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)

    # setup plotting
    plt.figure(figsize=(figsize, figsize))

    # read dataset
    dataset = TensorDataset.open(dataset_path)

    # read dataset splits
    if indices_filename is None:
        splits = {dataset_name: np.arange(dataset.num_datapoints)}
    else:
        splits = np.load(indices_filename)['arr_0'].tolist()

    # load cnn
    logging.info('Loading model %s' %(model_dir))
    cnn = ClassificationCNN.open(model_dir)

    # save examples
    logging.info('Saving examples of each class')
    all_labels = np.arange(cnn.num_classes)
    label_counts = {label: 0 for label in all_labels}
    for tensor_ind in range(dataset.num_tensors):
        tensor = dataset.tensor(y_name, tensor_ind)
        for label in tensor:
            label_counts[label] += 1

    d = utils.sqrt_ceil(cnn.num_classes)
    plt.clf()
    for i, label in enumerate(all_labels):
        tensor_ind = 0
        label_found = False
        while not label_found and tensor_ind < dataset.num_tensors:
            tensor = dataset.tensor(y_name, tensor_ind)
            ind = np.where(tensor.arr == label)[0]
            if ind.shape[0] > 0:
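                # Convert the within-tensor offset to a global datapoint index.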
                ind = ind[0] + dataset.datapoints_per_tensor * (tensor_ind)
                label_found = True

            tensor_ind += 1
        
        if not label_found:
            continue
        datapoint = dataset[ind]
        example_im = datapoint[x_name]
        
        plt.subplot(d,d,i+1)
        plt.imshow(example_im[:,:,:3].astype(np.uint8))
        plt.title('Class %03d: %.3f%%' %(label, 100.0 * label_counts[label] / dataset.num_datapoints), fontsize=3)
        plt.axis('off')
    plt.savefig(os.path.join(analysis_dir, '%s_classes.pdf' %(dataset_name)))

    # evaluate on dataset
    results = {}
    for split_name, indices in splits.items():
        logging.info('Evaluating performance on split: %s' %(split_name))

        # predict
        if randomize:
            pred_probs, true_labels = cnn.evaluate_on_dataset(dataset, indices=indices, batch_size=predict_batch_size)
            pred_labels = np.argmax(pred_probs, axis=1)
        else:
            true_labels = []
            pred_labels = []
            pred_probs = []
            for datapoint in dataset:
                im = ColorImage(datapoint['color_ims'].astype(np.uint8)[:,:,:3])
                true_label = datapoint['stp_labels']
                pred_prob = cnn.predict(im)
                pred_label = np.argmax(pred_prob, axis=1)
                true_labels.append(true_label)
                pred_labels.append(pred_label)
                pred_probs.append(pred_prob.ravel())
                
                """
                if class_remapping is not None:
                    true_label = class_remapping[true_label]
                plt.figure()
                plt.imshow(im.raw_data)
                plt.title('T: %d, P: %d' %(true_label, pred_label))
                plt.show()
                """
            true_labels = np.array(true_labels)
            pred_labels = np.array(pred_labels)
            pred_probs = np.array(pred_probs)
                
        # apply optional class re-mapping
        if class_remapping is not None:
            new_true_labels = np.zeros(true_labels.shape)
            for orig_label, new_label in class_remapping.items():
                new_true_labels[true_labels==orig_label] = new_label
            true_labels = new_true_labels

        # compute classification results
        result = ClassificationResult([pred_probs], [true_labels])
        results[split_name] = result

        # print stats
        logging.info('SPLIT: %s' %(split_name))
        logging.info('Acc: %.3f' %(result.accuracy))
        logging.info('AP: %.3f' %(result.ap_score))
        logging.info('AUC: %.3f' %(result.auc_score))

        # save confusion matrix
        confusion = result.confusion_matrix.data
        plt.clf()
        plt.imshow(confusion, cmap=plt.cm.gray, interpolation='none')
        plt.locator_params(nbins=cnn.num_classes)
        plt.xlabel('Predicted', fontsize=font_size)
        plt.ylabel('Actual', fontsize=font_size)
        plt.savefig(os.path.join(analysis_dir, '%s_confusion.pdf' %(split_name)), dpi=dpi)

        # save analysis
        result_filename = os.path.join(analysis_dir, '%s.cres' %(split_name))
        result.save(result_filename)

    # plot
    colormap = plt.get_cmap('tab10')
    num_colors = 9

    plt.clf()
    for i, split_name in enumerate(splits.keys()):
        result = results[split_name]
        precision, recall, taus = result.precision_recall_curve()
        color = colormap(float(colors[i%num_colors]) / num_colors)
        plt.plot(recall, precision, linewidth=line_width, color=color, linestyle=style, label=split_name)
    plt.xlabel('Recall', fontsize=font_size)
    plt.ylabel('Precision', fontsize=font_size)
    plt.title('Precision-Recall Curve', fontsize=font_size)
    handles, plt_labels = plt.gca().get_legend_handles_labels()
    plt.legend(handles, plt_labels, loc='best', fontsize=legend_font_size)
    plt.savefig(os.path.join(analysis_dir, '%s_precision_recall.pdf' %(dataset_name)), dpi=dpi)

    plt.clf()
    for i, split_name in enumerate(splits.keys()):
        result = results[split_name]
        fpr, tpr, taus = result.roc_curve()
        color = colormap(float(colors[i%num_colors]) / num_colors)
        plt.plot(fpr, tpr, linewidth=line_width, color=color, linestyle=style, label=split_name)
    plt.xlabel('FPR', fontsize=font_size)
    plt.ylabel('TPR', fontsize=font_size)
    plt.title('Receiver Operating Characteristic', fontsize=font_size)
    handles, plt_labels = plt.gca().get_legend_handles_labels()
    plt.legend(handles, plt_labels, loc='best', fontsize=legend_font_size)
    plt.savefig(os.path.join(analysis_dir, '%s_roc.pdf' %(dataset_name)), dpi=dpi)
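
The `ClassificationResult` helpers above encapsulate the curve computations;
for reference, a minimal sketch of the same precision-recall and ROC points
computed directly with scikit-learn (assuming binary labels and
positive-class probabilities):

import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve

def binary_curves(true_labels, pos_probs):
    # Sweep the decision threshold to obtain both curves.
    precision, recall, _ = precision_recall_curve(true_labels, pos_probs)
    fpr, tpr, _ = roc_curve(true_labels, pos_probs)
    return (recall, precision), (fpr, tpr)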
Example #6
    def _run_prediction_single_model(self, model_dir, model_output_dir,
                                     dataset_config):
        """Analyze the performance of a single model."""
        # Read in model config.
        model_config_filename = os.path.join(model_dir,
                                             GQCNNFilenames.SAVED_CFG)
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # Load model.
        self.logger.info("Loading model %s" % (model_dir))
        log_file = None
        for handler in self.logger.handlers:
            if isinstance(handler, logging.FileHandler):
                log_file = handler.baseFilename
        gqcnn = get_gqcnn_model(verbose=self.verbose).load(
            model_dir, verbose=self.verbose, log_file=log_file)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode
        angular_bins = gqcnn.angular_bins

        # Read params from the config.
        if dataset_config is None:
            dataset_dir = model_config["dataset_dir"]
            split_name = model_config["split_name"]
            image_field_name = model_config["image_field_name"]
            pose_field_name = model_config["pose_field_name"]
            metric_name = model_config["target_metric_name"]
            metric_thresh = model_config["metric_thresh"]
        else:
            dataset_dir = dataset_config["dataset_dir"]
            split_name = dataset_config["split_name"]
            image_field_name = dataset_config["image_field_name"]
            pose_field_name = dataset_config["pose_field_name"]
            metric_name = dataset_config["target_metric_name"]
            metric_thresh = dataset_config["metric_thresh"]
            gripper_mode = dataset_config["gripper_mode"]

        self.logger.info("Loading dataset %s" % (dataset_dir))
        dataset = TensorDataset.open(dataset_dir)
        train_indices, val_indices, _ = dataset.split(split_name)

        # Visualize conv filters.
        conv1_filters = gqcnn.filters
        num_filt = conv1_filters.shape[3]
        d = utils.sqrt_ceil(num_filt)
        vis2d.clf()
        for k in range(num_filt):
            filt = conv1_filters[:, :, 0, k]
            vis2d.subplot(d, d, k + 1)
            vis2d.imshow(DepthImage(filt))
        figname = os.path.join(model_output_dir, "conv1_filters.pdf")
        vis2d.savefig(figname, dpi=self.dpi)

        # Aggregate training and validation true labels and predicted
        # probabilities.
        all_predictions = []
        if angular_bins > 0:
            all_predictions_raw = []
        all_labels = []
        for i in range(dataset.num_tensors):
            # Log progress.
            if i % self.log_rate == 0:
                self.logger.info("Predicting tensor %d of %d" %
                                 (i + 1, dataset.num_tensors))

            # Read in data.
            image_arr = dataset.tensor(image_field_name, i).arr
            pose_arr = read_pose_data(
                dataset.tensor(pose_field_name, i).arr, gripper_mode)
            metric_arr = dataset.tensor(metric_name, i).arr
            label_arr = 1 * (metric_arr > metric_thresh)
            label_arr = label_arr.astype(np.uint8)
            if angular_bins > 0:
                # Form mask to extract predictions from ground-truth angular
                # bins.
                raw_poses = dataset.tensor(pose_field_name, i).arr
                angles = raw_poses[:, 3]
                neg_ind = np.where(angles < 0)
                # TODO(vsatish): These should use the max angle instead.
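                # Wrap raw rotations into [-pi/2, pi/2], undo the reversed
                # angle convention, and shift into [0, pi) so that integer
                # division by the bin width yields the ground-truth bin.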
                angles = np.abs(angles) % GeneralConstants.PI
                angles[neg_ind] *= -1
                g_90 = np.where(angles > (GeneralConstants.PI / 2))
                l_neg_90 = np.where(angles < (-1 * (GeneralConstants.PI / 2)))
                angles[g_90] -= GeneralConstants.PI
                angles[l_neg_90] += GeneralConstants.PI
                # TODO(vsatish): Fix this along with the others.
                angles *= -1  # Hack to fix reverse angle convention.
                angles += (GeneralConstants.PI / 2)
                pred_mask = np.zeros((raw_poses.shape[0], angular_bins * 2),
                                     dtype=bool)
                bin_width = GeneralConstants.PI / angular_bins
                for j in range(angles.shape[0]):
                    pred_mask[j, int((angles[j] // bin_width) * 2)] = True
                    pred_mask[j, int((angles[j] // bin_width) * 2 + 1)] = True

            # Predict with GQ-CNN.
            predictions = gqcnn.predict(image_arr, pose_arr)
            if angular_bins > 0:
                raw_predictions = np.array(predictions)
                predictions = predictions[pred_mask].reshape((-1, 2))

            # Aggregate.
            all_predictions.extend(predictions[:, 1].tolist())
            if angular_bins > 0:
                all_predictions_raw.extend(raw_predictions.tolist())
            all_labels.extend(label_arr.tolist())

        # Close session.
        gqcnn.close_session()

        # Create arrays.
        all_predictions = np.array(all_predictions)
        all_labels = np.array(all_labels)
        train_predictions = all_predictions[train_indices]
        val_predictions = all_predictions[val_indices]
        train_labels = all_labels[train_indices]
        val_labels = all_labels[val_indices]
        if angular_bins > 0:
            all_predictions_raw = np.array(all_predictions_raw)
            train_predictions_raw = all_predictions_raw[train_indices]
            val_predictions_raw = all_predictions_raw[val_indices]

        # Aggregate results.
        train_result = BinaryClassificationResult(train_predictions,
                                                  train_labels)
        val_result = BinaryClassificationResult(val_predictions, val_labels)
        train_result.save(os.path.join(model_output_dir, "train_result.cres"))
        val_result.save(os.path.join(model_output_dir, "val_result.cres"))

        # Get stats, plot curves.
        self.logger.info("Model %s training error rate: %.3f" %
                         (model_dir, train_result.error_rate))
        self.logger.info("Model %s validation error rate: %.3f" %
                         (model_dir, val_result.error_rate))

        self.logger.info("Model %s training loss: %.3f" %
                         (model_dir, train_result.cross_entropy_loss))
        self.logger.info("Model %s validation loss: %.3f" %
                         (model_dir, val_result.cross_entropy_loss))

        # Save images.
        vis2d.figure()
        example_dir = os.path.join(model_output_dir, "examples")
        if not os.path.exists(example_dir):
            os.mkdir(example_dir)

        # Train.
        self.logger.info("Saving training examples")
        train_example_dir = os.path.join(example_dir, "train")
        if not os.path.exists(train_example_dir):
            os.mkdir(train_example_dir)

        # Train TP.
        true_positive_indices = train_result.true_positive_indices
        np.random.shuffle(true_positive_indices)
        true_positive_indices = true_positive_indices[:self.num_vis]
        for i, j in enumerate(true_positive_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=train_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title(
                "Datapoint %d: Pred: %.3f Label: %.3f" %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             "true_positive_%03d.png" % (i)))

        # Train FP.
        false_positive_indices = train_result.false_positive_indices
        np.random.shuffle(false_positive_indices)
        false_positive_indices = false_positive_indices[:self.num_vis]
        for i, j in enumerate(false_positive_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=train_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title(
                "Datapoint %d: Pred: %.3f Label: %.3f" %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             "false_positive_%03d.png" % (i)))

        # Train TN.
        true_negative_indices = train_result.true_negative_indices
        np.random.shuffle(true_negative_indices)
        true_negative_indices = true_negative_indices[:self.num_vis]
        for i, j in enumerate(true_negative_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=train_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title(
                "Datapoint %d: Pred: %.3f Label: %.3f" %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             "true_negative_%03d.png" % (i)))

        # Train FN.
        false_negative_indices = train_result.false_negative_indices
        np.random.shuffle(false_negative_indices)
        false_negative_indices = false_negative_indices[:self.num_vis]
        for i, j in enumerate(false_negative_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=train_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title(
                "Datapoint %d: Pred: %.3f Label: %.3f" %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             "false_negative_%03d.png" % (i)))

        # Val.
        self.logger.info("Saving validation examples")
        val_example_dir = os.path.join(example_dir, "val")
        if not os.path.exists(val_example_dir):
            os.mkdir(val_example_dir)

        # Val TP.
        true_positive_indices = val_result.true_positive_indices
        np.random.shuffle(true_positive_indices)
        true_positive_indices = true_positive_indices[:self.num_vis]
        for i, j in enumerate(true_positive_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=val_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, "true_positive_%03d.png" % (i)))

        # Val FP.
        false_positive_indices = val_result.false_positive_indices
        np.random.shuffle(false_positive_indices)
        false_positive_indices = false_positive_indices[:self.num_vis]
        for i, j in enumerate(false_positive_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=val_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, "false_positive_%03d.png" % (i)))

        # Val TN.
        true_negative_indices = val_result.true_negative_indices
        np.random.shuffle(true_negative_indices)
        true_negative_indices = true_negative_indices[:self.num_vis]
        for i, j in enumerate(true_negative_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=val_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, "true_negative_%03d.png" % (i)))

        # Val FN.
        false_negative_indices = val_result.false_negative_indices
        np.random.shuffle(false_negative_indices)
        false_negative_indices = false_negative_indices[:self.num_vis]
        for i, j in enumerate(false_negative_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint,
                                 image_field_name,
                                 pose_field_name,
                                 gripper_mode,
                                 angular_preds=val_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name,
                                 gripper_mode)
            vis2d.title("Datapoint %d: Pred: %.3f Label: %.3f" %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, "false_negative_%03d.png" % (i)))

        # Save summary stats.
        train_summary_stats = {
            "error_rate": train_result.error_rate,
            "ap_score": train_result.ap_score,
            "auc_score": train_result.auc_score,
            "loss": train_result.cross_entropy_loss
        }
        train_stats_filename = os.path.join(model_output_dir,
                                            "train_stats.json")
        json.dump(train_summary_stats,
                  open(train_stats_filename, "w"),
                  indent=JSON_INDENT,
                  sort_keys=True)

        val_summary_stats = {
            "error_rate": val_result.error_rate,
            "ap_score": val_result.ap_score,
            "auc_score": val_result.auc_score,
            "loss": val_result.cross_entropy_loss
        }
        val_stats_filename = os.path.join(model_output_dir, "val_stats.json")
        json.dump(val_summary_stats,
                  open(val_stats_filename, "w"),
                  indent=JSON_INDENT,
                  sort_keys=True)

        return train_result, val_result
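
The inline angular-bin mask construction above is repeated per tensor; it can
be read as a standalone helper. A vectorized sketch under the same
conventions (`angular_bin_mask` is a hypothetical name; `raw_angles` is
assumed to be the fourth pose column, in radians):

import numpy as np

def angular_bin_mask(raw_angles, angular_bins, max_angle=np.pi):
    """Boolean mask selecting the (fail, success) output pair of each
    grasp's ground-truth angular bin."""
    angles = np.abs(raw_angles) % max_angle
    angles[raw_angles < 0] *= -1
    # Wrap into [-max_angle/2, max_angle/2].
    angles[angles > max_angle / 2] -= max_angle
    angles[angles < -max_angle / 2] += max_angle
    # Undo the reversed angle convention and shift into [0, max_angle].
    angles = -angles + max_angle / 2
    bins = (angles // (max_angle / angular_bins)).astype(int)
    # Guard the angle == max_angle edge case.
    bins = np.clip(bins, 0, angular_bins - 1)
    mask = np.zeros((raw_angles.shape[0], angular_bins * 2), dtype=bool)
    rows = np.arange(raw_angles.shape[0])
    mask[rows, 2 * bins] = True
    mask[rows, 2 * bins + 1] = True
    return mask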
Example #7
    def action(self, state):
        """ Plans the grasp with the highest probability of success on
        the given RGB-D image.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            image to plan grasps on

        Returns
        -------
        :obj:`ParallelJawGrasp`
            grasp to execute
        """
        # check valid input
        if not isinstance(state, RgbdImageState):
            raise ValueError('Must provide an RGB-D image state.')

        # parse state
        rgbd_im = state.rgbd_im
        camera_intr = state.camera_intr
        segmask = state.segmask

        # sample grasps
        grasps = self._grasp_sampler.sample(
            rgbd_im,
            camera_intr,
            self._num_seed_samples,
            segmask=segmask,
            visualize=self.config['vis']['grasp_sampling'],
            seed=self._seed)
        num_grasps = len(grasps)

        if num_grasps == 0:
            logging.warning('No valid grasps could be found')
            return None

        # form tensors
        image_tensor, pose_tensor = self.grasps_to_tensors(grasps, state)
        if self.config['vis']['tf_images']:
            # read vis params
            k = self.config['vis']['k']
            d = utils.sqrt_ceil(k)

            # display grasp transformed images
            vis.figure(size=(FIGSIZE, FIGSIZE))
            for i, image_tf in enumerate(image_tensor[:k, ...]):
                depth = pose_tensor[i][0]
                vis.subplot(d, d, i + 1)
                vis.imshow(DepthImage(image_tf))
                vis.title('Image %d: d=%.3f' % (i, depth))

            # display grasp transformed images
            vis.figure(size=(FIGSIZE, FIGSIZE))
            for i in range(d):
                image_tf = image_tensor[i, ...]
                depth = pose_tensor[i][0]
                grasp = grasps[i]

                vis.subplot(d, 2, 2 * i + 1)
                vis.imshow(rgbd_im.depth)
                vis.grasp(grasp, scale=1.5, show_center=False, show_axis=True)
                vis.title('Grasp %d: d=%.3f' % (i, depth))

                vis.subplot(d, 2, 2 * i + 2)
                vis.imshow(DepthImage(image_tf))
                vis.title('TF image %d: d=%.3f' % (i, depth))
            vis.show()

        # iteratively refit and sample
        for j in range(self._num_iters):
            logging.debug('CEM iter %d' % (j))

            # predict grasps
            predict_start = time()
            output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
            q_values = output_arr[:, -1]
            logging.debug('Prediction took %.3f sec' %
                          (time() - predict_start))

            # sort grasps
            q_values_and_indices = list(zip(q_values, np.arange(num_grasps)))
            q_values_and_indices.sort(key=lambda x: x[0], reverse=True)

            if self.config['vis']['grasp_candidates']:
                # display each grasp on the original image, colored by predicted success
                vis.figure(size=(FIGSIZE, FIGSIZE))
                vis.imshow(rgbd_im.depth)
                for grasp, q in zip(grasps, q_values):
                    vis.grasp(grasp,
                              scale=1.5,
                              show_center=False,
                              show_axis=True,
                              color=plt.cm.RdYlBu(q))
                vis.title('Sampled grasps iter %d' % (j))
                vis.show()

            if self.config['vis']['grasp_ranking']:
                # read vis params
                k = self.config['vis']['k']
                d = utils.sqrt_ceil(k)

                # form camera intr for the thumbnail (to compute gripper width)
                scale_factor = float(self.gqcnn.im_width) / float(
                    self._crop_width)
                scaled_camera_intr = camera_intr.resize(scale_factor)

                vis.figure(size=(FIGSIZE, FIGSIZE))
                for i, p in enumerate(q_values_and_indices[:k]):
                    # read stats for grasp
                    q_value = p[0]
                    ind = p[1]
                    depth = pose_tensor[ind][0]
                    image = DepthImage(image_tensor[ind, ...])
                    grasp = Grasp2D(Point(image.center),
                                    0.0,
                                    depth,
                                    width=self._gripper_width,
                                    camera_intr=scaled_camera_intr)

                    # plot
                    vis.subplot(d, d, i + 1)
                    vis.imshow(image)
                    vis.grasp(grasp, scale=1.5)
                    vis.title('K=%d: d=%.3f, q=%.3f' % (i, depth, q_value))
                vis.show()

            # fit elite set
            num_refit = max(int(np.ceil(self._gmm_refit_p * num_grasps)), 1)
            elite_q_values = [i[0] for i in q_values_and_indices[:num_refit]]
            elite_grasp_indices = [
                i[1] for i in q_values_and_indices[:num_refit]
            ]
            elite_grasps = [grasps[i] for i in elite_grasp_indices]
            elite_grasp_arr = np.array([g.feature_vec for g in elite_grasps])

            if self.config['vis']['elite_grasps']:
                # display each grasp on the original image, colored by predicted success
                vis.figure(size=(FIGSIZE, FIGSIZE))
                vis.imshow(rgbd_im.depth)
                for grasp, q in zip(elite_grasps, elite_q_values):
                    vis.grasp(grasp,
                              scale=1.5,
                              show_center=False,
                              show_axis=True,
                              color=plt.cm.RdYlBu(q))
                vis.title('Elite grasps iter %d' % (j))
                vis.show()

            # normalize elite set
            elite_grasp_mean = np.mean(elite_grasp_arr, axis=0)
            elite_grasp_std = np.std(elite_grasp_arr, axis=0)
            elite_grasp_std[elite_grasp_std == 0] = 1.0
            elite_grasp_arr = (elite_grasp_arr -
                               elite_grasp_mean) / elite_grasp_std

            # fit a GMM to the top samples
            num_components = max(
                int(np.ceil(self._gmm_component_frac * num_refit)), 1)
            uniform_weights = (1.0 / num_components) * np.ones(num_components)
            gmm = GaussianMixture(n_components=num_components,
                                  weights_init=uniform_weights,
                                  reg_covar=self._gmm_reg_covar)
            train_start = time()

            gmm.fit(elite_grasp_arr)
            train_duration = time() - train_start
            logging.debug('GMM fitting with %d components took %.3f sec' %
                          (num_components, train_duration))

            # sample the next grasps
            sample_start = time()
            grasp_vecs, _ = gmm.sample(n_samples=self._num_gmm_samples)
            grasp_vecs = elite_grasp_std * grasp_vecs + elite_grasp_mean
            sample_duration = time() - sample_start
            logging.debug('GMM sampling took %.3f sec' % (sample_duration))

            # convert features to grasps
            grasps = []
            for grasp_vec in grasp_vecs:
                grasps.append(
                    Grasp2D.from_feature_vec(grasp_vec,
                                             width=self._gripper_width,
                                             camera_intr=camera_intr))
            num_grasps = len(grasps)
            if num_grasps == 0:
                logging.warning('No valid grasps could be found')
                return None

            # form tensors
            image_tensor, pose_tensor = self.grasps_to_tensors(grasps, state)
            if self.config['vis']['tf_images']:
                # read vis params
                k = self.config['vis']['k']
                d = utils.sqrt_ceil(k)

                # display grasp transformed images
                vis.figure(size=(FIGSIZE, FIGSIZE))
                for i, image_tf in enumerate(image_tensor[:k, ...]):
                    depth = pose_tensor[i][0]
                    vis.subplot(d, d, i + 1)
                    vis.imshow(DepthImage(image_tf))
                    vis.title('Image %d: d=%.3f' % (i, depth))
                vis.show()

        # predict final set of grasps
        predict_start = time()
        output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
        q_values = output_arr[:, -1]
        logging.debug('Final prediction took %.3f sec' %
                      (time() - predict_start))

        if self.config['vis']['grasp_candidates']:
            # display each grasp on the original image, colored by predicted success
            vis.figure(size=(FIGSIZE, FIGSIZE))
            vis.imshow(rgbd_im.depth)
            for grasp, q in zip(grasps, q_values):
                vis.grasp(grasp,
                          scale=1.5,
                          show_center=False,
                          show_axis=True,
                          color=plt.cm.RdYlBu(q))
            vis.title('Final sampled grasps')
            vis.show()

        # select grasp
        index = self.select(grasps, q_values)
        grasp = grasps[index]
        q_value = q_values[index]
        image = DepthImage(image_tensor[index, ...])
        pose = pose_tensor[index, ...]
        depth = pose[0]
        if self.config['vis']['grasp_plan']:
            scale_factor = float(self.gqcnn.im_width) / float(self._crop_width)
            scaled_camera_intr = camera_intr.resize(scale_factor)
            grasp = Grasp2D(Point(image.center),
                            0.0,
                            pose[0],
                            width=self._gripper_width,
                            camera_intr=scaled_camera_intr)
            vis.figure()
            vis.imshow(image)
            vis.grasp(grasp, scale=1.5, show_center=False, show_axis=True)
            vis.title('Best Grasp: d=%.3f, q=%.3f' % (depth, q_value))
            vis.show()

        # return action
        return ParallelJawGrasp(grasp, q_value, image)
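
The refit-and-sample loop above is the cross-entropy method (CEM): score the
candidates, keep an elite set, fit a GMM, and resample. A stripped-down
sketch of that skeleton, assuming a generic `score_fn` over (N, D) feature
vectors (hypothetical; the real loop scores grasps through the GQ-CNN):

import numpy as np
from sklearn.mixture import GaussianMixture

def cem_iterate(samples, score_fn, num_iters=3, elite_frac=0.25,
                num_components=3, reg_covar=1e-2):
    for _ in range(num_iters):
        scores = score_fn(samples)
        num_elite = max(int(np.ceil(elite_frac * len(samples))), 1)
        elite = samples[np.argsort(scores)[::-1][:num_elite]]
        # Normalize the elite set so the GMM fit is well-conditioned.
        mean, std = elite.mean(axis=0), elite.std(axis=0)
        std[std == 0] = 1.0
        gmm = GaussianMixture(n_components=min(num_components, num_elite),
                              reg_covar=reg_covar)
        gmm.fit((elite - mean) / std)
        # Sample the next generation and undo the normalization.
        samples, _ = gmm.sample(n_samples=len(samples))
        samples = std * samples + mean
    return samples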
Example #8
    def action(self, state):
        """ Plans the grasp with the highest probability of success on
        the given RGB-D image.

        Parameters
        ----------
        state : :obj:`RgbdImageState`
            image to plan grasps on

        Returns
        -------
        :obj:`ParallelJawGrasp`
            grasp to execute
        """
        # check valid input
        if not isinstance(state, RgbdImageState):
            raise ValueError('Must provide an RGB-D image state.')

        # parse state
        rgbd_im = state.rgbd_im
        camera_intr = state.camera_intr
        segmask = state.segmask

        # sample grasps
        grasps = self._grasp_sampler.sample(
            rgbd_im,
            camera_intr,
            self._num_grasp_samples,
            segmask=segmask,
            visualize=self.config['vis']['grasp_sampling'],
            seed=None)
        num_grasps = len(grasps)

        if num_grasps == 0:
            logging.warning('No valid grasps could be found')
            return None

        # form tensors
        image_tensor, pose_tensor = self.grasps_to_tensors(grasps, state)
        if self.config['vis']['tf_images']:
            # read vis params
            k = self.config['vis']['k']
            d = utils.sqrt_ceil(k)

            # display grasp transformed images
            vis.figure(size=(FIGSIZE, FIGSIZE))
            for i, image_tf in enumerate(image_tensor[:k, ...]):
                depth = pose_tensor[i][0]
                vis.subplot(d, d, i + 1)
                vis.imshow(DepthImage(image_tf))
                vis.title('Image %d: d=%.3f' % (i, depth))
            vis.show()

        # predict grasps
        predict_start = time()
        output_arr = self.gqcnn.predict(image_tensor, pose_tensor)
        q_values = output_arr[:, -1]
        logging.debug('Prediction took %.3f sec' % (time() - predict_start))

        if self.config['vis']['grasp_candidates']:
            # display each grasp on the original image, colored by predicted success
            vis.figure(size=(FIGSIZE, FIGSIZE))
            vis.imshow(rgbd_im.depth)
            for grasp, q in zip(grasps, q_values):
                vis.grasp(grasp,
                          scale=1.5,
                          show_center=False,
                          show_axis=True,
                          color=plt.cm.RdYlBu(q))
            vis.title('Sampled grasps')
            vis.show()

        if self.config['vis']['grasp_ranking']:
            # read vis params
            k = self.config['vis']['k']
            d = utils.sqrt_ceil(k)

            # form camera intr for the thumbnail (to compute gripper width)
            scale_factor = float(self.gqcnn.im_width) / float(self._crop_width)
            scaled_camera_intr = camera_intr.resize(scale_factor)

            # sort grasps
            q_values_and_indices = list(zip(q_values, np.arange(num_grasps)))
            q_values_and_indices.sort(key=lambda x: x[0], reverse=True)

            vis.figure(size=(FIGSIZE, FIGSIZE))
            for i, p in enumerate(q_values_and_indices[:k]):
                # read stats for grasp
                q_value = p[0]
                ind = p[1]
                depth = pose_tensor[ind][0]
                image = DepthImage(image_tensor[ind, ...])
                grasp = Grasp2D(Point(image.center),
                                0.0,
                                depth,
                                width=self._gripper_width,
                                camera_intr=scaled_camera_intr)

                # plot
                vis.subplot(d, d, i + 1)
                vis.imshow(image)
                vis.grasp(grasp, scale=1.5)
                vis.title('K=%d: d=%.3f, q=%.3f' % (i, depth, q_value))
            vis.show()

        # select grasp
        index = self.select(grasps, q_values)
        grasp = grasps[index]
        q_value = q_values[index]
        image = DepthImage(image_tensor[index, ...])
        pose = pose_tensor[index, ...]
        depth = pose[0]
        if self.config['vis']['grasp_plan']:
            scale_factor = float(self.gqcnn.im_width) / float(self._crop_width)
            scaled_camera_intr = camera_intr.resize(scale_factor)
            grasp = Grasp2D(Point(image.center),
                            0.0,
                            pose[0],
                            width=self._gripper_width,
                            camera_intr=scaled_camera_intr)
            vis.figure()
            vis.imshow(image)
            vis.grasp(grasp, scale=1.5, show_center=False, show_axis=True)
            vis.title('Best Grasp: d=%.3f, q=%.3f' % (depth, q_value))
            vis.show()

        # return action
        return ParallelJawGrasp(grasp, q_value, image)
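
A small note on the ranking step: in Python 3, `zip()` returns an iterator,
so it must be materialized with `list()` before sorting, as done above. An
equivalent and slightly more direct formulation with `np.argsort`:

order = np.argsort(q_values)[::-1]  # grasp indices, best first
q_values_and_indices = [(q_values[i], i) for i in order]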
Example #9
    def _run_prediction_single_model(self, model_dir,
                                     model_output_dir,
                                     dataset_config):
        """ Analyze the performance of a single model. """
        # read in model config
        model_config_filename = os.path.join(model_dir, 'config.json')
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # load model
        self.logger.info('Loading model %s' %(model_dir))
        log_file = None
        for handler in self.logger.handlers:
            if isinstance(handler, logging.FileHandler):
                log_file = handler.baseFilename
        gqcnn = get_gqcnn_model(verbose=self.verbose).load(model_dir, verbose=self.verbose, log_file=log_file)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode
        angular_bins = gqcnn.angular_bins
        
        # read params from the config
        if dataset_config is None:
            dataset_dir = model_config['dataset_dir']
            split_name = model_config['split_name']
            image_field_name = model_config['image_field_name']
            pose_field_name = model_config['pose_field_name']
            metric_name = model_config['target_metric_name']
            metric_thresh = model_config['metric_thresh']
        else:
            dataset_dir = dataset_config['dataset_dir']
            split_name = dataset_config['split_name']
            image_field_name = dataset_config['image_field_name']
            pose_field_name = dataset_config['pose_field_name']
            metric_name = dataset_config['target_metric_name']
            metric_thresh = dataset_config['metric_thresh']
            gripper_mode = dataset_config['gripper_mode']
            
        self.logger.info('Loading dataset %s' %(dataset_dir))
        dataset = TensorDataset.open(dataset_dir)
        train_indices, val_indices, _ = dataset.split(split_name)
        
        # visualize conv filters
        conv1_filters = gqcnn.filters
        num_filt = conv1_filters.shape[3]
        d = utils.sqrt_ceil(num_filt)
        vis2d.clf()
        for k in range(num_filt):
            filt = conv1_filters[:,:,0,k]
            vis2d.subplot(d,d,k+1)
            vis2d.imshow(DepthImage(filt))
        figname = os.path.join(model_output_dir, 'conv1_filters.pdf')
        vis2d.savefig(figname, dpi=self.dpi)
        
        # aggregate training and validation true labels and predicted probabilities
        all_predictions = []
        if angular_bins > 0:
            all_predictions_raw = []
        all_labels = []
        for i in range(dataset.num_tensors):
            # log progress
            if i % self.log_rate == 0:
                self.logger.info('Predicting tensor %d of %d' %(i+1, dataset.num_tensors))

            # read in data
            image_arr = dataset.tensor(image_field_name, i).arr
            pose_arr = read_pose_data(dataset.tensor(pose_field_name, i).arr,
                                      gripper_mode)
            metric_arr = dataset.tensor(metric_name, i).arr
            label_arr = 1 * (metric_arr > metric_thresh)
            label_arr = label_arr.astype(np.uint8)
            if angular_bins > 0:
                # form mask to extract predictions from ground-truth angular bins
                raw_poses = dataset.tensor(pose_field_name, i).arr
                angles = raw_poses[:, 3]
                neg_ind = np.where(angles < 0)
                angles = np.abs(angles) % GeneralConstants.PI
                angles[neg_ind] *= -1
                g_90 = np.where(angles > (GeneralConstants.PI / 2))
                l_neg_90 = np.where(angles < (-1 * (GeneralConstants.PI / 2)))
                angles[g_90] -= GeneralConstants.PI
                angles[l_neg_90] += GeneralConstants.PI
                angles *= -1 # hack to fix reverse angle convention
                angles += (GeneralConstants.PI / 2)
                pred_mask = np.zeros((raw_poses.shape[0], angular_bins*2), dtype=bool)
                bin_width = GeneralConstants.PI / angular_bins
                for j in range(angles.shape[0]):
                    pred_mask[j, int((angles[j] // bin_width)*2)] = True
                    pred_mask[j, int((angles[j] // bin_width)*2 + 1)] = True

            # predict with GQ-CNN
            predictions = gqcnn.predict(image_arr, pose_arr)
            if angular_bins > 0:
                raw_predictions = np.array(predictions)
                predictions = predictions[pred_mask].reshape((-1, 2))
            
            # aggregate
            all_predictions.extend(predictions[:,1].tolist())
            if angular_bins > 0:
                all_predictions_raw.extend(raw_predictions.tolist())
            all_labels.extend(label_arr.tolist())
            
        # close session
        gqcnn.close_session()            

        # create arrays
        all_predictions = np.array(all_predictions)
        all_labels = np.array(all_labels)
        train_predictions = all_predictions[train_indices]
        val_predictions = all_predictions[val_indices]
        train_labels = all_labels[train_indices]
        val_labels = all_labels[val_indices]
        if angular_bins > 0:
            all_predictions_raw = np.array(all_predictions_raw)
            train_predictions_raw = all_predictions_raw[train_indices]
            val_predictions_raw = all_predictions_raw[val_indices]        

        # aggregate results
        train_result = BinaryClassificationResult(train_predictions, train_labels)
        val_result = BinaryClassificationResult(val_predictions, val_labels)
        train_result.save(os.path.join(model_output_dir, 'train_result.cres'))
        val_result.save(os.path.join(model_output_dir, 'val_result.cres'))

        # get stats, plot curves
        self.logger.info('Model %s training error rate: %.3f' %(model_dir, train_result.error_rate))
        self.logger.info('Model %s validation error rate: %.3f' %(model_dir, val_result.error_rate))

        self.logger.info('Model %s training loss: %.3f' %(model_dir, train_result.cross_entropy_loss))
        self.logger.info('Model %s validation loss: %.3f' %(model_dir, val_result.cross_entropy_loss))

        # save images
        vis2d.figure()
        example_dir = os.path.join(model_output_dir, 'examples')
        if not os.path.exists(example_dir):
            os.mkdir(example_dir)

        # train
        self.logger.info('Saving training examples')
        train_example_dir = os.path.join(example_dir, 'train')
        if not os.path.exists(train_example_dir):
            os.mkdir(train_example_dir)
            
        # train TP
        true_positive_indices = train_result.true_positive_indices
        np.random.shuffle(true_positive_indices)
        true_positive_indices = true_positive_indices[:self.num_vis]
        for i, j in enumerate(true_positive_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
            else:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 train_result.pred_probs[j],
                                                                 train_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(train_example_dir, 'true_positive_%03d.png' %(i)))

        # train FP
        false_positive_indices = train_result.false_positive_indices
        np.random.shuffle(false_positive_indices)
        false_positive_indices = false_positive_indices[:self.num_vis]
        for i, j in enumerate(false_positive_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 train_result.pred_probs[j],
                                                                 train_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(train_example_dir, 'false_positive_%03d.png' %(i)))

        # train TN
        true_negative_indices = train_result.true_negative_indices
        np.random.shuffle(true_negative_indices)
        true_negative_indices = true_negative_indices[:self.num_vis]
        for i, j in enumerate(true_negative_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 train_result.pred_probs[j],
                                                                 train_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(train_example_dir, 'true_negative_%03d.png' %(i)))

        # train FN
        false_negative_indices = train_result.false_negative_indices
        np.random.shuffle(false_negative_indices)
        false_negative_indices = false_negative_indices[:self.num_vis]
        for i, j in enumerate(false_negative_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=train_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 train_result.pred_probs[j],
                                                                 train_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(train_example_dir, 'false_negative_%03d.png' %(i)))

        # val
        self.logger.info('Saving validation examples')
        val_example_dir = os.path.join(example_dir, 'val')
        if not os.path.exists(val_example_dir):
            os.mkdir(val_example_dir)

        # val TP
        true_positive_indices = val_result.true_positive_indices
        np.random.shuffle(true_positive_indices)
        true_positive_indices = true_positive_indices[:self.num_vis]
        for i, j in enumerate(true_positive_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 val_result.pred_probs[j],
                                                                 val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(val_example_dir, 'true_positive_%03d.png' %(i)))

        # val FP
        false_positive_indices = val_result.false_positive_indices
        np.random.shuffle(false_positive_indices)
        false_positive_indices = false_positive_indices[:self.num_vis]
        for i, j in enumerate(false_positive_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 val_result.pred_probs[j],
                                                                 val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(val_example_dir, 'false_positive_%03d.png' %(i)))

        # val TN
        true_negative_indices = val_result.true_negative_indices
        np.random.shuffle(true_negative_indices)
        true_negative_indices = true_negative_indices[:self.num_vis]
        for i, j in enumerate(true_negative_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 val_result.pred_probs[j],
                                                                 val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(val_example_dir, 'true_negative_%03d.png' %(i)))

        # val FN
        false_negative_indices = val_result.false_negative_indices
        np.random.shuffle(false_negative_indices)
        false_negative_indices = false_negative_indices[:self.num_vis]
        for i, j in enumerate(false_negative_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(k, field_names=[image_field_name,
                                                          pose_field_name])
            vis2d.clf()
            if angular_bins > 0:
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode, angular_preds=val_predictions_raw[j])
            else: 
                self._plot_grasp(datapoint, image_field_name, pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %(k,
                                                                 val_result.pred_probs[j],
                                                                 val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(os.path.join(val_example_dir, 'false_negative_%03d.png' %(i)))
            
        # save summary stats
        train_summary_stats = {
            'error_rate': train_result.error_rate,
            'ap_score': train_result.ap_score,
            'auc_score': train_result.auc_score,
            'loss': train_result.cross_entropy_loss
        }
        train_stats_filename = os.path.join(model_output_dir, 'train_stats.json')
        with open(train_stats_filename, 'w') as f:
            json.dump(train_summary_stats, f,
                      indent=JSON_INDENT,
                      sort_keys=True)

        val_summary_stats = {
            'error_rate': val_result.error_rate,
            'ap_score': val_result.ap_score,
            'auc_score': val_result.auc_score,
            'loss': val_result.cross_entropy_loss            
        }
        val_stats_filename = os.path.join(model_output_dir, 'val_stats.json')
        with open(val_stats_filename, 'w') as f:
            json.dump(val_summary_stats, f,
                      indent=JSON_INDENT,
                      sort_keys=True)
        
        return train_result, val_result
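
The eight visualization loops above differ only in which index list they draw from and which filename prefix they write; everything else repeats verbatim. Below is a minimal sketch of a helper that factors out the duplication. The function name save_example_images is hypothetical; _plot_grasp, num_vis, font_size, and the BinaryClassificationResult index attributes follow the code above, with analyzer standing in for self.

import os

import numpy as np


def save_example_images(analyzer, result, split_indices, dataset, out_dir,
                        image_field_name, pose_field_name, gripper_mode,
                        angular_bins=0, raw_preds=None):
    """Save grasp visualizations for each confusion-matrix category.

    Hypothetical refactor of the TP/FP/TN/FN loops above; `analyzer`
    stands in for `self` and provides _plot_grasp, num_vis, and font_size.
    """
    from visualization import Visualizer2D as vis2d
    categories = {
        'true_positive': result.true_positive_indices,
        'false_positive': result.false_positive_indices,
        'true_negative': result.true_negative_indices,
        'false_negative': result.false_negative_indices,
    }
    for prefix, indices in categories.items():
        # Copy before shuffling so the result object's arrays are untouched.
        indices = np.array(indices)
        np.random.shuffle(indices)
        for i, j in enumerate(indices[:analyzer.num_vis]):
            k = split_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            if angular_bins > 0 and raw_preds is not None:
                analyzer._plot_grasp(datapoint, image_field_name,
                                     pose_field_name, gripper_mode,
                                     angular_preds=raw_preds[j])
            else:
                analyzer._plot_grasp(datapoint, image_field_name,
                                     pose_field_name, gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                        (k, result.pred_probs[j], result.labels[j]),
                        fontsize=analyzer.font_size)
            vis2d.savefig(os.path.join(out_dir, '%s_%03d.png' % (prefix, i)))

With this helper, each of the train and val sections above collapses to a single call, e.g. save_example_images(self, train_result, train_indices, dataset, train_example_dir, ...).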
Example #10
    def _run_prediction_single_model(self, model_dir, model_output_dir,
                                     dataset_config):
        """ Analyze the performance of a single model. """
        # read in model config
        model_config_filename = os.path.join(model_dir, 'config.json')
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # load model
        logging.info('Loading model %s' % (model_dir))
        gqcnn = GQCNN.load(model_dir)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode

        # read params from the config
        if dataset_config is None:
            dataset_dir = model_config['dataset_dir']
            split_name = model_config['split_name']
            image_field_name = model_config['image_field_name']
            pose_field_name = model_config['pose_field_name']
            metric_name = model_config['target_metric_name']
            metric_thresh = model_config['metric_thresh']
        else:
            dataset_dir = dataset_config['dataset_dir']
            split_name = dataset_config['split_name']
            image_field_name = dataset_config['image_field_name']
            pose_field_name = dataset_config['pose_field_name']
            metric_name = dataset_config['target_metric_name']
            metric_thresh = dataset_config['metric_thresh']
            gripper_mode = dataset_config['gripper_mode']

        logging.info('Loading dataset %s' % (dataset_dir))
        dataset = TensorDataset.open(dataset_dir)
        train_indices, val_indices, _ = dataset.split(split_name)

        # visualize conv filters
        conv1_filters = gqcnn.filters
        num_filt = conv1_filters.shape[3]
        d = utils.sqrt_ceil(num_filt)
        vis2d.clf()
        figname = os.path.join(model_output_dir, 'conv1_filters.pdf')
        for k in range(num_filt):
            filt = conv1_filters[:, :, 0, k]
            vis2d.subplot(d, d, k + 1)
            vis2d.imshow(DepthImage(filt))
        vis2d.savefig(figname, dpi=self.dpi)

        # aggregate training and validation true labels and predicted probabilities
        all_predictions = []
        all_labels = []
        for i in range(dataset.num_tensors):
            # log progress
            if i % self.log_rate == 0:
                logging.info('Predicting tensor %d of %d' %
                             (i + 1, dataset.num_tensors))

            # read in data
            image_arr = dataset.tensor(image_field_name, i).arr
            pose_arr = read_pose_data(
                dataset.tensor(pose_field_name, i).arr, gripper_mode)
            metric_arr = dataset.tensor(metric_name, i).arr
            label_arr = 1 * (metric_arr > metric_thresh)
            label_arr = label_arr.astype(np.uint8)

            # predict with GQ-CNN
            predictions = gqcnn.predict(image_arr, pose_arr)

            # aggregate
            all_predictions.extend(predictions[:, 1].tolist())
            all_labels.extend(label_arr.tolist())

        # close session
        gqcnn.close_session()

        # create arrays
        all_predictions = np.array(all_predictions)
        all_labels = np.array(all_labels)
        train_predictions = all_predictions[train_indices]
        val_predictions = all_predictions[val_indices]
        train_labels = all_labels[train_indices]
        val_labels = all_labels[val_indices]

        # aggregate results
        train_result = BinaryClassificationResult(train_predictions,
                                                  train_labels)
        val_result = BinaryClassificationResult(val_predictions, val_labels)
        train_result.save(os.path.join(model_output_dir, 'train_result.cres'))
        val_result.save(os.path.join(model_output_dir, 'val_result.cres'))

        # get stats, plot curves
        logging.info('Model %s training error rate: %.3f' %
                     (model_dir, train_result.error_rate))
        logging.info('Model %s validation error rate: %.3f' %
                     (model_dir, val_result.error_rate))

        # save images
        vis2d.figure()
        example_dir = os.path.join(model_output_dir, 'examples')
        if not os.path.exists(example_dir):
            os.mkdir(example_dir)

        # train
        logging.info('Saving training examples')
        train_example_dir = os.path.join(example_dir, 'train')
        if not os.path.exists(train_example_dir):
            os.mkdir(train_example_dir)

        # train TP
        true_positive_indices = train_result.true_positive_indices
        np.random.shuffle(true_positive_indices)
        true_positive_indices = true_positive_indices[:self.num_vis]
        for i, j in enumerate(true_positive_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title(
                'Datapoint %d: Pred: %.3f Label: %.3f' %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             'true_positive_%03d.png' % (i)))

        # train FP
        false_positive_indices = train_result.false_positive_indices
        np.random.shuffle(false_positive_indices)
        false_positive_indices = false_positive_indices[:self.num_vis]
        for i, j in enumerate(false_positive_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title(
                'Datapoint %d: Pred: %.3f Label: %.3f' %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             'false_positive_%03d.png' % (i)))

        # train TN
        true_negative_indices = train_result.true_negative_indices
        np.random.shuffle(true_negative_indices)
        true_negative_indices = true_negative_indices[:self.num_vis]
        for i, j in enumerate(true_negative_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title(
                'Datapoint %d: Pred: %.3f Label: %.3f' %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             'true_negative_%03d.png' % (i)))

        # train FN
        false_negative_indices = train_result.false_negative_indices
        np.random.shuffle(false_negative_indices)
        false_negative_indices = false_negative_indices[:self.num_vis]
        for i, j in enumerate(false_negative_indices):
            k = train_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title(
                'Datapoint %d: Pred: %.3f Label: %.3f' %
                (k, train_result.pred_probs[j], train_result.labels[j]),
                fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(train_example_dir,
                             'false_negative_%03d.png' % (i)))

        # val
        logging.info('Saving validation examples')
        val_example_dir = os.path.join(example_dir, 'val')
        if not os.path.exists(val_example_dir):
            os.mkdir(val_example_dir)

        # val TP
        true_positive_indices = val_result.true_positive_indices
        np.random.shuffle(true_positive_indices)
        true_positive_indices = true_positive_indices[:self.num_vis]
        for i, j in enumerate(true_positive_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, 'true_positive_%03d.png' % (i)))

        # val FP
        false_positive_indices = val_result.false_positive_indices
        np.random.shuffle(false_positive_indices)
        false_positive_indices = false_positive_indices[:self.num_vis]
        for i, j in enumerate(false_positive_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, 'false_positive_%03d.png' % (i)))

        # val TN
        true_negative_indices = val_result.true_negative_indices
        np.random.shuffle(true_negative_indices)
        true_negative_indices = true_negative_indices[:self.num_vis]
        for i, j in enumerate(true_negative_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, 'true_negative_%03d.png' % (i)))

        # val FN
        false_negative_indices = val_result.false_negative_indices
        np.random.shuffle(false_negative_indices)
        false_negative_indices = false_negative_indices[:self.num_vis]
        for i, j in enumerate(false_negative_indices):
            k = val_indices[j]
            datapoint = dataset.datapoint(
                k, field_names=[image_field_name, pose_field_name])
            vis2d.clf()
            self._plot_grasp(datapoint, image_field_name, pose_field_name,
                             gripper_mode)
            vis2d.title('Datapoint %d: Pred: %.3f Label: %.3f' %
                        (k, val_result.pred_probs[j], val_result.labels[j]),
                        fontsize=self.font_size)
            vis2d.savefig(
                os.path.join(val_example_dir, 'false_negative_%03d.png' % (i)))

        # save summary stats
        train_summary_stats = {
            'error_rate': train_result.error_rate,
            'ap_score': train_result.ap_score,
            'auc_score': train_result.auc_score
        }
        train_stats_filename = os.path.join(model_output_dir,
                                            'train_stats.json')
        with open(train_stats_filename, 'w') as f:
            json.dump(train_summary_stats, f,
                      indent=JSON_INDENT,
                      sort_keys=True)

        val_summary_stats = {
            'error_rate': val_result.error_rate,
            'ap_score': val_result.ap_score,
            'auc_score': val_result.auc_score
        }
        val_stats_filename = os.path.join(model_output_dir, 'val_stats.json')
        with open(val_stats_filename, 'w') as f:
            json.dump(val_summary_stats, f,
                      indent=JSON_INDENT,
                      sort_keys=True)

        return train_result, val_result
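
The label construction in _run_prediction_single_model is plain NumPy: continuous grasp-quality metrics are thresholded into binary success labels, then sliced by the dataset's split indices. A tiny self-contained illustration of the same two steps, with made-up numbers:

import numpy as np

# Toy grasp-quality metrics and a success threshold.
metric_arr = np.array([0.9, 0.1, 0.65, 0.3, 0.75])
metric_thresh = 0.5

# Binarize exactly as above: 1 where the metric clears the threshold.
label_arr = (1 * (metric_arr > metric_thresh)).astype(np.uint8)
# -> array([1, 0, 1, 0, 1], dtype=uint8)

# Slice the aggregated arrays with the split's index lists.
train_indices = np.array([0, 2, 4])
val_indices = np.array([1, 3])
train_labels = label_arr[train_indices]  # array([1, 1, 1], dtype=uint8)
val_labels = label_arr[val_indices]      # array([0, 0], dtype=uint8)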
Example #11
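                # OpenCV expects an (x, y) center, hence the swapped f_center
                # indices (presumably stored as (row, col)); the negative
                # angle gives a clockwise rotation by theta in image coords.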
                rot_map = cv2.getRotationMatrix2D((f_center[1], f_center[0]),
                                                  -theta, 1)
                new_convW = np.zeros(convW.shape)

                for i in range(num_in_channels):
                    for j in range(num_out_channels):
                        orig_filt = convW[:, :, i, j]
                        rot_filt = cv2.warpAffine(
                            orig_filt,
                            rot_map, (width, height),
                            flags=cv2.INTER_LINEAR,
                            borderMode=cv2.BORDER_REPLICATE)
                        new_convW[:, :, i, j] = rot_filt

                if plot and layer == 'conv1_1':
                    d = utils.sqrt_ceil(num_out_channels)
                    plt.figure()
                    for i in range(num_out_channels):
                        plt.subplot(d, d, i + 1)
                        plt.imshow(convW[:, :, 0, i], cmap=plt.cm.gray_r)
                        plt.axis('off')
                    plt.figure()
                    for i in range(num_out_channels):
                        plt.subplot(d, d, i + 1)
                        plt.imshow(new_convW[:, :, 0, i], cmap=plt.cm.gray_r)
                        plt.axis('off')
                    plt.show()

                sess.run(convW_var.assign(new_convW))
                #gqcnn._output_tensor = gqcnn._build_network(gqcnn._input_im_node,
                #                                            gqcnn._input_pose_node)
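
The fragment above applies one affine map to every 2-D slice of a 4-D convolution-weight tensor. A self-contained sketch of the same technique on dummy weights; the shapes and random data are illustrative only:

import cv2
import numpy as np

# Dummy conv weights, laid out (height, width, in_channels, out_channels).
height, width = 7, 7
num_in_channels, num_out_channels = 1, 4
convW = np.random.randn(height, width, num_in_channels,
                        num_out_channels).astype(np.float32)

theta = 30.0  # rotation angle in degrees
f_center = ((height - 1) / 2.0, (width - 1) / 2.0)  # (row, col) filter center

# getRotationMatrix2D takes an (x, y) center; a negative angle rotates
# clockwise in image coordinates.
rot_map = cv2.getRotationMatrix2D((f_center[1], f_center[0]), -theta, 1)

# Warp each 2-D filter slice with the shared rotation map.
new_convW = np.zeros_like(convW)
for i in range(num_in_channels):
    for j in range(num_out_channels):
        new_convW[:, :, i, j] = cv2.warpAffine(
            convW[:, :, i, j],
            rot_map, (width, height),
            flags=cv2.INTER_LINEAR,
            borderMode=cv2.BORDER_REPLICATE)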
Example #12
    if mesh_ext != '.obj':
        raise ValueError('Must provide mesh in Wavefront .OBJ format!')
    orig_mesh = ObjFile(mesh_filename).read()
    mesh = orig_mesh.subdivide(min_tri_length=0.01)
    mesh.compute_vertex_normals()
    stable_poses = mesh.stable_poses()

    if vis_normals:
        vis3d.figure()
        vis3d.mesh(mesh)
        vis3d.normals(NormalCloud(mesh.normals.T),
                      PointCloud(mesh.vertices.T),
                      subsample=10)
        vis3d.show()

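    # sqrt_ceil(n) presumably returns ceil(sqrt(n)): the side length of the
    # smallest square subplot grid that fits all n stable-pose renderings.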
    d = utils.sqrt_ceil(len(stable_poses))
    vis.figure(size=(16, 16))

    table_mesh = ObjFile('data/meshes/table.obj').read()
    table_mesh = table_mesh.subdivide()
    table_mesh.compute_vertex_normals()
    table_mat_props = MaterialProperties(color=(0, 255, 0),
                                         ambient=0.5,
                                         diffuse=1.0,
                                         specular=1,
                                         shininess=0)

    for k, stable_pose in enumerate(stable_poses):
        logging.info('Rendering stable pose %d' % (k))

        # set resting pose