Example #1
def get_fc_gqcnn_model(backend="tf", verbose=True):
    """Get the FC-GQ-CNN model for the provided backend.

    Note:
        Currently only TensorFlow is supported.

    Parameters
    ----------
    backend : str
        The backend to use, currently only "tf" is supported.
    verbose : bool
        Whether or not to log initialization output to `stdout`.

    Returns
    -------
    :obj:`gqcnn.model.tf.FCGQCNNTF`
        FC-GQ-CNN model with TensorFlow backend.
    """

    # Set up logger.
    logger = Logger.get_logger("FCGQCNNModelFactory", silence=(not verbose))

    # Return desired Fully-Convolutional GQ-CNN instance based on backend.
    if backend == "tf":
        logger.info("Initializing FC-GQ-CNN with Tensorflow as backend...")
        return FCGQCNNTF
    else:
        raise ValueError("Invalid backend: {}".format(backend))
Example #2
    def __init__(self, trial_cpu_load, trial_gpu_load, trial_gpu_mem,
                 monitor_cpu=True, monitor_gpu=True, cpu_cores=[],
                 gpu_devices=[]):
        self._monitor_cpu = monitor_cpu
        self._monitor_gpu = monitor_gpu

        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__)

        if not monitor_cpu:
            self._logger.warning('Disabling CPU resource monitoring is not advised.')
        if not monitor_gpu:
            self._logger.warning('Disabling GPU resource monitoring is not advised.')

        self._trial_cpu_load = trial_cpu_load
        self._trial_gpu_load = trial_gpu_load
        self._trial_gpu_mem = trial_gpu_mem

        self._cpu_cores = cpu_cores
        if len(self._cpu_cores) == 0:
            self._logger.warning('No CPU cores specified; proceeding to use all available cores.')
            self._cpu_cores = range(psutil.cpu_count())
        self._cpu_count = len(self._cpu_cores)

        self._gpu_devices = gpu_devices
        if len(self._gpu_devices) == 0:
            self._logger.warning('No GPU devices specified; proceeding to use all available devices.')
            self._gpu_devices = range(len(GPUtil.getGPUs()))
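Usage note: a hedged sketch of how free capacity might be polled with the same psutil and GPUtil calls used above. The thresholds and the free_devices helper are illustrative assumptions, not the repository's actual scheduling logic.

# Illustrative sketch only; thresholds and helper name are assumptions.
import GPUtil
import psutil

def free_devices(cpu_load_thresh=0.5, gpu_load_thresh=0.5,
                 gpu_mem_thresh=0.5):
    # Per-core CPU load sampled over a short window (0-100 per core).
    cpu_loads = psutil.cpu_percent(interval=0.5, percpu=True)
    free_cores = [i for i, load in enumerate(cpu_loads)
                  if load / 100.0 < cpu_load_thresh]
    # GPUtil reports fractional load and memory utilization per device.
    free_gpus = [gpu.id for gpu in GPUtil.getGPUs()
                 if gpu.load < gpu_load_thresh
                 and gpu.memoryUtil < gpu_mem_thresh]
    return free_cores, free_gpus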
Example #3
def get_fc_gqcnn_model(backend='tf', verbose=True):
    """
    Get the FC-GQ-CNN model for the provided backend. Currently only TensorFlow is supported.

    Parameters
    ----------
    backend : str
        the backend to use, currently only 'tf' is supported
    verbose : bool
        whether or not to log initialization output to stdout

    Returns
    -------
    :obj:`gqcnn.model.tf.FCGQCNNTF`
        FC-GQ-CNN model with TensorFlow backend
    """

    # set up logger
    logger = Logger.get_logger('FCGQCNNModelFactory', silence=(not verbose))

    # return desired Fully-Convolutional GQ-CNN instance based on backend
    if backend == 'tf':
        logger.info('Initializing FC-GQ-CNN with TensorFlow as backend...')
        return FCGQCNNTF
    else:
        raise ValueError('Invalid backend: {}'.format(backend))
Example #4
    def __init__(self, config, verbose=True, log_file=None):
        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__,
                                         log_file=log_file,
                                         silence=(not verbose),
                                         global_log_file=verbose)

        # read config
        self._parse_config(config)
Example #5
    def __init__(self, config, verbose=True, log_file=None):
        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__,
                                         log_file=log_file,
                                         silence=(not verbose),
                                         global_log_file=verbose)

        # TODO(vsatish): Figure out why this needs to go before
        # tensorflow.python.client.list_local_devices() in
        # get_available_gpus().
        setup_tf_session()

        # read config
        self._parse_config(config)
Example #6
File: analyzer.py Project: wenlongli/gqcnn
    def analyze(self, model_dir, output_dir, dataset_config=None):
        """Run analysis.

        Parameters
        ----------
        model_dir : str
            Path to the GQ-CNN model to analyze.
        output_dir : str
            Path to save the analysis.
        dataset_config : dict
            Dictionary to configure dataset used for training evaluation if
            different from one used during training.

        Returns
        -------
        :obj:`autolab_core.BinaryClassificationResult`
            Result of analysis on training data.
        :obj:`autolab_core.BinaryClassificationResult`
            Result of analysis on validation data.
        float
            Initial/final training error and loss, initial/final validation
            error, and normalized final validation error, in that order.
        """
        # Determine model output dir.
        model_name = ""
        model_root = model_dir
        while model_name == "" and model_root != "":
            model_root, model_name = os.path.split(model_root)

        model_output_dir = os.path.join(output_dir, model_name)
        if not os.path.exists(model_output_dir):
            os.mkdir(model_output_dir)

        # Set up logger.
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            model_output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)

        self.logger.info("Analyzing model %s" % (model_name))
        self.logger.info("Saving output to %s" % (output_dir))

        # Run predictions.
        train_result, val_result = self._run_prediction_single_model(
            model_dir, model_output_dir, dataset_config)

        # Finally plot curves.
        (init_train_error, final_train_error, init_train_loss,
         final_train_loss, init_val_error, final_val_error,
         norm_final_val_error) = self._plot(model_dir, model_output_dir,
                                            train_result, val_result)

        return (train_result, val_result, init_train_error, final_train_error,
                init_train_loss, final_train_loss, init_val_error,
                final_val_error, norm_final_val_error)
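Usage note: a hedged call sketch for analyze(). The config path, model path, and output path are assumed placeholders, and only the first two of the nine returned values are unpacked here.

# Hedged usage sketch; all paths are placeholders.
from autolab_core import YamlConfig
from gqcnn import GQCNNAnalyzer

analysis_config = YamlConfig("cfg/tools/analyze_gqcnn_performance.yaml")
analyzer = GQCNNAnalyzer(analysis_config)
results = analyzer.analyze("models/GQCNN-4.0-PJ", "analysis_output/")
train_result, val_result = results[0], results[1]
print("Validation error rate: %.3f" % val_result.error_rate)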
Example #7
    def __init__(self, cfg, filters=None):
        """
        Parameters
        ----------
        cfg : dict
            Python dictionary of policy configuration parameters.
        filters : dict
            Python dictionary of kinematic filters to apply.
        """
        GraspingPolicy.__init__(self, cfg, init_sampler=False)

        # Init logger.
        self._logger = Logger.get_logger(self.__class__.__name__)

        self._cfg = cfg
        self._sampling_method = self._cfg["sampling_method"]

        # GQ-CNN parameters.
        self._gqcnn_stride = self._cfg["gqcnn_stride"]
        self._gqcnn_recep_h = self._cfg["gqcnn_recep_h"]
        self._gqcnn_recep_w = self._cfg["gqcnn_recep_w"]

        # Grasp filtering.
        self._filters = filters
        self._max_grasps_to_filter = self._cfg["max_grasps_to_filter"]
        self._filter_grasps = self._cfg["filter_grasps"]

        # Visualization parameters.
        self._vis_config = self._cfg["policy_vis"]
        self._vis_scale = self._vis_config["scale"]
        self._vis_show_axis = self._vis_config["show_axis"]

        self._num_vis_samples = self._vis_config["num_samples"]
        self._vis_actions_2d = self._vis_config["actions_2d"]
        self._vis_actions_3d = self._vis_config["actions_3d"]

        self._vis_affordance_map = self._vis_config["affordance_map"]

        self._vis_output_dir = None
        # If this exists in the config then all visualizations will be logged
        # here instead of displayed.
        if "output_dir" in self._vis_config:
            self._vis_output_dir = self._vis_config["output_dir"]
            self._state_counter = 0
Example #8
    def __init__(self, cfg, filters=None):
        """
        Parameters
        ----------
        cfg : dict
            python dictionary of policy configuration parameters
        filters : dict
            python dictionary of kinematic filters to apply 
        """
        GraspingPolicy.__init__(self, cfg, init_sampler=False)

        # init logger
        self._logger = Logger.get_logger(self.__class__.__name__)

        self._cfg = cfg
        self._sampling_method = self._cfg['sampling_method']

        # gqcnn parameters
        self._gqcnn_stride = self._cfg['gqcnn_stride']
        self._gqcnn_recep_h = self._cfg['gqcnn_recep_h']
        self._gqcnn_recep_w = self._cfg['gqcnn_recep_w']

        # grasp filtering
        self._filters = filters
        self._max_grasps_to_filter = self._cfg['max_grasps_to_filter']
        self._filter_grasps = self._cfg['filter_grasps']

        # visualization parameters
        self._vis_config = self._cfg['policy_vis']
        self._vis_scale = self._vis_config['scale']
        self._vis_show_axis = self._vis_config['show_axis']

        self._num_vis_samples = self._vis_config['num_samples']
        self._vis_actions_2d = self._vis_config['actions_2d']
        self._vis_actions_3d = self._vis_config['actions_3d']

        self._vis_affordance_map = self._vis_config['affordance_map']

        self._vis_output_dir = None
        # if this exists in the config then all visualizations will be
        # logged here instead of displayed
        if 'output_dir' in self._vis_config:
            self._vis_output_dir = self._vis_config['output_dir']
            self._state_counter = 0
Example #9
    def __init__(self, id_mask="F1804", ntaps=4, log=True):
        """Initialize the weight sensor.

        Parameters
        ----------
        id_mask : str
            A template for the first n digits of the device IDs
            for valid load cells.
        ntaps : int
            Maximum number of samples to perform filtering over.
        log : bool
            Use a logger
        """
        self._id_mask = id_mask
        self._ntaps = ntaps
        self._filter_coeffs = signal.firwin(ntaps, 0.1)
        self._running = False
        self._cur_weights = None
        self._read_thread = None
        self._write_lock = threading.Condition()
        self.logger = Logger.get_logger("WeightSensor") if log else None
Example #10
    def __init__(self, analysis_config, train_configs, datasets, split_names,
                 base_models=[], output_dir=None, search_name=None,
                 monitor_cpu=True, monitor_gpu=True, cpu_cores=[],
                 gpu_devices=[]):
        self._analysis_cfg = analysis_config
        
        # create trial output dir if not specified
        if search_name is None:
            search_name = 'gqcnn_hyperparam_search_{}'.format(gen_timestamp())  
        if output_dir is None:
            output_dir = 'models'
        self._trial_output_dir = os.path.join(output_dir, search_name)
        if not os.path.exists(self._trial_output_dir):
            os.makedirs(self._trial_output_dir)

        # set up logger
        self._logger = Logger.get_logger(
            self.__class__.__name__,
            log_file=os.path.join(self._trial_output_dir, 'search.log'),
            global_log_file=True)

        # init resource manager
        self._resource_manager = ResourceManager(
            TrialConstants.TRIAL_CPU_LOAD, TrialConstants.TRIAL_GPU_LOAD,
            TrialConstants.TRIAL_GPU_MEM, monitor_cpu=monitor_cpu,
            monitor_gpu=monitor_gpu, cpu_cores=cpu_cores,
            gpu_devices=gpu_devices)
        
        # parse train configs and generate individual trial parameters
        if len(base_models) > 0:
            assert len(train_configs) == len(datasets) == len(split_names) \
                == len(base_models), ('Must have equal number of training '
                                      'configs, datasets, split_names, and '
                                      'base models!')
        else:
            assert len(train_configs) == len(datasets) == len(split_names), \
                ('Must have equal number of training configs, datasets, and '
                 'split_names!')
        self._logger.info('Generating trial parameters...')
        trial_params = gen_trial_params(train_configs, datasets, split_names,
                                        base_models=base_models)

        # create pending trial queue
        self._trials_pending_queue = Queue()
        if len(base_models) > 0:
            for (trial_name, hyperparam_summary, train_cfg, dataset,
                 base_model, split_name) in trial_params:
                self._trials_pending_queue.put(
                    GQCNNFineTuningAndAnalysisTrial(
                        self._analysis_cfg, train_cfg, dataset, base_model,
                        split_name, self._trial_output_dir, trial_name,
                        hyperparam_summary))
        else:
            for (trial_name, hyperparam_summary, train_cfg, dataset,
                 split_name) in trial_params:
                self._trials_pending_queue.put(
                    GQCNNTrainingAndAnalysisTrial(
                        self._analysis_cfg, train_cfg, dataset, split_name,
                        self._trial_output_dir, trial_name,
                        hyperparam_summary))

        # create containers to hold running, finished, and errored-out trials
        self._trials_running = []
        self._trials_finished = []
        self._trials_errored = []
Example #11
    def __init__(self,
                 siamese_net,
                 dataset_dir,
                 output_dir,
                 config,
                 model_name=None,
                 progress_dict=None,
                 log_file=None,
                 verbose=True):
        self._network = siamese_net
        self._dataset_dir = dataset_dir
        self._output_dir = output_dir
        self._model_name = model_name
        self._progress_dict = progress_dict

        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__,
                                         log_file=log_file,
                                         silence=(not verbose),
                                         global_log_file=verbose)

        # read config
        self._parse_config(config)
Example #12
    def run_analysis(self, model_dir, output_dir, data_dir, noise_analysis,
                     depth_analysis, perturb_analysis, single_analysis):

        # Determine model name
        model_name = ""
        model_root = model_dir
        while model_name == "" and model_root != "":
            model_root, model_name = os.path.split(model_root)

        # Store Noise and Depth investigation in their corresponding directories
        if noise_analysis:
            output_dir = os.path.join(output_dir, "Noise_Comparison/")
        if depth_analysis:
            output_dir = os.path.join(output_dir, "Depth_Comparison/")
        if perturb_analysis:
            output_dir = os.path.join(output_dir, "Perturbation_Analysis/")
        if single_analysis:
            output_dir = os.path.join(output_dir, "Single_Analysis/")

        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Set up logger.
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)

        self.logger.info("Analyzing model %s" % (model_name))
        self.logger.info("Saving output to %s" % (output_dir))

        # Run predictions
        result = self._run_prediction(model_dir, output_dir, data_dir,
                                      noise_analysis, depth_analysis,
                                      perturb_analysis, single_analysis)
Example #13
------
Vishal Satish & Jeff Mahler
"""
import argparse
import os
import sys
import time

import autolab_core.utils as utils
from autolab_core import YamlConfig, Logger
from gqcnn import get_gqcnn_model, get_gqcnn_trainer
from gqcnn import utils as gqcnn_utils

# setup logger
logger = Logger.get_logger('tools/finetune.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Fine-Tune a pre-trained Grasp Quality Convolutional Neural Network with TensorFlow'
    )
    parser.add_argument(
        'dataset_dir',
        type=str,
        default=None,
        help='path to the dataset to use for training and validation')
    parser.add_argument('base_model_name',
                        type=str,
                        default=None,
Example #14
import argparse

import numpy as np

from autolab_core import YamlConfig, Logger
from perception import (BinaryImage, CameraIntrinsics, ColorImage, DepthImage,
                        RgbdImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import (RobustGraspingPolicy,
                            CrossEntropyRobustGraspingPolicy, RgbdImageState,
                            FullyConvolutionalGraspingPolicyParallelJaw,
                            FullyConvolutionalGraspingPolicySuction)
from gqcnn.utils import GripperMode

# Set up logger.
logger = Logger.get_logger("examples/policy.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description="Run a grasping policy on an example image")
    parser.add_argument("model_name",
                        type=str,
                        default=None,
                        help="name of a trained model to run")
    parser.add_argument(
        "--depth_image",
        type=str,
        default=None,
        help="path to a test depth image stored as a .npy file")
    parser.add_argument("--segmask",
Example #15
    def __init__(self, config):
        # Set params.
        self._config = config

        # Setup logger.
        self._logger = Logger.get_logger(self.__class__.__name__)
Example #16
import argparse
import json
import os
import time

import numpy as np

from autolab_core import RigidTransform, YamlConfig, Logger
from perception import (BinaryImage, CameraIntrinsics, ColorImage, DepthImage,
                        RgbdImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import (RobustGraspingPolicy,
                            CrossEntropyRobustGraspingPolicy, RgbdImageState,
                            FullyConvolutionalGraspingPolicyParallelJaw,
                            FullyConvolutionalGraspingPolicySuction)
from gqcnn.utils import GripperMode, NoValidGraspsException

# set up logger
logger = Logger.get_logger('examples/policy.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Run a grasping policy on an example image')
    parser.add_argument('model_name',
                        type=str,
                        default=None,
                        help='name of a trained model to run')
    parser.add_argument(
        '--depth_image',
        type=str,
        default=None,
        help='path to a test depth image stored as a .npy file')
    parser.add_argument('--segmask',
Example #17
path_depth_png = last_depth_image.replace("depth",
                                          "depth_PNG").replace("npy", "png")

path_grasp_position = last_depth_image.replace("depth",
                                               "grasp").replace("npy", "txt")

if os.path.isfile(last_mask_path):
    mask = last_mask_path
else:
    mask = None

#mask= "/home/lvianell/Desktop/Lorenzo_report/datasets/dexnet_maskcnn_human/mask_0.png"
#mask=None

# set up logger
logger = Logger.get_logger(
    '/home/lvianell/Desktop/Lorenzo_report/gqcnn/examples/policy_2.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Run a grasping policy on an example image')
    parser.add_argument('--model_name',
                        type=str,
                        default="GQCNN-4.0-PJ",
                        help='name of a trained model to run')
    parser.add_argument(
        '--depth_image',
        type=str,
        default=last_depth_image,
        help='path to a test depth image stored as a .npy file')
    parser.add_argument('--segmask',
Example #18
    def visualise(self, model_dir, output_dir, data_dir):

        # Determine model name
        model_name = ""
        model_root = model_dir
        while model_name == "" and model_root != "":
            model_root, model_name = os.path.split(model_root)

        output_dir = os.path.join(output_dir, "Visualisation/")
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Read in model config.
        model_config_filename = os.path.join(model_dir, "config.json")
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # Set up logger
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)

        self.logger.info("Analyzing model %s" % (model_name))
        self.logger.info("Saving output to %s" % (output_dir))
        mixture = False

        if "Cornell" in model_dir:
            model_name = "Cornell"
        elif "DexNet" in model_dir:
            model_name = "DexNet"

        if "Cornell" in data_dir:
            data_name = "Cornell"
        elif "DexNet" in data_dir:
            data_name = "DexNet"
        elif "Both" in data_dir:
            data_name = "mixed"
            mixture = True

        # Load model.
        self.logger.info("Loading model %s" % (model_dir))
        log_file = None
        for handler in self.logger.handlers:
            if isinstance(handler, logging.FileHandler):
                log_file = handler.baseFilename
        gqcnn = get_gqcnn_model(verbose=self.verbose).load(
            model_dir, verbose=self.verbose, log_file=log_file)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode
        angular_bins = gqcnn.angular_bins

        if mixture:
            image_arr, pose_arr, labels, width_arr, file_arr, obj_arr, identity_arr = self._read_data(
                data_dir, mixture=True)
        else:
            image_arr, pose_arr, labels, width_arr, file_arr, obj_arr = self._read_data(
                data_dir)
        print("Object arr: ", obj_arr)
        # Predict outcomes
        predictions = gqcnn.predict(image_arr, pose_arr)

        # predictions has shape (num_images, num_classes), so the number of
        # images is along axis 0.
        if predictions.shape[0] == 1:
            print("Only 1 image given. No t-SNE analysis of network possible")
        else:
            # Setting colors and labels
            color = []
            monotone = False
            if mixture:
                for label, identity in zip(labels, identity_arr):
                    if identity == 0:
                        # Cornell
                        if label == 0:
                            # negative
                            color.append('#FF8000')
                        else:
                            # positive
                            color.append('#2D702F')
                    else:
                        # DexNet
                        if label == 0:
                            # negative
                            color.append('#FF0404')
                        else:
                            # positive
                            color.append('#23C328')
                if len(np.unique(labels)) == 1:
                    monotone = True
                    if labels[0] == 0:
                        data_name += " negatives"
                        pop_a = mpatches.Patch(color='#FF8000',
                                               label='Negative Cornell')
                        pop_b = mpatches.Patch(color='#FF0404',
                                               label='Negative DexNet')
                    else:
                        data_name += " positives"
                        pop_a = mpatches.Patch(color='#2D702F',
                                               label='Positive Cornell')
                        pop_b = mpatches.Patch(color='#23C328',
                                               label='Positive DexNet')
                else:
                    pop_a = mpatches.Patch(color='#FF8000',
                                           label='Negative Cornell')
                    pop_b = mpatches.Patch(color='#FF0404',
                                           label='Negative DexNet')
                    pop_c = mpatches.Patch(color='#2D702F',
                                           label='Positive Cornell')
                    pop_d = mpatches.Patch(color='#23C328',
                                           label='Positive DexNet')
            else:
                color = ['r' if truth == 0 else 'g' for truth in labels]
                pop_a = mpatches.Patch(color='r', label='Negative grasp')
                pop_b = mpatches.Patch(color='g', label='Positive grasp')

            # t-SNE
            tsne_out = sklearn.manifold.TSNE(
                n_components=2).fit_transform(predictions)
            plt.scatter(tsne_out[:, 0], tsne_out[:, 1], marker='o', c=color)
            if mixture and not monotone:
                plt.legend(handles=[pop_a, pop_b, pop_c, pop_d])
            else:
                plt.legend(handles=[pop_a, pop_b])
            plt.title("TSNE output of %s data on a GQCNN trained on %s" %
                      (data_name, model_name))
            plt.savefig(output_dir + "/" + model_name + "_model_" + data_name +
                        "_data_TSNE.png")
            plt.close()

            # PCA
            pca_out = sklearn.decomposition.PCA(
                n_components=2).fit_transform(predictions)
            plt.scatter(pca_out[:, 0], pca_out[:, 1], marker='o', c=color)
            plt.title("PCA output of %s data on a GQCNN trained on %s" %
                      (data_name, model_name))
            if mixture and not monotone:
                plt.legend(handles=[pop_a, pop_b, pop_c, pop_d])
            else:
                plt.legend(handles=[pop_a, pop_b])
            plt.savefig(output_dir + "/" + model_name + "_model_" + data_name +
                        "_data_PCA.png")
            plt.close()
Example #19
Author: Jeff Mahler
"""
import argparse
import os
import random
import time

import numpy as np

from autolab_core import RigidTransform, YamlConfig, Logger
from gqcnn import RgbdImageState, ParallelJawGrasp
from gqcnn import CrossEntropyRobustGraspingPolicy
from visualization import Visualizer2D as vis2d

# set up logger
logger = Logger.get_logger('tools/run_policy.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Run a saved test case through a GQ-CNN policy. For debugging purposes only.'
    )
    parser.add_argument('test_case_path',
                        type=str,
                        default=None,
                        help='path to test case')
    parser.add_argument('--config_filename',
                        type=str,
                        default='cfg/tools/run_policy.yaml',
                        help='path to configuration file to use')
Example #20
File: finetune.py Project: wenlongli/gqcnn
Author
------
Vishal Satish & Jeff Mahler
"""
import argparse
import os
import time

from autolab_core import YamlConfig, Logger
import autolab_core.utils as utils
from gqcnn import get_gqcnn_model, get_gqcnn_trainer
from gqcnn import utils as gqcnn_utils

# Setup logger.
logger = Logger.get_logger("tools/finetune.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(description=(
        "Fine-Tune a pre-trained Grasp Quality Convolutional Neural Network"
        " with TensorFlow"))
    parser.add_argument(
        "dataset_dir",
        type=str,
        default=None,
        help="path to the dataset to use for training and validation")
    parser.add_argument("base_model_name",
                        type=str,
                        default=None,
                        help="name of the pre-trained model to fine-tune")
Example #21
Author
------
Vishal Satish and Jeff Mahler
"""
import argparse
import os
import sys
import time

from autolab_core import YamlConfig, Logger
from gqcnn import GQCNNAnalyzer

# setup logger
logger = Logger.get_logger('tools/analyze_gqcnn_performance.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Analyze a Grasp Quality Convolutional Neural Network with TensorFlow')
    parser.add_argument('model_name',
                        type=str,
                        default=None,
                        help='name of model to analyze')
    parser.add_argument('--output_dir',
                        type=str,
                        default=None,
                        help='path to save the analysis')
    parser.add_argument(
Example #22
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autolab_core import YamlConfig, Logger
from perception import (BinaryImage, CameraIntrinsics, ColorImage, DepthImage,
                        RgbdImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import (RobustGraspingPolicy,
                            CrossEntropyRobustGraspingPolicy, RgbdImageState,
                            FullyConvolutionalGraspingPolicyParallelJaw,
                            FullyConvolutionalGraspingPolicySuction)
from gqcnn.utils import GripperMode
import cv2
import os
logger = Logger.get_logger("grabdepth_segmask_gqcnn.py")
config_filename = os.path.join("gqcnn_pj_realsense.yaml")
config = YamlConfig(config_filename)
policy_config = config["policy"]
policy_type = "cem"
policy = CrossEntropyRobustGraspingPolicy(policy_config)

import pybullet as p
import time
import numpy as np
import pybullet_data
from scipy.linalg import null_space
np.set_printoptions(formatter={'float_kind': lambda x: "{0:0.3f}".format(x)})
import math
pi = math.pi
Example #23
    def __init__(self, physics_engine, config):

        self._physics_engine = physics_engine
        self._config = config

        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__)

        # read subconfigs
        obj_config = config['objects']
        workspace_config = config['workspace']

        self.num_objs_rv = sstats.poisson(config['mean_objs'] - 1)
        self.max_objs = config['max_objs']
        self.min_objs = 1
        if 'min_objs' in list(config.keys()):
            self.min_objs = config['min_objs']

        self.max_obj_diam = config['max_obj_diam']
        self.drop_height = config['drop_height']
        self.max_settle_steps = config['max_settle_steps']
        self.mag_v_thresh = config['mag_v_thresh']
        self.mag_w_thresh = config['mag_w_thresh']

        # bounds of heap center in the table plane
        min_heap_center = np.array(config['center']['min'])
        max_heap_center = np.array(config['center']['max'])
        self.heap_center_space = gym.spaces.Box(min_heap_center,
                                                max_heap_center,
                                                dtype=np.float32)

        # Set up object configs
        # bounds of object drop pose in the table plane
        # organized as [tx, ty, theta] where theta is in degrees
        min_obj_pose = np.r_[obj_config['planar_translation']['min'], 0]
        max_obj_pose = np.r_[obj_config['planar_translation']['max'],
                             2 * np.pi]
        self.obj_planar_pose_space = gym.spaces.Box(min_obj_pose,
                                                    max_obj_pose,
                                                    dtype=np.float32)

        # bounds of object drop orientation
        min_sph_coords = np.array([0.0, 0.0])
        max_sph_coords = np.array([2 * np.pi, np.pi])
        self.obj_orientation_space = gym.spaces.Box(min_sph_coords,
                                                    max_sph_coords,
                                                    dtype=np.float32)

        # bounds of center of mass
        delta_com_sigma = max(1e-6, obj_config['center_of_mass']['sigma'])
        self.delta_com_rv = sstats.multivariate_normal(np.zeros(3),
                                                       delta_com_sigma**2)

        self.obj_density = obj_config['density']

        # bounds of workspace (for checking out of bounds)
        min_workspace_trans = np.array(workspace_config['min'])
        max_workspace_trans = np.array(workspace_config['max'])
        self.workspace_space = gym.spaces.Box(min_workspace_trans,
                                              max_workspace_trans,
                                              dtype=np.float32)

        # Setup object keys and directories
        object_keys = []
        mesh_filenames = []
        self._train_pct = obj_config['train_pct']
        num_objects = obj_config['num_objects']
        self._mesh_dir = obj_config['mesh_dir']
        if not os.path.isabs(self._mesh_dir):
            self._mesh_dir = os.path.join(os.getcwd(), self._mesh_dir)
        for root, dirs, files in os.walk(self._mesh_dir):
            dataset_name = os.path.basename(root)
            if dataset_name in list(obj_config['object_keys'].keys()):
                for f in files:
                    filename, ext = os.path.splitext(f)
                    if ext.split('.')[1] in trimesh.exchange.load.mesh_formats() \
                        and (filename in obj_config['object_keys'][dataset_name] or obj_config['object_keys'][dataset_name] == 'all'):
                        obj_key = '{}{}{}'.format(dataset_name, KEY_SEP_TOKEN,
                                                  filename)
                        object_keys.append(obj_key)
                        mesh_filenames.append(os.path.join(root, f))

        inds = np.arange(len(object_keys))
        np.random.shuffle(inds)
        self.all_object_keys = list(np.array(object_keys)[inds][:num_objects])
        mesh_filenames = list(np.array(mesh_filenames)[inds][:num_objects])
        self.train_keys = self.all_object_keys[:int(
            len(self.all_object_keys) * self._train_pct)]
        self.test_keys = self.all_object_keys[
            int(len(self.all_object_keys) * self._train_pct):]
        self.obj_ids = dict([(key, i + 1)
                             for i, key in enumerate(self.all_object_keys)])
        self.mesh_filenames = dict(zip(self.all_object_keys, mesh_filenames))
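Usage note: gym.spaces.Box supports uniform sampling, which is presumably how heap centers and object drop poses are drawn from the spaces defined above; the bounds below are illustrative assumptions, not the repository's configuration values.

# Hedged sketch; bounds are illustrative.
import numpy as np
import gym

heap_center_space = gym.spaces.Box(np.array([-0.1, -0.1]),
                                   np.array([0.1, 0.1]),
                                   dtype=np.float32)
tx, ty = heap_center_space.sample()  # uniform draw within the bounds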
Example #24
    def _run_prediction(self, model_dir, model_output_dir, data_dir,
                        noise_analysis, depth_analysis, perturb_analysis,
                        single_analysis):
        """Predict the outcome of the file for a single model."""

        # Read in model config.
        model_config_filename = os.path.join(model_dir, "config.json")
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # Load model.
        self.logger.info("Loading model %s" % (model_dir))
        log_file = None
        for handler in self.logger.handlers:
            if isinstance(handler, logging.FileHandler):
                log_file = handler.baseFilename
        gqcnn = get_gqcnn_model(verbose=self.verbose).load(
            model_dir, verbose=self.verbose, log_file=log_file)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode
        angular_bins = gqcnn.angular_bins

        # Load data
        if noise_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, noise_arr = self._read_data(
                data_dir, noise=True)
        elif depth_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, depth_arr = self._read_data(
                data_dir, depth=True)
        elif perturb_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, perturb_arr = self._read_data(
                data_dir, perturb=True)
        elif single_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, perturb_arr = self._read_data(
                data_dir, perturb=True)
        else:
            image_arr, pose_arr, labels, width_arr, file_arr, obj_arr = self._read_data(
                data_dir)
        # Predict outcomes
        predictions = gqcnn.predict(image_arr, pose_arr)
        gqcnn.close_session()
        results = BinaryClassificationResult(predictions[:, 1], labels)

        # Log the results
        if noise_analysis:
            # Analyse the error rates with respect to the noise levels of the images
            noise_levels = np.unique(noise_arr)
            levels = len(noise_levels)
            for current_noise in noise_levels:
                pred = predictions[noise_arr[:, 0] == current_noise]
                lab = labels[noise_arr[:, 0] == current_noise]
                res = BinaryClassificationResult(pred[:, 1], lab)
                self._plot_histograms(pred[:, 1], lab, str(current_noise),
                                      model_output_dir)
                self.logger.info("Noise: %.4f Model %s error rate: %.3f" %
                                 (current_noise, model_dir, res.error_rate))
                self.logger.info(
                    "Noise: %.4f Model %s loss: %.3f" %
                    (current_noise, model_dir, res.cross_entropy_loss))
        elif depth_analysis:
            # Analyse the error rates with respect to the grasping depth in the images
            depth_levels = np.unique(depth_arr)
            levels = len(depth_levels)
            for current_depth in depth_levels:
                if current_depth == -1:
                    depth_mode = 'original'
                else:
                    depth_mode = 'relative %.2f' % (current_depth)
                pred = predictions[depth_arr == current_depth]
                lab = labels[depth_arr == current_depth]
                res = BinaryClassificationResult(pred[:, 1], lab)
                self._plot_histograms(pred[:, 1], lab, depth_mode,
                                      model_output_dir)
                self.logger.info("Depth %s Model %s error rate: %.3f" %
                                 (depth_mode, model_dir, res.error_rate))
                self.logger.info(
                    "Depth: %s Model %s loss: %.3f" %
                    (depth_mode, model_dir, res.cross_entropy_loss))
        elif perturb_analysis:
            # Analyse the error rates with respect to the grasp perturbations in the images
            perturb_levels = np.unique(perturb_arr)
            print("Perturb levels: ", perturb_levels)
            _rot = len(np.unique(perturb_arr[:, 0]))
            _trans = len(np.unique(perturb_arr[:, 1]))
            try:
                _transy = len(np.unique(perturb_arr[:, 2]))
            except IndexError:
                _transy = 0
                print("No translation in y included")
            if _rot >= 2 and _trans <= 1 and _transy <= 1:
                perturbation = 'rotation'
                perturb_unit = 'deg'
                index = 0
            elif _rot <= 1 and _trans >= 2 and _transy <= 1:
                perturbation = 'translation'
                perturb_unit = 'pixel'
                index = 1
            elif _rot <= 1 and _trans <= 1 and _transy >= 2:
                perturbation = 'translationy'
                perturb_unit = 'pixel'
                index = 2
            else:
                raise ValueError(
                    "Perturbation array includes at least two different "
                    "perturbation types, which cannot be handled. Aborting.")
            levels = len(perturb_levels)
            accuracies = []
            for current_perturb in perturb_levels:
                pred = predictions[perturb_arr[:, index] == current_perturb]
                lab = labels[perturb_arr[:, index] == current_perturb]
                res = BinaryClassificationResult(pred[:, 1], lab)
                perturb_mode = perturbation + ' %.0f ' % (
                    current_perturb) + perturb_unit
                self._plot_histograms(
                    pred[:, 1], lab,
                    perturbation + '_%.0f_' % (current_perturb) + perturb_unit,
                    model_output_dir)

                self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                 (perturb_mode, model_dir, res.error_rate))
                accuracies.append(100 - res.error_rate)
                self.logger.info(
                    "Grasp %s Model %s loss: %.3f" %
                    (perturb_mode, model_dir, res.cross_entropy_loss))
            self._plot_grasp_perturbations(perturb_levels, accuracies,
                                           model_output_dir, perturbation)
        elif single_analysis:
            # Analyse the error rates with respect to the grasp perturbations in the images
            perturb_levels = np.unique(perturb_arr)
            _rot = np.count_nonzero(perturb_arr[:, 0])
            _trans = np.count_nonzero(perturb_arr[:, 1])
            _transy = np.count_nonzero(perturb_arr[:, 2])
            _scalez = np.count_nonzero(perturb_arr[:, 3])
            _scalex = np.count_nonzero(perturb_arr[:, 4])
            if _rot >= 1 and _trans == 0 and _transy == 0 and _scalez == 0 and _scalex == 0:
                index = 0
                perturbation = 'rotation'
            elif _rot == 0 and _trans >= 1 and _transy == 0 and _scalez == 0 and _scalex == 0:
                perturbation = 'translation'
                index = 1
            elif _rot == 0 and _trans == 0 and _transy >= 1 and _scalez == 0 and _scalex == 0:
                perturbation = 'translationy'
                index = 2
            elif _rot == 0 and _trans == 0 and _transy == 0 and _scalez >= 1 and _scalex == 0:
                perturbation = 'scale_height'
                index = 3
            elif _rot == 0 and _trans == 0 and _transy == 0 and _scalez == 0 and _scalex >= 1:
                perturbation = 'scalex'
                index = 4
            else:
                perturbation = 'mixed'
                index = 5
            # Create new output dir for single file and perturbation mode
            print(len(perturb_arr))
            if len(perturb_arr) == 1:
                print("New output directory is: ", model_output_dir)
            else:
                model_output_dir = os.path.join(
                    model_output_dir,
                    str(file_arr[0][0]) + '_' + str(file_arr[0][1]) + '_' +
                    perturbation)
                print("New output directory is: ", model_output_dir)
            if not os.path.exists(model_output_dir):
                os.mkdir(model_output_dir)
            # Set up new logger.
            self.logger = Logger.get_logger(self.__class__.__name__,
                                            log_file=os.path.join(
                                                model_output_dir,
                                                "analysis.log"),
                                            silence=(not self.verbose),
                                            global_log_file=self.verbose)
            levels = len(perturb_arr)
            abs_pred_errors = []
            if levels == 1:
                self.logger.info(
                    "Mixed perturbation. Translationx %.1f, Translationy %.1f, "
                    "Rotation %.1f, Scale_height %.1f, Scale x %.1f" %
                    (perturb_arr[0][1], perturb_arr[0][2], perturb_arr[0][0],
                     perturb_arr[0][3], perturb_arr[0][4]))
                pred = predictions
                lab = labels
                res = BinaryClassificationResult(pred[:, 1], lab)
                self.logger.info("Grasp %s Model %s prediction: %.3f" %
                                 (perturbation, model_dir, pred[:, 1]))
                self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                 (perturbation, model_dir, res.error_rate))
                self.logger.info(
                    "Grasp %s Model %s loss: %.3f" %
                    (perturbation, model_dir, res.cross_entropy_loss))

            else:
                for current_perturb in perturb_levels:
                    pred = predictions[perturb_arr[:,
                                                   index] == current_perturb]
                    lab = labels[perturb_arr[:, index] == current_perturb]
                    res = BinaryClassificationResult(pred[:, 1], lab)

                    if perturbation == 'rotation':
                        perturb_mode = 'rotation %.0f deg' % (current_perturb)
                    elif perturbation == 'translation':
                        perturb_mode = 'translation in x %.0f pixel' % (
                            current_perturb)
                    elif perturbation == 'translationy':
                        perturb_mode = 'translation in y %.0f pixel' % (
                            current_perturb)
                    elif perturbation == 'scale_height':
                        perturb_mode = 'scaling depth by %.0f' % (
                            current_perturb)
                    elif perturbation == 'scalex':
                        perturb_mode = 'scaling x by %.0f' % (current_perturb)
                    pos_errors, neg_errors = self._calculate_prediction_errors(
                        pred[:, 1], lab)
                    # Only append positive errors if grasp was positive.
                    if pos_errors:
                        abs_pred_errors.append(pos_errors)
                    self.logger.info("Grasp %s Model %s prediction: %.3f" %
                                     (perturb_mode, model_dir, pred[:, 1]))
                    self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                     (perturb_mode, model_dir, res.error_rate))
                    self.logger.info(
                        "Grasp %s Model %s loss: %.3f" %
                        (perturb_mode, model_dir, res.cross_entropy_loss))
                if pos_errors:
                    self._plot_single_grasp_perturbations(
                        perturb_levels, abs_pred_errors, model_output_dir,
                        perturbation)
        else:
            levels = 1
            self._plot_histograms(predictions[:, 1], labels, '',
                                  model_output_dir)
            self.logger.info("Model %s error rate: %.3f" %
                             (model_dir, results.error_rate))
            self.logger.info("Model %s loss: %.3f" %
                             (model_dir, results.cross_entropy_loss))

            if obj_arr is not None and 'Cornell' in data_dir:
                unique = np.unique(obj_arr).tolist()
                object_label = pd.read_csv(
                    DATA_PATH + "Cornell/original/z.txt",
                    sep=" ",
                    header=None,
                    usecols=[1, 2]).drop_duplicates().to_numpy()
                true_pos = dict()
                false_neg = dict()
                false_pos = dict()
                true_neg = dict()
                for obj in unique:
                    obj = int(obj)
                    true_pos[object_label[obj, 1]] = 0
                    false_pos[object_label[obj, 1]] = 0
                    true_neg[object_label[obj, 1]] = 0
                    false_neg[object_label[obj, 1]] = 0

                for obj, pred, label in zip(obj_arr, predictions[:, 1],
                                            labels):
                    if label == 1 and pred >= 0.5:
                        true_pos[object_label[obj, 1]] += 1
                    elif label == 1 and pred < 0.5:
                        false_neg[object_label[obj, 1]] += 1
                    elif label == 0 and pred >= 0.5:
                        false_pos[object_label[obj, 1]] += 1
                    elif label == 0 and pred < 0.5:
                        true_neg[object_label[obj, 1]] += 1
                print(true_pos)
                self._export_object_analysis(true_pos, false_neg, false_pos,
                                             true_neg, model_output_dir)

        # Log the ratios
        pos_lab = len(labels[labels == 1])
        neg_lab = len(labels[labels == 0])

        true_pos = len(results.true_positive_indices)
        true_neg = len(results.true_negative_indices)
        false_pos = neg_lab - true_neg
        false_neg = pos_lab - true_pos

        self.logger.info("%d samples, %d grasps" %
                         (len(labels), len(labels) / levels))
        self.logger.info("%d positive grasps, %d negative grasps" %
                         (pos_lab / levels, neg_lab / levels))
        self.logger.info("Model overall accuracy %.2f %%" %
                         (100 * results.accuracy))
        self.logger.info("Accuracy on positive grasps: %.2f %%" %
                         (true_pos / pos_lab * 100))
        self.logger.info("Accuracy on negative grasps: %.2f %%" %
                         (true_neg / neg_lab * 100))
        self.logger.info("True positive samples: %d" % true_pos)
        self.logger.info("True negative samples: %d" % true_neg)
        self.logger.info("Correct predictions: %d" % (true_pos + true_neg))
        self.logger.info("False positive samples: %d" % false_pos)
        self.logger.info("False negative samples: %d" % false_neg)
        self.logger.info("False predictions: %d" % (false_pos + false_pos))

        cnt = 0  # Counter for grouping the same images with different noise/depth levels
        if self.num_images is None or self.num_images > len(width_arr):
            self.num_images = len(width_arr)
        steps = int(len(width_arr) / self.num_images)
        for j in range(0, len(width_arr), steps):
            try:
                if file_arr[j][1] != file_arr[j - 1][1]:
                    cnt = 0
                else:
                    cnt += 1
            except (IndexError, TypeError):
                cnt += 1
            if noise_analysis:
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         noise_arr=noise_arr)
            elif depth_analysis:
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         depth_arr=depth_arr)
            elif perturb_analysis or single_analysis:
                print("Plot grasp")
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         perturb_arr=perturb_arr)
            else:
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         plt_results=False)
            try:
                if noise_analysis or depth_analysis or perturb_analysis or single_analysis:
                    image.save(
                        os.path.join(
                            model_output_dir, "%05d_%03d_example_%03d.png" %
                            (file_arr[j][0], file_arr[j][1], cnt)))
                else:
                    image.save(
                        os.path.join(
                            model_output_dir, "%05d_%03d.png" %
                            (file_arr[j][0], file_arr[j][1])))
                    # data = self.scale(image_arr[j][:, :, 0])
                    # image = Image.fromarray(data).convert('RGB').resize((300, 300), resample=Image.NEAREST)
                    # image.save(os.path.join(model_output_dir, "%05d_%03d_orig.png" % (file_arr[j][0], file_arr[j][1])))
            except (IndexError, TypeError):
                image.save(
                    os.path.join(model_output_dir, "Example_%03d.png" % (cnt)))
        if single_analysis:
            print("Plotting depth image")
            j = int(len(image_arr) / 2)
            # Plot pure depth image without prediction labeling.
            image = self._plot_grasp(image_arr[j],
                                     width_arr[j],
                                     results,
                                     j,
                                     plt_results=False)
            image.save(os.path.join(model_output_dir, "Depth_image.png"))
        return results
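Note: the ratio bookkeeping near the end of the method follows directly from confusion-matrix identities; a minimal restatement with made-up counts for illustration.

# Confusion-matrix arithmetic used above, with made-up counts.
pos_lab, neg_lab = 80, 120        # positive / negative ground-truth labels
true_pos, true_neg = 70, 100      # correct predictions per class
false_pos = neg_lab - true_neg    # 20 negatives predicted positive
false_neg = pos_lab - true_pos    # 10 positives predicted negative
accuracy = (true_pos + true_neg) / float(pos_lab + neg_lab)  # 0.85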
Example #25
"""
Script for searching over Grasp Quality Convolutional Neural Network (GQ-CNN) hyper-parameters.

Author
------
Vishal Satish
"""
import argparse
import sys

from gqcnn import GQCNNSearch
from autolab_core import YamlConfig, Logger

# set up logger
logger = Logger.get_logger('tools/hyperparam_search.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Hyper-parameter search for GQ-CNN.')
    parser.add_argument('datasets',
                        nargs='+',
                        default=None,
                        help='path to datasets')
    parser.add_argument('--base_model_dirs',
                        nargs='+',
                        default=[],
                        help='path to pre-trained base models for fine-tuning')
    parser.add_argument('--train_configs',
                        nargs='+',
                        default=['cfg/train.yaml'],
Example #26
Script for training a Grasp Quality Convolutional Neural Network (GQ-CNN).

Author
------
Vishal Satish & Jeff Mahler
"""
import argparse
import os
import time

import autolab_core.utils as utils
from autolab_core import YamlConfig, Logger
from gqcnn import get_gqcnn_model, get_gqcnn_trainer, utils as gqcnn_utils

# setup logger
logger = Logger.get_logger('tools/train.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Train a Grasp Quality Convolutional Neural Network with TensorFlow')
    parser.add_argument(
        'dataset_dir',
        type=str,
        default=None,
        help='path to the dataset to use for training and validation')
    parser.add_argument('--split_name',
                        type=str,
                        default='image_wise',
                        help='name of the split to train on')
Example #27
Be sure to register the camera beforehand!
Author: Jeff Mahler
"""
import argparse
import numpy as np
import os

import rospy
import matplotlib.pyplot as plt

from autolab_core import RigidTransform, Box, YamlConfig, Logger
import autolab_core.utils as utils
from perception import RgbdSensorFactory, Image

# set up logger
logger = Logger.get_logger('tools/capture_test_images.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Capture a set of RGB-D images from a set of sensors')
    parser.add_argument('output_dir',
                        type=str,
                        help='path to save captured images')
    parser.add_argument('--config_filename',
                        type=str,
                        default='cfg/tools/capture_test_images.yaml',
                        help='path to configuration file to use')
    args = parser.parse_args()
    config_filename = args.config_filename
    output_dir = args.output_dir

    # read config
    config = YamlConfig(config_filename)
    vis = config['vis']

    # make output dir if needed
Example #28
    def visualise(self, model_dir, output_dir):
        """Evaluate the model on the dataset in `self.datadir`, then plot
        and save the resulting classification accuracies.

        Parameters
        ----------
        model_dir : str
            Path to the model.
        output_dir : str
            Path to store the classification accuracies of the models.
        """
        # Create output dir if it doesn't exist yet
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Set up logger
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)
        self.logger.info("Saving output to %s" % output_dir)

        model_config = YamlConfig(model_dir + '/config.json')
        self.gripper_mode = model_config['gqcnn']['gripper_mode']
        if 'pose_input' in model_config['gqcnn']:
            self.pose_input = model_config['gqcnn']['pose_input']

        # Load models
        checkpoints = []
        if self.analyse_checkpoints:
            model_files = os.listdir(model_dir)
            for model_file in model_files:
                if 'model' in model_file:
                    if model_file[5] == '_':
                        checkpoints.append(int(model_file[6:].split('.')[0]))
        checkpoints.append('final')
        checkpoints = list(set(checkpoints))
        models = self._read_model(model_dir, checkpoints)

        # Initiate accuracy variables
        elev_bins = np.arange(2.5, 72.5, 5)

        accuracies = {}
        for checkpoint in checkpoints:
            accuracies[checkpoint] = {}
            for elev in elev_bins:
                accuracies[checkpoint][elev] = {'acc': [], 'tp': [], 'tn': [], 'num_p': [], 'num_n': []}
        stepsize = 50

        # Read and predict data with all models
        for steps in range(0, len(self.files), stepsize):
            self.logger.info("Read in tensors %d to %d" % (steps, steps+stepsize))
            image_arr, pose_arr, all_labels, elev_arr = self._read_data(steps, stepsize)
            for elev in elev_bins:
                mask = (elev_arr.squeeze() >= elev - 2.5) & (elev_arr.squeeze() < elev + 2.5)
                images = image_arr[mask]
                poses = pose_arr[mask]
                labels = all_labels[mask]
                for cnt, model in enumerate(models):
                    preds = model.predict(images, poses)
                    if preds is not None:
                        results = BinaryClassificationResult(preds[:, 1], labels)
                        accuracies[checkpoints[cnt]][elev]['acc'].append(100 * results.accuracy)
                        accuracies[checkpoints[cnt]][elev]['tp'].append(len(results.true_positive_indices))
                        accuracies[checkpoints[cnt]][elev]['tn'].append(len(results.true_negative_indices))
                        accuracies[checkpoints[cnt]][elev]['num_p'].append(len(labels[labels == 1]))
                        accuracies[checkpoints[cnt]][elev]['num_n'].append(len(labels[labels == 0]))

        # Calculate prediction accuracy for all models and all elevation (phi) angles
        for checkpoint in checkpoints:
            true_acc = []
            false_acc = []
            all_acc = []
            self.logger.info("Checkpoint: " + str(checkpoint))
            for elev in elev_bins:
                try:
                    tacc = sum(accuracies[checkpoint][elev]['tp']) / sum(accuracies[checkpoint][elev]['num_p']) * 100
                    facc = sum(accuracies[checkpoint][elev]['tn']) / sum(accuracies[checkpoint][elev]['num_n']) * 100
                    acc = ((sum(accuracies[checkpoint][elev]['tn']) +
                            sum(accuracies[checkpoint][elev]['tp'])) /
                           (sum(accuracies[checkpoint][elev]['num_p']) +
                            sum(accuracies[checkpoint][elev]['num_n'])) * 100)
                    true_acc.append(tacc)
                    false_acc.append(facc)
                    all_acc.append(acc)
                    self.logger.info("Elev: %.1f, Accuracy positive grasps: %.1f %%" % (elev, tacc))
                    self.logger.info("Elev: %.1f, Accuracy negative grasps: %.1f %%" % (elev, facc))
                    self.logger.info("Elev: %.1f, Accuracy all grasps: %.1f %%" % (elev, acc))
                except ZeroDivisionError:
                    self.logger.info("Elev: %.1f, no grasps" % elev)

            # Save output to txt file
            np.savetxt(output_dir + '/' + str(checkpoint) + '_tacc', true_acc, '%.1f')
            np.savetxt(output_dir + '/' + str(checkpoint) + '_facc', false_acc, '%.1f')
            np.savetxt(output_dir + '/' + str(checkpoint) + '_acc', all_acc, '%.1f')

            # Plot the outputs
            plt.figure()
            plt.plot(elev_bins, true_acc)
            plt.title("Prediction accuracy on positive grasps over varying elevation angles")
            plt.xlabel("Elevation angle [deg]")
            plt.ylabel("Accuracy [%]")
            plt.ylim((0, 100))
            plt.xlim((0, 60))
            plt.savefig(output_dir + '/' + str(checkpoint) + '_True_Accuracy.png')
            plt.close()

            plt.figure()
            plt.plot(elev_bins, false_acc)
            plt.title("Prediction accuracy on negative grasps over varying elevation angles")
            plt.xlabel("Elevation angle [deg]")
            plt.ylabel("Accuracy [%]")
            plt.ylim((0, 100))
            plt.xlim((0, 60))
            plt.savefig(output_dir + '/' + str(checkpoint) + '_Neg_Accuracy.png')
            plt.close()
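Note: a hedged, self-contained sketch of the 5-degree elevation binning used above, with synthetic angles in place of the dataset's elevation tensor.

# Hedged sketch of the elevation binning, with synthetic angles.
import numpy as np

elev_bins = np.arange(2.5, 72.5, 5)       # bin centers, 5-degree width
elev_arr = np.random.uniform(0, 60, 200)  # synthetic elevation angles
for elev in elev_bins:
    mask = (elev_arr >= elev - 2.5) & (elev_arr < elev + 2.5)
    bin_samples = elev_arr[mask]          # samples falling in this bin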
Example #29
import shutil
import time
import traceback
import matplotlib.pyplot as plt

from autolab_core import TensorDataset, YamlConfig, Logger
import autolab_core.utils as utils
from perception import DepthImage, GrayscaleImage, BinaryImage, ColorImage

from sd_maskrcnn.envs import BinHeapEnv
from sd_maskrcnn.envs.constants import *

SEED = 744

# set up logger
logger = Logger.get_logger('tools/generate_segmask_dataset.py')


def generate_segmask_dataset(output_dataset_path,
                             config,
                             save_tensors=True,
                             warm_start=False):
    """ Generate a segmentation training dataset

    Parameters
    ----------
    dataset_path : str
        path to store the dataset
    config : dict
        dictionary-like objects containing parameters of the simulator and visualization
    save_tensors : bool
Example #30
Vishal Satish & Jeff Mahler
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import time

from autolab_core import YamlConfig, Logger
import autolab_core.utils as utils
from gqcnn import get_gqcnn_model, get_gqcnn_trainer, utils as gqcnn_utils

# Setup logger.
logger = Logger.get_logger("tools/train.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description=("Train a Grasp Quality Convolutional Neural Network with"
                     " TensorFlow"))
    parser.add_argument(
        "dataset_dir",
        type=str,
        default=None,
        help="path to the dataset to use for training and validation")
    parser.add_argument("--split_name",
                        type=str,
                        default="image_wise",
                        help="name of the split to train on")