Example #1
    def __init__(self,
                 trial_cpu_load,
                 trial_gpu_load,
                 trial_gpu_mem,
                 monitor_cpu=True,
                 monitor_gpu=True,
                 cpu_cores=[],
                 gpu_devices=[]):
        self._monitor_cpu = monitor_cpu
        self._monitor_gpu = monitor_gpu

        # Set up logger.
        self._logger = Logger.get_logger(self.__class__.__name__)

        if not monitor_cpu:
            self._logger.warning(
                "Not monitoring CPU resources is not advised.")
        if not monitor_gpu:
            self._logger.warning(
                "Not monitoring GPU resources is not advised.")

        self._trial_cpu_load = trial_cpu_load
        self._trial_gpu_load = trial_gpu_load
        self._trial_gpu_mem = trial_gpu_mem

        self._cpu_cores = cpu_cores
        if len(self._cpu_cores) == 0:
            self._logger.warning(
                "No CPU cores specified-proceeding to use all available cores."
            )
            self._cpu_cores = range(psutil.cpu_count())
        self._cpu_count = len(self._cpu_cores)

        self._gpu_devices = gpu_devices
        if len(self._gpu_devices) == 0:
            no_gpus_specified_msg = ("No GPU devices specified; proceeding to"
                                     " use all available devices.")
            self._logger.warning(no_gpus_specified_msg)
            self._gpu_devices = range(len(GPUtil.getGPUs()))
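A minimal standalone sketch of the two fallbacks above, assuming psutil and GPUtil are installed (the surrounding class is omitted):

import GPUtil
import psutil

# If no CPU cores are specified, fall back to every logical core index.
cpu_cores = list(range(psutil.cpu_count()))

# If no GPU devices are specified, fall back to every GPU index GPUtil reports.
gpu_devices = list(range(len(GPUtil.getGPUs())))

print("CPU cores:", cpu_cores)
print("GPU devices:", gpu_devices)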
Example #2
    def __init__(self, analysis_config, train_configs, datasets, split_names,
                 base_models=[], output_dir=None, search_name=None,
                 monitor_cpu=True, monitor_gpu=True, cpu_cores=[],
                 gpu_devices=[]):
        self._analysis_cfg = analysis_config
        
        # create trial output dir if not specified
        if search_name is None:
            search_name = 'gqcnn_hyperparam_search_{}'.format(gen_timestamp())  
        if output_dir is None:
            output_dir = 'models'
        self._trial_output_dir = os.path.join(output_dir, search_name)
        if not os.path.exists(self._trial_output_dir):
            os.makedirs(self._trial_output_dir)

        # set up logger
        self._logger = Logger.get_logger(
            self.__class__.__name__,
            log_file=os.path.join(self._trial_output_dir, 'search.log'),
            global_log_file=True)

        # init resource manager
        self._resource_manager = ResourceManager(
            TrialConstants.TRIAL_CPU_LOAD,
            TrialConstants.TRIAL_GPU_LOAD,
            TrialConstants.TRIAL_GPU_MEM,
            monitor_cpu=monitor_cpu,
            monitor_gpu=monitor_gpu,
            cpu_cores=cpu_cores,
            gpu_devices=gpu_devices)
        
        # parse train configs and generate individual trial parameters
        if len(base_models) > 0:
            assert (len(train_configs) == len(datasets) == len(split_names) ==
                    len(base_models)), ('Must have equal number of training'
                                        ' configs, datasets, split_names, and'
                                        ' base models!')
        else:
            assert len(train_configs) == len(datasets) == len(split_names), (
                'Must have equal number of training configs, datasets, and'
                ' split_names!')
        self._logger.info('Generating trial parameters...')
        trial_params = gen_trial_params(train_configs,
                                        datasets,
                                        split_names,
                                        base_models=base_models)

        # create pending trial queue
        self._trials_pending_queue = Queue()
        if len(base_models) > 0:
            for (trial_name, hyperparam_summary, train_cfg, dataset, base_model,
                 split_name) in trial_params:
                self._trials_pending_queue.put(
                    GQCNNFineTuningAndAnalysisTrial(
                        self._analysis_cfg, train_cfg, dataset, base_model,
                        split_name, self._trial_output_dir, trial_name,
                        hyperparam_summary))
        else:
            for (trial_name, hyperparam_summary, train_cfg, dataset,
                 split_name) in trial_params:
                self._trials_pending_queue.put(
                    GQCNNTrainingAndAnalysisTrial(
                        self._analysis_cfg, train_cfg, dataset, split_name,
                        self._trial_output_dir, trial_name,
                        hyperparam_summary))

        # create containers to hold running, finished, and errored-out trials
        self._trials_running = []
        self._trials_finished = []
        self._trials_errored = []
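The pending queue and the three lists imply a simple scheduling loop. A self-contained sketch of that pattern follows, using a stand-in trial class rather than the repo's GQCNNTrainingAndAnalysisTrial (the run method here is an illustrative assumption, not the repo's API):

from queue import Queue

class _StubTrial:
    # Stand-in for a trial object; real trials wrap a full GQ-CNN training run.
    def __init__(self, name):
        self.name = name

    def run(self):
        print("running", self.name)

pending = Queue()
for name in ["trial_0", "trial_1"]:
    pending.put(_StubTrial(name))

running, finished, errored = [], [], []
while not pending.empty():
    trial = pending.get()
    running.append(trial)
    try:
        trial.run()
        finished.append(trial)
    except Exception:
        errored.append(trial)
    finally:
        running.remove(trial)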
Example #3
    def __init__(self,
                 siamese_net,
                 dataset_dir,
                 output_dir,
                 config,
                 model_name=None,
                 progress_dict=None,
                 log_file=None,
                 verbose=True):
        self._network = siamese_net
        self._dataset_dir = dataset_dir
        self._output_dir = output_dir
        self._model_name = model_name
        self._progress_dict = progress_dict

        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__,
                                         log_file=log_file,
                                         silence=(not verbose),
                                         global_log_file=verbose)

        # read config
        self._parse_config(config)
Example #4
    def run_analysis(self, model_dir, output_dir, data_dir, noise_analysis,
                     depth_analysis, perturb_analysis, single_analysis):

        # Determine model name
        model_name = ""
        model_root = model_dir
        while model_name == "" and model_root != "":
            model_root, model_name = os.path.split(model_root)

        # Store each analysis type in its corresponding subdirectory.
        if noise_analysis:
            output_dir = os.path.join(output_dir, "Noise_Comparison/")
        if depth_analysis:
            output_dir = os.path.join(output_dir, "Depth_Comparison/")
        if perturb_analysis:
            output_dir = os.path.join(output_dir, "Perturbation_Analysis/")
        if single_analysis:
            output_dir = os.path.join(output_dir, "Single_Analysis/")

        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Set up logger.
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)

        self.logger.info("Analyzing model %s" % (model_name))
        self.logger.info("Saving output to %s" % (output_dir))

        # Run predictions
        result = self._run_prediction(model_dir, output_dir, data_dir,
                                      noise_analysis, depth_analysis,
                                      perturb_analysis, single_analysis)
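The while loop above strips the path one component at a time until it finds the last non-empty one, which makes it robust to trailing separators. A small standalone illustration (the paths are made up):

import os

def last_path_component(path):
    # Mirrors the loop above: keep splitting until a non-empty tail appears.
    name, root = "", path
    while name == "" and root != "":
        root, name = os.path.split(root)
    return name

print(last_path_component("/models/GQCNN-4.0-PJ"))   # -> GQCNN-4.0-PJ
print(last_path_component("/models/GQCNN-4.0-PJ/"))  # -> GQCNN-4.0-PJ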
Example #5
import shutil
import time
import traceback
import matplotlib.pyplot as plt

from autolab_core import TensorDataset, YamlConfig, Logger
import autolab_core.utils as utils
from perception import DepthImage, GrayscaleImage, BinaryImage, ColorImage

from sd_maskrcnn.envs import BinHeapEnv
from sd_maskrcnn.envs.constants import *

SEED = 744

# set up logger
logger = Logger.get_logger('tools/generate_segmask_dataset.py')


def generate_segmask_dataset(output_dataset_path,
                             config,
                             save_tensors=True,
                             warm_start=False):
    """ Generate a segmentation training dataset

    Parameters
    ----------
    output_dataset_path : str
        path to store the dataset
    config : dict
        dictionary-like object containing parameters of the simulator and visualization
    save_tensors : bool
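A hypothetical invocation of the function above; the config path and output path are assumptions for illustration, and the real schema is whatever the sd_maskrcnn YAML configs define:

from autolab_core import YamlConfig

config = YamlConfig("cfg/generate_mask_dataset.yaml")  # assumed path
generate_segmask_dataset("datasets/segmask_example",   # assumed output dir
                         config,
                         save_tensors=True,
                         warm_start=False)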
Example #6
    def visualise(self, model_dir, output_dir):
        """
        Evaluates the model on the dataset in self.datadir. Plots and saves the resulting classification accuracies.


        Parameters
        ----------
        model_dir (str): Path to the model.
        output_dir (str): Path to store the classification accuracies of the models

        Returns
        -------

        """
        # Create output dir if it doesn't exist yet
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Set up logger
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)
        self.logger.info("Saving output to %s" % output_dir)

        model_config = YamlConfig(os.path.join(model_dir, 'config.json'))
        self.gripper_mode = model_config['gqcnn']['gripper_mode']
        if 'pose_input' in model_config['gqcnn']:
            self.pose_input = model_config['gqcnn']['pose_input']

        # Load models
        checkpoints = []
        if self.analyse_checkpoints:
            model_files = os.listdir(model_dir)
            for model_file in model_files:
                if 'model' in model_file:
                    if model_file[5] == '_':
                        checkpoints.append(int(model_file[6:].split('.')[0]))
        checkpoints.append('final')
        checkpoints = list(set(checkpoints))
        models = self._read_model(model_dir, checkpoints)

        # Initiate accuracy variables
        elev_bins = np.arange(2.5, 72.5, 5)

        accuracies = {}
        for checkpoint in checkpoints:
            accuracies[checkpoint] = {}
            for elev in elev_bins:
                accuracies[checkpoint][elev] = {'acc': [], 'tp': [], 'tn': [], 'num_p': [], 'num_n': []}
        stepsize = 50

        # Read and predict data with all models
        for steps in range(0, len(self.files), stepsize):
            self.logger.info("Read in tensors %d to %d" % (steps, steps+stepsize))
            image_arr, pose_arr, all_labels, elev_arr = self._read_data(steps, stepsize)
            for elev in elev_bins:
                mask = (elev_arr.squeeze() >= elev - 2.5) & (elev_arr.squeeze() < elev + 2.5)
                images = image_arr[mask]
                poses = pose_arr[mask]
                labels = all_labels[mask]
                for cnt, model in enumerate(models):
                    preds = model.predict(images, poses)
                    if preds is not None:
                        results = BinaryClassificationResult(preds[:, 1], labels)
                        accuracies[checkpoints[cnt]][elev]['acc'].append(100 * results.accuracy)
                        accuracies[checkpoints[cnt]][elev]['tp'].append(len(results.true_positive_indices))
                        accuracies[checkpoints[cnt]][elev]['tn'].append(len(results.true_negative_indices))
                        accuracies[checkpoints[cnt]][elev]['num_p'].append(len(labels[labels == 1]))
                        accuracies[checkpoints[cnt]][elev]['num_n'].append(len(labels[labels == 0]))

        # Calculate prediction accuracy for all models and all elevation (phi) angles
        for checkpoint in checkpoints:
            true_acc = []
            false_acc = []
            all_acc = []
            self.logger.info("Checkpoint: " + str(checkpoint))
            for elev in elev_bins:
                try:
                    tacc = sum(accuracies[checkpoint][elev]['tp']) / sum(accuracies[checkpoint][elev]['num_p']) * 100
                    facc = sum(accuracies[checkpoint][elev]['tn']) / sum(accuracies[checkpoint][elev]['num_n']) * 100
                    acc = (sum(accuracies[checkpoint][elev]['tn']) + sum(accuracies[checkpoint][elev]['tp']))/\
                          (sum(accuracies[checkpoint][elev]['num_p']) + sum(accuracies[checkpoint][elev]['num_n'])) * 100
                    true_acc.append(tacc)
                    false_acc.append(facc)
                    all_acc.append(acc)
                    self.logger.info("Elev: %.1f, Accuracy positive grasps: %.1f %%" % (elev, tacc))
                    self.logger.info("Elev: %.1f, Accuracy negative grasps: %.1f %%" % (elev, facc))
                    self.logger.info("Elev: %.1f, Accuracy all grasps: %.1f %%" % (elev, acc))
                except ZeroDivisionError:
                    self.logger.info("Elev: %.1f, no grasps" % elev)

            # Save output to txt file
            np.savetxt(output_dir + '/' + str(checkpoint) + '_tacc', true_acc, '%.1f')
            np.savetxt(output_dir + '/' + str(checkpoint) + '_facc', false_acc, '%.1f')
            np.savetxt(output_dir + '/' + str(checkpoint) + '_acc', all_acc, '%.1f')

            # Plot the outputs
            plt.figure()
            plt.plot(elev_bins, true_acc)
            plt.title("Prediction accuracy on positive grasps over varying elevation angles")
            plt.xlabel("Elevation angle [deg]")
            plt.ylabel("Accuracy [%]")
            plt.ylim((0, 100))
            plt.xlim((0, 60))
            plt.savefig(output_dir + '/' + str(checkpoint) + '_True_Accuracy.png')
            plt.close()

            plt.figure()
            plt.plot(elev_bins, false_acc)
            plt.title("Prediction accuracy on negative grasps over varying elevation angles")
            plt.xlabel("Elevation angle [deg]")
            plt.ylabel("Accuracy [%]")
            plt.ylim((0, 100))
            plt.xlim((0, 60))
            plt.savefig(output_dir + '/' + str(checkpoint) + '_Neg_Accuracy.png')
            plt.close()
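The bookkeeping above groups grasps into 5-degree elevation bins centred at 2.5, 7.5, ..., 67.5 degrees. A minimal sketch of that binning on synthetic angles:

import numpy as np

elev_bins = np.arange(2.5, 72.5, 5)            # bin centres, 5 degrees wide
elev_arr = np.random.uniform(0, 70, size=100)  # synthetic elevation angles

for elev in elev_bins:
    mask = (elev_arr >= elev - 2.5) & (elev_arr < elev + 2.5)
    print("bin %.1f deg: %d samples" % (elev, mask.sum()))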
Example #7
Script for training a Grasp Quality Neural Network (GQ-CNN).

Author
------
Vishal Satish & Jeff Mahler
"""
import argparse
import os
import time

import autolab_core.utils as utils
from autolab_core import YamlConfig, Logger
from gqcnn import get_gqcnn_model, get_gqcnn_trainer, utils as gqcnn_utils

# setup logger
logger = Logger.get_logger('tools/train.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Train a Grasp Quality Convolutional Neural Network with TensorFlow')
    parser.add_argument(
        'dataset_dir',
        type=str,
        default=None,
        help='path to the dataset to use for training and validation')
    parser.add_argument('--split_name',
                        type=str,
                        default='image_wise',
                        help='name of the split to train on')
Example #8
    def __init__(self, model="GQCNN-4.0-PJ", config_filepath=None):
        self.logger = Logger.get_logger(__name__)
        self.model = model
        self.grasping_policy = None
        self._get_cfg(config_filepath)
Example #9
Author
------
Vishal Satish and Jeff Mahler
"""
import argparse
import os
import sys
import time

from autolab_core import YamlConfig, Logger
from gqcnn import GQCNNAnalyzer

# setup logger
logger = Logger.get_logger('tools/analyze_gqcnn_performance.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Analyze a Grasp Quality Convolutional Neural Network with TensorFlow')
    parser.add_argument('model_name',
                        type=str,
                        default=None,
                        help='name of model to analyze')
    parser.add_argument('--output_dir',
                        type=str,
                        default=None,
                        help='path to save the analysis')
    parser.add_argument(
Example #10
File: utils.py  Project: richardliaw/gqcnn
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Simple utility functions
Authors: Jeff Mahler, Vishal Satish, Lucas Manuelli
"""
import os

import numpy as np

from autolab_core import Logger
from enums import GripperMode

# set up logger
logger = Logger.get_logger('gqcnn/utils/utils.py')

def set_cuda_visible_devices(gpu_list):
    """
    Sets CUDA_VISIBLE_DEVICES environment variable to only show certain gpus
    If gpu_list is empty does nothing
    :param gpu_list: list of gpus to set as visible
    :return: None
    """

    if len(gpu_list) == 0:
        return

    cuda_visible_devices = ""
    for gpu in gpu_list:
        cuda_visible_devices += str(gpu) + ","
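The snippet is cut off before the environment variable is actually written. A plausible completion (an assumption, not necessarily the repo's exact code) would join the indices and export the result:

import os

def set_cuda_visible_devices(gpu_list):
    # Restrict TensorFlow/CUDA to the listed GPUs; an empty list is a no-op.
    if len(gpu_list) == 0:
        return
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in gpu_list)

set_cuda_visible_devices([0, 2])
print(os.environ["CUDA_VISIBLE_DEVICES"])  # -> "0,2"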
Example #11
    def __init__(self, config):
        # Set params.
        self._config = config

        # Setup logger.
        self._logger = Logger.get_logger(self.__class__.__name__)
Example #12
    def __init__(self, physics_engine, config):

        self._physics_engine = physics_engine
        self._config = config

        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__)

        # read subconfigs
        obj_config = config['objects']
        workspace_config = config['workspace']

        self.num_objs_rv = sstats.poisson(config['mean_objs'] - 1)
        self.max_objs = config['max_objs']
        self.min_objs = 1
        if 'min_objs' in list(config.keys()):
            self.min_objs = config['min_objs']

        self.max_obj_diam = config['max_obj_diam']
        self.drop_height = config['drop_height']
        self.max_settle_steps = config['max_settle_steps']
        self.mag_v_thresh = config['mag_v_thresh']
        self.mag_w_thresh = config['mag_w_thresh']

        # bounds of heap center in the table plane
        min_heap_center = np.array(config['center']['min'])
        max_heap_center = np.array(config['center']['max'])
        self.heap_center_space = gym.spaces.Box(min_heap_center,
                                                max_heap_center,
                                                dtype=np.float32)

        # Set up object configs
        # bounds of object drop pose in the table plane
        # organized as [tx, ty, theta] where theta is in radians
        min_obj_pose = np.r_[obj_config['planar_translation']['min'], 0]
        max_obj_pose = np.r_[obj_config['planar_translation']['max'],
                             2 * np.pi]
        self.obj_planar_pose_space = gym.spaces.Box(min_obj_pose,
                                                    max_obj_pose,
                                                    dtype=np.float32)

        # bounds of object drop orientation
        min_sph_coords = np.array([0.0, 0.0])
        max_sph_coords = np.array([2 * np.pi, np.pi])
        self.obj_orientation_space = gym.spaces.Box(min_sph_coords,
                                                    max_sph_coords,
                                                    dtype=np.float32)

        # bounds of center of mass
        delta_com_sigma = max(1e-6, obj_config['center_of_mass']['sigma'])
        self.delta_com_rv = sstats.multivariate_normal(np.zeros(3),
                                                       delta_com_sigma**2)

        self.obj_density = obj_config['density']

        # bounds of workspace (for checking out of bounds)
        min_workspace_trans = np.array(workspace_config['min'])
        max_workspace_trans = np.array(workspace_config['max'])
        self.workspace_space = gym.spaces.Box(min_workspace_trans,
                                              max_workspace_trans,
                                              dtype=np.float32)

        # Setup object keys and directories
        object_keys = []
        mesh_filenames = []
        self._train_pct = obj_config['train_pct']
        num_objects = obj_config['num_objects']
        self._mesh_dir = obj_config['mesh_dir']
        if not os.path.isabs(self._mesh_dir):
            self._mesh_dir = os.path.join(os.getcwd(), self._mesh_dir)
        for root, dirs, files in os.walk(self._mesh_dir):
            dataset_name = os.path.basename(root)
            if dataset_name in list(obj_config['object_keys'].keys()):
                for f in files:
                    filename, ext = os.path.splitext(f)
                    if ext.split('.')[1] in trimesh.exchange.load.mesh_formats() \
                        and (filename in obj_config['object_keys'][dataset_name] or obj_config['object_keys'][dataset_name] == 'all'):
                        obj_key = '{}{}{}'.format(dataset_name, KEY_SEP_TOKEN,
                                                  filename)
                        object_keys.append(obj_key)
                        mesh_filenames.append(os.path.join(root, f))

        inds = np.arange(len(object_keys))
        np.random.shuffle(inds)
        self.all_object_keys = list(np.array(object_keys)[inds][:num_objects])
        mesh_filenames = list(np.array(mesh_filenames)[inds][:num_objects])
        self.train_keys = self.all_object_keys[:int(
            len(self.all_object_keys) * self._train_pct)]
        self.test_keys = self.all_object_keys[
            int(len(self.all_object_keys) * self._train_pct):]
        self.obj_ids = dict([(key, i + 1)
                             for i, key in enumerate(self.all_object_keys)])
        self.mesh_filenames = dict(zip(self.all_object_keys, mesh_filenames))
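The shuffling and slicing above amount to a percentage split over the object keys. A compact standalone sketch of the same idea:

import numpy as np

object_keys = ["obj_%02d" % i for i in range(10)]
train_pct = 0.8

inds = np.arange(len(object_keys))
np.random.shuffle(inds)
shuffled = [object_keys[i] for i in inds]

split = int(len(shuffled) * train_pct)
train_keys, test_keys = shuffled[:split], shuffled[split:]
print(len(train_keys), "train keys,", len(test_keys), "test keys")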
Example #13
Author: Jeff Mahler
"""
import argparse
import os
import random
import time

import numpy as np

from autolab_core import RigidTransform, YamlConfig, Logger
from gqcnn import RgbdImageState, ParallelJawGrasp
from gqcnn import CrossEntropyRobustGraspingPolicy
from visualization import Visualizer2D as vis2d

# set up logger
logger = Logger.get_logger('tools/run_policy.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Run a saved test case through a GQ-CNN policy. For debugging purposes only.'
    )
    parser.add_argument('test_case_path',
                        type=str,
                        default=None,
                        help='path to test case')
    parser.add_argument('--config_filename',
                        type=str,
                        default='cfg/tools/run_policy.yaml',
                        help='path to configuration file to use')
Example #14
File: utils.py  Project: anmakon/gqcnn
-------
Jeff Mahler, Vishal Satish, Lucas Manuelli
"""
from functools import reduce
import os
import sys

import numpy as np
import skimage.transform as skt

from autolab_core import Logger
from .enums import GripperMode
from PIL import Image, ImageDraw

# Set up logger.
logger = Logger.get_logger("gqcnn/utils/utils.py")


def is_py2():
    return sys.version[0] == "2"


def set_cuda_visible_devices(gpu_list):
    """Sets CUDA_VISIBLE_DEVICES environment variable to only show certain
    gpus.

    Note
    ----
    If gpu_list is empty does nothing.

    Parameters
Example #15
    def visualise(self, model_dir, output_dir, data_dir):

        # Determine model name
        model_name = ""
        model_root = model_dir
        while model_name == "" and model_root != "":
            model_root, model_name = os.path.split(model_root)

        output_dir = os.path.join(output_dir, "Visualisation/")
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # Read in model config.
        model_config_filename = os.path.join(model_dir, "config.json")
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # Set up logger
        self.logger = Logger.get_logger(self.__class__.__name__,
                                        log_file=os.path.join(
                                            output_dir, "analysis.log"),
                                        silence=(not self.verbose),
                                        global_log_file=self.verbose)

        self.logger.info("Analyzing model %s" % (model_name))
        self.logger.info("Saving output to %s" % (output_dir))
        mixture = False

        if "Cornell" in model_dir:
            model_name = "Cornell"
        elif "DexNet" in model_dir:
            model_name = "DexNet"

        if "Cornell" in data_dir:
            data_name = "Cornell"
        elif "DexNet" in data_dir:
            data_name = "DexNet"
        elif "Both" in data_dir:
            data_name = "mixed"
            mixture = True

        # Load model.
        self.logger.info("Loading model %s" % (model_dir))
        log_file = None
        for handler in self.logger.handlers:
            if isinstance(handler, logging.FileHandler):
                log_file = handler.baseFilename
        gqcnn = get_gqcnn_model(verbose=self.verbose).load(
            model_dir, verbose=self.verbose, log_file=log_file)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode
        angular_bins = gqcnn.angular_bins

        if mixture:
            image_arr, pose_arr, labels, width_arr, file_arr, obj_arr, identity_arr = self._read_data(
                data_dir, mixture=True)
        else:
            image_arr, pose_arr, labels, width_arr, file_arr, obj_arr = self._read_data(
                data_dir)
        print("Object arr: ", obj_arr)
        # Predict outcomes
        predictions = gqcnn.predict(image_arr, pose_arr)

        if predictions.shape[0] == 1:
            print("Only 1 image given. No t-SNE analysis of network possible")
        else:
            # Setting colors and labels
            color = []
            monotone = False
            if mixture:
                for label, identity in zip(labels, identity_arr):
                    if identity == 0:
                        # Cornell
                        if label == 0:
                            # negative
                            color.append('#FF8000')
                        else:
                            # positive
                            color.append('#2D702F')
                    else:
                        # DexNet
                        if label == 0:
                            # negative
                            color.append('#FF0404')
                        else:
                            # positive
                            color.append('#23C328')
                if len(np.unique(labels)) == 1:
                    monotone = True
                    if labels[0] == 0:
                        data_name += " negatives"
                        pop_a = mpatches.Patch(color='#FF8000',
                                               label='Negative Cornell')
                        pop_b = mpatches.Patch(color='#FF0404',
                                               label='Negative DexNet')
                    else:
                        data_name += " positives"
                        pop_a = mpatches.Patch(color='#2D702F',
                                               label='Positive Cornell')
                        pop_b = mpatches.Patch(color='#23C328',
                                               label='Positive DexNet')
                else:
                    pop_a = mpatches.Patch(color='#FF8000',
                                           label='Negative Cornell')
                    pop_b = mpatches.Patch(color='#FF0404',
                                           label='Negative DexNet')
                    pop_c = mpatches.Patch(color='#2D702F',
                                           label='Positive Cornell')
                    pop_d = mpatches.Patch(color='#23C328',
                                           label='Positive DexNet')
            else:
                color = ['r' if truth == 0 else 'g' for truth in labels]
                pop_a = mpatches.Patch(color='r', label='Negative grasp')
                pop_b = mpatches.Patch(color='g', label='Positive grasp')

            # t-SNE
            tsne_out = sklearn.manifold.TSNE(
                n_components=2).fit_transform(predictions)
            plt.scatter(tsne_out[:, 0], tsne_out[:, 1], marker='o', c=color)
            if mixture and not monotone:
                plt.legend(handles=[pop_a, pop_b, pop_c, pop_d])
            else:
                plt.legend(handles=[pop_a, pop_b])
            plt.title("TSNE output of %s data on a GQCNN trained on %s" %
                      (data_name, model_name))
            plt.savefig(output_dir + "/" + model_name + "_model_" + data_name +
                        "_data_TSNE.png")
            plt.close()

            # PCA
            pca_out = sklearn.decomposition.PCA(
                n_components=2).fit_transform(predictions)
            plt.scatter(pca_out[:, 0], pca_out[:, 1], marker='o', c=color)
            plt.title("PCA output of %s data on a GQCNN trained on %s" %
                      (data_name, model_name))
            if mixture and not monotone:
                plt.legend(handles=[pop_a, pop_b, pop_c, pop_d])
            else:
                plt.legend(handles=[pop_a, pop_b])
            plt.savefig(output_dir + "/" + model_name + "_model_" + data_name +
                        "_data_PCA.png")
            plt.close()
Example #16
-------------------
model_dir : str
    Command line argument, the path to the model whose errors are to plotted.
    All plots and other metrics will be saved to this directory.
"""
import os
import sys

import matplotlib.pyplot as plt
import numpy as np

from autolab_core import Logger
from gqcnn.utils import GeneralConstants, GQCNNFilenames

# Set up logger.
logger = Logger.get_logger("tools/plot_training_losses.py")

if __name__ == "__main__":
    result_dir = sys.argv[1]
    train_errors_filename = os.path.join(result_dir,
                                         GQCNNFilenames.TRAIN_ERRORS)
    val_errors_filename = os.path.join(result_dir, GQCNNFilenames.VAL_ERRORS)
    train_iters_filename = os.path.join(result_dir, GQCNNFilenames.TRAIN_ITERS)
    val_iters_filename = os.path.join(result_dir, GQCNNFilenames.VAL_ITERS)
    pct_pos_val_filename = os.path.join(result_dir, GQCNNFilenames.PCT_POS_VAL)
    train_losses_filename = os.path.join(result_dir, GQCNNFilenames.TRAIN_LOSS)
    val_losses_filename = os.path.join(result_dir, GQCNNFilenames.VAL_LOSS)

    raw_train_errors = np.load(train_errors_filename)
    val_errors = np.load(val_errors_filename)
    raw_train_iters = np.load(train_iters_filename)
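What typically follows such loading code is a plot of training and validation error against iteration. A self-contained sketch on synthetic data (the real script would use the arrays loaded above):

import matplotlib.pyplot as plt
import numpy as np

# Synthetic stand-ins for the arrays loaded above.
train_iters = np.arange(0, 1000, 10)
train_errors = np.exp(-train_iters / 300.0)
val_iters = np.arange(0, 1000, 100)
val_errors = np.exp(-val_iters / 300.0) + 0.05

plt.figure()
plt.plot(train_iters, train_errors, label="train error")
plt.plot(val_iters, val_errors, label="val error")
plt.xlabel("iteration")
plt.ylabel("error rate")
plt.legend()
plt.savefig("errors_vs_iteration.png")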
Example #17
    def __init__(self):

        # set up logger
        self._logger = Logger.get_logger(self.__class__.__name__)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os

from autolab_core import RigidTransform, YamlConfig, Logger
from perception import RgbdImage, RgbdSensorFactory
from visualization import Visualizer2D as vis

from gqcnn.grasping import AntipodalDepthImageGraspSampler

# Set up logger.
logger = Logger.get_logger("examples/antipodal_grasp_sampling.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(description=(
        "Sample antipodal grasps on a depth image from an RgbdSensor"))
    parser.add_argument("--config_filename",
                        type=str,
                        default="cfg/examples/antipodal_grasp_sampling.yaml",
                        help="path to configuration file to use")
    args = parser.parse_args()
    config_filename = args.config_filename

    # Read config.
    config = YamlConfig(config_filename)
    sensor_type = config["sensor"]["type"]
Example #19
def generate_segmask_dataset(output_dataset_path,
                             config,
                             save_tensors=True,
                             warm_start=False):
    """ Generate a segmentation training dataset

    Parameters
    ----------
    output_dataset_path : str
        path to store the dataset
    config : dict
        dictionary-like object containing parameters of the simulator and visualization
    save_tensors : bool
        save tensor datasets (for recreating state)
    warm_start : bool
        restart dataset generation from a previous state
    """

    # read subconfigs
    dataset_config = config['dataset']
    image_config = config['images']
    vis_config = config['vis']

    # debugging
    debug = config['debug']
    if debug:
        np.random.seed(SEED)

    # read general parameters
    num_states = config['num_states']
    num_images_per_state = config['num_images_per_state']

    states_per_flush = config['states_per_flush']
    states_per_garbage_collect = config['states_per_garbage_collect']

    # set max obj per state
    max_objs_per_state = config['state_space']['heap']['max_objs']

    # read image parameters
    im_height = config['state_space']['camera']['im_height']
    im_width = config['state_space']['camera']['im_width']
    segmask_channels = max_objs_per_state + 1

    # create the dataset path and all subfolders if they don't exist
    if not os.path.exists(output_dataset_path):
        os.mkdir(output_dataset_path)

    image_dir = os.path.join(output_dataset_path, 'images')
    if not os.path.exists(image_dir):
        os.mkdir(image_dir)
    color_dir = os.path.join(image_dir, 'color_ims')
    if image_config['color'] and not os.path.exists(color_dir):
        os.mkdir(color_dir)
    depth_dir = os.path.join(image_dir, 'depth_ims')
    if image_config['depth'] and not os.path.exists(depth_dir):
        os.mkdir(depth_dir)
    amodal_dir = os.path.join(image_dir, 'amodal_masks')
    if image_config['amodal'] and not os.path.exists(amodal_dir):
        os.mkdir(amodal_dir)
    modal_dir = os.path.join(image_dir, 'modal_masks')
    if image_config['modal'] and not os.path.exists(modal_dir):
        os.mkdir(modal_dir)
    semantic_dir = os.path.join(image_dir, 'semantic_masks')
    if image_config['semantic'] and not os.path.exists(semantic_dir):
        os.mkdir(semantic_dir)

    # setup logging
    experiment_log_filename = os.path.join(output_dataset_path,
                                           'dataset_generation.log')
    if os.path.exists(experiment_log_filename) and not warm_start:
        os.remove(experiment_log_filename)
    Logger.add_log_file(logger, experiment_log_filename, global_log_file=True)
    config.save(
        os.path.join(output_dataset_path, 'dataset_generation_params.yaml'))
    metadata = {}
    num_prev_states = 0

    # set dataset params
    if save_tensors:

        # read dataset subconfigs
        state_dataset_config = dataset_config['states']
        image_dataset_config = dataset_config['images']
        state_tensor_config = state_dataset_config['tensors']
        image_tensor_config = image_dataset_config['tensors']

        obj_pose_dim = POSE_DIM * max_objs_per_state
        obj_com_dim = POINT_DIM * max_objs_per_state
        state_tensor_config['fields']['obj_poses']['height'] = obj_pose_dim
        state_tensor_config['fields']['obj_coms']['height'] = obj_com_dim
        state_tensor_config['fields']['obj_ids']['height'] = max_objs_per_state

        image_tensor_config['fields']['camera_pose']['height'] = POSE_DIM

        if image_config['color']:
            image_tensor_config['fields']['color_im'] = {
                'dtype': 'uint8',
                'channels': 3,
                'height': im_height,
                'width': im_width
            }

        if image_config['depth']:
            image_tensor_config['fields']['depth_im'] = {
                'dtype': 'float32',
                'channels': 1,
                'height': im_height,
                'width': im_width
            }

        if image_config['modal']:
            image_tensor_config['fields']['modal_segmasks'] = {
                'dtype': 'uint8',
                'channels': segmask_channels,
                'height': im_height,
                'width': im_width
            }

        if image_config['amodal']:
            image_tensor_config['fields']['amodal_segmasks'] = {
                'dtype': 'uint8',
                'channels': segmask_channels,
                'height': im_height,
                'width': im_width
            }

        if image_config['semantic']:
            image_tensor_config['fields']['semantic_segmasks'] = {
                'dtype': 'uint8',
                'channels': 1,
                'height': im_height,
                'width': im_width
            }

        # create dataset filenames
        state_dataset_path = os.path.join(output_dataset_path, 'state_tensors')
        image_dataset_path = os.path.join(output_dataset_path, 'image_tensors')

        if warm_start:

            if not os.path.exists(state_dataset_path) or not os.path.exists(
                    image_dataset_path):
                logger.error(
                    'Attempting to warm start without saved tensor dataset')
                exit(1)

            # open datasets
            logger.info('Opening state dataset')
            state_dataset = TensorDataset.open(state_dataset_path,
                                               access_mode='READ_WRITE')
            logger.info('Opening image dataset')
            image_dataset = TensorDataset.open(image_dataset_path,
                                               access_mode='READ_WRITE')

            # read configs
            state_tensor_config = state_dataset.config
            image_tensor_config = image_dataset.config

            # clean up datasets (there may be datapoints with indices corresponding to non-existent data)
            num_state_datapoints = state_dataset.num_datapoints
            num_image_datapoints = image_dataset.num_datapoints
            num_prev_states = num_state_datapoints

            # clean up images
            image_ind = num_image_datapoints - 1
            image_datapoint = image_dataset[image_ind]
            while (image_ind > 0 and
                   image_datapoint['state_ind'] >= num_state_datapoints):
                image_ind -= 1
                image_datapoint = image_dataset[image_ind]
            images_to_remove = num_image_datapoints - 1 - image_ind
            logger.info('Deleting last %d image tensors' % (images_to_remove))
            if images_to_remove > 0:
                image_dataset.delete_last(images_to_remove)
                num_image_datapoints = image_dataset.num_datapoints
        else:
            # create datasets from scratch
            logger.info('Creating datasets')

            state_dataset = TensorDataset(state_dataset_path,
                                          state_tensor_config)
            image_dataset = TensorDataset(image_dataset_path,
                                          image_tensor_config)

        # read templates
        state_datapoint = state_dataset.datapoint_template
        image_datapoint = image_dataset.datapoint_template

    if warm_start:

        if not os.path.exists(
                os.path.join(output_dataset_path, 'metadata.json')):
            logger.error(
                'Attempting to warm start without previously created dataset')
            exit(1)

        # Read metadata and indices
        metadata = json.load(
            open(os.path.join(output_dataset_path, 'metadata.json'), 'r'))
        test_inds = np.load(os.path.join(image_dir,
                                         'test_indices.npy')).tolist()
        train_inds = np.load(os.path.join(image_dir,
                                          'train_indices.npy')).tolist()

        # set obj ids and splits
        reverse_obj_ids = metadata['obj_ids']
        obj_id_map = utils.reverse_dictionary(reverse_obj_ids)
        obj_splits = metadata['obj_splits']
        obj_keys = obj_splits.keys()
        mesh_filenames = metadata['meshes']

        # Get list of images generated so far
        generated_images = sorted(
            os.listdir(color_dir)) if image_config['color'] else sorted(
                os.listdir(depth_dir))
        num_total_images = len(generated_images)

        # Do our own calculation if no saved tensors
        if num_prev_states == 0:
            num_prev_states = num_total_images // num_images_per_state

        # Find images to remove and remove them from all relevant places if they exist
        num_images_to_remove = num_total_images - (num_prev_states *
                                                   num_images_per_state)
        logger.info(
            'Deleting last {} invalid images'.format(num_images_to_remove))
        for k in range(num_images_to_remove):
            im_name = generated_images[-(k + 1)]
            im_basename = os.path.splitext(im_name)[0]
            im_ind = int(im_basename.split('_')[1])
            if os.path.exists(os.path.join(depth_dir, im_name)):
                os.remove(os.path.join(depth_dir, im_name))
            if os.path.exists(os.path.join(color_dir, im_name)):
                os.remove(os.path.join(color_dir, im_name))
            if os.path.exists(os.path.join(semantic_dir, im_name)):
                os.remove(os.path.join(semantic_dir, im_name))
            if os.path.exists(os.path.join(modal_dir, im_basename)):
                shutil.rmtree(os.path.join(modal_dir, im_basename))
            if os.path.exists(os.path.join(amodal_dir, im_basename)):
                shutil.rmtree(os.path.join(amodal_dir, im_basename))
            if im_ind in train_inds:
                train_inds.remove(im_ind)
            elif im_ind in test_inds:
                test_inds.remove(im_ind)

    else:

        # Create initial env to generate metadata
        env = BinHeapEnv(config)
        obj_id_map = env.state_space.obj_id_map
        obj_keys = env.state_space.obj_keys
        obj_splits = env.state_space.obj_splits
        mesh_filenames = env.state_space.mesh_filenames
        save_obj_id_map = obj_id_map.copy()
        save_obj_id_map[ENVIRONMENT_KEY] = np.iinfo(np.uint32).max
        reverse_obj_ids = utils.reverse_dictionary(save_obj_id_map)
        metadata['obj_ids'] = reverse_obj_ids
        metadata['obj_splits'] = obj_splits
        metadata['meshes'] = mesh_filenames
        json.dump(metadata,
                  open(os.path.join(output_dataset_path, 'metadata.json'),
                       'w'),
                  indent=JSON_INDENT,
                  sort_keys=True)
        train_inds = []
        test_inds = []

    # generate states and images
    state_id = num_prev_states
    while state_id < num_states:

        # create env and set objects
        create_start = time.time()
        env = BinHeapEnv(config)
        env.state_space.obj_id_map = obj_id_map
        env.state_space.obj_keys = obj_keys
        env.state_space.set_splits(obj_splits)
        env.state_space.mesh_filenames = mesh_filenames
        create_stop = time.time()
        logger.info('Creating env took %.3f sec' %
                    (create_stop - create_start))

        # sample states
        states_remaining = num_states - state_id
        for i in range(min(states_per_garbage_collect, states_remaining)):

            # log current rollout
            if state_id % config['log_rate'] == 0:
                logger.info('State: %06d' % (state_id))

            try:
                # reset env
                env.reset()
                state = env.state
                split = state.metadata['split']

                # render state
                if vis_config['state']:
                    env.view_3d_scene()

                # Save state if desired
                if save_tensors:

                    # set obj state variables
                    obj_pose_vec = np.zeros(obj_pose_dim)
                    obj_com_vec = np.zeros(obj_com_dim)
                    obj_id_vec = np.iinfo(
                        np.uint32).max * np.ones(max_objs_per_state)
                    j = 0
                    for obj_state in state.obj_states:
                        obj_pose_vec[j * POSE_DIM:(j + 1) *
                                     POSE_DIM] = obj_state.pose.vec
                        obj_com_vec[j * POINT_DIM:(j + 1) *
                                    POINT_DIM] = obj_state.center_of_mass
                        obj_id_vec[j] = int(obj_id_map[obj_state.key])
                        j += 1

                    # store datapoint env params
                    state_datapoint['state_id'] = state_id
                    state_datapoint['obj_poses'] = obj_pose_vec
                    state_datapoint['obj_coms'] = obj_com_vec
                    state_datapoint['obj_ids'] = obj_id_vec
                    state_datapoint['split'] = split

                    # store state datapoint
                    image_start_ind = image_dataset.num_datapoints
                    image_end_ind = image_start_ind + num_images_per_state
                    state_datapoint['image_start_ind'] = image_start_ind
                    state_datapoint['image_end_ind'] = image_end_ind

                    # clean up
                    del obj_pose_vec
                    del obj_com_vec
                    del obj_id_vec

                    # add state
                    state_dataset.add(state_datapoint)

                # render images
                for k in range(num_images_per_state):

                    # reset the camera
                    if num_images_per_state > 1:
                        env.reset_camera()

                    obs = env.render_camera_image(color=image_config['color'])
                    if image_config['color']:
                        color_obs, depth_obs = obs
                    else:
                        depth_obs = obs

                    # vis obs
                    if vis_config['obs']:
                        if image_config['depth']:
                            plt.figure()
                            plt.imshow(depth_obs)
                            plt.title('Depth Observation')
                        if image_config['color']:
                            plt.figure()
                            plt.imshow(color_obs)
                            plt.title('Color Observation')
                        plt.show()

                    if image_config['modal'] or image_config[
                            'amodal'] or image_config['semantic']:

                        # render segmasks
                        amodal_segmasks, modal_segmasks = env.render_segmentation_images(
                        )

                        # retrieve segmask data
                        modal_segmask_arr = np.iinfo(np.uint8).max * np.ones(
                            [im_height, im_width, segmask_channels],
                            dtype=np.uint8)
                        amodal_segmask_arr = np.iinfo(np.uint8).max * np.ones(
                            [im_height, im_width, segmask_channels],
                            dtype=np.uint8)
                        stacked_segmask_arr = np.zeros(
                            [im_height, im_width, 1], dtype=np.uint8)

                        modal_segmask_arr[:, :, :env.num_objects] = modal_segmasks
                        amodal_segmask_arr[:, :, :env.num_objects] = amodal_segmasks

                        if image_config['semantic']:
                            for j in range(env.num_objects):
                                this_obj_px = np.where(
                                    modal_segmasks[:, :, j] > 0)
                                stacked_segmask_arr[this_obj_px[0],
                                                    this_obj_px[1], 0] = j + 1

                    # visualize
                    if vis_config['semantic']:
                        plt.figure()
                        plt.imshow(stacked_segmask_arr.squeeze())
                        plt.show()

                    if save_tensors:
                        # save image data as tensors
                        if image_config['color']:
                            image_datapoint['color_im'] = color_obs
                        if image_config['depth']:
                            image_datapoint['depth_im'] = depth_obs[:, :, None]
                        if image_config['modal']:
                            image_datapoint[
                                'modal_segmasks'] = modal_segmask_arr
                        if image_config['amodal']:
                            image_datapoint[
                                'amodal_segmasks'] = amodal_segmask_arr
                        if image_config['semantic']:
                            image_datapoint[
                                'semantic_segmasks'] = stacked_segmask_arr

                        image_datapoint['camera_pose'] = env.camera.pose.vec
                        image_datapoint[
                            'camera_intrs'] = env.camera.intrinsics.vec
                        image_datapoint['state_ind'] = state_id
                        image_datapoint['split'] = split

                        # add image
                        image_dataset.add(image_datapoint)

                    # Save depth image and semantic masks
                    if image_config['color']:
                        ColorImage(color_obs).save(
                            os.path.join(
                                color_dir, 'image_{:06d}.png'.format(
                                    num_images_per_state * state_id + k)))
                    if image_config['depth']:
                        DepthImage(depth_obs).save(
                            os.path.join(
                                depth_dir, 'image_{:06d}.png'.format(
                                    num_images_per_state * state_id + k)))
                    if image_config['modal']:
                        modal_id_dir = os.path.join(
                            modal_dir,
                            'image_{:06d}'.format(num_images_per_state *
                                                  state_id + k))
                        if not os.path.exists(modal_id_dir):
                            os.mkdir(modal_id_dir)
                        for i in range(env.num_objects):
                            BinaryImage(modal_segmask_arr[:, :, i]).save(
                                os.path.join(modal_id_dir,
                                             'channel_{:03d}.png'.format(i)))
                    if image_config['amodal']:
                        amodal_id_dir = os.path.join(
                            amodal_dir,
                            'image_{:06d}'.format(num_images_per_state *
                                                  state_id + k))
                        if not os.path.exists(amodal_id_dir):
                            os.mkdir(amodal_id_dir)
                        for i in range(env.num_objects):
                            BinaryImage(amodal_segmask_arr[:, :, i]).save(
                                os.path.join(amodal_id_dir,
                                             'channel_{:03d}.png'.format(i)))
                    if image_config['semantic']:
                        GrayscaleImage(stacked_segmask_arr.squeeze()).save(
                            os.path.join(
                                semantic_dir, 'image_{:06d}.png'.format(
                                    num_images_per_state * state_id + k)))

                    # Save split
                    if split == TRAIN_ID:
                        train_inds.append(num_images_per_state * state_id + k)
                    else:
                        test_inds.append(num_images_per_state * state_id + k)

                # auto-flush after every so many timesteps
                if state_id % states_per_flush == 0:
                    np.save(os.path.join(image_dir, 'train_indices.npy'),
                            train_inds)
                    np.save(os.path.join(image_dir, 'test_indices.npy'),
                            test_inds)
                    if save_tensors:
                        state_dataset.flush()
                        image_dataset.flush()

                # delete action objects
                for obj_state in state.obj_states:
                    del obj_state
                del state
                gc.collect()

                # update state id
                state_id += 1

            except Exception as e:
                # log an error
                logger.warning('Heap failed!')
                logger.warning('%s' % (str(e)))
                logger.warning(traceback.format_exc())
                if debug:
                    raise

                del env
                gc.collect()
                env = BinHeapEnv(config)
                env.state_space.obj_id_map = obj_id_map
                env.state_space.obj_keys = obj_keys
                env.state_space.set_splits(obj_splits)
                env.state_space.mesh_filenames = mesh_filenames

        # garbage collect
        del env
        gc.collect()

    # write all datasets to file, save indices
    np.save(os.path.join(image_dir, 'train_indices.npy'), train_inds)
    np.save(os.path.join(image_dir, 'test_indices.npy'), test_inds)
    if save_tensors:
        state_dataset.flush()
        image_dataset.flush()

    logger.info('Generated %d image datapoints' %
                (state_id * num_images_per_state))
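The warm-start logic above assumes a fixed number of images per state, so the number of complete states and the number of partial images to delete follow directly from the image count on disk. A small numeric illustration (the values are made up):

num_images_per_state = 5
num_total_images = 23   # images found on disk from a previous, interrupted run
num_prev_states = 0     # no saved state tensors, so infer from the images

if num_prev_states == 0:
    num_prev_states = num_total_images // num_images_per_state   # -> 4

# Images beyond the last complete state are partial and get removed.
num_images_to_remove = num_total_images - (num_prev_states * num_images_per_state)
print(num_prev_states, "complete states,", num_images_to_remove, "images to remove")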
Example #20
path_depth_png = last_depth_image.replace("depth",
                                          "depth_PNG").replace("npy", 'png')

path_grasp_position = last_depth_image.replace("depth",
                                               "grasp").replace("npy", 'txt')

if os.path.isfile(last_mask_path):
    mask = last_mask_path
else:
    mask = None

#mask= "/home/lvianell/Desktop/Lorenzo_report/datasets/dexnet_maskcnn_human/mask_0.png"
#mask=None

# set up logger
logger = Logger.get_logger(
    '/home/lvianell/Desktop/Lorenzo_report/gqcnn/examples/policy_2.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Run a grasping policy on an example image')
    parser.add_argument('--model_name',
                        type=str,
                        default="GQCNN-4.0-PJ",
                        help='name of a trained model to run')
    parser.add_argument(
        '--depth_image',
        type=str,
        default=last_depth_image,
        help='path to a test depth image stored as a .npy file')
    parser.add_argument('--segmask',
Example #21
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from autolab_core import YamlConfig, Logger
from perception import (BinaryImage, CameraIntrinsics, ColorImage, DepthImage,
                        RgbdImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import (RobustGraspingPolicy,
                            CrossEntropyRobustGraspingPolicy, RgbdImageState,
                            FullyConvolutionalGraspingPolicyParallelJaw,
                            FullyConvolutionalGraspingPolicySuction)
from gqcnn.utils import GripperMode
import cv2
import os
logger = Logger.get_logger("grabdepth_segmask_gqcnn.py")
config_filename = os.path.join("gqcnn_pj_realsense.yaml")
config = YamlConfig(config_filename)
policy_config = config["policy"]
policy_type = "cem"
policy = CrossEntropyRobustGraspingPolicy(policy_config)

import pybullet as p
import time
import numpy as np
import pybullet_data
from scipy.linalg import null_space
np.set_printoptions(formatter={'float_kind': lambda x: "{0:0.3f}".format(x)})
import math
pi = math.pi
import cv2
Example #22
import skimage

from autolab_core import PointCloud, RigidTransform, YamlConfig, Logger
from perception import (BinaryImage, CameraIntrinsics, ColorImage, DepthImage,
                        RgbdImage, SegmentationImage)
from visualization import Visualizer2D as vis

from gqcnn import (RobustGraspingPolicy, CrossEntropyRobustGraspingPolicy,
                   RgbdImageState)

CLUSTER_TOL = 0.0015
MIN_CLUSTER_SIZE = 100
MAX_CLUSTER_SIZE = 1000000

# Set up logger.
logger = Logger.get_logger("tools/policy_with_image_proc.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description="Run a grasping policy on an example image")
    parser.add_argument(
        "--depth_image",
        type=str,
        default=None,
        help="path to a test depth image stored as a .npy file")
    parser.add_argument("--segmask",
                        type=str,
                        default=None,
                        help="path to an optional segmask to use")
    parser.add_argument("--camera_intrinsics",
Example #23
------
Vishal Satish & Jeff Mahler
"""
import argparse
import os
import sys
import time

import autolab_core.utils as utils
from autolab_core import YamlConfig, Logger
from gqcnn import get_gqcnn_model, get_gqcnn_trainer
from gqcnn import utils as gqcnn_utils

# setup logger
logger = Logger.get_logger('tools/finetune.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description=
        'Fine-Tune a pre-trained Grasp Quality Convolutional Neural Network with TensorFlow'
    )
    parser.add_argument(
        'dataset_dir',
        type=str,
        default=None,
        help='path to the dataset to use for training and validation')
    parser.add_argument('base_model_name',
                        type=str,
                        default=None,
Example #24
    def _run_prediction(self, model_dir, model_output_dir, data_dir,
                        noise_analysis, depth_analysis, perturb_analysis,
                        single_analysis):
        """Predict the outcome of the file for a single model."""

        # Read in model config.
        model_config_filename = os.path.join(model_dir, "config.json")
        with open(model_config_filename) as data_file:
            model_config = json.load(data_file)

        # Load model.
        self.logger.info("Loading model %s" % (model_dir))
        log_file = None
        for handler in self.logger.handlers:
            if isinstance(handler, logging.FileHandler):
                log_file = handler.baseFilename
        gqcnn = get_gqcnn_model(verbose=self.verbose).load(
            model_dir, verbose=self.verbose, log_file=log_file)
        gqcnn.open_session()
        gripper_mode = gqcnn.gripper_mode
        angular_bins = gqcnn.angular_bins

        # Load data
        if noise_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, noise_arr = self._read_data(
                data_dir, noise=True)
        elif depth_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, depth_arr = self._read_data(
                data_dir, depth=True)
        elif perturb_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, perturb_arr = self._read_data(
                data_dir, perturb=True)
        elif single_analysis:
            image_arr, pose_arr, labels, width_arr, file_arr, perturb_arr = self._read_data(
                data_dir, perturb=True)
        else:
            image_arr, pose_arr, labels, width_arr, file_arr, obj_arr = self._read_data(
                data_dir)
        # Predict outcomes
        predictions = gqcnn.predict(image_arr, pose_arr)
        gqcnn.close_session()
        results = BinaryClassificationResult(predictions[:, 1], labels)

        # Log the results
        if noise_analysis:
            # Analyze the error rates with respect to the noise level of the images.
            noise_levels = np.unique(noise_arr)
            levels = len(noise_levels)
            for current_noise in noise_levels:
                pred = predictions[noise_arr[:, 0] == current_noise]
                lab = labels[noise_arr[:, 0] == current_noise]
                res = BinaryClassificationResult(pred[:, 1], lab)
                self._plot_histograms(pred[:, 1], lab, str(current_noise),
                                      model_output_dir)
                self.logger.info("Noise: %.4f Model %s error rate: %.3f" %
                                 (current_noise, model_dir, res.error_rate))
                self.logger.info(
                    "Noise: %.4f Model %s loss: %.3f" %
                    (current_noise, model_dir, res.cross_entropy_loss))
        elif depth_analysis:
            # Analyze the error rates with respect to the grasping depth in the images.
            depth_levels = np.unique(depth_arr)
            levels = len(depth_levels)
            for current_depth in depth_levels:
                if current_depth == -1:
                    depth_mode = 'original'
                else:
                    depth_mode = 'relative %.2f' % (current_depth)
                pred = predictions[depth_arr == current_depth]
                lab = labels[depth_arr == current_depth]
                res = BinaryClassificationResult(pred[:, 1], lab)
                self._plot_histograms(pred[:, 1], lab, depth_mode,
                                      model_output_dir)
                self.logger.info("Depth %s Model %s error rate: %.3f" %
                                 (depth_mode, model_dir, res.error_rate))
                self.logger.info(
                    "Depth: %s Model %s loss: %.3f" %
                    (depth_mode, model_dir, res.cross_entropy_loss))
        elif perturb_analysis:
            # Analyze the error rates with respect to the grasp perturbation in the images.
            perturb_levels = np.unique(perturb_arr)
            print("Perturb levels: ", perturb_levels)
            _rot = len(np.unique(perturb_arr[:, 0]))
            _trans = len(np.unique(perturb_arr[:, 1]))
            try:
                _transy = len(np.unique(perturb_arr[:, 2]))
            except IndexError:
                _transy = 0
                print("No translation in y included")
            if _rot >= 2 and _trans <= 1 and _transy <= 1:
                perturbation = 'rotation'
                perturb_unit = 'deg'
                index = 0
            elif _rot <= 1 and _trans >= 2 and _transy <= 1:
                perturbation = 'translation'
                perturb_unit = 'pixel'
                index = 1
            elif _rot <= 1 and _trans <= 1 and _transy >= 2:
                perturbation = 'translationy'
                perturb_unit = 'pixel'
                index = 2
            else:
                raise ValueError(
                    "Perturbation array contains more than one perturbation"
                    " type, which cannot be handled. Aborting.")
            levels = len(perturb_levels)
            accuracies = []
            for current_perturb in perturb_levels:
                pred = predictions[perturb_arr[:, index] == current_perturb]
                lab = labels[perturb_arr[:, index] == current_perturb]
                res = BinaryClassificationResult(pred[:, 1], lab)
                perturb_mode = perturbation + ' %.0f ' % (
                    current_perturb) + perturb_unit
                self._plot_histograms(
                    pred[:, 1], lab,
                    perturbation + '_%.0f_' % (current_perturb) + perturb_unit,
                    model_output_dir)

                self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                 (perturb_mode, model_dir, res.error_rate))
                accuracies.append(100 - res.error_rate)
                self.logger.info(
                    "Grasp %s Model %s loss: %.3f" %
                    (perturb_mode, model_dir, res.cross_entropy_loss))
            self._plot_grasp_perturbations(perturb_levels, accuracies,
                                           model_output_dir, perturbation)
        elif single_analysis:
            # Analyze the error rates with respect to the grasp perturbation in the images.
            perturb_levels = np.unique(perturb_arr)
            _rot = np.count_nonzero(perturb_arr[:, 0])
            _trans = np.count_nonzero(perturb_arr[:, 1])
            _transy = np.count_nonzero(perturb_arr[:, 2])
            _scalez = np.count_nonzero(perturb_arr[:, 3])
            _scalex = np.count_nonzero(perturb_arr[:, 4])
            if _rot >= 1 and _trans == 0 and _transy == 0 and _scalez == 0 and _scalex == 0:
                index = 0
                perturbation = 'rotation'
            elif _rot == 0 and _trans >= 1 and _transy == 0 and _scalez == 0 and _scalex == 0:
                perturbation = 'translation'
                index = 1
            elif _rot == 0 and _trans == 0 and _transy >= 1 and _scalez == 0 and _scalex == 0:
                perturbation = 'translationy'
                index = 2
            elif _rot == 0 and _trans == 0 and _transy == 0 and _scalez >= 1 and _scalex == 0:
                perturbation = 'scale_height'
                index = 3
            elif _rot == 0 and _trans == 0 and _transy == 0 and _scalez == 0 and _scalex >= 1:
                perturbation = 'scalex'
                index = 4
            else:
                perturbation = 'mixed'
                index = 5
            # Create new output dir for single file and perturbation mode
            print("Number of perturbation samples:", len(perturb_arr))
            if len(perturb_arr) == 1:
                print("New output directory is: ", model_output_dir)
            else:
                model_output_dir = os.path.join(
                    model_output_dir,
                    str(file_arr[0][0]) + '_' + str(file_arr[0][1]) + '_' +
                    perturbation)
                print("New output direction is: ", model_output_dir)
            if not os.path.exists(model_output_dir):
                os.mkdir(model_output_dir)
            # Set up new logger.
            self.logger = Logger.get_logger(self.__class__.__name__,
                                            log_file=os.path.join(
                                                model_output_dir,
                                                "analysis.log"),
                                            silence=(not self.verbose),
                                            global_log_file=self.verbose)
            levels = len(perturb_arr)
            abs_pred_errors = []
            if levels == 1:
                self.logger.info(
                    "Mixed perturbation. Translationx %.1f, Translationy %.1f, "
                    "Rotation %.1f, Scale_height %.1f, Scale x %.1f" %
                    (perturb_arr[0][1], perturb_arr[0][2], perturb_arr[0][0],
                     perturb_arr[0][3], perturb_arr[0][4]))
                pred = predictions
                lab = labels
                res = BinaryClassificationResult(pred[:, 1], lab)
                self.logger.info("Grasp %s Model %s prediction: %.3f" %
                                 (perturbation, model_dir, pred[:, 1]))
                self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                 (perturbation, model_dir, res.error_rate))
                self.logger.info(
                    "Grasp %s Model %s loss: %.3f" %
                    (perturbation, model_dir, res.cross_entropy_loss))

            else:
                for current_perturb in perturb_levels:
                    pred = predictions[perturb_arr[:,
                                                   index] == current_perturb]
                    lab = labels[perturb_arr[:, index] == current_perturb]
                    res = BinaryClassificationResult(pred[:, 1], lab)

                    if perturbation == 'rotation':
                        perturb_mode = 'rotation %.0f deg' % (current_perturb)
                    elif perturbation == 'translation':
                        perturb_mode = 'translation in x %.0f pixel' % (
                            current_perturb)
                    elif perturbation == 'translationy':
                        perturb_mode = 'translation in y %.0f pixel' % (
                            current_perturb)
                    elif perturbation == 'scale_height':
                        perturb_mode = 'scaling depth by %.0f' % (
                            current_perturb)
                    elif perturbation == 'scalex':
                        perturb_mode = 'scaling x by %.0f' % (current_perturb)
                    pos_errors, neg_errors = self._calculate_prediction_errors(
                        pred[:, 1], lab)
                    # Only append positive errors if grasp was positive.
                    if pos_errors:
                        abs_pred_errors.append(pos_errors)
                    self.logger.info("Grasp %s Model %s prediction: %.3f" %
                                     (perturb_mode, model_dir, pred[:, 1]))
                    self.logger.info("Grasp %s Model %s error rate: %.3f" %
                                     (perturb_mode, model_dir, res.error_rate))
                    self.logger.info(
                        "Grasp %s Model %s loss: %.3f" %
                        (perturb_mode, model_dir, res.cross_entropy_loss))
                if abs_pred_errors:
                    self._plot_single_grasp_perturbations(
                        perturb_levels, abs_pred_errors, model_output_dir,
                        perturbation)
        else:
            levels = 1
            self._plot_histograms(predictions[:, 1], labels, '',
                                  model_output_dir)
            self.logger.info("Model %s error rate: %.3f" %
                             (model_dir, results.error_rate))
            self.logger.info("Model %s loss: %.3f" %
                             (model_dir, results.cross_entropy_loss))

            if obj_arr is not None and 'Cornell' in data_dir:
                unique = np.unique(obj_arr).tolist()
                object_label = pd.read_csv(
                    DATA_PATH + "Cornell/original/z.txt",
                    sep=" ",
                    header=None,
                    usecols=[1, 2]).drop_duplicates().to_numpy()
                true_pos = dict()
                false_neg = dict()
                false_pos = dict()
                true_neg = dict()
                for obj in unique:
                    obj = int(obj)
                    true_pos[object_label[obj, 1]] = 0
                    false_pos[object_label[obj, 1]] = 0
                    true_neg[object_label[obj, 1]] = 0
                    false_neg[object_label[obj, 1]] = 0

                for obj, pred, label in zip(obj_arr, predictions[:, 1],
                                            labels):
                    if label == 1 and pred >= 0.5:
                        true_pos[object_label[obj, 1]] += 1
                    elif label == 1 and pred < 0.5:
                        false_neg[object_label[obj, 1]] += 1
                    elif label == 0 and pred >= 0.5:
                        false_pos[object_label[obj, 1]] += 1
                    elif label == 0 and pred < 0.5:
                        true_neg[object_label[obj, 1]] += 1
                print(true_pos)
                self._export_object_analysis(true_pos, false_neg, false_pos,
                                             true_neg, model_output_dir)

        # Log the ratios
        pos_lab = len(labels[labels == 1])
        neg_lab = len(labels[labels == 0])

        true_pos = len(results.true_positive_indices)
        true_neg = len(results.true_negative_indices)
        false_pos = neg_lab - true_neg
        false_neg = pos_lab - true_pos

        self.logger.info("%d samples, %d grasps" %
                         (len(labels), len(labels) / levels))
        self.logger.info("%d positive grasps, %d negative grasps" %
                         (pos_lab / levels, neg_lab / levels))
        self.logger.info("Model overall accuracy %.2f %%" %
                         (100 * results.accuracy))
        self.logger.info("Accuracy on positive grasps: %.2f %%" %
                         (true_pos / pos_lab * 100))
        self.logger.info("Accuracy on negative grasps: %.2f %%" %
                         (true_neg / neg_lab * 100))
        self.logger.info("True positive samples: %d" % true_pos)
        self.logger.info("True negative samples: %d" % true_neg)
        self.logger.info("Correct predictions: %d" % (true_pos + true_neg))
        self.logger.info("False positive samples: %d" % false_pos)
        self.logger.info("False negative samples: %d" % false_neg)
        self.logger.info("False predictions: %d" % (false_pos + false_pos))

        cnt = 0  # Counter for grouping the same images with different noise/depth levels
        if self.num_images is None or self.num_images > len(width_arr):
            self.num_images = len(width_arr)
        steps = int(len(width_arr) / self.num_images)
        for j in range(0, len(width_arr), steps):
            try:
                if file_arr[j][1] != file_arr[j - 1][1]:
                    cnt = 0
                else:
                    cnt += 1
            except IndexError:
                cnt += 1
            if noise_analysis:
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         noise_arr=noise_arr)
            elif depth_analysis:
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         depth_arr=depth_arr)
            elif perturb_analysis or single_analysis:
                print("Plot grasp")
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         perturb_arr=perturb_arr)
            else:
                image = self._plot_grasp(image_arr[j],
                                         width_arr[j],
                                         results,
                                         j,
                                         plt_results=False)
            try:
                if noise_analysis or depth_analysis or perturb_analysis or single_analysis:
                    image.save(
                        os.path.join(
                            model_output_dir, "%05d_%03d_example_%03d.png" %
                            (file_arr[j][0], file_arr[j][1], cnt)))
                else:
                    image.save(
                        os.path.join(
                            model_output_dir, "%05d_%03d.png" %
                            (file_arr[j][0], file_arr[j][1])))
                    # data = self.scale(image_arr[j][:, :, 0])
                    # image = Image.fromarray(data).convert('RGB').resize((300, 300), resample=Image.NEAREST)
                    # image.save(os.path.join(model_output_dir, "%05d_%03d_orig.png" % (file_arr[j][0], file_arr[j][1])))
            except (IndexError, TypeError):
                image.save(
                    os.path.join(model_output_dir, "Example_%03d.png" % (cnt)))
        if single_analysis:
            print("Plotting depth image")
            j = int(len(image_arr) / 2)
            # Plot pure depth image without prediction labeling.
            image = self._plot_grasp(image_arr[j],
                                     width_arr[j],
                                     results,
                                     j,
                                     plt_results=False)
            image.save(os.path.join(model_output_dir, "Depth_image.png"))
        return results
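For reference, here is a small, self-contained sketch of the metric pattern used throughout `_run_prediction` above: the GQ-CNN returns a two-column softmax, and column 1 (the predicted grasp success probability) is paired with the ground-truth labels in a `BinaryClassificationResult`. The arrays below are made-up stand-ins for `gqcnn.predict(...)` output and dataset labels, not data from the original code.

# Made-up stand-ins for gqcnn.predict(...) output (two-column softmax) and labels.
import numpy as np
from autolab_core import BinaryClassificationResult

predictions = np.array([[0.2, 0.8],
                        [0.9, 0.1],
                        [0.4, 0.6]])
labels = np.array([1, 0, 0])

# Column 1 is the predicted probability of grasp success.
results = BinaryClassificationResult(predictions[:, 1], labels)
print("error rate: %.3f" % results.error_rate)
print("cross-entropy loss: %.3f" % results.cross_entropy_loss)
print("accuracy: %.3f" % results.accuracy)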
Example #25
0
import argparse
import json
import os
import time

import numpy as np

from autolab_core import RigidTransform, YamlConfig, Logger
from perception import BinaryImage, CameraIntrinsics, ColorImage, DepthImage, RgbdImage
from visualization import Visualizer2D as vis

from gqcnn.grasping import RobustGraspingPolicy, CrossEntropyRobustGraspingPolicy, RgbdImageState, FullyConvolutionalGraspingPolicyParallelJaw, FullyConvolutionalGraspingPolicySuction
from gqcnn.utils import GripperMode, NoValidGraspsException

# set up logger
logger = Logger.get_logger('examples/policy.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Run a grasping policy on an example image')
    parser.add_argument('model_name',
                        type=str,
                        default=None,
                        help='name of a trained model to run')
    parser.add_argument(
        '--depth_image',
        type=str,
        default=None,
        help='path to a test depth image stored as a .npy file')
    parser.add_argument('--segmask',
Example #26
0
"""
Script for searching over Grasp Quality Convolutional Neural Network (GQ-CNN) hyper-parameters.

Author
------
Vishal Satish
"""
import argparse
import sys

from gqcnn import GQCNNSearch
from autolab_core import YamlConfig, Logger

# set up logger
logger = Logger.get_logger('tools/hyperparam_search.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Hyper-parameter search for GQ-CNN.')
    parser.add_argument('datasets',
                        nargs='+',
                        default=None,
                        help='path to datasets')
    parser.add_argument('--base_model_dirs',
                        nargs='+',
                        default=[],
                        help='path to pre-trained base models for fine-tuning')
    parser.add_argument('--train_configs',
                        nargs='+',
                        default=['cfg/train.yaml'],
Example #27
0
import argparse

import numpy as np

from autolab_core import YamlConfig, Logger
from perception import (BinaryImage, CameraIntrinsics, ColorImage, DepthImage,
                        RgbdImage)
from visualization import Visualizer2D as vis

from gqcnn.grasping import (RobustGraspingPolicy,
                            CrossEntropyRobustGraspingPolicy, RgbdImageState,
                            FullyConvolutionalGraspingPolicyParallelJaw,
                            FullyConvolutionalGraspingPolicySuction)
from gqcnn.utils import GripperMode

# Set up logger.
logger = Logger.get_logger("examples/policy.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description="Run a grasping policy on an example image")
    parser.add_argument("model_name",
                        type=str,
                        default=None,
                        help="name of a trained model to run")
    parser.add_argument(
        "--depth_image",
        type=str,
        default=None,
        help="path to a test depth image stored as a .npy file")
    parser.add_argument("--segmask",
Example #28
0
Be sure to register camera beforehand!!!
Author: Jeff Mahler
"""
import argparse
import numpy as np
import os

import rospy
import matplotlib.pyplot as plt

from autolab_core import RigidTransform, Box, YamlConfig, Logger
import autolab_core.utils as utils
from perception import RgbdSensorFactory, Image

# set up logger
logger = Logger.get_logger('tools/capture_test_images.py')

if __name__ == '__main__':
    # parse args
    parser = argparse.ArgumentParser(
        description='Capture a set of RGB-D images from a set of sensors')
    parser.add_argument('output_dir',
                        type=str,
                        help='path to save captured images')
    parser.add_argument('--config_filename',
                        type=str,
                        default='cfg/tools/capture_test_images.yaml',
                        help='path to configuration file to use')
    args = parser.parse_args()
    config_filename = args.config_filename
    output_dir = args.output_dir

    # read config
    config = YamlConfig(config_filename)
    vis = config['vis']

    # make output dir if needed
Example #29
0
File: finetune.py  Project: wenlongli/gqcnn
Author
------
Vishal Satish & Jeff Mahler
"""
import argparse
import os
import time

from autolab_core import YamlConfig, Logger
import autolab_core.utils as utils
from gqcnn import get_gqcnn_model, get_gqcnn_trainer
from gqcnn import utils as gqcnn_utils

# Setup logger.
logger = Logger.get_logger("tools/finetune.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(description=(
        "Fine-Tune a pre-trained Grasp Quality Convolutional Neural Network"
        " with TensorFlow"))
    parser.add_argument(
        "dataset_dir",
        type=str,
        default=None,
        help="path to the dataset to use for training and validation")
    parser.add_argument("base_model_name",
                        type=str,
                        default=None,
                        help="name of the pre-trained model to fine-tune")
Example #30
0
Vishal Satish & Jeff Mahler
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import time

from autolab_core import YamlConfig, Logger
import autolab_core.utils as utils
from gqcnn import get_gqcnn_model, get_gqcnn_trainer, utils as gqcnn_utils

# Setup logger.
logger = Logger.get_logger("tools/train.py")

if __name__ == "__main__":
    # Parse args.
    parser = argparse.ArgumentParser(
        description=("Train a Grasp Quality Convolutional Neural Network with"
                     " TensorFlow"))
    parser.add_argument(
        "dataset_dir",
        type=str,
        default=None,
        help="path to the dataset to use for training and validation")
    parser.add_argument("--split_name",
                        type=str,
                        default="image_wise",
                        help="name of the split to train on")