def __init__(self, dataset_path, frame=None, loop=True):
        self._dataset_path = dataset_path
        self._frame = frame
        self._color_frame = frame
        self._ir_frame = frame
        self._im_index = 0
        self._running = False

        from dexnet.learning import TensorDataset
        self._dataset = TensorDataset.open(self._dataset_path)
        self._num_images = self._dataset.num_datapoints
        self._image_rescale_factor = 1.0
        if 'image_rescale_factor' in self._dataset.metadata:
            self._image_rescale_factor = 1.0 / self._dataset.metadata[
                'image_rescale_factor']

        datapoint = self._dataset.datapoint(
            0, [TensorDatasetVirtualSensor.CAMERA_INTR_FIELD])
        camera_intr_vec = datapoint[
            TensorDatasetVirtualSensor.CAMERA_INTR_FIELD]
        self._color_intr = CameraIntrinsics.from_vec(
            camera_intr_vec,
            frame=self._color_frame).resize(self._image_rescale_factor)
        self._ir_intr = CameraIntrinsics.from_vec(
            camera_intr_vec,
            frame=self._ir_frame).resize(self._image_rescale_factor)
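
# Hedged usage sketch: assuming the __init__ above belongs to the
# TensorDatasetVirtualSensor class it references and that a tensor dataset
# exists at the hypothetical path below, the sensor replays stored images as
# if they came from a live camera:
#
#     sensor = TensorDatasetVirtualSensor('data/tensor_dataset', frame='camera')
#     intr = sensor._color_intr  # CameraIntrinsics rescaled to the image size
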
def analyze_classification_performance(model_dir, config, dataset_path=None):
    # read params
    predict_batch_size = config['batch_size']
    randomize = config['randomize']
    
    plotting_config = config['plotting']
    figsize = plotting_config['figsize']
    font_size = plotting_config['font_size']
    legend_font_size = plotting_config['legend_font_size']
    line_width = plotting_config['line_width']
    colors = plotting_config['colors']
    dpi = plotting_config['dpi']
    style = '-'

    class_remapping = None
    if 'class_remapping' in config:
        class_remapping = config['class_remapping']

    # read training config
    training_config_filename = os.path.join(model_dir, 'training_config.yaml')
    training_config = YamlConfig(training_config_filename)

    # read training params
    indices_filename = None
    if dataset_path is None:
        dataset_path = training_config['dataset']
        indices_filename = os.path.join(model_dir, 'splits.npz')
    dataset_prefix, dataset_name = os.path.split(dataset_path)
    if dataset_name == '':
        _, dataset_name = os.path.split(dataset_prefix)
    x_names = training_config['x_names']
    y_name = training_config['y_name']
    batch_size = training_config['training']['batch_size']
    iterator_config = training_config['data_iteration']
    x_name = x_names[0]

    # set analysis dir
    analysis_dir = os.path.join(model_dir, 'analysis')
    if not os.path.exists(analysis_dir):
        os.mkdir(analysis_dir)

    # setup log file
    experiment_log_filename = os.path.join(analysis_dir, '%s_analysis.log' %(dataset_name))
    if os.path.exists(experiment_log_filename):
        os.remove(experiment_log_filename)
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
    hdlr = logging.FileHandler(experiment_log_filename)
    hdlr.setFormatter(formatter)
    logging.getLogger().addHandler(hdlr)

    # setup plotting
    plt.figure(figsize=(figsize, figsize))

    # read dataset
    dataset = TensorDataset.open(dataset_path)

    # read dataset splits
    if indices_filename is None:
        splits = {dataset_name: np.arange(dataset.num_datapoints)}
    else:
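        # the splits file stores a pickled dict inside a 0-d object array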
        splits = np.load(indices_filename, allow_pickle=True)['arr_0'].tolist()

    # load cnn
    logging.info('Loading model %s' %(model_dir))
    cnn = ClassificationCNN.open(model_dir)

    # save examples
    logging.info('Saving examples of each class')
    all_labels = np.arange(cnn.num_classes)
    label_counts = {}
    label_counts = {l: 0 for l in all_labels}
    for tensor_ind in range(dataset.num_tensors):
        tensor = dataset.tensor(y_name, tensor_ind)
        for label in tensor.arr:
            label_counts[label] += 1

    d = utils.sqrt_ceil(cnn.num_classes)
    plt.clf()
    for i, label in enumerate(all_labels):
        tensor_ind = 0
        label_found = False
        while not label_found and tensor_ind < dataset.num_tensors:
            tensor = dataset.tensor(y_name, tensor_ind)
            ind = np.where(tensor.arr == label)[0]
            if ind.shape[0] > 0:
                ind = ind[0] + dataset.datapoints_per_tensor * tensor_ind
                label_found = True

            tensor_ind += 1
        
        if not label_found:
            continue
        datapoint = dataset[ind]
        example_im = datapoint[x_name]
        
        plt.subplot(d,d,i+1)
        plt.imshow(example_im[:,:,:3].astype(np.uint8))
        pct = 100.0 * float(label_counts[label]) / dataset.num_datapoints
        plt.title('Class %03d: %.3f%%' % (label, pct), fontsize=3)
        plt.axis('off')
    plt.savefig(os.path.join(analysis_dir, '%s_classes.pdf' %(dataset_name)))

    # evaluate on dataset
    results = {}
    for split_name, indices in splits.items():
        logging.info('Evaluating performance on split: %s' %(split_name))

        # predict
        if randomize:
            pred_probs, true_labels = cnn.evaluate_on_dataset(dataset, indices=indices, batch_size=predict_batch_size)
            pred_labels = np.argmax(pred_probs, axis=1)
        else:
            true_labels = []
            pred_labels = []
            pred_probs = []
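            # NOTE: this branch iterates over the full dataset, ignoring the
            # split indices, and assumes 'color_ims'/'stp_labels' field names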
            for datapoint in dataset:
                im = ColorImage(datapoint['color_ims'].astype(np.uint8)[:,:,:3])
                true_label = datapoint['stp_labels']
                pred_prob = cnn.predict(im)
                pred_label = np.argmax(pred_prob, axis=1)
                true_labels.append(true_label)
                pred_labels.append(pred_label)
                pred_probs.append(pred_prob.ravel())
                
                """
                if class_remapping is not None:
                    true_label = class_remapping[true_label]
                plt.figure()
                plt.imshow(im.raw_data)
                plt.title('T: %d, P: %d' %(true_label, pred_label))
                plt.show()
                """
            true_labels = np.array(true_labels)
            pred_labels = np.array(pred_labels)
            pred_probs = np.array(pred_probs)
                
        # apply optional class re-mapping
        if class_remapping is not None:
            new_true_labels = np.zeros(true_labels.shape)
            for orig_label, new_label in class_remapping.items():
                new_true_labels[true_labels==orig_label] = new_label
            true_labels = new_true_labels

        # compute classification results
        result = ClassificationResult([pred_probs], [true_labels])
        results[split_name] = result

        # print stats
        logging.info('SPLIT: %s' %(split_name))
        logging.info('Acc: %.3f' %(result.accuracy))
        logging.info('AP: %.3f' %(result.ap_score))
        logging.info('AUC: %.3f' %(result.auc_score))

        # save confusion matrix
        confusion = result.confusion_matrix.data
        plt.clf()
        plt.imshow(confusion, cmap=plt.cm.gray, interpolation='none')
        plt.locator_params(nbins=cnn.num_classes)
        plt.xlabel('Predicted', fontsize=font_size)
        plt.ylabel('Actual', fontsize=font_size)
        plt.savefig(os.path.join(analysis_dir, '%s_confusion.pdf' %(split_name)), dpi=dpi)

        # save analysis
        result_filename = os.path.join(analysis_dir, '%s.cres' %(split_name))
        result.save(result_filename)

    # plot
    colormap = plt.get_cmap('tab10')
    num_colors = 9

    plt.clf()
    for i, split_name in enumerate(splits):
        result = results[split_name]
        precision, recall, taus = result.precision_recall_curve()
        color = colormap(float(colors[i%num_colors]) / num_colors)
        plt.plot(recall, precision, linewidth=line_width, color=color, linestyle=style, label=split_name)
    plt.xlabel('Recall', fontsize=font_size)
    plt.ylabel('Precision', fontsize=font_size)
    plt.title('Precision-Recall Curve', fontsize=font_size)
    handles, plt_labels = plt.gca().get_legend_handles_labels()
    plt.legend(handles, plt_labels, loc='best', fontsize=legend_font_size)
    plt.savefig(os.path.join(analysis_dir, '%s_precision_recall.pdf' %(dataset_name)), dpi=dpi)

    plt.clf()
    for i, split_name in enumerate(splits):
        result = results[split_name]
        fpr, tpr, taus = result.roc_curve()
        color = colormap(float(colors[i%num_colors]) / num_colors)
        plt.plot(fpr, tpr, linewidth=line_width, color=color, linestyle=style, label=split_name)
    plt.xlabel('FPR', fontsize=font_size)
    plt.ylabel('TPR', fontsize=font_size)
    plt.title('Receiver Operating Characteristic', fontsize=font_size)
    handles, plt_labels = plt.gca().get_legend_handles_labels()
    plt.legend(handles, plt_labels, loc='best', fontsize=legend_font_size)
    plt.savefig(os.path.join(analysis_dir, '%s_roc.pdf' %(dataset_name)), dpi=dpi)
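
# Minimal driver sketch (hypothetical paths; the YAML is assumed to define the
# 'batch_size', 'randomize', and 'plotting' keys read at the top of the
# function above):
#
#     analysis_config = YamlConfig('cfg/tools/analyze_classification_performance.yaml')
#     analyze_classification_performance('models/model_00001', analysis_config)
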
    ######################################################################
    if config_filename is None:
        config_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..',
            'cfg/tools/generate_gqcnn_dataset.yaml')

    config = YamlConfig(config_filename)

    # set tensor dataset config
    tensor_config = config['tensors']
    tensor_config['fields']['depth_ims_tf_table_96'][
        'height'] = im_final_height
    tensor_config['fields']['depth_ims_tf_table_96']['width'] = im_final_width

    # open the existing tensor dataset (use TensorDataset(output_dir,
    # tensor_config) instead to create a fresh one with the config above)
    tensor_dataset = TensorDataset.open(output_dir)
    tensor_datapoint = tensor_dataset.datapoint_template

    ######################################################################

    # open tensor dirs
    if not os.path.exists(dataset_path):
        raise ValueError('Dataset %s not found!' % (dataset_path))

    # create subdirectories
    image_dir = os.path.join(dataset_path, 'images')
    if not os.path.exists(image_dir):
        raise ValueError('Image folder %s not found!' % (image_dir))

    grasp_dir = os.path.join(dataset_path, 'grasps')
    if not os.path.exists(grasp_dir):
        raise ValueError('Grasp folder %s not found!' % (grasp_dir))

    # parse args
    parser = argparse.ArgumentParser(
        description='Visualize a GQ-CNN tensor dataset')
    parser.add_argument('dataset_path',
                        type=str,
                        help='Path to the tensor dataset to visualize')
    parser.add_argument(
        '--config_filename',
        type=str,
        default=None,
        help=
        'Yaml filename containing configuration parameters for the visualization'
    )
    args = parser.parse_args()
    dataset_path = args.dataset_path
    config_filename = args.config_filename

    # handle config filename
    if config_filename is None:
        config_filename = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), '..',
            'cfg/tools/visualize_gqcnn_dataset.yaml')

    # turn relative paths absolute
    if not os.path.isabs(dataset_path):
        dataset_path = os.path.join(os.getcwd(), dataset_path)
    if not os.path.isabs(config_filename):
        config_filename = os.path.join(os.getcwd(), config_filename)

    # read config
    config = YamlConfig(config_filename)

    # open tensor dataset
    dataset = TensorDataset.open(dataset_path)

    # visualize a tensor dataset
    visualize_tensor_dataset(dataset, config)
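
# Hedged command-line sketch (script path and dataset path hypothetical):
#
#     python tools/visualize_gqcnn_dataset.py data/my_dataset \
#         --config_filename cfg/tools/visualize_gqcnn_dataset.yaml
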
def finetune_classification_cnn(config):
    """ Main function. """
    # read params
    dataset = config['dataset']
    x_names = config['x_names']
    y_name = config['y_name']
    model_dir = config['model_dir']
    debug = config['debug']

    num_classes = None
    if 'num_classes' in config:
        num_classes = config['num_classes']

    batch_size = config['training']['batch_size']
    train_pct = config['training']['train_pct']
    model_save_period = config['training']['model_save_period']

    data_aug_config = config['data_augmentation']
    preproc_config = config['preprocessing']
    iterator_config = config['data_iteration']
    model_config = config['model']
    base_model_config = model_config['base']
    optimization_config = config['optimization']
    train_config = config['training']

    generator_image_shape = None
    if 'image_shape' in data_aug_config:
        generator_image_shape = data_aug_config['image_shape']
    optimizer_name = optimization_config['optimizer']

    model_params = {}
    if 'params' in model_config:
        model_params = model_config['params']

    base_model_params = {}
    if 'params' in base_model_config:
        base_model_params = base_model_config['params']

    if debug:
        seed = 108
        random.seed(seed)
        np.random.seed(seed)

    # generate model dir
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    model_id = utils.gen_experiment_id()
    model_dir = os.path.join(model_dir, 'model_%s' % (model_id))
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)
    logging.info('Saving model to %s' % (model_dir))
    latest_model_filename = os.path.join(model_dir, 'weights_{epoch:05d}.h5')
    best_model_filename = os.path.join(model_dir, 'weights.h5')

    # save config
    training_config_filename = os.path.join(model_dir, 'training_config.yaml')
    config.save(training_config_filename)

    # open dataset
    dataset = TensorDataset.open(dataset)

    # split dataset
    indices_filename = os.path.join(model_dir, 'splits.npz')
    if os.path.exists(indices_filename):
        indices = np.load(indices_filename, allow_pickle=True)['arr_0'].tolist()
        train_indices = indices['train']
        val_indices = indices['val']
    else:
        train_indices, val_indices = dataset.split(train_pct)
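        # wrap the split dict in a 0-d object array so np.savez_compressed
        # can store it; np.load(...)['arr_0'].tolist() recovers the dict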
        indices = np.array({'train': train_indices, 'val': val_indices})
        np.savez_compressed(indices_filename, indices)
    num_train = train_indices.shape[0]
    num_val = val_indices.shape[0]
    val_steps = int(np.ceil(float(num_val) / batch_size))

    # init generator
    train_generator_filename = os.path.join(model_dir,
                                            'train_preprocessor.pkl')
    val_generator_filename = os.path.join(model_dir, 'val_preprocessor.pkl')
    if os.path.exists(train_generator_filename):
        logging.info('Loading generators')
        with open(train_generator_filename, 'rb') as f:
            train_generator = pkl.load(f)
        with open(val_generator_filename, 'rb') as f:
            val_generator = pkl.load(f)
    else:
        logging.info('Fitting generator')
        train_generator = TensorDataGenerator(num_classes=num_classes,
                                              **data_aug_config)
        val_generator = TensorDataGenerator(
            featurewise_center=data_aug_config['featurewise_center'],
            featurewise_std_normalization=data_aug_config[
                'featurewise_std_normalization'],
            image_shape=generator_image_shape,
            num_classes=num_classes)
        fit_start = time.time()
        train_generator.fit(dataset,
                            x_names,
                            y_name,
                            indices=train_indices,
                            **preproc_config)
        val_generator.mean = train_generator.mean
        val_generator.std = train_generator.std
        val_generator.min_output = train_generator.min_output
        val_generator.max_output = train_generator.max_output
        val_generator.num_classes = train_generator.num_classes
        fit_stop = time.time()
        logging.info('Generator fit took %.3f sec' % (fit_stop - fit_start))
        with open(train_generator_filename, 'wb') as f:
            pkl.dump(train_generator, f)
        with open(val_generator_filename, 'wb') as f:
            pkl.dump(val_generator, f)

    if num_classes is None:
        num_classes = int(train_generator.num_classes)

    # init iterator
    train_iterator = train_generator.flow_from_dataset(dataset,
                                                       x_names,
                                                       y_name,
                                                       indices=train_indices,
                                                       batch_size=batch_size,
                                                       **iterator_config)
    val_iterator = val_generator.flow_from_dataset(dataset,
                                                   x_names,
                                                   y_name,
                                                   indices=val_indices,
                                                   batch_size=batch_size,
                                                   **iterator_config)

    # setup model
    base_cnn = ClassificationCNN.open(base_model_config['model'],
                                      base_model_config['type'],
                                      input_name=x_names[0],
                                      **base_model_params)
    cnn = FinetunedClassificationCNN(base_cnn=base_cnn,
                                     name='dexresnet',
                                     num_classes=num_classes,
                                     output_name=y_name,
                                     im_preprocessor=val_generator,
                                     **model_params)

    # setup training
    cnn.freeze_base_cnn()
    if optimizer_name == 'sgd':
        optimizer = SGD(lr=optimization_config['lr'],
                        momentum=optimization_config['momentum'])
    elif optimizer_name == 'adam':
        optimizer = Adam(lr=optimization_config['lr'])
    else:
        raise ValueError('Optimizer %s not supported!' % (optimizer_name))
    model = cnn.model
    model.compile(optimizer=optimizer,
                  loss=optimization_config['loss'],
                  metrics=optimization_config['metrics'])

    # train
    steps_per_epoch = int(np.ceil(float(num_train) / batch_size))
    latest_model_ckpt = ModelCheckpoint(latest_model_filename,
                                        period=model_save_period)
    best_model_ckpt = ModelCheckpoint(best_model_filename,
                                      save_best_only=True,
                                      period=model_save_period)
    train_history_cb = TrainHistory(model_dir)
    callbacks = [latest_model_ckpt, best_model_ckpt, train_history_cb]
    history = model.fit_generator(
        train_iterator,
        steps_per_epoch=steps_per_epoch,
        epochs=train_config['epochs'],
        callbacks=callbacks,
        validation_data=val_iterator,
        validation_steps=val_steps,
        class_weight=train_config['class_weight'],
        use_multiprocessing=train_config['use_multiprocessing'])

    # save model
    cnn.save(model_dir)

    # save history
    history_filename = os.path.join(model_dir, 'history.pkl')
    with open(history_filename, 'wb') as f:
        pkl.dump(history.history, f)
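
# Hedged driver sketch for the fine-tuning entry point above. The YAML path is
# hypothetical; the config is assumed to provide the 'dataset', 'x_names',
# 'y_name', 'model_dir', and nested 'training'/'optimization' keys read at the
# top of finetune_classification_cnn.
if __name__ == '__main__':
    finetune_config = YamlConfig('cfg/tools/finetune_classification_cnn.yaml')
    finetune_classification_cnn(finetune_config)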