Example #1
def create_spline_image(json_path, image='blank'):
    """ Draws splines into given image

    Parameters
    ----------
    json_path: str
               path to label file
    image: str, 'blank' for all zeros or 'gray' for gray image
           numpy.array, direct image input

    Returns
    -------
    numpy.array
        image with drawn splines
    """
    sc = SplineCreator(json_path)
    sc.create_all_splines()

    # TODO replace section by label_file_scripts read_image
    if isinstance(image, str):
        if image == 'blank':
            image = numpy.zeros((717, 1276, 3), dtype=numpy.uint8)
        elif image == 'gray':
            image = label_file_scripts.read_image(json_path, 'gray')
        else:
            raise ValueError('Unexpected input image: {}'.format(image))

    # TODO Request that as part of read_image as well or util function
    if (len(image.shape) == 2 or image.shape[2] == 1):
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

    for lane_name, spline in sc.spline_points.items():
        _draw_lines(image, spline, dc.DICT_COLORS[lane_name])

    return image


def create_segmentation_image(json_path, color=None, image=None):
    """ Draws pixel-level markers onto image

    Parameters
    ----------
    json_path: str
               path to label-file
    color: int/uint8 for grayscale color to draw markers
           tuple (uint8, uint8, uint8), BGR values
           None for default marker colors, multi-class
    image: str, 'blank' for all zeros or 'gray' for gray image
           numpy.array, direct image input

    Returns
    -------
    numpy.array
        image with drawn markers

    Notes
    -----
    This function is for visualizing the label; it may not be optimal for creating training labels
    """

    label = label_file_scripts.read_json(json_path)

    # TODO replace section by label_file_scripts read_image
    # NOTE Same in function above
    if isinstance(image, str):
        if image == 'blank':
            image = numpy.zeros((717, 1276), dtype=numpy.uint8)
        elif image == 'gray':
            image = label_file_scripts.read_image(json_path, 'gray')
        # TODO Add color
        else:
            raise ValueError('Unknown image type {}'.format(image))

    if (len(image.shape) == 2 or image.shape[2] == 1)\
            and (color is None or not isinstance(color, int)):
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)

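    # Project each marker's 3D endpoints (world_start / world_end) into the image and draw it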
    for lane in label['lanes']:
        lane_id = lane['lane_id']
        for marker in lane['markers']:
            p1 = marker['world_start']
            p1 = [p1['x'], p1['y'], p1['z']]
            p2 = marker['world_end']
            p2 = [p2['x'], p2['y'], p2['z']]
            dcolor = dc.DICT_COLORS[lane_id] if color is None else color
            label_file_scripts.project_lane_marker(
                p1,
                p2,
                width=.1,
                projection_matrix=label['projection_matrix'],
                color=dcolor,
                img=image)
    return image
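
A minimal usage sketch for the two helpers above; the import and the label path are assumptions, not part of the snippet (the module name visualize_labels matches the reference in Example #4 below):

import cv2

import visualize_labels  # assumed module name

label_path = 'some_label.json'  # placeholder path to an existing LLAMAS label file

# Splines drawn onto the corresponding grayscale camera image
spline_image = visualize_labels.create_spline_image(label_path, image='gray')

# Per-marker segmentation in default multi-class colors on a blank canvas
marker_image = visualize_labels.create_segmentation_image(label_path, image='blank')

cv2.imwrite('splines.png', spline_image)
cv2.imwrite('markers.png', marker_image)
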
Example #3
    def _show_lanes(self, return_only=False):
        """ For debugging spline creation only """

        gray_image = label_file_scripts.read_image(self.json_path, 'gray')
        self.debug_image = cv2.cvtColor(gray_image, cv2.COLOR_GRAY2BGR)
        self.create_all_points()

        for _, sampled_points in self.sampled_points.items():
            _draw_points(self.debug_image, sampled_points, dc.DCOLORS[1])

        for lane_name, marker_points in self.lane_marker_points.items():
            _draw_points(self.debug_image, marker_points,
                         dc.DICT_COLORS[lane_name])

        if not return_only:
            cv2.imshow('debug image', cv2.resize(self.debug_image,
                                                 (2200, 1400)))
            cv2.waitKey(10000)

        return self.debug_image
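
A brief usage sketch for the debug helper above; the module name is an assumption, and SplineCreator is constructed from a label path as shown in Example #1:

import cv2

from spline_creator import SplineCreator  # assumed module name

sc = SplineCreator('some_label.json')  # placeholder label path
debug_image = sc._show_lanes(return_only=True)  # skip cv2.imshow/waitKey, just return the image
cv2.imwrite('debug_lanes.png', debug_image)
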
Example #4
def create_deeplab_tfrecords(input_folder, tfrecord_file):
    """Creates a tfrecord file for a given folder

    Parameters:
        input_folder: str, path to samples for a given dataset
        tfrecord_file: str, path to tfrecord that will be created

    Flags:
        See the FLAGS definitions for more information
        color_input: whether to use gray or color images
        multi_class: binary or multi-class segmentation
        location_gradients: location information as extra channels
    """
    label_paths = helper_scripts.get_files_from_folder(input_folder, '.json')
    shuffle(label_paths)
    print('{} label files in {}'.format(len(label_paths), input_folder))

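    # Location gradient channels: per-pixel x / y coordinates scaled to [0, 255]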
    loc_grad_x = list(
        map(lambda z: z / constants.IMAGE_WIDTH * 255,
            range(constants.IMAGE_WIDTH)))
    loc_grad_y = list(
        map(lambda z: z / constants.IMAGE_HEIGHT * 255,
            range(constants.IMAGE_HEIGHT)))
    loc_grad_x = numpy.asarray([loc_grad_x] * constants.IMAGE_HEIGHT)
    loc_grad_y = numpy.asarray([loc_grad_y] *
                               constants.IMAGE_WIDTH).transpose()
    loc_grad_x = numpy.round(loc_grad_x).astype(numpy.uint8)
    loc_grad_y = numpy.round(loc_grad_y).astype(numpy.uint8)

    os.makedirs(os.path.dirname(tfrecord_file), exist_ok=True)
    with tf.python_io.TFRecordWriter(tfrecord_file) as writer:
        for label_path in tqdm.tqdm(label_paths,
                                    total=len(label_paths),
                                    desc='Creating ' + tfrecord_file):

            image_name = os.path.basename(label_path).replace('.json', '')
            if FLAGS.color_input:
                image_data = label_file_scripts.read_image(label_path,
                                                           image_type='color')
            else:
                image_data = label_file_scripts.read_image(label_path,
                                                           image_type='gray')
                if FLAGS.location_gradients:
                    image_data = numpy.stack(
                        [image_data, loc_grad_x, loc_grad_y], -1)
            image_data = cv2.imencode('.png', image_data)[1].tostring()

            if FLAGS.multi_class:
                segmentation_label = segmentation_labels.create_multi_class_segmentation_label(
                    label_path)
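                # Collapse per-class channels 1-4 into a single-channel class id map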
                segmentation = numpy.zeros(segmentation_label.shape[0:2],
                                           numpy.uint8)
                for class_index in range(1, 5):
                    segmentation[segmentation_label[:, :, class_index] >
                                 0] = class_index
            else:
                segmentation = visualize_labels.create_segmentation_image(
                    label_path, image='blank')
                segmentation = cv2.cvtColor(segmentation, cv2.COLOR_BGR2GRAY)
                segmentation = segmentation > 0
                segmentation = segmentation.astype(numpy.uint8)

            segmentation = cv2.imencode('.png', segmentation)[1].tostring()

            example = build_data.image_seg_to_tfexample(
                image_data, image_name, constants.IMAGE_HEIGHT,
                constants.IMAGE_WIDTH, segmentation)

            writer.write(example.SerializeToString())
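
A hedged usage sketch for the tfrecord writer above; the paths below are placeholders, and the color_input, multi_class, and location_gradients flags are assumed to be defined at module level since the function reads them through FLAGS:

if __name__ == '__main__':
    # Placeholder paths, not part of the snippet above
    create_deeplab_tfrecords('/path/to/llamas/labels/train',
                             '/path/to/tfrecords/train_set.tfrecord')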