Example #1
File: VitalPIL.py Project: tao558/kwiver
def get_pil_image(img):
    """ Get image in python friendly format
    Assumptions are that the image has byte pixels.
    :return: array containing image
    :rtype: pil image
    """
    def pil_mode_from_image(img):
        """
        Determine image format from pixel properties
        May return None if our current encoding does not map to a PIL image
        mode.
        """
        if img.pixel_type() == img.PIXEL_UNSIGNED and img.pixel_num_bytes() == 1:
            if img.depth() == 3 and img.d_step() == 1 and img.w_step() == 3:
                return "RGB"
            elif img.depth() == 4 and img.d_step() == 1 and img.w_step() == 4:
                return "RGBA"
            elif img.depth() == 1 and img.w_step() == 1:
                return "L"
        elif img.depth() == 1 and img.w_step() == 1:
            if img.pixel_type() == img.PIXEL_BOOL and img.pixel_num_bytes() == 1:
                return "1"
            elif img.pixel_type() == img.PIXEL_SIGNED and img.pixel_num_bytes() == 4:
                return "I"
            elif img.pixel_type() == img.PIXEL_FLOAT and img.pixel_num_bytes() == 4:
                return "F"
        return None

    mode = pil_mode_from_image(img)

    if not mode:
        # make a copy of this image using contiguous memory with interleaved channels
        new_img = Image(img.width(), img.height(), img.depth(), True,
                        img.pixel_type(), img.pixel_num_bytes())
        new_img.copy_from(img)
        img = new_img
        mode = pil_mode_from_image(img)

    if not mode:
        raise RuntimeError("Unsupported image format.")

    # get buffer from image
    if six.PY2:
        img_pixels = buffer(bytearray(img))
    else:
        img_pixels = memoryview(bytearray(img)).tobytes()

    pil_img = _pil_image_from_bytes(mode, (img.width(), img.height()),
                                    img_pixels, "raw", mode,
                                    img.h_step() * img.pixel_num_bytes(), 1)
    return pil_img
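
A minimal usage sketch for get_pil_image (the kwiver.vital import paths below are assumptions; they vary across kwiver versions and forks):

    from kwiver.vital.types import Image
    from kwiver.vital.util.VitalPIL import get_pil_image

    vital_img = Image(720, 480)        # default: uint8, depth 1 -> PIL mode "L"
    pil_img = get_pil_image(vital_img)
    print(pil_img.mode, pil_img.size)  # expected: L (720, 480)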
Example #2
    def test_numpy_share_memory(self):
        # TODO: use pytest.mark.parametrize once we move to pytest

        np_img = np.arange(4 * 5 * 3, dtype=np.uint8).reshape(4, 5, 3)
        vital_img = Image(np_img)

        assert np.all(
            np_img == vital_img.asarray()), ('must be initially the same')

        np_img += 1
        assert np.all(
            np_img != vital_img.asarray()), ('we do not share memory yet')
Example #3
        def _test_numpy(dtype_name, nchannels, order='c'):
            np_img = create_numpy_image(dtype_name, nchannels, order)
            vital_img = Image(np_img)
            recast = vital_img.asarray()
            # asarray always returns 3 channels
            np_img = np.atleast_3d(np_img)
            pixel_type_name = vital_img.pixel_type_name()
            want = map_dtype_name_to_pixel_type(dtype_name)

            assert pixel_type_name == want, 'want={} but got={}'.format(
                want, pixel_type_name)

            if not np.all(np_img == recast):
                raise AssertionError(
                    'Failed dtype={}, nchannels={}, order={}'.format(
                        dtype_name, nchannels, order))
Example #4
    def test_detect(self):
        modules.load_known_modules()
        detector = ImageObjectDetector.create("example_detector")
        image = Image()
        image_container = ImageContainer(image)
        detections = detector.detect(image_container)
        nose.tools.ok_(detections is not None,
                       "Unexpected empty detections")
        nose.tools.assert_equal(len(detections), 1)
Example #5
    def test_set_and_get_image_data(self):
        qr = self._create_query_result()

        imc_list = [ImageContainer(Image())]
        qr.image_data = imc_list
        nt.assert_equals(len(qr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 1)
        nt.assert_equals(qr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)

        imc_list.append(ImageContainer(Image(720, 480)))
        qr.image_data = imc_list
        nt.assert_equals(len(qr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 2)
        nt.assert_equals(qr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)
        nt.assert_equals(qr.image_data[1].size(), imc_list[1].size())
        nt.assert_equals(imc_list[1].size(), 720 * 480)

        qr.image_data = []
        nt.assert_equals(len(qr.image_data), 0)
Example #6
    def test_set_and_get_image_data(self):
        dr = DescriptorRequest()

        imc_list = [ImageContainer(Image())]
        dr.image_data = imc_list
        nt.assert_equals(len(dr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 1)
        nt.assert_equals(dr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)

        imc_list.append(ImageContainer(Image(720, 480)))
        dr.image_data = imc_list
        nt.assert_equals(len(dr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 2)
        nt.assert_equals(dr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)
        nt.assert_equals(dr.image_data[1].size(), imc_list[1].size())
        nt.assert_equals(imc_list[1].size(), 720 * 480)

        dr.image_data = []
        nt.assert_equals(len(dr.image_data), 0)
Example #7
    def filter(self, in_img):
        img = in_img.image().asarray().astype("uint16")

        # Stretch intensities: 1st percentile -> 0, maximum -> 255
        mi = np.percentile(img, 1)
        ma = np.percentile(img, 100)

        normalized = (img - mi) / (ma - mi)
        normalized = normalized * 255
        # Clamp pixels that fell below the 1st percentile
        normalized[normalized < 0] = 0

        output = ImageContainer(Image(normalized.astype("uint8")))
        return output
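
The filter above is a percentile contrast stretch: the 1st percentile maps to 0, the maximum to 255, and anything below the 1st percentile is clamped. A self-contained NumPy sketch of the same arithmetic, without the kwiver types:

    import numpy as np

    img = np.array([[0, 50], [100, 200]], dtype=np.uint16)
    mi = np.percentile(img, 1)      # robust lower bound (1st percentile)
    ma = np.percentile(img, 100)    # plain maximum
    normalized = (img - mi) / (ma - mi) * 255
    normalized[normalized < 0] = 0  # clamp values below the 1st percentile
    print(normalized.astype("uint8"))  # [[  0  62] [126 255]]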
Example #8
    def test_get_set_mask(self):
        # Check default
        do = DetectedObject(self.bbox)
        nt.ok_(self.check_img_containers_equal(do.mask, None))

        # Check setting through setter
        do.mask = self.mask
        nt.ok_(self.check_img_containers_equal(do.mask, self.mask))

        # Check setting through constructor
        new_mask = ImageContainer(Image(2048, 1080))
        do = DetectedObject(self.bbox, mask=new_mask)
        nt.ok_(self.check_img_containers_equal(do.mask, new_mask))
Example #9
    def setUp(self):

        # Values to setup Detected Object to hold in DOS
        self.bbox1 = bb(10, 10, 20, 20)
        self.bbox2 = bb(10, 10, 30, 30)
        self.bbox3 = bb(5, 5, 20, 20)
        self.bbox4 = bb(1, 1, 10, 10)
        self.conf = 0.5
        self.conf2 = 0.4
        self.conf3 = 0.75
        self.cm = dot("example_class", 0.4)
        self.cm2 = dot("foo2", 3.14)
        self.cm3 = dot("foo3", 0.11)
        self.mask = ImageContainer(Image(1080, 720))
        self.mask2 = ImageContainer(Image(1920, 1080))
        self.mask3 = ImageContainer(Image(720, 1080))

        # Establish set of DO objects to pass to DOS constructor
        self.set = np.array([
            do(self.bbox1, self.conf, self.cm, self.mask),
            do(self.bbox2, self.conf2, self.cm2, self.mask2),
            do(self.bbox3, self.conf3, self.cm3, self.mask3),
            do(self.bbox4)
        ])
Example #10
    def _step(self):
        # grab image container from port using traits
        img_c = self.grab_input_using_trait('image')

        img = img_c.image().asarray().astype("uint16")

        mi = np.percentile(img, 1)
        ma = np.percentile(img, 100)

        normalized = (img - mi) / (ma - mi)

        normalized = normalized * 255
        normalized[normalized < 0] = 0

        output = ImageContainer(Image(normalized.astype("uint8")))

        # push the normalized image to the output port
        self.push_to_port_using_trait('image', output)
        self._base_step()
Example #11
    def setUp(self):
        self.loc1 = np.array([-73.759291, 42.849631])
        self.loc2 = np.array([-149.484444, -17.619482])

        self.bbox = BoundingBox(10, 10, 20, 20)
        self.conf = 0.5
        self.dot = DetectedObjectType("example_class", 0.4)
        self.mask = ImageContainer(Image(1080, 720))

        # Values to set outside of constructor
        self.geo_point = GeoPoint(self.loc1, geodesy.SRID.lat_lon_WGS84)
        self.index = 5
        self.detector_name = "example_detector_name"

        self.descriptor = descriptor.new_descriptor(5)
        self.descriptor[:] = 10

        self.note_to_add = "example_note"

        self.keypoint_to_add = Point2d()
        self.keypoint_to_add.value = self.loc2
        self.keypoint_id = "example_keypoint_id"
Example #12
def create_image():
    return ImageContainer(Image(720, 480))
Example #13
    def test_getitem_bool(self):
        img = Image(720, 480, 1, True, Image.PIXEL_BOOL, 1)
        nose.tools.assert_equal(img.pixel_type_name(), "bool")
        val1 = img[0, 0]
        val2 = img[0, 0, 0]
        nose.tools.assert_equal(val1, val2)
Example #14
    def test_getitem_double(self):
        img = Image(720, 480, 3, True, Image.PIXEL_FLOAT, 8)
        nose.tools.assert_equal(img.pixel_type_name(), "double")
        val1 = img[0, 0]
        val2 = img[0, 0, 0]
        nose.tools.assert_equal(val1, val2)
Example #15
    def test_getitem_int32(self):
        img = Image(720, 480, 3, True, Image.PIXEL_SIGNED, 4)
        nose.tools.assert_equal(img.pixel_type_name(), "int32")
        val1 = img[0, 0]
        val2 = img[0, 0, 0]
        nose.tools.assert_equal(val1, val2)
Example #16
    def test_getitem_uint8(self):
        img = Image(720, 480)
        nose.tools.assert_equal(img.pixel_type_name(), "uint8")
        val1 = img[0, 0]
        val2 = img[0, 0, 0]
        nose.tools.assert_equal(val1, val2)
Example #17
    def test_size(self):
        i = Image(720, 480)
        ic = ImageContainer(i)
        nose.tools.assert_equal(ic.size(), 720 * 480)
Example #18
    def test_new_type(self):
        # allocate a uint32_t image
        img = Image(720, 480, 3, True, Image.PIXEL_UNSIGNED, 4)
Example #19
    def test_new_sized(self):
        img = Image(720, 480)
Example #20
    def test_new(self):
        img = Image()
Example #21
    def extract_chips_for_dets(self, image_files, truth_sets):
        import cv2
        output_files = []
        output_dets = []

        for i in range(len(image_files)):
            filename = image_files[i]
            groundtruth = truth_sets[i]
            detections = []
            scale = 1.0

            if self._target_type_scales:
                scale = self.compute_scale_factor(groundtruth)

            if len(groundtruth) > 0:
                img = cv2.imread(filename)

                if len(np.shape(img)) < 2:
                    continue

                img_max_x = np.shape(img)[1]
                img_max_y = np.shape(img)[0]

                # Optionally scale image
                if scale != 1.0:
                    img_max_x = int(scale * img_max_x)
                    img_max_y = int(scale * img_max_y)
                    img = cv2.resize(img, (img_max_x, img_max_y))

                # Run optional background detector on data
                if self._detector_model:
                    kw_image = Image(img)
                    kw_image_container = ImageContainer(kw_image)
                    detections = self._detector.detect(kw_image_container)

            if len(groundtruth) == 0 and len(detections) == 0:
                continue

            overlaps = np.zeros((len(detections), len(groundtruth)))
            det_boxes = []

            for det in detections:
                bbox = det.bounding_box
                det_boxes.append((int(bbox.min_x()), int(bbox.min_y()),
                                  int(bbox.width()), int(bbox.height())))

            # gt_idx avoids shadowing the outer file-loop index i
            for gt_idx, gt in enumerate(groundtruth):
                # Extract chip for this detection
                bbox = gt.bounding_box

                bbox_min_x = int(bbox.min_x() * scale)
                bbox_max_x = int(bbox.max_x() * scale)
                bbox_min_y = int(bbox.min_y() * scale)
                bbox_max_y = int(bbox.max_y() * scale)

                bbox_width = bbox_max_x - bbox_min_x
                bbox_height = bbox_max_y - bbox_min_y

                max_overlap = 0.0

                for j, det in enumerate(det_boxes):

                    # Compute overlap between detection and truth
                    (det_min_x, det_min_y, det_width, det_height) = det

                    # Get the overlap rectangle
                    overlap_x0 = max(bbox_min_x, det_min_x)
                    overlap_y0 = max(bbox_min_y, det_min_y)
                    overlap_x1 = min(bbox_max_x, det_min_x + det_width)
                    overlap_y1 = min(bbox_max_y, det_min_y + det_height)

                    # Check if there is an overlap
                    if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
                        continue

                    # If yes, calculate the ratio of the overlap
                    det_area = float(det_width * det_height)
                    gt_area = float(bbox_width * bbox_height)
                    int_area = float(
                        (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0))
                    overlap = min(int_area / det_area, int_area / gt_area)
                    overlaps[j, gt_idx] = overlap

                    if overlap >= self._min_overlap_for_association and overlap > max_overlap:
                        max_overlap = overlap

                        bbox_min_x = det_min_x
                        bbox_min_y = det_min_y
                        bbox_max_x = det_min_x + det_width
                        bbox_max_y = det_min_y + det_height

                        bbox_width = det_width
                        bbox_height = det_height

                if self._chip_method == "fixed_width":
                    chip_width = int(self._chip_width)
                    half_width = int(chip_width / 2)

                    bbox_min_x = int(
                        (bbox_min_x + bbox_max_x) / 2) - half_width
                    bbox_min_y = int(
                        (bbox_min_y + bbox_max_y) / 2) - half_width
                    bbox_max_x = bbox_min_x + chip_width
                    bbox_max_y = bbox_min_y + chip_width

                    bbox_width = chip_width
                    bbox_height = chip_width

                bbox_area = bbox_width * bbox_height

                if self._area_lower_bound > 0 and bbox_area < self._area_lower_bound:
                    continue
                if self._area_upper_bound > 0 and bbox_area > self._area_upper_bound:
                    continue

                if self._reduce_category and gt.type() and \
                  gt.type().get_most_likely_class() == self._reduce_category and \
                  random.uniform(0, 1) < 0.90:
                    continue

                if self._border_exclude > 0:
                    if bbox_min_x <= self._border_exclude:
                        continue
                    if bbox_min_y <= self._border_exclude:
                        continue
                    if bbox_max_x >= img_max_x - self._border_exclude:
                        continue
                    if bbox_max_y >= img_max_y - self._border_exclude:
                        continue

                crop = img[bbox_min_y:bbox_max_y, bbox_min_x:bbox_max_x]
                self._sample_count = self._sample_count + 1
                crop_str = ('%09d' % self._sample_count) + ".png"
                new_file = os.path.join(self._chip_directory, crop_str)
                cv2.imwrite(new_file, crop)

                # Set new box size for this detection
                gt.bounding_box = BoundingBoxD(0, 0,
                                               np.shape(crop)[1],
                                               np.shape(crop)[0])
                new_set = DetectedObjectSet()
                new_set.add(gt)

                output_files.append(new_file)
                output_dets.append(new_set)

            neg_count = 0

            for j, det in enumerate(detections):

                if max(overlaps[j]) >= self._max_overlap_for_negative:
                    continue

                bbox = det.bounding_box

                bbox_min_x = int(bbox.min_x())
                bbox_max_x = int(bbox.max_x())
                bbox_min_y = int(bbox.min_y())
                bbox_max_y = int(bbox.max_y())

                bbox_width = bbox_max_x - bbox_min_x
                bbox_height = bbox_max_y - bbox_min_y

                bbox_area = bbox_width * bbox_height

                if self._chip_method == "fixed_width":
                    chip_width = int(self._chip_width)
                    half_width = int(chip_width / 2)

                    bbox_min_x = int(
                        (bbox_min_x + bbox_max_x) / 2) - half_width
                    bbox_min_y = int(
                        (bbox_min_y + bbox_max_y) / 2) - half_width
                    bbox_max_x = bbox_min_x + chip_width
                    bbox_max_y = bbox_min_y + chip_width

                    bbox_width = chip_width
                    bbox_height = chip_width

                if self._area_lower_bound > 0 and bbox_area < self._area_lower_bound:
                    continue
                if self._area_upper_bound > 0 and bbox_area > self._area_upper_bound:
                    continue

                if self._border_exclude > 0:
                    if bbox_min_x <= self._border_exclude:
                        continue
                    if bbox_min_y <= self._border_exclude:
                        continue
                    if bbox_max_x >= img_max_x - self._border_exclude:
                        continue
                    if bbox_max_y >= img_max_y - self._border_exclude:
                        continue

                # Handle random factor
                if self._max_neg_per_frame < 1.0 and random.uniform(
                        0, 1) > self._max_neg_per_frame:
                    break

                crop = img[bbox_min_y:bbox_max_y, bbox_min_x:bbox_max_x]
                self._sample_count = self._sample_count + 1
                crop_str = ('%09d' % self._sample_count) + ".png"
                new_file = os.path.join(self._chip_directory, crop_str)
                cv2.imwrite(new_file, crop)

                # Set new box size for this detection
                det.bounding_box = BoundingBoxD(0, 0,
                                                np.shape(crop)[1],
                                                np.shape(crop)[0])
                det.type = DetectedObjectType(self._negative_category, 1.0)
                new_set = DetectedObjectSet()
                new_set.add(det)

                output_files.append(new_file)
                output_dets.append(new_set)

                # Check maximum negative count
                neg_count = neg_count + 1
                if neg_count > self._max_neg_per_frame:
                    break

        return [output_files, output_dets]
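
The inner loop above scores each detection/truth pair with a symmetric overlap ratio: the intersection area divided by each box's own area, keeping the smaller of the two ratios, so a match requires good coverage in both directions. A standalone sketch of that computation (box_overlap is a hypothetical helper written for illustration):

    def box_overlap(gt, det):
        # gt and det are (min_x, min_y, max_x, max_y) tuples
        ix0, iy0 = max(gt[0], det[0]), max(gt[1], det[1])
        ix1, iy1 = min(gt[2], det[2]), min(gt[3], det[3])
        if ix1 - ix0 <= 0 or iy1 - iy0 <= 0:
            return 0.0  # boxes do not intersect
        int_area = float((ix1 - ix0) * (iy1 - iy0))
        gt_area = float((gt[2] - gt[0]) * (gt[3] - gt[1]))
        det_area = float((det[2] - det[0]) * (det[3] - det[1]))
        return min(int_area / det_area, int_area / gt_area)

    print(box_overlap((0, 0, 10, 10), (5, 5, 15, 15)))  # 25/100 each way -> 0.25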
Example #22
    def test_height(self):
        i = Image(720, 480)
        ic = ImageContainer(i)
        nose.tools.assert_equal(ic.height(), 480)
Example #23
    def test_width(self):
        i = Image(720, 480)
        ic = ImageContainer(i)
        nose.tools.assert_equal(ic.width(), 720)
Example #24
File: VitalPIL.py Project: tao558/kwiver
def from_pil(pil_image):
    """
    Construct Image from supplied PIL image object.
    :param pil_image: PIL image object
    :type pil_image: PIL.Image.Image
    :raises RuntimeError: If the PIL Image provided is not in a recognized
       mode.
    :returns: New Image instance using the given image's pixels.
    :rtype: Image
    """

    (img_width, img_height) = pil_image.size
    mode = pil_image.mode
    # TODO(paul.tunison): Extract this logic out into a utility function.
    if mode == "1":  # boolean
        img_depth = 1
        img_w_step = 1
        img_h_step = img_width
        img_d_step = 0
        img_pix_num_bytes = 1
        img_pix_type = Image.PIXEL_BOOL
    elif mode == "L":  # 8-bit greyscale
        img_depth = 1
        img_w_step = 1
        img_h_step = img_width
        img_d_step = 0
        img_pix_num_bytes = 1
        img_pix_type = Image.PIXEL_UNSIGNED
    elif mode == "RGB":  # 8-bit RGB
        img_depth = 3
        img_w_step = 3
        img_h_step = img_width * 3
        img_d_step = 1
        img_pix_num_bytes = 1
        img_pix_type = Image.PIXEL_UNSIGNED
    elif mode == "RGBA":  # 8-bit RGB with alpha
        img_depth = 4
        img_w_step = 4
        img_h_step = img_width * 4
        img_d_step = 1
        img_pix_num_bytes = 1
        img_pix_type = Image.PIXEL_UNSIGNED
    elif mode == "I":  # 32-bit signed int greyscale
        img_depth = 1
        img_w_step = 1
        img_h_step = img_width
        img_d_step = 0
        img_pix_num_bytes = 4
        img_pix_type = Image.PIXEL_SIGNED
    elif mode == "F":  # 32-bit float greyscale
        img_depth = 1
        img_w_step = 1
        img_h_step = img_width
        img_d_step = 0
        img_pix_num_bytes = 4
        img_pix_type = Image.PIXEL_FLOAT
    else:
        raise RuntimeError("Unsupported image format.")

    img_data = _pil_image_to_bytes(pil_image)
    vital_img = Image(img_data, img_width, img_height, img_depth, img_w_step,
                      img_h_step, img_d_step, img_pix_type, img_pix_num_bytes)
    return vital_img
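
A round-trip sketch pairing from_pil with get_pil_image from example #1 (the kwiver import path is an assumption; it differs between kwiver versions):

    from PIL import Image as PILImage
    from kwiver.vital.util.VitalPIL import from_pil, get_pil_image

    pil_img = PILImage.new("RGB", (64, 32))
    vital_img = from_pil(pil_img)
    print(vital_img.width(), vital_img.height(), vital_img.depth())  # 64 32 3
    print(get_pil_image(vital_img).mode)                             # RGB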
Example #25
    def test_size(self):
        img = Image()
        nose.tools.assert_equal(img.size(), 0)

        img = Image(720, 480)
        nose.tools.assert_equal(img.size(), 720 * 480)
Example #26
    def test_new(self):
        image = Image()
        img_c = ImageContainer(image)

        image = Image(100, 100)
        img_c = ImageContainer(image)