Example 1
    def _step(self):
        # grab image container from port using traits
        optical_c = self.grab_input_using_trait('optical_image')
        thermal_c = self.grab_input_using_trait('thermal_image')

        # Get python images from the containers
        optical_npy = optical_c.image().asarray().astype('uint8')
        thermal_npy = thermal_c.image().asarray().astype('uint16')

        thermal_norm = normalize_thermal(thermal_npy)

        if thermal_norm is not None and optical_npy is not None:
            # compute transform
            ret, transform, _ = compute_transform(
                optical_npy,
                thermal_norm,
                warp_mode=cv2.MOTION_HOMOGRAPHY,
                match_low_res=True,
                good_match_percent=self._good_match_percent,
                ratio_test=self._ratio_test,
                match_height=self._match_height,
                min_matches=self._min_matches,
                min_inliers=self._min_inliers)
        else:
            ret = False

        if ret:
            # TODO: Make all of these computations conditional on port connection
            inv_transform = np.linalg.inv(transform)

            thermal_warped = cv2.warpPerspective(
                thermal_npy, transform,
                (optical_npy.shape[1], optical_npy.shape[0]))
            optical_warped = cv2.warpPerspective(
                optical_npy, inv_transform,
                (thermal_npy.shape[1], thermal_npy.shape[0]))

            #self.push_to_port_using_trait( 'thermal_to_optical_homog',
            #    F2FHomography.from_matrix( transform, 'd' ) )
            #self.push_to_port_using_trait( 'optical_to_thermal_homog',
            #    F2FHomography.from_matrix( inv_transform, 'd' ) )

            self.push_to_port_using_trait(
                'warped_thermal_image',
                ImageContainer.fromarray(thermal_warped))
            self.push_to_port_using_trait(
                'warped_optical_image',
                ImageContainer.fromarray(optical_warped))
        else:
            print('alignment failed!')

            #self.push_to_port_using_trait( "thermal_to_optical_homog", F2FHomography() )
            #self.push_to_port_using_trait( "optical_to_thermal_homog", F2FHomography() )

            self.push_to_port_using_trait('warped_optical_image',
                                          ImageContainer())
            self.push_to_port_using_trait('warped_thermal_image',
                                          ImageContainer())

        self._base_step()
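
Most of these examples round-trip images between KWIVER containers and NumPy arrays. A minimal standalone sketch of that round-trip, assuming the kwiver.vital.types bindings used above are installed:

import numpy as np
from kwiver.vital.types import ImageContainer

np_img = np.zeros((480, 720, 3), dtype=np.uint8)
container = ImageContainer.fromarray(np_img)  # NumPy array -> container
recast = container.asarray()                  # container -> NumPy array
assert recast.shape[0] == 480 and recast.shape[1] == 720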
Example 2
    def _step( self ):
        # grab image container from port using traits
        input_c = self.grab_input_using_trait( 'image' )

        # Get python image from container and perform the operation
        input_npy = input_c.image().asarray()
        input_8bit = input_npy.astype( 'uint8' )

        input_fft = np.fft.fft2( input_8bit )

        filt = input_fft - self._noise_fft

        im_filt = np.absolute( np.fft.ifft2( filt ) )

        im_filt = np.log( cv2.blur( im_filt, ( self._response_kernel, self._response_kernel ) ) )
        im_filt = ( im_filt - im_filt.min() ) / ( im_filt.max() - im_filt.min() )

        smoothed_8bit = cv2.blur( input_8bit, ( self._smooth_kernel, self._smooth_kernel ) )
        
        output_image = input_8bit * im_filt + smoothed_8bit * ( 1.0 - im_filt )

        self.push_to_port_using_trait( 'image', ImageContainer(
            from_pil( pil_image.fromarray( output_image.astype( 'uint8' ) ) ) ) )
    
        self._base_step()
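
Example 2 subtracts a precomputed noise spectrum (`self._noise_fft`). A hedged sketch of how that spectrum might be prepared once from a reference frame; the `noise_frame` array and the `_configure_noise` helper are assumptions, not part of the original process:

import numpy as np

def _configure_noise(self, noise_frame):
    # Hypothetical one-time setup: cache the FFT of a reference
    # noise frame so _step can subtract it from each input frame.
    self._noise_fft = np.fft.fft2(noise_frame.astype('uint8'))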
Example 3
    def _step(self):
        print("[DEBUG] ----- start step")
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')

        # Get image from container
        in_img = in_img_c.image()

        # convert generic image to PIL image
        pil_image = get_pil_image(in_img)

        # draw on the image to prove we can do it
        num = 37
        import PIL.ImageDraw
        draw = PIL.ImageDraw.Draw(pil_image)
        draw.line((0, 0) + pil_image.size, fill=128, width=5)
        draw.line((0, pil_image.size[1], pil_image.size[0], 0),
                  fill=32768,
                  width=5)
        #                 x0   y0   x1       y1
        draw.rectangle([num, num, num + 100, num + 100], outline=125)
        del draw

        new_image = from_pil(pil_image)  # get new image handle
        new_ic = ImageContainer(new_image)

        # push object to output port
        self.push_to_port_using_trait('out_image', new_ic)

        self._base_step()
Example 4
    def test_detect(self):
        modules.load_known_modules()
        detector = ImageObjectDetector.create("example_detector")
        image = Image()
        image_container = ImageContainer(image)
        detections = detector.detect(image_container)
        nose.tools.ok_(detections is not None,
                       "Unexpected empty detections")
        nose.tools.assert_equal(len(detections), 1)
Example 5
    def test_set_and_get_image_data(self):
        dr = DescriptorRequest()

        imc_list = [ImageContainer(Image())]
        dr.image_data = imc_list
        nt.assert_equals(len(dr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 1)
        nt.assert_equals(dr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)

        imc_list.append(ImageContainer(Image(720, 480)))
        dr.image_data = imc_list
        nt.assert_equals(len(dr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 2)
        nt.assert_equals(dr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)
        nt.assert_equals(dr.image_data[1].size(), imc_list[1].size())
        nt.assert_equals(imc_list[1].size(), 720 * 480)

        dr.image_data = []
        nt.assert_equals(len(dr.image_data), 0)
Example 6
    def test_set_and_get_image_data(self):
        qr = self._create_query_result()

        imc_list = [ImageContainer(Image())]
        qr.image_data = imc_list
        nt.assert_equals(len(qr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 1)
        nt.assert_equals(qr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)

        imc_list.append(ImageContainer(Image(720, 480)))
        qr.image_data = imc_list
        nt.assert_equals(len(qr.image_data), len(imc_list))
        nt.assert_equals(len(imc_list), 2)
        nt.assert_equals(qr.image_data[0].size(), imc_list[0].size())
        nt.assert_equals(imc_list[0].size(), 0)
        nt.assert_equals(qr.image_data[1].size(), imc_list[1].size())
        nt.assert_equals(imc_list[1].size(), 720 * 480)

        qr.image_data = []
        nt.assert_equals(len(qr.image_data), 0)
Example 7
    def filter(self, in_img):
        img = in_img.image().asarray().astype("uint16")

        mi = np.percentile(img, 1)
        ma = np.percentile(img, 100)

        normalized = (img - mi) / (ma - mi)
        normalized = normalized * 255
        normalized[normalized < 0] = 0

        output = ImageContainer(Image(normalized.astype("uint8")))
        return output
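
Note that `np.percentile(img, 100)` is just `img.max()`, so only the low end can fall below zero in the filter above. A small self-contained variant with a configurable upper percentile, which then needs clipping on both ends:

import numpy as np

def percentile_stretch(img, low=1.0, high=99.0):
    # Same contrast stretch as above, generalized to an upper
    # percentile below 100; values outside [mi, ma] are clipped.
    mi, ma = np.percentile(img, [low, high])
    out = (img.astype('float64') - mi) / (ma - mi) * 255.0
    return np.clip(out, 0, 255).astype('uint8')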
Example 8
        def _test_numpy(dtype_name, nchannels, order='c'):
            np_img = create_numpy_image(dtype_name, nchannels, order)
            img_container = ImageContainer(Image(np_img))
            recast = img_container.asarray()

            # asarray always returns 3 channels
            np_img = np.atleast_3d(np_img)

            vital_img = img_container.image()
            pixel_type_name = vital_img.pixel_type_name()

            want = map_dtype_name_to_pixel_type(dtype_name)

            assert pixel_type_name == want, 'want={} but got={}'.format(
                want, pixel_type_name)

            if not np.all(np_img == recast):
                raise AssertionError(
                    'Failed dtype={}, nchannels={}, order={}'.format(
                        dtype_name, nchannels, order))
Example 9
    def test_get_set_mask(self):
        # Check default
        do = DetectedObject(self.bbox)
        nt.ok_(self.check_img_containers_equal(do.mask, None))

        # Check setting through setter
        do.mask = self.mask
        nt.ok_(self.check_img_containers_equal(do.mask, self.mask))

        # Check setting through constructor
        new_mask = ImageContainer(Image(2048, 1080))
        do = DetectedObject(self.bbox, mask=new_mask)
        nt.ok_(self.check_img_containers_equal(do.mask, new_mask))
Example 10
    def _step(self):
        print("[DEBUG] ----- start step")
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')

        # Get python image from container (just for show)
        in_img = in_img_c.get_image()

        # Print out text to screen
        print("Text: " + str(self.text))

        # push dummy image object (same as input) to output port
        self.push_to_port_using_trait('out_image', ImageContainer(in_img))

        self._base_step()
Example 11
    def setUp(self):

        # Values to setup Detected Object to hold in DOS
        self.bbox1 = bb(10, 10, 20, 20)
        self.bbox2 = bb(10, 10, 30, 30)
        self.bbox3 = bb(5, 5, 20, 20)
        self.bbox4 = bb(1, 1, 10, 10)
        self.conf = 0.5
        self.conf2 = 0.4
        self.conf3 = 0.75
        self.cm = dot("example_class", 0.4)
        self.cm2 = dot("foo2", 3.14)
        self.cm3 = dot("foo3", 0.11)
        self.mask = ImageContainer(Image(1080, 720))
        self.mask2 = ImageContainer(Image(1920, 1080))
        self.mask3 = ImageContainer(Image(720, 1080))

        # Establish set of DO objects to pass to DOS constructor
        self.set = np.array([
            do(self.bbox1, self.conf, self.cm, self.mask),
            do(self.bbox2, self.conf2, self.cm2, self.mask2),
            do(self.bbox3, self.conf3, self.cm3, self.mask3),
            do(self.bbox4)
        ])
Example 12
    def _dowork(self, img_container):
        """
        Helper to decouple the algorithm and pipeline logic

        CommandLine:
            xdoctest viame.processes.camtrawl.processes CamtrawlDetectFishProcess._dowork

        Example:
            >>> from viame.processes.camtrawl.processes import *
            >>> from kwiver.vital.types import ImageContainer
            >>> import kwiver.sprokit.pipeline.config
            >>> # construct dummy process instance
            >>> conf = kwiver.sprokit.pipeline.config.empty_config()
            >>> self = CamtrawlDetectFishProcess(conf)
            >>> self._configure()
            >>> # construct test data
            >>> from kwiver.vital.util import VitalPIL
            >>> from PIL import Image as PILImage
            >>> import numpy as np
            >>> pil_img = PILImage.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
            >>> img_container = ImageContainer(VitalPIL.from_pil(pil_img))
            >>> # Initialize the background detector by sending 10 black frames
            >>> for i in range(10):
            >>>     empty_set = self._dowork(img_container)
            >>> # now add a white box that should be detected
            >>> np_img = np.zeros((512, 512, 3), dtype=np.uint8)
            >>> np_img[300:340, 220:380] = 255
            >>> img_container = ImageContainer.fromarray(np_img)
            >>> detection_set = self._dowork(img_container)
            >>> assert len(detection_set) == 1
            >>> obj = detection_set[0]
        """
        # This should be read as np.uint8
        np_img = img_container.asarray()

        detection_set = DetectedObjectSet()
        ct_detections = self.detector.detect(np_img)

        for detection in ct_detections:
            bbox = BoundingBoxD(*detection.bbox.coords)
            mask = detection.mask.astype(np.uint8)
            vital_mask = ImageContainer.fromarray(mask)
            dot = DetectedObjectType("Motion", 1.0)
            obj = DetectedObject(bbox, 1.0, dot, mask=vital_mask)
            detection_set.add(obj)
        return detection_set
Example 13
def _kwimage_to_kwiver_detections(detections):
    """
    Convert kwimage detections to kwiver detected object sets

    Args:
        detections (kwimage.Detections)

    Returns:
        kwiver.vital.types.DetectedObjectSet
    """
    from kwiver.vital.types.types import ImageContainer, Image

    segmentations = None
    # convert segmentation masks
    if 'segmentations' in detections.data:
        segmentations = detections.data['segmentations']

    boxes = detections.boxes.to_tlbr()
    scores = detections.scores
    class_idxs = detections.class_idxs

    if not segmentations:
        # Placeholders
        segmentations = (None, ) * len(boxes)

    # convert to kwiver format, apply threshold
    detected_objects = DetectedObjectSet()

    for tlbr, score, cidx, seg in zip(boxes.data, scores, class_idxs,
                                      segmentations):
        class_name = detections.classes[cidx]

        bbox_int = np.round(tlbr).astype(np.int32)
        bounding_box = BoundingBoxD(bbox_int[0], bbox_int[1], bbox_int[2],
                                    bbox_int[3])

        detected_object_type = DetectedObjectType(class_name, score)
        detected_object = DetectedObject(bounding_box, score,
                                         detected_object_type)
        if seg:
            mask = seg.to_relative_mask().numpy().data
            detected_object.mask = ImageContainer(Image(mask))

        detected_objects.add(detected_object)
    return detected_objects
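
A hedged usage sketch for the converter above; it assumes an installed kwimage version that provides `Detections.random` for generating test data:

import kwimage

# Hypothetical smoke test; Detections.random is assumed to exist in
# the installed kwimage version and to populate boxes/scores/class_idxs.
dets = kwimage.Detections.random(num=3, classes=['fish', 'scallop'])
kwiver_dets = _kwimage_to_kwiver_detections(dets)
assert len(kwiver_dets) == 3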
Example 14
    def demo_image(self):
        """
        Returns an image which can be run through the detector

        Returns:
            ImageContainer: an image of a scallop
        """
        from PIL import Image as PILImage
        from kwiver.vital.util import VitalPIL
        from kwiver.vital.types import ImageContainer
        import ubelt as ub
        url = 'https://data.kitware.com/api/v1/file/5dcf0d1faf2e2eed35fad5d1/download'
        image_fpath = ub.grabdata(
            url, fname='scallop.jpg', appname='viame',
            hash_prefix='3bd290526c76453bec7', hasher='sha512')
        pil_img = PILImage.open(image_fpath)
        image_data = ImageContainer(VitalPIL.from_pil(pil_img))
        return image_data
Example 15
    def _step(self):
        # grab image container from port using traits
        img_c = self.grab_input_using_trait('image')

        img = img_c.image().asarray().astype("uint16")

        mi = np.percentile(img, 1)
        ma = np.percentile(img, 100)

        normalized = (img - mi) / (ma - mi)

        normalized = normalized * 255
        normalized[normalized < 0] = 0

        output = ImageContainer(Image(normalized.astype("uint8")))

        # push normalized image to output port
        self.push_to_port_using_trait('image', output)
        self._base_step()
Example 16
    def _step(self):
        # grab image container from port using traits
        in_img_c = self.grab_input_using_trait('image')
        tracks = self.grab_input_using_trait('object_track_set')

        # Get python image from container
        in_img = get_pil_image(in_img_c.image()).convert('RGB')

        if len(tracks.tracks()) == 0:
            # Fill image
            in_img = pil_image.new(mode='RGB',
                                   size=in_img.size,
                                   color=(randint(0, 255), randint(0, 255),
                                          randint(0, 255)))

        # push the (possibly color-filled) image to the output port
        self.push_to_port_using_trait('image',
                                      ImageContainer(from_pil(in_img)))

        self._base_step()
Example 17
    def setUp(self):
        self.loc1 = np.array([-73.759291, 42.849631])
        self.loc2 = np.array([-149.484444, -17.619482])

        self.bbox = BoundingBox(10, 10, 20, 20)
        self.conf = 0.5
        self.dot = DetectedObjectType("example_class", 0.4)
        self.mask = ImageContainer(Image(1080, 720))

        # Values to set outside of constructor
        self.geo_point = GeoPoint(self.loc1, geodesy.SRID.lat_lon_WGS84)
        self.index = 5
        self.detector_name = "example_detector_name"

        self.descriptor = descriptor.new_descriptor(5)
        self.descriptor[:] = 10

        self.note_to_add = "example_note"

        self.keypoint_to_add = Point2d()
        self.keypoint_to_add.value = self.loc2
        self.keypoint_id = "example_keypoint_id"
Example 18
    def draw(self, detected_object_set, image):
        u_image = cv2.cvtColor(image.asarray(), cv2.COLOR_RGB2BGR)
        for detected_object in detected_object_set:
            bbox = detected_object.bounding_box()
            confidence = detected_object.confidence()
            if self.bbox_shape == "rectangle":
                u_image = cv2.rectangle(u_image,
                              (int(bbox.min_x()), int(bbox.min_y())),
                              (int(bbox.max_x()), int(bbox.max_y())),
                              self.bbox_color,
                              self.bbox_thickness)
                text_origin = (int(bbox.min_x()),int(bbox.min_y()-self.bbox_thickness))
            else:
                center = ((int(bbox.min_x()) + int(bbox.max_x()))//2,
                          (int(bbox.min_y()) + int(bbox.max_y()))//2)
                radius = int(math.sqrt(
                    math.pow(float(bbox.max_y()) - float(bbox.min_y()), 2) +
                    math.pow(float(bbox.max_x()) - float(bbox.min_x()), 2)))
                u_image = cv2.circle(u_image, center, radius, self.bbox_color,
                                     self.bbox_thickness)
                text_origin = (center[0]-radius, center[1]-radius-self.bbox_thickness)

            types = detected_object.type()
            if types is None:
                label = "{0}".format(confidence)
            else:
                label = "{0}: {1}".format(types.get_most_likely_class(),
                                          types.get_most_likely_score())
            u_image = cv2.putText(u_image,
                        label,
                        text_origin,
                        self.font, self.font_scale, self.bbox_color, self.font_thickness)
        u_image = cv2.cvtColor(u_image, cv2.COLOR_BGR2RGB)
        image_container = ImageContainer.fromarray(u_image)
        return image_container
Example 19
    def test_save_directory(self):
        dummy_image = np.zeros([100, 100])
        image_container = ImageContainer.fromarray(dummy_image)
        with tempfile.TemporaryDirectory() as directory_name:
            self.instance.save(directory_name, image_container)
Example 20
def create_image():
    return ImageContainer(Image(720, 480))
Example 21
    def test_height(self):
        i = Image(720, 480)
        ic = ImageContainer(i)
        nose.tools.assert_equal(ic.height(), 480)
Example 22
    def test_width(self):
        i = Image(720, 480)
        ic = ImageContainer(i)
        nose.tools.assert_equal(ic.width(), 720)
Example 23
    def test_size(self):
        i = Image(720, 480)
        ic = ImageContainer(i)
        nose.tools.assert_equal(ic.size(), 720 * 480)
Example 24
    def test_new(self):
        image = Image()
        img_c = ImageContainer(image)

        image = Image(100, 100)
        img_c = ImageContainer(image)
Example 25
    def extract_chips_for_dets(self, image_files, truth_sets):
        import cv2
        output_files = []
        output_dets = []

        for i in range(len(image_files)):
            filename = image_files[i]
            groundtruth = truth_sets[i]
            detections = []
            scale = 1.0

            if self._target_type_scales:
                scale = self.compute_scale_factor(groundtruth)

            if len(groundtruth) > 0:
                img = cv2.imread(filename)

                if len(np.shape(img)) < 2:
                    continue

                img_max_x = np.shape(img)[1]
                img_max_y = np.shape(img)[0]

                # Optionally scale image
                if scale != 1.0:
                    img_max_x = int(scale * img_max_x)
                    img_max_y = int(scale * img_max_y)
                    img = cv2.resize(img, (img_max_x, img_max_y))

                # Run optional background detector on data
                if self._detector_model:
                    kw_image = Image(img)
                    kw_image_container = ImageContainer(kw_image)
                    detections = self._detector.detect(kw_image_container)

            if len(groundtruth) == 0 and len(detections) == 0:
                continue

            overlaps = np.zeros((len(detections), len(groundtruth)))
            det_boxes = []

            for det in detections:
                bbox = det.bounding_box
                det_boxes.append((int(bbox.min_x()), int(bbox.min_y()),
                                  int(bbox.width()), int(bbox.height())))

            for i, gt in enumerate(groundtruth):
                # Extract chip for this detection
                bbox = gt.bounding_box

                bbox_min_x = int(bbox.min_x() * scale)
                bbox_max_x = int(bbox.max_x() * scale)
                bbox_min_y = int(bbox.min_y() * scale)
                bbox_max_y = int(bbox.max_y() * scale)

                bbox_width = bbox_max_x - bbox_min_x
                bbox_height = bbox_max_y - bbox_min_y

                max_overlap = 0.0

                for j, det in enumerate(det_boxes):

                    # Compute overlap between detection and truth
                    (det_min_x, det_min_y, det_width, det_height) = det

                    # Get the overlap rectangle
                    overlap_x0 = max(bbox_min_x, det_min_x)
                    overlap_y0 = max(bbox_min_y, det_min_y)
                    overlap_x1 = min(bbox_max_x, det_min_x + det_width)
                    overlap_y1 = min(bbox_max_y, det_min_y + det_height)

                    # Check if there is an overlap
                    if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
                        continue

                    # If yes, calculate the ratio of the overlap
                    det_area = float(det_width * det_height)
                    gt_area = float(bbox_width * bbox_height)
                    int_area = float(
                        (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0))
                    overlap = min(int_area / det_area, int_area / gt_area)
                    overlaps[j, i] = overlap

                    if overlap >= self._min_overlap_for_association and overlap > max_overlap:
                        max_overlap = overlap

                        bbox_min_x = det_min_x
                        bbox_min_y = det_min_y
                        bbox_max_x = det_min_x + det_width
                        bbox_max_y = det_min_y + det_height

                        bbox_width = det_width
                        bbox_height = det_height

                if self._chip_method == "fixed_width":
                    chip_width = int(self._chip_width)
                    half_width = int(chip_width / 2)

                    bbox_min_x = int(
                        (bbox_min_x + bbox_max_x) / 2) - half_width
                    bbox_min_y = int(
                        (bbox_min_y + bbox_max_y) / 2) - half_width
                    bbox_max_x = bbox_min_x + chip_width
                    bbox_max_y = bbox_min_y + chip_width

                    bbox_width = chip_width
                    bbox_height = chip_width

                bbox_area = bbox_width * bbox_height

                if self._area_lower_bound > 0 and bbox_area < self._area_lower_bound:
                    continue
                if self._area_upper_bound > 0 and bbox_area > self._area_upper_bound:
                    continue

                if self._reduce_category and gt.type() and \
                  gt.type().get_most_likely_class() == self._reduce_category and \
                  random.uniform( 0, 1 ) < 0.90:
                    continue

                if self._border_exclude > 0:
                    if bbox_min_x <= self._border_exclude:
                        continue
                    if bbox_min_y <= self._border_exclude:
                        continue
                    if bbox_max_x >= img_max_x - self._border_exclude:
                        continue
                    if bbox_max_y >= img_max_y - self._border_exclude:
                        continue

                crop = img[bbox_min_y:bbox_max_y, bbox_min_x:bbox_max_x]
                self._sample_count = self._sample_count + 1
                crop_str = ('%09d' % self._sample_count) + ".png"
                new_file = os.path.join(self._chip_directory, crop_str)
                cv2.imwrite(new_file, crop)

                # Set new box size for this detection
                gt.bounding_box = BoundingBoxD(0, 0,
                                               np.shape(crop)[1],
                                               np.shape(crop)[0])
                new_set = DetectedObjectSet()
                new_set.add(gt)

                output_files.append(new_file)
                output_dets.append(new_set)

            neg_count = 0

            for j, det in enumerate(detections):

                if max(overlaps[j]) >= self._max_overlap_for_negative:
                    continue

                bbox = det.bounding_box

                bbox_min_x = int(bbox.min_x())
                bbox_max_x = int(bbox.max_x())
                bbox_min_y = int(bbox.min_y())
                bbox_max_y = int(bbox.max_y())

                bbox_width = bbox_max_x - bbox_min_x
                bbox_height = bbox_max_y - bbox_min_y

                bbox_area = bbox_width * bbox_height

                if self._chip_method == "fixed_width":
                    chip_width = int(self._chip_width)
                    half_width = int(chip_width / 2)

                    bbox_min_x = int(
                        (bbox_min_x + bbox_max_x) / 2) - half_width
                    bbox_min_y = int(
                        (bbox_min_y + bbox_max_y) / 2) - half_width
                    bbox_max_x = bbox_min_x + chip_width
                    bbox_max_y = bbox_min_y + chip_width

                    bbox_width = chip_width
                    bbox_height = chip_width

                if self._area_lower_bound > 0 and bbox_area < self._area_lower_bound:
                    continue
                if self._area_upper_bound > 0 and bbox_area > self._area_upper_bound:
                    continue

                if self._border_exclude > 0:
                    if bbox_min_x <= self._border_exclude:
                        continue
                    if bbox_min_y <= self._border_exclude:
                        continue
                    if bbox_max_x >= img_max_x - self._border_exclude:
                        continue
                    if bbox_max_y >= img_max_y - self._border_exclude:
                        continue

                # Handle random factor
                if self._max_neg_per_frame < 1.0 and random.uniform(
                        0, 1) > self._max_neg_per_frame:
                    break

                crop = img[bbox_min_y:bbox_max_y, bbox_min_x:bbox_max_x]
                self._sample_count = self._sample_count + 1
                crop_str = ('%09d' % self._sample_count) + ".png"
                new_file = os.path.join(self._chip_directory, crop_str)
                cv2.imwrite(new_file, crop)

                # Set new box size for this detection
                det.bounding_box = BoundingBoxD(0, 0,
                                                np.shape(crop)[1],
                                                np.shape(crop)[0])
                det.type = DetectedObjectType(self._negative_category, 1.0)
                new_set = DetectedObjectSet()
                new_set.add(det)

                output_files.append(new_file)
                output_dets.append(new_set)

                # Check maximum negative count
                neg_count = neg_count + 1
                if neg_count > self._max_neg_per_frame:
                    break

        return [output_files, output_dets]
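
The association step above scores box pairs with a symmetric overlap measure rather than IoU: the intersection area divided by each box's own area, taking the smaller ratio. A standalone sketch of that measure:

def symmetric_overlap(box_a, box_b):
    # min(intersection/area_a, intersection/area_b), as computed in
    # extract_chips_for_dets; boxes are (min_x, min_y, max_x, max_y).
    x0 = max(box_a[0], box_b[0])
    y0 = max(box_a[1], box_b[1])
    x1 = min(box_a[2], box_b[2])
    y1 = min(box_a[3], box_b[3])
    if x1 <= x0 or y1 <= y0:
        return 0.0
    inter = float((x1 - x0) * (y1 - y0))
    area_a = float((box_a[2] - box_a[0]) * (box_a[3] - box_a[1]))
    area_b = float((box_b[2] - box_b[0]) * (box_b[3] - box_b[1]))
    return min(inter / area_a, inter / area_b)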
Example 26
    def test_save_nonexistant(self):
        dummy_image = np.zeros([100, 100])
        image_container = ImageContainer.fromarray(dummy_image)
        self.instance.save("nonexistant_filename.txt", image_container)