def __init__(self, data_dir, out_size=800):
        """Prepare background images and augmentation transforms.

        Locates the sibling ``background`` directory next to the
        train/test data directory, eagerly loads every background file
        as an RGB image, and builds the crop/resize/colour transforms.
        """
        # The 'background' folder sits beside the 'train'/'test' folder.
        marker = 'train' if 'train' in data_dir else 'test'
        self.bg_dir = data_dir[:data_dir.find(marker)] + 'background'

        self.bg_name = []
        for entry in os.listdir(self.bg_dir):
            if os.path.isfile(os.path.join(self.bg_dir, entry)):
                self.bg_name.append(entry)
        self.num_bg = len(self.bg_name)

        # Keep all backgrounds in memory as RGB PIL images.
        self.bg_object = []
        for entry in self.bg_name:
            self.bg_object.append(
                open_img(os.path.join(self.bg_dir, entry)).convert('RGB'))

        self.bg_scale = (0.5, 1.0)
        self.obj_scale = (0.2, 0.4, 0.6, 0.8, 1)
        self.bg_size = out_size

        self.rrc = RandomResizedCrop(size=out_size, scale=self.bg_scale)
        self.rrc_obj = self.class_resize(min_size=500,
                                         max_size=(600, 575, 550))
        self.color_jig = ColorJitter()
    def __getitem__(self, idx):
        """Return ``(image, target, idx)`` for the sample at *idx*.

        The image is randomly cropped, the ground-truth boxes are
        shifted into the crop's coordinate frame, and class labels plus
        a segmentation mask are attached to the ``BoxList`` target.
        """
        path = os.path.join(self.data_dir_img, self.img_name[idx])
        image = open_img(path).convert('RGB')

        # Random crop parameters.
        top, left, crop_h, crop_w = self.get_params(image, self.size)
        image = F.crop(image, top, left, crop_h, crop_w)

        # Reshape guards against the no-boxes case.
        boxes = torch.as_tensor(self.gt[idx]).reshape(-1, 4)

        # Shift boxes into the cropped frame.
        # NOTE(review): this subtracts the first two crop params from
        # (x, y); confirm crop params and box coords share axis order.
        boxes = boxes - torch.as_tensor([top, left, 0, 0])
        target = BoxList(boxes, image.size, mode="xywh").convert("xyxy")

        target.add_field("labels",
                         torch.as_tensor(self.cls_idx[idx]).reshape(-1))

        mask_tensor = torch.as_tensor(1 * self.gen_mask_from_img(image))
        target.add_field(
            "masks", SegmentationMask(mask_tensor, image.size, mode='mask'))

        target = target.clip_to_image(remove_empty=True)

        if self._transform:
            image, target = self._transform(image, target)

        return image, target, idx
Exemple #3
0
    def test_secure_filename(self):
        """secure_filename() must sanitise names and keep them openable."""
        cases = [
            ('My cool movie.mov', 'My_cool_movie.mov'),
            ('../../../etc/passwd', 'etc_passwd'),
            (u'i contain cool \xfcml\xe4uts.txt',
             'i_contain_cool_umlauts.txt'),
            (u'i contain weird châräctérs.txt',
             'i_contain_weird_characters.txt'),
        ]
        for raw, expected in cases:
            self.assertEqual(expected, secure_filename(raw))

        # A sanitised name must still resolve to a readable image file.
        with self.assertNoException():
            size = open_img(
                join(settings.CREME_ROOT, 'static', 'common', 'images',
                     secure_filename('500_200.png'))).size
        self.assertEqual((200, 200), size)
Exemple #4
0
def augment_img(image_path, amount, width=IMAGE_WIDTH, height=IMAGE_HEIGHT):
    """Return the transformed original plus `amount` augmented copies.

    For ``amount <= 1`` only the plain transformed image is returned;
    otherwise the result holds the transformed original followed by
    ``amount`` randomly augmented variants of it.
    """
    if amount <= 1:
        return [transform_img(image_path, width, height)]

    base = np.array(open_img(image_path))
    # Batch of `amount` identical uint8 copies of the source image.
    batch = np.array([base] * amount, dtype=np.uint8)

    augmenter = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontal flips
            iaa.Crop(percent=(0, 0.1)),  # random crops
            # Gaussian blur (sigma in [0, 0.5]) on about half the images.
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),
            # Strengthen or weaken per-image contrast.
            iaa.ContrastNormalization((0.75, 1.5)),
            # Gaussian noise; for half the images the noise is also
            # sampled per channel, which can shift colours, not just
            # brightness.
            iaa.AdditiveGaussianNoise(
                loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
            # Brightness jitter; sampled per channel in 20% of cases,
            # which can change image colour.
            iaa.Multiply((0.8, 1.2), per_channel=0.2),
            # Affine transforms: scale/zoom, translate, rotate, shear.
            iaa.Affine(
                scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
                rotate=(-25, 25),
                shear=(-8, 8)),
        ],
        random_order=True)  # apply augmenters in random order

    augmented = augmenter.augment_images(batch)
    return [transform_img(image_path, width, height)] + [
        transform(fromarray(img), width, height) for img in augmented
    ]
    def __init__(self, data_dir):
        """Load every background image found next to the data directory.

        The ``background`` folder is assumed to sit beside the
        'train'/'test' folder that *data_dir* points into.
        """
        marker = 'train' if 'train' in data_dir else 'test'
        self.bg_dir = data_dir[:data_dir.find(marker)] + 'background'

        self.bg_name = []
        for entry in os.listdir(self.bg_dir):
            if os.path.isfile(os.path.join(self.bg_dir, entry)):
                self.bg_name.append(entry)
        self.num_bg = len(self.bg_name)

        # Keep all backgrounds in memory as RGB PIL images.
        self.bg_object = []
        for entry in self.bg_name:
            self.bg_object.append(
                open_img(os.path.join(self.bg_dir, entry)).convert('RGB'))

        self.bg_scale = [0.1, 0.2, 0.4]
    def __getitem__(self, idx):
        """Return ``(image, target, idx)`` for the sample at *idx*.

        Builds a ``BoxList`` target from the stored ground-truth boxes,
        attaches class labels and a segmentation mask, and applies the
        optional transform.
        """
        path = os.path.join(self.data_dir_img, self.img_name[idx])
        image = open_img(path).convert('RGB')

        # Reshape guards against the no-boxes case.
        boxes = torch.as_tensor(self.gt[idx]).reshape(-1, 4)
        target = BoxList(boxes, image.size, mode="xywh").convert("xyxy")

        target.add_field("labels", torch.as_tensor(self.cls_idx[idx])[None])

        mask_tensor = torch.as_tensor(1 * self.gen_mask_from_img(image))
        target.add_field(
            "masks", SegmentationMask(mask_tensor, image.size, mode='mask'))

        target = target.clip_to_image(remove_empty=True)

        if self._transform:
            image, target = self._transform(image, target)
        return image, target, idx
Exemple #7
0
def image_size(*, path: str) -> Tuple[int, int]:
    """Return the size of the image file at *path*.

    The image handle is closed before returning (context manager).
    """
    with open_img(path) as img:
        return img.size
Exemple #8
0
def transform_img(image_path, width=IMAGE_WIDTH, height=IMAGE_HEIGHT):
    """Open the image at *image_path* and return it resized via transform()."""
    img = open_img(image_path)
    return transform(img, width, height)
 def get_img_info(self, index):
     """Return ``{"height": ..., "width": ...}`` for the image at *index*.

     Fix: PIL's ``Image.size`` is ``(width, height)``, but the original
     unpacked it as ``(height, width)``, swapping the two values. Also
     uses a context manager (as done elsewhere in this file) so the
     image handle is closed instead of leaking.
     """
     img_name = os.path.join(self.data_dir_img, self.img_name[index])
     with open_img(img_name) as img:
         # size is (width, height) for PIL images
         img_width, img_height = img.size
     return {"height": img_height, "width": img_width}