Example 1
    def test_Image(self, debug=True):
        """
    Usage:
        export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
        export TIME_STR=1
        export PYTHONPATH=./exp:./stylegan2-pytorch:./
        python -c "from exp.tests.test_styleganv2 import Testing_stylegan2;\
          Testing_stylegan2().test_train_ffhq_128()"

    :return:
    """
        if 'CUDA_VISIBLE_DEVICES' not in os.environ:
            os.environ['CUDA_VISIBLE_DEVICES'] = '0'
        if 'TIME_STR' not in os.environ:
            os.environ['TIME_STR'] = '0' if utils.is_debugging() else '0'
        from template_lib.v2.config_cfgnode.argparser import \
          (get_command_and_outdir, setup_outdir_and_yaml, get_append_cmd_str, start_cmd_run)

        tl_opts = ' '.join(sys.argv[sys.argv.index('--tl_opts') +
                                    1:]) if '--tl_opts' in sys.argv else ''
        print(f'tl_opts:\n {tl_opts}')

        command, outdir = get_command_and_outdir(
            self, func_name=sys._getframe().f_code.co_name, file=__file__)
        argv_str = f"""
                --tl_config_file none
                --tl_command none
                --tl_outdir {outdir}
                --tl_opts {tl_opts}
                """
        args, cfg = setup_outdir_and_yaml(argv_str, return_cfg=True)

        n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
        import mmcv
        import numpy as np
        import matplotlib.pyplot as plt

        img_path = "template_lib/datasets/images/zebra_GT_target_origin.png"

        img = mmcv.imread(img_path)
        img_gray = mmcv.imread(img_path, flag='grayscale')
        img_ = mmcv.imread(img)  # nothing will happen, img_ = img
        mmcv.imwrite(img, f'{args.tl_outdir}/out.png')

        # mmcv.imshow(img)
        fig, axes = plt.subplots(2, 1)
        img = mmcv.bgr2rgb(img)
        axes[0].imshow(img)
        # plt.imshow(img)
        # plt.show()

        # ret = mmcv.imresize(img, (1000, 600), return_scale=True)
        # ret = mmcv.imrescale(img, (1000, 800))

        bboxes = np.array([10, 10, 100, 120])
        patch = mmcv.imcrop(img, bboxes)
        axes[1].imshow(patch)

        fig.show()
        pass
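
Many of the examples on this page build inclusive [x1, y1, x2, y2] boxes by hand (hence the recurring + crop_w - 1 offsets). A minimal standalone sketch of that convention; the file names here are placeholders, not paths from the examples:

import mmcv
import numpy as np

# 'demo.jpg' and 'patch.png' are placeholder paths.
img = mmcv.imread('demo.jpg')  # BGR ndarray of shape (H, W, 3)

# mmcv.imcrop takes inclusive [x1, y1, x2, y2] corners, so a 100x100 patch
# starting at (x=10, y=20) ends at (10 + 100 - 1, 20 + 100 - 1).
bbox = np.array([10, 20, 10 + 100 - 1, 20 + 100 - 1])
patch = mmcv.imcrop(img, bbox)
print(patch.shape)  # expected (100, 100, 3)

mmcv.imwrite(patch, 'patch.png')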
Example 2
    def __call__(self, results):
        bbox_xywh = bbox_xyxy2xywh(results['bbox'][None, :])[0]
        length = bbox_xywh[2:].max()
        length = length * np.random.uniform(*self.scale)
        x = bbox_xywh[0] + np.random.uniform(-self.shift, self.shift) * length
        y = bbox_xywh[1] + np.random.uniform(-self.shift, self.shift) * length
        w, h = length, length * np.random.uniform(*self.ratio)

        bbox_xyxy = bbox_xywh2xyxy(np.array([[x, y, w, h]]))[0]
        bbox_xyxy = bbox_xyxy.clip(min=0)
        bbox_xyxy[2] = min(bbox_xyxy[2], results['width'][0])
        bbox_xyxy[3] = min(bbox_xyxy[3], results['height'][0])
        bbox_xyxy = bbox_xyxy.astype(np.int32)

        for i in range(len(results['video'])):
            video = results['video'][i].transpose(1, 2, 3, 0)
            num_frames = video.shape[-1]
            video = video.reshape(video.shape[0], video.shape[1], -1)
            video = mmcv.imcrop(video, bbox_xyxy)
            video = mmcv.imresize(video, self.size)

            results['video'][i] = video.reshape(video.shape[0], video.shape[1],
                                                -1, num_frames)
            results['video'][i] = results['video'][i].transpose(3, 0, 1, 2)
            results['width'][i], results['height'][i] = video.shape[
                1], video.shape[0]

        return results
Example 3
 def __call__(self, results):
     for key in results.get('img_fields', ['img']):
         img = results[key]
         if self.efficientnet_style:
             get_params_func = self.get_params_efficientnet_style
             get_params_args = dict(
                 img=img,
                 size=self.size,
                 scale=self.scale,
                 ratio=self.ratio,
                 max_attempts=self.max_attempts,
                 min_covered=self.min_covered,
                 crop_padding=self.crop_padding)
         else:
             get_params_func = self.get_params
             get_params_args = dict(
                 img=img,
                 scale=self.scale,
                 ratio=self.ratio,
                 max_attempts=self.max_attempts)
         ymin, xmin, ymax, xmax = get_params_func(**get_params_args)
         img = mmcv.imcrop(img, bboxes=np.array([xmin, ymin, xmax, ymax]))
         results[key] = mmcv.imresize(
             img,
             tuple(self.size[::-1]),
             interpolation=self.interpolation,
             backend=self.backend)
     return results
Example 4
    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """

        for key in self.keys:
            img = results[key]
            img_height, img_width = img.shape[:2]
            crop_size = min(img_height, img_width)
            y1 = 0 if img_height == crop_size else \
                int(round((img_height - crop_size) / 2))
            x1 = 0 if img_width == crop_size else \
                int(round((img_width - crop_size) / 2))
            y2 = y1 + crop_size - 1
            x2 = x1 + crop_size - 1

            img = mmcv.imcrop(img, bboxes=np.array([x1, y1, x2, y2]))
            results[key] = img

        return results
Example 5
    def __call__(self, results):
        crop_height, crop_width = self.crop_size[0], self.crop_size[1]
        for key in results.get('img_fields', ['img']):
            img = results[key]
            # img.shape has length 2 for grayscale, length 3 for color
            img_height, img_width = img.shape[:2]

            # https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/preprocessing.py#L118 # noqa
            if self.efficientnet_style:
                img_short = min(img_height, img_width)
                crop_height = crop_height / (crop_height +
                                             self.crop_padding) * img_short
                crop_width = crop_width / (crop_width +
                                           self.crop_padding) * img_short

            y1 = max(0, int(round((img_height - crop_height) / 2.)))
            x1 = max(0, int(round((img_width - crop_width) / 2.)))
            y2 = min(img_height, y1 + crop_height) - 1
            x2 = min(img_width, x1 + crop_width) - 1

            # crop the image
            img = mmcv.imcrop(img, bboxes=np.array([x1, y1, x2, y2]))

            if self.efficientnet_style:
                img = mmcv.imresize(
                    img,
                    tuple(self.crop_size[::-1]),
                    interpolation=self.interpolation,
                    backend=self.backend)
            img_shape = img.shape
            results[key] = img
        results['img_shape'] = img_shape

        return results
Example 6
    def __call__(self, results):
        """
        Args:
            img (ndarray): Image to be cropped.
        """
        for key in results.get('img_fields', ['img']):
            img = results[key]
            if self.padding is not None:
                img = mmcv.impad(img,
                                 padding=self.padding,
                                 pad_val=self.pad_val)

            # pad the height if needed
            if self.pad_if_needed and img.shape[0] < self.size[0]:
                img = mmcv.impad(img,
                                 padding=(0, self.size[0] - img.shape[0], 0,
                                          self.size[0] - img.shape[0]),
                                 pad_val=self.pad_val,
                                 padding_mode=self.padding_mode)

            # pad the width if needed
            if self.pad_if_needed and img.shape[1] < self.size[1]:
                img = mmcv.impad(img,
                                 padding=(self.size[1] - img.shape[1], 0,
                                          self.size[1] - img.shape[1], 0),
                                 pad_val=self.pad_val,
                                 padding_mode=self.padding_mode)

            xmin, ymin, height, width = self.get_params(img, self.size)
            results[key] = mmcv.imcrop(
                img,
                np.array([ymin, xmin, ymin + width - 1, xmin + height - 1]))
        return results
Example 7
    def __call__(self, results):
        img_group = results['img_group']
        img_h, img_w = img_group[0].shape[:2]
        crop_w, crop_h = self.crop_size

        offsets = MultiScaleCrop.fill_fix_offset(False, img_w, img_h, crop_w,
                                                 crop_h)
        oversample_group = list()
        for o_w, o_h in offsets:
            normal_group = list()
            flip_group = list()
            for i, img in enumerate(img_group):
                crop = mmcv.imcrop(
                    img,
                    np.array([o_w, o_h, o_w + crop_w - 1, o_h + crop_h - 1]))
                normal_group.append(crop)
                flip_crop = mmcv.imflip(crop)

                if results['modality'] == 'Flow' and i % 2 == 0:
                    flip_group.append(mmcv.iminvert(flip_crop))
                else:
                    flip_group.append(flip_crop)

            oversample_group.extend(normal_group)
            oversample_group.extend(flip_group)
        results['img_group'] = oversample_group
        results['crop_bbox'] = None
        results['img_shape'] = results['img_group'][0].shape

        return results
Example 8
    def __call__(self, img_group, is_flow=False):

        image_h = img_group[0].shape[0]
        image_w = img_group[0].shape[1]
        crop_w, crop_h = self.crop_size

        offsets = GroupMultiScaleCrop.fill_fix_offset(
            False, image_w, image_h, crop_w, crop_h)
        oversample_group = list()
        for o_w, o_h in offsets:
            normal_group = list()
            flip_group = list()
            for i, img in enumerate(img_group):
                crop = mmcv.imcrop(img, np.array(
                    [o_w, o_h, o_w + crop_w-1, o_h + crop_h-1]))
                normal_group.append(crop)
                flip_crop = mmcv.imflip(crop)

                if is_flow and i % 2 == 0:
                    flip_group.append(mmcv.iminvert(flip_crop))
                else:
                    flip_group.append(flip_crop)

            oversample_group.extend(normal_group)
            oversample_group.extend(flip_group)
        return oversample_group, None
Example 9
 def crop_bboxes(self, image_id, bboxes):
     img_path = osp.join("data/PRW/frames", self.id_2_filename(image_id))
     img = mmcv.imread(img_path)
     crops = []
     for bbox in bboxes:
         xyxy = np.asarray([int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])])
         crops.append(mmcv.imcrop(img, xyxy))
     return crops
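
The loop above crops one box at a time; mmcv.imcrop also accepts a 2-D (N, 4) array of xyxy boxes and then returns a list of patches, one per row. A small sketch with placeholder data:

import mmcv
import numpy as np

img = mmcv.imread('frame.jpg')  # placeholder path

# Stack the xyxy boxes into an (N, 4) array; imcrop returns one patch per row.
bboxes = np.array([
    [10, 10, 109, 209],
    [50, 60, 149, 259],
])
crops = mmcv.imcrop(img, bboxes)
assert isinstance(crops, list) and len(crops) == len(bboxes)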
Example 10
 def __call__(self, img):
     h, w = img.shape[:2]
     assert w >= self.img_scale[0] and h >= self.img_scale[1]
     x1 = w // 2 - self.img_scale[0] // 2
     x2 = x1 + self.img_scale[0]
     y1 = h // 2 - self.img_scale[1] // 2
      y2 = y1 + self.img_scale[1]
     img = mmcv.imcrop(img, np.array([x1, y1, x2, y2]))
     return img
Example 11
 def __call__(self, img_group, is_flow=False):
     h = img_group[0].shape[0]
     w = img_group[0].shape[1]
     tw, th = self.size
     x1 = (w - tw) // 2
     y1 = (h - th) // 2
     box = np.array([x1, y1, x1+tw-1, y1+th-1])
     return ([mmcv.imcrop(img, box) for img in img_group],
             np.array([x1, y1, tw, th], dtype=np.float32))
Example 12
    def __getitem__(self, item):
        img_id = self.img_infos[item]['id']
        filename = self.img_infos[item]['filename']
        #print(self.boxes.keys())
        #print(img_id)
        #ss_box = self.boxes[img_id][:30,:]
        #ss_box = self.mesh
        #print(ss_box.shape)

        bbox_label = self.get_ann_info(item)
        image = cv2.imread(osp.join(self.img_prefix, filename))
        bboxes = bbox_label['bboxes']  #person
        labeles = bbox_label['labels']
        randidx = np.random.randint(0, len(labeles))
        bbox = bboxes[randidx]
        label = labeles[randidx] - 1
        #assert label!=-1,img_id
        #if label==10:
        #    self.count +=1
        #    print(self.count)
        if self.crop:
            #print("crop person image!!!!!")
            person_img = mmcv.imcrop(image, bbox, scale=1.0)
        else:
            person_img = image
        import os
        #path_person = os.path.join(self.img_prefix,'PersonImages')
        #if not os.path.exists(path_person):
        #    os.makedirs(path_person)
        #cv2.imwrite(os.path.join(path_person,img_id+'.jpg'),person_img)
        # cv2 to PIL
        person_img = Image.fromarray(
            cv2.cvtColor(person_img, cv2.COLOR_BGR2RGB))
        #img = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        #transform
        if self.train:
            trn = T.Compose([
                #T.RandomResizedCrop(224, (0.3, 1.0)),
                #T.RandomHorizontalFlip(0.7),
                # T.RandomRotation(30),
                T.Resize(self.cfg.DATASET.IMAGE_SIZE),
                T.ToTensor(),
                T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
            ])
        else:
            trn = T.Compose([
                T.Resize(self.cfg.DATASET.IMAGE_SIZE),
                T.ToTensor(),
                T.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])
            ])
        person_img = trn(person_img)
        #img = trn(img)
        #return ss_box.astype(np.float32),person_img,label
        return person_img, label
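
The example above passes scale=1.0 to mmcv.imcrop. The scale argument enlarges each box around its own center before cropping, so 1.0 is a no-op and values above 1.0 add context around the cropped person. A short sketch, again with a placeholder image path:

import mmcv
import numpy as np

image = mmcv.imread('person.jpg')        # placeholder path
bbox = np.array([100, 80, 199, 279])     # xyxy box covering a 100x200 region

tight = mmcv.imcrop(image, bbox)             # scale defaults to 1.0
loose = mmcv.imcrop(image, bbox, scale=1.2)  # box grown ~20% around its center
print(tight.shape, loose.shape)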
Example 13
 def crop_img(self, image_id):
     # 将image_id做为下标
     crops = []
     img_path = osp.join("data/PRW/frames", self.id_2_filename(image_id))
     img = mmcv.imread(img_path)
     for det in self.detections:
         if int(det["image_id"]) == image_id and int(det['category_id']) == 1:
             bbox = det["bbox"]
             xyxy = np.asarray([int(bbox[0]), int(bbox[1]), int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])])
             crops.append(mmcv.imcrop(img, xyxy))
     return crops
Example 14
 def get_frame_crop(self, det):
     crops = []
     if len(det) > 0:
         image_id = det[0]["image_id"]
         bboxes = []
         img_path = osp.join("data/PRW/frame", self.id_2_filename(image_id))
         img = mmcv.imread(img_path)
         for d in det:
             bboxes.append(d["bbox"])
         crops = mmcv.imcrop(img, bboxes)
     return crops
Example 15
 def __call__(self, img_group, is_flow=False, interpolation='bilinear'):
     ret_img_group = [
         mmcv.imcrop(img, self.crop_quadruple) for img in img_group
     ]
     if self.resize:
         ret_img_group = [
             mmcv.imresize(img, (self.input_size[0], self.input_size[1]),
                           interpolation=interpolation)
             for img in ret_img_group
         ]
     return ret_img_group, self.crop_quadruple
Example 16
def _inference_vos_single(model, img, img_refer, bbox, img_transform, device):
    img = mmcv.imread(img)
    # crop the object in first frame
    img_refer = mmcv.imread(img_refer)
    # crop the bbox
    img_refer = mmcv.imcrop(img_refer, bbox)

    data = _prepare_vos_data(img, img_refer, img_transform, model.cfg, device)
    with torch.no_grad():
        result = model(return_loss=False, rescale=True, **data)
    return result
Example 17
    def __call__(self, results):
        img_group = results['img_group']
        img_h, img_w = img_group[0].shape[:2]
        crop_w, crop_h = self.crop_size
        # assert crop_h == img_h or crop_w == img_w

        if crop_h == img_h:
            w_step = (img_w - crop_w) // 2
            offsets = [
                (0, 0),  # left
                (2 * w_step, 0),  # right
                (w_step, 0),  # middle
            ]
        elif crop_w == img_w:
            h_step = (img_h - crop_h) // 2
            offsets = [
                (0, 0),  # top
                (0, 2 * h_step),  # down
                (0, h_step),  # middle
            ]
        else:
            w_step = (img_w - crop_w) // 4
            h_step = (img_h - crop_h) // 4

            offsets = list()
            offsets.append((0 * w_step, 2 * h_step))  # left
            offsets.append((4 * w_step, 2 * h_step))  # right
            offsets.append((2 * w_step, 2 * h_step))  # center

        oversample_group = list()
        for o_w, o_h in offsets:
            normal_group = list()
            flip_group = list()
            for i, img in enumerate(img_group):
                crop = mmcv.imcrop(
                    img,
                    np.array([o_w, o_h, o_w + crop_w - 1, o_h + crop_h - 1]))
                normal_group.append(crop)
                flip_crop = mmcv.imflip(crop)

                if results['modality'] == 'Flow' and i % 2 == 0:
                    flip_group.append(mmcv.iminvert(flip_crop))
                else:
                    flip_group.append(flip_crop)

            oversample_group.extend(normal_group)

        results['img_group'] = oversample_group
        results['crop_bbox'] = None
        results['img_shape'] = results['img_group'][0].shape

        return results
Example 18
    def __call__(self, results):
        img_group = results['img_group']

        img_h, img_w = img_group[0].shape[:2]
        crop_w, crop_h = self.crop_size
        x1 = (img_w - crop_w) // 2
        y1 = (img_h - crop_h) // 2
        box = np.array([x1, y1, x1 + crop_w - 1, y1 + crop_h - 1])
        results['img_group'] = [mmcv.imcrop(img, box) for img in img_group]
        results['crop_bbox'] = box
        results['img_shape'] = results['img_group'][0].shape

        return results
Example 19
 def __call__(self, img_group):
     """
     Args:
         clip (list of PIL Image): list of Image to be cropped and resized.
     Returns:
         list of PIL Image: Randomly cropped and resized image.
     """
     x1, y1, th, tw = self.get_params(img_group[0], self.scale, self.ratio)
     box = np.array([x1, y1, x1 + tw - 1, y1 + th - 1], dtype=np.float32)
     return ([
         mmcv.imresize(mmcv.imcrop(img, box), self.size)
         for img in img_group
     ], box)
Example 20
    def __call__(self, img_group, is_flow=False):
        image_height = img_group[0].shape[0]
        image_width = img_group[0].shape[1]

        crop_width, crop_height = self.size
        x1 = (image_width - crop_width) // 2
        y1 = (image_height - crop_height) // 2

        if self.portrait_mode:
            y1 = min(y1, x1)

        box = np.array([x1, y1, x1 + crop_width - 1, y1 + crop_height - 1])

        return ([mmcv.imcrop(img, box) for img in img_group],
                np.array([x1, y1, crop_width, crop_height], dtype=np.float32))
Example 21
    def __call__(self, img_group, is_flow=False):

        im_h = img_group[0].shape[0]
        im_w = img_group[0].shape[1]

        crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(
            (im_w, im_h))
        box = np.array([offset_w, offset_h, offset_w +
                        crop_w - 1, offset_h + crop_h - 1])
        crop_img_group = [mmcv.imcrop(img, box) for img in img_group]
        ret_img_group = [mmcv.imresize(
            img, (self.input_size[0], self.input_size[1]),
            interpolation=self.interpolation)
            for img in crop_img_group]
        return (ret_img_group, np.array([offset_w, offset_h, crop_w, crop_h],
                                        dtype=np.float32))
Example 22
    def detect_and_classify(self, img_filepath):
        """
        test a single image
        """
        tik = time.time()
        result = {"status": 0, "data": {}}

        img = mmcv.imread(img_filepath)
        res = inference_detector(self.detector, img, self.cfg)
        if len(res) != 0:
            f_index = 0
            for cat, bbox in enumerate(res):
                print(cat, bbox)

                if len(bbox) != 0:
                    # crop patches with mmcv
                    # the last column is the confidence score; the first 4 columns are x1, y1, x2, y2
                    bboxes = np.array(bbox[:, 0:-1]).astype(int)
                    patches = mmcv.imcrop(img, bboxes)

                    for patch in patches:
                        patch = PIL.Image.fromarray(patch)
                        b, g, r = patch.split()
                        patch = Image.merge("RGB", (r, g, b))
                        if self.crop:
                            patch.save('{0}.jpg'.format(f_index))
                        cat_name = self.classify_from_mat(patch)
                        print(cat_name)
                        if cat_name in result['data'].keys():
                            result['data'][cat_name] += 1
                        else:
                            result['data'][cat_name] = 1

                        f_index += 1
                # else:
                #     if "Bad" not in result['data'].keys():
                #         result['data']["Bad"] = 1
                #     else:
                #         result['data']["Bad"] += 1
        else:
            result['data'] = {"Bad": 1}

        tok = time.time()
        result['elapse'] = tok - tik

        return result
Example 23
    def __call__(self, results):
        crop_height, crop_width = self.crop_size[0], self.crop_size[1]
        for key in results.get('img_fields', ['img']):
            img = results[key]
            img_height, img_width, _ = img.shape

            y1 = max(0, int(round((img_height - crop_height) / 2.)))
            x1 = max(0, int(round((img_width - crop_width) / 2.)))
            y2 = min(img_height, y1 + crop_height) - 1
            x2 = min(img_width, x1 + crop_width) - 1

            # crop the image
            img = mmcv.imcrop(img, bboxes=np.array([x1, y1, x2, y2]))
            img_shape = img.shape
            results[key] = img
        results['img_shape'] = img_shape

        return results
Example 24
 def __call__(self, results):
     """Implement data processing with center crop."""
     for i, modal in enumerate(results['modality']):
         height, width = results['height'][i], results['width'][i]
         video = results['video'][i].transpose(1, 2, 3, 0)
         num_frames = video.shape[-1]
         video = video.reshape(height, width, -1)
         start_h, start_w = (height - self.length) // 2, (width -
                                                          self.length) // 2
         video = mmcv.imcrop(
             video,
             np.array((start_w, start_h, start_w + self.length - 1,
                       start_h + self.length - 1)))
         results['video'][i] = video.reshape(self.length, self.length, -1,
                                             num_frames).transpose(
                                                 3, 0, 1, 2)
         results['width'][i], results['height'][
             i] = self.length, self.length
     return results
Example 25
    def __call__(self, results):
        img_group = results['img_group']
        img_h, img_w = img_group[0].shape[:2]

        (crop_w, crop_h), (offset_w, offset_h) = self._sample_crop_size(
            (img_w, img_h))
        box = np.array(
            [offset_w, offset_h, offset_w + crop_w - 1, offset_h + crop_h - 1])
        crop_img_group = [mmcv.imcrop(img, box) for img in img_group]
        ret_img_group = [
            mmcv.imresize(img, (self.input_size[0], self.input_size[1]),
                          interpolation=self.interpolation)
            for img in crop_img_group
        ]

        results['crop_bbox'] = box

        results['img_group'] = ret_img_group
        results['img_shape'] = ret_img_group[0].shape
        results['scales'] = self.scales
        return results
Example 26
    def __call__(self, results):
        """
        Args:
            clip (list of PIL Image): list of Image to be cropped and resized.
        Returns:
            list of PIL Image: Randomly cropped and resized image.
        """
        img_group = results['img_group']

        (x1, y1), (crop_h, crop_w) = self.get_params(img_group[0], self.scale,
                                                     self.ratio)
        box = np.array([x1, y1, x1 + crop_w - 1, y1 + crop_h - 1],
                       dtype=np.float32)

        results['img_group'] = [
            mmcv.imresize(mmcv.imcrop(img, box), self.input_size)
            for img in img_group
        ]
        results['crop_bbox'] = box
        results['img_shape'] = results['img_group'][0].shape
        return results
Example 27
    def __call__(self, img_group, is_flow=False):

        image_h = img_group[0].shape[0]
        image_w = img_group[0].shape[1]
        crop_w, crop_h = self.crop_size
        assert crop_h == image_h or crop_w == image_w

        if crop_h == image_h:
            w_step = (image_w - crop_w) // 2
            offsets = list()
            offsets.append((0, 0))  # left
            offsets.append((2 * w_step, 0))  # right
            offsets.append((w_step, 0))  # middle
        elif crop_w == image_w:
            h_step = (image_h - crop_h) // 2
            offsets = list()
            offsets.append((0, 0))  # top
            offsets.append((0, 2 * h_step))  # down
            offsets.append((0, h_step))  # middle

        oversample_group = list()
        for o_w, o_h in offsets:
            normal_group = list()
            flip_group = list()
            for i, img in enumerate(img_group):
                crop = mmcv.imcrop(
                    img,
                    np.array([o_w, o_h, o_w + crop_w - 1, o_h + crop_h - 1]))
                normal_group.append(crop)
                flip_crop = mmcv.imflip(crop)

                if is_flow and i % 2 == 0:
                    flip_group.append(mmcv.iminvert(flip_crop))
                else:
                    flip_group.append(flip_crop)

            oversample_group.extend(normal_group)
            # oversample_group.extend(flip_group)
        return oversample_group, None
Example 28
def process_image(image, augment, square=False):
    if square:
        iH, iW = 240, 240
        H, W = 224, 224
    else:
        iH, iW = 240, 480
        H, W = 224, 448
    image = mmcv.imresize(image, (iW, iH))
    h, w, _ = image.shape
    w_offset = w - W
    h_offset = h - H
    left = random.randrange(0, w_offset + 1)
    upper = random.randrange(0, h_offset + 1)
    image = mmcv.imcrop(image,
                        np.array([left, upper, left + W - 1, upper + H - 1]))

    if augment:
        enhancer = ImageEnhance.Brightness(Image.fromarray(image))
        image = enhancer.enhance(random.random() * 0.6 + 0.7)
        enhancer = ImageEnhance.Color(image)
        image = enhancer.enhance(random.random() * 0.6 + 0.7)
    return image
Example 29
    def __call__(self, results):
        """
        Args:
            img (ndarray): Image to be cropped and resized.

        Returns:
            ndarray: Randomly cropped and resized image.
        """
        for key in results.get('img_fields', ['img']):
            img = results[key]
            xmin, ymin, target_height, target_width = self.get_params(
                img, self.scale, self.ratio)
            img = mmcv.imcrop(
                img,
                np.array([
                    ymin, xmin, ymin + target_width - 1,
                    xmin + target_height - 1
                ]))
            results[key] = mmcv.imresize(img,
                                         tuple(self.size[::-1]),
                                         interpolation=self.interpolation)
        return results
Example 30
 def __getitem__(self, item):
     img_id = self.img_infos[item]['id']
     filename = self.img_infos[item]['filename']
     bbox_label = self.get_ann_info(item)
     image = cv2.imread(osp.join(self.img_prefix,filename))
     bboxes = bbox_label['bboxes']
     labeles = bbox_label['labels']
     randidx = np.random.randint(0,len(labeles))
     bbox = bboxes[randidx]
     label = labeles[randidx] - 1
     #assert label!=-1,img_id
     #if label==10:
     #    self.count +=1
     #    print(self.count)
     if self.cfg.DATASET.CROP_BBOX:
         #print("crop person image!!!!!")
         person_img = mmcv.imcrop(image,bbox,scale=1.0)
     else:
         person_img = image
     # cv2 to PIL
     person_img = Image.fromarray(cv2.cvtColor(person_img, cv2.COLOR_BGR2RGB))
     #transform
     #if self.train:
     trn = T.Compose([T.Resize(self.cfg.DATASET.IMAGE_SIZE),
                      T.RandomResizedCrop(224,(0.5,1.0)),
                      T.RandomHorizontalFlip(0.7),
                      T.RandomRotation((0,0.5)),
                      T.ToTensor(),
                      T.Normalize(mean=[0.485, 0.456, 0.406],
                                              std=[0.5,0.5,0.5])])
     #else:
     trn = T.Compose([T.Resize(self.cfg.DATASET.IMAGE_SIZE),
                      T.ToTensor(),
                      T.Normalize(mean=[0.485, 0.456, 0.406],
                                  std=[0.5, 0.5, 0.5])])
     person_img = trn(person_img)
     return person_img,label