Example #1
    def __getitem__(self, index):
        """Fetch one sample.

        Returns either the short tuple ``(image, obj_id, cam_id)`` or, when
        the record carries extra columns, the extended tuple
        ``(image, obj_id, cam_id, dataset_id, mask) + trailing fields``.
        """
        record = self.data[index]

        image = read_image(record[0], grayscale=False)
        obj_id, cam_id = record[1], record[2]

        # Short record: no dataset id / mask columns present.
        if len(record) <= 3:
            if self.transform is not None:
                image, _ = self.transform((image, ''))
            return image, obj_id, cam_id

        dataset_id = record[3]
        # An empty mask path stands for "no mask"; keep the '' sentinel.
        mask = read_image(record[4], grayscale=True) if record[4] != '' else ''
        if self.transform is not None:
            image, mask = self.transform((image, mask))
        return (image, obj_id, cam_id, dataset_id, mask) + record[5:]
 def __getitem__(self, index):
     """Return a sample dict with the transformed image and its metadata."""
     path, person_id, cam = self.data[index]
     image = read_image(path)
     if self.transform is not None:
         # k_tfm controls how many transformed views are produced.
         image = self._transform_image(self.transform, self.k_tfm, image)
     return {'img': image, 'pid': person_id, 'camid': cam, 'impath': path}
Example #3
    def __getitem__(self, index):
        """Fetch one sample; optionally loads a pose-heatmap image alongside.

        Returns ``(img, pid, camid, img_path)`` or, when ``self.load_pose``
        is set, ``(img, pid, camid, img_path, pose)``.
        """
        img_path, pid, camid = self.data[index]
        img = read_image(img_path)

        if self.load_pose:
            # Derive the heatmap filename from the image's basename
            # (extension stripped), e.g. "0001_c1.jpg" -> "0001_c1_pose_heatmaps.png".
            img_name = '.'.join(img_path.split('/')[-1].split('.')[:-1])
            pose_pic_name = img_name + '_pose_heatmaps.png'
            # Heatmaps are stored in separate subfolders depending on whether
            # the source image is a whole-body or an occluded-body crop.
            if 'whole_body' in img_path:
                pose_pic_path = os.path.join(self.pose_dir, 'whole_body_pose',
                                             pose_pic_name)
            else:
                pose_pic_path = os.path.join(self.pose_dir,
                                             'occluded_body_pose',
                                             pose_pic_name)
            # NOTE(review): cv2.imread returns None on a missing/unreadable
            # file; the reshape below would then raise AttributeError — verify
            # that every sample has a heatmap on disk.
            pose = cv2.imread(pose_pic_path, cv2.IMREAD_GRAYSCALE)
            # Unstack the flat grayscale image into (H, n_maps, 56), then
            # reorder to (H, 56, n_maps)-style axes as float32.
            # Assumes 56 channels are packed along the width — TODO confirm
            # against the heatmap generator.
            pose = pose.reshape((pose.shape[0], 56, -1)).transpose(
                (0, 2, 1)).astype('float32')
            # Channels from 18 on are re-centered around 128; presumably these
            # encode part-affinity fields rather than keypoint maps — verify.
            pose[:, :, 18:] = np.abs(pose[:, :, 18:] - 128)
            # transform[1] is a joint image+pose transform, transform[0] an
            # image-only transform; both are assumed present when load_pose is set.
            img, pose = self.transform[1](img, pose)
            img = self.transform[0](img)
            return img, pid, camid, img_path, pose
        else:
            if self.transform is not None:
                img = self.transform(img)
            return img, pid, camid, img_path
Example #4
 def _read_im(self, p):
     """Load and return the image at path *p*.

     A commented-out per-path cache used to live here; it was dead code
     and has been removed.
     """
     return read_image(p)
Example #5
    def __getitem__(self, index):
        """Return one tracklet sample as a dict.

        Keys: ``img`` — a ``(seq_len, C, H, W)`` tensor of stacked frames
        (or all frames when ``sample_method == 'all'``), plus ``pid``,
        ``camid`` and ``dsetid`` metadata.

        Raises:
            ValueError: if ``self.sample_method`` is not one of
                ``'random'``, ``'evenly'`` or ``'all'``.
        """
        img_paths, pid, camid, dsetid = self.data[index]
        num_imgs = len(img_paths)

        if self.sample_method == 'random':
            # Randomly sample seq_len frames from the tracklet; when the
            # tracklet is shorter than seq_len, sample with replacement so
            # frames get replicated.
            indices = np.random.choice(
                np.arange(num_imgs),
                size=self.seq_len,
                replace=num_imgs < self.seq_len
            )
            # Sort to keep temporal order (drop this to be order-agnostic).
            indices = np.sort(indices)

        elif self.sample_method == 'evenly':
            # Evenly sample seq_len frames across the tracklet.
            if num_imgs >= self.seq_len:
                # Trim so the stride divides evenly; the integer stride keeps
                # the indices integral (the original used a float step and
                # relied on per-element int() casts).
                num_imgs -= num_imgs % self.seq_len
                indices = np.arange(0, num_imgs, num_imgs // self.seq_len)
            else:
                # Tracklet too short: pad by replicating the last frame
                # until seq_len is reached.
                num_pads = self.seq_len - num_imgs
                indices = np.concatenate([
                    np.arange(num_imgs),
                    np.full(num_pads, num_imgs - 1, dtype=np.int64)
                ])
            assert len(indices) == self.seq_len

        elif self.sample_method == 'all':
            # Use every frame; batch_size must be set to 1 in this mode.
            indices = np.arange(num_imgs)

        else:
            raise ValueError(
                'Unknown sample method: {}'.format(self.sample_method)
            )

        imgs = []
        # 'frame_idx' deliberately does not shadow the 'index' parameter
        # (the original reused 'index' here).
        for frame_idx in indices:
            img = read_image(img_paths[int(frame_idx)])
            if self.transform is not None:
                img = self.transform(img)
            imgs.append(img.unsqueeze(0))  # img must be torch.Tensor
        imgs = torch.cat(imgs, dim=0)

        return {'img': imgs, 'pid': pid, 'camid': camid, 'dsetid': dsetid}
 def __getitem__(self, index):
     """Return ``(image, pid, camid, img_path)`` for the given sample index."""
     path, person_id, cam = self.data[index]
     image = read_image(path)
     if self.transform is None:
         return image, person_id, cam, path
     return self.transform(image), person_id, cam, path
Example #7
    def __getitem__(self, index):
        """Fetch one sample, optionally as multiple augmented "packages".

        Record layout appears to be ``(path, obj_id, cam_id[, dataset_id,
        mask_path, ...])`` — TODO confirm against the dataset builder.
        Returns ``(image, obj_id, cam_id)`` for short records, or
        ``(image, obj_id, cam_id, dataset_id, mask) + trailing fields``
        for extended ones; in train mode with ``num_sampled_packages > 1``
        the image (and mask) are stacked along a new leading dimension.
        """
        input_record = self.data[index]

        image = read_image(input_record[0], grayscale=False)
        obj_id = input_record[1]
        cam_id = input_record[2]

        if len(input_record) > 3:
            dataset_id = input_record[3]
            if isinstance(
                    obj_id,
                (tuple, list)):  # when multi-label classification is available
                # Expand the id list into a multi-hot target vector sized by
                # the number of train pids for this dataset.
                targets = torch.zeros(self.num_train_pids[dataset_id])
                for obj in obj_id:
                    targets[obj] = 1
                obj_id = targets

            # '' is the "no mask" sentinel carried through to the output.
            mask = ''
            if input_record[4] != '':
                mask = read_image(input_record[4], grayscale=True)

            if self.mode == 'train' and self.num_sampled_packages > 1:
                # Draw several independently-augmented copies of the sample.
                assert self.transform is not None

                transformed_image, transformed_mask = [], []
                for _ in range(self.num_sampled_packages):
                    gen_image, gen_mask = self.transform((image, mask))

                    transformed_image.append(gen_image)
                    transformed_mask.append(gen_mask)

                transformed_image = torch.stack(transformed_image)
                # Only stack masks when a real mask exists; keep the ''
                # sentinel otherwise.
                transformed_mask = torch.stack(
                    transformed_mask) if mask != '' else ''
            elif self.transform is not None:
                transformed_image, transformed_mask = self.transform(
                    (image, mask))
            else:
                transformed_image, transformed_mask = image, mask

            output_record = (transformed_image, obj_id, cam_id, dataset_id,
                             transformed_mask) + input_record[5:]
        else:
            if self.mode == 'train' and self.num_sampled_packages > 1:
                # Same multi-package sampling, but without a mask.
                assert self.transform is not None

                transformed_image = []
                for _ in range(self.num_sampled_packages):
                    gen_image, _ = self.transform((image, ''))

                    transformed_image.append(gen_image)

                transformed_image = torch.stack(transformed_image)
            elif self.transform is not None:
                transformed_image, _ = self.transform((image, ''))
            else:
                transformed_image = image

            output_record = transformed_image, obj_id, cam_id

        return output_record