Example #1
    def __getitem__(self, index):

        img_path = os.path.join(self.data_root, self.image_set, self.img_list[index])
        img = np.array(cv2.imread(img_path), dtype=np.float32)
        mask_path = os.path.join(self.info_root, 'pose_mask', self.img_list[index].replace('.jpg', '.npy'))
        mask = np.load(mask_path)
        mask = np.array(mask, dtype=np.float32)

        kpt = self.kpt_list[index]
        center = self.center_list[index]
        scale = self.scale_list[index]

        img, mask, kpt, center = self.transformer(img, mask, kpt, center, scale)

        height, width, _ = img.shape

        # Integer division keeps the target maps aligned with the network stride
        # (plain / would produce floats and break cv2.resize/reshape in Python 3).
        mask = cv2.resize(mask, (width // self.stride, height // self.stride))
        mask = mask.reshape((height // self.stride, width // self.stride, 1))

        heatmap = np.zeros((height // self.stride, width // self.stride, len(kpt[0]) + 1), dtype=np.float32)
        heatmap = generate_heatmap(heatmap, kpt, self.stride, self.sigma)
        heatmap[:,:,0] = 1.0 - np.max(heatmap[:,:,1:], axis=2) # for background
        heatmap = heatmap * mask

        vecmap = np.zeros((height // self.stride, width // self.stride, len(self.vec_pair[0]) * 2), dtype=np.float32)
        cnt = np.zeros((height // self.stride, width // self.stride, len(self.vec_pair[0])), dtype=np.int32)

        vecmap = generate_vector(vecmap, cnt, kpt, self.vec_pair, self.stride, self.theta)
        vecmap = vecmap * mask

        img = transforms.normalize(transforms.to_tensor(img), [128.0, 128.0, 128.0], [256.0, 256.0, 256.0]) # mean, std
        mask = transforms.to_tensor(mask)
        heatmap = transforms.to_tensor(heatmap)
        vecmap = transforms.to_tensor(vecmap)

        return img, heatmap, vecmap, mask
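
generate_heatmap is imported from elsewhere in this project. A minimal sketch of what it plausibly does, assuming kpt is a list of persons, each a list of (x, y, visibility) triples, and that channel 0 holds the background as the code above implies:

import numpy as np

def generate_heatmap(heatmap, kpt, stride, sigma):
    # Sketch under assumed semantics: write a unit-peak Gaussian into each
    # keypoint channel at the stride-downsampled location, taking the
    # maximum across persons so overlapping people do not cancel out.
    h, w, _ = heatmap.shape
    ys, xs = np.mgrid[0:h, 0:w]
    for person in kpt:
        for k, (x, y, vis) in enumerate(person):
            if vis > 1:  # visibility convention is an assumption
                continue
            d2 = (xs - x / stride) ** 2 + (ys - y / stride) ** 2
            heatmap[:, :, k + 1] = np.maximum(heatmap[:, :, k + 1],
                                              np.exp(-d2 / (2 * sigma ** 2)))
    return heatmap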
Example #2
    def __getitem__(self, index):
        img_path = os.path.join(self.img_dir, self.json_data[index]['img_fn'])
        img = np.array(cv2.imread(img_path), dtype=np.float32)
        keypoints = self.json_data[index]['keypoints']

        if self.trans is not None:
            img, keypoints = self.trans(img, keypoints)

        label = np.zeros((self.s, self.s, self.num_kpt), dtype=np.float32)
        offset = np.zeros((self.s, self.s, self.num_kpt * 2), dtype=np.float32)
        for px in range(self.num_kpt):
            x = keypoints[px * 3 + 0]
            y = keypoints[px * 3 + 1]
            vis = keypoints[px * 3 + 2]
            # Skip unlabeled keypoints and keypoints falling outside the image.
            if vis == 0 or x <= 0 or x >= self.size or y <= 0 or y >= self.size:
                continue
            grid_loc_x = int(x // self.grid_size)
            grid_loc_y = int(y // self.grid_size)
            label[grid_loc_y][grid_loc_x][px] = 1
            # Offsets store the sub-cell position, normalized to [0, 1).
            offset[grid_loc_y][grid_loc_x][px] = (x % self.grid_size) / self.grid_size
            offset[grid_loc_y][grid_loc_x][self.num_kpt + px] = (y % self.grid_size) / self.grid_size

        img = normalize(to_tensor(img))
        label = to_tensor(label)
        offset = to_tensor(offset)

        return img, label, offset
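
At inference time the grid/offset encoding above has to be inverted. A self-contained sketch (decode_keypoints is a name introduced here, not part of the original):

import numpy as np

def decode_keypoints(label, offset, grid_size, threshold=0.5):
    # Pick the strongest cell per keypoint channel, then add back the
    # fractional offset to recover pixel coordinates.
    s, _, num_kpt = label.shape
    coords = np.full((num_kpt, 2), -1.0, dtype=np.float32)
    for px in range(num_kpt):
        gy, gx = np.unravel_index(np.argmax(label[:, :, px]), (s, s))
        if label[gy, gx, px] < threshold:
            continue  # keypoint absent or below confidence
        coords[px, 0] = (gx + offset[gy, gx, px]) * grid_size
        coords[px, 1] = (gy + offset[gy, gx, num_kpt + px]) * grid_size
    return coords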
Example #3
File: dataset.py Project: wentianli/MRI_RL
    def __call__(self, kspace, target, attrs, fname, slice):
        """
        Args:
            kspace (numpy.array): Input k-space of shape (num_coils, rows, cols, 2) for multi-coil
                data or (rows, cols, 2) for single coil data.
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                target (torch.Tensor): Normalized target image, with a channel dimension added.
                image (torch.Tensor): Zero-filled input image, with a channel dimension added.
                mask (torch.Tensor): The sub-sampling mask that was applied.
        """

        # this is the original normalization method from fastMRI official code
        def normalize_image(x):
            x, mean, std = normalize_instance(x, eps=1e-11)
            x = x.clip(-6, 6)
            return x

        if target is not None:
            target = normalize_image(target)
        else:
            # Placeholder target; the masking below expects an array with a .shape.
            target = np.array([0], dtype=np.float32)

        # Apply mask
        seed = None if not self.use_seed else tuple(map(ord, fname))
        mask = self.mask_func(target.shape + (2, ), seed)
        mask = mask[:, :, 0].numpy()

        m = min(float(np.min(target)), 0)
        target_01 = (target - m) / (6 - m)  # normalize into the range [0, 1]
        image, _, _ = Downsample(target_01, mask)
        if self.normalize:
            target = target_01
        else:
            image = image * (6 - m) + m  # for unet, to scale back
        #else:
        #    image, _, _ = Downsample(target - m, mask) # make sure that the data are non-negative before downsampling
        #    image += m

        target = to_tensor(target)
        image = to_tensor(image)
        mask = to_tensor(mask)
        return target.unsqueeze(0).float(), image.unsqueeze(0).float(), mask.float()
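
normalize_instance comes from the fastMRI transform utilities; a minimal re-implementation consistent with how it is called above (a sketch of the standard fastMRI behavior, not this project's exact code):

def normalize_instance(data, eps=0.0):
    # Standardize by the instance's own statistics and return them so the
    # caller can undo the normalization later.
    mean = data.mean()
    std = data.std()
    return (data - mean) / (std + eps), mean, std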
Example #4
    def __getitem__(self, idx):
        path_pair = self.file_list[idx]
        image, label = self.fetch_pair(path_pair)
        _, image_name = path_pair[0].rsplit('/', 1)

        for func_name in self.trans_types:
            transform = getattr(transforms, func_name)
            image, label = transform(image, label,
                                     **self.trans_args.get(func_name, dict()))

        image, label = transforms.to_tensor(image, label)
        return image, label, image_name
Example #5
    def __getitem__(self, idx):
        path_pair = self.file_list[idx]
        image, label = self.fetch_pair(path_pair)
        _, image_name = path_pair[0].rsplit('/', 1)

        names = self.trans_config.get('names', [])
        configs = self.trans_config.get('configs', dict())

        for name in names:
            transform = getattr(transforms, name)
            image, label = transform(image, label, **configs.get(name, dict()))

        image, label = transforms.to_tensor(image, label)
        return image, label, image_name
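
The structure expected of trans_config follows directly from the lookups above; a hypothetical example (the transform names are placeholders for illustration):

trans_config = {
    # Transforms are applied in order; each name must exist in `transforms`.
    'names': ['random_crop', 'random_flip'],
    # Optional per-transform kwargs; names without an entry get no kwargs.
    'configs': {
        'random_crop': {'size': 256},
    },
}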
Example #6
    def __call__(self, target, attrs, fname, slice):
        """
        Args:
            target (numpy.array): Target image
            attrs (dict): Acquisition related information stored in the HDF5 object.
            fname (str): File name
            slice (int): Serial number of the slice.
        Returns:
            (tuple): tuple containing:
                kspace (torch.Tensor): Full k-space computed from the ground truth.
                masked_kspace (torch.Tensor): Sub-sampled k-space.
                mask (torch.Tensor): The sub-sampling mask that was applied.
                zf (torch.Tensor): Normalized zero-filled reconstruction.
                target (torch.Tensor): Normalized target image.
                gt_mean (float): Mean value used to normalize the target.
                gt_std (float): Standard deviation used to normalize the target.
                fname (str): File name.
                slice (int): Serial number of the slice.

        Changed from original: now starting from the ground-truth RSS image, which makes
        more sense when doing single-coil reconstruction.
        """

        # Obtain full kspace from ground truth
        target = transforms.to_tensor(target)
        target = transforms.center_crop(target,
                                        (self.resolution, self.resolution))
        kspace = transforms.rfft2(target)

        seed = None if not self.use_seed else tuple(map(ord, fname))
        masked_kspace, mask = transforms.apply_mask(kspace, self.mask_func,
                                                    seed)
        # Inverse Fourier Transform to get zero filled solution
        zf = transforms.ifft2(masked_kspace)
        # Take complex abs to get a real image
        zf = transforms.complex_abs(zf)
        # Normalize input
        zf, zf_mean, zf_std = transforms.normalize_instance(zf, eps=1e-11)
        zf = zf.clamp(-6, 6)

        # # Normalize target
        # target = transforms.normalize(target, mean, std, eps=1e-11)
        target, gt_mean, gt_std = transforms.normalize_instance(target,
                                                                eps=1e-11)
        target = target.clamp(-6, 6)

        # Need to return kspace and mask information when doing active learning, since we are
        # acquiring frequencies and updating the mask for a data point during an AL loop.
        return kspace, masked_kspace, mask, zf, target, gt_mean, gt_std, fname, slice
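
transforms.apply_mask is the fastMRI-style masking step; a minimal sketch consistent with the call above (an approximation, not the library's exact code):

def apply_mask(kspace, mask_func, seed=None):
    # Sample a 0/1 mask for this k-space shape, then zero out the
    # frequencies that were not acquired.
    mask = mask_func(kspace.shape, seed)  # broadcastable against kspace
    return kspace * mask, mask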
Example #7
    def __getitem__(self, index):
        img_path = os.path.join(self.img_dir, self.json_data[index]['img_fn'])
        img = np.array(cv2.imread(img_path), dtype=np.float32)
        keypoints = self.json_data[index]['keypoints']
        if 'bodysize' in self.json_data[index]:
            norm = self.json_data[index]['bodysize']
        elif 'headsize' in self.json_data[index]:
            norm = self.json_data[index]['headsize']
        else:
            norm = self.json_data[index]['normalize']

        img, keypoints, ratio = self.trans(img, keypoints)

        label = np.zeros((self.s, self.s, self.num_kpt), dtype=np.float32)
        offset = np.zeros((self.s, self.s, self.num_kpt * 2), dtype=np.float32)

        for px in range(self.num_kpt):
            x = keypoints[px * 3 + 0]
            y = keypoints[px * 3 + 1]
            vis = keypoints[px * 3 + 2]
            # Skip unlabeled keypoints and keypoints falling outside the image.
            if vis == 0 or x <= 0 or x >= self.size or y <= 0 or y >= self.size:
                continue
            grid_loc_x = int(x // self.grid_size)
            grid_loc_y = int(y // self.grid_size)
            label[grid_loc_y][grid_loc_x][px] = 1
            # Offsets store the sub-cell position, normalized to [0, 1).
            offset[grid_loc_y][grid_loc_x][px] = (x % self.grid_size) / self.grid_size
            offset[grid_loc_y][grid_loc_x][self.num_kpt + px] = (y % self.grid_size) / self.grid_size

        img1 = self._enhance(img.copy(), 1.0)
        img2 = self._enhance(img.copy(), 1.5)
        img3 = self._enhance(img.copy(), 2.0)
        img0 = normalize(to_tensor(img)).unsqueeze(dim=0)
        img1 = normalize(to_tensor(img1)).unsqueeze(dim=0)
        img2 = normalize(to_tensor(img2)).unsqueeze(dim=0)
        img3 = normalize(to_tensor(img3)).unsqueeze(dim=0)
        img = img0
        label = to_tensor(label)
        offset = to_tensor(offset)
        norm = norm * ratio

        return img, label, offset, norm
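
The returned norm (body or head size, rescaled by the transform ratio) is the reference length typically used to normalize keypoint error in PCK-style evaluation; a hypothetical sketch (pck_score is not part of the original):

import numpy as np

def pck_score(pred, gt, norm, alpha=0.2):
    # Fraction of keypoints predicted within alpha * norm pixels of the
    # ground truth. pred, gt: (num_kpt, 2) arrays of pixel coordinates.
    dist = np.linalg.norm(pred - gt, axis=1)
    return float(np.mean(dist <= alpha * norm))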
Example #8
    def __call__(self, img):
        img = to_tensor(img)
        # Note: this snippet maps image -> k-space with ifft2; fft2 is the more
        # common forward convention, so this may be a codebase-specific choice.
        kspace = ifft2(img)
        # Assumption: mask_func lives on the transform object (the original
        # referenced a bare, undefined name).
        ss_kspace, mask = apply_mask(kspace, self.mask_func)
        return img, ss_kspace, mask
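
mask_func is not shown in this snippet; one plausible implementation in the spirit of fastMRI's random Cartesian masks (the parameters and the (..., rows, cols, 2) layout are assumptions):

import numpy as np
import torch

def random_column_mask(shape, seed=None, center_fraction=0.08, acceleration=4):
    # Keep a fully-sampled band of center columns plus random outer columns,
    # so roughly 1/acceleration of all columns survive.
    rng = np.random.RandomState(seed)
    num_cols = shape[-2]
    num_center = int(round(num_cols * center_fraction))
    prob = (num_cols / acceleration - num_center) / (num_cols - num_center)
    mask = rng.uniform(size=num_cols) < prob
    pad = (num_cols - num_center + 1) // 2
    mask[pad:pad + num_center] = True
    mask_shape = [1] * len(shape)
    mask_shape[-2] = num_cols  # broadcast over rows and the complex dim
    return torch.from_numpy(mask.astype(np.float32)).reshape(mask_shape)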
Example #9
File: demo.py Project: bojanagajic/paiss
d_feats_file = 'data/features/resnet50-rnk-lm-da_ox.npy'
try:
    d_feats = np.load(d_feats_file)
except OSError:
    print('ERROR: File {} not found. Please follow the instructions to download '
          'the pre-computed features.'.format(d_feats_file))
    sys.exit()

# Load the query image
img = Image.open(dataset.get_query_filename(args.qidx))
# Crop the query ROI
img = img.crop(tuple(dataset.get_query_roi(args.qidx)))
# Apply transformations
img = trf.resize_image(img, 800)
I = trf.to_tensor(img)
I = trf.normalize(
    I, dict(rgb_means=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
I = I.unsqueeze(0).to(device)
# Forward pass to extract the features
with torch.no_grad():
    print('Extracting the representation of the query...')
    q_feat = model(I).cpu().numpy()  # move to host first; .numpy() fails on CUDA tensors
print('Done\n')

# Rank the database and visualize the top-k most similar images in the database
dataset.vis_top(d_feats,
                args.qidx,
                q_feat=q_feat,
                topk=args.topk,
                out_image_file='out.png')
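
vis_top presumably ranks the database by similarity to q_feat; since retrieval features of this kind are typically L2-normalized, the ranking reduces to a dot product (a sketch of the computation, not the function's internals):

import numpy as np

scores = d_feats @ q_feat.squeeze()  # one cosine similarity per database image
ranked = np.argsort(-scores)         # database indices, best match first
print('top-{} database indices: {}'.format(args.topk, ranked[:args.topk]))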