def load_process_img(self, img_idx):
    """Load and preprocess one image, then crop a patch around its annotated box."""
    imname = str(self.filenames[img_idx])
    slice_idx = self.slice_idx[img_idx]
    spacing = self.spacing[img_idx]
    slice_intv = self.slice_intv[img_idx]
    box = self.boxes[img_idx]

    im, im_scale, crop = load_prep_img(imname, slice_idx, spacing, slice_intv,
                                       do_clip=False, num_slice=config.NUM_SLICES)
    # the box is annotated in original-image coordinates; bring it to the
    # resized image before cropping the patch around it
    box *= im_scale
    patch, new_box, patch_scale = get_patch(im, box)
    return patch, new_box
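A hedged usage sketch: assuming the method above belongs to a dataset-style class whose instance (here called `dataset`, a made-up name) already populates `filenames`, `slice_idx`, `spacing`, `slice_intv`, and `boxes`, it could be driven like this to collect all patches:

# iterate over all annotated lesions and collect the cropped patches
patches, patch_boxes = [], []
for idx in range(len(dataset.filenames)):
    patch, new_box = dataset.load_process_img(idx)
    patches.append(patch)
    patch_boxes.append(new_box)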
Example #2
def get_input(nifti_data, box0, imname, infos):
    """Prepare a network input patch from a NIfTI volume and a user-supplied box.

    box0 is (slice_idx, x1, y1, x2, y2) in original-image coordinates.
    """
    slice_idx = box0[0]
    box = box0[1:].copy()
    # get_data()/get_affine() are the older nibabel accessors; recent nibabel
    # versions expose get_fdata() and the .affine attribute instead
    vol = nifti_data.get_data()
    aff = nifti_data.get_affine()[:3, :3]
    spacing = np.abs(aff[:2, :2]).max()  # in-plane pixel spacing (mm)
    slice_intv = np.abs(aff[2, 2])       # slice interval (mm)

    # Ad-hoc code for normalizing the orientation of the volume.
    # The aim is to make vol[:, :, i] a supine, right-to-left slice.
    # It works for the authors' data, but may not be suitable for all kinds of NIfTI files.
    if np.abs(aff[0, 0]) > np.abs(aff[0, 1]):
        vol = np.transpose(vol, (1, 0, 2))
        aff = aff[[1, 0, 2], :]
    if np.max(aff[0, :2]) > 0:
        vol = vol[::-1, :, :]
    if np.max(aff[1, :2]) > 0:
        vol = vol[:, ::-1, :]

    im, im_scale, c = load_prep_img(vol,
                                    int(slice_idx),
                                    spacing=spacing,
                                    slice_intv=slice_intv,
                                    do_clip=False,
                                    num_slice=config.NUM_SLICES)
    # bring the box to resized-image coordinates, then crop a patch around it
    box *= im_scale
    patch, center_box, patch_scale = get_patch(im, box)

    # undo the training-time intensity windowing to recover HU values, then
    # re-window the central slice to [-175, 275] HU for an 8-bit visualization
    im_show = windowing(
        windowing_rev(patch[:, :, 1] + config.PIXEL_MEANS, config.WINDOWING),
        [-175, 275]).astype(np.uint8)
    im_show = cv2.cvtColor(im_show, cv2.COLOR_GRAY2BGR)
    cv2.rectangle(im_show, (int(center_box[0]), int(center_box[1])),
                  (int(center_box[2]), int(center_box[3])),
                  color=(0, 255, 0),
                  thickness=1)
    coord_str = '_'.join([str(x) for x in box0.astype(int).tolist()])
    fn = os.path.join(default.res_path, '%s_%s.png' % (imname, coord_str))
    cv2.imwrite(fn, im_show)
    infos.append('Image patch saved to %s.' % fn)

    # scale to [0, 1] and convert HWC -> CHW for PyTorch
    patch = patch.astype(float) / 255
    patch = torch.from_numpy(patch.transpose((2, 0, 1)))

    # out_box covers the whole patch (x1, y1, x2, y2); center_box is the input box in patch coordinates
    out_box = torch.tensor([[0, 0, patch.shape[2],
                             patch.shape[1]]]).to(dtype=torch.float).cuda()
    center_box = torch.tensor(center_box[None, :]).to(dtype=torch.float).cuda()
    patch = patch[None, :, :, :].to(dtype=torch.float).cuda()
    return patch, out_box, center_box
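A minimal call sketch, assuming an older nibabel version (matching the get_data()/get_affine() calls above) and that `config`, `default`, `load_prep_img`, `get_patch`, `windowing`, and `windowing_rev` are importable as in the original module; the file path and box coordinates below are placeholders:

import numpy as np
import nibabel as nib

infos = []
nii = nib.load('/path/to/volume.nii.gz')        # hypothetical CT volume
box0 = np.array([60, 100., 120., 180., 240.])   # hypothetical (slice_idx, x1, y1, x2, y2)
patch, out_box, center_box = get_input(nii, box0, 'volume', infos)
print(infos[-1])  # reports where the visualization patch was written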
Example #3
def get_image(roidb):
    """
    Preprocess images and return the processed roidb.
    :param roidb: a list of roidb entries
    :return: list of images in MXNet blob format;
             each roidb entry gains a new item 'im_info'
    Coordinate convention:
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    # t = Timer()
    for i in range(num_images):
        r = roidb[i]
        # t.tic()
        im, im_scale, crop = load_prep_img(r['image'],
                                           r['slice_no'],
                                           r['spacing'],
                                           r['slice_intv'],
                                           config.IMG_DO_CLIP,
                                           num_slice=config.NUM_SLICES *
                                           config.NUM_IMAGES_3DCE)

        sys.stdout.flush()
        # print i, t.toc()
        # print r['image']
        im -= config.PIXEL_MEANS
        new_r = r.copy()

        im_tensor, _ = im_list_to_blob([im])
        processed_ims.append(im_tensor)
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_r['boxes'] = r['boxes'].copy()

        # the image was clipped (cropped) before being resized, so first shift the
        # boxes by the crop offset, then scale them to the resized image
        if config.IMG_DO_CLIP:
            new_r['boxes'] -= [crop[2], crop[0], crop[2], crop[0]]
            new_r['crop'] = crop
        new_r['boxes'] *= im_scale
        new_r['im_info'] = im_info
        processed_roidb.append(new_r)

        if DEBUG:
            from ..core.tester import vis_all_boxes
            vis_all_boxes(im, new_r['boxes'])
    return processed_ims, processed_roidb
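A hypothetical call, assuming `config`, `load_prep_img`, `im_list_to_blob`, and the module-level `DEBUG` flag are in scope as in the original module; the entry values below are placeholders:

import numpy as np

roidb = [{
    'image': '/path/to/slices/088.png',   # hypothetical slice path
    'slice_no': 88,
    'spacing': 0.8,                       # mm per pixel (placeholder)
    'slice_intv': 2.0,                    # mm between slices (placeholder)
    'boxes': np.array([[100., 120., 180., 240.]]),  # (x1, y1, x2, y2)
}]
ims, new_roidb = get_image(roidb)
print(new_roidb[0]['im_info'])  # [height, width, im_scale]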