def get_mask(encode, width, height):
    if encode == [] or encode == [' -1']:
        # no annotation: return an empty mask, transposed for consistency with the other branch
        return rle2mask(' -1', width, height).T
    mask = rle2mask(encode[0], width, height)
    for e in encode[1:]:
        mask += rle2mask(e, width, height)
    return mask.T
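
Every snippet in this listing calls rle2mask but none defines it. For reference, a minimal sketch of the SIIM-ACR mask_functions-style decoder these snippets appear to assume (relative run starts over a width x height grid, 0/255 output that callers transpose) could look like the following; the exact conventions are an assumption based on how the calls above use it.

import numpy as np

def rle2mask(rle, width, height):
    """Decode an RLE string of (relative start, run length) pairs into a 0/255 mask."""
    mask = np.zeros(width * height, dtype=np.uint8)
    numbers = [int(x) for x in rle.split()]
    if len(numbers) < 2:          # '', ' -1', etc. -> empty mask
        return mask.reshape(width, height)
    starts, lengths = numbers[0::2], numbers[1::2]
    position = 0
    for start, length in zip(starts, lengths):
        position += start
        mask[position:position + length] = 255
        position += length
    return mask.reshape(width, height)
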
Example No. 2
def label_generator(file_path_list_temp, labels, size, img_size, n_channels):
    'Generates mask labels for a batch of samples'
    # Initialize the output array of masks
    y = np.empty((size, img_size, img_size, 1))
    y_class = []
    for idx, file_path in enumerate(file_path_list_temp[:size]):

        id = file_path.split('/')[-1][:-4]
        rle = labels.get(id)

        if rle is None:
            mask = np.zeros((1024, 1024))
        else:
            if len(rle) == 1:
                mask = rle2mask(rle[0], 1024, 1024).T
            else:
                mask = np.zeros((1024, 1024))
                for r in rle:
                    mask = mask + rle2mask(r, 1024, 1024).T

        # Store the resized mask and binarize it
        y[idx, ] = cv2.resize(mask, (img_size, img_size))[..., np.newaxis]
        y[y > 0] = 255

    return np.array(y) / 255
def multi_rle_encode(img, **kwargs):
    ''' Encode each disconnected region as a separate RLE mask
    '''
    labels = label(img)  # connected-component labelling
    if img.ndim > 2:
        return [
            mask2rle(np.sum(labels == k, axis=2), **kwargs)
            for k in np.unique(labels[labels > 0])
        ]
    else:
        return [
            mask2rle(labels == k, **kwargs)
            for k in np.unique(labels[labels > 0])
        ]
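
A quick, hypothetical sanity check for the function above (the blob coordinates, the skimage.measure.label import and the width/height keyword names are assumptions): two disjoint blobs should yield two entries, one per connected component.

import numpy as np
from skimage.measure import label  # assumed source of the label() used above

toy = np.zeros((1024, 1024), dtype=np.uint8)
toy[100:200, 100:200] = 1    # first blob
toy[500:600, 700:800] = 1    # second, disconnected blob

print(len(multi_rle_encode(toy, width=1024, height=1024)))  # expected: 2
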
Example No. 4
 def __getitem__(self, idx):
     item = self.data[idx]
     ## 1. the image part ##
     img_id = item['img_id']
     img_path = self.path + '%s.png' % img_id
     img = plt.imread(img_path)
     height, width = img.shape  # ndarray shape is (rows, cols)
     img = cv2.resize(img, (self.IMG_SIZE, self.IMG_SIZE))
     img = np.expand_dims(img, 0)
     img = torch.from_numpy(img)
     ## 2. the target part ##
     cnt_masks = item['cnt_masks']
     masks_in_rle = item['masks']
     if cnt_masks == 1:
         mask = rle2mask(masks_in_rle[0], width, height).T
         mask = cv2.resize(mask.astype(np.float32),
                           (self.IMG_SIZE, self.IMG_SIZE))
         masks = [mask]
         boxes = [self._get_box(mask)
                  ]  #add bounding boxes, for instance segmentation task
     elif cnt_masks > 1:
         masks = []
         boxes = []
         for mask_in_rle in masks_in_rle:
             mask = rle2mask(mask_in_rle, width, height).T
             mask = cv2.resize(mask.astype(np.float32),
                               (self.IMG_SIZE, self.IMG_SIZE))
             masks.append(mask)
             box = self._get_box(mask)
             boxes.append(box)
     else:
         masks = [[]]  #[np.zeros((self.IMG_SIZE, self.IMG_SIZE))]
         boxes = [[]]
     masks = np.array(masks, dtype=np.float32)
     boxes = np.array(boxes, dtype=np.float32)
     labels = np.array([1] * cnt_masks, dtype=np.int64)
     try:
         area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
     except IndexError:
         area = np.array([], dtype=np.float32)
     iscrowd = np.zeros((cnt_masks, ), dtype=np.int64)
     ## 3. the output part ##
     target = {}
     target["boxes"] = torch.from_numpy(boxes).float()
     target["labels"] = torch.from_numpy(labels)
     target["masks"] = torch.from_numpy(masks).int()
     target["image_id"] = torch.from_numpy(np.array([idx]))  #img_id
     target["area"] = torch.from_numpy(area)
     target["iscrowd"] = torch.from_numpy(iscrowd)
     return img, target
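
The _get_box helper is not part of this snippet. Given that the area above is computed as (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]), a plausible, hypothetical implementation returning [xmin, ymin, xmax, ymax] for a binary mask (shown standalone, it would live on the dataset class) could be:

import numpy as np

def _get_box(self, mask):
    # hypothetical helper: tight [xmin, ymin, xmax, ymax] box around non-zero mask pixels
    ys, xs = np.where(mask > 0)
    if len(xs) == 0:
        return [0, 0, 0, 0]   # empty mask: degenerate box
    return [xs.min(), ys.min(), xs.max(), ys.max()]
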
Example No. 5
def _save_img_mask(dcm_path, rle_list, save_path):
    '''
    Intended to be called via multiprocessing.
    Saves the DICOM pixel array and its mask to save_path as a numpy array and returns the metadata as a dictionary.
    save_path should be base + image_id
    '''

    # for path in tqdm(dcm_path_list[:10]):
    dataset = pydicom.dcmread(dcm_path)

    img = dataset.pixel_array

    meta = {
        'path': save_path + '.npy',
        'image_id': save_path.split('/')[-1].split('.dcm')[0],
        'storage_type': dataset.SOPClassUID,
        'name': dataset.PatientName,
        'id': dataset.PatientID,
        'age': dataset.PatientAge,
        'sex': dataset.PatientSex,
        'modality': dataset.Modality,
        'body_part': dataset.BodyPartExamined,
        'view': dataset.ViewPosition,
        'height': img.shape[0],
        'width': img.shape[1],
        'pixel_spacing': dataset.PixelSpacing
    }

    # accumulate all RLEs into a single mask; ' -1' marks a negative case
    mask = np.zeros(img.shape)
    meta['class'] = 0

    for rle in rle_list:
        if rle == ' -1':
            # negative case: keep the empty mask
            continue
        meta['class'] = 1
        mask = np.add(mask, rle2mask(rle, img.shape[1], img.shape[0]).T)

    # masks are 0 and 255 and may overlap, so convert every non-zero value to 1
    mask = np.where(mask > 0, 1, 0)
    mask = np.array(mask, dtype=np.uint8)

    np.save(save_path, {'img': img, 'mask': mask})
    return meta
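
Since the docstring says the function is meant to be called via multiprocessing, a minimal driver sketch could look like this; dcm_paths, rle_lists and save_paths are assumed, parallel lists (placeholder names).

from multiprocessing import Pool

import pandas as pd

# dcm_paths, rle_lists and save_paths are placeholder, parallel lists
with Pool(processes=4) as pool:
    metas = pool.starmap(_save_img_mask, zip(dcm_paths, rle_lists, save_paths))
meta_df = pd.DataFrame(metas)  # one row of DICOM metadata per saved image
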
Example No. 6
    def __data_generation(self, file_path_list_temp):
        'Generates data containing batch_size samples'
        # Initialize the batch arrays (self.batch_size samples)
        X = np.empty(
            (self.batch_size, self.img_size, self.img_size, self.n_channels))
        y = np.empty((self.batch_size, self.img_size, self.img_size, 1))
        y_class = []
        for idx, file_path in enumerate(file_path_list_temp):

            id = file_path.split('/')[-1][:-4]
            rle = self.labels.get(id)

            image = pydicom.read_file(file_path).pixel_array

            if len(image.shape) == 2:
                image = np.repeat(image[..., None], 3, 2)

            image_resized = cv2.resize(image, (self.img_size, self.img_size))
            X[idx, ] = image_resized

            # if there is no mask, create an empty mask
            # note: decode at 1024x1024 first because rle2mask works at the original resolution
            if rle is None:
                mask = np.zeros((1024, 1024))
            else:
                if len(rle) == 1:
                    mask = rle2mask(rle[0], 1024, 1024).T
                else:
                    mask = np.zeros((1024, 1024))
                    for r in rle:
                        mask = mask + rle2mask(r, 1024, 1024).T

            # Store the resized mask and binarize it
            y[idx, ] = cv2.resize(mask,
                                  (self.img_size, self.img_size))[...,
                                                                  np.newaxis]
            y[y > 0] = 255

        if self.augment is None:
            return X / 255.0, np.array(y) / 255
        else:
            X = np.uint8(X)
            y = np.uint8(y)
            im, mask = [], []
            for m, k in zip(X, y):
                augmented = self.augment(image=m, mask=k)
                im.append(augmented['image'])
                mask.append(augmented['mask'])
            return np.array(im) / 255.0, np.array(mask) / 255
def masks_as_image(rle_list, shape):
    # Take the individual masks and create a single mask array
    all_masks = np.zeros(shape, dtype=np.uint8)
    for mask in rle_list:
        if isinstance(mask, str) and mask != '-1':
            all_masks |= rle2mask(mask, shape[0], shape[1]).T.astype(bool)
    return all_masks
Example No. 8
def plot_with_mask_and_bbox(dataset, mask_encoded, figsize=(20, 10)):
    mask_decoded = rle2mask(mask_encoded, 1024, 1024).T
    fig, ax = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=figsize)
    rmin, rmax, cmin, cmax = bounding_box(mask_decoded)
    patch = patches.Rectangle((cmin, rmin),
                              cmax - cmin,
                              rmax - rmin,
                              linewidth=1,
                              edgecolor='r',
                              facecolor='none')
    ax[0].imshow(dataset.pixel_array, cmap=plt.cm.bone)
    ax[0].imshow(mask_decoded, alpha=0.3, cmap="Reds")
    ax[0].add_patch(patch)
    ax[0].set_title('With Mask')

    patch = patches.Rectangle((cmin, rmin),
                              cmax - cmin,
                              rmax - rmin,
                              linewidth=1,
                              edgecolor='r',
                              facecolor='none')
    ax[1].imshow(dataset.pixel_array, cmap=plt.cm.bone)
    ax[1].add_patch(patch)
    ax[1].set_title('Without Mask')
    plt.show()
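
The bounding_box helper used here (and again in Example No. 28) is not shown. A plausible sketch that returns (rmin, rmax, cmin, cmax) for the non-zero region of a decoded mask, assuming the mask has at least one positive pixel, is:

import numpy as np

def bounding_box(mask):
    # hypothetical helper: row/column extent of the non-zero pixels as (rmin, rmax, cmin, cmax)
    # assumes at least one non-zero pixel in the mask
    rows = np.any(mask, axis=1)
    cols = np.any(mask, axis=0)
    rmin, rmax = np.where(rows)[0][[0, -1]]
    cmin, cmax = np.where(cols)[0][[0, -1]]
    return rmin, rmax, cmin, cmax
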
Example No. 9
    def getLabelFunc(self, x):

        if (len(x) < 3):
            return (np.zeros(self.size))
        else:
            temp = rle2mask(x, self.size[0], self.size[1])
            return (temp)
Example No. 10
def processScan(name, df, resize=(1024, 1024), ORIG_DIM=1024):
    scan = pydicom.read_file(name)
    img = scan.pixel_array

    if (resize != (ORIG_DIM, ORIG_DIM)):
        img = cv2.resize(img, resize)

    index = name.split('\\')[-1][:-4]
    masks = df.loc[index, 'EncodedPixels']
    mask_count = 0

    imgMask = np.zeros((ORIG_DIM, ORIG_DIM))

    if not isinstance(masks, str) or masks != ' -1':
        if isinstance(masks, str): masks = [masks]
        # else: masks = masks.tolist()
        mask_count += 1
        for mask in masks:
            imgMask += rle2mask(mask, ORIG_DIM, ORIG_DIM).T

    if (resize != (ORIG_DIM, ORIG_DIM)):
        imgMask = cv2.resize(imgMask, resize)

    img = img / 255
    imgMask = imgMask / 255

    return img, imgMask
Example No. 11
    def __data_generation(self, list_IDs_temp):

        # Creates an empty placeholder array that will be populated with data that is to be supplied
        X = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size, *self.dim, self.n_channels))

        for i, ID in enumerate(list_IDs_temp):

            # Write logic for selecting/manipulating X and y here
            img = pydicom.dcmread(ID).pixel_array
            img = np.reshape(img, (1, 1024, 1024, 1))
            img = tf.convert_to_tensor(img, dtype=tf.uint8)
            sizes = [1, 128, 128, 1]
            strides = [1, 128, 128, 1]  # how far the centres of two patches must be --> overlap area
            rates = [1, 1, 1, 1]  # dilation rate of patches
            img_patches = tf.image.extract_patches(img, sizes, strides, rates, padding="VALID")
            img_patches = np.reshape(img_patches, (64, 128, 128, 1))

            ID = ID.split("/")[-1]
            try:
                locPD = self.rles.loc[ID.split('/')[-1][:-4], 'EncodedPixels']  # EncodedPixels data from CSV based on ID lookup
                if '-1' in locPD:
                    gt = np.zeros((1024, 1024, 1))
                else:
                    if type(locPD) == str:
                        gt = np.expand_dims(rle2mask(locPD, 1024, 1024), axis=2)
                    else:
                        gt = np.zeros((1024, 1024, 1))
                        for x in locPD:
                            gt = gt + np.expand_dims(rle2mask(x, 1024, 1024), axis=2)
            except KeyError:
                gt = np.zeros((1024, 1024, 1))  # Assume missing masks are empty masks.

            gt = np.reshape(gt, (1, 1024, 1024, 1))
            gt = tf.convert_to_tensor(gt, dtype=tf.uint8)
            gt_patches = tf.image.extract_patches(gt, sizes, strides, rates, padding="VALID")
            gt_patches = np.reshape(gt_patches, (64, 128, 128, 1))
            gt_patches = np.where(gt_patches == 255, 1, 0)

            if np.random.randint(0,2) == 1:
                X = img_patches[0:32,:,:,:]
                y = gt_patches[0:32,:,:,:]
            else:
                X = img_patches[32:64,:,:,:]
                y = gt_patches[32:64,:,:,:]

        return X, y
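
The patch arithmetic above checks out: a 1024x1024 image cut into non-overlapping 128x128 tiles gives 1024 / 128 = 8 tiles per side, i.e. 64 patches, which matches the (64, 128, 128, 1) reshape. A small shape check, assuming a TensorFlow 2.x environment:

import numpy as np
import tensorflow as tf

dummy = tf.zeros((1, 1024, 1024, 1), dtype=tf.uint8)
patches = tf.image.extract_patches(dummy,
                                   sizes=[1, 128, 128, 1],
                                   strides=[1, 128, 128, 1],
                                   rates=[1, 1, 1, 1],
                                   padding="VALID")
print(patches.shape)                                   # (1, 8, 8, 16384)
print(np.reshape(patches, (64, 128, 128, 1)).shape)    # (64, 128, 128, 1)
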
Example No. 12
def getLabelFunc(x):

    if (len(annoteDF['rle'][x]) < 3):
        #return(vs.open_mask_rle('', shape=(1024, 1024)).resize((1,1024,1024)))
        return (np.zeros([1024, 1024]))
    else:
        temp = rle2mask(annoteDF['rle'][x], 1024, 1024)
        return (temp)
def masks_as_color(rle_list, shape):
    # Take the individual masks and create a color mask array
    all_masks = np.zeros(shape, dtype=np.float32)
    # scale each instance differently so overlapping masks read as a heatmap
    scale = lambda x: (len(rle_list) + x + 1) / (len(rle_list) * 2)
    for i, mask in enumerate(rle_list):
        if isinstance(mask, str) and mask != '-1':
            all_masks[:, :] += scale(i) * rle2mask(mask, shape[0], shape[1]).T
    return all_masks
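
A hypothetical way to look at the result (rle_strings stands in for the list of RLE annotations of a single image):

import matplotlib.pyplot as plt

plt.imshow(masks_as_color(rle_strings, (1024, 1024)), cmap='jet')
plt.title('instance heatmap')
plt.show()
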
Example No. 14
    def get_and_resize_mask(mask):
        if (mask == b' -1'):
            return np.zeros((224, 224)).astype(np.float32)

        mask = rle2mask(mask, width=1024, height=1024)
        mask /= 255.
        mask = np.transpose(mask)
        mask = mask.reshape((1024, 1024, 1))
        return my_resize(mask).astype(np.float32)
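
The my_resize helper is not shown in this snippet; judging by the early return of a 224x224 zero array, it presumably downsamples the (1024, 1024, 1) mask to the model input size. A hypothetical stand-in:

import cv2

def my_resize(mask):
    # hypothetical helper: downsample a (1024, 1024, 1) mask to 224x224
    return cv2.resize(mask, (224, 224), interpolation=cv2.INTER_AREA)
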
Example No. 15
    def load_siim(self, dataset_dir, subset):
        """Load a subset of the Balloon dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add classes. We have only one class to add.
        self.add_class("pneumothorax", 1, "pneumothorax")

        # Train or validation dataset?
        assert subset in ["train", "val", "test", "sample"]
        # e.g. /home/sa-279/Mask_RCNN/datasets/pneumothorax/train/train-rle.csv
        # e.g. /home/sa-279/Mask_RCNN/datasets/pneumothorax/val/val-rle.csv
        file = os.path.join(dataset_dir, subset, subset+"-rle.csv")
        dataset_dir = os.path.join(dataset_dir, subset)
        # read csv file in the subset directory

        # Load annotations
        # we have csv file with ImageId,EncodedPixels
        # EncodedPixels are RLE of the mask
        # We mostly care about the x and y coordinates of each region


        print(file)
        annotations = pd.read_csv(file)
        annotations.columns = ['ImageId', 'ImageEncoding']
        #annotations = annotations.sample(frac=1).reset_index(drop=True)  it causes submission id shuffle
        #annotations = annotations[annotations.iloc[:, 1] != "-1"] training data is already filtered in train-val-split.py
        image_ids = annotations.iloc[:, 0].values
        rles = annotations.iloc[:, 1].values
        for row in annotations.itertuples():
            id = row.ImageId
            encoding = row.ImageEncoding
            if str(encoding) == "-1":
                encoding = "0 1048576"
            image_path = os.path.join(dataset_dir, id + ".dcm")
            #image = pydicom.dcmread(image_path)
            #image preprocessing Adaptive Histogram Equalization
            #height = image.Rows
            height = 1024
            #width = image.Columns
            width = 1024
            mask = rle2mask(encoding, width, height)
            mask = mask.T
            #mask = mask.reshape(width,height,1) just 1 instance per image
            mask = np.expand_dims(mask,axis=2)
            class_name = "pneumothorax"
            if str(encoding) == "-1":
                class_name = "BG"
            self.add_image(
                class_name,
                image_id=id,  # use file name as a unique image id
                path=image_path,
                width=width, height=height,
                polygons=mask)
Example No. 16
def main():

    encodingsV = pd.read_csv("train-rle.csv")
    encodings = encodingsV.values
    dic = {}
    zeros = 0
    ones = 0
    for i in range(0, encodings.shape[0]):
        if encodings[i, 0] not in dic:
            if (str(encodings[i, 1]) == " -1"):
                dic[encodings[i, 0]] = 0
                zeros = zeros + 1
            else:
                dic[encodings[i, 0]] = 1
                ones = ones + 1

    #print(dic)

    test = 4
    imageID = encodings[test, 0]
    #print(imageID)
    imageFront = re.search(r"\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.",
                           imageID).group(0)
    imageBack = re.split(r"\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.\d+", imageID,
                         1)[1]
    imageTail = re.split(r"\d+\.\d+\.\d+\.", imageBack, 1)[1]
    imageTail = int(imageTail)
    imageTail2 = imageTail - 1
    imageTail3 = imageTail - 2
    imageBack = re.search(r"\d+\.\d+\.\d+\.", imageBack).group(0)

    #print(imageFront)
    #print(imageTail)
    string = './dicom-images-train/' + imageFront + "2." + imageBack + str(
        imageTail2) + '/' + imageFront + "3." + imageBack + str(
            imageTail3) + '/' + imageFront + "4." + imageBack + str(
                imageTail) + '.dcm'
    #print(string)
    img = dicom.read_file(string)

    with open('maskBinary.json', 'w') as fp:
        json.dump(dic, fp)

    for i in range(0, encodings.shape[0]):
        maskString = encodings[i, 1]
        ID = encodings[i, 0]
        mask = np.zeros(img.pixel_array.shape)
        if (str(maskString) != " -1"):
            mask = rle2mask(maskString, img.pixel_array.shape[0],
                            img.pixel_array.shape[1])

        img2 = Image.fromarray(mask).convert('L')
        img2.save('./train_masks/' + str(ID) + '.bmp')
def get_affected_area(encoded_pixels_list, pixel_spacing):

    # take the encoded mask, decode, and get the sum of nonzero elements
    pixel_sum = 0

    for encoded_mask in encoded_pixels_list:
        mask_decoded = rle2mask(encoded_mask, 1024, 1024).T
        pixel_sum += np.count_nonzero(mask_decoded)

    area_per_pixel = pixel_spacing[0] * pixel_spacing[1]

    return pixel_sum * area_per_pixel
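
A hedged usage sketch: DICOM PixelSpacing is in millimetres per pixel, so the return value is an area in mm². Here dcm_path and the RLE string are placeholders.

import pydicom

ds = pydicom.dcmread(dcm_path)              # dcm_path: path to the matching DICOM file
rles = ['123456 10 125000 20']              # made-up RLE covering 30 pixels
area_mm2 = get_affected_area(rles, ds.PixelSpacing)  # 30 * spacing[0] * spacing[1] mm^2
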
Example No. 18
    def load_siim(self, dataset_dir, subset):
        """Load a subset of the Balloon dataset.
        dataset_dir: Root directory of the dataset.
        subset: Subset to load: train or val
        """
        # Add classes. We have only one class to add.
        self.add_class("pneumothorax", 1, "pneumothorax")

        # Train or validation dataset?
        assert subset in ["train", "val"]
        # e.g. /home/sa-279/Mask_RCNN/datasets/pneumothorax/train/train-rle.csv
        # e.g. /home/sa-279/Mask_RCNN/datasets/pneumothorax/val/val-rle.csv
        file = os.path.join(dataset_dir, subset, subset + "-rle.csv")
        dataset_dir = os.path.join(dataset_dir, subset)
        # read csv file in the subset directory

        # Load annotations
        # we have csv file with ImageId,EncodedPixels
        # EncodedPixels are RLE of the mask
        # We mostly care about the x and y coordinates of each region

        print(file)
        annotations = pd.read_csv(file)
        annotations.columns = ['ImageId', 'ImageEncoding']
        annotations = annotations.sample(frac=1).reset_index(drop=True)
        annotations = annotations[annotations.iloc[:, 1] != "-1"]
        image_ids = annotations.iloc[:, 0].values
        rles = annotations.iloc[:, 1].values
        for row in annotations.itertuples():
            id = row.ImageId
            encoding = row.ImageEncoding
            if str(encoding) == "-1":
                encoding = "0 1048576"
            image_path = os.path.join(dataset_dir, id + ".dcm")
            image = pydicom.dcmread(image_path)
            height = image.Rows
            width = image.Columns
            mask = rle2mask(encoding, width, height)
            mask = mask.T
            mask = mask.reshape(width, height, 1)
            class_name = "pneumothorax"
            if str(encoding) == "-1":
                class_name = "BG"
            self.add_image(
                class_name,
                image_id=id,  # use file name as a unique image id
                path=image_path,
                width=width,
                height=height,
                polygons=mask)
    def __data_generation(self, file_path_list_temp):
        'generate data containing batch_size samples'
        X = np.empty(
            (self.batch_size, self.img_size, self.img_size, self.channels))
        y = np.empty(
            (self.batch_size, self.img_size, self.img_size, self.channels))

        for idx, file_path in enumerate(file_path_list_temp):

            id = file_path.split('/')[-1][:-4]
            rle = self.labels.get(id)
            image = pydicom.read_file(file_path).pixel_array
            image_resized = cv2.resize(image, (self.img_size, self.img_size))
            image_resized = np.array(image_resized, dtype=np.float64)

            X[idx, ] = np.expand_dims(image_resized, axis=2)

            # if there is no mask, create an empty mask
            # note: decode at 1024x1024 first because rle2mask works at the original resolution
            if rle is None:
                mask = np.zeros((1024, 1024))
            else:
                if len(rle) == 1:
                    mask = rle2mask(rle[0], 1024, 1024).T
                else:
                    mask = np.zeros((1024, 1024))
                    for r in rle:
                        mask = mask + rle2mask(r, 1024, 1024).T

            mask_resized = cv2.resize(mask, (self.img_size, self.img_size))
            y[idx, ] = np.expand_dims(mask_resized, axis=2)

        # normalize
        X = X / 255
        y = y / 255

        return X, y
Example No. 20
    def __getitem__(self, idx):
        if self.mode == 'train':
            item = self.data[idx]
            img_path = self.path + '%s.png' % item['img_id']
            img = plt.imread(img_path)
            height, width = img.shape  # ndarray shape is (rows, cols)
            img = cv2.resize(img, (self.IMG_SIZE, self.IMG_SIZE))
            img = np.expand_dims(img, 0)

            cnt_masks = item['cnt_masks']
            masks_in_rle = item['masks']
            if cnt_masks == 1:
                mask = rle2mask(masks_in_rle[0], width, height).T
            elif cnt_masks > 1:  # v1: simply merge overlapping masks into their union
                masks = []
                for mask_in_rle in masks_in_rle:
                    mask = rle2mask(mask_in_rle, width, height).T
                    masks.append(mask)
                # union of instances: 1 wherever at least one mask is 1
                mask = (np.array(masks).sum(axis=0) >= 1).astype(np.uint8)
            else:
                mask = np.zeros((self.IMG_SIZE, self.IMG_SIZE))
            mask = cv2.resize(mask.astype(np.float32),
                              (self.IMG_SIZE, self.IMG_SIZE))
            mask = np.expand_dims(mask, 0)
            ##augmentation
            if self.augmentation:
                img, mask = do_augmentation(img, mask)
            return img, mask
        elif self.mode == 'test':
            img_id = self.img_id_list[idx]
            img_path = self.path + '%s.png' % img_id
            img = plt.imread(img_path)
            width, height = img.shape
            img = cv2.resize(img, (self.IMG_SIZE, self.IMG_SIZE))
            img = np.expand_dims(img, 0)
            return img
Example No. 21
def getMask(imgId, df, shape):
    """returns nd array mask"""
    df = df.loc[df['ImageId'] == imgId]
    rle_r = df['EncodedPixels'].tolist()
    
    rle = ''
    if len(rle_r) > 0:
        rle = rle_r[0].strip()

    mask = np.zeros([*shape])
    if not rle == '-1':
        mask = mask_functions.rle2mask(rle, *shape)
        mask = np.flip(mask, axis=0)
        mask = np.rot90(mask, -1)
    return mask
 def load_mask(self, image_id):
     info = self.image_info[image_id]
     annotations = info['annotations']
     #         print(image_id, annotations)
     count = len(annotations)
     if count == 0 or (count == 1 and annotations.values[0]
                       == ' -1'):  # empty annotation
         mask = np.zeros((info['orig_height'], info['orig_width'], 1),
                         dtype=np.uint8)
         class_ids = np.zeros((1, ), dtype=np.int32)
     else:
         mask = np.zeros((info['orig_height'], info['orig_width'], count),
                         dtype=np.uint8)
         class_ids = np.zeros((count, ), dtype=np.int32)
         for i, a in enumerate(annotations):
             mask[:, :, i] = rle2mask(a, info['orig_height'],
                                      info['orig_width']).T
             class_ids[i] = 1
     return mask.astype(bool), class_ids.astype(np.int32)
def main():
    test = 4
    encodingsV = pd.read_csv("train-rle.csv")
    encodings = encodingsV.values
    imageID = encodings[test, 0]
    #print(imageID)
    imageFront = re.search(r"\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.",
                           imageID).group(0)
    imageBack = re.split(r"\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.\d+\.\d+", imageID,
                         1)[1]
    imageTail = re.split(r"\d+\.\d+\.\d+\.", imageBack, 1)[1]
    imageTail = int(imageTail)
    imageTail2 = imageTail - 1
    imageTail3 = imageTail - 2
    imageBack = re.search(r"\d+\.\d+\.\d+\.", imageBack).group(0)

    #print(imageFront)
    #print(imageTail)
    string = './dicom-images-train/' + imageFront + "2." + imageBack + str(
        imageTail2) + '/' + imageFront + "3." + imageBack + str(
            imageTail3) + '/' + imageFront + "4." + imageBack + str(
                imageTail) + '.dcm'
    #print(string)
    img = dicom.read_file(string)
    print(img.pixel_array.shape)

    maskString = encodings[test, 1]

    mask = rle2mask(maskString, img.pixel_array.shape[0],
                    img.pixel_array.shape[1])

    img2 = Image.fromarray(mask).convert("RGBA")

    im = Image.fromarray(img.pixel_array).convert("RGBA")

    im.show()
    img2.show()

    alphaComposited = Image.blend(im, img2, 0.5)
    alphaComposited.show()
Example No. 24
from mask_functions import mask2rle, rle2mask
from PIL import Image
import numpy as np
import pandas as pd
import pydicom
import os
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots(1)
dataset_dir = ""
dataset_dir = os.path.join(dataset_dir, "train")
annotations = pd.read_csv(os.path.join(dataset_dir, "train-rle.csv"),
                          header=None)
annotations.columns = ['ImageId', 'ImageEncoding']
annotations = annotations[annotations.iloc[:, 1] != "-1"]
image_ids = annotations.iloc[:, 0].values
n_images = len(image_ids)
rles = annotations.iloc[:, 1].values
index = np.random.randint(0, n_images - 1)
img_id = image_ids[index]
encoding = rles[index]
image_path = os.path.join(dataset_dir, img_id + ".dcm")
image = pydicom.dcmread(image_path)
height = image.Rows
width = image.Columns
mask = rle2mask(encoding, width, height)
mask = mask.reshape(height, width, 1)

ax1.imshow(image.pixel_array)
rle_m1 = rle2mask(encoding, 1024, 1024)
ax1.imshow(rle_m1.T, alpha=0.2)

plt.show()
Example No. 25
    df_avg_sub = pd.DataFrame(columns=["ImageId", "EncodedPixels"])
    df_avg_sub_idx = 0  # counter for the index of the final dataframe

    # iterate over image IDs
    for idx, iid in enumerate(iid_list):
        # initialize prediction mask
        avg_mask = np.zeros((1024, 1024))
        # iterate over prediction dataframes
        for df_sub in df_sub_list:
            # extract rles for each image ID and submission dataframe
            rles = df_sub.loc[df_sub["ImageId"] == iid, "EncodedPixels"]
            # iterate over rles
            for rle in rles:
                # if rle is not -1, build prediction mask and add to average mask
                if "-1" not in str(rle):
                    avg_mask += rle2mask(rle, 1024, 1024) / float(
                        len(df_sub_list))
        # threshold the average mask
        pred_mask = (avg_mask >=
                     (min_solutions / float(len(df_sub_list)))).astype("uint8")
        # transform to rle
        if pred_mask.sum() > 0:
            im = PIL.Image.fromarray(
                (pred_mask * 255).astype(np.uint8)).resize((1024, 1024))
            im = np.asarray(im)
            rle = mask2rle(im, 1024, 1024)
        else:
            rle = "-1"
        # add a row in the final dataframe
        df_avg_sub.loc[df_avg_sub_idx] = [iid, rle]
        df_avg_sub_idx += 1  # increment index
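
For context, the variables this loop expects could be prepared roughly as below; the file names are placeholders, and min_solutions is the minimum number of submissions that must agree on a pixel.

import pandas as pd

df_sub_list = [pd.read_csv(p) for p in ["sub_fold0.csv", "sub_fold1.csv", "sub_fold2.csv"]]
iid_list = df_sub_list[0]["ImageId"].unique()
min_solutions = 2
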
Example No. 26
 X_train = np.zeros((len(train_fns), im_height, im_width, im_chan),
                    dtype=np.uint8)
 Y_train = np.zeros((len(train_fns), im_height, im_width, im_chan),
                    dtype=bool)
 print('Getting train images and masks ... ')
 sys.stdout.flush()
 for n, _id in tqdm_notebook(enumerate(train_fns), total=len(train_fns)):
     dataset = pydicom.read_file(_id)
     X_train[n] = np.expand_dims(dataset.pixel_array, axis=2)
     try:
         if '-1' in df_full.loc[_id.split('/')[-1][:-4], ' EncodedPixels']:
             Y_train[n] = np.zeros((1024, 1024, 1))
         else:
             if type(df_full.loc[_id.split('/')[-1][:-4],
                                 ' EncodedPixels']) == str:
                 Y_train[n] = np.expand_dims(rle2mask(
                     df_full.loc[_id.split('/')[-1][:-4], ' EncodedPixels'],
                     1024, 1024),
                                             axis=2)
             else:
                 Y_train[n] = np.zeros((1024, 1024, 1))
                 for x in df_full.loc[_id.split('/')[-1][:-4],
                                      ' EncodedPixels']:
                     Y_train[n] = Y_train[n] + np.expand_dims(
                         rle2mask(x, 1024, 1024), axis=2)
     except KeyError:
         print(
             f"Key {_id.split('/')[-1][:-4]} without mask, assuming healthy patient."
         )
         Y_train[n] = np.zeros(
             (1024, 1024, 1))  # Assume missing masks are empty masks.
 print('Done!')
Example No. 27
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.10000.1517875220.938530.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.10001.1517875220.930580.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.10002.1517875220.939397.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.10003.1517875220.942420.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.4904.1517875185.355709.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.1314.1517875167.222290.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.4440.1517875182.865105.dcm')
imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.4982.1517875185.837576.dcm')
#imagepath = os.path.join(datadir, '1.2.276.0.7230010.3.1.4.8323329.12743.1517875241.599591.dcm')

df = pd.read_csv(csvfile, header=None)
#df.columns['ImageId','Encodings']
dcm = pydicom.dcmread(imagepath)
pixel = dcm.pixel_array
samples = df.iloc[:, -1].values
rle_m = rle2mask(samples[8], 1024, 1024)
#annotation = df.loc[df['ImageId'] =='1.2.276.0.7230010.3.1.4.8323329.1314.1517875167.222290']
gamma = 2
kernel1 = np.ones((3,3),np.uint8)
# Elliptical Kernel

kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
kernel3 = np.array([[0, 0, 1, 0, 0],
                    [0, 1, 1, 1, 0],
                    [1, 1, 1, 1, 1],
                    [0, 1, 1, 1, 0],
                    [0, 0, 1, 0, 0]]).astype(np.uint8)

def enhance_gamma(image, gamma):

    max_pixel = np.max(image)
Example No. 28
def plot_with_mask_and_bbox(file_path, mask_encoded_list, figsize=(20, 10)):

    """Plot a chest X-ray image with the mask (annotation) and without it.

    Args:
        file_path (str): file path of the DICOM data.
        mask_encoded_list (list): list of RLE strings for this image.

    Returns:
        None. Plots the image with and without the mask.
    """
    import cv2

    pixel_array = pydicom.dcmread(file_path).pixel_array

    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
    clahe_pixel_array = clahe.apply(pixel_array)

    # use the masking function to decode RLE
    mask_decoded_list = [
        rle2mask(mask_encoded, 1024, 1024).T
        for mask_encoded in mask_encoded_list
    ]

    fig, ax = plt.subplots(nrows=1, ncols=3, sharey=True, figsize=figsize)

    # print out the xray
    ax[0].imshow(pixel_array, cmap=plt.cm.bone)
    # print the bounding box
    for mask_decoded in mask_decoded_list:
        # print out the annotated area
        ax[0].imshow(mask_decoded, alpha=0.3, cmap="Reds")
        rmin, rmax, cmin, cmax = bounding_box(mask_decoded)
        bbox = patches.Rectangle((cmin, rmin),
                                 cmax - cmin,
                                 rmax - rmin,
                                 linewidth=1,
                                 edgecolor='r',
                                 facecolor='none')
        ax[0].add_patch(bbox)
    ax[0].set_title('With Mask')

    # plot image with clahe processing with just bounding box and no mask
    ax[1].imshow(clahe_pixel_array, cmap=plt.cm.bone)
    for mask_decoded in mask_decoded_list:
        rmin, rmax, cmin, cmax = bounding_box(mask_decoded)
        bbox = patches.Rectangle((cmin, rmin),
                                 cmax - cmin,
                                 rmax - rmin,
                                 linewidth=1,
                                 edgecolor='r',
                                 facecolor='none')
        ax[1].add_patch(bbox)
    ax[1].set_title('Without Mask - Clahe')

    # plot plain xray with just bounding box and no mask
    ax[2].imshow(pixel_array,
                 cmap=plt.cm.bone)  #draws an image on the current figure
    for mask_decoded in mask_decoded_list:
        rmin, rmax, cmin, cmax = bounding_box(mask_decoded)
        bbox = patches.Rectangle((cmin, rmin),
                                 cmax - cmin,
                                 rmax - rmin,
                                 linewidth=1,
                                 edgecolor='r',
                                 facecolor='none')
        ax[2].add_patch(bbox)
    ax[2].set_title('Without Mask')
    plt.show()  #plt.show() displays the figure
Example No. 29
    def __getitem__(self, idx):
        try:
            info = self.image_info[idx]
        except KeyError:
            raise StopIteration()
        img_path = info["image_path"]
        ds = pdc.dcmread(img_path)
        img = ds.pixel_array
        img = Image.fromarray(img)
        width, height = img.size
        img = img.resize((self.default_width, self.default_height),
                         resample=Image.BILINEAR)
        img = np.array(img)

        if 'annotations' in info.keys():
            mask = None
            has_pneumothorax = False
            for rle_mask in info['annotations']:
                tmp_mask = rle2mask(rle_mask, width, height)
                tmp_mask = Image.fromarray(tmp_mask.T)
                tmp_mask = tmp_mask.resize(
                    (self.default_width, self.default_height),
                    resample=Image.BILINEAR)
                # tmp_mask = np.expand_dims(tmp_mask, axis=0)
                if mask is None:
                    mask = tmp_mask
                else:
                    mask = np.maximum(mask, tmp_mask)

                if rle_mask != '-1':
                    has_pneumothorax = True

                mask = np.array(mask)

            if self.augment:
                img, mask = self.augment_image_and_mask(img, mask)

            mask = mask / 255
            mask = torch.as_tensor(mask, dtype=torch.float32)
            mask.unsqueeze_(0)
            mask = self.resize_th_3d_image(
                mask, (self.get_max_height(), self.get_max_width()))

            target = {}
            target["mask"] = mask
            # target["class"] = torch.from_numpy(np.expand_dims(np.array(has_pneumothorax, dtype=np.float32), axis=0))
            target["class"] = torch.unsqueeze(mask.max() > 0,
                                              dim=-1).type(torch.float32)

        img = transforms.ToTensor()(img)

        img = self.resize_th_3d_image(
            img, (self.get_max_height(), self.get_max_width()))

        # 1C to 3C to support pretrained imagenet models
        img = img.repeat(3, 1, 1)
        # img = self.normalize(img)

        view_pos = ds.get_item((0x0018, 0x5101)).value
        try:
            view_pos = view_pos.decode("utf-8").strip()
        except AttributeError as e:
            pass
        view_pos = self.view_pos_lut[view_pos]

        sex = ds.get_item((0x0010, 0x0040)).value.decode("utf-8").strip()
        sex = self.sex_lut[sex]

        age = ds.get_item((0x0010, 0x1010)).value.decode("utf-8").strip()
        age = self.age_converter(age)

        # attributes = [float(view_pos), float(sex), float(age)]
        # attributes = torch.from_numpy(np.array(attributes, dtype=np.float32)).view(len(attributes), 1, 1)
        # attributes = attributes.repeat(1, img.shape[1], img.shape[2])

        # total_input = torch.cat([img, attributes], dim=0)

        input = {
            'scan': img,
            # 'attributes': attributes,
            # 'total_input': total_input,
            'view_pos': view_pos,
            'sex': sex,
            'age': age,
            'image_path': img_path,
            'image_id': info['image_id'],
            'idx': idx
        }

        if 'annotations' in info.keys():
            return input, target
        else:
            return input
Example No. 30
imgpath = path + "\\" + "sample Images" + "\\" + "sampleConvertedImages" + "\\" + id0 + ".png"

img = Image.open(imgpath)
w, h = img.size

# mask with fastai
'''
    1) grab csv rle label 
    2) label = rle_encode(csv rle label)
    3) mask = open_mask_rle(label, shape=(1024, 1024)).resize((1,1024,1024)))


'''

test = rle2mask(rle0, w, h)
label = vs.rle_encode(test)
mask = vs.open_mask_rle(label, shape=(1024, 1024)).resize((1, 1024, 1024))

w = 1024
h = 1024


def getLabelFunc(x):

    if (len(annoteDF['rle'][x]) < 3):
        #return(vs.open_mask_rle('', shape=(1024, 1024)).resize((1,1024,1024)))
        return (np.zeros([1024, 1024]))
    else:
        temp = rle2mask(annoteDF['rle'][x], 1024, 1024)
        return (temp)