Example #1
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        image_path = self.file_paths[idx]

        image = load_rgb(image_path)

        image = self.transform(image=image)["image"]

        return {"torched_image": tensor_from_rgb_image(image), "image_path": str(image_path)}
Example #2
    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        image = load_rgb(self.image_paths[index])
        image = self.transform(image=image)["image"]

        mask = generate_stroke_mask((image.shape[1], image.shape[0]))
        return {
            "image": tensor_from_rgb_image(image),
            "mask": torch.unsqueeze(torch.from_numpy(mask), 0)
        }
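
For context: every `__getitem__` in these examples is a method on a `torch.utils.data.Dataset` subclass. A minimal sketch of the surrounding class for Example #2 (the class name and constructor arguments are assumptions, not the original code):

from pathlib import Path
from typing import Dict, List

import torch
from torch.utils.data import Dataset


class InpaintingDataset(Dataset):  # hypothetical name
    def __init__(self, image_paths: List[Path], transform) -> None:
        self.image_paths = image_paths
        self.transform = transform  # an albumentations Compose

    def __len__(self) -> int:
        return len(self.image_paths)

    def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
        ...  # body as in Example #2 above
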
Example #3
    def __getitem__(self, index: int) -> Dict[str, Any]:
        labels = self.labels[index]

        file_name = labels["file_name"]

        image = load_rgb(self.image_path / file_name)

        image_height, image_width = image.shape[:2]

        # each annotation row has the format:
        # 4 values: box, 10 values: landmarks, 1 value: landmarks present flag
        num_annotations = 4 + 10 + 1
        annotations = np.zeros((0, num_annotations))

        for label in labels["annotations"]:
            annotation = np.zeros((1, num_annotations))

            x_min, y_min, x_max, y_max = label["bbox"]

            x_min = np.clip(x_min, 0, image_width - 1)
            y_min = np.clip(y_min, 0, image_height - 1)
            x_max = np.clip(x_max, x_min + 1, image_width - 1)
            y_max = np.clip(y_max, y_min + 1, image_height - 1)

            annotation[0, :4] = x_min, y_min, x_max, y_max

            if "landmarks" in label and label["landmarks"]:
                landmarks = np.array(label["landmarks"])
                # landmarks
                annotation[0, 4:14] = landmarks.reshape(-1, 10)
                if annotation[0, 4] < 0:
                    annotation[0, 14] = -1
                else:
                    annotation[0, 14] = 1

            annotations = np.append(annotations, annotation, axis=0)

        if self.rotate90:
            image, annotations = random_rotate_90(image, annotations.astype(int))

        image, annotations = self.preproc(image, annotations)

        image = self.transform(image=image)["image"]

        return {
            "image": tensor_from_rgb_image(image),
            "annotation": annotations.astype(np.float32),
            "file_name": file_name,
        }
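
Note that each image in Example #3 yields a variable number of annotation rows, so batches cannot go through PyTorch's default collate. A minimal custom collate sketch (only the dict keys come from the code above; the function name is an assumption):

from typing import Any, Dict, List

import torch


def detection_collate(batch: List[Dict[str, Any]]) -> Dict[str, Any]:
    # Images are fixed-size after the transform, so they stack into one tensor;
    # annotations vary in length per image, so they stay a list.
    return {
        "image": torch.stack([sample["image"] for sample in batch]),
        "annotation": [torch.from_numpy(sample["annotation"]) for sample in batch],
        "file_name": [sample["file_name"] for sample in batch],
    }
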
Example #4
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        idx = idx % len(self.samples)

        image_path, class_id = self.samples[idx]

        image = load_rgb(image_path, lib="cv2")

        # apply augmentations
        image = self.transform(image=image)["image"]

        return {
            "image_id": image_path.stem,
            "features": tensor_from_rgb_image(image),
            "targets": class_id
        }
Example #5
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        image_path = self.file_paths[idx]

        image = load_rgb(image_path)
        height, width = image.shape[:2]

        image = self.transform(image=image)["image"]
        max_side = max(image.shape[:2])
        pad_dict = pad_to_size((max_side, max_side), image)

        return {
            "torched_image": tensor_from_rgb_image(pad_dict["image"]),
            "image_path": str(image_path),
            "pads": pad_dict["pads"],
            "original_width": width,
            "original_height": height,
        }
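
The `pads` returned here are what lets downstream code undo the padding (Example #10 does this with `unpad_from_size`). A round-trip sketch, assuming the `iglovikov_helper_functions` implementations of `pad_to_size` / `unpad_from_size`:

import numpy as np
from iglovikov_helper_functions.utils.image_utils import pad_to_size, unpad_from_size

image = np.zeros((300, 400, 3), dtype=np.uint8)

padded = pad_to_size(target_size=(512, 512), image=image)
restored = unpad_from_size(padded["pads"], image=padded["image"])

assert restored["image"].shape == image.shape
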
Example #6
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        idx = idx % len(self.image_paths)

        image_path = self.image_paths[idx]

        image = load_rgb(image_path, lib="cv2")

        # apply augmentations
        image = self.transform(image=image)["image"]

        orientation = random.randint(0, 3)
        image = np.ascontiguousarray(np.rot90(image, orientation))

        return {
            "image_id": image_path.stem,
            "features": tensor_from_rgb_image(image),
            "targets": orientation
        }
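
Example #6 is a self-supervised orientation task: the target is the number of 90-degree rotations applied to the image. A quick sketch of how a predicted class undoes the augmentation (the data here is synthetic):

import numpy as np

image = np.random.randint(0, 256, (64, 96, 3), dtype=np.uint8)

orientation = 3  # stand-in for the class the model predicts (0..3)
rotated = np.ascontiguousarray(np.rot90(image, orientation))

# The inverse quarter-turn rotation restores the original layout.
restored = np.ascontiguousarray(np.rot90(rotated, -orientation))
assert np.array_equal(restored, image)
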
Example #7
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        image_path, mask_path = self.samples[idx]

        image = load_rgb(image_path)
        mask = load_grayscale(mask_path)

        # apply augmentations
        sample = self.transform(image=image, mask=mask)
        image, mask = sample["image"], sample["mask"]

        mask = (mask > 0).astype(np.uint8)

        mask = torch.from_numpy(mask)

        return {
            "image_id": image_path.stem,
            "features": tensor_from_rgb_image(image),
            "masks": torch.unsqueeze(mask, 0).float(),
        }
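
Example #7's output keys (`features` / `masks`) match what a typical training loop expects from a `DataLoader`; a usage sketch (the dataset construction itself is assumed):

from torch.utils.data import DataLoader

# `dataset` is an instance of the segmentation Dataset from Example #7.
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)

for batch in loader:
    features = batch["features"]  # (B, 3, H, W) float tensor
    masks = batch["masks"]        # (B, 1, H, W) float tensor
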
Example #8
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        image_path = self.file_names[idx]

        image = load_rgb(image_path)

        height, width = image.shape[:2]

        # Resize
        resizer = albu.Compose([albu.LongestMaxSize(max_size=768, p=1)], p=1)
        image = resizer(image=image)["image"]

        # pad
        image, pads = pad(image, factor=768)

        # apply augmentations
        image = self.transform(image=image)["image"]

        return {
            "image_id": image_path.stem,
            "features": tensor_from_rgb_image(image),
            "pads": np.array(pads).T,
            "height": height,
            "width": width,
        }
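
One design note on Example #8: the `albu.Compose` resizer is rebuilt on every `__getitem__` call. Building it once in the constructor avoids the repeated setup; a hedged sketch (class and argument names assumed):

import albumentations as albu


class InferenceDataset:  # hypothetical
    def __init__(self, file_names, transform, max_size: int = 768) -> None:
        self.file_names = file_names
        self.transform = transform
        # Built once here instead of on every __getitem__ call.
        self.resizer = albu.Compose([albu.LongestMaxSize(max_size=max_size, p=1)], p=1)
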
Example #9
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        image_path = self.file_paths[idx]

        image = np.array(Image.open(image_path))

        image_height, image_width = image.shape[:2]

        image = self.resize(image=image)["image"]

        padded = pad_to_size(target_size=(self.max_size, self.max_size), image=image)

        image = padded["image"]
        pads = padded["pads"]

        image = self.transform(image=image)["image"]

        return {
            "torched_image": tensor_from_rgb_image(image),
            "image_path": str(image_path),
            "pads": np.array(pads),
            "image_height": image_height,
            "image_width": image_width,
        }
Example #10
    def predict_jsons(
        self, image: np.ndarray, confidence_threshold: float = 0.7, nms_threshold: float = 0.4
    ) -> List[Dict[str, Union[List, float]]]:
        with torch.no_grad():
            original_height, original_width = image.shape[:2]

            scale_landmarks = torch.from_numpy(np.tile([self.max_size, self.max_size], 5)).to(self.device)
            scale_bboxes = torch.from_numpy(np.tile([self.max_size, self.max_size], 2)).to(self.device)

            transformed_image = self.transform(image=image)["image"]

            padded = pad_to_size(target_size=(self.max_size, self.max_size), image=transformed_image)

            pads = padded["pads"]

            torched_image = tensor_from_rgb_image(padded["image"]).to(self.device)

            loc, conf, land = self.model(torched_image.unsqueeze(0))

            conf = F.softmax(conf, dim=-1)

            annotations: List[Dict[str, Union[List, float]]] = []

            boxes = decode(loc.data[0], self.prior_box, self.variance)

            boxes *= scale_bboxes
            scores = conf[0][:, 1]

            landmarks = decode_landm(land.data[0], self.prior_box, self.variance)
            landmarks *= scale_landmarks

            # ignore low scores
            valid_index = torch.where(scores > confidence_threshold)[0]
            boxes = boxes[valid_index]
            landmarks = landmarks[valid_index]
            scores = scores[valid_index]

            # Sort from high to low
            order = scores.argsort(descending=True)
            boxes = boxes[order]
            landmarks = landmarks[order]
            scores = scores[order]

            # do NMS
            keep = nms(boxes, scores, nms_threshold)
            boxes = boxes[keep, :].int()

            if boxes.shape[0] == 0:
                return [{"bbox": [], "score": -1, "landmarks": []}]

            landmarks = landmarks[keep]

            scores = scores[keep].cpu().numpy().astype(np.float64)
            boxes = boxes.cpu().numpy()
            landmarks = landmarks.cpu().numpy()
            landmarks = landmarks.reshape([-1, 2])

            unpadded = unpad_from_size(pads, bboxes=boxes, keypoints=landmarks)

            resize_coeff = max(original_height, original_width) / self.max_size

            boxes = (unpadded["bboxes"] * resize_coeff).astype(int)
            landmarks = (unpadded["keypoints"].reshape(-1, 10) * resize_coeff).astype(int)

            for box_id, bbox in enumerate(boxes):
                x_min, y_min, x_max, y_max = bbox

                x_min = np.clip(x_min, 0, original_width - 1)
                x_max = np.clip(x_max, x_min + 1, original_width - 1)

                if x_min >= x_max:
                    continue

                y_min = np.clip(y_min, 0, original_height - 1)
                y_max = np.clip(y_max, y_min + 1, original_height - 1)

                if y_min >= y_max:
                    continue

                annotations += [
                    {
                        "bbox": bbox.tolist(),
                        "score": scores[box_id],
                        "landmarks": landmarks[box_id].reshape(-1, 2).tolist(),
                    }
                ]

            return annotations
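
A minimal usage sketch for `predict_jsons`, following the public API of the `retinaface` package this method appears to come from (the model name and package layout are assumptions):

import numpy as np
from retinaface.pre_trained_models import get_model

model = get_model("resnet50_2020-07-20", max_size=2048)
model.eval()

image = np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)  # stand-in for a real photo
for face in model.predict_jsons(image, confidence_threshold=0.7, nms_threshold=0.4):
    print(face["bbox"], face["score"], face["landmarks"])
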
Example #11
st.title("Segment glomeruli")
# What about a TIFF image?
uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png"])

if uploaded_file is not None:
    original_image = np.array(Image.open(uploaded_file))
    st.image(original_image, caption="Before", use_column_width=True)
    st.write("")
    st.write("Detecting glomeruli...")

    original_height, original_width = original_image.shape[:2]
    image = transform(image=original_image)["image"]
    padded_image, pads = pad(image, factor=MAX_SIZE, border=cv2.BORDER_CONSTANT)

    x = torch.unsqueeze(tensor_from_rgb_image(padded_image), 0)

    with torch.no_grad():
        prediction = model(x)[0][0]

    mask = (prediction > 0).cpu().numpy().astype(np.uint8)
    mask = unpad(mask, pads)
    mask = cv2.resize(
        mask, (original_width, original_height), interpolation=cv2.INTER_NEAREST
    )
    mask_3_channels = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
    dst = cv2.addWeighted(
        original_image, 1, (mask_3_channels * (0, 255, 0)).astype(np.uint8), 0.5, 0
    )

    st.image(mask * 255, caption="Mask", use_column_width=True)
Example #12
import sys
import timeit

import albumentations as albu
import cv2
import numpy as np
import torch

# Helpers assumed to come from the iglovikov_helper_functions package.
from iglovikov_helper_functions.dl.pytorch.utils import tensor_from_rgb_image
from iglovikov_helper_functions.utils.image_utils import load_rgb, pad, unpad

from midv500models.pre_trained_models import create_model

start = timeit.default_timer()

model = create_model('Unet_resnet34_2020-05-19')
model.eval()

image = load_rgb(sys.argv[1])

transform = albu.Compose([albu.Normalize(p=1)], p=1)
padded_image, pads = pad(image, factor=32, border=cv2.BORDER_CONSTANT)
x = transform(image=padded_image)['image']
x = torch.unsqueeze(tensor_from_rgb_image(x), 0)

with torch.no_grad():
    prediction = model(x)[0][0]

mask = (prediction > 0).cpu().numpy().astype(np.uint8)
mask = unpad(mask, pads)

dst = cv2.addWeighted(image, 1, (cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB) *
                                 (0, 255, 0)).astype(np.uint8), 0.5, 0)

combinedImages = np.hstack((image, dst))