Example #1
    def process(self, images: Sequence[Image]) -> Dict[str, Image]:
        # Based on:
        # https://www.pyimagesearch.com/2016/01/11/opencv-panorama-stitching/
        outputs = {}
        if len(images) != 2:
            raise ValueError("Stitching requires 2 images, not " +
                             str(len(images)))

        img1 = images[0].get(ImageType.OPENCV)
        img2 = images[1].get(ImageType.OPENCV)
        stitched, vis = self.stitch(img1, img2)

        if stitched is not None:
            outputs["stitched"] = Image(stitched, opencv=True)
        if vis is not None:
            outputs["vis"] = Image(stitched, opencv=True)
        return outputs
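
The `stitch` helper called above is not shown in this example. Below is a minimal sketch of what such a helper might look like, following the linked pyimagesearch approach (ORB keypoints, ratio-test matching, RANSAC homography). Only the name and the (stitched, vis) return convention come from the call site; the body and parameters are assumptions.

import cv2
import numpy as np

def stitch(img1, img2, ratio=0.75, reproj_thresh=4.0):
    # Hypothetical helper; reconstructs the call site's (stitched, vis) contract.
    orb = cv2.ORB_create()
    kps1, des1 = orb.detectAndCompute(img1, None)
    kps2, des2 = orb.detectAndCompute(img2, None)
    if des1 is None or des2 is None:
        return None, None

    # brute-force Hamming matching with Lowe's ratio test
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = []
    for pair in matcher.knnMatch(des1, des2, k=2):
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            matches.append(pair[0])
    if len(matches) < 4:
        return None, None  # too few matches to estimate a homography

    pts1 = np.float32([kps1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    pts2 = np.float32([kps2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(pts2, pts1, cv2.RANSAC, reproj_thresh)

    # warp the second image into the first image's plane, then paste the first on top
    stitched = cv2.warpPerspective(
        img2, H, (img1.shape[1] + img2.shape[1], img1.shape[0]))
    stitched[0:img1.shape[0], 0:img1.shape[1]] = img1

    vis = cv2.drawMatches(img1, kps1, img2, kps2, matches, None)
    return stitched, vis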
Example #2
    def process(self, images: Sequence[Image]) -> Dict[str, Image]:
        outputs = {}
        for (i, image) in enumerate(images):
            if image is not None:
                img = image.get(ImageType.OPENCV)
                # grayscale -> edge-preserving smoothing -> Canny hysteresis edges
                img_bw = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                img_bil = cv2.bilateralFilter(img_bw, 7, 50, 50)
                img_out = cv2.Canny(img_bil, self.thr1, self.thr2)

                outputs[str(i)] = Image(img_out, opencv=True)

        return outputs
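
The hysteresis thresholds `thr1`/`thr2` are instance attributes set elsewhere. When hand-tuning them is impractical, a common heuristic derives both from the median intensity of the smoothed grayscale image; a sketch, where the helper name and the `sigma` default are assumptions:

import numpy as np

def auto_canny_thresholds(img_gray, sigma=0.33):
    # Hypothetical helper: centre the Canny thresholds around the median intensity.
    median = np.median(img_gray)
    thr1 = int(max(0, (1.0 - sigma) * median))
    thr2 = int(min(255, (1.0 + sigma) * median))
    return thr1, thr2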
Example #3
    def process(self, images: Sequence[Image]) -> Dict[str, Image]:
        # Based on:
        # https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
        outputs = {}

        for (i, image) in enumerate(images):
            key = str(i)
            if image is not None:

                img = image.get(ImageType.OPENCV)
                img_grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                # on first image since reset, init
                if key not in self.img_prev:
                    self.img_prev[key] = img_grey
                    self.mask[key] = np.zeros_like(img)
                    self.points_prev[key] = cv2.goodFeaturesToTrack(
                        img_grey, mask=None, **OpticalFlow.FEATURE_PARAMS)

                # track on all future frames
                else:
                    points, status, err = cv2.calcOpticalFlowPyrLK(
                        self.img_prev[key], img_grey, self.points_prev[key],
                        None, **OpticalFlow.LK_PARAMS)

                    if points is not None:

                        points_good_curr = points[status == 1]
                        points_good_prev = self.points_prev[key][status == 1]

                        # draw
                        for j, (curr, prev) in enumerate(
                                zip(points_good_curr, points_good_prev)):
                            a, b = curr.ravel()
                            c, d = prev.ravel()
                            # drawing functions require integer pixel coordinates
                            self.mask[key] = cv2.line(
                                self.mask[key], (int(a), int(b)), (int(c), int(d)),
                                OpticalFlow.COLOURS[j % 100].tolist(), 2)
                            img = cv2.circle(
                                img, (int(a), int(b)), 5,
                                OpticalFlow.COLOURS[j % 100].tolist(), -1)
                        img = cv2.add(img, self.mask[key])

                        # update for next frame
                        self.points_prev[key] = points_good_curr.reshape(
                            -1, 1, 2)

                    self.img_prev[key] = img_grey

                outputs[key] = Image(img, opencv=True)

        return outputs
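
The class constants this stage relies on are defined elsewhere. Values matching the linked OpenCV tutorial would look like the sketch below; the exact numbers are assumptions, but `COLOURS` must hold 100 entries to match the `j % 100` indexing above.

import cv2
import numpy as np

class OpticalFlow:
    # Shi-Tomasi corner detection parameters (tutorial defaults)
    FEATURE_PARAMS = dict(maxCorners=100, qualityLevel=0.3,
                          minDistance=7, blockSize=7)
    # Lucas-Kanade optical flow parameters (tutorial defaults)
    LK_PARAMS = dict(winSize=(15, 15), maxLevel=2,
                     criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
                               10, 0.03))
    # one random BGR colour per track
    COLOURS = np.random.randint(0, 255, (100, 3))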
Example #4
    def process(self, images: Sequence[Image]) -> Dict[str, Image]:
        outputs = {}
        for (i, image) in enumerate(images):
            if image is not None:
                img = image.get(ImageType.OPENCV)
                img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                # detect
                faces = Faces.FACE_CASCADE.detectMultiScale(
                    img_gray, scaleFactor=1.3, minNeighbors=5)

                # draw
                for x, y, w, h in faces:
                    img = cv2.rectangle(img, (x, y), (x+w, y+h), Faces.BOX_COLOUR, 2)

                outputs[str(i)] = Image(img, opencv=True)

        return outputs
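
`Faces.FACE_CASCADE` and `Faces.BOX_COLOUR` are class constants defined outside this snippet. A plausible definition using the Haar cascade bundled with opencv-python; the specific cascade file and colour are assumptions:

import cv2

class Faces:
    FACE_CASCADE = cv2.CascadeClassifier(
        cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    BOX_COLOUR = (255, 0, 0)  # BGR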
Example #5
    def process(
        self,
        image: Image,
        short=512,
        max_size=640,
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225)
    ) -> Dict[str, Any]:
        if not self.model:
            raise RuntimeError(
                "[mxnet] No model has been loaded. Run load() first.")

        # preprocess
        np_img = image.asnumpy()
        mx_img = mx.nd.array(np_img).astype('uint8')
        mx_img = gcv.data.transforms.image.resize_short_within(
            mx_img, short=short, max_size=max_size, mult_base=32)
        ts_img = mx.nd.image.to_tensor(mx_img).copyto(self.ctx)
        ts_img = mx.nd.image.normalize(ts_img, mean=mean,
                                       std=std).expand_dims(0)

        # run model
        if hasattr(self.model, "predict"):
            # Some models have this method (i.e., segmentation models), some don't
            outputs = self.model.predict(ts_img)
        else:
            outputs = self.model(ts_img)

        if self.model_type is ModelType.CLASSIFICATION:
            return {"class_ids": outputs}
        elif self.model_type is ModelType.DETECTION:
            return {
                "class_ids": outputs[0],
                "scores": outputs[1],
                "bounding_boxes": outputs[2]
            }
        elif self.model_type is ModelType.SEGMENTATION:
            return {"mask": outputs}
        else:
            return {"outputs": outputs}
Example #6
    def load(self, info):
        info['apiRoot'].images = Image()
Example #7
    def visualise(self,
                  image: Image,
                  metadata: Dict[str, Any],
                  blend=0.5,
                  show_labels=True):
        np_img = image.asnumpy()
        mx_img = mx.nd.array(np_img).astype('uint8')

        img = None
        if all(key in metadata
               for key in ["bounding_boxes", "scores", "class_ids"]):
            # object detection results
            bounding_boxes = metadata["bounding_boxes"]
            scores = metadata["scores"]
            class_ids = metadata["class_ids"]

            img = gcv.utils.viz.cv_plot_bbox(mx_img,
                                             bounding_boxes[0],
                                             scores[0],
                                             class_ids[0],
                                             class_names=self.model.classes)

        elif "class_ids" in metadata:
            # classification results
            class_ids = metadata["class_ids"]
            scores = mx.nd.softmax(class_ids)[0].asnumpy()

            top_ids = mx.nd.topk(class_ids, k=3)[0].astype("int").asnumpy()
            captions = [
                "{}: {:.3}".format(self.model.classes[class_id], scores[class_id])
                for class_id in top_ids
            ]
            img = draw_captions(np_img, captions) if show_labels else np_img

        elif "mask" in metadata:
            # segmentation results
            mask = metadata["mask"]
            img_resized = gcv.data.transforms.image.imresize(
                mx_img, mask.shape[3], mask.shape[2]).asnumpy()

            predict = mx.nd.argmax(mask, 1)[0].astype("int").asnumpy()
            mask_colour = gcv.utils.viz.get_color_pallete(
                predict, "pascal_voc")
            mask_colour = np.array(
                mask_colour.convert("RGB"))  # PIL with palette -> numpy RGB
            img = cv2.addWeighted(img_resized, 1 - blend, mask_colour, blend,
                                  0)

            if show_labels:
                classfreqs = list(zip(*np.unique(predict, return_counts=True)))
                classfreqs = sorted(classfreqs,
                                    key=lambda cf: cf[1],
                                    reverse=True)
                # NOTE: a bug in gluoncv currently returns a property of a class, instead of an instance of the class,
                #       for all Segmentation models; we need to obtain the property value (passing a bogus self)
                model_classes = self.model.classes.fget(0)
                # skip the most frequent class (typically background), show the next four
                captions = [
                    "{} ({}px)".format(model_classes[class_id], freq)
                    for (class_id, freq) in classfreqs[1:5]
                ]
                img = draw_captions(img, captions)

        else:
            raise ValueError(
                "Don't know how to visualise metadata with keys " +
                ",".join(metadata.keys()))

        img_result = Image(img, opencv=False)
        return img_result
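
`draw_captions` is a project helper that is not shown here. A minimal sketch of what it might do with cv2.putText; only the name and the (image, captions) call shape come from the code above:

import cv2

def draw_captions(img, captions):
    # Hypothetical implementation: one caption per line, top-left corner.
    out = img.copy()
    for i, caption in enumerate(captions):
        cv2.putText(out, caption, (10, 30 + 30 * i),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    return out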
Example #8
    def get(self, latest=False):
        # Wait for next image
        frame = self.image_queue.get()

        return Image(frame, opencv=True)
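
The `latest` flag is accepted but unused in this snippet (and in the next one). One way to honour it is to drain the queue and keep only the newest frame; a sketch assuming a standard `queue.Queue`, with the `Image` wrapping omitted:

import queue

class ImageSource:
    def __init__(self):
        self.image_queue = queue.Queue()

    def get(self, latest=False):
        frame = self.image_queue.get()  # block until the next image arrives
        if latest:
            # discard any backlog so only the most recent frame is returned
            while True:
                try:
                    frame = self.image_queue.get_nowait()
                except queue.Empty:
                    break
        return frame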
Example #9
    def get(self, latest=False):
        # Wait for next image
        img = self.image_queue.get()

        return Image(img, opencv=True)
Example #10
    def process_calibration(self, images: Sequence[Image]):
        """
        Calibrate 2+ images: find intrinsic + extrinsic matrices and R/T between the camera pairs
        """
        outputs = {str(i): image for (i, image) in enumerate(images)}

        # step 1: press space to start
        if self.stage == StereoVision.STAGES["CALIBRATE_WAIT"]:
            print("[calibration] Grab your chessboard pattern, and press space to start taking snapshots.")
            print("              The chessboard needs to be fully visible in *all* images.")

        # step 2: take snapshots every 3s
        elif self.snapshot_count < 8:

            # step 2a: waiting for the next snapshot moment
            if time.time() - self.last_snapshot < 3.000:
                pass  # outputs already contain the unmodified input images

            # step 2b: take snapshot (if delay passed and the pattern was found for all images)
            else:
                all_corners = {}
                all_snapshots = {}
                for i, image in enumerate(images):
                    key = str(i)
                    img = image.get(ImageType.OPENCV)
                    print(i, img.shape)
                    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                    ret, corners = cv2.findChessboardCornersSB(
                        img_gray, StereoVision.CHESSBOARD_SIZE, None)
                    if ret:
                        all_corners[key] = corners
                        all_snapshots[key] = img

                        img = cv2.drawChessboardCorners(
                            img, StereoVision.CHESSBOARD_SIZE, corners, ret)
                        img = cv2.rectangle(img, (1, 1), (img.shape[1]-2, img.shape[0]-2), (0, 255, 0), 2)
                    else:
                        img = cv2.rectangle(img, (1, 1), (img.shape[1]-2, img.shape[0]-2), (0, 0, 255), 2)

                    outputs[key] = Image(img, opencv=True)

                if len(all_corners) == len(images):
                    # found corners in all images; saving
                    self.snapshot_count += 1
                    self.last_snapshot = time.time()

                    for key in all_corners:
                        self.corners[key].append(all_corners[key])
                        self.snapshots[key].append(all_snapshots[key])

                    print("[calibration] Snapshot {}! Waiting 3s for the next snapshot..".format(
                        self.snapshot_count))

        # step 3: calculate calibration parameters
        elif self.snapshot_count >= 8:
            print("[calibration] Calculating calibration parameters")

            # calibrate individual cameras
            SIZEX, SIZEY = StereoVision.CHESSBOARD_SIZE
            world_points_frame = np.zeros((SIZEX*SIZEY, 3), np.float32)
            world_points_frame[:, :2] = np.mgrid[0:SIZEX, 0:SIZEY].T.reshape(-1, 2) * StereoVision.SQUARE_SIZE
            world_points = [world_points_frame] * 8  # one copy per snapshot

            for key in self.corners:
                img_points = self.corners[key]
                img_size = self.snapshots[key][0].shape[1::-1]

                camparams = mvutils.calibrate_camera(
                    world_points, img_points, img_size)
                self.camparams[key] = camparams

                pprint(camparams)

            # calibrate camera pairs (pairwise, chained in given order)
            for index_l, index_r in zip(range(0, len(images)-1), range(1, len(images))):
                key_l, key_r = str(index_l), str(index_r)

                camparams_img_l = self.camparams[key_l]
                camparams_img_r = self.camparams[key_r]
                img_size = self.snapshots[key_l][0].shape[1::-1]
                img_size_r = self.snapshots[key_r][0].shape[1::-1]
                if img_size != img_size_r:
                    print("ERROR: images should be of the same size; current sizes:", img_size, img_size_r)

                pairparams = mvutils.calibrate_camera_pair(
                    world_points, self.corners[key_l], self.corners[key_r],
                    camparams_img_l, camparams_img_r, img_size)
                self.pairparams[(key_l, key_r)] = pairparams

                print((key_l, key_r))
                pprint(pairparams)

            # save to disk
            self.save_params()

            self.stage = StereoVision.STAGES["RUNNING"]

        return outputs
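
The `mvutils` helpers are project-specific and not shown. Judging by the dictionary keys consumed here and in process_stereo below, `calibrate_camera` is presumably a thin wrapper over cv2.calibrateCamera; a sketch in which everything beyond those keys is an assumption:

import cv2

def calibrate_camera(world_points, img_points, img_size):
    # Hypothetical reconstruction of the mvutils helper used above.
    rms, intrinsic, distortion, rvecs, tvecs = cv2.calibrateCamera(
        world_points, img_points, img_size, None, None)
    # refined matrix + valid-pixel ROI for undistortion (alpha=1 keeps all pixels)
    intrinsic_crop, intrinsic_roi = cv2.getOptimalNewCameraMatrix(
        intrinsic, distortion, img_size, 1, img_size)
    return {
        "rms": rms,
        "intrinsic": intrinsic,
        "distortion": distortion,
        "intrinsic_crop": intrinsic_crop,
        "intrinsic_roi": intrinsic_roi,
    }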
Example #11
    def process_stereo(self, images: Sequence[Image]):
        outputs = {}

        if len(self.camparams) != len(images):
            print("Error: number of images doesn't match the number of saved parameters.")
            print("Recalibrate (press 'c') if old parameters were loaded from the cache.")
            return {str(i): image for (i, image) in enumerate(images)}

        # return undistorted images
        for i, image in enumerate(images):
            key = str(i)
            img = image.get(ImageType.OPENCV)
            camparams = self.camparams[key]

            img_undistort = cv2.undistort(
                img, camparams["intrinsic"], camparams["distortion"],
                None, camparams["intrinsic_crop"])
            #x, y, w, h = camparams["intrinsic_roi"]
            #img_undistort = img_undistort[y:y+h, x:x+w]
            outputs[key] = Image(img_undistort, opencv=True)

        # return depth image(s)
        if not self.stereo_matcher:
            if self.sgbm:
                self.stereo_matcher = cv2.StereoSGBM_create(
                    minDisparity=5, numDisparities=64, blockSize=5,
                    speckleRange=5, speckleWindowSize=15)
            else:
                self.stereo_matcher = cv2.StereoBM_create(
                    numDisparities=64, blockSize=5)
                self.stereo_matcher.setMinDisparity(5)
                self.stereo_matcher.setSpeckleRange(9)
                self.stereo_matcher.setSpeckleWindowSize(21)

        # process camera pairs (pairwise, chained in given order)
        for index_l, index_r in zip(range(0, len(images)-1), range(1, len(images))):
            key_l, key_r = str(index_l), str(index_r)
            pairkey = "{}|{}".format(key_l, key_r)

            img_l = outputs[key_l].get(ImageType.OPENCV)
            img_r = outputs[key_r].get(ImageType.OPENCV)
            pairparams = self.pairparams[(key_l, key_r)]

            img_l_rect = cv2.remap(
                img_l, pairparams["map_x"][0], pairparams["map_y"][0], cv2.INTER_LINEAR)
            img_r_rect = cv2.remap(
                img_r, pairparams["map_x"][1], pairparams["map_y"][1], cv2.INTER_LINEAR)

            img_l_gray = cv2.cvtColor(img_l_rect, cv2.COLOR_BGR2GRAY)
            img_r_gray = cv2.cvtColor(img_r_rect, cv2.COLOR_BGR2GRAY)
            outputs[key_l + "_rect_" + pairkey] = Image(img_l_rect, opencv=True)
            outputs[key_r + "_rect_" + pairkey] = Image(img_r_rect, opencv=True)

            # block matchers return fixed-point disparities (scaled by 16); normalise to 8 bit
            img_depth = self.stereo_matcher.compute(img_l_gray, img_r_gray)

            img_depth_8bit = (img_depth.astype(np.float64) / img_depth.max() * 255).astype(np.uint8)
            outputs["depth_" + pairkey] = Image(img_depth_8bit, opencv=True)

            img_depth_colour = cv2.applyColorMap(img_depth_8bit, cv2.COLORMAP_JET) # _AUTUMN/_JET
            outputs["depth_" + pairkey + "_colour"] = Image(img_depth_colour, opencv=True)

        return outputs
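
Likewise, the `map_x`/`map_y` rectification maps consumed above would come from stereo calibration followed by rectification. A sketch of how `mvutils.calibrate_camera_pair` might produce them; this is a reconstruction from the keys used, not the original helper:

import cv2

def calibrate_camera_pair(world_points, corners_l, corners_r,
                          camparams_l, camparams_r, img_size):
    # fix the per-camera intrinsics found earlier; solve only for R/T between cameras
    ret, K1, D1, K2, D2, R, T, E, F = cv2.stereoCalibrate(
        world_points, corners_l, corners_r,
        camparams_l["intrinsic"], camparams_l["distortion"],
        camparams_r["intrinsic"], camparams_r["distortion"],
        img_size, flags=cv2.CALIB_FIX_INTRINSIC)
    R1, R2, P1, P2, Q, roi_l, roi_r = cv2.stereoRectify(
        K1, D1, K2, D2, img_size, R, T)
    map_l = cv2.initUndistortRectifyMap(K1, D1, R1, P1, img_size, cv2.CV_32FC1)
    map_r = cv2.initUndistortRectifyMap(K2, D2, R2, P2, img_size, cv2.CV_32FC1)
    return {
        "R": R, "T": T, "Q": Q,
        "map_x": (map_l[0], map_r[0]),
        "map_y": (map_l[1], map_r[1]),
    }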
Example #12
from images.image import Image
from images.image_type import ImageType

import cv2
from PIL import Image as PILImage
from pprint import pprint


def printcached(img):
    print("Cached results: ")
    pprint({key: img.img[key] is not None for key in img.img.keys()})


img_pil = Image(
    PILImage.fromarray(
        cv2.cvtColor(cv2.imread('../img/object_detection_yolo_mxnet.png'),
                     cv2.COLOR_BGR2RGB)))  # OpenCV loads BGR; PIL expects RGB

printcached(img_pil)
img_pil.asnumpy()
printcached(img_pil)
img_pil.asopencv()
printcached(img_pil)
img_pil.asnumpy()
printcached(img_pil)
img_pil.asopencv()
printcached(img_pil)
img_pil.aspil()
printcached(img_pil)
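
The printed output suggests `Image` keeps one cached slot per representation (PIL, numpy, OpenCV) and fills each lazily on first request. A minimal sketch of that caching pattern; the attribute layout follows the `img` dict probed above, the rest is an assumption:

import numpy as np

class Image:
    def __init__(self, pil_image):
        # one slot per representation; only the source is populated up front
        self.img = {"pil": pil_image, "numpy": None, "opencv": None}

    def asnumpy(self):
        if self.img["numpy"] is None:  # convert once, then reuse
            self.img["numpy"] = np.asarray(self.img["pil"])
        return self.img["numpy"]

    def asopencv(self):
        if self.img["opencv"] is None:  # OpenCV uses BGR channel order
            self.img["opencv"] = self.asnumpy()[:, :, ::-1].copy()
        return self.img["opencv"]

    def aspil(self):
        return self.img["pil"]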
Example #13
    def __init__(self, identifier: str):
        super().__init__(identifier)

        # here the identifier is a file path; cv2.imread expects a string
        self.image = Image(cv2.imread(identifier), opencv=True)