Example #1
from typing import Any

import cv2

from draugr.opencv_utilities import show_image


def detect_lines(img: Any, debug: bool = True) -> Any:
    """Detect line segments with OpenCV's LineSegmentDetector."""
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    lsd = cv2.createLineSegmentDetector(0)  # 0 == cv2.LSD_REFINE_NONE
    lines = lsd.detect(img)[0]  # detect() returns (lines, width, prec, nfa)

    if debug:
        drawn_img = lsd.drawSegments(img, lines)
        show_image(drawn_img, wait=True)

    return lines
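
A minimal usage sketch for the detector above; the image path is a placeholder, and the None guard is an assumption since detect() can find no segments:

import cv2

image = cv2.imread("sample.jpg")  # placeholder path
segments = detect_lines(image, debug=False)
print(0 if segments is None else len(segments))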
Example #2
    def uahsd2():
        """Threshold a sample image and display its morphological skeleton."""
        # file = "360_F_108702068_Z9VGab1DfiyPzq2v5Xgm2wRljttzRGgq.jpg"
        file = "NdNLO.jpg"

        from pathlib import Path

        img = cv2.imread(str(Path.home() / "Pictures" / file))
        img = to_gray(img)
        ret, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
        skel = skeletonise(img)
        show_image(skel, wait=True)
        cv2.destroyAllWindows()
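
skeletonise here is a project helper; a minimal morphological-skeleton sketch in plain OpenCV (Lantuéjoul's erode/open scheme, assuming a binary uint8 input — not necessarily the project's implementation) could look like:

import cv2
import numpy


def morphological_skeleton(binary: numpy.ndarray) -> numpy.ndarray:
    # iteratively erode and open, accumulating the difference into the skeleton
    skeleton = numpy.zeros_like(binary)
    kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
    img = binary.copy()
    while cv2.countNonZero(img) > 0:
        eroded = cv2.erode(img, kernel)
        opened = cv2.dilate(eroded, kernel)  # opening of img
        skeleton = cv2.bitwise_or(skeleton, cv2.subtract(img, opened))
        img = eroded
    return skeleton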
Example #3
    def main(method: ComArchEnum = ComArchEnum.pubsub,
             topic: Optional[str] = None) -> None:
        """

        topic: "" is all

        :param method:
        :param topic:
        """
        if topic is None:
            topic = ""  # "" is all

        with zmq.Context() as context:
            with context.socket(method.value["dst"].value) as zmq_socket:
                if method is ComArchEnum.pubsub:
                    zmq_socket.subscribe(topic)
                    # zmq_socket.setsockopt(zmq.ZMQ_RCVHWM, 1)

                zmq_socket.setsockopt(zmq.CONFLATE, 1)  # keep only the newest message
                zmq_socket.setsockopt(zmq.LINGER, 0)  # drop unsent messages on close

                # zmq_socket.connect(SOCKET_ADDRESS2)
                zmq_socket.bind(SOCKET_ADDRESS2)

                while True:
                    frame = zmq_socket.recv_pyobj(
                        # flags=zmq.NOBLOCK
                    )
                    if show_image(frame):
                        break
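
The subscriber above binds the socket and conflates incoming messages so it always shows the newest frame. A possible publisher counterpart is sketched below; the address value and the webcam frame source are assumptions, not part of the original:

import cv2
import zmq

SOCKET_ADDRESS2 = "tcp://localhost:5555"  # placeholder; must match the subscriber's address

with zmq.Context() as context:
    with context.socket(zmq.PUB) as zmq_socket:
        zmq_socket.connect(SOCKET_ADDRESS2)  # the subscriber binds, so the publisher connects
        capture = cv2.VideoCapture(0)
        while True:
            success, frame = capture.read()
            if not success:
                break
            zmq_socket.send_pyobj(frame)  # pickle and send the frame as one message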
Example #4
    def uhasd():
        """Run Hough line detection on a set of sample images."""
        files = (
            "white-paper-table-13912022.jpg",
            "2.jpg",
            "NdNLO.jpg",
            "sudoku.jpg",
            "sudoku2.jpg",
            "sudoku3.jpg",
        )
        for f in files:
            img = cv2.imread(str(Path.home() / "Pictures" / f))

            lines = hough_lines(img, debug=True)
            for line in lines:  # Draw lines on the image
                x1, y1, x2, y2 = line[0]
                cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 3)

            if show_image(cv2.resize(img, dsize=(600, 600)), wait=True):
                break

        cv2.destroyAllWindows()
Example #5
if __name__ == "__main__":
    from pathlib import Path

    import cv2
    import numpy

    # to_gray, noise_filter, NoiseFilterMethodEnum, find_intersections,
    # draw_markers and show_image are helpers from the surrounding module.

    # file = "360_F_108702068_Z9VGab1DfiyPzq2v5Xgm2wRljttzRGgq.jpg"
    # file = "istockphoto-529081402-170667a.jpg"
    # file = "white-paper-table-13912022.jpg"
    # file = "2.jpg"
    file = "3.jpg"
    # file = "NdNLO.jpg"
    image = cv2.imread(str(Path.home() / "Pictures" / file))
    gray = to_gray(image)

    blur = noise_filter(gray,
                        method=NoiseFilterMethodEnum.median_blur,
                        ksize=5)

    adapt_type = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
    thresh_type = cv2.THRESH_BINARY_INV
    bin_img = cv2.adaptiveThreshold(blur, 255, adapt_type, thresh_type, 11, 2)
    if False:  # flip to True to inspect the binarised image
        show_image(bin_img, wait=True)

    lines = cv2.HoughLines(bin_img, rho=2, theta=numpy.pi / 180,
                           threshold=350)  # Detect lines
    """
  if False:
      img_with_all_lines = numpy.copy(img)
      draw_lines(img_with_all_lines, lines)
      show_image(img_with_all_lines, wait=True)
  """
    centroids = find_intersections(lines)
    show_image(draw_markers(image.copy(), centroids), wait=True)
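
find_intersections presumably solves, per pair of detected lines, the two Hough-form equations rho = x*cos(theta) + y*sin(theta); a minimal sketch of that core step (a hypothetical helper for illustration — the actual project helper also appears to cluster crossings into centroids):

import numpy


def hough_line_intersection(line1, line2):  # hypothetical helper
    # each line is in cv2.HoughLines form [[rho, theta]]
    rho1, theta1 = line1[0]
    rho2, theta2 = line2[0]
    a = numpy.array(
        [[numpy.cos(theta1), numpy.sin(theta1)],
         [numpy.cos(theta2), numpy.sin(theta2)]]
    )
    b = numpy.array([rho1, rho2])
    x, y = numpy.linalg.solve(a, b)  # raises LinAlgError for (near-)parallel lines
    return int(round(x)), int(round(y))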
Example #6
    def main(model_name: str = "maskrcnn_pennfudanped", score_threshold=0.55):
        """

        Args:
          model_name:
          score_threshold:
        """
        base_path = PROJECT_APP_PATH.user_data / "maskrcnn"
        dataset_root = Path.home() / "Data"

        seed_stack(3825)

        dataset = (
            PennFudanDataset  # (dataset_root / "PennFudanPed", SplitEnum.training)
        )
        categories = dataset.categories

        if True:  # flip to False to use a pretrained model instead of a saved one
            model = load_model(model_name=model_name,
                               model_directory=base_path / "models")
        else:
            model = get_pretrained_instance_segmentation_maskrcnn(
                dataset.response_channels)

        model.to(global_torch_device())
        cpu_device = torch.device("cpu")

        with torch.no_grad():
            with TorchEvalSession(model):
                for image in tqdm(
                        to_tensor_generator(
                            frame_generator(cv2.VideoCapture(0)),
                            device=global_torch_device(),
                        )):
                    prediction = model(
                        # torch_vision_normalize_batch_nchw(
                        uint_hwc_to_chw_float_tensor(image).unsqueeze(0)
                        #    )
                    )[0]

                    (boxes, labels, scores) = (
                        prediction["boxes"].to(cpu_device).numpy(),
                        prediction["labels"].to(cpu_device).numpy(),
                        torch.sigmoid(
                            prediction["scores"]).to(cpu_device).numpy(),
                    )

                    indices = scores > score_threshold

                    if show_image(
                            draw_bounding_boxes(
                                quick_to_pil_image(image),
                                boxes[indices],
                                labels=labels[indices],
                                scores=scores[indices],
                                categories=categories,
                            ),
                            model_name,
                            wait=True,
                    ):
                        break  # esc to quit
Example #7
def run_webcam_demo(
    cfg: NOD,
    categories: Sequence[str],
    model_checkpoint: Path,
    score_threshold: float = 0.5,
    window_name: str = "SSD",
):
    """

    :param categories:
    :type categories:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param score_threshold:
    :type score_threshold:
    :param window_name:
    :type window_name:
    :return:
    :rtype:"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(cfg.input.image_size,
                              cfg.input.pixel_mean,
                              split=SplitEnum.testing)
    model = SingleShotDetection(cfg)

    checkpointer = CheckPointer(model,
                                save_dir=ensure_existence(
                                    PROJECT_APP_PATH.user_data / "results"))
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for infos in tqdm(DictUnityEnvironment(connect_to_running=True)):
            info = next(iter(infos.values()))
            new_images = extract_all_cameras(info)
            image = next(iter(new_images.values()))[..., :3][..., ::-1]
            image = gamma_correct_float_to_byte(image)
            result = model(
                transforms(image)[0].unsqueeze(0).to(global_torch_device()))[0]
            height, width, *_ = image.shape

            result["boxes"][:, 0::2] *= width / result["img_width"]
            result["boxes"][:, 1::2] *= height / result["img_height"]
            (boxes, labels, scores) = (
                result["boxes"].to(cpu_device).numpy(),
                result["labels"].to(cpu_device).numpy(),
                result["scores"].to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            if show_image(
                    draw_bounding_boxes(
                        image,
                        boxes[indices],
                        labels=labels[indices],
                        scores=scores[indices],
                        categories=categories,
                    ).astype(numpy.uint8),
                    window_name,
                    wait=1,
            ):
                break  # esc to quit
Example #8
"""
Created on 03/02/2022
"""

__all__ = []

from pathlib import Path

import cv2

from draugr.opencv_utilities import show_image

# Load image, convert to grayscale, and threshold with Otsu's method
image = cv2.imread("1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)[1]

# Find contours and sort them by contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

ROI = None
# Extract the bounding-box ROI of the largest contour
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    ROI = image[y:y + h, x:x + w]
    break  # only the largest contour is needed

show_image(ROI, wait=True, save_path=Path("ROI.png"))
Example #9
def run_webcam_demo(
    cfg: NOD,
    input_cfg: NOD,
    categories: List,
    model_checkpoint: Path,
    score_threshold: float = 0.7,
    window_name: str = "SSD",
):
    """

    :param categories:
    :type categories:
    :param cfg:
    :type cfg:
    :param model_checkpoint:
    :type model_checkpoint:
    :param score_threshold:
    :type score_threshold:
    :param window_name:
    :type window_name:
    :return:
    :rtype:"""

    cpu_device = torch.device("cpu")
    transforms = SSDTransform(
        input_cfg.image_size, input_cfg.pixel_mean, split=SplitEnum.testing
    )
    model = SingleShotDetectionNms(cfg)

    checkpointer = CheckPointer(
        model, save_dir=ensure_existence(PROJECT_APP_PATH.user_data / "results")
    )
    checkpointer.load(model_checkpoint, use_latest=model_checkpoint is None)
    print(
        f"Loaded weights from {model_checkpoint if model_checkpoint else checkpointer.get_checkpoint_file()}"
    )

    model.post_init()
    model.to(global_torch_device())

    with TorchEvalSession(model):
        for image in tqdm(frame_generator(cv2.VideoCapture(0))):
            result = model(transforms(image)[0].unsqueeze(0).to(global_torch_device()))
            height, width, *_ = image.shape

            result.boxes[:, 0::2] *= width / result.img_width.cpu().item()
            result.boxes[:, 1::2] *= height / result.img_height.cpu().item()
            (boxes, labels, scores) = (
                result.boxes.to(cpu_device).numpy(),
                result.labels.to(cpu_device).numpy(),
                result.scores.to(cpu_device).numpy(),
            )

            indices = scores > score_threshold

            if show_image(
                draw_bounding_boxes(
                    image,
                    boxes[indices],
                    labels=labels[indices],
                    scores=scores[indices],
                    categories=categories,
                    score_font=ImageFont.truetype(
                        PACKAGE_DATA_PATH / "Lato-Regular.ttf",
                        24,
                    ),
                ).astype(numpy.uint8),
                window_name,
                wait=1,
            ):
                break  # esc to quit
Example #10
def hough_lines(
    img,
    kernel_size=11,
    sigma=1.4,  # 0
    aperture_size=3,
    rho=1,
    theta=numpy.pi / 180,
    min_votes=99,
    lines=100,  # unused; shadowed by the HoughLinesP result below
    min_line_length=10,
    max_line_gap=250,
    debug: bool = False,
) -> Any:
    """Detect line segments with the probabilistic Hough transform on a Laplacian edge map."""
    gray = to_gray(img)

    if True:  # remove noise
        # gray = cv2.medianBlur(gray, kernel_size)
        gray = cv2.GaussianBlur(gray, (kernel_size, kernel_size), sigma)

    if False:  # alternative edge detectors, kept for experimentation
        if False:
            edges = cv2.Canny(gray,
                              threshold1=50,
                              threshold2=200,
                              apertureSize=aperture_size)
        else:
            adapt_type = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
            thresh_type = cv2.THRESH_BINARY_INV
            edges = cv2.adaptiveThreshold(gray, 255, adapt_type, thresh_type,
                                          11, 2)
    else:
        laplacian = cv2.Laplacian(
            gray, cv2.CV_8UC1, ksize=3
        )  # alternative depths: cv2.CV_16UC1, cv2.CV_16S, cv2.CV_64F
        # blurriness = laplacian.var()  # Laplacian variance as a focus measure
        # sobelx = cv.Sobel(img, cv.CV_64F, 1, 0, ksize=5)
        # sobely = cv.Sobel(img, cv.CV_64F, 0, 1, ksize=5)

        edges = cv2.threshold(
            laplacian,
            0,
            255,
            ThresholdTypeFlag.otsu.value +
            ThresholdTypeFlag.binary.value,  # ThresholdTypeFlag.to_zero.value
        )[1]

    if True:
        lines = cv2.HoughLinesP(
            edges,
            rho=rho,
            theta=theta,
            threshold=min_votes,
            minLineLength=min_line_length,
            maxLineGap=max_line_gap,
        )
    else:
        lines = None

    if debug:
        show_image(gray)
        # show_image(laplacian)
        show_image(edges, wait=True)
        if False:
            for line in lines:  # Draw lines on the image
                x1, y1, x2, y2 = line[0]
                cv2.line(img, (x1, y1), (x2, y2), (255, 0, 0), 3)

    return lines
Example #11
import cv2
import dlib

from draugr.opencv_utilities import AsyncVideoStream, show_image
from draugr.opencv_utilities.dlib_utilities import shape_to_ndarray

# initialise dlib's face detector and the facial landmark predictor
p = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(p)

cap = cv2.VideoCapture(0)  # unused here; frames come from AsyncVideoStream below
upsample_num_times = 0  # no upsampling before face detection

for image in AsyncVideoStream():
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    for (i, rect) in enumerate(detector(gray, upsample_num_times)):
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape_to_ndarray(predictor(gray, rect)):
            cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

    if show_image(
            image, wait=5, char="q"
    ):  # show the output image with the face detections + facial landmarks
        break

cv2.destroyAllWindows()
cap.release()
Example #12
import cv2
import numpy


def contour_interior_points(  # hypothetical name/signature; the original def line is missing from this excerpt
    cnt: numpy.ndarray, im_shape
) -> numpy.ndarray:
    """
    In some cases, we may need all the points which comprise that object. It can be done as follows:

    :param cnt: contour whose interior points are wanted
    :type cnt: numpy.ndarray
    :return: coordinates of all non-zero pixels in the filled contour mask
    :rtype: numpy.ndarray
    """
    mask = numpy.zeros(im_shape, numpy.uint8)
    cv2.drawContours(mask, [cnt], 0, 255, -1)
    return numpy.transpose(numpy.nonzero(mask))


def convexity_defects(cnt: numpy.ndarray) -> numpy.ndarray:
    """
    Note the "returnPoints=False" when computing the hull: convexityDefects() expects the indices of the contour points, not the points themselves.

    Returns a defects structure, an array with four values per defect: [start point, end point, farthest point, approximate distance to the farthest point].
    """
    return cv2.convexityDefects(cnt, cv2.convexHull(cnt, returnPoints=False))
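
A usage sketch that unpacks the four-value defects structure described above; the image path and the contour-extraction steps are assumptions for illustration:

image = cv2.imread("hand.jpg", cv2.IMREAD_GRAYSCALE)  # placeholder path
_, binary = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4 signature
cnt = max(contours, key=cv2.contourArea)
defects = convexity_defects(cnt)
if defects is not None:
    for start_idx, end_idx, far_idx, depth in defects[:, 0]:
        far_point = tuple(int(v) for v in cnt[far_idx][0])  # deepest point between hull and contour
        cv2.circle(image, far_point, 5, 255, -1)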


if __name__ == "__main__":
    from neodroidvision.utilities.misc.perlin import generate_perlin_noise
    from draugr.opencv_utilities import threshold_channel, show_image

    a = generate_perlin_noise() * 255
    a = numpy.uint8(a)
    t = threshold_channel(a)
    show_image(t, wait=True)
Example #13
from draugr.opencv_utilities import show_image

from neodroidvision.utilities.misc.perlin import generate_perlin_noise

show_image(generate_perlin_noise((100, 100)), wait=True)