import cv2

from video_loop import run_video_capture_pipeline


def color(image):
    # kernel_size = 5
    # image = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
    # Reinterpret the frame in HSV; displaying it directly gives a false-colour view
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    return image


run_video_capture_pipeline(transform_fn=color)
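
All of these snippets import run_video_capture_pipeline (and sometimes args) from a local video_loop module that is not shown here. A minimal sketch of what such a helper might look like, assuming an argparse-style args object whose read_camera field selects the capture device, and a loop that applies transform_fn to every frame and displays the result until q is pressed:

import argparse

import cv2

# Hypothetical video_loop.py (not part of the original listing); the flag name
# --read-camera and the display/quit behaviour are assumptions.
parser = argparse.ArgumentParser()
parser.add_argument("--read-camera", type=int, default=0)
args, _ = parser.parse_known_args()


def run_video_capture_pipeline(transform_fn):
    cap = cv2.VideoCapture(args.read_camera)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # Apply the per-frame transform and show the result
        cv2.imshow("output", transform_fn(frame))
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()
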
Example No. 2
import cv2

from video_loop import run_video_capture_pipeline


def canny(image):
    # Canny edge detection on the greyscale frame
    edges = cv2.Canny(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), 50, 150)
    # Convert back to BGR so the pipeline can display it
    image = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
    return image


run_video_capture_pipeline(transform_fn=canny)
Example No. 3
import cv2
import dlib
import numpy as np
from imutils import face_utils

from video_loop import run_video_capture_pipeline

# dlib frontal face detector and 68-point landmark predictor
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
    "shape_predictor_68_face_landmarks.dat"
)  # link to model: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2


def anonymize_face_pixelate(roi, blocks=10):
    # Minimal stand-in (assumed) for the project's pixelation helper, which is
    # not shown in the original listing: pixelate by down- and up-scaling.
    (h, w) = roi.shape[:2]
    small = cv2.resize(roi, (blocks, blocks), interpolation=cv2.INTER_LINEAR)
    return cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)


def blur(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 0)  # detect faces in the greyscale frame (no upsampling)

    mask = np.zeros(image.shape[:2], np.uint8)  # face-shaped regions to blend back
    blurred_image = image.copy()  # copy that will hold the pixelated faces
    for face in faces:  # one bounding box per detected face
        (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
        blurred_image[y:y + h, x:x + w, :] = anonymize_face_pixelate(
            blurred_image[y:y + h, x:x + w, :], blocks=10)
        # Facial landmark detection
        shape = predictor(gray, face)
        shape = face_utils.shape_to_np(shape)
        # Mask covering only the convex hull of the face landmarks
        shape = cv2.convexHull(shape)
        cv2.drawContours(mask, [shape], -1, 255, -1)

    # Blend the pixelated copy back in, restricted to the face-shaped mask.
    # This is done once, after the loop, so multiple faces share one mask.
    mask = np.expand_dims(mask / 255.0, axis=-1)
    image = ((1.0 - mask) * image + mask * blurred_image).astype(np.uint8)

    return image


run_video_capture_pipeline(transform_fn=blur)
Example No. 4
import cv2

from video_loop import run_video_capture_pipeline

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))  # kernel for morphological opening
fgbg = cv2.createBackgroundSubtractorMOG2()  # MOG2 background/foreground model


def bg_subtraction(image):
    # Foreground mask from the MOG2 background model, cleaned with an opening
    image = fgbg.apply(image)
    image = cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)
    # Convert back to BGR so the pipeline can display it
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    return image


run_video_capture_pipeline(transform_fn=bg_subtraction)
Example No. 5
import cv2
import numpy as np

from video_loop import run_video_capture_pipeline, args

# Grab one frame up front to initialise the previous frame and the HSV canvas
cap = cv2.VideoCapture(args.read_camera)
ret, frame1 = cap.read()
prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
hsv = np.zeros_like(frame1)
hsv[..., 1] = 255  # full saturation; hue and value are filled in per frame
cap.release()


def dense_flow(image):
    global prvs
    next_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Farneback dense optical flow between the previous and current frame
    flow = cv2.calcOpticalFlowFarneback(prvs, next_frame, None, 0.5, 3, 15, 3,
                                        5, 1.2, 0)

    # Encode flow direction as hue and flow magnitude as value
    mag, ang = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = ang * 180 / np.pi / 2
    hsv[..., 2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    prvs = next_frame
    return image


run_video_capture_pipeline(transform_fn=dense_flow)
import cv2
import numpy as np

from video_loop import run_video_capture_pipeline, args

# Assumed set-up, mirroring the dense-flow example: grab one static background
# frame at start-up; the red cloth is later replaced by these pixels.
cap = cv2.VideoCapture(args.read_camera)
ret, background = cap.read()
cap.release()


def invisibility(image):
    # Work in HSV so the red cloth can be segmented by hue
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Range for lower red
    lower_red = np.array([0, 120, 70])
    upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv, lower_red, upper_red)

    # Range for upper red
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red)

    # Generating the final mask to detect red color
    mask1 = mask1 + mask2

    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3),
                                                              np.uint8))

    # Inverted mask: everything except the red cloth
    mask2 = cv2.bitwise_not(mask1)
    # Keep the current frame everywhere outside the cloth
    res1 = cv2.bitwise_and(image, image, mask=mask2)

    # Static background pixels only inside the cloth region
    res2 = cv2.bitwise_and(background, background, mask=mask1)
    # Generating the final output
    final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
    return final_output


run_video_capture_pipeline(transform_fn=invisibility)