Example #1
import mss

# Black out the configured zones in place; the coordinates are fractions of
# the frame size, so the zones track any capture resolution.
def prepare(frame):
    h, w = frame.shape[:2]
    for zone in black_out_zones:
        x, ex, y, ey = zone
        frame[int(h*y):int(h*ey), int(w*x):int(w*ex), 0:3] = 0

    return frame

# In the format [(x, endx, y, endy), ...]
black_out_zones = [
    (0, 0.65, 0.78, 1), # black out magic hand
    (-0.181, -0.015, 0.343, 0.7552),  # negative fractions index from the right edge
]

with mss.mss() as sct:
    parser = make_parser()
    parser.add_argument(
        "-m",
        "--monitor",
        type=int,
        default=0,
        help="The monitor index to use"
    )
    args = parser.parse_args()  # parsed early so the monitor index is available below
    camera, args = make_camera_with_args(parser=parser, video=Camera.Monitor(sct, mon=args.monitor))
    camera.make_virtual_webcam(prepare=prepare, webcam_res=(1920, 1080))

'''
Measurements behind the second black-out zone (screen: 24.25 in x 13.375 in):
4 in wide, ending 0.375 in from the right edge (hence the negative x values)
5.5 in tall, starting 4.6 in from the top
'''
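# A quick self-contained check of the zone math above (dummy numpy frame, not
# a real capture; safe to delete):
import numpy as np

demo = np.full((1080, 1920, 3), 255, dtype=np.uint8)
h, w = demo.shape[:2]
for x, ex, y, ey in black_out_zones:
    # e.g. int(1920 * -0.181) == -347, so negative fractions count from the right
    demo[int(h * y):int(h * ey), int(w * x):int(w * ex)] = 0
assert (demo[900, 600] == 0).all()   # inside the first zone
assert (demo[0, 0] == 255).all()     # outside every zone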
Example #2
import cv2

# Echo effect: blend the live frame with evenly spaced frames from the stored
# buffer. (The signature is assumed from the make_virtual_webcam call below;
# the prepare callback it also takes is not shown in this snippet.)
def preprocess(raw, frames):
    img = raw
    # Evenly spaced indices into the stored frame buffer; assumes the buffer
    # is already full, i.e. len(frames) == total_frames.
    get_ind = lambda i: total_frames - (i + 1) * len(frames) // echos
    for i in range(echos - 1):
        ind = get_ind(i)
        img = cv2.addWeighted(img, 0.7, frames[ind], 0.3, 0)
    final = cv2.addWeighted(img, 0.9, raw, 0.1, 0)
    # final = cv2.cvtColor(final, cv2.COLOR_BGR2RGB)
    return final


parser = make_parser()
parser.add_argument("-e",
                    "--echos",
                    type=int,
                    default=2,
                    help="The number of echos")
parser.add_argument("-tf",
                    "--total-frames",
                    type=int,
                    default=100,
                    help="The total number of frames stored for the echos")
camera, args = make_camera_with_args(parser=parser,
                                     log=False,
                                     fps=15,
                                     res=(1280, 720))
total_frames = args.total_frames
echos = args.echos
camera.make_virtual_webcam(prepare=prepare,
                           preprocess=preprocess,
                           frames_stored=args.total_frames)
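# Rough sanity check of how the 0.7/0.3 blend discounts older echoes (plain
# Python arithmetic standing in for cv2.addWeighted; hypothetical values):
raw_val = 100.0
echo_vals = [10.0, 50.0]               # stand-ins for frames[get_ind(i)]
val = raw_val
for v in echo_vals:
    val = 0.7 * val + 0.3 * v          # same math as cv2.addWeighted(img, 0.7, f, 0.3, 0)
final_val = 0.9 * val + 0.1 * raw_val  # the raw frame is mixed back in at the end
# An echo blended with k later blends ends up weighted 0.9 * 0.3 * 0.7**k,
# so the earliest-blended echoes fade geometrically.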
Example #3
import cv2

parser = make_parser()
parser.add_argument(
    "-p",
    "--people",
    type=int,
    default=1,
    help="The number of people in the video stream",
)
parser.add_argument(
    "-t",
    "--threshold",
    type=int,
    default=0,
    help="The threshold to run on the image before applying blob detection. If it is left as 0 or default, a slider will be used",
)
camera, args = make_camera_with_args(
    parser=parser, log=True, fps=30, res=(640, 360), cam=1
)
camera.name = "Image"
cv2.namedWindow("Image")
num_of_people = args.people
threshold = args.threshold
# With no fixed threshold given, expose it as a live trackbar on the window.
if threshold == 0:

    def set_threshold(t):
        global threshold
        threshold = t

    cv2.createTrackbar("threshold", "Image", 0, 255, set_threshold)

# Reminder of the camera's default output function, which displays via
# cv2.imshow(self.name, ...); this is why camera.name above must match the
# trackbar's window name:
# def _default_output_function(self, frame):
#     cv2.imshow(self.name, frame)
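# A minimal sketch of how the threshold might feed blob detection (the
# detector settings here are assumptions, not the script's actual parameters):
def detect_blobs(frame, thresh):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY)
    detector = cv2.SimpleBlobDetector_create()  # default params; tune for real use
    keypoints = detector.detect(binary)
    return cv2.drawKeypoints(frame, keypoints, None, (0, 0, 255),
                             cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)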
Example #4
import cv2
import numpy as np
def process3(frame):
    # Brighten only the already-bright pixels (uint8 values wrap on overflow).
    return np.where(frame > 150, frame * 2, frame)
    # return frame * 2


def process4(frame):
    # 3x3 Laplacian edge-detection kernel, applied channel by channel.
    # range(len - 2) covers every valid 3x3 window; the original range(len - 3)
    # stopped one window short.
    kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]])
    output = np.zeros_like(frame)
    for i in range(len(frame) - 2):
        for j in range(len(frame[i]) - 2):
            for c in range(3):
                output[i, j, c] = np.sum(kernel * frame[i:i + 3, j:j + 3, c])
    return output
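# For comparison, the same 3x3 Laplacian can be applied in one vectorized
# call; a minimal equivalent using cv2.filter2D (border handling differs:
# the manual loop leaves a two-pixel border at zero, and filter2D saturates
# uint8 values instead of wrapping):
def process4_fast(frame):
    kernel = np.array([[1, 1, 1], [1, -8, 1], [1, 1, 1]])
    return cv2.filter2D(frame, -1, kernel)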


def prepare(process=None):
    # Build a frame callback that mirrors the image and optionally applies an
    # extra processing step on top.
    def f(frame):
        frame = cv2.flip(frame, 1)  # horizontal mirror

        if process is not None:
            frame = process(frame)

        return frame

    return f


camera, args = make_camera_with_args(cam=1)
camera.stream(prepare=prepare(process4), log=True)
Example #5
import argparse

import cv2

# Crop the configured region and scale it back up to the input frame size.
# (Name and signature assumed from the make_virtual_webcam call below; the
# preprocess callback it also takes is not shown in this snippet.)
def prepare(img):
    h, w, c = img.shape
    img = img[py:py+ph, px:px+pw]
    return cv2.resize(img, (w, h))


def pos_type(s):
    # Parse a "(x,y,w,h)" string into a 4-tuple of floats.
    try:
        a = [float(n) for n in s[1:-1].split(",")]
        assert len(a) == 4
        return tuple(a)
    except (ValueError, AssertionError):
        raise argparse.ArgumentTypeError("Must format position as (x,y,w,h). Remember no spaces!")
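# A couple of quick checks of pos_type (hypothetical inputs; safe to delete):
assert pos_type("(0.25,0.25,0.5,0.5)") == (0.25, 0.25, 0.5, 0.5)
assert pos_type("(0,0,1,1)") == (0.0, 0.0, 1.0, 1.0)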


parser = make_parser()
parser.add_argument(
    "-p",
    "--pos",
    type=pos_type,
    default=(0, 0, 1, 1),
    help="The position to use in the format: (x,y,w,h)"
)

camera, args = make_camera_with_args(parser=parser)
pos = args.pos
w, h = camera.get_res()
pix_pos = (int(pos[0] * w), int(pos[1] * h), int(pos[2] * w), int(pos[3] * h))
px, py, pw, ph = pix_pos
webcam_res = (pix_pos[2], pix_pos[3])
camera.make_virtual_webcam(prepare=prepare, preprocess=preprocess, frames_stored=1, webcam_res=webcam_res)
Example #6
import cv2
import numpy as np
import tensorflow as tf

# Tail of the face-swap preprocess (signature assumed from the call below).
# The face detection and the scan for the most confident face, which set
# faces, confidence, max_ind, max_ind_2, max_metric, noise, approach, r,
# count, stability, generator, colorifier, normalize and gaussian_tensor,
# are omitted from this snippet.
def preprocess(img, frames):
    ...
    # Find the second most confident detection, skipping the best one.
    for i in range(len(faces)):
        if i == max_ind:
            continue
        metric = confidence[i]
        if metric > max_metric:
            max_metric = metric
            max_ind_2 = i

    # Overlay a generated face on the two most confident detections.
    for i in [max_ind, max_ind_2]:
        if i < 0:
            continue
        x, y, w, h = faces[i]
        # Ease the latent vector toward the current target; every `stability`
        # frames a new random target is drawn, so the generated face morphs.
        noise = (1 - r) * noise + r * approach
        count += 1
        if count >= stability:
            count = 0
            approach = tf.random.normal([1, 100])
        # Generate a 28x28 face, scale it to the detected box, and alpha-blend
        # it in with a Gaussian falloff so the edges fade into the frame.
        gen_img = colorifier(generator(noise, training=False), training=False)
        gen_img = normalize(gen_img, input_range=(-1, 1), output_range=(0, 255))
        gen_img = np.reshape(gen_img, (28, 28, 3))
        gen_img = cv2.resize(gen_img, (w, h))
        weights = cv2.resize(gaussian_tensor, (w, h))
        img[y:y + h, x:x + w] = gen_img * weights + (1 - weights) * img[y:y + h, x:x + w]
    return img
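# gaussian_tensor is not defined in this snippet; one plausible construction
# (an assumption, not necessarily the original) is a peak-normalized 2-D
# Gaussian replicated across the three color channels so it broadcasts
# against the BGR frame:
g = cv2.getGaussianKernel(28, 9)  # 28x1 column vector
g2d = g @ g.T                     # 28x28 separable Gaussian
g2d /= g2d.max()                  # peak-normalize so the center blends fully
gaussian_tensor = np.repeat(g2d[:, :, np.newaxis], 3, axis=2).astype(np.float32)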

camera, args = make_camera_with_args(log=False)

camera.make_virtual_webcam(
    preprocess=preprocess,
    prepare=None
)