def main():
    video_capture = WebcamVideoStream(src=0, width=480, height=360).start()
    fps = FPS().start()

    detection_graph = model_load_into_memory()

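    # Start the server packet handler in a background (daemon) thread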
    thread1 = ServerHandlerPacket("Thread-1-ServerHandlerPacket")
    thread1.daemon = True
    thread1.start()

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Camera detection loop
                frame = video_capture.read()
                cv2.imshow('Entrada', frame)
                t = time.time()
                output = detect_objects(frame, sess, detection_graph)
                cv2.imshow('Video', output)
                fps.update()
                print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            video_capture.stop()
            fps.stop()
            print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
            print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

            cv2.destroyAllWindows()
Example 2
def main():
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    servo = Servo()

    camera = PiCamera(resolution='400x300')
    sleep(5)
    frame = PiRGBArray(camera)
    camera.capture(frame, format="bgr")
    frame = frame.array
    camera.close()

    yolo = YOLO_tiny_tf.YOLO_TF()

    vs = WebcamVideoStream('448x448').start()

    """prev = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
    servo.servo_control_up_down(20 * (prev[1]-150) / 150)
    servo.servo_control_left_right(-30 * (prev[0]-200) / 200)
    """
    count = 0
    frame_without = 0

    while True:
        frame = vs.read()
        yolo.detect_from_cvmat(frame)
        #frame = imutils.resize(frame, width=400)
        result_box = yolo.result
        #timer = cv2.getTickCount()

        print(count)
        center = [0,0]
        person = 0
        for result in result_box:
            bbox = (result[1], result[2], result[3], result[4])
            print(result[0], bbox)

            #fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);

            # Display FPS on frame
            #cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);

            if result[0] == 'person':
                person += 1
                center[0] += bbox[0]
                center[1] += bbox[1]

        if person == 0:
            frame_without += 1
            if frame_without == 5:
                servo.servo_reset()
                frame_without = 0
        #else:
        if person > 0:
            frame_without = 0
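            # Steer the servos toward the mean position of the detected people,
            # scaled by its offset from the centre (224, 224) of the 448x448 frame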
            prev = (center[0]/person, center[1]/person)
            print(prev)
            servo.servo_control_up_down(20 * (prev[1]-224) / 448)
            servo.servo_control_left_right(-30 * (prev[0]-224) / 448)
        count += 1
Example 3
def main():
    args = argument_parser()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    detection_graph = model_load_into_memory()

    # Thread starting in background
    http_thread = ObjectDetectionThread("HTTP Publisher Thread")
    http_thread.daemon = True
    http_thread.start()

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Camera detection loop
                frame = video_capture.read()
                cv2.imshow('Entrada', frame)
                output = detect_objects(frame, sess, detection_graph)
                _, TotalPeople.img = cv2.imencode('.jpeg', output)  # imencode returns (retval, buffer); keep the encoded buffer
                cv2.imshow('Video', output)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # Ending resources
            video_capture.stop()
            http_thread.stop()
            cv2.destroyAllWindows()
Example 4
def run():
    cap = WebcamVideoStream(src=0, width=1920, height=1080).start()
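    # Detect markers from the 4x4 ArUco dictionary (100 ids) using default detector parameters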
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
    parameters = aruco.DetectorParameters_create()
    while True:
        frame = cap.read()
        fh, fw, _ = frame.shape

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        # res = cv2.resize(frame, (320, 180))
        # retval, buffer = cv2.imencode('.jpg', res)
        # cv2.imshow('res',res)
        sio.emit('img', 'data:image/jpeg;base64,' +
                 str(len(corners)))  # + base64.b64encode(buffer))
        # sio.emit('img', "asdsadad")
        # if cv2.waitKey(0) & 0xFF == ord('q'):
        # 	break

    cv2.destroyAllWindows()
Example 5
    pool = Pool(args.num_workers, worker,
                (input_q, output_q, net, args.min_confidence))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # Define the codec and create VideoWriter object
    fourcc = cv.VideoWriter_fourcc(*args.codec)
    out = cv.VideoWriter(args.save, fourcc, args.fps,
                         (args.width, args.height))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_frame = output_q.get()
        out.write(output_frame)
        cv.imshow('Video', output_frame)
        fps.update()
        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    pool.terminate()
Example 6
def PolygonArea(c):
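    # Shoelace formula: area of the quadrilateral given by the four corner points in c[0]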
    c = c[0]
    c = [(c[0][0], c[0][1]), (c[1][0], c[1][1]), (c[2][0], c[2][1]),
         (c[3][0], c[3][1])]
    n = len(c)
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += c[i][0] * c[j][1]
        area -= c[j][0] * c[i][1]
    area = abs(area) / 2.0
    return area


while True:
    frame = cap.read()

    frame = frame[0:720, 0:1280]
    frame = cv2.rotate(frame, rotateCode=cv2.ROTATE_90_CLOCKWISE)

    fh, fw, _ = frame.shape
    cx, cy = int(fw / 2), int(fh / 2)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    corners, ids, rejectedImgPoints = aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)
    aruco.drawDetectedMarkers(frame, corners, ids)

    i = 0
    objs = []
    for corner in corners:
Example 7
def main():
    # Load the AdaIN model
    ada_in = AdaINference(args.checkpoint, args.vgg_path, device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size, args.scale, args.alpha, args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0])  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps, out_shape)
    
    fps = FPS().start() # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while True:
        ret, frame = cap.read()

        if ret is True:       
            frame_resize = cv2.resize(frame, None, fx=style_window.scale, fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)
                frame_resize = gaussian_filter(frame_resize, sigma=0.5)

            count += 1
            print("Frame:",count,"Orig shape:",frame.shape,"New shape",frame_resize.shape)

            content_rgb = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

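            # Optionally recolour the style image with the content frame's colour statistics
            # so the stylized output keeps the original palette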
            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0], content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = ada_in.predict(content_rgb, style_rgb, style_window.alpha)
            else:
                interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
                stylized_rgb = ada_in.predict_interpolate(content_rgb, 
                                                          style_window.style_rgbs,
                                                          interp_weights,
                                                          style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])
            
            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)
                
            if args.video_out is not None:
                stylized_bgr = cv2.resize(stylized_bgr, out_shape) # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('AdaIN Style', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10) 
            if key & 0xFF == ord('r'):   # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:     # Load a second style if interpolating
                    style_window.set_style(random=True, style_idx=1, window='style2')    
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print("Switching to keep_colors",keep_colors)
            elif key & 0xFF == ord('q'): # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()
    
    if args.video_out is not None:
        video_writer.release()
    
    cv2.destroyAllWindows()
Example 8
def main():
    servo = Servo()

    camera = PiCamera(resolution='400x300')
    #bbox = (287, 23, 86, 320)
    sleep(5)
    frame = PiRGBArray(camera)
    camera.capture(frame, format="bgr")
    frame = frame.array
    camera.close()

    bbox = cv2.selectROI('ROI', frame, False, False)
    print("return", bbox)
    cv2.destroyAllWindows()

    ok = tracker.init(frame, bbox)

    vs = WebcamVideoStream('400x300').start()

    prev = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
    servo.servo_control_up_down(20 * (prev[1] - 150) / 150)
    servo.servo_control_left_right(-30 * (prev[0] - 200) / 200)

    count = 0

    while True:
        frame = vs.read()
        #frame = imutils.resize(frame, width=400)

        timer = cv2.getTickCount()

        ok, bbox = tracker.update(frame)
        bbox = (bbox[0], bbox[1], bbox[2], bbox[3])
        print(ok, bbox)

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            servo.servo_reset()
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        #https://github.com/opencv/opencv_contrib/issues/640
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        if count % 5 == 0:
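            # Every 5th frame, nudge the servos toward the tracked box centre, damped by 0.5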
            prev = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
            servo.servo_control_up_down(20 * (prev[1] - 150) * 0.5 / 150)
            servo.servo_control_left_right(-30 * (prev[0] - 200) * 0.5 / 200)

        count += 1
        #if count == 100:
        #    return
        cv2.imshow("Tracking", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
Example 9
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints,
                    relu_targets=args.relu_targets,
                    vgg_path=args.vgg_path,
                    device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size,
                               args.crop_size, args.scale, args.alpha,
                               args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0]
                         )  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps,
                                       out_shape)

    fps = FPS().start()  # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while True:
        if args.max_frames > 0 and count > args.max_frames:
            break

        ret, frame = cap.read()

        if ret is True:
            frame_resize = cv2.resize(frame,
                                      None,
                                      fx=style_window.scale,
                                      fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape,
                                                 np.uint8)

            count += 1
            print("Frame:", count, "Orig shape:", frame.shape, "New shape",
                  frame_resize.shape)

            content_rgb = cv2.cvtColor(
                frame_resize,
                cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0],
                                               content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            # For best results style img should be comparable size to content
            # style_rgb = resize_to(style_rgb, min(content_rgb.shape[0], content_rgb.shape[1]))

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = wct_model.predict(content_rgb, style_rgb,
                                                 style_window.alpha)

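                # Optionally feed the stylized frame back through the network for extra passes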
                if args.passes > 1:
                    for i in range(args.passes - 1):
                        stylized_rgb = wct_model.predict(
                            stylized_rgb, style_rgb, style_window.alpha)
                # stylized_rgb = wct_model.predict_np(content_rgb, style_rgb, style_window.alpha) # Numpy version
            # else: ## TODO Implement interpolation
            #     interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
            #     stylized_rgb = wct_model.predict_interpolate(content_rgb,
            #                                               style_window.style_rgbs,
            #                                               interp_weights,
            #                                               style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(
                    style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])

            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)

            if args.video_out is not None:
                stylized_bgr = cv2.resize(
                    stylized_bgr,
                    out_shape)  # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('WCT Universal Style Transfer', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10)
            if key & 0xFF == ord('r'):  # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:  # Load a second style if interpolating
                    style_window.set_style(random=True,
                                           style_idx=1,
                                           window='style2')
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print('Switching to keep_colors', keep_colors)
            elif key & 0xFF == ord('s'):
                out_f = "{}.png".format(time.time())
                save_img(out_f, stylized_rgb)
                print('Saved image to', out_f)
            elif key & 0xFF == ord('q'):  # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()

    if args.video_out is not None:
        video_writer.release()

    cv2.destroyAllWindows()
Example 10
                                      width=800,
                                      height=600).start()
    num_frames = 0
    start_time = datetime.datetime.now()

    # init multiprocessing
    input_q = Queue(maxsize=5)
    output_q = Queue(maxsize=5)

    # spin up workers to paralleize detection.
    frame_processed = 0
    num_workers = 3
    pool_hand = Pool(num_workers, worker_hand_pose, (input_q, output_q, frame_processed))

    while True:
        image_np = video_capture.read()
        image_np = cv2.cvtColor(image_np, cv2.COLOR_BGR2RGB)  # opencv reads images by default in BGR format
        handTrak.set_input(image_np)
        handTrak.detect_objects()

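        # Classify each cropped hand detection in a worker process and draw the result on the frame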
        for hand_data in handTrak.cropped_hands:
            hand_image, box, track_score = hand_data
            if hand_image.shape[0] > 0 and hand_image.shape[1] > 0:
                input_q.put(cv2.cvtColor(hand_image, cv2.COLOR_BGR2RGB))
                _, guess, guess_score = output_q.get()
                print(guess, guess_score)
                # handPose.set_input(hand_image)
                # guess, guess_score = handPose.get_pred()
                draw_box_on_image(box, guess, guess_score, track_score, score_thresh, image_np)

        # Calculate Frames per second (FPS)
Example 11
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)

    process = Process(target=worker, args=(input_q, output_q))
    process.daemon = True
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        cv2.imshow('Video', output_q.get())
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
Example 12

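# HSV threshold range for the colour mask (hue 10-22, roughly orange/yellow tones) and morphology kernels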
lowerBound = np.array([10, 200, 100])
upperBound = np.array([22, 255, 255])

kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))

servo = Servo()
vs = WebcamVideoStream('400x300').start()
count = 0

bbox = [0, 0, 0, 0]

while True:
    img = vs.read()
    #frame = imutils.resize(frame, width=400)
    frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    timer = cv2.getTickCount()

    mask = cv2.inRange(frame, lowerBound, upperBound)
    mask = cv2.erode(mask, None, iterations=5)
    mask = cv2.dilate(mask, None, iterations=2)

    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[-2]

    if len(cnts) > 0:
        cv2.drawContours(frame, cnts, -1, (255,0,0), 3)
        bbox = cv2.boundingRect(cnts[0])
Example 13
import numpy as np
import cv2
from utils import WebcamVideoStream
from rift import PyRift


if __name__ == '__main__':
    left = WebcamVideoStream(src=0).start()
    right = WebcamVideoStream(src=1).start()

    cv2.namedWindow('view', cv2.WND_PROP_FULLSCREEN)

    while True:

        left_frame = left.read()
        right_frame = right.read()

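        # Stitch the two camera frames side by side for the stereo view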
        frame = np.concatenate((left_frame, right_frame), axis=1)
        frame = cv2.resize(frame, (1920, 1080), interpolation=cv2.INTER_NEAREST)

        # Display the resulting frame
        cv2.imshow('view', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            left.stop()
            right.stop()
            break

    # When everything done, release the capture
    cv2.destroyAllWindows()