def main():
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    servo = Servo()

    camera = PiCamera(resolution='400x300')
    sleep(5)
    frame = PiRGBArray(camera)
    camera.capture(frame, format="bgr")
    frame = frame.array
    camera.close()

    yolo = YOLO_tiny_tf.YOLO_TF()

    vs = WebcamVideoStream('448x448').start()

    """prev = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
    servo.servo_control_up_down(20 * (prev[1]-150) / 150)
    servo.servo_control_left_right(-30 * (prev[0]-200) / 200)
    """
    count = 0
    frame_without = 0

    while True:
        frame = vs.read()
        yolo.detect_from_cvmat(frame)
        #frame = imutils.resize(frame, width=400)
        result_box = yolo.result
        #timer = cv2.getTickCount()

        print(count)
        center = [0,0]
        person = 0
        for result in result_box:
            bbox = (result[1], result[2], result[3], result[4])
            print(result[0], bbox)

            #fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);

            # Display FPS on frame
            #cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);

            if result[0] == 'person':
                person += 1
                center[0] += bbox[0]
                center[1] += bbox[1]

        if person == 0:
            frame_without += 1
            if frame_without == 5:
                servo.servo_reset()
                frame_without = 0
        #else:
        if person > 0:
            frame_without = 0
            prev = (center[0]/person, center[1]/person)
            print(prev)
            # Map the mean detection position's offset from the 448x448 frame
            # centre (224, 224) onto tilt/pan commands in degrees.
            servo.servo_control_up_down(20 * (prev[1]-224) / 448)
            servo.servo_control_left_right(-30 * (prev[0]-224) / 448)
        count += 1
def main():
    video_capture = WebcamVideoStream(src=0, width=480, height=360).start()
    fps = FPS().start()

    detection_graph = model_load_into_memory()

    thread1 = ServerHandlerPacket("Thread-1-ServerHandlerPacket")
    thread1.daemon = True
    thread1.start()

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Camera detection loop
                frame = video_capture.read()
                cv2.imshow('Entrada', frame)
                t = time.time()
                output = detect_objects(frame, sess, detection_graph)
                cv2.imshow('Video', output)
                fps.update()
                print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            video_capture.stop()
            fps.stop()
            print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
            print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

            cv2.destroyAllWindows()
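# Several snippets on this page also import an FPS counter from the same
# utils module, which is not shown. A minimal sketch in the spirit of the
# imutils FPS class (an assumption -- the projects' real implementation may
# differ), matching how it is used here: start()/update()/stop(), then
# elapsed() and fps():
import datetime

class FPS:
    def __init__(self):
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        self._end = datetime.datetime.now()

    def update(self):
        # Call once per processed frame
        self._numFrames += 1

    def elapsed(self):
        return (self._end - self._start).total_seconds()

    def fps(self):
        return self._numFrames / self.elapsed()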
Example #3
def main():
    args = argument_parser()

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    detection_graph = model_load_into_memory()

    # Thread starting in background
    http_thread = ObjectDetectionThread("HTTP Publisher Thread")
    http_thread.daemon = True
    http_thread.start()

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Camera detection loop
                frame = video_capture.read()
                cv2.imshow('Entrada', frame)
                output = detect_objects(frame, sess, detection_graph)
                # cv2.imencode returns (retval, buffer); keep only the JPEG buffer
                _, TotalPeople.img = cv2.imencode('.jpeg', output)
                cv2.imshow('Video', output)

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            # Ending resources
            video_capture.stop()
            http_thread.stop()
            cv2.destroyAllWindows()
Example #4
def run():
    cap = WebcamVideoStream(src=0, width=1920, height=1080).start()
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
    parameters = aruco.DetectorParameters_create()
    while True:
        frame = cap.read()
        fh, fw, _ = frame.shape

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = aruco.detectMarkers(
            gray, aruco_dict, parameters=parameters)
        # res = cv2.resize(frame, (320, 180))
        # retval, buffer = cv2.imencode('.jpg', res)
        # cv2.imshow('res',res)
        # len(corners) is an int; cast it to str before concatenating
        sio.emit('img', 'data:image/jpeg;base64,' +
                 str(len(corners)))  # + base64.b64encode(buffer))
        # sio.emit('img', "asdsadad")
        # if cv2.waitKey(0) & 0xFF == ord('q'):
        # 	break

    cv2.destroyAllWindows()
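# The commented-out lines in the loop above hint at streaming the JPEG itself
# rather than just the marker count. Inside the loop that path would look
# roughly like this (a sketch only; `sio` is the socketio client configured
# elsewhere in the file, and base64 would need to be imported):
#
#     res = cv2.resize(frame, (320, 180))
#     ok, buffer = cv2.imencode('.jpg', res)
#     if ok:
#         sio.emit('img', 'data:image/jpeg;base64,' +
#                  base64.b64encode(buffer).decode('ascii'))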
Example #5
    # cfg = 'models/tiny-yolo/tiny-yolo-obj.cfg'
    # model = 'models/tiny-yolo/tiny-yolo-face_13000.weights'

    cfg = 'models/yolo/yolo-obj.cfg'
    model = 'models/yolo/yolo-face_1400.weights'
    names = 'models/yolo/obj.names'
    net = cv.dnn.readNetFromDarknet(cfg, model)
    if net.empty():
        exit(1)

    pool = Pool(args.num_workers, worker,
                (input_q, output_q, net, args.min_confidence))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()

    # Define the codec and create VideoWriter object
    fourcc = cv.VideoWriter_fourcc(*args.codec)
    out = cv.VideoWriter(args.save, fourcc, args.fps,
                         (args.width, args.height))

    fps = FPS().start()
    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)
        t = time.time()
        output_frame = output_q.get()
        out.write(output_frame)
        cv.imshow('Video', output_frame)
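# The `worker` handed to Pool above is defined elsewhere in the file and not
# shown. A minimal sketch of the producer/consumer contract it has to satisfy
# (`run_detection` is a hypothetical stand-in for the cv.dnn forward pass and
# box drawing, not a function from the source):
def worker(input_q, output_q, net, min_confidence):
    while True:
        frame = input_q.get()  # frame pushed by the capture loop
        annotated = run_detection(net, frame, min_confidence)  # hypothetical helper
        output_q.put(annotated)  # the capture loop displays and records this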
Example #6
        help=
        "don't show facial features as hand drawn images from the quick-draw dataset",
        action="store_true")
    parser.add_argument(
        "--showpose",
        help=
        "show the pose as a line, along with the points used to compute the pose",
        action="store_true")
    settings = vars(parser.parse_args())
    settings['scale_frame'] = 4
    settings['height'] = 720
    settings['width'] = 1280

    # Get a reference to webcam #0 (the default one). This could be made a command line option.
    video_capture = WebcamVideoStream(src=0,
                                      width=settings['width'],
                                      height=settings['height']).start()

    # Setup some rendering related things
    canvas = None
    quickdraw = QuickDraw()
    sketch_images = {}
    line_color = (156, 156, 156)
    sprites = []
    last_added_sprite = time.time()

    # Track fps
    fps = FPS().start()

    # Loop until the user hits 'q' to quit
    while True:
Example #7
from threading import Thread
import time
import datetime
import base64
import numpy as np
import cv2
import cv2.aruco as aruco
import math
import json
import requests
from utils import WebcamVideoStream

cap = WebcamVideoStream(src='http://192.168.101.73/', width=1280,
                        height=720).start()
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
parameters = aruco.DetectorParameters_create()


def PolygonArea(c):
    # detectMarkers returns each marker's corners with shape (1, 4, 2);
    # drop the leading axis and unpack the four (x, y) points.
    c = c[0]
    c = [(c[0][0], c[0][1]), (c[1][0], c[1][1]), (c[2][0], c[2][1]),
         (c[3][0], c[3][1])]
    # Shoelace formula for the area of the quadrilateral, in pixels^2.
    n = len(c)
    area = 0.0
    for i in range(n):
        j = (i + 1) % n
        area += c[i][0] * c[j][1]
        area -= c[j][0] * c[i][1]
    area = abs(area) / 2.0
    return area
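# Quick illustration of PolygonArea (values made up): each entry of the
# `corners` list returned by aruco.detectMarkers has shape (1, 4, 2), so an
# axis-aligned 100 x 100 px marker yields an area of 10000 px^2.
sample_corner = np.array([[[100.0, 100.0],
                           [200.0, 100.0],
                           [200.0, 200.0],
                           [100.0, 200.0]]])
print(PolygonArea(sample_corner))  # -> 10000.0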
Example #8
from threading import Thread
import time
import datetime
import base64
import numpy as np
import cv2
import cv2.aruco as aruco
import math
import json
import requests
from utils import WebcamVideoStream
from matplotlib import pyplot as plt

cap = WebcamVideoStream(src=1, width=2560, height=720).start()

while True:
    frame = cap.read()
    fh, fw, _ = frame.shape

    a = frame[0:720, 0:1280]
    afh, afw, _ = a.shape
    b = frame[0:720, 1280:2560]
    bfh, bfw, _ = b.shape

    # frame = cv2.resize(frame, (int(fw/2), int(fh/2)))
    a = cv2.resize(a, (int(afw / 2), int(afh / 2)))
    b = cv2.resize(b, (int(bfw / 2), int(bfh / 2)))
    cv2.imshow('a', a)
    cv2.imshow('b', b)
    # cv2.imshow('frame',frame)
    a = cv2.cvtColor(a, cv2.COLOR_BGR2GRAY)
Example #9
def main():
    # Load the AdaIN model
    ada_in = AdaINference(args.checkpoint, args.vgg_path, device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size, args.scale, args.alpha, args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1]+img_shape[0],img_shape[0]) # Make room for the style img
        else:
            out_shape = (img_shape[1],img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps, out_shape)
    
    fps = FPS().start() # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while(True):
        ret, frame = cap.read()

        if ret is True:       
            frame_resize = cv2.resize(frame, None, fx=style_window.scale, fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)
                frame_resize = gaussian_filter(frame_resize, sigma=0.5)

            count += 1
            print("Frame:",count,"Orig shape:",frame.shape,"New shape",frame_resize.shape)

            content_rgb = cv2.cvtColor(frame_resize, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0], content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = ada_in.predict(content_rgb, style_rgb, style_window.alpha)
            else:
                interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
                stylized_rgb = ada_in.predict_interpolate(content_rgb, 
                                                          style_window.style_rgbs,
                                                          interp_weights,
                                                          style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])
            
            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)
                
            if args.video_out is not None:
                stylized_bgr = cv2.resize(stylized_bgr, out_shape) # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('AdaIN Style', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10) 
            if key & 0xFF == ord('r'):   # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:     # Load a second style if interpolating
                    style_window.set_style(random=True, style_idx=1, window='style2')    
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print("Switching to keep_colors",keep_colors)
            elif key & 0xFF == ord('q'): # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()
    
    if args.video_out is not None:
        video_writer.release()
    
    cv2.destroyAllWindows()
Example #10
import os
import time
import argparse

import cv2
from utils import FPS, WebcamVideoStream

CWD_PATH = os.getcwd()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', dest='output_name', type=str,
                        default="output_recorded.avi", help='name of output file.')
    args = parser.parse_args()


    video_capture = WebcamVideoStream(src=0,
                                      width=480,
                                      height=360).start()
    fps = FPS().start()

    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    # The frame size passed to VideoWriter must match the captured frames (480x360)
    out = cv2.VideoWriter(args.output_name, fourcc, 20.0, (480, 360))
    while True:  # fps._numFrames < 120
        frame = video_capture.read()

        t = time.time()
        print('video output activates')
        cv2.imshow('Video', frame)
        out.write(frame)
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))
Example #11
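# The snippet below calls tracker.init()/tracker.update() and prints a
# tracker_type banner, but the tracker itself is created in a part of the
# file that is not shown. A minimal sketch of a typical OpenCV contrib setup
# (an assumption; the original may use a different tracker, and newer OpenCV
# builds expose the same factories under cv2.legacy):
tracker_type = "KCF"
tracker = cv2.TrackerKCF_create()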
def main():
    servo = Servo()

    camera = PiCamera(resolution='400x300')
    #bbox = (287, 23, 86, 320)
    sleep(5)
    frame = PiRGBArray(camera)
    camera.capture(frame, format="bgr")
    frame = frame.array
    camera.close()

    bbox = cv2.selectROI('ROI', frame, False, False)
    print("return", bbox)
    cv2.destroyAllWindows()

    ok = tracker.init(frame, bbox)

    vs = WebcamVideoStream('400x300').start()

    prev = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
    servo.servo_control_up_down(20 * (prev[1] - 150) / 150)
    servo.servo_control_left_right(-30 * (prev[0] - 200) / 200)

    count = 0

    while True:
        frame = vs.read()
        #frame = imutils.resize(frame, width=400)

        timer = cv2.getTickCount()

        ok, bbox = tracker.update(frame)
        bbox = (bbox[0], bbox[1], bbox[2], bbox[3])
        print(ok, bbox)

        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            servo.servo_reset()
            cv2.putText(frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        #https://github.com/opencv/opencv_contrib/issues/640
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        if count % 5 == 0:
            prev = ((bbox[0] + bbox[2] / 2), (bbox[1] + bbox[3] / 2))
            servo.servo_control_up_down(20 * (prev[1] - 150) * 0.5 / 150)
            servo.servo_control_left_right(-30 * (prev[0] - 200) * 0.5 / 200)

        count += 1
        #if count == 100:
        #    return
        cv2.imshow("Tracking", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
Example #12
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints,
                    relu_targets=args.relu_targets,
                    vgg_path=args.vgg_path,
                    device=args.device)

    # Load a panel to control style settings
    style_window = StyleWindow(args.style_path, args.style_size,
                               args.crop_size, args.scale, args.alpha,
                               args.interpolate)

    # Start the webcam stream
    cap = WebcamVideoStream(args.video_source, args.width, args.height).start()

    _, frame = cap.read()

    # Grab a sample frame to calculate frame size
    frame_resize = cv2.resize(frame, None, fx=args.scale, fy=args.scale)
    img_shape = frame_resize.shape

    # Setup video out writer
    if args.video_out is not None:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if args.concat:
            out_shape = (img_shape[1] + img_shape[0], img_shape[0]
                         )  # Make room for the style img
        else:
            out_shape = (img_shape[1], img_shape[0])
        print('Video Out Shape:', out_shape)
        video_writer = cv2.VideoWriter(args.video_out, fourcc, args.fps,
                                       out_shape)

    fps = FPS().start()  # Track FPS processing speed

    keep_colors = args.keep_colors

    count = 0

    while (True):
        if args.max_frames > 0 and count > args.max_frames:
            break

        ret, frame = cap.read()

        if ret is True:
            frame_resize = cv2.resize(frame,
                                      None,
                                      fx=style_window.scale,
                                      fy=style_window.scale)

            if args.noise:  # Generate textures from noise instead of images
                frame_resize = np.random.randint(0, 256, frame_resize.shape,
                                                 np.uint8)

            count += 1
            print("Frame:", count, "Orig shape:", frame.shape, "New shape",
                  frame_resize.shape)

            content_rgb = cv2.cvtColor(
                frame_resize,
                cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, we need RGB

            if args.random > 0 and count % args.random == 0:
                style_window.set_style(random=True, style_idx=0)

            if keep_colors:
                style_rgb = preserve_colors_np(style_window.style_rgbs[0],
                                               content_rgb)
            else:
                style_rgb = style_window.style_rgbs[0]

            # For best results style img should be comparable size to content
            # style_rgb = resize_to(style_rgb, min(content_rgb.shape[0], content_rgb.shape[1]))

            if args.interpolate is False:
                # Run the frame through the style network
                stylized_rgb = wct_model.predict(content_rgb, style_rgb,
                                                 style_window.alpha)

                if args.passes > 1:
                    for i in range(args.passes - 1):
                        stylized_rgb = wct_model.predict(
                            stylized_rgb, style_rgb, style_window.alpha)
                # stylized_rgb = wct_model.predict_np(content_rgb, style_rgb, style_window.alpha) # Numpy version
            # else: ## TODO Implement interpolation
            #     interp_weights = [style_window.interp_weight, 1 - style_window.interp_weight]
            #     stylized_rgb = wct_model.predict_interpolate(content_rgb,
            #                                               style_window.style_rgbs,
            #                                               interp_weights,
            #                                               style_window.alpha)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat and args.interpolate is False:
                # Resize style img to same height as frame
                style_rgb_resized = cv2.resize(
                    style_rgb, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                stylized_rgb = np.hstack([style_rgb_resized, stylized_rgb])

            stylized_bgr = cv2.cvtColor(stylized_rgb, cv2.COLOR_RGB2BGR)

            if args.video_out is not None:
                stylized_bgr = cv2.resize(
                    stylized_bgr,
                    out_shape)  # Make sure frame matches video size
                video_writer.write(stylized_bgr)

            cv2.imshow('WCT Universal Style Transfer', stylized_bgr)

            fps.update()

            key = cv2.waitKey(10)
            if key & 0xFF == ord('r'):  # Load new random style
                style_window.set_style(random=True, style_idx=0)
                if args.interpolate:  # Load a second style if interpolating
                    style_window.set_style(random=True,
                                           style_idx=1,
                                           window='style2')
            elif key & 0xFF == ord('c'):
                keep_colors = not keep_colors
                print('Switching to keep_colors', keep_colors)
            elif key & 0xFF == ord('s'):
                out_f = "{}.png".format(time.time())
                save_img(out_f, stylized_rgb)
                print('Saved image to', out_f)
            elif key & 0xFF == ord('q'):  # Quit
                break
        else:
            break

    fps.stop()
    print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

    cap.stop()

    if args.video_out is not None:
        video_writer.release()

    cv2.destroyAllWindows()
Example #13
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress tensorflow warnings

if __name__ == '__main__':
    # max number of hands we want to detect/track
    score_thresh = 0.2
    num_hands = 2

    handTrak = HandModelCV(score_thresh=0.6, num_hands_detect=num_hands)
    # handPose = PoseModel(score_thresh=score_thresh, num_hands_detect=num_hands)

    handTrak.load_graph('frozen_models/hand_detect_graph.pb',
                        'frozen_models/hand_detect_graph.pbtxt')
    # handPose.load_graph('/home/testuser/obj_det_git/tf_image_classifier/tf_files/retrained_graph.pb')

    video_capture = WebcamVideoStream(src=0,
                                      width=800,
                                      height=600).start()
    num_frames = 0
    start_time = datetime.datetime.now()

    # init multiprocessing
    input_q = Queue(maxsize=5)
    output_q = Queue(maxsize=5)

    # spin up workers to parallelize detection.
    frame_processed = 0
    num_workers = 3
    pool_hand = Pool(num_workers, worker_hand_pose, (input_q, output_q, frame_processed))

    while True:
        image_np = video_capture.read()
Example #14
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                        default=5, help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)

    process = Process(target=worker, args=((input_q, output_q)))
    process.daemon = True
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    video_capture = WebcamVideoStream(src=args.video_source,
                                      width=args.width,
                                      height=args.height).start()
    fps = FPS().start()

    while True:  # fps._numFrames < 120
        frame = video_capture.read()
        input_q.put(frame)

        t = time.time()

        cv2.imshow('Video', output_q.get())
        fps.update()

        print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

        if cv2.waitKey(1) & 0xFF == ord('q'):
Example #15
from time import sleep
from imutils.video import VideoStream
import imutils
import numpy as np
import cv2
from utils import WebcamVideoStream, FPS
from servo_control2 import Servo


lowerBound=np.array([10, 200, 100])
upperBound=np.array([22, 255, 255])

kernelOpen=np.ones((5,5))
kernelClose=np.ones((20,20))

servo = Servo()
vs = WebcamVideoStream('400x300').start()
count = 0

bbox = [0, 0, 0, 0]

while True:
    img = vs.read()
    #frame = imutils.resize(frame, width=400)
    frame = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # convert to HSV for colour thresholding

    timer = cv2.getTickCount()

    mask = cv2.inRange(frame, lowerBound, upperBound)
    mask = cv2.erode(mask, None, iterations=5)
    mask = cv2.dilate(mask, None, iterations=2)
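# The listing is cut off here. One plausible continuation -- not part of the
# original source -- assuming the goal is to steer the servo toward the
# largest orange blob, as the neighbouring examples do (OpenCV 4
# findContours signature assumed):
#
#     contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
#                                    cv2.CHAIN_APPROX_SIMPLE)
#     if contours:
#         bbox = cv2.boundingRect(max(contours, key=cv2.contourArea))
#         cx, cy = bbox[0] + bbox[2] / 2, bbox[1] + bbox[3] / 2
#         servo.servo_control_up_down(20 * (cy - 150) / 150)
#         servo.servo_control_left_right(-30 * (cx - 200) / 200)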
Example #16
import numpy as np
import cv2
from utils import WebcamVideoStream
from rift import PyRift


if __name__ == '__main__':
    left = WebcamVideoStream(src=0).start()
    right = WebcamVideoStream(src=1).start()

    cv2.namedWindow('view', cv2.WND_PROP_FULLSCREEN)
    # namedWindow alone does not switch to fullscreen; set the property as well
    cv2.setWindowProperty('view', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

    while True:

        left_frame = left.read()
        right_frame = right.read()

        frame = np.concatenate((left_frame, right_frame), axis=1)
        frame = cv2.resize(frame, (1920, 1080), interpolation=cv2.INTER_NEAREST)

        # Display the resulting frame
        cv2.imshow('view', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            left.stop()
            right.stop()
            break

    # When everything done, release the capture
    cv2.destroyAllWindows()
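# Every snippet on this page imports WebcamVideoStream from a local utils
# module that is not included, and the constructors differ between projects
# (a cv2.VideoCapture index, an HTTP stream URL, or a PiCamera resolution
# string; most .read() calls return a frame, while the style-transfer examples
# unpack (ret, frame)). A minimal sketch of the common threaded
# cv2.VideoCapture variant, modeled on the imutils pattern (an assumption,
# not the projects' actual implementation):
from threading import Thread

import cv2


class WebcamVideoStream:
    def __init__(self, src=0, width=640, height=480):
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        # Grab frames in a daemon thread so read() never blocks the caller
        Thread(target=self.update, daemon=True).start()
        return self

    def update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
        self.stream.release()

    def read(self):
        # Return the most recently grabbed frame
        return self.frame

    def stop(self):
        self.stopped = True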