def session(cli, add):
    """
    sl , sr : synchronized set speed
    al, ar : asynchronous set speed
    gs: get speed
    gi: get information
    sp: synchronized get picture :response 'ok'
    ap: asynchronized get picrture
    vd: stream video
    sv: stop stream video
    """

    while True:
        command_raw = cli.recv(unpacker.size)
        name, speed = unpacker.unpack(command_raw)
        name = name.decode()
        if name[0] == 's':
            if name[1] == 'p':
                cli.sendall(get_frame())
                print('waiting for response')
                res = cli.recv(unpacker.size)
                n, s = unpacker.unpack(res)
                if n.decode() != 'ok':
                    print('send image failed')

            elif name[1] == 'l':
                left_motor.change_speed(speed)
                cli.sendall(single_packer.pack(left_motor.get_speed()))
            elif name[1] == 'r':
                right_motor.change_speed(speed)
                cli.sendall(single_packer.pack(right_motor.get_speed()))
        elif name[0] == 'a':
            if name[1] == 'p':
                fb = get_frame()
                cli.sendall(fb)
            elif name[1] == 'l':
                left_motor.change_speed(speed)
            elif name[1] == 'r':
                right_motor.change_speed(speed)
        elif name[0] == 'g':
            if name[1] == 's':
                cli.sendall(double_packer.pack(left_motor.get_speed(), right_motor.get_speed()))
            elif name[1] == 'i':
                fbs, speeds = get_packed_info()
                cli.sendall(fbs)
                cli.sendall(speeds)
        elif name == 'in':
            size = get_frame_size()
            packed = single_packer.pack(size)
            cli.sendall(packed)
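Commands and speeds travel as fixed-size struct payloads, but the unpacker, single_packer, and double_packer structs are not shown here, so the '2sf' and 'f' formats below are guesses, as are the host and port. A minimal client sketch for the synchronized left-speed command:

import socket
import struct

command = struct.Struct('2sf')   # assumed: 2-byte command name + one float speed
single = struct.Struct('f')      # assumed: one float reply

sock = socket.create_connection(('robot.local', 9000))  # hypothetical address
sock.sendall(command.pack(b'sl', 0.5))   # synchronized: set left motor speed
(speed,) = single.unpack(sock.recv(single.size))  # server echoes the new speed
print('left motor now at', speed)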
Example #2
def cam_process():
    global encodings, current_faces, zach_encoding, zach_image

    while True:

        frame = camera.get_frame()
        if frame is None:
            break

        # find faces
        face_locations = face_recognition.face_locations(frame)
        face_encodings = face_recognition.face_encodings(frame, face_locations)

        # identify faces: a match resets confidence to 1, a miss decays it
        for name, known_encoding in encodings:
            if name not in current_faces:
                current_faces[name] = 0
            if True in face_recognition.compare_faces(face_encodings, known_encoding):
                current_faces[name] = 1
            else:
                current_faces[name] *= .75

        # tell the keylogger which faces are currently visible
        faces = [name for name, score in current_faces.items() if score >= .25]
        keylogger.set_faces(faces)
        print(current_faces)
        time.sleep(.3)
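The confidence update above is an exponential decay: a match resets a face's score to 1, each miss multiplies it by 0.75, and 0.25 is the visibility cutoff. A quick self-contained check of how many missed frames a face survives after its last match (roughly 0.3 s per frame given the sleep above):

score, misses = 1.0, 0
while score >= .25:
    misses += 1
    score *= .75
# 0.75**4 ≈ 0.32 and 0.75**5 ≈ 0.24, so the face stays reported through
# four missed frames (about 1.2 s) and is dropped on the fifth
print(misses)  # -> 5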
Example #3
        def gen():
            camera = VideoCamera(server_conf.camera_source)
            detector = object_detector.ObjectDetector(
                'ssd_mobilenet_v1_coco_2017_11_17')
            face_recog_m = FaceRecog()
            self.logger.debug("start detector")
            #detector = ObjectDetector('mask_rcnn_inception_v2_coco_2018_01_28')
            #detector = ObjectDetector('pet', label_file='data/pet_label_map.pbtxt')

            retry_cnt = 0
            while True:
                try:
                    frame = camera.get_frame()
                    frame, face_result_list = face_recog_m.get_frame_live(
                        frame)
                    #frame = cam.get_frame()
                    frame, obj_detect_dict = detector.detect_objects_live(
                        frame)
                    self.buffer_handle(face_result_list, obj_detect_dict)

                    ret, jpg = cv2.imencode('.jpg', frame)
                    jpg_bytes = jpg.tobytes()

                    yield (b'--frame\r\n'
                           b'Content-Type: image/jpeg\r\n\r\n' + jpg_bytes +
                           b'\r\n\r\n')
                except Exception as err:
                    self.logger.error(str(err))
                    try:
                        camera = VideoCamera(server_conf.camera_source)
                    except Exception:
                        retry_cnt += 1
                        if retry_cnt > self.retry_cnt_max:
                            break
Example #4
def gen(camera, callback):
    global frame, play
    while True:
        if play:
            frame = camera.get_frame(callback)
        else:
            time.sleep(1)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #5
def get_snapshot(cam_no):
    if not session.get('logged_in'):
        return "Login to view feed"
    # For now, save it locally then send it.
    # TODO Fix this in the future.
    result = cv2.imwrite("snapshot.jpg", camera.get_frame(cam_no))
    directory = app.root_path
    return send_from_directory(directory=directory, filename="snapshot.jpg")
Example #6
def gen(camera):
    while (int(time.time() - start_time) < capture_duration):
        settings.init()
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        if len(settings.myList) != 0:
            emotions.append(settings.myList[0][0])
            print(settings.myList[0][0])
Example #7
def gen2(camera):
    while True:
        #get camera frame
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
        key = cv2.waitKey(1) & 0xFF

        if key == ord("q"):
            break
Example #8
def gen(camera):
    #start_time = time.time() #New---Code--Added
    #timestamp_record.append(start_time) #New---Code--Added
    while True:
        #get camera frame
        frame = camera.get_frame()  #start_time,timestamp_record)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
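Generators like the ones above are normally wrapped in a Flask streaming response, exactly as Example #18 below does. A minimal route sketch, assuming a Flask app and the VideoCamera/gen names used throughout these examples (both are assumptions here):

from flask import Flask, Response

app = Flask(__name__)

@app.route('/video_feed')
def video_feed():
    # multipart/x-mixed-replace keeps the connection open; the browser
    # swaps in each new JPEG part as it arrives (an MJPEG stream)
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')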
Example #9
def gen(camera):
    """Video streaming generator function."""
    reference = 100
    counter = 0
    init_time = time.time()

    while True:

        # Getting frame from camera object
        frame = camera.get_frame()

        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
Example #10
def gen(camera):
    global words
    print("working")
    while True:
        frame = camera.get_frame()
        # yield (b'--frame\r\n'
        #         b'Content-Type: image/jpeg\r\n\r\n' + frame[0] + b'\r\n\r\n')
        if not frame[1]:
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame[0] + b'\r\n\r\n')
        else:
            print("Text: ", frame[2])
            finalAnswer = "right"
            words = frame[2]
Example #11
def gen(camera, save=False, vstart=False):
    while True:

        frame = camera.get_frame(False, save, vstart)
        # bytes objects have no .format() in Python 3, so concatenate instead
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #12
    def getFrameFromCamera(self):
        frame = camera.get_frame()
        while len(frame) <= 0:
            frame = camera.get_frame()
        # np.fromstring is deprecated; frombuffer reinterprets the raw bytes
        frame = np.frombuffer(frame, np.uint8).reshape(self.h, self.w, 3)
        return frame
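np.frombuffer only reinterprets existing bytes, so the reshape above requires exactly self.h * self.w * 3 bytes of raw BGR data. A tiny self-contained check of that round trip (the 4x6 frame size is made up for illustration):

import numpy as np

h, w = 4, 6                      # hypothetical frame dimensions
raw = bytes(h * w * 3)           # stand-in for camera.get_frame() output
frame = np.frombuffer(raw, np.uint8).reshape(h, w, 3)  # ValueError if the byte count is wrong
assert frame.shape == (h, w, 3)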
Example #13
def gen(camera):
    while True:
        frame = camera.get_frame(face_cascade)        
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #14
def gen(camera, save=False, vstart=False):
    while True:
        frame = camera.get_frame(False, save, vstart)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #15
def set_target_basket(target):
    targetValues['lowerLimits'] = np.array([target[0], target[1], target[2]])
    targetValues['upperLimits'] = np.array([target[3], target[4], target[5]])
    targetValues['kernelDilate'] = np.ones((target[6], target[6]), np.uint8)


targetValues = {'lowerLimits': None, 'upperLimits': None, 'kernelDilate': None}
set_target_basket(opponent)

values = []

cv2.namedWindow('rgb_img', cv2.WINDOW_NORMAL)
while True:
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    depth_frame, frame = camera.get_frame()
    hsv_frame = camera.to_hsv(frame)

    basket = camera.basket_bottom(hsv_frame, targetValues)

    if basket:
        print("Bottom coordinate:", basket[1])
        cv2.putText(frame, str(basket), tuple(basket),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
    else:
        print("No Basket")
    cv2.imshow("Show", frame)

camera.stop()
cv2.destroyAllWindows()
Example #16
def gen(camera, save=False, vstart=False):
    while True:

        frame = camera.get_frame(False, save, vstart)
        # bytes have no .format() in Python 3; concatenate the JPEG bytes
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
Example #17
def gen():
    while True:
        # grab a raw frame, then yield the formatted multipart chunk
        f = camera.get_frame(_cam)
        r = camera.fmt_frame(_cam)
        yield r
Example #18
def calc2():
    return Response(get_frame(1),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Example #19
def get_packed_info():
    frame_bytes = get_frame()
    ls = left_motor.get_speed()
    rs = right_motor.get_speed()
    speeds = double_packer.pack(ls, rs)
    return frame_bytes, speeds
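On the wire, the 'gi' reply is just the raw frame bytes followed by the double-packed speeds, with no length prefix; the 'in' command in Example #1 exists so the client can learn how many frame bytes to expect. A hedged client sketch, reusing the same assumed struct formats and address as the sketch after Example #1:

import socket
import struct

command = struct.Struct('2sf')  # assumed request format: name + unused speed
single = struct.Struct('f')     # assumed 'in' reply: frame size
double = struct.Struct('ff')    # assumed speeds reply: left, right

sock = socket.create_connection(('robot.local', 9000))  # hypothetical address
sock.sendall(command.pack(b'in', 0.0))
(size,) = single.unpack(sock.recv(single.size))

sock.sendall(command.pack(b'gi', 0.0))
frame = b''
while len(frame) < int(size):   # recv until the full frame has arrived
    frame += sock.recv(int(size) - len(frame))
left, right = double.unpack(sock.recv(double.size))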
Example #20
cv2.createTrackbar('hB', "Controls", bars[3], 255, partial(updateValue, 3))
cv2.createTrackbar('hG', "Controls", bars[4], 255, partial(updateValue, 4))
cv2.createTrackbar('hR', "Controls", bars[5], 255, partial(updateValue, 5))
cv2.createTrackbar('Dilate', "Controls", bars[6], 20, partial(updateValue, 6))
# cv2.createTrackbar('Erode', "Controls", bars[7], 20, partial(updateValue, 7))

try:
    while True:
        # kernel
        values['kernelDilate'] = np.ones((bars[6], bars[6]), np.uint8)
        # kernelErode = np.ones((bars[7],bars[7]),np.uint8)

        values['lowerLimits'] = np.array([bars[0], bars[1], bars[2]])
        values['upperLimits'] = np.array([bars[3], bars[4], bars[5]])

        d, frame = camera.get_frame()
        #frame = frame[600:680, 530:650]
        hsv = camera.to_hsv(frame)

        # for balls
        processed_frame = camera.process_frame(hsv, values)

        # for baskets
        # processed_frame = Camera.process_basket(hsv, lowerLimits, upperLimits, kernelDilate)

        contours, _hierarchy = cv2.findContours(processed_frame, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
        circles = map(cv2.minEnclosingCircle, contours)

        # tagging all blobs
        for i in circles:
Example #21
devices = [args.device, args.device_age_gender]
models = [args.model_face, args.model_age_gender]

if "CPU" in devices and args.cpu_extension is None:
    print("\nPlease specify the CPU extensions library path in the demo's "
          "command-line parameters using the -l or --cpu_extension argument")
    sys.exit(1)

detections = interactive_detection.Detections(
    devices, models, args.cpu_extension, args.plugin_dir,
    args.prob_threshold, args.prob_threshold_face)

ct = CentroidTracker()
camera = VideoCamera(args.input, detections, args.no_v4l, ct)
# this frame already has the detection results drawn onto it
while True:
    frame = camera.get_frame(flip_code, is_age_gender_detection)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        dump_json(object_list)
        break

# if we are using a webcam, release the pointer
if video is None:
    vs.stop()
    dump_json(object_list)

# otherwise, release the file pointer
else: