Example #1
import cv2
import sys
import time
import imutils
import threading
import numpy as np
import face_recognition
from flask import Flask, render_template, Response
from imutils.video.pivideostream import PiVideoStream

video_capture = PiVideoStream().start()
time.sleep(2.0)

p1 = face_recognition.load_image_file("pictures/p1.jpg")
# Compute the face encoding of p1 (first face found in the image)
p1_face_encoding = face_recognition.face_encodings(p1)[0]

p2 = face_recognition.load_image_file("pictures/p2.jpg")
# Compute the face encoding of p2 (first face found in the image)
p2_face_encoding = face_recognition.face_encodings(p2)[0]

known_face_encodings = [
    p1_face_encoding,
    p2_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Novak Dokovic"
]

last_epoch = 0
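
# --- hedged continuation sketch; the matching loop is cut off above ---
# A frame from video_capture would typically be matched against the known
# encodings like this (the helper below is illustrative, not from the
# original snippet):
def recognize_faces(frame):
    rgb = frame[:, :, ::-1]  # face_recognition expects RGB; the Pi camera gives BGR
    locations = face_recognition.face_locations(rgb)
    encodings = face_recognition.face_encodings(rgb, locations)
    names = []
    for encoding in encodings:
        matches = face_recognition.compare_faces(known_face_encodings, encoding)
        name = "Unknown"
        if True in matches:
            name = known_face_names[matches.index(True)]
        names.append(name)
    return list(zip(locations, names))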
Example #2
FREQ = 13  # frequency (Hz) at which we update the car
RES = (320, 240)  # resolution of the camera

update = True
img = None

print("Loading ...")

# Load the neural network (load_model comes from keras.models)
NETWORK = '4out.h5'
model = load_model(NETWORK)

#Setting up car and video stream
initio = car.Initio()
initio.init()
vs = PiVideoStream(resolution=RES, framerate=5).start()

sleep(2.0)

print("Ready !")

t = time()
##Main loop
while True:
    if time() - t > 1 / FREQ or img is None:
        #Take picture
        update = True
        img = util.takePicture(vs)
        t = time()

    #Prediction of the NN
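    # --- hedged continuation sketch; the original snippet ends here ---
    # Assuming util.takePicture returns a frame already sized for the
    # network input and numpy is imported as np, the prediction step
    # might look like:
    if update:
        update = False
        pred = model.predict(np.expand_dims(img, axis=0))[0]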
Example #3
 def __init__(self, flip):
     self.vs = PiVideoStream(resolution=(800, 608)).start()
     time.sleep(2.0)
     self.flip = flip
Example #4
import imutils
from imutils.video.pivideostream import PiVideoStream
import argparse
import cv2 as cv
import io
import time
from picamera import PiCamera
from threading import Thread
from picamera.array import PiRGBArray

IMAGE_WIDTH = 640
IMAGE_HEIGHT = 380

# start video capture as a separate thread
print("starting video stream (call vs.stop() to kill thread)...")
vs = PiVideoStream(resolution=(640, 480)).start()
vs.camera.contrast = 100
time.sleep(2.0)

image = vs.read()
cv.imshow('image', image)
cv.waitKey(0)
cv.destroyAllWindows()
for i in range(0, 1000):
    #if i%10 == 0:
    cv.imshow('Raw', vs.read())
    cv.waitKey(1)
    print(i)
vs.stop()
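
# --- hedged aside; not part of the original snippet ---
# PiVideoStream keeps the underlying picamera.PiCamera instance as
# vs.camera (used above to set contrast), so other sensor attributes can
# be adjusted the same way before reading frames, e.g.:
#   vs.camera.brightness = 60   # 0-100
#   vs.camera.awb_mode = 'auto'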
Example #5
import cv2
import time
import imutils
import numpy as np
import os
from imutils.video import WebcamVideoStream
from imutils.video.pivideostream import PiVideoStream
from varbs import pxC, wxC
'''def draw_detections(img, rects, thickness = 1):
    for x, y, w, h in rects:
        # the HOG detector returns slightly larger rectangles than the real objects.
        # so we slightly shrink the rectangles to get a nicer output.
        pad_w, pad_h = int(0.15*w), int(0.05*h)
        cv2.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)'''

wd = 400

hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
wCam = WebcamVideoStream(src=0).start()
pCam = PiVideoStream().start()
time.sleep(2.0)
while True:
    pFrame = pCam.read()
    wFrame = wCam.read()
    pFrame = imutils.resize(pFrame, width=wd)
    wFrame = imutils.resize(wFrame, width=wd)
    pFound, _ = hog.detectMultiScale(pFrame,
                                     winStride=(8, 8),
                                     padding=(8, 8),
                                     scale=1.05)
    wFound, _ = hog.detectMultiScale(wFrame,
                                     winStride=(8, 8),
                                     padding=(8, 8),
                                     scale=1.05)
    pFound = np.array([[x, y, x + w, y + h] for (x, y, w, h) in pFound])
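    # --- hedged continuation sketch; the original snippet ends here ---
    # With `from imutils.object_detection import non_max_suppression` added
    # to the imports, overlapping detections are typically collapsed and
    # drawn back onto the frame:
    pick = non_max_suppression(pFound, probs=None, overlapThresh=0.65)
    for (xA, yA, xB, yB) in pick:
        cv2.rectangle(pFrame, (xA, yA), (xB, yB), (0, 255, 0), 2)
    cv2.imshow("PiCamera", pFrame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break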
Example #6
import time
import cv2
import os
import imutils
import RPi.GPIO as GPIO
from imutils.video.pivideostream import PiVideoStream


# print object coordinates
def mapObjectPosition(x, y):
    print("[INFO] Object center coordinates at X0 = {0} and Y0 = {1}".format(
        x, y))


# initialize the video stream and allow the camera sensor to warmup
print("[INFO] waiting for camera to warmup...")
#vs = VideoStream(0).start()
vs = PiVideoStream(resolution=(320, 240), framerate=32).start()
time.sleep(2.0)

# define the lower and upper boundaries of the object
# to be tracked in the HSV color space
colorLower = (27, 100, 100)
colorUpper = (47, 255, 255)

# loop over the frames from the video stream
while True:
    # grab the next frame from the video stream, resize it, rotate it
    # 180°, and convert it to the HSV color space
    frame = vs.read()
    frame = imutils.resize(frame, width=600)
    frame = imutils.rotate(frame, angle=180)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
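    # --- hedged continuation sketch; the original snippet ends here ---
    # The usual follow-up masks the colour range, finds the largest
    # contour, and reports its centre via mapObjectPosition:
    mask = cv2.inRange(hsv, colorLower, colorUpper)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    cnts = imutils.grab_contours(
        cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                         cv2.CHAIN_APPROX_SIMPLE))
    if cnts:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        if radius > 10:
            mapObjectPosition(int(x), int(y))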
Example #7
def camThread(LABELS, results, frameBuffer, vidfps, video_file_path,
              isTrackingBuffer, frameBufferToServer, ePositionBuffer,
              eDistanceBuffer):
    # global init_check   # used to initialize the fuzzy logic controller on first start
    global fps
    global detectfps
    global lastresults
    global framecount
    global detectframecount
    global time1
    global time2
    global cam
    global window_name
    global vs
    global camera_width
    global camera_height
    global right_pwm
    global left_pwm
    global isTracking
    global ePosition
    global eDistance

    if video_file_path != "":
        vs = FileVideoStream(video_file_path).start()
        window_name = "Movie File"
    else:
        vs = PiVideoStream((camera_width, camera_height), vidfps).start()
        window_name = "PiCamera"
    time.sleep(2)
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        t1 = time.perf_counter()

        # PiCamera Stream Read
        color_image = vs.read()
        #color_image = cv2.flip(color_image, 1)
        if frameBuffer.full():
            frameBuffer.get()
        frames = color_image

        # Reinitialize the camera width and height
        camera_height = color_image.shape[0]
        camera_width = color_image.shape[1]
        frameBuffer.put(color_image.copy())
        frameBufferToServer.put(color_image.copy())

        res = None

        if not results.empty():
            res = results.get(False)
            detectframecount += 1
            imdraw = overlay_on_image(frames, res, LABELS, isTrackingBuffer,
                                      frameBufferToServer, ePositionBuffer,
                                      eDistanceBuffer)
            lastresults = res
        else:
            imdraw = overlay_on_image(frames, lastresults, LABELS,
                                      isTrackingBuffer, frameBufferToServer,
                                      ePositionBuffer, eDistanceBuffer)

        cv2.imshow(window_name,
                   cv2.resize(imdraw, (camera_width, camera_height)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            sys.exit(0)

        ## Print FPS
        framecount += 1
        if framecount >= 15:
            fps = "(Playback) {:.1f} FPS".format(time1 / 15)
            detectfps = "(Detection) {:.1f} FPS".format(detectframecount /
                                                        time2)
            framecount = 0
            detectframecount = 0
            time1 = 0
            time2 = 0
        t2 = time.perf_counter()
        elapsedTime = t2 - t1
        time1 += 1 / elapsedTime
        time2 += elapsedTime
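
# --- hedged usage sketch; not in the original snippet ---
# camThread exchanges frames and detections with a separate inference
# process through queues; a launcher along these lines is assumed:
#
#   import multiprocessing as mp
#   frameBuffer = mp.Queue(10)
#   results = mp.Queue()
#   mp.Process(target=camThread,
#              args=(LABELS, results, frameBuffer, vidfps, "",
#                    isTrackingBuffer, frameBufferToServer,
#                    ePositionBuffer, eDistanceBuffer),
#              daemon=True).start()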
Example #8

def get_frame():
    frame = vs.read()
    ret, jpeg = cv2.imencode('.jpg', frame)
    return jpeg.tobytes()


@app.route('/')
def index():
    return render_template('index.html')


def gen():
    while True:
        frame = get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    vs = PiVideoStream(framerate=32).start()
    time.sleep(2.0)
    app.run(host='192.168.0.102', debug=False)
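
# --- hedged aside; not part of the original snippet ---
# index.html is not shown; for this route layout it would typically just
# embed the MJPEG stream, e.g.:
#   <img src="{{ url_for('video_feed') }}">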
Example #9
 def __init__(self, flip=False):
     self.vs = PiVideoStream(resolution=(360,240)).start()
     print("During init")
     print(self.vs)
     self.flip = flip
     time.sleep(2.0)
Example #10
 def __init__(self):
     self.vs = PiVideoStream().start()
     time.sleep(2.0)
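 # --- hedged companion sketch; not in the original ---
 # Camera wrapper classes like these usually pair __init__ with a frame
 # getter that applies the flip flag, along these lines:
 #
 #  def get_frame(self):
 #      frame = self.vs.read()
 #      if self.flip:
 #          frame = cv2.flip(frame, -1)
 #      return frame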
Example #11
 def __init__(self, flip=False):
     self.vs = PiVideoStream(resolution=(480, 320), framerate=32).start()  # resolution and framerate
     self.flip = flip
     time.sleep(0.1)
Example #12
import time
import serial
import cv2
import imutils
from imutils.video.pivideostream import PiVideoStream

ser = serial.Serial('/dev/ttyUSB0', 57600)  ###### NANO ADDRESS OVER USB ######

    
def nothing(x):
    pass

cv2.namedWindow('image',0)

#################### THRESHOLD FOR COLOR ########################
# To be changed in accordance with lighting
Col = {'Blue': [83, 67, 35, 123, 250, 180],
       'Red': [157, 102, 73, 187, 186, 180],
       'Gold': [0, 33, 112, 60, 92, 225]}
#################################################################


cam = PiVideoStream(resolution=(400, 300), framerate=2).start()
time.sleep(5)

red=0
blue=0
gold=0


while True:
    var = ''
    fram = cam.read()
    fram = imutils.resize(fram, width=400)
    fram1 = fram.copy()

    cropped = fram[150:300, 100:200]                    ##### ROI SELECTOR #####
    cv2.rectangle(fram, (150, 300), (100, 200), (0, 0, 0), 3)  ##### ROI SELECTOR #####
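    # --- hedged continuation sketch; the original snippet ends here ---
    # Assuming the six values per colour in Col are lower/upper HSV bounds,
    # a plausible follow-up thresholds the ROI against each range, picks
    # the dominant colour, and reports it to the Nano over serial:
    hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
    counts = {}
    for name, (h1, s1, v1, h2, s2, v2) in Col.items():
        mask = cv2.inRange(hsv, (h1, s1, v1), (h2, s2, v2))
        counts[name] = cv2.countNonZero(mask)
    var = max(counts, key=counts.get)
    ser.write(var.encode())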
Example #13
 def __init__(self):
   # self.video = cv2.VideoCapture(0)
   self.video = PiVideoStream().start()
   time.sleep(2.0)
Example #14
def ai():
    cap = PiVideoStream(resolution=(208, 208), framerate=20)  #rpi camera
    cap.start()
    cap.camera.iso = 0  # 0:auto, 100-200: sunny day
    cap.camera.awb_mode = 'sunlight'  # sunlight,cloudy,auto
    cap.camera.hflip = True
    cap.camera.vflip = True
    time.sleep(2.0)

    print("analog_gain: ", float(cap.camera.analog_gain), file=f)
    print("exposure_speed: ", cap.camera.exposure_speed, file=f)
    print("iso: ", cap.camera.iso, file=f)

    print("[INFO] video recording")
    out = cv2.VideoWriter('avcnet.avi', cv2.VideoWriter_fourcc(*'XVID'), 4,
                          (208, 208))

    # load the trained convolutional neural network
    print("[INFO] loading network...")
    print('inference:', inference)
    print('inference:', inference, file=f)

    if inference == 'k':
        from keras.models import load_model
        import tensorflow as tf
        from keras.preprocessing.image import img_to_array
        model = load_model("/home/pi/drone_exe/TF_model/fly1.h5",
                           custom_objects={"tf": tf})

    if inference == 'tf':
        from tensorflow.python.platform import gfile
        import tensorflow as tf
        from keras.preprocessing.image import img_to_array
        f1 = gfile.FastGFile("/home/pi/drone_exe/TF_model/tf_model_cv.pb",
                             'rb')
        graph_def = tf.GraphDef()
        # Parses a serialized binary message into the current message.
        graph_def.ParseFromString(f1.read())
        f1.close()
        sess = tf.Session()
        sess.graph.as_default()
        # Import a serialized TensorFlow `GraphDef` protocol buffer
        # and place into the current default `Graph`.
        tf.import_graph_def(graph_def)
        softmax_tensor = sess.graph.get_tensor_by_name(
            'import/activation_5/Softmax:0')

    if inference == 'dnn':
        net = cv2.dnn.readNetFromTensorflow(
            '/home/pi/drone_exe/TF_model/fly1.pb')
        ##        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU) # if no NCS stick
        ##        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV) # if no NCS stick
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)

    # default parameters
    orient = 0
    distmax = 600
    tim_old = 0.1
    state = 'fly'
    dist = 510
    global velocity_y
    velocity_y = 0
    fly_1 = 0.9
    k = 0.66  # k=(tau/T)/(1+tau/T) tau time constant LPF, T period
    ##    k=0 # no filtering
    fps = FPS().start()

    while True:
        start = time.time()
        frame = cap.read()
        orig = frame.copy()
        ##

        #===========================dnn============================================
        if inference == 'dnn':
            # use cv2.dnn module for inference
            resized = cv2.resize(frame, (64, 64))
            scale = 1 / 255.0  # the network was trained on images scaled by 1/255
            blob = cv2.dnn.blobFromImage(resized,
                                         scale, (64, 64), (0, 0, 0),
                                         swapRB=True,
                                         ddepth=5)
            net.setInput(blob)
            predictions = net.forward()

#===========================tf=============================================
        if inference == 'tf':
            #use tf for inference
            frame = cv2.resize(frame, (64, 64))
            frame = cv2.cvtColor(
                frame, cv2.COLOR_RGB2BGR)  # @ training images RGB->BGR
            frame = frame.astype("float") / 255.0
            frame = img_to_array(frame)
            frame = np.expand_dims(frame, axis=0)
            predictions = sess.run(softmax_tensor,
                                   {'import/img_input:0': frame})

#===========================k==============================================
        if inference == 'k':
            #use k for inference
            frame = cv2.resize(frame, (64, 64))
            frame = cv2.cvtColor(
                frame, cv2.COLOR_RGB2BGR)  # @ training images RGB->BGR
            frame = frame.astype("float") / 255.0
            frame = img_to_array(frame)
            frame = np.expand_dims(frame, axis=0)
            predictions = model.predict(frame)

#===========================================================================

# classify the input image
        fly = predictions[0][0]
        # build the label

        my_dict = {
            'stop': predictions[0][3],
            'left': predictions[0][1],
            'right': predictions[0][2]
        }
        maxPair = max(my_dict.items(), key=itemgetter(1))
        fly_f = k * fly_1 + (1 - k) * fly
        fly_1 = fly_f

        if state == 'avoid':
            ##            label=maxPair[0]
            ##            proba=maxPair[1]
            if fly_f * 100 >= 60:
                dist = 510
                state = 'fly'
                print(state, file=f)

        else:
            label = 'forward'
            proba = fly
            if fly_f * 100 <= 50:
                dist = 180
                label = maxPair[0]
                proba = maxPair[1]
                print(my_dict, file=f)
                state = 'avoid'

        label_1 = "{} {:.1f}% {}".format(label, proba * 100, state)
        # draw the label on the image
        output = imutils.resize(orig, width=208)
        cv2.putText(output, label_1, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

        # Write the frame into the file 'output.avi'
        out.write(output)

        if vehicle.channels['8'] > 1700:
            event.clear()
        if vehicle.channels['8'] > 1400 and vehicle.channels['8'] < 1700:
            if state == "fly":
                event.clear()

            if state == "avoid":
                event.set()

                if label == 'left':
                    velocity_y = -0.8
                if label == 'right':
                    velocity_y = 0.8
                if label == 'stop':
                    velocity_y = 0

        if vehicle.channels['8'] < 1400:
            event.clear()

        msg_sensor(dist, 0, 600)

        # show the output frame
        cv2.imshow("Frame", output)
        key = cv2.waitKey(10) & 0xFF

        # update the FPS counter
        fps.update()
        # if the `Esc` key was pressed, break from the loop
        if key == 27:
            break

    # do a bit of cleanup
    # stop the timer and save FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()), file=f)
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()), file=f)
    f.close()
    print('end')

    cv2.destroyAllWindows()
    cap.stop()
    out.release()
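
# --- hedged aside; not part of the original snippet ---
# msg_sensor is defined elsewhere; given the call msg_sensor(dist, 0, 600)
# and the DroneKit vehicle object, it plausibly forwards a MAVLink
# DISTANCE_SENSOR message, roughly:
#
#   def msg_sensor(dist, orientation, max_cm):
#       msg = vehicle.message_factory.distance_sensor_encode(
#           0,            # time_boot_ms (ignored)
#           20,           # min_distance (cm)
#           max_cm,       # max_distance (cm)
#           dist,         # current_distance (cm)
#           0, 0,         # type, id
#           orientation,  # orientation
#           0)            # covariance
#       vehicle.send_mavlink(msg)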
Example #15
 def __init__(self, flip=True):
     self.last_upload = time.time()
     self.vs = PiVideoStream(resolution=(800, 608)).start()
     self.flip = flip
     time.sleep(2.0)
Example #16
def cameraOptim():
    time.sleep(2)
    serialHandler = SerialHandler.SerialHandler("/dev/ttyACM0")
    serialHandler.startReadThread()
    serialHandler.sendPidActivation(True)

    def moveForward():
        serialHandler.sendMove(SPEED, -1.8)
        print("Forward")

    def moveLeft():
        serialHandler.sendMove(SPEED, -23.0)
        print("Left")

    def moveRight():
        serialHandler.sendMove(SPEED, 23.0)
        print("Right")

    def moveBackward():
        serialHandler.sendMove(-SPEED, -1.8)
        print("Back")

    def dontMove():
        serialHandler.sendMove(0, -1.8)
        print("Brake")

    def region_of_interest(img, vertices):

        # Define a blank matrix that matches the image height/width.
        mask = np.zeros_like(img)
        # Retrieve the number of color channels of the image.
        #channel_count = img.shape[2]
        # Create a match color with the same color channel counts.
        match_mask_color = (255)
        # Fill inside the polygon
        cv2.fillPoly(mask, vertices, match_mask_color)
        # Returning the image only where mask pixels match
        masked_image = cv2.bitwise_and(img, mask)
        return masked_image

    def draw_lanes(img, lines, color=[0, 255, 255], thickness=3):
        # if this fails, go with some default line
        try:

            # finds the maximum y value for a lane marker
            # (since we cannot assume the horizon will always be at the same point.)

            ys = []
            for i in lines:
                for ii in i:
                    ys += [ii[1], ii[3]]
            min_y = min(ys)
            max_y = 600
            new_lines = []
            line_dict = {}

            for idx, i in enumerate(lines):
                for xyxy in i:
                    # These four lines:
                    # modified from http://stackoverflow.com/questions/21565994/method-to-return-the-equation-of-a-straight-line-given-two-points
                    # Used to calculate the definition of a line, given two sets of coords.
                    x_coords = (xyxy[0], xyxy[2])
                    y_coords = (xyxy[1], xyxy[3])
                    A = vstack([x_coords, ones(len(x_coords))]).T
                    m, b = lstsq(A, y_coords)[0]

                    # Calculating our new, and improved, xs
                    x1 = (min_y - b) / m
                    x2 = (max_y - b) / m

                    line_dict[idx] = [m, b, [int(x1), min_y, int(x2), max_y]]
                    new_lines.append([int(x1), min_y, int(x2), max_y])

            final_lanes = {}

            for idx in line_dict:
                final_lanes_copy = final_lanes.copy()
                m = line_dict[idx][0]
                b = line_dict[idx][1]
                line = line_dict[idx][2]

                if len(final_lanes) == 0:
                    final_lanes[m] = [[m, b, line]]

                else:
                    found_copy = False

                    for other_ms in final_lanes_copy:

                        if not found_copy:
                            if abs(other_ms * 1.2) > abs(m) > abs(
                                    other_ms * 0.8):
                                if abs(final_lanes_copy[other_ms][0][1] *
                                       1.2) > abs(b) > abs(
                                           final_lanes_copy[other_ms][0][1] *
                                           0.8):
                                    final_lanes[other_ms].append([m, b, line])
                                    found_copy = True
                                    break
                            else:
                                final_lanes[m] = [[m, b, line]]

            line_counter = {}

            for lanes in final_lanes:
                line_counter[lanes] = len(final_lanes[lanes])

            top_lanes = sorted(line_counter.items(),
                               key=lambda item: item[1])[::-1][:2]

            lane1_id = top_lanes[0][0]
            lane2_id = top_lanes[1][0]

            def average_lane(lane_data):
                x1s = []
                y1s = []
                x2s = []
                y2s = []
                for data in lane_data:
                    x1s.append(data[2][0])
                    y1s.append(data[2][1])
                    x2s.append(data[2][2])
                    y2s.append(data[2][3])
                return int(mean(x1s)), int(mean(y1s)), int(mean(x2s)), int(
                    mean(y2s))

            l1_x1, l1_y1, l1_x2, l1_y2 = average_lane(final_lanes[lane1_id])
            l2_x1, l2_y1, l2_x2, l2_y2 = average_lane(final_lanes[lane2_id])

            return [l1_x1, l1_y1, l1_x2, l1_y2], [l2_x1, l2_y1, l2_x2,
                                                  l2_y2], lane1_id, lane2_id
        except Exception as e:
            print(str(e))

    def process_img(image):
        original_image = image
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        processed_img = cv2.Canny(gray_image, 200, 300)
        processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)
        #processed_img = gray_image

        vertices = np.array([
            [10, 500],
            [10, 400],
            [300, 250],
            [500, 250],
            [800, 500],
            [800, 500],
        ], np.int32)
        cropped_image = region_of_interest(processed_img, [vertices])
        lines = cv2.HoughLinesP(cropped_image,
                                rho=1,
                                theta=np.pi / 180,
                                threshold=180,
                                lines=np.array([]),
                                minLineLength=20,
                                maxLineGap=15)
        m1 = 0
        m2 = 0
        try:
            l1, l2, m1, m2 = draw_lanes(original_image, lines)
            cv2.line(original_image, (l1[0], l1[1]), (l1[2], l1[3]),
                     [0, 255, 0], 30)
            cv2.line(original_image, (l2[0], l2[1]), (l2[2], l2[3]),
                     [0, 255, 0], 30)
        except Exception as e:
            print(str(e))
            pass
        try:
            for coords in lines:
                coords = coords[0]
                try:
                    cv2.line(processed_img, (coords[0], coords[1]),
                             (coords[2], coords[3]), [255, 0, 0], 3)

                except Exception as e:
                    print(str(e))
        except Exception as e:
            pass
        #line_image = draw_lines(image, lines, thickness=1)
        #line_image = image
        #return line_image
        return processed_img, original_image, m1, m2

    # construct the argument parse and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-n",
                    "--num-frames",
                    type=int,
                    default=100,
                    help="# of frames to loop over for FPS test")
    ap.add_argument("-d",
                    "--display",
                    type=int,
                    default=1,
                    help="Whether or not frames should be displayed")
    args = vars(ap.parse_args())

    # initialize the camera and stream
    #camera = PiCamera()
    #camera.resolution = (800, 600)
    #camera.framerate = 32

    #last_time = time.time()

    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from `picamera` module...")
    vs = PiVideoStream().start()
    time.sleep(2.0)
    #fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    #while fps._numFrames < args["num_frames"]:

    # do a bit of cleanup
    last_time = time.time()
    while True:
        #rawCapture=PiRGBArray(camera, size=(800,600))
        #cap.framerate = 10
        #camera.capture(rawCapture, format="bgr")
        frame = vs.read()
        frame = imutils.resize(frame, width=800)
        last_time = time.time()
        new_screen, original_image, m1, m2 = process_img(frame)
        #rawCapture = PiRGBArray(camera, size=(320, 240))
        #stream = camera.capture_continuous(rawCapture, format="bgr",
        #use_video_port=True)
        #frame = stream.array
        #screen= process_img(frame)
        print('Frame took {} seconds'.format(time.time() - last_time))

        cv2.imshow(
            'frame',
            original_image)  # the cv2.COLOR_BGR2RGB argument can also be added

        if m1 < 0 and m2 < 0:
            moveRight()
            time.sleep(0.1)
            dontMove()
        elif m1 > 0 and m2 > 0:
            moveLeft()
            time.sleep(0.1)
            dontMove()
        else:
            moveForward()
            time.sleep(0.1)
            dontMove()

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    vs.stop()
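
# --- hedged usage note; not in the original ---
# cameraOptim() is presumably invoked from a guard such as:
#   if __name__ == '__main__':
#       cameraOptim()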
Example #17
from flask import Flask, Response, render_template
from imutils.video.pivideostream import PiVideoStream
import cv2
import time

app = Flask(__name__)
camera = PiVideoStream(resolution=(400, 304), framerate=5).start()
time.sleep(2)


def gen(camera):
    while True:
        frame = camera.read()
        ret, jpeg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() +
               b'\r\n\r\n')


@app.route('/')
def index():
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=False, threaded=True, use_reloader=False)
Example #18
 def __init__(self, flip=False, resolution=(640, 480)):
     self.video_stream = PiVideoStream(resolution=resolution,
                                       framerate=8).start()
     time.sleep(2.0)
Example #19
 def __init__(self, flip=False):
     self.vs = PiVideoStream(resolution=(400, 304), framerate=3).start()
     self.flip = flip
     time.sleep(2.0)
Example #20
def get_stream():
    pi_stream = PiVideoStream(resolution=RESOLUTION, framerate=FRAMERATE)
    pi_stream.camera.rotation = ROTATION
    return pi_stream
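
# --- hedged usage note; not in the original ---
# get_stream() returns an un-started stream, so the caller is expected to
# start it and give the sensor time to warm up:
#   vs = get_stream().start()
#   time.sleep(2.0)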
Example #21
def ai():
    cap = PiVideoStream(resolution=(208, 208))  #rpi camera
    cap.start()
    time.sleep(2.0)
    cap.camera.hflip = True
    cap.camera.vflip = True

    print("[INFO] video recording")
    ##    out = cv2.VideoWriter('avcnet.avi',cv2.VideoWriter_fourcc(*'XVID'), 10, (208,208))

    # load the trained convolutional neural network
    print("[INFO] loading network...")
    ##    model = load_model("./avcnet_v1.model")
    model = load_model("./avcnet_best_1.hdf5", custom_objects={"tf": tf})

    # default parameters
    orient = 0
    tim_old = 0.1
    state = 'fly'
    dist = 510
    global velocity_y
    velocity_y = 0

    while True:
        start = time.time()
        frame = cap.read()
        orig = frame.copy()
        frame = cv2.resize(frame, (64, 64))
        frame = frame.astype("float") / 255.0
        frame = img_to_array(frame)
        frame = np.expand_dims(frame, axis=0)

        # classify the input image
        (stop, left, right, fly) = model.predict(frame)[0]
        # build the label
        ##        my_dict = {'stop':stop, 'left':left, 'right':right,'fly':fly}
        my_dict = {'stop': stop, 'left': left, 'right': right}
        maxPair = max(my_dict.items(), key=itemgetter(1))

        if state == 'avoid':
            ##            label=maxPair[0]
            ##            proba=maxPair[1]
            if fly * 100 >= 60:
                dist = 510
                state = 'fly'
                print(state, file=f)

        else:
            label = 'forward'
            proba = fly
            if fly * 100 <= 50:
                dist = 180
                label = maxPair[0]
                proba = maxPair[1]
                print(my_dict, file=f)
                state = 'avoid'

        label_1 = "{} {:.1f}% {}".format(label, proba * 100, state)
        # draw the label on the image
        output = imutils.resize(orig, width=208)
        cv2.putText(output, label_1, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 255, 0), 2)

        # Write the frame into the file 'output.avi'
        ##        out.write(output)

        if vehicle.channels['8'] > 1700:
            event.clear()
        if vehicle.channels['8'] > 1400 and vehicle.channels['8'] < 1700:
            if state == "fly":
                event.clear()

            if state == "avoid":
                event.set()

                if label == 'left':
                    velocity_y = -0.5
                if label == 'right':
                    velocity_y = 0.5
                if label == 'stop':
                    velocity_y = 0

        if vehicle.channels['8'] < 1400:
            event.clear()

        msg_sensor(dist, orient)

        # show the output frame
        cv2.imshow("Frame", output)
        key = cv2.waitKey(10) & 0xFF

        # if the `Esc` key was pressed, break from the loop
        if key == 27:
            break

    # do a bit of cleanup
    f.close()
    print('end')

    cv2.destroyAllWindows()
    cap.stop()
Example #22
 def __init__(self, flip=False):
     self.vs = PiVideoStream(resolution=(1280, 720), framerate=10).start()
     self.flip = flip
     time.sleep(2.0)
Example #23
    def ProcessImage(
        self
    ):  # processes the image: grabs a frame from the camera and runs analysis operations on it

        # buffer for sharing the obstacle map with other processes
        self.obstacle_map_np = np.frombuffer(
            self.obstacle_map,
            dtype=np.int32).reshape(ImageProcessor.obstacle_map_size**2)

        # ball tracker parameters
        self.ballTracker_pos = [
            ImageProcessor.detection_image_resolution[0] // 2,
            ImageProcessor.detection_image_resolution[1] // 2
        ]
        self.ballTracker_size = 40
        self.ballTracker_result = [0, 0]

        if not simulationMode:
            self.tensorflowProcessor = TPM.TensorflowProcessor()
            videoStream = PiVideoStream(
                resolution=ImageProcessor.camera_resolution,
                framerate=ImageProcessor.camera_framerate).start(
                )  # start the thread that reads successive frames from the camera
        else:
            videoStream = self.simulationCommunicator

        time.sleep(1)
        self.frame_original = videoStream.read()

        lastTime = time.time()
        a = 190
        lastID = 0

        saveCounter = 0
        saveCount = 0

        while True:
            if self.key.value == -666: break

            # simple counter of frames processed per second
            a = a + 1
            if a > 200:
                if ImageProcessor.detection_image_resolution_cropped[0] == -1:
                    ImageProcessor.detection_image_resolution_cropped = (
                        np.size(self.frame_original,
                                0), np.size(self.frame_original, 1))
                print(str(a * 1.0 / (time.time() - lastTime)))
                lastTime = time.time()
                a = 0

            # synchronize grabbing a new frame with the camera frame rate
            while True:
                frameGrabbed = videoStream.read()
                ID = id(frameGrabbed)
                if ID != lastID:
                    self.frame_original = frameGrabbed
                    lastID = ID
                    break
                elif not simulationMode:
                    time.sleep(0.01)

            # frame used for debugging
            #self.frame_debug = copy.copy(self.frame_original)

            if not simulationMode:
                newCorners = ImageProcessor.FindBoardCorners(
                    self)  # find the positions of the board corners
                for i in range(4):
                    self.corners[i] = (
                        MM.lerp(self.corners[i][0], newCorners[i][0], 0.3),
                        MM.lerp(self.corners[i][1], newCorners[i][1], 0.3)
                    )  # smooth the updates of the board corner positions
            else:
                self.corners = self.simulationCommunicator.FindBoardCorners()
            ImageProcessor.ChangePerspective(
                self
            )  # warp the perspective of the detected board so it looks like a square
            if not simulationMode:
                ImageProcessor.UpdateBallTracker(
                    self)  # update the ball tracker
            else:
                pos = self.simulationCommunicator.getBallPosition()
                self.ballTracker_result[0] = pos[
                    0] * ImageProcessor.detection_image_resolution_cropped[0]
                self.ballTracker_result[1] = pos[
                    1] * ImageProcessor.detection_image_resolution_cropped[1]

            # store the detected ball position in variables shared between processes
            self.result_x.value = self.ballTracker_result[
                0] / ImageProcessor.detection_image_resolution_cropped[0]
            self.result_y.value = self.ballTracker_result[
                1] / ImageProcessor.detection_image_resolution_cropped[1]

            ImageProcessor.UpdateObstacleMap(self)

            #cv2.imshow("Frame debug", self.frame_debug)
            if saveCounter < saveCount:
                cv2.imwrite("Frame" + str(saveCounter) + ".png",
                            self.frame_original)
                saveCounter += 1

            cv2.imshow("Frame Casted", self.frame_original)
            key = cv2.waitKey(1) & 0xFF
            #if key == ord("q"):
            #    break

        videoStream.stop()
Example #24
def tracking(args, inches):
    global hpd
    global no_detect_flag
    global JAVARA_TERMINATE
    global HPD_INIT

    filename = args["input_file"]

    inches = float(inches)
    if inches >= 8:
        inches = 8

    default_zAngle = inches * 2.54 * 4 + 4
    img_num = 0

    if filename is None:
        isVideo = False

        # create a *threaded* video stream, allow the camera sensor to warm up,
        # and start the FPS counter
        print("[INFO] sampling THREADED frames from `picamera` module...")
        vs = PiVideoStream().start()
        #time.sleep(1.0)
        fps = FPS().start()

    else:
        isVideo = True
        cap = cv2.VideoCapture(filename)
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        name, ext = osp.splitext(filename)
        out = cv2.VideoWriter(args["output_file"], fourcc, fps,
                              (width, height))

    # Initialize head pose detection
    # if(hpd == None):
    #     print("[INFO] initialize HPD Model...")
    #     hpd = HPD(args["landmark_type"], args["landmark_predictor"])
    '''
    init_cnt = 0
    while HPD_INIT == False:
        print('.', end='')
        sleep(1)    
        init_cnt += 1
        if init_cnt % 20 == 19:
            print('')
    '''
    pm2_arduino = serial.Serial('/dev/ttyUSB0', 115200)  # motor 2,4
    first_arduino = serial.Serial('/dev/ttyACM0', 115200)  # motor 1
    third_arduino = serial.Serial('/dev/ttyUSB1', 115200)  # motor 3

    time.sleep(0.5)

    cv2.namedWindow('frame2', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('frame2', 320, 240)

    while not vs.stopped:
        # Capture frame-by-frame
        # start_time = time.time()
        print('\rframe: %d' % fps._numFrames, end='')
        frame = vs.read()

        h, w, c = frame.shape
        new_h = (int)(h / RESIZE_RATIO)
        new_w = (int)(w / RESIZE_RATIO)
        frame_small = cv2.resize(frame, (new_w, new_h))
        frame_small2 = cv2.flip(frame_small, 1)  # horizontal flip (mirror image)
        frame_small3 = cv2.flip(frame_small, 0)  # vertical flip
        frameOut = frame_small3.copy()

        if isVideo:

            if frame is None:
                break
            else:
                out.write(frame)

        else:

            if fps._numFrames % SKIP_FRAMES == 0 and HPD_INIT:
                frameOut, angles, tvec = hpd.processImage(frame_small3)
                if tvec is None:
                    print('\rframe2: %d' % fps._numFrames, end='')
                    print(" There is no face detected\n")

                    # face detecting!!
                    second_angle = 0
                    if no_detect_flag == 0:
                        fourth_angle = -10
                        no_detect_flag += 1
                    elif no_detect_flag == 1:
                        fourth_angle = -10
                        no_detect_flag += 1
                    elif no_detect_flag == 2:
                        fourth_angle = 30
                        no_detect_flag = 0

                    pm_second_angle = str(int(fourth_angle)) + delimiter + str(
                        int(second_angle))
                    pm_second_angle = pm_second_angle.encode('utf-8')
                    pm2_arduino.write(pm_second_angle)

                    print('fourth_angle', pm_second_angle)

                    time.sleep(2.5)

                    fps.update()
                    continue

                else:
                    # tvec: transpose vector
                    # tx : left(+tx) & right(-tx)
                    # ty : up(+ty) & down(-ty)
                    # tz : far(bit tz value) & close(small tz value) :: -tz value
                    tx, ty, tz = tvec[:, 0]
                    rx, ry, rz = angles

                    ################
                    #### before ####
                    ################
                    # user up (+ty) ->    second_angle(-), fourth_angle(-)
                    # user down (-ty) ->    second_angle(+), fourth_angle(+)

                    # user far (tz bigger than before) -> third_angle(+)
                    # user close (tz smaller than before) -> third_angle(-)

                    ############
                    ## change ##
                    ############
                    # ty -> third, fourth angle
                    # tz -> second angle

                    # user up (+ty) ->    third_angle(-), fourth_angle(+)
                    # user down (-ty) ->    third_angle(+), fourth_angle(-)
                    # user far (tz bigger than before) -> second_angle(-)
                    # user close (tz smaller than before) -> second_angle(+)

                    # user go left(tx) -> first_angle
                    # user go right(tx) -> first_angle

                    first_angle = str(int(-tx))

                    first_angle = first_angle.encode('utf-8')

                    print("first_angle: ", first_angle)

                    third_angle = -ty
                    second_angle = default_zAngle + tz
                    if abs(second_angle) <= 4:
                        second_angle = 0

                    if second_angle >= 20:
                        second_angle = 20
                    elif second_angle <= -20:
                        second_angle = -20
                    '''
                    if abs(third_angle) <=4: 
                        third_angle = 0
					'''

                    # move third motor
                    third_angle = str(int(third_angle))
                    third_angle = third_angle.encode('utf-8')
                    print('third angle: ', third_angle)
                    third_arduino.write(third_angle)

                    fourth_angle = -ty
                    '''
                    if second_angle >= 20 and second_angle < 25:
                        fourth_angle = 10
                    elif second_angle <= -20 and second_angle > -25:
                        fourth_angle = -10
                    elif second_angle >= 25 and second_angle < 30:
                        fourth_angle = 13
                    elif second_angle <= -25 and second_angle > -30:    
                        fourth_angle = -13
                    elif second_angle >= 30 and second_angle < 35:
                        fourth_angle = 16
                    elif second_angle <= -30 and second_angle > -35:    
                        fourth_angle = -16
                    elif second_angle >= 35:
                        fourth_angle = 19
                    elif second_angle <= -35:    
                        fourth_angle = -19
                    '''

                    if second_angle >= 20 and second_angle < 25:
                        fourth_angle = 13
                    elif second_angle <= -20 and second_angle > -25:
                        fourth_angle = -10
                    elif second_angle >= 25 and second_angle < 30:
                        fourth_angle = 16
                    elif second_angle <= -25 and second_angle > -30:
                        fourth_angle = -13
                    elif second_angle >= 30 and second_angle < 35:
                        fourth_angle = 19
                    elif second_angle <= -30 and second_angle > -35:
                        fourth_angle = -16
                    elif second_angle >= 35:
                        fourth_angle = 20
                    elif second_angle <= -35:
                        fourth_angle = -19

                    pm_second_angle = str(int(fourth_angle)) + delimiter + str(
                        int(second_angle))
                    pm_second_angle = pm_second_angle.encode('utf-8')

                    print('fourth_second_angle', pm_second_angle)

                    first_arduino.write(first_angle)
                    pm2_arduino.write(pm_second_angle)

                    time.sleep(2)
                    img_num += 1

            else:
                pass

            # elapsed_time = time.time() - start_time
            # cv2.putText(frameOut, "FPS: {:.2f}".format(1 / elapsed_time), ((int)(frameOut.shape[0]/2) - 100,(int)(frameOut.shape[1]/2) - 100),cv2.FONT_HERSHEY_DUPLEX, fontScale=0.6, color=(0,0,255), thickness = 2)
            # Display the resulting frame
            # elapsed_time = time.time() - start_time
            # cv2.putText(frameOut, "FPS: {:.2f}".format(1 / elapsed_time), ((int)(frameOut.shape[0]/2) - 100,(int)(frameOut.shape[1]/2) - 100),cv2.FONT_HERSHEY_DUPLEX, fontScale=0.6, color=(0,0,255), thickness = 2)
            cv2.imshow('frame2', frameOut)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            if JAVARA_TERMINATE:
                break

            #count += 1

        fps.update()

    # When everything done, release the capture
    # stop the timer and display FPS information
    fps.stop()
    print()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    vs.stop()
    if isVideo: out.release()
    cv2.destroyAllWindows()
Example #25
    def __init__(self, flip=True):
        self.vs = PiVideoStream(resolution=(400, 304), framerate=16).start()
        self.flip = flip
        time.sleep(2.0)

        self.avg = None
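
    # --- hedged companion sketch; not in the original ---
    # self.avg suggests a running-average background model; a typical
    # motion check in such camera classes looks like:
    #
    #   def detect_motion(self, frame):
    #       gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #       gray = cv2.GaussianBlur(gray, (21, 21), 0)
    #       if self.avg is None:
    #           self.avg = gray.copy().astype("float")
    #           return False
    #       cv2.accumulateWeighted(gray, self.avg, 0.5)
    #       delta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))
    #       thresh = cv2.threshold(delta, 5, 255, cv2.THRESH_BINARY)[1]
    #       return cv2.countNonZero(thresh) > 5000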
Example #26
def camThread(LABELS, resultsEm, frameBuffer, camera_width, camera_height, vidfps, number_of_camera, mode_of_camera):
    global fps
    global detectfps
    global lastresults
    global framecount
    global detectframecount
    global time1
    global time2
    global cam
    global vs
    global window_name

    if mode_of_camera == 0:
        cam = cv2.VideoCapture(number_of_camera)
        if not cam.isOpened():
            print("USB Camera Open Error!!!")
            sys.exit(0)
        cam.set(cv2.CAP_PROP_FPS, vidfps)
        cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
        cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
        window_name = "USB Camera"
    else:
        vs = PiVideoStream((camera_width, camera_height), vidfps).start()
        sleep(3)
        window_name = "PiCamera"

    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)

    while True:
        t1 = time.perf_counter()

        # USB Camera Stream or PiCamera Stream Read
        color_image = None
        if mode_of_camera == 0:
            s, color_image = cam.read()
            if not s:
                continue
        else:
            color_image = vs.read()

        if frameBuffer.full():
            frameBuffer.get()
        frames = color_image

        height = color_image.shape[0]
        width = color_image.shape[1]
        frameBuffer.put(color_image.copy())
        res = None

        if not resultsEm.empty():
            res = resultsEm.get(False)
            # print("[LOG] ".format(type(res)))
            # print(res)
            detectframecount += 1
            imdraw = overlay_on_image(frames, res)
            lastresults = res
        else:
            imdraw = overlay_on_image(frames, lastresults)

        cv2.imshow(window_name, cv2.resize(imdraw, (width, height)))

        if cv2.waitKey(1) & 0xFF == ord('q'):
            sys.exit(0)

        ## Print FPS
        framecount += 1
        if framecount >= 25:
            fps = "(Playback) {:.1f} FPS".format(time1 / 25)
            detectfps = "(Detection) {:.1f} FPS".format(detectframecount / time2)
            framecount = 0
            detectframecount = 0
            time1 = 0
            time2 = 0
        t2 = time.perf_counter()
        elapsedTime = t2 - t1
        time1 += 1 / elapsedTime
        time2 += elapsedTime
Example #27
 def __init__(self, flip=True):
     self.vs = PiVideoStream().start()
     self.flip = flip
     time.sleep(2.0)
Example #28
def photo_booth(url,
                path,
                models,
                width=1080,
                prep_time=10,
                view_time=10,
                rotate=0,
                raspi=False):

    if raspi:
        vs = PiVideoStream().start()
    else:
        vs = VideoStream(src=0).start()
    print('Warming up')
    time.sleep(2.0)
    print('Program started')
    cv2.namedWindow("Output", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("Output", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    while True:
        preparation(vs, prep_time, rotate)
        img = vs.read()
        img = cv2.flip(img, 1)
        img = resize(img, width)

        # rotate
        if rotate > 0:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        elif rotate < 0:
            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)

        # Choosing and loading the model
        model_name = np.random.choice(models)
        print('Using {}'.format(model_name))
        # model_path = os.path.join(path,model_name)

        # Show the captured frame
        cv2.imshow('Output', img)
        key = cv2.waitKey(1) & 0xFF
        cv2.imwrite('./images/content-images/snapshot.jpg', img)
        # Inference
        start = time.time()
        # url=" http://0.0.0.0:8080/"
        req = {'style': os.path.splitext(model_name)[0]}
        with open('./images/content-images/snapshot.jpg', 'rb') as fp:
            filename = 'snapshot.jpg'
            resp = requests.post(url,
                                 files=[('file', (filename, fp,
                                                  'application/octet'))],
                                 data=req)
        end = time.time()
        print('response received in {} seconds'.format(end - start))
        with open('./images/output-images/output.jpg', 'wb') as fp:
            fp.write(resp.content)

        # Postprocessing
        output = cv2.imread('./images/output-images/output.jpg')
        cv2.imshow('Output', output)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break

        view_result(output, view_time)
Example #29
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    stream.close()
    rawCapture.close()
    camera.close()

    # create a *threaded* video stream, allow the camera sensor to warm up,
    # and start the FPS counter
    print("[INFO] sampling THREADED frames from `picamera` module...")
    vs = PiVideoStream().start()
    time.sleep(1.0)
    fps = FPS().start()
    l = 0
    # loop over some frames...this time using the threaded stream
    perf = []
    #	init()
    if RUNNING_ON_PI:

        while True:
            #cap = cv2.VideoCapture('2018_06_20_2.h264')
            #	frame_counter = 0
            #                init()
            # This block runs when the program is supposed to get its video from the camera

            #for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
            pass  # loop body truncated in the original snippet
def main(args):
    filename = args["input_file"]

    if filename is None:
        isVideo = False

        # create a *threaded* video stream, allow the camera sensor to warm up,
        # and start the FPS counter
        print("[INFO] sampling THREADED frames from `picamera` module...")
        vs = PiVideoStream().start()
        time.sleep(2.0)
        fps = FPS().start()

    else:
        isVideo = True
        cap = cv2.VideoCapture(filename)
        fps = cap.get(cv2.CAP_PROP_FPS)
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        name, ext = osp.splitext(filename)
        out = cv2.VideoWriter(args["output_file"], fourcc, fps,
                              (width, height))

    # Initialize head pose detection
    hpd = HPD(args["landmark_type"], args["landmark_predictor"])

    xy_arduino = serial.Serial('/dev/ttyUSB0', 9600)
    servo_arduino = serial.Serial('/dev/ttyACM1', 9600)
    z_arduino = serial.Serial('/dev/ttyACM0', 9600)

    # servo motor default angle
    servo_angle = default_servoAngle
    tempAngle = str(servo_angle)
    tempAngle = tempAngle.encode('utf-8')
    #servo_arduino.write(tempAngle)

    #count = 0

    cv2.namedWindow('frame2', cv2.WINDOW_NORMAL)
    cv2.resizeWindow('frame2', 320, 240)

    while not vs.stopped:
        # Capture frame-by-frame
        print('\rframe: %d' % fps._numFrames, end='')
        frame = vs.read()

        h, w, c = frame.shape
        new_h = (int)(h / RESIZE_RATIO)
        new_w = (int)(w / RESIZE_RATIO)
        frame_small = cv2.resize(frame, (new_w, new_h))
        frame_small2 = cv2.flip(frame_small, 1)  # horizontal flip: camera mirror image
        frame_small3 = cv2.flip(frame_small, 0)  # vertical flip

        if isVideo:

            if frame is None:
                break
            else:
                out.write(frame)

        else:

            if (fps._numFrames % SKIP_FRAMES == 0):
                frameOut, angles, tvec = hpd.processImage(frame_small3)
                if tvec is None:
                    print('\rframe2: %d' % fps._numFrames, end='')
                    print(" There is no face detected\n")

                    fps.update()
                    #count += 1
                    continue

                else:
                    tx, ty, tz = tvec[:, 0]
                    rx, ry, rz = angles

                    th = math.radians(servo_angle)
                    # xy angle
                    # tz: : x angle, tx: y angle
                    temp_z = int(tz) + default_xAngle  #temp_z: x angle

                    x_angle = math.sin(th) * temp_z + math.cos(th) * tx
                    y_angle = math.cos(th) * temp_z + math.sin(th) * tx

                    temp_y = int(ty)
                    z_angle = str(temp_y)

                    z_angle = z_angle.encode('utf-8')
                    ry = int(ry)

                    if (abs(ry) <= 15):
                        ry = 0

                    th1 = math.radians(ry)
                    y_angle += int(math.tan(th1) * tz)

                    xy_angle = str(int(x_angle)) + delimiter + str(
                        int(y_angle))
                    xy_angle = xy_angle.encode('utf-8')
                    print("\n\n\nstr_tx: ", xy_angle)
                    servo_angle = servo_angle + ry
                    tempAngle = str(servo_angle)
                    tempAngle = tempAngle.encode('utf-8')

                    print("\ntx: ", tx, "\nty: ", ty, "\ntz: ", tz)

                    print("\n\n\nry: ", ry)

                    # write XY angle
                    xy_arduino.write(xy_angle)

                    # write Z angle
                    z_arduino.write(z_angle)
                    servo_arduino.write(tempAngle)
                    time.sleep(3)

            else:
                pass

            # Display the resulting frame

            cv2.imshow('frame2', frameOut)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break


##        count += 1
        fps.update()

    # When everything done, release the capture
    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    vs.stop()
    if isVideo: out.release()
    cv2.destroyAllWindows()