Example #1
class VideoCamera(object):
    def __init__(self):
        self.video = WebcamVideoStream(src=0).start()

    def __del__(self):
        self.video.stop()

    def get_frame(self):
        fps = FPS().start()

        # numFrames and ds_factor are module-level settings in the original
        while fps._numFrames < numFrames:
            # grab the frame from the stream and resize it to have a maximum
            # width of 400 pixels
            image = self.video.read()
            image = imutils.resize(image, width=400)
            image = cv2.resize(image,
                               None,
                               fx=ds_factor,
                               fy=ds_factor,
                               interpolation=cv2.INTER_AREA)
            fps.update()

        ret, jpeg = cv2.imencode('.jpg', image)
        return jpeg.tobytes()
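A class like this is typically consumed by an MJPEG streaming route. A minimal sketch, assuming a Flask app and the VideoCamera class above (the route and generator names are illustrative, not from the original):

from flask import Flask, Response

app = Flask(__name__)

def gen(camera):
    # pull JPEG-encoded frames and wrap them as a multipart MJPEG stream
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')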
Example #2
def threaded_test():
    print("[TEST2] sampling THREADED frames from webcam...")
    vs0 = WebcamVideoStream(src=1).start()
    vs1 = WebcamVideoStream(src=2).start()
    fps = FPS().start()
    prev_frame = vs1.read()
    # loop over some frames...this time using the threaded stream
    while fps._numFrames < args["num_frames_thr"]:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame0 = vs0.read()
        frame1 = vs1.read()
        diff = cv2.absdiff(frame1, prev_frame)  # absdiff avoids uint8 wrap-around
        prev_frame = frame1
        time.sleep(0.04)
        # check to see if the frame should be displayed to our screen
        if args["display"] > 0:
            cv2.imshow("Frame1", frame1)
            cv2.imshow("Frame0", frame0)
            key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        if notBlack(diff):
            fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[TEST2] elasped time: {:.2f}".format(fps.elapsed()))
    print("[TEST2] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs0.stop()
    vs1.stop()
    return int(fps.fps()) >> 4 << 4  # round the measured FPS down to a multiple of 16
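The notBlack() helper is not shown in this example; it apparently gates the FPS counter on frames that actually changed. A plausible sketch under that assumption (the threshold values are made up):

import numpy as np

def notBlack(diff, pixel_thresh=10, count_thresh=50):
    # hypothetical helper: the frame counts as "not black" when enough
    # pixels differ noticeably from the previous frame
    return np.count_nonzero(diff > pixel_thresh) > count_thresh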
Example #3
def predict():
    face_classifier = cv2.CascadeClassifier(
        "C:/Users/uvss/AppData/Local/Programs/Python/Python37-32/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml"
    )
    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read("trainer/training_data.yml")
    vs = WebcamVideoStream(
        "rtsp://*****:*****@192.168.1.90/axis-media/media.amp")
    vs = vs.start()
    if vs.grabbed:
        while True:
            image = vs.read()
            img = image.copy()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_classifier.detectMultiScale(gray, 1.2, 5)
            if len(faces) == 0:
                pass  # no faces this frame; fall through and display as-is
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (102, 255, 102), 2)
                roi_gray = gray[y:y + h, x:x + w]
                label, conf = face_recognizer.predict(roi_gray)
                print(label, int(conf))
                if conf > 85:
                    label = "Unknown"
                cv2.putText(img, str(label), (x + 5, y + h - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
            cv2.imshow("frames", img)
            if cv2.waitKey(1) == ord("q"):
                break
        vs.stop()
        cv2.destroyAllWindows()
Example #4
def training_Data(path):
    while True:
        try:
            details = {
                "name": input("Provide a name for the Employee:- "),
                "depart": input("Department:- "),
                "ID": int(input("Office Id (number) :- "))
            }
            if bool(re.search(r'\d', details["name"])) or bool(
                    re.search(r'\d', details["depart"])):
                raise ValueError(
                    "[Error] Invalid Input for Name or Department")
            break
        except ValueError as err:
            # int() failures and the name/department check both raise ValueError
            print(err if "Name or Department" in str(err)
                  else "[Error] Invalid Input for Office Id")
            continue

    sampled_faces = []
    sampled_labels = []
    img_name = None
    usr_id, label = 1, 1
    img_id = 0
    users = os.listdir(path)
    users.sort(
        key=lambda x: int(x[1:]))  # sorting all the user data directories
    if users == []:
        usr_dir = path + "/s" + str(usr_id) + "/"
        os.mkdir(usr_dir)
    else:
        label = int(users[-1][1:]) + 1
        usr_id = str(label)  # get [1:] part of last element in the list
        usr_dir = path + "/s" + usr_id + "/"
        os.mkdir(usr_dir)

    vs = WebcamVideoStream(
        src="rtsp://*****:*****@192.168.1.90/axis-media/media.amp")
    if vs.grabbed:
        vs = vs.start()
        while (True):
            frame = vs.read()
            cv2.imshow("frame", frame)
            faces = face_Detector(frame)
            if cv2.waitKey(1) == ord("q"):
                # sys.exit()
                break
            if faces is None or len(faces) == 0:
                continue
            for face in faces:
                img_id += 1
                cv2.imshow("face", face)
                img_name = usr_dir + str(img_id) + ".jpg"
                cv2.imwrite(img_name, face)
                sampled_labels.append(label)
                sampled_faces.append(face)

            if img_id >= 50:
                break
    vs.stop()
    cv2.destroyAllWindows()
    return (sampled_faces, sampled_labels, details)
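face_Detector() is defined elsewhere in the original project. A minimal sketch of what it plausibly does, assuming a Haar cascade and fixed-size grayscale crops (the cascade choice and crop size are assumptions):

import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

def face_Detector(frame, size=(200, 200)):
    # hypothetical implementation: return grayscale face crops resized
    # to a fixed training size
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = face_cascade.detectMultiScale(gray, 1.2, 5)
    return [cv2.resize(gray[y:y + h, x:x + w], size)
            for (x, y, w, h) in rects]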
Example #5
def detection():
    print('here')
    # cap = cv2.VideoCapture(0)                                                 # To capture stream using OpenCV
    vs = WebcamVideoStream().start()                                            # To capture stream using imutils.
    start = tm.time()
    end = tm.time()
    pin = []
    while (end-start<5):                                                        # We provide a time period of 5 seconds for each pin.
        image = vs.read()                                                       # Reading current frame from imutils.
        # check , image = cap.read()                                            # For reading image using OpenCV

        img = cv2.cvtColor(image , cv2.COLOR_BGR2RGB)
        resized = cv2.resize(img,(224,224),interpolation=cv2.INTER_AREA)        # Resizing frame for expected input shape
        scaled = resized/255.0
        expand_img = np.expand_dims(scaled , axis=0)
        img_np = np.array(expand_img,dtype='float32')

        # Prediction using tflite model.
        tflite_inter.set_tensor(input_details[0]['index'] , img_np)
        tflite_inter.invoke()
        tflite_pred = tflite_inter.get_tensor(output_details[0]['index'])
        pred=np.argmax(tflite_pred)

        # For prediction using direct .h5 model
        # pred = np.argmax(classifier.predict(expand_img))

        pin.append(pred)

        end = tm.time()

    vs.stop()                                                                   # The stream needs to be released after every function call. Or it will keep the camera Open.
    vs.stream.release()                                                         # To release capture stream from memory.
    # cv2.destroyAllWindows()
    # cap.release()
    return (mode(pin))
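The tflite_inter, input_details, and output_details globals are assumed by this example. The standard TensorFlow Lite interpreter setup looks like this ("model.tflite" is a placeholder path):

import tensorflow as tf

tflite_inter = tf.lite.Interpreter(model_path="model.tflite")
tflite_inter.allocate_tensors()
input_details = tflite_inter.get_input_details()
output_details = tflite_inter.get_output_details()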
Example #6
    def handle_view_objects_intent(self, message):
        self.speak('Showing you what objects I see now')
        logger = multiprocessing.log_to_stderr()
        logger.setLevel(multiprocessing.util.SUBDEBUG)  # SUBDEBUG lives in multiprocessing.util on Python 3

        input_q = Queue(maxsize=5)
        output_q = Queue(maxsize=5)

        # note: this Process is created but never started; the Pool below
        # supplies the workers that actually service input_q
        process = Process(target=worker, args=(input_q, output_q))
        process.daemon = True
        pool = Pool(2, worker, (input_q, output_q))

        video_capture = WebcamVideoStream(src=0).start()
        fps = FPS().start()

        while True:  # fps._numFrames < 120
            frame = video_capture.read()
            input_q.put(frame)

            t = time.time()

            cv2.imshow('Video', output_q.get())
            fps.update()

            print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        fps.stop()
        print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
        print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

        video_capture.stop()
        cv2.destroyAllWindows()
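The worker target is not shown here. In this queue-based pattern each pool worker loops forever, pulling raw frames from input_q, running the detector, and pushing annotated frames to output_q. A hedged sketch (the detection call is left as a comment since the original model is unknown):

def worker(input_q, output_q):
    while True:
        frame = input_q.get()
        # run the object detector on the frame here; this sketch passes
        # it through unchanged
        output_q.put(frame)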
Example #7
def pcv(**kwargs):
    geckopy.init_node(**kwargs)
    rate = geckopy.Rate(10)

    p = geckopy.Publisher()

    camera = WebcamVideoStream(src=0).start()

    while not geckopy.is_shutdown():
        img = camera.read()

        if img is not None:
            # geckopy.log(img.shape)
            # img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            img = cv2.resize(img, (640,480))
            msg = find_ball(img)
            # msg = Image(img.shape, img.tobytes(), img.dtype)
            if msg:
                # p.pub('images_color', msg)  # topic msg
                p.pub('target', msg)
                geckopy.log(msg)
            # p.pub(topic, {'a': 5})
            # geckopy.log(img.shape)
        else:
            geckopy.log("*** couldn't read image ***")

        # sleep
        rate.sleep()

    camera.stop()
    print('cv bye ...')
Example #8
def mix_record():
    #cams_test.raw_test()
    #cams_test.threaded_test()
    print("[INFO] saving image...")
    # created a *threaded *video stream, allow the camera senor to warmup,
    # and start the FPS counter
    vs0 = cv2.VideoCapture(1)
    vs1 = WebcamVideoStream(src=2).start()
    # loop over some frames...this time using the threaded stream
    count = 0
    while True:
        count += 1
        ret, frame0 = vs0.read()
        frame1 = vs1.read()
        if args['output'] < 0:
            cv2.imwrite("./mixRecord/" + str(count) + "_r.png", frame0)
            cv2.imwrite("./mixRecord/" + str(count) + "_t.png", frame1)
        if args['display'] > 0:
            cv2.imshow("Frame0", frame0)
            cv2.imshow("Frame1", frame1)
            key = cv2.waitKey(1) & 0xFF
            if key == 27:
                break
    vs0.release()
    vs1.stop()
    cv2.destroyAllWindows()
Example #9
class VideoCamera(object):
    def __init__(self):
        self.stream = WebcamVideoStream(src=0).start()
    
    def __del__(self):
        self.stream.stop()
    
    def get_frame(self):
        image = self.stream.read()

        detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # Detect the faces
        faces = detector.detectMultiScale(gray, 1.1, 4)

        # Draw the rectangle around each face
        for (x, y, w, h) in faces:
            cv2.rectangle(image, (x, y), (x+w, y+h), (255, 0, 0), 2)

        ret, jpeg = cv2.imencode('.jpg', image)
        data =[]
        data.append(jpeg.tobytes())
        return data
Example #10
def threaded_check():
    #cams_test.raw_test()
    #cams_test.threaded_test()
    print("[INFO] saving image...")
    # created a *threaded *video stream, allow the camera senor to warmup,
    # and start the FPS counter
    vs1 = WebcamVideoStream(src=1).start()
    # loop over some frames...this time using the threaded stream
    count = 0
    prev_frame = vs1.read()
    fps = FPS().start()
    while count < 1000:
        count += 1
        frame1 = vs1.read()
        #         gray = cv2.cvtColor((frame1-prev_frame), cv2.COLOR_BGR2GRAY)
        #         ret, diff = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)
        diff = cv2.absdiff(frame1, prev_frame)  # absdiff avoids uint8 wrap-around
        #         if args['output'] < 0:
        #             cv2.imwrite("./fRecord/"+str(count)+".png",frame1)
        #             cv2.imwrite("./tRecord/"+str(count)+"_d.png",frame1-prev_frame)
        prev_frame = frame1
        if args['display'] > 0:
            cv2.imshow("Frame1", frame1)
            #            cv2.imshow("Diff", diff)
            key = cv2.waitKey(1) & 0xFF
            if key == 27:
                break
        if notBlack(diff):
            fps.update()
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
    vs1.stop()
    cv2.destroyAllWindows()
Example #11
class VideoCamera(object):
    def __init__(self):
        self.stream = WebcamVideoStream(src=0).start()

    def __del__(self):
        self.stream.stop()

    def get_frame(self, found, sound):

        img2 = self.stream.read()
        img2 = preprocess(img2)

        data = []

        while (True):
            # start the comparison to detect the banknote
            for bill in data_bill:

                img1 = bill[0]
                kp = bill[1]
                desc = bill[2]
                showText = bill[3]
                sound = bill[4]

                found = match_draw(img1, img2, found, showText, sound, kp,
                                   desc)

            if found:
                ret, jpeg = cv2.imencode('.jpg', img2)
                data.append(jpeg.tobytes())
                return data
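preprocess() and match_draw() come from the surrounding project. Matching a live frame against reference bill images is commonly done with ORB descriptors and Lowe's ratio test; a hedged sketch of that kind of check (the function name and thresholds are illustrative):

import cv2

def match_sketch(desc_ref, img2, min_matches=15):
    orb = cv2.ORB_create()
    kp2, desc2 = orb.detectAndCompute(img2, None)
    if desc2 is None:
        return False
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING)
    pairs = matcher.knnMatch(desc_ref, desc2, k=2)
    # keep matches that pass the 0.75 ratio test
    good = [p[0] for p in pairs
            if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]
    return len(good) >= min_matches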
Example #12
def threadind():
    key = ''

    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()

    # loop over some frames...this time using the threaded stream
    while key != ord('q'):
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        # frame = imutils.resize(frame, width=400)

        # check to see if the frame should be displayed to our screen

        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # update the FPS counter
        fps.update()

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
Example #13
def main():
    # Video capture constructor multithreaded
    stream = WebcamVideoStream(VIDEO_SOURCE).start()

    print("Tracking...")
    recording = False
    tracking = True
    frame_id = 0
    while (tracking):
        # Capture frame
        frame = stream.read()
        frame = cv2.flip(frame, 1)

        if recording:
            if frame_id % 30 == 0:
                # Save image
                filename = os.path.join(DATA_DIR, str(frame_id) + '.jpg')
                cv2.imwrite(filename, frame)

            frame_id += 1

        cv2.imshow('frame', frame)

        # Waits for pressed key to stop execution
        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            tracking = False
            cv2.destroyAllWindows()
            stream.stop()
        elif key == ord('r'):
            print("Recording...")
            recording = True
Example #14
class VideoCamera(object):
    def __init__(self):
        self.stream = WebcamVideoStream(src=0).start()

        self.folderName = '38'
        self.n_folderName = self.folderName
        self.path = r"/home/asus/Desktop/mask_recog/without_mask2/{}".format(
            self.n_folderName)
        os.mkdir(self.path)

    def __del__(self):
        self.stream.stop()

    def get_frame(self):
        self.frame = self.stream.read()

        self.window_name = 'Image'
        self.font = cv2.FONT_HERSHEY_SIMPLEX
        self.org = (0, 50)
        self.fontScale = 1
        self.color = (0, 0, 255)
        self.thickness = 2

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(
            "shape_predictor_68_face_landmarks.dat")

        self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
        self.faces = self.detector(self.gray)
        for self.face in self.faces:
            self.x1 = self.face.left()
            self.y1 = self.face.top()
            self.x2 = self.face.right()
            self.y2 = self.face.bottom()
            self.p = cv2.rectangle(self.frame, (self.x1, self.y1),
                                   (self.x2, self.y2), (255, 0, 0), 3)
            self.landmarks = self.predictor(self.gray, self.face)

            for self.n in range(31, 36):
                self.x = self.landmarks.part(self.n).x
                self.y = self.landmarks.part(self.n).y
                self.t = cv2.circle(self.frame, (self.x, self.y), 2,
                                    (255, 255, 0), -1)

            if self.t.any():
                time.sleep(2)
                self.photo = time.strftime(r"%Y-%m-%d_%H-%M-%S")
                self.file = r'/home/asus/Desktop/mask_recog/without_mask2/{}/{}.jpg'.format(
                    self.n_folderName, self.photo)
                cv2.imwrite(self.file, self.frame)
                time.sleep(5)
                continue

        _, self.jpeg = cv2.imencode('.jpg', self.frame)
        self.data = []
        self.data.append(self.jpeg.tobytes())

        return self.data
Example #15
def main():
    global FONT, fx, fy, fh, accent_color
    model = load_model('models/final.h5')
    x0, y0, size = 200, 180, 300

    cam = WebcamVideoStream(src=0).start()
    cv2.namedWindow('Capture', cv2.WINDOW_NORMAL)

    while True:
        frame = cam.read()
        frame = cv2.flip(frame, 1)
        window = copy.deepcopy(frame)
        cv2.rectangle(window, (x0, y0), (x0 + size, y0 + size), accent_color, 12)

        # get region of interest
        roi = frame[y0:y0 + size, x0:x0 + size]
        roi = binary_mask(roi)

        # apply processed roi in frame
        window[y0:y0 + size, x0:x0 + size] = cv2.cvtColor(roi, cv2.COLOR_GRAY2BGR)

        img = cv2.resize(roi, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)

        img = np.float32(img) / 255.
        img = np.expand_dims(img, axis=0)
        img = np.expand_dims(img, axis=-1)
        predictions = model.predict(img)
        if predictions.max() > 0.9:  # .any() returns a bool, so the original comparison was meaningless
            pred = classes[predictions.argmax()]
            actions[pred](*args[pred])
            cv2.putText(window, 'Prediction: %s' % pred, (fx, fy + fh), FONT, 1.0, (245, 210, 65), 2, 1)
        else:
            cv2.putText(window, 'No gesture found', (fx, fy + fh), FONT, 1.0, (245, 210, 65), 2, 1)

        # show the window
        cv2.imshow('Capture', window)

        # Keyboard inputs
        key = cv2.waitKey(120) & 0xff

        # use q key to close the program
        if key == ord('q'):
            break

        # adjust the position of window
        elif key == ord('i'):
            y0 = max((y0 - 5, 0))
        elif key == ord('k'):
            y0 = min((y0 + 5, window.shape[0] - size))
        elif key == ord('j'):
            x0 = max((x0 - 5, 0))
        elif key == ord('l'):
            x0 = min((x0 + 5, window.shape[1] - size))

    cv2.destroyAllWindows()
    cam.stop()
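binary_mask() is defined elsewhere in the original project. For hand-gesture ROIs a common choice is blurring plus Otsu thresholding; a plausible stand-in, under that assumption:

import cv2

def binary_mask(roi):
    # grayscale, blur, then Otsu threshold to isolate the hand silhouette
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, mask = cv2.threshold(blur, 0, 255,
                            cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    return mask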
Example #16
def main():
    # paths to models
    face_xml = "utils/face-detection-adas-0001.xml"
    face_bin = "utils/face-detection-adas-0001.bin"
    face_detection_net, n_f, c_f, w_f, h_f, input_blob_f, out_blob_f, plugin_f = init_model(face_xml, face_bin)

    fvs = WebcamVideoStream(src=0).start()
    time.sleep(0.5)

    # Initialize some variables
    frame_count = 0
    cur_request_id_f = 0
    next_request_id_f = 1

    while True:
        # Grab a single frame of video
        frame = fvs.read()
        if frame is None:
            break
        initial_h, initial_w = frame.shape[:2]

        # Find all the faces and face encodings in the current frame of video
        face_locations = []
        in_frame = cv2.resize(frame, (w_f, h_f))
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n_f, c_f, h_f, w_f))
        face_detection_net.start_async(request_id=cur_request_id_f, inputs={input_blob_f: in_frame})
        if face_detection_net.requests[cur_request_id_f].wait(-1) == 0:
            face_detection_res = face_detection_net.requests[cur_request_id_f].outputs[out_blob_f]
            for face_loc in face_detection_res[0][0]:
                if face_loc[2] > 0.5:
                    xmin = abs(int(face_loc[3] * initial_w))
                    ymin = abs(int(face_loc[4] * initial_h))
                    xmax = abs(int(face_loc[5] * initial_w))
                    ymax = abs(int(face_loc[6] * initial_h))
                    face_locations.append((xmin, ymin, xmax, ymax))

        for (left, top, right, bottom) in face_locations:
            crop_img = frame[top:bottom, left:right]
            label = mask_detection(crop_img)
            frame = cv2.rectangle(frame, (left, top), (right, bottom), (65, 65, 65), 2)
            cv2.putText(frame, label, (left, top + 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (36, 255, 12), 2)

        cv2.imshow('Video', frame)
        frame_count += 1

        cur_request_id_f, next_request_id_f = next_request_id_f, cur_request_id_f

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release handle to the webcam
    fvs.stop()
    cv2.destroyAllWindows()
    del face_detection_net
    del plugin_f
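init_model() returning an executable network plus a plugin handle matches the legacy openvino.inference_engine IEPlugin API (deprecated since OpenVINO 2020). One plausible implementation under that assumption:

from openvino.inference_engine import IENetwork, IEPlugin

def init_model(model_xml, model_bin, device="CPU", num_requests=2):
    plugin = IEPlugin(device=device)
    net = IENetwork(model=model_xml, weights=model_bin)
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    n, c, h, w = net.inputs[input_blob].shape
    # two requests so the caller can alternate cur/next ids for async mode
    exec_net = plugin.load(network=net, num_requests=num_requests)
    return exec_net, n, c, w, h, input_blob, out_blob, plugin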
Example #17
def run():
    print("run hsvpicker")
    cap = WebcamVideoStream(src=0).start()
    time.sleep(1)  # startup

    sample = cap.read()
    height = sample.shape[0]  # get height
    width = sample.shape[1]  # get width
    print("w: " + str(width) + " " + "h: " + str(height))

    createtrackbars("1")

    while True:
        img = cap.read()
        img = cv2.GaussianBlur(img, (9, 9), 0)

        lowh = cv2.getTrackbarPos('Low H', '1')
        lows = cv2.getTrackbarPos('Low S', '1')
        lowv = cv2.getTrackbarPos('Low V', '1')

        highh = cv2.getTrackbarPos('High H', '1')
        highs = cv2.getTrackbarPos('High S', '1')
        highv = cv2.getTrackbarPos('High V', '1')

        # Hsv Mask
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        lower = np.array([lowh, lows, lowv])
        higher = np.array([highh, highs, highv])
        mask = cv2.inRange(hsv, lower, higher)

        mask += calculate_mask("1", mask, img)

        output = cv2.bitwise_and(img, img, mask=mask)

        ret, thresh = cv2.threshold(mask, 127, 255, 0)
        # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 drops the first value
        im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)

        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > 100:
                cv2.drawContours(output, [cnt], -1, (255, 255, 255), 5)

        cv2.imshow('hsv-picker', output)

        if cv2.waitKey(1) & 0xFF == ord('s'):
            print("([" + str(lowh) + ", " + str(lows) + ", " + str(lowv) +
                  "], [" + str(highh) + ", " + str(highs) + ", " + str(highv) +
                  "])")

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.stop()
    cv2.destroyAllWindows()
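createtrackbars() is not shown; given the getTrackbarPos calls above, it presumably creates six H/S/V sliders on the named window. A minimal sketch (OpenCV trackbars need a callback, a no-op here):

import cv2

def createtrackbars(window):
    cv2.namedWindow(window)
    for name, maximum in [('Low H', 179), ('High H', 179),
                          ('Low S', 255), ('High S', 255),
                          ('Low V', 255), ('High V', 255)]:
        cv2.createTrackbar(name, window, 0, maximum, lambda x: None)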
Example #18
class Scanner():
    def __init__(self, src=1):
        self.camera = WebcamVideoStream(src=src).start()
        self.code = ''

    def __decode(self, im):
        # Find barcodes and QR codes
        decodedObjects = pyzbar.decode(im)
        # Print results
        for obj in decodedObjects:
            self.code = obj.data
        return decodedObjects

    # Display barcode and QR code location
    def display(self, im, decodedObjects):
        # Loop over all decoded objects
        for decodedObject in decodedObjects:
            points = decodedObject.polygon
            if len(points) == 4:
                cv2.rectangle(im, (points[0].x, points[0].y),
                              (points[2].x, points[2].y), (0, 255, 0), 3)
        return im

    def decoded(self):
        if not self.code:
            return False
        else:
            return self.code

    def cv2_call(self):
        while (True):
            frame = self.camera.read()
            # resize the frame and convert it to grayscale (while still
            # retaining 3 channels)
            frame = imutils.resize(frame, width=400)
            #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            #frame = np.dstack([frame, frame, frame])
            decodedObjects = self.__decode(frame)
            self.display(frame, decodedObjects)
            cv2.imshow("Results", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()
        self.camera.stop()

    def callback(self):
        frame = self.camera.read()
        frame = imutils.resize(frame, width=400)
        decodedObjects = self.__decode(frame)
        image = self.display(frame, decodedObjects)
        return image

    def exit(self):
        self.camera.stop()
Example #19
def video_loop(delay):
    """
    Show a delayed video loop with the specified number of seconds of delay buffer.
    """
    vs = WebcamVideoStream().start()

    cv2.namedWindow('loop', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('loop', cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    fps = int(vs.stream.get(cv2.CAP_PROP_FPS) *
              2)  # Off by a factor of 2, inexplicably.
    width = int(vs.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(vs.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))

    buf_size = int(delay * fps)
    if buf_size == 0:
        buf_size = 1
    buf = {}
    i = 0
    while True:
        # Capture.
        # Despeckle before main processing.
        buf[i] = vs.read()
        buf[i] = cv2.medianBlur(buf[i], 3)

        # Collect the oldest few frames for frame processing.
        # Start at slow speed until buffer is full and we catch up to it.
        oldest_i = min(buf.keys())
        if i < (2 * buf_size):
            display_i = int(i**2 / (4 * buf_size))
        else:
            display_i = oldest_i
        input_frames = []
        for offset in range(3):
            j = display_i + offset
            if j in buf:
                input_frames.append(buf[j])

        # Display.
        frame = process_frame(input_frames, i)
        cv2.imshow('loop', frame)

        key = cv2.waitKey(1)
        if key != -1:
            break  # any key exits, so the cleanup below is reachable

        # Rotate buffer.
        buf.pop(i - buf_size, None)
        i += 1

    cv2.destroyAllWindows()
    vs.stop()
Example #20
def main(args):
    def resource_path(relative_path):
        """Convert relative resource path to the real path"""
        if hasattr(sys, "_MEIPASS"):
            return os.path.join(sys._MEIPASS, relative_path)
        return os.path.join(relative_path)

    '''caffe DNN model for face detection'''
    net = cv2.dnn.readNetFromCaffe(resource_path(args["prototxt"]),
                                   resource_path(args["face_model"]))
    '''caffe CascadeClassifier model for face detection'''
    # net = cv2.CascadeClassifier('models/face/haarcascade_frontalface_alt.xml')
    ''''tflearn emotion model'''
    # network = EMR()
    # network.build_network()
    '''keras emotion model'''
    # network = resnext.ResNextImageNet(include_top=True, input_shape=(64, 64, 1), classes=8)
    # network.load_s("models/model.h5")
    network = load_model(resource_path("models/resnet50_final.h5"))
    '''tensorflow emotion model'''
    # network = predictor_factories.from_saved_model(args["emotion_model_dir"])

    print("[INFO] sampling frames from webcam...")
    vs = WebcamVideoStream(args["webcam"]).start()
    fps = FPS().start()
    heart_rate = None
    while True:
        time0 = time.time()
        frame = vs.read()
        frame = imutils.resize(frame, width=720)
        face_rect = None  # guard against NameError if detection raises below
        try:
            face_rect = face_detection.detector(net, frame)
        except Exception as e:
            print("face_detection failed: {}".format(str(e)))

        if face_rect is not None:
            result = emotion_detection.detector1(frame, face_rect, network)
            cv2.rectangle(frame, (face_rect[0], face_rect[1]),
                          (face_rect[2], face_rect[3]), (0, 255, 0), 2)
            cv2.putText(frame, "{}".format(EMOTION[np.argmax(result)]),
                        (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
            # cv2.imwrite("sdfjdf",frame)
            if args["display"] > 0:
                cv2.imshow("Frame", frame)
                key = cv2.waitKey(5) & 0xFF
                if key == ord("q"):
                    break
        fps.update()
    fps.stop()
    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
    vs.stop()
Example #21
    def run_detection(self):

        print("Start object detection ...")

        self.define_classes()
        vs = WebcamVideoStream(0).start()
        fps = FPS().start()

        while True:

            frame = vs.read()
            (h, w) = frame.shape[:-1]
            blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
                                         scalefactor=0.007843, size=(300, 300), mean=127.5)
            self.net.setInput(blob)
            detections = self.net.forward()

            # Loop over detected objects
            for i in np.arange(0, detections.shape[2]):

                # Get proba for each object
                probability = detections[0, 0, i, 2]

                # Set up threshold for filtering detection
                if probability > 0.5:
                    # Get prediction index
                    index = int(detections[0, 0, i, 1])

                    # Get bounding box coordinates
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")

                    label = "{}: {:.2f}%".format(self.classes[index], probability * 100)

                    y = startY - 10 if startY - 10 > 10 else startY + 10

                    cv2.rectangle(frame, (startX, startY), (endX, endY),
                                  self.colors[index], 2)
                    cv2.putText(frame, label, (startX, y),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, self.colors[index], 2)

            cv2.imshow('frame', frame)
            fps.update()

            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                cv2.destroyAllWindows()
                vs.stop()
                fps.stop()
                break

        print("Fps: {:.2f}".format(fps.fps()))
        fps.update()
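define_classes() is not shown. The 300x300 blob, 0.007843 scale factor, and 127.5 mean match the widely used MobileNet-SSD Caffe model, so a plausible body sets the matching PASCAL VOC class list and per-class colors (an assumption about the original):

import numpy as np

def define_classes(self):
    self.classes = ["background", "aeroplane", "bicycle", "bird", "boat",
                    "bottle", "bus", "car", "cat", "chair", "cow",
                    "diningtable", "dog", "horse", "motorbike", "person",
                    "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    # one random color per class for the drawing code above
    self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))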
Example #22
def cam(cap_id,e):
    cap = WebcamVideoStream(cap_id).start()
    while True:
        frame = cap.read()
        if e.is_set():
            cv2.circle(frame,(50,50),10,(255,0,255),2)
        cv2.imshow(str(cap_id), frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.stop()
    cv2.destroyWindow(str(cap_id))
    return
Example #23
def apriltag_test():
    camera = WVS(src=0).start()
    while 1:
        frame = camera.read()
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        detections, dimg = det.detect(img, return_image=True)
        cv2.imshow('Original', frame)
        cv2.imshow('Detection', dimg)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break
    camera.stop()
    cv2.destroyAllWindows()
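WVS is an alias for WebcamVideoStream, and the module-level det detector is assumed to exist. A plausible setup using the apriltag Python bindings, whose detect() supports return_image (the tag family is an assumption):

import apriltag

det = apriltag.Detector(apriltag.DetectorOptions(families="tag36h11"))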
Example #24
    def open_camera(self):
        UITool.msg_window('Info', 'Start face distance protection mode.')
        flag = True
        vs = WebcamVideoStream(src=0)
        vs.start()
        time.sleep(2.0)
        cv2.startWindowThread()
        while flag:
            check = True
            frame = vs.read()
            frame = imutils.resize(frame, width=800)
            display_frame = frame.copy()
            capt_faces = FaceCapture.cap(frame)
            if capt_faces.has_face:
                display_frame = ImgTool.add_face_block(display_frame,
                                                       capt_faces)
                face_data = FacesProc.get_all_faces_with_encode_low_filter(
                    frame, self.mode)
                if face_data:
                    check = self.check_face_encode(face_data)
                    print(f"status: {check}")
                    if not check:
                        print(
                            "WARN - found a stranger and no master at the same time !!!"
                        )
                        self.alert = True
                else:
                    print('fetched face data is not enough')

            if self.display:
                display_frame = cv2.flip(display_frame, 1)
                if check:
                    text = 'Safe'
                    print(text)
                    display_frame = ImgTool.add_text(display_frame, text)
                else:
                    text = 'Detect Stranger!'
                    display_frame = ImgTool.add_text(display_frame,
                                                     text,
                                                     color='red')

                cv2.imshow('Frame', display_frame)

            key = cv2.waitKey(1)
            if self.alert or key == 27 or key == ord('q'):
                flag = False
                cv2.waitKey(500)
                cv2.destroyAllWindows()
                cv2.waitKey(500)

        vs.stop()
        cv2.destroyAllWindows()
Example #25
    def launch(self):
        flag = True
        vs = WebcamVideoStream(src=0)
        vs.start()
        time.sleep(2.0)
        fetch_face = 0
        last_face_ts = 0
        cv2.startWindowThread()
        while flag:
            frame = vs.read()
            if frame is None:
                continue
            frame = imutils.resize(frame, width=800)
            display_frame = frame.copy()
            capt_faces = FaceCapture.cap(frame)
            if capt_faces.face_count == 1:
                display_frame = ImgTool.add_face_block(display_frame,
                                                       capt_faces)
                ts = FileTool.get_curr_ts()
                if ts - last_face_ts > RecordMD.INTERVAL:
                    if self.__record_face(frame, capt_faces, ts):
                        fetch_face += 1
                        print(
                            f'fetch face successfully. ({fetch_face}/{self.fetch_count})'
                        )
                        last_face_ts = ts
                    else:
                        print('failed to record faces')

            if self.display:
                display_frame = cv2.flip(display_frame, 1)
                text = f"{fetch_face} / {self.fetch_count}"
                display_frame = ImgTool.add_text(display_frame, text)
                cv2.imshow('Frame', display_frame)

            key = cv2.waitKey(1)

            if fetch_face == self.fetch_count or key == 27 or key == ord('q'):
                flag = False
                cv2.waitKey(500)
                cv2.destroyAllWindows()
                cv2.waitKey(500)

        vs.stop()
        cv2.destroyAllWindows()

        # self.__img_meta.to_csv(RecordMD.meta_path, index=False, encoding='utf-8')
    msg = 'Finished recording master data.'
        UITool.msg_window(msg=msg)

        print('finish saving img meta.')
Example #26
def main(mode):
	if mode == 'human':
		#cascade = cv2.CascadeClassifier('./cascade_xml/haarcascade_frontalface_default.xml')
		cascade = cv2.CascadeClassifier('./cascade_xml/lbpcascade_frontalface_improved.xml')
	else:
		cascade = None
		
		
	cam = WebcamVideoStream(src=-1).start()
	prevTime = 0

	while 1:
		#ret, img = cam.read()
		img = cam.read()
		#img = imutils.resize(img,height=400)
		gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
		
		#fps
		# get the current time (in seconds)
		curTime = time.time()

		# current time minus previous time gives
		# the time taken for one loop iteration
		sec = curTime - prevTime
		# store the current time as the new previous time
		prevTime = curTime

		# frame rate: divide one second by the time per loop
		# 1 / time per frame
		fps = 1/(sec)

		# store the frame rate in a string (renamed from "str", which shadowed the builtin)
		fps_text = "FPS : %0.1f" % fps

		# display it
		cv2.putText(img, fps_text, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0))

		#Cam START
		#if ret:
		detectAndDisply(img,cascade,mode)
			
		#ESC Click -> EXIT
		if cv2.waitKey(1) & 0xFF == 27:
			m.clean()
			break

				
	#cam.release()
	cv2.destroyAllWindows()
	cam.stop()
Example #27
class VideoCam(object):
    def __init__(self):
        self.streaming = WebcamVideoStream(src=0).start()

    def __del__(self):
        self.streaming.stop()

    def get_frame(self):
        image = self.streaming.read()
        ret, jpeg = cv2.imencode('.jpg', image)
        data = []
        data.append(jpeg.tobytes())

        return data
Example #28
def calibrate_blur(frames, path, src=0):
    blur_data = BlurData()
    camera_stream = WebcamVideoStream(src=src).start()
    fps = FPS().start()

    while fps._numFrames < frames:
        frame = camera_stream.read()
        blur_data.image = frame
        blur_data.define_blur_strength()
        BlurData.serialize_blur_data(blur_data, path)
        fps.update()  # advance the frame counter so the loop terminates

    fps.stop()
    camera_stream.stop()
    cv2.destroyAllWindows()
Example #29
def calibrate_hsv(frames, path, src=0):
    hsv_data = HSVData()
    camera_stream = WebcamVideoStream(src=src).start()
    fps = FPS().start()

    while fps._numFrames < frames:
        frame = camera_stream.read()
        hsv_data.image = frame
        hsv_data.define_hsv_range()
        HSVData.serialize_hsv_data(hsv_data.lower_bound, hsv_data.upper_bound, path)
        fps.update()  # advance the frame counter so the loop terminates

    fps.stop()
    camera_stream.stop()
    cv2.destroyAllWindows()
Example #30
def cam(cap_id, e):
    args = utilities.parseConnectionArguments()
    # Create connection to the device and get the router
    with utilities.DeviceConnection.createTcpConnection(args) as router:
        cap = WebcamVideoStream(cap_id).start()
        while True:
            frame = cap.read()
            if e.is_set():
                break
            cv2.imshow("robot camera", frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        cap.stop()
        #cv2.destroyWindow(str(cap_id))
        return
Example #31
def main():
	points = []
	ticks =str(int(time.time() * 1000))
	# construct the argument parse and parse the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-v", "--video",
		help="path to the (optional) video file")
	ap.add_argument("-b", "--buffer", type=int, default=64,
		help="max buffer size")
	args = vars(ap.parse_args())

	# define the lower and upper boundaries of the "green"
	# ball in the HSV color space, then initialize the
	# list of tracked points

	# BLUE
	ballLower, ballUpper = getBallColor()

	pts = deque(maxlen=args["buffer"])

	# create a *threaded* video stream, allow the camera sensor to warm up,
	# and start the FPS counter
	debugPrint("[INFO] sampling THREADED frames from webcam...")
	vs = WebcamVideoStream(src=1).start()
	frame = vs.read()
	if frame is None:
		debugPrint("Changed to default camera!")
		# Take the default camera (webcam is not connected)
		vs = WebcamVideoStream(src=0).start()

	if __ENABLE_VIDEO_OUT__:
		outStream = cv2.VideoWriter('Output/output' + ticks +'.avi', -1, 20.0, (1000,705))

	eventHook = EventHook()

	# keep looping
	while True:
		# grab the current frame
		frame = vs.read()

		if frame is None:
			continue
		
		# resize the frame, blur it, and convert it to the HSV
		# color space
		frame = imutils.resize(frame, width=1000)
		frame = frame[40:745,0:1000]

		# blurred = cv2.GaussianBlur(frame, (11, 11), 0)
		hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

		# construct a mask for the color "green", then perform
		# a series of dilations and erosions to remove any small
		# blobs left in the mask
		mask = cv2.inRange(hsv, ballLower, ballUpper)
		mask = cv2.erode(mask, None, iterations=2)
		mask = cv2.dilate(mask, None, iterations=2)

		# find contours in the mask and initialize the current
		# (x, y) center of the ball
		cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
			cv2.CHAIN_APPROX_SIMPLE)[-2]
		center = None

		# only proceed if at least one contour was found
		if len(cnts) > 0:
			# find the largest contour in the mask, then use
			# it to compute the minimum enclosing circle and
			# centroid
			c = max(cnts, key=cv2.contourArea)
			((x, y), radius) = cv2.minEnclosingCircle(c)
			M = cv2.moments(c)
			center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))

			# only proceed if the radius meets a minimum size
			if radius > 5: #Changed from 10
				# draw the circle and centroid on the frame,
				# then update the list of tracked points
				cv2.circle(frame, (int(x), int(y)), int(radius),
					(0, 255, 255), 2)
				cv2.circle(frame, center, 5, (0, 0, 255), -1)

		# update the points queue
		pts.appendleft(center)

		debugPrint(center)
		if center is None:
			points.append((-1,-1))
		else:
			points.append(center)

		# loop over the set of tracked points
		for i in range(1, len(pts)):  # xrange in the original; range on Python 3
			# if either of the tracked points are None, ignore
			# them
			if pts[i - 1] is None or pts[i] is None:
				continue

			# otherwise, compute the thickness of the line and
			# draw the connecting lines
			thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
			cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)

		# show the frame to our screen
		cv2.imshow("Frame", frame)
		
		if __ENABLE_VIDEO_OUT__:
			outStream.write(frame)

		# add replay frame
		addReplayFrame(frame)
		
		key = cv2.waitKey(1) & 0xFF

		# if the 'q' key is pressed, stop the loop
		if key == ord("q"):
			break

		if center is None:
			if eventHook.fire([(-1,-1)]):
				saveGoalVideo()
		else:
			normalizePoint = (int(2000*float(center[0]/1000.0)),int(1000*float(center[1]/705.0)))
			if eventHook.fire([normalizePoint]):
				saveGoalVideo()

			debugPrint(normalizePoint)

	# cleanup the camera and close any open windows
	cv2.destroyAllWindows()
	vs.stop()
	savePtsVector(points,ticks)
	raise SystemExit
	if __ENABLE_VIDEO_OUT__:
		outStream.release()
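getBallColor() is defined elsewhere; per the "BLUE" comment it returns the lower and upper HSV bounds for the ball. A hypothetical version (the exact bounds are made up):

import numpy as np

def getBallColor():
    # lower/upper HSV bounds for a blue ball
    return np.array([100, 150, 50]), np.array([130, 255, 255])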
Example #32
class EyeCanSee(object):
    def __init__(self, center=int(cvsettings.CAMERA_WIDTH / 2), debug=False, is_usb_webcam=True, period_s=0.025):
        # Our video stream
        # If its not a usb webcam then get pi camera
        if not is_usb_webcam:
            self.vs = PiVideoStream(resolution=(cvsettings.CAMERA_WIDTH, cvsettings.CAMERA_HEIGHT))
            # Camera cvsettings
            self.vs.camera.shutter_speed = cvsettings.SHUTTER
            self.vs.camera.exposure_mode = cvsettings.EXPOSURE_MODE
            self.vs.camera.exposure_compensation = cvsettings.EXPOSURE_COMPENSATION
            self.vs.camera.awb_gains = cvsettings.AWB_GAINS
            self.vs.camera.awb_mode = cvsettings.AWB_MODE
            self.vs.camera.saturation = cvsettings.SATURATION
            self.vs.camera.rotation = cvsettings.ROTATION
            self.vs.camera.video_stabilization = cvsettings.VIDEO_STABALIZATION
            self.vs.camera.iso = cvsettings.ISO
            self.vs.camera.brightness = cvsettings.BRIGHTNESS
            self.vs.camera.contrast = cvsettings.CONTRAST

        # Else get the usb camera
        else:
            self.vs = WebcamVideoStream(src=0)
            self.vs.stream.set(cv2.CAP_PROP_FRAME_WIDTH, cvsettings.CAMERA_WIDTH)
            self.vs.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, cvsettings.CAMERA_HEIGHT)

        # Has camera started
        self.camera_started = False
        self.start_camera()  # Starts our camera

        # To calculate our error in positioning
        self.center = center

        # To determine if we actually detected lane or not
        self.detected_lane = False

        # debug mode on? (to display processed images)
        self.debug = debug

        # Time interval between in update (in ms)
        # FPS = 1/period_s
        self.period_s = period_s

        # Starting time
        self.start_time = time.time()

    # Mouse event handler for get_hsv
    def on_mouse(self, event, x, y, flag, param):
        if event == cv2.EVENT_LBUTTONDBLCLK:
            # Circle to indicate hsv location, and update frame
            cv2.circle(self.img_debug, (x, y), 3, (0, 0, 255))
            cv2.imshow('hsv_extractor', self.img_debug)

            # Print values
            values = self.hsv_frame[y, x]
            print('H:', values[0], '\tS:', values[1], '\tV:', values[2])

    def get_hsv(self):
        cv2.namedWindow('hsv_extractor')
        while True:
            self.grab_frame()

            # Bottom ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_BOTTOM-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Top ROI
            cv2.rectangle(self.img_debug, (0, cvsettings.HEIGHT_PADDING_TOP-2), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT + 2), (0, 250, 0), 2)

            # Object
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            self.hsv_frame = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)

            # Mouse handler
            cv2.setMouseCallback('hsv_extractor', self.on_mouse, 0)
            cv2.imshow('hsv_extractor', self.img_debug)

            key = cv2.waitKey(0) & 0xFF
            if key == ord('q'):
                break
        self.stop_camera()
        cv2.destroyAllWindows()

    # Starts camera (needs to be called before run)
    def start_camera(self):
        self.camera_started = True
        self.vs.start()
        time.sleep(2.0)  # Wait for the camera to warm up

    def stop_camera(self):
        self.camera_started = False
        self.vs.stop()

    # Grabs frame from camera
    def grab_frame(self):
        # Starts camera if it hasn't been started
        if not self.camera_started:
            self.start_camera()
        self.img = self.vs.read()
        self.img_debug = self.img.copy()

    # Normalizes our image
    def normalize_img(self):
        # Crop img and convert to hsv
        self.img_roi_bottom = np.copy(self.img[cvsettings.HEIGHT_PADDING_BOTTOM:int(cvsettings.HEIGHT_PADDING_BOTTOM + cvsettings.IMG_ROI_HEIGHT), :])
        self.img_roi_top = np.copy(self.img[cvsettings.HEIGHT_PADDING_TOP:int(cvsettings.HEIGHT_PADDING_TOP + cvsettings.IMG_ROI_HEIGHT), :])

        self.img_roi_bottom_hsv = cv2.cvtColor(self.img_roi_bottom, cv2.COLOR_BGR2HSV).copy()
        self.img_roi_top_hsv = cv2.cvtColor(self.img_roi_top, cv2.COLOR_BGR2HSV).copy()

        # Get our ROI's shape
        # Doesn't matter because both of them are the same shape
        self.roi_height, self.roi_width, self.roi_channels = self.img_roi_bottom.shape

    # Smooth image and convert to binary image (threshold)
    # Filter out colors that are not within the RANGE value
    def filter_smooth_thres(self, RANGE, color):
        # note: each pass overwrites the masks, so only the last
        # (lower, upper) pair in RANGE takes effect; cv2.bitwise_or
        # would be needed to combine several ranges
        for (lower, upper) in RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_bottom = cv2.inRange(self.img_roi_bottom_hsv, lower, upper)
            mask_top = cv2.inRange(self.img_roi_top_hsv, lower, upper)

        blurred_bottom = cv2.medianBlur(mask_bottom, 5)
        blurred_top = cv2.medianBlur(mask_top, 5)

        # Morphological transformation
        kernel = np.ones((2, 2), np.uint8)
        smoothen_bottom = blurred_bottom #cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)
        smoothen_top = blurred_top  # cv2.morphologyEx(blurred, cv2.MORPH_OPEN, kernel, iterations=5)

        """
        if self.debug:
            cv2.imshow('mask bottom ' + color, mask_bottom)
            cv2.imshow('blurred bottom' + color, blurred_bottom)

            cv2.imshow('mask top ' + color, mask_top)
            cv2.imshow('blurred top' + color, blurred_top)
        """

        return smoothen_bottom, smoothen_top

    # Gets metadata from our contours
    def get_contour_metadata(self):

        # Metadata (x,y,w,h)for our ROI
        contour_metadata = {}
        for cur_img in [self.thres_yellow_bottom, self.thres_yellow_top, self.thres_blue_bottom, self.thres_blue_top]:
            key = ''

            # Blue is left lane, Yellow is right lane
            if cur_img is self.thres_yellow_bottom:
                key = 'right_bottom'
            elif cur_img is self.thres_yellow_top:
                key = 'right_top'
            elif cur_img is self.thres_blue_bottom:
                key = 'left_bottom'
            elif cur_img is self.thres_blue_top:
                key = 'left_top'

            _, contours, hierarchy = cv2.findContours(cur_img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

            cur_height, cur_width = cur_img.shape

            # Get index of largest contour
            try:
                areas = [cv2.contourArea(c) for c in contours]
                max_index = np.argmax(areas)
                cnt = contours[max_index]

                # Metadata of contour
                x, y, w, h = cv2.boundingRect(cnt)

                # Normalize it to the original picture
                x += int(cvsettings.WIDTH_PADDING + w / 2)

                if 'top' in key:
                    y += int(cvsettings.HEIGHT_PADDING_TOP +h /2)
                else:
                    y += int(cvsettings.HEIGHT_PADDING_BOTTOM + h / 2)

                contour_metadata[key] = (x, y)

                self.detected_lane = True

            # If it throws an error then it doesn't have a ROI
            # Means we're too far off to the left or right
            except:

                # Blue is left lane, Yellow is right lane
                x = int(cvsettings.WIDTH_PADDING) - cvsettings.CAMERA_WIDTH

                if 'bottom' in key:
                    y = int(cur_height / 2) + int(cvsettings.HEIGHT_PADDING_BOTTOM + cur_height / 2)
                else:
                    y = int(cur_height / 2) + int(cvsettings.HEIGHT_PADDING_TOP + cur_height / 2)

                if 'right' in key:
                    x = int(cur_width)*2

                contour_metadata[key] = (x, y)

                self.detected_lane = False

        return contour_metadata

    # Gets the centered coord of the detected lines
    def get_centered_coord(self):
        bottom_centered_coord = None
        top_centered_coord = None

        left_xy_bottom = self.contour_metadata['left_bottom']
        right_xy_bottom = self.contour_metadata['right_bottom']

        left_xy_top = self.contour_metadata['left_top']
        right_xy_top = self.contour_metadata['right_top']

        bottom_xy = (left_xy_bottom[0] + right_xy_bottom[0], left_xy_bottom[1] + right_xy_bottom[1])
        bottom_centered_coord = (int(bottom_xy[0] / 2), int(bottom_xy[1] / 2))

        top_xy = (left_xy_top[0] + right_xy_top[0], left_xy_top[1] + right_xy_top[1])
        top_centered_coord = (int(top_xy[0] / 2), int(top_xy[1] / 2))

        # Left can't be greater than right and vice-versa
        # (note: each elif condition below is equivalent to its if
        # condition, so the elif branches can never run as written)
        if left_xy_top > right_xy_top:
            top_centered_coord = (0, top_centered_coord[1])
        elif right_xy_top < left_xy_top:
            top_centered_coord = (cvsettings.CAMERA_WIDTH, top_centered_coord[1])

        if left_xy_bottom > right_xy_bottom:
            bottom_centered_coord = (0, bottom_centered_coord[1])
        elif right_xy_bottom < left_xy_bottom:
            bottom_centered_coord = (cvsettings.CAMERA_WIDTH, bottom_centered_coord[1])

        return bottom_centered_coord, top_centered_coord

    # Gets the error of the centered coordinates (x)
    # Also normalizes their values
    def get_errors(self):
        top_error = (float(self.center_coord_top[0]) - float(self.center))/float(cvsettings.CAMERA_WIDTH + cvsettings.WIDTH_PADDING)
        bottom_error = (float(self.center_coord_bottom[0]) - float(self.center))/float(cvsettings.CAMERA_WIDTH + cvsettings.WIDTH_PADDING)
        relative_error = (float(self.center_coord_top[0]) - float(self.center_coord_bottom[0]))/float(cvsettings.CAMERA_WIDTH + cvsettings.WIDTH_PADDING)

        return (top_error + relative_error + bottom_error)/3.0

    # Object avoidance
    def where_object_be(self):
        # We only want to detect objects in our path: center of top region and bottom region
        # So to save processing speed, we'll only threshold from center of top region to center of bottom region
        left_x = cvsettings.WIDTH_PADDING
        right_x = cvsettings.CAMERA_WIDTH

        # Image region with objects
        img_roi_object = self.img[cvsettings.OBJECT_HEIGHT_PADDING : int(cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), left_x:right_x]
        img_roi_object_hsv = cv2.cvtColor(img_roi_object, cv2.COLOR_BGR2HSV).copy()

        # Filtering color and blurring
        for (lower, upper) in cvsettings.OBJECT_HSV_RANGE:
            lower = np.array(lower, dtype='uint8')
            upper = np.array(upper, dtype='uint8')

            mask_object = cv2.inRange(img_roi_object_hsv, lower, upper)

        blurred_object = cv2.medianBlur(mask_object, 25)

        # Finding position of object (if its there)
        _, contours, hierarchy = cv2.findContours(blurred_object.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        left_x = self.contour_metadata['left_top'][0]
        right_x = self.contour_metadata['right_top'][0]

        for c in contours:
            areas = cv2.contourArea(c)

            # Object needs to be larger than a certain area
            if areas > cvsettings.OBJECT_AREA:
                x, y, w, h = cv2.boundingRect(c)

                y += int(cvsettings.OBJECT_HEIGHT_PADDING + h / 2)

                # Confusing part - this finds the object and makes it think that
                # it is also a line to avoid bumping into it
                # It +w and -w to find which line its closest to and then set
                # the opposite as to be the new left/right lane
                # e.g. line is closest to left lane (x-w), so left lane new x is
                # (x+w)

                distance_to_left = abs(x - left_x)
                distance_to_right = abs(x+w - right_x)

                # Make object's left most area the middle of right lane
                if distance_to_left > distance_to_right:
                    self.contour_metadata['right_top'] = (x, self.contour_metadata['right_top'][1])

                # Make object's right most area the middle of left lane
                elif distance_to_right > distance_to_left:  # the original compared distance_to_right with itself
                    self.contour_metadata['left_top'] = (x+w, self.contour_metadata['left_top'][1])

                if self.debug:
                    cv2.circle(self.img_debug, (x + w // 2, y + h // 2), 5, (240, 32, 160), 2)

        if self.debug:
            cv2.imshow('Blurred object', blurred_object)


    # Where are we relative to our lane
    def where_lane_be(self):
        # Running once every period_ms
        while float(time.time() - self.start_time) < self.period_s:
            pass

        # Update time instance
        self.start_time = time.time()

        # Camera grab frame and normalize it
        self.grab_frame()
        self.normalize_img()

        # Filter out them colors
        self.thres_blue_bottom, self.thres_blue_top = self.filter_smooth_thres(cvsettings.BLUE_HSV_RANGE, 'blue')
        self.thres_yellow_bottom, self.thres_yellow_top = self.filter_smooth_thres(cvsettings.YELLOW_HSV_RANGE, 'yellow')

        # Get contour meta data
        self.contour_metadata = self.get_contour_metadata()

        # Finds objects and (and corrects lane position)
        # this overwrite contour_metadata
        #self.where_object_be()

        # Find the center of the lanes (bottom and top) [we wanna be in between]
        self.center_coord_bottom, self.center_coord_top = self.get_centered_coord()

        # Gets relative error between top center and bottom center
        self.relative_error = self.get_errors()

        if self.debug:
            # Drawing locations
            blue_top_xy = self.contour_metadata['left_top']
            blue_bottom_xy = self.contour_metadata['left_bottom']
            yellow_top_xy = self.contour_metadata['right_top']
            yellow_bottom_xy = self.contour_metadata['right_bottom']

            # Circles to indicate lanes
            cv2.circle(self.img_debug, blue_top_xy, 5, (255, 0, 0), 2)
            cv2.circle(self.img_debug, blue_bottom_xy, 5, (255, 0, 0), 2)
            cv2.circle(self.img_debug, yellow_top_xy, 5, (0, 255, 255), 2)
            cv2.circle(self.img_debug, yellow_bottom_xy, 5, (0, 255, 255), 2)
            cv2.circle(self.img_debug, self.center_coord_bottom, 5, (0, 255, 0), 3)
            cv2.circle(self.img_debug, self.center_coord_top, 5, (0, 255, 0), 3)

            # ROI for object detection
            cv2.rectangle(self.img_debug, (0, cvsettings.OBJECT_HEIGHT_PADDING), (cvsettings.CAMERA_WIDTH, cvsettings.HEIGHT_PADDING_TOP - cvsettings.OBJECT_HEIGHT_PADDING), (238, 130, 238), 2)

            # Displaying image
            cv2.imshow('img', self.img_debug)
            #cv2.imshow('img_roi top', self.img_roi_top)
            #cv2.imshow('img_roi bottom', self.img_roi_bottom)
            #cv2.imshow('img_hsv', self.img_roi_hsv)
            cv2.imshow('thres_blue_bottom', self.thres_blue_bottom)
            cv2.imshow('thres_blue_top', self.thres_blue_top)
            cv2.imshow('thres_yellow_bottom', self.thres_yellow_bottom)
            cv2.imshow('thres_yellow_top', self.thres_yellow_top)
            key = cv2.waitKey(0) & 0xFF  # waitKey(0) pauses on each frame; use waitKey(1) to run continuously

    # Use this to calculate fps
    def calculate_fps(self, frames_no=100):
        fps = FPS().start()

        # Remember the debug state and disable it so no windows pop up
        was_debug = self.debug
        self.debug = False

        for i in range(0, frames_no):
            self.where_lane_be()
            fps.update()

        fps.stop()

        # Restore the original debug state
        self.debug = was_debug

        print('Time taken: {:.2f}'.format(fps.elapsed()))
        print('~ FPS : {:.2f}'.format(fps.fps()))

    # Use this to save images to a location
    def save_images(self, dirname='dump'):
        import os
        img_no = 1

        # Makes the directory
        if not os.path.exists('./' + dirname):
            os.mkdir(dirname)

        while True:
            self.grab_frame()

            if self.debug:
                cv2.imshow('frame', self.img)

            k = cv2.waitKey(1) & 0xFF

            if k == ord('s'):
                cv2.imwrite(os.path.join(dirname, 'dump_' + str(img_no) + '.jpg'), self.img)
                img_no += 1

            elif k == ord('q'):
                break

        cv2.destroyAllWindows()

    # Destructor
    def __del__(self):
        self.vs.stop()
        cv2.destroyAllWindows()
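
# A minimal driver sketch for the lane-follower class above (hypothetical:
# the class name and constructor live earlier in the original file; the name
# LaneFollower and its debug flag are assumptions here):
#
#   lf = LaneFollower(debug=True)
#   lf.calculate_fps(frames_no=50)     # benchmark the vision pipeline
#   lf.save_images(dirname='dump')     # 's' saves the current frame, 'q' quits
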
class Stereo_Vision:
    """Class for obtaining and analyzing depth maps"""

    def __init__(self, cam_L_src, cam_R_src, display_frames=True, threshold=110):

        # initialize camera feed threads
        self.capL = WebcamVideoStream(src=cam_L_src).start()
        time.sleep(0.5)
        self.capR = WebcamVideoStream(src=cam_R_src).start()

        self.stop = False

        self.display_frames = display_frames

        # initialize windows
        if self.display_frames:
            cv2.namedWindow('Depth Map')
            cv2.namedWindow('Threshold')
            cv2.namedWindow('Shapes')

        # import calibration matrices
        self.undistMapL = np.load('calibration/undistortion_map_left.npy')
        self.undistMapR = np.load('calibration/undistortion_map_right.npy')
        self.rectMapL = np.load('calibration/rectification_map_left.npy')
        self.rectMapR = np.load('calibration/rectification_map_right.npy')

        # initialize blank frames (numpy image arrays are height x width x channels)
        self.rectL = np.zeros((480, 640, 3), np.uint8)
        self.rectR = np.zeros((480, 640, 3), np.uint8)

        self.quadrant_near_object = np.zeros((3, 3), dtype=bool)
        self.threshold = threshold

        self.exec_time_sum = 0
        self.frame_counter = 0
        self.fps = 0

        self.no_object_left_column = False
        self.no_object_mid_column = False
        self.no_object_right_column = False
        self.no_object_bottom_row = False
        self.no_object_mid_row = False
        self.no_object_top_row = False

    def start(self):
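        # Run the capture/processing loop on a background thread so the
        # caller can keep polling get_quadrants() without blocking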
        self.vision_thread = Thread(target=self.start_stereo)
        self.vision_thread.start()

    def stop_vision(self):
        self.stop = True

    def get_quadrants(self):
        return self.quadrant_near_object

    def start_stereo(self):
        while not self.stop:

            self.start_time = time.time()
            self.frameL = self.capL.read()
            self.frameR = self.capR.read()

            # remap cameras to remove distortion
            self.rectL = cv2.remap(self.frameL, self.undistMapL, self.rectMapL, cv2.INTER_LINEAR)
            self.rectR = cv2.remap(self.frameR, self.undistMapR, self.rectMapR, cv2.INTER_LINEAR)

            # convert rectified from RGB to 8 bit grayscale
            self.grayRectL = cv2.cvtColor(self.rectL, cv2.COLOR_BGR2GRAY)
            self.grayRectR = cv2.cvtColor(self.rectR, cv2.COLOR_BGR2GRAY)
            self.grayFrameL = cv2.cvtColor(self.frameL, cv2.COLOR_BGR2GRAY)
            self.grayFrameR = cv2.cvtColor(self.frameR, cv2.COLOR_BGR2GRAY)

            # reset the occupancy grid so detections from previous frames
            # do not latch (quadrant_near_object is re-derived every frame)
            self.quadrant_near_object[:] = False

            self.object_left_column = True
            self.object_mid_column = True
            self.object_right_column = True
            self.object_top_row = True
            self.object_mid_row = True
            self.object_bottom_row = True

            # create the depth map (note: the block matcher could be created
            # once in __init__, since its parameters never change)
            self.stereo = cv2.StereoBM_create(numDisparities=0, blockSize=13)

            self.disparity = self.stereo.compute(self.grayRectL, self.grayRectR).astype(np.float32)
            self.disparity = cv2.normalize(self.disparity, self.disparity, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,
                                           dtype=cv2.CV_8UC1)

            # blur depth map to filter high frequency noise
            self.disparity_blur = cv2.medianBlur(self.disparity, 13)

            #apply 2 pixel border, helps keep contours continuous at extreme edges of image
            self.disparity_blur = cv2.copyMakeBorder(self.disparity_blur, top=2, bottom=2, right=2, left=2,
                                                     borderType=cv2.BORDER_CONSTANT, value=0)

            # threshold depth map
            self.disparity_thresh_imm = cv2.threshold(self.disparity_blur, self.threshold, 255, cv2.THRESH_BINARY)[1]

            self.disparity_edge = cv2.Canny(self.disparity_thresh_imm, 100, 200)

            # contour finding ([1] matches the OpenCV 3.x return signature of
            # (image, contours, hierarchy); OpenCV 4.x returns (contours, hierarchy))
            self.contours = cv2.findContours(self.disparity_edge, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[1]

            self.disparity_contours = np.zeros((480, 640, 3), np.uint8)

            for self.contour_index in self.contours:
                #draw bounding rectangle around each contour
                self.rect = cv2.minAreaRect(self.contour_index)
                self.box = cv2.boxPoints(self.rect)
                self.box = self.box.astype(int)  # np.int0 was removed in NumPy 2.x

                #calculate area of each box
                self.box_area = abs(self.box[2][0] - self.box[0][0]) * abs(self.box[2][1] - self.box[0][1])
                self.col = -1
                self.row = -1

                if self.box_area > 1000: #pixels^2
                    cv2.drawContours(self.disparity_contours, [self.box], 0, (0, 255, 0), 2)  # draw green box

                    # determine which quadrants the rectangle occupies
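                    # (the 640x480 frame is split into a 3x3 grid:
                    # columns are ~213 px wide, rows are 160 px tall)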
                    for corner_index in range(0, 4):  # check all four box corners
                        self.col = self.box[corner_index][0] // 213  # x coordinate -> grid column
                        self.row = self.box[corner_index][1] // 160  # y coordinate -> grid row
                        if self.row > 2:
                            self.row = 2

                        if self.col > 2:
                            self.col = 2

                        if self.col != -1 and self.row != -1:
                            self.quadrant_near_object[self.row][self.col] = True


            # Set flags for the main program: a column/row is "clear" only
            # when none of its three quadrants contains a nearby object
            self.no_object_left_column = not self.quadrant_near_object[:, 0].any()
            self.no_object_mid_column = not self.quadrant_near_object[:, 1].any()
            self.no_object_right_column = not self.quadrant_near_object[:, 2].any()
            self.no_object_top_row = not self.quadrant_near_object[0, :].any()
            self.no_object_mid_row = not self.quadrant_near_object[1, :].any()
            self.no_object_bottom_row = not self.quadrant_near_object[2, :].any()

            #FPS calculations
            self.end_time = time.time()
            self.exec_time = self.end_time - self.start_time
            self.exec_time_sum += self.exec_time
            self.frame_counter += 1
            self.fps = 1.0 / self.exec_time

            # draw filled contours in pink; grid lines drawn below in blue
            cv2.drawContours(self.disparity_contours, self.contours, -1, (180, 105, 255), -1)
            cv2.line(self.disparity_contours, (0,160), (640,160), (255,0,0))
            cv2.line(self.disparity_contours, (0,320), (640,320), (255,0,0))
            cv2.line(self.disparity_contours, (213,0), (213,480), (255,0,0))
            cv2.line(self.disparity_contours, (426,0), (426,480), (255,0,0))


            if self.display_frames:
                cv2.imshow('Threshold', self.disparity_edge)
                cv2.imshow('Depth Map', self.disparity)
                cv2.imshow('Shapes', self.disparity_contours)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                self.capL.stop()
                self.capR.stop()
                break

        self.capL.stop()
        self.capR.stop()
        cv2.destroyAllWindows()

    def save_frames(self, filename_index):
        cv2.imwrite("Images/DepthMap" + str(filename_index) + ".jpg", self.disparity)
        cv2.imwrite("Images/DepthMapBlur" + str(filename_index) + ".jpg", self.disparity_blur)
        cv2.imwrite("Images/Contours_" + str(filename_index) + ".jpg", self.disparity_contours)
        cv2.imwrite("Images/LeftCam" + str(filename_index) + ".jpg", self.frameL)
        cv2.imwrite("Images/RightCam" + str(filename_index) + ".jpg", self.frameR)
Example No. 34
0
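# Fragment: motion detection against a running-average background model
# (gray, frame, avg and vs are initialized earlier in the original script;
# cv2.accumulateWeighted folds each new frame into avg with weight 0.1, and
# motion appears as the absolute difference between gray and that average)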
    if avg is None:
        avg = gray.copy().astype("float")
        continue

    cv2.accumulateWeighted(gray, avg, 0.1)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    # OpenCV 3.x signature; OpenCV 4.x returns only (contours, hierarchy)
    _, contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for c in contours:
        if cv2.contourArea(c) < 4000:
            print("Tiny")
            continue

        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # centroid from image moments (m00 equals the contour area, so it is
        # nonzero after the area filter above)
        M = cv2.moments(c)
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        cv2.circle(frame, (cX, cY), 7, (0, 255, 0), -1)

    cv2.imshow("Diff", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break

cv2.destroyAllWindows()
vs.stop()
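
# Fragment: recording toggle and teardown from a separate two-camera capture
# script (vs1, vs2, c1, c2, wb, tlog, s, numframe, h, w, fnm_save and timer
# are defined earlier in that script)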
    if key == ord('r'):  # toggle the recording component
        if not record:
            record = True
            print("Starting Recording")
            rec_time = timer()
        else:
            record = False
    elif key == ord('q'):
        break

if record:
    tmem = (numframe * h * w)/(1024**2)  # approximate total memory in megabytes (assumes 1 byte/pixel)
    mall = 2000                          # maximum megabytes per output file
    div = int((numframe * mall)/tmem)    # frames per output file
    num_files = math.ceil(tmem / mall)   # number of output files needed
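
    # Worked example: 10000 frames at 480x640 and 1 byte per pixel gives
    # tmem ~= 2929.7 MB, so num_files = 2 and div = 6826 frames per file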

    for n in range (num_files):
        wb.saveFile(fnm_save + '_c1-%04d.tif' % n , c1[n*(div):(n+1)*(div)])
        wb.saveFile(fnm_save + '_c2-%04d.tif' % n , c2[n*(div):(n+1)*(div)])

print("Shutting down")
tlog.close()
vs1.stop()
vs2.stop()
s.close()
cv2.destroyAllWindows()
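# Pump the HighGUI event loop a few extra times so the windows actually close;
# destroyAllWindows() alone can leave them on screen on some platforms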
for i in range(5):
    cv2.waitKey(1)


    # Fragment: main-loop tail of a non-threaded stereo script (start_time,
    # exec_time_sum, i, disparity, capL and capR are defined earlier there)
    end_time = time.time()
    exec_time = end_time - start_time
    exec_time_sum += exec_time
    i += 1
    fps = 1.0/exec_time

    #cv2.imshow(WINDOW_L, frameL)
    #cv2.imshow(WINDOW_R, frameR)
    #cv2.imshow(WINDOW_L1, rectL)
    #cv2.imshow(WINDOW_R1, rectR)
    #cv2.imshow('Threshold', disparity_thresh_imm)
    cv2.imshow('Depth Map', disparity)
    #cv2.imshow('Shapes', disparity_contours)
    #cv2.imshow('Test', disparity_gaussian)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        capL.stop()
        capR.stop()
        break


capL.stop()
capR.stop()
cv2.destroyAllWindows()

exec_time_avg = exec_time_sum/i
fps_avg = 1.0/exec_time_avg

print "Average Execution Time/Frame : %s" %exec_time_avg
print "Average FPS : %s" %fps_avg