Example 1
# The imports below are implied by this excerpt; the module-level constants
# (MARKER_SIZE, DATA_DIR, TRACKER_CAM, FACE_CAM, the colour ranges,
# CPR_BUFFER_SIZE, SOCK_ADDR, SOCK_PORT) and helper classes (mk.MarkerFinder,
# WristTracker, CPRStatus, StatusSender, DataLogger) are assumed to be
# defined or imported elsewhere in the project.
import sys
from datetime import datetime

import cv2


def main():
    if len(sys.argv) < 2:
        print('USAGE: runtracker.py participant_id [save_location]')
        return

    S = MARKER_SIZE
    F = 1

    participant_id = sys.argv[1]

    save_location = DATA_DIR
    if len(sys.argv) >= 3:
        save_location = sys.argv[2]

    # Mouse callback for setting origin point
    def mouse_callback(event, x, y, flags, param):
        if datalog.is_running():
            return

        if event == cv2.EVENT_LBUTTONDOWN:
            # Set new origin_y
            tracker.set_origin(y)

    # Open webcam
    cap = cv2.VideoCapture(TRACKER_CAM)
    if not cap.isOpened():
        print('Error opening tracker camera!')
        return

    # Open facecam (for separate recording)
    facecap = None
    if FACE_CAM is not None:
        facecap = cv2.VideoCapture(FACE_CAM)
        if not facecap.isOpened():
            print('Error opening face camera!')
            facecap = None

    # Get video parameters (try to retain same attributes for output video)
    width = float(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = float(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = float(cap.get(cv2.CAP_PROP_FPS))

    # Create instances
    track_finder = mk.MarkerFinder(GREEN_COLOR_MIN, GREEN_COLOR_MAX)
    tracker = WristTracker(track_finder, S, F)
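    # default origin: the vertical centre of the frame; it can be moved later
    # by clicking in the output window (see mouse_callback above)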
    tracker.set_origin(int(height / 2))

    cprstatus = CPRStatus(CPR_BUFFER_SIZE)
    statussender = StatusSender(SOCK_ADDR, SOCK_PORT)
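    # some cameras report 0 (or garbage) for CAP_PROP_FPS; fall back to 30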
    datalog = DataLogger(fps if (0 < fps <= 60) else 30, width, height,
                         save_location, facecap is not None)

    trainer_on = True

    cur_position = 0
    cur_size = 0

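    # most recently reported CPR metrics: rate, depth, recoil and status code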
    last_rate, last_depth, last_recoil, last_code = 0, 0, 0, 0

    while True:
        # Get frame
        ret, frame = cap.read()
        if not ret or frame is None:
            break

        faceframe = None
        if facecap:
            faceret, faceframe = facecap.read()

        # Get dimensions
        h, w, _ = frame.shape
        center = (w // 2, h // 2)  # integer pixel coordinates for drawing
        '''
        Output Drawing
        '''

        # Make output image
        output = frame.copy()

        # Display program status
        cv2.putText(
            output,
            '<NOT RUNNING>' if not datalog.is_running() else 'RECORDING ' +
            datalog.get_filename() + ' [' + str(datalog.get_index()) + ']',
            (0, 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 0, 255), 2)
        cv2.putText(
            output, 'R: ' + str(last_rate) + ' D: ' + str(last_depth) +
            ' C: ' + str(last_recoil) + ' S: ' + str(last_code), (0, 60),
            cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)

        # Draw center and origin lines
        cv2.line(output, (center[0], 0), (center[0], h), (0, 0, 255), 1)
        cv2.line(output, (center[0] - 20, int(tracker.get_origin())),
                 (center[0] + 20, int(tracker.get_origin())), (0, 0, 255), 1)

        # Draw instructions
        if not datalog.is_running():
            cv2.putText(
                output, 'c - set clr at orig | t - trainer is ' +
                ('on' if trainer_on else 'off') + ' | space - tgl tracker',
                (0, h - 4), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 1)
        '''
        Tracking
        '''

        # Get tracked marker from image
        tracked_marker = tracker.get_marker(frame, output)
        if tracked_marker:
            draw_marker(output, tracked_marker.marker, tracked_marker.size,
                        tracked_marker.distance, tracked_marker.position)

            # Update cur position/size (for calibration)
            cur_position = tracked_marker.y
            cur_size = tracked_marker.size
        '''
        Analysis
        '''

        if datalog.is_running():
            if tracked_marker:
                # Analyze CPR status
                rate, depth, recoil, code = cprstatus.update(tracked_marker)

                if code is not None:
                    last_rate, last_depth, last_recoil, last_code = rate, depth, recoil, code  # Update
                    print('R: ' + str(last_rate) + ' D: ' + str(last_depth) +
                          ' C: ' + str(last_recoil) + ' S: ' + str(last_code))

                    # Send status if trainer is turned on
                    if trainer_on:
                        if not (last_rate == 0 and last_depth == 0
                                and last_recoil == 0):
                            print('send status ' + str(code))
                            statussender.send_status(code)

                datalog.log(frame, output, tracker.get_origin(),
                            tracked_marker.position, rate, depth, recoil,
                            code, faceframe)
        '''
        Show Output
        '''

        # Resize frame
        #output = cv2.resize(output, (OUT_WIDTH, OUT_HEIGHT))

        # Show frame
        #cv2.imshow('Frame', frame)
        cv2.imshow('Output', output)
        cv2.setMouseCallback('Output', mouse_callback)
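        # note: re-registering the callback every frame is redundant but
        # harmless; the 'Output' window is guaranteed to exist here because
        # the imshow call above creates it on the first iteration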

        if faceframe is not None:
            cv2.imshow('Face', faceframe)
        '''
        Process Keypresses
        '''

        k = cv2.waitKey(1)
        if k == 27:
            datalog.stop()
            break
        elif k == ord('v'):
            track_finder.set_color(VIOLET_COLOR_MIN, VIOLET_COLOR_MAX)
        elif k == ord('g'):
            track_finder.set_color(GREEN_COLOR_MIN, GREEN_COLOR_MAX)
        elif k == ord('y'):
            track_finder.set_color(YELLOW_COLOR_MIN, YELLOW_COLOR_MAX)
        elif k == ord('c'):
            # Calibrate tracker colour from the pixel under the origin line
            pixel = frame[int(tracker.get_origin())][center[0]]
            hsv = cvt_bgr2hsv(pixel)
            print('Calibrate color: ' + str(hsv) + ' | BGR: ' + str(pixel))

            # Apply an HSV range: widen the sampled hue by the configured
            # margins and use fixed saturation/value bounds.
            # (Alternative: widen all three channels by COLOR_VARIANCE:
            #  lower = tuple(map(lambda x, y: max(0, x - y), hsv, COLOR_VARIANCE))
            #  upper = tuple(map(lambda x, y: min(255, x + y), hsv, COLOR_VARIANCE)))
            lower_bound = (max(0, hsv[0] - COLOR_MIN[0]), COLOR_MIN[1],
                           COLOR_MIN[2])
            upper_bound = (min(255, hsv[0] + COLOR_MAX[0]), COLOR_MAX[1],
                           COLOR_MAX[2])

            track_finder.set_color(lower_bound, upper_bound)
            print('Color range: ' + str(lower_bound) + ' to ' +
                  str(upper_bound))
        elif k == ord('t'):
            # Toggle trainer
            trainer_on = not trainer_on
        elif k == 32:
            # Toggle on/off
            if datalog.is_running():
                datalog.stop()
            else:
                # Reset logger
                cur_time = datetime.now()
                time_str = cur_time.strftime('%m-%d-%y_%H%M%S')
                datalog.start('CPR_' + str(participant_id) + '_' + time_str)
                cprstatus.reset()

                # Set tracker origin/size
                tracker.set_origin(cur_position)
                tracker.set_force_px_size(cur_size)

    # release cameras and close any OpenCV windows
    cap.release()
    if facecap:
        facecap.release()
    cv2.destroyAllWindows()
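
# For reference: cvt_bgr2hsv() is used in the calibration branch above but is
# not defined in this excerpt. A minimal sketch, assuming it simply converts a
# single BGR pixel to HSV via OpenCV:
import numpy as np

def cvt_bgr2hsv(pixel):
    # wrap the pixel in a 1x1 image, convert, and unwrap the result
    px = np.uint8([[pixel]])
    return tuple(int(v) for v in cv2.cvtColor(px, cv2.COLOR_BGR2HSV)[0][0])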
Example 2
    def run(self):

        # pre-initialise everything referenced in the finally block so that
        # cleanup is safe even if construction fails partway through
        sensors = None
        datalogger = None
        self.scheduler = None
        self.modemanager = None
        self.mqttclient = None
        self.heatercontroller = None

        try:

            # initialise objects
            sensors = Sensors(self.T_OFFSET, self.P_OFFSET, self.H_OFFSET)
            datalogger = DataLogger(self.DATALOGFILE, self.DATALOGKEYS,
                                    self.DATALOGMAXFILESIZE)
            self.scheduler = HeaterScheduler()
            self.modemanager = ModeManager(
                scheduler=self.scheduler,
                callbackfunc=self.modemanagercallbackfunc)
            self.mqttclient = MqttClient(subscribelist=self.SUBSCRIBELIST)
            self.heatercontroller = HeaterController()
            
            # initialise state
            self.modemanager.setMode(self.modemanager.AUTO)

            # initial data
            t_avg, p_avg, h_avg = sensors.getData()

            # present ourselves
            self.mqttclient.publish({topics.IOTHERMSTATUS: 'Active'})
            self.mqttclient.publish(
                {topics.IOTHERMVERSION: __version__ + ' ' + __status__})
            self.mqttclient.publish(
                {topics.MODE: self.modemanager.currentmode.name})
            self.mqttclient.publish(
                {topics.TARGETTEMPERATURE:
                     self.modemanager.currentmode.targettemperature})

            t = 0
            
            while True:
                t_, p, h = sensors.getData()
                messages = self.mqttclient.getData()

                # prefer a temperature published over MQTT if configured;
                # otherwise use the local sensor reading
                if self.USE_OTHERTEMPERATURE:
                    if topics.OTHERTEMPERATURE in messages:
                        t = float(messages[topics.OTHERTEMPERATURE])
                else:
                    t = t_

                # exponentially weighted running averages (alpha = 0.5)
                t_avg = (t_avg + t) / 2
                p_avg = (p_avg + p) / 2
                h_avg = (h_avg + h) / 2

                # approximate derivatives against the running averages
                d_t = (t - t_avg) / self.REFRESH_SECONDS
                d_p = (p - p_avg) / self.REFRESH_SECONDS
                d_h = (h - h_avg) / self.REFRESH_SECONDS
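                # note: the updates above follow the standard EMA recurrence
                #     avg_new = 0.5 * avg_old + 0.5 * x
                # (e.g. avg = 20.0 and a reading of 22.0 give avg = 21.0),
                # so each derivative measures how far the latest reading sits
                # from its smoothed value, scaled to a per-second rate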

                # process data from subscribed topics
                self.processMessages(messages)

                # prepare for publishing
                messages = self.prepareMessages(t, p, h, d_t, d_p, d_h)
                datalogger.log(messages)
                self.mqttclient.publish(messages)

                # update the heater controller with the current and target
                # temperatures
                # print('targettemperature = {}'.format(
                #     self.modemanager.currentmode.targettemperature))
                self.heatercontroller.update(
                    t, self.modemanager.currentmode.targettemperature)

                sleep(self.REFRESH_SECONDS)

        finally:
            print('IOThermostat: stopping...')
            if datalogger:
                datalogger.close()
                
            if sensors:
                sensors.close()
                
            if self.scheduler:
                self.scheduler.close()
                
            if self.heatercontroller:
                self.heatercontroller.close()
                
            if self.mqttclient:
                self.mqttclient.close()
Example 3

def main():
    # this excerpt assumes module-level imports (cv2, dlib, imutils, time,
    # face_utils, VideoStream/FileVideoStream, FPSCounter, DataLogger,
    # BlinkDetector, AreaDistanceRatio, eye_aspect_ratio) and an argparse
    # parser `ap` providing the shape_predictor, video, graph and
    # output_file options used below
    args = vars(ap.parse_args())

    # create frame counter
    fps_counter = FPSCounter()

    # total number of blinks
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(args["shape_predictor"])

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    print("[INFO] print q to quit...")
    if args['video'] == "camera":
        vs = VideoStream(src=0).start()
        vs.stream.set(cv2.CAP_PROP_FPS, 15)
        fileStream = False
    else:
        vs = FileVideoStream(args["video"]).start()
        fileStream = True
        fps = vs.stream.get(cv2.CAP_PROP_FPS)

    # create the data logger
    datalogger = DataLogger(columns=['ear', 'adr'])

    # blink detector
    blink_detector = BlinkDetector(time_window=5,
                                   plot=args['graph'],
                                   frame_delay=10)

    # loop over frames from the video stream
    frame_cnt = 0
    INIT_TIME = None
    while True:
        # if this is a file video stream, we need to check whether there
        # are any more frames left in the buffer to process
        if fileStream and not vs.more():
            break

        # get timestamp
        if fileStream:
            timestamp = frame_cnt / fps
        else:
            if INIT_TIME is None:
                INIT_TIME = time.time()
            timestamp = time.time() - INIT_TIME
            fps = fps_counter.tick()

        # get the new frame
        frame = vs.read()
        frame_cnt += 1
        if frame is None:
            break

        # resize the frame and convert it to grayscale
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array

            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eye_aspect_ratio(leftEye)
            rightEAR = eye_aspect_ratio(rightEye)

            # compute the area-over-distance metric
            adr = AreaDistanceRatio.compute(leftEye, rightEye)
            # log ADR
            datalogger.log(adr, 'adr', timestamp)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0
            # log EAR
            datalogger.log(ear, 'ear', timestamp)

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # send new data to blink detector and check if it detected new blinks
            blink_detector.send(adr, timestamp)
            blink = blink_detector.get_blink()
            if blink is not None:
                blink_time, blink_dur = blink
                TOTAL += 1
                print(f"[BLINK] time: {blink_time:.2f}  dur: {blink_dur:.2f}")

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "ADR: {:.2f}".format(ear), (300, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "FPS: {:.2f}".format(fps), (300, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # save datafile
    output_file = args['output_file']
    if output_file == 'ask':
        output_file = input("Enter filename to save: ")
    if output_file is not None:
        datalogger.save(output_file)

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
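
# For reference: eye_aspect_ratio() is used above but not defined in this
# excerpt. A minimal sketch, assuming the standard EAR formula of
# Soukupova & Cech (2016) over the six dlib eye landmarks:
#
#     EAR = (||p2 - p6|| + ||p3 - p5||) / (2 * ||p1 - p4||)
#
from scipy.spatial import distance as dist

def eye_aspect_ratio(eye):
    # vertical eye landmark distances
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal eye landmark distance
    C = dist.euclidean(eye[0], eye[3])
    # the ratio collapses towards zero as the eye closes
    return (A + B) / (2.0 * C)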