Example #1
def __init__(self, cfg, camera, stage):
    self.imageWidth = cfg["imageWidth"]
    self.minConfidence = cfg["minConfidence"]
    self.returnHomeSpeed = cfg["returnHomeSpeed"]
    self.homePauseSeconds = cfg["homePauseSeconds"]
    self.homePauseTimer = RealtimeInterval(cfg["homePauseSeconds"], False)
    self.zoomTimer = RealtimeInterval(cfg["zoomMaxSecondsSafety"], False)
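
Example #1 constructs two RealtimeInterval timers, a helper that none of these
examples define. A minimal sketch of the assumed interface, guessing that the
second constructor argument controls whether the very first hasElapsed() call
may report True (both the internals and that flag's meaning are assumptions):

import time

class RealtimeInterval(object):
    """Wall-clock interval timer (sketch of an assumed helper)."""

    def __init__(self, seconds, fireImmediately=True):
        self.seconds = seconds
        # With fireImmediately=False, a full interval must pass before
        # hasElapsed() first returns True.
        self._last = 0 if fireImmediately else time.time()

    def hasElapsed(self):
        now = time.time()
        if now - self._last >= self.seconds:
            self._last = now
            return True
        return False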
Example #2
def main(cfg):
    camera = Camera(cfg['camera'], args["usbDeviceNum"])
    stage = Stage(cfg['stage'])
    subject = Subject(cfg['subject'])
    face = Face(cfg['face'])
    scene = Scene(cfg['scene'], camera, stage)

    fpsDisplay = True
    fpsCounter = WeightedFramerateCounter()
    fpsInterval = RealtimeInterval(10.0, False)

    # Load the face detector once, before the acquisition loop
    faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

    # Loop on acquisition
    while True:
        camera.updatePTZ()
        raw = camera.cvreader.Read()

        if raw is not None:

            ### This is the primary frame processing block
            fpsCounter.tick()

            raw = imutils.resize(raw, width=scene.imageWidth)
            gray = cv2.cvtColor(raw, cv2.COLOR_BGR2GRAY)

            #~ panMsg = "*" if camera.controller.panTiltOngoing() else "-"
            #~ tiltMsg = "-"
            #~ zoomMsg =  "*" if camera.controller.zoomOngoing() else "-"

            #~ cv2.putText(raw, "P {} #{}".format(panMsg, camera.panPos), (5, 15),
            #~ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            #~ cv2.putText(raw, "T {} #{}".format(tiltMsg, camera.tiltPos), (5, 45),
            #~ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            #~ cv2.putText(raw, "Z {} #{}".format(zoomMsg, camera.zoomPos), (5, 75),
            #~ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Scan for faces against the grayscale frame
            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30)
                #flags = cv2.CV_HAAR_SCALE_IMAGE
            )

            #~ printif("Found {0} faces!".format(len(faces)))
            if len(faces):
                (x, __, w, __) = faces[0]
                face.found(x + w / 2)
            else:
                face.lost()
            subject.evaluate(face, scene)
            scene.trackSubject(camera, stage, subject, face, len(faces))

            #~ # Decorate the image with CV findings and camera stats
            #~ cv2.putText(raw, subject.text(), (5, 105),
            #~ cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            #~ for (x, y, w, h) in faces:
            #~ cv2.rectangle(raw, (x, y), (x+w, y+h), (0, 255, 0), 2)

            #~ # show the output image with decorations
            #~ # (not easy to do on Docker)
            if g_debugMode:
                cv2.imshow("Output", raw)

        if fpsDisplay and fpsInterval.hasElapsed():
            print "{0:.1f} fps (processing)".format(fpsCounter.getFramerate())
            #~ if camera.cvreader is not None:
            #~ print "{0:.1f} fps (camera)".format(camera.cvreader.fps.getFramerate())
            print "Face has been seen for {0:.1f} seconds".format(face.age())

        # Monitor for control keystrokes in debug mode
        if g_debugMode:
            keyPress = cv2.waitKey(1)
            if keyPress != -1:
                keyPress = keyPress & 0xFF
            if keyPress == ord("q"):
                break
    # Clean up
    printif("Cleaning up")
    if camera.cvreader is not None:
        camera.cvreader.Stop()
        time.sleep(0.5)
    if camera.cvcamera is not None:
        camera.cvcamera.release()
    if g_debugMode:
        cv2.destroyAllWindows()

    printif("End of main function")
Example #3
def main():
    connectThrottle = RealtimeInterval(10)
    host = "roboRIO-5495-FRC.local"
    port = 5888
    topics = (MQTT_TOPIC_SCREENSHOT,)  # trailing comma makes this a one-element tuple
    client = mqttClient.MqttClient(host, port, topics, messageHandler)

    params = CVParameterGroup("Sliders", debugMode)
    # HUES: GREEN=65/75 BLUE=110
    params.addParameter("hue", 75, 179)
    params.addParameter("hueWidth", 20, 25)
    params.addParameter("low", 70, 255)
    params.addParameter("high", 255, 255)       
    params.addParameter("countourSize", 50, 200)
    params.addParameter("keystone", 0, 320)

    camera = cameraReader = None
    if testImage is None:
        camera = createCamera()
        cameraReader = CameraReaderAsync.CameraReaderAsync(camera)
    distanceCalculatorH = distanceCalculatorV = None
    if tuneDistance:
        distanceCalculatorH = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_WIDTH)
        distanceCalculatorV = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_HEIGHT)
    else:
        distanceCalculatorH = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_WIDTH, DistanceCalculator.PFL_H_LC3000)
        distanceCalculatorV = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_HEIGHT, DistanceCalculator.PFL_V_LC3000)
    
    fpsDisplay = True
    fpsCounter = WeightedFramerateCounter()
    fpsInterval = RealtimeInterval(5.0, False)

    keyStoneBoxSource = [[0, 0], [cameraFrameWidth, 0], [cameraFrameWidth, cameraFrameHeight], [0, cameraFrameHeight]]

    # The first frame we take off the camera won't have the proper exposure
    # setting, so skip it to avoid processing bad image data.
    frameSkipped = False

    while True:
        if (not client.isConnected()) and connectThrottle.hasElapsed():
            try:
                client.connect()
            except Exception:
                pass  # ignore connect failures; retry after the throttle elapses

        # This code will load a test image from disk and process it instead of the camera input
        #raw = cv2.imread('test.png')
        #frameSkipped = True
        #if raw is None or len(raw) == 0:
        #    print "Can't load image"
        #    break
        if testImage is not None:
            raw = testImage.copy()
        elif cameraReader is not None:
            raw = cameraReader.Read()
        if raw is not None and frameSkipped:
            fpsCounter.tick()
            
            if debugMode:
                if fpsDisplay:
                    cv2.putText(raw, "{:.0f} fps".format(fpsCounter.getFramerate()), (cameraFrameWidth - 100, 13 + 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                cv2.imshow("raw", raw)

            # This will "deskew" or fix the keystone of a tilted camera.
            #ptSrc = np.float32([keyStoneBoxSource])
            #ptDst = np.float32([[params['keystone'], 0],\
            #                    [cameraFrameWidth - params['keystone'], 0],\
            #                    [cameraFrameWidth + params['keystone'], cameraFrameHeight],\
            #                    [-params['keystone'], cameraFrameHeight]])
            #matrix = cv2.getPerspectiveTransform(ptSrc, ptDst)
            #transformed = cv2.warpPerspective(raw, matrix, (cameraFrameWidth, cameraFrameHeight))
            #cv2.imshow("keystone", transformed)
            #target = findTarget(transformed, params)
            target = findTarget(raw, params)
            
            if target is None or not target.any():
                payload = {'hasTarget': False, 'fps': round(fpsCounter.getFramerate())}
                client.publish(MQTT_TOPIC_TARGETTING, json.dumps(payload))
            else:
                distance = None

                targetBox = getTargetBoxTight(target)
                # We can tell how off-axis we are by looking at the slope of
                # the top of the targetBox. If we are on-center, the two top
                # corners will be level; if we are off-axis, they won't be.
                # We are to the right of the target when the line slopes up
                # to the right and the slope is positive.
                offAxis = (targetBox[0][1] - targetBox[1][1]) / (cameraFrameHeight / 10.0)
                measuredHeight, centerLine = getTargetHeight(targetBox)
                center = (round((centerLine[0][0] + centerLine[1][0]) / 2),\
                          round((centerLine[0][1] + centerLine[1][1]) / 2))
                horizontalOffset = center[0] - (cameraFrameWidth / 2.0)
                
                perceivedFocalLengthH = perceivedFocalLengthV = 0.0
                if tuneDistance:
                    # Assumed: w and h are the target's width and height in
                    # pixels, taken from its bounding rectangle.
                    __, __, w, h = cv2.boundingRect(target)
                    perceivedFocalLengthH = distanceCalculatorH.CalculatePerceivedFocalLengthAtGivenDistance(w, TARGET_CALIBRATION_DISTANCE)
                    perceivedFocalLengthV = distanceCalculatorV.CalculatePerceivedFocalLengthAtGivenDistance(h, TARGET_CALIBRATION_DISTANCE)
                    distance = TARGET_CALIBRATION_DISTANCE
                else:
                    # Use the height at the center of the target to determine
                    # distance; that should be less sensitive to off-axis
                    # shooting angles.
                    distance = distanceCalculatorV.CalculateDistance(measuredHeight)
                distance = round(distance, 1)

                horizDelta = horizontalOffset / cameraFrameWidth * 2
                payload = {
                    'horizDelta': horizDelta,
                    'targetDistance': round(distance),
                    'hasTarget': True,
                    'fps': round(fpsCounter.getFramerate()),
                    'offAxis': offAxis}
                client.publish(MQTT_TOPIC_TARGETTING, json.dumps(payload))

                if debugMode:
                    result = raw.copy()

                    # Draw the actual contours
                    #cv2.drawContours(result, target, -1, (255, 255, 255), 1)

                    # Draw the bounding area (targetBox)
                    cv2.drawContours(result, [np.int0(targetBox)], -1, (255, 0, 0), 1)

                    # Draw Convex Hull
                    #hull = cv2.convexHull(target)
                    #cv2.drawContours(result, hull, -1, (255, 0, 255), 1)
                    #temp = []
                    #for c in target:
                    #    contour = [c][0][0]
                    #    temp.append(contour)
                    #    #print contour
                    ##print temp
                    #top = getIndexOfTopLeftCorner(temp)
                    ##print target[top][0]
                    #cv2.circle(result, (target[top][0][0], target[top][0][1]), 3, (255, 255, 255), -1)

                    # Draw the centerline that represent the height
                    cv2.line(result, (int(round(centerLine[0][0])), int(round(centerLine[0][1]))),\
                                     (int(round(centerLine[1][0])), int(round(centerLine[1][1]))),\
                                     (128, 0, 255), 1)
                    
                    # draw the center of the object
                    cv2.circle(result, (int(round(center[0])), int(round(center[1]))), 4, (250, 250, 250), -1)
                    
                    #cv2.putText(result, str(horizontalOffset), (x-50, y+15), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
                    if tuneDistance:
                        cv2.putText(result, "PFL_H: {:.0f}".format(perceivedFocalLengthH), (3, 13 + 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                        cv2.putText(result, "PFL_V: {:.0f}".format(perceivedFocalLengthV), (3, 13 + 5 + 22), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                    else:
                        cv2.putText(result, "{} inches".format(distance), (3, 13 + 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                    if fpsDisplay:
                        cv2.putText(result, "{:.0f} fps".format(fpsCounter.getFramerate()), (cameraFrameWidth - 100, 13 + 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                    cv2.imshow("result", result)

        if raw is not None:
            frameSkipped = True
        if fpsDisplay and fpsInterval.hasElapsed():
            print "{0:.1f} fps (processing)".format(fpsCounter.getFramerate())
            if cameraReader is not None:
                print "{0:.1f} fps (camera)".format(cameraReader.fps.getFramerate())
    
        if debugMode:
            keyPress = cv2.waitKey(1)
            if keyPress != -1:
                keyPress = keyPress & 0xFF
            if keyPress == ord("f"):
                fpsDisplay = not fpsDisplay
            elif keyPress == ord("q"):
                break 
            elif keyPress == ord("z"):
                takeScreenshot()

    client.disconnect()
    if cameraReader is not None:
        cameraReader.Stop()
    if camera is not None:
        camera.release()
    cv2.destroyAllWindows()
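
The TriangleSimilarityDistanceCalculator used above rests on pinhole-camera
triangle similarity: a target of known real size S that spans p pixels under
a perceived focal length F sits at distance D = S * F / p, and calibrating
against a target at a known distance inverts the relation to F = p * D / S.
A minimal sketch with the interface Example #3 expects (the class internals
are assumptions; only the two formulas are standard):

class TriangleSimilarityDistanceCalculator(object):
    """Distance from apparent size via triangle similarity (sketch)."""

    def __init__(self, realSize, perceivedFocalLength=None):
        self.realSize = realSize
        self.perceivedFocalLength = perceivedFocalLength

    def CalculatePerceivedFocalLengthAtGivenDistance(self, pixelSize, distance):
        # Calibration: F = p * D / S
        self.perceivedFocalLength = pixelSize * distance / float(self.realSize)
        return self.perceivedFocalLength

    def CalculateDistance(self, pixelSize):
        # Ranging: D = S * F / p
        return self.realSize * self.perceivedFocalLength / float(pixelSize)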
Example #4
params = CVParameterGroup("Sliders")
params.addParameter("hue", 75, 255)
params.addParameter("hueWidth", 4, 25)
params.addParameter("FOV", 13782, 50000)
params.addParameter("low", 90, 255)
params.addParameter("high", 255, 255)
camera = cv2.VideoCapture(0)
#No camera's exposure goes this low, but this will set it as low as possible
camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE, -100)
#camera.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

fpsDisplay = False
fpsCounter = WeightedFramerateCounter()
fpsInterval = RealtimeInterval(3.0)
raw = cv2.imread("testImages/1454372914.52.png")
targetSize = (20, 14)
while True:
    #ret, raw = camera.read()
    
    ret = True
    if ret:
        fpsCounter.tick()
        
        #cv2.imshow("raw", raw)
        
        mask = filterHue(raw)
        cv2.imshow("mask", mask)

        #colorOnly = cv2.bitwise_and(raw, raw, mask = mask)
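
Example #4 calls a filterHue helper that is not shown. A plausible minimal
sketch, assuming it thresholds the frame in HSV space using the hue,
hueWidth, low, and high sliders from the params group above (the choice of
channels and the exact band limits are assumptions):

import numpy as np

def filterHue(frame):
    # Keep pixels whose hue lies within hueWidth of the target hue and
    # whose value channel falls between the low and high sliders.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([max(params["hue"] - params["hueWidth"], 0), 0, params["low"]])
    upper = np.array([min(params["hue"] + params["hueWidth"], 179), 255, params["high"]])
    return cv2.inRange(hsv, lower, upper)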