Example #1
def main():
    connectThrottle = RealtimeInterval(10)
    host = "roboRIO-5495-FRC.local"
    port = 5888
    topics = (MQTT_TOPIC_SCREENSHOT,)  # one-element tuple; the trailing comma matters
    client = mqttClient.MqttClient(host, port, topics, messageHandler)

    params = CVParameterGroup("Sliders", debugMode)
    # HUES: GREEN=65/75 BLUE=110
    params.addParameter("hue", 75, 179)
    params.addParameter("hueWidth", 20, 25)
    params.addParameter("low", 70, 255)
    params.addParameter("high", 255, 255)       
    params.addParameter("countourSize", 50, 200)
    params.addParameter("keystone", 0, 320)

    camera = cameraReader = None
    if testImage is None:
        camera = createCamera()
        cameraReader = CameraReaderAsync.CameraReaderAsync(camera)
    distanceCalculatorH = distanceCalculatorV  = None
    if tuneDistance:
        distanceCalculatorH = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_WIDTH)
        distanceCalculatorV = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_HEIGHT)
    else:
        distanceCalculatorH = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_WIDTH, DistanceCalculator.PFL_H_LC3000)
        distanceCalculatorV = DistanceCalculator.TriangleSimilarityDistanceCalculator(TARGET_HEIGHT, DistanceCalculator.PFL_V_LC3000)
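        # PFL_H_LC3000 / PFL_V_LC3000 appear to be precalibrated perceived focal
        # lengths for this camera model (an assumption; the constants live in
        # DistanceCalculator and are not shown in this excerpt).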
    
    fpsDisplay = True
    fpsCounter = WeightedFramerateCounter()
    fpsInterval = RealtimeInterval(5.0, False)

    keyStoneBoxSource = [[0, 0], [cameraFrameWidth, 0], [cameraFrameWidth, cameraFrameHeight], [0, cameraFrameHeight]]

    # The first frame read from the camera won't have the proper exposure
    # setting, so skip it to avoid processing bad image data.
    frameSkipped = False

    while True:
        if (not client.isConnected()) and connectThrottle.hasElapsed():
            try:
                client.connect()
            except Exception:
                pass

        # This code will load a test image from disk and process it instead of the camera input
        #raw = cv2.imread('test.png')
        #frameSkipped = True
        #if raw is None or len(raw) == 0:
        #    print "Can't load image"
        #    break
        if testImage is not None:
            raw = testImage.copy()
        elif cameraReader is not None:
            raw = cameraReader.Read()
        if raw is not None and frameSkipped:
            fpsCounter.tick()
            
            if debugMode:
                if fpsDisplay:
                    cv2.putText(raw, "{:.0f} fps".format(fpsCounter.getFramerate()), (cameraFrameWidth - 100, 13 + 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                cv2.imshow("raw", raw)

            # This will "deskew" or fix the keystone of a tilted camera.
            #ptSrc = np.float32([keyStoneBoxSource])
            #ptDst = np.float32([[params['keystone'], 0],\
            #                    [cameraFrameWidth - params['keystone'], 0],\
            #                    [cameraFrameWidth + params['keystone'], cameraFrameHeight],\
            #                    [-params['keystone'], cameraFrameHeight]])
            #matrix = cv2.getPerspectiveTransform(ptSrc, ptDst)
            #transformed = cv2.warpPerspective(raw, matrix, (cameraFrameWidth, cameraFrameHeight))
            #cv2.imshow("keystone", transformed)
            #target = findTarget(transformed, params)
            target = findTarget(raw, params)
            
            if target is None or not target.any():
                payload = { 'hasTarget': False, "fps": round(fpsCounter.getFramerate()) }
                client.publish(MQTT_TOPIC_TARGETTING, json.dumps(payload))
            else:
                distance = None

                targetBox = getTargetBoxTight(target)
                # We can tell how far off-axis we are from the slope of the top
                # edge of the targetBox: on-center, the two top corners are level;
                # off-axis, they are not. A positive slope (rising to the right)
                # means we are to the right of the target.
                offAxis = (targetBox[0][1] - targetBox[1][1]) / (cameraFrameHeight / 10.0)
                measuredHeight, centerLine = getTargetHeight(targetBox)
                center = (round((centerLine[0][0] + centerLine[1][0]) / 2),\
                          round((centerLine[0][1] + centerLine[1][1]) / 2))
                horizontalOffset = center[0] - (cameraFrameWidth / 2.0)
                
                perceivedFocalLengthH = perceivedFocalLengthV = 0.0
                if tuneDistance:
                    # `w`/`h` were undefined in the original; assume the target's
                    # pixel width and height (derived from the box and centerline).
                    w = abs(targetBox[1][0] - targetBox[0][0])
                    h = measuredHeight
                    perceivedFocalLengthH = distanceCalculatorH.CalculatePerceivedFocalLengthAtGivenDistance(w, TARGET_CALIBRATION_DISTANCE)
                    perceivedFocalLengthV = distanceCalculatorV.CalculatePerceivedFocalLengthAtGivenDistance(h, TARGET_CALIBRATION_DISTANCE)
                    distance = TARGET_CALIBRATION_DISTANCE
                else:
                    # Use the height at the center of the target to determine
                    # distance, hoping it is less sensitive to off-axis angles.
                    distance = distanceCalculatorV.CalculateDistance(measuredHeight)
                distance = round(distance, 1)
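                # Triangle-similarity sketch (inferred from the calculator's name;
                # its internals are not shown in this excerpt):
                #   perceivedFocalLength = pixelSize * knownDistance / realSize
                #   distance = realSize * perceivedFocalLength / pixelSize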

                # Normalize the pixel offset to roughly [-1, 1] across the frame.
                horizDelta = horizontalOffset / cameraFrameWidth * 2
                payload = {
                    'horizDelta': horizDelta,
                    'targetDistance': round(distance),
                    'hasTarget': True,
                    'fps': round(fpsCounter.getFramerate()),
                    'offAxis': offAxis}
                client.publish(MQTT_TOPIC_TARGETTING, json.dumps(payload))

                if debugMode:
                    result = raw.copy()

                    # Draw the actual contours
                    #cv2.drawContours(result, target, -1, (255, 255, 255), 1)

                    # Draw the bounding area (targetBox)
                    cv2.drawContours(result, [np.int0(targetBox)], -1, (255, 0, 0), 1)

                    # Draw Convex Hull
                    #hull = cv2.convexHull(target)
                    #cv2.drawContours(result, hull, -1, (255, 0, 255), 1)
                    #temp = []
                    #for c in target:
                    #    contour = [c][0][0]
                    #    temp.append(contour)
                    #    #print contour
                    ##print temp
                    #top = getIndexOfTopLeftCorner(temp)
                    ##print target[top][0]
                    #cv2.circle(result, (target[top][0][0], target[top][0][1]), 3, (255, 255, 255), -1)

                    # Draw the centerline that represents the height
                    cv2.line(result, (int(round(centerLine[0][0])), int(round(centerLine[0][1]))),\
                                     (int(round(centerLine[1][0])), int(round(centerLine[1][1]))),\
                                     (128, 0, 255), 1)
                    
                    # draw the center of the object
                    cv2.circle(result, (int(round(center[0])), int(round(center[1]))), 4, (250, 250, 250), -1)
                    
                    #cv2.putText(result, str(horizontalOffset), (x-50, y+15), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 0, 0), 1)
                    if tuneDistance:
                        cv2.putText(result, "PFL_H: {:.0f}".format(perceivedFocalLengthH), (3, 13 + 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                        cv2.putText(result, "PFL_V: {:.0f}".format(perceivedFocalLengthV), (3, 13 + 5 + 22), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                    else:
                        cv2.putText(result, "{} inches".format(distance), (3, 13 + 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                    if fpsDisplay:
                        cv2.putText(result, "{:.0f} fps".format(fpsCounter.getFramerate()), (cameraFrameWidth - 100, 13 + 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255,255,255), 1)
                    cv2.imshow("result", result)

        if raw is not None:
            frameSkipped = True
        if fpsDisplay and fpsInterval.hasElapsed():
            print "{0:.1f} fps (processing)".format(fpsCounter.getFramerate())
            if cameraReader is not None:
                print "{0:.1f} fps (camera)".format(cameraReader.fps.getFramerate())
    
        if debugMode:
            keyPress = cv2.waitKey(1)
            if keyPress != -1:
                keyPress = keyPress & 0xFF
            if keyPress == ord("f"):
                fpsDisplay = not fpsDisplay
            elif keyPress == ord("q"):
                break 
            elif keyPress == ord("z"):
                takeScreenshot()

    client.disconnect()
    if cameraReader is not None:
        cameraReader.Stop()
    if camera is not None:
        camera.release()
    cv2.destroyAllWindows()
Example #2
def createMask(hsv, params):
    # (This excerpt begins mid-function; the signature and the `low` bound are
    # reconstructed by analogy with the other examples.)
    low = np.array([params["hue"] - params["hueWidth"], params["low"], params["low"]])
    x = params["hue"] + params["hueWidth"]
    if x > 179:  # clamp to OpenCV's 0-179 hue range
        x = 179
    high = np.array([x, params["high"], params["high"]])

    mask = cv2.inRange(hsv, low, high)
    return mask
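
# Usage sketch (assumed): `hsv` is a frame already converted to HSV, e.g.:
#   mask = createMask(cv2.cvtColor(frame, cv2.COLOR_BGR2HSV), params)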


def findLargestContour(source):
    contours, hierarchy = cv2.findContours(source, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        ordered = sorted(contours, key = cv2.contourArea, reverse = True)[:1]
        return ordered[0]
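
# Note: cv2.findContours in OpenCV 2.x modifies its input image, so pass a
# throwaway copy if the mask is needed afterwards (illustrative):
#   largest = findLargestContour(mask.copy())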

params = CVParameterGroup("Sliders")
params.addParameter("hue", 75, 255)
params.addParameter("hueWidth", 4, 25)
params.addParameter("FOV", 13782, 50000)
params.addParameter("low", 90, 255)
params.addParameter("high", 255, 255)
camera = cv2.VideoCapture(0)
#No camera's exposure goes this low, but this will set it as low as possible
camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE,-100)
#camera.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

fpsDisplay = False
fpsCounter = WeightedFramerateCounter()
fpsInterval = RealtimeInterval(3.0)
Example #3
def createMask(hsv, params):
    # (Excerpt begins mid-function; signature reconstructed as in Example #2.)
    hsvcomponents = cv2.split(hsv)  # split into H, S, V planes (unused below)

    low = np.array([params["hue low"], params["sat low"], params["value low"]])
    high = np.array([params["hue high"], params["sat high"], params["value high"]])

    mask = cv2.inRange(hsv, low, high)
    return mask


def findLargestContour(source):
    contours, hierarchy = cv2.findContours(source, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        ordered = sorted(contours, key = cv2.contourArea, reverse = True)[:1]
        return ordered[0]

params = CVParameterGroup("Sliders")
params.addParameter("hue high", 106, 255)
params.addParameter("hue low", 0, 255)
params.addParameter("sat high", 75, 255)
params.addParameter("sat low", 0, 255)
params.addParameter("value high", 255, 255)
params.addParameter("value low", 105, 255)
params.addParameter("ED Size", 4, 20)

camera = cv2.VideoCapture(0)
#camera.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

fpsDisplay = False
fpsCounter = WeightedFramerateCounter()
Example #4
##            tPx = w
##            distance = params["FOV"]/w
##            cv2.putText(result, str(distance), (30, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,0), 1)
        #client.publish("5495.targetting", center[0]-320)

        payload = { 'horizDelta': center[0] - 320, 'targetDistance': int(distance) }  # 320 = half the (assumed) 640px frame width
        client.publish(MQTT_TOPIC_TARGETTING, json.dumps(payload))
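        # Illustrative wire format: {"horizDelta": -12, "targetDistance": 44}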
        return result

MQTT_TOPIC_TARGETTING = "5495.targetting"
host = "roboRIO-5495-FRC.local"
port = 5888
topics = ()
client = mqttClient.MqttClient(host, port, topics, messageHandler)

params = CVParameterGroup("Sliders")
if debugMode:
    params.showWindow()
params.addParameter("hue", 65, 255) # GREEN
#params.addParameter("hue", 105, 255) #BLUE
params.addParameter("hueWidth", 5, 25)
params.addParameter("FOV", 652, 50000)
params.addParameter("low", 70, 255)
params.addParameter("high", 255, 255)

camera = createCamera()

targetSize = (11.25, 44) # size, distance
distanceCalc = TriangleSimilarityDistanceCalculator(targetSize[0])
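
# Usage sketch (method names as in Example #1; their wiring here is assumed):
#   pfl = distanceCalc.CalculatePerceivedFocalLengthAtGivenDistance(widthPx, targetSize[1])
#   distance = distanceCalc.CalculateDistance(widthPx)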

fpsDisplay = False
Example #5
def createMask(hsv, params):
    # (Excerpt begins mid-function and appears to be a separate snippet whose
    # header was lost; the signature and the `low` bound are reconstructed, and
    # the saturation/value floors are illustrative assumptions.)
    low = np.array([params["hue"] - params["hueWidth"], 100, 100])
    x = params["hue"] + params["hueWidth"]
    if x > 179:
        x = 179
    high = np.array([x, 255, 255])

    mask = cv2.inRange(hsv, low, high)
    return mask


def findLargestContour(source):
    contours, hierarchy = cv2.findContours(source, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) > 0:
        ordered = sorted(contours, key = cv2.contourArea, reverse = True)[:1]
        return ordered[0]

params = CVParameterGroup("Sliders")
params.addParameter("hue", 111, 255)
params.addParameter("hueWidth", 2, 25)
params.addParameter("FOV", 13782, 50000)

camera = cv2.VideoCapture(0)
#camera.set(cv2.cv.CV_CAP_PROP_FPS, 15)
#camera.set(cv2.cv.CV_CAP_PROP_FOCUS_AUTO, 0)
#camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE_AUTO, 0)
#camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE, 0)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 640)
#camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 480)

fpsDisplay = False
fpsCounter = WeightedFramerateCounter()
fpsInterval = RealtimeInterval(3.0)