Ejemplo n.º 1
0
def start_camera():
    """Open the 'Hatch Panels' USB camera on /dev/video0 and configure it.

    Returns:
        tuple: (CameraServer instance, configured UsbCamera).
    """
    inst = CameraServer.getInstance()
    camera = UsbCamera('Hatch Panels', '/dev/video0')

    # NOTE(review): VERTICAL_RES is passed as the width and HORIZONTAL_RES as
    # the height (consistently, both here and in the JSON below) — the names
    # look swapped; confirm the constants' actual values.
    camera.setResolution(VERTICAL_RES, HORIZONTAL_RES)
    camera.setFPS(FPS)
    # NOTE(review): this brightness of 10 is immediately overridden by the
    # "brightness": 0 property in the JSON config below — confirm intent.
    camera.setBrightness(10)

    # Build the config as a dict and serialize it with json.dumps instead of
    # hand-concatenating a JSON string (avoids quoting/comma mistakes).
    import json  # local import keeps this self-contained fragment runnable
    config = {
        "fps": FPS,
        "height": HORIZONTAL_RES,
        "pixel format": "mjpeg",
        "properties": [
            {"name": "brightness", "value": 0},
            {"name": "contrast", "value": 100},
            {"name": "saturation", "value": 40},
        ],
        "width": VERTICAL_RES,
    }
    camera.setConfigJson(json.dumps(config))

    # Keep the camera connection open even when no client is streaming.
    camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)

    return inst, camera
Ejemplo n.º 2
0
def main():
    """Expose two USB cameras through one "Switched" stream.

    Robot code toggles the NetworkTables entry '/CameraPublisher/selected'
    between 0 and 1; a listener swaps which physical camera feeds the
    switched stream. Blocks forever serving video.
    """
    server_inst = CameraServer.getInstance()
    server_inst.enableLogging()

    first_cam = UsbCamera('Camera 0', 0)
    first_cam.setFPS(10)
    first_cam.setResolution(176, 144)
    second_cam = UsbCamera('Camera 1', 1)
    second_cam.setFPS(10)
    second_cam.setResolution(176, 144)

    # Both sources stay alive; the listener below picks which one is live.
    sources = (first_cam, second_cam)

    # Publish a "Switched" camera to NetworkTables, initially on camera 0.
    switched = server_inst.addSwitchedCamera('Switched')
    switched.setSource(sources[0])
    switched.setFPS(10)
    switched.setCompression(80)
    switched.setResolution(176, 144)

    def _on_selected(source, key, value, isNew):
        """NetworkTables entry listener for the 'selected' key.

        'value' MUST be 0 (zero) or 1 (one): it is used directly as the
        index into `sources` to choose the active camera.
        """
        switched.setSource(sources[int(value)])

    table = networktables.NetworkTables.getTable('/CameraPublisher')
    table.putNumber('selected', 0)
    table.addEntryListener(_on_selected, key='selected')

    server_inst.waitForever()
def main():
    """Capture 640x480 frames from a specific USB camera and republish them
    as an MJPEG 'camera' stream on port 5801, looping forever.

    Errors from the sink are forwarded to the output stream and the frame
    is skipped.
    """
    cs = CameraServer.getInstance()
    cs.enableLogging()

    # CvSource we push processed frames into; served over MJPEG on :5801.
    outputStream = CvSource('camera', VideoMode.PixelFormat.kMJPEG, 640, 480, 15)
    cs.addCamera(outputStream)
    server = cs.addServer(name='camera', port=5801)
    server.setSource(outputStream)

    camera = UsbCamera(
        "cam1",
        '/dev/v4l/by-id/usb-HD_Camera_Manufacturer_USB_2.0_Camera-video-index0'
    )
    # Capture from the first USB Camera on the system
    cs.startAutomaticCapture(camera=camera)
    camera.setResolution(640, 480)

    # Get a CvSink. This will capture images from the camera
    cvSink = cs.getVideo(camera=camera)

    # Allocating new images is very expensive, always try to preallocate
    img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        time, img = cvSink.grabFrame(img)
        if time == 0:
            # Send the output the error and skip this iteration.
            outputStream.notifyError(cvSink.getError())
            continue

        #
        # Insert your image processing logic here!
        #
        cv2.imshow("stream", img)
        # BUG FIX: cv2.imshow never renders without pumping the HighGUI event
        # loop; waitKey(1) processes window events once per frame.
        cv2.waitKey(1)
        # (optional) send some image back to the dashboard
        outputStream.putFrame(img)
Ejemplo n.º 4
0
    # start NetworkTables to send to smartDashboard
    # NOTE(review): fragment — the enclosing function's header is not visible
    # in this excerpt; `team` must be supplied by that enclosing scope.
    ntinst = NetworkTablesInstance.getDefault()

    # NOTE(review): the message formats `team` but startClientTeam is
    # hard-coded to 7539 — confirm which is intended.
    print("Setting up NetworkTables client for team {}".format(team))
    ntinst.startClientTeam(7539)

    SmartDashBoardValues = ntinst.getTable('SmartDashboard')

    #Start camera
    print("Connecting to camera")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('Cam 0', 0)
    Camera.setExposureManual(5)
    Camera.setResolution(160, 120)
    cs.addCamera(Camera)

    print("connected")

    CvSink = cs.getVideo()
    outputStream = cs.putVideo("Processed Frames", 160, 120)

    #buffer to store img data
    # NOTE(review): for a 160x120 image numpy expects (height, width, 3) =
    # (120, 160, 3); this transposed buffer forces grabFrame to reallocate.
    img = np.zeros(shape=(160, 120, 3), dtype=np.uint8)
    # loop forever
    # NOTE(review): loop body appears truncated in this excerpt — frames are
    # grabbed and errors reported, but nothing is published back.
    while True:

        GotFrame, img = CvSink.grabFrame(img)
        if GotFrame == 0:
            outputStream.notifyError(CvSink.getError())
Ejemplo n.º 5
0
def main():
    """Vision main loop: track a retroreflective target and a ball with two
    USB cameras, publish distances/angles to NetworkTables, and stream both
    annotated and binary images.

    Relies on module-level configuration and helpers defined elsewhere in
    the file: imageResolutionSend, imageResolutionRasp, targetCameraAddress,
    ballCameraAddress, brightness/exposure settings, the calibration flag,
    the netTable* entries and findTarget / calculateDistance /
    getHorizontalAngle / getLauncherAngle / get*Parameters.
    """

    cs = CameraServer.getInstance()
    cs.enableLogging()
    outputStreamTarget = cs.putVideo("targetImage", imageResolutionSend[0], imageResolutionSend[1])
    outputStreamTargetBinary = cs.putVideo("targetImageBinary", imageResolutionSend[0], imageResolutionSend[1])
    outputStreamBall = cs.putVideo("ballImage", imageResolutionSend[0], imageResolutionSend[1])
    # outputStreamBallBinary = cs.putVideo("ballImageBinary", imageResolutionSend[0], imageResolutionSend[1])

    # Preallocated frame buffers, shaped (height, width, channels).
    targetImage = np.zeros(shape=(imageResolutionRasp[1], imageResolutionRasp[0], 3), dtype=np.uint8)
    ballImage = np.zeros(shape=(imageResolutionRasp[1], imageResolutionRasp[0], 3), dtype=np.uint8)

    # A camera is considered present iff its device node exists under /dev.
    availableTargetCamera = targetCameraAddress in os.listdir("/dev")
    availableBallCamera = ballCameraAddress in os.listdir("/dev")

    if availableTargetCamera:
        cameraTarget = UsbCamera("Camera Target", "/dev/" + targetCameraAddress)
        cameraTarget.setResolution(imageResolutionRasp[0], imageResolutionRasp[1])
        cameraTarget.setBrightness(brightnessTarget)
        cameraTarget.setExposureManual(exposureTarget)
        cs.addCamera(cameraTarget)
        cvSinkTarget = cs.getVideo(name="Camera Target")
        cvSinkTarget.setSource(cameraTarget)

    if availableBallCamera:
        cameraBall = UsbCamera("Camera Ball", "/dev/" + ballCameraAddress)
        cameraBall.setResolution(imageResolutionRasp[0], imageResolutionRasp[1])
        cameraBall.setBrightness(brightnessBall)
        cameraBall.setExposureManual(exposureBall)
        cs.addCamera(cameraBall)
        cvSinkBall = cs.getVideo(name="Camera Ball")
        cvSinkBall.setSource(cameraBall)

    counter = 0
    while True:

        # Re-check hot-unplug: once a camera disappears it is never re-enabled
        # (the flag is only re-evaluated while it is still True).
        if availableTargetCamera:
            availableTargetCamera = targetCameraAddress in os.listdir("/dev")
        if availableBallCamera:
            availableBallCamera = ballCameraAddress in os.listdir("/dev")

        counter += 1
        textOutput = ""

        # In calibration mode, refresh all tunable parameters every frame.
        if calibration:

            getHSVBallParameters()
            getHSVTargetParameters()

            getCameraTargetParameters()
            getCameraBallParameters()

            getTargetDistanceParameters()

        if availableTargetCamera:
            t, targetImage = cvSinkTarget.grabFrame(targetImage)
            available, targetImage, binaryTargetImage, targetXPos, targetYPos = findTarget(targetImage)

            distance = calculateDistance(targetYPos)
            horizontalAngle = getHorizontalAngle(targetXPos, imageResolutionRasp)
            launcherAngle = getLauncherAngle(distance)

            # Draw a solid green square when the target is in shooting range.
            if distance > 95 and distance < 240:
                cv2.rectangle(targetImage, (20, 20), (100, 100), (0, 255, 0), -1)

            netTableTargetAvailable.setBoolean(available)
            netTableTargetDistance.setDouble(distance)
            netTableTargetHorizontalAngle.setDouble(horizontalAngle)
            netTableTargetLauncherAngle.setDouble(launcherAngle)
            netTableTargetXPos.setDouble(targetXPos)
            netTableTargetYPos.setDouble(targetYPos)

            # Downscale before streaming to save bandwidth.
            smallerBinaryTargetImage = cv2.resize(binaryTargetImage, (imageResolutionSend[0], imageResolutionSend[1]))
            smallerTargetImage = cv2.resize(targetImage, (imageResolutionSend[0], imageResolutionSend[1]))
            outputStreamTargetBinary.putFrame(smallerBinaryTargetImage)
            outputStreamTarget.putFrame(smallerTargetImage)
            textOutput += "Distance: {} horizontalAngle: {} launcherAngle: {} targetYPos: {}\n".format(distance, horizontalAngle, launcherAngle, targetYPos)
        else:
            textOutput += "Target Camera disabled\n"


        if availableBallCamera:
            t, ballImage = cvSinkBall.grabFrame(ballImage)
            # ballImage, binaryBallImage, ballXPos, ballYPos = findBall(ballImage)
            # angle = getHorizontalAngle(ballXPos, imageResolutionRasp)

            # netTableBallXPos.setDouble(ballXPos)
            # netTableBallYPos.setDouble(ballYPos)
            # netTableBallAngle.setDouble(angle)

            # smallerBinaryBallImage = cv2.resize(binaryBallImage, (imageResolutionSend[0], imageResolutionSend[1]))
            smallerBallImage = cv2.resize(ballImage, (imageResolutionSend[0], imageResolutionSend[1]))
            # outputStreamBallBinary.putFrame(smallerBinaryBallImage)
            outputStreamBall.putFrame(smallerBallImage)
            # textOutput += "ballXPos: {} ballYPos: {}\n".format(ballXPos, ballYPos)
        else:
            textOutput += "Ball Camera disabled\n"

        # Throttle console output to every 10th frame.
        if counter % 10 == 0:
            print(textOutput)
# HSV threshold bounds for green detection (values per the original notes).
lower_green = np.array([0, 255, 110])  #137 240 135 - HSV, 0,90,90 - RGB
upper_green = np.array([80, 255, 200])  #143 255 148 - HSV, 86,255,255 - RGB

# Camera geometry: frame size, horizontal center, and centering tolerance
# in pixels. These module-level names are used by code beyond this excerpt.
camWidth = 320
camHeight = 240
center_x = camWidth * .5
margin = 20

cs = CameraServer.getInstance()
cs.enableLogging()

cam1 = UsbCamera("cam1", 0)
cam2 = UsbCamera("cam2", 1)

cam1.setResolution(camWidth, camHeight)
cam2.setResolution(camWidth, camHeight)

# Fixed low exposure to make the reflective target stand out.
cam1.setExposureManual(8)
cam2.setExposureManual(8)

# Only cam1 is captured and restreamed; cam2 is configured but unused here
# — presumably consumed elsewhere in the full file.
cvSink = cs.getVideo(camera=cam1)

outputStream = cs.putVideo("Cam1", camWidth, camHeight)

# Preallocated frame buffer, shaped (height, width, channels).
frame = np.zeros(shape=(camHeight, camWidth, 3), dtype=np.uint8)

def listener(table, key, value, isNew):
    """NetworkTables entry listener: log every change to stdout.

    The `table` argument is accepted for the callback signature but unused.
    """
    message = "value changed: key: '%s'; value: %s; isNew: %s" % (key, value, isNew)
    print(message)
Ejemplo n.º 7
0
    # NOTE(review): fragment — the enclosing function's header and the
    # definition of SmartDashBoardValues are not visible in this excerpt.
    # Persist the HSV threshold entries across robot reboots.
    SmartDashBoardValues.setPersistent("HL")
    SmartDashBoardValues.setPersistent("HU")
    SmartDashBoardValues.setPersistent("SL")
    SmartDashBoardValues.setPersistent("SU")
    SmartDashBoardValues.setPersistent("VL")
    SmartDashBoardValues.setPersistent("VU")

    #Start first camera
    print("Connecting to camera 0")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('Cam 0', 0)
    #exp = 2
    #Camera.setExposureManual(exp)
    Camera.setResolution(160,120)
    cs.addCamera(Camera)
    #SmartDashBoardValues.putNumber('ExpAuto', 0)

    print("connected")

    # Second camera is served directly over MJPEG on port 1182, downscaled
    # by the server to 480x360.
    Camera1 = UsbCamera('Cam 1', 1)
    Camera1.setResolution(640, 480)
    Camera1.setFPS(25)
    # NOTE(review): mjpegServer is a local here — confirm the enclosing scope
    # keeps it (or the cameras) alive after this function returns.
    mjpegServer = MjpegServer("serve_Cam 1", 1182)
    mjpegServer.setResolution(480, 360)
    mjpegServer.setSource(Camera1)

    CvSink = cs.getVideo()
    outputStream = cs.putVideo("Processed Frames", 160,120)
Ejemplo n.º 8
0
def main(table):
    """Find the two largest bright contours (retroreflective tape), mark
    them on the frame, and publish the midpoint x and inter-target pixel
    distance to the given NetworkTables `table`. Loops forever.
    """
    camSrv = CameraServer.getInstance()
    camSrv.enableLogging()

    cam = UsbCamera('logitech', 0)
    cam.setResolution(CAM_WIDTH, CAM_HEIGHT)
    camSrv.addCamera(cam)

    # Get a CvSink. This will capture images from the camera
    cvSink = camSrv.getVideo()

    # (optional) Setup a CvSource. This will send images back to the Dashboard
    outputStream = camSrv.putVideo("Rectangle", CAM_WIDTH, CAM_HEIGHT)

    # Allocating new images is very expensive, always try to preallocate
    rawimg = np.zeros(shape=(CAM_HEIGHT, CAM_WIDTH, 3), dtype=np.uint8)

    # HSV threshold for near-white pixels.
    # OpenCV ranges: Hue 0-180, Saturation 0-255, Value 0-255.
    lower = np.array([0, 0, 225])
    upper = np.array([10, 10, 255])

    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        time, rawimg = cvSink.grabFrame(rawimg, 0.5)
        if time == 0:
            # Send the output the error.
            print(cvSink.getError())
            #outputStream.notifyError(cvSink.getError())
            # skip the rest of the current iteration
            continue

        # Threshold the HSV image to get only the selected colors
        hsv = cv2.cvtColor(rawimg, cv2.COLOR_RGB2HSV)
        mask = cv2.inRange(hsv, lower, upper)

        # BUG FIX: cv2.findContours returns (img, contours, hierarchy) on
        # OpenCV 3.x but (contours, hierarchy) on 4.x; taking [-2] works on
        # both. list() so the result is sortable either way.
        contours = list(cv2.findContours(mask, cv2.RETR_TREE,
                                         cv2.CHAIN_APPROX_SIMPLE)[-2])
        if len(contours) == 0:
            continue
        try:
            contours.sort(key=cv2.contourArea)
            target1 = contours[-1]
            # Raises IndexError when only one contour was found — that is
            # the case the (formerly bare) except was guarding against.
            target2 = contours[-2]
            target1Moment = cv2.moments(target1)
            target2Moment = cv2.moments(target2)
        except IndexError:
            continue

        # Contour centroids from image moments; a degenerate (zero-area)
        # moment falls back to the origin.
        try:
            target1Cx = int(target1Moment['m10'] / target1Moment['m00'])
            target1Cy = int(target1Moment['m01'] / target1Moment['m00'])

            target2Cx = int(target2Moment['m10'] / target2Moment['m00'])
            target2Cy = int(target2Moment['m01'] / target2Moment['m00'])

        except ZeroDivisionError:
            target1Cx = 0
            target1Cy = 0
            target2Cx = 0
            target2Cy = 0

        distance = int(
            vu.getDistance(target1Cx, target1Cy, target2Cx, target2Cy))
        cv2.drawContours(rawimg, [target1], -1, (0, 0, 255), 2)
        cv2.drawContours(rawimg, [target2], -1, (0, 0, 255), 2)
        # figure the midpoint of x1,y1 and x2,y2, get the angle for midX
        midX, midY = vu.getMidPoint(target1Cx, target1Cy, target2Cx, target2Cy)
        midX = int(midX)
        midY = int(midY)
        # Only annotate centers/midpoint when the targets are far enough
        # apart to be a plausible target pair.
        if distance > 100:
            cv2.circle(rawimg, (target1Cx, target1Cy), 7, (255, 0, 0), -1)
            cv2.circle(rawimg, (target2Cx, target2Cy), 7, (255, 0, 0), -1)
            cv2.drawMarker(rawimg, (midX, midY), (0, 0, 255), cv2.MARKER_CROSS,
                           20, 3)

        # Vertical alignment guide lines.
        cv2.line(rawimg, (336, 0), (336, 480), (0, 255, 0), 2)
        cv2.line(rawimg, (304, 0), (304, 480), (0, 255, 0), 2)
        #cv2.circle(rawimg, (midX, midY), 7, (0, 0, 255), -1)
        #angle = vu.getAngle(midX, CENTER, DEGREES_PER_PIXEL)

        print('%d\t%d' % (midX, distance))
        table.putNumber('midX', midX)
        table.putNumber('distance', distance)

        # Give the output stream a new image to display
        outputStream.putFrame(rawimg)
Ejemplo n.º 9
0
def main(config):
    """ML + classic vision loop: detect power cells (TFLite model, with an
    HSV-contour fallback) or retroreflective tape depending on the 'cam'
    SmartDashboard flag, and publish positions/angles/distances to the 'ML'
    NetworkTables table. Loops forever.

    `config` is the path handed to read_config() to obtain the team number.
    Relies on read_config, connectionListener, listener, DetectionEngine and
    a 'model.tflite' file available in the working directory.
    """

    team = read_config(config)
    WIDTH, HEIGHT = 320, 240
    FOV = 68.5
    # Pinhole-model focal length in pixels, derived from the horizontal FOV.
    focal_length = WIDTH / (2 * math.tan(math.radians(FOV / 2)))
    print("Starting camera server")

    cs = CameraServer.getInstance()
    power_cell_cam = UsbCamera('power_cell_cam', 2)
    power_cell_cam.setResolution(WIDTH, HEIGHT)
    power_cell_cam.setExposureManual(50)

    # Very low exposure so only the lit reflective tape is visible.
    tape_cam = UsbCamera('tape_cam', 0)
    tape_cam.setResolution(WIDTH, HEIGHT)
    tape_cam.setExposureManual(1)

    print("Connecting to Network Tables")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startClientTeam(team)
    ntinst.addConnectionListener(connectionListener, immediateNotify=True)
    # Format of these entries found in WPILib documentation.
    power_cell_angle = ntinst.getTable('ML').getEntry('power_cell_angle')
    power_cell_pos = ntinst.getTable('ML').getEntry('power_cell_pos')
    power_cell_exists = ntinst.getTable('ML').getEntry('power_cell_exists')
    power_cell_dist = ntinst.getTable('ML').getEntry('power_cell_dist')
    power_cell_x = ntinst.getTable('ML').getEntry('power_cell_x')
    power_cell_y = ntinst.getTable('ML').getEntry('power_cell_y')

    tape_angle = ntinst.getTable('ML').getEntry('tape_angle')
    tape_dist = ntinst.getTable('ML').getEntry('tape_dist')
    tape_found = ntinst.getTable('ML').getEntry('tape_found')
    sd = ntinst.getTable('SmartDashboard')

    sd.addEntryListener(listener, key='cam')

    cvSink = cs.getVideo(camera=power_cell_cam)
    img = np.zeros(shape=(HEIGHT, WIDTH, 3), dtype=np.uint8)
    output = cs.putVideo("MLOut", WIDTH, HEIGHT)

    print("Initializing ML engine")
    engine = DetectionEngine("model.tflite")

    # HSV threshold bounds for the two detection modes.
    lower_yellow = np.array([20, 75, 100])
    upper_yellow = np.array([40, 255, 255])

    lower_green = np.array([55, 124, 81])  #137 240 135 - HSV, 0,90,90 - RGB
    upper_green = np.array([255, 255,
                            255])  #143 255 148 - HSV, 86,255,255 - RGB

    lower_color = np.array([0, 0, 0])
    upper_color = np.array([0, 0, 0])

    # Camera position relative to the robot center, in meters.
    camera_robot_x = .3302
    camera_robot_y = -.0254

    print("Starting ML mainloop")

    while True:
        # cam_mode True -> tape camera; False -> power-cell camera.
        cam_mode = sd.getBoolean(key='cam', defaultValue=False)

        if cam_mode:
            cvSink.setSource(tape_cam)
            lower_color = lower_green
            upper_color = upper_green
            #print('green')
        else:
            cvSink.setSource(power_cell_cam)
            lower_color = lower_yellow
            upper_color = upper_yellow
            #print('yellow')

        t, frame = cvSink.grabFrame(img)

        # Run inference (power-cell mode only).
        if not cam_mode:
            ans = engine.detect_with_image(Image.fromarray(frame),
                                           threshold=0.5,
                                           keep_aspect_ratio=True,
                                           relative_coord=False,
                                           top_k=10)
        else:
            ans = False

        center_power_cell_x = WIDTH / 2
        center_power_cell_y = HEIGHT / 2

        bool_power_cell = False
        bool_tape = False

        largest_box_xmin = 0
        largest_box_xmax = 0
        largest_box_ymin = 0
        # BUG FIX: the original initialized largest_box_ymin twice and never
        # initialized largest_box_ymax, which is read below.
        largest_box_ymax = 0
        largest_box_area = 0

        minimum_box_area = 0

        angle_to_turn_power_cell = 0
        angle_to_turn_power_cell_robot = 0
        dist_power_cell = 0

        angle_to_turn_tape = 0
        dist_tape = 0

        new_x = 0
        new_y = 0

        center_contour_x = WIDTH / 2

        # Display result.
        if ans:
            # ML detections available: keep the largest bounding box and
            # annotate every detection on the frame.
            for obj in ans:
                box = [
                    round(i, 3) for i in obj.bounding_box.flatten().tolist()
                ]
                xmin, ymin, xmax, ymax = map(int, box)
                box_area = (xmax - xmin) * (ymax - ymin)

                if box_area > largest_box_area:
                    largest_box_area = box_area
                    largest_box_xmin = xmin
                    largest_box_xmax = xmax
                    largest_box_ymin = ymin
                    largest_box_ymax = ymax

                label = '%s: %d%%' % ('power_cell', int(obj.score * 100)
                                      )  # Example: 'Cargo: 72%'
                label_size, base_line = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
                label_ymin = max(ymin, label_size[1] + 10)

                cv2.rectangle(
                    frame, (xmin, label_ymin - label_size[1] - 10),
                    (xmin + label_size[0], label_ymin + base_line - 10),
                    (255, 255, 255), cv2.FILLED)
                cv2.putText(frame, label, (xmin, label_ymin - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0),
                              4)

            if (largest_box_area > minimum_box_area):
                center_power_cell_x = 0.5 * (largest_box_xmin +
                                             largest_box_xmax)
                center_power_cell_y = 0.5 * (largest_box_ymin +
                                             largest_box_ymax)
                bool_power_cell = True

                # Angle from optical axis to the box center, then distance
                # from the known power-cell diameter (0.5842 m circumference
                # term) and the angular half-width of the box.
                angle_to_turn_power_cell = math.degrees(
                    math.atan((center_power_cell_x - (.5 * WIDTH - .5)) /
                              focal_length))
                angle_to_right_side = math.degrees(
                    math.atan(
                        (largest_box_xmax - (.5 * WIDTH - .5)) / focal_length))
                angle_diff_right_center = abs(angle_to_right_side -
                                              angle_to_turn_power_cell)
                dist_power_cell = (.5842 / (2 * math.pi)) / math.tan(
                    math.radians(angle_diff_right_center))

                # Translate camera-relative polar position into robot frame.
                x_vec = dist_power_cell / math.sqrt(
                    1 + math.tan(math.radians(-angle_to_turn_power_cell))**2)
                y_vec = x_vec * math.tan(
                    math.radians((-angle_to_turn_power_cell)))
                if (angle_to_turn_power_cell < 0):
                    x_vec *= -1
                    y_vec *= -1
                new_x = camera_robot_x + x_vec
                new_y = camera_robot_y + y_vec
                angle_to_turn_power_cell_robot = -math.degrees(
                    math.atan2(new_y, new_x))

                cv2.putText(frame, str(angle_to_turn_power_cell_robot),
                            (100, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2)
                #cv2.putText(frame, str(new_y), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                #cv2.putText(frame, str(dist_power_cell), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        else:
            # No ML detections: fall back to HSV contour detection (yellow
            # for power cells, green for tape, per cam_mode above).
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_color, upper_color)
            # [-2] keeps this compatible with both the OpenCV 3.x and 4.x
            # findContours return signatures.
            contours = cv2.findContours(mask, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
            # TODO: Determine how reflective tapes will look through eye of god
            # TODO: Calculate angle and distance to tape (tape angle
            # calculation same as power cell angle calculation)

            largest_contour = np.array([0, 0])
            for c in contours:
                area = cv2.contourArea(c)
                if area > largest_box_area:
                    largest_contour = c
                    largest_box_area = area

            if (largest_box_area > minimum_box_area and not cam_mode):
                x, y, w, h = cv2.boundingRect(largest_contour)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                center_power_cell_x = 0.5 * (x + x + w)
                center_power_cell_y = 0.5 * (y + y + h)
                bool_power_cell = True

                # Same geometry as the ML branch, using the bounding rect.
                angle_to_turn_power_cell = math.degrees(
                    math.atan((center_power_cell_x - (.5 * WIDTH - .5)) /
                              focal_length))
                angle_to_right_side = math.degrees(
                    math.atan(((x + w) - (.5 * WIDTH - .5)) / focal_length))
                angle_diff_right_center = angle_to_right_side - angle_to_turn_power_cell
                dist_power_cell = (.5842 / (2 * math.pi)) / math.tan(
                    math.radians(angle_diff_right_center))

                x_vec = dist_power_cell / math.sqrt(
                    1 + math.tan(math.radians(-angle_to_turn_power_cell))**2)
                y_vec = x_vec * math.tan(
                    math.radians((-angle_to_turn_power_cell)))
                if (angle_to_turn_power_cell < 0):
                    x_vec *= -1
                    y_vec *= -1
                new_x = camera_robot_x + x_vec
                new_y = camera_robot_y + y_vec
                angle_to_turn_power_cell_robot = -math.degrees(
                    math.atan2(new_y, new_x))

                cv2.putText(frame, str(angle_to_turn_power_cell_robot),
                            (100, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2)
                #cv2.putText(frame, str(new_y), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                #cv2.putText(frame, str(dist_power_cell), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            elif (largest_box_area > 0 and cam_mode):
                # Tape mode: use the largest green contour's centroid.
                x, y, w, h = cv2.boundingRect(largest_contour)
                M1 = cv2.moments(largest_contour)
                center_contour_x = int(M1['m10'] / M1['m00'])
                cv2.drawContours(frame, [largest_contour], 0, (0, 255, 0), 3)

                angle_to_turn_tape = math.degrees(
                    math.atan(
                        (center_contour_x - (.5 * WIDTH - .5)) / focal_length))
                angle_to_right_side = math.degrees(
                    math.atan(((x + w) - (.5 * WIDTH - .5)) / focal_length))
                angle_diff_right_center = abs(angle_to_right_side -
                                              angle_to_turn_tape)
                dist_tape = .99695 / math.tan(
                    math.radians(angle_diff_right_center))

                cv2.putText(frame, str(dist_tape), (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                bool_tape = True

        output.putFrame(frame)

        tape_found.setBoolean(bool_tape)

        if cam_mode:
            angle_to_turn_tape = (center_contour_x / WIDTH * FOV) - FOV / 2
            tape_angle.setDouble(angle_to_turn_tape)
            #TODO: Send distance to tape
        else:
            power_cell_angle.setDouble(angle_to_turn_power_cell_robot)
            power_cell_pos.setDoubleArray(
                [center_power_cell_x, center_power_cell_y])
            power_cell_exists.setBoolean(bool_power_cell)
            power_cell_dist.setDouble(dist_power_cell)
            power_cell_x.setDouble(new_x)
            power_cell_y.setDouble(new_y)
Ejemplo n.º 10
0
def mainMain():
    """Stream one of two driver cameras ('TOP CAMERAS') to Shuffleboard,
    switching between front and back based on a NetworkTables value.
    Loops forever.
    """
    #   network tables initialization
    NetworkTables.initialize(server=__IP)
    print("nt initialized")
    smartdash = NetworkTables.getTable("SmartDashboard")
    #    table = NetworkTables.getTable(__SENT_TABLE_NAME)
    #    subtable = smartdash.getSubTable(__ORIENTATION_TABLE_NAME)

    #   initialize cameras
    # CAMERA 1
    cam = UsbCamera('Cam 1 Front', 1)
    cam.setResolution(160, 120)
    cam.setExposureManual(0)
    cam.setBrightness(0)
    cam.setFPS(30)
    print("cam1 initialized")
    # CAMERA 2
    cam2 = UsbCamera('Cam 2 Back', 0)
    cam2.setResolution(160, 120)
    cam2.setExposureManual(0)
    cam2.setBrightness(0)
    cam2.setFPS(30)
    print("cam2 initialized")

    # EACH CAMERA STARTS CAPTURING VIDEO
    cs.startAutomaticCapture(camera=cam)
    cs.startAutomaticCapture(camera=cam2)

    # GETTING THE VIDEO STREAM OF EACH CAMERA
    vid = cs.getVideo(camera=cam)
    vid2 = cs.getVideo(camera=cam2)

    #   initialize outputstream, this is the place where we send frames to shuffleboard.
    output_stream = cs.putVideo('TOP CAMERAS', 160, 120)

    # Preallocate the frame buffer once, outside the loop (allocation is
    # expensive). BUG FIX: shape is (height, width, channels) for a 160x120
    # image, not (160, 120, 3).
    img = np.zeros(shape=(120, 160, 3), dtype=np.uint8)

    print("Running while loop")
    counter = 0
    # BUG FIX: default the sink before the loop — the original read `video`
    # before it was ever assigned (NameError on the first iteration).
    video = vid2  # FRONT camera by default
    while True:
        # get front or back value
        # NOTE(review): `subtable` is commented out in the setup above —
        # presumably a module global; confirm it exists.
        camera_value = subtable.getString("value", "FRONT")

        #   This condition selects the camera based on the robots orientation.
        # BUG FIX: the source is now chosen BEFORE grabbing the frame; the
        # original grabbed first, so every frame came from the previous
        # iteration's selection.
        if camera_value == 'BACK':
            print("BACK")
            video = vid
        elif camera_value == 'FRONT':
            print("FRONT")
            video = vid2
        else:
            print('error, no camera value detected')

        #   get video
        time, frame = video.grabFrame(img, 0.5)

        print("sending frame")
        # send frame to shuffleboard
        output_stream.putFrame(frame)

        print('Done.')
        sys.stdout.flush()
        # just counting how fast it is running and if it is running at all
        counter += 1
        print(counter)
Ejemplo n.º 11
0
# This code is designed to work for the Microsoft Lifecam HD3000 and the limelight ring

log.basicConfig(level=log.DEBUG)

# NetworkTables set up
ntinst = nti.getDefault()
ntinst.startClientTeam(578)

sd = ntinst.getTable('SmartDashboard')

# Start the camera
cs = CameraServer.getInstance()
cs.enableLogging()
Camera = UsbCamera('rPi Camera 0', 0)
Camera.setResolution(1280, 720)
cs.addCamera(Camera)

CvSink = cs.getVideo()
outputStream = cs.putVideo("Processed Frames", 1280, 720)

# Preallocated frame buffer. BUG FIX: numpy image buffers are
# (height, width, channels); the original (1280, 720, 3) was transposed and
# forced grabFrame to reallocate a correctly shaped array every time.
img = np.zeros(shape=(720, 1280, 3), dtype=np.uint8)

#cap = cv2.VideoCapture(2)

while (True):

    GotFrame, img = CvSink.grabFrame(img)
    if (GotFrame == 0):
        outputStream.notifyError(CvSink.getError())
        continue
    # NOTE(review): the frame-processing body is truncated in this excerpt —
    # presumably the full file processes and republishes `img` here.
Ejemplo n.º 12
0
            fiveMove = False
        else:
            rightFound = True
    return x


if __name__ == "__main__":
    # Optional first CLI argument overrides the config file path.
    if len(sys.argv) >= 2:
        configFile = sys.argv[1]

    # start cameras
    print("Connecting to camera")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('rPi Camera 0', 0)
    # NOTE(review): width/height are presumably module-level globals defined
    # outside this excerpt — confirm.
    Camera.setResolution(width, height)
    #cs.addCamera(Camera)
    cs.startAutomaticCapture(camera=Camera, return_server=True)

    CvSink = cs.getVideo()
    # NOTE(review): this CvSource is created but never attached to a server
    # here, so nothing visible is streamed from it — confirm against the
    # full file.
    outputStream = CvSource('Processed Frames', VideoMode.PixelFormat.kBGR,
                            width, height, 28)
    # NOTE(review): for a width x height image numpy expects
    # (height, width, 3) — this buffer looks transposed; grabFrame would
    # reallocate on the first frame.
    img = np.zeros(shape=(width, height, 3), dtype=np.uint8)
    count = 0
    # `global` at module scope is a no-op; kept as in the original.
    global topX
    global topY
    global botX
    global botY
    global direction
    global alreadyFound
    # NOTE(review): the loop body is truncated in this excerpt.
    while True:
Ejemplo n.º 13
0
def main(table):
    """Track the largest bright contour, fit a rotated bounding box, and
    publish its angle as 'cameraAngle' to the given NetworkTables `table`.
    The box is drawn on the frame and streamed back. Loops forever.
    """
    camSrv = CameraServer.getInstance()
    camSrv.enableLogging()

    cam = UsbCamera("logitech", 0)
    camSrv.addCamera(cam)
    #cam = cs.startAutomaticCapture()

    cam.setResolution(CAM_WIDTH, CAM_HEIGHT)

    # Get a CvSink. This will capture images from the camera
    cvSink = camSrv.getVideo() #camera=cam)

    # (optional) Setup a CvSource. This will send images back to the Dashboard
    outputStream = camSrv.putVideo("Rectangle", CAM_WIDTH, CAM_HEIGHT)

    # Allocating new images is very expensive, always try to preallocate
    rawimg = np.zeros(shape=(CAM_HEIGHT, CAM_WIDTH, 3), dtype=np.uint8)

    # HSV threshold for bright, low-saturation pixels.
    # OpenCV ranges: Hue 0-180, Saturation 0-255, Value 0-255.
    lower = np.array([0, 0, 200])
    upper = np.array([10, 100, 255])

    while True:
        # Tell the CvSink to grab a frame from the camera and put it
        # in the source image.  If there is an error notify the output.
        time, rawimg = cvSink.grabFrame(rawimg,0.5)
        if time == 0:
            # Send the output the error.
            print(cvSink.getError())
            #outputStream.notifyError(cvSink.getError())
            # skip the rest of the current iteration
            continue

        # convert RGB to HSV
        hsv = cv2.cvtColor(rawimg, cv2.COLOR_RGB2HSV)

        # Threshold the HSV image to get only the selected colors
        mask = cv2.inRange(hsv, lower, upper)

        # BUG FIX: cv2.findContours returns (img, contours, hierarchy) on
        # OpenCV 3.x but (contours, hierarchy) on 4.x; [-2] works on both.
        contours = cv2.findContours(mask, cv2.RETR_TREE,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]
        if len(contours) == 0:
            print('no contours')
            continue
        try:
            # The original sorted the whole list and then took the max —
            # max() alone gives the same largest contour.
            target = max(contours, key=cv2.contourArea)
            boundingBox = cv2.minAreaRect(target)
        except cv2.error:
            print('no bounding box')
            continue

        # minAreaRect reports angles in (-90, 0]; normalize near-vertical
        # boxes into a continuous range.
        angle = boundingBox[2]
        if angle < -45:
            angle = angle + 90

        table.putNumber('cameraAngle', angle)

        # draw a red outline on the output image
        # so that the user can see what is targeted
        boxPoints = cv2.boxPoints(boundingBox)
        # np.intp instead of np.int0: same alias, but int0 was removed in
        # numpy 2.0.
        boxPoints = np.intp(boxPoints)
        rawimg = cv2.drawContours(rawimg, [boxPoints], 0, (0,0,255), 2)
        # Give the output stream a new image to display
        outputStream.putFrame(rawimg)
Ejemplo n.º 14
0
    except KeyError:
        parseError("could not read cameras")
        return False
    for camera in cameras:
        if not readCameraConfig(camera):
            return False

    return True

# Load camera/team configuration (readConfig and the cameraWidth /
# cameraHeight / team globals it populates are defined elsewhere in the file).
readConfig()
#Set up the camera
camServer = CameraServer.getInstance()
camServer.enableLogging()
camera = UsbCamera("rPi Camera 0", 0)
camera.setFPS(30)
camera.setResolution(cameraWidth, cameraHeight)
camServer.addCamera(camera)

# Sink for grabbing raw frames; source for publishing processed ones.
# These module-level names are consumed by code beyond this excerpt.
cvSink = camServer.getVideo()
outputStream = camServer.putVideo("Processed Frames", cameraWidth, cameraHeight)

ntinst = NetworkTablesInstance.getDefault()

print("Setting up NetworkTables client for team {}".format(team))
ntinst.startClientTeam(team)
dashboard = ntinst.getTable('SmartDashboard')

#outputStream = camServer.putVideo("Vision", cameraWidth, cameraHeight)


#camera = camServer.startAutomaticCapture();