Example #1
            if (solid < solidity[0] or solid > solidity[1]):
                continue
            if (len(contour) < min_vertex_count or len(contour) > max_vertex_count):
                continue
            ratio = float(w) / h
            if (ratio < min_ratio or ratio > max_ratio):
                continue
            output.append(contour)
        return output



gLine = Example()

camera = UsbCamera("CammyBoi", 0)
camera.setExposureManual(10)
#camera.setConfigJson(json.dumps(json.load(open(configFile, "rt", encoding="utf-8"))))
vidSink = CvSink("Camera")
vidSink.setSource(camera)

vidSource = CvSource("Processed", VideoMode.PixelFormat.kMJPEG, 640, 480, 30)
networkStream = MjpegServer("Stream", 1181)
networkStream.setSource(vidSource)
img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)

while True:
    ret, src = vidSink.grabFrame(img)
    startTime = time.time()

    gLine.process(src)
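
A note on the loop above: Example #1 is cut off mid-iteration and never checks whether grabFrame succeeded or republishes a frame. Below is a minimal sketch of one full iteration, assuming the camera, vidSink, vidSource and gLine objects created above; gLine.process simply stands in for whatever pipeline is used.

img = np.zeros(shape=(480, 640, 3), dtype=np.uint8)
while True:
    frame_time, src = vidSink.grabFrame(img)
    if frame_time == 0:
        # grabFrame timed out or failed; report it and try again
        vidSource.notifyError(vidSink.getError())
        continue
    gLine.process(src)       # run the vision pipeline on the captured frame
    vidSource.putFrame(src)  # republish the frame through the MjpegServer on port 1181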
Example #2
def main():

    cs = CameraServer.getInstance()
    cs.enableLogging()
    outputStreamTarget = cs.putVideo("targetImage", imageResolutionSend[0], imageResolutionSend[1])
    outputStreamTargetBinary = cs.putVideo("targetImageBinary", imageResolutionSend[0], imageResolutionSend[1])
    outputStreamBall = cs.putVideo("ballImage", imageResolutionSend[0], imageResolutionSend[1])
    # outputStreamBallBinary = cs.putVideo("ballImageBinary", imageResolutionSend[0], imageResolutionSend[1])

    targetImage = np.zeros(shape=(imageResolutionRasp[1], imageResolutionRasp[0], 3), dtype=np.uint8)
    ballImage = np.zeros(shape=(imageResolutionRasp[1], imageResolutionRasp[0], 3), dtype=np.uint8)

    availableTargetCamera = targetCameraAddress in os.listdir("/dev")
    availableBallCamera = ballCameraAddress in os.listdir("/dev")

    if availableTargetCamera:
        cameraTarget = UsbCamera("Camera Target", "/dev/" + targetCameraAddress)
        cameraTarget.setResolution(imageResolutionRasp[0], imageResolutionRasp[1])
        cameraTarget.setBrightness(brightnessTarget)
        cameraTarget.setExposureManual(exposureTarget)
        cs.addCamera(cameraTarget)
        cvSinkTarget = cs.getVideo(name="Camera Target")
        cvSinkTarget.setSource(cameraTarget)

    if availableBallCamera:
        cameraBall = UsbCamera("Camera Ball", "/dev/" + ballCameraAddress)
        cameraBall.setResolution(imageResolutionRasp[0], imageResolutionRasp[1])
        cameraBall.setBrightness(brightnessBall)
        cameraBall.setExposureManual(exposureBall)
        cs.addCamera(cameraBall)
        cvSinkBall = cs.getVideo(name="Camera Ball")
        cvSinkBall.setSource(cameraBall)
    
    counter = 0
    while True:

        if availableTargetCamera:
            availableTargetCamera = targetCameraAddress in os.listdir("/dev")
        if availableBallCamera:
            availableBallCamera = ballCameraAddress in os.listdir("/dev")

        counter += 1
        textOutput = ""

        if calibration:
            
            getHSVBallParameters()
            getHSVTargetParameters()
            
            getCameraTargetParameters()
            getCameraBallParameters()

            getTargetDistanceParameters()
        
        if availableTargetCamera:
            t, targetImage = cvSinkTarget.grabFrame(targetImage)
            available, targetImage, binaryTargetImage, targetXPos, targetYPos = findTarget(targetImage)
            
            distance = calculateDistance(targetYPos)
            horizontalAngle = getHorizontalAngle(targetXPos, imageResolutionRasp)
            launcherAngle = getLauncherAngle(distance)

            if 95 < distance < 240:
                cv2.rectangle(targetImage, (20, 20), (100, 100), (0, 255, 0), -1)
            
            netTableTargetAvailable.setBoolean(available)
            netTableTargetDistance.setDouble(distance)
            netTableTargetHorizontalAngle.setDouble(horizontalAngle)
            netTableTargetLauncherAngle.setDouble(launcherAngle)
            netTableTargetXPos.setDouble(targetXPos)
            netTableTargetYPos.setDouble(targetYPos)

            smallerBinaryTargetImage = cv2.resize(binaryTargetImage, (imageResolutionSend[0], imageResolutionSend[1]))
            smallerTargetImage = cv2.resize(targetImage, (imageResolutionSend[0], imageResolutionSend[1]))
            outputStreamTargetBinary.putFrame(smallerBinaryTargetImage)
            outputStreamTarget.putFrame(smallerTargetImage)
            textOutput += "Distance: {} horizontalAngle: {} launcherAngle: {} targetYPos: {}\n".format(distance, horizontalAngle, launcherAngle, targetYPos)
        else:
            textOutput += "Target Camera disabled\n"


        if availableBallCamera:
            t, ballImage = cvSinkBall.grabFrame(ballImage)
            # ballImage, binaryBallImage, ballXPos, ballYPos = findBall(ballImage)
            # angle = getHorizontalAngle(ballXPos, imageResolutionRasp)

            # netTableBallXPos.setDouble(ballXPos)
            # netTableBallYPos.setDouble(ballYPos)
            # netTableBallAngle.setDouble(angle)

            # smallerBinaryBallImage = cv2.resize(binaryBallImage, (imageResolutionSend[0], imageResolutionSend[1]))
            smallerBallImage = cv2.resize(ballImage, (imageResolutionSend[0], imageResolutionSend[1]))
            # outputStreamBallBinary.putFrame(smallerBinaryBallImage)
            outputStreamBall.putFrame(smallerBallImage)
            # textOutput += "ballXPos: {} ballYPos: {}\n".format(ballXPos, ballYPos)
        else:
            textOutput += "Ball Camera disabled\n"

        if counter % 10 == 0:
            print(textOutput)
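
The netTable* entries written in Example #2 are created outside the fragment shown. Assuming pynetworktables, they would typically be obtained roughly as sketched below; the table name, entry names and team number are illustrative assumptions, not taken from the original source.

from networktables import NetworkTablesInstance

ntinst = NetworkTablesInstance.getDefault()
ntinst.startClientTeam(1234)             # placeholder team number
visionTable = ntinst.getTable("Vision")  # assumed table name

netTableTargetAvailable = visionTable.getEntry("targetAvailable")
netTableTargetDistance = visionTable.getEntry("targetDistance")
netTableTargetHorizontalAngle = visionTable.getEntry("targetHorizontalAngle")
netTableTargetLauncherAngle = visionTable.getEntry("targetLauncherAngle")
netTableTargetXPos = visionTable.getEntry("targetXPos")
netTableTargetYPos = visionTable.getEntry("targetYPos")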
Example #3
        sys.exit(1)

    # start NetworkTables to send to smartDashboard
    ntinst = NetworkTablesInstance.getDefault()

    print("Setting up NetworkTables client for team {}".format(team))
    ntinst.startClientTeam(team)

    SmartDashBoardValues = ntinst.getTable('SmartDashboard')

    #Start camera
    print("Connecting to camera")
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('Cam 0', 0)
    Camera.setExposureManual(5)
    Camera.setResolution(160, 120)
    cs.addCamera(Camera)

    print("connected")

    CvSink = cs.getVideo()
    outputStream = cs.putVideo("Processed Frames", 160, 120)

    #buffer to store img data
    img = np.zeros(shape=(120, 160, 3), dtype=np.uint8)
    # loop forever
    while True:

        GotFrame, img = CvSink.grabFrame(img)
        if GotFrame == 0:
            # grabFrame failed; report the error and skip this frame
            outputStream.notifyError(CvSink.getError())
            continue
camWidth = 320
camHeight = 240
center_x = camWidth * .5
margin = 20

cs = CameraServer.getInstance()
cs.enableLogging()

cam1 = UsbCamera("cam1", 0)
cam2 = UsbCamera("cam2", 1)

cam1.setResolution(camWidth, camHeight)
cam2.setResolution(camWidth, camHeight)

cam1.setExposureManual(8)
cam2.setExposureManual(8)

cvSink = cs.getVideo(camera=cam1)

outputStream = cs.putVideo("Cam1", camWidth, camHeight)

frame = np.zeros(shape=(camHeight, camWidth, 3), dtype=np.uint8)


def listener(table, key, value, isNew):
    print("value changed: key: '%s'; value: %s; isNew: %s" %
          (key, value, isNew))
    return value

    if server:
        print("Setting up NetworkTables server")
        ntinst.startServer()
    else:
        print("Setting up NetworkTables client for team 7539")
        ntinst.startClientTeam(7539)

    SmartDashBoardValues = ntinst.getTable('SmartDashboard')

    #Start first camera
    print("Connecting to camera 0")
    exp = 4
    cs = CameraServer.getInstance()
    cs.enableLogging()
    Camera = UsbCamera('Cam 0', 0)
    Camera.setExposureManual(exp)
    Camera.setResolution(160,120)
    cs.addCamera(Camera)
    print("connected")
    sp = SmartDashBoardValues
    sp.putNumber('ExpAuto', 0)
    CvSink = cs.getVideo()
    outputStream = cs.putVideo("Processed Frames", 160,120)

    #buffers to store img data
    img = np.zeros(shape=(120, 160, 3), dtype=np.uint8)
    ExpStatus = sp.getNumber('ExpAuto', 0)

    cameras = []
    cameras.append(startCamera(cameraConfigs[1]))
Example #6
def main(config):

    team = read_config(config)
    WIDTH, HEIGHT = 320, 240
    FOV = 68.5
    focal_length = WIDTH / (2 * math.tan(math.radians(FOV / 2)))
    print("Starting camera server")

    cs = CameraServer.getInstance()
    power_cell_cam = UsbCamera('power_cell_cam', 2)
    power_cell_cam.setResolution(WIDTH, HEIGHT)
    power_cell_cam.setExposureManual(50)

    tape_cam = UsbCamera('tape_cam', 0)
    tape_cam.setResolution(WIDTH, HEIGHT)
    tape_cam.setExposureManual(1)

    print("Connecting to Network Tables")
    ntinst = NetworkTablesInstance.getDefault()
    ntinst.startClientTeam(team)
    ntinst.addConnectionListener(connectionListener, immediateNotify=True)
    """Format of these entries found in WPILib documentation."""
    power_cell_angle = ntinst.getTable('ML').getEntry('power_cell_angle')
    power_cell_pos = ntinst.getTable('ML').getEntry('power_cell_pos')
    power_cell_exists = ntinst.getTable('ML').getEntry('power_cell_exists')
    power_cell_dist = ntinst.getTable('ML').getEntry('power_cell_dist')
    power_cell_x = ntinst.getTable('ML').getEntry('power_cell_x')
    power_cell_y = ntinst.getTable('ML').getEntry('power_cell_y')

    tape_angle = ntinst.getTable('ML').getEntry('tape_angle')
    tape_dist = ntinst.getTable('ML').getEntry('tape_dist')
    tape_found = ntinst.getTable('ML').getEntry('tape_found')
    sd = ntinst.getTable('SmartDashboard')

    sd.addEntryListener(listener, key='cam')

    cvSink = cs.getVideo(camera=power_cell_cam)
    img = np.zeros(shape=(HEIGHT, WIDTH, 3), dtype=np.uint8)
    output = cs.putVideo("MLOut", WIDTH, HEIGHT)

    print("Initializing ML engine")
    engine = DetectionEngine("model.tflite")

    lower_yellow = np.array([20, 75, 100])
    upper_yellow = np.array([40, 255, 255])

    lower_green = np.array([55, 124, 81])  #137 240 135 - HSV, 0,90,90 - RGB
    upper_green = np.array([255, 255, 255])  #143 255 148 - HSV, 86,255,255 - RGB

    lower_color = np.array([0, 0, 0])
    upper_color = np.array([0, 0, 0])

    camera_robot_x = .3302
    camera_robot_y = -.0254

    print("Starting ML mainloop")

    while True:
        cam_mode = sd.getBoolean(key='cam', defaultValue=False)

        if cam_mode:
            cvSink.setSource(tape_cam)
            lower_color = lower_green
            upper_color = upper_green
            #print('green')
        else:
            cvSink.setSource(power_cell_cam)
            lower_color = lower_yellow
            upper_color = upper_yellow
            #print('yellow')

        t, frame = cvSink.grabFrame(img)

        # Run inference.

        if not cam_mode:
            ans = engine.detect_with_image(Image.fromarray(frame),
                                           threshold=0.5,
                                           keep_aspect_ratio=True,
                                           relative_coord=False,
                                           top_k=10)
        else:
            ans = False

        center_power_cell_x = WIDTH / 2
        center_power_cell_y = HEIGHT / 2

        bool_power_cell = False
        bool_tape = False

        largest_box_xmin = 0
        largest_box_xmax = 0
        largest_box_ymin = 0
        largest_box_ymax = 0
        largest_box_area = 0

        minimum_box_area = 0

        angle_to_turn_power_cell = 0
        angle_to_turn_power_cell_robot = 0
        dist_power_cell = 0

        angle_to_turn_tape = 0
        dist_tape = 0

        new_x = 0
        new_y = 0

        center_contour_x = WIDTH / 2

        # Display result.
        if ans:
            for obj in ans:
                box = [
                    round(i, 3) for i in obj.bounding_box.flatten().tolist()
                ]
                xmin, ymin, xmax, ymax = map(int, box)
                box_area = (xmax - xmin) * (ymax - ymin)

                if box_area > largest_box_area:
                    largest_box_area = box_area
                    largest_box_xmin = xmin
                    largest_box_xmax = xmax
                    largest_box_ymin = ymin
                    largest_box_ymax = ymax

                label = '%s: %d%%' % ('power_cell', int(obj.score * 100))  # e.g. 'power_cell: 72%'
                label_size, base_line = cv2.getTextSize(
                    label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 2)
                label_ymin = max(ymin, label_size[1] + 10)

                cv2.rectangle(
                    frame, (xmin, label_ymin - label_size[1] - 10),
                    (xmin + label_size[0], label_ymin + base_line - 10),
                    (255, 255, 255), cv2.FILLED)
                cv2.putText(frame, label, (xmin, label_ymin - 7),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
                cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (10, 255, 0),
                              4)

            if (largest_box_area > minimum_box_area):
                center_power_cell_x = 0.5 * (largest_box_xmin +
                                             largest_box_xmax)
                center_power_cell_y = 0.5 * (largest_box_ymin +
                                             largest_box_ymax)
                bool_power_cell = True

                angle_to_turn_power_cell = math.degrees(
                    math.atan((center_power_cell_x - (.5 * WIDTH - .5)) /
                              focal_length))
                angle_to_right_side = math.degrees(
                    math.atan(
                        (largest_box_xmax - (.5 * WIDTH - .5)) / focal_length))
                angle_diff_right_center = abs(angle_to_right_side -
                                              angle_to_turn_power_cell)
                dist_power_cell = (.5842 / (2 * math.pi)) / math.tan(
                    math.radians(angle_diff_right_center))

                x_vec = dist_power_cell / math.sqrt(
                    1 + math.tan(math.radians(-angle_to_turn_power_cell))**2)
                y_vec = x_vec * math.tan(
                    math.radians((-angle_to_turn_power_cell)))
                if (angle_to_turn_power_cell < 0):
                    x_vec *= -1
                    y_vec *= -1
                new_x = camera_robot_x + x_vec
                new_y = camera_robot_y + y_vec
                angle_to_turn_power_cell_robot = -math.degrees(
                    math.atan2(new_y, new_x))

                cv2.putText(frame, str(angle_to_turn_power_cell_robot),
                            (100, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2)
                #cv2.putText(frame, str(new_y), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                #cv2.putText(frame, str(dist_power_cell), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

        else:

            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_color, upper_color)
            contours = cv2.findContours(mask, cv2.RETR_TREE,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]
            '''
            TODO: Determine how reflective tapes will look through eye of god
            TODO: Calculate angle and distance to tape (tape angle calculation same as power cell angle calculation)
            '''

            largest_contour = np.array([0, 0])
            for c in contours:
                area = cv2.contourArea(c)
                if area > largest_box_area:
                    largest_contour = c
                    largest_box_area = area

            if (largest_box_area > minimum_box_area and not cam_mode):
                x, y, w, h = cv2.boundingRect(largest_contour)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                center_power_cell_x = 0.5 * (x + x + w)
                center_power_cell_y = 0.5 * (y + y + h)
                bool_power_cell = True

                angle_to_turn_power_cell = math.degrees(
                    math.atan((center_power_cell_x - (.5 * WIDTH - .5)) /
                              focal_length))
                angle_to_right_side = math.degrees(
                    math.atan(((x + w) - (.5 * WIDTH - .5)) / focal_length))
                angle_diff_right_center = angle_to_right_side - angle_to_turn_power_cell
                dist_power_cell = (.5842 / (2 * math.pi)) / math.tan(
                    math.radians(angle_diff_right_center))

                x_vec = dist_power_cell / math.sqrt(
                    1 + math.tan(math.radians(-angle_to_turn_power_cell))**2)
                y_vec = x_vec * math.tan(
                    math.radians((-angle_to_turn_power_cell)))
                if (angle_to_turn_power_cell < 0):
                    x_vec *= -1
                    y_vec *= -1
                new_x = camera_robot_x + x_vec
                new_y = camera_robot_y + y_vec
                angle_to_turn_power_cell_robot = -math.degrees(
                    math.atan2(new_y, new_x))

                cv2.putText(frame, str(angle_to_turn_power_cell_robot),
                            (100, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2)
                #cv2.putText(frame, str(new_y), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            # cv2.putText(frame, str(dist_power_cell), (100, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            elif (largest_box_area > 0 and cam_mode):
                x, y, w, h = cv2.boundingRect(largest_contour)
                M1 = cv2.moments(largest_contour)
                center_contour_x = int(M1['m10'] / M1['m00'])
                cv2.drawContours(frame, [largest_contour], 0, (0, 255, 0), 3)

                angle_to_turn_tape = math.degrees(
                    math.atan(
                        (center_contour_x - (.5 * WIDTH - .5)) / focal_length))
                angle_to_right_side = math.degrees(
                    math.atan(((x + w) - (.5 * WIDTH - .5)) / focal_length))
                angle_diff_right_center = abs(angle_to_right_side -
                                              angle_to_turn_tape)
                dist_tape = .99695 / math.tan(
                    math.radians(angle_diff_right_center))

                cv2.putText(frame, str(dist_tape), (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                bool_tape = True

        output.putFrame(frame)

        tape_found.setBoolean(bool_tape)

        if cam_mode:
            angle_to_turn_tape = (center_contour_x / WIDTH * FOV) - FOV / 2
            tape_angle.setDouble(angle_to_turn_tape)
            #TODO: Send distance to tape
        else:
            power_cell_angle.setDouble(angle_to_turn_power_cell_robot)
            power_cell_pos.setDoubleArray(
                [center_power_cell_x, center_power_cell_y])
            power_cell_exists.setBoolean(bool_power_cell)
            power_cell_dist.setDouble(dist_power_cell)
            power_cell_x.setDouble(new_x)
            power_cell_y.setDouble(new_y)
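
The geometry in Example #6 is the standard pinhole-camera model: the focal length in pixels is derived from the horizontal field of view, and the bearing to a pixel column is the arctangent of its offset from the image centre divided by that focal length. Here is a standalone sketch of just that conversion, reusing the 320x240 resolution and 68.5 degree FOV constants from above:

import math

WIDTH, HEIGHT = 320, 240
FOV = 68.5  # horizontal field of view in degrees

# focal length in pixels, derived from the horizontal FOV
focal_length = WIDTH / (2 * math.tan(math.radians(FOV / 2)))

def pixel_to_angle(x_pixel):
    """Bearing in degrees from the optical axis to a pixel column."""
    return math.degrees(math.atan((x_pixel - (0.5 * WIDTH - 0.5)) / focal_length))

print(pixel_to_angle(240.0))  # a column at x = 240 is about 19 degrees right of centre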
Example #7
def mainMain():
    #   network tables initialization
    NetworkTables.initialize(server=__IP)
    print("nt initialized")
    smartdash = NetworkTables.getTable("SmartDashboard")
    #    table = NetworkTables.getTable(__SENT_TABLE_NAME)
    subtable = smartdash.getSubTable(__ORIENTATION_TABLE_NAME)  # used by the loop below

    #   initialize cameras
    # CAMERA 1
    cam = UsbCamera('Cam 1 Front', 1)
    cam.setResolution(160, 120)
    cam.setExposureManual(0)
    cam.setBrightness(0)
    cam.setFPS(30)
    print("cam1 initialized")
    # CAMERA 2
    cam2 = UsbCamera('Cam 2 Back', 0)
    cam2.setResolution(160, 120)
    cam2.setExposureManual(0)
    cam2.setBrightness(0)
    cam2.setFPS(30)
    print("cam2 initialized")

    # EACH CAMERA STARTS CAPTURING VIDEO
    cs.startAutomaticCapture(camera=cam)
    cs.startAutomaticCapture(camera=cam2)

    # GETTING THE VIDEO STREAM OF EACH CAMERA
    vid = cs.getVideo(camera=cam)
    vid2 = cs.getVideo(camera=cam2)

    #   initialize outputstream, this is the place where we send frames to shuffleboard.
    output_stream = cs.putVideo('TOP CAMERAS', 160, 120)

    print("Running while loop")
    counter = 0
    while True:
        # get front or back value
        camera_value = subtable.getString("value", "FRONT")

        #   This condition selects the camera stream based on the robot's orientation.
        if camera_value == 'BACK':
            print("BACK")
            video = vid
        elif camera_value == 'FRONT':
            print("FRONT")
            video = vid2
        else:
            print('error, no camera value detected')
            continue

        #   get a frame from the selected stream (buffer is height x width x channels)
        img = np.zeros(shape=(120, 160, 3), dtype=np.uint8)
        frame_time, frame = video.grabFrame(img, 0.5)

        print("sending frame")
        # send frame to shuffleboard
        output_stream.putFrame(frame)

        print('Done.')
        sys.stdout.flush()
        # just counting how fast it is running and if it is running at all
        counter += 1
        print(counter)
Example #8
lower_yellow = np.array([20, 75, 100])
upper_yellow = np.array([40, 255, 255])

camWidth = 320
camHeight = 240
center_x = camWidth * .5
margin = 20

cs = CameraServer.getInstance()
cs.enableLogging()

cam1 = UsbCamera("cam1", 0)

cam1.setResolution(camWidth, camHeight)

cam1.setExposureManual(32)

cvSink = cs.getVideo(camera=cam1)

outputStream = cs.putVideo("Cam1", camWidth, camHeight)

frame = np.zeros(shape=(camHeight, camWidth, 3), dtype=np.uint8)


#Makes Raspberry Pi a listener to detect changes sent from roboRIO
def listener(table, key, value, isNew):
    print("value changed: key: '%s'; value: %s; isNew: %s" %
          (key, value, isNew))
    return value
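
Example #8 defines the listener callback but the fragment ends before it is registered. With pynetworktables it would typically be attached to a table along the lines sketched below; the table name and key mirror Example #6 and are assumptions here, not part of the original fragment.

from networktables import NetworkTablesInstance

ntinst = NetworkTablesInstance.getDefault()
ntinst.startClientTeam(1234)  # placeholder team number
sd = ntinst.getTable('SmartDashboard')

# invoke listener(table, key, value, isNew) whenever the 'cam' entry changes
sd.addEntryListener(listener, key='cam')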