Example #1
def push_network_table(table, return_list):
    '''
    Pushes a tuple to the network table;
    prints the table in debug mode.
    '''
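    # putNumberArray only updates the local table; flush() pushes the change to
    # the robot immediately instead of waiting for the periodic (~100 ms) NT sync.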
    table.putNumberArray(NETWORK_KEY, return_list)
    NetworkTables.flush()
Example #2
def main():
    import cscore
    import numpy as np  # needed for the preallocated frame buffer below
    from networktables import NetworkTables
    from time import monotonic

    cs = cscore.CameraServer.getInstance()
    camera = cs.startAutomaticCapture()  # TODO: specify path if multiple cameras
    camera.setVideoMode(cscore.VideoMode.PixelFormat.kYUYV, 320, 240, 50)
    sink = cs.getVideo(camera=camera)

    nt = NetworkTables.getTable('/vision')
    entry = nt.getEntry('info')

    # Allocating memory is expensive. Preallocate an array for the camera image.
    frame = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

    while True:
        time, frame = sink.grabFrame(frame)
        if time == 0:
            # TODO: handle error
            pass
        else:
            start_time = monotonic()
            info = process(frame)
            info.append(monotonic() - start_time)

            entry.setNumberArray(info)
            NetworkTables.flush()
Example #3
    def ramp_voltage_in_auto(self, initial_speed, ramp, rotate):

        logger.info("Activating flywheel at %.1f%%, adding %.3f per 50ms",
                    initial_speed, ramp)

        if rotate is None:
            rotate = self.STATE.angular_mode.get()
        self.rotate = rotate
        self.discard_data = False
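        # initial_speed appears to be specified in volts; dividing by the nominal
        # 12 V battery voltage converts it into the fractional output in autospeed.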
        self.autospeed = initial_speed / 12
        NetworkTables.flush()

        try:
            while True:
                # check the queue in case we switched out of auto mode
                qdata = self.get_nowait()
                if qdata != queue.Empty:
                    return qdata

                time.sleep(0.050)
                self.autospeed = self.autospeed + (ramp * 0.05) / 12

                NetworkTables.flush()
        finally:
            self.discard_data = True
            self.autospeed = 0
Example #4
def pipeline2(j):
    # x1 = 0
    # x2 = 0
    # y1 = 0
    # y2 = 0
    NetworkTables.flush()
    initial = cv2.getTickCount()
    capture = grabFeed()
    thresh = filter(capture, H_LOW.getDouble(47.5), S_LOW.getDouble(67.5), L_LOW.getDouble(70), H_HIGH.getDouble(72.5), 255, 255)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    gudContours, mix, max, miy, may = contourfilter(contours, capture, 0.1, 10, 75)
    contoursNew = cv2.drawContours(capture, gudContours, -1, (240, 0, 0), 3)
    # # hls = cv2.cvtColor(thresh, cv2.COLOR_HLS2BGR)
    # # gray = cv2.cvtColor(hls, cv2.COLOR_BGR2GRAY)
    # kernel = np.ones((5,5), np.uint8)
    # erosion = cv2.erode(thresh, kernel, iterations = 1)
    # gray = np.float32(erosion)
    # corners = cv2.goodFeaturesToTrack(gray, 8, 0.01, 10, useHarrisDetector=True)
    # print(mix, max, miy, may)
    # for corner in corners:
    #     x, y = corner.ravel()
    #     if(mix <= x <= max and miy <= y <= may):
    #         cv2.circle(contoursNew, (x, y), 5, (0, 0, 255), -1)
    # sleep(0.01)
    displayImage(contoursNew)
Example #5
    def execute(self):

        pid_z = 0
        if self.hold_heading:
            if self.momentum and abs(self.bno055.getHeadingRate()) < 0.005:
                self.momentum = False

            if self.vz not in [0.0, None]:
                self.momentum = True

            if not self.momentum:
                pid_z = self.heading_pid_out.output
            else:
                self.set_heading_sp_current()

        input_vz = 0
        if self.vz is not None:
            input_vz = self.vz
        vz = input_vz + pid_z

        for module in self.modules:
            module_dist = math.hypot(module.x_pos, module.y_pos)
            module_angle = math.atan2(module.y_pos, module.x_pos)
            # Calculate the additional vx and vy components for this module
            # required to achieve our desired angular velocity
            vz_x = -module_dist * vz * math.sin(module_angle)
            vz_y = module_dist * vz * math.cos(module_angle)
            # TODO: re enable this and test field-oriented mode
            if self.field_oriented:
                angle = self.bno055.getAngle()
                vx, vy = self.robot_orient(self.vx, self.vy, angle)
            else:
                vx, vy = self.vx, self.vy
            module.set_velocity(vx + vz_x, vy + vz_y)

        self.update_odometry()
        self.odometry_updated = False  # reset for next timestep

        SmartDashboard.putNumber('module_a_speed',
                                 self.modules[0].current_speed)
        SmartDashboard.putNumber('module_b_speed',
                                 self.modules[1].current_speed)
        SmartDashboard.putNumber('module_c_speed',
                                 self.modules[2].current_speed)
        SmartDashboard.putNumber('module_d_speed',
                                 self.modules[3].current_speed)
        SmartDashboard.putNumber('module_a_pos',
                                 self.modules[0].current_measured_azimuth)
        SmartDashboard.putNumber('module_b_pos',
                                 self.modules[1].current_measured_azimuth)
        SmartDashboard.putNumber('module_c_pos',
                                 self.modules[2].current_measured_azimuth)
        SmartDashboard.putNumber('module_d_pos',
                                 self.modules[3].current_measured_azimuth)
        SmartDashboard.putNumber('odometry_x', self.odometry_x)
        SmartDashboard.putNumber('odometry_y', self.odometry_y)
        SmartDashboard.putNumber('odometry_x_vel', self.odometry_x_vel)
        SmartDashboard.putNumber('odometry_y_vel', self.odometry_y_vel)
        SmartDashboard.putNumber('odometry_z_vel', self.odometry_z_vel)
        NetworkTables.flush()
Example #6
def loop():  # pragma: no cover
    import cscore as cs
    import numpy as np  # needed for the preallocated frame buffer below
    from networktables import NetworkTables
    from time import sleep, time as now

    nt = NetworkTables.getTable("/components/vision")

    width = 320
    height = 240
    fps = 25
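    # Bundle the capture settings into a tuple so it can be unpacked into
    # setVideoMode() and the CvSource constructor below.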
    videomode = cs.VideoMode.PixelFormat.kMJPEG, width, height, fps

    #front_cam_id = "/dev/v4l/by-id/usb-046d_C922_Pro_Stream_Webcam_5FC7BADF-video-index0"
    front_cam_id = "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920_332E8C9F-video-index0"

    front_camera = cs.UsbCamera("frontcam", front_cam_id)
    front_camera.setVideoMode(*videomode)

    front_cam_server = cs.MjpegServer("frontcamserver", 5803)
    front_cam_server.setSource(front_camera)

    cvsink = cs.CvSink("cvsink")
    cvsink.setSource(front_camera)

    cvSource = cs.CvSource("cvsource", *videomode)
    cvmjpegServer = cs.MjpegServer("cvhttpserver", 5804)
    cvmjpegServer.setSource(cvSource)

    # Images are big. Preallocate an array to fill the image with.
    frame = np.zeros(shape=(height, width, 3), dtype=np.uint8)

    # Set the exposure to something bogus, in an attempt to work around a kernel bug when restarting the vision code?
    front_camera.setExposureAuto()
    sleep(1)
    cvsink.grabFrame(frame)

    # Set the exposure of the front camera to something usable.
    front_camera.setExposureManual(0)
    sleep(1)

    while True:
        time, frame = cvsink.grabFrame(frame)
        if time == 0:
            # We got an error; report it through the output stream.
            cvSource.notifyError(cvsink.getError())
        else:
            t = now()
            x, img, num_targets, target_sep = find_target(frame)
            if num_targets > 0:
                nt.putNumber("x", x)
                nt.putNumber("time", t)
                nt.putNumber("num_targets", num_targets)
                nt.putNumber("target_sep", target_sep)
                nt.putNumber("dt", now() - t)
                NetworkTables.flush()
            cvSource.putFrame(img)
Example #7
    def send_results(self, results):
        """Sends results to the RIO depending on connecion type. Returns Nothing."""
        if self.test:
            pass
        elif self.using_nt:

            self.entry.setDoubleArray(results)
            NetworkTables.flush()
        else:
            self.sock_send.sendto(f"{results[0]},{results[1]}".encode("utf-8"),
                                  (PI_IP, UDP_SEND_PORT))
Example #8
    def execute(self):
        """Store the current odometry in the queue. Allows projection of target into current position."""
        self.odometry.appendleft(
            Odometry(
                self.chassis.odometry_x,
                self.chassis.odometry_y,
                self.chassis.imu.getAngle(),
                time.monotonic(),
            ))
        self.ping()
        self.pong()
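        # Presumably: shift the fiducial timestamp by the measured link latency to
        # estimate when the vision frame was captured, then record how stale it is.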
        vision_time = self.fiducial_time + self.latency
        self.processing_time = time.monotonic() - vision_time
        NetworkTables.flush()
Example #9
    def mainloop(self):
        while True:
            try:
                with self.lock:
                    self.run()
                if NT_AVAIL:
                    NetworkTables.flush()

            except (TypeError, AttributeError):
                LOGGER.debug("(Harmless?) Error during pipeline mainloop",
                             exc_info=True)
            except:  # todo: wildcard except
                LOGGER.exception("Error during pipeline mainloop")

            self.fps.update()
Example #10
def loop():
    cs = cscore.CameraServer.getInstance()
    camera = cs.startAutomaticCapture()  # TODO: specify path if multiple cameras
    camera.setVideoMode(cscore.VideoMode.PixelFormat.kYUYV, 320, 240, 60)
    sink = cs.getVideo(camera=camera)

    NetworkTables.initialize(server="roborio-4774-frc.local")
    nt = NetworkTables.getTable('/vision')
    entry = nt.getEntry('info')
    
    while True:
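        # Note: a fresh frame buffer is allocated on every iteration; preallocating
        # it once before the loop (as the other examples do) avoids this cost.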
        frame = np.zeros(shape=(frame_height, frame_width, 3), dtype=np.uint8)
        info = process(sink.grabFrame(frame)[1])
        info = np.array(info).flatten()
        entry.setNumberArray(info)
        NetworkTables.flush()
Example #11
def main():
    import cscore
    import numpy as np  # needed for the preallocated image buffers below
    from networktables import NetworkTables
    from time import monotonic

    cs = cscore.CameraServer.getInstance()
    camera = cs.startAutomaticCapture()  # TODO: specify path if multiple cameras
    camera.setVideoMode(cscore.VideoMode.PixelFormat.kYUYV, 320, 240, 30)

    camera.getProperty('vertical_flip').set(True)
    camera.getProperty('horizontal_flip').set(True)
    camera.getProperty('gain_automatic').set(False)
    camera.getProperty('gain').set(0)
    camera.getProperty('saturation').set(32)

    sink = cs.getVideo(camera=camera)
    source = cs.putVideo('cv', 320, 240)

    nt = NetworkTables.getTable('/vision')
    entry = nt.getEntry('info')

    # Allocating memory is expensive. Preallocate arrays for the camera images.
    frame = np.zeros(shape=(240, 320, 3), dtype=np.uint8)
    mask = np.zeros(shape=(240, 320), dtype=np.uint8)
    hsv = np.zeros(shape=(240, 320, 3), dtype=np.uint8)

    while True:
        time, frame = sink.grabFrame(frame)
        if time == 0:
            # error reading the frame, report to CV output stream
            source.notifyError(sink.getError())
        else:
            start_time = monotonic()
            info = process(frame, mask, hsv)
            info.append(monotonic() - start_time)

            entry.setNumberArray(info)
            NetworkTables.flush()

            source.putFrame(mask)
Example #12
    def mainloop(self):
        while True:
            had_problem = False
            try:
                with self.lock:
                    self.run()
                if NT_AVAIL:
                    NetworkTables.flush()

            except (TypeError, AttributeError):
                LOGGER.debug("(Harmless?) Error during pipeline mainloop",
                             exc_info=True)
                had_problem = True
            except:  # todo: wildcard except
                LOGGER.exception("Error during pipeline mainloop")
                had_problem = True

            if had_problem or self.broken:
                # avoid clogging logs with errors
                time.sleep(0.5)
                self.fps.reset()
            self.fps.update()
Example #13
    def ramp_voltage_in_auto(self, initial_speed, ramp):

        logger.info('Activating robot at %.1f%%, adding %.3f per 50ms',
                    initial_speed, ramp)

        self.discard_data = False
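        # initial_speed appears to be specified in volts and is converted into a
        # fraction of the nominal 12 V battery voltage for autospeed.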
        self.autospeed = initial_speed / 12
        NetworkTables.flush()

        try:
            while True:
                # check the queue in case we switched out of auto mode
                qdata = self.get_nowait()
                if qdata != queue.Empty:
                    return qdata

                time.sleep(0.050)
                self.autospeed = self.autospeed + (ramp * 0.05) / 12

                NetworkTables.flush()
        finally:
            self.discard_data = True
            self.autospeed = 0
Example #14
    def process_image(self):
        '''Run the processor on the image to find the target'''

        # rvec, tvec return as None if no target found
        rvec, tvec = self.peg_processor.process_image(self.camera_frame)

        # Send the results as one big array in order to guarantee that the results
        #  all arrive at the RoboRio at the same time
        # Value is (Found, tvec, rvec) as a flat array. All values are floating point (required by NT).

        if rvec is None or tvec is None:
            res = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        else:
            res = [1.0, ]       # Found
            res.extend(tvec)
            res.extend(rvec)
        self.target_info = res

        # Try to force an update of NT to the RoboRio. Docs say this may be rate-limited,
        #  so it might not happen every call.
        NetworkTables.flush()

        return
Example #15
def pipeline1():
    NetworkTables.flush()
    initial = cv2.getTickCount()
    capture = grabFeed()
    thresh = filter(capture, 50, 70, 70, 70, 255, 255)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)

    gudContours = contourfilter(contours, capture)

    pairs = findPairs(capture, gudContours)

    i = int(counter.getDouble(0.0))
    try:
        xavg = pairs[i][2]
        xdist = pairs[i][4]
        ratio = pairs[i][10]
        width = pairs[i][11]
        height = pairs[i][12]
        legitheight = float(502 * 5.75) / height
        if (ratio > 1):
            angle = math.degrees(math.acos(1 / ratio))
            isLeft = True
        else:
            angle = math.degrees(math.acos(ratio))
            isLeft = False
    except:
        if (len(pairs) > 0):
            i = len(pairs) - 1
        else:
            i = 0
        xavg = 0
        xdist = -1
        width = 0
        height = 0
        angle = 0
        ratio = 0
        xcoord = 0
        ycoord = 0
        zcoord = 0
        legitheight = 0
        isLeft = False
    if (len(pairs) > 0):
        # widthDev.setDouble(height1)
        # heightDev.setDouble(wide1)
        # width2Dev.setDouble(height2)
        # height2Dev.setDouble(wide2)
        # print(wide1 - wide2)
        # print(height1 - height2)
        distX.setDouble(xdist)
        angleDev.setDouble(angle)
        centerX.setDouble(xavg)
        isLeftDev.setBoolean(isLeft)
        targetPresent.setBoolean(True)
    else:
        # widthDev.setDouble(0)
        # heightDev.setDouble(0)
        angleDev.setDouble(0.0)
        distX.setDouble(-1)
        centerX.setDouble(0)
        targetPresent.setBoolean(False)
    #print("ratio % d" % ratio)
    counter.setDouble(i)
    contoursNew = cv2.drawContours(capture, gudContours, -1, (240, 0, 0), 3)
    # cv2.circle(contoursNew, (int(xavg), int(yavg)), 5, (0, 255, 0))
    cv2.line(contoursNew, (int(xavg), 0), (int(xavg), 480), (255, 0, 0), 3)
    cv2.line(contoursNew, (320, 0), (320, 480), (0, 0, 255), 3)
    if (threshNet.getBoolean(False)):
        finalFrame = thresh
    else:
        finalFrame = contoursNew
    #cv2.imshow("Team3482CV", finalFrame)
    #camera.putFrame(finalFrame)
    displayImage(finalFrame)
    return (ratio, legitheight, angle)
Example #16
def pipeline2():
    # x1 = 0
    # x2 = 0
    # y1 = 0
    # y2 = 0
    NetworkTables.flush()
    initial = cv2.getTickCount()
    capture = grabFeed()
    thresh = filter(capture, 50, 70, 70, 70, 255, 255)
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)

    gudContours = contourfilter(contours, capture)

    pairs = findPairs(capture, gudContours)
    print(pairs)
    i = int(counter.getDouble(0.0))
    try:
        xavg = pairs[i][2]
        yavg = pairs[i][3]
        xdist = pairs[i][4]
        xcoord = pairs[i][15]
        ycoord = pairs[i][16]
        zcoord = pairs[i][17]
    except:
        if (len(pairs) > 0):
            i = len(pairs) - 1
        else:
            i = 0
        print("lamo you tard")
        xavg = 0
        yavg = 0
        xdist = -1
        xcoord = 0
        ycoord = 0
        zcoord = 0
    if (len(pairs) > 0):
        distX.setDouble(xdist)
        centerX.setDouble(xavg)
        #centerY.setDouble(yavg)
        targetPresent.setBoolean(True)
    else:
        distX.setDouble(-1)
        centerX.setDouble(0)
        #centerY.setDouble(0)
        targetPresent.setBoolean(False)
    distX.setDouble(xdist)
    counter.setDouble(i)
    fps = cam.get(cv2.CAP_PROP_FPS)
    # print(fps)
    contoursNew = cv2.drawContours(capture, gudContours, -1, (240, 0, 0), 3)
    # cv2.circle(contoursNew, (int(xavg), int(yavg)), 5, (0, 255, 0))
    cv2.line(contoursNew, (int(xavg), 0), (int(xavg), IMAGE_HEIGHT),
             (255, 0, 0), 3)
    cv2.line(contoursNew, (int(IMAGE_WIDTH / 2), 0),
             (int(IMAGE_WIDTH / 2), IMAGE_HEIGHT), (0, 0, 255), 3)
    displayImage(contoursNew)
    final = cv2.getTickCount()
    #fps = float((final - initial))/cv2.getTickFrequency()
    #print("FPS: %d" % fps)
    print(xcoord, ycoord, zcoord)
    return (xcoord, ycoord, zcoord)
Example #17
    def runTest(self, name, initial_speed, ramp, finished, rotate=None):
        try:
            # Initialize the robot commanded speed to 0
            self.autospeed = 0
            self.discard_data = True
            self.STATE.postTask(lambda: messagebox.showinfo(
                "Running " + name,
                "Please enable the robot in autonomous mode, and then " +
                "disable it before it runs out of space.\n" +
                "Note: The robot will continue to move until you disable it - "
                +
                "It is your responsibility to ensure it does not hit anything!",
                parent=self.STATE.mainGUI,
            ))

            # Wait for robot to signal that it entered autonomous mode
            with self.lock:
                self.lock.wait_for(lambda: self.mode == "auto")

            # Ramp the voltage at the specified rate
            data = self.ramp_voltage_in_auto(initial_speed, ramp, rotate)
            if data in ("connected", "disconnected"):
                self.STATE.postTask(lambda: messagebox.showerror(
                    "Error!", "NT disconnected", parent=self.STATE.mainGUI))
                return

            # wait for robot to say it is disabled
            with self.lock:
                self.lock.wait_for(lambda: self.mode == "disabled")

            # tries to retrieve disabled data
            starttime = time.time()
            while not self.data and time.time() - starttime < timeout:
                NetworkTables.flush()
                time.sleep(0.1)

            if not self.data:
                logger.info("could not receive data")
                self.STATE.postTask(lambda: messagebox.showerror(
                    "Timed out while trying to receive NT data",
                    "Maybe try running the test again?",
                    parent=self.STATE.mainGUI,
                ))
                return
            self.discard_data = True

            # deserializes data: "1, 2, ..." -> [[1,2,...],...]
            data = self.data[0].split(", ")  # turns the string sent into list
            data = np.array([float(s) for s in data[0:-1]
                             ])  # turns the string values into floats
            data = (np.reshape(
                data, (-1, num_columns))).tolist()  # converts list to 2d array

            # output sanity check
            if len(data) < 3:
                self.STATE.postTask(lambda: messagebox.showwarning(
                    "Warning!",
                    "Last run produced an unusually small amount of data",
                    parent=self.STATE.mainGUI,
                ))
            else:
                if name == "track-width":
                    gyro_distance = data[-1][GYRO_ANGLE_COL] - data[0][
                        GYRO_ANGLE_COL]
                    gyro_distance = round(
                        (gyro_distance * Units.RADIANS.unit).to(
                            Units.DEGREES.unit), 3)
                    self.STATE.postTask(lambda: messagebox.showinfo(
                        name + " Complete",
                        f"The robot reported rotating the following angle:\n{gyro_distance}\n"
                        +
                        "If that seems wrong, you should change the gyro calibration "
                        + "in the robot program or check your gyro setup",
                        parent=self.STATE.mainGUI,
                    ))
                else:
                    left_distance = (data[-1][L_ENCODER_P_COL] -
                                     data[0][L_ENCODER_P_COL]
                                     ) * self.STATE.units_per_rot.get()
                    right_distance = (data[-1][R_ENCODER_P_COL] -
                                      data[0][R_ENCODER_P_COL]
                                      ) * self.STATE.units_per_rot.get()

                    self.STATE.postTask(lambda: messagebox.showinfo(
                        name + " Complete",
                        "The robot reported traveling the following distance:\n"
                        + "Left:  %.3f %s" %
                        (left_distance, self.STATE.units.get()
                         ) + "\n" + "Right: %.3f %s" %
                        (right_distance, self.STATE.units.get()) + "\n" +
                        "If that seems wrong, you should change the encoder calibration "
                        + "in the robot program or fix your encoders!",
                        parent=self.STATE.mainGUI,
                    ))

            self.stored_data[name] = data

        finally:
            self.autospeed = 0
            self.STATE.postTask(finished)
Example #18
def main() -> NoReturn:
    killer = GracefulKiller()

    cs.control_led(cs.LEDPreset.SLOW_BLINK)
    # NetworkTables connections appear to be async, so
    # we should set this up first to minimize startup time.
    if c.NT_OUTPUT:
        print("connecting to networktables")
        NetworkTables.initialize(server=c.NT_IP)
        sd = NetworkTables.getTable("SmartDashboard")

    cap = open_camera()
    cs.load_settings(c.SETTINGS_FILE)

    if c.RECORD:
        print("starting recording...")
        record = cv2.VideoWriter(
            vu.generate_filename(), cv2.VideoWriter_fourcc(*"MJPG"),
            c.CAMERA_FPS,
            (int(c.CAMERA_RESOLUTION[0]), int(c.CAMERA_RESOLUTION[1])))

    # if c.STREAM:
    #     print("starting video stream...")
    #     server_queue = Queue(maxsize=2)
    #     vu.set_frame_queue(server_queue)
    #     server = HTTPServer(("0.0.0.0", 1190), vu.MJPGServer)
    #     server_thread = Thread(target=server.serve_forever)
    #     server_thread.daemon = True
    #     server_thread.start()

    if c.NT_OUTPUT:
        if not NetworkTables.isConnected():
            cs.control_led(cs.LEDPreset.FAST_BLINK)
            print("waiting for networktables...")
            while not NetworkTables.isConnected():
                time.sleep(0.1)  # short sleep prevents flooding the CPU while waiting
        print("networktables ready")
        # initialize NT variables because Shuffleboard gets angry if we don't
        # noinspection PyUnboundLocalVariable
        sd.putNumber("vision_angle",
                     999)  # this will never be legitimately returned
        sd.putBoolean("vision_shutdown", False)

    processing_module = importlib.import_module("processors." + c.PROCESSOR)
    processor = processing_module.Processor()

    cs.control_led(cs.LEDPreset.SOLID)
    print("starting...")

    rval = True

    try:
        while rval:
            if killer.kill_now:
                raise KeyboardInterrupt

            if c.RELOAD_CAMERA:
                print("reloading camera...")
                cap.release()
                cap = open_camera()
                c.RELOAD_CAMERA = False
            rval, frame = cap.read()
            data, processed_frame = processor.process(frame,
                                                      annotate=c.ANNOTATE)

            if c.WINDOW:
                cv2.imshow('k', processed_frame)
                cv2.waitKey(1)

            if c.NT_OUTPUT:
                if len(data) > 0:
                    sd.putNumber('vision_angle', data[0].angle)
                    NetworkTables.flush()  # makes NT update immediately
                if sd.getBoolean("vision_shutdown", False):
                    raise KeyboardInterrupt

            if c.RECORD:
                # noinspection PyUnboundLocalVariable
                record.write(processed_frame)

            # if c.STREAM:
            #     # noinspection PyUnboundLocalVariable
            #     Process(target=insert_into_server_queue, args=[server_queue, processed_frame]).start()
    except KeyboardInterrupt:
        print("wrapping up!")
    finally:
        if c.RECORD:
            record.release()
        cap.release()
        if c.NT_OUTPUT:
            NetworkTables.shutdown()
        # if c.STREAM:
        #     # noinspection PyUnboundLocalVariable
        #     # server_thread.something()?
        #     pass
        cs.control_led(cs.LEDPreset.OFF)
Example #19
import numpy as np
import cv2
from networktables import NetworkTables
import logging

logging.basicConfig(level=logging.DEBUG)

cap = cv2.VideoCapture(1)  # Set up the webcam for the video stream

# IP address for NetworkTables; the roboRIO should be at "10.25.82.2"
ip = "10.25.82.2"

NetworkTables.initialize(server=ip)
NetworkTables.flush()
nt = NetworkTables.getTable("ColorWheel")

# Size of the square region of interest where the color will be determined
roi_size = 10

######################################
#          CALIBRATION AREA          #
######################################
# Use this area to calibrate your colors

blue = np.array([240, 150, 38])
green = np.array([112, 140, 56])
red = np.array([34, 53, 235])
yellow = np.array([37, 178, 211])

########################################
########################################
Example #20
    def run(self):
        '''Main loop. Read camera, process the image, send to the MJPEG server'''

        frame_num = 0
        errors = 0

        fps_count = 0
        fps_startt = time.time()
        imgproc_nettime = 0

        # NetworkTables.addEntryListener(self.active_mode_changed)
        old_ts = datetime.timestamp(datetime.now())
        mode_idx = 2
        mode = ["shooter", "intake", "ballfinder", "goalfinder"]
        mode_period = [5, 5, 40, 40]

        while True:
            try:
                # Check whether DS has asked for a different camera
                # ntmode = self.nt_active_mode  # temp, for efficiency
                # ntmode = self.mode_chooser_ctrl.getSelected()
                # if ntmode != self.active_mode:
                #     self.switch_mode(ntmode)

                # if self.camera_frame is None:
                # #     self.preallocate_arrays()

                # now = datetime.now()
                # timestamp = datetime.timestamp(now)
                # if timestamp - old_ts > mode_period[(mode_idx -1 ) % 4] :
                #     self.switch_mode(mode[mode_idx % 4])
                #     mode_idx += 1
                #     old_ts = timestamp

                if self.tuning:
                    self.update_parameters()

                # Tell the CvReader to grab a frame from the camera and put it
                # in the source image.  Frametime==0 on error
                frametime, self.camera_frame = self.current_reader.next_frame()
                frame_num += 1

                imgproc_startt = time.time()

                if frametime == 0:
                    # ERROR!!
                    self.error_msg = self.current_reader.sink.getError()

                    if errors < 10:
                        errors += 1
                    else:  # if 10 or more iterations without any stream switch cameras
                        logging.warning(
                            self.active_camera +
                            " camera is no longer streaming. Switching cameras..."
                        )
                        self.switch_mode(self.mode_after_error())
                        errors = 0

                    target_res = [
                        time.time(),
                    ]
                    target_res.extend(5 * [
                        0.0,
                    ])
                else:
                    self.error_msg = None
                    errors = 0

                    if self.image_writer_state:
                        self.image_writer.setImage(self.camera_frame)

                    # frametime = time.time() * 1e8  (ie in 1/100 microseconds)
                    # convert frametime to seconds to use as the heartbeat sent to the RoboRio
                    target_res = [
                        1e-8 * frametime,
                    ]

                    proc_result = self.process_image()
                    if proc_result[2] != 0 and proc_result[2] != -1:
                        logging.info(
                            "Process image result '{}' ".format(proc_result))
                    target_res.extend(proc_result)

                # Send the results as one big array in order to guarantee that the results
                #  all arrive at the RoboRio at the same time
                # Value is (Timestamp, Found, Mode, distance, angle1, angle2) as a flat array.
                #  All values are floating point (required by NT).
                self.target_info = target_res

                # Try to force an update of NT to the RoboRio. Docs say this may be rate-limited,
                #  so it might not happen every call.
                NetworkTables.flush()

                # Done. Output the marked up image, if needed
                # Note this can also be done via the URL, but this is more efficient
                now = time.time()
                deltat = now - self.previous_output_time
                min_deltat = 1.0 / self.output_fps_limit
                if deltat >= min_deltat:
                    self.prepare_output_image()
                    self.output_stream.putFrame(self.output_frame)
                    self.previous_output_time = now

                if frame_num == 30:
                    # This is a bit stupid, but you need to poke the camera *after* the first
                    #  bunch of frames has been collected.
                    self.switch_mode(self.initial_mode)

                fps_count += 1
                imgproc_nettime += now - imgproc_startt
                if fps_count >= 150:
                    endt = time.time()
                    dt = endt - fps_startt
                    # logging.info("{0} frames in {1:.3f} seconds = {2:.2f} FPS".format(fps_count, dt, fps_count/dt))
                    # logging.info("Image processing time = {0:.2f} msec/frame".format(1000.0 * imgproc_nettime / fps_count))
                    fps_count = 0
                    fps_startt = endt
                    imgproc_nettime = 0

            except Exception as e:
                # major exception. Try to keep going
                logging.error('Caught general exception: %s', e)

        return
Example #21
    def execute(self):

        pid_z = 0
        if self.hold_heading:
            if self.momentum and abs(self.bno055.getHeadingRate()) < 0.005:
                self.momentum = False
            if self.vz not in [0.0, None]:
                self.momentum = True
            if self.vz is None:
                self.momentum = False

            if not self.momentum:
                pid_z = self.heading_pid.get()
            else:
                self.set_heading_sp_current()
        input_vz = 0
        if self.vz is not None:
            input_vz = self.vz
        vz = input_vz + pid_z

        for module in self.modules:
            module_dist = math.hypot(module.x_pos, module.y_pos)
            module_angle = math.atan2(module.y_pos, module.x_pos)
            # Calculate the additional vx and vy components for this module
            # required to achieve our desired angular velocity
            vz_x = -module_dist*vz*math.sin(module_angle)
            vz_y = module_dist*vz*math.cos(module_angle)
            # TODO: re enable this and test field-oriented mode
            if self.field_oriented:
                angle = self.bno055.getAngle()
                vx, vy = self.robot_orient(self.vx, self.vy, angle)
            else:
                vx, vy = self.vx, self.vy
            module.set_velocity(vx+vz_x, vy+vz_y)

        odometry_outputs = np.zeros((8, 1))
        velocity_outputs = np.zeros((8, 1))

        heading = self.bno055.getAngle()
        heading_delta = constrain_angle(heading - self.last_heading)
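        # Use the heading at the midpoint of the timestep when integrating the
        # odometry deltas, approximating the average heading over the interval.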
        timestep_average_heading = heading - heading_delta / 2

        for i, module in enumerate(self.modules):
            odometry_x, odometry_y = module.get_cartesian_delta()
            velocity_x, velocity_y = module.get_cartesian_vel()
            odometry_outputs[i*2, 0] = odometry_x
            odometry_outputs[i*2+1, 0] = odometry_y
            velocity_outputs[i*2, 0] = velocity_x
            velocity_outputs[i*2+1, 0] = velocity_y
            module.reset_encoder_delta()

        z_adj_delta = self.z_axis_adjustment * heading_delta
        z_adj_vel = self.z_axis_adjustment * self.bno055.getHeadingRate()

        odometry_outputs = odometry_outputs - z_adj_delta
        velocity_outputs = velocity_outputs - z_adj_vel

        delta_x, delta_y = self.robot_movement_from_odometry(odometry_outputs, timestep_average_heading)
        v_x, v_y = self.robot_movement_from_odometry(velocity_outputs, heading)

        self.odometry_x += delta_x
        self.odometry_y += delta_y
        self.odometry_x_vel = v_x
        self.odometry_y_vel = v_y

        SmartDashboard.putNumber('module_a_speed', self.modules[0].current_speed)
        SmartDashboard.putNumber('module_b_speed', self.modules[1].current_speed)
        SmartDashboard.putNumber('module_c_speed', self.modules[2].current_speed)
        SmartDashboard.putNumber('module_d_speed', self.modules[3].current_speed)
        SmartDashboard.putNumber('module_a_pos', self.modules[0].current_measured_azimuth)
        SmartDashboard.putNumber('module_b_pos', self.modules[1].current_measured_azimuth)
        SmartDashboard.putNumber('module_c_pos', self.modules[2].current_measured_azimuth)
        SmartDashboard.putNumber('module_d_pos', self.modules[3].current_measured_azimuth)
        SmartDashboard.putNumber('odometry_x', self.odometry_x)
        SmartDashboard.putNumber('odometry_y', self.odometry_y)
        SmartDashboard.putNumber('odometry_delta_x', delta_x)
        SmartDashboard.putNumber('odometry_delta_y', delta_y)
        SmartDashboard.putNumber('odometry_x_vel', self.odometry_x_vel)
        SmartDashboard.putNumber('odometry_y_vel', self.odometry_y_vel)
        SmartDashboard.putNumber('imu_heading', heading)
        SmartDashboard.putNumber('heading_delta', heading_delta)
        SmartDashboard.putNumber('average_heading', timestep_average_heading)
        NetworkTables.flush()

        self.last_heading = heading
Example #22
    def execute(self):
        if self.enabled:
            self.chassis.field_oriented = True
            self.chassis.hold_heading = False

            odom_pos = np.array([self.chassis.odometry_x, self.chassis.odometry_y])
            odom_vel = np.array([self.chassis.odometry_x_vel, self.chassis.odometry_y_vel])

            speed = np.linalg.norm(odom_vel)
            # print("Odom speed %s" % speed)

            direction_of_motion, speed_sp, next_seg, over = self.pursuit.get_output(odom_pos, speed)

            if next_seg:
                self.update_heading_profile()
            seg_end = self.pursuit.waypoints_xy[self.waypoint_idx+1]
            seg_end_dist = (np.linalg.norm(self.pursuit.segment)
                            - np.linalg.norm(seg_end - odom_pos))
            if seg_end_dist < 0:
                seg_end_dist = 0

            if self.heading_profile:
                heading_seg = self.heading_profile.pop(0)
            else:
                heading_seg = (self.waypoints[self.waypoint_idx+1][2], 0, 0)

            # get the current heading of the robot since last reset
            # getRawHeading has been swapped for getAngle
            heading = self.imu.getAngle()
            # calculate the heading error
            heading_error = heading_seg[0] - heading
            # wrap heading error, stops jumping by 2 pi from the imu
            heading_error = math.atan2(math.sin(heading_error),
                                       math.cos(heading_error))
            # sum the heading error over the timestep
            self.heading_error_i += heading_error
            # calculate the derivative of the heading error
            d_heading_error = (heading_error - self.last_heading_error)

            # generate the rotational output to the chassis
            heading_output = (
                self.kPh * heading_error + self.kVh * heading_seg[1]
                + self.heading_error_i*self.kIh + d_heading_error*self.kDh)

            # store the current errors to be used to compute the
            # derivatives in the next timestep
            self.last_heading_error = heading_error

            vx = speed_sp * math.cos(direction_of_motion)
            vy = speed_sp * math.sin(direction_of_motion)

            # self.chassis.set_velocity_heading(vx, vy, self.waypoints[self.waypoint_idx+1][2])
            self.chassis.set_inputs(vx, vy, heading_output)

            SmartDashboard.putNumber('vector_pursuit_heading', direction_of_motion)
            SmartDashboard.putNumber('vector_pursuit_speed', speed_sp)
            NetworkTables.flush()

            if over:
                print("Motion over")
                self.enabled = False
                if self.waypoints[-1][3] == 0:
                    self.chassis.set_inputs(0, 0, 0)
Example #23
    def flush(self):
        NetworkTables.flush()
Example #24
    def run(self):
        '''Main loop. Read camera, process the image, send to the MJPEG server'''

        frame_num = 0
        errors = 0
        while True:
            try:
                # Check whether DS has asked for a different camera
                ntmode = self.nt_active_mode  # temp, for efficiency
                if ntmode != self.active_mode:
                    self.switch_mode(ntmode)

                if self.camera_frame is None:
                    self.preallocate_arrays()

                if self.tuning:
                    self.update_parameters()

                # Tell the CvSink to grab a frame from the camera and put it
                # in the source image.  Frametime==0 on error
                frametime, self.camera_frame = self.current_sink.grabFrame(
                    self.camera_frame)
                frame_num += 1

                if frametime == 0:
                    # ERROR!!
                    self.error_msg = self.current_sink.getError()

                    if errors < 10:
                        errors += 1
                    else:  # if 10 or more iterations without any stream switch cameras
                        logging.warning(
                            self.active_camera +
                            " camera is no longer streaming. Switching cameras..."
                        )
                        if self.active_camera == 'intake':
                            self.switch_mode('driver')
                        else:
                            self.switch_mode('intake')
                        errors = 0

                    target_res = [
                        time.time(),
                    ]
                    target_res.extend(5 * [
                        0.0,
                    ])
                else:
                    self.error_msg = None
                    errors = 0

                    if self.image_writer_state:
                        self.image_writer.setImage(self.camera_frame)

                    # frametime = time.time() * 1e8  (ie in 1/100 microseconds)
                    # convert frametime to seconds to use as the heartbeat sent to the RoboRio
                    target_res = [
                        1e-8 * frametime,
                    ]

                    proc_result = self.process_image()
                    target_res.extend(proc_result)

                # Send the results as one big array in order to guarantee that the results
                #  all arrive at the RoboRio at the same time
                # Value is (Timestamp, Found, Mode, distance, angle1, angle2) as a flat array.
                #  All values are floating point (required by NT).
                self.target_info = target_res

                # Try to force an update of NT to the RoboRio. Docs say this may be rate-limited,
                #  so it might not happen every call.
                NetworkTables.flush()

                # Done. Output the marked up image, if needed
                now = time.time()
                deltat = now - self.previous_output_time
                min_deltat = 1.0 / self.output_fps_limit
                if deltat >= min_deltat:
                    self.prepare_output_image()

                    self.output_stream.putFrame(self.output_frame)
                    self.previous_output_time = now

                if frame_num == 30:
                    # This is a bit stupid, but you need to poke the camera *after* the first
                    #  bunch of frames has been collected.
                    self.switch_mode(VisionServer2018.INITIAL_MODE)

            except Exception as e:
                # major exception. Try to keep going
                logging.error('Caught general exception: %s', e)

        return