コード例 #1
0
    def __init__(self, pitch, width=640, height=480):
        """Set up interactive calibration state for the given pitch.

        Args:
            pitch:  pitch index, forwarded to the Camera.
            width:  frame width in pixels (default 640).
            height: frame height in pixels (default 480).
        """
        self.width = width
        self.height = height
        self.pitch = pitch
        self.new_polygon = True
        # NOTE(review): polygon and polygons alias the SAME list object.
        self.polygon = self.polygons = []
        self.points = []

        self.camera = Camera(pitch=pitch)

        # Region names the user will be asked to outline.
        keys = [
            'outline', 'Zone_0', 'Zone_1', 'Zone_2', 'Zone_3', 'pitch_inner',
            "Goal_0", "Goal_1"
        ]
        # NOTE(review): data and drawing alias the SAME dict, so the loop
        # below assigns each key twice; the second assignment is redundant.
        self.data = self.drawing = {}

        # Create keys
        for key in keys:
            self.data[key] = []
            self.drawing[key] = []

        self.color = RED
コード例 #2
0
    def __init__(self):
        """ initializes the thread without starting to capture the data

            Raises:
                OSError: in case the data or image directory cannot be created
        """

        super().__init__()

        self.active = True
        self.driver = Driver.instance()
        self.camera = Camera.instance()

        # define directories and file paths
        date_str = datetime.today().strftime("%Y-%m-%d-%H-%M-%S")
        self.log_dir = f"{const.Storage.DATA}/{date_str}"
        self.img_dir = f"{self.log_dir}/img/"
        self.log_path = f"{self.log_dir}/log.csv"
        self.img_extension = "npy"

        # create the whole directory chain at once; os.makedirs raises
        # OSError itself on failure, which replaces the previous redundant
        # asserts (asserts are stripped when running with -O)
        os.makedirs(self.img_dir)
コード例 #3
0
    def follow_road(self):
        """ drives autonomously without explicit instructions by feeding the
            camera image through a convolutional neural network that predicts
            the next steering angle
            NOTE this mode has no particular destination and therefore does
            not terminate unless the driver is stopped explicitly
        """

        model = SteeringNet()
        model.load(const.Storage.DEFAULT_STEERING_MODEL)

        self.start_driving()

        with Camera.instance() as camera:
            # keep steering for as long as a recorder thread is attached
            while self.recorder_thread is not None:
                self.angle = model.predict(camera.image, self.angle)
                time.sleep(0.25)

        self.stop_driving()
コード例 #4
0
    def run(self):
        """ starts capturing the data (image, angle, previous angle)

            Raises:
                OSError: in case the log file cannot be created
                OSError: in case the log file cannot be opened after being created
        """

        with Camera.instance() as camera:
            try:
                # create log file and write headers
                # newline="" stops the csv module from writing blank rows on
                # platforms that translate line endings (see csv docs)
                with open(self.log_path, "w+", newline="") as log:
                    writer = csv.writer(log)
                    writer.writerow(["image", "angle", "previous_angle"])
            except OSError as error:
                raise OSError("The log file could not be created.") from error

            previous_angle = 0.0
            while self.active:
                if camera.image is None:
                    # no frame yet: wait instead of busy-spinning the CPU
                    time.sleep(self.CAPTURE_INTERVAL)
                    continue

                # save the current frame as a raw numpy array
                img_filename = datetime.today().strftime("%H-%M-%S-%f") + "." + self.img_extension
                np.save(self.img_dir + img_filename, camera.image)

                try:
                    # append one sample (image reference, angles) to the log
                    with open(self.log_path, "a", newline="") as log:
                        writer = csv.writer(log)
                        angle = str(round(self.driver.angle, 3))
                        previous_angle = str(previous_angle)
                        writer.writerow([img_filename, angle, previous_angle])
                except OSError as error:
                    raise OSError("The log file could not be opened.") from error

                previous_angle = angle  # update previous angle for next loop
                time.sleep(self.CAPTURE_INTERVAL)
コード例 #5
0
ファイル: snatcher.py プロジェクト: ut-ras/r5-2019
drivers.init()
drivers.LED4.off()

# Read this robot's identity from disk.
print("Self-identifying...")
identity = None
with open("identity.dat", "r") as file:
    identity = file.readlines()[0].strip()

print(identity,
      "online. Pray to Lafayette Official God.\nWaiting for start signal")
# NOTE(review): this busy-wait burns a full CPU core while waiting for the
# "go" sentinel file; a short sleep inside the loop would be kinder.
while not os.path.isfile("go"):
    pass
os.remove("go")

print("Go time!")
camera = Camera()
mod = VisionModule(width=640, height=480)
collecting = False

# Negative distance presumably drives in reverse — confirm sign convention.
drivers.move(drivers.RobotState(drivers.DRIVE, -in_to_cm(6)))


def sweep():
    """Search for a cube and approach it in two refinement steps.

    Turns toward a detected block, closes part of the measured distance,
    re-acquires it for a better fix, then turns pi radians and drives.
    Gives up early (returns None) if no block is seen at either step.
    """
    cube = turn_to_block()
    if cube is None:  # identity check instead of `!= None` (PEP 8)
        return
    drivers.move(drivers.RobotState(drivers.DRIVE, -cube.dist / 3))

    cube = turn_to_block()
    if cube is None:
        return
    drivers.move(drivers.RobotState(drivers.DRIVE, -cube.dist / 5))
    drivers.move(drivers.RobotState(drivers.TURN, math.pi))
    drivers.move(drivers.RobotState(drivers.DRIVE, 10))
コード例 #6
0
ファイル: vision_wrapper.py プロジェクト: jsren/sdp-vision
    def __init__(self, pitch, color_settings, our_name, robot_details,
                 enable_gui=False, pc_name=None, robots_on_pitch=list(), goal=None):
        """
        Entry point for the SDP system.

        Params:
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [int or string] color_settings  [0, small, 1, big] - 0 or small for pitch color settings with small numbers (previously - pitch 0)
                                            1 or big - pitch color settings with big numbers (previously - pitch 1)
            [string] colour                 The colour of our teams plates
            [string] our_name               our name
            [int] video_port                port number for the camera

            [boolean] enable_gui              Does not draw the normal image to leave space for GUI.
                                            Also forces the trackers to return circle contour positons in addition to robot details.
            [string] pc_name                Name of the PC to load the files from (BUT NOT SAVE TO). Will default to local machine if not specified.
        """
        # NOTE(review): robots_on_pitch=list() is a mutable default argument;
        # it is only read below (membership test), so the usual shared-state
        # bug does not bite here, but a tuple default would be safer.
        pitch = int(pitch)
        assert pitch in [0, 1]

        if goal: assert goal in ["left", "right"]

        self.pitch = pitch
        self.target_goal = goal
        self.color_settings = color_settings

        # Machine-specific calibration and video settings (created on demand).
        self.calibration = Configuration.read_calibration(
            pc_name, create_if_missing=True)

        self.video_settings = Configuration.read_video_config(
            pc_name, create_if_missing=True)

        # Set up camera for frames
        self.camera = Camera(pitch)
        self.camera.start_capture()
        self.frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center()

        # Set up vision
        self.trackers = list()

        self.world_objects = dict()

        # Get machine-specific calibration

        self.enable_gui = enable_gui
        self.gui = None

        # Initialize robots
        self.robots = []

        # Initialize ball states - which robot had the ball previously.
        self.ball_median_size = 5
        self.ball_index = 0
        self.ball_states = [None] * self.ball_median_size

        self.BALL_HOLDING_AREA_SCALE = 0.1

        # Classify each robot: different plate colour -> enemy; our own
        # name -> group9; same colour but different name -> ally.
        for r_name in robot_details.keys():
            role = 'ally'
            if robot_details[r_name]['main_colour'] != robot_details[our_name]['main_colour']:
                role = 'enemy'
            elif r_name == our_name:
                role = 'group9'

            print "[WRAPPER] Setting %s to %s." % (r_name, role)
            self.robots.append(RobotInstance(r_name,
                                             robot_details[r_name]['main_colour'],
                                             robot_details[r_name]['side_colour'],
                                             robot_details[r_name]['offset_angle'],
                                             role,
                                             r_name in robots_on_pitch))


        # Draw various things on the image
        self.draw_direction = True
        self.draw_robot     = True
        self.draw_contours  = True
        self.draw_ball      = True

        self.vision = Vision(
            pitch=pitch, frame_shape=self.frame.shape,
            frame_center=center_point, calibration=self.calibration,
            robots=self.robots,
            return_circle_contours=enable_gui, trackers_out=self.trackers)

        # Used for latency calculations
        self.t0 = time()
        self.delta_t = 0


        if self.enable_gui:
            self.gui = GUI(self.pitch, self.color_settings, self.calibration, self)

            from threading import Thread
            from gui.common import MainWindow
            from gui.status_control import StatusUI
            from gui.main import MainUI

            self.status_window = None

            # The Tk UI runs in its own thread so vision processing is not
            # blocked by the GUI main loop.
            def create_windows():
                self.main_window = MainUI(self)
                return [ self.main_window ]

            Thread(name="Tkinter UI", target=MainWindow.create_and_show,
                   args=[create_windows]).start()

        # Set up preprocessing and postprocessing
        # self.postprocessing = Postprocessing()
        self.preprocessing = Preprocessing()

        self.side = our_name
        self.frameQueue = []

        self.video = None
コード例 #7
0
ファイル: vision_wrapper.py プロジェクト: jsren/sdp-vision
class VisionWrapper:
    """
    Class that handles vision

    """

    # Per-role grabbing-zone scale factors used by is_ball_in_range.
    ENEMY_SCALE = 1.
    GROUP9_SCALE = 1.
    ALLY_SCALE = 1.

    def __init__(self, pitch, color_settings, our_name, robot_details,
                 enable_gui=False, pc_name=None, robots_on_pitch=list(), goal=None):
        """
        Entry point for the SDP system.

        Params:
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [int or string] color_settings  [0, small, 1, big] - 0 or small for pitch color settings with small numbers (previously - pitch 0)
                                            1 or big - pitch color settings with big numbers (previously - pitch 1)
            [string] colour                 The colour of our teams plates
            [string] our_name               our name
            [int] video_port                port number for the camera

            [boolean] enable_gui              Does not draw the normal image to leave space for GUI.
                                            Also forces the trackers to return circle contour positons in addition to robot details.
            [string] pc_name                Name of the PC to load the files from (BUT NOT SAVE TO). Will default to local machine if not specified.
        """
        # NOTE(review): robots_on_pitch=list() is a mutable default argument;
        # it is only read below (membership test), so the usual shared-state
        # bug does not bite here, but a tuple default would be safer.
        pitch = int(pitch)
        assert pitch in [0, 1]

        if goal: assert goal in ["left", "right"]

        self.pitch = pitch
        self.target_goal = goal
        self.color_settings = color_settings

        # Machine-specific calibration and video settings (created on demand).
        self.calibration = Configuration.read_calibration(
            pc_name, create_if_missing=True)

        self.video_settings = Configuration.read_video_config(
            pc_name, create_if_missing=True)

        # Set up camera for frames
        self.camera = Camera(pitch)
        self.camera.start_capture()
        self.frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center()

        # Set up vision
        self.trackers = list()

        self.world_objects = dict()

        # Get machine-specific calibration

        self.enable_gui = enable_gui
        self.gui = None

        # Initialize robots
        self.robots = []

        # Initialize ball states - which robot had the ball previously.
        self.ball_median_size = 5
        self.ball_index = 0
        self.ball_states = [None] * self.ball_median_size

        self.BALL_HOLDING_AREA_SCALE = 0.1

        # Classify each robot: different plate colour -> enemy; our own
        # name -> group9; same colour but different name -> ally.
        for r_name in robot_details.keys():
            role = 'ally'
            if robot_details[r_name]['main_colour'] != robot_details[our_name]['main_colour']:
                role = 'enemy'
            elif r_name == our_name:
                role = 'group9'

            print "[WRAPPER] Setting %s to %s." % (r_name, role)
            self.robots.append(RobotInstance(r_name,
                                             robot_details[r_name]['main_colour'],
                                             robot_details[r_name]['side_colour'],
                                             robot_details[r_name]['offset_angle'],
                                             role,
                                             r_name in robots_on_pitch))


        # Draw various things on the image
        self.draw_direction = True
        self.draw_robot     = True
        self.draw_contours  = True
        self.draw_ball      = True

        self.vision = Vision(
            pitch=pitch, frame_shape=self.frame.shape,
            frame_center=center_point, calibration=self.calibration,
            robots=self.robots,
            return_circle_contours=enable_gui, trackers_out=self.trackers)

        # Used for latency calculations
        self.t0 = time()
        self.delta_t = 0


        if self.enable_gui:
            self.gui = GUI(self.pitch, self.color_settings, self.calibration, self)

            from threading import Thread
            from gui.common import MainWindow
            from gui.status_control import StatusUI
            from gui.main import MainUI

            self.status_window = None

            # The Tk UI runs in its own thread so vision processing is not
            # blocked by the GUI main loop.
            def create_windows():
                self.main_window = MainUI(self)
                return [ self.main_window ]

            Thread(name="Tkinter UI", target=MainWindow.create_and_show,
                   args=[create_windows]).start()

        # Set up preprocessing and postprocessing
        # self.postprocessing = Postprocessing()
        self.preprocessing = Preprocessing()

        self.side = our_name
        self.frameQueue = []

        self.video = None

    def get_robots_raw(self):
        """Return [name, visible, position, heading, RobotType.UNKNOWN] for
        every known robot (no filtering is performed)."""
        rows = []
        for robot in self.robots:
            rows.append([robot.name, robot.visible, robot.position,
                         robot.heading, RobotType.UNKNOWN])
        return rows

    def get_robot_position(self, robot_name):
        """Return the position of the first robot named *robot_name*.

        Raises IndexError when no such robot exists (same as the original).
        The list comprehension replaces filter(...)[0], which breaks under
        Python 3 where filter returns a non-subscriptable iterator.
        """
        return [r for r in self.robots if r.name == robot_name][0].position

    def get_robot_headings(self):
        """Return a dict mapping each robot's name to its current heading."""
        return {robot.name: robot.heading for robot in self.robots}

    def get_circle_position(self, robot_name):
        """Return (side_x, side_y) of the named robot, or None if unknown."""
        match = next((r for r in self.robots if r.name == robot_name), None)
        if match is not None:
            return match.side_x, match.side_y

    def get_all_robots(self):
        """Return a dict mapping every robot name to its position."""
        return {r.name: self.get_robot_position(r.name) for r in self.robots}

    def get_ball_position(self):
        """
        :return: Actual ball position or predicted ball position if the robot was near it. Might return None.
        """
        # TODO: call methods here to get robot region if new one will be used.
        # The third tuple element is truthy when the ball position is valid;
        # if not, fall back to the robot that most recently held the ball
        # (mode of ball_states) and use its predicted ball position,
        # flagging the entry with 2 to mark it as a prediction.
        if not self.world_objects['ball'][2]:
            r_name, _ = self._mode(self.ball_states)
            if r_name:
                for r in self.robots:
                    if r.name == r_name:
                        x, y = r.predicted_ball_pos
                        self.world_objects['ball'] = (x, y, 2)

        # NOTE(review): (42, 42) appears to be a sentinel for "no ball" —
        # confirm where that value is produced.
        if self.world_objects['ball'][2] and not \
                (self.world_objects['ball'][0] == 42 and self.world_objects['ball'][1] == 42):
            return self.world_objects['ball']
        else:
            return None


    def get_pitch_dimensions(self):
        """Return the pitch size as reported by util.tools.get_pitch_size."""
        # Imported locally, presumably to avoid an import cycle — confirm.
        from util.tools import get_pitch_size
        return get_pitch_size()

    def get_robot_direction(self, robot_name):
        """Return the first robot instance named *robot_name*.

        NOTE(review): despite the name, this returns the whole robot object,
        not a direction. Raises IndexError when absent (as before). The list
        comprehension replaces filter(...)[0], which fails under Python 3
        where filter returns a non-subscriptable iterator.
        """
        return [r for r in self.robots if r.name == robot_name][0]


    def do_we_have_ball(self, robot_name):
        """True if the ball is inside *robot_name*'s scaled holding area."""
        # NOTE(review): is_ball_in_range never reads its `scale` argument —
        # it applies the per-role class constants instead. Confirm intent.
        return self.is_ball_in_range(robot_name, scale=self.BALL_HOLDING_AREA_SCALE)


    def is_ball_in_range(self, robot_name, scale=1.):
        """
        returns True if ball is in the grabbing zone.
        :param robot_name:  robot name
        :param scale:       Zone can be scaled, to accommodate for different robots
        :return: boolean
        """
        # NOTE(review): the `scale` parameter is not used; the per-role class
        # constants below are applied instead (matches original behaviour).
        ball = self.get_ball_position()
        if not (ball and ball[2]):
            return False

        role_scales = {'enemy': self.ENEMY_SCALE, 'group9': self.GROUP9_SCALE}
        # Only the first visible robot with a matching name is considered.
        for robot in self.robots:
            if robot.name == robot_name and robot.visible:
                zone_scale = role_scales.get(robot.role, self.ALLY_SCALE)
                if robot.is_point_in_grabbing_zone(
                        ball[0], ball[1], role=robot.role,
                        scale=zone_scale, circular=False):
                    return True
                break
        return False


    def is_ball_on_other(self, robot_name, zone, scale=1.):
        """
        returns True if ball is in the side or bottom zone.
        :param robot_name:  robot name
        :param zone:        ["left", "right", "bottom"]
        :param scale:       Zone can be scaled, to accommodate for different robots
        :return: boolean
        """
        ball = self.get_ball_position()
        if not (ball and ball[2]):
            return False

        robot = next((r for r in self.robots if r.name == robot_name), None)
        if robot is None:
            return False
        return bool(robot.is_point_in_other_zone(ball[0], ball[1], zone,
                                                 role=robot.role, scale=scale))


    def change_drawing(self, key):
        """
        Toggles drawing of contours, heading directions, robot positions and ball tracker
        Usage:
            * 'b' for ball tracker
            * 'n' for contours
            * 'j' for robot_tracker
            * 'i' for heading direction
        :param key: keypress
        :return: nothing
        """
        toggles = {
            ord('b'): 'draw_ball',
            ord('n'): 'draw_contours',
            # ord('j'): 'draw_robot',  # disabled: toggling this crashed everything
            ord('i'): 'draw_direction',
        }
        attr = toggles.get(key)
        if attr is not None:
            setattr(self.gui, attr, not getattr(self.gui, attr))


    def get_circle_contours(self):
        """
        Careful! Does not return x and y values. Call minimum bounding circles if proper circle locations are required.
        Or look at dem fish http://docs.opencv.org/2.4/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.html
        :return: [Contours]
        """
        # Returns None when no 'circles' entry has been stored yet.
        return self.world_objects.get('circles')

    def get_frame_size(self):
        """Return the current frame size as (width, height) in pixels."""
        height, width, _channels = self.frame.shape
        return width, height

    def get_latency_seconds(self):
        """Return the most recently measured processing latency in seconds."""
        return self.delta_t

    def _mode(self, array):
        """
        :param array:   some array
        :return: m, c   the first mode found and the number of occurences
        """
        count_of = array.count
        mode_value = max(array, key=count_of)
        return mode_value, count_of(mode_value)


    def start_video(self, title):
        """Begin recording camera frames to an MPEG file.

        NOTE(review): the `title` parameter is ignored — the output path is
        hard-coded to 'recordings/test.mpeg'. Confirm whether `title` was
        meant to be used here.
        """
        # Best-effort release of any previous writer before replacing it.
        if self.video is not None:
            try:
                self.video.release()
            except Exception, e:
                print "[WARNING] Error releasing previous video:", e
        # fourcc -1 lets OpenCV pick a codec; FPS and frame size are read
        # from the capture device (properties 3 and 4 are width and height).
        self.video = cv2.VideoWriter('recordings/' + "test" + '.mpeg', -1, int(self.camera.capture.get(cv2.CAP_PROP_FPS)),
                                     (int(self.camera.capture.get(3)), int(self.camera.capture.get(4))))
コード例 #8
0
class Configure(object):
    """Interactive calibration tool: the user outlines pitch regions by
    clicking points on a camera frame, and the croppings are saved."""

    def __init__(self, pitch, width=640, height=480):
        """Set up calibration state for the given pitch index."""
        self.width = width
        self.height = height
        self.pitch = pitch
        self.new_polygon = True
        # NOTE(review): polygon and polygons alias the SAME list object.
        self.polygon = self.polygons = []
        self.points = []

        self.camera = Camera(pitch=pitch)

        # Region names the user will be asked to outline.
        keys = [
            'outline', 'Zone_0', 'Zone_1', 'Zone_2', 'Zone_3', 'pitch_inner',
            "Goal_0", "Goal_1"
        ]
        # NOTE(review): data and drawing initially alias the SAME dict, so
        # the loop below assigns each key twice. get_zone() later rebinds
        # self.drawing to the *name* of the region being edited (a string),
        # after which self.data[self.drawing] addresses the active region.
        self.data = self.drawing = {}

        # Create keys
        for key in keys:
            self.data[key] = []
            self.drawing[key] = []

        self.color = RED

    def run(self, camera=False):
        """Main loop: grab an image, let the user outline each region in
        turn, then persist the collected croppings.

        :param camera: if True grab a live frame, otherwise load a file.
        """
        frame = cv2.namedWindow(FRAME_NAME)

        # Set callback
        cv2.setMouseCallback(FRAME_NAME, self.draw)

        if camera:
            self.image = self.camera.get_raw_frame()
        else:
            self.image = cv2.imread('00000001.jpg')

        # Undistort before the user starts clicking points.
        self.image = self.camera.fix_radial_distortion(self.image)
        self.image = self.camera.fix_perspective(self.image)

        # Get various data about the image from the user
        self.get_pitch_outline()

        self.get_zone('Zone_0', 'draw LEFT Defender')
        self.get_zone('Zone_1', 'draw RIGHT Defender')
        self.get_zone('Goal_0', 'Point in the center of the left goal')
        # NOTE(review): 'cneter' typo in the user-facing prompt below.
        self.get_zone('Goal_1', 'Point in the cneter of the right goal')

        #self.get_goal('Zone_0')
        #self.get_goal('Zone_3')

        print 'Press any key to finish.'
        cv2.waitKey(0)
        cv2.destroyAllWindows()

        # Write out the data
        # self.dump('calibrations/calibrate.json', self.data)
        tools.save_croppings(pitch=self.pitch, data=self.data)

    def reshape(self):
        """Return the active region's points as an Nx1x2 int32 array
        (the shape cv2.polylines expects)."""
        return np.array(self.data[self.drawing], np.int32).reshape((-1, 1, 2))

    def draw_poly(self, points):
        """Draw a closed polygon through *points* and refresh the window."""
        cv2.polylines(self.image, [points], True, self.color)
        cv2.imshow(FRAME_NAME, self.image)

    def get_zone(self, key, message):
        """Collect clicked points for region *key* until 'q' is pressed,
        then draw the resulting polygon."""
        print '%s. %s' % (message, "Continue by pressing q")
        # Rebinds self.drawing to the region name; the mouse callback then
        # appends clicked points to self.data[self.drawing].
        self.drawing, k = key, True

        while k != ord('q'):
            cv2.imshow(FRAME_NAME, self.image)
            k = cv2.waitKey(100) & 0xFF

        self.draw_poly(self.reshape())

    def get_pitch_outline(self):
        """
                Let user select points that corespond to the pitch outline.
                End selection by pressing 'q'.
                Result is masked and cropped.
                """
        self.get_zone(
            'outline',
            'Draw the outline of the pitch. Contine by pressing \'q\'')

        # Setup black mask to remove overflows
        self.image = tools.mask_pitch(self.image, self.data[self.drawing])

        # Get crop size based on points
        size = tools.find_crop_coordinates(self.image, self.data[self.drawing])
        # Crop
        #self.image = self.image[size[2]:size[3], size[0]:size[1]]

        cv2.imshow(FRAME_NAME, self.image)

    def draw(self, event, x, y, flags, param):
        """
                Callback for events
                """
        if event == cv2.EVENT_LBUTTONDOWN:
            color = self.color
            # NOTE(review): the marker is drawn at (x-1, y-1) but the point
            # recorded for the region is (x, y).
            cv2.circle(self.image, (x - 1, y - 1), 2, color, -1)
            self.data[self.drawing].append((x, y))

    def get_goal(self, zone):
        """
                Returns the top and bottom corner of the goal in zone.
                """
        coords = self.data[zone]
        # Odd-numbered zones sort descending, presumably to pick the two
        # points nearest the opposite edge — TODO confirm orientation.
        reverse = int(zone[-1]) % 2
        goal_coords = sorted(coords, reverse=reverse)[:2]
        # Order the pair by y coordinate: smaller y is the top corner.
        if goal_coords[0][1] > goal_coords[1][1]:
            topCorner = goal_coords[1]
            bottomCorner = goal_coords[0]
        else:
            topCorner = goal_coords[0]
            bottomCorner = goal_coords[1]
        self.data[zone + '_goal'] = [topCorner, bottomCorner]
コード例 #9
0
import cv2
from vision import Camera
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("pitch", help="[0] Main pitch, [1] Secondary pitch")
args = parser.parse_args()
pitch_number = int(args.pitch)

capture = Camera(pitch=pitch_number)

while True:

        frame = capture.get_frame()

        cv2.imwrite("currBg_" + str(pitch_number) + ".png", frame)
        #cv2.imwrite("../currBg_" + str(pitch_number) + ".png", frame)

        cv2.imshow('Background view', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

print "done"
コード例 #10
0
            Save perspetive point fixes
        """
        tools.save_data(self.pitch, self.points,
                        'calibrations/perspective.json')


if __name__ == '__main__':

    import argparse

    # Which pitch are we calibrating?
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("pitch", help="[0] Main pitch, [1] Secondary pitch")
    selected_pitch = int(arg_parser.parse_args().pitch)

    WINDOW_NAME = 'Perspective Calibration'
    cam = Camera(pitch=selected_pitch)
    perspective = Perspective(selected_pitch)

    cv2.namedWindow(WINDOW_NAME)
    cv2.setMouseCallback(WINDOW_NAME, perspective.add_point)

    # Show undistorted frames with the calibration points overlaid
    # until ESC (key code 27) is pressed.
    while True:
        raw = cam.get_raw_frame()
        undistorted = cam.fix_radial_distortion(raw)
        annotated = perspective.draw_points(undistorted)

        cv2.imshow(WINDOW_NAME, annotated)

        if cv2.waitKey(20) & 0xFF == 27:
            break
コード例 #11
0
    # Read this robot's identity from disk.
    # NOTE(review): `file` shadows the (Python 2) builtin of the same name.
    with open("identity.dat", "r") as file:
        identity = file.readlines()[0].strip()

    print(identity, "online!")

    # Figure out where I'm headed
    goal = [-1, 1]
    with open("goal.dat", "r") as file:
        # BUG(review): `readlines` is missing its call parentheses —
        # file.readlines[0] raises TypeError; should be file.readlines()[0].
        nums = file.readlines[0].strip().split(" ")
        goal = [float(x) for x in nums]

    print("Targeting block at", goal)

    # System initialization
    drivers.init()
    camera = Camera()
    mod = VisionModule(width=640, height=480)

    # State
    pose = ROBOT_ORIGIN_POSES[identity]
    # BUG(review): `time = 0` shadows the `time` module, so the
    # `time.time()` call below raises AttributeError on the int 0.
    time = 0
    iterations = 0
    done = False
    epoch = time.time()

    # Parker robots have a simple routine
    if identity in PARKER_ROBOTS:
        drivers.move(drivers.RobotState(drivers.DRIVE, in_to_cm(ROBOT_AXIAL + 2)))
        sleep(3)
        drivers.move(drivers.RobotState(drivers.DRIVE, -in_to_cm(ROBOT_AXIAL + 3)))
        done = True