コード例 #1
0
ファイル: controller.py プロジェクト: EmilRyberg/P6BinPicking
    def __init__(self, move_robot, camera_interface: CameraInterface,
                 segmentation_weight_path):
        """Wire up the controller's collaborators and reset pick-tracking state."""
        # Injected collaborators and vision helpers.
        self.move_robot = move_robot
        self.camera = camera_interface
        self.vision = Vision(segmentation_weight_path)
        self.box_detector = BoxDetector()
        self.surface_normals = SurfaceNormals()
        # Per-cycle vision results (populated during the pick loop).
        self.detected_objects = None
        self.masks = None
        self.reference_image = None
        self.depth_image = None
        self.part_position_details = None
        # Grip retry bookkeeping and detection thresholds.
        self.unsuccessful_grip_counter = 0
        self.unsuccessful_grip_shake_counter = 0
        self.first_box_move = 0
        self.area_threshold = 2000000
        self.score_threshold = 0.6
        # How many parts of each category have been picked so far.
        self.picked_parts = {
            category.value: 0
            for category in (PartCategoryEnum.BOTTOM_COVER,
                             PartCategoryEnum.PCB,
                             PartCategoryEnum.BLACK_COVER,
                             PartCategoryEnum.BLUE_COVER,
                             PartCategoryEnum.WHITE_COVER)
        }

        print("[I] Controller running")
コード例 #2
0
ファイル: simulator.py プロジェクト: sdp-2011/sdp-9
 def initVision(self):
     """Start the simulator vision bridge, if vision is enabled.

     Replaces self.vision with a Vision instance bound to this simulator
     and allocates a temporary .bmp file path (self.visionFile) used to
     hand frames over to the vision code.
     """
     if self.vision:
         logging.info("Starting simulator vision bridge")
         world = self.world
         if not self.real_world:
             # Vision must always use the real World
             world = common.world.World(self.colour)
         self.vision = Vision(simulator=self, world=world)
         # tempfile.mktemp() is deprecated and racy (another process can
         # claim the name before we use it); create the file atomically
         # instead and keep only its path.
         tmp = tempfile.NamedTemporaryFile(suffix='.bmp', delete=False)
         tmp.close()
         self.visionFile = tmp.name
コード例 #3
0
ファイル: main4.py プロジェクト: mvernacc/maslab-staff-2013
def main(argv):
    # Drive a ball-chasing robot: set up Arduino motors/bumpers, then run a
    # vision-guided loop for up to 180 s, backing away when a bumper fires.
    ardu = arduino.Arduino()
    motor_left = arduino.Motor(ardu, 0, 9, 8) # current pin, direction pin, pwm pin
    motor_right = arduino.Motor(ardu, 0, 7, 6)
    bump_right = arduino.DigitalInput(ardu, 22)
    bump_left = arduino.DigitalInput(ardu, 23)
    ardu.run() # start ardu comm thread

    vis = Vision(True)
    end_now = False  # NOTE(review): never set True inside the loop; only the 180 s timeout or Ctrl-C exits
    start_time = time.time()
    try:
        while not end_now:
            # Hard stop after 3 minutes of driving.
            if time.time() - start_time  > 180:
                break
            if bump_right.getValue():
                # Right bumper hit: reverse while turning away, then
                # counter-rotate. cv2.waitKey doubles as a sleep (ms).
                print 'right bump'
                motor_right.setSpeed(-200)
                motor_left.setSpeed(-50)
                cv2.waitKey(1000)
                motor_right.setSpeed(-100)
                motor_left.setSpeed(100)
                cv2.waitKey(500)
                motor_right.setSpeed(0)
                motor_left.setSpeed(0)
            if bump_left.getValue():
                # Left bumper hit: mirror image of the right-bump maneuver.
                print 'left bump'
                motor_right.setSpeed(-50)
                motor_left.setSpeed(-200)
                cv2.waitKey(1000)
                motor_right.setSpeed(100)
                motor_left.setSpeed(-100)
                cv2.waitKey(500)
                motor_right.setSpeed(0)
                motor_left.setSpeed(0)
            vis.grab_frame()
            # NOTE(review): 'side' is not defined in this function -- presumably a
            # module-level global or a porting bug; confirm before running.
            feats = vis.detect_balls(side)
            if len(feats) >0:
                # Steer proportionally toward the first detected ball.
                angle = int(feats[0].angle)
                print angle 
                motor_right.setSpeed( 64 - angle*4 )
                motor_left.setSpeed( 64 + angle*4 )
            else:
                # No ball in view: rotate in place to search.
                print "No ball"
                motor_right.setSpeed(-50)
                motor_left.setSpeed(50)
            cv2.waitKey(20)
    except KeyboardInterrupt:
        print "ending..."
    # Stop both motors and shut the Arduino link down cleanly.
    time.sleep(0.1)
    motor_right.setSpeed(0)
    motor_left.setSpeed(0)
    time.sleep(0.1)
    ardu.stop()
コード例 #4
0
ファイル: controller.py プロジェクト: jonredmond/SDP
    def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyUSB0', comms=1):
        """
        Build the full SDP control stack: serial link, camera, vision,
        post-processing, planner and GUI, plus one controller per robot.

        Params:
            [int] pitch          0 - main pitch, 1 - secondary pitch
            [string] color       our team colour - 'yellow' or 'blue'
            [string] our_side    the side we're on - 'left' or 'right'
            [int] video_port     port number for the camera
            [string] comm_port   serial device for the arduino
            [int] comms          whether arduino comms are enabled
        """
        # Validate the discrete arguments up front.
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch
        self.color = color
        self.side = our_side

        # Serial link to the robots.
        self.arduino = Arduino(comm_port, 115200, 1, comms)

        # Camera: grab one frame so vision can learn the frame geometry.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        first_frame = self.camera.get_frame()
        centre = self.camera.get_adjusted_center(first_frame)

        # Vision pipeline, seeded with the stored colour calibration.
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(pitch=pitch,
                             color=color,
                             our_side=our_side,
                             frame_shape=first_frame.shape,
                             frame_center=centre,
                             calibration=self.calibration)

        # Frame pre/post processing stages.
        self.postprocessing = Postprocessing()
        self.preprocessing = Preprocessing()

        # Planning and visualisation.
        self.planner = Planner(our_side=our_side, pitch_num=self.pitch)
        self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

        # One controller per robot role.
        self.attacker = Attacker_Controller()
        self.defender = Defender_Controller()
コード例 #5
0
ファイル: mock1.py プロジェクト: mvernacc/maslab-staff-2013
def main(argv):
    red_side = True

    try:
        opts, args = getopt.getopt(argv,'', ['color='])
    except getopt.GetoptError:
        print "usage: 'main.py --color=<c>', where <c> is 'red' or 'green'"
    for opt, arg in opts:
        if opt == '--color':
            if arg == 'green':
                red_side = False
                print 'looking for green balls'
            elif arg == 'red':
                print 'looking for red balls'

    ardu = arduino.Arduino()
    motor_left = arduino.Motor(ardu, 0, 8, 10) #current pin,. direction pin, pwm pin
    motor_right = arduino.Motor(ardu, 0, 7, 9)
    ardu.run() # start ardu comm thread

    vis = Vision(red_side, True)
    end_now = False
    start_time = time.time()
    try:
        while not end_now:
            if time.time() - start_time > 10:
                end_now = True
                motor_right.setSpeed(0)
                motor_left.setSpeed(0)
                time.sleep(0.1)
                ardu.stop()
                break
            feats = vis.get_feat()
            if len(feats) >0:
                angle = int(feats[0].angle)
                print angle 
                motor_right.setSpeed( -64 - angle*4 )
                motor_left.setSpeed( -64 + angle*4 )
            else:
                print "No ball"
                motor_right.setSpeed(-32)
                motor_left.setSpeed(32)
            cv2.waitKey(20)
    except KeyboardInterrupt:
        print "ending..."
        time.sleep(0.1)
        motor_right.setSpeed(0)
        motor_left.setSpeed(0)
        time.sleep(0.1)
        ardu.stop()
        end_now = True
コード例 #6
0
    def __init__(self):
        """Create the robot's hardware interfaces and reset match state."""
        threading.Thread.__init__(self)

        # Arduino board plus the sensors/actuators hanging off it.
        self.ard = arduino.Arduino()
        self.pid = arduino.PID(self.ard)
        self.motors = Motors(self.ard)
        self.bumpers = Bumpers(self.ard)
        self.ir = IR(self.ard)

        # Vision, configured to look for the red ball.
        self.vision = Vision()
        self.vision.color = Color.Red
        self.vision.features = Feature.Ball

        self.time = Timer(self)
        self.servoBridge = arduino.Servo(self.ard, 6)
        self.servoGate = arduino.Servo(self.ard, 3)
        # self.bridgeBump = arduino.DigitalInput(self.ard, 27)

        # Match-state flags and world model.
        self.ready = False
        self.scoring = False
        self.buttoned = False     # Button has been pushed
        self.deployed = False     # Bridge has been deployed
        self.repeatedBarcodes = False
        self.map = {}
        self.tower = []
        self.wall = []
コード例 #7
0
ファイル: simulator.py プロジェクト: eliask/SDP2011-Robotniks
 def initVision(self):
     """Start the simulator vision bridge, if vision is enabled.

     Replaces self.vision with a Vision instance bound to this simulator
     and allocates a temporary .bmp file path (self.visionFile) used to
     hand frames over to the vision code.
     """
     if self.vision:
         logging.info("Starting simulator vision bridge")
         world = self.world
         if not self.real_world:
             # Vision must always use the real World
             world = common.world.World(self.colour)
         self.vision = Vision(simulator=self, world=world)
         # tempfile.mktemp() is deprecated and racy (another process can
         # claim the name before we use it); create the file atomically
         # instead and keep only its path.
         tmp = tempfile.NamedTemporaryFile(suffix='.bmp', delete=False)
         tmp.close()
         self.visionFile = tmp.name
コード例 #8
0
ファイル: controller.py プロジェクト: Tasignotas/SDP
    def __init__(self, pitch, color, our_side, video_port=0, comm_port="/dev/ttyUSB0", comms=1):
        """
        Construct the complete SDP control pipeline.

        Params:
            [int] pitch          0 - main pitch, 1 - secondary pitch
            [string] color       our plate colour - "yellow" or "blue"
            [string] our_side    the side we're on - "left" or "right"
            [int] video_port     camera port number
            [string] comm_port   serial device for the arduino
            [int] comms          whether arduino comms are enabled
        """
        # Guard against unknown pitches, colours and sides.
        assert pitch in [0, 1]
        assert color in ["yellow", "blue"]
        assert our_side in ["left", "right"]

        self.pitch = pitch
        self.color = color
        self.side = our_side

        # Robot serial link.
        self.arduino = Arduino(comm_port, 115200, 1, comms)

        # Camera; sample one frame so vision learns the frame geometry.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        initial_frame = self.camera.get_frame()
        adjusted_centre = self.camera.get_adjusted_center(initial_frame)

        # Vision, seeded with the saved colour calibration for this pitch.
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(pitch=pitch, color=color, our_side=our_side,
                             frame_shape=initial_frame.shape,
                             frame_center=adjusted_centre,
                             calibration=self.calibration)

        # Vision post-processing, planning and the debugging GUI.
        self.postprocessing = Postprocessing()
        self.planner = Planner(our_side=our_side, pitch_num=self.pitch)
        self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

        self.preprocessing = Preprocessing()

        # Per-robot controllers.
        self.attacker = Attacker_Controller()
        self.defender = Defender_Controller()
コード例 #9
0
def main(argv):
    try:
        opts, args = getopt.getopt(argv, 'rgycp')
    except getopt.GetoptError:
        print "Arguments: -r -g -y -c -p"
    color = Color.Red
    for opt, arg in opts:
        if opt == '-r':
            color = Color.Red
        if opt == '-g':
            color = Color.Green
        if opt == '-y':
            color = Color.Yellow
        if opt == '-c':
            color = Color.Cyan
        if opt == '-p':
            color = Color.Purple
    try:
        vis = Vision(True)
        while cv2.waitKey(10) == -1:
            vis.grabFrame()
            print vis._detectObjectCentroid(color)
    except:
        pass
コード例 #10
0
ファイル: Runner.py プロジェクト: maccery/sdp-group-8
    def __init__(self):
        """Assemble the vision / GUI / world pipeline from the calibration file."""
        # Set up Arduino communications.
        # self.robot =
        self.task = None
        self.wait_for_vision = True

        # Vision and its GUI share one calibration file.
        self.calib_file = "vision/calibrations/calibrations.json"
        self.vision = Vision(self.calib_file)
        self.gui = GUI(self.vision.calibrations)

        # World model, built from the same calibrations.
        self.world = World(self.calib_file)

        # Post-processing stage for vision output.
        self.postprocessing = PostProcessing()
コード例 #11
0
    def __init__(self, pitch, color, our_side, video_port=0):
        """
        Entry point for the SDP system.

        Params:
            [int] pitch        0 - main pitch, 1 - secondary pitch
            [string] color     the colour of our team's plates
            [string] our_side  the side we're on - 'left' or 'right'
            [int] video_port   port number for the camera
        """
        # Reject anything outside the known pitches/colours/sides early.
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch
        self.color = color
        self.side = our_side

        # Camera: pull an initial frame so vision knows the frame geometry.
        self.camera = Camera(port=video_port, pitch=pitch)
        self.frame = self.camera.get_frame()
        centre = self.camera.get_adjusted_center(self.frame)

        # Vision pipeline, seeded with the stored colour calibrations.
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(pitch=pitch, color=color, our_side=our_side,
                             frame_shape=self.frame.shape,
                             frame_center=centre,
                             calibration=self.calibration)

        # Frame pre/post processing stages.
        self.preprocessing = Preprocessing()
        self.postprocessing = Postprocessing()

        # Buffered frames awaiting processing.
        self.frameQueue = []
コード例 #12
0
# Build the simulated arena, wire the vision system to a simulated camera,
# and run the Tk main loop.
corner_right = ObjectDimension(
    Object.CANVAS_WIDTH - margin_delta - corner_radius * 2, margin_delta,
    corner_radius * 2, corner_radius * 2)

simulation = Simulation(motor_left, motor_right, corner_left, corner_right)
app = Window(root, simulation)

camera = SimulationCamera(app)

# Static test image the vision pipeline processes instead of a live feed.
image_to_draw = prepare_image("vision/test_bild.jpg")

if image_to_draw is None:
    print("Image can not be null to continue.")
    exit(-1)

motor_interface = SimulationMotorInterface(simulation, 0.001, 5)

# Vision runs on its own thread; the tuple and final argument presumably
# describe the geometry between the motors and the corners -- confirm
# against the Vision constructor.
vision = Vision(motor_interface, 1.5, camera, image_to_draw,
                ((motor_right.center.x - motor_left.center.x) / 2, 25),
                corner_right.center.x - corner_left.center.x)
vision.run_in_thread()


def on_destroy_callback():
    # Stop the vision thread when the window is closed.
    vision.quit()


app.set_on_destroy_callback(on_destroy_callback)

# Schedule the first frame (~16 ms, roughly 60 FPS) and enter the Tk loop.
root.after(16, app.frame)
root.mainloop()
コード例 #13
0
ファイル: controller.py プロジェクト: jonredmond/SDP
class Controller:
    """
    Primary source of robot control. Ties vision and planning together.
    """

    def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyUSB0', comms=1):
        """
        Entry point for the SDP system.

        Params:
            [int] video_port                port number for the camera
            [string] comm_port              port number for the arduino
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [string] our_side               the side we're on - 'left' or 'right'
            *[int] port                     The camera port to take the feed from
            *[Robot_Controller] attacker    Robot controller object - Attacker Robot has a RED
                                            power wire
            *[Robot_Controller] defender    Robot controller object - Defender Robot has a YELLOW
                                            power wire
        """
        # Fail fast on invalid pitch/colour/side values.
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch

        # Set up the Arduino communications
        self.arduino = Arduino(comm_port, 115200, 1, comms)

        # Set up camera for frames; grab one frame so vision can be
        # initialised with the frame geometry.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center(frame)

        # Set up vision
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(
            pitch=pitch, color=color, our_side=our_side,
            frame_shape=frame.shape, frame_center=center_point,
            calibration=self.calibration)

        # Set up postprocessing for vision
        self.postprocessing = Postprocessing()

        # Set up main planner
        self.planner = Planner(our_side=our_side, pitch_num=self.pitch)

        # Set up GUI
        self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

        self.color = color
        self.side = our_side

        self.preprocessing = Preprocessing()

        # One controller per robot role; wow() tolerates either being None.
        self.attacker = Attacker_Controller()
        self.defender = Defender_Controller()

    def wow(self):
        """
        Ready your sword, here be dragons.

        Main control loop: grab a frame, run vision and post-processing,
        plan, drive both robots and redraw the GUI -- until ESC is pressed.
        """
        counter = 1L  # Py2 long literal; frame count used for the FPS estimate
        timer = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8
        try:
            c = True
            while c != 27:  # the ESC key

                frame = self.camera.get_frame()
                pre_options = self.preprocessing.options
                # Apply preprocessing methods toggled in the UI
                preprocessed = self.preprocessing.run(frame, pre_options)
                frame = preprocessed['frame']
                if 'background_sub' in preprocessed:
                    cv2.imshow('bg sub', preprocessed['background_sub'])
                # Find object positions
                # model_positions have their y coordinate inverted

                model_positions, regular_positions = self.vision.locate(frame)
                model_positions = self.postprocessing.analyze(model_positions)

                # Find appropriate action
                self.planner.update_world(model_positions)
                attacker_actions = self.planner.plan('attacker')
                defender_actions = self.planner.plan('defender')

                if self.attacker is not None:
                    self.attacker.execute(self.arduino, attacker_actions)
                if self.defender is not None:
                    self.defender.execute(self.arduino, defender_actions)

                # Information about the grabbers from the world
                grabbers = {
                    'our_defender': self.planner._world.our_defender.catcher_area,
                    'our_attacker': self.planner._world.our_attacker.catcher_area
                }

                # Information about states
                attackerState = (self.planner.attacker_state, self.planner.attacker_strat_state)
                defenderState = (self.planner.defender_state, self.planner.defender_strat_state)

                # Use 'y', 'b', 'r' to change color.
                # NOTE(review): waitKey is used unqualified -- presumably
                # imported from cv2 at module level; confirm.
                c = waitKey(2) & 0xFF
                actions = []
                fps = float(counter) / (time.clock() - timer)
                # Draw vision content and actions

                self.GUI.draw(
                    frame, model_positions, actions, regular_positions, fps, attackerState,
                    defenderState, attacker_actions, defender_actions, grabbers,
                    our_color=self.color, our_side=self.side, key=c, preprocess=pre_options)
                counter += 1

        except:
            # NOTE(review): shutdown also runs in the finally block below,
            # so on an exception each robot is shut down twice.
            if self.defender is not None:
                self.defender.shutdown(self.arduino)
            if self.attacker is not None:
                self.attacker.shutdown(self.arduino)
            raise

        finally:
            # Write the new calibrations to a file.
            tools.save_colors(self.pitch, self.calibration)
            if self.attacker is not None:
                self.attacker.shutdown(self.arduino)
            if self.defender is not None:
                self.defender.shutdown(self.arduino)
コード例 #14
0
class Robot(threading.Thread):
    """Match robot: owns the Arduino, its sensors/servos and the vision system.

    Lifecycle: run() (the thread body) brings the hardware up and sets
    self.ready; stop() tears everything down again.
    """

    def __init__(self):
        threading.Thread.__init__(self)

        # Arduino and sensor properties
        self.ard = arduino.Arduino()
        self.pid = arduino.PID(self.ard)
        self.motors = Motors(self.ard)
        self.bumpers = Bumpers(self.ard)
        self.ir = IR(self.ard)
        # Vision is configured to track the red ball.
        self.vision = Vision()
        self.vision.color = Color.Red
        self.vision.features = Feature.Ball
        self.time = Timer(self)
        self.servoBridge = arduino.Servo(self.ard, 6)
        self.servoGate = arduino.Servo(self.ard, 3)
        # self.bridgeBump = arduino.DigitalInput(self.ard, 27)

        # Properties for match
        self.ready = False
        self.scoring = False
        self.buttoned = False # Button has been pushed
        self.deployed = False # Bridge has been deployed
        self.map = {}
        self.tower = []
        self.wall = []
        self.repeatedBarcodes = False

    def run(self):
        # Thread body: wait for the serial port, then start all subsystems.
        self.ard.start()
        # Busy-wait until the Arduino serial port is actually open.
        while self.ard.portOpened == False:
            time.sleep(0.05)

        # self.motors.start()
        # Park the servos in their starting positions before sensing begins.
        self.servoGate.setAngle(45)
        self.servoBridge.setAngle(5)
        self.bumpers.start()
        self.ir.start()
        self.vision.start()

        self.ready = True

    def stop(self):
        # Shut down all subsystems, then release the Arduino link.
        self.pid.stop()
        # self.motors.stop()
        # Return the servos to their rest angles before powering down.
        self.servoGate.setAngle(45)
        self.servoBridge.setAngle(5)
        self.bumpers.stop()
        self.ir.stop()
        self.vision.stop()
        # Give subsystem threads a moment to wind down before closing serial.
        time.sleep(1)
        self.ard.stop()

        self.ready = False

    def reverse(self, bumped):
        # Back away from an obstacle after a bumper hit.
        # bumped -- indexable pair of booleans (left_hit, right_hit).
        speed = 80
        # Reverse for 2 seconds
        self.motors.right.setSpeed(-speed)
        self.motors.left.setSpeed(-speed)
        time.sleep(2)
        # Rotate for 1 second
        if bumped[0] == True:
            # Left bump sensor hit
            self.motors.right.setSpeed(-speed)
            self.motors.left.setSpeed(speed)
        elif bumped[1] == True:
            # Right bump sensor hit
            self.motors.right.setSpeed(speed)
            self.motors.left.setSpeed(-speed)
        time.sleep(1)
        # Move forward for 1 second
        self.motors.right.setSpeed(speed)
        self.motors.left.setSpeed(speed)
        time.sleep(1)
        self.motors.right.setSpeed(0)
        self.motors.left.setSpeed(0)

    def getFarthestPoint(self):
        # Spin in place while sampling the IR rangefinders and heading.
        # NOTE(review): 'ard' below is undefined in this scope -- presumably
        # meant to be self.ard; confirm before use. The method also appears
        # truncated in this excerpt: it collects firData but never stops the
        # motors or returns a result here.
        startAngle = ard.getHeading()
        firData = []
        self.ir.running = True
        # Rotate slowly in place while recording readings.
        self.motors.left.setSpeed(50)
        self.motors.right.setSpeed(-50)
        # NOTE(review): the self.time.elapsed() > 1 guard looks inverted for
        # a "sample until rotated ~0.2 rad" loop -- confirm intent.
        while self.time.elapsed() > 1 and ard.getHeading() - startAngle < 0.2:
            firData.append((self.ir.firRight.getValues(),
                            self.ir.firLeft.getValues(),
                            ard.getHeading()))
コード例 #15
0
ファイル: controller.py プロジェクト: Tasignotas/SDP
class Controller:
    """
    Primary source of robot control. Ties vision and planning together.
    """

    def __init__(self, pitch, color, our_side, video_port=0, comm_port="/dev/ttyUSB0", comms=1):
        """
        Entry point for the SDP system.

        Params:
            [int] video_port                port number for the camera
            [string] comm_port              port number for the arduino
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [string] our_side               the side we're on - 'left' or 'right'
            *[int] port                     The camera port to take the feed from
            *[Robot_Controller] attacker    Robot controller object - Attacker Robot has a RED
                                            power wire
            *[Robot_Controller] defender    Robot controller object - Defender Robot has a YELLOW
                                            power wire
        """
        # Fail fast on invalid pitch/colour/side values.
        assert pitch in [0, 1]
        assert color in ["yellow", "blue"]
        assert our_side in ["left", "right"]

        self.pitch = pitch

        # Set up the Arduino communications
        self.arduino = Arduino(comm_port, 115200, 1, comms)

        # Set up camera for frames; grab one frame so vision can be
        # initialised with the frame geometry.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center(frame)

        # Set up vision
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(
            pitch=pitch,
            color=color,
            our_side=our_side,
            frame_shape=frame.shape,
            frame_center=center_point,
            calibration=self.calibration,
        )

        # Set up postprocessing for vision
        self.postprocessing = Postprocessing()

        # Set up main planner
        self.planner = Planner(our_side=our_side, pitch_num=self.pitch)

        # Set up GUI
        self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

        self.color = color
        self.side = our_side

        self.preprocessing = Preprocessing()

        # One controller per robot role; wow() tolerates either being None.
        self.attacker = Attacker_Controller()
        self.defender = Defender_Controller()

    def wow(self):
        """
        Ready your sword, here be dragons.

        Main control loop: grab a frame, run vision and post-processing,
        plan, drive both robots and redraw the GUI -- until ESC is pressed.
        """
        counter = 1L  # Py2 long literal; frame count used for the FPS estimate
        timer = time.clock()  # NOTE(review): time.clock() was removed in Python 3.8
        try:
            c = True
            while c != 27:  # the ESC key

                frame = self.camera.get_frame()
                pre_options = self.preprocessing.options
                # Apply preprocessing methods toggled in the UI
                preprocessed = self.preprocessing.run(frame, pre_options)
                frame = preprocessed["frame"]
                if "background_sub" in preprocessed:
                    cv2.imshow("bg sub", preprocessed["background_sub"])
                # Find object positions
                # model_positions have their y coordinate inverted

                model_positions, regular_positions = self.vision.locate(frame)
                model_positions = self.postprocessing.analyze(model_positions)

                # Find appropriate action
                self.planner.update_world(model_positions)
                attacker_actions = self.planner.plan("attacker")
                defender_actions = self.planner.plan("defender")

                if self.attacker is not None:
                    self.attacker.execute(self.arduino, attacker_actions)
                if self.defender is not None:
                    self.defender.execute(self.arduino, defender_actions)

                # Information about the grabbers from the world
                grabbers = {
                    "our_defender": self.planner._world.our_defender.catcher_area,
                    "our_attacker": self.planner._world.our_attacker.catcher_area,
                }

                # Information about states
                attackerState = (self.planner.attacker_state, self.planner.attacker_strat_state)
                defenderState = (self.planner.defender_state, self.planner.defender_strat_state)

                # Use 'y', 'b', 'r' to change color.
                # NOTE(review): waitKey is used unqualified -- presumably
                # imported from cv2 at module level; confirm.
                c = waitKey(2) & 0xFF
                actions = []
                fps = float(counter) / (time.clock() - timer)
                # Draw vision content and actions

                self.GUI.draw(
                    frame,
                    model_positions,
                    actions,
                    regular_positions,
                    fps,
                    attackerState,
                    defenderState,
                    attacker_actions,
                    defender_actions,
                    grabbers,
                    our_color=self.color,
                    our_side=self.side,
                    key=c,
                    preprocess=pre_options,
                )
                counter += 1

        except:
            # NOTE(review): shutdown also runs in the finally block below,
            # so on an exception each robot is shut down twice.
            if self.defender is not None:
                self.defender.shutdown(self.arduino)
            if self.attacker is not None:
                self.attacker.shutdown(self.arduino)
            raise

        finally:
            # Write the new calibrations to a file.
            tools.save_colors(self.pitch, self.calibration)
            if self.attacker is not None:
                self.attacker.shutdown(self.arduino)
            if self.defender is not None:
                self.defender.shutdown(self.arduino)
コード例 #16
0
ファイル: controller.py プロジェクト: EmilRyberg/P6BinPicking
class Controller:
    def __init__(self, move_robot, camera_interface: CameraInterface,
                 segmentation_weight_path):
        """Wire up the subsystems used by the bin-picking flow.

        Args:
            move_robot: robot motion interface (movej/movel, gripper, suction).
            camera_interface: camera used to capture RGB and depth images.
            segmentation_weight_path: path to the segmentation network weights.
        """
        self.move_robot = move_robot
        self.camera = camera_interface
        # Instance-segmentation network used to find part masks.
        self.vision = Vision(segmentation_weight_path)
        # Locates the parts box in the reference image.
        self.box_detector = BoxDetector()
        # Derives pick poses (surface normals, gripper orientation) from depth data.
        self.surface_normals = SurfaceNormals()
        self.detected_objects = None
        self.masks = None
        # Consecutive failed grips; more than 2 in a row triggers a box shake.
        self.unsuccessful_grip_counter = 0
        # Consecutive shake rounds without a pickable part; 5 aborts to manual pick.
        self.unsuccessful_grip_shake_counter = 0
        # Minimum mask area for a detection to be considered.
        self.area_threshold = 2000000
        # Minimum segmentation confidence for a detection to be considered.
        self.score_threshold = 0.6
        # Latest RGB reference image and matching depth image (set by capture_images).
        self.reference_image = None
        self.depth_image = None
        # Pick pose details of the currently selected part (set by find_best_mask_by_part).
        self.part_position_details = None
        self.first_box_move = 0
        # Count of successfully picked parts per category.
        self.picked_parts = {
            PartCategoryEnum.BOTTOM_COVER.value: 0,
            PartCategoryEnum.PCB.value: 0,
            PartCategoryEnum.BLACK_COVER.value: 0,
            PartCategoryEnum.BLUE_COVER.value: 0,
            PartCategoryEnum.WHITE_COVER.value: 0
        }

        print("[I] Controller running")

    def main_flow(self, debug=False):
        at_least_x_of_each_part_picked = False
        while not at_least_x_of_each_part_picked:
            self.capture_images()
            self.masks = self.vision.segment(self.reference_image)
            self.move_robot.move_to_home_suction(speed=3)
            picked_part = self.pick_and_place_part(part="any", debug=debug)
            self.picked_parts[picked_part] += 1
            at_least_x_of_each_part_picked = True
            for key, value in self.picked_parts.items():
                if value < 2:
                    at_least_x_of_each_part_picked = False

        print("Successfully picked 2 sets of 5 different parts")

    def pick_and_place_part(self, part, debug=False):
        """Pick one part of category ``part`` ("any" matches every category)
        and place it at that part's drop-off location.

        Shakes the box and re-segments when nothing pickable is found or the
        grip keeps failing; after 5 fruitless shake rounds in a row the
        operator is asked to pick the part manually.

        Returns the category name of the part that ended up picked (whether
        by the robot or the operator).
        """
        success = False
        self.unsuccessful_grip_shake_counter = 0
        self.unsuccessful_grip_counter = 0
        mask = None
        while success == False and self.unsuccessful_grip_shake_counter < 5:
            print("finding part: ", part)
            mask = self.find_best_mask_by_part(part, self.masks, debug)
            # Either no candidate mask at all, or 3 failed grips in a row:
            # shake the box to rearrange parts, then re-capture and re-segment.
            if mask == False or self.unsuccessful_grip_counter > 2:
                if mask == False:
                    print("no appropriate part found")
                    self.unsuccessful_grip_shake_counter += 1
                    print(
                        f"shaking, try {self.unsuccessful_grip_shake_counter}/5"
                    )
                if self.unsuccessful_grip_counter > 2:
                    print("failed to pick up 3 times in a row")
                    print("shaking...")
                self.move_box()
                self.move_robot.move_out_of_view()
                self.capture_images()
                self.masks = self.vision.segment(self.reference_image)
                self.unsuccessful_grip_counter = 0
            else:
                self.unsuccessful_grip_shake_counter = 0
                np_mask = np.asarray(mask["mask"])
                if debug:
                    # Show the chosen mask overlaid on the reference image.
                    applied_mask = cv2.bitwise_and(self.reference_image,
                                                   self.reference_image,
                                                   mask=np_mask)
                    cv2.imshow("picking", cv2.resize(applied_mask,
                                                     (1280, 720)))
                    cv2.waitKey(0)

                # PCBs are picked with the suction cup; all other parts are
                # picked with the finger gripper (else-branch below).
                if mask["part"] == PartCategoryEnum.PCB.value:
                    center, part_orientation, normal_vector, relative_angle_to_z, _ = self.part_position_details
                    print(f'gripping {mask["part"]} at {center} with suction')
                    self.move_robot.remote_print(f"gripping {mask['part']}")
                    self.move_robot.set_tcp(self.move_robot.suction_tcp)
                    default_orientation = Rotation.from_euler(
                        "xyz", [0, 3.14, 0]).as_rotvec(
                        )  # rotz=0 is down left, counterclockwise positive
                    self.move_robot.movej([-60, -60, -110, -190, -70, 100],
                                          vel=2)  #down right above box
                    # Approach along the surface normal so the suction cup is
                    # not dragged across neighbouring parts.
                    approach_center = center + 200 * normal_vector
                    self.move_robot.movel2(
                        approach_center, part_orientation)  #pre pick above box
                    self.move_robot.movel2(center, part_orientation,
                                           vel=0.2)  #pick
                    self.move_robot.enable_suction()
                    lift_orientation = Rotation.from_euler(
                        "xyz", [0, 3.14, 1.57]).as_rotvec()
                    self.move_robot.movel2([center[0], center[1], 300],
                                           lift_orientation,
                                           vel=0.2)  #lift straight up
                    self.move_robot.movej([-60, -60, -110, -190, 20, 100],
                                          vel=2)  #down left above box
                    self.move_to_drop(
                        mask["part"]
                    )  #specific drop off locations for each part
                    self.move_robot.disable_suction()
                    self.move_robot.movel2([200, -200, 300],
                                           default_orientation,
                                           vel=1)  #move straight up
                    print("success")
                    success = True
                else:
                    center, rotvec, normal_vector, relative_angle_to_z, short_vector = self.surface_normals.get_gripper_orientation(
                        np_mask,
                        self.depth_image,
                        self.reference_image,
                        0,
                        debug=debug)
                    print(f'gripping {mask["part"]} at {center} with gripper')
                    self.move_robot.remote_print(f"gripping {mask['part']}")
                    self.move_robot.set_tcp(self.move_robot.gripper_tcp)
                    self.move_robot.move_to_home_gripper(speed=3)
                    self.move_robot.movel([0, -300, 300, 0, np.pi, 0], vel=0.8)
                    approach_center = center + 200 * normal_vector
                    pose_approach = np.concatenate((approach_center, rotvec))
                    self.move_robot.movel(pose_approach)
                    # Target slightly below the detected surface so the
                    # fingers reach around the part before closing.
                    pose_pick = np.concatenate(
                        (center - 14 * normal_vector, rotvec))
                    self.move_robot.close_gripper(40)
                    self.move_robot.movel(pose_pick, vel=0.1)
                    gripper_close_distance = 0
                    self.move_robot.close_gripper(gripper_close_distance,
                                                  speed=0.5,
                                                  lock=True)
                    self.move_robot.movel2([center[0], center[1], 100], rotvec)
                    # Verify the grip via the finger opening; if the part
                    # slipped out, count the failure and re-segment.
                    if not self.has_object_between_fingers(
                            15 / 1000.0):  # 18 is part width
                        self.unsuccessful_grip_counter += 1
                        print(
                            f"dropped part, attempt {self.unsuccessful_grip_counter}/3"
                        )
                        success = False
                        self.move_robot.open_gripper()
                        self.move_robot.movel(
                            [center[0], center[1], 300, 0, 3.14, 0], vel=0.8)
                        self.move_robot.move_out_of_view()
                        self.capture_images()
                        self.masks = self.vision.segment(self.reference_image)
                    else:
                        self.move_robot.movel(
                            [center[0], center[1], 200, 0, np.pi, 0], vel=0.8)
                        #self.move_robot.movel([200, -200, 50, 0, np.pi, 0])
                        self.move_to_drop(mask["part"])
                        self.move_robot.open_gripper()
                        self.move_robot.move_to_home_gripper(speed=3)
                        #self.move_robot.movel([200, -200, 200, 0, np.pi, 0])
                        print("success")
                        success = True
        # Fall back to the operator when the automatic pick gave up.
        picked_part = None
        if not success:
            if part == "any":
                part = input(
                    "manually pick any part and enter the picked part")
            else:
                input(f"manually pick {part} and press enter")
            picked_part = part
        else:
            picked_part = mask["part"]
        return picked_part

    def find_best_mask_by_part(self, part, masks, debug=False):
        #first cull images by some basic criteria
        for index, mask in enumerate(masks):
            if mask["area"] < self.area_threshold:
                mask["ignored"] = True
                mask["ignore_reason"] += "too small, "
            if mask["score"] < self.score_threshold:
                mask["ignored"] = True
                mask["ignore_reason"] += "low confidence, "
            x, y = mask["center"]
            box_location, box_mask = self.box_detector.find_box(
                self.reference_image, get_mask=True)
            if box_mask[y, x] == 0:
                mask["ignored"] = True
                mask["ignore_reason"] += "outside box, "

            if mask["part"] != PartCategoryEnum.PCB.value and (
                    mask["part"] == part
                    or part == "any") and not mask["ignored"]:
                center, rotvec, normal_vector, relative_angle_to_z, short_vector_2d = self.surface_normals.get_gripper_orientation(
                    mask["mask"],
                    self.depth_image,
                    self.reference_image,
                    0,
                    debug=debug)
                mask_center = np.asarray(mask["center"], dtype=np.int32)
                pil_depth = pimg.fromarray(self.depth_image)
                pil_depth = pil_depth.resize((1920, 1080))
                np_rescaled_depth_image = np.asarray(pil_depth)
                points_checking = []
                points_on_mask = []
                is_valid, mask_point, points_checked = self.check_for_valid_gripper_point(
                    mask_center, mask, short_vector_2d,
                    np_rescaled_depth_image)
                points_checking.extend(points_checked)
                points_on_mask.append(mask_point)

                if not mask["ignored"]:  # only check if first side is clear
                    is_valid, mask_point, points_checked = self.check_for_valid_gripper_point(
                        mask_center, mask, -short_vector_2d,
                        np_rescaled_depth_image)
                    points_checking.extend(points_checked)
                    points_on_mask.append(mask_point)

                if len(points_checking) > 0 and debug:
                    rgb_img = self.reference_image.copy()
                    print('rgb_img', rgb_img.shape)
                    for point in points_checking:
                        cv2.circle(rgb_img, tuple(point), 2, (0, 0, 255), -1)
                    for point in points_on_mask:
                        cv2.circle(rgb_img, tuple(point), 2, (255, 0, 0), -1)
                    cv2.imshow('points being checked', rgb_img)
                    cv2.waitKey(0)

        #next, find largest mask of matching part type
        highest_index = -1
        highest_area = -1
        for index, mask in enumerate(masks):
            if (mask["part"] == part or part == "any"
                ) and mask["area"] > highest_area and mask["ignored"] == False:
                part_position_details = self.surface_normals.vector_normal(
                    mask["mask"],
                    self.depth_image,
                    self.reference_image,
                    rotation_around_self_z=0.78
                )  #0.78=down right, clockwise positive
                _, _, _, angle_to_z, _ = self.surface_normals.get_gripper_orientation(
                    mask["mask"],
                    self.depth_image,
                    self.reference_image,
                    debug=debug)
                if part_position_details[
                        3] > 0.8:  # check how flat it is (relative angle to reference z)
                    mask["ignored"] = True
                    mask["ignore_reason"] += "not flat enough, "
                elif part_position_details[
                        4] == True:  # normal vector intersects box
                    mask["ignored"] = True
                    mask["ignore_reason"] += "normal vector intersects box, "
                elif angle_to_z > 0.8:
                    mask["ignored"] = True
                    mask["ignore_reason"] += "not flat enough (gripper calc), "
                else:
                    self.part_position_details = part_position_details
                    highest_index = index
                    highest_area = mask["area"]

        if highest_index == -1:  #if none are acceptable
            return False
        else:
            return masks[highest_index]

    def move_to_drop(
        self, part
    ):  #black cover and white cover can cause issues with the box (cause of their location), so for them we approach a bit above and then move down before dropping
        if part == "BlackCover":
            pose = [
                self.move_robot.black_cover_drop[0],
                self.move_robot.black_cover_drop[1],
                self.move_robot.black_cover_drop[2] + 50,
                self.move_robot.black_cover_drop[3],
                self.move_robot.black_cover_drop[4],
                self.move_robot.black_cover_drop[5]
            ]
            self.move_robot.movel(pose, vel=0.8)
            self.move_robot.movel(self.move_robot.black_cover_drop)
        elif part == "BlueCover":
            self.move_robot.movel(self.move_robot.blue_cover_drop, vel=0.8)
        elif part == "WhiteCover":
            pose = [
                self.move_robot.white_cover_drop[0],
                self.move_robot.white_cover_drop[1],
                self.move_robot.white_cover_drop[2] + 50,
                self.move_robot.white_cover_drop[3],
                self.move_robot.white_cover_drop[4],
                self.move_robot.white_cover_drop[5]
            ]
            self.move_robot.movel(pose, vel=0.8)
            self.move_robot.movel(self.move_robot.white_cover_drop)
        elif part == "BottomCover":
            self.move_robot.movel(self.move_robot.bottom_cover_drop, vel=0.8)
        elif part == "PCB":
            self.move_robot.movel(self.move_robot.pcb_drop, vel=0.8)

    def capture_images(self):
        """Move the robot out of the camera's view, then grab a fresh RGB
        reference image and the matching depth image for later detection."""
        self.move_robot.move_out_of_view(speed=3)
        self.reference_image = self.camera.get_image()
        self.depth_image = self.camera.get_depth()

    def move_box(self):
        grasp_location, angle = self.box_detector.box_grasp_location(
            self.reference_image)
        self.move_robot.set_tcp(self.move_robot.gripper_tcp)
        self.move_robot.move_to_home_gripper()
        rot = Rotation.from_euler("XYZ", [0, 3.14, 2.35 - angle])
        rot = rot.as_rotvec()
        self.move_robot.movel([
            grasp_location[0], grasp_location[1], grasp_location[2] + 20,
            rot[0], rot[1], rot[2]
        ],
                              vel=0.5)
        self.move_robot.movel([
            grasp_location[0], grasp_location[1], grasp_location[2] - 80,
            rot[0], rot[1], rot[2]
        ])
        self.move_robot.grasp_box()
        self.move_robot.movel([
            grasp_location[0], grasp_location[1], grasp_location[2] - 10,
            rot[0], rot[1], rot[2]
        ])
        self.move_robot.movel([
            grasp_location[0] + 200, grasp_location[1] + 200,
            grasp_location[2] - 10, rot[0], rot[1], rot[2]
        ],
                              vel=2)
        self.move_robot.movel([
            grasp_location[0], grasp_location[1], grasp_location[2] - 10,
            rot[0], rot[1], rot[2]
        ],
                              vel=2)
        self.move_robot.movel2([
            grasp_location[0] - 200, grasp_location[1] + 200,
            grasp_location[2] - 10
        ],
                               rot,
                               vel=2)
        self.move_robot.movel2(
            [grasp_location[0], grasp_location[1], grasp_location[2] - 10],
            rot,
            vel=2)
        self.move_robot.movel([
            grasp_location[0], grasp_location[1], grasp_location[2] - 70,
            rot[0], rot[1], rot[2]
        ])
        self.move_robot.open_gripper()
        self.move_robot.movel([
            grasp_location[0], grasp_location[1], grasp_location[2] + 40,
            rot[0], rot[1], rot[2]
        ])
        #print(grasp_location)

    def choose_action(self):
        """Prompt the operator for a command and dispatch it.

        Returns:
            True when the operator asked to quit, False otherwise (including
            after an unrecognised command).
        """
        print("Please write a command (write 'help' for a list of commands):")
        command = input()
        if command == "help":
            print(
                "Possible commands are: \nblack: assemble phone with black cover \nwhite: assemble phone with white cover \n"
                +
                "blue: assemble phone with blue cover \nzero: put the robot in zero position \nquit: close the program"
            )
        elif command in ("black", "white", "blue"):
            # BUG FIX: main_flow(self, debug=False) takes no part argument.
            # The old code passed PartEnum values positionally, which landed
            # in the ``debug`` parameter and (being truthy strings) silently
            # enabled debug mode. main_flow picks parts of every category
            # regardless of the requested colour.
            self.main_flow()
        elif command == "zero":
            self.move_robot.move_out_of_view()
        elif command == "quit":
            return True
        else:
            print("Invalid command, please try again")
        return False

    def has_object_between_fingers(self, distance_threshold):
        print(
            f'distance between fingers: {self.move_robot.get_gripper_distance()}, threshold: {distance_threshold + 0.00005}'
        )
        return self.move_robot.get_gripper_distance(
        ) >= distance_threshold + 0.00005  # with small sigma for floating point errors

    def check_for_valid_gripper_point(self,
                                      mask_center,
                                      mask,
                                      direction_to_check,
                                      np_scaled_depth_image,
                                      depth_margin=4):
        points_to_check = []
        point_on_mask = None
        is_valid_grasp = True
        for i in range(30, 200):
            point_to_check = np.array(np.round(mask_center +
                                               direction_to_check * i),
                                      dtype=np.int32)
            if mask["mask"][point_to_check[1], point_to_check[0]] == 0:

                point_on_mask = np.array(np.round(point_to_check -
                                                  direction_to_check * 5),
                                         dtype=np.int32)
                point_on_mask_z = self.surface_normals.get_z(
                    point_on_mask[0], point_on_mask[1], np_scaled_depth_image)
                offset_point = np.array(np.round(point_to_check +
                                                 direction_to_check * 16),
                                        dtype=np.int32)
                rotated_direction_to_check = np.array(
                    [-direction_to_check[1], direction_to_check[0]])

                points_to_check = [
                    offset_point,
                    np.array(np.round(offset_point +
                                      rotated_direction_to_check * 15),
                             dtype=np.int32),
                    np.array(np.round(offset_point -
                                      rotated_direction_to_check * 15),
                             dtype=np.int32),
                    np.array(np.round(offset_point +
                                      rotated_direction_to_check * 30),
                             dtype=np.int32),
                    np.array(np.round(offset_point -
                                      rotated_direction_to_check * 30),
                             dtype=np.int32)
                ]

                offset_point_2 = np.array(np.round(offset_point +
                                                   direction_to_check * 16),
                                          dtype=np.int32)

                points_to_check.extend([
                    offset_point_2,
                    np.array(np.round(offset_point_2 +
                                      rotated_direction_to_check * 15),
                             dtype=np.int32),
                    np.array(np.round(offset_point_2 -
                                      rotated_direction_to_check * 15),
                             dtype=np.int32),
                    np.array(np.round(offset_point_2 +
                                      rotated_direction_to_check * 30),
                             dtype=np.int32),
                    np.array(np.round(offset_point_2 -
                                      rotated_direction_to_check * 30),
                             dtype=np.int32)
                ])

                z_points = []

                for point in points_to_check:
                    z_points.append(
                        self.surface_normals.get_z(point[0], point[1],
                                                   np_scaled_depth_image))

                for z_point in z_points:
                    diff = point_on_mask_z - z_point
                    if diff <= depth_margin:
                        print(
                            f'z difference is less than {depth_margin} mm for one or more points. diff: {diff}, on mask: {point_on_mask_z}, offset: {z_point}'
                        )
                        mask["ignored"] = True
                        mask[
                            "ignore_reason"] += "not enough space for fingers, "
                        is_valid_grasp = False
                        break

                break
        return is_valid_grasp, point_on_mask, points_to_check
コード例 #17
0
from vision.vision import Vision
import cProfile

# Profile feature extraction: one warm-up call outside the profiler (also a
# sanity check that it runs), then a profiled second run.
vis = Vision(False)
feats = vis.get_feat()
cProfile.run('vis.get_feat()')
コード例 #18
0
class VisionWrapper:
    """
    Class that handles vision

    """

    def __init__(self, pitch, color, our_side, video_port=0):
        """
        Entry point for the SDP system.

        Params:
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [string] colour                 The colour of our teams plates
            [string] our_side               the side we're on - 'left' or 'right'
            [int] video_port                port number for the camera
        Fields
            pitch
            camera
            calibration
            vision
            postprocessing
            color
            side
            preprocessing
            model_positions
            regular_positions
        """
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch

        # Set up camera for frames
        self.camera = Camera(port=video_port, pitch=pitch)
        self.frame = self.camera.get_frame()
        # Adjusted image centre, handed to the vision system as frame_center.
        center_point = self.camera.get_adjusted_center(self.frame)

        # Set up vision with the per-pitch colour calibration
        self.calibration = tools.get_colors(pitch)
        self.vision = Vision(
            pitch=pitch, color=color, our_side=our_side,
            frame_shape=self.frame.shape, frame_center=center_point,
            calibration=self.calibration)

        # Set up preprocessing and postprocessing
        self.postprocessing = Postprocessing()
        self.preprocessing = Preprocessing()

        self.color = color
        self.side = our_side

        # Queue of recent position dicts used by averagePositions().
        self.frameQueue = []


    def update(self):
        """
        Gets this frame's positions from the vision system.

        Grabs a camera frame, applies the preprocessing steps toggled in the
        UI, then runs object location and postprocessing on the result.
        """

        self.frame = self.camera.get_frame()

        # Apply preprocessing methods toggled in the UI
        self.preprocessed = self.preprocessing.run(self.frame, self.preprocessing.options)
        self.frame = self.preprocessed['frame']
        if 'background_sub' in self.preprocessed:
            # Show the background-subtraction output for debugging/calibration.
            cv2.imshow('bg sub', self.preprocessed['background_sub'])

        # Find object positions
        # model_positions have their y coordinate inverted
        self.model_positions, self.regular_positions = self.vision.locate(self.frame)
        self.model_positions = self.postprocessing.analyze(self.model_positions)

        #self.model_positions = self.averagePositions(3, self.model_positions)

    def averagePositions(self, frames, positions_in):
        """
        :param frames: number of frames to average
        :param positions_in: positions for the current frame
        :return: averaged positions
        """

        validFrames = self.frameQueue.__len__() + 1

        positions_out = deepcopy(positions_in)
        # Check that the incoming positions have legal values
        for obj in positions_out.items():
            if (positions_out[obj[0]].velocity is None):
                positions_out[obj[0]].velocity = 0
            if positions_out[obj[0]].x is None:
                positions_out[obj[0]].x = 0
            if positions_out[obj[0]].y is None:
                positions_out[obj[0]].y = 0
            positions_out[obj[0]].angle = positions_in[obj[0]].angle

        # Loop over queue
        for positions in self.frameQueue:
            # Loop over each object in the position dictionary
            isFrameValid = True
            for obj in positions.items():
                # Check if the current object's positions have legal values
                if (obj[1].x is None) or (obj[1].y is None) or (obj[1].angle is None) or (obj[1].velocity is None):
                    isFrameValid = False
                else:
                    positions_out[obj[0]].x += obj[1].x
                    positions_out[obj[0]].y += obj[1].y
                    positions_out[obj[0]].velocity += obj[1].velocity

            if not isFrameValid and validFrames > 1:
                #validFrames -= 1
                pass

        # Loop over each object in the position dictionary and average the values
        for obj in positions_out.items():
            positions_out[obj[0]].velocity /= validFrames
            positions_out[obj[0]].x /= validFrames
            positions_out[obj[0]].y /= validFrames


        # If frameQueue is already full then pop the top entry off
        if self.frameQueue.__len__() >= frames:
            self.frameQueue.pop(0)

        # Add our new positions to the end
        self.frameQueue.append(positions_in)

        return positions_out


    def saveCalibrations(self):
        """Write the current colour calibration for this pitch back to disk."""
        tools.save_colors(self.vision.pitch, self.calibration)
コード例 #19
0
ファイル: controller.py プロジェクト: ChrisJamesCampbell/SDP
	def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyACM0', comms=1):
		"""
		Entry point for the SDP system.

		Params:
			[int] video_port                port number for the camera
			[string] comm_port              port number for the arduino
			[int] pitch                     0 - main pitch, 1 - secondary pitch
			[string] our_side               the side we're on - 'left' or 'right'

			*[int] port                     The camera port to take the feed from
			*[Robot_Controller] attacker    Robot controller object - Attacker Robot has a RED
											power wire
			*[Robot_Controller] defender    Robot controller object - Defender Robot has a YELLOW
											power wire
		"""
		assert pitch in [0, 1]
		assert color in ['yellow', 'blue']
		assert our_side in ['left', 'right']

		self.pitch = pitch

		# Set up the Arduino communications.
		# NOTE(review): comm_port is accepted but the serial device is
		# hard-coded to /dev/ttyACM0 below -- confirm whether comm_port
		# should be passed through instead.
		self.arduino = arduinoComm.Communication("/dev/ttyACM0", 9600)
		time.sleep(0.5)
		# Start with the grabber raised and closed.
		self.arduino.grabberUp()
		self.arduino.grab()

		# Set up camera for frames
		self.camera = Camera(port=video_port, pitch=self.pitch)
		frame = self.camera.get_frame()
		center_point = self.camera.get_adjusted_center(frame)

		# Set up vision with the per-pitch colour calibration
		self.calibration = tools.get_colors(pitch)
		self.vision = Vision(
			pitch=pitch, color=color, our_side=our_side,
			frame_shape=frame.shape, frame_center=center_point,
			calibration=self.calibration)

		# Set up postprocessing for vision
		self.postprocessing = Postprocessing()

		# Set up GUI
		self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

		# Set up main planner
		self.planner = Planner(our_side=our_side, pitch_num=self.pitch, our_color=color, gui=self.GUI, comm = self.arduino)

		self.color = color
		self.side = our_side

		# Earliest time at which the next robot action may be issued.
		self.timeOfNextAction = 0

		self.preprocessing = Preprocessing()
	#it doesn't matter whether it is an Attacker or a Defender Controller
		self.controller = Attacker_Controller(self.planner._world, self.GUI)

		# Queue of pending robot actions.
		self.robot_action_list = []
コード例 #20
0
ファイル: controller.py プロジェクト: ChrisJamesCampbell/SDP
class Controller:
	"""
	Primary source of robot control. Ties vision and planning together:
	grabs frames, locates objects, asks the planner for actions and
	drives the robot over the Arduino link while drawing the GUI.
	"""

	def __init__(self, pitch, color, our_side, video_port=0, comm_port='/dev/ttyACM0', comms=1):
		"""
		Entry point for the SDP system: wires together the Arduino link,
		camera, vision, post-processing, GUI, planner and the robot
		controller.

		Params:
			[int] pitch                     0 - main pitch, 1 - secondary pitch
			[string] color                  our team color - 'yellow' or 'blue'
			[string] our_side               the side we're on - 'left' or 'right'
			[int] video_port                port number for the camera
			[string] comm_port              serial device for the arduino
											(NOTE(review): currently ignored -
											the device path is hard-coded below)
			[int] comms                     NOTE(review): unused in this method
		"""
		assert pitch in [0, 1]
		assert color in ['yellow', 'blue']
		assert our_side in ['left', 'right']
		

		self.pitch = pitch


		# Set up the Arduino communications.
		# NOTE(review): comm_port is ignored; "/dev/ttyACM0" is hard-coded here.
		self.arduino = arduinoComm.Communication("/dev/ttyACM0", 9600)
		time.sleep(0.5)  # brief pause so the serial link can settle
		# Start with the grabber raised and closed.
		self.arduino.grabberUp()
		self.arduino.grab()
		

		# Set up the camera and grab one frame to establish the frame
		# geometry (shape and adjusted center) needed by vision.
		self.camera = Camera(port=video_port, pitch=self.pitch)
		frame = self.camera.get_frame()
		center_point = self.camera.get_adjusted_center(frame)

		# Set up vision using the stored color calibration for this pitch.
		self.calibration = tools.get_colors(pitch)
		self.vision = Vision(
			pitch=pitch, color=color, our_side=our_side,
			frame_shape=frame.shape, frame_center=center_point,
			calibration=self.calibration)

		# Set up postprocessing for vision output.
		self.postprocessing = Postprocessing()

		# Set up GUI (shares the calibration dict and the Arduino link).
		self.GUI = GUI(calibration=self.calibration, arduino=self.arduino, pitch=self.pitch)

		# Set up the main planner.
		self.planner = Planner(our_side=our_side, pitch_num=self.pitch, our_color=color, gui=self.GUI, comm = self.arduino)

		self.color = color
		self.side = our_side

		# Earliest time.time() at which the next planned action may run.
		self.timeOfNextAction = 0

		self.preprocessing = Preprocessing()
		# It doesn't matter whether this is an Attacker or a Defender
		# controller; both expose the same execute/shutdown interface.
		self.controller = Attacker_Controller(self.planner._world, self.GUI)


		# Queue of (action, duration) tuples awaiting execution.
		self.robot_action_list = []




	def wow(self):
		"""
		Main control loop. Runs until ESC is pressed: captures a frame,
		runs preprocessing/vision/postprocessing, updates the planner's
		world model, executes planned robot actions when due, and draws
		everything in the GUI. On any exception the controller is shut
		down; calibrations are always saved on exit.
		"""
		counter = 1L
		timer = time.clock()
		try:
			c = True
			while c != 27:  # the ESC key

				frame = self.camera.get_frame()
				pre_options = self.preprocessing.options
				# Apply preprocessing methods toggled in the UI
				preprocessed = self.preprocessing.run(frame, pre_options)
				frame = preprocessed['frame']
				if 'background_sub' in preprocessed:
					cv2.imshow('bg sub', preprocessed['background_sub'])
				# Find object positions.
				# model_positions have their y coordinate inverted.

				model_positions, regular_positions = self.vision.locate(frame)
				model_positions = self.postprocessing.analyze(model_positions)

				# Push the new world state to the planner.
				self.planner.update_world(model_positions)
				
				# Only plan/act once the previous action's time has elapsed.
				if time.time() >= self.timeOfNextAction:
					if self.robot_action_list == []:
						plan = self.planner.plan()

						# A plan may be a single action or a list of them;
						# normalize to a list of (action, duration) tuples.
						if isinstance(plan, list):
							self.robot_action_list = plan
						else:
							self.robot_action_list = [(plan, 0)]
				
					if self.controller is not None:
						self.controller.execute(self.arduino, self)
	   

				# Information about the grabbers from the world
				grabbers = {
					'our_defender': self.planner._world.our_defender.catcher_area,
					'our_attacker': self.planner._world.our_attacker.catcher_area
				}

				# Information about states (placeholder label shown in the GUI)
				robotState = 'test'
			   

				# Use 'y', 'b', 'r' to change color.
				c = waitKey(2) & 0xFF
				actions = []
				fps = float(counter) / (time.clock() - timer)
				# Draw vision content and actions

				self.GUI.draw(
					frame, model_positions, actions, regular_positions, fps, robotState,
				   "we dont need it", '', "we dont need it", grabbers,
					our_color='blue', our_side=self.side, key=c, preprocess=pre_options)
				counter += 1

				# Manual override: 'a' raises the grabber and closes it.
				if c == ord('a'):
					self.arduino.grabberUp()
					self.arduino.grab()

		except:
			# Make sure the robot is stopped before propagating the error.
			if self.controller is not None:
				self.controller.shutdown(self.arduino)
			raise

		finally:
			# Write the new calibrations to a file.
			tools.save_colors(self.pitch, self.calibration)
			if self.controller is not None:
				self.controller.shutdown(self.arduino)
コード例 #21
0
ファイル: main.py プロジェクト: Luddzik/sdp_12
    def __init__(self,
                 pitch,
                 color,
                 our_side,
                 video_port=0,
                 comm_port='/dev/ttyACM0',
                 comms=1):
        """
        Entry point for the SDP system: sets up robot communications,
        the planner, camera, vision, post-processing and the GUI.

        Params:
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [string] color                  our team color - 'yellow' or 'blue'
            [string] our_side               the side we're on - 'left' or 'right'
            [int] video_port                port number for the camera
            [string] comm_port              serial device for the arduino
                                            (NOTE(review): unused in this method)
            [int] comms                     NOTE(review): unused in this method
        """
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch
        # Set up communications if there are any.
        # NOTE(review): if this raises, self.robotComs is left unset.
        try:
            self.robotComs = RobotCommunications(debug=False)
        except:
            print("arduino unplugged moving on to vision")

        # Set up robot communications to be sent to the planner;
        # fall back to a console-only test stub when the radio is absent.
        if self.USE_REAL_ROBOT:
            try:
                self.robotCom = RobotCommunications(debug=False)
            except:
                self.robotCom = TestCommunications(debug=True)
                print 'Not connected to the radio, using TestCommunications instead.'
        else:
            self.robotCom = TestCommunications(debug=True)

        # Set up main planner
        if(self.robotCom is not None):
            # currently we are assuming we are the defender
            self.planner = Planner(our_side=our_side,
                                   pitch_num=self.pitch,
                                   robotCom=self.robotCom,
                                   robotType='defender')

        # Set up the camera and grab one frame to establish the frame
        # geometry (shape and adjusted center) needed by vision.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center(frame)

        # Set up vision using the stored color calibration for this pitch.
        self.calibration = tools.get_colors(pitch)
        print self.calibration
        self.vision = Vision(
            pitch=pitch,
            color=color,
            our_side=our_side,
            frame_shape=frame.shape,
            frame_center=center_point,
            calibration=self.calibration)

        # Set up postprocessing for vision output.
        self.postprocessing = Postprocessing()

        # Set up GUI
        self.GUI = GUI(calibration=self.calibration, pitch=self.pitch)

        self.color = color
        self.side = our_side

        self.preprocessing = Preprocessing()
コード例 #22
0
ファイル: simulator.py プロジェクト: sdp-2011/sdp-9
class Simulator(object):
    """Game simulator: owns the pygame screen, the game entities, an
    optional simulator->vision bridge and the AI players, and drives
    the main 25 FPS loop.

    Constructor keyword arguments are assigned directly onto the
    instance (e.g. world, colour, real_world, headless, vision, pitch,
    robot1, robot2).
    """

    # Immutable option defaults; instances may override them via kwargs.
    headless = False   # run without opening a visible window
    vision = None      # truthy -> start the vision bridge in initVision()
    pitch = None       # background pitch image provider (has .get())
    robot1 = None      # (ai_class_or_None, is_real) for robot 1
    robot2 = None      # (ai_class_or_None, is_real) for robot 2

    def __init__(self, **kwargs):
        """Create a simulator; kwargs become instance attributes.

        FIX: objects/robots/images/ai used to be *class-level* mutable
        attributes, silently shared by every Simulator instance. They
        are now created per instance, before the kwargs loop so a
        caller can still override them explicitly.
        """
        self.objects = []   # every drawable entity
        self.robots = []    # robot entities only
        self.images = {}    # sprite name -> loaded pygame surface
        self.ai = []        # instantiated AI players

        logging.debug("Simulator started with the arguments:")
        for k, v in kwargs.items():
            self.__setattr__(k, v)
            logging.debug("\t%s = %s", k, v)

    def initVision(self):
        """Start the simulator->vision bridge when vision is requested."""
        if self.vision:
            logging.info("Starting simulator vision bridge")
            world = self.world
            if not self.real_world:
                # Vision must always use the real World
                world = common.world.World(self.colour)
            self.vision = Vision(simulator=self, world=world)
            # Frames are handed to vision through this temporary bitmap.
            self.visionFile = tempfile.mktemp(suffix='.bmp')

    def drawEnts(self):
        """Draw background and sprites, feed a frame to vision, then
        blit and reset the overlay and flip the display (if visible)."""
        if self.pitch:
            bg = self.pitch.get()
            self.screen.blit(bg, (0, 0))
        self.sprites.draw(self.screen)

        if self.vision:
            pygame.image.save(self.screen, self.visionFile)
            self.vision.processFrame()
        # Update overlay after we've passed the "raw image" to vision
        self.screen.blit(self.overlay, (0, 0))
        # Make the overlay "blank" again. Should be completely
        # transparent but I don't know how to do that
        self.overlay.fill((130, 130, 130, 255))
        if not self.headless:
            pygame.display.flip()

    def initScreen(self):
        """Create the drawing surfaces (offscreen when headless)."""
        logging.debug("Creating simulator screen")
        if self.headless:
            self.screen = pygame.Surface(World.Resolution)
            # FIX: drawEnts() always blits self.overlay; previously it
            # was never created in headless mode (AttributeError).
            self.overlay = pygame.Surface(World.Resolution)
        else:
            pygame.display.set_mode(World.Resolution)
            pygame.display.set_caption('SDP 9 Simulator')
            self.screen = pygame.display.get_surface()
            self.overlay = pygame.Surface(World.Resolution)
            self.overlay.convert_alpha()
            self.overlay.set_alpha(100)

    def makeObjects(self):
        """Create both robots (colours randomly assigned), the ball
        when both robots are simulated, and the sprite group."""
        logging.debug("Creating game objects")

        colours = ('blue', 'yellow')
        if random() < 0.5:
            col1, col2 = colours
        else:
            col2, col1 = colours

        logging.info("Robot 1 is %s. Robot 2 is %s", col1, col2)
        self.makeRobot(World.LeftStartPos, col1, 0, self.robot1[0])
        self.makeRobot(World.RightStartPos, col2, -pi, self.robot2[0])

        # Only make a real ball when there are two simulated robots
        if len(self.robots) == 2:
            pos = World.Pitch.center
            logging.info("Creating a ball at %s", pos2string(pos))
            self.makeBall(pos)
        else:
            logging.info("Not making a simulated ball with real robots")

        self.sprites = pygame.sprite.RenderPlain(self.objects)

    def setRobotAI(self, robot, ai):
        """Placeholder: reclassify a simulated robot as AI-driven.

        TODO: just delete the image and reclassify the robot as a real
        robot.
        """
        pass

    def initAI(self):
        """Instantiate the AI players declared by robot1/robot2.

        Each is a pair (ai_class_or_None, is_real); a real robot gets a
        RealRobotInterface, a simulated one drives its sprite directly.
        """
        logging.debug("Initialising AI")

        ai1, real1 = self.robot1
        ai2, real2 = self.robot2

        if ai1 and real1:
            real_interface = RealRobotInterface()
            #meta_interface = MetaInterface(real_interface, self.robots[0])
            ai = ai1(self.world, real_interface)
            self.ai.append(ai)
            robotSprite = self.robots[0]
            #self.robots[0] = ai
            #del robotSprite
            self.setRobotAI(self.robots[0], ai)
            logging.debug("AI 1 started in the real world")
        elif ai1:
            self.ai.append(ai1(self.world, self.robots[0]))
            logging.debug("AI 1 started in the simulated world")
        else:
            logging.debug("No AI 1 present")

        if ai2 and real2:
            # TODO: reverse sides here
            ai = ai2(self.world, RealRobotInterface())
            self.ai.append(ai)
            robotSprite = self.robots[0]
            self.robots[1] = ai
            #del robotSprite
            self.setRobotAI(self.robots[1], ai)
            logging.debug("AI 2 started in the real world")
        elif ai2:
            self.ai.append(ai2(self.world, self.robots[1]))
            logging.debug("AI 2 started in the simulated world")
        else:
            logging.debug("No AI 2 present")

    def runAI(self):
        """Run one step of every AI player."""
        for ai in self.ai:
            ai.run()

    def run(self):
        """Initialise everything, then loop forever at 25 FPS:
        handle input, draw, update sprites, run the AIs."""
        pygame.init()
        self.clock = pygame.time.Clock()
        self.initVision()
        self.initScreen()
        self.loadImages()
        self.makeObjects()
        self.world.assignSides()
        self.initAI()
        self.initInput()
        # by initialising the input after the AI, we can control even
        # AI robots with keyboard
        self.drawEnts()

        while True:
            self.handleInput()
            self.clock.tick(25)
            self.drawEnts()
            self.sprites.update()
            self.runAI()

    def initInput(self):
        """Create the keyboard input handler for both robots."""
        self.input = Input(self, self.robots[0], self.robots[1])

    def handleInput(self):
        """Dispatch pending pygame events; QUIT exits the process."""
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit(0)
            else:
                #logging.debug("Got input event: %s", event)
                self.input.robotInput(event)

    def makeRobot(self, pos, colour, angle, ai):
        """Create a robot entity and register it in world/robots/objects."""
        ent = Robot(self, pos, self.images[colour], angle)
        ent.side = colour

        self.world.ents[colour] = ent
        self.robots.append(ent)
        self.addEnt(ent)

    def makeBall(self, pos):
        """Create the ball entity and register it in the world."""
        ent = Ball(self, pos, self.images['ball'])
        #ent.v += [1, 7]
        self.world.ents['ball'] = ent
        self.addEnt(ent)

    def addEnt(self, ent):
        """Add an entity to the draw list."""
        self.objects.append(ent)

    def loadImages(self):
        """Load every sprite image declared in World.image_names."""
        logging.debug("Loading images")
        for name in World.image_names.keys():
            self.images[name] = pygame.image.load(World.image_names[name])
コード例 #23
0
# Smoke-test script: run ball detection on the live camera feed for
# roughly ten seconds, pumping the OpenCV GUI event loop in between.
from vision.vision import Vision, Feature
import cv
import cv2
import time
vis = Vision(True)  # assumes True enables debug output -- TODO confirm
time_start = time.time()
# Loop for ~10 s; waitKey(10) services OpenCV's window event loop.
while time.time() - time_start < 10:
    vis.detectObjects(Feature.Ball)
    cv2.waitKey(10)
コード例 #24
0
ファイル: main3.py プロジェクト: mvernacc/maslab-staff-2013
def main(argv):
    """Competition entry point (main3.py): drive the robot with simple
    reactive control.

    Flow: wait for a bump-sensor press to choose the ball colour
    (left bumper -> green, right bumper -> red), start the pickup
    roller, then for ~170 s chase balls (first 120 s) or walls
    (afterwards), backing off whenever a bumper is hit.
    """
    # side = "red"

    # try:
    #     opts, args = getopt.getopt(argv,'', ['color='])
    # except getopt.GetoptError:
    #     print "usage: 'main.py --color=<c>', where <c> is 'red' or 'green'"
    # for opt, arg in opts:
    #     if opt == '--color':
    #         if arg == 'green':
    #             side = "green"
    #             print 'looking for green balls'
    #         elif arg == 'red':
    #             print 'looking for red balls'

    # Hardware setup: drive motors, pickup roller and bump sensors.
    ardu = arduino.Arduino()
    motor_left = arduino.Motor(ardu, 0, 9, 8) # current pin, direction pin, pwm pin
    motor_right = arduino.Motor(ardu, 0, 7, 6)
    motor_pickup = arduino.Motor(ardu, 0, 12, 11)
    bump_right = arduino.DigitalInput(ardu, 22)
    bump_left = arduino.DigitalInput(ardu, 23)
    ardu.run()  # start the Arduino communication thread

    vis = Vision(True)
    end_now = False

    # Block until exactly one bumper is pressed; the sign of
    # (left - right) selects which ball colour to hunt.
    start_code = bump_left.getValue() - bump_right.getValue()
    while start_code == 0:
        start_code = bump_left.getValue() - bump_right.getValue()
    if start_code > 0:
        side = "green"
        print "green"
    else:
        side = "red"
        print "red"
    cv2.waitKey(1000)

    # Run the pickup roller for the whole match.
    motor_pickup.setSpeed(60)

    start_time = time.time()
    try:
        while not end_now:
            time_elapsed = time.time() - start_time
            # Bump handling: back away, then pivot away from the obstacle.
            if bump_right.getValue():
                print 'right bump'
                motor_right.setSpeed(-200)
                motor_left.setSpeed(-50)
                cv2.waitKey(1000)
                motor_right.setSpeed(-100)
                motor_left.setSpeed(100)
                cv2.waitKey(500)
                motor_right.setSpeed(0)
                motor_left.setSpeed(0)
            if bump_left.getValue():
                print 'left bump'
                motor_right.setSpeed(-50)
                motor_left.setSpeed(-200)
                cv2.waitKey(1000)
                motor_right.setSpeed(100)
                motor_left.setSpeed(-100)
                cv2.waitKey(500)
                motor_right.setSpeed(0)
                motor_left.setSpeed(0)
            vis.grab_frame()
            # First 120 s: chase balls; afterwards head for walls.
            if time_elapsed < 120:
                feat_pos = vis.detect_balls(side)
            else:
                feat_pos = vis.detect_walls()
            if feat_pos != None:
                # Steer proportionally to the detected angle offset.
                angle = int(feat_pos)
                print angle 
                motor_right.setSpeed(60 - angle)
                motor_left.setSpeed(60 + angle)
            else:
                # Nothing seen: spin in place to search.
                print "No ball"
                motor_right.setSpeed(-40)
                motor_left.setSpeed(40)
            cv2.waitKey(20)
            # Hard stop shortly before the 180 s match ends.
            if time_elapsed > 170:
                end_now = True
                break
    except KeyboardInterrupt:
        print "ending..."
    # Shut everything down cleanly.
    time.sleep(0.1)
    motor_right.setSpeed(0)
    motor_left.setSpeed(0)
    motor_pickup.setSpeed(0)
    time.sleep(0.1)
    ardu.stop()
コード例 #25
0
ファイル: simulator.py プロジェクト: eliask/SDP2011-Robotniks
class Simulator(object):
    """Game simulator: owns the pygame screen, the game entities, an
    optional simulator->vision bridge and the AI players, and drives
    the main 25 FPS loop.

    Constructor keyword arguments are assigned directly onto the
    instance (e.g. world, colour, real_world, headless, vision, pitch,
    robot1, robot2).
    """

    # Immutable option defaults; instances may override them via kwargs.
    headless = False   # run without opening a visible window
    vision = None      # truthy -> start the vision bridge in initVision()
    pitch = None       # background pitch image provider (has .get())
    robot1 = None      # (ai_class_or_None, is_real) for robot 1
    robot2 = None      # (ai_class_or_None, is_real) for robot 2

    def __init__(self, **kwargs):
        """Create a simulator; kwargs become instance attributes.

        FIX: objects/robots/images/ai used to be *class-level* mutable
        attributes, silently shared by every Simulator instance. They
        are now created per instance, before the kwargs loop so a
        caller can still override them explicitly.
        """
        self.objects = []   # every drawable entity
        self.robots = []    # robot entities only
        self.images = {}    # sprite name -> loaded pygame surface
        self.ai = []        # instantiated AI players

        logging.debug("Simulator started with the arguments:")
        for k, v in kwargs.items():
            self.__setattr__(k, v)
            logging.debug("\t%s = %s", k, v)

    def initVision(self):
        """Start the simulator->vision bridge when vision is requested."""
        if self.vision:
            logging.info("Starting simulator vision bridge")
            world = self.world
            if not self.real_world:
                # Vision must always use the real World
                world = common.world.World(self.colour)
            self.vision = Vision(simulator=self, world=world)
            # Frames are handed to vision through this temporary bitmap.
            self.visionFile = tempfile.mktemp(suffix='.bmp')

    def drawEnts(self):
        """Draw background and sprites, feed a frame to vision, then
        blit and reset the overlay and flip the display (if visible)."""
        if self.pitch:
            bg = self.pitch.get()
            self.screen.blit(bg, (0,0))
        self.sprites.draw(self.screen)

        if self.vision:
            pygame.image.save(self.screen, self.visionFile)
            self.vision.processFrame()
        # Update overlay after we've passed the "raw image" to vision
        self.screen.blit(self.overlay, (0,0))
        # Make the overlay "blank" again. Should be completely
        # transparent but I don't know how to do that
        self.overlay.fill( (130,130,130,255))
        if not self.headless:
            pygame.display.flip()

    def initScreen(self):
        """Create the drawing surfaces (offscreen when headless)."""
        logging.debug("Creating simulator screen")
        if self.headless:
            self.screen = pygame.Surface(World.Resolution)
            # FIX: drawEnts() always blits self.overlay; previously it
            # was never created in headless mode (AttributeError).
            self.overlay = pygame.Surface(World.Resolution)
        else:
            pygame.display.set_mode(World.Resolution)
            pygame.display.set_caption('SDP 9 Simulator')
            self.screen = pygame.display.get_surface()
            self.overlay = pygame.Surface(World.Resolution)
            self.overlay.convert_alpha()
            self.overlay.set_alpha(100)

    def makeObjects(self):
        """Create both robots (colours randomly assigned), the ball
        when both robots are simulated, and the sprite group."""
        logging.debug("Creating game objects")

        colours = ('blue', 'yellow')
        if random() < 0.5:
            col1, col2 = colours
        else:
            col2, col1 = colours

        logging.info("Robot 1 is %s. Robot 2 is %s", col1, col2)
        self.makeRobot(World.LeftStartPos, col1, 0, self.robot1[0])
        self.makeRobot(World.RightStartPos, col2, -pi, self.robot2[0])

        # Only make a real ball when there are two simulated robots
        if len(self.robots) == 2:
            pos = World.Pitch.center
            logging.info("Creating a ball at %s", pos2string(pos))
            self.makeBall(pos)
        else:
            logging.info("Not making a simulated ball with real robots")

        self.sprites = pygame.sprite.RenderPlain(self.objects)

    def setRobotAI(self, robot, ai):
        """Placeholder: reclassify a simulated robot as AI-driven.

        TODO: just delete the image and reclassify the robot as a real
        robot.
        """
        pass

    def initAI(self):
        """Instantiate the AI players declared by robot1/robot2.

        Each is a pair (ai_class_or_None, is_real); a real robot gets a
        RealRobotInterface, a simulated one drives its sprite directly.
        """
        logging.debug("Initialising AI")

        ai1, real1 = self.robot1
        ai2, real2 = self.robot2

        if ai1 and real1:
            real_interface = RealRobotInterface()
            #meta_interface = MetaInterface(real_interface, self.robots[0])
            ai = ai1(self.world, real_interface)
            self.ai.append(ai)
            robotSprite = self.robots[0]
            #self.robots[0] = ai
            #del robotSprite
            self.setRobotAI(self.robots[0], ai)
            logging.debug("AI 1 started in the real world")
        elif ai1:
            self.ai.append( ai1(self.world, self.robots[0]) )
            logging.debug("AI 1 started in the simulated world")
        else:
            logging.debug("No AI 1 present")

        if ai2 and real2:
            # TODO: reverse sides here
            ai = ai2(self.world, RealRobotInterface())
            self.ai.append(ai)
            robotSprite = self.robots[0]
            self.robots[1] = ai
            #del robotSprite
            self.setRobotAI(self.robots[1], ai)
            logging.debug("AI 2 started in the real world")
        elif ai2:
            self.ai.append( ai2(self.world, self.robots[1]) )
            logging.debug("AI 2 started in the simulated world")
        else:
            logging.debug("No AI 2 present")

    def runAI(self):
        """Run one step of every AI player."""
        for ai in self.ai:
            ai.run()

    def run(self):
        """Initialise everything, then loop forever at 25 FPS:
        handle input, draw, update sprites, run the AIs."""
        pygame.init()
        self.clock = pygame.time.Clock()
        self.initVision()
        self.initScreen()
        self.loadImages()
        self.makeObjects()
        self.world.assignSides()
        self.initAI()
        self.initInput()
        # by initialising the input after the AI, we can control even
        # AI robots with keyboard
        self.drawEnts()

        while True:
            self.handleInput()
            self.clock.tick(25)
            self.drawEnts()
            self.sprites.update()
            self.runAI()

    def initInput(self):
        """Create the keyboard input handler for both robots."""
        self.input = Input(self, self.robots[0], self.robots[1])

    def handleInput(self):
        """Dispatch pending pygame events; QUIT exits the process."""
        for event in pygame.event.get():
            if event.type == QUIT:
                sys.exit(0)
            else:
                #logging.debug("Got input event: %s", event)
                self.input.robotInput(event)

    def makeRobot(self, pos, colour, angle, ai):
        """Create a robot entity and register it in world/robots/objects."""
        ent = Robot(self, pos, self.images[colour], angle)
        ent.side = colour

        self.world.ents[colour] = ent
        self.robots.append(ent)
        self.addEnt(ent)

    def makeBall(self, pos):
        """Create the ball entity and register it in the world."""
        ent = Ball(self, pos, self.images['ball'])
        #ent.v += [1, 7]
        self.world.ents['ball'] = ent
        self.addEnt(ent)

    def addEnt(self, ent):
        """Add an entity to the draw list."""
        self.objects.append(ent)

    def loadImages(self):
        """Load every sprite image declared in World.image_names."""
        logging.debug("Loading images")
        for name in World.image_names.keys():
            self.images[name] = pygame.image.load(World.image_names[name])
コード例 #26
0
ファイル: main.py プロジェクト: Luddzik/sdp_12
class Controller:
    """
    This class aims to be the bridge in between vision and strategy/logic:
    it sets up communications, camera, vision and the planner, then runs
    the capture/locate/plan/draw loop in main().
    """
    # Communications link to the robot; set in __init__.
    robotCom = None

    # Set to True if we want to use the real robot.
    # Set to False if we want to print out commands to console only.
    USE_REAL_ROBOT = True

    def __init__(self,
                 pitch,
                 color,
                 our_side,
                 video_port=0,
                 comm_port='/dev/ttyACM0',
                 comms=1):
        """
        Entry point for the SDP system: sets up robot communications,
        the planner, camera, vision, post-processing and the GUI.

        Params:
            [int] pitch                     0 - main pitch, 1 - secondary pitch
            [string] color                  our team color - 'yellow' or 'blue'
            [string] our_side               the side we're on - 'left' or 'right'
            [int] video_port                port number for the camera
            [string] comm_port              serial device for the arduino
                                            (NOTE(review): unused in this method)
            [int] comms                     NOTE(review): unused in this method
        """
        assert pitch in [0, 1]
        assert color in ['yellow', 'blue']
        assert our_side in ['left', 'right']

        self.pitch = pitch
        # Set up communications if there are any.
        # NOTE(review): if this raises, self.robotComs is left unset.
        try:
            self.robotComs = RobotCommunications(debug=False)
        except:
            print("arduino unplugged moving on to vision")

        # Set up robot communications to be sent to the planner;
        # fall back to a console-only test stub when the radio is absent.
        if self.USE_REAL_ROBOT:
            try:
                self.robotCom = RobotCommunications(debug=False)
            except:
                self.robotCom = TestCommunications(debug=True)
                print 'Not connected to the radio, using TestCommunications instead.'
        else:
            self.robotCom = TestCommunications(debug=True)

        # Set up main planner
        if(self.robotCom is not None):
            # currently we are assuming we are the defender
            self.planner = Planner(our_side=our_side,
                                   pitch_num=self.pitch,
                                   robotCom=self.robotCom,
                                   robotType='defender')

        # Set up the camera and grab one frame to establish the frame
        # geometry (shape and adjusted center) needed by vision.
        self.camera = Camera(port=video_port, pitch=self.pitch)
        frame = self.camera.get_frame()
        center_point = self.camera.get_adjusted_center(frame)

        # Set up vision using the stored color calibration for this pitch.
        self.calibration = tools.get_colors(pitch)
        print self.calibration
        self.vision = Vision(
            pitch=pitch,
            color=color,
            our_side=our_side,
            frame_shape=frame.shape,
            frame_center=center_point,
            calibration=self.calibration)

        # Set up postprocessing for vision output.
        self.postprocessing = Postprocessing()

        # Set up GUI
        self.GUI = GUI(calibration=self.calibration, pitch=self.pitch)

        self.color = color
        self.side = our_side

        self.preprocessing = Preprocessing()


    def main(self):
        """
        This main method  brings in to action the controller class which so far
        does nothing but set up the vision and its ouput. Loops until ESC is
        pressed: capture -> preprocess -> locate -> plan -> draw.
        Calibrations are always saved on exit.
        """
        counter = 1L
        timer = time.clock()
        try:
            c = True
            while c != 27:  # the ESC key

                frame = self.camera.get_frame()
                pre_options = self.preprocessing.options
                # Apply preprocessing methods toggled in the UI
                preprocessed = self.preprocessing.run(frame, pre_options)
                frame = preprocessed['frame']
                if 'background_sub' in preprocessed:
                    cv2.imshow('bg sub', preprocessed['background_sub'])
                # Find object positions
                # model_positions have their y coordinate inverted

                #  IMPORTANT
                model_positions, regular_positions = self.vision.locate(frame)
                model_positions = self.postprocessing.analyze(model_positions)

                # Update planner world beliefs
                self.planner.update_world(model_positions)
                self.planner.plan()

                # Use 'y', 'b', 'r' to change color.
                c = waitKey(2) & 0xFF
                actions = []
                fps = float(counter) / (time.clock() - timer)
                # Draw vision content and actions

                self.GUI.draw(
                    frame, model_positions, actions, regular_positions, fps, None,
                    None, None, None, False,
                    our_color=self.color, our_side=self.side, key=c, preprocess=pre_options)
                counter += 1

        except:
            # This exception is stupid TODO: refactor.
            print("TODO SOMETHING CLEVER HERE")
            raise
        finally:
            # Persist any calibration tweaks made through the UI.
            tools.save_colors(self.pitch, self.calibration)
コード例 #27
0
ファイル: Runner.py プロジェクト: maccery/sdp-group-8
class Runner(object):
    """
    Top-level application loop.

    Wires together the vision pipeline, the GUI, the world model and the
    post-processing, then repeatedly pulls vision data, updates the world
    and executes the task the user selected on stdin.
    """

    # Maps the task name the user types to the name of the corresponding
    # method on ``self.world.task``.  Using a dispatch table instead of a
    # long if-chain also lets task_execution() reject unknown task names
    # instead of crashing (see BUGFIX note there).
    TASK_DISPATCH = {
        'task_vision': 'task_vision',
        'task_move_to_ball': 'task_move_to_ball',
        'task_kick_ball_in_goal': 'task_kick_ball_in_goal',
        'task_move_and_grab_ball': 'task_move_and_grab_ball',
        'task_rotate_and_grab': 'task_rotate_and_grab',
        'task_grab_rotate_kick': 'task_grab_rotate_kick',
        'task_defender': 'task_defender',
        'task_defender_kick_off': 'task_defender_kick_off',
        'task_attacker': 'task_attacker',
        'task_attacker_kick_off': 'task_attacker_kick_off',
        'task_penalty': 'task_penalty',
        # 'task_goalie' deliberately runs the penalty-goalie routine.
        'task_goalie': 'task_penalty_goalie',
    }

    def __init__(self):
        # Set up Arduino communications.
        # self.robot =
        self.calib_file = "vision/calibrations/calibrations.json"
        self.vision = Vision(self.calib_file)
        self.gui = GUI(self.vision.calibrations)
        # Name (str) of the task currently being executed, or None when the
        # user should be prompted for a new one.
        self.task = None

        # Set up world
        self.world = World(self.calib_file)

        # Set up post-processing
        self.postprocessing = PostProcessing()

        self.wait_for_vision = True

    def run(self):
        """
        Constantly update the vision feed and the positions of our models,
        running the selected task every 21st frame so vision can keep up.
        Exit with 'q' (ESC handling relies on ``c`` which is never updated
        in this loop -- NOTE(review): looks vestigial, kept as-is).
        """
        counter = 0
        # BUGFIX: this was time.clock(), but the loop below measures
        # delta_time against time.time().  Mixing the two clocks made the
        # first frame's delta_time meaningless (different epochs), and
        # time.clock() was removed in Python 3.8.
        timer = time.time()

        # Give the Arduino a few seconds to connect before starting.
        print("Connecting to Arduino, please wait till confirmation message")
        time.sleep(4)

        # This asks nicely for goal location, etc
        self.initiate_world()

        try:
            c = True

            while c != 27:  # the ESC key
                if self.task is None:
                    print("Please enter the task you wish to execute:")
                    self.task = sys.stdin.readline().strip()

                t2 = time.time()
                # change of time between frames in seconds
                delta_time = t2 - timer
                timer = t2

                # getting all the data from the world state
                data, modified_frame = self.vision.get_world_state()

                # update the gui
                self.gui.update(delta_time, self.vision.frame, modified_frame, data)

                # Update our world with the positions of robot and ball
                self.world.update_positions(data)

                # Only run the task every 21st cycle; the other cycles let
                # the vision feed catch up.
                if counter % 21 == 0:
                    self.task_execution()

                key = cv2.waitKey(4) & 0xFF
                if key == ord('q'):
                    break
                    # self.save_calibrations()

                counter += 1

        finally:
            pass
            # self.robot.stop()

    def initiate_world(self):
        """
        Interactively configure the world model: which side we defend,
        the pitch regions and goals, team/group colors and pitch bounds.
        """
        print("Which side are we?")
        self.world.our_side = sys.stdin.readline().strip()

        if self.world.our_side == "left":
            self.world.defender_region.left = 40     # 40, 320
            self.world.defender_region.right = 320   # 320, 600
            self.world.attacker_region.left = 320    # 40, 320
            self.world.attacker_region.right = 600   # 320, 600
            self.world.our_goal.x = 40               # 600 or 40
            self.world.our_goal.y = 235
            self.world.their_goal.x = 600            # 40 or 600
            self.world.their_goal.y = 245
        else:
            self.world.defender_region.left = 320    # 40, 320
            self.world.defender_region.right = 600   # 320, 600
            self.world.attacker_region.left = 40     # 40, 320
            self.world.attacker_region.right = 320   # 320, 600
            self.world.our_goal.x = 600              # 600 or 40
            self.world.our_goal.y = 235
            self.world.their_goal.x = 40             # 40 or 600
            self.world.their_goal.y = 245

        # We are blue; opponents are yellow.  Group colors distinguish the
        # two robots within each team.
        self.world.our_robot.team_color = "blue"
        self.world.teammate.team_color = "blue"
        self.world.their_attacker.team_color = "yellow"
        self.world.their_defender.team_color = "yellow"
        self.world.our_robot.group_color = "green"
        self.world.teammate.group_color = "pink"

        self.world.their_attacker.group_color = "pink"
        self.world.their_defender.group_color = "green"

        # Pitch geometry in pixels.
        self.world.safety_padding = 25
        self.world.pitch_boundary_left = 40
        self.world.pitch_boundary_right = 600
        self.world.pitch_boundary_bottom = 450
        self.world.pitch_boundary_top = 30

    def task_execution(self):
        """
        Execute the currently selected task once.

        The task name is looked up in TASK_DISPATCH.  An unknown name is
        reported and cleared so run() prompts again (BUGFIX: the original
        guarded the call with ``if self.task:`` instead of checking that a
        task function was actually found, so any unrecognized task name
        crashed with ``TypeError: 'NoneType' object is not callable``).
        A task that returns truthy is considered completed and cleared;
        otherwise it will be retried on the next cycle.
        """
        if not self.task:
            return

        print("Task: ", self.task)
        method_name = self.TASK_DISPATCH.get(self.task)
        if method_name is None:
            print("Unknown task: ", self.task)
            self.task = None
            return

        task_to_execute = getattr(self.world.task, method_name)
        # if it's executed fine, then we've completed the task. otherwise
        # we're going to loop round and try again
        if task_to_execute():
            self.task = None
            print("Task: COMPLETED")

    def save_calibrations(self):
        """Persist the current vision calibrations back to the JSON file."""
        dump_calibrations(self.vision.calibrations, self.calib_file)