Example #1
    def login(self):
        # The launcher bitmap has empty regions, so template matching fails here.
        # self.locator()
        # Just send keyboard input instead.
        # Decrypt our username and password. If you want to continue,
        # generate a new key for your own credentials, or remove the encryption altogether.
        self.key = self.load_key()
        f = Fernet(self.key)
        pdi.press(['tab'])
        sleep(0.05)
        pdi.typewrite(f.decrypt(self.user).decode())
        sleep(0.05)
        pdi.press(['tab'])
        sleep(0.05)
        pdi.typewrite(f.decrypt(self.pw).decode())
        sleep(0.05)
        pdi.press(['enter'])
        sleep(0.05)

        # Wait for TTR
        self.wincap.wait_hwnd()
        sleep(10.5)
        pdi.press(['up'])
        sleep(4.5)
        self.wincap = WindowCapture(self.haystack_wnd)
        self.vision = Vision("targets/bear.png")
        self.bot = TTRBot((self.wincap.offset_x, self.wincap.offset_y),
                          (self.wincap.w, self.wincap.h),
                          'tooltips/tooltip_bear.png')
Example #2
File: main.py Project: rbaehr/enee408i
def main():
    c1 = LeftRightController(300, 0.25)
    c2 = ForwardBackController(40, 0.2)
    command = Command(serial_port='/dev/ttyACM0')
    vision = Vision(command, c1, c2)

    vision.loop()
Example #3
 def __init__(self, arm):
     self.arm = arm
     print("Begginging drawing recognition game!")
     self.v = Vision()
     self.model = load_model("nn_hog.h5")
     self.categories = ('airplane', 'backpack', 'cactus', 'dog', 'ear',
                        'face', 'garden', 'hamburger', 'icecream', 'jacket')
Example #4
    def __init__(self, id, color, radius, x, y, angle, random_weights=False):
        """Initialize a new Fighter."""
        super().__init__()

        self.id = id
        self.color = color
        self.radius = radius
        self.angle = angle
        self.speed = FIGHTER_SPEED
        self.unit_dx, self.unit_dy = angle_to_unit_x_y(self.angle)
        self.dx, self.dy = angle_to_x_y(self.angle, self.speed)

        self.reset_state()

        # Initialize Q-table with weights
        if random_weights:
            self.Q = np.random.random((NUM_STATES, NUM_ACTIONS))
        else:
            try:
                # Initialize Q-table with weights from previous session
                self.Q = np.loadtxt(MODEL_FILE, delimiter=",")
            except FileNotFoundError:
                print("Model not found, initializing fighter", self.id, "with random weights...")
                self.Q = np.random.random((NUM_STATES, NUM_ACTIONS))

        self.weapon_radius = int(radius / 3)
        self.torso = SmoothCircle(color, radius, x, y)
        self.weapon = SmoothCircle(GRAY, self.weapon_radius,
                                   x=x+radius-self.weapon_radius, y=y+radius, bg_color=color)
        self.vision = Vision(id, self.get_center(), self.angle, GRAY, GRAY, GRAY)

        self.add(self.torso)
        self.add(self.weapon)
        self.add(self.vision)
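
Note: Example #4 restores the Q-table with np.loadtxt but never shows the matching save step. A minimal sketch of the counterpart, assuming the same comma delimiter (the MODEL_FILE path and the save helper are assumptions, not the original source):

import numpy as np

MODEL_FILE = "model.csv"  # hypothetical path; Example #4 defines its own MODEL_FILE

def save_model(Q):
    # Persist the Q-table so the next session can restore it with np.loadtxt.
    np.savetxt(MODEL_FILE, Q, delimiter=",")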
Example #5
    def update(self, actions):
        """Update fighter state/action before it is drawn."""
        # execute action
        action_func_dict = {
            0: self.turn_left,
            1: self.turn_right,
            2: self.move_forward,
            3: self.shoot
        }
        for action_type, do_action in enumerate(actions.tolist()[0]):
            if do_action:
                action_func_dict[action_type]()

        # update state
        prev_state = self.state
        self.state = self.get_state(prev_state)

        # create new vision sprite with updated colors
        self.vision.kill()
        color_left = color_middle = color_right = GRAY
        if self.fighter_on_left:
            color_left = color_middle = ORANGE
        if self.fighter_on_right:
            color_right = color_middle = ORANGE
        if self.bullet_on_left:
            color_left = color_middle = RED
        if self.bullet_on_right:
            color_right = color_middle = RED
        if self.on_target:
            color_middle = GREEN

        self.vision = Vision(self.id, self.get_center(), self.angle,
                             color_left, color_middle, color_right)
        self.add(self.vision)
        self.move_to_back(self.vision)  # layered below fighter sprite
Example #6
class Guard(Program):
    def __init__(self):
        super().__init__()
        self.name = "Guard"

        self.vision = Vision()

    def run(self):
        super().run()

        while True:
            movement = self.vision.findMovement()

            if movement is None:
                print("No movement found")
            else:
                print("Movement found at " + str(movement[0]) + ", " +
                      str(movement[1]))

            value = input("\nQuit? y/n ")
            if value == "y":
                break

        self.quit()

    def quit(self):
        super().quit()
        self.vision.quit()
Example #7
    def __init__(self):
        self.np_image = None
        self.move_robot = MoveRobot(UR_IP)
        self.vision = Vision()
        self.aruco = Calibration()

        print("[I] Controller running")
Example #8
File: bot.py Project: DorskFR/poepybot
 def __init__(self, ratio=1):
     self.RUN = True
     self.TOGGLE = False
     self.vision = Vision(ratio=ratio)
     self.keyboard = Keyboard(sleep=1)
     self.KEYCODES = {1: 18, 2: 19, 3: 20, 4: 21, 5: 23}
     self.potions_box = {"top": 850, "left": 1920, "width": 500, "height": 200}
     self.menu_box = {"top": 300, "left": 1920 + 680, "width": 320, "height": 260}
     self.mouse = Mouse()
Example #9
 def __init__(self):
     self.vision = Vision(
         {
             "top": 850,
             "left": 1920,
             "width": 500,
             "height": 200
         },
         ratio=0.25)
Example #10
def test_one_car_closed_day():
    image_bytes = load_bytes("test/unit/one_car_closed_day.jpg")

    flexmock(Vision)
    Vision.should_receive("load_multipart_stream").and_return(image_bytes).once()
    Vision.should_receive("is_point_close_enough").and_return(True).once()
    is_closed, location = Vision("localhost", 8081).look_if_closed("test/unit/template.jpg")

    assert is_closed
Example #11
def test_one_car_closed_day():
    image_bytes = load_bytes('test/unit/one_car_closed_day.jpg')

    flexmock(Vision)
    Vision.should_receive('load_multipart_stream').and_return(
        image_bytes).once()
    Vision.should_receive('is_point_close_enough').and_return(True).once()
    is_closed, location = Vision('localhost',
                                 8081).look_if_closed('test/unit/template.jpg')

    assert is_closed
Example #12
def main():
    parser = argparse.ArgumentParser(description='MSE430 robot server '
                                     'for CS 470')
    parser.add_argument('robot', help='Name or number of the robot to control')
    parser.add_argument('--port', type=int)

    Vision.cli_arguments(parser)
    Robot.cli_arguments(parser)

    args = parser.parse_args()

    Server(**vars(args)).run()
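
Vision.cli_arguments and Robot.cli_arguments are not shown in this snippet. A hedged sketch of the pattern (hypothetical flags, not the actual mse430 options): each class exposes a static method that registers its own flags on the shared parser, and Server(**vars(args)) later lets every component pick its options out of **kwargs, as Example #18's __init__ does with kwargs['port'].

import argparse

class Vision:
    @staticmethod
    def cli_arguments(parser):
        # Hypothetical flags; the real mse430 Vision may define different ones.
        parser.add_argument('--camera', type=int, default=0,
                            help='Index of the camera to capture from')
        parser.add_argument('--show-video', action='store_true',
                            help='Display the annotated camera feed')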
Example #13
    def __init__(
        self,
        symbol1,
        symbol2,
        contract1,
        contract2,
        port1=6001,
        port2=6002,
        history_period=400,
        history_longer=100,
        std_bands=0.5,
    ):

        self.symbol1 = symbol1
        self.symbol2 = symbol2
        self.contract1 = contract1
        self.contract2 = contract2

        self.PORT1 = port1
        self.PORT2 = port2
        self.HISTORY_PERIOD = history_period
        self.HISTORY_LONGER = history_longer
        self.STD_BANDS = std_bands

        # to: the main condition variables
        self.unit_band_up = None
        self.unit_band_down = None
        self.condition_evaluate_timestamp = None

        # Track whether each Vision has finished loading its historical data.
        # Once loading completes, this list should hold two True values.
        self.vision_history_collect_complete = []

        self.ultron_started = False
        self.ultron_is_working = False
        self.ultron_already = False

        # Give the Visions an interface for rewriting Ultron's ticker data
        self.vision_ticker_dict = {1: None, 2: None}

        self.vision_tickers_dict = {1: None, 2: None}

        # Attach a pair of Visions to Ultron
        self.vision1 = Vision(ultron=self,
                              vision_num=1,
                              port=port1,
                              history_period=history_period,
                              history_longer=history_longer)
        self.vision2 = Vision(ultron=self,
                              vision_num=2,
                              port=port2,
                              history_period=history_period,
                              history_longer=history_longer)
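
The snippet initializes unit_band_up and unit_band_down to None but the computation happens elsewhere. A sketch of one conventional definition, mean plus/minus STD_BANDS standard deviations over the recent spread (an assumption about the strategy, not the original code; evaluate_bands is a hypothetical helper):

import numpy as np

def evaluate_bands(self, spread_history):
    # spread_history: recent spread between contract1 and contract2.
    window = np.asarray(spread_history[-self.HISTORY_PERIOD:])
    mean, std = window.mean(), window.std()
    self.unit_band_up = mean + self.STD_BANDS * std
    self.unit_band_down = mean - self.STD_BANDS * std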
Example #14
    def __init__(self, target, haystack_wnd='Toontown Rewritten'):
        # WindowCapture defaults to TTR; otherwise the window is chosen from main.
        self.wincap = WindowCapture(window_name=haystack_wnd)
        # Previously, we had to use the object to call this function.
        # Now that it is static, we can call the class directly.
        # wincap.list_window_names()
        # WindowCapture.list_window_names()

        # check foreground window title
        current = self.wincap.title()
        """ 
        The Encryption Method I used:
            click.write_key()
            key = click.load_key()
            message1 = user.encode()
            print(message1) - bytes now
            message2 = pw.encode()
            print(message2)
            f = Fernet(key)
            encrypted1 = f.encrypt(message1)
            encrypted2 = f.encrypt(message2)

            print(encrypted1)
            print(encrypted2)
        """
        # Decrypt our username and password. If you want to use this,
        # remove the encryption method or generate your own encrypted values,
        # and add your own username/password instead.
        self.key = self.load_key()
        f = Fernet(self.key)

        # Target is selectable from main file now.
        if (current == "Toontown Rewritten Launcher"):
            # Make TextBox Bigger
            self.vision_target = Vision('TextBox.png')

            # The launcher bitmap has empty regions, so template matching fails here.
            # self.locator()
            # Just send keyboard input instead.
            pdi.press(['tab'])
            time.sleep(0.05)
            pdi.typewrite(f.decrypt(self.user).decode())
            time.sleep(0.05)
            pdi.press(['tab'])
            time.sleep(0.05)
            pdi.typewrite(f.decrypt(self.pw).decode())
            time.sleep(0.05)
            pdi.press(['enter'])
            time.sleep(0.05)
        else:
            self.vision_target = Vision(target)
            # Only find best match
            self.locator(multi=False)
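
The docstring in this example outlines the one-off encryption procedure. A runnable version of that outline, assuming write_key/load_key store the Fernet key in a key.key file (the file name is an assumption):

from cryptography.fernet import Fernet

def write_key(path="key.key"):
    # Generate a Fernet key once and keep it out of version control.
    with open(path, "wb") as key_file:
        key_file.write(Fernet.generate_key())

def load_key(path="key.key"):
    with open(path, "rb") as key_file:
        return key_file.read()

# One-off step: encrypt the credentials, then store the printed tokens
# as self.user and self.pw in the bot.
# write_key()
# f = Fernet(load_key())
# print(f.encrypt(user.encode()), f.encrypt(pw.encode()))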
Example #15
    def command_chain(self, command, tooltip):
        self.wincap.stop()
        self.vision.stop()
        self.bot.stop()

        self.wincap = WindowCapture(window_name=self.haystack_wnd)
        self.vision = Vision(command)
        self.bot = TTRBot((self.wincap.offset_x, self.wincap.offset_y),
                          (self.wincap.w, self.wincap.h), tooltip)

        self.wincap.start()
        self.vision.start()
        self.bot.start()
Example #16
 def __init__(self):
     print 'Starting a new game'
     self.coordinates = (0, 0)
     self.location = GameLocation(self.x_offset, self.y_offset, self.width,
                                  self.height)
     self.vision = Vision(self.location)
     self.controller = Controller(self.x_offset, self.y_offset)
     self.phone = Phone()
     self._build_buttons()
     self._reset_food()
     self._build_customers()
     self._build_recipes()
     self.mat = GameObject('Mat', (199, 380))
Example #17
    def execute( self, restartButton=(528,410) ):
        print("executing!")
        pyautogui.click(restartButton)
        vision = Vision()
        vision.find_game()
        print(len(self.__genomes))
        gen = 0

        for genome in self.__genomes:
            print("genome "+str(gen), end="")
            gen += 1
            vision.reset()
            sleep(1)
            jump()
            dec = 0
            while True:
                try:
                    obs = vision.find_next_obstacle()
                    inputs = [obs['distance']/1000, obs['lenght']/70000, obs['height']/1000, obs['speed']/10]
                    outputs = genome.forward(np.array(inputs, dtype=float))
                    if outputs[0] > 0.55:
                        jump()
                        dec-=1
                except Exception as e:
                    break
            genome.fitness = vision.get_fitness()
            print(", fitness: "+str(genome.fitness))
Example #18
File: server.py Project: phq7621y/mse430
 def __init__(self, robot, *args, **kwargs):
     self.loop = asyncio.get_event_loop()
     self.robot = Robot(robot, self.loop, **kwargs)
     self.vision = Vision(self.loop, self.robot.num, **kwargs)
     self.server = None
     self.port = kwargs['port'] or 55555
     self.futures = []
     self.commands = {
         'where': self.where,
         'speed': self.speed,
         'power': self.power,
         'param': self.param,
         'shutdown': self.shutdown,
         'help': self.help,
     }
Example #19
class VisionBlargh(Blargh):
    def __init__(self):
        # Initialize the Vision wrapper class for the vision library
        self.vision = Vision()

    def step(self, inp):
        # TODO: Handle more than one ball; handle walls
        # Update the frame
        self.vision.bar()
        # If there is more than 0 balls, then return the location of the first
        # ball-blob
        if (self.vision.getNumBalls() > 0):
            return (self.vision.getR(0), self.vision.getTheta(0))
        else:
            return None
Example #20
 def __init__(self):
     self.window_name = 'Gemgem'
     self.board = np.zeros((8, 8))
     self.static_templates = {
         1: 'graphics/gem1_background.png',
         2: 'graphics/gem2_background.png',
         3: 'graphics/gem3_background.png',
         4: 'graphics/gem4_background.png',
         5: 'graphics/gem5_background.png',
         6: 'graphics/gem6_background.png',
         7: 'graphics/gem7_background.png',
     }
     self.tile_width = 64
     self.vision = Vision(self.static_templates, self.window_name)
     self.control = Controller()
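
Vision here receives a dict of gem-background templates plus the window name, so the 8x8 board is presumably rebuilt by template matching. A minimal sketch of that idea with OpenCV (an assumption about how this Vision works internally, not its actual code):

import cv2
import numpy as np

def match_template(screenshot_gray, template_path, threshold=0.8):
    # Return the (y, x) positions where the template scores above threshold.
    template = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)
    scores = cv2.matchTemplate(screenshot_gray, template, cv2.TM_CCOEFF_NORMED)
    return np.argwhere(scores >= threshold)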
Example #21
async def game(ctx):
    await _join(ctx)
    channel = ctx.message.guild.voice_client.channel
    await asyncio.gather(
        set_mute_all(channel.members, mute=False),
        ctx.message.channel.send('Starting game in {}'.format(channel.name)),
        client.change_presence(activity=discord.Game(name='Among Us')),
    )

    vision = Vision(static_templates, monitor)
    fsm = FSM(
        on_change_state={
            'playing':
            lambda: asyncio.run_coroutine_threadsafe(
                set_mute_all(channel.members), main_loop),
            'voting':
            lambda: asyncio.run_coroutine_threadsafe(
                set_mute_all(channel.members, mute=False), main_loop)
        })
    controller = Controller(fsm, vision)

    async def step():
        controller.step()
        if not should_stop:
            await asyncio.sleep(1)
            await step()

    global should_stop
    should_stop = False
    await step()
Example #22
  def __init__(self, draw=True, colour='blue', side='left', main_pitch=True, load_bg_model=False, corners_from_mem=False):
    _filepath                       = sys.argv[0]
    _relpath                        = os.path.split(_filepath)[0]
    if _relpath != '': _relpath = _relpath + '/'
    
    self._bg                        = cv.LoadImage(_relpath + "b02.jpg")
    self._draw                      = draw
    self._colour                    = colour
    self._side                      = side
    self._n_avg_bg                  = 30
    self._corners_from_mem          = corners_from_mem
    
    self._last_update               = 0
    self._fps_update_interval       = 0.5
    self._num_frames                = 0
    self._fps                       = 0

    self._calibrate                 = Calibrate((9,6))
    self.calibrate_cam(from_mem     = True)
    self._config                    = yaml.load(file('config/020311-0830-main_pitch.yaml', 'r'))
    self._vision                    = Vision(self._bg, self._calibrate, self._config, fix_distortions = True, main_pitch=main_pitch, corners_from_mem=self._corners_from_mem)
    self._vision_com                = VisionCom(self._vision, colour=self._colour, side=self._side)
    self._ba                        = None
    self._load_bg_model             = load_bg_model
    
    self._frame_name                = 'annotated'
    self._capture                   = Capture()
    
    self._base_time = 0
Example #23
    def __init__(self, agent_hps=None, vision_hps=None):
        super(Agent, self).__init__()
        # agent hps
        self.hidden_size = agent_hps["hidden_size"]
        self.vocab_size = agent_hps["vocab_size"]
        self.emb_size = agent_hps["emb_size"]
        self.message_length = agent_hps["message_length"]

        # shared embedding layer
        self.shared_embedding = nn.Embedding(self.vocab_size, self.emb_size)
        self.sos_embedding = nn.Parameter(torch.zeros(self.emb_size))

        # shared vision + linear layers
        self.input_channels = vision_hps["input_channels"]
        self.vision_ckpt = vision_hps["vision_ckpt"]
        self.vision = Vision(self.input_channels)
        self.fc = nn.Linear(576, self.hidden_size)

        # sender modules
        self.sender_decoder = nn.LSTMCell(self.emb_size, self.hidden_size)
        self.sender_hidden_to_output = nn.Linear(self.hidden_size,
                                                 self.vocab_size)

        # receiver modules
        self.receiver_encoder = RnnEncoder(self.vocab_size,
                                           self.shared_embedding,
                                           self.hidden_size, "lstm")
Example #24
class NetworkTablesTestRobot(wpilib.IterativeRobot):

    def robotInit(self):
        self.vision = Vision()

    def teleopPeriodic(self):
        contours = self.vision.getContours()
        print(Vision.targetCenter(contours), Vision.targetDimensions(contours))
Example #25
    def __init__(self,
                 target='doodle.png',
                 tooltip='doodle.png',
                 haystack_wnd='Toontown Rewritten'):

        self.haystack_wnd = haystack_wnd
        # Our list of commands to execute in sequence
        self.targetList = [
            "targets/speedchat_bubble.png", "targets/Pets.png",
            "targets/good.png", "targets/Tricks.png", "targets/Play_dead.png",
            "targets/Scratch.png", "targets/Feed.png", "targets/Tired.png",
            "targets/Excited.png"
        ]

        self.tooltipList = [
            "tooltips/tooltip.png", "tooltips/Pets_tt.png",
            "tooltips/good_tt.png", "tooltips/Tricks_tt.png",
            "tooltips/Play_dead_tt.png", "tooltips/Scratch_tt.png",
            "tooltips/Feed_tt.png", "tooltips/Tired_tt.png",
            "tooltips/Excited_tt.png"
        ]

        # WindowCapture defaults to TTR; otherwise the window is chosen from main.
        self.wincap = WindowCapture(window_name=haystack_wnd)

        # WindowCapture.list_window_names()
        # check foreground window title
        current = self.wincap.title()

        # Only two modes. Does not work from character select.
        if (current == "Toontown Rewritten Launcher"):
            self.login()
        else:
            self.vision = Vision(target)
            self.bot = TTRBot((self.wincap.offset_x, self.wincap.offset_y),
                              (self.wincap.w, self.wincap.h))

        # When giving our property objects new parameters
        # we must stop and start again, otherwise "stopped"
        # property gets reset to True.
        self.wincap.start()
        self.vision.start()
        self.bot.start()

        self.locator()
Example #26
def run(imagepath):
    #
    enhancer = Enhancer()
    enhancer.enhance(imagepath)

    ##image = Image.open('pill3.jpg')
    ##image = ImageEnhance.Contrast(image).enhance(5)
    ##image.PIL.save("pic.jpg")

    ##quit()
    # get the labels from the Google Vision OCR
    vision = Vision()
    labels = vision.detect_document(imagepath)
    print(labels)
    # use Google Vision to detect dominant colors, then extract common color name using a custom library
    colors = vision.detect_properties(imagepath)
    print(colors)
    if len(labels) == 0 or len(colors) == 0:
        print('Could not classify')
        return 'Unidentified'

    api_call = "https://datadiscovery.nlm.nih.gov/resource/crzr-uvwg.json?" + "&splimprint=" + labels[
        0]
    # concatinate more if there are more than 1 imprints
    for i in range(1, len(labels)):
        api_call = api_call + ";" + labels[i]
    print(api_call)
    # make the API GET request
    contents = urllib.request.urlopen(api_call).read()
    # converts the bytes object to string, then to json
    json_response = json.loads(contents.decode())
    # get the number of matches
    if len(json_response) >= 1:
        return json_response[0]['medicine_name']
    # no imprint match: retry the query with the detected colors
    json_res = try_with_colors(api_call, colors)
    if json_res is None:
        return 'Unidentified'
    return json_res[0]['medicine_name']
Example #27
class Autopilot:
    def __init__(self):
        self.vision = Vision(self._on_new_capture)
        self.motion = Motion()

    def start(self):
        self.motion.set_values(0,0)
        self.motion.enable()
        self.vision.start()

    def stop(self):
        self.motion.disable()
        self.vision.stop()

    @property
    def is_running(self):
        return self.vision.is_observing

    def _on_new_capture(self, objects):
        # Speed equals the probability of the most probable object detected.
        # That is, if confident - move fast
        speed = objects[0].probability

        total_weight = 0
        direction_weight = 0
        for obj in objects:
            # box size times probability
            weight = (obj.box[2] - obj.box[0]) * (obj.box[3] - obj.box[1]) * obj.probability
            # if it's not a duck - discount heavily
            if obj.category != 1:
                weight /= 10
            total_weight += weight
            # box_mid_point = (obj.box[3] + obj.box[1]) / 2
            # translate [0,1] into [-1,1]
            # direction = box_mid_point * 2 - 1
            direction = (obj.box[3] + obj.box[1]) - 1
            direction_weight += direction * weight

        direction = direction_weight / total_weight

        self.motion.set_values(speed, direction)
Example #28
def main():
    load_dotenv()

    PREDICTION_KEY = os.getenv("PREDICTION_KEY")
    VISION_ENDPOINT = os.getenv("VISION_ENDPOINT")
    ITERATION_ID = os.getenv("ITERATION_ID")
    ITERATION_NAME = os.getenv("ITERATION_NAME")

    vision = Vision(PREDICTION_KEY, VISION_ENDPOINT, ITERATION_ID, ITERATION_NAME)

    try:
        argv = sys.argv[1:]
        opts, args = getopt.getopt(argv, "i:v:c:h")
    except getopt.GetoptError:
        print ("Bad arguments! Use -h for help")
        exit(1)

    # really cheap arg parsing
    # TODO: add more options using argparse
    fps = int(os.getenv("FPS"))
    size = (int(os.getenv("RESOLUTION_X")), int(os.getenv("RESOLUTION_Y")))
    for opt, arg in opts:
        if opt == "-h":
            print ("-" * 30)
            print ("Usage: python main.py <input_option> [-h]")
            print ("Input options (choose one):")
            print ("-i <path to image file> : Analyzes one image")
            print ("-v <path to video file> : Analyzes video")
            print ("-c <camera port number> : Analyzes camera stream at specified port number")
            print ("\n-h : display help")
            print ("-" * 30)
            exit(0)
        elif opt == "-i":
            predictions, people_count, violations = vision.analyzeFrame(arg, dist_threshold=50)
            print ("Number of people:", people_count)
            print ("Violations:", violations)
            draw_objects(arg, predictions, wait=True)
        elif opt == "-v":
            analyze_video(arg, vision, desired_fps=fps, size=size, show=True)
        elif opt == "-c":
            analyze_video(int(arg), vision, desired_fps=fps, size=size, show=True)
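
The comments above flag the getopt block as "really cheap arg parsing" with a TODO for argparse. A hedged sketch of the equivalent interface, with the three input modes made mutually exclusive (parse_args is a hypothetical helper, not part of the original):

import argparse

def parse_args(argv=None):
    parser = argparse.ArgumentParser(
        description='Analyze an image, a video file, or a camera stream')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-i', metavar='IMAGE', help='analyze one image file')
    group.add_argument('-v', metavar='VIDEO', help='analyze a video file')
    group.add_argument('-c', metavar='PORT', type=int,
                       help='analyze the camera stream at this port number')
    return parser.parse_args(argv)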
Example #29
    def do_start(self):
        self.queue = Queue.Queue(CACHE_LENGTH)
        self.cache = []

        self._bind_signal()

        self.init_gpio()

        self.decelerator = Decelerator()
        self.decelerator.start()

        self.ultrasonic = Ultrasonic(self)
        self.ultrasonic.start()

        self.vision = Vision()
        self.vision.start()

        self.cd_thread = Thread(name="DriverThread", target=self.run)
        self.cd_thread.start()

        logger.info("start completely.")
Example #30
    def modeOverTheRainbow(self):
        self.vision = Vision(self.steering, self.motors)
        slVa = VisionAttributes()
        slVa.startTiltAngle = 0.12
        slVa.startPanAngle = -1.00
        slVa.targetMinSize = 20
        slVa.targetMaxSize = 2200
        slVa.minPanAngle = -1.0
        slVa.maxPanAngle = 1.0
        slVa.targetColorPattern = Vision.COLOUR_WHITE
        slVa.topSpeed = 1.0
        slVa.topSpinSpeed = 1.0

        self.motors.move(-1, -1, 0.3)
        time.sleep(0.8)
        self.motors.stop()

        rainbowPtc = PanTiltController(self.ub, 270, 135)
        rainbowPtc.initPanServo(5000, 1000)
        self.vision.initialise(slVa, rainbowPtc)
        time.sleep(0.5)

        self.vision.scan()
        print("Scan Complete")
        index = 0
        prev_position = 0

        # Note: the original overwrote ball_position with ball_positions[0]
        # here, so every iteration processed the same ball; that line is removed.
        for ball_position in self.vision.ball_positions:
            print("Size: " + str(ball_position.size) + ', x :' +
                  str(ball_position.x) + ', y :' + str(ball_position.y) +
                  ', pan-position :' + str(ball_position.pan_position) +
                  ', angle : ' + str(ball_position.pan_position * 135) +
                  ', Colour:' + str(ball_position.colour))
            if index > 0:
                prev_position = self.vision.ball_positions[index - 1].pan_position
            index = index + 1
            self.vision.goto_ball_position(ball_position, prev_position)
Example #31
class Display:
    def __init__(self):
        self.vision = Vision(
            {
                "top": 850,
                "left": 1920,
                "width": 500,
                "height": 200
            },
            ratio=0.25)

    def show_grid(self):
        # loop_time = time()
        while True:
            self.vision.capture_window()
            self.vision.draw_grid(text=True)
            cv2.imshow("screen", self.vision.sct_img)
            # print("FPS {}".format(round(1 / (time() - loop_time), 2)))
            # loop_time = time()

            if (cv2.waitKey(1) & 0xFF) == ord("q"):
                cv2.destroyAllWindows()
                break
Example #32
def IsMyRound():
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # initialize the WindowCapture class
    wincap = WindowCapture('Legends of Runeterra')
    # finding myRound.jpg (forward slashes avoid invalid-escape warnings)
    #vision_IP = Vision('img/minalegal.jpg')
    vToFind = Vision('img/isMyRound.jpg')
    loop_time = time()
    while True:
        screen = wincap.get_screenshot()
        points = vToFind.find(screen, 0.99, 'rectangles')
        #print('FPS {}'.format(1 / (time() - loop_time)))
        if points:
            print("It's my turn.")
        else:
            print("Not my turn.")
        loop_time = time()
        #return False
        if cv.waitKey(1) == ord('q'):
            cv.destroyAllWindows()
            break
Example #33
class DrawGame:
    def __init__(self, arm):
        self.arm = arm
        print("Begginging drawing recognition game!")
        self.v = Vision()
        self.model = load_model("nn_hog.h5")
        self.categories = ('airplane', 'backpack', 'cactus', 'dog', 'ear',
                           'face', 'garden', 'hamburger', 'icecream', 'jacket')

    def run_game(self, times=3):
        current = 0

        # Robot draws bounds.
        self.arm.move_back()
        # FormatConvert.drawFromFile("bounds.txt", self.arm, speed=3)
        self.arm.move_away()
        # Small wait
        time.sleep(1)
        # Do game loop
        while current < times:
            # Human draws
            # Wait until human is "finished"
            input("Press enter when finished...")

            # When human is finished, detect category
            img = self.v._getImage(self.v.cam1)
            img_to_predict = self.preprocessing(img)

            # Get drawing of a category

            # Robot draws the category

            # Tiny wait
            time.sleep(1)
        print("Game over!")

    def preprocessing(self, img):
        print("Preprocessing img")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        img = cv2.medianBlur(img, 5)
        _, img = cv2.threshold(img, 100, 255, cv2.THRESH_BINARY_INV)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        img = cv2.dilate(img, kernel)
        img = np.rot90(img, 1)
        img = img[605:795, 280:470]
        img = cv2.resize(img, (28, 28))
        cv2.imshow('frame', img)
        cv2.waitKey(1)
        return img
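
run_game leaves the "detect category" step as a comment. Given the load_model and categories fields above, the missing step is presumably a single predict call on the 28x28 output of preprocessing. A sketch, assuming the network takes the flattened grayscale image (nn_hog.h5 suggests a HOG feature step that is not shown and may be required first; classify is a hypothetical helper):

import numpy as np

def classify(self, img_to_predict):
    # The (1, 784) input shape is an assumption and may not match nn_hog.h5.
    batch = img_to_predict.reshape(1, -1).astype("float32") / 255.0
    probs = self.model.predict(batch)
    return self.categories[int(np.argmax(probs))]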
Example #34
File: robot.py Project: traker/robot
    def __init__( self ):
        self.config = ConfigParser.RawConfigParser()
        self.config.read( 'RobotConf.cfg' )
        self.bus = I2c_arduino()
        self.vue = Vision( self.config, self.bus )
        self.land = landmap.LandMap()
        self.pile_vision = core.Core()

        #self.pilevar = collections.deque()
        self.pile_move = core.Core()
        self.running = False
        self.orientation = 0.0
        self.x = 0.0
        self.y = 0.0
        self.stream = stream.HTTPServer( ( "", 8080 ), self.vue )
Example #35
  def __init__(self, avg_bg_frames = 30, online = True, main = True, advanced = False, config = "config/020311-0830-secondary_pitch.yaml"):
    # Set up vision, visioncom
    calibrate           = Calibrate((9,6))
    calibrate.calibrate_camera_from_mem("calibration_data/")
    self._config        = yaml.load(file(config, 'r'))
    self._offline_img   = cv.LoadImage("b02.jpg")
    self._vision        = Vision(self._offline_img, calibrate, self._config, fix_distortions = True, main_pitch = main)
    self._visioncom     = VisionCom(self._vision)

    self._capture_pipe, capture_pipe = Pipe()
    self._capture       = Process(target=Capture, args = (capture_pipe,))
    self._capture.start()

    self._fps_then    = 0
    self._fps_frames  = 0
    self._fps         = 0
    self._base_time   = 0

    self._advanced = advanced

    self._online = online
    if self._online:
      if avg_bg_frames:
        frames = []
        for i in range(avg_bg_frames):
          frames.append(self.get_frame())
        self._vision.avg_bg_frames(frames)

    # Set up GUI and start process
    self._events_pipe    , events_pipe           = Pipe(False)
    set_shapes_pipe, get_shapes_pipe = False, False
    if advanced:
      self._set_shapes_pipe, set_shapes_pipe       = Pipe(False)
      get_shapes_pipe      , self._get_shapes_pipe = Pipe(False)
    feed_pipe            , self._feed_pipe       = Pipe(False)
    locations_pipe       , self._locations_pipe  = Pipe(False)

    self._gui_process = Process(target = gui.Gui, args = (events_pipe, locations_pipe, feed_pipe, self._config, get_shapes_pipe, set_shapes_pipe))
    self._gui_process.start()

    self._locations_pipe.send(("pitch",{True:"main",False:"other"}[main]))

    self._window      = "source"
    self._our_colour  = "blue"
    self._show_window = True
    self._run         = True
    self._loop()
Example #36
File: robot.py Project: traker/robot
 def __init__( self ):
     self.config = ConfigParser.RawConfigParser()
     self.config.read( 'RobotConf.cfg' )
     self.board = pyfirmata.ArduinoMega( self.config.get( 'Arduino', 'device' ) )
     self.prop = propulsion.Propulsion( self.config, self.board )
     self.tourel = propulsion.Tourelle( self.config, self.board )
     self.vue = Vision( self.config, self.board )
     self.land = landmap.LandMap()
     self.accel = propulsion.Accel( self.config )
     self.pile_vision = core.Core()
     #self.pilevar = collections.deque()
     self.pile_move = core.Core()
     self.running = False
     self.orientation = 0.0
     self.x = 0.0
     self.y = 0.0
     self.stream = stream.HTTPServer( ( "", 8080 ), self.vue )
Example #37
 def __init__(self):
     # Initialize the Vision wrapper class for the vision library
     self.vision = Vision()
Example #38
class VisionWrapper:
  def __init__(self, draw=True, colour='blue', side='left', main_pitch=True, load_bg_model=False, corners_from_mem=False):
    _filepath                       = sys.argv[0]
    _relpath                        = os.path.split(_filepath)[0]
    if _relpath != '': _relpath = _relpath + '/'
    
    self._bg                        = cv.LoadImage(_relpath + "b02.jpg")
    self._draw                      = draw
    self._colour                    = colour
    self._side                      = side
    self._n_avg_bg                  = 30
    self._corners_from_mem          = corners_from_mem
    
    self._last_update               = 0
    self._fps_update_interval       = 0.5
    self._num_frames                = 0
    self._fps                       = 0

    self._calibrate                 = Calibrate((9,6))
    self.calibrate_cam(from_mem     = True)
    self._config                    = yaml.load(file('config/020311-0830-main_pitch.yaml', 'r'))
    self._vision                    = Vision(self._bg, self._calibrate, self._config, fix_distortions = True, main_pitch=main_pitch, corners_from_mem=self._corners_from_mem)
    self._vision_com                = VisionCom(self._vision, colour=self._colour, side=self._side)
    self._ba                        = None
    self._load_bg_model             = load_bg_model
    
    self._frame_name                = 'annotated'
    self._capture                   = Capture()
    
    self._base_time = 0
  
  def online_feed(self): 
    frames = []
    if self._load_bg_model: frames = [self._capture.pull()]
    else: frames = [self._capture.pull() for i in range(self._n_avg_bg)]
    
    self._vision.avg_bg_frames(frames, load_from_mem=self._load_bg_model)
    
    reset_base  = False
    last_base_time = 0
    time_interval  = 60 
    while 1:
      lstart = time.time()
      if lstart - last_base_time >= time_interval:
        self.calc_baseline()
        last_base_time = time.time()
      
      frame = self._capture.pull()
      timestamp = datetime.fromtimestamp(self.correct_timestamp() / 1000.0).strftime('%Y-%m-%dT%H:%M:%S.%f')
      
      self._vision.update(frame)

      self._vision_com.transmit(timestamp)
      self.update_fps()

      try:
        if self._frame_name == 'annotated':
          vframe = self._vision.get_frame(name='source', annotated=True)
        else:
          vframe = self._vision.get_frame(name=self._frame_name, annotated=False)
      except:
        vframe   = self._vision.get_frame(name='annotated', annotated=True)      
      
      font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 0.75, 0.75, 0.0, 1, cv.CV_AA)
      msg  = ">> FPS: %.2f" % (self._fps,)  
      cv.PutText(vframe, msg, (10,15), font, cv.Scalar(0,0,255))
      
      if self._draw:
        cv.ShowImage("Feed", vframe)
            
        c = cv.WaitKey(2) % 0x100
        if c != -1:
          if c == 27 or chr(c) == 'q' : break
          if chr(c) == 'p'            : cv.SaveImage(timestamp + ".jpg", vframe)
          if chr(c) == 'a'            : self._frame_name = 'annotated'
          if chr(c) == 's'            : self._frame_name = 'source'
          if chr(c) == 'r'            : self._frame_name = 'threshed_red'
          if chr(c) == 'y'            : self._frame_name = 'threshed_yellow'
          if chr(c) == 'b'            : self._frame_name = 'threshed_blue'
          if chr(c) == 'h'            : self._frame_name = 'threshed_black'
          if chr(c) == 'g'            : self._frame_name = 'threshed_green'
          if chr(c) == 'f'            : self._frame_name = 'foreground'
          if chr(c) == 'c'            : self._frame_name = 'foreground_connected'
      
  def offline_feed(self, filename):
    capture = cv.CaptureFromFile(filename)
    width   = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_WIDTH))
    height  = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_HEIGHT))
    
    fps     = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FPS))
    frames  = int(cv.GetCaptureProperty(capture, cv.CV_CAP_PROP_FRAME_COUNT))
    
    for f in range(frames - 2):
      frame = cv.QueryFrame(capture)
      timestamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f')
      frame = self._calibrate.undistort_single_image(frame)

      self._vision.update(frame)
      self._vision_com.transmit(timestamp)
      self.update_fps()
      
      if self._draw:
        cv.ShowImage("Feed", frame)
      
      if cv.WaitKey(1000 / fps) == 27:
        break
  
  def calc_baseline(self):
    for i in range(3):
      self._capture.pull()
    self._base_time = int(round(time.time() * 1000))

  def correct_timestamp(self):
    t = int(round(time.time() * 1000))
    return t - (t - self._base_time) % 40

  def update_fps(self):
    self._num_frames += 1
    current_update    = time.time()

    if(current_update - self._last_update > self._fps_update_interval):
      self._fps         = self._num_frames / (current_update - self._last_update)
      self._last_update = current_update
      self._num_frames  = 0
      
  def calibrate_cam(self, from_mem=True, filenames=[]):
    if from_mem:
      self._calibrate.calibrate_camera_from_mem("calibration_data/")
    else:
      images = [cv.LoadImage(file) for file in filenames]
      self._calibrate.calibrate_camera(images, save_data=True)
Example #39
    def describe_image(query_item, candidate_items):
        """ Retrieve relevant descriptions for a query image """

        vision = Vision()
        language = Language()
        utilities = Utilities()

        settings = utilities.load_settings('settings.ini', 'Settings')
        neighbours = vision.retrieve_visually_similar_images(query_item, candidate_items, int(settings['farneighborsize']), settings)

        # ---------------------------------VISUALLY CLOSEST--------------------------------
        visually_closest_captions = []
        for vcCaption in neighbours[0][int(settings["captionindex"])]:  # find visually closest candidates' captions
            visually_closest_captions.append(vcCaption)

        neighbours, dist_min, dist_max = vision.remove_outliers(neighbours, int(settings["vdsindex"]),
                                                                float(settings["epsilon"]), int(settings["neighborsize"]))

        # ---------------------------- START MAIN PROCEDURE ----------------------------------

        w, model, vocab = language.load_word_models(settings)
        word_list_to_exclude = language.load_word_list_to_exclude(settings)
        sample_word_vector = language.get_word_vector('a', settings['method'], model, w, vocab)
        zeros = np.zeros(sample_word_vector.shape)

        number_of_tokens = []
        if settings['usesentencelengthpenalty'] == '1':
            for i, neighbour in enumerate(neighbours):
                for j, caption in enumerate(neighbour[int(settings["captionindex"])]):
                    if len(caption) == 1:
                        tokens = caption.split()  # tokenize by whitespace
                    else:
                        tokens = caption  # sentence is already tokenized
                    token_count = len(tokens)
                    number_of_tokens.append(token_count)
            average_token_count = int(np.mean(number_of_tokens))

        # --------------------COMPUTE QUERY VECTOR------------------------------
        total_vector_sum = zeros
        all_caption_vector_items = []
        oov_count = 0
        token_count = 0

        for i, neighbour in enumerate(neighbours):  # for each candidate
            caption_vector_sum = zeros  # to store caption vectors

            if settings['usevisualsimilarityscores'] == '1':
                visual_similarity_score = vision.compute_visual_similarity(neighbour[int(settings["vdsindex"])], dist_min, dist_max)
            else:
                visual_similarity_score = 1  # no effect

            for j, caption in enumerate(neighbour[int(settings["captionindex"])]):
                caption_vector, number_of_tokens, number_of_oovs = language.compute_sentence_vector(caption, model, w, vocab, zeros,
                                                                                                    word_list_to_exclude, settings)
                token_count = token_count + number_of_tokens
                oov_count = oov_count + number_of_oovs
                index_of_item = i + 1  # the index of item in the original list
                caption_vector = caption_vector * visual_similarity_score  # weighted summation with visual distance

                if settings['usesentencelengthpenalty'] == '1':
                    penalty = language.compute_sentence_length_penalty(number_of_tokens, average_token_count)
                    caption_vector = caption_vector * penalty

                caption_vector_item = [index_of_item, caption_vector, caption]
                all_caption_vector_items.append(caption_vector_item)
                caption_vector_sum += caption_vector

            total_vector_sum = total_vector_sum + caption_vector_sum

        query_vector = np.divide(total_vector_sum, len(all_caption_vector_items))

        cosine_similarities = []

        for caption_vector_item in all_caption_vector_items:
            cosine_similarity = language.compute_cosine_similarity(query_vector, caption_vector_item[1])  # 2nd index holds caption vector
            cosine_similarities.append(cosine_similarity)
            caption_vector_item.append(cosine_similarity)

        all_caption_vector_items.sort(key=lambda x: x[3], reverse=True)  # sort by 4th column, that is cosine similarity

        candidate_translations = []  # select top N descriptions from the results
        for i, caption_vector_item in enumerate(all_caption_vector_items[0:int(settings['numberofcaptionstoreturn'])]):
            candidate_translations.append(caption_vector_item[2])

        reference_translations = []
        for i, query_caption in enumerate(query_item[int(settings["captionindex"])]):
            reference_translations.append(query_caption)

        oov_rate = oov_count * 100 / token_count

        return [candidate_translations, reference_translations, visually_closest_captions, oov_rate]
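
compute_cosine_similarity is used above to rank each caption vector against the query vector; a sketch of the standard definition it presumably implements (the zero-vector guard is an assumption):

import numpy as np

def compute_cosine_similarity(a, b):
    # Cosine of the angle between two vectors; 0.0 when either is all zeros.
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.dot(a, b) / denom) if denom else 0.0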
Example #40
def run():
    vis = Vision(True)
    vis.color = Color.Red
    while cv2.waitKey(10) == -1:
        print vis.detectObjects(Feature.Ball)
Example #41
class StartGui:
  def __init__(self, avg_bg_frames = 30, online = True, main = True, advanced = False, config = "config/020311-0830-secondary_pitch.yaml"):
    # Set up vision, visioncom
    calibrate           = Calibrate((9,6))
    calibrate.calibrate_camera_from_mem("calibration_data/")
    self._config        = yaml.load(file(config, 'r'))
    self._offline_img   = cv.LoadImage("b02.jpg")
    self._vision        = Vision(self._offline_img, calibrate, self._config, fix_distortions = True, main_pitch = main)
    self._visioncom     = VisionCom(self._vision)

    self._capture_pipe, capture_pipe = Pipe()
    self._capture       = Process(target=Capture, args = (capture_pipe,))
    self._capture.start()

    self._fps_then    = 0
    self._fps_frames  = 0
    self._fps         = 0
    self._base_time   = 0

    self._advanced = advanced

    self._online = online
    if self._online:
      if avg_bg_frames:
        frames = []
        for i in range(avg_bg_frames):
          frames.append(self.get_frame())
        self._vision.avg_bg_frames(frames)

    # Set up GUI and start process
    self._events_pipe    , events_pipe           = Pipe(False)
    set_shapes_pipe, get_shapes_pipe = False, False
    if advanced:
      self._set_shapes_pipe, set_shapes_pipe       = Pipe(False)
      get_shapes_pipe      , self._get_shapes_pipe = Pipe(False)
    feed_pipe            , self._feed_pipe       = Pipe(False)
    locations_pipe       , self._locations_pipe  = Pipe(False)

    self._gui_process = Process(target = gui.Gui, args = (events_pipe, locations_pipe, feed_pipe, self._config, get_shapes_pipe, set_shapes_pipe))
    self._gui_process.start()

    self._locations_pipe.send(("pitch",{True:"main",False:"other"}[main]))

    self._window      = "source"
    self._our_colour  = "blue"
    self._show_window = True
    self._run         = True
    self._loop()

  def get_frame(self):
    self._capture_pipe.send("")
    while not self._capture_pipe.poll():
      pass
    data = self._capture_pipe.recv()
    frame = cv.CreateImage((640,480),8,3)
    cv.SetData(frame, data)
    return frame
  
  def _loop(self):
    last_base_time = 0
    time_interval  = 60
    while self._run:
      lstart = time.time()
      if lstart - last_base_time >= time_interval:
        self.calc_baseline()
        last_base_time = time.time()
      
      # Update feed and locations
      self._current_frame = self.get_frame() if self._online else cv.CloneImage(self._offline_img)
      self._vision.update(self._current_frame)
      

      timestamp = datetime.fromtimestamp(self.correct_timestamp() / 1000.0).strftime('%Y-%m-%dT%H:%M:%S.%f')
      
      self._visioncom.transmit(timestamp)
      self._calc_fps()
      self._locations_pipe.send(("fps", "fps", self._fps))
      self._src_frame = self._vision.get_frame(self._window)
      if self._show_window:
        if self._src_frame:
          frame = cv.CreateImage(cv.GetSize(self._src_frame),8,3)
          if self._window == "source":
            cv.CvtColor(self._src_frame, frame, cv.CV_BGR2RGB)
          else:
            cv.CvtColor(self._src_frame, frame, cv.CV_GRAY2RGB)
          pixbuf = (
            frame.tostring(),
            gtk.gdk.COLORSPACE_RGB,
            False,
            8,
            frame.width,
            frame.height, 
            frame.width * frame.nChannels
          )
          self._feed_pipe.send(pixbuf)
      ball = self._vision.get_ball()
      if ball:
        self._locations_pipe.send(("ball", "x", ball[0][0]))
        self._locations_pipe.send(("ball", "y", ball[0][1]))
      (our_centre, our_rotation) = self._vision.get_robot(self._our_colour)
      if our_centre:
        self._locations_pipe.send(("us", "x", our_centre[0]))
        self._locations_pipe.send(("us", "y", our_centre[1]))
      if our_rotation:
        self._locations_pipe.send(("us", "r", our_rotation))
      their_colour = {"blue":"yellow", "yellow":"blue"}[self._our_colour]
      (their_centre, their_rotation) = self._vision.get_robot(their_colour)
      if their_centre:
        self._locations_pipe.send(("them", "x", their_centre[0]))
        self._locations_pipe.send(("them", "y", their_centre[1]))
      if their_rotation:
        self._locations_pipe.send(("them", "r", their_rotation))

      # Shapes and sizes
      if self._advanced:
        vals = [
          "_pnp_dist_thresh",
          "_cc_area_thresh",
          "_max_foreground_obj_thresh",
          "_robot_area_threshold",
          "_black_dot_area_threshold",
          "_black_dot_dist_threshold",
          "_ball_area_threshold",
          "_plate_area_threshold",
          "_blob_area_threshold",
          "_pnp_dist_thresh",
          "_ball_centre",
          "_ball_radius",
          "_ball_roundness_metric",
          "_blue_robot_centre",
          "_blue_robot_t_area",
          "_blue_plate_area",
          "_yellow_robot_centre",
          "_yellow_robot_t_area",
          "_yellow_plate_area",
          "_offset_bottom"              ,
          "_offset_top"                 ,
          "_offset_left"                ,
          "_offset_right"
        ]
        while self._set_shapes_pipe.poll():
          var, val = self._set_shapes_pipe.recv()
          if var == "_max_foreground_obj_thresh": val = int(val)
          if var == "_offset_bottom": val = int(val)
          if var == "_offset_top": val = int(val)
          if var == "_offset_left": val = int(val)
          if var == "_offset_right": val = int(val)
          if var[-1] not in ["0","1"]:
            setattr(self._vision, var, val)
          else:
            tup = getattr(self._vision, var[:-1])
            temp = (val, tup[1]) if var[-1] == "0" else (tup[0], val)
            setattr(self._vision, var[:-1], temp)
        for val in vals:
          self._get_shapes_pipe.send((val, getattr(self._vision, val)))

        vals = [
          "_blue_robot_black_dot",
          "_yellow_robot_black_dot"
        ]
        for val in vals:
          temp = getattr(self._vision, val)
          if temp is not None:
            self._get_shapes_pipe.send((val, temp[0]))

      # Handle Events
      while self._events_pipe.poll():
        m,a = self._events_pipe.recv()
        if m == "quit":
          self._run = False
        elif m == "change_window":
          self._window = a[0]
        elif m == "show_window":
          self._show_window = a[0]
        elif m == "game_mode":
          print "/general/state/" + a[0]
          stdout.flush()
        elif m == "our_colour":
          self._our_colour        = a[0]
          self._visioncom._colour = a[0]
        elif m == "shooting_direction":
          self._visioncom._side = a[0]
        elif m == "save_as":
          cv.SaveImage("%s.jpg" % a[0], self._current_frame)
        elif m == "set_hsv_values":
          self._vision.set_hsv_values(*a)
        elif m == "thresholds_invert":
          break
          # TODO Invert thresholds

    self._locations_pipe.send("quit")
  
  def _calc_fps(self):
    self._fps_frames += 1
    now = time.time()
    if(now - self._fps_then > 0.5):
      self._fps        = self._fps_frames / (now - self._fps_then)
      self._fps_then   = now
      self._fps_frames = 0
      
  def calc_baseline(self):
    for i in range(3):
      self.get_frame()
    self._base_time = int(round(time.time() * 1000))

  def correct_timestamp(self):
    t = int(round(time.time() * 1000))
    return t - (t - self._base_time) % 40
Example #42
File: drive.py Project: robots/NN_Car
			if event.key == pygame.K_ESCAPE:
				robot.set_speed(0, 0)
				sys.exit(0)
		elif event.type == pygame.JOYBUTTONUP:
			if event.button == 8:
				robot.set_speed(0, 0)
				sys.exit(0)
			elif event.button == 1:
				pause = not pause



pygame.init()

pause = False
vision = Vision("Driver")

joystick = pygame.joystick.Joystick(0)
joystick.init()

robot = SerialCar('/dev/ttyUSB0', 38400)
out_values = [0, 0]

nn = libfann.neural_net()
nn.create_from_file(sys.argv[1])

try:
	while True:
		
		handle_events()
Example #43
File: robot.py Project: traker/robot
class Robot():
    '''
        Class that brings together all of the robot's modules.
        The robot's main functions:
            exploration, discovering the terrain
            patrolling from point to point
            moving from one point to another using the discovered map
            displaying the complete map of the terrain
    '''
    def __init__( self ):
        self.config = ConfigParser.RawConfigParser()
        self.config.read( 'RobotConf.cfg' )
        self.board = pyfirmata.ArduinoMega( self.config.get( 'Arduino', 'device' ) )
        self.prop = propulsion.Propulsion( self.config, self.board )
        self.tourel = propulsion.Tourelle( self.config, self.board )
        self.vue = Vision( self.config, self.board )
        self.land = landmap.LandMap()
        self.accel = propulsion.Accel( self.config )
        self.pile_vision = core.Core()
        #self.pilevar = collections.deque()
        self.pile_move = core.Core()
        self.running = False
        self.orientation = 0.0
        self.x = 0.0
        self.y = 0.0
        self.stream = stream.HTTPServer( ( "", 8080 ), self.vue )


    def start( self ):
        self.running = True
        thread.start_new_thread( self.__cerveau__, () )
        thread.start_new_thread( self.__mouvement__, () )
        thread.start_new_thread( self.stream.serve_forever, () )

    def stop( self ):
        self.running = False

    def __mouvement__( self ):
        while self.running:
            if self.pile_move.pile.__len__() <= 0:
                time.sleep( 0.1 )
            else:
                self.pile_move.exec_next()

    def __cerveau__( self ):
        '''
            The brain is made up of several stacks, each with a different priority.
        '''
        while self.running:
            if self.pile_vision.pile.__len__() <= 0:
                time.sleep( 0.1 )
            else:
                self.pile_vision.exec_next()

    def balayage( self, depart , arrive , pas=1 ):
        if depart <= arrive :
            lis = numpy.arange( depart, arrive, pas )
        else:
            lis = numpy.arange( arrive, depart, pas )
            lis = lis[::-1]
        listd = []
        self.tourel.depl_tour( depart )
        for i in lis:
            self.tourel.depl_tour( i )
            listd.append( ( i, self.vue.get_range() ) )
        return listd
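
A usage sketch for balayage: sweep the turret between two angles and collect (angle, range) readings from the vision module (the angle values and step are illustrative only):

# Sweep the turret from -45 to 45 degrees in 5-degree steps;
# each entry in readings is an (angle, range) pair.
robot = Robot()
readings = robot.balayage(-45, 45, pas=5)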