async def change_position(self,
                              context,
                              destination_point,
                              destination_map=None):
        worker = context.blackboard.get_worker()
        worker_location = pydash.get(worker, 'type_specific.location')

        updated_type_specific = worker['type_specific']
        if destination_point.get('theta') is None:
            destination_point['theta'] = pydash.get(
                worker, 'type_specific.location.pose2d.theta')

        update = {
            'map': destination_map or worker_location['map'],
            'pose2d': destination_point or worker_location['pose2d'],
            'semantic_location': None
        }

        if 'location' in updated_type_specific:
            updated_type_specific['location'] = pydash.assign(
                {}, updated_type_specific['location'], update)
        else:
            updated_type_specific['location'] = pydash.assign({}, update)

        context.blackboard.set_worker({'type_specific': updated_type_specific})
        await context.blackboard.sync_worker()
        print('position changed')
        return True
Example #2
    def handOutCards(self):
        temp_players = []

        for player in self.players:
            if player["id"] is not self.dealer_id:
                if self.deck.size is 0:
                    self.rebuild()

                hand = pydealer.Stack()

                card = self.deck.deal()

                hand.add(card)

                temp_players.append(pydash.assign({}, player, {"hand": hand}))

        for player in self.players:
            if player["id"] is self.dealer_id:
                if self.deck.size is 0:
                    self.rebuild()

                hand = pydealer.Stack()

                card = self.deck.deal()

                hand.add(card)

                temp_players.append(pydash.assign({}, player, {"hand": hand}))

        self.players = temp_players
def _clean_section(section):
  '''Clean the sentences and tables in a section'''
  if 'sentences' in section:
    cleaned_section = {'sentences': _clean_sentences(section['sentences'])}
  else:
    cleaned_section = section
  if 'tables' in section:
    _.assign(cleaned_section, {'tables': _clean_tables(section['tables'])})
  return _.assign({}, section, cleaned_section)
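A quick aside on the merge pattern above: _.assign copies keys left to right into its first argument, so passing an empty dict first keeps the original untouched. A minimal standalone sketch of that behavior, using plain dicts instead of the wiki-cleaning helpers:

import pydash as _

section = {'title': 'Intro', 'sentences': ['raw text'], 'tables': [['raw']]}
cleaned = {'sentences': ['clean text'], 'tables': [['clean']]}

# assign mutates and returns its first argument, so an empty dict is the target
merged = _.assign({}, section, cleaned)
assert merged == {'title': 'Intro', 'sentences': ['clean text'], 'tables': [['clean']]}
assert section['sentences'] == ['raw text']  # the source dict is left unmodified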
Example #4
 async def update_board_info(self):
     info = {}
     _.assign(info, json.loads(self.control_board.config.to_json()))
     _.assign(info, json.loads(self.control_board.state.to_json()))
     _.assign(info, json.loads(self.control_board.properties.to_json()))
     _.assign(info, {"uuid": str(self.control_board.uuid)})
     await self.set_state('info', info)
Example #5
    def getPlayers(self, number_of_players):
        player = {"hand": pydealer.Stack(), "chips": self.initial_chips}

        for id in range(1, number_of_players + 1):
            self.players.append(pydash.assign({}, player, {"id": id}))

            self.stats[id] = {"dealer_wins": 0, "dealer_losses": 0}
def clean_page(page):
  '''Remove invalid links, strip whitespace and remove improperly parsed
nested templates in the child elements of a wikipedia page from the
dumpster-dive dump'''
  cleaned_page = {'plaintext': clean_page_content(page['plaintext']),
                  'sections': [_clean_section(section) for section in page['sections']]}
  return _.assign({}, page, cleaned_page)
Example #7
 def build_attrs(name, attrs):
     return pydash.assign(
         dict(long_name=name,
              coverage_content_type='modelResult'),  # defaults
         attrs,  # Defined by plugin
         self.var_attributes.get(name,
                                 {}))  # From config, highest priority
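The inline comments above describe a precedence chain where later sources win. A small hedged illustration of that ordering with pydash.assign (the attribute names below are made up, not from the plugin):

import pydash

# defaults < plugin-defined attrs < per-variable config (last source wins)
attrs = pydash.assign(
    dict(long_name='default name', coverage_content_type='modelResult'),
    {'long_name': 'plugin name', 'units': 'm'},
    {'long_name': 'configured name'})
assert attrs == {'long_name': 'configured name',
                 'coverage_content_type': 'modelResult',
                 'units': 'm'}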
Example #8
 def request(self, method, url, headers, **kargs):
     urlFinal = urlparse.urljoin(self.server, url)
     userAgentHeader = f'ringcentral-engage-client-python/v{version}'
     if headers is None:
       headers = {}
     headers = _.assign(headers, {
         'Authorization': self._bearerAuthorizationHeader(),
         'User-Agent': userAgentHeader,
         'RC-User-Agent': userAgentHeader,
         'X-User-Agent': userAgentHeader,
     })
     req = Request(
       method,
       urlFinal,
       headers = headers,
       **kargs
     )
     prepared = req.prepare()
     s = Session()
     r = s.send(prepared)
     try:
         r.raise_for_status()
     except Exception:
         raise Exception(
           '{}\n{}\n\nStatus: {}\n Response Text: {}'.format(
               req.method + ' ' + req.url,
                'Headers:\n' + '\n'.join('{}: {}'.format(k, v) for k, v in req.headers.items()),
               r.status_code,
               r.text
           )
         )
     return r
Example #9
        def message_adder(index):
            collapsed_pulse = collapsed_pulses[index]
            pulse = pulses[index]

            return _.assign(
                collapsed_pulse,
                {'rate': collapsed_pulse.get('rate') + pulse.get('rate')})
Example #10
    def wrapper(*args, **kwargs):
        # print('--------auth------------')
        # print(request.headers['Authorization'])
        result = {
            'message': 'token not exists',
            'statusCode': -1,
            'data': None
        }
        if 'Authorization' in request.headers and request.headers[
                'Authorization']:
            tp_tokens = request.headers['Authorization'].split(' ')

            if len(tp_tokens) != 2 or tp_tokens[0] != "Bearer":
                return result, 401
            token = tp_tokens[1]
            oauth2 = oauth(token, '', '')
            ret = oauth2.checkToken()
            if ret is not None:
                # check whether the current user is the owner of this experiment or a user it was shared with
                tp_profile = user.findUserProfileByLinkID(
                    {"link_id": str(ret['id'])})
                tp_user = _.assign(ret, tp_profile)
                g.user = tp_user
                result = func(*args, **kwargs)
                return result
            else:
                return result, 401
        else:
            return result, 401
Example #11
    async def moving(self,
                     context,
                     destination_pose,
                     semantic_location_id=None):

        UPDATE_INTERVAL = 500
        worker = context.blackboard.get_worker()
        worker_location = pydash.get(worker, 'type_specific.location')

        path = self.path_planner.get_path(worker_location['map'],
                                          worker_location['pose2d'],
                                          destination_pose)
        trajectory = self.path_planner.path_to_trajectory(
            path, 1, UPDATE_INTERVAL)

        print('starting to move the robot along the path')

        def rotate_nearby(cx, cy, x, y, angle):
            radians = (math.pi / 180) * angle
            cos = math.cos(radians)
            sin = math.sin(radians)
            nx = cos * (x - cx) + sin * (y - cy) + cx
            ny = cos * (y - cy) - sin * (x - cx) + cy
            return {'x': nx, 'y': ny}

        for point in trajectory:
            worker = context.blackboard.get_worker()
            updated_type_specific = worker['type_specific']

            if point.get('theta') is None:
                point['theta'] = pydash.get(
                    worker, 'type_specific.location.pose2d.theta')

            updated_type_specific['location'] = pydash.assign(
                {}, updated_type_specific['location'], {
                    'map': worker_location['map'],
                    'pose2d': point,
                    'semantic_location': None
                })

            # if config.get('action.move') == 'nearby' and idx == len(trajectory) - 1:  (is this condition needed?)

            context.blackboard.set_worker(
                {'type_specific': updated_type_specific})
            await context.blackboard.sync_worker()

            #print('moving...sleep')
            await asyncio.sleep(0.1)
            #print('moving...done sleep')

        updated_type_specific = context.blackboard.get_worker(
        )['type_specific']
        pydash.set_(updated_type_specific, 'location.semantic_location',
                    semantic_location_id)
        context.blackboard.set_worker({'type_specific': updated_type_specific})
        await context.blackboard.sync_worker()
        return True
def _clean_sentence(sentence):
  '''Clean a sentence and its corresponding links'''
  cleaned_sentence = _.assign({}, sentence, {'text': clean_page_content(sentence['text'])})
  if 'links' in sentence:
    cleaned_sentence['links'] = reduce(_.curry(_sentence_clean_reducer)(cleaned_sentence['text']),
                                       sentence['links'],
                                       [])
  return cleaned_sentence
def _sentence_clean_link_text(link):
  '''Clean the anchor text of a link in the same way that pages as a
whole are cleaned. This keeps anchor text and the page text in sync'''
  if 'text' in link:
    cleaned_text = clean_page_content(link['text']).strip()
    return _.assign({}, link, {'text': cleaned_text})
  else:
    return link
    async def on_handler(self, context, message):

        worker = context.blackboard.get_worker()
        location = pydash.get(worker, 'type_specific.location')
        updated_type_specific = worker['type_specific']
        updated_type_specific['location'] = pydash.assign({}, location, message)
        context.blackboard.set_worker({'type_specific': updated_type_specific})
        await context.blackboard.sync_worker()
        return True
Example #15
    def runMIMIC(self):
        default = {
            'problem': self.problem,
            'pop_size': 200,
            'keep_pct': 0.2,
            'max_attempts': 10,
            'max_iters': np.inf,
            'curve': True,
            'random_state': None
        }
        # Experimental
        self.problem.set_mimic_fast_mode(True)

        state, fitness, curve = self._run(mlrose.mimic, name='1', **default)

        keep_pct = np.linspace(0.1, 1, 5)
        pop_size = [50, 100, 200]
        bestFitness = None
        for i in keep_pct:
            for j in pop_size:
                params = _.assign(
                    {}, default, {'keep_pct': i, 'pop_size': j})

                scores = []
                for r in range(5):
                    print('Running MIMIC %i' %r)
                    randomSeed = np.random.randint(0, 1000)
                    params = _.assign(
                        {}, params, {'random_state': randomSeed})
                    state, fitness, curve = self._run(
                        mlrose.mimic, name='%s' % i, **params)
                    scores.append(fitness)
                avgFitness = np.mean(scores)

                if bestFitness is None or (self.isMaximize and avgFitness > bestFitness) or (not self.isMaximize and avgFitness < bestFitness):
                    bestFitness = avgFitness
                    (bestState, bestCurve, bestParams) = state, curve, params
                # if fitness == 0:
                #     break
        log.info('\tMIMIC - Best fitness found: %s\n\t\tkeep_pct: %s \n\t\tpop_size: %s' %
                 (bestFitness, bestParams['keep_pct'], bestParams['pop_size']))
        # log.info('MIMIC CURVE: %s' %bestCurve)

        return bestCurve
Example #16
    def on_insert(data):
        _id, obj = util.obj_from_oplog(data, impression_track_cursor.filter)

        if _id and obj:
            obj = util.dict_projection(obj, impression_track_cursor.projection)
            obj = _.assign(obj, track_util.url_split(_.get(obj, 'uri')))
            es_client.index(index=opt['index'],
                            doc_type=opt['type'],
                            id=_id,
                            params=opt['params'],
                            body=obj)
Example #17
def key_builder(es_field_mapping, keys=None):
    # `keys` carries the dotted path across recursive calls; the original
    # snippet references it without defining it, so it is threaded through
    # as an argument here
    keys = [] if keys is None else keys
    field_map = {}
    for k, v in es_field_mapping.items():
        keys.append(k)
        if isinstance(v, str):
            field_map['.'.join(keys)] = v
        else:
            field_map = _.assign(field_map, key_builder(v, keys))

        keys.pop()
    return field_map
Example #18
    def runSA(self):
        default = {
            'problem': self.problem,
            'schedule': self.schedule,
            'max_attempts': 10,
            'max_iters': 1000,
            'init_state': self.init_state,
            'curve': True,
            'random_state': 1
        }

        maxAttempts = [5, 10, 20]
        schedules = [mlrose.GeomDecay(), mlrose.ExpDecay(),
                     mlrose.ArithDecay()]
        bestFitness = None
        (bestState, bestCurve, bestParams) = None, None, None
        for i in maxAttempts:
            for j in schedules:
                params = _.assign(
                    {}, default, {'max_attempts': i, 'schedule': j})

                scores = []
                for r in range(5):
                    randomSeed = np.random.randint(0, 1000)
                    params = _.assign(
                        {}, params, {'random_state': randomSeed})
                    state, fitness, curve = self._run(
                        mlrose.simulated_annealing, name='%s' % i, **params)
                    scores.append(fitness)
                avgFitness = np.mean(scores)

                if bestFitness is None or (self.isMaximize and avgFitness > bestFitness) or (not self.isMaximize and avgFitness < bestFitness):
                    bestFitness = avgFitness
                    (bestState, bestCurve, bestParams) = state, curve, params
                # if fitness == 0:
                #     break
        print('SA - Params: %s' % bestParams)
        log.info('\tSA - Best fitness found: %s\n\t\tmaxAttempts: %s \n\t\tschedule: %s' %
                 (bestFitness, bestParams['max_attempts'], type(bestParams['schedule']).__name__))

        return bestCurve
Example #19
    def runRHC(self):
        default = {
            'problem': self.problem,
            'max_attempts': 10,
            'max_iters': np.inf,
            'init_state': self.init_state,
            'curve': True,
            'random_state': 1
        }

        maxAttempts = [5, 10, 20]
        restarts = [0, 1, 2, 3, 4, 5, 6, 7, 8]
        bestFitness = None
        (bestState, bestCurve, bestParams) = None, None, None
        for i in maxAttempts:
            for j in restarts:
                params = _.assign(
                    {}, default, {'max_attempts': i, 'restarts': j})

                scores = []
                for r in range(5):
                    randomSeed = np.random.randint(0, 1000)
                    params = _.assign(
                        {}, params, {'random_state': randomSeed})
                    state, fitness, curve = self._run(
                        mlrose.random_hill_climb, name='%s' % i, **params)
                    scores.append(fitness)
                avgFitness = np.mean(scores)

                if bestFitness is None or (self.isMaximize and avgFitness > bestFitness) or (not self.isMaximize and avgFitness < bestFitness):
                    bestFitness = avgFitness
                    (bestState, bestCurve, bestParams) = state, curve, params
                # if fitness == 0:
                #     break

        print('RHC - Best fitness found on max_attempts: %s restarts: %s' %
              (bestParams['max_attempts'], bestParams['restarts']))
        log.info('\tRHC - Best fitness found: %s\n\t\tmax_attempts: %s \n\t\trestarts: %s' %
                 (bestFitness, bestParams['max_attempts'], bestParams['restarts']))

        return bestCurve
Example #20
    def runGA(self):
        default = {
            'problem': self.problem,
            'pop_size': 200,
            'mutation_prob': 0.1,
            'max_attempts': 10,
            'max_iters': 100,
            'curve': True,
            'random_state': None
        }

        mutation_prob = np.linspace(0.1, 1, 5)
        pop_size = [50, 100, 200]
        bestFitness = None
        for i in mutation_prob:
            for j in pop_size:
                params = _.assign(
                    {}, default, {'mutation_prob': i, 'pop_size': j})

                scores = []
                for r in range(5):
                    print('Running GA %i' %r)
                    randomSeed = np.random.randint(0, 1000)
                    params = _.assign(
                        {}, params, {'random_state': randomSeed})
                    state, fitness, curve = self._run(
                        mlrose.genetic_alg, name='%s' % i, **params)
                    scores.append(fitness)
                avgFitness = np.mean(scores)

                if bestFitness is None or (self.isMaximize and avgFitness > bestFitness) or (not self.isMaximize and avgFitness < bestFitness):
                    bestFitness = avgFitness
                    (bestState, bestCurve, bestParams) = state, curve, params
                # if fitness == 0:
                #     break
        log.info('\tGA - Best fitness found: %s\n\t\tmutation_prob: %s \n\t\tpop_size: %s' %
                 (bestFitness, bestParams['mutation_prob'], bestParams['pop_size']))

        return bestCurve
Example #21
    def datetime_clusterer(tuplet: Tuple[List[Dict], int],
                           message: Dict) -> Tuple[List, int]:

        message_anchored_time = round_to_nearest_interval_minutes(
            message.get('timestamp'))

        # No cluster: create a cluster
        if not tuplet:
            return [{'rate': 1, 'time': message_anchored_time}], 1

        pulse_clusters, max_pulse_rate = tuplet
        latest_pulse = pulse_clusters.pop()

        # Message fits in cluster - cluster it up!
        if latest_pulse.get('time') == message_anchored_time:

            new_pulse_rate = latest_pulse.get('rate') + 1
            new_pulse_clusters = _.push(
                pulse_clusters, _.assign(latest_pulse,
                                         {'rate': new_pulse_rate}))

            if max_pulse_rate >= new_pulse_rate:
                return new_pulse_clusters, max_pulse_rate
            return new_pulse_clusters, new_pulse_rate

        # Message doesn't fit in cluster
        # lock in latest cluster, create new cluster but also fill
        # in missing clusters in between
        old_pulse_cluster = _.push(pulse_clusters, latest_pulse)
        old_pulse_cluster = _.concat(
            zero_pulses(start_time=latest_pulse.get('time'),
                        end_time=message_anchored_time,
                        interval=interval), old_pulse_cluster)

        return _.push(old_pulse_cluster,
                      _.assign({}, {
                          'rate': 1,
                          'time': message_anchored_time
                      })), max_pulse_rate
  def action(tableName, action, data = None):
    """db action wrapper
    * @param {String} tableName, user or bot
    * @param {String} action, add, remove, update, get
    * @param {Object} data
    * for add, {id: xxx, token: {...}, groups: {...}}
    * for remove, {id: xxx} or {ids: [...]}
    * for update, {id: xxx, update: {...}}
    * for get, singleUser:{id: xxx}, allUser: {}, query: { 'key': 'xx', 'value': 'yy'}
    """
    debug('db op:', tableName, action, data)
    prepareDb()
    id = _.get(data, 'id')
    if _.predicates.is_number(id):
      id = str(id)

    if action == 'add':
      putItem(data, tableName)

    elif action == 'remove':
      removeItem(id, tableName)

    elif action == 'update':
      update = data['update']
      old = getItem(id, tableName)
      _.assign(old, update)
      putItem(old, tableName)

    elif action == 'get':
      if id is not None:
        return getItem(id, tableName)
      else:
        query = None
        if _.get(data, 'key') is not None:
          query = data
        return scan(tableName, query)

    return action
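A hypothetical call sequence for the wrapper above, assuming prepareDb, putItem, getItem and scan are wired to a real table (the ids and payloads are illustrative, not from the source):

# add a user, update it, then read it back
action('user', 'add', {'id': 1, 'token': {'value': 'abc'}, 'groups': {}})
action('user', 'update', {'id': 1, 'update': {'groups': {'admin': True}}})
one_user = action('user', 'get', {'id': 1})
all_users = action('user', 'get', {})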
Example #23
    def create_balance_object(self, balance_item):
        """
        Creates a new balance object containing only the relevant values and the BTC value of the coin's balance

        :param balance_item: The Bittrex user balance object for a coin
        :type balance_item: dict
        """
        btc_price = 1
        if balance_item["Currency"] != "BTC":
            coin_pair = "BTC-" + balance_item["Currency"]
            btc_price = self.get_current_price(coin_pair, "bid")

        return py_.assign(
            py_.pick(balance_item, "Currency", "Balance"),
            {"BtcValue": round(btc_price * balance_item["Balance"], 8)})
Example #24
async def pulse_by_channel_route(request: Request) -> Response:
  user_session = await route_helpers.get_session_from_request(request)
  if user_session is None:
    return json_response({
      'status': 400,
      'data': [],
      'message': 'No authentication token provided',
      'errors': []
    })

  try:
    mattermost = client.Client(user_session)
  except Exception:
    return json_response({
      'status': 400,
      'data': {},
      'message': 'Invalid credentials',
      'errors': []
    })

  channel_id = request.match_info['channel_id']

  pulse_beat = _.map_(
    pulse.pulse(
      mattermost.get_channel_messages(channel_id)
    ),
    lambda pulse: _.assign(
      pulse,
      {'time': pulse.get('time').isoformat()}
    )
  )

  if pulse_beat is None:
    return json_response({
      'status': 400,
      'data': [],
      'message': 'No pulse obtained',
      'errors': []
    })

  return json_response({
    'status': 200,
    'data': pulse_beat,
    'message': 'Success',
    'errors': []
  })
Example #25
    def getLegacyToken(self, username='', password=''):
        url = f'{self.server}/api/v1/auth/login'
        body = f'username={username}&password={password}'
        res = self._request(
            'post',
            url,
            data=body,
            headers={'Content-Type': 'application/x-www-form-urlencoded'})
        r = res.json()
        url1 = f'{self.server}/api/v1/admin/token'
        res1 = self._request('post',
                             url1,
                             headers={'X-Auth-Token': r['authToken'] or ''})

        r1 = res1.text
        f = _.assign(r, {'apiToken': r1})
        self.token = f
Example #26
 def patchHeader(self, header=None):
     user_agent_header = '{name} Python {major_lang_version}.{minor_lang_version} {platform}'.format(
         name='ringcentral/engage-voice',
         major_lang_version=sys.version_info[0],
         minor_lang_version=sys.version_info[1],
         platform=platform.platform(),
     )
     shareHeaders = {
         'User-Agent': user_agent_header,
         'RC-User-Agent': user_agent_header,
         'X-User-Agent': user_agent_header,
     }
     authHeader = {}
     if self.isLegacy:
         authHeader = self._legacyHeader()
     else:
         authHeader = {'Authorization': self._autorization_header()}
     return _.assign(shareHeaders, authHeader, header or {})
Example #27
 def _check(self, batch_num=0):
     if self.dont_smooth:
         smooth = 0.0
         val_results = self.metrics_at_k(self.val_ranking_dataset, smooth)
     else:
         smooth, val_results = self._find_best_smooth()
     train_results = self.metrics_at_k(self.train_ranking_dataset, smooth)
     test_results = self.metrics_at_k(self.test_ranking_dataset, smooth)
     test_results_no_smooth = self.metrics_at_k(self.test_ranking_dataset,
                                                0.0)
     self.experiment.record_metrics(
         _.assign({},
                  _.map_keys(train_results,
                             lambda val, key: 'train_' + key),
                  _.map_keys(test_results, lambda val, key: 'test_' + key),
                  _.map_keys(test_results_no_smooth,
                             lambda val, key: 'test_no_smooth_' + key),
                  _.map_keys(val_results, lambda val, key: 'val_' + key)),
         batch_num)
Example #28
def get_buckets_from_aggregation(aggregations, mapping, agg_keys, level):
    if not aggregations or not mapping or not agg_keys:
        return []
    bucket_key = _.get(mapping, agg_keys[level] + '.bucket_key', 'key')
    if isinstance(bucket_key, str):
        bucket_key = [bucket_key]
    result = _.get(aggregations, agg_keys[level] + '.buckets')
    if level >= len(agg_keys) - 1:
        r = []
        if result:
            for item in result:
                a = {}
                a['count'] = item['doc_count']
                for bk in bucket_key:
                    key_get = 'key'
                    key_set = 'key'
                    if isinstance(bk, str):
                        key_get = bk
                        key_set = agg_keys[level]
                    elif isinstance(bk, dict):
                        key_get = _.get(bk, 'get')
                        key_set = _.get(bk, 'set')
                    _.set_(a, key_set, _.get(item, key_get))
                r.append(a)
        return r
    else:
        _list = []
        for item in result or []:
            for child in get_buckets_from_aggregation(item, mapping, agg_keys, level + 1):
                for bk in bucket_key:
                    key_get = 'key'
                    key_set = 'key'
                    if isinstance(bk, str):
                        key_get = bk
                        key_set = agg_keys[level]
                    elif isinstance(bk, dict):
                        key_get = _.get(bk, 'get')
                        key_set = _.get(bk, 'set')
                    child = _.assign(child, {key_set: item.get(key_get)})
                _list.append(child)
        return _list
Example #29
    async def bulldozer_moving(self,
                               context,
                               destination_pose,
                               semantic_location_id=None):
        UPDATE_INTERVAL = 500
        worker = context.blackboard.get_worker()
        worker_location = pydash.get(worker, 'type_specific.location')

        path = [worker_location['pose2d'], destination_pose]
        trajectory = self.path_planner.path_to_trajectory(
            path, 1, UPDATE_INTERVAL)

        print('starting bulldozer moving of robot along path')

        for point in trajectory:
            updated_type_specific = worker['type_specific']
            if point.get('theta') is None:
                point['theta'] = pydash.get(
                    worker, 'type_specific.location.pose2d.theta')

            updated_type_specific['location'] = pydash.assign(
                {}, updated_type_specific['location'], {
                    'map': worker_location['map'],
                    'pose2d': point,
                    'semantic_location': None
                })

            context.blackboard.set_worker(
                {'type_specific': updated_type_specific})
            await context.blackboard.sync_worker()
            await asyncio.sleep(0.1)

        updated_type_specific = context.blackboard.get_worker(
        )['type_specific']
        pydash.set_(updated_type_specific, 'location.semantic_location',
                    semantic_location_id)
        context.blackboard.set_worker({'type_specific': updated_type_specific})
        await context.blackboard.sync_worker()
        return True
    async def on_perform(self, context, args):
        station_id = pydash.find(args, {'key': 'station'})['value']
        station = await context.api_configuration.get_stations(station_id)

        if station is None:
            print('failed to get station')

        worker = context.blackboard.get_worker()
        worker_location = pydash.get(worker, 'type_specific.location')
        path_planner = PathPlanner(context)
        await path_planner.init_map()

        path = path_planner.get_path(worker_location['map'], worker_location['pose2d'], station['pose'])
        trajectory = path_planner.path_to_trajectory(path, 1, 1000)

        print('starting to move the robot along the path')

        for point in trajectory:
            worker = context.blackboard.get_worker()
            updated_type_specific = worker['type_specific']
            if 'theta' not in point:
                point['theta'] = pydash.get(worker, 'type_specific.location.pose2d.theta')

            updated_type_specific['location'] = pydash.assign({}, updated_type_specific['location'], {
                'map': worker_location['map'],
                'pose2d': point
            })

            context.blackboard.set_worker({'type_specific': updated_type_specific})
            await context.blackboard.sync_worker()
            await asyncio.sleep(1)

        updated_type_specific['location']['pose2d']['theta'] = station['pose']['theta']
        context.blackboard.set_worker({'type_specific': updated_type_specific})
        await context.blackboard.sync_worker()
        await asyncio.sleep(1)
        return True
Example #31
def test_assign(case, expected):
    assert _.assign(*case) == expected
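The parametrized case/expected pairs are not shown in this excerpt; one plausible pair, just to illustrate the left-to-right merge that assign performs (an assumed example, not taken from the test suite):

import pydash as _

case = ({}, {'a': 1}, {'a': 3, 'b': 2})
expected = {'a': 3, 'b': 2}
assert _.assign(*case) == expected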