Example #1
def cache_last_stolen(team_id: int, current_round: int,
                      pipe: Pipeline) -> None:
    """
    Caches stolen flags from "flag_lifetime" rounds.

    Just adds commands to pipeline stack, don't forget to execute afterwards.

    :param team_id: attacker team id
    :param current_round: current round
    :param pipe: redis connection to add command to
    """
    game_config = game.get_current_game_config()

    with utils.db_cursor() as (_, curs):
        curs.execute(
            _SELECT_LAST_STOLEN_TEAM_FLAGS_QUERY,
            {
                'round': current_round - game_config.flag_lifetime,
                'attacker_id': team_id,
            },
        )
        flags = curs.fetchall()

    key = CacheKeys.team_stolen_flags(team_id)
    pipe.delete(key)
    if flags:
        pipe.sadd(key, *(flag[0] for flag in flags))
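Helpers like this only queue commands; a minimal driver sketch (the redis.Redis() connection and the argument values are assumptions, not part of the original snippet):

import redis

r = redis.Redis()
pipe = r.pipeline()
cache_last_stolen(team_id=1, current_round=10, pipe=pipe)
pipe.execute()  # nothing reaches Redis until execute()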
Example #2
def test_rpc_ratelimit(getint: mock.MagicMock, client: TestClient,
                       pipeline: Pipeline, packages: List[Package]):
    params = {"v": 5, "type": "suggest-pkgbase", "arg": "big"}

    for i in range(4):
        # The first 4 requests should be good.
        with client as request:
            response = request.get("/rpc", params=params)
        assert response.status_code == int(HTTPStatus.OK)

    # The fifth request should be banned.
    with client as request:
        response = request.get("/rpc", params=params)
    assert response.status_code == int(HTTPStatus.TOO_MANY_REQUESTS)

    # Delete the cached records.
    pipeline.delete("ratelimit-ws:testclient")
    pipeline.delete("ratelimit:testclient")
    one, two = pipeline.execute()
    assert one and two

    # The new first request should be good.
    with client as request:
        response = request.get("/rpc", params=params)
    assert response.status_code == int(HTTPStatus.OK)
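For reference, Pipeline.execute() returns one result per queued command, which is what the `one, two = pipeline.execute()` unpacking above relies on. A generic redis-py sketch of that behavior:

import redis

r = redis.Redis()
pipe = r.pipeline()
pipe.set("a", 1)
pipe.delete("a")
set_ok, deleted = pipe.execute()  # one result per command: [True, 1]
assert set_ok and deleted == 1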
Example #3
def _populate(
        self,  # type: ignore
        pipeline: Pipeline,
        arg: InitArg = tuple(),
        *,
        sign: int = +1,
        **kwargs: int,
) -> None:
    to_set = {}
    try:
        for key, value in cast(Counter[JSONTypes], arg).items():
            to_set[key] = sign * value
    except AttributeError:
        for key in arg:
            to_set[key] = to_set.get(key, self[key]) + sign
    for key, value in kwargs.items():
        original = self[key] if to_set.get(key, 0) == 0 else to_set[key]
        to_set[key] = original + sign * value
    to_set = {key: self[key] + value for key, value in to_set.items()}
    encoded_to_set = {
        self._encode(k): self._encode(v)
        for k, v in to_set.items()
    }
    if encoded_to_set:
        pipeline.multi()
        pipeline.hset(self.key, mapping=encoded_to_set)  # type: ignore
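pipeline.multi() is only legal on a pipeline that is in immediate (WATCH) mode, so callables like this are normally driven through redis-py's transaction() helper. A minimal sketch under that assumption (the key name and callable are hypothetical):

import redis

r = redis.Redis()

def populate_txn(pipe: redis.client.Pipeline) -> None:
    # WATCH phase: commands issued before multi() would run immediately.
    pipe.multi()  # switch the pipeline into buffered MULTI/EXEC mode
    pipe.hset('my-hash', mapping={'a': 1, 'b': 2})

# Watches 'my-hash', retries on WatchError, and EXECs the buffered commands.
r.transaction(populate_txn, 'my-hash')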
Example #4
def test_ratelimit_redis(get: mock.MagicMock, getboolean: mock.MagicMock,
                         getint: mock.MagicMock, pipeline: Pipeline):
    """ This test will only cover aurweb.ratelimit's Redis
    path if a real Redis server is configured. Otherwise,
    it'll use the database. """

    # We'll need a Request for everything here.
    request = Request()

    # Run check_ratelimit for our request_limit. These should succeed.
    for i in range(4):
        assert not check_ratelimit(request)

    # This check_ratelimit should fail, being the fifth request (one past the mocked limit).
    assert check_ratelimit(request)

    # Delete the Redis keys.
    host = request.client.host
    pipeline.delete(f"ratelimit-ws:{host}")
    pipeline.delete(f"ratelimit:{host}")
    one, two = pipeline.execute()
    assert one and two

    # Should be good to go again!
    assert not check_ratelimit(request)
Example #5
def index_document_pipe(pipe: Pipeline, cfg: CollectionConfig, doc: Doc):
    """Push a document into the index"""
    doc_id = x_id(doc, cfg.id_fld)

    pipe.hset(f'{cfg.name}/docs', doc_id, json.dumps(doc))

    for fld in cfg.text_flds:
        if fld in doc:
            text = doc[fld]
            index_text(pipe, cfg, doc_id, text)

    for fld in cfg.facet_flds:
        if fld not in doc:
            continue

        for val in as_list( doc, fld ):
            assert is_scalar(val), f"Found non-scalar value ({val}) in field '{fld}' of " \
                                   f"document with id {doc_id}"

            index_facet(pipe, cfg.name, doc_id, fld, val)

    for fld in cfg.number_flds:
        if fld not in doc:
            continue

        for val in as_list(doc, fld):
            if val is None:
                continue
            assert is_number(val), f"Found non-numeric value ({val}) in field '{fld}' of " \
                                   f"document with id {doc_id}"

            index_numeric(pipe, cfg.name, doc_id, fld, val)
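as_list, is_scalar, and is_number are project-local helpers that are not shown; a plausible stand-in for as_list (an assumption, not the project's actual code):

def as_list(doc: dict, fld: str) -> list:
    # Wrap scalar field values so callers can always iterate.
    val = doc[fld]
    return val if isinstance(val, list) else [val]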
Example #6
    def _redis_delete_old_and_set_new(
        self,
        labels: Mapping[SiteId, _Labels],
        label_type: str,
        pipeline: Pipeline,
    ) -> None:
        sites_list: List[SiteId] = []
        for site_id, label in labels.items():
            if site_id not in self._sites_to_update:
                continue

            if not label:
                continue

            label_key = "%s:%s:%s" % (self._namespace, site_id, label_type)
            pipeline.delete(label_key)
            # NOTE: Mapping is invariant in its key because of __getitem__, so for mypy's sake we
            # make a copy below. This doesn't matter from a performance view, hset is iterating over
            # the dict anyway, and after that there is some serious I/O going on.
            # NOTE: pylint is too dumb to see the need for the comprehension.
            # pylint: disable=unnecessary-comprehension
            pipeline.hset(label_key, mapping={k: v for k, v in label.items()})

            if site_id not in sites_list:
                sites_list.append(site_id)

        for site_id in sites_list:
            self._redis_set_last_program_start(site_id, pipeline)
Example #7
    def _set_client(self, client: Client, pipe: Pipeline) -> None:
        if ASSERTS:
            assert '!' not in client.name

        pipe.setex(f'#{client.id}', client.serialize(),
                   self.default_address_ttl)
        pipe.setex(f'!{client.name}', client.id, self.default_address_ttl)
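Note that setex is called here with the legacy redis-py 2.x argument order (name, value, time); since redis-py 3.0 the signature is (name, time, value). A modern equivalent of the two calls above, assuming the same keys and TTL:

pipe.setex(f'#{client.id}', self.default_address_ttl, client.serialize())
pipe.setex(f'!{client.name}', self.default_address_ttl, client.id)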
Example #8
def add_to_indexed_offers(pipeline: Pipeline, offer_id: int,
                          offer_details: dict) -> None:
    try:
        offer_details_as_string = json.dumps(offer_details)
        pipeline.hset(RedisBucket.REDIS_HASHMAP_INDEXED_OFFERS_NAME.value,
                      offer_id, offer_details_as_string)
    except redis.exceptions.RedisError as error:
        logger.exception("[REDIS] %s", error)
Example #9
def _populate(
        self,
        pipeline: Pipeline,
        iterable: Iterable[JSONTypes] = tuple(),
) -> None:
    encoded_values = {self._encode(value) for value in iterable}
    if encoded_values:  # pragma: no cover
        pipeline.multi()
        pipeline.sadd(self.key, *encoded_values)
Example #10
def index_text(pipe: Pipeline, cfg: CollectionConfig, doc_id: str, text: str):
    """Index text from a text field"""
    tokens = tokenize(text, cfg.transl_tbl, cfg.stop_words)

    if not tokens:
        return

    pipe.sadd(f'{cfg.name}/text_tokens', *tokens)

    for tok in tokens:
        index_pats(pipe, cfg, tok)
        pipe.sadd(key_token(cfg.name, tok), doc_id)
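tokenize and key_token are assumed helpers from the same project; hypothetical stand-ins that make the snippet self-contained:

def tokenize(text: str, transl_tbl: dict, stop_words: set) -> list:
    # Lowercase, strip punctuation via the translation table, drop stop words.
    words = text.lower().translate(transl_tbl).split()
    return [w for w in words if w and w not in stop_words]

def key_token(collection: str, tok: str) -> str:
    # One Redis set per token, holding the ids of the documents containing it.
    return f'{collection}/tok/{tok}'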
Example #11
        def tx_fn(pipeline: Pipeline) -> Response:
            """Code to be executed within a Redis transaction."""
            rstates: List[Optional[bytes]] = pipeline.mget(rkeys)

            t_s: int
            t_us: int
            t_s, t_us = pipeline.time()
            t1 = t_s + t_us / 1000000

            delay: float = 0
            states: List[State] = []
            for limit, rstate in zip(limits, rstates):
                t0, v0 = self._codec.decode(rstate) or (t1, 0)
                v1 = max(v0 - (t1 - t0) * limit.zone.rate, 0) + 1
                c = limit.burst + 1 - v1
                if c < -limit.delay:
                    pipeline.unwatch()
                    return Response(False, None)
                if c < 0:
                    delay = max(delay, -c/limit.zone.rate)
                states.append(State(t1, v1))

            pipeline.multi()
            for limit, rkey, state in zip(limits, rkeys, states):
                pipeline.setex(rkey, limit.zone.expiry,
                               self._codec.encode(state))

            return Response(True, delay)
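tx_fn issues reads (mget, time) before multi(), so it has to run on a WATCHed pipeline in immediate mode; redis-py's transaction() helper provides exactly that. A sketch of the assumed invocation (client and rkeys come from the enclosing scope):

# transaction() WATCHes rkeys, calls tx_fn(pipe), EXECs the buffered
# commands, and retries the whole callable on WatchError.
response = client.transaction(tx_fn, *rkeys, value_from_callable=True)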
Example #12
def _populate(
        self,
        pipeline: Pipeline,
        arg: InitArg = tuple(),
        **kwargs: JSONTypes,
) -> None:
    to_set = {}
    with contextlib.suppress(AttributeError):
        arg = cast(InitMap, arg).items()
    for key, value in itertools.chain(cast(InitIter, arg), kwargs.items()):
        to_set[self._encode(key)] = self._encode(value)
    if to_set:
        pipeline.multi()
        pipeline.hset(self.key, mapping=to_set)  # type: ignore
Example #13
def cache_tasks(pipe: Pipeline) -> None:
    """
    Put active tasks table data from the database into the cache.

    This only queues commands on the pipeline; don't forget to execute it afterwards.
    """
    with utils.db_cursor(dict_cursor=True) as (_, curs):
        curs.execute(models.Task.get_select_active_query())
        tasks = curs.fetchall()

    tasks = [models.Task.from_dict(task) for task in tasks]
    key = CacheKeys.tasks()
    pipe.delete(key)
    if tasks:
        pipe.sadd(key, *(task.to_json() for task in tasks))
Example #14
def pipeline(self, transaction=True, shard_hint=None):
    """
    Return a new pipeline object that can queue multiple commands for
    later execution. ``transaction`` indicates whether all commands
    should be executed atomically. Apart from making a group of operations
    atomic, pipelines are useful for reducing the back-and-forth overhead
    between the client and server.

    Overridden in order to provide the right client through the pipeline.
    """
    p = Pipeline(connection_pool=self.redis.connection_pool,
                 response_callbacks=self.redis.response_callbacks,
                 transaction=transaction,
                 shard_hint=shard_hint)
    p.redis = p
    return p
Example #15
def cache_last_flags(current_round: int, pipe: Pipeline) -> None:
    """
    Cache all generated flags from last "flag_lifetime" rounds.

    Just adds commands to pipeline stack, don't forget to execute afterwards.

    :param current_round: current round
    :param pipe: redis connection to add command to
    """
    game_config = game.get_current_game_config()
    expires = game_config.flag_lifetime * game_config.round_time * 2

    with utils.db_cursor(dict_cursor=True) as (_, curs):
        curs.execute(
            _SELECT_ALL_LAST_FLAGS_QUERY,
            {'round': current_round - game_config.flag_lifetime},
        )
        flags = curs.fetchall()

    flag_models = [models.Flag.from_dict(data) for data in flags]

    pipe.set(CacheKeys.flags_cached(), 1)
    for flag in flag_models:
        pipe.set(CacheKeys.flag_by_id(flag.id), flag.to_json(), ex=expires)
        pipe.set(CacheKeys.flag_by_str(flag.flag), flag.to_json(), ex=expires)
Example #16
def _process_adding(pipeline: Pipeline, client: Redis, offer_ids: List[int],
                    adding_objects: List[dict]) -> None:
    try:
        add_objects(objects=adding_objects)
        logger.info("[ALGOLIA] %i objects were indexed!", len(adding_objects))
        pipeline.execute()
        pipeline.reset()
    except AlgoliaException as error:
        logger.exception("[ALGOLIA] error when adding objects %s", error)
        add_offer_ids_in_error(client=client, offer_ids=offer_ids)
        pipeline.reset()
Example #17
def __delete(self, pipeline: Pipeline, index: Union[slice, int]) -> None:
    # This is monumentally stupid.  Python's list API requires us to delete
    # an element by *index.*  Of course, Redis doesn't support that,
    # because it's Redis.  Instead, Redis supports deleting an element by
    # *value.*  So our ridiculous hack is to set l[index] to 0, then to
    # delete the value 0.
    #
    # More info:
    #   http://redis.io/commands/lrem
    indices, num = self.__slice_to_indices(index), 0
    pipeline.multi()
    for index in indices:
        pipeline.lset(self.key, index, 0)
        num += 1
    if num:  # pragma: no cover
        pipeline.lrem(self.key, num, 0)
Example #18
def _get_next_session_id_via_pipeline(self, pipeline: Pipeline) -> int:
    # Do this in a loop to account for the (unlikely) possibility that someone
    # manually used a key out of order.
    session_id = None
    while session_id is None:
        # Get a session id based on the stored (or initialized) value at
        # _next_session_id_key, then bump said value. Remember, Redis persists
        # strings (though it can implicitly convert from int on its side).
        session_id_str: Optional[str] = pipeline.get(self._next_session_id_key)
        if session_id_str is None:
            session_id = self.get_initial_session_id_value()
            pipeline.set(self._next_session_id_key, session_id + 1)
        else:
            session_id = int(session_id_str)
            pipeline.incr(self._next_session_id_key, 1)
        # However, if the key is already in use (via manual selection), try again.
        if pipeline.hlen(self.get_key_for_session_by_id(session_id)) != 0:
            session_id = None
    return session_id
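The reads above (get, hlen) only return real values while the pipeline is in immediate mode, i.e. before multi(); in redis-py that is how a pipeline behaves after watch(). A generic sketch of the same check-then-set idea (the key name is hypothetical):

import redis

r = redis.Redis(decode_responses=True)
with r.pipeline() as pipe:
    pipe.watch('next-session-id')   # immediate mode: commands return results
    current = pipe.get('next-session-id')
    next_id = int(current) + 1 if current is not None else 1
    pipe.multi()                    # commands are buffered from here on
    pipe.set('next-session-id', next_id)
    pipe.execute()                  # raises WatchError if the key changed meanwhile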
Example #19
def cache_teams(pipe: Pipeline) -> None:
    """
    Put "teams" table data from database to cache.

    Just adds commands to pipeline stack, don't forget to execute afterwards.
    """
    with utils.db_cursor(dict_cursor=True) as (_, curs):
        curs.execute(models.Team.get_select_active_query())
        teams = curs.fetchall()

    teams = [models.Team.from_dict(team) for team in teams]

    key = CacheKeys.teams()
    pipe.delete(key)
    if teams:
        pipe.sadd(key, *[team.to_json() for team in teams])
    for team in teams:
        pipe.set(CacheKeys.team_by_token(team.token), team.id)
Example #20
        def _store(p: Pipeline):
            # The mass of a vehicle includes any objects it carries, so
            # we don't need to check the mass of individual objects in
            # a container.
            if obj.mass > self.capacity_mass:
                raise NoCapacityError
            p.multi()

            object_dict = schema.dump(obj)

            if hasattr(obj, 'objects'):
                object_dict['objects'] = {}
                for name, contained_obj in obj.objects.items():
                    item_schema = object_schemas_by_type[contained_obj.type]
                    object_dict['objects'][name] = item_schema.dump(
                        contained_obj)

            p.jsonset(deck_key, f'.objects.{obj.name}', object_dict)
            p.jsonnumincrby(deck_key, '.mass', obj.mass)
Example #21
def _set_address(self, client: Client, ipv4_address: str, port: int,
                 pipe: Pipeline) -> None:
    pipe.setex(f'{client.id}:{ipv4_address}', port,
               self.default_address_ttl)
Example #22
def _redis_set_last_program_start(self, site_id: SiteId,
                                  pipeline: Pipeline) -> None:
    program_start = self._livestatus_get_last_program_start(site_id)
    pipeline.hset(self._program_starts, key=site_id, value=program_start)
Example #23
def index_pats(pipe: Pipeline, cfg: CollectionConfig, tok: str):
    """Index starting and ending patterns"""

    if len(tok) >= 2:
        pipe.sadd(f'{cfg.name}/s_pat/{tok[0]}{tok[1]}', tok)
        pipe.sadd(f'{cfg.name}/e_pat/{tok[-2]}{tok[-1]}', tok)

    if len(tok) >= 3:
        pipe.sadd(f'{cfg.name}/s_pat/{tok[0]}?{tok[2]}', tok)
        pipe.sadd(f'{cfg.name}/s_pat/?{tok[1]}{tok[2]}', tok)

        pipe.sadd(f'{cfg.name}/e_pat/{tok[-2]}?{tok[-1]}', tok)
        pipe.sadd(f'{cfg.name}/e_pat/{tok[-3]}{tok[-2]}?', tok)

    if len(tok) >= 4:
        pipe.sadd(f'{cfg.name}/s_pat/{tok[0]}??{tok[3]}', tok)
        pipe.sadd(f'{cfg.name}/s_pat/?{tok[1]}?{tok[3]}', tok)
        pipe.sadd(f'{cfg.name}/e_pat/{tok[-4]}??{tok[-1]}', tok)
        pipe.sadd(f'{cfg.name}/e_pat/{tok[-4]}?{tok[-2]}?', tok)
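For concreteness, the keys written for the hypothetical token "cart" in a collection named "shop" would be:

# tok = "cart", cfg.name = "shop" (hypothetical values):
#   len >= 2: shop/s_pat/ca    shop/e_pat/rt
#   len >= 3: shop/s_pat/c?r   shop/s_pat/?ar   shop/e_pat/r?t   shop/e_pat/ar?
#   len >= 4: shop/s_pat/c??t  shop/s_pat/?a?t  shop/e_pat/c??t  shop/e_pat/c?r?
# Each of these sets gains the member "cart".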
Example #24
        def _store(p: Pipeline):
            # The mass of a vehicle includes any objects it carries, so
            # we don't need to check the mass of individual objects in
            # a container.
            if obj.mass > self.capacity_mass:
                raise NoCapacityError

            item_key = keys.deck_item(self.name, obj.name)
            deck_mass_key = keys.deck_stored_mass(self.name)
            schema = object_schemas_by_type.get(obj.type)
            objects = {}

            if hasattr(obj, 'objects'):
                # This is a container, so we need to persist its objects.
                objects = obj.objects

            p.multi()

            object_dict = schema.dump(obj)
            # Redis can't store lists in a hash, so we persist objects
            # within a container object separately.
            object_dict.pop('objects', None)

            # Persist objects in a container in their own hashes -- and
            # link them to the container using a sorted set.
            for contained_obj in objects.values():
                item_schema = object_schemas_by_type[contained_obj.type]
                container_key = keys.container_items_set(obj.name)
                container_item_key = keys.container_item(
                    obj.name, contained_obj.name)
                p.zadd(container_key, {contained_obj.name: contained_obj.mass})
                p.hset(container_item_key,
                       mapping=item_schema.dump(contained_obj))

            p.zadd(deck_items_key, {obj.name: obj.mass})
            p.hset(item_key, mapping=object_dict)
            p.incrby(deck_mass_key, obj.mass)
Example #25
def redis_set_query_pipe(pipe: Pipeline, name: str, value: str, expire: int, db: int) -> None:
    try:
        pipe.execute_command('SELECT', db)
        pipe.set(name=name, value=value, ex=expire)
    except WatchError:
        logger.error("WatchError while queueing SET for %r", name, exc_info=True)
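A driver sketch for the helper above (connection details are assumptions); note that the queued SELECT switches the database for the commands that follow it on the same connection:

import redis

r = redis.Redis()
with r.pipeline() as pipe:
    redis_set_query_pipe(pipe, name='query:1', value='42', expire=60, db=3)
    pipe.execute()  # runs SELECT 3, then SET query:1 42 EX 60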
Example #26
def pipeline(self, transaction=True, shard_hint=None, origin=False):
    if origin:
        return _Redis.pipeline(self, transaction, shard_hint)
    else:
        return Pipeline(self.connection_pool, self.response_callbacks,
                        transaction, shard_hint)
Example #27
def cache_game_config(pipe: Pipeline) -> None:
    """Put game config to cache (without round or game_running)."""
    game_config = game.get_db_game_config()
    data = game_config.to_json()
    pipe.set(CacheKeys.game_config(), data)
Example #28
def pipeline(self, conn):
    return Pipeline(connection_pool=FakePool(conn),
                    response_callbacks={},
                    transaction=False,
                    shard_hint=None)