Esempio n. 1
0
    def _redis_delete_old_and_set_new(
        self,
        labels: Mapping[SiteId, _Labels],
        label_type: str,
        pipeline: Pipeline,
    ) -> None:

        sites_list: List[SiteId] = []
        for site_id, label in labels.items():
            if site_id not in self._sites_to_update:
                continue

            if not label:
                continue

            label_key = "%s:%s:%s" % (self._namespace, site_id, label_type)
            pipeline.delete(label_key)
            # NOTE: Mapping is invariant in its key because of __getitem__, so for mypy's sake we
            # make a copy below. This doesn't matter from a performance view, hset is iterating over
            # the dict anyway, and after that there is some serious I/O going on.
            # NOTE: pylint is too dumb to see the need for the comprehension.
            # pylint: disable=unnecessary-comprehension
            pipeline.hset(label_key, mapping={k: v for k, v in label.items()})

            if site_id not in sites_list:
                sites_list.append(site_id)

        for site_id in sites_list:
            self._redis_set_last_program_start(site_id, pipeline)
Esempio n. 2
0
 def _populate(
         self,  # type: ignore
         pipeline: Pipeline,
         arg: InitArg = tuple(),
         *,
         sign: int = +1,
         **kwargs: int,
 ) -> None:
     """Queue an HSET that initializes this counter's Redis hash.

     ``arg`` is either a mapping of key -> count or a plain iterable of
     keys (each occurrence contributing ``sign``); ``kwargs`` supply
     additional per-key counts.  ``sign`` (keyword-only) scales every
     contribution.  Nothing is queued when there is nothing to set.
     """
     # Pending per-key contributions, merged with the counter's current
     # values at the end.
     to_set = {}
     try:
         # EAFP: treat arg as a mapping of counts if it has .items().
         for key, value in cast(Counter[JSONTypes], arg).items():
             to_set[key] = sign * value
     except AttributeError:
         # Otherwise arg is an iterable of bare keys.
         # NOTE(review): self[key] seeds a key's first occurrence here, and
         # self[key] is added AGAIN in the final merge below — confirm
         # self[key] is 0 for absent keys, otherwise existing counts are
         # double-counted.
         for key in arg:
             to_set[key] = to_set.get(key, self[key]) + sign
     for key, value in kwargs.items():
         # NOTE(review): a pending value of exactly 0 falls back to
         # self[key]; verify that is intended rather than
         # ``to_set.get(key, 0)``.
         original = self[key] if to_set.get(key, 0) == 0 else to_set[key]
         to_set[key] = original + sign * value
     # Convert the pending deltas into absolute values by adding the
     # counter's current counts.
     to_set = {key: self[key] + value for key, value in to_set.items()}
     encoded_to_set = {
         self._encode(k): self._encode(v)
         for k, v in to_set.items()
     }
     if encoded_to_set:
         # Open the transactional part of the pipeline and queue the write.
         pipeline.multi()
         pipeline.hset(self.key, mapping=encoded_to_set)  # type: ignore
Esempio n. 3
0
def index_document_pipe(pipe: Pipeline, cfg: CollectionConfig, doc: Doc):
    """Push a document into the index.

    Stores the raw document as JSON in the collection's docs hash, then
    indexes its text, facet and numeric fields.

    Raises:
        ValueError: if a facet value is not scalar, or a numeric-field
            value is not a number.  (These checks were previously
            ``assert`` statements, which are stripped under ``python -O``;
            real exceptions keep the validation in optimized mode.)
    """
    doc_id = x_id(doc, cfg.id_fld)

    pipe.hset(f'{cfg.name}/docs', doc_id, json.dumps(doc))

    for fld in cfg.text_flds:
        if fld in doc:
            index_text(pipe, cfg, doc_id, doc[fld])

    for fld in cfg.facet_flds:
        if fld not in doc:
            continue

        for val in as_list(doc, fld):
            # Validate with a real exception instead of assert so the check
            # survives optimized mode.
            if not is_scalar(val):
                raise ValueError(f"Found non scalar value ({val}) in field '{fld}' of "
                                 f"document with id {doc_id}")

            index_facet(pipe, cfg.name, doc_id, fld, val)

    for fld in cfg.number_flds:
        if fld not in doc:
            continue

        for val in as_list(doc, fld):
            if val is None:
                # Nulls are simply not indexed.
                continue
            if not is_number(val):
                raise ValueError(f"Found non numeric value ({val}) in field '{fld}' of "
                                 f"document with id {doc_id}")

            index_numeric(pipe, cfg.name, doc_id, fld, val)
Esempio n. 4
0
def add_to_indexed_offers(pipeline: Pipeline, offer_id: int,
                          offer_details: dict) -> None:
    """Queue the offer details, serialized as JSON, into the indexed-offers hash.

    Redis errors are logged and swallowed so indexing stays best-effort.
    """
    try:
        payload = json.dumps(offer_details)
        hash_name = RedisBucket.REDIS_HASHMAP_INDEXED_OFFERS_NAME.value
        pipeline.hset(hash_name, offer_id, payload)
    except redis.exceptions.RedisError as error:
        logger.exception("[REDIS] %s", error)
Esempio n. 5
0
 def _populate(
         self,
         pipeline: Pipeline,
         arg: InitArg = tuple(),
         **kwargs: JSONTypes,
 ) -> None:
     """Queue an HSET writing the initial key/value pairs for this hash.

     ``arg`` may be a mapping or an iterable of (key, value) pairs;
     keyword arguments are merged in last, so they win on duplicate
     keys.  Nothing is queued when there is nothing to set.
     """
     # A mapping is normalized to an item iterable; anything without
     # .items() is assumed to already yield pairs (EAFP).
     with contextlib.suppress(AttributeError):
         arg = cast(InitMap, arg).items()
     encoded = {
         self._encode(field): self._encode(value)
         for field, value in itertools.chain(cast(InitIter, arg), kwargs.items())
     }
     if encoded:
         pipeline.multi()
         pipeline.hset(self.key, mapping=encoded)  # type: ignore
Esempio n. 6
0
        def _store(p: Pipeline):
            """Transactionally persist ``obj`` onto this deck.

            ``obj``, ``self`` and ``deck_items_key`` are free variables from
            the enclosing scope; this closure is presumably handed to a Redis
            transaction helper — confirm against the caller.

            Raises NoCapacityError when the object is too heavy for the deck.
            """
            # The mass of a vehicle includes any objects it carries, so
            # we don't need to check the mass of individual objects in
            # a container.
            if obj.mass > self.capacity_mass:
                raise NoCapacityError

            item_key = keys.deck_item(self.name, obj.name)
            deck_mass_key = keys.deck_stored_mass(self.name)
            # NOTE(review): .get() returns None for an unregistered type, which
            # would make schema.dump(obj) below fail — confirm every obj.type
            # is present in object_schemas_by_type.
            schema = object_schemas_by_type.get(obj.type)
            objects = {}

            if hasattr(obj, 'objects'):
                # This is a container, so we need to persist its objects too.
                objects = obj.objects

            # Everything queued after multi() executes atomically.
            p.multi()

            object_dict = schema.dump(obj)
            # Redis can't store lists in a hash, so we persist objects
            # within a container object separately.
            object_dict.pop('objects', None)

            # Persist objects in a container in their own hashes -- and
            # link them to the container using a sorted set.
            for contained_obj in objects.values():
                item_schema = object_schemas_by_type[contained_obj.type]
                container_key = keys.container_items_set(obj.name)
                container_item_key = keys.container_item(
                    obj.name, contained_obj.name)
                p.zadd(container_key, {contained_obj.name: contained_obj.mass})
                p.hset(container_item_key,
                       mapping=item_schema.dump(contained_obj))

            # Index the object on the deck (scored by its mass) and keep the
            # deck's total stored mass up to date.
            p.zadd(deck_items_key, {obj.name: obj.mass})
            p.hset(item_key, mapping=object_dict)
            p.incrby(deck_mass_key, obj.mass)
Esempio n. 7
0
 def _redis_set_last_program_start(self, site_id: SiteId,
                                   pipeline: Pipeline) -> None:
     program_start = self._livestatus_get_last_program_start(site_id)
     pipeline.hset(self._program_starts, key=site_id, value=program_start)