Example #1
    def from_data(cls, data):
        self = cls.__new__(cls)

        self._board = [list(map(int, row)) for row in sliced(data['board'], 9)]
        self._clues = {(int(x), int(y)) for x, y in sliced(data['clues'], 2)}
        self.new = False
        self.dirty = False
        self._clue_markers = DEFAULT_CLUE_EMOJIS

        return self
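A minimal sketch of what sliced() does for this loader, using hypothetical serialized data (the 81-char board string and the clue string below are made up for illustration):

# Hypothetical data; sliced() cuts the 81-char board into nine 9-char rows
# and the clue string into 2-char (x, y) pairs.
from more_itertools import sliced

data = {'board': '0' * 81, 'clues': '0012'}
board = [list(map(int, row)) for row in sliced(data['board'], 9)]
clues = {(int(x), int(y)) for x, y in sliced(data['clues'], 2)}
assert len(board) == 9 and all(len(row) == 9 for row in board)
assert clues == {(0, 0), (1, 2)}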
Example #2
    def from_data(cls, data):
        # We are bypassing __init__ here since it doesn't apply here.
        self = cls.__new__(cls)

        self._board = [list(map(int, row)) for row in sliced(data['board'], 9)]
        self._clues = {(int(x), int(y)) for x, y in sliced(data['clues'], 2)}
        self.new = False
        self.dirty = False  # We don't need to save a game we just loaded.
        self._clue_markers = DEFAULT_CLUE_EMOJIS  # Needed to specially mark the clues.

        return self
Example #3
    async def start(self):
        self.total_length = await self.fetch_total_length()

        self.aiofile = await aiofiles.open(self.local_path, "wb", buffering=0)
        try:
            await self.aiofile.truncate(self.total_length)
            ranges = list(
                more_itertools.sliced(range(self.total_length),
                                      self.chunk_size))
            self.parts = [(r, asyncio.ensure_future(self.download_range(r)))
                          for r in ranges]
            log.debug(
                "Started download of %s, expecting %s bytes split into %s parts",
                self.local_path, self.total_length, len(self.parts))
        except:
            await self.aiofile.close()
            raise

        async def await_completed():
            try:
                completed_ranges = await asyncio.gather(
                    *(f for r, f in self.parts))
                log.debug(
                    "Finished download of %s, expecting %s bytes split into %s parts",
                    self.local_path, self.total_length, len(self.parts))
                return completed_ranges
            finally:
                await self.aiofile.close()

        self.completed = asyncio.ensure_future(await_completed())
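A sketch of the range-splitting step above: sliced() accepts any sliceable sequence, so slicing a range yields contiguous sub-ranges whose endpoints map directly onto HTTP byte ranges (the sizes here are made up):

from more_itertools import sliced

total_length, chunk_size = 10, 4  # hypothetical sizes
ranges = list(sliced(range(total_length), chunk_size))
assert ranges == [range(0, 4), range(4, 8), range(8, 10)]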
Example #4
def main():
    args = sys.argv[1:]
    if len(args) % 2:
        sys.exit("Expected an even number of arguments")

    with jsonstreams.Stream(jsonstreams.Type.object,
                            filename="/dev/stdout") as s:
        for k, v in more_itertools.sliced(args, 2):
            if k.endswith("@!") or k.endswith("!@"):
                # This currently isn't memory-efficient at all.  :(
                if v == '-':
                    s.write(k[0:-2], json.load(sys.stdin))
                else:
                    with open(v, "r") as f:
                        with flocked(f):
                            s.write(k[0:-2], json.load(f))
            elif k.endswith("@"):
                # Waiting on https://github.com/dcbaker/jsonstreams/issues/30
                # for a way to do this in bounded memory.
                if v == '-':
                    s.write(k[0:-1], sys.stdin.read())
                else:
                    with open(v, "r") as f:
                        with flocked(f):
                            s.write(k[0:-1], f.read())
            elif k.endswith("!"):
                s.write(k[0:-1], json.loads(v))
            else:
                s.write(k, v)
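The core trick in main() is pairing a flat argv list two items at a time; a quick illustration with made-up arguments:

from more_itertools import sliced

args = ['name', 'alice', 'age!', '30']  # hypothetical argv tail
pairs = [tuple(p) for p in sliced(args, 2)]
assert pairs == [('name', 'alice'), ('age!', '30')]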
Example #5
    async def create(cls, ctx):
        def sort_key(c):
            return command_category(c), c.qualified_name

        entries = (cmd for cmd in sorted(ctx.bot.commands, key=sort_key)
                   if not cmd.hidden)

        nested_pages = []
        per_page = 30

        # (cog, description, first 10 commands)
        # (cog, description, next 10 commands)
        # ...
        for parent, cmds in itertools.groupby(entries, key=command_category):
            command, cmds = spy(cmds)
            command = next(iter(command))  # spy returns (list, iterator)

            # We can't rely on the package being in bot.extensions, because
            # maybe they wanted to only import one or a few extensions instead
            # of the whole folder.
            pkg_name = command.module.rpartition('.')[0]
            module = sys.modules[pkg_name]
            description = inspect.getdoc(module) or 'No description... yet.'

            lines = [pair async for pair in _command_formatters(cmds, ctx)]
            nested_pages.extend((parent.title(), description, page)
                                for page in sliced(lines, per_page))

        self = cls(ctx, nested_pages,
                   per_page=1)  # needed to break the slicing in __getitem__
        return self
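The pagination here relies on sliced() to cut each category's formatted lines into fixed-size pages; a minimal sketch with dummy lines:

from more_itertools import sliced

lines = [f'cmd{i}' for i in range(70)]  # hypothetical formatted command lines
pages = list(sliced(lines, 30))         # per_page = 30, as above
assert [len(p) for p in pages] == [30, 30, 10]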
Example #6
    def move(self, move):
        """Takes a move and apply it to the game."""

        if move not in self.legal_moves():
            raise ValueError(f'illegal move: {move!r}')

        board = self._board
        squares = [_xy_to_i(xy) for xy in sliced(move, 2)]
        end = squares[-1]

        piece = board[squares[0]]
        if end >> 3 == 7 * (not self.turn) and not _is_king(piece):
            # New king
            piece = piece.upper()

        for before, after in pairwise(squares):
            difference = abs(before - after)
            if difference not in {18, 14}:
                continue

            # A two step rather than a one step means a capture.
            square_between = min(before, after) + difference // 2
            board[square_between] = ' '

        board[squares[0]] = ' '
        board[end] = piece
        self._last_move = move
        self._half_moves += 1
        self.turn = not self.turn
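The move string is consumed two characters at a time, one (x, y) square per pair; a sketch with a hypothetical stand-in for the private _xy_to_i helper:

from more_itertools import sliced

def xy_to_i(xy):  # hypothetical: column + 8 * row -> 0..63 board index
    x, y = map(int, xy)
    return y * 8 + x

squares = [xy_to_i(xy) for xy in sliced('0111', 2)]
assert squares == [8, 9]  # '01' -> (0, 1) -> 8, '11' -> (1, 1) -> 9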
Example #7
    async def send_members(self,
                           ctx: commands.Context,
                           msg_list: list,
                           title: str = None) -> None:
        """send list of an object's members, slicing it if needed"""

        msg_list = sorted(msg_list, key=lambda x: x.lower())
        if title:
            msg_list.insert(0, title + '\n\n')
        msg = '```\n' + '  '.join(msg_list) + '```'
        if len(msg) < 1990:
            await ctx.send(msg)
        else:
            slices = 2
            msg_slices = None
            while True:
                msg_list_slices = more_itertools.sliced(
                    msg_list,
                    max(1, len(msg_list) // slices))
                msg_slices = ['  '.join(x) for x in msg_list_slices]
                if all(len(x) < 1970 for x in msg_slices):
                    break
                slices += 1
            for msg in msg_slices:
                msg = '```\n' + msg + '```'
                await ctx.send(msg)
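The retry loop above keeps increasing the slice count until every joined chunk fits under Discord's message limit; the same idea in isolation (limit and data are made up):

from more_itertools import sliced

msg_list = ['x' * 40] * 100  # hypothetical member names
limit, slices = 1970, 2
while True:
    chunks = ['  '.join(x)
              for x in sliced(msg_list, max(1, len(msg_list) // slices))]
    if all(len(c) < limit for c in chunks):
        break
    slices += 1
assert all(len(c) < limit for c in chunks)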
Example #8
    async def create(cls, ctx):
        def key(c):
            return c.cog_name or '\u200bMisc'

        entries = (cmd for cmd in sorted(ctx.bot.commands, key=key)
                   if not cmd.hidden)
        nested_pages = []
        per_page = 10

        # (cog, description, first 10 commands)
        # (cog, description, next 10 commands)
        # ...
        get_cog = ctx.bot.get_cog
        for cog, cmds in itertools.groupby(entries, key=key):
            cog = get_cog(cog)
            if getattr(cog, '__hidden__', False):
                continue

            if cog is None:
                description = 'This is all the misc commands!'
            else:
                description = inspect.getdoc(cog) or 'No description... yet.'

            lines = [
                ' | '.join(line)
                async for line in _command_formatters(cmds, ctx)
            ]
            nested_pages.extend(
                (cog, description, page) for page in sliced(lines, per_page))

        self = cls(
            ctx, nested_pages,
            lines_per_page=1)  # needed to break the slicing in __getitem__
        return self
Example #9
    def __split_payload__(payload):
        """
        Encodes the given payload with base64 and splits it into chunks of size
        PAYLOAD_BYTES.
        """
        payload_base64 = base64.b64encode(
            payload.encode('ascii')).decode('utf-8')
        return more_itertools.sliced(payload_base64, PAYLOAD_BYTES)
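Round-tripping the helper's output is straightforward, since base64 text can be cut at any offset; a sketch with a hypothetical PAYLOAD_BYTES:

import base64
import more_itertools

PAYLOAD_BYTES = 8  # hypothetical chunk size
payload_base64 = base64.b64encode(b'hello world').decode('ascii')
chunks = list(more_itertools.sliced(payload_base64, PAYLOAD_BYTES))
assert ''.join(chunks) == payload_base64
assert all(len(c) <= PAYLOAD_BYTES for c in chunks)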
Example #10
    def _test(self, mode, gpu=True):
        assert mode in ['test', 'validation']
        torch.cuda.empty_cache()
        model = self.model if gpu else self.model.cpu()

        batch_size = self.test_batch_size

        model.eval()
        with torch.no_grad():
            dataset = self.data_loader.dataset
            node_features = dataset.node_features
            if mode == 'test':
                # vocab = self.test_vocab
                # node2pos = self.test_node2pos
                node2pos = dataset.test_node2parent
                vocab = list(node2pos.keys())
            else:
                vocab = self.valid_vocab
                node2pos = self.valid_node2pos
            candidate_positions = self.candidate_positions
            batched_model = []  # save the CPU graph representation
            batched_positions = []
            for us_l in tqdm(mit.sliced(candidate_positions, batch_size),
                             desc="Generating graph encoding ..."):

                us, bgu, bpu, lens = None, None, None, None
                if 'r' in self.mode:
                    us = torch.tensor(us_l)
                if 'g' in self.mode:
                    bgu = [self.node2subgraph[e] for e in us_l]
                if 'p' in self.mode:
                    bpu, lens = dataset._get_batch_edge_node_path(us_l)
                ur = self.model.forward_encoders(us, bgu, bpu, lens)
                batched_model.append(ur.detach().cpu())
                batched_positions.append(len(us_l))

            # start per query prediction
            all_ranks = []
            for i, query in tqdm(enumerate(vocab), desc='testing'):
                batched_energy_scores = []
                nf = node_features[query, :].to(self.device)
                for ur, n_position in zip(batched_model, batched_positions):
                    expanded_nf = nf.expand(n_position, -1)
                    ur = ur.to(self.device)
                    energy_scores = model.match(ur, expanded_nf)
                    batched_energy_scores.append(energy_scores)
                batched_energy_scores = torch.cat(batched_energy_scores)
                batched_energy_scores, labels = rearrange(
                    batched_energy_scores, candidate_positions,
                    node2pos[query])
                all_ranks.extend(self.pre_metric(batched_energy_scores,
                                                 labels))
            total_metrics = [metric(all_ranks) for metric in self.metrics]

        return total_metrics
Example #11
    def _build(self, lexer):
        """Build :class:`~ctfile.ctfile.Ctab` instance.

        :return: :class:`~ctfile.ctfile.Ctab` instance.
        :rtype: :class:`~ctfile.ctfile.Ctab`.
        """
        atom_number = 1
        while True:
            token = next(lexer)
            key = token.__class__.__name__

            if key == 'CtabCountsLine':
                self[key].update(token._asdict())

            elif key == 'CtabAtomBlock':
                self[key].append(
                    Atom(atom_number=str(atom_number), **token._asdict()))
                atom_number += 1

            elif key == 'CtabBondBlock':
                first_atom_number, second_atom_number, bond_type, bond_stereo, \
                not_used1, bond_topology, reacting_center_status = token

                first_atom = self.atoms[int(first_atom_number) - 1]
                second_atom = self.atoms[int(second_atom_number) - 1]
                first_atom.neighbors.append(second_atom)
                second_atom.neighbors.append(first_atom)

                bond = Bond(first_atom=first_atom,
                            second_atom=second_atom,
                            bond_type=bond_type,
                            bond_stereo=bond_stereo,
                            not_used1=not_used1,
                            bond_topology=bond_topology,
                            reacting_center_status=reacting_center_status)
                self[key].append(bond)

            elif key == 'CtabPropertiesBlock':
                property_name = token.name
                keys = self.ctab_conf[self.version][property_name]['values']
                ctab_properties = more_itertools.sliced(
                    token.line.split()[3:], len(keys))

                for ctab_property in ctab_properties:
                    atom_number, property_value = ctab_property
                    self.atoms[
                        int(atom_number) -
                        1]._ctab_property_data[property_name] = property_value

            elif key == 'CtabBlockEnd':
                break

            else:
                raise KeyError(
                    'Ctab object is not supposed to contain any other '
                    'information: "{}".'.format(key))
Example #12
def genCirc(width, depth):
    circ = [['' for x in range(depth)] for y in range(width)]
    for d in range(depth):
        w = 0
        while w < width:
            gate = nextGate(width - w)
            for s in sliced(gate[1], 2):
                circ[w][d] = s
                w += 1
    return circ
Example #13
    def updateData(self, data=None):
        if data is None: data = self.parent.parent.data
        lines = []
        currvals = data[self.selector.currentText()]
        for t, d in zip(currvals.index, currvals.values):
            s = hex(d)[2:]
            if len(s) % 2: s = "0" + s
            s = " ".join(sliced(s, 2))
            lines.append(f"{t:.4f}\t{s}")

        self.table.setPlainText("\n".join(lines))
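The hex-dump formatting above pads odd-length hex strings and then groups byte pairs; for example:

from more_itertools import sliced

d = 0xDEADBEEF
s = hex(d)[2:]
if len(s) % 2:
    s = '0' + s
assert ' '.join(sliced(s, 2)) == 'de ad be ef'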
Example #14
    async def register_events(self, agentproc_id, events):
        """
        RPC method: Used by agent processes to hand over generated events.

        agentproc_id: index of the agent process (starts at 0)
        events: list of events
        """

        assert 0 <= agentproc_id < self.num_agentprocs

        for event_chunk in sliced(events, EVENT_CHUNKSIZE):
            await self.ev_queue_local.put(event_chunk)
        return True
Example #15
def writeDatatoBlockchain(text, receiver, amt):
    splits = list(sliced(text, 350))  # create a sliced list of 350-char strings
    n_splits = len(splits)  # number of chunks actually created
    tail = writeUnitToBlockchain(splits[n_splits - 1], receiver, amt)  # transaction that acts as the tail of the data
    cursor = tail
    if n_splits == 1:
        return cursor  # if only a single transaction was created, the tail is the cursor

    # For each remaining chunk, create a transaction holding the txid of its successor.
    for i in range(n_splits - 2, -1, -1):
        splits[i] = 'next:' + cursor + " " + splits[i]
        cursor = writeUnitToBlockchain(splits[i], receiver, amt)
    return cursor
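The chunks end up reverse-linked: each transaction embeds the txid of its successor, so a reader can start at the returned cursor and follow the chain. A self-contained sketch with a fake ledger standing in for writeUnitToBlockchain (all names here are hypothetical):

from more_itertools import sliced

ledger = {}

def write_unit(data):  # fake stand-in: records data, returns a txid
    txid = f'tx{len(ledger)}'
    ledger[txid] = data
    return txid

text = 'x' * 800
splits = list(sliced(text, 350))
cursor = write_unit(splits[-1])
for chunk in reversed(splits[:-1]):
    cursor = write_unit(f'next:{cursor} {chunk}')
# cursor now names the head chunk; each entry points at its successor.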
Example #16
    def generate(prefix, id, ad=None):
        prefix = prefix.translate(Gubbins.__prefix_fixer)
        prefix_seed, prefix_checksum = Gubbins.__prefix_to_seed(prefix, ad)

        assert 0 <= id <= Gubbins.__mask
        id_point = prefix_seed ^ Gubbins.__hash_id(prefix_seed ^ id)
        id_bytes = id_point.to_bytes(length=4, byteorder='big')
        checksum = prefix_checksum ^ Gubbins.__checksum(id_bytes)
        id_bytes += bytes((checksum, ))
        assert prefix_checksum == Gubbins.__checksum(id_bytes)

        id_value = Gubbins.__encode(id_bytes, Gubbins.__alphabet).decode()
        return Gubbins.__separator.join(
            (prefix, *sliced(id_value, 4))).translate(Gubbins.__formatter)
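The final join groups the encoded id into 4-character blocks; roughly as follows (the separator and value are made up, and the real code also applies a translation table afterwards):

from more_itertools import sliced

id_value = 'ABCDEFGH'  # hypothetical encoded id
assert '-'.join(('PRE', *sliced(id_value, 4))) == 'PRE-ABCD-EFGH'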
Example #17
def read_library_info():
    """Read library info from json file.

    :return: list of all books from json file
    """

    with open("library_.json", "r", encoding="utf-8") as file:
        library = json.load(file)

    for book in library:
        book["img_src"] = f"../{book['img_src']}"
        book["book_path"] = f"../{book['book_path']}"

    return list(sliced(library, 20))
Example #18
async def get_or_create(tx, bstore, blob):
    hash = hasher(blob).digest()
    key = found.pack((bstore.prefix_hash, hash))
    maybe_uid = await found.get(tx, key)
    if maybe_uid is not None:
        return UUID(bytes=maybe_uid)
    # Otherwise create the hash entry and store the blob with a new uid
    # TODO: Use a counter and implement a garbage collector, and implement
    # bstore.delete
    uid = uuid4()
    found.set(tx, key, uid.bytes)
    for index, chunk in enumerate(sliced(blob, found.MAX_SIZE_VALUE)):
        found.set(tx, found.pack((bstore.prefix_blob, uid, index)),
                  bytes(chunk))
    return uid
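Each blob is stored as index-keyed chunks no larger than the value-size limit; the slicing step in isolation (a small constant stands in for found.MAX_SIZE_VALUE):

from more_itertools import sliced

MAX_SIZE_VALUE = 4  # hypothetical limit
blob = b'0123456789'
chunks = {i: bytes(c) for i, c in enumerate(sliced(blob, MAX_SIZE_VALUE))}
assert chunks == {0: b'0123', 1: b'4567', 2: b'89'}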
Example #19
def combine_ranges(ranges) -> list[tuple[int, int]]:
    """Combines overlapping and adjacent ranges

    example:
    [(1, 2), (3, 4), (6, 7)] to [(1, 4), (6, 7)]
    """
    result = []

    for a, b in sorted(ranges):
        if result and a <= result[-1] + 1:
            result[-1] = max(result[-1], b)
        else:
            result.extend((a, b))

    return [tuple(pair) for pair in sliced(result, 2)]
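A quick check of the intended behaviour against the docstring example, plus a contained range (these asserts assume the fixes above):

assert combine_ranges([(1, 2), (3, 4), (6, 7)]) == [(1, 4), (6, 7)]
assert combine_ranges([(1, 10), (2, 3)]) == [(1, 10)]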
Example #20
def _command_lines(command_can_run_pairs):
    if len(command_can_run_pairs) % 2:
        command_can_run_pairs = command_can_run_pairs + [('', '')]

    pairs = list(sliced(command_can_run_pairs, COMMAND_COLUMNS))
    widths = [max(len(c[0]) for c in column) for column in zip(*pairs)]

    def format_pair(pair, width):
        command, can_run = pair
        if not command:
            return ''

        formatted = f'`{_padded(command, width)}`'
        return formatted if can_run else f'~~{formatted}~~'

    return (' '.join(map(format_pair, pair, widths)) for pair in pairs)
Example #21
def writeDatatoBlockchain(text):
    splits = list(sliced(text, 350))  # create a sliced list of 350-char strings
    n_splits = len(splits)  # number of chunks actually created
    #TODO pass this receiving address as parameter
    tail = writeUnitToBlockchain(
        splits[n_splits - 1], 'oV9ZoREBSV5gFcZTBEJ7hdbCrDLSb4g96i'
    )  # transaction that acts as the tail of the data
    cursor = tail
    if n_splits == 1:
        return cursor  # if only a single transaction was created, the tail is the cursor

    # For each remaining chunk, create a transaction holding the txid of its successor.
    for i in range(n_splits - 2, -1, -1):
        splits[i] = 'next:' + cursor + " " + splits[i]
        cursor = writeUnitToBlockchain(splits[i],
                                       'oV9ZoREBSV5gFcZTBEJ7hdbCrDLSb4g96i')
    return cursor
Example #22
def _command_lines(command_can_run_pairs):
    if len(command_can_run_pairs) % 2:
        # Avoid modifying the list if we can help it
        command_can_run_pairs = command_can_run_pairs + [('', '')]

    pairs = list(sliced(command_can_run_pairs, NUM_COMMAND_COLUMNS))
    widths = [max(len(c[0]) for c in column) for column in zip(*pairs)]

    # XXX: Does not work on iOS clients for some reason -- the
    #      strikethrough doesn't render at all.
    def format_pair(pair, width):
        command, can_run = pair
        if not command:
            return ''

        # Simply doing f'`{command:>width + 1}`' is not enough because
        # we want to cross out only the command text, not the entire
        # block. Doing that requires making two code blocks, one for
        # the command, and one padded with spaces.
        #
        # However Discord loves to be really fucky with codeblocks. If
        # there are two backticks close together, it will make one huge
        # code block with the middle two unescaped. e.g `abc``test` will
        # make one long code block with the string of "abc``test" rather
        # than what we really want.
        #
        # Discord also loves to mess around with spaces, because our code
        # block is technically empty, Discord will just strip out the
        # whitespace, leaving us with an empty code block. Thus we need
        # three zwses -- one between the two code blocks to prevent them
        # from merging, and two at each end of the 2nd code block to prevent
        # the padding spaces from getting stripped.
        #
        # ~~*phew*~~
        formatted = f'`{command}`'
        if not can_run:
            formatted = f'~~{formatted}~~'

        to_pad = width - len(command) + 1
        padding = f'\u200b`\u200b{" " * to_pad}\u200b`' if to_pad > 0 else ''

        return formatted + padding

    return (' '.join(map(format_pair, pair, widths)) for pair in pairs)
Example #23
def _command_lines(command_can_run_pairs):
    if len(command_can_run_pairs) % 2:
        # Avoid modifying the list if we can help it
        command_can_run_pairs = command_can_run_pairs + [('', '')]

    pairs = list(sliced(command_can_run_pairs, NUM_COMMAND_COLUMNS))
    widths = [max(len(c[0]) for c in column) for column in zip(*pairs)]

    # XXX: Does not work on iOS clients for some reason -- the
    #      strikethrough doesn't render at all.
    def format_pair(pair, width):
        command, can_run = pair
        if not command:
            return ''

        formatted = f'`{_padded(command, width)}`'
        return formatted if can_run else f'~~{formatted}~~'

    return (' '.join(map(format_pair, pair, widths)) for pair in pairs)
Example #24
def repeat(n, final=False):
    if n >= 67:
        raise ValueError(f"repeat command can't be 5 bytes long when n is {n}, use n < 67")

    bfinal = Bits(uint=0 if not final else 1, length=1)
    btype = Bits(uint=1, length=2)  # compressed using fixed Huffman codes

    dist_sym = distances[n]
    dist_code = Bits(uint=dist_sym.code, length=5)
    if dist_sym.nbits:
        dist_code += Bits(uint=n - dist_sym.lenbase, length=dist_sym.nbits)[::-1]

    x = n // 2
    y = n - x

    x_sym = lengths[x]
    x_len_code = Bits(uint=x_sym.code - 256, length=7)
    if x_sym.nbits:
        x_len_code = x_len_code + Bits(uint=x - x_sym.lenbase, length=x_sym.nbits)[::-1]

    y_sym = lengths[y]
    y_len_code = Bits(uint=y_sym.code - 256, length=7)
    if y_sym.nbits:
        y_len_code = y_len_code + Bits(uint=y - y_sym.lenbase, length=y_sym.nbits)[::-1]

    b = concat_bits(
        bfinal,
        btype[::-1],
        x_len_code,
        dist_code,
        y_len_code,
        dist_code,
    )

    bits_left = 5 * 8 - len(b)
    if bits_left > 0:
        pad = Bits([0 for _ in range(bits_left)])
        b.append(pad)

    b[:] = sum([byte[::-1] for byte in sliced(b, 8)])

    return b
Example #25
    async def create(cls, ctx):
        def sort_key(c):
            return command_category(c), c.qualified_name

        entries = (cmd for cmd in sorted(ctx.bot.commands, key=sort_key) if not cmd.hidden)

        nested_pages = []
        per_page = 30

        for parent, cmds in itertools.groupby(entries, key=command_category):
            command, cmds = spy(cmds)
            command = next(iter(command))

            pkg_name = command.module.rpartition('.')[0]
            module = sys.modules[pkg_name]
            description = inspect.getdoc(module) or 'No description yet.'

            lines = [pair async for pair in _command_formatters(cmds, ctx)]
            nested_pages.extend((parent.title(), description, page) for page in sliced(lines, per_page))

        return cls(ctx, nested_pages, per_page=1)
Example #26
    def __init__(self, ctx):
        permissions_in = ctx.author.permissions_in

        _channel_parsers = {
            discord.TextChannel:
            functools.partial(
                _parse_channel,
                prefix='#',
                predicate=lambda c: permissions_in(c).read_messages),
            discord.VoiceChannel:
            functools.partial(_parse_channel,
                              prefix='',
                              predicate=lambda c: permissions_in(c).connect),
        }

        entries = [(category,
                    [_channel_parsers[c.__class__](c) for c in chunk])
                   for category, channels in ctx.guild.by_category()
                   for chunk in sliced(channels, 10)]

        super().__init__(ctx, entries, lines_per_page=1)
Example #27
    async def send_docs(self, ctx, docs: str) -> None:
        """send documenation string, slicing it if needed"""

        if len(docs) < 1900:
            await ctx.send('```\n' + docs + '```')
        else:
            docs_list = docs.strip().split('\n')
            slices = 2
            docs_slices = None
            while True:
                docs_list_slices = more_itertools.sliced(
                    docs_list,
                    max(1, len(docs_list) // slices))
                docs_slices = ['\n'.join(x) for x in docs_list_slices]
                if all(len(x) < 1990 for x in docs_slices):
                    break
                slices += 1
            for docs in docs_slices:
                if not docs.isspace():
                    msg = '```\n' + docs + '```'
                    await ctx.send(msg)
Example #28
def image_to_ardif():
    if allowed_file(request.files['image'].filename):
        name = secure_filename(request.files['image'].filename)
        with tempfile.TemporaryDirectory() as tmp:
            path = os.path.join(tmp, name)
            request.files['image'].save(path)
            if request.form['max_size'] and request.form['max_size'] != '0':
                command = [
                    'convert', path, '-resize',
                    str(int(request.form['max_size'])), path
                ]
                subprocess.call(command)
            color_divisors = {
                '64': 1,
                '32': 2,
                '16': 4,
                '8': 8,
                '4': 16,
                '2': 32,
            }
            ardif_data = ardif.create_message(
                path, request.form['sender'], request.form['recipient'],
                request.form['title'], request.form['comment'],
                color_divisors[request.form['shades']])
            block_section = ''
            if request.form.get('block', '0') != '0':
                blocks = list(
                    more_itertools.sliced(ardif_data,
                                          int(request.form['block'])))
                if len(blocks) > 1:
                    block_section = '<h2>Blocks</h2><div id=blocks>'
                    for block_id, block in enumerate(blocks):
                        block_section += render_template('block.html',
                                                         number=block_id + 1,
                                                         data=block)
                    block_section += '</div>'
        return render_template('image_to_ardif.html',
                               data_len=len(ardif_data),
                               data=ardif_data).replace(
                                   '__block_section__', block_section)
Example #29
File: apple.py Project: tg-z/hpi
def _parse_apple_xml_val(xml_el):
    if xml_el.tag == "array":
        return [_parse_apple_xml_val(el) for el in xml_el]
    elif xml_el.tag == "dict":
        return {
            key.text: _parse_apple_xml_val(val)
            for key, val in sliced(list(xml_el), 2)
        }
    elif xml_el.tag == "string":
        return xml_el.text
    elif xml_el.tag == "integer":
        return int(xml_el.text)
    elif xml_el.tag == "real":
        return float(xml_el.text)
    elif xml_el.tag == "date":
        # is this UTC? probably, since others are
        return datetime.astimezone(datetime.fromisoformat(
            xml_el.text.rstrip("Z")),
                                   tz=timezone.utc)
    elif xml_el.tag == "data":
        return xml_el.text  # BASE64 data, dont think I need this
    else:
        raise RuntimeError(f"Unknown tag: {xml_el.tag}")
Example #30
    def _filter(self, q=None, shallow=False):
        db = self._db
        index_keys = self.index_keys
        pkvals = self.list_pk()

        if q:
            db_keys = []
            for key in q.keys():
                if key.split('.')[0] in index_keys:
                    key = key.split('.')[0]
                if key in index_keys:
                    db_key = index_keys[key]
                    db_keys.append(db_key)
            if db_keys:
                db_data = db.multi_get(db_keys)
                data = {k: msgpack.unpackb(v) for k, v in db_data.items()}
                index = {}
                for key, db_key in index_keys.items():
                    if db_key in data:
                        for k, v in data[db_key].items():
                            index.setdefault(k, {})
                            put_key(index[k], key, v)
                pkvals = [
                    i for i in pkvals
                    if q.match(index.get(i, {}), shallow_match=True)
                ]

        if not isinstance(pkvals, list):
            pkvals = list(pkvals)
        for _pkvals in more_itertools.sliced(pkvals, 200):
            for item in self.iter_bulk_get_by_pk(_pkvals, shallow=shallow):
                if q:
                    if q.match(item):
                        yield item
                else:
                    yield item