Example #1
def _get_struct_module(dtype: numpy.dtype,
                       ignore_alignment: bool = False) -> Module:
    """
    Builds and returns a module with the C type definition for a given ``dtype``,
    possibly using modules for nested structures.
    """

    # `dtype.names` is not `None` at this point, restricting types
    dtype_names = typing_cast(Iterable[str], dtype.names)
    dtype_fields = typing_cast(Mapping[str, Tuple[numpy.dtype, int]],
                               dtype.fields)

    field_alignments: Dict[str, Optional[int]]
    if ignore_alignment:
        struct_alignment = None
        field_alignments = {name: None for name in dtype_names}
    else:
        wrapped_type = _align(dtype)
        struct_alignment = wrapped_type.explicit_alignment
        field_alignments = {
            name: wrapped_type.field_alignments[name]
            for name in dtype_names
        }

    # The tag (${prefix}_) is not necessary, but it helps to avoid
    # CUDA bug #1409907 (nested struct initialization like
    # "mystruct x = {0, {0, 0}, 0};" fails to compile)
    lines = ["typedef struct ${prefix}_ {"]
    kwds: Dict[str, Union[str, Module]] = {}
    for name in dtype_names:
        elem_dtype, _ = dtype_fields[name]

        base_elem_dtype = elem_dtype.base
        elem_dtype_shape = elem_dtype.shape

        array_suffix = "".join(f"[{d}]" for d in elem_dtype_shape)

        typename_var = "typename_" + name
        field_alignment = field_alignments[name]
        lines.append(
            f"    ${{{typename_var}}} {_alignment_str(field_alignment)} {name}{array_suffix};"
        )

        if base_elem_dtype.names is None:
            kwds[typename_var] = ctype_builtin(base_elem_dtype)
        else:
            kwds[typename_var] = ctype_struct(
                base_elem_dtype, ignore_alignment=ignore_alignment)

    lines.append("} " + _alignment_str(struct_alignment) + " ${prefix};")

    return Module.from_string("\n".join(lines), render_globals=kwds)
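
A note on the pattern used throughout these examples: `typing_cast` is an alias of `typing.cast`, a runtime no-op that only narrows the type for the checker, since the numpy stubs declare `dtype.names` and `dtype.fields` as Optional. A minimal, self-contained sketch of the same narrowing, without the Module machinery above:

import numpy
from typing import Iterable, Mapping, Tuple, cast as typing_cast

dtype = numpy.dtype([("pos", numpy.float32, (3,)), ("id", numpy.int32)])
assert dtype.names is not None  # structured dtypes always have field names

# `dtype.fields` is Optional in the stubs; the cast pins it down
fields = typing_cast(Mapping[str, Tuple[numpy.dtype, int]], dtype.fields)
for name in typing_cast(Iterable[str], dtype.names):
    elem_dtype, offset = fields[name]
    print(name, elem_dtype.base, elem_dtype.shape, offset)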
Example #2
def batch_tallies(election: Election) -> BatchTallies:
    # We only support one contest for batch audits
    assert len(list(election.contests)) == 1
    contest = list(election.contests)[0]

    # Validate the batch tallies files. We can't do this validation when they
    # are uploaded because we need all of the jurisdictions' files.
    total_votes_by_choice: Dict[str, int] = defaultdict(int)
    for jurisdiction in contest.jurisdictions:
        batch_tallies = typing_cast(BatchTallies, jurisdiction.batch_tallies)
        if batch_tallies is None:
            raise Conflict(
                "Some jurisdictions haven't uploaded their batch tallies files yet."
            )
        for tally in batch_tallies.values():
            for choice_id, votes in tally[contest.id].items():
                total_votes_by_choice[choice_id] += votes

    for choice in contest.choices:
        if total_votes_by_choice[choice.id] > choice.num_votes:
            raise Conflict(
                f"Total votes in batch tallies files for contest choice {choice.name}"
                f" ({total_votes_by_choice[choice.id]}) is greater than the"
                f" reported number of votes for that choice ({choice.num_votes})."
            )

    # Key each batch by jurisdiction name and batch name since batch names
    # are only guaranteed unique within a jurisdiction
    return {
        (jurisdiction.name, batch_name): tally
        for jurisdiction in contest.jurisdictions
        for batch_name, tally in jurisdiction.batch_tallies.items()  # type: ignore
    }
Example #3
    def __init__(self, source: Optional[tsdb.Database] = None):
        # the parse keys exclude some that are handled specially
        self._parse_keys = '''
            ninputs ntokens readings first total tcpu tgc treal words
            l-stasks p-ctasks p-ftasks p-etasks p-stasks
            aedges pedges raedges rpedges tedges eedges ledges sedges redges
            unifications copies conses symbols others gcs i-load a-load
            date error comment
        '''.split()
        self._result_keys = '''
            result-id time r-ctasks r-ftasks r-etasks r-stasks size
            r-aedges r-pedges derivation surface tree mrs
        '''.split()
        self._run_keys = '''
            run-comment platform protocol tsdb application environment
            grammar avms sorts templates lexicon lrules rules
            user host os start end items status
        '''.split()
        self._parse_id = -1
        self._runs: Dict[int, Dict[str, Any]] = {}
        self._last_run_id = -1

        self.affected_tables = '''
            run parse result rule output edge tree decision preference
            update fold score
        '''.split()

        self._i_id_map: Dict[int, int] = {}
        if source:
            pairs = typing_cast(List[Tuple[int, int]],
                                source.select_from(
                                    'parse',
                                    ('parse-id', 'i-id'),
                                    cast=True))
            self._i_id_map.update(pairs)
Example #4
    def draw_sample_for_contest(contest: Contest, sample_size: int) -> List[BallotDraw]:
        # Compute the total number of ballot samples in all rounds leading up to
        # this one. Note that this corresponds to the number of SampledBallotDraws,
        # not SampledBallots.
        num_previously_sampled = SampledBallotDraw.query.filter_by(
            contest_id=contest.id
        ).count()

        # Create the pool of ballots to sample (aka manifest) by combining the
        # manifests from every jurisdiction in the contest's universe.
        if election.audit_type == AuditType.BALLOT_POLLING:
            # In ballot polling audits, we can include all the ballot positions
            # from each batch
            manifest = {
                batch_id_to_key[batch.id]: list(range(1, batch.num_ballots + 1))
                for jurisdiction in contest.jurisdictions
                for batch in jurisdiction.batches
            }
        else:
            assert election.audit_type == AuditType.BALLOT_COMPARISON

            # In ballot comparison audits, we only include ballots that had a
            # result recorded for this contest in the CVR
            manifest = defaultdict(list)

            for jurisdiction in contest.jurisdictions:
                cvr_contests_metadata = typing_cast(
                    JSONDict, jurisdiction.cvr_contests_metadata
                )
                contest_columns = [
                    choice["column"]
                    for choice in cvr_contests_metadata[contest.name][
                        "choices"
                    ].values()
                ]

                for cvr_ballot, jurisdiction_id in cvr_ballots:
                    if jurisdiction_id != jurisdiction.id:
                        continue
                    interpretations = cvr_ballot.interpretations.split(",")
                    if any(interpretations[column] != "" for column in contest_columns):
                        manifest[batch_id_to_key[cvr_ballot.batch_id]].append(
                            cvr_ballot.ballot_position
                        )

        # Do the math! i.e. compute the actual sample
        sample = sampler.draw_sample(
            str(election.random_seed),
            dict(manifest),
            sample_size,
            num_previously_sampled,
        )
        return [
            BallotDraw(
                ballot_key=ballot_key,
                contest_id=contest.id,
                ticket_number=ticket_number,
            )
            for (ticket_number, ballot_key, _) in sample
        ]
Example #5
def _flatten_dtype(
        dtype: numpy.dtype, prefix: List[Union[str, int]] = []) \
        -> List[Tuple[List[Union[str, int]], numpy.dtype]]:

    if dtype.names is None:
        return [(prefix, dtype)]

    # `dtype.names` is not `None` at this point, restricting types
    dtype_fields = typing_cast(Mapping[str, Tuple[numpy.dtype, int]],
                               dtype.fields)

    result: List[Tuple[List[Union[str, int]], numpy.dtype]] = []
    for name in dtype.names:
        elem_dtype, _ = dtype_fields[name]

        elem_dtype_shape: Tuple[int, ...]
        if len(elem_dtype.shape) == 0:
            base_elem_dtype = elem_dtype
            elem_dtype_shape = tuple()
        else:
            base_elem_dtype = elem_dtype.base
            elem_dtype_shape = elem_dtype.shape

        if len(elem_dtype_shape) == 0:
            result += _flatten_dtype(base_elem_dtype, prefix=prefix + [name])
        else:
            for idxs in itertools.product(
                    *[range(dim) for dim in elem_dtype_shape]):
                result += _flatten_dtype(base_elem_dtype,
                                         prefix=prefix + [name] + list(idxs))
    return result
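
For illustration, a quick driver for `_flatten_dtype` (a sketch; it assumes the function and its imports from the example above are in scope):

import numpy

dtype = numpy.dtype([("x", numpy.int32), ("v", numpy.float64, (2,))])
for path, leaf in _flatten_dtype(dtype):
    print(path, leaf)
# ['x'] int32
# ['v', 0] float64
# ['v', 1] float64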
Example #6
    def execute(cls, params, in_tensors, qrec: QRec, **kwargs):

        qname = kwargs['qname']
        params = typing_cast(SplitParameters, params)
        in_tensor = qrec.prepare_inputs(params, in_tensors, ktype=qname)[0]
        out_tensors = params.numpy_split(in_tensor)
        return qrec.get_outputs(params, out_tensors, ktype=qname)
Example #7
    def collect(self, ts: 'TestSuite') -> Iterator[interface.Response]:
        """
        Map from test suites to response objects.

        The data in the test suite must be ordered.

        .. note::

           This method stores the 'item', 'parse', and 'result' tables
           in memory during operation, so it is not recommended for
           very large test suites, as it may exhaust the system's
           available memory.
        """

        # type checking this function is a mess; it needs a better fix

        def get_i_id(row: 'Row') -> int:
            i_id = row['i-id']
            assert isinstance(i_id, int)
            return i_id

        def get_parse_id(row: 'Row') -> int:
            parse_id = row['parse-id']
            assert isinstance(parse_id, int)
            return parse_id

        parse_map: Dict[int, List[Dict[str, tsdb.Value]]] = {}
        rows = typing_cast(Sequence['Row'], ts['parse'])
        for key, grp in itertools.groupby(rows, key=get_i_id):
            parse_map[key] = [dict(zip(row.keys(), row)) for row in grp]

        result_map: Dict[int, List[Dict[str, tsdb.Value]]] = {}
        rows = typing_cast(Sequence['Row'], ts['result'])
        for key, grp in itertools.groupby(rows, key=get_parse_id):
            result_map[key] = [dict(zip(row.keys(), row)) for row in grp]

        for item in ts['item']:
            d: Dict[str, tsdb.Value] = dict(zip(item.keys(), item))
            i_id = d['i-id']
            assert isinstance(i_id, int)
            for parse in parse_map.get(i_id, []):
                response = interface.Response(d)
                response.update(parse)
                parse_id = parse['parse-id']
                assert isinstance(parse_id, int)
                response['results'] = result_map.get(parse_id, [])
                yield response
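
`itertools.groupby` only merges adjacent rows with equal keys, which is why the docstring insists that the data be ordered. A minimal illustration of the grouping step:

import itertools

rows = [{'i-id': 1, 'x': 'a'}, {'i-id': 1, 'x': 'b'}, {'i-id': 2, 'x': 'c'}]
parse_map = {key: list(grp)
             for key, grp in itertools.groupby(rows, key=lambda r: r['i-id'])}
print(len(parse_map[1]), len(parse_map[2]))  # 2 1 -- only because the rows were sorted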
Example #8
    def execute(cls, params, in_tensors, qrec: QuantizationRecordBase,
                **kwargs):

        qname = kwargs['qname']
        params = typing_cast(StridedSliceParameters, params)
        in_tensor = qrec.prepare_inputs(params, in_tensors, ktype=qname)[0]
        out_tensors = [params.numpy_slice(in_tensor)]
        return qrec.get_outputs(params, out_tensors, ktype=qname)
Example #9
    @classmethod
    def from_id(cls, value: Union[str, Dict], raise_error: bool = False):
        if isinstance(value, str):
            value = typing_cast(UserID, value)
            if value.startswith('U') and value in cls._bot.users:
                return cls._bot.users[value]
            if not raise_error:
                return UnknownUser(id=value)
            raise KeyError('Given ID was not found.')
        return cls(**value)
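
`UserID` here is presumably a `NewType` over `str` (an assumption; its definition is not shown). Casting is the standard way to tag a plain string, because a `NewType` has no runtime representation of its own:

from typing import NewType, cast as typing_cast

UserID = NewType('UserID', str)  # assumed definition

def to_user_id(value: str) -> UserID:
    # nothing happens at runtime; the cast only informs the type checker
    return typing_cast(UserID, value)

print(to_user_id('U012ABCDEF'))  # still a plain str at runtime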
Example #10
    def __init__(self, val_or_ctor: Union[JavaMember, JavaObject],
                 *args: Union[vertex_constructor_param_types, shape_types]) -> None:
        val: JavaObject
        if args:
            ctor = val_or_ctor
            val = ctor(*(Vertex.__parse_args(args)))
        else:
            val = typing_cast(JavaObject, val_or_ctor)

        super(Vertex, self).__init__(val)
Example #11
def strided_slice(params,
                  in_tensors,
                  qrec: QuantizationRecordBase,
                  details=None):
    del details
    if qrec is None:
        qrec = Float32QuantizationRecord()
    params = typing_cast(StridedSliceParameters, params)
    in_tensor = qrec.prepare_inputs(params, in_tensors, ktype="float32")[0]
    out_tensors = [params.numpy_slice(in_tensor)]
    return qrec.get_outputs(params, out_tensors, ktype="float32")
Example #12
    def decorator(function: FuncView) -> FuncView:
        @functools.wraps(function)
        async def wrapper(*args, **kwargs):
            if request.method in allow_methods:
                return await function(*args, **kwargs)
            elif request.method == "OPTIONS":
                return HttpResponse(headers=headers)
            else:
                return HttpResponse(status_code=405, headers=headers)

        setattr(wrapper, "__method__", method.upper())
        return typing_cast(FuncView, wrapper)
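
Casting the wrapper back to `FuncView` is the usual workaround in typed decorators: `functools.wraps` copies metadata at runtime but does not restore the signature for the checker. The same pattern with a generic `TypeVar`, as a sketch:

import functools
from typing import Any, Callable, TypeVar, cast as typing_cast

F = TypeVar('F', bound=Callable[..., Any])

def logged(function: F) -> F:
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        print(f"calling {function.__name__}")
        return function(*args, **kwargs)
    # `wrapper` is inferred as a bare callable; the cast restores F
    return typing_cast(F, wrapper)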
Example #13
    def run(
        self,
        secret_name: Optional[str] = None,
        secret_key: Optional[str] = None,
        namespace: str = "default",
        kube_kwargs: Optional[dict] = None,
        kubernetes_api_key_secret: str = "KUBERNETES_API_KEY",
    ):
        """
        Returns the value of a Kubernetes secret after applying an optional `cast` function.

        Args:
            - secret_name (str, optional): The name of the Kubernetes secret object
            - secret_key (str, optional): The key to look for in the Kubernetes data
            - namespace (str, optional): The Kubernetes namespace to read the secret from,
                defaults to the `default` namespace.
            - kube_kwargs (dict, optional): Optional extra keyword arguments to pass to the
                Kubernetes API (e.g. `{"pretty": "...", "dry_run": "..."}`)
            - kubernetes_api_key_secret (str, optional): the name of the Prefect Secret
                that stores your Kubernetes API key; this Secret must be a string in
                BearerToken format

        Raises:
            - ValueError: if `secret_name` or `secret_key` is not provided, or if
                `raise_if_missing` is `True` and the key was not found in the secret.
        """
        if not secret_name:
            raise ValueError(
                "The name of a Kubernetes secret must be provided.")

        if not secret_key:
            raise ValueError("The key of the secret must be provided.")

        api_client = typing_cast(
            client.CoreV1Api,
            get_kubernetes_client("secret", kubernetes_api_key_secret),
        )

        secret_data = api_client.read_namespaced_secret(
            name=secret_name, namespace=namespace).data

        if secret_key not in secret_data:
            if self.raise_if_missing:
                raise ValueError(
                    f"Cannot find the key {secret_key} in {secret_name} ")
            else:
                return None

        decoded_secret = base64.b64decode(
            secret_data[secret_key]).decode("utf8")

        return decoded_secret if self.cast is None else self.cast(
            decoded_secret)
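
Kubernetes stores secret values base64-encoded, which is what the `b64decode(...).decode("utf8")` step above undoes. A standalone check of just that step:

import base64

encoded = base64.b64encode("s3cr3t-token".encode("utf8"))
print(base64.b64decode(encoded).decode("utf8"))  # s3cr3t-token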
Example #14
def cvrs_for_contest(contest: Contest) -> supersimple.CVRS:
    choice_name_to_id = {choice.name: choice.id for choice in contest.choices}

    cvrs: supersimple.CVRS = defaultdict(lambda: {contest.id: {}})

    for jurisdiction in contest.jurisdictions:
        cvr_contests_metadata = typing_cast(JSONDict,
                                            jurisdiction.cvr_contests_metadata)
        choices_metadata = cvr_contests_metadata[contest.name]["choices"]

        interpretations_query = (CvrBallot.query.join(Batch).filter_by(
            jurisdiction_id=jurisdiction.id).join(
                SampledBallot,
                and_(
                    CvrBallot.batch_id == SampledBallot.batch_id,
                    CvrBallot.ballot_position == SampledBallot.ballot_position,
                ),
            ))
        # For targeted contests, use the ticket number to key the ballots so
        # that we count all sample draws
        if contest.is_targeted:
            interpretations_by_ballot = (
                interpretations_query.join(SampledBallotDraw).filter(
                    SampledBallotDraw.contest_id == contest.id).values(
                        SampledBallotDraw.ticket_number,
                        CvrBallot.interpretations))
        # For opportunistic contests, use the ballot id to key the ballots so
        # that we only count unique ballots
        else:
            interpretations_by_ballot = interpretations_query.values(
                SampledBallot.id, CvrBallot.interpretations)

        for ballot_key, interpretations_str in interpretations_by_ballot:
            # interpretations is the raw CVR string: 1,0,0,1,0,1,0. We need to
            # pick out the interpretation for each contest choice. We saved the
            # column index for each choice when we parsed the CVR.
            interpretations = interpretations_str.split(",")
            for choice_name, choice_metadata in choices_metadata.items():
                interpretation = interpretations[choice_metadata["column"]]
                # If the interpretations are empty, it means the contest wasn't
                # on the ballot, so we should skip this contest entirely for
                # this ballot.
                if interpretation == "":
                    cvrs[ballot_key] = {}
                else:
                    choice_id = choice_name_to_id[choice_name]
                    cvrs[ballot_key][contest.id][choice_id] = int(
                        interpretation)

    return dict(cvrs)
Example #15
def split(params, in_tensors, qrec: QuantizationRecordBase, details=None):
    del details
    if qrec is None:
        qrec = Float32QuantizationRecord()
    params = typing_cast(SplitParameters, params)
    in_tensor = qrec.prepare_inputs(params, in_tensors, ktype="float32")[0]
    if params.transpose_in:
        in_tensor = np.transpose(in_tensor, params.transpose_in[0])
    out_tensors = params.numpy_split(in_tensor)
    if params.transpose_out:
        out_tensors = [(np.transpose(out_tensor, params.transpose_out[idx])
                        if params.transpose_out[idx] else out_tensor)
                       for idx, out_tensor in enumerate(out_tensors)]
    return qrec.get_outputs(params, out_tensors, ktype="float32")
Example #16
    def __init__(
            self, val_or_ctor: Union[JavaMember, JavaObject],
            label: Optional[str], *args: Union[vertex_constructor_param_types,
                                               shape_types]) -> None:
        val: JavaObject
        if args:
            ctor = val_or_ctor
            val = ctor(*(Vertex.__parse_args(args)))
        else:
            val = typing_cast(JavaObject, val_or_ctor)

        super(Vertex, self).__init__(val)
        if label is not None and self.get_label() is None:
            self.set_label(label)
Example #17
    def execute(cls, params, in_tensors, qrec: QuantizationRecordBase,
                **kwargs):

        qname = kwargs['qname']
        params = typing_cast(SplitParameters, params)
        in_tensor = qrec.prepare_inputs(params, in_tensors, ktype=qname)[0]
        if params.transpose_in:
            in_tensor = np.transpose(in_tensor, params.transpose_in[0])
        out_tensors = params.numpy_split(in_tensor)
        if params.transpose_out:
            out_tensors = [(np.transpose(out_tensor, params.transpose_out[idx])
                            if params.transpose_out[idx] else out_tensor)
                           for idx, out_tensor in enumerate(out_tensors)]
        return qrec.get_outputs(params, out_tensors, ktype=qname)
Example #18
    def _select_raw(
            self,
            name: str,
            columns: Optional[Iterable[str]] = None) -> Generator[RawRecord, None, None]:
        if name not in self.schema:
            raise TSDBError(f'relation not defined in schema: {name}')
        fields = self.schema[name]
        if columns is None:
            indices = list(range(len(fields)))
        else:
            index = make_field_index(fields)
            indices = [index[column] for column in columns]
        with open(self._path, name, encoding=self.encoding) as file:
            for line in file:
                record = typing_cast(RawRecord, split(line, fields=None))
                yield tuple(record[idx] for idx in indices)
Example #19
def cvrs_for_contest(contest: Contest) -> supersimple.CVRS:
    choice_name_to_id = {choice.name: choice.id for choice in contest.choices}

    cvrs: supersimple.CVRS = {}

    for jurisdiction in contest.jurisdictions:
        cvr_contests_metadata = typing_cast(
            JSONDict, jurisdiction.cvr_contests_metadata
        )
        choices_metadata = cvr_contests_metadata[contest.name]["choices"]

        interpretations_by_ballot = (
            CvrBallot.query.join(Batch)
            .filter_by(jurisdiction_id=jurisdiction.id)
            .join(
                SampledBallot,
                and_(
                    CvrBallot.batch_id == SampledBallot.batch_id,
                    CvrBallot.ballot_position == SampledBallot.ballot_position,
                ),
            )
            .values(SampledBallot.id, CvrBallot.interpretations)
        )

        for ballot_key, interpretations_str in interpretations_by_ballot:
            ballot_cvr: supersimple.CVR = {contest.id: {}}
            # interpretations is the raw CVR string: 1,0,0,1,0,1,0. We need to
            # pick out the interpretation for each contest choice. We saved the
            # column index for each choice when we parsed the CVR.
            interpretations = interpretations_str.split(",")
            for choice_name, choice_metadata in choices_metadata.items():
                interpretation = interpretations[choice_metadata["column"]]
                # If the interpretations are empty, it means the contest wasn't
                # on the ballot, so we should skip this contest entirely for
                # this ballot.
                if interpretation == "":
                    ballot_cvr = {}
                else:
                    choice_id = choice_name_to_id[choice_name]
                    ballot_cvr[contest.id][choice_id] = int(interpretation)

            cvrs[ballot_key] = ballot_cvr

    return cvrs
Example #20
    def select_from(self, name: str,
                    columns: Optional[Iterable[str]] = None,
                    cast: bool = False) -> Generator[Record, None, None]:
        """
        Yield values for *columns* from relation *name*.
        """
        fields = self.schema[name]
        if columns is None:
            columns = [f.name for f in fields]
        index = make_field_index(fields)
        indices = [index[column] for column in columns]
        records = self[name]
        for record in records:
            if cast and not self.autocast:
                record = typing_cast(RawRecord, record)
                # _cast is a copy of the function cast()
                data = tuple(_cast(fields[idx].datatype, record[idx])
                             for idx in indices)
            else:
                data = tuple(record[idx] for idx in indices)
            yield data
        records.close()
Example #21
def parse_map(file: Path) -> TiledMap:
    """Parse the raw Tiled map into a pytiled_parser type

    Args:
        file: Path to the map's JSON file

    Returns:
        TiledMap: a properly typed TiledMap.
    """

    with open(file) as map_file:
        raw_tiled_map = json.load(map_file)

    parent_dir = file.parent

    raw_tilesets: List[Union[RawTileSet,
                             _RawTilesetMapping]] = raw_tiled_map["tilesets"]
    tilesets: TilesetDict = {}

    for raw_tileset in raw_tilesets:
        if raw_tileset.get("source") is not None:
            # Is an external Tileset
            with open(parent_dir / raw_tileset["source"]) as raw_tileset_file:
                tilesets[raw_tileset["firstgid"]] = tileset.cast(
                    json.load(raw_tileset_file))
        else:
            # Is an embedded Tileset
            raw_tileset = typing_cast(RawTileSet, raw_tileset)
            tilesets[raw_tileset["firstgid"]] = tileset.cast(raw_tileset)

    # `map` is a built-in function
    map_ = TiledMap(
        map_file=file,
        infinite=raw_tiled_map["infinite"],
        layers=[layer.cast(layer_) for layer_ in raw_tiled_map["layers"]],
        map_size=Size(raw_tiled_map["width"], raw_tiled_map["height"]),
        next_layer_id=raw_tiled_map["nextlayerid"],
        next_object_id=raw_tiled_map["nextobjectid"],
        orientation=raw_tiled_map["orientation"],
        render_order=raw_tiled_map["renderorder"],
        tiled_version=raw_tiled_map["tiledversion"],
        tile_size=Size(raw_tiled_map["tilewidth"],
                       raw_tiled_map["tileheight"]),
        tilesets=tilesets,
        version=raw_tiled_map["version"],
    )

    if raw_tiled_map.get("backgroundcolor") is not None:
        map_.background_color = parse_color(raw_tiled_map["backgroundcolor"])

    if raw_tiled_map.get("hexsidelength") is not None:
        map_.hex_side_length = raw_tiled_map["hexsidelength"]

    if raw_tiled_map.get("properties") is not None:
        map_.properties = properties.cast(raw_tiled_map["properties"])

    if raw_tiled_map.get("staggeraxis") is not None:
        map_.stagger_axis = raw_tiled_map["staggeraxis"]

    if raw_tiled_map.get("staggerindex") is not None:
        map_.stagger_index = raw_tiled_map["staggerindex"]

    return map_
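
The external/embedded branch above narrows a union that the checker cannot infer from `raw_tileset.get("source")` alone. A sketch of the same narrowing with `TypedDict`s (both type names are invented for illustration):

from typing import TypedDict, Union, cast as typing_cast

class ExternalTileset(TypedDict):  # hypothetical
    firstgid: int
    source: str

class EmbeddedTileset(TypedDict):  # hypothetical
    firstgid: int
    name: str

def tileset_label(raw: Union[ExternalTileset, EmbeddedTileset]) -> str:
    if "source" in raw:
        # external tileset: the data lives in a separate file
        return typing_cast(ExternalTileset, raw)["source"]
    # embedded tileset: everything is inline
    return typing_cast(EmbeddedTileset, raw)["name"]

print(tileset_label({"firstgid": 1, "name": "terrain"}))  # terrain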
Example #22
def append(point: TreeNode, path_format: str,
           param_convertors: Dict[str, Convertor]) -> TreeNode:
    """
    Construct the node corresponding to the specified path and return.

    The order of child nodes under the same node is determined by the order of addition.
    """
    if not path_format:
        return point

    if point.next_nodes is None:
        point.next_nodes = list()

    matched = re.match(r"^{\w+}", path_format)

    if matched is not None:
        length = matched.end()
        param_name = path_format[1:length - 1]
        convertor = param_convertors[param_name]
        re_pattern = re.compile(convertor.regex)
        if isinstance(convertor, PathConvertor) and path_format[-1] != "}":
            raise ValueError(
                "`PathConvertor` is only allowed to appear at the end of path")
        for node in (node for node in point.next_nodes or ()
                     if node.re_pattern is not None):
            if (node.re_pattern == re_pattern) != (node.characters == param_name):
                raise ValueError(
                    "The same regular expression is used at the same position,"
                    " but the parameter names are different.")
            if node.characters == param_name:
                return append(node, path_format[length:], param_convertors)

        new_node = TreeNode(characters=param_name, re_pattern=re_pattern)
        point.next_nodes.insert(0, new_node)
        return append(new_node, path_format[length:], param_convertors)
    else:
        length = path_format.find("{")
        if length == -1:
            length = len(path_format)

        for node in (node for node in point.next_nodes or ()
                     if node.re_pattern is None):
            prefix = find_common_prefix(node.characters, path_format[:length])
            if prefix == "":
                continue
            if node.characters == prefix:
                return append(node, path_format[len(prefix):],
                              param_convertors)

            node_index = point.next_nodes.index(node)
            prefix_node = TreeNode(characters=prefix, next_nodes=[])
            point.next_nodes[node_index] = prefix_node
            node.characters = node.characters[len(prefix):]
            typing_cast(List[TreeNode], prefix_node.next_nodes).insert(0, node)
            if path_format[:length] == prefix:
                return append(prefix_node, path_format[length:],
                              param_convertors)

            new_node = TreeNode(characters=path_format[len(prefix):length])
            typing_cast(List[TreeNode],
                        prefix_node.next_nodes).insert(0, new_node)
            return append(new_node, path_format[length:], param_convertors)

        new_node = TreeNode(characters=path_format[:length])
        point.next_nodes.insert(0, new_node)
        return append(new_node, path_format[length:], param_convertors)
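
`find_common_prefix` is imported from elsewhere in the framework; a plausible implementation, for reference only (an assumption, not the library's own code):

def find_common_prefix(a: str, b: str) -> str:
    # longest common leading substring of `a` and `b`
    i = 0
    while i < min(len(a), len(b)) and a[i] == b[i]:
        i += 1
    return a[:i]

print(find_common_prefix("settings", "setup"))  # set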
Example #23
def deferred(col: C) -> C:
    return typing_cast(C, sa_deferred(col))
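
This one-liner is the generic "cast through an untyped wrapper" pattern: `sa_deferred` (presumably SQLAlchemy's `deferred`) returns a loosely typed object, and the cast pins the result back to the column type `C`. The same idea in isolation, with hypothetical names:

from typing import Any, Callable, TypeVar, cast as typing_cast

C = TypeVar('C')

def keep_type(value: C, transform: Callable[[Any], Any]) -> C:
    # run an untyped transform, but tell the checker the type is unchanged
    return typing_cast(C, transform(value))

print(keep_type(3, lambda x: x))  # 3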
Example #24
def _align(dtype: numpy.dtype) -> WrappedType:
    """
    Builds a `WrappedType` object with the alignment information for a dtype,
    aligning it if it is not aligned, and checking the consistency of metadata if it is.
    """

    if len(dtype.shape) > 0:
        wt = _align(dtype.base)
        return WrappedType(numpy.dtype((wt.dtype, dtype.shape)),
                           wt.alignment,
                           explicit_alignment=wt.explicit_alignment,
                           wrapped_fields=wt.wrapped_fields)

    if dtype.names is None:
        return WrappedType(dtype, dtype.itemsize)

    # Since `.names` is not `None` at this point, we can restrict the type to help the inference
    dtype_fields = typing_cast(Mapping[str, Tuple[numpy.dtype, int]],
                               dtype.fields)

    wrapped_fields = {
        name: _align(dtype_fields[name][0])
        for name in dtype.names
    }

    if dtype.isalignedstruct:
        # Find out what alignment has to be set for the field in order for the compiler
        # to place it at the offset specified in the description of `dtype`.
        field_alignments = [wrapped_fields[dtype.names[0]].alignment]
        for i in range(1, len(dtype.names)):
            prev_field_dtype, prev_offset = dtype_fields[dtype.names[i - 1]]
            _, offset = dtype_fields[dtype.names[i]]
            prev_end = prev_offset + prev_field_dtype.itemsize
            field_alignment = _find_minimum_alignment(
                offset, wrapped_fields[dtype.names[i]].alignment, prev_end)
            field_alignments.append(field_alignment)

        offsets = [dtype_fields[name][1] for name in dtype.names]
    else:
        # Build offsets for the structure using a procedure
        # similar to the one a compiler would use
        offsets = [0]
        for i in range(1, len(dtype.names)):
            prev_field_dtype, _ = dtype_fields[dtype.names[i - 1]]
            prev_end = offsets[-1] + prev_field_dtype.itemsize
            alignment = wrapped_fields[dtype.names[i]].alignment
            offsets.append(min_blocks(prev_end, alignment) * alignment)

        field_alignments = [
            wrapped_fields[name].alignment for name in dtype.names
        ]

    # Same principle as above, but for the whole struct:
    # find out what alignment has to be set so that, in some enclosing dtype
    # where this struct is used as a field type, the compiler places the next
    # field at the offset corresponding to this struct's itemsize.

    last_dtype, _ = dtype_fields[dtype.names[-1]]
    last_offset = offsets[-1]
    struct_end = last_offset + last_dtype.itemsize

    # Find the total itemsize.
    # According to the standard, it must be a multiple of the struct alignment.
    base_struct_alignment = _struct_alignment(field_alignments)
    itemsize = min_blocks(struct_end,
                          base_struct_alignment) * base_struct_alignment
    if dtype.isalignedstruct:
        if 2**log2(dtype.itemsize) != dtype.itemsize:
            raise ValueError(
                f"Invalid non-default itemsize for dtype {dtype}: "
                f"must be a power of 2 (currently {dtype.itemsize})")

        # Should be already checked by `numpy.dtype` when an aligned struct was created.
        # Checking it just in case the behavior changes.
        assert dtype.itemsize >= itemsize

        aligned_dtype = dtype
        if dtype.itemsize > itemsize:
            struct_alignment = dtype.itemsize
        else:
            struct_alignment = base_struct_alignment
    else:
        # There must be some problem with the numpy stubs - the type is too restrictive here.
        aligned_dtype = numpy.dtype(
            dict(  # type: ignore
                names=dtype.names,
                formats=[wrapped_fields[name].dtype for name in dtype.names],
                offsets=offsets,
                itemsize=itemsize,
                aligned=True))

        struct_alignment = _find_minimum_alignment(itemsize,
                                                   base_struct_alignment,
                                                   struct_end)

    field_alignments_map = {
        dtype.names[i]: field_alignments[i]
        if field_alignments[i] != wrapped_fields[dtype.names[i]].alignment else
        None
        for i in range(len(dtype.names))
    }

    return WrappedType(aligned_dtype,
                       struct_alignment,
                       explicit_alignment=struct_alignment
                       if struct_alignment != base_struct_alignment else None,
                       wrapped_fields=wrapped_fields,
                       field_alignments=field_alignments_map)
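
The offset arithmetic above mirrors the layout numpy itself produces for `align=True` dtypes; a quick way to inspect that compiler-style layout:

import numpy

# float64 forces 8-byte alignment, so the int8 field is followed by padding
aligned = numpy.dtype({'names': ['tag', 'val'],
                       'formats': [numpy.int8, numpy.float64]}, align=True)
for name in aligned.names:
    _, offset = aligned.fields[name]
    print(name, offset)    # tag -> 0, val -> 8
print(aligned.itemsize)    # 16: rounded up to a multiple of the struct alignment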