class TypeCheck:
    """Namespace of typing aliases used by the CAN bus abstractions.

    Every attribute is a typing construct (alias / TypedDict / NewType), not a
    runtime value; they exist for static analysis only.
    """

    # A single acceptance filter entry: identifier plus mask.
    CanFilter = mypy_extensions.TypedDict("CanFilter", {"can_id": int, "can_mask": int})
    # Same as CanFilter, plus a flag selecting extended (29-bit) identifiers
    # — presumably; confirm against the backends that consume it.
    CanFilterExtended = mypy_extensions.TypedDict(
        "CanFilterExtended", {"can_id": int, "can_mask": int, "extended": bool}
    )
    CanFilters = typing.Sequence[typing.Union[CanFilter, CanFilterExtended]]

    # Anything acceptable as CAN payload data (bytearray()-compatible).
    CanData = typing.Union[bytes, bytearray, int, typing.Iterable[int]]

    # Used for the Abstract Base Class
    ChannelStr = str
    ChannelInt = int
    Channel = typing.Union[ChannelInt, ChannelStr]

    # vector bus
    # NOTE(review): was ``typing.List[int,]`` — the trailing comma made the
    # subscript a one-element tuple. ``typing`` unpacks it to the same alias,
    # but the plain form below is the intended, unambiguous spelling.
    v_channel = typing.Union[int, str, typing.List[int]]

    AutoDetectedConfig = mypy_extensions.TypedDict(
        "AutoDetectedConfig", {"interface": str, "channel": Channel}
    )

    # Used by the IO module
    FileLike = typing.IO[typing.Any]
    # Forward reference: ``os`` is expected to be imported under TYPE_CHECKING.
    StringPathLike = typing.Union[str, "os.PathLike[str]"]
    AcceptedIOType = typing.Optional[typing.Union[FileLike, StringPathLike]]

    # Opaque bag of keyword arguments used to construct a bus.
    BusConfig = typing.NewType("BusConfig", dict)
from werkzeug.datastructures import FileStorage

import psef.models as models
from psef import app, blackboard
from psef.errors import APICodes, APIException
from psef.ignore import InvalidFile, IgnoreFilterManager

# Archive extensions the upload handler knows how to extract.
# NOTE(review): ``archive`` is expected to be imported earlier in this file.
_KNOWN_ARCHIVE_EXTENSIONS = tuple(archive.extension_map.keys())

# Stolen from Erik Kooistra (original comment was Dutch: "Gestolen van").
# Matches the name Blackboard gives submission .txt files, capturing the
# assignment name, student id and submission timestamp.
_BB_TXT_FORMAT = re.compile(
    r"(?P<assignment_name>.+)_(?P<student_id>.+?)_attempt_"
    r"(?P<datetime>\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}).txt")

# Common fields shared by every node in a serialized file tree.
FileTreeBase = mypy_extensions.TypedDict(  # pylint: disable=invalid-name
    'FileTreeBase', {
        'name': str,
        'id': int,
    })


def init_app(_: t.Any) -> None:
    """App-initialisation hook; intentionally a no-op."""
    pass


# This is valid, see https://github.com/PyCQA/pylint/issues/1927
class FileTree(  # pylint: disable=inherit-non-class,missing-docstring
        FileTreeBase,
        total=False,
):
    # Present only for directory nodes (``total=False`` makes it optional).
    entries: t.MutableSequence[t.Any]
"""Types for mypy type-checking """ import typing if typing.TYPE_CHECKING: import os import mypy_extensions CanFilter = mypy_extensions.TypedDict("CanFilter", { "can_id": int, "can_mask": int }) CanFilterExtended = mypy_extensions.TypedDict("CanFilterExtended", { "can_id": int, "can_mask": int, "extended": bool }) CanFilters = typing.Sequence[typing.Union[CanFilter, CanFilterExtended]] # TODO: Once buffer protocol support lands in typing, we should switch to that, # since can.message.Message attempts to call bytearray() on the given data, so # this should have the same typing info. # # See: https://github.com/python/typing/issues/593 CanData = typing.Union[bytes, bytearray, int, typing.Iterable[int]] # Used for the Abstract Base Class ChannelStr = str ChannelInt = int
import sqlalchemy
import sqlalchemy.orm
from mozilla_version.fenix import FenixVersion

import cli_common.command
import cli_common.utils
import shipit_api.common.config
import shipit_api.common.models
from shipit_api.admin.release import parse_version
from shipit_api.common.product import Product, ProductCategory

logger = logging.getLogger(__name__)

# Alias for a product-details file path — presumably; verify against callers.
File = str
# One entry in the "releases" mapping of a product-details payload.
ReleaseDetails = mypy_extensions.TypedDict(
    "ReleaseDetails",
    {"category": str, "product": str, "build_number": int,
     "description": typing.Optional[str], "is_security_driven": bool,
     "version": str, "date": str},
)
Releases = mypy_extensions.TypedDict("Releases", {"releases": typing.Dict[str, ReleaseDetails]})
# Locale code used as the key of ReleaseL10ns — TODO confirm format.
L10n = str
ReleaseL10n = mypy_extensions.TypedDict("ReleaseL10n", {"platforms": typing.List[str], "revision": str})
ReleaseL10ns = typing.Dict[L10n, ReleaseL10n]
ReleasesHistory = typing.Dict[str, str]
PrimaryBuildDetails = mypy_extensions.TypedDict("PrimaryBuildDetails", {"filesize": float})
# Per-OS build details, keyed by the three primary desktop platforms.
PrimaryBuild = mypy_extensions.TypedDict(
    "PrimaryBuild",
    {"Linux": PrimaryBuildDetails, "OS X": PrimaryBuildDetails, "Windows": PrimaryBuildDetails})
PrimaryBuilds = typing.Dict[str, typing.Dict[str, PrimaryBuild]]
# NOTE(review): definition continues past this chunk.
FirefoxVersions = mypy_extensions.TypedDict(
    "FirefoxVersions",
    {
        "FIREFOX_NIGHTLY": str,
        "FIREFOX_AURORA": str,
        "FIREFOX_ESR": str,
import abc
import base64
import datetime as dt
import json
import marshmallow as m  # type: ignore
import mypy_extensions
import typing as t

import at_runner.storage as storage

# Payload delivered to a task: raw bytes plus string metadata.
Event = mypy_extensions.TypedDict('Event', {
    'data': bytes,
    'attributes': t.Dict[str, str],
})
# Per-invocation context describing the triggering event.
Context = mypy_extensions.TypedDict('Context', {
    'event_id': int,
    'timestamp': dt.datetime,
})


class Task(abc.ABC):
    """Abstract base class for runnable tasks."""

    # Storage backend available to the task.
    store: storage.Storage
    # Task identifier.
    name: str
    # Maximum allowed run time for one invocation.
    timeout: dt.timedelta

    # Safety margin applied around ``timeout`` — TODO confirm how it is used;
    # the consuming code is outside this chunk.
    TIMEOUT_BUFFER: dt.timedelta = dt.timedelta(seconds=10)

    # NOTE(review): signature continues past this chunk.
    def __init__(self, store: storage.Storage, name: str,
@enum.unique
class ProductCategory(enum.Enum):
    """Closed set of release categories; values are the serialized strings."""
    MAJOR = 'major'
    DEVELOPMENT = 'dev'
    STABILITY = 'stability'
    ESR = 'esr'


# Alias for a product-details file path — presumably; verify against callers.
File = str
# One entry in the "releases" mapping of a product-details payload.
ReleaseDetails = mypy_extensions.TypedDict(
    'ReleaseDetails', {
        'category': str,
        'product': str,
        'build_number': int,
        'description': typing.Optional[str],
        'is_security_driven': bool,
        'version': str,
        'date': str,
    })
Releases = mypy_extensions.TypedDict(
    'Releases', {
        'releases': typing.Dict[str, ReleaseDetails],
    })
# Locale code used as the key of ReleaseL10ns — TODO confirm format.
L10n = str
ReleaseL10n = mypy_extensions.TypedDict('ReleaseL10n', {
    'platforms': typing.List[str],
    'revision': str,
})
ReleaseL10ns = typing.Dict[L10n, ReleaseL10n]
ReleasesHistory = typing.Dict[str, str]
# An ordered collection of records (``Record`` is defined earlier in the file).
Dataset = _typing.Sequence[Record]

if _typing.TYPE_CHECKING:
    # ``array.array`` is subscriptable only for type checkers.
    FloatArrayType = _array.array[float]
    IntArrayType = _array.array[int]
else:
    FloatArrayType = _array.array
    IntArrayType = _array.array

# A float array of scores plus two parallel tuples of int arrays — presumably
# (similarities, dataset indices, record indices); confirm with the producer.
CandidatePairs = _typing.Tuple[FloatArrayType,
                               _typing.Tuple[IntArrayType, ...],
                               _typing.Tuple[IntArrayType, ...]]

# Callable of (int, int, Record) yielding hashable block keys; the meaning of
# the two int arguments is not visible in this chunk — verify at call sites.
BlockingFunction = _typing.Callable[[int, int, Record],
                                    _typing.Iterable[_typing.Hashable]]

# Callable of (datasets, threshold, k=...) returning scores plus index arrays.
SimilarityFunction = _typing.Callable[[
    _typing.Sequence[Dataset],
    float,
    _mypy_extensions.DefaultNamedArg(_typing.Optional[int], 'k')
], _typing.Tuple[FloatArrayType, _typing.Sequence[IntArrayType]]]

# Which dataset a chunk covers and the index range it spans.
DatasetChunkInfo = _mypy_extensions.TypedDict('DatasetChunkInfo', {
    'datasetIndex': int,
    'range': _typing.List[int]
})
ChunkInfo = _typing.List[DatasetChunkInfo]

# Fully-qualified record address: (dataset index, record index).
DatasetAndRecordIndex = _typing.Tuple[int, int]
MatchGroups = _typing.Sequence[_typing.Sequence[DatasetAndRecordIndex]]
    # Trailing attribute of a class (``HasA``) whose header precedes this chunk.
    a: str


class HasB(tx.Protocol):
    """Structural type requiring a string attribute ``b``."""
    b: str


class AB(HasA, HasB):
    """Nominal combination of the two protocols."""
    pass


def use(ob: AB) -> None:
    """Print both protocol attributes of ``ob``."""
    print(ob.a, ob.b)


# Required string fields used to populate an AB implementation.
Params = mx.TypedDict("Params", {"a": str, "b": str})


def run(d: Params) -> None:
    """Build a local class satisfying AB from ``d`` and exercise it."""
    class Ob:
        z = "boo"

    # Class attributes are captured from ``d`` at class-creation time.
    class Ob2(Ob, AB):
        a = d["a"]
        b = d["b"]

    ob = Ob2()
    use(ob)
    print(ob.z)
# Copyright 2020 Dragonchain, Inc. or its affiliates. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union, Dict, Any, TYPE_CHECKING if not TYPE_CHECKING: raise RuntimeError("types should never be imported during runtime") import mypy_extensions # noqa: E402 Want to explicitly ensure not type checking before importing extensions request_response = mypy_extensions.TypedDict("request_response", {"status": int, "ok": bool, "response": Union[Dict[Any, Any], str]}) custom_index_fields_type = mypy_extensions.TypedDict( "custom_index_fields_type", {"path": str, "field_name": str, "type": str, "options": Dict[str, Any]} ) permissions_doc = mypy_extensions.TypedDict("permissions_doc", {"version": str, "default_allow": bool, "permissions": Dict[str, Any]})
# row and column are 2 character numbers # type is 'EST' or 'MOE' # Second row: Encodes the gender, type, and age group in the format: # [gender]; [type]; [ageGroup] where # gender is 'Total', 'Male', 'Female', 'Percent', 'Percent Male', or 'Percent Female' # type is 'Estimate' or 'Margin of Error' # ageGroup is 'AGE - Under 5 years', 'AGE - 85 years and over' or 'AGE - [low] to [height] years' # Third row: Contain the numerical data, either as an integer value or a percentage. # Types RawEntry = t.Tuple[str, str, str] Entry = te.TypedDict('Entry', { 'key': str, 'year': int, 'gender': str, 'age_group': int, 'count': int }) # Constants _ARCHIVE_NAME_PATTERN = re.compile(r'ACS_(?P<year>\d\d)_(EST|1YR)_S0101') _DATA_FILE_NAME_PATTERN = re.compile(r'ACS_\d\d_(EST|1YR)_S0101_with_ann.csv') _FEMALE_TOTAL_PATTERN = re.compile('Female; Estimate; Total population', re.I) _MALE_TOTAL_PATTERN = re.compile('Male; Estimate; Total population', re.I) _DATA_PATTERN = re.compile( '(?P<gender>Male|Female); ' 'Estimate; '
import mypy_extensions as te
import yaml

from a2agc import schema

import bar_chart
import histogram
import infer_dist
import summary

# Types

# Metadata and distribution summary for one database column.
Column = te.TypedDict(
    'Column', {
        'name': str,
        'type': str,
        'remarks': str,
        'n_non_null': int,
        'pct_missing': float,
        'dist_type': str,
        'dist_data': t.Any
    })
# Metadata for one table, including its columns keyed by column name.
Table = te.TypedDict(
    'Table', {
        'name': str,
        'row_count': int,
        'remarks': str,
        'columns': t.Mapping[str, Column]
    })
# Whole-database description keyed by table name.
Data = t.Mapping[str, Table]

# Setup yaml ordered dict load/dump
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
import argparse
import json
import sqlite3
import sys
import typing as t

import mypy_extensions as te

# Types

# Per-column summary statistics extracted from a SQLite table.
Summary = te.TypedDict('Summary', {
    'distinct': int,
    'min': float,
    'max': float
})


# Data extraction

def get_summary(database: sqlite3.Connection, table: str,
                column: str) -> Summary:
    """Collect distinct-count, min and max for ``table.column``.

    NOTE(review): the table and column names are interpolated into the SQL via
    f-strings. Identifiers cannot be bound as parameters, but this is safe only
    while both names come from trusted schema metadata, never user input.
    """
    distinct_query = f'''
        SELECT count(DISTINCT "{ column }")
        FROM "{ table }";
    '''
    # For integer/real columns take the numeric min; otherwise fall back to
    # the minimum string length.
    min_query = f'''
        SELECT CASE typeof("{ column }")
            WHEN 'integer' THEN min("{ column }")
            WHEN 'real' THEN min("{ column }")
            ELSE min(length("{ column }"))
        END
        FROM "{ table }";
    '''
    # NOTE(review): the query literal continues past the end of this chunk.
    max_query = f'''
import typing as t

import mypy_extensions as mx

from data import A

# Record shape produced by every ``data`` method below.
BDict = mx.TypedDict("BDict", {"name": str, "age": int})


class B(A):
    """Extends A's record with a fixed age."""

    def data(self) -> BDict:
        record = dict(super().data())
        record["age"] = 20
        return t.cast(BDict, record)


class C:
    """Structurally compatible with B without inheriting from A."""

    def data(self) -> BDict:
        return {"name": "foo", "age": 20}


def p(x: t.Mapping) -> None:
    """Print a mapping."""
    print(x)


def main() -> None:
    """Print the record produced by each implementation, in declaration order."""
    for producer in (A(), B(), C()):
        p(producer.data())
import mypy_extensions as mx # error # Person = mx.TypedDict( # "Person", { # "name": str, # "age": int, # "info": mx.TypedDict("Info", # {"name": str}) # } # ) Info = mx.TypedDict("Info", {"name": str}) Person = mx.TypedDict("Person", { "name": str, "age": int, "info": Info, })