Example 1
    def _unpack(expr):
        if isinstance(expr, (ray.ObjectRef, ClientObjectRef)):
            token = expr.hex()
            repack_dsk[token] = (getitem, object_refs_token, len(object_refs))
            object_refs.append(expr)
            return token

        token = uuid.uuid4().hex
        # Treat iterators like lists
        typ = list if isinstance(expr, Iterator) else type(expr)
        if typ in (list, tuple, set):
            repack_task = (typ, [_unpack(i) for i in expr])
        elif typ in (dict, OrderedDict):
            repack_task = (typ, [[_unpack(k), _unpack(v)]
                                 for k, v in expr.items()])
        elif is_dataclass(expr):
            repack_task = (
                apply,
                typ,
                (),
                (
                    dict,
                    [[f.name, _unpack(getattr(expr, f.name))]
                     for f in dataclass_fields(expr)],
                ),
            )
        else:
            return expr
        repack_dsk[token] = repack_task
        return token
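
Tracing a small input makes the bookkeeping concrete. For _unpack([ref, 7]) with a single ObjectRef ref, and with object_refs, repack_dsk, and object_refs_token defined in the enclosing scope, the function leaves behind:

# object_refs == [ref]
# repack_dsk == {
#     ref.hex():    (getitem, object_refs_token, 0),  # fetch the ref's value by position
#     '<uuid4 hex>': (list, [ref.hex(), 7]),          # rebuild the outer list around it
# }

Evaluating the repack graph after substituting the fetched values for object_refs_token reconstructs the original structure.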
Example 2
def decoders(cls):
    result = {}
    for field in dataclass_fields(cls):
        if has_meta(field, 'decoder'):
            sym = f'{field.name}#decoder'
            result[sym] = field.metadata['fastclasses_json']['decoder']
    return result
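
A minimal sketch of the metadata layout this helper reads, using a hypothetical User dataclass (the 'fastclasses_json'/'decoder' metadata path comes from the snippet above; the class itself is illustrative):

from dataclasses import dataclass, field

@dataclass
class User:
    # the decoder is stored under field.metadata['fastclasses_json']['decoder']
    name: str = field(metadata={'fastclasses_json': {'decoder': str.title}})

# decoders(User) would return {'name#decoder': str.title}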
Example 3
def adapted_fields(type) -> List[Attribute]:
    """Return the attrs format of `fields()` for attrs and dataclasses."""
    if is_dataclass(type):
        return [
            Attribute(
                attr.name,
                attr.default
                if attr.default is not MISSING
                else (
                    Factory(attr.default_factory)
                    if attr.default_factory is not MISSING
                    else NOTHING
                ),
                None,
                True,
                None,
                True,
                attr.init,
                True,
                type=attr.type,
            )
            for attr in dataclass_fields(type)
        ]
    else:
        return attrs_fields(type)
Example 4
    def __post_init__(self):
        # verify types
        config_types = get_type_hints(Config)
        for field in dataclass_fields(self):
            attr = field.name
            val = getattr(self, attr)
            type_ = type(val)

            # handle special cases (non primitives e.g. List[str])
            if attr == 'alias':
                if not isinstance(val, str) and val is not None:
                    raise TypeError(
                        f'Parameter "{attr}" is not a str or None. Type: {type_}'
                    )
            elif attr == 'members':
                if not isinstance(val, list):
                    raise TypeError(
                        f'Parameter "{attr}" is not a list. Type: {type_}')
            elif any(not isinstance(v, str) for v in val):
                raise TypeError(
                    f'The parameter "{attr}" (list) contains non-string values.')
            # handle normal case
            else:
                expected_type = config_types[attr]
                if not isinstance(val, expected_type):
                    raise TypeError(
                        f'Parameter "{attr}" is not of type {expected_type}. Type: {type_}'
                    )
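
As a sketch of the behaviour, assume a hypothetical Config declaring the two special-cased fields plus one ordinary one (only 'alias' and 'members' are named in the code above; 'retries' is illustrative):

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Config:
    alias: Optional[str]
    members: List[str]
    retries: int
    # __post_init__ as above

Config(alias=None, members=["a", "b"], retries=3)  # passes validation
Config(alias=1, members=["a"], retries=3)          # TypeError: Parameter "alias" is not a str or None. ...
Config(alias="x", members=["a"], retries="3")      # TypeError: Parameter "retries" is not of type <class 'int'>. ...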
Example 5
def _transform_class_to_input_type(cls, ctx: TransformContext):
    fields = []

    dc_fields = {}
    if is_dataclass(cls):
        dc_fields = {f.name: f for f in dataclass_fields(cls)}

    for name, definition, field_kw in iterate_class_attributes_for_input_type(
            cls, ctx):
        type_ = definition.type_

        dataclass_field = dc_fields.get(name)
        if dataclass_field:
            field_kw.update(dataclass_field.metadata)

        fields.append((name, type_, field_kw))

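    # note: this local default is immediately replaced by the hook result below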
    def out_type(data: dict):
        return cls(**data)

    out_type = ctx.hook__prepare_input_object_type_out_type(cls)

    return graphql.GraphQLInputObjectType(
        name=get_name(cls),
        fields=_lazy_input_fields(ctx, fields),
        description=get_doc(cls),
        out_type=out_type,
    )
Example 6
def _from_dict_source(cls):

    lines = [
        'def from_dict(cls, o, *, infer_missing):',
        '    args = {}',
    ]

    fields_by_name = {f.name: f for f in dataclass_fields(cls)}

    for name, field_type in typing.get_type_hints(cls).items():

        # pop off the top layer of optional, since we are using o.get
        if typing.get_origin(field_type) == typing.Union:
            field_type = typing.get_args(field_type)[0]

        field = fields_by_name[name]

        input_name = name
        if has_meta(field, 'field_name'):
            input_name = field.metadata['fastclasses_json']['field_name']
            if not isinstance(input_name, str):
                raise TypeError(
                    "fastclasses_json, field_name must be str: "
                    f"{cls.__name__}.{name}"
                )

        has_default = (
            field.default is not MISSING
            or field.default_factory is not MISSING
        )
        use_defaults = True  # TODO: get this from a config option
        use_default = has_default and use_defaults

        access = f'o.get({input_name!r})'

        transform = expr_builder_from(field_type)
        if has_meta(field, 'decoder'):
            transform = decoder_expr(name)

        if transform('x') != 'x':
            lines.append(f'    value = {access}')
            lines.append(f'    if value is not None:')  # noqa: F541
            lines.append(f'        value = ' + transform('value'))  # noqa: E501,F541
            if use_default:
                # has a default, so no need to put in args
                lines.append(f'    if {input_name!r} in o:')
                lines.append(f'        args[{name!r}] = value')
            else:
                lines.append(f'    args[{name!r}] = value')
        else:
            if use_default:
                # has a default, so no need to put in args
                lines.append(f'    if {input_name!r} in o:')
                lines.append(f'        args[{name!r}] = {access}')
            else:
                lines.append(f'    args[{name!r}] = {access}')
    lines.append('    return cls(**args)')
    lines.append('')
    return '\n'.join(lines)
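
To make the generated source concrete: for a hypothetical field x with no default, no decoder, and an identity type transform (so transform('x') == 'x'), the assembled string is:

def from_dict(cls, o, *, infer_missing):
    args = {}
    args['x'] = o.get('x')
    return cls(**args)

A field that does need a transform instead goes through the value/if-not-None block, and a field with a default is only added to args when its key is present in o.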
Example 7
def fields(type):
    try:
        return type.__attrs_attrs__
    except AttributeError:
        try:
            return dataclass_fields(type)
        except AttributeError:
            raise Exception("Not an attrs or dataclass class.")
Example 8
def _to_dict_source(cls):

    lines = [
        'def to_dict(self):',
        '    result = {}',
    ]

    # TODO: option for including Nones or not
    INCLUDE_NONES = False

    fields_by_name = {f.name: f for f in dataclass_fields(cls)}

    for name, field_type in typing.get_type_hints(cls).items():

        access = f'self.{name}'

        transform = expr_builder_to(field_type)

        # custom encoder and decoder routines
        field = fields_by_name[name]
        if has_meta(field, 'encoder'):
            transform = encoder_expr(name)

        # custom mapping of dataclass fieldnames to json field names
        output_name = name
        if has_meta(field, 'field_name'):
            output_name = field.metadata['fastclasses_json']['field_name']
            if not isinstance(output_name, str):
                raise TypeError(
                    "fastclasses_json, field_name must be str: "
                    f"{cls.__name__}.{name}"
                )

        if transform('x') != 'x':
            # since we have an is not none check, elide the first level
            # of optional
            if (typing.get_origin(field_type) == typing.Union
                    # This is a bit yuk. Premature optimization 🙄
                    and transform('x') == expr_builder_to(field_type)('x')):
                transform = expr_builder_to(typing.get_args(field_type)[0])
            lines.append(f'    value = {access}')
            lines.append(f'    if value is not None:')  # noqa: F541
            lines.append(f'        value = ' + transform('value'))  # noqa: E501,F541
            if INCLUDE_NONES:
                lines.append(f'    result[{output_name!r}] = value')
            else:
                lines.append(f'        result[{output_name!r}] = value')
        else:
            lines.append(f'    result[{output_name!r}] = {access}')

    lines.append('    return result')
    lines.append('')
    return '\n'.join(lines)
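
For symmetry with from_dict, a sketch of the emitted source: a plain field x with an identity transform produces a direct assignment, while a transformed Optional field (here assuming expr_builder_to emits an .isoformat() call for a date, a typical choice but not shown in the snippet) goes through the None check:

def to_dict(self):
    result = {}
    result['x'] = self.x
    value = self.created
    if value is not None:
        value = value.isoformat()
        result['created'] = value
    return result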
Example 9
    async def _configure(
        self,
        *,
        checks,
        nsca: dict,
        reporting_host: str = DEFAULT_HOSTNAME,
        resend_interval: str = "3min",
        overrides: Optional[dict] = None,
        **_kwargs,
    ) -> None:
        self._reporting_host = reporting_host
        self._nsca_config = NscaConfig(
            **{
                cfg_key: v
                for cfg_key, v in nsca.items()
                # ignore unknown keys in NSCA config
                if cfg_key in set(f.name for f in dataclass_fields(NscaConfig))
            })

        if overrides is not None:
            try:
                self._overrides = Overrides.from_config(overrides)
            except (ValueError, TypeError) as e:
                logger.error("Invalid overrides section in configuration: {}",
                             e)
                raise
        else:
            logger.debug(
                'Configuration did not contain an "overrides" section')

        try:
            self._global_resend_interval = Timedelta.from_string(
                resend_interval)
        except ValueError as e:
            logger.error(
                f'Invalid resend interval "{resend_interval}" in configuration: {e}'
            )
            raise

        if not self._checks:
            self._init_checks(checks)
        else:
            await self._update_checks(checks)

        c: Check
        self._has_value_checks = any(c._has_value_checks()
                                     for c in self._checks.values())

        logger.info(
            f"Configured NSCA reporter sink for host {self._reporting_host} and checks {', '.join(self._checks)!r}"
        )
        logger.debug(f"NSCA config: {self._nsca_config!r}")
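
The dict comprehension that builds NscaConfig is a reusable pattern for tolerating unknown keys. A self-contained sketch with an illustrative stand-in config class:

from dataclasses import dataclass, fields as dataclass_fields

@dataclass
class NscaConfig:  # stand-in; the real class has different fields
    host: str = "localhost"
    port: int = 5667

nsca = {"host": "monitor.example.org", "port": 5667, "unknown_key": True}
allowed = {f.name for f in dataclass_fields(NscaConfig)}
cfg = NscaConfig(**{k: v for k, v in nsca.items() if k in allowed})
# "unknown_key" is silently dropped instead of raising TypeError in __init__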
Example 10
File: core.py Project: gc-ss/hrepr
    def hrepr(self, obj: meta(is_dataclass)):
        def mapping(field):
            return self.H.pair(
                self.H.atom(field.name, type="symbol"),
                self(getattr(obj, field.name)),
                delimiter="=",
            )

        return self.H.instance(
            self.transform_sequence(dataclass_fields(obj), transform=mapping),
            type=_tn(obj),
            vertical=True,
        )
Example 11
def _transform_class_to_output_type(cls, ctx: TransformContext):
    fields = []

    dc_fields = {}
    if is_dataclass(cls):
        dc_fields = {f.name: f for f in dataclass_fields(cls)}

    for name, definition, field_kw in iterate_class_attributes_for_output_type(
            cls, ctx):
        type_ = definition.type_

        dataclass_field = dc_fields.get(name)
        if dataclass_field:
            field_kw.update(dataclass_field.metadata)

        fields.append((name, type_, field_kw))

    if not fields:
        raise TypeError(
            f"Please define proper attributes on {cls.__qualname__}")

    if is_interface(cls):
        resolve_type = ctx.get("resolve_type",
                               getattr(cls, "resolve_type", None))

        if resolve_type is None:
            resolve_type = ctx.hook__prepare_default_interface_type_resolver(
                cls)
        else:
            resolve_type = ctx.hook__prepare_interface_type_resolver(
                resolve_type, cls)

        return graphql.GraphQLInterfaceType(
            name=get_name(cls),
            fields=_lazy_fields(ctx, fields),
            description=get_doc(cls),
            resolve_type=resolve_type,
        )

    interfaces = lambda: [
        _transform_class_to_output_type(interface_cls, ctx)
        for interface_cls in get_interfaces(cls)
    ]

    return graphql.GraphQLObjectType(
        name=get_name(cls),
        fields=_lazy_fields(ctx, fields),
        description=get_doc(cls),
        interfaces=interfaces,
    )
Example 12
def load_dataclass_dict_from_csv(
    input_csv_file_path: str,
    dataclass_class: type,
    dict_key_field: str,
    list_per_key: bool = False,
) -> Dict[Any, Union[Any, List[Any]]]:
    """
    Args:
        input_csv_file_path (str): File path of the csv to read from
        dataclass_class (type): The dataclass to read each row into.
        dict_key_field (str): The field of 'dataclass_class' to use as
            the dictionary key.
        list_per_key (bool): If True, each key maps to a list of all
            dataclass objects whose value at attr = dict_key_field equals
            the key, rather than to a single object. Defaults to False.

    Returns:
        Dict[Any, Union[Any, List[Any]]] mapping from the dataclass
        value at attr = dict_key_field to either:

        if 'list_per_key', a list of all dataclass objects whose
        value at attr = dict_key_field equals the key;

        if not 'list_per_key', the unique dataclass object whose
        value at attr = dict_key_field equals the key.

    Raises:
        AssertionError: if not 'list_per_key' and multiple dataclass
        objects share the same value at attr = dict_key_field.
    """

    output_dict = defaultdict(list) if list_per_key else {}
    with g_pathmgr.open(input_csv_file_path) as dataclass_file:
        reader = csv.reader(dataclass_file, delimiter=",", quotechar='"')
        column_index = {header: i for i, header in enumerate(next(reader))}
        for line in reader:
            datum = dataclass_class(
                *(line[column_index[field.name]]
                  for field in dataclass_fields(dataclass_class)))
            dict_key = getattr(datum, dict_key_field)
            if list_per_key:
                output_dict[dict_key].append(datum)
            else:
                assert (
                    dict_key not in output_dict
                ), f"Multiple entries for {output_dict} in {dataclass_file}"
                output_dict[dict_key] = datum
    return output_dict
Example 13
def save_dataclass_objs_to_headered_csv(dataclass_objs: List[Any],
                                        file_name: str) -> None:
    """
    Saves a list of @dataclass objects to the specified csv file.

    Args:
        dataclass_objs (List[Any]):
            A list of @dataclass objects to be saved.

        file_name (str):
            file_name to save csv data to.
    """
    dataclass_type = type(dataclass_objs[0])
    field_names = [f.name for f in dataclass_fields(dataclass_type)]
    with g_pathmgr.open(file_name, "w") as f:
        writer = csv.writer(f, delimiter=",", quotechar='"')
        writer.writerow(field_names)
        for obj in dataclass_objs:
            writer.writerow([getattr(obj, f) for f in field_names])
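
A round trip through the two helpers, assuming g_pathmgr.open behaves like the built-in open for local paths (VideoInfo is an illustrative dataclass; note the loader passes every column through as a string):

from dataclasses import dataclass

@dataclass
class VideoInfo:
    video_id: str
    label: str

save_dataclass_objs_to_headered_csv(
    [VideoInfo("v1", "cat"), VideoInfo("v2", "dog")], "videos.csv")
by_id = load_dataclass_dict_from_csv("videos.csv", VideoInfo, "video_id")
# by_id == {"v1": VideoInfo("v1", "cat"), "v2": VideoInfo("v2", "dog")}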
Example 14
    def __post_init__(self) -> None:
        f"""
        This function is run by the dataclass library after '__init__'.

        Here we use this to ensure all fields are casted to their declared types
        and to apply any complex field_initializer functions that have been
        declared via the 'complex_initialized_dataclass_field' method of
        this class.

        A complex field_initializer for a given field would be stored in the
        field.metadata dictionary at:
            key = '{self.COMPLEX_INITIALIZER}' (self.COMPLEX_INITIALIZER)

        """
        for field in dataclass_fields(self):
            value = getattr(self, field.name)
            # First check if the datafield has been set to the declared type or
            # if the datafield has a declared complex field_initializer.
            if (not isinstance(value, field.type)
                    or DataclassFieldCaster.COMPLEX_INITIALIZER
                    in field.metadata):
                # Apply the complex field_initializer function for this field's value,
                # assert that the resultant type is the declared type of the field.
                if DataclassFieldCaster.COMPLEX_INITIALIZER in field.metadata:
                    setattr(
                        self,
                        field.name,
                        field.metadata[
                            DataclassFieldCaster.COMPLEX_INITIALIZER](value),
                    )
                    assert isinstance(getattr(self, field.name), field.type), (
                        f"'field_initializer' function of {field.name} must return "
                        f"type {field.type} but returned type {type(getattr(self, field.name))}"
                    )
                else:
                    # Otherwise attempt to cast the field's value to its declared type.
                    setattr(self, field.name, field.type(value))
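
A sketch of how a field opts in, assuming DataclassFieldCaster is the class defining this __post_init__ (the metadata key is taken from the code above; the field names are illustrative):

from dataclasses import dataclass, field

@dataclass
class Row(DataclassFieldCaster):
    frame_count: int  # "5" from a CSV row is cast via int("5")
    labels: list = field(
        metadata={DataclassFieldCaster.COMPLEX_INITIALIZER: lambda v: v.split(",")}
    )

row = Row("5", "cat,dog")
# row.frame_count == 5; row.labels == ["cat", "dog"]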
Example 15
def _get_meta_fields(meta_cls):
    return [f.name for f in dataclass_fields(meta_cls)]
Example 16
def env_data() -> dict:
    field_names = {field.name for field in dataclass_fields(Configuration)}
    return {
        key: value
        for key, value in os.environ.items() if key in field_names
    }
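
A self-contained sketch (Configuration here is an illustrative stand-in; only environment variables whose names match a dataclass field survive the filter):

import os
from dataclasses import dataclass, fields as dataclass_fields

@dataclass
class Configuration:
    DATABASE_URL: str = ""
    LOG_LEVEL: str = "INFO"

os.environ["LOG_LEVEL"] = "DEBUG"
os.environ["UNRELATED"] = "ignored"
field_names = {field.name for field in dataclass_fields(Configuration)}
print({k: v for k, v in os.environ.items() if k in field_names})
# {'LOG_LEVEL': 'DEBUG'}  (plus DATABASE_URL if it is set)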
Example 17
    def __init__(self: CDef, cls: type[JObject], jconf: JConf) -> None:
        """
        Initialize a new class definition.

        Args:
            cls (type): The JSON class for which the class definition is \
                created.
            jconf (JConf): The configuration object for the targeted class.
        """
        from .types import Types
        self._ref_names_resolved = False
        self._ref_types_resolved = False
        self._cls = cls
        jconf._cls = cls
        self._name: str = cls.__name__
        self._jconf: JConf = jconf
        self._list_fields: list[JField] = []
        self._dict_fields: dict[str, JField] = {}
        self._primary_field: Optional[JField] = None
        self._calc_fields: list[JField] = []
        self._setter_fields: list[JField] = []
        self._deny_fields: list[JField] = []
        self._nullify_fields: list[JField] = []
        self._cascade_fields: list[JField] = []
        self._field_names: list[str] = []
        self._camelized_field_names: list[str] = []
        self._reference_names: list[str] = []
        self._camelized_reference_names: list[str] = []
        self._list_reference_names: list[str] = []
        self._camelized_list_reference_names: list[str] = []
        self._virtual_reference_names: list[str] = []
        self._camelized_virtual_reference_names: list[str] = []
        self._virtual_reference_fields: dict[str, JField] = {}
        self._unique_fields: list[JField] = []
        self._assign_operator_fields: list[JField] = []
        self._auth_identity_fields: list[JField] = []
        self._auth_by_fields: list[JField] = []
        self._rfmap: dict[str, JField] = {}
        for field in dataclass_fields(cls):
            name = field.name
            self._field_names.append(name)
            if isinstance(field.default, Types):
                types = field.default
                default = None
            elif field.default == cast(Any, field).default_factory:
                types = rtypes(field.type)
                default = None
            else:
                types = rtypes(field.type)
                default = field.default
            types.fdef._cdef = self
            jfield = JField(cdef=self, name=name, default=default, types=types)
            self._camelized_field_names.append(jfield.json_name)
            self._list_fields.append(jfield)
            self._dict_fields[name] = jfield
            if types.fdef._primary:
                self._primary_field = jfield
            if types.fdef._fstore == FStore.CALCULATED:
                self._calc_fields.append(jfield)
            if types.fdef._setter is not None:
                self._setter_fields.append(jfield)
            if types.fdef._delete_rule == DeleteRule.DENY:
                self._deny_fields.append(jfield)
            elif types.fdef._delete_rule == DeleteRule.NULLIFY:
                self._nullify_fields.append(jfield)
            elif types.fdef._delete_rule == DeleteRule.CASCADE:
                self._cascade_fields.append(jfield)
            if types.fdef._unique:
                self._unique_fields.append(jfield)
            if types.fdef._requires_operator_assign:
                self._assign_operator_fields.append(jfield)
            if types.fdef._auth_identity:
                self._auth_identity_fields.append(jfield)
            if types.fdef._auth_by:
                self._auth_by_fields.append(jfield)
        self._tuple_fields: tuple[JField, ...] = tuple(self._list_fields)
Example 18
    def get_fields(cls) -> Sequence[Field]:
        """
        Returns the fields of this dataclass.
        """
        return list(dataclass_fields(cls))