Example #1
class sUnits(Schema):
    length_units = units.getUnits('length')
    length = fields.String(validate=validate.OneOf(length_units))
    length_micro_units = units.getUnits('length_micro')
    length_micro = fields.String(validate=validate.OneOf(length_micro_units))
    length_mili_units = units.getUnits('length_mili')
    length_mili = fields.String(validate=validate.OneOf(length_mili_units))
    length_kilo_units = units.getUnits('length_kilo')
    length_kilo = fields.String(validate=validate.OneOf(length_kilo_units))
    area_units = units.getUnits('area')
    area = fields.String(validate=validate.OneOf(area_units))
    angle_units = units.getUnits('angle')
    angle = fields.String(validate=validate.OneOf(angle_units))
    mass_units = units.getUnits('mass')
    mass = fields.String(validate=validate.OneOf(mass_units))
    time_units = units.getUnits('time')
    time = fields.String(validate=validate.OneOf(time_units))
    speed_units = units.getUnits('speed')
    speed = fields.String(validate=validate.OneOf(speed_units))
    acceleration_units = units.getUnits('acceleration')
    acceleration = fields.String(validate=validate.OneOf(acceleration_units))
    force_units = units.getUnits('force')
    force = fields.String(validate=validate.OneOf(force_units))
    energy_units = units.getUnits('energy')
    energy = fields.String(validate=validate.OneOf(energy_units))
    power_units = units.getUnits('power')
    power = fields.String(validate=validate.OneOf(power_units))
    pressure_units = units.getUnits('pressure')
    pressure = fields.String(validate=validate.OneOf(pressure_units))
    temperature_units = units.getUnits('temperature')
    temperature = fields.String(validate=validate.OneOf(temperature_units))
    massflow_units = units.getUnits('massflow')
    massflow = fields.String(validate=validate.OneOf(massflow_units))
    flow_units = units.getUnits('flow')
    flow = fields.String(validate=validate.OneOf(flow_units))
    density_units = units.getUnits('density')
    density = fields.String(validate=validate.OneOf(density_units))
    molecularMass_units = units.getUnits('molecularMass')
    molecularMass = fields.String(validate=validate.OneOf(molecularMass_units))
    specificVolume_units = units.getUnits('specificVolume')
    specificVolume = fields.String(
        validate=validate.OneOf(specificVolume_units))
    specificEnergy_units = units.getUnits('specificEnergy')
    specificEnergy = fields.String(
        validate=validate.OneOf(specificEnergy_units))
    specificEnergyMolar_units = units.getUnits('specificEnergyMolar')
    specificEnergyMolar = fields.String(
        validate=validate.OneOf(specificEnergyMolar_units))
    specificHeat_units = units.getUnits('specificHeat')
    specificHeat = fields.String(validate=validate.OneOf(specificHeat_units))
    specificHeatMolar_units = units.getUnits('specificHeatMolar')
    specificHeatMolar = fields.String(
        validate=validate.OneOf(specificHeatMolar_units))
    thermalConductivity_units = units.getUnits('thermalConductivity')
    thermalConductivity = fields.String(
        validate=validate.OneOf(thermalConductivity_units))
    dynViscosity_units = units.getUnits('dynViscosity')
    dynViscosity = fields.String(validate=validate.OneOf(dynViscosity_units))
    kinViscosity_units = units.getUnits('kinViscosity')
    kinViscosity = fields.String(validate=validate.OneOf(kinViscosity_units))

    class Meta:
        ordered = True
Example #2
class RoleSchema(Schema):
    name = fields.String()
    code = fields.String()
    source = fields.String(validate=validate.OneOf(["event", "category"]))
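
To see what the OneOf validator does at load time, here is a minimal, self-contained sketch (assuming marshmallow 3.x; the payload values are invented for illustration):

from marshmallow import Schema, fields, validate, ValidationError

# RoleSchema as defined above, repeated so the sketch runs standalone.
class RoleSchema(Schema):
    name = fields.String()
    code = fields.String()
    source = fields.String(validate=validate.OneOf(["event", "category"]))

# A value outside the allowed choices is rejected during load().
try:
    RoleSchema().load({"name": "speaker", "code": "SPK", "source": "invalid"})
except ValidationError as err:
    print(err.messages)  # {'source': ['Must be one of: event, category.']}
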
Example #3
class RecipeChangeStatusSchema(SQLAlchemyAutoSchema):
    status = fields.Str(required=True,
                        validate=validate.OneOf(Recipe.STATUSES))
    """Setup the connections plugin."""
    if not protocol_registry:
        protocol_registry = await context.inject(ProtocolRegistry)

    protocol_registry.register_message_types(MESSAGE_TYPES)


BaseConnectionSchema = Schema.from_dict({
    'label': fields.Str(required=True),
    'my_did': fields.Str(required=True),
    'connection_id': fields.Str(required=True),
    'state': fields.Str(
        validate=validate.OneOf(['pending', 'active', 'error']),
        required=True,
    ),
    'their_did': fields.Str(required=False),  # May be missing if pending
    'role': fields.Str(required=False),
    'raw_repr': fields.Dict(required=False),
})

Connection, ConnectionSchema = generate_model_schema(
    name='Connection',
    handler='acapy_plugin_toolbox.util.PassHandler',
    msg_type=CONNECTION,
    schema=BaseConnectionSchema)
Example #5
class ConnRecordSchema(BaseRecordSchema):
    """Schema to allow serialization/deserialization of connection records."""

    class Meta:
        """ConnRecordSchema metadata."""

        model_class = ConnRecord

    connection_id = fields.Str(
        required=False, description="Connection identifier", example=UUIDFour.EXAMPLE
    )
    my_did = fields.Str(
        required=False, description="Our DID for connection", **INDY_DID
    )
    their_did = fields.Str(
        required=False, description="Their DID for connection", **INDY_DID
    )
    their_label = fields.Str(
        required=False, description="Their label for connection", example="Bob"
    )
    their_role = fields.Str(
        required=False,
        description="Their role in the connection protocol",
        validate=validate.OneOf(
            [label for role in ConnRecord.Role for label in role.value]
        ),
        example=ConnRecord.Role.REQUESTER.rfc23,
    )
    rfc23_state = fields.Str(
        dump_only=True,
        description="State per RFC 23",
        example="invitation-sent",
    )
    inbound_connection_id = fields.Str(
        required=False,
        description="Inbound routing connection id to use",
        example=UUIDFour.EXAMPLE,
    )
    invitation_key = fields.Str(
        required=False, description="Public key for connection", **INDY_RAW_PUBLIC_KEY
    )
    invitation_msg_id = fields.Str(
        required=False,
        description="ID of out-of-band invitation message",
        example=UUIDFour.EXAMPLE,
    )
    request_id = fields.Str(
        required=False,
        description="Connection request identifier",
        example=UUIDFour.EXAMPLE,
    )
    routing_state = fields.Str(
        required=False,
        description="Routing state of connection",
        validate=validate.OneOf(
            [
                getattr(ConnRecord, m)
                for m in vars(ConnRecord)
                if m.startswith("ROUTING_STATE_")
            ]
        ),
        example=ConnRecord.ROUTING_STATE_ACTIVE,
    )
    accept = fields.Str(
        required=False,
        description="Connection acceptance: manual or auto",
        example=ConnRecord.ACCEPT_AUTO,
        validate=validate.OneOf(
            [
                getattr(ConnRecord, a)
                for a in vars(ConnRecord)
                if a.startswith("ACCEPT_")
            ]
        ),
    )
    error_msg = fields.Str(
        required=False,
        description="Error message",
        example="No DIDDoc provided; cannot connect to public DID",
    )
    invitation_mode = fields.Str(
        required=False,
        description="Invitation mode",
        example=ConnRecord.INVITATION_MODE_ONCE,
        validate=validate.OneOf(
            [
                getattr(ConnRecord, i)
                for i in vars(ConnRecord)
                if i.startswith("INVITATION_MODE_")
            ]
        ),
    )
    alias = fields.Str(
        required=False,
        description="Optional alias to apply to connection for later use",
        example="Bob, providing quotes",
    )
    their_public_did = fields.Str(
        required=False,
        description="Other agent's public DID for connection",
        example="2cpBmR3FqGKWi5EyUbpRY8",
    )
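
The routing_state, accept, and invitation_mode fields above derive their OneOf choices by introspecting class-level constants. A minimal sketch of that pattern with a hypothetical record class (not the real ConnRecord):

from marshmallow import Schema, fields, validate

class DemoRecord:
    # Hypothetical constants standing in for the real record's ROUTING_STATE_* values.
    ROUTING_STATE_NONE = "none"
    ROUTING_STATE_ACTIVE = "active"

class DemoSchema(Schema):
    routing_state = fields.Str(
        validate=validate.OneOf(
            [
                getattr(DemoRecord, m)
                for m in vars(DemoRecord)
                if m.startswith("ROUTING_STATE_")
            ]
        )
    )

DemoSchema().load({"routing_state": "active"})    # accepted
# DemoSchema().load({"routing_state": "bogus"})   # would raise ValidationError
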
Example #6
class TestSchema(Schema):
    foo = fields.Integer(
        validate=validate.OneOf(mapping.values(), labels=mapping.keys()))
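
The labels argument pairs human-readable labels with the accepted choices; by default the error message interpolates {choices}, so the labels only surface when the error template references {labels} (or through OneOf.options()). A minimal sketch with a hypothetical mapping, assuming marshmallow 3.x:

from marshmallow import Schema, fields, validate, ValidationError

mapping = {"low": 1, "medium": 2, "high": 3}  # hypothetical label -> value map

class TestSchema(Schema):
    foo = fields.Integer(
        validate=validate.OneOf(
            mapping.values(),
            labels=mapping.keys(),
            error="Must be one of: {labels}.",
        )
    )

try:
    TestSchema().load({"foo": 7})
except ValidationError as err:
    print(err.messages)  # {'foo': ['Must be one of: low, medium, high.']}
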
Example #7
class ChartDataExtrasSchema(Schema):

    time_range_endpoints = fields.List(
        fields.String(
            validate=validate.OneOf(choices=("INCLUSIVE", "EXCLUSIVE")),
            description="A list with two values, stating if start/end should be "
            "inclusive/exclusive.",
        )
    )
    relative_start = fields.String(
        description="Start time for relative time deltas. "
        'Default: `config["DEFAULT_RELATIVE_START_TIME"]`',
        validate=validate.OneOf(choices=("today", "now")),
    )
    relative_end = fields.String(
        description="End time for relative time deltas. "
        'Default: `config["DEFAULT_RELATIVE_START_TIME"]`',
        validate=validate.OneOf(choices=("today", "now")),
    )
    where = fields.String(
        description="WHERE clause to be added to queries using AND operator.",
    )
    having = fields.String(
        description="HAVING clause to be added to aggregate queries using "
        "AND operator.",
    )
    having_druid = fields.List(
        fields.Nested(ChartDataFilterSchema),
        description="HAVING filters to be added to legacy Druid datasource queries.",
    )
    time_grain_sqla = fields.String(
        description="To what level of granularity should the temporal column be "
        "aggregated. Supports "
        "[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Durations) durations.",
        validate=validate.OneOf(
            choices=(
                "PT1S",
                "PT1M",
                "PT5M",
                "PT10M",
                "PT15M",
                "PT0.5H",
                "PT1H",
                "P1D",
                "P1W",
                "P1M",
                "P0.25Y",
                "P1Y",
                "1969-12-28T00:00:00Z/P1W",  # Week starting Sunday
                "1969-12-29T00:00:00Z/P1W",  # Week starting Monday
                "P1W/1970-01-03T00:00:00Z",  # Week ending Saturday
                "P1W/1970-01-04T00:00:00Z",  # Week ending Sunday
            ),
        ),
        example="P1D",
        allow_none=True,
    )
    druid_time_origin = fields.String(
        description="Starting point for time grain counting on legacy Druid "
        "datasources. Used to change e.g. Monday/Sunday first-day-of-week.",
        allow_none=True,
    )
Example #8
class ReportSchedulePutSchema(Schema):
    type = fields.String(
        description=type_description,
        required=False,
        validate=validate.OneOf(choices=tuple(key.value
                                              for key in ReportScheduleType)),
    )
    name = fields.String(description=name_description,
                         required=False,
                         validate=[Length(1, 150)])
    description = fields.String(
        description=description_description,
        allow_none=True,
        required=False,
        example="Daily sales dashboard to marketing",
    )
    context_markdown = fields.String(description=context_markdown_description,
                                     allow_none=True,
                                     required=False)
    active = fields.Boolean(required=False)
    crontab = fields.String(
        description=crontab_description,
        validate=[validate_crontab, Length(1, 1000)],
        required=False,
    )
    timezone = fields.String(
        description=timezone_description,
        default="UTC",
        validate=validate.OneOf(choices=tuple(all_timezones)),
    )
    sql = fields.String(
        description=sql_description,
        example="SELECT value FROM time_series_table",
        required=False,
        allow_none=True,
    )
    chart = fields.Integer(required=False, allow_none=True)
    creation_method = EnumField(
        ReportCreationMethodType,
        by_value=True,
        allow_none=True,
        description=creation_method_description,
    )
    dashboard = fields.Integer(required=False, allow_none=True)
    database = fields.Integer(required=False)
    owners = fields.List(fields.Integer(description=owners_description),
                         required=False)
    validator_type = fields.String(
        description=validator_type_description,
        validate=validate.OneOf(choices=tuple(
            key.value for key in ReportScheduleValidatorType)),
        allow_none=True,
        required=False,
    )
    validator_config_json = fields.Nested(ValidatorConfigJSONSchema,
                                          required=False)
    log_retention = fields.Integer(
        description=log_retention_description,
        example=90,
        required=False,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    grace_period = fields.Integer(
        description=grace_period_description,
        example=60 * 60 * 4,
        required=False,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    working_timeout = fields.Integer(
        description=working_timeout_description,
        example=60 * 60 * 1,
        allow_none=True,
        required=False,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    recipients = fields.List(fields.Nested(ReportRecipientSchema),
                             required=False)
    report_format = fields.String(
        default=ReportDataFormat.VISUALIZATION,
        validate=validate.OneOf(choices=tuple(key.value
                                              for key in ReportDataFormat)),
    )
    force_screenshot = fields.Boolean(default=False)
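
Several fields above build their choices from an Enum with tuple(key.value for key in ...). A standalone sketch of that pattern (the enum here is hypothetical, not Superset's ReportDataFormat):

import enum
from marshmallow import Schema, fields, validate

class ReportFormat(enum.Enum):
    CSV = "CSV"
    PNG = "PNG"

class DemoSchema(Schema):
    report_format = fields.String(
        validate=validate.OneOf(choices=tuple(key.value for key in ReportFormat)),
    )

DemoSchema().load({"report_format": "CSV"})    # accepted
# DemoSchema().load({"report_format": "PDF"})  # would raise ValidationError
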
Example #9
class PersonOrOrganizationSchema(Schema):
    """Person or Organization schema."""

    NAMES = [
        "organizational",
        "personal"
    ]

    type = SanitizedUnicode(
        required=True,
        validate=validate.OneOf(
            choices=NAMES,
            error=_(f'Invalid value. Choose one of {NAMES}.')
        ),
        error_messages={
            # [] needed to mirror error message above
            "required": [_(f'Invalid value. Choose one of {NAMES}.')]
        }
    )
    name = SanitizedUnicode()
    given_name = SanitizedUnicode()
    family_name = SanitizedUnicode()
    identifiers = IdentifierSet(
        fields.Nested(partial(
            IdentifierSchema,
            # It is intended to allow org schemes to be sent as personal
            # and vice versa. This is a trade-off learnt from running
            # Zenodo in production.
            allowed_schemes=["orcid", "isni", "gnd", "ror"]
        ))
    )

    @validates_schema
    def validate_names(self, data, **kwargs):
        """Validate names based on type."""
        if data['type'] == "personal":
            if not data.get('family_name'):
                messages = [_("Family name must be filled.")]
                raise ValidationError({
                    "family_name": messages
                })

        elif data['type'] == "organizational":
            if not data.get('name'):
                messages = [_('Name cannot be blank.')]
                raise ValidationError({"name": messages})

    @post_load
    def update_names(self, data, **kwargs):
        """Update names for organization / person.

        Fill name from given_name and family_name if person.
        Remove given_name and family_name if organization.
        """
        if data["type"] == "personal":
            names = [data.get("family_name"), data.get("given_name")]
            data["name"] = ", ".join([n for n in names if n])

        elif data['type'] == "organizational":
            if 'family_name' in data:
                del data['family_name']
            if 'given_name' in data:
                del data['given_name']

        return data
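
The schema above combines OneOf on type with a cross-field check and a post-load step. A simplified, runnable sketch of that combination, using plain marshmallow fields instead of SanitizedUnicode and assuming marshmallow 3.x:

from marshmallow import (Schema, ValidationError, fields, post_load, validate,
                         validates_schema)

class CreatorSchema(Schema):
    type = fields.String(required=True,
                         validate=validate.OneOf(["organizational", "personal"]))
    name = fields.String()
    given_name = fields.String()
    family_name = fields.String()

    @validates_schema
    def validate_names(self, data, **kwargs):
        # Personal entries must carry a family name.
        if data["type"] == "personal" and not data.get("family_name"):
            raise ValidationError({"family_name": ["Family name must be filled."]})

    @post_load
    def update_names(self, data, **kwargs):
        # Derive the display name from family and given names.
        if data["type"] == "personal":
            names = [data.get("family_name"), data.get("given_name")]
            data["name"] = ", ".join(n for n in names if n)
        return data

loaded = CreatorSchema().load(
    {"type": "personal", "family_name": "Doe", "given_name": "Jane"})
print(loaded["name"])  # Doe, Jane
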
Example #10
class ValidatorConfigJSONSchema(Schema):
    op = fields.String(  # pylint: disable=invalid-name
        description=validator_config_json_op_description,
        validate=validate.OneOf(choices=["<", "<=", ">", ">=", "==", "!="]),
    )
    threshold = fields.Float()
Example #11
class ReportSchedulePostSchema(Schema):
    type = fields.String(
        description=type_description,
        allow_none=False,
        required=True,
        validate=validate.OneOf(choices=tuple(key.value
                                              for key in ReportScheduleType)),
    )
    name = fields.String(
        description=name_description,
        allow_none=False,
        required=True,
        validate=[Length(1, 150)],
        example="Daily dashboard email",
    )
    description = fields.String(
        description=description_description,
        allow_none=True,
        required=False,
        example="Daily sales dashboard to marketing",
    )
    context_markdown = fields.String(description=context_markdown_description,
                                     allow_none=True,
                                     required=False)
    active = fields.Boolean()
    crontab = fields.String(
        description=crontab_description,
        validate=[validate_crontab, Length(1, 1000)],
        example="*/5 * * * *",
        allow_none=False,
        required=True,
    )
    timezone = fields.String(
        description=timezone_description,
        default="UTC",
        validate=validate.OneOf(choices=tuple(all_timezones)),
    )
    sql = fields.String(description=sql_description,
                        example="SELECT value FROM time_series_table")
    chart = fields.Integer(required=False, allow_none=True)
    creation_method = EnumField(
        ReportCreationMethodType,
        by_value=True,
        required=False,
        description=creation_method_description,
    )
    dashboard = fields.Integer(required=False, allow_none=True)
    database = fields.Integer(required=False)
    owners = fields.List(fields.Integer(description=owners_description))
    validator_type = fields.String(
        description=validator_type_description,
        validate=validate.OneOf(choices=tuple(
            key.value for key in ReportScheduleValidatorType)),
    )
    validator_config_json = fields.Nested(ValidatorConfigJSONSchema)
    log_retention = fields.Integer(
        description=log_retention_description,
        example=90,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    grace_period = fields.Integer(
        description=grace_period_description,
        example=60 * 60 * 4,
        default=60 * 60 * 4,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )
    working_timeout = fields.Integer(
        description=working_timeout_description,
        example=60 * 60 * 1,
        default=60 * 60 * 1,
        validate=[Range(min=1, error=_("Value must be greater than 0"))],
    )

    recipients = fields.List(fields.Nested(ReportRecipientSchema))
    report_format = fields.String(
        default=ReportDataFormat.VISUALIZATION,
        validate=validate.OneOf(choices=tuple(key.value
                                              for key in ReportDataFormat)),
    )
    force_screenshot = fields.Boolean(default=False)

    @validates_schema
    def validate_report_references(  # pylint: disable=unused-argument,no-self-use
            self, data: Dict[str, Any], **kwargs: Any) -> None:
        if data["type"] == ReportScheduleType.REPORT:
            if "database" in data:
                raise ValidationError({
                    "database":
                    ["Database reference is not allowed on a report"]
                })
Example #12
class AnnotationLayerSchema(Schema):
    annotationType = fields.String(
        description="Type of annotation layer",
        validate=validate.OneOf(choices=[ann.value for ann in AnnotationType]),
    )
    color = fields.String(
        description="Layer color",
        allow_none=True,
    )
    descriptionColumns = fields.List(
        fields.String(),
        description="Columns to use as the description. If none are provided, "
        "all will be shown.",
    )
    hideLine = fields.Boolean(
        description="Should line be hidden. Only applies to line annotations",
        allow_none=True,
    )
    intervalEndColumn = fields.String(
        description=(
            "Column containing end of interval. Only applies to interval layers"
        ),
        allow_none=True,
    )
    name = fields.String(description="Name of layer", required=True)
    opacity = fields.String(
        description="Opacity of layer",
        validate=validate.OneOf(
            choices=("", "opacityLow", "opacityMedium", "opacityHigh"),
        ),
        allow_none=True,
        required=False,
    )
    overrides = fields.Dict(
        keys=fields.String(
            desciption="Name of property to be overridden",
            validate=validate.OneOf(
                choices=("granularity", "time_grain_sqla", "time_range", "time_shift"),
            ),
        ),
        values=fields.Raw(allow_none=True),
        description="which properties should be overridable",
        allow_none=True,
    )
    show = fields.Boolean(description="Should the layer be shown", required=True)
    showLabel = fields.Boolean(
        description="Should the label always be shown",
        allow_none=True,
    )
    showMarkers = fields.Boolean(
        description="Should markers be shown. Only applies to line annotations.",
        required=True,
    )
    sourceType = fields.String(
        description="Type of source for annotation data",
        validate=validate.OneOf(
            choices=(
                "",
                "line",
                "NATIVE",
                "table",
            )
        ),
    )
    style = fields.String(
        description="Line style. Only applies to time-series annotations",
        validate=validate.OneOf(
            choices=(
                "dashed",
                "dotted",
                "solid",
                "longDashed",
            )
        ),
    )
    timeColumn = fields.String(
        description="Column with event date or interval start date",
        allow_none=True,
    )
    titleColumn = fields.String(
        description="Column with title",
        allow_none=True,
    )
    width = fields.Float(
        description="Width of annotation line",
        validate=[
            Range(
                min=0,
                min_inclusive=True,
                error=_("`width` must be greater or equal to 0"),
            )
        ],
    )
    value = fields.Raw(
        description="For formula annotations, this contains the formula. "
        "For other types, this is the primary key of the source object.",
        required=True,
    )
Example #13
class ChartDataBoxplotOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
    """
    Boxplot operation config.
    """

    groupby = fields.List(
        fields.String(
            description="Columns by which to group the query.",
        ),
        allow_none=True,
    )

    metrics = fields.List(
        fields.Raw(),
        description="Aggregate expressions. Metrics can be passed as both "
        "references to datasource metrics (strings), or ad-hoc metrics"
        "which are defined only within the query object. See "
        "`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. "
        "When metrics is undefined or null, the query is executed without a groupby. "
        "However, when metrics is an array (length >= 0), a groupby clause is added to "
        "the query.",
        allow_none=True,
    )

    whisker_type = fields.String(
        description="Whisker type. Any numpy function will work.",
        validate=validate.OneOf(
            choices=([val.value for val in PostProcessingBoxplotWhiskerType])
        ),
        required=True,
        example="tukey",
    )

    percentiles = fields.Tuple(
        (
            fields.Float(
                description="Lower percentile",
                validate=[
                    Range(
                        min=0,
                        max=100,
                        min_inclusive=False,
                        max_inclusive=False,
                        error=_(
                            "lower percentile must be greater than 0 and less "
                            "than 100. Must be lower than upper percentile."
                        ),
                    ),
                ],
            ),
            fields.Float(
                description="Upper percentile",
                validate=[
                    Range(
                        min=0,
                        max=100,
                        min_inclusive=False,
                        max_inclusive=False,
                        error=_(
                            "upper percentile must be greater than 0 and less "
                            "than 100. Must be higher than lower percentile."
                        ),
                    ),
                ],
            ),
        ),
        description="Upper and lower percentiles for percentile whisker type.",
        example=[1, 99],
    )
Example #14
class ChartDataResponseResult(Schema):
    annotation_data = fields.List(
        fields.Dict(
            keys=fields.String(description="Annotation layer name"),
            values=fields.String(),
        ),
        description="All requested annotation data",
        allow_none=True,
    )
    cache_key = fields.String(
        description="Unique cache key for query object",
        required=True,
        allow_none=True,
    )
    cached_dttm = fields.String(
        description="Cache timestamp",
        required=True,
        allow_none=True,
    )
    cache_timeout = fields.Integer(
        description="Cache timeout in following order: custom timeout, datasource "
        "timeout, default config timeout.",
        required=True,
        allow_none=True,
    )
    error = fields.String(
        description="Error",
        allow_none=True,
    )
    is_cached = fields.Boolean(
        description="Is the result cached",
        required=True,
        allow_none=True,
    )
    query = fields.String(
        description="The executed query statement",
        required=True,
        allow_none=False,
    )
    status = fields.String(
        description="Status of the query",
        validate=validate.OneOf(
            choices=(
                "stopped",
                "failed",
                "pending",
                "running",
                "scheduled",
                "success",
                "timed_out",
            )
        ),
        allow_none=False,
    )
    stacktrace = fields.String(
        desciption="Stacktrace if there was an error",
        allow_none=True,
    )
    rowcount = fields.Integer(
        description="Amount of rows in result set",
        allow_none=False,
    )
    data = fields.List(fields.Dict(), description="A list with results")
    colnames = fields.List(fields.String(), description="A list of column names")
    coltypes = fields.List(
        fields.Integer(), description="A list of generic data types of each column"
    )
    applied_filters = fields.List(
        fields.Dict(), description="A list with applied filters"
    )
    rejected_filters = fields.List(
        fields.Dict(), description="A list with rejected filters"
    )
    from_dttm = fields.Integer(
        description="Start timestamp of time range", required=False, allow_none=True
    )
    to_dttm = fields.Integer(
        description="End timestamp of time range", required=False, allow_none=True
    )
Example #15
MEDIATION_ID_SCHEMA = fields.UUID(
    description="Mediation record identifier",
    example=UUIDFour.EXAMPLE,
)


MEDIATION_STATE_SCHEMA = fields.Str(
    description="Mediation state (optional)",
    required=False,
    validate=validate.OneOf(
        [
            getattr(MediationRecord, m)
            for m in vars(MediationRecord)
            if m.startswith("STATE_")
        ]
    ),
    example=MediationRecord.STATE_GRANTED,
)


MEDIATOR_TERMS_SCHEMA = fields.List(
    fields.Str(
        description=(
            "Indicate terms to which the mediator requires the recipient to agree"
        )
    ),
    required=False,
    description="List of mediator rules for recipient",
Example #16
class ChartDataRollingOptionsSchema(ChartDataPostProcessingOperationOptionsSchema):
    """
    Rolling operation config.
    """

    columns = fields.Dict(
        description="Columns on which to perform rolling, mapping source column to "
        "target column. For instance, `{'y': 'y'}` will replace the "
        "column `y` with the rolling value in `y`, while `{'y': 'y2'}` "
        "will add a column `y2` based on rolling values calculated "
        "from `y`, leaving the original column `y` unchanged.",
        example={"weekly_rolling_sales": "sales"},
    )
    rolling_type = fields.String(
        description="Type of rolling window. Any numpy function will work.",
        validate=validate.OneOf(
            choices=(
                "average",
                "argmin",
                "argmax",
                "cumsum",
                "cumprod",
                "max",
                "mean",
                "median",
                "nansum",
                "nanmin",
                "nanmax",
                "nanmean",
                "nanmedian",
                "min",
                "percentile",
                "prod",
                "product",
                "std",
                "sum",
                "var",
            )
        ),
        required=True,
        example="percentile",
    )
    window = fields.Integer(
        description="Size of the rolling window in days.", required=True, example=7,
    )
    rolling_type_options = fields.Dict(
        desctiption="Optional options to pass to rolling method. Needed for "
        "e.g. quantile operation.",
        example={},
    )
    center = fields.Boolean(
        description="Should the label be at the center of the window. Default: `false`",
        example=False,
    )
    win_type = fields.String(
        description="Type of window function. See "
        "[SciPy window functions](https://docs.scipy.org/doc/scipy/reference"
        "/signal.windows.html#module-scipy.signal.windows) "
        "for more details. Some window functions require passing "
        "additional parameters to `rolling_type_options`. For instance, "
        "to use `gaussian`, the parameter `std` needs to be provided.",
        validate=validate.OneOf(
            choices=(
                "boxcar",
                "triang",
                "blackman",
                "hamming",
                "bartlett",
                "parzen",
                "bohman",
                "blackmanharris",
                "nuttall",
                "barthann",
                "kaiser",
                "gaussian",
                "general_gaussian",
                "slepian",
                "exponential",
            )
        ),
    )
    min_periods = fields.Integer(
        description="The minimum amount of periods required for a row to be included "
        "in the result set.",
        example=7,
    )
Example #17
    DELETE_CONNECTION: 'acapy_plugin_toolbox.connections'
    '.DeleteConnection',
    CONNECTION_ACK: 'acapy_plugin_toolbox.connections'
    '.ConnectionAck',
    UPDATE_CONNECTION: 'acapy_plugin_toolbox.connections'
    '.UpdateConnection',
}

ConnectionGetList, ConnectionGetListSchema = generate_model_schema(
    name='ConnectionGetList',
    handler='acapy_plugin_toolbox.connections.ConnectionGetListHandler',
    msg_type=CONNECTION_GET_LIST,
    schema={
        'initiator':
        fields.Str(
            validate=validate.OneOf(['self', 'external']),
            required=False,
        ),
        'invitation_key':
        fields.Str(required=False),
        'my_did':
        fields.Str(required=False),
        'state':
        fields.Str(validate=validate.OneOf([
            'init', 'invitation', 'request', 'response', 'active', 'error',
            'inactive'
        ]),
                   required=False),
        'their_did':
        fields.Str(required=False),
        'their_role':
Example #18
class ExperimentSchema(Schema):
    _version_ = fields.Constant(extrap.__version__, data_key=extrap.__title__)
    scaling = fields.Str(required=False, allow_none=True, validate=validate.OneOf(['strong', 'weak']))
    parameters = fields.List(fields.Nested(ParameterSchema))
    measurements = TupleKeyDict(keys=(fields.Nested(CallpathSchema), fields.Nested(MetricSchema)),
                                values=fields.List(fields.Nested(MeasurementSchema, exclude=('callpath', 'metric'))))

    modelers = fields.List(fields.Nested(ModelGeneratorSchema), missing=[], required=False)

    def set_progress_bar(self, pbar):
        self.context['progress_bar'] = pbar

    @pre_load
    def add_progress(self, data, **kwargs):
        file_version = data.get(extrap.__title__)
        if file_version:
            prog_version = Version(extrap.__version__)
            file_version = Version(file_version)
            if prog_version < file_version:
                if prog_version.major != file_version.major or prog_version.minor != file_version.minor:
                    warnings.warn(
                        f"The loaded experiment was created with a newer version ({file_version}) of Extra-P. "
                        f"This Extra-P version ({prog_version}) might not work correctly with this experiment.")
                else:
                    logging.info(
                        f"The loaded experiment was created with a newer version ({file_version}) of Extra-P. ")
        if 'progress_bar' in self.context:
            pbar = self.context['progress_bar']
            models = 0
            ms = data.get('measurements')
            if ms:
                for cp in ms.values():
                    for m in cp.values():
                        models += 1
                        pbar.total += len(m)
            pbar.total += models
            ms = data.get('modelers')
            if ms:
                pbar.total += len(ms)
                pbar.total += len(ms) * models
            pbar.update(0)
        return data

    def create_object(self):
        return Experiment()

    def postprocess_object(self, obj: Experiment):
        if 'progress_bar' in self.context:
            pbar = self.context['progress_bar']
        else:
            pbar = DUMMY_PROGRESS

        for (callpath, metric), measurement in obj.measurements.items():
            obj.add_callpath(callpath)
            obj.add_metric(metric)
            for m in measurement:
                obj.add_coordinate(m.coordinate)
                m.callpath = callpath
                m.metric = metric
            pbar.update()

        obj.call_tree = io_helper.create_call_tree(obj.callpaths)
        for modeler in obj.modelers:
            modeler.experiment = obj
            for key, model in modeler.models.items():
                model.measurements = obj.measurements[key]
            pbar.update()

        return obj
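
One detail worth noting about the scaling field above: because allow_none=True, a None value short-circuits deserialization and the OneOf validator never runs; only non-None values are checked against the choices. A minimal sketch (assuming marshmallow 3.x; the schema is reduced to that single field):

from marshmallow import Schema, fields, validate, ValidationError

class ScalingSchema(Schema):
    scaling = fields.Str(required=False, allow_none=True,
                         validate=validate.OneOf(['strong', 'weak']))

ScalingSchema().load({"scaling": None})      # accepted: allow_none bypasses OneOf
ScalingSchema().load({"scaling": "weak"})    # accepted: value is in the choices
try:
    ScalingSchema().load({"scaling": "medium"})
except ValidationError as err:
    print(err.messages)  # {'scaling': ['Must be one of: strong, weak.']}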