Example #1
class LanguageSchema(Schema):
    """Language schema."""

    id = SanitizedUnicode(required=True)
    title = fields.Raw(dump_only=True)
    description = fields.Raw(dump_only=True)
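
fields.Raw passes the value through untouched on dump, which is why it fits these multilingual title/description objects. A minimal usage sketch (marshmallow 3.x assumed; fields.String stands in for Invenio's SanitizedUnicode):

from marshmallow import Schema, fields

class LanguageSketch(Schema):
    id = fields.String(required=True)
    title = fields.Raw(dump_only=True)  # dumped verbatim, ignored on load

record = {"id": "en", "title": {"en": "English", "de": "Englisch"}}
print(LanguageSketch().dump(record))
# {'id': 'en', 'title': {'en': 'English', 'de': 'Englisch'}}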
Example #2
class ProductSchema(ma.Schema):
    id = fields.Integer(dump_only=True)
    timestamp = fields.String(required=True)
    data = fields.Raw()
    name = fields.String(required=True)
Example #3
def convert_default(self, field, **params):
    """Return raw field."""
    for klass, ma_field in self.TYPE_MAPPING:
        if isinstance(field, klass):
            return ma_field(**params)
    return fields.Raw(**params)
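
The pattern above walks a type mapping and falls back to fields.Raw for anything unrecognized. A standalone sketch of the same fallback, adapted to plain Python values (the TYPE_MAPPING below is an illustrative assumption, not the original class's mapping):

from marshmallow import fields

TYPE_MAPPING = [(bool, fields.Boolean), (int, fields.Integer), (str, fields.String)]

def convert_default(value, **params):
    # Return the first matching field type; bool is checked before int
    # because bool is a subclass of int.
    for klass, ma_field in TYPE_MAPPING:
        if isinstance(value, klass):
            return ma_field(**params)
    return fields.Raw(**params)

print(type(convert_default(7)).__name__)         # Integer
print(type(convert_default(object())).__name__)  # Raw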
Example #4
class APIErrorSchema(APIMessageSchema):
    code = fields.Integer()
    errors = fields.Raw()
Example #5
class SampleSchema(Schema):
    """The required request schema for the service."""
    text = fields.Raw(validate=(lambda obj: type(obj) in [str, list]))
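
A validator that returns False makes marshmallow raise a ValidationError, so this schema accepts strings and lists on load and rejects everything else. A quick check (marshmallow 3.x assumed):

from marshmallow import Schema, fields, ValidationError

class SampleSketch(Schema):
    text = fields.Raw(validate=(lambda obj: type(obj) in [str, list]))

print(SampleSketch().load({"text": "hello"}))     # {'text': 'hello'}
print(SampleSketch().load({"text": ["a", "b"]}))  # {'text': ['a', 'b']}
try:
    SampleSketch().load({"text": 42})
except ValidationError as err:
    print(err.messages)  # {'text': ['Invalid value.']}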
Example #6
class PaginatedListSchema(Schema):
    __alias__ = "{}_list".format(ns.subject_name)
    items = fields.List(fields.Nested(item_schema), required=True)
    _links = fields.Raw()
Example #7
class Author(Schema):
    display_name = fields.Raw()
    family_name = fields.Raw()
    given_name = fields.Raw()
    native_name = fields.Raw()
    public_emails = fields.Raw()
    orcid = fields.Raw()

    status = fields.Raw()
    arxiv_categories = fields.Raw()
    websites = fields.Raw()
    twitter = fields.Raw()
    blog = fields.Raw()
    linkedin = fields.Raw()

    positions = fields.Raw()
    project_membership = fields.Raw()
    advisors = fields.Raw()

    comments = fields.Raw()

    acquisition_source = fields.Raw()
    bai = fields.Raw()

    @pre_dump
    def before_dump(self, data):
        family_name, given_name = self.get_name_splitted(data)
        return {
            'advisors': get_value(data, 'advisors', default=missing),
            'acquisition_source': get_value(
                data, 'acquisition_source', default=missing),
            'arxiv_categories': get_value(
                data, 'arxiv_categories', default=missing),
            'blog': self.get_first_or_missing(
                self.get_value_by_description_key(data.get('urls', []), 'blog')),
            'display_name': get_value(
                data, 'name.preferred_name', default=missing),
            'family_name': self.get_value_or_missing(family_name),
            'given_name': self.get_value_or_missing(given_name),
            'linkedin': self.get_first_or_missing(
                get_values_for_schema(data.get('ids', []), 'LINKEDIN')),
            'native_name': get_value(
                data, 'name.native_names[0]', default=missing),
            'orcid': self.get_first_or_missing(
                get_values_for_schema(data.get('ids', []), 'ORCID')),
            'positions': get_value(data, 'positions', default=missing),
            'project_membership': get_value(
                data, 'project_membership', default=missing),
            'public_emails': get_value(
                data, 'email_addresses.value', default=missing),
            'status': get_value(data, 'status', default=missing),
            'twitter': self.get_first_or_missing(
                get_values_for_schema(data.get('ids', []), 'TWITTER')),
            'websites': get_value(data, 'urls.value', default=missing),
        }

    def get_name_splitted(self, data):
        name = get_value(data, 'name.value')
        if not name:
            return missing, missing

        name = name.split(',')
        if len(name) > 1:
            return name[0].strip(), name[1].strip()
        return missing, name[0].strip()

    def get_value_or_missing(self, value):
        if value:
            return value
        return missing

    def get_first_or_missing(self, value):
        if value:
            return value.pop()
        return missing

    def get_full_name(self, family_name, given_name):
        if given_name and family_name:
            return u'{}, {}'.format(family_name, given_name)
        return given_name or family_name

    def get_value_by_description_key(self, data, value):
        return [item.get('value') for item in data if item.get('description') == value]

    @post_load
    def build_author(self, data):
        author = AuthorBuilder()

        for advisor in data.get('advisors', []):
            author.add_advisor(**advisor)

        for arxiv_category in data.get('arxiv_categories', []):
            author.add_arxiv_category(arxiv_category)

        blog = data.get('blog')
        author.add_blog(blog)

        comments = data.get('comments')
        author.add_private_note(comments)

        display_name = data.get('display_name')
        author.set_display_name(display_name)

        given_name = data.get('given_name')
        family_name = data.get('family_name')
        full_name = self.get_full_name(family_name, given_name)
        author.set_name(full_name)

        linkedin = data.get('linkedin')
        author.add_linkedin(linkedin)

        orcid = data.get('orcid')
        author.add_orcid(orcid)

        bai = data.get('bai')
        author.add_bai(bai)

        native_name = data.get('native_name')
        author.add_native_name(native_name)

        for position in data.get('positions', []):
            institution = position.get('institution')
            start_date = position.get('start_date')
            end_date = position.get('end_date')
            rank = position.get('rank')
            record = position.get('record')
            curated_relation = position.get('curated_relation', False)
            current = position.get('current', False)

            author.add_institution(
                institution,
                start_date=start_date,
                end_date=end_date,
                rank=rank,
                record=record,
                curated=curated_relation,
                current=current
            )

        for project in data.get('project_membership', []):
            name = project.get('name')
            record = project.get('record')
            start_date = project.get('start_date')
            end_date = project.get('end_date')
            curated_relation = project.get('curated_relation', False)
            current = project.get('current', False)

            author.add_project(
                name,
                record=record,
                start_date=start_date,
                end_date=end_date,
                curated=curated_relation,
                current=current
            )

        for email in data.get('public_emails', []):
            author.add_email_address(email)

        status = data.get('status')
        author.set_status(status)

        twitter = data.get('twitter')
        author.add_twitter(twitter)

        for website in data.get('websites', []):
            author.add_url(website)

        acquisition_source = data.get('acquisition_source')

        if acquisition_source:
            method = acquisition_source.get('method')
            submission_number = acquisition_source.get('submission_number')
            internal_uid = acquisition_source.get('internal_uid')
            email = acquisition_source.get('email')
            orcid = acquisition_source.get('orcid')
            source = acquisition_source.get('source')
            datetime = acquisition_source.get('datetime')
            author.add_acquisition_source(
                method,
                submission_number=submission_number,
                internal_uid=internal_uid,
                email=email,
                orcid=orcid,
                source=source,
                datetime=datetime,
            )

        return author.obj
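
The shape of this schema, @pre_dump flattening the record into form fields and @post_load feeding a builder, distills to a few lines. A hedged sketch with marshmallow 3.x hook signatures and a plain dict standing in for AuthorBuilder:

from marshmallow import Schema, fields, pre_dump, post_load

class MiniAuthor(Schema):
    display_name = fields.Raw()

    @pre_dump
    def before_dump(self, data, **kwargs):
        # Reshape the raw record into the flat form the fields expect.
        return {"display_name": data.get("name", {}).get("preferred_name")}

    @post_load
    def build_author(self, data, **kwargs):
        # Stand-in for AuthorBuilder(): turn validated input into a domain object.
        return {"built": True, **data}

print(MiniAuthor().dump({"name": {"preferred_name": "Doe, Jane"}}))
# {'display_name': 'Doe, Jane'}
print(MiniAuthor().load({"display_name": "Doe, Jane"}))
# {'built': True, 'display_name': 'Doe, Jane'}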
Example #8
class Seminar(Schema):
    additional_info = fields.Raw()
    address = fields.Raw()
    speakers = fields.Raw()
    contacts = fields.Raw()
    dates = fields.Raw()
    timezone = fields.Raw()
    abstract = fields.Raw()
    field_of_interest = fields.Raw()
    keywords = fields.Raw()
    name = fields.Raw()
    series_name = fields.Raw()
    series_number = fields.Raw()
    websites = fields.Raw()
    material_urls = fields.Raw()
    join_urls = fields.Raw()
    captioned = fields.Raw()
    literature_records = fields.Raw()

    @pre_dump
    def convert_to_form_data(self, data):
        speakers = data.get_value("speakers", [])
        for speaker in speakers:
            affiliation = get_value(speaker, "affiliations[0]")
            if affiliation:
                affiliation_value = affiliation.get("value")
                affiliation_record = affiliation.get("record")
                speaker["affiliation"] = affiliation_value
                if affiliation_record:
                    speaker["affiliation_record"] = affiliation_record
                del speaker["affiliations"]

        address = data.get_value("address")
        if address and "country_code" in address:
            address["country"] = country_code_to_name(address["country_code"])
            del address["country_code"]

        if address and "cities" in address:
            address["city"] = get_value(address, "cities[0]")
            del address["cities"]

        timezone = data.get("timezone")

        start_datetime = data.get("start_datetime")
        form_start_datetime = iso_utc_to_local_form_datetime(start_datetime, timezone)

        end_datetime = data.get("end_datetime")
        form_end_datetime = iso_utc_to_local_form_datetime(end_datetime, timezone)

        literature_records = [
            PidStoreBase.get_pid_from_record_uri(rec["record"]["$ref"])[1]
            for rec in data.get("literature_records", [])
        ]

        processed_data = {
            "name": data.get_value("title.title", missing),
            "additional_info": data.get_value("public_notes[0].value", missing),
            "address": address or missing,
            "speakers": speakers,
            "contacts": data.get_value("contact_details", missing),
            "series_name": data.get_value("series[0].name", missing),
            "series_number": data.get_value("series[0].number", missing),
            "field_of_interest": data.get_value("inspire_categories.term", missing),
            "dates": [form_start_datetime, form_end_datetime],
            "websites": data.get_value("urls.value", missing),
            "material_urls": data.get_value("material_urls", missing),
            "join_urls": data.get_value("join_urls", missing),
            "captioned": data.get("captioned", missing),
            "timezone": timezone,
            "abstract": data.get_value("abstract.value", missing),
            "keywords": data.get_value("keywords.value", missing),
            "literature_records": literature_records or missing,
        }
        return processed_data

    @post_load
    def build_seminar(self, data) -> dict:
        builder = SeminarBuilder()
        builder.set_title(title=data.get("name"))
        builder.add_inspire_categories(data.get("field_of_interest", []))
        builder.add_public_note(value=data.get("additional_info", ""))
        builder.add_series(
            name=data.get("series_name"), number=data.get("series_number")
        )

        timezone = data.get("timezone")
        builder.set_timezone(timezone)

        start_datetime = get_value(data, "dates[0]")
        start_datetime_utc = local_form_datetime_to_iso_utc(start_datetime, timezone)
        builder.set_start_datetime(start_datetime_utc)

        end_datetime = get_value(data, "dates[1]")
        end_datetime_utc = local_form_datetime_to_iso_utc(end_datetime, timezone)
        builder.set_end_datetime(end_datetime_utc)

        address = data.get("address")
        if address:
            builder.set_address(
                cities=[address.get("city")],
                state=address.get("state"),
                place_name=address.get("venue"),
                country_code=country_name_to_code(address.get("country")),
            )

        abstract = data.get("abstract")
        if abstract:
            builder.set_abstract(value=abstract)

        captioned = data.get("captioned")
        if captioned:
            builder.set_captioned(captioned)

        for contact in data.get("contacts", []):
            builder.add_contact(**contact)

        for speaker in data.get("speakers", []):
            name = speaker.get("name")
            record = speaker.get("record")
            affiliation_value = speaker.get("affiliation")
            affiliation_record = speaker.get("affiliation_record")

            affiliation = {}
            if affiliation_value:
                affiliation["value"] = affiliation_value

            if affiliation_record:
                affiliation["record"] = affiliation_record

            affiliations = [affiliation] if affiliation else None

            builder.add_speaker(name=name, record=record, affiliations=affiliations)

        for url in data.get("material_urls", []):
            builder.add_material_url(**url)

        for url in data.get("join_urls", []):
            builder.add_join_url(**url)

        for website in data.get("websites", []):
            builder.add_url(website)

        for keyword in data.get("keywords", []):
            builder.add_keyword(value=keyword)

        for literature_record_pid in data.get("literature_records", []):
            try:
                LiteratureRecord.get_record_by_pid_value(literature_record_pid)
            except PIDDoesNotExistError:
                raise InvalidDataError(
                    f"{literature_record_pid} is not a valid literature record."
                )
            record = {
                "$ref": f"{get_inspirehep_url()}/api/literature/{literature_record_pid}"
            }
            builder.add_literature_record(record=record)

        builder.record["$schema"] = url_for(
            "invenio_jsonschemas.get_schema",
            schema_path="records/seminars.json",
            _external=True,
        )

        return builder.record
Example #9
class ExperimentsRawSchema(RecordBaseSchema):
    number_of_papers = fields.Raw()
Example #10
class LiteratureElasticSearchSchema(ElasticSearchBaseSchema,
                                    LiteratureRawSchema):
    """Elasticsearch serialzier"""

    _oai = fields.Method("get_oai", dump_only=True)
    _ui_display = fields.Method("get_ui_display", dump_only=True)
    _latex_us_display = fields.Method("get_latex_us_display", dump_only=True)
    _latex_eu_display = fields.Method("get_latex_eu_display", dump_only=True)
    _bibtex_display = fields.Method("get_bibtex_display", dump_only=True)
    _cv_format = fields.Method("get_cv_format", dump_only=True)
    abstracts = fields.Nested(AbstractSource, dump_only=True, many=True)
    author_count = fields.Method("get_author_count")
    authors = fields.Nested(AuthorsInfoSchemaForES, dump_only=True, many=True)
    supervisors = fields.Nested(SupervisorSchema, dump_only=True, many=True)
    first_author = fields.Nested(FirstAuthorSchemaV1, dump_only=True)
    bookautocomplete = fields.Method("get_bookautocomplete")
    earliest_date = fields.Raw(dump_only=True, default=missing)
    facet_inspire_doc_type = fields.Method("get_inspire_document_type")
    facet_author_name = fields.Method("get_facet_author_name")
    id_field = fields.Integer(dump_only=True,
                              dump_to="id",
                              attribute="control_number")
    thesis_info = fields.Nested(ThesisInfoSchemaForESV1, dump_only=True)
    referenced_authors_bais = fields.Method("get_referenced_authors_bais",
                                            dump_only=True)
    primary_arxiv_category = fields.Method("get_primary_arxiv_category",
                                           dump_only=True)

    @staticmethod
    def get_referenced_authors_bais(record):
        return [
            result.author_id
            for result in db.session.query(RecordsAuthors.author_id).filter(
                RecordsAuthors.id_type == "INSPIRE BAI",
                RecordsAuthors.record_id == RecordCitations.cited_id,
                RecordCitations.citer_id == record.id,
            ).distinct(RecordsAuthors.author_id).all()
        ]

    def get_ui_display(self, record):
        return orjson.dumps(
            LiteratureDetailSchema().dump(record).data).decode("utf-8")

    def get_latex_us_display(self, record):
        from inspirehep.records.serializers.latex import latex_US

        try:
            return latex_US.latex_template().render(data=latex_US.dump(record),
                                                    format=latex_US.format)
        except Exception:
            LOGGER.exception("Cannot get latex us display", record=record)
            return " "

    def get_latex_eu_display(self, record):
        from inspirehep.records.serializers.latex import latex_EU

        try:
            return latex_EU.latex_template().render(data=latex_EU.dump(record),
                                                    format=latex_EU.format)
        except Exception:
            LOGGER.exception("Cannot get latex eu display", record=record)
            return " "

    def get_bibtex_display(self, record):
        from inspirehep.records.serializers.bibtex import literature_bibtex

        return literature_bibtex.serialize(None, record)

    def get_cv_format(self, record):
        from inspirehep.records.serializers.cv import literature_cv_html

        try:
            return literature_cv_html.serialize_inner(None, record)
        except Exception:
            LOGGER.exception("Cannot get cv format", record=record)
            return " "

    def get_author_count(self, record):
        """Prepares record for ``author_count`` field."""
        authors = record.get("authors", [])
        return len(authors)

    def get_inspire_document_type(self, record):
        """Prepare record for ``facet_inspire_doc_type`` field."""
        result = []

        result.extend(record.get("document_type", []))
        result.extend(record.get("publication_type", []))
        if "refereed" in record and record["refereed"]:
            result.append("published")
        return result

    def get_facet_author_name(self, record):
        """Prepare record for ``facet_author_name`` field."""
        authors_with_record = list(
            InspireRecord.get_linked_records_from_dict_field(
                record, "authors.record"))
        found_authors_control_numbers = set([
            author["control_number"] for author in authors_with_record
            if author.get("control_number")
        ])
        authors_without_record = [
            author for author in record.get("authors", [])
            if "record" not in author or int(
                PidStoreBase.get_pid_from_record_uri(author["record"].get(
                    "$ref"))[1]) not in found_authors_control_numbers
        ]
        result = []

        for author in authors_with_record:
            result.append(get_facet_author_name_for_author(author))

        for author in authors_without_record:
            result.append("NOREC_{}".format(
                get_display_name_for_author_name(author["full_name"])))

        return result

    def get_bookautocomplete(self, record):
        """prepare ```bookautocomplete`` field."""
        paths = ["imprints.date", "imprints.publisher", "isbns.value"]

        authors = force_list(record.get_value("authors.full_name", default=[]))
        titles = force_list(record.get_value("titles.title", default=[]))

        input_values = list(
            chain.from_iterable(
                force_list(record.get_value(path, default=[]))
                for path in paths))
        input_values.extend(authors)
        input_values.extend(titles)
        input_values = [el for el in input_values if el]

        return {"input": input_values}

    def get_oai(self, record):
        sets = []
        if is_cds_set(record):
            sets.append(current_app.config["OAI_SET_CDS"])
        if is_cern_arxiv_set(record):
            sets.append(current_app.config["OAI_SET_CERN_ARXIV"])

        if sets:
            return {
                "id": f"oai:inspirehep.net:{record['control_number']}",
                "sets": sets,
                "updated": record.updated,
            }
        return missing

    @staticmethod
    def get_primary_arxiv_category(record):
        arxiv_categories = get_value(record, "arxiv_eprints.categories")
        if not arxiv_categories:
            return missing
        arxiv_primary_categories = {
            categories[0]
            for categories in arxiv_categories
        }
        return list(arxiv_primary_categories)

    @pre_dump
    def separate_authors_and_supervisors_and_populate_first_author(self, data):
        if "authors" in data:
            data["supervisors"] = [
                author for author in data["authors"]
                if "supervisor" in author.get("inspire_roles", [])
            ]
            data["authors"] = [
                author for author in data["authors"]
                if "supervisor" not in author.get("inspire_roles", [])
            ]
            if data["authors"]:
                data["first_author"] = data["authors"][0]
        return data
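
fields.Method, used heavily above, delegates serialization to a named method on the schema. In miniature (leaving aside the marshmallow 2.x dump_to/attribute kwargs):

from marshmallow import Schema, fields

class CountSketch(Schema):
    author_count = fields.Method("get_author_count")

    def get_author_count(self, record):
        return len(record.get("authors", []))

print(CountSketch().dump({"authors": [{"full_name": "A"}, {"full_name": "B"}]}))
# {'author_count': 2}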
Example #11
class FileSchema(Schema):
    file = fields.Raw(
        description="File upload",
        required=True,
        type="file",
    )
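
Passing extra keyword arguments such as type="file" directly to a field is a marshmallow 2.x-era convention for annotating the generated OpenAPI spec; under marshmallow 3.x the same extras would go into metadata=, roughly:

from marshmallow import Schema, fields

class FileSchemaV3(Schema):
    file = fields.Raw(
        required=True,
        metadata={"description": "File upload", "type": "file"},
    )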
Example #12
class Meta:
    additional_properties = fields.Raw()
Example #13
class DictMatchAllSchema(AssertionSchema):

    key_weightings = fields.Raw()
    matches = fields.Function(lambda obj: {'matches': obj.matches})
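
fields.Function serializes through a callable that receives the whole object being dumped. A self-contained sketch of the matches field (plain Schema standing in for AssertionSchema):

from types import SimpleNamespace
from marshmallow import Schema, fields

class DictMatchAllSketch(Schema):
    key_weightings = fields.Raw()
    matches = fields.Function(lambda obj: {'matches': obj.matches})

result = SimpleNamespace(key_weightings={"name": 0.5}, matches=3)
print(DictMatchAllSketch().dump(result))
# {'key_weightings': {'name': 0.5}, 'matches': {'matches': 3}}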
Example #14
    # Tail of a tag validator (truncated in this snippet; likely
    # validate_tags, referenced by the Schema.from_dict call below).
    raise ValidationError(
        "Need to be either a string or a (nested) list of strings")


resultdict_entities = [
    # first level tag
    "feature",
    "setting",
    # seed connectivity
    "seed",
    # dual regression
    "map",
    "component",
    # atlas
    "atlas",
    # task
    "taskcontrast",
    # higher level tag
    "model",
    "contrast",
    # file descriptors
    "stat",
    "desc",
]

ResultdictTagsSchema = Schema.from_dict(
    OrderedDict([
        (entity, fields.Raw(validate=validate_tags))
        for entity in [*FuncTagsSchema().fields.keys(), *resultdict_entities]
    ]))
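
Schema.from_dict (marshmallow 3.x) builds a schema class at runtime from a field mapping, which is what makes the entity list above data-driven. A minimal sketch with a hypothetical validator standing in for validate_tags and FuncTagsSchema:

from collections import OrderedDict
from marshmallow import Schema, fields, ValidationError

def validate_tag(value):  # hypothetical stand-in for validate_tags
    if not isinstance(value, str):
        raise ValidationError("Need to be a string")

TagsSketchSchema = Schema.from_dict(
    OrderedDict(
        (entity, fields.Raw(validate=validate_tag))
        for entity in ["feature", "setting", "seed"]
    )
)

print(TagsSketchSchema().load({"feature": "corr"}))  # {'feature': 'corr'}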
Example #15
class WebPushRequestSchema(Schema):
    subscription = fields.Nested(WebPushSubscriptionSchema,
                                 load_from="token_info")
    headers = fields.Nested(WebPushBasicHeaderSchema)
    crypto_headers = PolyField(
        load_from="headers",
        deserialization_schema_selector=conditional_crypto_deserialize,
    )
    body = fields.Raw()
    token_info = fields.Raw()
    vapid_version = fields.String(required=False, missing=None)

    @validates('body')
    def validate_data(self, value):
        max_data = self.context["conf"].max_data
        if value and len(value) > max_data:
            raise InvalidRequest(
                "Data payload must be smaller than {}".format(max_data),
                errno=104,
            )

    @pre_load
    def token_prep(self, d):
        d["token_info"] = dict(
            api_ver=d["path_kwargs"].get("api_ver"),
            token=d["path_kwargs"].get("token"),
            ckey_header=d["headers"].get("crypto-key", ""),
            auth_header=d["headers"].get("authorization", ""),
        )
        return d

    def validate_auth(self, d):
        crypto_exceptions = [
            KeyError, ValueError, TypeError, VapidAuthException
        ]

        if self.context['conf'].use_cryptography:
            crypto_exceptions.append(InvalidSignature)
        else:
            crypto_exceptions.extend([JOSEError, JWTError, AssertionError])

        auth = d["headers"].get("authorization")
        needs_auth = d["token_info"]["api_ver"] == "v2"
        if not needs_auth and not auth:
            return
        try:
            vapid_auth = parse_auth_header(auth)
            token = vapid_auth['t']
            d["vapid_version"] = "draft{:0>2}".format(vapid_auth['version'])
            if vapid_auth['version'] == 2:
                public_key = vapid_auth['k']
            else:
                public_key = d["subscription"].get("public_key")
            jwt = extract_jwt(token,
                              public_key,
                              is_trusted=self.context['conf'].enable_tls_auth,
                              use_crypto=self.context['conf'].use_cryptography)
            if not isinstance(jwt, Dict):
                raise InvalidRequest("Invalid Authorization Header",
                                     status_code=401,
                                     errno=109,
                                     headers={"www-authenticate": PREF_SCHEME})
        except tuple(crypto_exceptions):
            raise InvalidRequest("Invalid Authorization Header",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})
        if "exp" not in jwt:
            raise InvalidRequest("Invalid bearer token: No expiration",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})

        try:
            jwt_expires = int(jwt['exp'])
        except (TypeError, ValueError):
            raise InvalidRequest("Invalid bearer token: Invalid expiration",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})

        now = time.time()
        jwt_has_expired = now > jwt_expires
        if jwt_has_expired:
            raise InvalidRequest("Invalid bearer token: Auth expired",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})
        jwt_too_far_in_future = (jwt_expires - now) > (60 * 60 * 24)
        if jwt_too_far_in_future:
            raise InvalidRequest(
                "Invalid bearer token: Auth > 24 hours in "
                "the future",
                status_code=401,
                errno=109,
                headers={"www-authenticate": PREF_SCHEME})
        jwt_crypto_key = base64url_encode(public_key)
        d["jwt"] = dict(jwt_crypto_key=jwt_crypto_key, jwt_data=jwt)

    @post_load
    def fixup_output(self, d):
        # Verify authorization
        # Note: This has to be done here, since schema validation takes place
        #       before nested schemas, and in this case we need all the nested
        #       schema logic to run first.
        self.validate_auth(d)

        # Merge crypto headers back in
        if d["crypto_headers"]:
            d["headers"].update({
                k.replace("_", "-"): v
                for k, v in d["crypto_headers"].items()
            })

        # Base64-encode data for Web Push
        d["body"] = base64url_encode(d["body"])

        # Set the notification based on the validated request schema data
        d["notification"] = WebPushNotification.from_webpush_request_schema(
            data=d,
            fernet=self.context["conf"].fernet,
            legacy=self.context["conf"]._notification_legacy,
        )

        return d
Example #16
class QuerySchema(Schema):
    itemsPerPage = fields.Integer(required=True)
    offset = fields.Integer(required=True)
    query = fields.Raw(required=True)
Example #17
class WebPushSubscriptionSchema(Schema):
    uaid = fields.UUID(required=True)
    chid = fields.UUID(required=True)
    public_key = fields.Raw(missing=None)

    @pre_load
    def extract_subscription(self, d):
        try:
            result = self.context["conf"].parse_endpoint(
                self.context["metrics"],
                token=d["token"],
                version=d["api_ver"],
                ckey_header=d["ckey_header"],
                auth_header=d["auth_header"],
            )
        except (VapidAuthException):
            raise InvalidRequest("missing authorization header",
                                 status_code=401,
                                 errno=109)
        except (InvalidTokenException, InvalidToken):
            raise InvalidRequest("invalid token", status_code=404, errno=102)
        return result

    @validates_schema(skip_on_field_errors=True)
    def validate_uaid_month_and_chid(self, d):
        db = self.context["db"]  # type: DatabaseManager

        try:
            result = db.router.get_uaid(d["uaid"].hex)
        except ItemNotFound:
            raise InvalidRequest("UAID not found", status_code=410, errno=103)

        # We must have a router_type to validate the user
        router_type = result.get("router_type")
        if router_type not in VALID_ROUTER_TYPES:
            self.context["log"].debug(format="Dropping User",
                                      code=102,
                                      uaid_hash=hasher(result["uaid"]),
                                      uaid_record=repr(result))
            self.context["metrics"].increment("updates.drop_user",
                                              tags=make_tags(errno=102))
            self.context["db"].router.drop_user(result["uaid"])
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)

        if (router_type == "gcm" and 'senderID' not in result.get(
                'router_data', {}).get("creds", {})):
            # Make sure we note that this record is bad.
            result['critical_failure'] = \
                result.get('critical_failure', "Missing SenderID")
            db.router.register_user(result)

        if (router_type == "fcm"
                and 'app_id' not in result.get('router_data', {})):
            # Make sure we note that this record is bad.
            result['critical_failure'] = \
                result.get('critical_failure', "Missing SenderID")
            db.router.register_user(result)

        if result.get("critical_failure"):
            raise InvalidRequest("Critical Failure: %s" %
                                 result.get("critical_failure"),
                                 status_code=410,
                                 errno=105)
        # Some stored user records are marked as "simplepush".
        # If you encounter one, may need to tweak it a bit to get it as
        # a valid WebPush record.
        if result["router_type"] == "simplepush":
            result["router_type"] = "webpush"

        if result["router_type"] == "webpush":
            self._validate_webpush(d, result)

        # Propagate the looked up user data back out
        d["user_data"] = result

    def _validate_webpush(self, d, result):
        db = self.context["db"]  # type: DatabaseManager
        log = self.context["log"]  # type: Logger
        metrics = self.context["metrics"]  # type: Metrics
        channel_id = normalize_id(d["chid"])
        uaid = result["uaid"]
        if 'current_month' not in result:
            log.debug(format="Dropping User",
                      code=102,
                      uaid_hash=hasher(uaid),
                      uaid_record=repr(result))
            metrics.increment("updates.drop_user", tags=make_tags(errno=102))
            db.router.drop_user(uaid)
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)

        month_table = result["current_month"]
        if month_table not in db.message_tables:
            log.debug(format="Dropping User",
                      code=103,
                      uaid_hash=hasher(uaid),
                      uaid_record=repr(result))
            metrics.increment("updates.drop_user", tags=make_tags(errno=103))
            db.router.drop_user(uaid)
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)
        msg = db.message_table(month_table)
        exists, chans = msg.all_channels(uaid=uaid)

        if (not exists or channel_id.lower() not in map(
                lambda x: normalize_id(x), chans)):
            log.debug("Unknown subscription: {channel_id}",
                      channel_id=channel_id)
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)
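
@validates_schema(skip_on_field_errors=True), as used above, only runs once the individual fields have validated, so it can safely assume both UUIDs parsed. A minimal sketch of that behavior:

import uuid
from marshmallow import Schema, fields, validates_schema, ValidationError

class SubscriptionSketch(Schema):
    uaid = fields.UUID(required=True)
    chid = fields.UUID(required=True)

    @validates_schema(skip_on_field_errors=True)
    def check_pair(self, data, **kwargs):
        # Only reached when uaid and chid are valid UUIDs.
        if data["uaid"] == data["chid"]:
            raise ValidationError("uaid and chid must differ")

u = str(uuid.uuid4())
try:
    SubscriptionSketch().load({"uaid": u, "chid": u})
except ValidationError as err:
    print(err.messages)  # {'_schema': ['uaid and chid must differ']}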
Example #18
class RelationsSchema(Schema):
    """Relations schema."""

    version = fields.Raw()
Example #19
class ChartDataQueryObjectSchema(Schema):
    filters = fields.List(fields.Nested(ChartDataFilterSchema), required=False)
    granularity = fields.String(
        description=
        "Name of temporal column used for time filtering. For legacy Druid "
        "datasources this defines the time grain.", )
    granularity_sqla = fields.String(
        description="Name of temporal column used for time filtering for SQL "
        "datasources. This field is deprecated, use `granularity` "
        "instead.",
        deprecated=True,
    )
    groupby = fields.List(
        fields.String(description="Columns by which to group the query.", ), )
    metrics = fields.List(
        fields.Raw(),
        description="Aggregate expressions. Metrics can be passed as both "
        "references to datasource metrics (strings), or ad-hoc metrics"
        "which are defined only within the query object. See "
        "`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
    )
    post_processing = fields.List(
        fields.Nested(ChartDataPostProcessingOperationSchema, allow_none=True),
        description=
        "Post processing operations to be applied to the result set. "
        "Operations are applied to the result set in sequential order.",
    )
    time_range = fields.String(
        description="A time rage, either expressed as a colon separated string "
        "`since : until` or human readable freeform. Valid formats for "
        "`since` and `until` are: \n"
        "- ISO 8601\n"
        "- X days/years/hours/day/year/weeks\n"
        "- X days/years/hours/day/year/weeks ago\n"
        "- X days/years/hours/day/year/weeks from now\n"
        "\n"
        "Additionally, the following freeform can be used:\n"
        "\n"
        "- Last day\n"
        "- Last week\n"
        "- Last month\n"
        "- Last quarter\n"
        "- Last year\n"
        "- No filter\n"
        "- Last X seconds/minutes/hours/days/weeks/months/years\n"
        "- Next X seconds/minutes/hours/days/weeks/months/years\n",
        example="Last week",
    )
    time_shift = fields.String(
        description="A human-readable date/time string. "
        "Please refer to [parsdatetime](https://github.com/bear/parsedatetime) "
        "documentation for details on valid values.", )
    is_timeseries = fields.Boolean(
        description="Is the `query_object` a timeseries.", required=False)
    timeseries_limit = fields.Integer(
        description="Maximum row count for timeseries queries. Default: `0`", )
    timeseries_limit_metric = fields.Raw(
        description="Metric used to limit timeseries queries by.",
        allow_none=True,
    )
    row_limit = fields.Integer(
        description='Maximum row count. Default: `config["ROW_LIMIT"]`',
        validate=[
            Range(min=1,
                  error=_("`row_limit` must be greater than or equal to 1"))
        ],
    )
    row_offset = fields.Integer(
        description="Number of rows to skip. Default: `0`",
        validate=[
            Range(min=0,
                  error=_("`row_offset` must be greater than or equal to 0"))
        ],
    )
    order_desc = fields.Boolean(description="Reverse order. Default: `false`",
                                required=False)
    extras = fields.Nested(ChartDataExtrasSchema, required=False)
    columns = fields.List(
        fields.String(),
        description="",
    )
    orderby = fields.List(
        fields.List(fields.Raw()),
        description=
        "Expects a list of lists where the first element is the column "
        "name which to sort by, and the second element is a boolean ",
        example=[["my_col_1", False], ["my_col_2", True]],
    )
    where = fields.String(
        description="WHERE clause to be added to queries using AND operator."
        "This field is deprecated and should be passed to `extras`.",
        deprecated=True,
    )
    having = fields.String(
        description="HAVING clause to be added to aggregate queries using "
        "AND operator. This field is deprecated and should be passed "
        "to `extras`.",
        deprecated=True,
    )
    having_filters = fields.List(
        fields.Dict(),
        description=
        "HAVING filters to be added to legacy Druid datasource queries. "
        "This field is deprecated and should be passed to `extras` "
        "as `having_druid`.",
        deprecated=True,
    )
Example #20
class ChartDataProphetOptionsSchema(
        ChartDataPostProcessingOperationOptionsSchema):
    """
    Prophet operation config.
    """

    time_grain = fields.String(
        description=
        "Time grain used to specify time period increments in prediction. "
        "Supports [ISO 8601](https://en.wikipedia.org/wiki/ISO_8601#Durations) "
        "durations.",
        validate=validate.OneOf(choices=[
            i for i in {
                **builtin_time_grains,
                **config["TIME_GRAIN_ADDONS"]
            }.keys() if i
        ]),
        example="P1D",
        required=True,
    )
    periods = fields.Integer(
        description=
        "Time periods (in units of `time_grain`) to predict into the future",
        min=0,
        example=7,
        required=True,
    )
    confidence_interval = fields.Float(
        description="Width of predicted confidence interval",
        validate=[
            Range(
                min=0,
                max=1,
                min_inclusive=False,
                max_inclusive=False,
                error=_(
                    "`confidence_interval` must be between 0 and 1 (exclusive)"
                ),
            )
        ],
        example=0.8,
        required=True,
    )
    yearly_seasonality = fields.Raw(
        # TODO: add correct union type once supported by Marshmallow
        description="Should yearly seasonality be applied. "
        "An integer value will specify Fourier order of seasonality, `None` will "
        "automatically detect seasonality.",
        example=False,
    )
    weekly_seasonality = fields.Raw(
        # TODO: add correct union type once supported by Marshmallow
        description="Should weekly seasonality be applied. "
        "An integer value will specify Fourier order of seasonality, `None` will "
        "automatically detect seasonality.",
        example=False,
    )
    monthly_seasonality = fields.Raw(
        # TODO: add correct union type once supported by Marshmallow
        description="Should monthly seasonality be applied. "
        "An integer value will specify Fourier order of seasonality, `None` will "
        "automatically detect seasonality.",
        example=False,
    )
Example #21
class SchemaOneApp(SchemaListApp):
    instance = fields.Raw(required=True)
    solution = fields.Raw(required=True)
    config = fields.Raw(required=True)
Example #22
class ChartDataBoxplotOptionsSchema(
        ChartDataPostProcessingOperationOptionsSchema):
    """
    Boxplot operation config.
    """

    groupby = fields.List(
        fields.String(description="Columns by which to group the query.", ),
        allow_none=True,
    )

    metrics = fields.List(
        fields.Raw(),
        description="Aggregate expressions. Metrics can be passed as both "
        "references to datasource metrics (strings), or ad-hoc metrics"
        "which are defined only within the query object. See "
        "`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. "
        "When metrics is undefined or null, the query is executed without a groupby. "
        "However, when metrics is an array (length >= 0), a groupby clause is added to "
        "the query.",
        allow_none=True,
    )

    whisker_type = fields.String(
        description="Whisker type. Any numpy function will work.",
        validate=validate.OneOf(
            choices=([val.value for val in PostProcessingBoxplotWhiskerType])),
        required=True,
        example="tukey",
    )

    percentiles = fields.Tuple(
        (
            fields.Float(
                description="Lower percentile",
                validate=[
                    Range(
                        min=0,
                        max=100,
                        min_inclusive=False,
                        max_inclusive=False,
                        error=_(
                            "lower percentile must be greater than 0 and less "
                            "than 100. Must be lower than upper percentile."),
                    ),
                ],
            ),
            fields.Float(
                description="Upper percentile",
                validate=[
                    Range(
                        min=0,
                        max=100,
                        min_inclusive=False,
                        max_inclusive=False,
                        error=_(
                            "upper percentile must be greater than 0 and less "
                            "than 100. Must be higher than lower percentile."),
                    ),
                ],
            ),
        ),
        description="Upper and lower percentiles for percentile whisker type.",
        example=[1, 99],
    )
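
fields.Tuple validates each position against its own field and returns a Python tuple on load; stripped of the Range validators, the percentiles field behaves like this:

from marshmallow import Schema, fields

class PercentilesSketch(Schema):
    percentiles = fields.Tuple((fields.Float(), fields.Float()))

print(PercentilesSketch().load({"percentiles": [1, 99]}))
# {'percentiles': (1.0, 99.0)}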
Example #23
class APISingleResponseSchema(APIResponseSchema):
    """Inspiration: https://google.github.io/styleguide/jsoncstyleguide.xml"""

    data = fields.Raw()
Example #24
class AnnotationLayerSchema(Schema):
    annotationType = fields.String(
        description="Type of annotation layer",
        validate=validate.OneOf(choices=[ann.value for ann in AnnotationType]),
    )
    color = fields.String(
        description="Layer color",
        allow_none=True,
    )
    descriptionColumns = fields.List(
        fields.String(),
        description="Columns to use as the description. If none are provided, "
        "all will be shown.",
    )
    hideLine = fields.Boolean(
        description="Should line be hidden. Only applies to line annotations",
        allow_none=True,
    )
    intervalEndColumn = fields.String(
        description=(
            "Column containing end of interval. Only applies to interval layers"
        ),
        allow_none=True,
    )
    name = fields.String(description="Name of layer", required=True)
    opacity = fields.String(
        description="Opacity of layer",
        validate=validate.OneOf(choices=("", "opacityLow", "opacityMedium",
                                         "opacityHigh"), ),
        allow_none=True,
        required=False,
    )
    overrides = fields.Dict(
        keys=fields.String(
            description="Name of property to be overridden",
            validate=validate.OneOf(choices=("granularity", "time_grain_sqla",
                                             "time_range", "time_shift"), ),
        ),
        values=fields.Raw(allow_none=True),
        description="which properties should be overridable",
        allow_none=True,
    )
    show = fields.Boolean(description="Should the layer be shown",
                          required=True)
    showLabel = fields.Boolean(
        description="Should the label always be shown",
        allow_none=True,
    )
    showMarkers = fields.Boolean(
        description=
        "Should markers be shown. Only applies to line annotations.",
        required=True,
    )
    sourceType = fields.String(
        description="Type of source for annotation data",
        validate=validate.OneOf(choices=(
            "",
            "line",
            "NATIVE",
            "table",
        )),
    )
    style = fields.String(
        description="Line style. Only applies to time-series annotations",
        validate=validate.OneOf(choices=(
            "dashed",
            "dotted",
            "solid",
            "longDashed",
        )),
    )
    timeColumn = fields.String(
        description="Column with event date or interval start date",
        allow_none=True,
    )
    titleColumn = fields.String(
        description="Column with title",
        allow_none=True,
    )
    width = fields.Float(
        description="Width of annotation line",
        validate=[
            Range(
                min=0,
                min_inclusive=True,
                error=_("`width` must be greater or equal to 0"),
            )
        ],
    )
    value = fields.Raw(
        description="For formula annotations, this contains the formula. "
        "For other types, this is the primary key of the source object.",
        required=True,
    )
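
The overrides field shows fields.Dict validating both sides of a mapping: keys against a OneOf validator, values as fields.Raw. In miniature:

from marshmallow import Schema, fields, validate

class OverridesSketch(Schema):
    overrides = fields.Dict(
        keys=fields.String(validate=validate.OneOf(("time_range", "time_shift"))),
        values=fields.Raw(allow_none=True),
    )

print(OverridesSketch().load({"overrides": {"time_range": "Last week"}}))
# {'overrides': {'time_range': 'Last week'}}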
Example #25
class SimpleSchema(Schema):
    """Test schema."""

    titles = fields.Raw(attribute='metadata.titles')
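
attribute accepts a dot-delimited path, so the field reads from a nested location on dump while still serializing under the flat key:

from marshmallow import Schema, fields

class SimpleSketch(Schema):
    titles = fields.Raw(attribute='metadata.titles')

print(SimpleSketch().dump({"metadata": {"titles": [{"title": "Test"}]}}))
# {'titles': [{'title': 'Test'}]}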
Example #26
class WebPushRequestSchema(Schema):
    subscription = fields.Nested(WebPushSubscriptionSchema,
                                 load_from="token_info")
    headers = fields.Nested(WebPushBasicHeaderSchema)
    crypto_headers = PolyField(
        load_from="headers",
        deserialization_schema_selector=conditional_crypto_deserialize,
    )
    body = fields.Raw()
    token_info = fields.Raw()

    @validates('body')
    def validate_data(self, value):
        max_data = self.context["settings"].max_data
        if value and len(value) > max_data:
            raise InvalidRequest(
                "Data payload must be smaller than {}".format(max_data),
                errno=104,
            )

    @pre_load
    def token_prep(self, d):
        d["token_info"] = dict(
            api_ver=d["path_kwargs"].get("api_ver"),
            token=d["path_kwargs"].get("token"),
            ckey_header=d["headers"].get("crypto-key", ""),
            auth_header=d["headers"].get("authorization", ""),
        )
        return d

    def validate_auth(self, d):
        auth = d["headers"].get("authorization")
        needs_auth = d["token_info"]["api_ver"] == "v2"
        if not auth and not needs_auth:
            return

        public_key = d["subscription"].get("public_key")
        try:
            auth_type, token = auth.split(' ', 1)
        except ValueError:
            raise InvalidRequest("Invalid Authorization Header",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})

        # If it's not a bearer token containing what may be a JWT, stop
        if auth_type.lower() not in AUTH_SCHEMES or '.' not in token:
            if needs_auth:
                raise InvalidRequest("Missing Authorization Header",
                                     status_code=401,
                                     errno=109)
            return

        try:
            jwt = extract_jwt(token, public_key)
        except (ValueError, InvalidSignature, TypeError):
            raise InvalidRequest("Invalid Authorization Header",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})
        if "exp" not in jwt:
            raise InvalidRequest("Invalid bearer token: No expiration",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})

        try:
            jwt_expires = int(jwt['exp'])
        except ValueError:
            raise InvalidRequest("Invalid bearer token: Invalid expiration",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})

        now = time.time()
        jwt_has_expired = now > jwt_expires
        if jwt_has_expired:
            raise InvalidRequest("Invalid bearer token: Auth expired",
                                 status_code=401,
                                 errno=109,
                                 headers={"www-authenticate": PREF_SCHEME})
        jwt_too_far_in_future = (jwt_expires - now) > (60 * 60 * 24)
        if jwt_too_far_in_future:
            raise InvalidRequest(
                "Invalid bearer token: Auth > 24 hours in "
                "the future",
                status_code=401,
                errno=109,
                headers={"www-authenticate": PREF_SCHEME})
        jwt_crypto_key = base64url_encode(public_key)
        d["jwt"] = dict(jwt_crypto_key=jwt_crypto_key, jwt_data=jwt)

    @post_load
    def fixup_output(self, d):
        # Verify authorization
        # Note: This has to be done here, since schema validation takes place
        #       before nested schemas, and in this case we need all the nested
        #       schema logic to run first.
        self.validate_auth(d)

        # Merge crypto headers back in
        if d["crypto_headers"]:
            d["headers"].update({
                k.replace("_", "-"): v
                for k, v in d["crypto_headers"].items()
            })

        # Base64-encode data for Web Push
        d["body"] = base64url_encode(d["body"])

        # Set the notification based on the validated request schema data
        d["notification"] = WebPushNotification.from_webpush_request_schema(
            data=d,
            fernet=self.context["settings"].fernet,
            legacy=self.context["settings"]._notification_legacy,
        )

        return d
Example #27
class DatabaseParametersSchemaMixin:
    """
    Allow SQLAlchemy URI to be passed as separate parameters.

    This mixin is a first step in allowing the users to test, create and
    edit databases without having to know how to write a SQLAlchemy URI.
    Instead, each database defines the parameters that it takes (eg,
    username, password, host, etc.) and the SQLAlchemy URI is built from
    these parameters.

    When using this mixin make sure that `sqlalchemy_uri` is not required.
    """

    engine = fields.String(allow_none=True,
                           description="SQLAlchemy engine to use")
    parameters = fields.Dict(
        keys=fields.String(),
        values=fields.Raw(),
        description="DB-specific parameters for configuration",
    )
    configuration_method = EnumField(
        ConfigurationMethod,
        by_value=True,
        description=configuration_method_description,
        missing=ConfigurationMethod.SQLALCHEMY_FORM,
    )

    # pylint: disable=no-self-use, unused-argument
    @pre_load
    def build_sqlalchemy_uri(self, data: Dict[str, Any],
                             **kwargs: Any) -> Dict[str, Any]:
        """
        Build SQLAlchemy URI from separate parameters.

        This is used for databases that support being configured by individual
        parameters (eg, username, password, host, etc.), instead of requiring
        the constructed SQLAlchemy URI to be passed.
        """
        parameters = data.pop("parameters", {})
        engine = data.pop("engine", None)

        configuration_method = data.get("configuration_method")
        if configuration_method == ConfigurationMethod.DYNAMIC_FORM:
            engine_spec = get_engine_spec(engine)

            if not hasattr(engine_spec, "build_sqlalchemy_uri") or not hasattr(
                    engine_spec, "parameters_schema"):
                raise ValidationError([
                    _('Engine spec "InvalidEngine" does not support '
                      "being configured via individual parameters.")
                ])

            # validate parameters
            parameters = engine_spec.parameters_schema.load(
                parameters)  # type: ignore

            serialized_encrypted_extra = data.get("encrypted_extra", "{}")
            try:
                encrypted_extra = json.loads(serialized_encrypted_extra)
            except json.decoder.JSONDecodeError:
                encrypted_extra = {}

            data[
                "sqlalchemy_uri"] = engine_spec.build_sqlalchemy_uri(  # type: ignore
                    parameters, encrypted_extra)

        return data
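
The @pre_load hook runs on the raw input before any field loading, which is what lets the mixin replace separate parameters with a finished sqlalchemy_uri. A distilled, hypothetical sketch of that shape (the URI template below is illustrative, not Superset's engine-spec logic):

from marshmallow import Schema, fields, pre_load

class UriSketch(Schema):
    sqlalchemy_uri = fields.String(required=True)

    @pre_load
    def build_sqlalchemy_uri(self, data, **kwargs):
        # Assemble the URI from separate parameters, mirroring the mixin.
        params = data.pop("parameters", {})
        if params:
            data["sqlalchemy_uri"] = (
                "postgresql://{username}:{password}@{host}/{database}"
                .format(**params)
            )
        return data

payload = {"parameters": {
    "username": "u", "password": "p", "host": "localhost", "database": "db"}}
print(UriSketch().load(payload))
# {'sqlalchemy_uri': 'postgresql://u:p@localhost/db'}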
Example #28
class WebPushSubscriptionSchema(Schema):
    uaid = fields.UUID(required=True)
    chid = fields.UUID(required=True)
    public_key = fields.Raw(missing=None)

    @pre_load
    def extract_subscription(self, d):
        try:
            result = self.context["settings"].parse_endpoint(
                token=d["token"],
                version=d["api_ver"],
                ckey_header=d["ckey_header"],
                auth_header=d["auth_header"],
            )
        except (VapidAuthException):
            raise InvalidRequest("missing authorization header",
                                 status_code=401,
                                 errno=109)
        except (InvalidTokenException, InvalidToken):
            raise InvalidRequest("invalid token", status_code=404, errno=102)
        return result

    @validates_schema(skip_on_field_errors=True)
    def validate_uaid_month_and_chid(self, d):
        settings = self.context["settings"]  # type: AutopushSettings
        try:
            result = settings.router.get_uaid(d["uaid"].hex)
        except ItemNotFound:
            raise InvalidRequest("UAID not found", status_code=410, errno=103)

        if result.get("router_type") not in ["webpush", "gcm", "apns", "fcm"]:
            raise InvalidRequest("Wrong URL for user", errno=108)

        if result.get("critical_failure"):
            raise InvalidRequest("Critical Failure: %s" %
                                 result.get("critical_failure"),
                                 status_code=410,
                                 errno=105)

        if result["router_type"] == "webpush":
            self._validate_webpush(d, result)

        # Propagate the looked up user data back out
        d["user_data"] = result

    def _validate_webpush(self, d, result):
        settings = self.context["settings"]  # type: AutopushSettings
        log = self.context["log"]  # type: Logger
        channel_id = normalize_id(d["chid"])
        uaid = result["uaid"]
        if 'current_month' not in result:
            log.info(format="Dropping User",
                     code=102,
                     uaid_hash=hasher(uaid),
                     uaid_record=dump_uaid(result))
            settings.router.drop_user(uaid)
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)

        month_table = result["current_month"]
        if month_table not in settings.message_tables:
            log.info(format="Dropping User",
                     code=103,
                     uaid_hash=hasher(uaid),
                     uaid_record=dump_uaid(result))
            settings.router.drop_user(uaid)
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)
        exists, chans = settings.message_tables[month_table].all_channels(
            uaid=uaid)

        if (not exists or channel_id.lower() not in map(
                lambda x: normalize_id(x), chans)):
            log.info("Unknown subscription: {channel_id}",
                     channel_id=channel_id)
            raise InvalidRequest("No such subscription",
                                 status_code=410,
                                 errno=106)
Example #29
class ChartDataQueryObjectSchema(Schema):
    class Meta:  # pylint: disable=too-few-public-methods
        unknown = EXCLUDE

    datasource = fields.Nested(ChartDataDatasourceSchema, allow_none=True)
    result_type = EnumField(ChartDataResultType,
                            by_value=True,
                            allow_none=True)

    annotation_layers = fields.List(
        fields.Nested(AnnotationLayerSchema),
        description="Annotation layers to apply to chart",
        allow_none=True,
    )
    applied_time_extras = fields.Dict(
        description=
        "A mapping of temporal extras that have been applied to the query",
        allow_none=True,
        example={"__time_range": "1 year ago : now"},
    )
    apply_fetch_values_predicate = fields.Boolean(
        description="Add fetch values predicate (where clause) to query "
        "if defined in datasource",
        allow_none=True,
    )
    filters = fields.List(fields.Nested(ChartDataFilterSchema),
                          allow_none=True)
    granularity = fields.String(
        description=
        "Name of temporal column used for time filtering. For legacy Druid "
        "datasources this defines the time grain.",
        allow_none=True,
    )
    granularity_sqla = fields.String(
        description="Name of temporal column used for time filtering for SQL "
        "datasources. This field is deprecated, use `granularity` "
        "instead.",
        allow_none=True,
        deprecated=True,
    )
    groupby = fields.List(
        fields.Raw(),
        description="Columns by which to group the query. "
        "This field is deprecated, use `columns` instead.",
        allow_none=True,
    )
    metrics = fields.List(
        fields.Raw(),
        description="Aggregate expressions. Metrics can be passed as both "
        "references to datasource metrics (strings), or ad-hoc metrics"
        "which are defined only within the query object. See "
        "`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
        allow_none=True,
    )
    post_processing = fields.List(
        fields.Nested(ChartDataPostProcessingOperationSchema, allow_none=True),
        allow_none=True,
        description=
        "Post processing operations to be applied to the result set. "
        "Operations are applied to the result set in sequential order.",
    )
    time_range = fields.String(
        description="A time rage, either expressed as a colon separated string "
        "`since : until` or human readable freeform. Valid formats for "
        "`since` and `until` are: \n"
        "- ISO 8601\n"
        "- X days/years/hours/day/year/weeks\n"
        "- X days/years/hours/day/year/weeks ago\n"
        "- X days/years/hours/day/year/weeks from now\n"
        "\n"
        "Additionally, the following freeform can be used:\n"
        "\n"
        "- Last day\n"
        "- Last week\n"
        "- Last month\n"
        "- Last quarter\n"
        "- Last year\n"
        "- No filter\n"
        "- Last X seconds/minutes/hours/days/weeks/months/years\n"
        "- Next X seconds/minutes/hours/days/weeks/months/years\n",
        example="Last week",
        allow_none=True,
    )
    time_shift = fields.String(
        description="A human-readable date/time string. "
        "Please refer to [parsdatetime](https://github.com/bear/parsedatetime) "
        "documentation for details on valid values.",
        allow_none=True,
    )
    is_timeseries = fields.Boolean(
        description="Is the `query_object` a timeseries.",
        allow_none=True,
    )
    series_columns = fields.List(
        fields.Raw(),
        description="Columns to use when limiting series count. "
        "All columns must be present in the `columns` property. "
        "Requires `series_limit` and `series_limit_metric` to be set.",
        allow_none=True,
    )
    series_limit = fields.Integer(
        description="Maximum number of series. "
        "Requires `series` and `series_limit_metric` to be set.",
        allow_none=True,
    )
    series_limit_metric = fields.Raw(
        description="Metric used to limit timeseries queries by. "
        "Requires `series` and `series_limit` to be set.",
        allow_none=True,
    )
    timeseries_limit = fields.Integer(
        description="Maximum row count for timeseries queries. "
        "This field is deprecated, use `series_limit` instead."
        "Default: `0`",
        allow_none=True,
    )
    timeseries_limit_metric = fields.Raw(
        description="Metric used to limit timeseries queries by. "
        "This field is deprecated, use `series_limit_metric` instead.",
        allow_none=True,
    )
    row_limit = fields.Integer(
        description=
        'Maximum row count (0=disabled). Default: `config["ROW_LIMIT"]`',
        allow_none=True,
        validate=[
            Range(min=0,
                  error=_("`row_limit` must be greater than or equal to 0"))
        ],
    )
    row_offset = fields.Integer(
        description="Number of rows to skip. Default: `0`",
        allow_none=True,
        validate=[
            Range(min=0,
                  error=_("`row_offset` must be greater than or equal to 0"))
        ],
    )
    order_desc = fields.Boolean(
        description="Reverse order. Default: `false`",
        allow_none=True,
    )
    extras = fields.Nested(
        ChartDataExtrasSchema,
        description="Extra parameters to add to the query.",
        allow_none=True,
    )
    columns = fields.List(
        fields.Raw(),
        description="Columns which to select in the query.",
        allow_none=True,
    )
    orderby = fields.List(
        fields.Tuple((
            fields.Raw(
                validate=[
                    Length(min=1, error=_("orderby column must be populated"))
                ],
                allow_none=False,
            ),
            fields.Boolean(),
        )),
        description=
        "Expects a list of lists where the first element is the column "
        "name which to sort by, and the second element is a boolean.",
        allow_none=True,
        example=[("my_col_1", False), ("my_col_2", True)],
    )
    where = fields.String(
        description="WHERE clause to be added to queries using AND operator."
        "This field is deprecated and should be passed to `extras`.",
        allow_none=True,
        deprecated=True,
    )
    having = fields.String(
        description="HAVING clause to be added to aggregate queries using "
        "AND operator. This field is deprecated and should be passed "
        "to `extras`.",
        allow_none=True,
        deprecated=True,
    )
    having_filters = fields.List(
        fields.Nested(ChartDataFilterSchema),
        description=
        "HAVING filters to be added to legacy Druid datasource queries. "
        "This field is deprecated and should be passed to `extras` "
        "as `having_druid`.",
        allow_none=True,
        deprecated=True,
    )
    druid_time_origin = fields.String(
        description="Starting point for time grain counting on legacy Druid "
        "datasources. Used to change e.g. Monday/Sunday first-day-of-week. "
        "This field is deprecated and should be passed to `extras` "
        "as `druid_time_origin`.",
        allow_none=True,
    )
    url_params = fields.Dict(
        description=
        "Optional query parameters passed to a dashboard or Explore view",
        keys=fields.String(description="The query parameter"),
        values=fields.String(description="The value of the query parameter"),
        allow_none=True,
    )
    is_rowcount = fields.Boolean(
        description="Should the rowcount of the actual query be returned",
        allow_none=True,
    )
    time_offsets = fields.List(
        fields.String(),
        allow_none=True,
    )
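The bare `description=` and `example=` keyword arguments above are documentation metadata picked up when the OpenAPI spec is generated; marshmallow itself only enforces options such as `allow_none`, `validate`, and the Meta-level `unknown = EXCLUDE`, which silently drops undeclared payload keys instead of raising. A cut-down, self-contained sketch of that loading behaviour, with illustrative field names rather than the full Superset schema:

from marshmallow import EXCLUDE, Schema, fields
from marshmallow.validate import Range


class QueryObjectSketchSchema(Schema):
    class Meta:
        # Undeclared keys in the payload are dropped instead of raising.
        unknown = EXCLUDE

    columns = fields.List(fields.Raw(), allow_none=True)
    row_limit = fields.Integer(
        allow_none=True,
        validate=[Range(min=0, error="`row_limit` must be >= 0")],
    )


payload = {
    "columns": ["country", "gender"],
    "row_limit": 100,
    "not_a_declared_key": True,  # silently dropped by Meta.unknown
}
print(QueryObjectSketchSchema().load(payload))
# -> {'columns': ['country', 'gender'], 'row_limit': 100}

Note that newer marshmallow releases expect such extra documentation kwargs to be passed as `metadata={"description": ...}` rather than bare keyword arguments.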
Example #30
0
class GenericRepositorySchema(RepositorySchema):
    formats = fields.Raw()
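`fields.Raw` is the pass-through field: it applies no conversion or validation in either direction, which suits a `formats` value whose shape can vary per repository. A minimal sketch, with the unseen `RepositorySchema` base stubbed out since its definition is not shown here:

from marshmallow import Schema, fields


class RepositorySchema(Schema):  # stub; the real base class is not shown
    name = fields.String()


class GenericRepositorySchema(RepositorySchema):
    formats = fields.Raw()  # passed through untouched on load and dump


# Raw accepts whatever it is given: a dict here, but a list or string works too.
print(GenericRepositorySchema().load(
    {"name": "repo", "formats": {"oai_dc": {"serializer": "dc"}}}))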