class WorkIdentifersSchema(StrictKeysMixin):
    isbn = List(ISBN())
    issn = List(ISSN())
    doi = DOI()
    RIV = RIV()
class CvePackage(Schema):
    name = String(required=True)
    source = String(required=True)
    ubuntu = String(required=True)
    debian = String(required=True)
    statuses = List(Nested(Status))
class CVEAPISchema(CVESchema):
    package_statuses = List(Nested(CvePackage), data_key="packages")
    notices_ids = List(
        String(validate=Regexp(r"(USN|LSN)-\d{1,5}-\d{1,2}")),
    )
class CroppingJobResult(JobResult):
    file_ids: TList[int] = List(Integer(), default=[])
class NoticeImportSchema(NoticeSchema):
    cves = List(String(validate=Regexp(r"(cve-|CVE-)\d{4}-\d{4,7}")))
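# Usage sketch (not part of the original module): marshmallow's Regexp
# validator is re.match-based, so loading checks each list item against the
# CVE-id pattern above. The `_CveListSketch` helper below is hypothetical,
# assuming `List`, `String`, and `Regexp` come from marshmallow as elsewhere
# in this file.
from marshmallow import Schema as _Schema, ValidationError as _ValidationError
from marshmallow.fields import List as _List, String as _String
from marshmallow.validate import Regexp as _Regexp

class _CveListSketch(_Schema):
    cves = _List(_String(validate=_Regexp(r"(cve-|CVE-)\d{4}-\d{4,7}")))

_CveListSketch().load({"cves": ["CVE-2021-44228"]})  # accepted: 4-digit year, 5-digit number
try:
    _CveListSketch().load({"cves": ["CVE-21-1"]})  # rejected: year must be 4 digits
except _ValidationError as exc:
    print(exc.messages)  # {'cves': {0: ['String does not match expected pattern.']}}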
class TunnelEndpoint(BaseEndpoint):
    """
    This endpoint is responsible for handling requests for tunnel data.
    """

    def __init__(self):
        super(TunnelEndpoint, self).__init__()
        self.tunnels = None

    def setup_routes(self):
        self.app.add_routes([web.get('/circuits', self.get_circuits),
                             web.get('/relays', self.get_relays),
                             web.get('/exits', self.get_exits),
                             web.get('/swarms', self.get_swarms)])

    def initialize(self, session):
        super(TunnelEndpoint, self).initialize(session)
        self.tunnels = session.get_overlay(TunnelCommunity)

    @docs(
        tags=["Tunnels"],
        summary="Return a list of all current circuits.",
        responses={
            200: {
                "schema": schema(CircuitsResponse={"circuits": [schema(Circuit={
                    "circuit_id": Integer,
                    "goal_hops": Integer,
                    "actual_hops": Integer,
                    "verified_hops": List(String),
                    "unverified_hop": List(String),
                    "type": String,
                    "state": String,
                    "bytes_up": Integer,
                    "bytes_down": Integer,
                    "creation_time": Integer
                })]})
            }
        }
    )
    async def get_circuits(self, _):
        return Response({
            "circuits": [{
                "circuit_id": circuit.circuit_id,
                "goal_hops": circuit.goal_hops,
                "actual_hops": len(circuit.hops),
                "verified_hops": [hexlify(hop.mid).decode('utf-8') for hop in circuit.hops],
                "unverified_hop": hexlify(circuit.unverified_hop.mid).decode('utf-8')
                                  if circuit.unverified_hop else '',
                "type": circuit.ctype,
                "state": circuit.state,
                "bytes_up": circuit.bytes_up,
                "bytes_down": circuit.bytes_down,
                "creation_time": circuit.creation_time
            } for circuit in self.tunnels.circuits.values()]
        })

    @docs(
        tags=["Tunnels"],
        summary="Return a list of all current relays.",
        responses={
            200: {
                "schema": schema(RelaysResponse={"relays": [schema(Relay={
                    "circuit_from": Integer,
                    "circuit_to": Integer,
                    "is_rendezvous": Boolean,
                    "bytes_up": Integer,
                    "bytes_down": Integer,
                    "creation_time": Integer
                })]})
            }
        }
    )
    async def get_relays(self, _):
        return Response({
            "relays": [{
                "circuit_from": circuit_from,
                "circuit_to": relay.circuit_id,
                "is_rendezvous": relay.rendezvous_relay,
                "bytes_up": relay.bytes_up,
                "bytes_down": relay.bytes_down,
                "creation_time": relay.creation_time
            } for circuit_from, relay in self.tunnels.relay_from_to.items()]
        })

    @docs(
        tags=["Tunnels"],
        summary="Return a list of all current exits.",
        responses={
            200: {
                "schema": schema(ExitsResponse={"exits": [schema(Exit={
                    "circuit_from": Integer,
                    "enabled": Boolean,
                    "bytes_up": Integer,
                    "bytes_down": Integer,
                    "creation_time": Integer
                })]})
            }
        }
    )
    async def get_exits(self, _):
        return Response({
            "exits": [{
                "circuit_from": circuit_from,
                "enabled": exit_socket.enabled,
                "bytes_up": exit_socket.bytes_up,
                "bytes_down": exit_socket.bytes_down,
                "creation_time": exit_socket.creation_time
            } for circuit_from, exit_socket in self.tunnels.exit_sockets.items()]
        })

    @docs(
        tags=["Tunnels"],
        summary="Return a list of all current hidden swarms.",
        responses={
            200: {
                "schema": schema(SwarmsResponse={"swarms": [schema(Swarm={
                    "info_hash": String,
                    "num_seeders": Integer,
                    "num_connections": Integer,
                    "num_connections_incomplete": Integer,
                    "seeding": Boolean,
                    "last_lookup": Integer,
                    "bytes_up": Integer,
                    "bytes_down": Integer
                })]})
            }
        }
    )
    async def get_swarms(self, _):
        return Response({
            "swarms": [{
                "info_hash": hexlify(swarm.info_hash).decode('utf-8'),
                "num_seeders": swarm.get_num_seeders(),
                "num_connections": swarm.get_num_connections(),
                "num_connections_incomplete": swarm.get_num_connections_incomplete(),
                "seeding": swarm.seeding,
                "last_lookup": swarm.last_lookup,
                "bytes_up": swarm.get_total_up(),
                "bytes_down": swarm.get_total_down()
            } for swarm in self.tunnels.swarms.values()]
        })
class ShallowCombinedSchema(BaseSchema):
    collection_id = String()

    # Joint entity/document attributes
    collection = Nested(CollectionSchema())
    schema = SchemaName()
    schemata = List(SchemaName())
    names = List(String())
    addresses = List(String())
    phones = List(String())
    emails = List(String())
    identifiers = List(String())
    countries = List(Country())
    dates = List(PartialDate())
    bulk = Boolean()

    # Entity attributes
    foreign_id = String()
    name = String()
    entities = List(String())
    properties = Dict()

    # Document attributes
    status = String()
    content_hash = String()
    uploader_id = String()
    uploader = Nested(RoleReferenceSchema())
    error_message = String()
    title = String()
    summary = String()
    languages = List(Language())
    keywords = List(String())
    date = PartialDate()
    authored_at = PartialDate()
    modified_at = PartialDate()
    published_at = PartialDate()
    retrieved_at = PartialDate()
    file_name = String()
    file_size = Integer()
    author = String()
    generator = String()
    mime_type = String()
    extension = String()
    encoding = String()
    source_url = String()
    pdf_version = String()
    columns = List(String())
    headers = Dict()
    children = Integer()

    # TODO: is this a separate endpoint?
    text = String()
    html = String()

    def document_links(self, data, pk, schemata):
        links = {
            'self': url_for('documents_api.view', document_id=pk),
            'tags': url_for('entities_api.tags', id=pk),
            'ui': document_url(pk)
        }
        if data.get('content_hash'):
            links['file'] = url_for('documents_api.file',
                                    document_id=pk,
                                    _authorize=True)
        if schemata.intersection([Document.SCHEMA_PDF]):
            links['pdf'] = url_for('documents_api.pdf',
                                   document_id=pk,
                                   _authorize=True)
        if schemata.intersection([Document.SCHEMA_PDF, Document.SCHEMA_TABLE]):
            links['records'] = url_for('documents_api.records', document_id=pk)
        if schemata.intersection([Document.SCHEMA_FOLDER]):
            query = (('filter:parent.id', pk),)
            links['children'] = url_for('documents_api.index', _query=query)
        return links

    def entity_links(self, data, pk, schemata):
        return {
            'self': url_for('entities_api.view', id=pk),
            # 'similar': url_for('entities_api.similar', id=pk),
            # 'documents': url_for('entities_api.documents', id=pk),
            'references': url_for('entities_api.references', id=pk),
            'tags': url_for('entities_api.tags', id=pk),
            'ui': entity_url(pk)
        }

    @post_dump()
    def hypermedia(self, data):
        pk = str(data.get('id'))
        collection = data.get('collection', {})
        collection_id = collection.get('id')
        collection_id = collection_id or data.get('collection_id')
        schemata = set(data.get('schemata', []))
        if Document.SCHEMA in schemata:
            data['links'] = self.document_links(data, pk, schemata)
        else:
            data['links'] = self.entity_links(data, pk, schemata)

        if data.get('bulk'):
            data['writeable'] = False
        else:
            data['writeable'] = request.authz.can_write(collection_id)
        return data
class SchemaFieldSchema(Schema):
    field_name = String(dump_to="field_name", load_from="field_name",
                        allow_none=False)
    tags = List(String(), dump_to="tags", load_from="tags", allow_none=False)
    type_struct = SchemaTypeStructField(
        dump_to="type_struct", load_from="type_struct", allow_none=False
    )
class StudyFieldMixin:
    AKVO = SanitizedUnicode()
    aliases = List(SanitizedUnicode())
class OverlaysEndpoint(BaseEndpoint):
    """
    This endpoint is responsible for handling all requests regarding the status of overlays.
    """

    def __init__(self):
        super(OverlaysEndpoint, self).__init__()
        self.statistics_supported = None

    def setup_routes(self):
        self.app.add_routes([web.get('', self.get_overlays),
                             web.get('/statistics', self.get_statistics),
                             web.post('/statistics', self.enable_statistics)])

    def initialize(self, session):
        super(OverlaysEndpoint, self).initialize(session)
        self.statistics_supported = isinstance(session.endpoint, StatisticsEndpoint) \
            or isinstance(getattr(session.endpoint, 'endpoint', None), StatisticsEndpoint)

    @docs(
        tags=["Overlays"],
        summary="Return information about all currently loaded overlays.",
        responses={
            200: {
                "schema": schema(OverlayResponse={"overlays": [OverlaySchema]})
            }
        }
    )
    async def get_overlays(self, _):
        overlay_stats = []
        for overlay in self.session.overlays:
            peers = overlay.get_peers()
            statistics = self.session.endpoint.get_aggregate_statistics(overlay.get_prefix()) \
                if isinstance(self.session.endpoint, StatisticsEndpoint) else {}
            overlay_stats.append({
                "id": hexlify(overlay.community_id).decode('utf-8'),
                "my_peer": hexlify(overlay.my_peer.public_key.key_to_bin()).decode('utf-8'),
                "global_time": overlay.global_time,
                "peers": [{
                    'ip': peer.address[0],
                    'port': peer.address[1],
                    'public_key': hexlify(peer.public_key.key_to_bin()).decode('utf-8')
                } for peer in peers],
                "overlay_name": overlay.__class__.__name__,
                "statistics": statistics,
                "max_peers": overlay.max_peers,
                "is_isolated": self.session.network != overlay.network,
                "my_estimated_wan": {
                    "ip": overlay.my_estimated_wan[0],
                    "port": overlay.my_estimated_wan[1]
                },
                "my_estimated_lan": {
                    "ip": overlay.my_estimated_lan[0],
                    "port": overlay.my_estimated_lan[1]
                },
                "strategies": [{
                    'name': strategy.__class__.__name__,
                    'target_peers': target_peers
                } for strategy, target_peers in self.session.strategies
                    if strategy.overlay == overlay]
            })
        return Response({"overlays": overlay_stats})

    @docs(
        tags=["Overlays"],
        summary="Return statistics for all currently loaded overlays.",
        responses={
            200: {
                "schema": schema(StatisticsResponse={
                    "statistics": List(Dict(keys=String,
                                            values=Nested(OverlayStatisticsSchema))),
                }),
                "examples": {
                    'Success': {
                        "statistics": [{
                            "DiscoveryCommunity": {
                                'num_up': 0,
                                'num_down': 0,
                                'bytes_up': 0,
                                'bytes_down': 0,
                                'diff_time': 0
                            }
                        }]
                    }
                }
            }
        }
    )
    async def get_statistics(self, _):
        overlay_stats = []
        for overlay in self.session.overlays:
            statistics = self.session.endpoint.get_statistics(overlay.get_prefix()) \
                if self.statistics_supported else {}
            overlay_stats.append({
                overlay.__class__.__name__: self.statistics_by_name(statistics, overlay)
            })
        return Response({"statistics": overlay_stats})

    def statistics_by_name(self, statistics, overlay):
        named_statistics = {}
        for message_id, network_stats in statistics.items():
            if overlay.decode_map[message_id]:
                mapped_name = str(message_id) + ":" + overlay.decode_map[message_id].__name__
            else:
                mapped_name = str(message_id) + ":unknown"
            mapped_value = network_stats.to_dict()
            named_statistics[mapped_name] = mapped_value
        return named_statistics

    @docs(
        tags=["Overlays"],
        summary="Enable/disable statistics for a given overlay.",
        responses={
            200: {
                "schema": DefaultResponseSchema,
                "examples": {'Success': {"success": True}}
            },
            HTTP_PRECONDITION_FAILED: {
                "schema": DefaultResponseSchema,
                "examples": {
                    'Statistics disabled': {
                        "success": False,
                        "error": "StatisticsEndpoint is not enabled."
                    }
                }
            },
            HTTP_BAD_REQUEST: {
                "schema": DefaultResponseSchema,
                "examples": {
                    'Missing parameter': {
                        "success": False,
                        "error": "Parameter 'enable' is required."
                    }
                }
            }
        }
    )
    @json_schema(schema(EnableStatisticsRequest={
        'enable*': (Boolean, 'Whether to enable or disable the statistics'),
        'all': (Boolean, 'Whether update applies to all overlays'),
        'overlay_name': (String, 'Class name of the overlay'),
    }))
    async def enable_statistics(self, request):
        if not self.statistics_supported:
            return Response({"success": False,
                             "error": "StatisticsEndpoint is not enabled."},
                            status=HTTP_PRECONDITION_FAILED)
        args = await request.json()
        if 'enable' not in args:
            return Response({"success": False,
                             "error": "Parameter 'enable' is required"},
                            status=HTTP_BAD_REQUEST)
        if 'all' not in args and 'overlay_name' not in args:
            return Response({"success": False,
                             "error": "Parameter 'all' or 'overlay_name' is required"},
                            status=HTTP_PRECONDITION_FAILED)
        self.enable_overlay_statistics(enable=args['enable'],
                                       class_name=args.get('overlay_name', None),
                                       all_overlays=args.get('all', False))
        return Response({"success": True})

    def enable_overlay_statistics(self, enable=False, class_name=None, all_overlays=False):
        if all_overlays:
            for overlay in self.session.overlays:
                self.session.endpoint.enable_community_statistics(overlay.get_prefix(), enable)
        elif class_name:
            for overlay in self.session.overlays:
                if overlay.__class__.__name__ == class_name:
                    self.session.endpoint.enable_community_statistics(overlay.get_prefix(), enable)
class Catalog(AfterglowSchema):
    """
    Base class for catalog plugins

    Plugin modules are placed in the :mod:`resources.catalog_plugins`
    subpackage and must directly or indirectly subclass from :class:`Catalog`,
    e.g.

    class MyCatalog(Catalog):
        name = 'my_catalog'
        num_sources = 1000000
        mags = {'B': ('Bmag', 'eBmag'), 'V': ('Vmag', 'eVmag'),
                'R': ('Rmag', 'eRmag'), 'I': ('Imag', 'eImag')}
        filter_lookup = {'Open': '(3*B + 5*R)/8',
                         '*': 'R'}  # '*' stands for "use this for any unknown filter"

        def query_objects(self, names):  # optional
            ...

        def query_box(self, ra_hours, dec_degs, width_arcmins, height_arcmins,
                      constraints=None):
            ...

        def query_circ(self, ra_hours, dec_degs, radius_arcmins,
                       constraints=None):
            ...

    Methods:
        query_objects: return a list of catalog objects with the specified names
        query_box: return catalog objects within the specified rectangular region
        query_circ: return catalog objects within the specified circular region
    """
    __polymorphic_on__ = 'name'

    name: str = String(default=None)
    display_name: str = String(default=None)
    num_sources: int = Integer()
    mags: TDict[str, TList[str]] = Dict(
        keys=String, values=List(String()), default={})
    filter_lookup: TDict[str, str] = Dict(keys=String, values=String)

    def __init__(self, **kwargs):
        """
        Create a Catalog instance

        :param kwargs: catalog-specific initialization parameters
        """
        # Override catalog option defaults with CATALOG_OPTIONS config var
        # for the current catalog
        kwargs = dict(kwargs)
        kwargs.update(app.config.get('CATALOG_OPTIONS', {}).get(self.name, {}))

        super().__init__(**kwargs)

        if self.display_name is None:
            self.display_name = self.name

    def query_objects(self, names: TList[str]) -> TList[CatalogSource]:
        """
        Return a list of catalog objects with the specified names

        :param names: object names

        :return: list of catalog objects with the specified names
        """
        raise errors.MethodNotImplementedError(
            class_name=self.__class__.__name__, method_name='query_objects')

    def query_box(self, ra_hours: float, dec_degs: float, width_arcmins: float,
                  height_arcmins: Optional[float] = None,
                  constraints: Optional[TDict[str, str]] = None,
                  limit: Optional[int] = None) -> TList[CatalogSource]:
        """
        Return catalog objects within the specified rectangular region

        :param ra_hours: right ascension of region center in hours
        :param dec_degs: declination of region center in degrees
        :param width_arcmins: width of region in arcminutes
        :param height_arcmins: optional height of region in arcminutes;
            defaults to `width_arcmins`
        :param constraints: optional constraints on the column values
        :param limit: optional limit on the number of objects to return

        :return: list of catalog objects within the specified rectangular
            region
        """
        raise errors.MethodNotImplementedError(
            class_name=self.__class__.__name__, method_name='query_box')

    def query_circ(self, ra_hours: float, dec_degs: float,
                   radius_arcmins: float,
                   constraints: Optional[TDict[str, str]] = None,
                   limit: Optional[int] = None) -> TList[CatalogSource]:
        """
        Return catalog objects within the specified circular region

        :param ra_hours: right ascension of region center in hours
        :param dec_degs: declination of region center in degrees
        :param radius_arcmins: region radius in arcminutes
        :param constraints: optional constraints on the column values
        :param limit: optional limit on the number of objects to return

        :return: list of catalog objects
        """
        raise errors.MethodNotImplementedError(
            class_name=self.__class__.__name__, method_name='query_circ')
class XrefSchema(Schema):
    against_collection_ids = List(Integer())
class CZMeshMixin:
    TreeNumberList = List(SanitizedUnicode())
class SubjectMixin:
    relatedURI = List(Url)
    DateCreated = DateTime()
    DateRevised = DateTime()
    DateEstablished = DateTime()
class BatchImportJob(Job):
    """
    Batch data file import job
    """
    type = 'batch_import'
    description = 'Batch Data File Import'

    result: BatchImportJobResult = Nested(BatchImportJobResult, default={})
    settings: TList[BatchImportSettings] = List(Nested(
        BatchImportSettings, default={}), default=[])
    session_id: int = Integer(default=None)

    def run(self):
        adb = get_data_file_db(self.user_id)
        try:
            nfiles = len(self.settings)
            root = get_root(self.user_id)

            for i, settings in enumerate(self.settings):
                try:
                    asset_path = settings.path
                    try:
                        provider = providers[settings.provider_id]
                    except KeyError:
                        raise UnknownDataProviderError(id=settings.provider_id)

                    def recursive_import(path, depth=0):
                        asset = provider.get_asset(path)
                        if asset.collection:
                            if not provider.browseable:
                                raise CannotImportFromCollectionAssetError(
                                    provider_id=provider.id, path=path)
                            if not settings.recurse and depth:
                                return []
                            return sum(
                                [recursive_import(child_asset.path, depth + 1)
                                 for child_asset in provider.get_child_assets(
                                     asset.path)[0]], [])
                        return [f.id for f in import_data_file(
                            adb, root, provider.id, asset.path, asset.metadata,
                            BytesIO(provider.get_asset_data(asset.path)),
                            asset.name, settings.duplicates,
                            session_id=self.session_id)]

                    if not isinstance(asset_path, list):
                        try:
                            asset_path = json.loads(asset_path)
                        except ValueError:
                            pass
                        if not isinstance(asset_path, list):
                            asset_path = [asset_path]

                    self.result.file_ids += sum(
                        [recursive_import(p) for p in asset_path], [])
                except Exception as e:
                    self.add_error(e, {'file_no': i + 1})
                finally:
                    self.update_progress((i + 1)/nfiles*100)

            if self.result.file_ids:
                adb.commit()
        finally:
            adb.remove()
class CheckTokenResp(Schema):
    MESSAGE_NAME = Str()
    TOKEN_STATUS = Bool()
    NAME_MODELS = List(Nested(NameModelResp))
class TrustViewEndpoint(RESTEndpoint):

    def __init__(self, session):
        super(TrustViewEndpoint, self).__init__(session)
        self.logger = logging.getLogger(self.__class__.__name__)
        self.trustchain_db = None
        self.trust_graph = None
        self.public_key = None

    def setup_routes(self):
        self.app.add_routes([web.get('', self.get_view)])

    def initialize_graph(self):
        if self.session.trustchain_community:
            self.trustchain_db = self.session.trustchain_community.persistence
            self.public_key = self.session.trustchain_community.my_peer.public_key.key_to_bin()
            self.trust_graph = TrustGraph(hexlify(self.public_key))

            # Start bootstrap download if not already done
            if not self.session.bootstrap:
                self.session.start_bootstrap_download()

    @docs(
        tags=["TrustChain"],
        summary="Return the trust graph.",
        parameters=[{
            'in': 'query',
            'name': 'depth',
            'description': 'Depth level (0 = all depths)',
            'enum': [0, 1, 2, 3, 4],
            'type': 'integer',
            'required': False
        }],
        responses={
            200: {
                "schema": schema(GraphResponse={
                    'root_public_key': String,
                    'graph': schema(Graph={
                        'node': schema(Node={
                            'id': Integer,
                            'key': String,
                            'pos': [Float],
                            'sequence_number': Integer,
                            'total_up': Integer,
                            'total_down': Integer
                        }),
                        'edge': List(List(Integer))
                    }),
                    'bootstrap': schema(Bootstrap={
                        'download': Integer,
                        'upload': Integer,
                        'progress': Float
                    }),
                    'num_tx': Integer,
                    'depth': Integer
                })
            }
        }
    )
    async def get_view(self, request):
        if not self.trust_graph:
            self.initialize_graph()

        def get_bandwidth_blocks(public_key, limit=5):
            return self.trustchain_db.get_latest_blocks(
                public_key, limit=limit, block_types=[b'tribler_bandwidth'])

        def get_friends(public_key, limit=5):
            return self.trustchain_db.get_connected_users(public_key, limit=limit)

        depth = 0
        if 'depth' in request.query:
            depth = int(request.query['depth'])

        # If depth is zero or not provided then fetch all depth levels
        fetch_all = depth == 0

        try:
            if fetch_all:
                self.trust_graph.reset(hexlify(self.public_key))
            if fetch_all or depth == 1:
                self.trust_graph.add_blocks(
                    get_bandwidth_blocks(self.public_key, limit=100))
            if fetch_all or depth == 2:
                for friend in get_friends(self.public_key):
                    self.trust_graph.add_blocks(
                        get_bandwidth_blocks(unhexlify(friend['public_key']), limit=10))
            if fetch_all or depth == 3:
                for friend in get_friends(self.public_key):
                    self.trust_graph.add_blocks(
                        get_bandwidth_blocks(unhexlify(friend['public_key'])))
                    for fof in get_friends(unhexlify(friend['public_key'])):
                        self.trust_graph.add_blocks(
                            get_bandwidth_blocks(unhexlify(fof['public_key'])))
            if fetch_all or depth == 4:
                for user_block in self.trustchain_db.get_users():
                    self.trust_graph.add_blocks(
                        get_bandwidth_blocks(unhexlify(user_block['public_key'])))
        except TrustGraphException as tgex:
            self.logger.warning(tgex)

        graph_data = self.trust_graph.compute_node_graph()

        return RESTResponse({
            'root_public_key': hexlify(self.public_key),
            'graph': graph_data,
            'bootstrap': self.get_bootstrap_info(),
            'num_tx': len(graph_data['edge']),
            'depth': depth,
        })

    def get_bootstrap_info(self):
        if self.session.bootstrap.download and self.session.bootstrap.download.get_state():
            state = self.session.bootstrap.download.get_state()
            return {
                'download': state.get_total_transferred(DOWNLOAD),
                'upload': state.get_total_transferred(UPLOAD),
                'progress': state.get_progress(),
            }
        return {'download': 0, 'upload': 0, 'progress': 0}
class PredictImagePayload(Schema):
    result = List(Nested(ImagePayloadList))
    description = Str()
class TopicSchema(Schema):
    """Marshmallow schema for topics."""

    topic_id36 = ID36()
    title = SimpleString(max_length=TITLE_MAX_LENGTH)
    topic_type = Enum(dump_only=True)
    markdown = Markdown(allow_none=True)
    rendered_html = String(dump_only=True)
    link = URL(schemes={"http", "https"}, allow_none=True)
    created_time = DateTime(dump_only=True)
    tags = List(String())
    user = Nested(UserSchema, dump_only=True)
    group = Nested(GroupSchema, dump_only=True)

    @pre_load
    def prepare_title(self, data: dict, many: bool, partial: Any) -> dict:
        """Prepare the title before it's validated."""
        # pylint: disable=unused-argument
        if "title" not in data:
            return data

        new_data = data.copy()

        split_title = re.split("[.?!]+", new_data["title"])

        # the last string in the list will be empty if it ended with punctuation
        num_sentences = len([piece for piece in split_title if piece])

        # strip trailing periods off single-sentence titles
        if num_sentences == 1:
            new_data["title"] = new_data["title"].rstrip(".")

        return new_data

    @pre_load
    def prepare_tags(self, data: dict, many: bool, partial: Any) -> dict:
        """Prepare the tags before they're validated."""
        # pylint: disable=unused-argument
        if "tags" not in data:
            return data

        new_data = data.copy()

        tags: list[str] = []

        for tag in new_data["tags"]:
            tag = tag.lower()

            # replace underscores with spaces
            tag = tag.replace("_", " ")

            # remove any consecutive spaces
            tag = re.sub(" {2,}", " ", tag)

            # remove any leading/trailing spaces
            tag = tag.strip(" ")

            # drop any empty tags
            if not tag or tag.isspace():
                continue

            # handle synonyms
            for name, synonyms in TAG_SYNONYMS.items():
                if tag in synonyms:
                    tag = name

            # skip any duplicate tags
            if tag in tags:
                continue

            tags.append(tag)

        new_data["tags"] = tags

        return new_data

    @validates("tags")
    def validate_tags(self, value: list[str]) -> None:
        """Validate the tags field, raising an error if an issue exists.

        Note that tags are validated by ensuring that each tag would be a valid
        group path. This is definitely mixing concerns, but it's deliberate in
        this case. It will allow for some interesting possibilities by ensuring
        naming "compatibility" between groups and tags. For example, a popular
        tag in a group could be converted into a sub-group easily.
        """
        group_schema = GroupSchema(partial=True)
        for tag in value:
            try:
                group_schema.load({"path": tag})
            except ValidationError as exc:
                raise ValidationError("Tag %s is invalid" % tag) from exc

    @pre_load
    def prepare_markdown(self, data: dict, many: bool, partial: Any) -> dict:
        """Prepare the markdown value before it's validated."""
        # pylint: disable=unused-argument
        if "markdown" not in data:
            return data

        new_data = data.copy()

        # if the value is empty, convert it to None
        if not new_data["markdown"] or new_data["markdown"].isspace():
            new_data["markdown"] = None

        return new_data

    @pre_load
    def prepare_link(self, data: dict, many: bool, partial: Any) -> dict:
        """Prepare the link value before it's validated."""
        # pylint: disable=unused-argument
        if "link" not in data:
            return data

        new_data = data.copy()

        # remove leading/trailing whitespace
        new_data["link"] = new_data["link"].strip()

        # if the value is empty, convert it to None
        if not new_data["link"]:
            new_data["link"] = None
            return new_data

        # prepend http:// to the link if it doesn't have a scheme
        parsed = urlparse(new_data["link"])
        if not parsed.scheme:
            new_data["link"] = "http://" + new_data["link"]

        # run the link through the url-transformation process
        new_data["link"] = apply_url_transformations(new_data["link"])

        return new_data

    @validates_schema
    def link_or_markdown(self, data: dict, many: bool, partial: Any) -> None:
        """Fail validation unless at least one of link or markdown were set."""
        # pylint: disable=unused-argument
        if "link" not in data and "markdown" not in data:
            return

        link = data.get("link")
        markdown = data.get("markdown")

        if not (markdown or link):
            raise ValidationError("Topics must have either markdown or a link.")
class SourceMergeJobResultSchema(JobResultSchema):
    data: TList[SourceExtractionDataSchema] = List(
        Nested(SourceExtractionDataSchema), default=[])
class PredictResponseSchema(Schema):
    prediction = List(Float(validate=Range(min=0)), required=True)
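# A small usage sketch (assumes the marshmallow Float/Range used above): the
# Range(min=0) validator runs on load(), so negative entries in an incoming
# prediction list raise a ValidationError rather than passing through.
from marshmallow import ValidationError as _MMValidationError

PredictResponseSchema().load({"prediction": [0.12, 0.88]})  # ok: all >= 0
try:
    PredictResponseSchema().load({"prediction": [-0.2]})
except _MMValidationError as exc:
    print(exc.messages)  # e.g. {'prediction': {0: ['Must be greater than or equal to 0.']}}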
class TrustViewEndpoint(RESTEndpoint):

    def __init__(self, bandwidth_db: BandwidthDatabase):
        super().__init__()
        self.bandwidth_db = bandwidth_db

    def setup_routes(self):
        self.app.add_routes([web.get('', self.get_view)])

    @cached_property
    def trust_graph(self) -> TrustGraph:
        trust_graph = TrustGraph(self.bandwidth_db.my_pub_key, self.bandwidth_db)
        trust_graph.compose_graph_data()
        return trust_graph

    @docs(
        tags=["TrustGraph"],
        summary="Return the trust graph.",
        parameters=[],
        responses={
            200: {
                "schema": schema(GraphResponse={
                    'root_public_key': String,
                    'graph': schema(Graph={
                        'node': schema(Node={
                            'id': Integer,
                            'key': String,
                            'pos': [Float],
                            'sequence_number': Integer,
                            'total_up': Integer,
                            'total_down': Integer
                        }),
                        'edge': List(List(Integer))
                    }),
                    'bootstrap': schema(Bootstrap={
                        'download': Integer,
                        'upload': Integer,
                        'progress': Float
                    }),
                    'num_tx': Integer
                })
            }
        }
    )
    async def get_view(self, request):
        refresh_graph = int(request.query.get('refresh', '0'))
        if refresh_graph:
            self.trust_graph.compose_graph_data()

        graph_data = self.trust_graph.compute_node_graph()

        return RESTResponse({
            'root_public_key': hexlify(self.bandwidth_db.my_pub_key),
            'graph': graph_data,
            'bootstrap': 0,
            'num_tx': len(graph_data['edge'])
        })
"limit": Int( description="Number of CVEs per response. Defaults to 20.", allow_none=True, ), "offset": Int( description="Number of CVEs to omit from response. Defaults to 0.", allow_none=True, ), "component": Component( allow_none=True, enum=["main", "universe"], description="Package component", ), "version": List( ReleaseCodename(enum=release_codenames), description="List of release codenames ", allow_none=True, ), "status": List( StatusStatuses(enum=status_statuses), description="List of statuses", allow_none=True, ), "order": String( enum=["oldest"], description=( "Select order: choose `oldest` for ASC order; " "leave empty for DESC order" ), allow_none=True, ),
class ListEnumSchema(Schema):
    enum = List(EnumField(EnumTester))
class NoticeAPISchema(NoticeSchema):
    notice_type = String(data_key="type")
    cves_ids = List(String(validate=Regexp(r"(cve-|CVE-)\d{4}-\d{4,7}")))
class ListEnumSchema(Schema):
    enum = List(EnumField(EnumTester, by_value=True))
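# A minimal sketch contrasting the two ListEnumSchema variants above. The
# members of EnumTester are not shown in this excerpt, so a hypothetical
# stand-in enum is used: by default marshmallow_enum's EnumField serializes
# member *names*, while by_value=True serializes member *values*.
import enum as _enum
from marshmallow import Schema as _Schema
from marshmallow.fields import List as _List
from marshmallow_enum import EnumField as _EnumField

class _Color(_enum.Enum):  # hypothetical stand-in for EnumTester
    RED = 1
    BLUE = 2

class _ByName(_Schema):
    enum = _List(_EnumField(_Color))

class _ByValue(_Schema):
    enum = _List(_EnumField(_Color, by_value=True))

print(_ByName().dump({"enum": [_Color.RED, _Color.BLUE]}))   # {'enum': ['RED', 'BLUE']}
print(_ByValue().dump({"enum": [_Color.RED, _Color.BLUE]}))  # {'enum': [1, 2]}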
class CVEImportSchema(CVESchema):
    packages = List(Nested(CvePackage))
class BatchImportJobResult(JobResult):
    file_ids: TList[int] = List(Integer(), default=[])
class NoticeAPIDetailedSchema(NoticeAPISchema):
    cves = List(Nested(CVEAPISchema))
class CallForPapersSchema(mm.Schema):
    layout_review_questions = Nested('PaperReviewQuestionSchema', many=True)
    content_review_questions = Nested('PaperReviewQuestionSchema', many=True)
    rating_range = List(Integer())