def setUp(self):
    """Register a custom Politician type/property pair before each test."""
    super(CustomTypeAndPropertyTest, self).setUp()

    # Build the schema fragment from named pieces instead of one big literal.
    custom_property = {
        "comment": "Political party.",
        "comment_plain": "Political party.",
        "domains": ["Thing"],
        "id": "politicalParty",
        "label": "Political Party",
        "reversed_label": "%s",
        "ranges": ["Text"],
    }
    custom_type = {
        "ancestors": ["Thing", "Person"],
        "comment": "",
        "comment_plain": "",
        "id": "Politician",
        "label": "Politician",
        "specific_properties": ["politicalParty"],
        "subtypes": [],
        "supertypes": ["Person"],
        "url": "http://www.ecogwiki.com/sp.schema/types/Politician",
    }
    schema.SCHEMA_TO_LOAD.append({
        "datatypes": {},
        "properties": {"politicalParty": custom_property},
        "types": {"Politician": custom_type},
    })

    self.person = schema.get_schema('Person')
    self.politician = schema.get_schema('Politician')
def register_schema(schema):
    """Register read and write Eve resources for every derived schema.

    ``update_schema`` returns a mapping of resource name -> schema; for each
    one a read-only aggregation resource and a ``<name>_write`` companion
    resource are added to the module-level ``DOMAIN``.

    NOTE: the parameter name shadows any module-level ``schema`` import; it is
    kept unchanged for backward compatibility with keyword callers.
    """
    all_schemas = update_schema(schema)
    # Iterate the dict directly; calling .keys() adds nothing.
    for rsc in all_schemas:
        rsc_read = {
            "schema": get_schema(rsc),
            "datasource": {
                "aggregation": {
                    "pipeline": [
                        {"$match": {"_id": "$id"}}
                    ]
                }
            },
            "pagination": False,
            "item_methods": ["GET"],
        }
        rsc_write = {
            "schema": get_schema(rsc),
            "datasource": {
                "source": rsc
            },
            "resource_methods": ["POST"],
            # PUT doesn't have a valid use case at this point
            "item_methods": ["PATCH", "DELETE"],
        }
        DOMAIN[rsc] = rsc_read
        DOMAIN['{}_write'.format(rsc)] = rsc_write
def register_resources(rscs):
    """Register read and write Eve resources for each name in *rscs*.

    Raises ``Exception`` when the resource list is empty, since an Eve app
    without resources is a configuration error.
    """
    # Truthiness covers both [] and other empty iterables; comparing to a
    # literal list was fragile. Raising early also removes the dead `else`.
    if not rscs:
        raise Exception('Please define resources in settings.py')
    for rsc in rscs:
        rsc_read = {
            "schema": get_schema(rsc),
            "datasource": {
                "aggregation": {
                    "pipeline": [
                        {"$match": {"_id": "$id"}}
                    ]
                }
            },
            "pagination": False,
            "public_methods": ["GET"],
            "item_methods": ["GET"],
        }
        rsc_write = {
            "schema": get_schema(rsc),
            "datasource": {
                "source": rsc
            },
            "public_methods": ["POST"],
            "resource_methods": ["POST"],
            # PUT and DELETE don't have valid use cases at this point
            "item_methods": ["PATCH"],
        }
        DOMAIN[rsc] = rsc_read
        DOMAIN['{}_write'.format(rsc)] = rsc_write
def test_get_plural_label(self):
    """Plural labels should be derived correctly for regular nouns."""
    expected = [
        ('CreativeWork', u'Creative Works'),
        ('MedicalEntity', u'Medical Entities'),
        ('LocalBusiness', u'Local Businesses'),
        ('Attorney', u'Attorneys'),
    ]
    for itemtype, plural in expected:
        self.assertEqual(plural, schema.get_schema(itemtype)['plural_label'])
def load(self):
    """Resolve self.path to a schema resource (type, property or datatype)."""
    tokens = self.path.split('/')[1:]
    kind = tokens[0]
    # Guard-clause chain: each branch returns immediately.
    if kind == 'types' and len(tokens) == 1:
        return {
            'id': 'types',
            'itemtypes': schema.get_itemtypes(),
            'selectable_itemtypes': schema.get_selectable_itemtypes(),
        }
    if kind == 'types':
        return schema.get_schema(tokens[1])
    if kind == 'sctypes':
        return schema.get_schema(tokens[1], self_contained=True)
    if kind == 'properties':
        return schema.get_property(tokens[1])
    if kind == 'datatypes':
        return schema.get_datatype(tokens[1])
    return None
def test_properties_should_contain_all_specific_properties(self):
    """Each itemtype's specific_properties must be a subset of its properties."""
    for itemtype, _ in schema.get_itemtypes():
        item = schema.get_schema(itemtype)
        missing = set(item['specific_properties']) - set(item['properties'])
        self.assertEqual(set(), missing)
def get_infobase():
    """Creates infobase object.

    Builds a DBStore-backed Infobase instance, wires optional loggers and
    HTTP listeners from ``config``, and returns the instance.
    """
    from infogami.infobase import infobase, dbstore, cache
    web.config.db_printing = True
    web.load()

    # hack to make cache work for local infobase connections
    cache.loadhook()
    web.ctx.ip = '127.0.0.1'

    # Persistent store built from this application's infobase schema.
    store = dbstore.DBStore(schema.get_schema())
    ib = infobase.Infobase(store, infobase.config.secret_key)

    # Optional write log: attach a Logger listener for all events.
    if config.writelog:
        ib.add_event_listener(Logger(config.writelog))

    ol = ib.get('openlibrary.org')

    # Book log: register write_booklog triggers for edition/author types on
    # the openlibrary.org site (trigger semantics defined elsewhere).
    if ol and config.booklog:
        global booklogger
        booklogger = Logger(config.booklog)
        ol.add_trigger('/type/edition', write_booklog)
        ol.add_trigger('/type/author', write_booklog2)

    # Forward events to HTTP listeners when configured.
    if ol and config.http_listeners:
        ol.add_event_listener(None, http_notify)

    return ib
def parse_data(cls, title, body, itemtype=u'Article'):
    """Parse a page body into a typed data dict for *itemtype*.

    Merges default values, embedded YAML, inline name/value pairs and
    sectioned text, then validates/converts via SchemaConverter.
    """
    body = body.replace('\r\n', '\n')  # normalize line endings first
    default_data = {
        'name': title,
        'schema': schema.get_itemtype_path(itemtype)
    }

    # collect
    yaml_data = cls.parse_schema_yaml(body)
    body_data = pairs_to_dict((m.group('name'), m.group('value'))
                              for m in re.finditer(cls.re_data, body))
    # Articles (and any subtype of Article) default to the 'articleBody'
    # section; everything else falls back to 'longDescription'.
    if itemtype == u'Article' or u'Article' in schema.get_schema(
            itemtype)[u'ancestors']:
        default_section = u'articleBody'
    else:
        default_section = u'longDescription'
    section_data = cls.parse_sections(body, default_section)

    # merge — NOTE(review): later dicts presumably win on key collisions;
    # confirm against merge_dicts before relying on precedence.
    data = merge_dicts([default_data, yaml_data, body_data, section_data])

    # validation and type conversion
    typed = schema.SchemaConverter.convert(itemtype, data)
    return typed
def load(self):
    """Look up the schema entity addressed by self.path, or None."""
    tokens = self.path.split('/')[1:]
    # The bare '/types' listing is the only zero-argument endpoint.
    if tokens[0] == 'types' and len(tokens) == 1:
        return {
            'id': 'types',
            'itemtypes': schema.get_itemtypes(),
            'selectable_itemtypes': schema.get_selectable_itemtypes(),
        }
    # Single-argument endpoints dispatch through a lookup table.
    lookups = {
        'types': lambda name: schema.get_schema(name),
        'sctypes': lambda name: schema.get_schema(name, self_contained=True),
        'properties': lambda name: schema.get_property(name),
        'datatypes': lambda name: schema.get_datatype(name),
    }
    lookup = lookups.get(tokens[0])
    if lookup is None:
        return None
    return lookup(tokens[1])
def test_humane_labels(self):
    """Type label plus plural/singular humane property labels."""
    politician = schema.get_schema('Politician')
    self.assertEqual(u'Politician', politician['label'])
    plural = schema.humane_property('Politician', 'politicalParty', True)
    self.assertEqual(u'Politicians', plural)
    singular = schema.humane_property('Politician', 'politicalParty')
    self.assertEqual(u'Political Party', singular)
def load(self):
    """Return the schema object referenced by self.path, or None."""
    tokens = self.path.split("/")[1:]
    kind = tokens[0]
    if kind == "types":
        # Bare '/types' yields the listing; '/types/<name>' a single type.
        if len(tokens) == 1:
            return {
                "id": "types",
                "itemtypes": schema.get_itemtypes(),
                "selectable_itemtypes": schema.get_selectable_itemtypes(),
            }
        return schema.get_schema(tokens[1])
    if kind == "sctypes":
        return schema.get_schema(tokens[1], self_contained=True)
    if kind == "properties":
        return schema.get_property(tokens[1])
    if kind == "datatypes":
        return schema.get_datatype(tokens[1])
    return None
def setUp(self):
    # Load a custom schema fragment on top of the defaults so these tests
    # can exercise a user-defined type ("Politician") and its property
    # ("politicalParty").
    super(CustomTypeAndPropertyTest, self).setUp()
    schema.SCHEMA_TO_LOAD.append({
        "datatypes": {},
        "properties": {
            "politicalParty": {
                "comment": "Political party.",
                "comment_plain": "Political party.",
                "domains": ["Thing"],
                "id": "politicalParty",
                "label": "Political Party",
                "reversed_label": "%s",
                "ranges": ["Text"]
            }
        },
        "types": {
            "Politician": {
                "ancestors": ["Thing", "Person"],
                "comment": "",
                "comment_plain": "",
                "id": "Politician",
                "label": "Politician",
                "specific_properties": ["politicalParty"],
                "subtypes": [],
                "supertypes": ["Person"],
                "url": "http://www.ecogwiki.com/sp.schema/types/Politician"
            }
        }
    })
    # Resolve both the built-in supertype and the custom type for the tests.
    self.person = schema.get_schema('Person')
    self.politician = schema.get_schema('Politician')
def load(self):
    """Resolve self.path to a type listing, type, property or datatype."""
    tokens = self.path.split('/')[1:]
    kind, rest = tokens[0], tokens[1:]
    if kind == 'types' and not rest:
        return {'id': 'types', 'values': schema.get_itemtypes()}
    if kind == 'types':
        return schema.get_schema(rest[0])
    if kind == 'properties':
        return schema.get_property(rest[0])
    if kind == 'datatypes':
        return schema.get_datatype(rest[0])
    return None
def parse_data(cls, title, body, itemtype=u'Article'):
    """Parse a wiki page body into a typed data dict for *itemtype*."""
    normalized = body.replace('\r\n', '\n')

    default_data = {
        'name': title,
        'schema': schema.get_itemtype_path(itemtype),
    }

    # collect
    yaml_data = cls.parse_schema_yaml(normalized)
    pairs = ((m.group('name'), m.group('value'))
             for m in re.finditer(cls.re_data, normalized))
    body_data = pairs_to_dict(pairs)

    is_article = (itemtype == u'Article'
                  or u'Article' in schema.get_schema(itemtype)[u'ancestors'])
    default_section = u'articleBody' if is_article else u'longDescription'
    section_data = cls.parse_sections(normalized, default_section)

    # merge
    data = merge_dicts([default_data, yaml_data, body_data, section_data])

    # validation and type conversion
    return schema.SchemaConverter.convert(itemtype, data)
def parse_data(cls, title, body, itemtype=u"Article"):
    """Parse a page body into a typed data dict for *itemtype*.

    Combines defaults, embedded YAML, inline name/value pairs and section
    text, then validates/converts the merged dict via SchemaConverter.
    """
    body = body.replace("\r\n", "\n")  # normalize line endings first
    default_data = {"name": title, "schema": schema.get_itemtype_path(itemtype)}

    # collect
    yaml_data = cls.parse_schema_yaml(body)
    body_data = pairs_to_dict((m.group("name"), m.group("value"))
                              for m in re.finditer(cls.re_data, body))
    # Articles (and subtypes of Article) use 'articleBody' as the default
    # section name; other types use 'longDescription'.
    if itemtype == u"Article" or u"Article" in schema.get_schema(itemtype)[u"ancestors"]:
        default_section = u"articleBody"
    else:
        default_section = u"longDescription"
    section_data = cls.parse_sections(body, default_section)

    # merge — NOTE(review): precedence between the four sources depends on
    # merge_dicts; confirm before relying on collision behavior.
    data = merge_dicts([default_data, yaml_data, body_data, section_data])

    # validation and type conversion
    typed = schema.SchemaConverter.convert(itemtype, data)
    return typed
def setUp(self):
    """Register a minimal custom datatype, property and type for the tests."""
    super(SimpleCustomTypeAndPropertyTest, self).setUp()
    fragment = {
        "datatypes": {
            "ISBN2": {"comment": "ISBN 2"},
        },
        "properties": {
            "politicalParty": {"comment": "A political party."},
        },
        "types": {
            "Politician": {
                "supertypes": ["Person"],
                "specific_properties": ["politicalParty"],
                "comment": "A political party.",
            },
        },
    }
    schema.SCHEMA_TO_LOAD.append(fragment)
    self.dtype = schema.get_datatype('ISBN2')
    self.item = schema.get_schema('Politician')
    self.prop = schema.get_property('politicalParty')
def test_humane_labels(self):
    """Checks the type label and both humane property label forms."""
    label = schema.get_schema('Politician')['label']
    self.assertEqual(u'Politician', label)

    plural_label = schema.humane_property('Politician', 'politicalParty', True)
    self.assertEqual(u'Politicians', plural_label)

    singular_label = schema.humane_property('Politician', 'politicalParty')
    self.assertEqual(u'Political Party', singular_label)
def test_get_schema(avro_schema: str, capsys: CaptureFixture) -> None:
    """Fetching the Avro schema prints a confirmation plus the schema body."""
    schema.get_schema(PROJECT_ID, AVRO_SCHEMA_ID)
    captured, _ = capsys.readouterr()
    assert "Got a schema" in captured
    assert f"{avro_schema}" in captured
# -*- coding: utf-8 -*- """ Index ===== Index constrói índices de documentos com base num esquema que defina a sua estrutura. """ try: from schema import get_schema SCHEMA = get_schema() except ImportError: print "Ocorreu um erro ao importar o SCHEMA padrão." raise try: import os from whoosh import index except ImportError: print "Ocorreu um erro ao importar a biblioteca Whoosh." raise try: from constants import INDEXDIR, INDEXNAME
def get_security_enabled_fields(schema_name):
    """Return the security-enabled fields parsed from the named schema."""
    # Local renamed so it no longer shadows a possible `schema` import.
    resolved = get_schema(schema_name)
    return parse(resolved, [""])
MONGO_DBNAME = os.environ['MONGO_DBNAME'] AWS_ACCESS_KEY = os.getenv('AWS_ACCESS_KEY', "") AWS_SECRET_KEY = os.getenv('AWS_SECRET_KEY', "") AWS_S3_BUCKET_NAME = os.getenv('AWS_S3_BUCKET_NAME', "") S3_ATTACHMENTS = os.getenv('S3_ATTACHMENTS', False) public_methods = ["GET", "POST", "DELETE"] ITEM_METHODS = ["GET", "PATCH", "PUT", "DELETE"] RENDERERS = [ 'eve.render.JSONRenderer' ] users = { "schema": get_schema('users_schema'), "authentication": AssignSecAuth } def register_resources(rscs): if rscs == []: raise Exception('Please define resources in settings.py') else: for rsc in rscs: rsc_read = { "schema": get_schema(rsc), "datasource": { "aggregation": { "pipeline": [ {"$match": {"_id": "$id"}} ]
def setup_dgraph(client):
    """Apply the application schema to the Dgraph cluster via *client*."""
    # Fetch the schema once instead of calling schema.get_schema() twice
    # (once for the log line, once for the Operation).
    current_schema = schema.get_schema()
    print('apply schema:')
    print(current_schema)
    op = pydgraph.Operation(schema=current_schema)
    client.alter(op)
def test_get_custom_plural_label_for_irregular_noun(self):
    """'Person' must pluralize irregularly to 'People'."""
    plural = schema.get_schema('Person')['plural_label']
    self.assertEqual(u'People', plural)
MONGO_USERNAME = os.getenv("MONGO_USERNAME", "") MONGO_PASSWORD = os.getenv("MONGO_PASSWORD", "") MONGO_AUTH_SOURCE = os.getenv("MONGO_AUTH_SOURCE", "admin") MONGO_DBNAME = os.environ['MONGO_DBNAME'] AWS_ACCESS_KEY = os.getenv('AWS_ACCESS_KEY', "") AWS_SECRET_KEY = os.getenv('AWS_SECRET_KEY', "") AWS_S3_BUCKET_NAME = os.getenv('AWS_S3_BUCKET_NAME', "") S3_ATTACHMENTS = os.getenv('S3_ATTACHMENTS', False) public_methods = ["GET", "POST", "DELETE"] ITEM_METHODS = ["GET", "PATCH", "PUT", "DELETE"] RENDERERS = ['eve.render.JSONRenderer'] users = {"schema": get_schema('users_schema'), "authentication": AssignSecAuth} def register_resources(rscs): if rscs == []: raise Exception('Please define resources in settings.py') else: for rsc in rscs: rsc_read = { "schema": get_schema(rsc), "datasource": { "aggregation": { "pipeline": [{ "$match": { "_id": "$id" }
def test_properties_order_should_follow_that_of_source(self):
    """Property order must mirror the source schema (first/last checked)."""
    properties = schema.get_schema('Article')['properties']
    self.assertEqual('additionalType', properties[0])
    self.assertEqual('longDescription', properties[-1])
def test_get_schema(avro_schema, capsys):
    """get_schema should report success and echo the Avro definition."""
    schema.get_schema(PROJECT_ID, AVRO_SCHEMA_ID)
    captured = capsys.readouterr()[0]
    assert "Got a schema" in captured
    assert f"{avro_schema}" in captured
def test_should_not_allow_legacy_spells(self):
    """Legacy property spellings must be rejected or absent."""
    self.assertRaises(KeyError, schema.get_property, 'contactPoints')
    person_properties = schema.get_schema('Person')['properties']
    self.assertTrue('awards' not in person_properties)
def test_property_inheritance(self):
    """Politician inherits every Person property and adds politicalParty."""
    person_props = set(schema.get_schema('Person')['properties'])
    politician_props = set(schema.get_schema('Politician')['properties'])
    self.assertEqual(set(), person_props - politician_props)
    self.assertEqual({u'politicalParty'}, politician_props - person_props)
def parse_content(parsed_feed):
    """Parses content"""
    # Project each feed entry onto the keys listed in the content schema.
    content_schema = schema.get_schema('content')
    for entry in parsed_feed['entries']:
        picked = {}
        for key in content_schema:
            picked[key] = entry.get(key)
        yield picked
def test_self_contained_schema(self):
    """A self-contained schema embeds full property objects inline."""
    person = schema.get_schema('Person', True)
    url_prop = person['properties']['url']
    self.assertEqual(dict, type(url_prop))
    self.assertEqual([0, 0], url_prop['cardinality'])
    self.assertEqual(['URL'], url_prop['type']['ranges'])
# URL-pattern -> infobase type registrations for the wiki's path namespace.
types.register_type('^/a/[^/]*$', '/type/author')
types.register_type('^/b/[^/]*$', '/type/edition')
types.register_type('^/l/[^/]*$', '/type/language')
types.register_type('^/works/[^/]*$', '/type/work')
types.register_type('^/subjects/[^/]*$', '/type/subject')
types.register_type('^/publishers/[^/]*$', '/type/publisher')
types.register_type('^/usergroup/[^/]*$', '/type/usergroup')
# NOTE(review): '/type/permision' looks misspelled ('permission') — confirm
# against the type registry before changing; the string is load-bearing.
types.register_type('^/permission/[^/]*$', '/type/permision')
types.register_type('^/(css|js)/[^/]*$', '/type/rawtext')

# set up infobase schema. required when running in standalone mode.
import schema
dbstore.default_schema = schema.get_schema()

# this adds /show-marc/xxx page to infogami
import showmarc

# add zip and tuple to the list of public functions
public(zip)
public(tuple)

web.template.Template.globals['NEWLINE'] = "\n"

# Remove movefiles install hook. openlibrary manages its own files.
infogami._install_hooks = [h for h in infogami._install_hooks if h.__name__ != "movefiles"]

class Author(client.Thing):
    # "%s" is filled with the cover URL; the query flag requests the
    # change-cover variant. (Class may continue beyond this view.)
    photo_url_pattern = "%s?m=change_cover"
def assertConformsToNamedSchema(self, obj, schema_name, strict=False):
    """Assert that *obj* conforms to the schema registered as *schema_name*."""
    named_schema = get_schema(schema_name)
    if named_schema:
        self.assertConformsToSchema(obj, named_schema, strict)
    else:
        raise AssertionError('Schema {0} is unknown'.format(schema_name))
def test_properties_order_should_follow_that_of_source(self):
    """First and last Article properties must match the source ordering."""
    props = schema.get_schema('Article')['properties']
    self.assertEqual('about', props[0])
    self.assertEqual('wordCount', props[-1])
def test_should_not_allow_legacy_spells(self):
    """Deprecated spellings ('contactPoints', 'awards') must be absent."""
    self.assertRaises(KeyError, schema.get_property, 'contactPoints')
    props = schema.get_schema('Person')['properties']
    self.assertTrue('awards' not in props)
def setUp(self):
    # Load the sample custom schema file so the Politician type exists,
    # then resolve both the built-in and custom types for the tests.
    super(CustomTypeAndPropertyTest, self).setUp()
    schema.SCHEMA_TO_LOAD.append('schema-custom.json.sample')
    self.person = schema.get_schema('Person')
    self.politician = schema.get_schema('Politician')
def test_properties_should_contain_all_specific_properties(self):
    # Every registered itemtype must list each of its specific_properties
    # inside its full 'properties' collection (empty set difference).
    for t, _ in schema.get_itemtypes():
        item = schema.get_schema(t)
        self.assertEqual(set(), set(item['specific_properties']).difference(item['properties']))
def parse_feed(feed_obj):
    """Parses feed"""
    feed_schema = schema.get_schema('feed')
    # Conditional fetch: etag/modified let feedparser skip unchanged feeds.
    data = feedparser.parse(
        feed_obj.url,
        etag=feed_obj.etag,
        modified=feed_obj.modified_parsed,
    )
    result = {}
    for key in feed_schema:
        result[key] = data.get(key)
    return result
def main():
    """Run the Helm pipe with its metadata file and validation schema."""
    pipe_schema = schema.get_schema()
    pipe = HelmPipe(pipe_metadata='pipe.yml', schema=pipe_schema)
    pipe.run()