def get_fields(context):
    """Return the supermodel schema parsed from ``context.fields_model``.

    Falls back to the hard-coded default model when the stored XML cannot
    be parsed, and tags the resulting schema with the context.
    """
    xml_source = context.fields_model
    try:
        parsed = loadString(xml_source)
    except Exception:
        # Best-effort fallback: any parse failure yields the default model.
        parsed = loadString(MODEL_DEFAULT)
    result = parsed.schema
    result.setTaggedValue(CONTEXT_KEY, context)
    return result
def __call__(self):
    """Handle AJAX save POST.

    Validates the posted supermodel XML (well-formed XML, correct root
    and child tags, parseable by plone.supermodel), pretty-prints it and
    saves it.  Returns a JSON string with ``success`` / ``message`` keys.
    """
    if not self.authorized():
        raise Unauthorized
    source = self.request.form.get("source")
    if source:
        # Is it valid XML?
        try:
            root = etree.fromstring(source)
        except etree.XMLSyntaxError as e:
            # Use str(e): BaseException.message does not exist in
            # Python 3, so e.message.encode("utf8") would itself raise
            # AttributeError inside this handler.
            return dumps({
                "success": False,
                "message": "XMLSyntaxError: {0}".format(str(e)),
            })
        # a little more sanity checking, look at first two element levels
        basens = "{http://namespaces.plone.org/supermodel/schema}"
        if root.tag != basens + "model":
            return dumps({
                "success": False,
                "message": __(u"Error: root tag must be 'model'"),
            })
        # Iterate the element directly: Element.getchildren() is
        # deprecated and removed in recent lxml releases.
        for element in root:
            if element.tag != basens + "schema":
                return dumps({
                    "success": False,
                    "message": __(u"Error: all model elements must be 'schema'"),
                })
        # can supermodel parse it?
        # This is mainly good for catching bad dotted names.
        try:
            loadString(source)
        except SupermodelParseError as e:
            message = e.args[0].replace('\n File "<unknown>"', "")
            return dumps({
                "success": False,
                "message": u"SuperModelParseError: {0}".format(message),
            })
        # clean up formatting sins
        source = etree.tostring(
            root, pretty_print=True, xml_declaration=True, encoding="utf8")
        # and save
        self.save(source)
    self.request.response.setHeader("Content-Type", "application/json")
    return dumps({"success": True, "message": __(u"Saved")})
def parse_schema(xml):
    """Parse field-schema XML into a schema object.

    Blank input yields a fresh default schema; XML that cannot be parsed
    raises RuntimeError.
    """
    if not xml.strip():
        return new_schema()
    try:
        model = loadString(xml, policy=PKGNAME)
    except ExpatError:
        raise RuntimeError('could not parse field schema xml')
    return model.schema
def test_restricted_columns(self):
    """Read permissions hide restricted fields from anonymous users."""
    folder = self.new_temporary_folder()
    model = loadString(self.address_model)
    set_columns(model.schema, [
        ['firstname', 'lastname'],
        ['town'],
        ['country'],
    ])
    security.set_read_permissions(model.schema, {
        'firstname': 'cmf.ManagePortal',
        'lastname': 'zope2.View'
    })
    # Anonymous: only the publicly viewable field of column 0 remains.
    visible = get_schema_columns(model.schema, folder)
    self.assertEqual(len(visible), 3)
    self.assertEqual(visible[0].fields, ['lastname'])
    # Admin sees the managed field as well.
    with self.user('admin'):
        visible = get_schema_columns(model.schema, folder)
        self.assertEqual(len(visible), 3)
        self.assertEqual(visible[0].fields, ['lastname', 'firstname'])
def test_get_value(self, get_schema, get_utility):
    """get_value resolves plain fields directly and choice fields
    through their vocabulary (returning the term title, not the value).
    """
    model = """<?xml version='1.0' encoding='utf8'?>
    <model xmlns="http://namespaces.plone.org/supermodel/schema">
        <schema>
            <field name="text" type="zope.schema.TextLine">
            </field>
            <field name="choice" type="zope.schema.Choice">
                <vocabulary>vocabulary</vocabulary>
            </field>
        </schema>
    </model>"""
    vocabulary = schema.vocabulary.SimpleVocabulary([
        schema.vocabulary.SimpleTerm(value='1st', title=u'first'),
        schema.vocabulary.SimpleTerm(value='2nd', title=u'second')
    ])

    class MockUtility(object):
        # Stand-in vocabulary factory: always returns the fixture.
        def __call__(self, name):
            return vocabulary

    get_schema.return_value = loadString(model).schema
    get_utility.return_value = MockUtility()

    class TestPerson(Container):
        pass

    person = TestPerson()
    person.choice = '1st'
    person.text = 'test'
    # assertEqual: assertEquals is a deprecated alias (removed in
    # Python 3.12).
    self.assertEqual(u'test', get_value(self.request, person, 'text'))
    self.assertEqual(u'first', get_value(self.request, person, 'choice'))
    person.choice = '2nd'
    self.assertEqual(u'second', get_value(self.request, person, 'choice'))
def test_had_read_access(self):
    """has_read_access: open without a permission, gated once one is set."""
    model = loadString("""
    <model xmlns="http://namespaces.plone.org/supermodel/schema">
        <schema />
    </model>
    """)
    folder = self.new_temporary_folder()
    # No permission configured -> everyone may read.
    self.assertTrue(
        security.has_read_access(model.schema, 'fieldname', folder)
    )
    security.set_read_permissions(
        model.schema, {'fieldname': 'cmf.ManagePortal'}
    )
    # Anonymous is now locked out ...
    self.assertFalse(
        security.has_read_access(model.schema, 'fieldname', folder)
    )
    # ... while the admin still has access.
    with self.user('admin'):
        self.assertTrue(
            security.has_read_access(model.schema, 'fieldname', folder)
        )
def __call__(self):
    """ handle AJAX save post """
    # Python 2 variant of the AJAX save handler (note the legacy
    # ``except X, e`` syntax and ``e.message``).  Validates the posted
    # supermodel XML and returns JSON error payloads on failure.
    # NOTE(review): this definition appears truncated here -- no success
    # path / save call is visible after the last validation step.
    if not self.authorized():
        raise Unauthorized
    source = self.request.form.get('source')
    if source:
        # Is it valid XML?
        try:
            root = etree.fromstring(source)
        except etree.XMLSyntaxError, e:
            return dumps({
                'success': False,
                'message': "XMLSyntaxError: {0}".format(
                    e.message.encode('utf8'))
            })
        # a little more sanity checking, look at first two element levels
        basens = '{http://namespaces.plone.org/supermodel/schema}'
        if root.tag != basens + 'model':
            return dumps({
                'success': False,
                'message': __(u"Error: root tag must be 'model'")
            })
        for element in root.getchildren():
            if element.tag != basens + 'schema':
                return dumps({
                    'success': False,
                    'message': __(u"Error: all model elements must be 'schema'")
                })
        # can supermodel parse it?
        # This is mainly good for catching bad dotted names.
        try:
            loadString(source)
        except SupermodelParseError, e:
            # Strip the unhelpful "<unknown>" file reference from the
            # parser traceback text before returning it.
            message = e.args[0].replace('\n File "<unknown>"', '')
            return dumps({
                'success': False,
                'message': u"SuperModelParseError: {0}".format(message)
            })
def test_write_selectable_schema(self):
    """The selectable flag survives a serialize round-trip."""
    model = loadString(self.selectable_schema)
    set_selectable_fields(model.schema, ['spare'])
    serialized = self.deprettify(serializeSchema(model.schema))
    self.assertIn(
        '<people:column selectable="true"><people:item>spare',
        serialized
    )
def test_load_selectable_schema(self):
    """Only columns holding exactly one field may be selectable."""
    model = loadString(self.selectable_schema)
    selectable = get_selectable_fields(model.schema)
    self.assertEqual(sorted(selectable), ['works'])
def valid_xml_schema(xml):
    """Validate that ``xml`` is parseable field-schema XML.

    Returns True when valid (or when validation is bypassed for empty /
    default input); raises ``schema.ValidationError`` otherwise.

    Fix: the original ``return loadString(xml)`` returned the parsed
    model object instead of True and left the trailing ``return True``
    unreachable; now the function consistently returns a boolean.
    """
    if not xml or xml == DEFAULT_MODEL_XML:
        return True  # bypass
    try:
        loadString(xml)
    except ExpatError:
        raise schema.ValidationError('could not parse field schema xml')
    return True
def load_model(xml, cache_key=None):
    """Load supermodel instance from XML source with optional cache key
    (ZCA lookups require exact schema instance to match lookups)
    """
    try:
        # Fast path: a previously parsed model for this key.  When
        # cache_key is None this raises KeyError unless None itself was
        # cached -- note no explicit guard here.
        return SCHEMA_CACHE[cache_key]
    except KeyError:
        # Split source into the main schemata and additional schemata.
        schemata, additional_schemata = split_schema(xml)
        additional = loadString(additional_schemata, policy='collective.flow')
        try:
            # Expose the additional schemata on ``current`` (presumably a
            # thread-local holder -- confirm) while the main model is
            # parsed, so cross-schema references can resolve.
            current.model = additional
            model = loadString(schemata, policy='collective.flow')
            model.schemata.update(additional.schemata)
        finally:
            # Always clear the holder, even when parsing fails.
            current.model = None
        if cache_key is not None:
            SCHEMA_CACHE[cache_key] = model
        return model
def test_load_details_schema(self):
    """Detail fields are grouped by their configured position."""
    model = loadString(self.details_schema)
    expected = {
        'left': ['image'],
        'right': ['lastname', 'firstname']
    }
    self.assertEqual(get_detail_fields(model.schema), expected)
def get_model(data, context): schema = None # if schema is set on context it has priority if data is not None: try: schema = loadString(data).schema except SupermodelParseError: # pragma: no cover pass # 2nd we try aquire the model if not schema: nav_root = api.portal.get_navigation_root(context) schema = nav_root.get('easyform_model_default.xml') # finally we fall back to the hardcoded example if not schema: schema = loadString(MODEL_DEFAULT).schema schema.setTaggedValue(CONTEXT_KEY, context) return schema
def test_load_column_schema(self):
    """Column groups and custom titles are read back from the model."""
    model = loadString(self.column_xml)
    self.assertEqual(
        [['first', 'second'], ['fourth']],
        get_columns(model.schema)
    )
    self.assertEqual(
        [None, 'test'],
        get_custom_column_titles(model.schema)
    )
def addToolkitFields(self):
    """Build one form parameter group per available toolkit (algorithm),
    bucketed by algorithm category.

    In display mode, toolkits not selected on the context are skipped.
    Toolkits whose stored schema fails to parse are logged and skipped.
    """
    # FIXME: This relies on the order the vocabularies are returned, which
    # shall be fixed.
    vocab = getUtility(
        IVocabularyFactory,
        "org.bccvl.site.algorithm_category_vocab")(self.context)
    # One bucket per category; the leading SimpleTerm(None) bucket
    # collects toolkits without a category.
    groups = OrderedDict((cat.value, [])
                         for cat in chain((SimpleTerm(None),), vocab))
    # TODO: only sdms have functions at the moment ,... maybe sptraits as
    # well?
    func_vocab = getUtility(IVocabularyFactory, name=self.func_vocab_name)
    functions = getattr(self.context, self.func_select_field, None) or ()
    # TODO: could also use uuidToObject(term.value) instead of relying on
    # BrainsVocabluary terms
    for toolkit in (term.brain.getObject()
                    for term in func_vocab(self.context)):
        if self.mode == DISPLAY_MODE and not self.is_toolkit_selected(
                toolkit.UID(), functions):
            # filter out unused algorithms in display mode
            continue
        # FIXME: need to cache form schema
        try:
            # FIXME: do some schema caching here
            parameters_model = loadString(toolkit.schema)
        except Exception as e:
            LOG.fatal("couldn't parse schema for %s: %s", toolkit.id, e)
            continue
        parameters_schema = parameters_model.schema
        param_group = ExperimentParamGroup(
            self.context,
            self.request,
            self)
        param_group.__name__ = "parameters_{}".format(toolkit.UID())
        # param_group.prefix = ''+ form.prefix?
        param_group.toolkit = toolkit.UID()
        param_group.schema = parameters_schema
        # param_group.prefix = "{}{}.".format(self.prefix, toolkit.id)
        # param_group.fields = Fields(parameters_schema, prefix=toolkit.id)
        param_group.label = u"configuration for {}".format(toolkit.title)
        if len(parameters_schema.names()) == 0:
            param_group.description = u"No configuration options"
        groups[toolkit.algorithm_category].append(param_group)
    # join the lists in that order
    self.param_groups = {
        self.func_select_field: (tuple(groups[None]) +
                                 tuple(groups['profile']) +
                                 tuple(groups['machineLearning']) +
                                 tuple(groups['statistical']) +
                                 tuple(groups['geographic']))
    }
def __init__(self, context, request, name=u'editschema', title=None):
    """Schema-editing context wrapping a toolkit's stored model."""
    super(ToolkitSchemaContext, self).__init__(context, request, name, title)
    self.toolkit = context
    parsed_model = loadString(self.toolkit.schema)
    self.schema = parsed_model.schema
    self.Title = lambda: u'Toolkit Schema'
    # Suppress the green edit border for anything in the type control
    # panel.
    request.set('disable_border', 1)
def test_write_title_schema(self):
    """Title fields serialize in the order they were set."""
    model = loadString(self.title_xml)
    set_title_fields(model.schema, ['stop', 'hammertime'])
    serialized = serializeSchema(model.schema)
    self.assertIn('<people:item>stop</people:item>', serialized)
    self.assertIn('<people:item>hammertime</people:item>', serialized)
    stop_at = serialized.find('<people:item>stop</people:item>')
    hammer_at = serialized.find('<people:item>hammertime</people:item>')
    self.assertTrue(stop_at < hammer_at)
def __init__(self, *args, **kw):
    """Build the zope schema from the stored model, or fall back to the
    default schema; coerce field ``__name__`` values to native ``str``.
    """
    super(Schema, self).__init__(*args, **kw)
    if self.model:
        self.schema = supermodel.loadString(self.model).schema
    else:
        self.schema = DefaultSchema
    # Normalize unicode field names to str in place.
    for field_name in schema.getFields(self.schema):
        field = self.schema[field_name]
        field.__name__ = str(field.__name__)
def test_one_column(self):
    """A single configured column yields one column descriptor."""
    model = loadString(self.address_model)
    set_columns(model.schema, [['lastname']])
    columns = unrestricted_get_schema_columns(model.schema)
    self.assertEqual(len(columns), 1)
    column = columns[0]
    self.assertEqual(column.schema, model.schema)
    self.assertEqual(column.fields, ['lastname'])
    self.assertEqual(column.titles, [u'Nachname'])
    self.assertEqual(column.contains_title_field, False)
def make_form_content(context, content):
    """Create and publish an EasyForm in ``context`` from content data."""
    prologue_text = content.get('description').replace(
        '[$NEW_LINE$]', '<br />')
    form_obj = createAndPublishContentInContainer(
        context, 'EasyForm',
        title=content.get('title'),
        submitLabel=u'Send')
    form_obj.formPrologue = t2r(
        u'<p>{prologue}</p>'.format(prologue=prologue_text))
    # Install the fields schema.
    fields_schema = loadString(
        make_form_fields_model(form_obj, content)).schema
    set_fields(form_obj, fields_schema)
    # Install the actions schema, tagged with the form as its context.
    actions_schema = loadString(
        make_form_actions_model(form_obj, content)).schema
    actions_schema.setTaggedValue(CONTEXT_KEY, form_obj)
    set_actions(form_obj, actions_schema)
    return form_obj
def addToolkitFields(self):
    """Build one form parameter group per available toolkit (algorithm),
    bucketed by algorithm category.

    In display mode, toolkits not selected on the context are skipped.
    Toolkits whose stored schema fails to parse are logged and skipped.
    """
    # FIXME: This relies on the order the vocabularies are returned, which
    # shall be fixed.
    vocab = getUtility(IVocabularyFactory,
                       "org.bccvl.site.algorithm_category_vocab")(
        self.context)
    # One bucket per category; the leading SimpleTerm(None) bucket
    # collects toolkits without a category.
    groups = OrderedDict(
        (cat.value, []) for cat in chain((SimpleTerm(None), ), vocab))
    # TODO: only sdms have functions at the moment ,... maybe sptraits as
    # well?
    func_vocab = getUtility(IVocabularyFactory, name=self.func_vocab_name)
    functions = getattr(self.context, self.func_select_field, None) or ()
    # TODO: could also use uuidToObject(term.value) instead of relying on
    # BrainsVocabluary terms
    for toolkit in (term.brain.getObject()
                    for term in func_vocab(self.context)):
        if self.mode == DISPLAY_MODE and not self.is_toolkit_selected(
                toolkit.UID(), functions):
            # filter out unused algorithms in display mode
            continue
        # FIXME: need to cache form schema
        try:
            # FIXME: do some schema caching here
            parameters_model = loadString(toolkit.schema)
        except Exception as e:
            LOG.fatal("couldn't parse schema for %s: %s", toolkit.id, e)
            continue
        parameters_schema = parameters_model.schema
        param_group = ExperimentParamGroup(self.context, self.request, self)
        param_group.__name__ = "parameters_{}".format(toolkit.UID())
        # param_group.prefix = ''+ form.prefix?
        param_group.toolkit = toolkit.UID()
        param_group.schema = parameters_schema
        # param_group.prefix = "{}{}.".format(self.prefix, toolkit.id)
        # param_group.fields = Fields(parameters_schema, prefix=toolkit.id)
        param_group.label = u"configuration for {}".format(toolkit.title)
        if len(parameters_schema.names()) == 0:
            param_group.description = u"No configuration options"
        groups[toolkit.algorithm_category].append(param_group)
    # join the lists in that order
    self.param_groups = {
        self.func_select_field: (tuple(groups[None]) +
                                 tuple(groups['profile']) +
                                 tuple(groups['machineLearning']) +
                                 tuple(groups['statistical']) +
                                 tuple(groups['geographic']))
    }
def test_write_order_schema(self):
    """Explicit field order is preserved in the serialized XML."""
    model = loadString(self.order_xml)
    set_order(model.schema, ['third', 'second', 'first'])
    serialized = serializeSchema(model.schema)
    positions = []
    for name in ('third', 'second', 'first'):
        marker = '<people:item>{0}</people:item>'.format(name)
        self.assertIn(marker, serialized)
        positions.append(serialized.find(marker))
    self.assertTrue(positions[0] < positions[1] < positions[2])
def test_merged_columns(self):
    """Fields merged into one column keep the schema's field order."""
    model = loadString(self.address_model)
    set_columns(model.schema, [['country', 'lastname']])
    columns = unrestricted_get_schema_columns(model.schema)
    self.assertEqual(len(columns), 1)
    merged = columns[0]
    # Ordered by field order, not by the order passed to set_columns.
    self.assertEqual(merged.fields, ['lastname', 'country'])
    self.assertEqual(merged.titles, [u'Nachname', u'Land'])
    self.assertEqual(merged.contains_title_field, False)
def lookupModel(self):
    """Return the supermodel Model for this FTI.

    Resolution order: inline model source, then model file, then a Model
    wrapping the looked-up schema.  Raises ValueError when none of the
    three is configured.
    """
    if self.model_source:
        return loadString(self.model_source, policy=self.schema_policy)
    if self.model_file:
        path = self._absModelFile()
        return loadFile(path, reload=True, policy=self.schema_policy)
    if self.schema:
        return Model({u"": self.lookupSchema()})
    raise ValueError(
        "Neither model source, nor model file, nor schema is "
        "specified in FTI %s" % self.getId())
def test_read_permissions(self):
    """Read permissions can be written and read back from a schema."""
    model = loadString("""
    <model xmlns="http://namespaces.plone.org/supermodel/schema">
        <schema />
    </model>
    """)
    security.set_read_permissions(
        model.schema, {'fieldname': 'zope2.View'}
    )
    self.assertEqual(
        security.get_read_permissions(model.schema),
        {'fieldname': 'zope2.View'}
    )
def __call__(self, name, module):
    """Resolve a named schema from the currently-parsed model.

    Falls back to an empty default schema when the name cannot be
    resolved; returns None for an empty name.
    """
    if name:
        name = name.strip()
        try:
            # ``current.model`` is set while a model is being parsed
            # elsewhere; AttributeError covers the unset (None) case.
            return current.model.schemata[name]
        except (AttributeError, KeyError):
            # NOTE(review): 'V' and '__file__' are excluded from the
            # error log -- presumably spurious lookups made by the
            # parsing machinery; confirm.
            if name not in ['V', '__file__']:
                logger.exception(
                    u'Schema "{0:s}" did not resolve:'.format(name),
                )
            # Fall back to the unnamed schema of the default model.
            return loadString(
                DEFAULT_SCHEMA,
                policy='collective.flow',
            ).schemata['']
    return None
def lookupModel(self):
    """Return this FTI's Model from source, file, or schema lookup.

    Raises ValueError when no model source, model file, or schema is
    configured.
    """
    if self.model_source:
        return loadString(self.model_source, policy=self.schema_policy)
    elif self.model_file:
        return loadFile(self._absModelFile(),
                        reload=True,
                        policy=self.schema_policy)
    elif self.schema:
        looked_up = self.lookupSchema()
        return Model({u"": looked_up})
    raise ValueError(
        "Neither model source, nor model file, nor schema is specified "
        "in FTI %s" % self.getId())
def updateActionsSchema(context):
    """ Add default actions only if not present. """
    default_xml = readSchemaFromFile(ACTIONS_DEFAULT_FILENAME)
    register_xml = etree.fromstring(default_xml)
    form_xml = etree.fromstring(context.actions_model)
    existing_names = loadString(context.actions_model).schema.names()
    candidates = register_xml.findall(
        ".//{http://namespaces.plone.org/supermodel/schema}field")
    target_schema = form_xml.findall(
        ".//{http://namespaces.plone.org/supermodel/schema}schema")[0]
    # Append only fields whose names are not already on the form.
    for candidate in candidates:
        if candidate.get("name") not in existing_names:
            target_schema.append(candidate)
    context.actions_model = etree.tostring(form_xml)
def updateFieldsSchema(context):
    """ Add waiting list field, if waiting list is set and not present. """
    default_xml = readSchemaFromFile(FIELDS_DEFAULT_FILENAME)
    register_xml = etree.fromstring(default_xml)
    form_xml = etree.fromstring(context.fields_model)
    existing_names = loadString(context.fields_model).schema.names()
    candidates = register_xml.findall(
        ".//{http://namespaces.plone.org/supermodel/schema}field")
    target_schema = form_xml.findall(
        ".//{http://namespaces.plone.org/supermodel/schema}schema")[0]
    # Append only fields whose names are not already on the form.
    for candidate in candidates:
        if candidate.get("name") not in existing_names:
            target_schema.append(candidate)
    context.fields_model = etree.tostring(form_xml)
def test_load_render_options(self):
    """List and detail render options are loaded independently."""
    model = loadString(self.render_options_xml)
    self.assertEqual(
        get_list_render_options(model.schema),
        {'one': {'image_size': 'small'}}
    )
    self.assertEqual(
        get_detail_render_options(model.schema),
        {'one': {'image_size': 'large'}}
    )
def test_write_details_schema(self):
    """Detail fields write a single details element, items in order."""
    model = loadString(self.details_schema)
    set_detail_fields(model.schema, {
        'top': ['image', 'lastname', 'firstname']
    })
    serialized = self.deprettify(serializeSchema(model.schema))
    self.assertIn('<people:details position="top">', serialized)
    self.assertIn('<people:item>lastname</people:item>', serialized)
    self.assertIn('<people:item>firstname</people:item>', serialized)
    lastname_at = serialized.find('<people:item>lastname</people:item>')
    firstname_at = serialized.find('<people:item>firstname</people:item>')
    self.assertTrue(lastname_at < firstname_at)
    self.assertEqual(serialized.count('<people:details'), 1)
def test_write_column_schema(self):
    """Columns and custom titles serialize together, items in order."""
    model = loadString(self.column_xml)
    set_columns(model.schema, [['first'], ['third', 'fourth']])
    set_custom_column_titles(model.schema, ['one', 'two'])
    serialized = self.deprettify(serializeSchema(model.schema))
    self.assertIn(
        '<people:column title="one"><people:item>first<', serialized)
    self.assertIn(
        '<people:column title="two"><people:item>third<', serialized)
    self.assertIn(
        '</people:item><people:item>fourth</people:item>', serialized)
    offsets = [
        serialized.find('<people:item>{0}</people:item>'.format(name))
        for name in ('first', 'third', 'fourth')
    ]
    self.assertTrue(offsets[0] < offsets[1] < offsets[2])
def test_contains_title_field(self):
    """contains_title_field flags columns overlapping the title fields."""
    model = loadString(self.address_model)

    def first_column():
        # Recompute after each mutation of the schema tags.
        return unrestricted_get_schema_columns(model.schema)[0]

    set_columns(model.schema, [['country', 'lastname']])
    self.assertEqual(first_column().contains_title_field, False)
    set_title_fields(model.schema, ['lastname'])
    self.assertEqual(first_column().contains_title_field, True)
    set_title_fields(model.schema, ['firstname'])
    self.assertEqual(first_column().contains_title_field, False)
    set_columns(model.schema, [['firstname']])
    self.assertEqual(first_column().contains_title_field, True)
def test_write_render_options(self):
    """List and detail render options serialize independently."""
    model = loadString(self.render_options_xml)
    set_list_render_options(
        model.schema, {'one': {'image_size': 'small'}})
    set_detail_render_options(
        model.schema, {'one': {'image_size': 'tiny'}})
    serialized = serializeSchema(model.schema)
    self.assertIn(
        '<people:item render-options="image_size=small">one', serialized
    )
    self.assertIn(
        '<people:item render-options="image_size=tiny">one', serialized
    )
def toolkit_schema(schema):
    """Convert a toolkit's supermodel XML into a JSON-schema-like dict.

    Read-only fields and fields whose widget mode is 'hidden' are
    excluded from the result.
    """
    parameters_schema = loadString(schema).schema
    modes = {
        name: mode
        for ifc, name, mode in parameters_schema.queryTaggedValue(
            MODES_KEY, ())
    }
    properties = {}
    for name in parameters_schema.names():
        field = parameters_schema[name]
        if field.readonly:
            continue
        if modes.get(name, '') == 'hidden':
            continue
        # TODO: catch case, where field values come from a vocabulary
        properties[name] = {
            'type': type_to_string(field._type),
            'title': field.title,
            'default': field.default,
            'description': field.description,
        }
    return {'type': 'object', 'properties': properties}
def addToolkitFields(self): groups = [] # TODO: only sdms have functions at the moment ,... maybe sptraits as well? func_vocab = getUtility(IVocabularyFactory, name=self.func_vocab_name) functions = getattr(self.context, self.func_select_field, None) or () # TODO: could also use uuidToObject(term.value) instead of relying on BrainsVocabluary terms for toolkit in (term.brain.getObject() for term in func_vocab(self.context)): if self.mode == DISPLAY_MODE and not self.is_toolkit_selected(toolkit.UID(), functions): # filter out unused algorithms in display mode continue # FIXME: need to cache form schema try: # FIXME: do some schema caching here parameters_model = loadString(toolkit.schema) except Exception as e: LOG.fatal("couldn't parse schema for %s: %s", toolkit.id, e) continue parameters_schema = parameters_model.schema param_group = ExperimentParamGroup( self.context, self.request, self) param_group.__name__ = "parameters_{}".format(toolkit.UID()) #param_group.prefix = ''+ form.prefix? param_group.toolkit = toolkit.UID() param_group.schema = parameters_schema #param_group.prefix = "{}{}.".format(self.prefix, toolkit.id) #param_group.fields = Fields(parameters_schema, prefix=toolkit.id) param_group.label = u"configuration for {}".format(toolkit.title) if len(parameters_schema.names()) == 0: param_group.description = u"No configuration options" groups.append(param_group) self.param_groups = groups
def test_import_people_schema_validation(self):
    """A missing required column must raise ContentImportError.

    Fix: the original wrapped the call in try/except, so the test passed
    silently when NO exception was raised; assertRaises makes the
    expected failure mandatory.  It also used the Python-2-only
    ``except X, e`` syntax; the context-manager form works on both
    Python 2.7 and 3.
    """
    model = loadString("""<?xml version='1.0' encoding='utf8'?>
    <model xmlns="http://namespaces.plone.org/supermodel/schema"
           xmlns:people="http://namespaces.plone.org/supermodel/people">
        <schema>
            <people:title>
                <people:item>name</people:item>
            </people:title>
            <field name="name" type="zope.schema.TextLine">
                <title>Name</title>
            </field>
            <field name="age" type="zope.schema.Int" required="false">
                <title>Age</title>
                <required>False</required>
            </field>
        </schema>
    </model>""")
    with self.assertRaises(ContentImportError) as caught:
        validate_attribute_values(model.schema, {'age': 1})
    e = caught.exception
    self.assertIs(type(e), ContentImportError)
    self.assertEqual(e.colname, u'Name')
    self.assertEqual('Required column is missing', e.message)
def test_load_title_schema(self):
    """Title fields load in their configured order."""
    model = loadString(self.title_xml)
    title_fields = get_title_fields(model.schema)
    self.assertEqual(['foo', 'bar'], title_fields)
def onEnabled(self, theme, settings, dependenciesSettings):  # noqa
    """Apply theme-bundled site setup when the theme is enabled.

    Registers permissions and translation domains, imports a bundled
    GenericSetup profile, updates Dexterity FTI model sources, and
    copies resources.  When 'self-destruct' is enabled in the settings,
    each imported folder is deleted from the (writable) theme resource
    directory after it has been applied.
    """
    res = queryResourceDirectory(THEME_RESOURCE_NAME, theme)
    if res is None:
        return
    # We need to get settings by ourselves to avoid p.a.theming caching
    settings = getSettings(res)
    if not isEnabled(settings):
        return
    # Remove imported folders not-required on run-time
    autocleanup = (settings.get('self-destruct') or '').lower() in (
        'true', 'yes', 'on', '1')
    # Register permissions
    sm = getSiteManager()
    for key, value in _getPermissions(settings).items():
        util = sm.queryUtility(IPermission, name=key)
        if util is None:
            name = str('collective.themesitesetup.permission.' + key)
            util = LocalPermission(value, u'')
            util.id = key
            util.__name__ = name
            util.__parent__ = aq_base(sm)
            sm._setObject(name, util, set_owner=False, suppress_events=True)
            sm.registerUtility(util, provided=IPermission, name=key)
            addPermission(str(value))
    # Import GS profile
    directoryName = DEFAULT_ENABLED_PROFILE_NAME
    if 'install' in settings:
        directoryName = settings['install']
    directory = None
    if res.isDirectory(directoryName):
        directory = res[directoryName]
    if directory:
        tarball = createTarball(directory)
        portal_setup = api.portal.get_tool('portal_setup')
        portal_setup.runAllImportStepsFromProfile(
            None, purge_old=False, archive=tarball)
        # Self-destruct imported profile
        if autocleanup and IWritableResourceDirectory.providedBy(res):
            del res[directoryName]
    # Register locales
    localesDirectoryName = DEFAULT_ENABLED_LOCALES_NAME
    if 'locales' in settings:
        localesDirectoryName = settings['locales']
    if res.isDirectory(localesDirectoryName):
        catalogs = getMessageCatalogs(res[localesDirectoryName])
        for domain in catalogs:
            util = sm.queryUtility(ITranslationDomain, name=domain)
            if not isinstance(util, TranslationDomain):
                # No persistent domain utility yet: create and register.
                name = str('collective.themesitesetup.domain.' + domain)
                util = TranslationDomain()
                util.__name__ = name
                util.__parent__ = aq_base(sm)
                util.domain = domain
                sm._setObject(name, util,
                              set_owner=False, suppress_events=True)
                sm.registerUtility(util, provided=ITranslationDomain,
                                   name=domain)
            for language in catalogs[domain]:
                name = '.'.join([
                    'collective.themesitesetup.catalog',
                    res.__name__, domain, language
                ])
                # Replace any previously registered catalog of this name.
                if name in util:
                    try:
                        del util[name]
                    except ValueError:
                        pass
                util[name] = catalogs[domain][language]
        # Self-destruct imported catalogs
        if autocleanup and IWritableResourceDirectory.providedBy(res):
            del res[localesDirectoryName]
    # Update Dexterity models
    modelsDirectoryName = DEFAULT_ENABLED_MODELS_NAME
    if 'models' in settings:
        modelsDirectoryName = settings['models']
    overwrite = overwriteModels(settings)
    if res.isDirectory(modelsDirectoryName):
        types_tool = api.portal.get_tool('portal_types')
        directory = res[modelsDirectoryName]
        for name in directory.listDirectory():
            # Only <portal_type>.xml files that match an existing FTI.
            if not name.endswith('.xml') or not directory.isFile(name):
                continue
            fti = types_tool.get(name[:-4])
            if not fti:
                continue
            model = unicode(directory.readFile(name), 'utf-8', 'ignore')
            if fti.model_source == model:
                continue
            try:
                loadString(model, fti.schema_policy)  # fail for errors
            except SupermodelParseError:
                logger.error(
                    u'Error while parsing {0:s}/{1:s}/{2:s}'.format(
                        res.__name__, modelsDirectoryName, name))
                raise
            # Set model source when model is empty or override is enabled
            desc = DexterityFTIModificationDescription(
                'model_source', fti.model_source)
            if not fti.model_source:
                fti.model_source = model
                notify(ObjectModifiedEvent(fti, desc))
            elif not loadString(fti.model_source, fti.schema_policy).schema.names():  # noqa
                # Existing source parses to an empty schema: replace it.
                fti.model_source = model
                notify(ObjectModifiedEvent(fti, desc))
            elif overwrite:
                fti.model_source = model
                notify(ObjectModifiedEvent(fti, desc))
        # Self-destruct imported models
        if autocleanup and IWritableResourceDirectory.providedBy(res):
            del res[modelsDirectoryName]
    # Copy resources
    resourcesDirectoryName = DEFAULT_ENABLED_RESOURCES_NAME
    if 'resources' in settings:
        resourcesDirectoryName = settings['resources']
    purge = purgeResources(settings)
    overwrite = overwriteResources(settings)
    root = queryUtility(IResourceDirectory, name=u'persistent')
    if root and res.isDirectory(resourcesDirectoryName):
        copyResources(res[resourcesDirectoryName], root, purge, overwrite)
        # Invalidate site layout cache of plone.app.blocks
        portal_catalog = api.portal.get_tool('portal_catalog')
        portal_catalog._increment_counter()
        # Self-destruct imported resources
        if autocleanup and IWritableResourceDirectory.providedBy(res):
            del res[resourcesDirectoryName]
# schema policy, here to get an accurate schema.__module__ class DynamicSchemaPolicy(DefaultSchemaPolicy): """ Here to override default: schema loaded from serializations should have a __module__ attribute with a value of 'uu.dynamicschema.schema.generated' This should be registered for use as a named ISchemaPolicy utility in the global site manager. """ def module(self, schemaName, tree): global generated return generated.__name__ #empty schema loader using policy defined above: new_schema = lambda: loadString(DEFAULT_MODEL_XML, policy=PKGNAME).schema loaded = {} # cached signatures to transient schema objects def parse_schema(xml): if not xml.strip(): return new_schema() try: return loadString(xml, policy=PKGNAME).schema except ExpatError: raise RuntimeError('could not parse field schema xml') def copy_schema(schema): """
def submittraits(self):
    """Create and submit a species-traits experiment from a POST body.

    Validates the posted parameters (collecting errors via
    ``self.record_error``), creates the experiment content object,
    starts its job, and returns a dict with the experiment URL/UUID and
    the job ids.  Raises BadRequest on non-POST or validation failure.
    """
    # TODO: catch UNAuthorized correctly and return json error
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    # make sure we have the right context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # experiments location
        context = self.context.restrictedTraverse(
            defaults.EXPERIMENTS_FOLDER_ID)
    else:
        # custom context.... let's use in
        context = self.context
    # parse request body
    params = self.request.form
    # validate input
    # TODO: should validate type as well..... (e.g. string has to be
    # string)
    # TODO: validate dataset and layer id's existence if possible
    props = {}
    if params.get('species_list', None):
        props['species_list'] = params['species_list']
    else:
        self.record_error('Bad Request', 400,
                          'Missing parameter speciesList',
                          {'parameter': 'speciesList'})
    if not params.get('title', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter title',
                          {'parameter': 'title'})
    else:
        props['title'] = params['title']
    props['description'] = params.get('description', '')
    if not params.get('traits_data', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter traits_data',
                          {'parameter': 'traits_data'})
    else:
        # FIXME: should properly support source / id
        # for now only bccvl source is supported
        props['species_traits_dataset'] = params[
            'traits_data']['id']
        props['species_traits_dataset_params'] = {}
        # Keep only recognised column roles.
        for col_name, col_val in params.get("columns", {}).items():
            if col_val not in ('lat', 'lon', 'species', 'trait_con',
                               'trait_ord', 'trait_nom', 'env_var_con',
                               'env_var_cat', 'random_con', 'random_cat'):
                continue
            props['species_traits_dataset_params'][col_name] = col_val
        if not props['species_traits_dataset_params']:
            self.record_error('Bad Request', 400,
                              'Invalid values for columns',
                              {'parameter': 'columns'})
        # Check for species-level trait data i.e. species is not specified
        if 'species' not in props['species_traits_dataset_params'].values():
            props['species_list'] = []
    props['scale_down'] = params.get('scale_down', False)
    # env data is optional
    props['environmental_datasets'] = params.get('environmental_data', None)
    # NOTE(review): this reads species_traits_dataset_params, which is
    # only set when traits_data was supplied -- confirm KeyError cannot
    # occur here when traits_data is missing.
    if not (props['environmental_datasets']
            or 'env_var_con' not in props['species_traits_dataset_params'].values()
            or 'env_var_cat' not in props['species_traits_dataset_params'].values()):
        self.record_error('Bad Request', 400,
                          'No Environmental data selected',
                          {'parameter': 'environmental_datasets'})
    if params.get('modelling_region', ''):
        props['modelling_region'] = NamedBlobFile(
            data=json.dumps(params['modelling_region']))
    else:
        props['modelling_region'] = None
    if not params.get('algorithms', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter algorithms',
                          {'parameter': 'algorithms'})
    else:
        props['algorithms_species'] = {}
        props['algorithms_diff'] = {}
        funcs_env = getUtility(
            IVocabularyFactory, 'traits_functions_species_source')(context)
        funcs_species = getUtility(
            IVocabularyFactory, 'traits_functions_diff_source')(context)
        # FIXME: make sure we get the default values from our func object
        for algo_uuid, algo_params in params['algorithms'].items():
            if algo_params is None:
                algo_params = {}
            toolkit = uuidToObject(algo_uuid)
            toolkit_model = loadString(toolkit.schema)
            toolkit_schema = toolkit_model.schema
            # Fill in defaults for parameters not supplied by the client.
            func_props = {}
            for field_name in toolkit_schema.names():
                field = toolkit_schema.get(field_name)
                value = algo_params.get(field_name, field.missing_value)
                if value == field.missing_value:
                    func_props[field_name] = field.default
                else:
                    func_props[field_name] = value
            if algo_uuid in funcs_env:
                props['algorithms_species'][algo_uuid] = func_props
            elif algo_uuid in funcs_species:
                props['algorithms_diff'][algo_uuid] = func_props
            else:
                LOG.warn(
                    'Algorithm {} not in allowed list of functions'.format(
                        toolkit.id))
        if not (props['algorithms_species'] or props['algorithms_diff']):
            self.record_error('Bad Request', 400,
                              'Iinvalid algorithms selected',
                              {'parameter': 'algorithms'})
    if self.errors:
        raise BadRequest("Validation Failed")
    # create experiment with data as form would do
    # TODO: make sure self.context is 'experiments' folder?
    from plone.dexterity.utils import createContent, addContentToContainer
    experiment = createContent(
        "org.bccvl.content.speciestraitsexperiment", **props)
    experiment = addContentToContainer(context, experiment)
    experiment.parameters = dict(props['algorithms_species'])
    experiment.parameters.update(dict(props['algorithms_diff']))
    # FIXME: need to get resolution from somewhere
    IBCCVLMetadata(experiment)['resolution'] = 'Resolution30m'
    # submit newly created experiment
    # TODO: handle background job submit .... at this stage we wouldn't
    # know the model run job ids
    # TODO: handle submit errors and other errors that may happen above?
    # generic exceptions could behandled in returnwrapper
    retval = {
        'experiment': {
            'url': experiment.absolute_url(),
            'uuid': IUUID(experiment)
        },
        'jobs': [],
    }
    jt = IExperimentJobTracker(experiment)
    msgtype, msg = jt.start_job(self.request)
    if msgtype is not None:
        retval['message'] = {
            'type': msgtype,
            'message': msg
        }
    for result in experiment.values():
        jt = IJobTracker(result)
        retval['jobs'].append(jt.get_job().id)
    return retval
def submitsdm(self):
    """Create and start an SDM experiment from a POST request.

    Validates the submitted form parameters (title, occurrence_data,
    environmental_data, algorithms, ...), collecting any problems via
    ``self.record_error``; builds ``props`` from them; creates an
    ``org.bccvl.content.sdmexperiment`` object in the experiments
    folder; and starts its job(s) via ``IExperimentJobTracker``.

    Returns a dict with the experiment's url/uuid, the started result
    job ids, and an optional status message.

    Raises BadRequest when the request is not a POST or when any
    validation error was recorded.
    """
    # TODO: catch UNAuthorized correctly and return json error
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        self.record_error('Request must be POST', 400)
        raise BadRequest('Request must be POST')
    # make sure we have the right context
    if ISiteRoot.providedBy(self.context):
        # we have been called at site root... let's traverse to default
        # experiments location
        context = self.context.restrictedTraverse(
            defaults.EXPERIMENTS_FOLDER_ID)
    else:
        # custom context.... let's use in
        context = self.context
    # parse request body
    params = self.request.form
    # validate input
    # TODO: should validate type as well..... (e.g. string has to be
    # string)
    # TODO: validate dataset and layer id's existence if possible
    props = {}
    if not params.get('title', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter title',
                          {'parameter': 'title'})
    else:
        props['title'] = params['title']
    props['description'] = params.get('description', '')
    if not params.get('occurrence_data', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter occurrence_data',
                          {'parameter': 'occurrence_data'})
    else:
        # FIXME: should properly support source / id
        # for now only bccvl source is supported
        props['species_occurrence_dataset'] = params[
            'occurrence_data']['id']
    # FIXME: should properly support source/id for onw only bccvl source is
    # supported
    # NOTE(review): absence data is optional — no error recorded when missing
    props['species_absence_dataset'] = params.get(
        'absence_data', {}).get('id', None)
    props['scale_down'] = params.get('scale_down', False)
    if not params.get('environmental_data', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter environmental_data',
                          {'parameter': 'environmental_data'})
    else:
        props['environmental_datasets'] = params['environmental_data']
    if params.get('modelling_region', ''):
        # store the region geometry as a JSON blob on the experiment
        props['modelling_region'] = NamedBlobFile(
            data=json.dumps(params['modelling_region']))
    else:
        props['modelling_region'] = None
    if not params.get('algorithms', None):
        self.record_error('Bad Request', 400,
                          'Missing parameter algorithms',
                          {'parameter': 'algorithms'})
    else:
        portal = ploneapi.portal.get()
        props['functions'] = {}
        # FIXME: make sure we get the default values from our func object
        # For each requested algorithm, merge the caller-supplied values
        # with defaults taken from the toolkit's supermodel schema.
        for algo, algo_params in params['algorithms'].items():
            if algo_params is None:
                algo_params = {}
            toolkit = portal[defaults.FUNCTIONS_FOLDER_ID][algo]
            toolkit_model = loadString(toolkit.schema)
            toolkit_schema = toolkit_model.schema
            func_props = {}
            for field_name in toolkit_schema.names():
                field = toolkit_schema.get(field_name)
                value = algo_params.get(field_name, field.missing_value)
                if value == field.missing_value:
                    # not supplied by caller -> use the schema default
                    func_props[field_name] = field.default
                else:
                    func_props[field_name] = value
            props['functions'][IUUID(toolkit)] = func_props
    if self.errors:
        raise BadRequest("Validation Failed")
    # create experiment with data as form would do
    # TODO: make sure self.context is 'experiments' folder?
    from plone.dexterity.utils import createContent, addContentToContainer
    experiment = createContent("org.bccvl.content.sdmexperiment", **props)
    experiment = addContentToContainer(context, experiment)
    # TODO: check if props and algo params have been applied properly
    experiment.parameters = dict(props['functions'])
    # FIXME: need to get resolution from somewhere
    IBCCVLMetadata(experiment)['resolution'] = 'Resolution30m'
    # submit newly created experiment
    # TODO: handle background job submit .... at this stage we wouldn't
    # know the model run job ids
    # TODO: handle submit errors and other errors that may happen above?
    # generic exceptions could behandled in returnwrapper
    retval = {
        'experiment': {
            'url': experiment.absolute_url(),
            'uuid': IUUID(experiment)
        },
        'jobs': [],
    }
    jt = IExperimentJobTracker(experiment)
    msgtype, msg = jt.start_job(self.request)
    if msgtype is not None:
        retval['message'] = {
            'type': msgtype,
            'message': msg
        }
    # collect the job id of every result object created by the submit
    for result in experiment.values():
        jt = IJobTracker(result)
        retval['jobs'].append(jt.get_job().id)
    return retval
def demosdm(self):
    """Queue a demo SDM run plus climate-change projection for an ALA species.

    Expects a POST with an ``lsid`` form parameter identifying the
    species.  Builds a job description from two hardcoded Australian
    climate datasets (current for the SDM, RCP85 for the projection)
    and the 'demosdm' toolkit, queues it with ``after_commit_task``
    and counts the run in the stats utility.  No experiment content
    object is created.

    Returns a dict with the job id and the Swift result-store paths
    where the job state and projection metadata are written.

    Raises BadRequest when the request is not a POST or lsid is missing.
    """
    lsid = self.request.form.get('lsid')
    # Run SDM on a species given by lsid (from ALA), followed by a Climate
    # Change projection.
    if self.request.get('REQUEST_METHOD', 'GET').upper() != 'POST':
        raise BadRequest('Request must be POST')
    # Swift params
    swiftsettings = getUtility(IRegistry).forInterface(ISwiftSettings)
    # get parameters
    if not lsid:
        raise BadRequest('Required parameter lsid missing')
    # we have an lsid,.... we can't really verify but at least some
    # data is here
    # find rest of parameters
    # FIXME: hardcoded path to environmental datasets
    # Get the future climate for climate change projection
    portal = ploneapi.portal.get()
    dspath = '/'.join([defaults.DATASETS_FOLDER_ID,
                       defaults.DATASETS_CLIMATE_FOLDER_ID,
                       'australia', 'australia_1km',
                       'RCP85_ukmo-hadgem1_2085.zip'])
    ds = portal.restrictedTraverse(dspath)
    dsuuid = IUUID(ds)
    dlinfo = IDownloadInfo(ds)
    dsmd = IBCCVLMetadata(ds)
    futureclimatelist = []
    # one entry per bioclim layer used by the demo (B05, B06, B13, B14)
    for layer in ('B05', 'B06', 'B13', 'B14'):
        futureclimatelist.append({
            'uuid': dsuuid,
            'filename': dlinfo['filename'],
            'downloadurl': dlinfo['url'],
            'layer': layer,
            'type': dsmd['layers'][layer]['datatype'],
            'zippath': dsmd['layers'][layer]['filename']
        })
    # Climate change projection name
    cc_projection_name = os.path.splitext(dlinfo['filename'])[0]
    # Get the current climate for SDM
    dspath = '/'.join([defaults.DATASETS_FOLDER_ID,
                       defaults.DATASETS_CLIMATE_FOLDER_ID,
                       'australia', 'australia_1km',
                       'current.76to05.zip'])
    ds = portal.restrictedTraverse(dspath)
    dsuuid = IUUID(ds)
    dlinfo = IDownloadInfo(ds)
    dsmd = IBCCVLMetadata(ds)
    envlist = []
    for layer in ('B05', 'B06', 'B13', 'B14'):
        envlist.append({
            'uuid': dsuuid,
            'filename': dlinfo['filename'],
            'downloadurl': dlinfo['url'],
            'layer': layer,
            'type': dsmd['layers'][layer]['datatype'],
            'zippath': dsmd['layers'][layer]['filename']
        })
    # FIXME: we don't use a IJobTracker here for now
    # get toolkit and
    func = portal[defaults.TOOLKITS_FOLDER_ID]['demosdm']
    # build job_params:
    job_params = {
        'resolution': IBCCVLMetadata(ds)['resolution'],
        'function': func.getId(),
        'species_occurrence_dataset': {
            'uuid': 'ala_occurrence_dataset',
            'species': u'demoSDM',
            # the worker fetches occurrences directly from ALA by lsid
            'downloadurl': 'ala://ala?lsid={}'.format(lsid),
        },
        'environmental_datasets': envlist,
        'future_climate_datasets': futureclimatelist,
        'cc_projection_name': cc_projection_name
    }
    # add toolkit parameters: (all default values)
    # get toolkit schema
    schema = loadString(func.schema).schema
    for name, field in getFields(schema).items():
        if field.default is not None:
            job_params[name] = field.default
    # add other default parameters
    job_params.update({
        'rescale_all_models': False,
        'selected_models': 'all',
        'modeling_id': 'bccvl',
    })
    # generate script to run
    script = u'\n'.join([
        resource_string('org.bccvl.compute', 'rscripts/bccvl.R'),
        resource_string('org.bccvl.compute', 'rscripts/eval.R'),
        func.script])
    # where to store results.
    result = {
        'results_dir': 'swift+{}/wordpress/{}/'.format(
            swiftsettings.storage_url, urllib.quote_plus(lsid)),
        'outputs': json.loads(func.output)
    }
    # worker hints:
    worker = {
        'script': {
            'name': '{}.R'.format(func.getId()),
            'script': script
        },
        # keys of job_params that reference downloadable input files
        'files': (
            'species_occurrence_dataset',
            'environmental_datasets',
            'future_climate_datasets'
        )
    }
    # put everything together
    jobdesc = {
        'env': {},
        'params': job_params,
        'worker': worker,
        'result': result,
    }
    # create job
    jobtool = getUtility(IJobUtility)
    job = jobtool.new_job(
        lsid=lsid,
        toolkit=IUUID(func),
        function=func.getId(),
        type='demosdm'
    )
    # create job context object
    member = ploneapi.user.get_current()
    context = {
        # we use the site object as context
        'context': '/'.join(portal.getPhysicalPath()),
        'jobid': job.id,
        'user': {
            'id': member.getUserName(),
            'email': member.getProperty('email'),
            'fullname': member.getProperty('fullname')
        },
    }
    # all set to go build task chain now
    from org.bccvl.tasks.compute import demo_task
    from org.bccvl.tasks.plone import after_commit_task, HIGH_PRIORITY
    # task is only dispatched after the current transaction commits
    after_commit_task(demo_task, HIGH_PRIORITY, jobdesc, context)
    # let's hope everything works, return result
    # We don't create an experiment object, so we don't count stats here
    # let's do it manually
    getUtility(IStatsUtility).count_experiment(
        user=member.getId(),
        portal_type='demosdm',
    )
    return {
        'state': os.path.join(result['results_dir'], 'state.json'),
        'result': os.path.join(result['results_dir'], 'proj_metadata.json'),
        'jobid': job.id
    }
def load_ttw_schema(string=None):
    """Parse supermodel XML and return the schema stored under SCHEMATA_KEY.

    Falls back to the persisted schema source (``get_schema()``) when no
    *string* is given; returns None when the key is absent.
    """
    source = string or get_schema()
    return loadString(source).schemata.get(SCHEMATA_KEY)
def test_load_order_schema(self):
    """The field order of the parsed model matches the XML source order."""
    parsed = loadString(self.order_xml)
    expected = ['first', 'second', 'third']
    self.assertEqual(list(get_order(parsed.schema)), expected)
def __call__(self): """ handle AJAX save post """ if not self.authorized(): raise Unauthorized source = self.request.form.get("source") if source: # Is it valid XML? try: root = etree.fromstring(source) except etree.XMLSyntaxError, e: return dumps({"success": False, "message": "XMLSyntaxError: {0}".format(e.message.encode("utf8"))}) # a little more sanity checking, look at first two element levels basens = "{http://namespaces.plone.org/supermodel/schema}" if root.tag != basens + "model": return dumps({"success": False, "message": __(u"Error: root tag must be 'model'")}) for element in root.getchildren(): if element.tag != basens + "schema": return dumps({"success": False, "message": __(u"Error: all model elements must be 'schema'")}) # can supermodel parse it? # This is mainly good for catching bad dotted names. try: loadString(source) except SupermodelParseError, e: message = e.args[0].replace('\n File "<unknown>"', "") return dumps({"success": False, "message": u"SuperModelParseError: {0}".format(message)})