def setData(self, interfacedata, metadata):
    """Sets the directly provided interfaces on an object.

    Warning: all currently provided interfaces which are not in the
    interfacedata list will be removed!

    @param interfacedata: list of dotted names of interfaces to provide.
    See ftw.publisher.sender.extractor for format details.
    @type interfacedata: list
    @param metadata: additional metadata (unused here)
    @return: None
    """
    self.logger.info('Updating interface data (UID %s)' % (self.object.UID()))
    current_ifaces = set(self.adapted.getDirectlyProvidedNames())
    desired_ifaces = set(interfacedata)
    for iface_dotted in current_ifaces - desired_ifaces:
        iface = resolve(iface_dotted)
        noLongerProvides(self.object, iface)
    for iface_dotted in desired_ifaces - current_ifaces:
        iface = resolve(iface_dotted)
        alsoProvides(self.object, iface)

def getProtobuf(self, protobuf_name):
    """Get a protobuf class from the identifier supplied."""
    # Initialize mapping of protobuf full name to protobuf class
    if self._protobuf_full_name_to_class is None:
        self._protobuf_full_name_to_class = {}
        for contentType in self._content_types.values():
            try:
                cls = resolve(contentType.python_class)
                self._protobuf_full_name_to_class[cls.DESCRIPTOR.full_name] = cls
            except ImportError:
                log.exception('Failed to resolve protobuf: %s', protobuf_name)

    if protobuf_name in self._protobuf_full_name_to_class:
        cls = self._protobuf_full_name_to_class[protobuf_name]
    else:
        try:
            config = self.getContentType(protobuf_name)
        except KeyError:
            raise SchemaException('Could not find protobuf "%s"' % protobuf_name)
        try:
            cls = resolve(config.python_class)
        except ImportError:
            raise ImportError('Could not find protobuf python class "%s"'
                              % config.python_class)
    return cls

def loadPlugins(self):
    log.err("bit.core.plugins: loadPlugins")
    config = getUtility(IConfiguration)
    plugins = config.get("bit", "plugins")
    snippet = "<include package='zope.component' />"
    zcml = zcml_template % snippet
    xmlconfig(StringIO(zcml))
    if isinstance(plugins, str):
        plugins = [plugins]
    for plugin in plugins:
        snippet = "<include package='%s' />" % plugin
        zcml = zcml_template % snippet
        zcml_path = os.path.join(resolve(plugin).__path__[0], "configure.zcml")
        if os.path.exists(zcml_path):
            xmlconfig(StringIO(zcml))
    for plugin in plugins:
        snippet = "<include package='%s' file='meta.zcml' />" % plugin
        zcml = zcml_template % snippet
        zcml_path = os.path.join(resolve(plugin).__path__[0], "meta.zcml")
        if os.path.exists(zcml_path):
            xmlconfig(StringIO(zcml))
    for plugin in plugins:
        snippet = "<include package='%s' file='plugin.zcml' />" % plugin
        zcml = zcml_template % snippet
        zcml_path = os.path.join(resolve(plugin).__path__[0], "plugin.zcml")
        if os.path.exists(zcml_path):
            xmlconfig(StringIO(zcml))

def remove_old_contentleadimage(context, logger=None):
    if logger is None:
        # Called as upgrade step: define our own logger.
        logger = logging.getLogger('cpskin.policy remove_old_contentleadimage')
    portal = api.portal.get()
    sm = portal.getSiteManager()
    utilities = {
        'subscribers': sm.utilities._subscribers[0],
        'adapters': sm.utilities._adapters[0],
        # 'provided': sm.utilities._provided
    }
    util_klass = resolve('plone.browserlayer.interfaces.ILocalBrowserLayerType')
    reg_klass = resolve('collective.contentleadimage.interfaces.ILeadImageSpecific')
    for sm_type in utilities.keys():
        utility_registrations = utilities[sm_type]
        for x in utility_registrations.keys():
            if x.__module__ == util_klass.__module__ and x == util_klass:
                for name, klass in utility_registrations[x].items():
                    found = find_object_or_class(klass, reg_klass)
                    if found:
                        if type(utility_registrations[x][name]) in \
                                [list, tuple, set]:
                            regs = list(utility_registrations[x][name])
                            regs.remove(found)
                            logger.info('{0} {1} removed'.format(
                                sm_type, reg_klass))
                            utility_registrations[x][name] = tuple(regs)
                        else:
                            logger.info('{0} removed'.format(name))
                            del utility_registrations[x][name]
        setattr(sm.utilities, '_' + sm_type, [utility_registrations])

def load_modules(app, config):
    # load up builtins and modules
    app.modules = []
    try:
        _modules = [
            m.strip() for m in
            config['aio']['builtin'].strip().split('\n')
            if m.strip()]
        for m in _modules:
            app.modules.append(resolve(m))
    except KeyError:
        pass
    try:
        _modules = [
            m.strip() for m in
            config['aio']['modules'].strip().split('\n')
            if m.strip()]
        for m in _modules:
            app.modules.append(resolve(m))
    except KeyError:
        pass
    app.modules = tuple(app.modules)

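# Added illustration (not from the original source): the config shape this
# loader expects -- newline-separated dotted names under an [aio] section.
# The module names below are stand-ins.
#
#   [aio]
#   builtin =
#       aio.web
#   modules =
#       mypackage.module_a
#       mypackage.module_b
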
def check_root_site_manager(tool):
    """2.0.x to 2.1.0 upgrade step checker
    """
    portal = aq_parent(aq_inner(tool))
    try:
        # We have to call setSite to make sure we have a site with a proper
        # acquisition context.
        setSite(portal)
        sm = portal.getSiteManager()
        if sm.utilities.LookupClass != FiveVerifyingAdapterLookup:
            return True
    except ComponentLookupError:
        return True

    for tool_interface in _BAD_UTILITIES:
        try:
            iface = resolve(tool_interface)
        except ImportError:
            continue
        if sm.queryUtility(iface) is not None:
            return True

    for tool_id, tool_interface in _TOOL_UTILITIES:
        tool_obj = getToolByName(portal, tool_id, default=None)
        try:
            iface = resolve(tool_interface)
        except ImportError:
            continue
        if tool_obj is not None and sm.queryUtility(iface) is None:
            return True

    return False

def register_new_custom_type(type_key, workflow_key, archetype_key):
    """Retrieve (create if needed) a domain interface and model for type_key,
    and register as new entry on TYPE_REGISTER.
    """
    # generate custom domain interface
    domain_iface_name = naming.model_interface_name(type_key)
    try:
        domain_iface = resolve(
            "%s.%s" % (INTERFACE_MODULE.__name__, domain_iface_name))
        log.warn("Custom interface ALREADY EXISTS: %s" % (domain_iface))
    except ImportError:
        domain_iface = new_custom_domain_interface(type_key, domain_iface_name)
    # generate custom domain_model
    domain_model_name = naming.model_name(type_key)
    try:
        domain_model = resolve(
            "%s.%s" % (MODEL_MODULE.__name__, domain_model_name))
        log.warn("Custom domain model ALREADY EXISTS: %s" % (domain_model))
    except ImportError:
        domain_model = new_custom_domain_model(
            type_key, domain_iface, archetype_key)
    # type_info entry
    ti = TI(workflow_key, domain_iface, domain_model)
    ti.custom = True
    TYPE_REGISTRY.append((type_key, ti))
    log.info("Registered custom type [%s]: %s" % (archetype_key, type_key))
    return type_key, ti

def load_object(epstr):
    """Loads the object represented in entry-point syntax by the
    specified string."""
    if ':' in epstr:
        module, attr = epstr.split(':')
        module = resolve(module)
        return getattr(module, attr)
    return resolve(epstr)

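# Added usage sketch: load_object accepts either a plain dotted name or
# setuptools-style entry-point syntax with a `module:attribute` colon.
# Only the standard library is used here.
import os.path

assert load_object('os.path') is os.path            # dotted-name form
assert load_object('os.path:join') is os.path.join  # entry-point form
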
def _configure(self=None,
               set_up_packages=(),
               features=('devmode', 'testmode'),
               context=None,
               package=None):
    features = set(features) if features is not None else set()

    # This is normally created by a slug, but tests may not always
    # load the slug
    if os.getenv('DATASERVER_DIR_IS_BUILDOUT'):  # pragma: no cover
        features.add('in-buildout')

    # zope.component.globalregistry conveniently adds
    # a zope.testing.cleanup.CleanUp to reset the globalSiteManager
    if context is None and (features or package):
        context = config.ConfigurationMachine()
        context.package = package
        xmlconfig.registerCommonDirectives(context)

    for feature in features:
        context.provideFeature(feature)

    if set_up_packages:
        logger.debug("Configuring %s with features %s",
                     set_up_packages, features)

        for i in set_up_packages:
            __traceback_info__ = (i, self)
            if isinstance(i, tuple):
                filename = i[0]
                package = i[1]
            else:
                filename = 'configure.zcml'
                package = i

            if isinstance(package, six.string_types):
                package = dottedname.resolve(package)

            try:
                context = xmlconfig.file(filename, package=package,
                                         context=context)
            except IOError as e:
                # Did we pass in a test module (__name__) and there is no
                # configuration in that package? In that case, we want to
                # configure the parent package for sure
                module_path = getattr(package, '__file__', '')
                if (module_path
                        and 'tests' in module_path
                        and os.path.join(os.path.dirname(module_path),
                                         filename) == e.filename):
                    parent_package_name = '.'.join(
                        package.__name__.split('.')[:-2])
                    package = dottedname.resolve(parent_package_name)
                    context = xmlconfig.file(filename, package=package,
                                             context=context)
                else:  # pragma: no cover
                    raise

    return context

def unmarshal(self, mkey, func_name, marshalled_args, marshalled_kwargs):
    """Does the reverse operation of ``marshal``
    """
    Marshaller = resolve(mkey)
    marshaller = Marshaller()
    func = resolve(func_name)
    args, kwargs = marshaller.unmarshal(
        *marshalled_args, **marshalled_kwargs
    )
    return (func, args, kwargs)

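# Added hedged sketch: the ``marshal`` counterpart is not shown in this
# snippet, but given the signature above a round trip would look roughly
# like this. Both dotted names are hypothetical -- resolve() turns them
# back into the marshaller class and the target callable before the call
# is replayed.
func, args, kwargs = self.unmarshal(
    'mypackage.marshal.JsonMarshaller',  # hypothetical marshaller class
    'mypackage.tasks.send_mail',         # hypothetical target function
    marshalled_args, marshalled_kwargs)
result = func(*args, **kwargs)
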
def __init__(self, logger, data):
    self.input_preparation_list = []
    for plugin_data in data['application_configuration']['pre_processing']['plugin_list']:
        self.input_preparation_list.append(
            resolve(plugin_data['_class'])(logger, plugin_data))
    self.output_preparation_list = []
    for plugin_data in data['application_configuration']['post_processing']['plugin_list']:
        self.output_preparation_list.append(
            resolve(plugin_data['_class'])(logger, plugin_data))
    plugin_data = data['application_configuration']['processing_plugin']
    self.execution_plugin = resolve(plugin_data['_class'])(logger, plugin_data)

def is_operation_valid(self, registry, operation):
    # Check that the operation exists.
    op_info = registry.get(operation)
    if op_info is None:
        logger.error("Operation %r is not defined.", operation)
        return False
    op_function_name = op_info.get("operation")
    try:
        resolve(op_function_name)
    except ImportError:
        logger.error("ImportError for operation %r: %s",
                     operation, op_function_name)
        return False
    return True

def sign(self, schema, usedottedname=False):
    """sign the object with the signature of the schema used on it"""
    if usedottedname:
        if schema.__module__ != '.'.join((PKGNAME, 'schema.generated')):
            if isdottedname(schema.__identifier__):
                resolve(schema.__identifier__)  # ensure it can be imported
                self.signature = schema.__identifier__
                return
    saver = queryUtility(ISchemaSaver)
    # persist serialization of schema, get signature
    self.signature = saver.add(schema)

def resolve_plugin_class(self, plugin_name):
    try:
        plugin_class = dottedname.resolve(plugin_name)
    except ImportError as e:
        full_plugin_name = 'replay.plugins.' + plugin_name
        try:
            plugin_class = dottedname.resolve(full_plugin_name)
        except ImportError:
            raise e
    if not (inspect.isclass(plugin_class)
            and issubclass(plugin_class, plugins.Plugin)):
        raise ValueError('{} is not a Plugin'.format(plugin_name))
    return plugin_class

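# Added illustration: a bare class name that fails to import directly is
# retried with the 'replay.plugins.' prefix, so both spellings below would
# resolve to the same class. The plugin name used here is hypothetical.
short = self.resolve_plugin_class('DotenvLoader')                # hypothetical
full = self.resolve_plugin_class('replay.plugins.DotenvLoader')  # hypothetical
assert short is full
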
def mangleAbsoluteFilename(filename):
    """
    Mangle an absolute filename when the file happens to be in a
    package. The mangled name will then be of the form::

      <dotted package name>/<base filename>.

    For example, let's take Five's configure.zcml as an example. We
    assemble it in an OS-independent way so this test works on all
    platforms:

      >>> def filesystemPath(*elements):
      ...     return sep.join(elements)

    We see that the filename is now mangled:

      >>> mangleAbsoluteFilename(filesystemPath(
      ...     '', 'path', 'to', 'Products', 'Five', 'configure.zcml'))
      'Products.Five/configure.zcml'

    The name of a file that's not in a package is returned unchanged:

      >>> not_in_a_package = filesystemPath('', 'path', 'to', 'configure.zcml')
      >>> mangleAbsoluteFilename(not_in_a_package) == not_in_a_package
      True
    """
    if not isabs(filename):
        raise ValueError("Can only determine package for absolute filenames")
    dir, base = split(filename)
    pieces = dir.split(sep)
    if pieces[0] == '':
        pieces = pieces[1:]
    # keep only the pieces that come after the last directory whose name
    # starts with a dot
    for index, piece in enumerate(reversed(pieces)):
        if piece.startswith('.'):
            pieces = pieces[-index:]
            break
    while pieces:
        try:
            resolve('.'.join(pieces))
            break
        except (ImportError, ValueError):
            pieces = pieces[1:]
    if not pieces:
        return filename
    return '.'.join(pieces) + '/' + base

def upgrade_root_site_manager(tool):
    """2.0.x to 2.1.0 upgrade step handler
    """
    logger = logging.getLogger('GenericSetup.upgrade')
    portal = aq_parent(aq_inner(tool))
    try:
        setSite(portal)
        sm = portal.getSiteManager()
        if sm.utilities.LookupClass != FiveVerifyingAdapterLookup:
            sm.__parent__ = aq_base(portal)
            sm.utilities.LookupClass = FiveVerifyingAdapterLookup
            sm.utilities._createLookup()
            sm.utilities.__parent__ = sm
            logger.info('LookupClass replaced.')
        else:
            for tool_interface in _BAD_UTILITIES:
                try:
                    iface = resolve(tool_interface)
                except ImportError:
                    continue
                if sm.queryUtility(iface) is not None:
                    sm.unregisterUtility(provided=iface)
                    logger.info('Unregistered utility for %s'
                                % tool_interface)
            for tool_id, tool_interface in _TOOL_UTILITIES:
                tool_obj = getToolByName(portal, tool_id, default=None)
                try:
                    iface = resolve(tool_interface)
                except ImportError:
                    continue
                if tool_obj is not None and sm.queryUtility(iface) is None:
                    sm.registerUtility(tool_obj, iface)
                    logger.info('Registered %s for interface %s' % (
                        tool_id, tool_interface))
        return
    except ComponentLookupError:
        next = find_next_sitemanager(portal)
        if next is None:
            next = base
        name = '/'.join(portal.getPhysicalPath())
        sm = PersistentComponents(name, (next,))
        sm.__parent__ = aq_base(portal)
        portal.setSiteManager(sm)
        logger.info("Site manager '%s' added." % name)
    getMultiAdapter((sm, SetupEnviron()), IBody).body = _COMPONENTS_XML
    logger.info('Utility registrations added.')

def run(self, quiet=False):
    from nefertari.elasticsearch import ES
    ES.setup(self.settings)
    models_paths = split_strip(self.options.models)

    for path in models_paths:
        model = resolve(path)
        model_name = path.split('.')[-1]

        params = self.options.params or ''
        params = dict([
            [k, v[0]] for k, v in urlparse.parse_qs(params).items()
        ])
        params.setdefault('_limit', params.get('_limit', 10000))
        chunk_size = self.options.chunk or params['_limit']

        es = ES(source=model_name, index_name=self.options.index)
        query_set = model.get_collection(**params)
        documents = to_dicts(query_set)

        if self.options.force:
            es.index(documents, chunk_size=chunk_size)
        else:
            es.index_missing(documents, chunk_size=chunk_size)

    return 0

def includableInfo(self, zcml_to_look_for, result, seen=None,
                   exclude=(), extras=()):
    if seen is None:
        seen = set()
    seen.add(self.context.project_name)
    self.includeReqs(self.context.requires(), zcml_to_look_for, result,
                     seen, exclude)
    for dotted_name in self.dottedNames():
        module = resolve(dotted_name)
        for candidate in zcml_to_look_for:
            candidate_path = os.path.join(
                os.path.dirname(module.__file__), candidate)
            if os.path.isfile(candidate_path):
                if dotted_name not in result[candidate]:
                    result[candidate].append(dotted_name)
    for extra in extras:
        seen.add((self.context.project_name, extra))
        try:
            reqs = self.context.requires(extras=(extra,))
        except UnknownExtra:
            return
        if reqs:
            self.includeReqs(reqs, zcml_to_look_for, result, seen, exclude)

def load(file_path):
    doc = etree.fromstring(open(file_path).read())
    module_name = os.path.splitext(os.path.basename(file_path))[0]
    #module = resolve(".%s" % module_name, BUNGENI_BASEPATH)
    #actions = getattr(module, "actions")
    actions = resolve("._actions", BUNGENI_BASEPATH)
    return _load(doc, module_name, actions)

def __get__(self, inst, cls=None):
    global generated
    if inst is None:
        return getObjectSpecification(cls)
    spec = getattr(inst, '__provides__', None)
    if spec is None:
        spec = implementedBy(cls)
    signature = getattr(inst, 'signature', None)
    if signature is None:
        return spec
    if not ismd5hex(signature):
        if not isdottedname(signature):
            return spec
        # not an md5 signature, so perhaps we have a dotted name
        try:
            iface = resolve(signature)
            if not IInterface.providedBy(iface):
                raise ValueError('Not interface: %s' % signature)
            return Implements(iface, spec)
        except ImportError:
            logger.warning('SignatureAwareDescriptor: '
                           'unable to resolve interface '
                           '%s by dotted name.', signature)
            return spec
    iface_name = 'I%s' % signature
    dynamic = [getattr(generated, iface_name)]
    dynamic.append(spec)
    spec = Implements(*dynamic)
    return spec

def onDiscovery(self, theme, settings, dependenciesSettings):
    res = queryResourceDirectory(THEME_RESOURCE_NAME, theme)
    if res is None:
        return

    directoryName = 'overrides'
    if 'directory' in settings:
        directoryName = settings['directory']

    if res.isDirectory(directoryName):
        layer = getattr(schemata, theme, None)
        if 'layer' in settings:
            layerName = settings['layer']
            try:
                layer = resolve(layerName)
            except (ImportError, AttributeError,):
                logger.warn("Could not import %s" % layerName)
                return

        path = os.path.join(res.directory, directoryName)
        manager = z3c.jbot.metaconfigure.handler(path, layer)
        self.registered[theme] = manager

def __call__(self):
    if self.request.get('submitted'):
        dryrun = self.request.get('dryrun', False) == 'true' or False
        try:
            iface = resolve(self.request.get('dottedname'))
        except ImportError:
            # can't find, let's create it and maybe we can still fix it..
            module, name = self.request.get('dottedname').rsplit('.', 1)
            _, iface = classfactory.create_module(module, name)
        self.request.response.write(
            'Removing %s\n' % self.request.get('dottedname'))
        obj = self.context
        if iface.providedBy(obj):
            if not dryrun:
                noLongerProvides(obj, iface)
            self.request.response.write(
                'Removed from %s\n' % self.obj_path(obj))
        if not dryrun:
            undoom_transaction()
        self.check_folder(obj, iface, dryrun)
        self.request.response.write('done.')
    else:
        return super(RemoveInterfaces, self).__call__()

def domain_container(self):
    if self._container_class:
        return self._container_class
    container_class = resolve(self.container)
    self._container_class = type(
        "ManagedContainer",
        (_ManagedContainer, container_class),
        dict(container_class.__dict__))
    protectLikeUnto(self._container_class, container_class)
    return self._container_class

def resolveDottedName(dottedName):
    """Resolve a dotted name to a real object
    """
    global _dottedCache
    if dottedName not in _dottedCache:
        _dottedCache[dottedName] = resolve(dottedName)
    return _dottedCache[dottedName]

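# Added illustration (assumes the module-level _dottedCache dict above is
# initialized): repeated lookups are served from the cache, so the same
# object comes back without re-walking the import machinery.
import xml.etree.ElementTree

first = resolveDottedName('xml.etree.ElementTree')
second = resolveDottedName('xml.etree.ElementTree')
assert first is xml.etree.ElementTree
assert first is second  # second call hit _dottedCache
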
def resolve(self, id):
    class_path, oid = id.split('-', 1)
    oid, lang = oid.split(":", 1)
    domain_class = resolve.resolve(class_path)
    session = Session()
    value_key = container.valueKey(oid)
    return session.query(domain_class).get(value_key)

def __call__(self):
    context = aq_inner(self.context)
    fieldname = self.request.get('fieldname')
    portal_type = self.request.get('portal_type')
    fti = zope.component.getUtility(IDexterityFTI, name=portal_type)
    schema = fti.lookupSchema()
    field = schema.get(fieldname)
    if field is None:
        for behaviorname in fti.behaviors:
            behavior = resolve(behaviorname)
            field = behavior.get(fieldname.split('.')[1])
            if field is not None:
                break
    if IList.providedBy(field) or ISet.providedBy(field):
        vname = field.value_type.vocabularyName
    else:
        vname = field.vocabularyName
    factory = zope.component.getUtility(IVocabularyFactory, vname)
    tree = factory(context)
    leafsOnly = getattr(tree, 'leafsOnly', True)
    tree = dict2dynatree(
        self.context,
        tree,
        [],  # selected not needed here, this is done at js side
        leafsOnly,
        False
    )
    return json.dumps(tree)

def delete_utility(self):
    if not self.request.get('submit', self.request.get('cancel')):
        return self.confirm_template(
            msg="Are you sure you want to delete %s" %
                self.request.get('util_dottedname'),
            action='/delete-persistent-utility',
            params=self.request.form.items()
        )
    elif self.request.get('cancel') == 'No':
        return self.request.response.redirect(
            self.context.absolute_url() + '/@@fix-persistent-utilities')

    undoom_transaction()
    utilities = self.utilities()
    _type = self.request.get('type')
    utility_registrations = utilities[_type]
    util_dottedname = self.request.get('util_dottedname')
    sm = self.sitemanager()

    klass = resolve(self.request.get('util_dottedname'))
    if not self.deletable(klass):
        raise Exception("I'm not going to allow you to delete that!")

    for x in utility_registrations.keys():
        if x.__module__ == klass.__module__ and x == klass:
            del utility_registrations[x]
            break

    if util_dottedname in sm:
        sm._delObject(util_dottedname, suppress_events=True)

    self.set_utilities(utility_registrations, _type)
    self.request.response.redirect(
        self.context.absolute_url() + '/@@fix-persistent-utilities')

def object_provides(obj, iname):
    """Implements plone_interface_info: plone.app.async does not pass a
    request, so calling restrictedTraverse would end up in an error.
    """
    iface = resolve(iname)
    return iface.providedBy(aq_base(obj))

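# Added usage sketch: checking an interface by dotted name. Interface
# objects themselves provide zope.interface.interfaces.IInterface, which
# makes for a check that needs nothing beyond zope.interface itself.
from zope.interface import Interface

assert object_provides(Interface, 'zope.interface.interfaces.IInterface')
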
async def __call__(self):
    if self.key is _marker:
        # No option to write the root of registry
        return ErrorResponse('InvalidRequest', 'Needs the registry key')
    data = await self.request.json()
    if 'value' in data:
        value = data['value']
    else:
        value = data
    assert '.' in self.key, 'Registry key must be dotted.iface.name.fieldname'  # noqa
    iface, name = self.key.rsplit('.', 1)
    iface = resolve(iface)
    field = iface[name]
    try:
        new_value = getMultiAdapter((value, field), IJSONToValue)
    except ComponentLookupError:
        return ErrorResponse(
            'DeserializationError',
            'Cannot deserialize type {}'.format(str(field)),
            status=501)
    try:
        self.request.site_settings[self.key] = new_value
    except DeserializationError as e:
        return ErrorResponse(
            'DeserializationError',
            str(e),
            exc=e,
            status=400)
    return Response(response={}, status=204)

def writeCoreJetReports(self, source, directory=None, filename='corejet.xml'):
    # corejet.robot registers CoreJet-adapters for Robot Framework tests
    # XXX: there should be a more dynamic way to configure plugin adapters
    try:
        import corejet.robot
    except ImportError:
        pass

    try:
        sourceType, sourceOptions = source.split(',', 1)
    except ValueError:  # need more than 1 value to unpack
        sourceType = source.strip()
        sourceOptions = ''

    # Prepare output directory
    if directory is None:
        workingDir = os.getcwd()
        directory = os.path.join(workingDir, 'corejet')

    print "Writing CoreJet report to %s" % directory

    functionName = None
    for ep in pkg_resources.iter_entry_points('corejet.repositorysource'):
        if ep.name == sourceType and len(ep.attrs) > 0:
            functionName = "%s.%s" % (ep.module_name, ep.attrs[0],)
            break

    if not functionName:
        raise ValueError("Unknown CoreJet source type %s" % sourceType)

    sourceFunction = resolve(functionName)
    catalogue = sourceFunction(sourceOptions)

    # Set test time
    catalogue.testTime = datetime.datetime.now()

    # Find everything we've done so far
    testedStories = {}  # story name -> {scenario name -> (scenario, info)}

    for suiteInfo in self._testSuites.values():
        for caseInfo in suiteInfo.testCases:
            # look up the story for the test through adaptation:
            # - for @story-decorated test, the class implements IStory
            # - for others, the test case may have an adapter for IStory
            story = IStory(caseInfo.test,
                           IStory(caseInfo.test.__class__, None))
            if not story:
                continue
            scenarios = testedStories.setdefault(
                story.name.strip().lower(), {})
            # XXX: Relying on _testMethodName here is not very good
            scenario = getattr(story, caseInfo.test._testMethodName).scenario
            scenarios[scenario.name.strip().lower()] = (scenario, caseInfo,)

    # Allocate a status to each scenario
    for epic in catalogue.epics:
        for story in epic.stories:
            testedStory = testedStories.get(story.name.strip().lower(), {})
            for scenario in story.scenarios:
                scenario.status = "pending"
                testedScenario, info = testedStory.get(
                    scenario.name.strip().lower(), (None, None,))

                # Check for pass/fail
                if info is not None:
                    if info.failure or info.error:
                        scenario.status = "fail"
                    else:
                        scenario.status = "pass"

                    # Init 'global' steps when they are missing
                    setattr(story, "givens", getattr(story, "givens", []))
                    setattr(story, "whens", getattr(story, "whens", []))
                    setattr(story, "thens", getattr(story, "thens", []))

                    # Check for mismatch
                    if (len(story.givens + scenario.givens) !=
                            len(testedScenario.givens)
                            or len(story.whens + scenario.whens) !=
                            len(testedScenario.whens)
                            or len(story.thens + scenario.thens) !=
                            len(testedScenario.thens)):
                        scenario.status = "mismatch"

                    if scenario.status != "mismatch":
                        for left, right in zip(
                                story.givens + scenario.givens,
                                testedScenario.givens):
                            if left.text.strip().lower() != \
                                    right.text.strip().lower():
                                scenario.status = "mismatch"
                                break

                    if scenario.status != "mismatch":
                        for left, right in zip(
                                story.whens + scenario.whens,
                                testedScenario.whens):
                            if left.text.strip().lower() != \
                                    right.text.strip().lower():
                                scenario.status = "mismatch"
                                break

                    if scenario.status != "mismatch":
                        for left, right in zip(
                                story.thens + scenario.thens,
                                testedScenario.thens):
                            if left.text.strip().lower() != \
                                    right.text.strip().lower():
                                scenario.status = "mismatch"
                                break

    # TODO: We don't handle superfluous tests yet

    if os.path.exists(directory):
        shutil.rmtree(directory)
    os.mkdir(directory)

    # Write CoreJet file
    with open(os.path.join(directory, filename), 'w') as output:
        catalogue.write(output)

    # Generate HTML report
    generateReportFromCatalogue(catalogue, directory)

class UIEditOne2OneFactory(UIDisplayOne2OneFactory):
    name = 'edit'
    viewlet_name_template = "%s%sEdit"
    base_viewlet = relation.One2OneEdit
    viewlet_manager = resolve('alchemist.ui.interfaces.IContentEditManager')

def fromUnicode(self, value):
    iface = resolve(value)
    self.context.validate(iface)
    return iface

def _process_feature_layers(
        feature_layers, coord, post_process_data, formats, unpadded_bounds,
        scale, buffer_cfg):

    # the nominal zoom is the "display scale" zoom, which may not correspond
    # to actual tile coordinates in future versions of the code. it just
    # becomes a measure of the scale between tile features and intended
    # display size.
    nominal_zoom = coord.zoom

    processed_feature_layers = []
    # filter, and then transform each layer as necessary
    for feature_layer in feature_layers:
        layer_datum = feature_layer['layer_datum']
        layer_name = layer_datum['name']
        features = feature_layer['features']

        transform_fn_names = layer_datum['transform_fn_names']
        if transform_fn_names:
            transform_fns = resolve_transform_fns(transform_fn_names)
            layer_transform_fn = make_transform_fn(transform_fns)
        else:
            layer_transform_fn = None

        # perform any specific layer transformations
        if layer_transform_fn is None:
            processed_features = features
        else:
            processed_features = []
            for feature in features:
                shape, props, feature_id = feature
                shape, props, feature_id = layer_transform_fn(
                    shape, props, feature_id, nominal_zoom)
                transformed_feature = shape, props, feature_id
                processed_features.append(transformed_feature)

        sort_fn_name = layer_datum['sort_fn_name']
        if sort_fn_name:
            sort_fn = resolve(sort_fn_name)
            processed_features = sort_fn(processed_features, nominal_zoom)

        feature_layer = dict(
            name=layer_name,
            features=processed_features,
            layer_datum=layer_datum,
            padded_bounds=feature_layer['padded_bounds'],
        )
        processed_feature_layers.append(feature_layer)

    # post-process data here, before it gets formatted
    processed_feature_layers = _postprocess_data(
        processed_feature_layers, post_process_data, nominal_zoom,
        unpadded_bounds)

    meters_per_pixel_dim = calc_meters_per_pixel_dim(nominal_zoom)

    # topojson formatter expects bounds to be in lnglat
    unpadded_bounds_lnglat = (
        mercator_point_to_lnglat(unpadded_bounds[0], unpadded_bounds[1]) +
        mercator_point_to_lnglat(unpadded_bounds[2], unpadded_bounds[3]))

    # now, perform the format specific transformations
    # and format the tile itself
    formatted_tiles = []
    layer = 'all'
    for format in formats:
        formatted_tile = _create_formatted_tile(
            processed_feature_layers, format, scale, unpadded_bounds,
            unpadded_bounds_lnglat, coord, layer, meters_per_pixel_dim,
            buffer_cfg)
        formatted_tiles.append(formatted_tile)

    return formatted_tiles

def get_workflow_condition(self, condition):
    conds_module = resolve("._conditions", "bungeni_custom.workflows")
    return getattr(conds_module, condition)  # raises AttributeError

def resolve_or_get(potential_dotted_name):
    if isinstance(potential_dotted_name, str):
        return resolve(potential_dotted_name)
    return potential_dotted_name

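# Added illustration: the helper is a pass-through for objects and only
# resolves strings, so callers can accept either form interchangeably.
import json

assert resolve_or_get('json.dumps') is json.dumps  # resolved from dotted name
assert resolve_or_get(json.dumps) is json.dumps    # returned unchanged
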
def resolve(self, id):
    class_path, oid = id.split('-', 1)
    domain_class = resolve.resolve(class_path)
    session = Session()
    value_key = container.valueKey(oid)
    return session.query(domain_class).get(value_key)

def _parse(source, policy):
    tree = etree.parse(source)
    root = tree.getroot()

    parseinfo.i18n_domain = root.attrib.get(ns('domain', prefix=I18N_NAMESPACE))

    model = Model()
    handlers = {}
    schema_metadata_handlers = tuple(getUtilitiesFor(ISchemaMetadataHandler))
    field_metadata_handlers = tuple(getUtilitiesFor(IFieldMetadataHandler))
    policy_util = getUtility(ISchemaPolicy, name=policy)

    def readField(fieldElement, schemaAttributes, fieldElements, baseFields):
        # Parse field attributes
        fieldName = fieldElement.get('name')
        fieldType = fieldElement.get('type')
        if fieldName is None or fieldType is None:
            raise ValueError(
                'The attributes \'name\' and \'type\' are required for each '
                '<field /> element')
        handler = handlers.get(fieldType, None)
        if handler is None:
            handler = handlers[fieldType] = queryUtility(
                IFieldExportImportHandler, name=fieldType)
            if handler is None:
                raise ValueError(
                    'Field type {0} specified for field {1} is not '
                    'supported'.format(fieldType, fieldName))
        field = handler.read(fieldElement)

        # Preserve order from base interfaces if this field is an override
        # of a field with the same name in a base interface
        base_field = baseFields.get(fieldName, None)
        if base_field is not None:
            field.order = base_field.order

        # Save for the schema
        schemaAttributes[fieldName] = field
        fieldElements[fieldName] = fieldElement

        return fieldName

    for schema_element in root.findall(ns('schema')):
        parseinfo.stack.append(schema_element)
        schemaAttributes = {}

        schemaName = schema_element.get('name')
        if schemaName is None:
            schemaName = u""

        bases = ()
        baseFields = {}
        based_on = schema_element.get('based-on')
        if based_on is not None:
            bases = tuple([resolve(dotted) for dotted in based_on.split()])
            for base_schema in bases:
                baseFields.update(getFields(base_schema))

        fieldElements = {}

        # Read global fields
        for fieldElement in schema_element.findall(ns('field')):
            parseinfo.stack.append(fieldElement)
            readField(fieldElement, schemaAttributes, fieldElements,
                      baseFields)
            parseinfo.stack.pop()

        # Read invariants, fieldsets and their fields
        invariants = []
        fieldsets = []
        fieldsets_by_name = {}

        for subelement in schema_element:
            parseinfo.stack.append(subelement)
            if subelement.tag == ns('field'):
                readField(subelement, schemaAttributes, fieldElements,
                          baseFields)
            elif subelement.tag == ns('fieldset'):
                fieldset_name = subelement.get('name')
                if fieldset_name is None:
                    raise ValueError(
                        u'Fieldset in schema {0} has no name'.format(
                            schemaName))

                fieldset = fieldsets_by_name.get(fieldset_name, None)
                if fieldset is None:
                    fieldset_label = subelement.get('label')
                    fieldset_description = subelement.get('description')
                    fieldset_order = subelement.get('order')
                    if fieldset_order is None:
                        fieldset_order = DEFAULT_ORDER
                    elif isinstance(fieldset_order, six.string_types):
                        fieldset_order = int(fieldset_order)
                    fieldset = fieldsets_by_name[fieldset_name] = Fieldset(
                        fieldset_name,
                        label=fieldset_label,
                        description=fieldset_description,
                        order=fieldset_order,
                    )
                    fieldsets_by_name[fieldset_name] = fieldset
                    fieldsets.append(fieldset)

                for fieldElement in subelement.findall(ns('field')):
                    parseinfo.stack.append(fieldElement)
                    parsed_fieldName = readField(
                        fieldElement, schemaAttributes, fieldElements,
                        baseFields)
                    if parsed_fieldName:
                        fieldset.fields.append(parsed_fieldName)
                    parseinfo.stack.pop()
            elif subelement.tag == ns('invariant'):
                dotted = subelement.text
                invariant = resolve(dotted)
                if not IInvariant.providedBy(invariant):
                    raise ImportError(
                        u'Invariant functions must provide '
                        u'plone.supermodel.interfaces.IInvariant')
                invariants.append(invariant)
            parseinfo.stack.pop()

        schema = SchemaClass(
            name=policy_util.name(schemaName, tree),
            bases=bases + policy_util.bases(schemaName, tree) + (Schema,),
            __module__=policy_util.module(schemaName, tree),
            attrs=schemaAttributes)

        # add invariants to schema as tagged values
        if invariants:
            schema_invariants = schema.queryTaggedValue('invariants', [])
            schema.setTaggedValue('invariants',
                                  schema_invariants + invariants)

        # Save fieldsets
        schema.setTaggedValue(FIELDSETS_KEY, fieldsets)

        # Let metadata handlers write metadata
        for handler_name, metadata_handler in field_metadata_handlers:
            for fieldName in schema:
                if fieldName in fieldElements:
                    metadata_handler.read(fieldElements[fieldName],
                                          schema, schema[fieldName])

        for handler_name, metadata_handler in schema_metadata_handlers:
            metadata_handler.read(schema_element, schema)

        model.schemata[schemaName] = schema
        parseinfo.stack.pop()

    parseinfo.i18n_domain = None

    return model

def test_import_interfaces(self):
    dottedname.resolve('nti.ntiids.interfaces')

def _callFUT(self, *args, **kw):
    from zope.dottedname.resolve import resolve
    return resolve(*args, **kw)

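# Added sketch of tests built on such a helper: resolve() takes an optional
# second argument anchoring relative names, which the indirection passes
# straight through.
def test_resolves_module(self):
    import os.path
    self.assertIs(self._callFUT('os.path'), os.path)

def test_resolves_relative_name(self):
    import os.path
    self.assertIs(self._callFUT('.path', 'os'), os.path)
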
def __call__(self):
    klass = resolve(self.klass)
    session = Session()
    query = session.query(klass)
    return query.get(self.primary_key)

def getFilePath(self):
    ob = resolve(self.path)
    return os.path.dirname(ob.__file__) + '/'

def get_form_derived(self, derived):
    derived_module = resolve("._derived", "bungeni_custom.forms")
    derived_def = getattr(derived_module, derived)  # raises AttributeError
    return wrapped_callable(derived_def)

def fromUnicode(self, value):
    obj = resolve(value)
    self.context.validate(obj)
    return obj

def __init__(self, dotted_path):
    self.path = dotted_path
    self._module = resolve(self.path)
    # In apidoc packages are handled like modules...
    self.apidoc = Module(None, None, self._module, True)

def _load(workflow):
    transitions = []
    states = []
    domain = workflow.get('domain')

    for s in workflow.iterchildren('state'):
        permissions = []
        for g in s.iterchildren('grant'):
            permissions.append((GRANT, g.get('permission'), g.get('role')))
        for d in s.iterchildren('deny'):
            permissions.append((DENY, d.get('permission'), d.get('role')))
        state_id = s.get('id')
        states.append(
            State(state_id, Message(s.get('title', domain)), permissions))

    for t in workflow.iterchildren('transition'):
        for key in ('source', 'destination', 'id', 'title'):
            if t.get(key) is None:
                raise SyntaxError("%s not in %s" % (key, etree.tostring(t)))

        # XXX: There must be a better way to do this:
        # collapse runs of whitespace to single spaces before splitting
        source = t.get('source')
        while '  ' in source:
            source = source.replace('  ', ' ')
        sources = [s or None for s in source.split(' ')]

        for source in sources:
            if len(sources) > 1:
                tid = "%s-%s" % (t.get('id'), source)
            else:
                tid = t.get('id')
            args = (tid, Message(t.get('title'), domain),
                    source, t.get('destination'))
            kw = {}
            # optionals
            for i in ('trigger', 'order', 'permission'):
                val = t.get(i)
                if not val:
                    continue
                kw[i] = val
            require_confirmation = getattr(t, 'require_confirmation', '')
            if require_confirmation.lower() == 'true':
                kw['require_confirmation'] = True
            if 'trigger' in kw:
                k = kw['trigger']
                v = trigger_value_map[k]
                kw['trigger'] = v
            # optional python resolvables
            for i in ('condition', 'action', 'event'):
                val = t.get(i)
                if not val:
                    continue
                # raises importerror/nameerror
                val = resolve(val, 'bungeni.core.workflows')
                kw[i] = val
            transitions.append(StateTransition(*args, **kw))

    return StateWorkflow(transitions, states)

def getFilePath(self):
    ob = resolve(self.path)
    filename = ob.__file__
    if filename.endswith('o') or filename.endswith('c'):
        filename = filename[:-1]
    return filename

def get_form_validator(self, validation):
    validator_module = resolve("._validations", "bungeni_custom.forms")
    validator = getattr(validator_module, validation)  # raises AttributeError
    return wrapped_callable(validator)

def get_form_constraint(self, constraint):
    constraint_module = resolve("._constraints", "bungeni_custom.forms")
    constraint = getattr(constraint_module, constraint)  # raises AttributeError
    return wrapped_callable(constraint)

def onDiscovery(self, theme, settings, dependenciesSettings):
    res = queryResourceDirectory(THEME_RESOURCE_NAME, theme)
    if res is None:
        return

    directoryName = 'views'
    if 'directory' in settings:
        directoryName = settings['directory']

    if res.isDirectory(directoryName):
        viewsDir = res[directoryName]

        layer = getattr(schemata, theme, None)
        if 'layer' in settings:
            layerName = settings['layer']
            try:
                layer = resolve(layerName)
            except (ImportError, AttributeError,):
                logger.warn("Could not import %s" % layerName)
                return

        viewConfig = SafeConfigParser()
        if viewsDir.isFile(VIEW_CONFIG_FILENAME):
            fp = viewsDir.openFile(VIEW_CONFIG_FILENAME)
            try:
                viewConfig.readfp(fp)
            finally:
                try:
                    fp.close()
                except AttributeError:
                    pass

        views = []
        configurationMachine = ConfigurationMachine()
        path = viewsDir.directory

        for filename in os.listdir(path):
            if not filename.lower().endswith(EXTENSION):
                continue

            name = viewName = filename[:-3]
            permission = 'zope2.View'
            for_ = Interface
            class_ = None
            template = os.path.join(path, filename)
            menu = {}

            # Read override options from views.cfg if applicable
            if viewConfig.has_section(name):
                if viewConfig.has_option(name, 'name'):
                    viewName = viewConfig.get(name, 'name')
                if viewConfig.has_option(name, 'permission'):
                    permission = viewConfig.get(name, 'permission')
                if viewConfig.has_option(name, 'for'):
                    forStr = viewConfig.get(name, 'for')
                    if forStr != "*":
                        for_ = resolve(forStr)
                if viewConfig.has_option(name, 'class'):
                    class_ = resolve(viewConfig.get(name, 'class'))
                if viewConfig.has_option(name, 'menu'):
                    menu = dict(
                        title=viewConfig.get(name, 'menu'),
                        menu=getattr(zope.browsermenu.metaconfigure.menus,
                                     "plone_displayviews"),
                    )

            Products.Five.browser.metaconfigure.page(
                configurationMachine,
                name=viewName,
                permission=permission,
                for_=for_,
                layer=layer,
                template=template,
                class_=class_,
                **menu)
            views.append(name)

        if len(views) > 0:
            configurationMachine.execute_actions()

        self.registered[theme] = views

def get_workflow_condition(self, condition):
    condition_module = resolve("._conditions", "bungeni_custom.workflows")
    condition = getattr(condition_module, condition)  # raises AttributeError
    return wrapped_callable(condition)

     'Products.PloneFormGen.interfaces.IPloneFormGenField'),
    ('Products.PloneFormGen',
     'Products.PloneFormGen.interfaces.IPloneFormGenFieldset'),
    ('Products.PloneFormGen',
     'Products.PloneFormGen.interfaces.IPloneFormGenActionAdapter'),
    ('Products.PloneFormGen',
     'Products.PloneFormGen.interfaces.IPloneFormGenThanksPage'),
]

BELONGS_TO_PARENT_INTERFACES = []

for pkg_name, dottedname in BELONGS_TO_PARENT_CANDIDATES:
    try:
        pkg_resources.get_distribution(pkg_name)
    except pkg_resources.DistributionNotFound:
        continue
    BELONGS_TO_PARENT_INTERFACES.append(resolve(dottedname))


def belongs_to_parent(context):
    """This method returns True when the object is considered belonging
    to its parent.

    An object is considered belonging to its parent when it provides one
    of the known interfaces and does not have a workflow configured.
    The parent object is not checked; this is the job of the caller.
    """
    wftool = getToolByName(context, 'portal_workflow')

    if not filter(lambda iface, c=context: iface.providedBy(c),
                  BELONGS_TO_PARENT_INTERFACES):
        # The object does not provide any of the configured interfaces.
        return False

    if wftool.getWorkflowsFor(context):

import re

from lxml import etree
from zope.dottedname.resolve import resolve
from zope.i18nmessageid import Message
from bungeni.core.workflow import interfaces
from bungeni.core.workflow.states import GRANT, DENY
from bungeni.core.workflow.states import Feature, State, Transition, Workflow
from bungeni.core.workflow.notification import Notification
from bungeni.utils.capi import capi, bungeni_custom_errors
from bungeni.ui.utils import debug

#

ASSIGNMENTS = (GRANT, DENY)

ACTIONS_MODULE = resolve("._actions", "bungeni.core.workflows")

trigger_value_map = {
    "manual": interfaces.MANUAL,
    "automatic": interfaces.AUTOMATIC,
    "system": interfaces.SYSTEM
}

# only letters, numbers and "_" char i.e. no whitespace or "-"
ID_RE = re.compile("^[\w\d_]+$")

FEATURE_ATTRS = ("name", "enabled", "note")

STATE_ATTRS = ("id", "title", "version", "like_state", "note",
               "permissions_from_parent", "obsolete")

def provide_storagetype_interfaces(self, instance, storage_types):
    """Assign any selected storage type interfaces to the storage.
    """
    for storage_type in storage_types:
        inter = resolve(storage_type)
        alsoProvides(instance, inter)

def get_workflow_action(self, action):
    action_module = resolve("._actions", "bungeni_custom.workflows")
    action = getattr(action_module, action)  # raises AttributeError
    return wrapped_callable(action)

def importRecords(self, node):
    # May raise ImportError if interface can't be found or KeyError if
    # attribute is missing.
    interfaceName = node.attrib.get('interface', None)
    if interfaceName is None:
        raise KeyError(
            u"A <records /> node must have an 'interface' attribute.")

    # None means use interface.__identifier__
    prefix = node.attrib.get('prefix', None)

    if node.attrib.get('delete') is not None:
        self.logger.warning(u"The 'delete' attribute of <record /> nodes "
                            u"is deprecated, it should be replaced with "
                            u"'remove'.")

    remove = node.attrib.get(
        'remove', node.attrib.get('delete', 'false')).lower() == 'true'

    # May raise ImportError
    interface = resolve(interfaceName)

    omit = []
    # Fields that should have their value set as they don't exist yet
    values = []
    for child in node:
        if not isinstance(child.tag, str):
            continue
        elif child.tag.lower() == 'omit':
            if child.text:
                omit.append(unicode(child.text))
        elif child.tag.lower() == 'value':
            values.append(child)

    if remove and values:
        raise ValueError(
            "A <records /> node with 'remove=\"true\"' must not contain "
            "<value /> nodes.")
    elif remove:
        for f in getFieldNames(interface):
            if f in omit:
                continue
            child = etree.Element('value', key=f, purge='True')
            values.append(child)

    # May raise TypeError
    self.context.registerInterface(interface, omit=tuple(omit), prefix=prefix)

    if not values and not remove:
        # Skip out if there are no value records to handle
        return

    # The prefix we ended up with needs to be found
    if prefix is None:
        prefix = interface.__identifier__

    for value in values:
        field = etree.Element("record",
                              interface=interface.__identifier__,
                              field=value.attrib["key"],
                              prefix=prefix,
                              remove=repr(remove).lower())
        field.append(value)
        self.importRecord(field)

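# Added illustration: the shape of the registry.xml node this importer
# consumes. The interface, prefix, and field names below are hypothetical;
# the attribute and child-element names come from the parsing code above.
#
#   <records interface="my.addon.interfaces.IMySettings" prefix="myaddon">
#     <omit>internal_field</omit>
#     <value key="timeout">30</value>
#   </records>
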
def process_coord_no_format(
        feature_layers, nominal_zoom, unpadded_bounds, post_process_data,
        output_calc_mapping):

    extra_data = dict(size={})
    processed_feature_layers = []
    # filter, and then transform each layer as necessary
    for feature_layer in feature_layers:
        layer_datum = feature_layer['layer_datum']
        layer_name = layer_datum['name']
        geometry_types = layer_datum['geometry_types']
        padded_bounds = feature_layer['padded_bounds']

        transform_fn_names = layer_datum['transform_fn_names']
        if transform_fn_names:
            transform_fns = resolve_transform_fns(transform_fn_names)
            layer_transform_fn = make_transform_fn(transform_fns)
        else:
            layer_transform_fn = None

        layer_output_calc = output_calc_mapping.get(layer_name)
        assert layer_output_calc, 'output_calc_mapping missing layer: %s' % \
            layer_name

        features = []
        features_size = 0
        for row in feature_layer['features']:
            wkb = row.pop('__geometry__')
            shape = loads(wkb)

            if shape.is_empty:
                continue

            if not shape.is_valid:
                continue

            if geometry_types is not None:
                if shape.type not in geometry_types:
                    continue

            # since a bounding box intersection is used, we
            # perform a more accurate check here to filter out
            # any extra features
            # the formatter specific transformations will take
            # care of any additional filtering
            geom_type_bounds = padded_bounds[
                normalize_geometry_type(shape.type)]
            shape_padded_bounds = geometry.box(*geom_type_bounds)
            if not shape_padded_bounds.intersects(shape):
                continue

            feature_id = row.pop('__id__')
            props = {}
            feature_size = getsizeof(feature_id) + len(wkb)

            label = row.pop('__label__', None)
            if label:
                # TODO probably formalize as part of the feature
                props['mz_label_placement'] = label
            feature_size += len('__label__') + _sizeof(label)

            # first ensure that all strings are utf-8 encoded
            # it would be better for it all to be unicode instead, but
            # some downstream transforms / formatters might be
            # expecting utf-8
            row = utils.encode_utf8(row)

            query_props = row.pop('__properties__')
            feature_size += len('__properties__') + _sizeof(query_props)

            # TODO:
            # Right now this is hacked to map the particular source,
            # which all relevant queries include, back to another
            # metadata property
            # The reason for this is to support the same yaml syntax
            # for python output calculation and sql min zoom function
            # generation.
            # This is done in python here to avoid having to update
            # all the queries in the jinja file with redundant
            # information.
            meta = meta_for_properties(query_props)

            # set the "tags" key
            # some transforms expect to be able to read it from this location
            # longer term, we might want to separate the notion of
            # "input" and "output" properties as a part of the feature
            props['tags'] = query_props

            output_props = layer_output_calc(
                shape, query_props, feature_id, meta)
            assert output_props, 'No output calc rule matched'

            # a feature can belong to more than one layer
            # this check ensures that it only appears in the
            # layers it should
            # NOTE: the min zoom can be calculated by the yaml, so
            # this check must happen after that
            min_zoom = output_props.get('min_zoom')
            assert min_zoom is not None, \
                'Missing min_zoom in layer %s' % layer_name

            # TODO would be better if 16 wasn't hard coded here
            if nominal_zoom < 16 and min_zoom >= nominal_zoom + 1:
                continue

            for k, v in output_props.items():
                if v is not None:
                    props[k] = v

            if layer_transform_fn:
                shape, props, feature_id = layer_transform_fn(
                    shape, props, feature_id, nominal_zoom)

            feature = shape, props, feature_id
            features.append(feature)
            features_size += feature_size

        extra_data['size'][layer_datum['name']] = features_size

        sort_fn_name = layer_datum['sort_fn_name']
        if sort_fn_name:
            sort_fn = resolve(sort_fn_name)
            features = sort_fn(features, nominal_zoom)

        feature_layer = dict(
            name=layer_name,
            features=features,
            layer_datum=layer_datum,
            padded_bounds=padded_bounds,
        )
        processed_feature_layers.append(feature_layer)

    # post-process data here, before it gets formatted
    processed_feature_layers = _postprocess_data(
        processed_feature_layers, post_process_data, nominal_zoom,
        unpadded_bounds)

    return processed_feature_layers, extra_data

def importRecord(self, node):
    name = node.get('name', '')

    if node.get('delete') is not None:
        self.logger.warning(u"The 'delete' attribute of <record /> nodes "
                            u"is deprecated, it should be replaced with "
                            u"'remove'.")

    remove = node.get('remove', node.get('delete', 'false'))

    interfaceName = node.get('interface', None)
    fieldName = node.get('field', None)

    if not name and (interfaceName and fieldName):
        prefix = node.get('prefix', None)
        if prefix is None:
            prefix = interfaceName
        name = "%s.%s" % (prefix, fieldName)

    if not name:
        raise NameError("No name given for <record /> node!")

    # Unicode is not supported
    name = str(name)

    # Handle deletion and quit
    if remove.lower() == 'true':
        if name in self.context.records:
            del self.context.records[name]
            self.logger.info("Removed record %s." % name)
        else:
            self.logger.warning(
                "Record %s was marked for deletion, but was not found."
                % name)
        return

    # See if we have an existing record
    existing_record = self.context.records.get(name, None)

    interface = None
    field = None
    value = _marker
    value_purge = True

    # If we are given an interface and field name, try to resolve them
    if interfaceName and fieldName:
        try:
            interface = resolve(interfaceName)
            field = IPersistentField(interface[fieldName])
        except ImportError:
            self.logger.warning("Failed to import interface %s for "
                                "record %s" % (interfaceName, name))
            interface = None
            field = None
        except KeyError:
            self.logger.warning("Interface %s specified for record %s has "
                                "no field %s."
                                % (interfaceName, name, fieldName))
            interface = None
            field = None
        except TypeError:
            self.logger.warning("Field %s in interface %s specified for "
                                "record %s cannot be used as a persistent "
                                "field." % (fieldName, interfaceName, name))
            interface = None
            field = None

    # Find field and value nodes
    field_node = None
    value_node = None

    for child in node:
        if not isinstance(child.tag, str):
            continue
        elif child.tag.lower() == 'field':
            field_node = child
        elif child.tag.lower() == 'value':
            value_node = child

    # Let the field node potentially override interface[fieldName]
    if field_node is not None:
        field_ref = field_node.attrib.get('ref', None)
        if field_ref is not None:
            # We have a field reference
            if field_ref not in self.context:
                raise KeyError(
                    u"Record %s references field for record %s, "
                    u"which does not exist" % (name, field_ref))
            ref_record = self.context.records[field_ref]
            field = FieldRef(field_ref, ref_record.field)
        else:
            # We have a standard field
            field_type = field_node.attrib.get('type', None)
            field_type_handler = queryUtility(IFieldExportImportHandler,
                                              name=field_type)
            if field_type_handler is None:
                raise TypeError(
                    "Field of type %s used for record %s is not supported."
                    % (field_type, name))
            else:
                field = field_type_handler.read(field_node)
                if not IPersistentField.providedBy(field):
                    raise TypeError(
                        "Only persistent fields may be imported. "
                        "%s used for record %s is invalid."
                        % (field_type, name))

    if field is not None and not IFieldRef.providedBy(field):
        # Set interface name and fieldName, if applicable
        field.interfaceName = interfaceName
        field.fieldName = fieldName

    # Fall back to existing record if neither a field node nor the
    # interface yielded a field
    change_field = True
    if field is None and existing_record is not None:
        change_field = False
        field = existing_record.field

    if field is None:
        raise ValueError(
            "Cannot find a field for the record %s. Add a "
            "<field /> element or reference an interface and field name."
            % name)

    # Extract the value
    if value_node is not None:
        value_purge = value_node.attrib.get('purge', '').lower() != 'false'
        value = elementToValue(field, value_node, default=_marker)

    # Now either construct or update the record
    if value is _marker:
        value = field.default
        value_purge = True

    if existing_record is not None:
        if change_field:
            existing_record.field = field
        existing_value = existing_record.value
        if change_field or value != existing_value:
            if not value_purge and type(value) == type(existing_value):
                if isinstance(value, list):
                    value = existing_value + [
                        v for v in value if v not in existing_value
                    ]
                elif isinstance(value, tuple):
                    value = existing_value + tuple(
                        [v for v in value if v not in existing_value])
                elif isinstance(value, (set, frozenset,)):
                    value = existing_value.union(value)
                elif isinstance(value, dict):
                    for key, value in value.items():
                        # check if value is list, if so, let's add
                        # instead of overriding
                        if type(value) == list:
                            if key in existing_value and \
                                    not shouldPurgeList(value_node, key):
                                existing = existing_value[key]
                                for item in existing:
                                    # here, we'll remove existing items
                                    # point is that we don't want duplicates
                                    # and don't want to reorder
                                    if item in value:
                                        value.remove(item)
                                existing.extend(value)
                                value = existing
                        existing_value[key] = value
                    value = existing_value
            existing_record.value = value
    else:
        self.context.records[name] = Record(field, value)

def _process_feature_layers(feature_layers, coord, post_process_data,
                            formats, unpadded_bounds, scale,
                            layers_to_format, buffer_cfg):
    processed_feature_layers = []
    # filter, and then transform each layer as necessary
    for feature_layer in feature_layers:
        layer_datum = feature_layer['layer_datum']
        layer_name = layer_datum['name']
        features = feature_layer['features']

        transform_fn_names = layer_datum['transform_fn_names']
        if transform_fn_names:
            transform_fns = resolve_transform_fns(transform_fn_names)
            layer_transform_fn = make_transform_fn(transform_fns)
        else:
            layer_transform_fn = None

        # perform any specific layer transformations
        if layer_transform_fn is None:
            processed_features = features
        else:
            processed_features = []
            for feature in features:
                shape, props, feature_id = feature
                shape, props, feature_id = layer_transform_fn(
                    shape, props, feature_id, coord.zoom)
                transformed_feature = shape, props, feature_id
                processed_features.append(transformed_feature)

        sort_fn_name = layer_datum['sort_fn_name']
        if sort_fn_name:
            sort_fn = resolve(sort_fn_name)
            processed_features = sort_fn(processed_features, coord.zoom)

        feature_layer = dict(
            name=layer_name,
            features=processed_features,
            layer_datum=layer_datum,
            padded_bounds=feature_layer['padded_bounds'],
        )
        processed_feature_layers.append(feature_layer)

    # post-process data here, before it gets formatted
    processed_feature_layers = _postprocess_data(
        processed_feature_layers, post_process_data, coord, unpadded_bounds)

    meters_per_pixel_dim = calc_meters_per_pixel_dim(coord.zoom)

    # topojson formatter expects bounds to be in lnglat
    unpadded_bounds_lnglat = (
        mercator_point_to_lnglat(unpadded_bounds[0], unpadded_bounds[1]) +
        mercator_point_to_lnglat(unpadded_bounds[2], unpadded_bounds[3]))

    # now, perform the format specific transformations
    # and format the tile itself
    formatted_tiles = []
    layer = 'all'
    for format in formats:
        formatted_tile = _create_formatted_tile(
            processed_feature_layers, format, scale, unpadded_bounds,
            unpadded_bounds_lnglat, coord, layer, meters_per_pixel_dim,
            buffer_cfg)
        formatted_tiles.append(formatted_tile)

    # this assumes that we only store single layers, and no combinations
    for layer, formats, zoom_start, zoom_until in layers_to_format:
        if not (zoom_start <= coord.zoom <= zoom_until):
            continue
        for feature_layer in processed_feature_layers:
            if feature_layer['name'] == layer:
                pruned_feature_layers = [feature_layer]
                for format in formats:
                    formatted_tile = _create_formatted_tile(
                        pruned_feature_layers, format, scale,
                        unpadded_bounds, unpadded_bounds_lnglat, coord,
                        layer, meters_per_pixel_dim, buffer_cfg)
                    formatted_tiles.append(formatted_tile)
                break

    return formatted_tiles

def retrieve_domain_model(type_key):
    """Infer and retrieve the target domain model class from the type key.
    Raise AttributeError if not defined on domain.
    """
    return resolve("%s.%s" % (MODEL_MODULE.__name__,
                              naming.model_name(type_key)))