def originatedObjects(self):
    if self.__originatedObjects is None:
        components = set()
        # Flatten the two-level category map; a plain loop is clearer than a
        # list comprehension executed only for its side effects.
        for _v in six.itervalues(self.__categoryObjectMap):
            components.update(six.itervalues(_v))
        self.__originatedObjects = frozenset(components)
    return self.__originatedObjects
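# A standalone sketch of the flattening idiom used by originatedObjects (and
# by _namedObjects below): aggregate the values of a two-level
# {category: {name: object}} map with nested six.itervalues calls, then
# freeze the result.  The literal map here is an illustrative stand-in for
# __categoryObjectMap.
import six

category_object_map = {
    'elementDeclaration': {'e1': object(), 'e2': object()},
    'typeDefinition': {'t1': object()},
}
components = set()
for by_name in six.itervalues(category_object_map):
    components.update(six.itervalues(by_name))
originated = frozenset(components)
assert len(originated) == 3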
def __processSchema (self, generation_uid):
    global pyxb
    import pyxb.xmlschema
    print('PS %s' % (generation_uid,))
    if self.__schema is not None:
        print('Already have schema')
        return self.__schema
    for t in self.types:
        for wc in t.wildcardElements():
            if isinstance(wc, xml.dom.Node) and pyxb.namespace.XMLSchema.nodeIsNamed(wc, 'schema'):
                # Try to load component models for any namespace referenced by this.
                # Probably shouldn't need to do this except for imported ones.
                for ns in six.itervalues(self.namespaceContext().inScopeNamespaces()):
                    try:
                        ns.validateComponentModel()
                    except Exception as e:
                        print('Error validating component model for %s: %s' % (ns.uri(), e))
                self.__schema = pyxb.xmlschema.schema.CreateFromDOM(wc, namespace_context=self.namespaceContext(), generation_uid=generation_uid)
            elif isinstance(wc, pyxb.xmlschema.schema):
                self.__schema = wc
            else:
                print('No match: %s %s' % (wc.namespaceURI, wc.localName))
            if self.__schema is not None:
                return self.__schema
    return None
def __finalizeReferences(self):
    tns = self.namespaceContext().targetNamespace()
    for m in six.itervalues(tns.messages()):
        for p in m.part:
            if (p.element is not None) and (p.elementReference is None):
                elt_en = p.element
                p._setElementReference(elt_en.elementDeclaration())
            if (p.type is not None) and (p.typeReference is None):
                type_en = p.type
                p._setTypeReference(type_en.typeDefinition())
def AvailableNamespaces (cls):
    """Return a set of all Namespace instances defined so far."""
    return cls.__AbsentNamespaces.union(six.itervalues(cls.__Registry))
def _namedObjects (self):
    objects = set()
    for category_map in six.itervalues(self.__categoryMap):
        objects.update(six.itervalues(category_map))
    return objects
def moduleRecords (self):
    return list(six.itervalues(self.__moduleRecordMap))
def testIterValues (self):
    vals = set()
    for e in six.itervalues(cards):
        vals.add(e)
    self.assertEqual(self.Expected, vals)
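# A minimal demonstration (not part of the test suite above) of the property
# the test exercises: six.itervalues(d) yields an iterator over a dict's
# values on both Python 2 (d.itervalues()) and Python 3 (iter(d.values())).
# cards_demo is an illustrative stand-in for the module-level cards fixture.
import six

cards_demo = {'ace': 1, 'deuce': 2, 'trey': 3}
assert set(six.itervalues(cards_demo)) == {1, 2, 3}
# The result is a one-shot iterator, not a list: it is exhausted after one pass.
it = six.itervalues(cards_demo)
assert sorted(it) == [1, 2, 3]
assert list(it) == []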
def origins(self):
    return list(six.itervalues(self.__originMap))
def PreLoadArchives(cls, archive_path=None, reset=False):
    """Scan for available archives, associating them with namespaces.

    This only validates potential archive contents; it does not load
    namespace data from the archives.

    @keyword archive_path: A list of files or directories in which
    namespace archives can be found.  The entries are separated by
    os.pathsep, which is a colon on POSIX platforms and a semi-colon on
    Windows.  See L{PathEnvironmentVariable}.  Defaults to
    L{GetArchivePath()}.  If not defaulted, C{reset} will be forced to
    C{True}.  For any directory in the path, all files ending with
    C{.wxs} are examined.

    @keyword reset: If C{False} (default), the most recently read set of
    archives is returned; if C{True}, the archive path is re-scanned and
    the namespace associations validated.
    """
    from pyxb.namespace import builtin
    reset = reset or (archive_path is not None) or (cls.__NamespaceArchives is None)
    if reset:
        # Get a list of pre-existing archives, initializing the map if
        # this is the first time through.
        if cls.__NamespaceArchives is None:
            cls.__NamespaceArchives = {}
        existing_archives = set(six.itervalues(cls.__NamespaceArchives))
        archive_set = set()
        # Ensure we have an archive path.  If not, don't do anything.
        if archive_path is None:
            archive_path = GetArchivePath()
        if archive_path is not None:
            # Get archive instances for everything in the archive path
            candidate_files = pyxb.utils.utility.GetMatchingFiles(archive_path, cls.__ArchivePattern_re,
                                                                  default_path_wildcard='+', default_path=GetArchivePath(),
                                                                  prefix_pattern='&', prefix_substituend=DefaultArchivePrefix)
            for afn in candidate_files:
                try:
                    nsa = cls.__GetArchiveInstance(afn, stage=cls._STAGE_readModules)
                    archive_set.add(nsa)
                except pickle.UnpicklingError:
                    _log.exception('Cannot unpickle archive %s', afn)
                except pyxb.NamespaceArchiveError:
                    _log.exception('Cannot process archive %s', afn)

            # Do this for two reasons: first, to get an iterable that won't
            # cause problems when we remove unresolvable archives from
            # archive_set; and second to aid with forced dependency inversion
            # testing
            ordered_archives = sorted(list(archive_set), key=lambda _a: _a.archivePath())
            ordered_archives.reverse()

            # Create a graph that identifies dependencies between the archives
            archive_map = {}
            for a in archive_set:
                archive_map[a.generationUID()] = a
            archive_graph = pyxb.utils.utility.Graph()
            for a in ordered_archives:
                prereqs = a._unsatisfiedModulePrerequisites()
                if 0 < len(prereqs):
                    for p in prereqs:
                        if builtin.BuiltInObjectUID == p:
                            continue
                        da = archive_map.get(p)
                        if da is None:
                            _log.warning('%s depends on unavailable archive %s', a, p)
                            archive_set.remove(a)
                        else:
                            archive_graph.addEdge(a, da)
                else:
                    archive_graph.addRoot(a)

            # Verify that there are no dependency loops.
            archive_scc = archive_graph.sccOrder()
            for scc in archive_scc:
                if 1 < len(scc):
                    raise pyxb.LogicError("Cycle in archive dependencies.  How'd you do that?\n  " + "\n  ".join([_a.archivePath() for _a in scc]))
                archive = scc[0]
                if not (archive in archive_set):
                    archive.discard()
                    existing_archives.remove(archive)
                    continue
                #archive._readToStage(cls._STAGE_COMPLETE)

        # Discard any archives that we used to know about but now aren't
        # supposed to.  @todo make this friendlier in the case of archives
        # we've already incorporated.
        for archive in existing_archives.difference(archive_set):
            _log.info('Discarding excluded archive %s', archive)
            archive.discard()
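# A minimal sketch of the cycle check PreLoadArchives performs, using the same
# pyxb.utils.utility.Graph API (addEdge/sccOrder) the method itself relies on.
# The string nodes are illustrative stand-ins for archive instances; assumes
# pyxb is importable.
import pyxb.utils.utility

g = pyxb.utils.utility.Graph()
g.addEdge('a', 'b')   # archive a depends on archive b
g.addEdge('b', 'c')   # b depends on c
g.addEdge('c', 'a')   # c depends on a, closing a cycle
for scc in g.sccOrder():
    if 1 < len(scc):
        # PreLoadArchives raises pyxb.LogicError in this situation.
        print('dependency cycle among: %s' % (sorted(scc),))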
# Excerpt from a WSDL client example; domutils, wsdl, ndfd, six, lat, lon and
# today are bound earlier in the full program.
uri_src = open('ndfdXML.wsdl')
doc = domutils.StringToDOM(uri_src.read())
spec = wsdl.definitions.createFromDOM(doc.documentElement, process_schema=True)

# Create a helper that will generate XML in the WSDL's namespace,
# qualifying every element with xsi:type just like the service
# expects.
bds = domutils.BindingDOMSupport(default_namespace=spec.targetNamespace(),
                                 require_xsi_type=True)

# Set the parameters that you want enabled.  See
# http://www.nws.noaa.gov/xml/docs/elementInputNames.php
weather_params = ndfd.weatherParametersType(maxt=True, mint=True, temp=True,
                                            sky=True, pop12=True, rh=True,
                                            wx=True, appt=True)
# The schema didn't say the other parameters are optional (even though
# they are), so set them to false if not already initialized.
for eu in six.itervalues(weather_params._ElementMap):
    if eu.value(weather_params) is None:
        eu.set(weather_params, False)

# There is no schema element or type corresponding to the request
# message; it's only in a WSDL message definition.  We need to build
# it manually.

# Create a root element corresponding to the operation's input message
root = bds.createChildElement('NDFDgen')

# Create a map from the message part name to the value to use for that
# part.
request_values = {
    'latitude': lat,
    'longitude': lon,
    'startTime': today