def _combine(self, d):
    """Return a new dict merging the full mapping with `d`.

    Entries in `d` override entries in ``self._full_dict`` because they
    are applied last.
    """
    merged = dict(iteritems(self._full_dict))
    merged.update(d)
    return merged
def _iter_fields(obj):
    """Return an iterator of ``(name, value)`` pairs for `obj`.

    Pairs are drawn first from the object's ``_fields`` mapping (if any)
    and then from its instance ``__dict__`` (if any).
    """
    fields = iteritems(obj._fields) if hasattr(obj, "_fields") else iter(())
    instance = iteritems(vars(obj)) if hasattr(obj, "__dict__") else iter(())
    return itertools.chain(fields, instance)
def _iter_vars(obj):
    """Return an iterator of ``(name, value)`` pairs for `obj`.

    Pairs are drawn first from the object's instance ``__dict__`` (if any)
    and then from its ``_fields`` mapping (if any) — the reverse priority
    of ``_iter_fields``.
    """
    instance = iteritems(vars(obj)) if hasattr(obj, "__dict__") else iter(())
    fields = iteritems(obj._fields) if hasattr(obj, "_fields") else iter(())
    return itertools.chain(instance, fields)
def to_file(self, location=None):
    """Writes content of mapping dictionary to file.

    *Note: The output file directory and filename will be
    {Drive}:{PATH}/Desktop/fields.py
    """
    if location is None:
        desktop = os.path.expanduser("~/Desktop")
        destination = os.path.abspath(desktop + "/fields.py")
    else:
        destination = location

    indent = " " * 4
    double_indent = " " * 8

    with open(destination, "w") as outfile:
        outfile_header(outfile, self._VERSIONS)
        outfile_fields_header(outfile)

        # Emit each top-level key as a nested dict literal, sorted for
        # deterministic output.
        for key in sorted(iterkeys(self._FIELDS)):
            outfile.write(indent)
            outfile.write("\"{0}\": {{\n".format(key))
            for in_key, in_val in sorted(iteritems(self._FIELDS[key])):
                outfile.write(double_indent)
                outfile.write("\"{0}\": \"{1}\",\n".format(in_key, in_val))
            outfile.write(indent)
            outfile.write("},\n")
        outfile.write("}\n")
def _check_latest_versions(self, root, namespaces, version):  # noqa
    """Checks that all major STIX constructs versions are equal to
    the latest version.
    """
    expected_versions = common.STIX_COMPONENT_VERSIONS[version]
    results = BestPracticeWarningCollection('Latest Component Versions')

    def _is_expected(node, expected):
        # Nodes that declare no version are never flagged.
        if 'version' not in node.attrib:
            return True
        return node.attrib['version'] == expected

    for selector, expected in iteritems(expected_versions):
        for node in root.xpath("//%s" % selector, namespaces=namespaces):
            if _is_expected(node, expected):
                continue
            warning = BestPracticeWarning(node)
            warning['version found'] = node.attrib['version']
            warning['version expected'] = expected
            results.append(warning)

    return results
def print_results(results, options):
    """Prints `results` to stdout.

    If ``options.json_results`` is set, the results are printed in JSON
    format.

    Args:
        results: A dictionary of ValidationResults instances. The key is
            the file path to the validated document.
        options: An instance of ``ValidationOptions`` which contains
            output options.
    """
    if options.json_results:
        print_json_results(results)
        return

    level = 0
    for fn, result in sorted(iteritems(results)):
        print("=" * 80)
        print_level("[-] Results: %s", level, fn)

        # Each section is optional; print only what the result carries.
        if result.schema_results is not None:
            print_schema_results(result.schema_results, level)
        if result.best_practice_results is not None:
            print_best_practice_results(result.best_practice_results, level)
        if result.profile_results is not None:
            print_profile_results(result.profile_results, level)
        if result.fatal is not None:
            print_fatal_results(result.fatal, level)
def _get_rules(self, version):
    """Returns a list of best practice check functions that are applicable
    to the STIX `version`.
    """
    StrictVersion = distutils.version.StrictVersion

    def can_run(stix_version, rule_min, rule_max):
        # A rule with no minimum version applies to every document.
        if not rule_min:
            return True
        doc_ver = StrictVersion(remove_version_prefix(stix_version))
        min_ver = StrictVersion(remove_version_prefix(rule_min))
        if rule_max:
            max_ver = StrictVersion(remove_version_prefix(rule_max))
            return min_ver <= doc_ver <= max_ver
        return min_ver <= doc_ver

    # Collect every rule whose version window contains `version`.
    applicable = []
    for (min_, max_), funcs in iteritems(self._rules):  # noqa
        applicable.extend(f for f in funcs if can_run(version, min_, max_))
    return applicable
def _apply_field_markings(self):
    """Apply each field-level marking registered on the container."""
    markings = self._container._field_markings
    if not markings:
        return
    for field, info in iteritems(markings):
        self._apply_markings_to_field(field, info)
def entity_class(cls, key):
    """Resolve `key` to a vocabulary class via a partial xsi:type match.

    Falls back to ``VocabString`` when `key` is empty or unrecognized.
    """
    if not key:
        return VocabString
    # First registered xsi:type containing `key` wins.
    match = next(
        (klass for xsitype, klass in six.iteritems(_VOCAB_MAP)
         if key in xsitype),
        None
    )
    return VocabString if match is None else match
def as_dict(self):
    """Returns a dictionary representation of this class instance.

    This is implemented for consistency across other validation error
    types. The :class:`.BestPracticeWarning` class extends
    :class:`collections.MutableMapping`, so this method isn't really
    necessary.
    """
    return {key: value for key, value in iteritems(self)}
def lookup_class(xsi_type):
    """Resolve an xsi:type string to its vocabulary class.

    Falls back to ``VocabString`` when `xsi_type` is empty or no mapping
    matches.
    """
    if not xsi_type:
        return VocabString
    for registered_type, klass in six.iteritems(_VOCAB_MAP):
        # TODO: for now we ignore the prefix and just check for
        # a partial match
        if xsi_type in registered_type:
            return klass
    return VocabString
def test_key_in_field_dictionary(self):
    """Tests if a key is present in the _FIELDS dictionary."""
    for entity in self.cybox:
        typed_fields = entity.typed_fields()
        class_attrs = vars(entity.__class__)
        for field in typed_fields:
            # Locate the class attribute bound to this TypedField and
            # verify it resolves to an XML selector.
            for name, value in iteritems(class_attrs):
                if value is field:
                    selector = attrmap.xmlfield(entity, name)
                    self.assertTrue(selector, self.msg.format(entity, value))
def flip_dict(d):
    """Returns a copy of the input dictionary `d` where the values of `d`
    become the keys and the keys become the values.

    Note:
        This does not even attempt to address key collisions.

    Args:
        d: A dictionary
    """
    return {value: key for key, value in iteritems(d)}
def namespaces(self):
    """Returns a list of etree Elements that represent Schematron
    ``<ns prefix='foo' uri='bar'>`` elements.
    """
    return [
        schematron.make_ns(prefix, uri)
        for uri, prefix in iteritems(self._namespaces)
    ]
def _finalize_schemalocs(self, schemaloc_dict=None):
    """Build the final namespace-to-schemalocation mapping for export.

    Precedence (highest first): known default STIX schemalocations, the
    caller-supplied `schemaloc_dict`, then schemalocations parsed from the
    input document. Warns for namespaces that cannot be mapped.
    """
    # Copy the caller's dict so we never mutate their data.
    schemaloc_dict = dict(iteritems(schemaloc_dict)) if schemaloc_dict else {}

    # Get our id namespace
    id_ns = idgen.get_id_namespace()

    # Fold in parsed input schemalocations; caller-supplied entries win.
    for ns, loc in iteritems(self._input_schemalocs):
        schemaloc_dict.setdefault(ns, loc)

    # Map every finalized namespace to a schemalocation, warning when a
    # namespace that should have one cannot be resolved.
    for ns in set(itervalues(self.finalized_namespaces)):
        if ns in DEFAULT_STIX_SCHEMALOCATIONS:
            schemaloc_dict[ns] = DEFAULT_STIX_SCHEMALOCATIONS[ns]
        elif ns in schemaloc_dict:
            continue
        elif (ns == id_ns) or (ns in XML_NAMESPACES):
            # The ID namespace and well-known XML namespaces never need a
            # schemalocation.
            continue
        else:
            error = "Unable to map namespace '{0}' to schemaLocation"
            warnings.warn(error.format(ns))

    return schemaloc_dict
def get_schemaloc_str(self, schemaloc_dict):
    """Render `schemaloc_dict` as an ``xsi:schemaLocation`` attribute
    string, with pairs sorted by namespace. Returns "" for an empty dict.
    """
    if not schemaloc_dict:
        return ""

    content = "\n\t".join(
        "%s %s" % (ns, loc)
        for ns, loc in sorted(iteritems(schemaloc_dict))
    )
    return 'xsi:schemaLocation="\n\t' + content + '"'
def _get_validators(self, schema_dir=None):
    """Build the mapping of keys/versions to schema validator instances.

    A user-supplied `schema_dir` replaces the bundled schemas entirely;
    otherwise one validator per known schema version is created alongside
    the schemaLocation-driven validator.
    """
    validators = {self._KEY_SCHEMALOC: self._get_validator_impl()}

    if schema_dir:
        validators = {
            self._KEY_USER_DEFINED: self._get_validator_impl(schema_dir)
        }
        return validators

    for version, location in iteritems(self._SCHEMAS):
        validators[version] = self._get_validator_impl(location)

    return validators
def iter_vars(obj):
    """Returns a generator which yields a
    ``(property name, property value)`` tuple with each iteration.

    Note:
        This will not yield vars that are attached during parse, such as
        ``__input_schemalocations__`` and ``__input_namespaces__``.
    """
    skipped = ('__input_namespaces__', '__input_schemalocations__')
    return (
        (attr_name(name), value)
        for name, value in iteritems(vars(obj))
        if name not in skipped
    )
def outfile_header(outfile, versions):
    """Write the auto-generated file header, including a comment line per
    (module, version) pair from `versions` and a ``__date__`` stamp.
    """
    today = datetime.datetime.utcnow()

    version_lines = "".join(
        "# {mod}\t-\t{ver}\n".format(mod=mod, ver=ver)
        for mod, ver in iteritems(versions)
    )

    outfile.write(
        "# Copyright (c) {year}, The MITRE Corporation. All rights reserved.\n"
        "# See LICENSE.txt for complete terms.\n\n"
        "# This is an auto-generated file.\n"
        "{versions}\n"
        "__date__ = \"{datetime}\"\n\n".format(year=today.year,
                                               versions=version_lines,
                                               datetime=today))
def assert_valid(self):
    """For debugging; does some sanity checks on this set.

    Raises InvalidNamespaceSetError if this namespace set is invalid.
    Otherwise, raises/returns nothing."""
    for ns_uri, ni in six.iteritems(self.__ns_uri_map):
        # Each _NamespaceInfo must carry its own URI...
        if not ni.uri:
            raise InvalidNamespaceSetError(
                "URI not set in _NamespaceInfo (id={0}):\n{1}".format(
                    id(ni), ni))
        # ...and that URI must agree with the key it is filed under.
        if ns_uri != ni.uri:
            raise InvalidNamespaceSetError(
                "URI mismatch in dict ({0}) and _NamespaceInfo ({1})".
                format(ns_uri, ni.uri))
        # A preferred prefix, if set, must be one of the known prefixes.
        if (ni.preferred_prefix is not None and
                ni.preferred_prefix not in ni.prefixes):
            raise InvalidNamespaceSetError(
                "Namespace {0.uri}: preferred prefix " \
                '"{0.preferred_prefix}" not in prefixes ' \
                "{0.prefixes}".format(ni)
            )
        for prefix in ni.prefixes:
            # Empty/None prefixes are not allowed in the prefix set.
            if not prefix:
                raise InvalidNamespaceSetError(
                    "Namespace {0.uri}: empty value in prefix " \
                    "set: {0.prefixes}".format(ni)
                )
            # Every prefix must appear in the reverse map and point back
            # at this exact _NamespaceInfo object.
            other_ni = self.__prefix_map.get(prefix)
            if other_ni is None:
                raise InvalidNamespaceSetError(
                    'Namespace {0.uri}: prefix "{1}" not in ' \
                    'prefix map'.format(ni, prefix)
                )
            if other_ni is not ni:
                raise InvalidNamespaceSetError(
                    'Namespace {0.uri}: prefix "{1}" maps to ' \
                    'wrong _NamespaceInfo (id={2}, uri={3.uri})'.format(
                        ni, prefix, id(other_ni), other_ni
                    )
                )
    if None in self.__prefix_map:
        # None can be a preferred prefix, but should not be in the
        # prefix map.
        raise InvalidNamespaceSetError("None is in prefix map!")
def handle_duplicate_objects(cls, bundle, all_objects):
    """Replace all of the duplicate Objects with references to the unique
    object placed in the "Re-used Objects" Collection."""
    for dup_id, unique_id in iteritems(cls.object_ids_mapping):
        # Strip the duplicate down to a pure reference to the unique
        # Object in the collection.
        if dup_id and dup_id in cls.id_objects:
            duplicate = cls.id_objects[dup_id]
            duplicate.idref = unique_id
            duplicate.id_ = None
            duplicate.properties = None
            duplicate.related_objects = None
            duplicate.domain_specific_object_properties = None
        # Repoint any idref-only Objects at the unique Object as well.
        if dup_id and dup_id in cls.idref_objects:
            for referrer in cls.idref_objects[dup_id]:
                referrer.idref = unique_id
def to_xml_file(self, file, namespace_dict=None, custom_header=None):
    """Export an object to an XML file. Only supports Package or Bundle
    objects at the moment.

    Args:
        file: the name of a file or a file-like object to write the output
            to. A file opened here by name is always closed on return; a
            caller-supplied file-like object is left open for the caller.
        namespace_dict: a dictionary of mappings of additional XML
            namespaces to prefixes.
        custom_header: a string, list, or dictionary that represents a
            custom XML header to be written to the output.
    """
    if not namespace_dict:
        namespace_dict = {}
    else:
        # Make a copy so we don't pollute the source
        namespace_dict = namespace_dict.copy()

    # Update the namespace dictionary with namespaces found upon import
    namespace_dict.update(self._ns_to_prefix_input_namespaces())

    # Check whether we're dealing with a filename or file-like Object
    opened_here = isinstance(file, string_types)
    out_file = open(file, 'w') if opened_here else file

    try:
        out_file.write("<?xml version='1.0' encoding='UTF-8'?>\n")

        # Write out the custom header, if included
        if isinstance(custom_header, list):
            out_file.write("<!--\n")
            for line in custom_header:
                out_file.write(line.replace("-->", "\\-\\->") + "\n")
            out_file.write("-->\n")
        elif isinstance(custom_header, dict):
            out_file.write("<!--\n")
            for key, value in iteritems(custom_header):
                sanitized_key = str(key).replace("-->", "\\-\\->")
                sanitized_value = str(value).replace("-->", "\\-\\->")
                out_file.write(sanitized_key + ": " + sanitized_value + "\n")
            out_file.write("-->\n")
        elif isinstance(custom_header, string_types):
            out_file.write("<!--\n")
            out_file.write(custom_header.replace("-->", "\\-\\->") + "\n")
            out_file.write("-->\n")

        out_file.write(self.to_xml(namespace_dict=namespace_dict))
    finally:
        # Close only files we opened ourselves (and do so even on error);
        # closing a caller-provided file-like object is a surprising side
        # effect and breaks callers that want to keep writing to it.
        if opened_here:
            out_file.close()
def handle_duplicate_objects(cls, bundle, all_objects):
    """Replace all of the duplicate Objects with references to the unique
    object placed in the "Re-used Objects" Collection."""
    mapping = cls.object_ids_mapping
    for dup_id, unique_id in iteritems(mapping):
        if not dup_id:
            continue
        # Turn the original duplicate into a bare reference to the
        # unique Object in the collection.
        if dup_id in cls.id_objects:
            duplicate = cls.id_objects[dup_id]
            duplicate.idref = unique_id
            duplicate.id_ = None
            duplicate.properties = None
            duplicate.related_objects = None
            duplicate.domain_specific_object_properties = None
        # Repoint every idref-only Object at the unique Object.
        if dup_id in cls.idref_objects:
            for referrer in cls.idref_objects[dup_id]:
                referrer.idref = unique_id
def obj_from_dict(obj, dictionary, **kwargs):
    """Converts a `dictionary` into a stix-edh object.

    Args:
        dictionary: A dictionary representation of a stix-edh object.
        **kwargs: A key-to-class mapping if the corresponding value is a
            dictionary representation of a stix-edh object.
    """
    for key, klass in iteritems(kwargs):
        raw = dictionary.get(key)
        # Promote the raw value to an API object when a class was given.
        value = klass.from_dict(raw) if klass else raw
        setattr(obj, key, value)
def _build_uber_schema(self, doc, schemaloc=False):
    """Builds a schema which is made up of ``xs:import`` directives for
    each schema required to validate `doc`.

    If `schemaloc` is ``True``, the ``xsi:schemaLocation`` attribute
    values are used to create the ``xs:import`` directives. If ``False``,
    the initialization schema directory is used.

    Returns:
        An ``etree.XMLSchema`` instance used to validate `doc`.

    Raises:
        .XMLSchemaImportError: If an error occurred while building the
            dictionary of namespace to schemalocation mappings used to
            drive the uber schema creation.
    """
    root = utils.get_etree_root(doc)
    imports = self._build_required_imports(root, schemaloc)

    if not imports:
        raise errors.XMLSchemaImportError(
            "Cannot validate document. Error occurred while determining "
            "schemas required for validation."
        )

    # Skeleton schema; one xs:import per required namespace is appended.
    xsd = etree.fromstring(
        """
        <xs:schema
            xmlns:xs="http://www.w3.org/2001/XMLSchema"
            targetNamespace="http://stix.mitre.org/tools/validator"
            elementFormDefault="qualified"
            attributeFormDefault="qualified"/>
        """
    )

    for ns, loc in iteritems(imports):
        # lxml expects forward slashes in schemaLocation paths.
        attrib = {'namespace': ns, 'schemaLocation': loc.replace("\\", "/")}
        xsd.append(etree.Element(xmlconst.TAG_XS_IMPORT, attrib=attrib))

    return etree.XMLSchema(xsd)
def dict_merge(target, *args):
    '''Merge multiple dictionaries into one.

    Nested dicts are merged key-by-key, lists are concatenated, and any
    other value from a later dictionary replaces (a deep copy of) the
    earlier one.

    Args:
        target: The dictionary merged into (modified in place).
        *args: Zero or more dictionaries to merge into `target`.

    Returns:
        `target` (also mutated in place), or ``args[0]`` unchanged when a
        single non-dict argument is given.
    '''
    # No sources to merge. This also guards the args[0] access below,
    # which used to raise IndexError for dict_merge(target).
    if not args:
        return target

    if len(args) > 1:
        for obj in args:
            dict_merge(target, obj)
        return target

    # Recursively merge dicts and set non-dict values
    obj = args[0]
    if not isinstance(obj, dict):
        return obj
    for k, v in obj.items():
        if k in target and isinstance(target[k], dict):
            dict_merge(target[k], v)
        elif k in target and isinstance(target[k], list):
            target[k] = target[k] + v
        else:
            # Deep-copy so later mutation of the source cannot leak into
            # the merged result.
            target[k] = deepcopy(v)
    return target
def _get_namespaces(self, recurse=True):
    """Return the set of XML namespace URIs used by this entity and,
    when `recurse` is True, by its children.
    """
    # Get all _namespaces for parent classes
    nsset = set(x._namespace for x in self.__class__.__mro__
                if hasattr(x, '_namespace'))

    # In case of recursive relationships, don't process this item twice
    self.touched = True
    if recurse:
        for x in self._get_children():
            if not hasattr(x, 'touched'):
                nsset.update(x._get_namespaces())
    del self.touched

    # Add any additional namespaces that may be included in the entity.
    # NOTE: this must be add(), not update() — update() with a string
    # argument would insert each character of the URI as a separate
    # set element.
    input_ns = self._ns_to_prefix_input_namespaces()
    for namespace, alias in iteritems(input_ns):
        nsset.add(namespace)

    return nsset
def rules(self):
    """Builds and returns a dictionary of ``BaseProfileRule``
    implementations. The key is the Rule context.
    """
    notype = schematron.make_pattern("no-type")
    typed = schematron.make_pattern("xsi-typed")

    for ctx, profile_rules in iteritems(self._collect_rules()):
        rule = schematron.make_rule(ctx)
        rule.extend(r.as_etree() for r in profile_rules)

        # Contexts that select on an xsi:type attribute belong in the
        # typed pattern; everything else goes in the no-type pattern.
        if "@xsi:type=" in utils.strip_whitespace(ctx):
            typed.append(rule)
        else:
            notype.append(rule)

    return [notype, typed]
def _get_namespace_def(self, additional_ns_dict=None):
    """Return the xmlns/schemaLocation definition string for this entity,
    or "" when the entity exports no namespaces.
    """
    # copy necessary namespaces
    namespaces = self._get_namespaces()

    if not namespaces:
        return ""

    # if there are any other namepaces, include xsi for "schemaLocation"
    # also, include the MAEC default vocabularies schema by default
    namespaces.add(lookup_prefix('xsi'))
    namespaces.add(lookup_prefix('maecVocabs'))

    ns_set = make_namespace_subset_from_uris(namespaces)
    if additional_ns_dict:
        for ns, prefix in iteritems(additional_ns_dict):
            ns_set.add_namespace_uri(ns, prefix)

    return ('\n\t' + ns_set.get_xmlns_string(sort=True, delim='\n\t') +
            '\n\t' + ns_set.get_schemaloc_string(sort=True, delim='\n\t'))
def print_profile_results(results, level):
    """Prints STIX Profile validation results to stdout.

    Args:
        results: An instance of sdv.validators.STIXProfileResults.
        level: The level to print the results.
    """
    marker = "+" if results.is_valid else "!"
    print_level("[%s] Profile: %s", level, marker, results.is_valid)

    if results.is_valid:
        return

    # Group error line numbers by message so each message prints once.
    grouped = collections.defaultdict(list)
    for err in results.errors:
        grouped[err.message].append(err.line)

    for msg, lines in iteritems(grouped):
        line_list = ', '.join(str(x) for x in lines)
        print_level("[!] %s [%s]", level + 1, msg, line_list)
def print_profile_results(results, level):
    """Prints STIX Profile validation results to stdout.

    Args:
        results: An instance of sdv.validators.STIXProfileResults.
        level: The level to print the results.
    """
    marker = "+" if results.is_valid else "!"
    print_level("[%s] Profile: %s", level, marker, results.is_valid)

    if results.is_valid:
        return

    # Bucket error line numbers under their message so each distinct
    # message is printed exactly once.
    by_message = collections.defaultdict(list)
    for err in results.errors:
        by_message[err.message].append(err.line)

    for msg, lines in iteritems(by_message):
        print_level("[!] %s [%s]", level + 1, msg,
                    ', '.join(str(x) for x in lines))
def _lookup_unprefixed(typename):
    """Attempts to resolve a class for the input XML type `typename`.

    Args:
        typename: The name of an STIX XML type (e.g.,
            TLPMarkingStructureType) without a namespace prefix.

    Returns:
        A stix.Entity implementation class for the `typename`.

    Raises:
        ValueError: If no class has been registered for the input
            `typename`.
    """
    # First registered xsi:type containing `typename` wins.
    for xsi_type, klass in iteritems(_EXTENSION_MAP):
        if typename in xsi_type:
            return klass

    raise ValueError("Unregistered extension type: %s" % typename)
def _walk_schemas(self, schema_dir):
    """Walks the `schema_dir` directory and builds a dictionary of schema
    ``targetNamespace`` values to a list of schema file paths.

    Because multiple schemas can declare the same ``targetNamespace``
    value, the ``value`` portion of the returned dictionary is a ``list``.

    Note:
        This method attempts to resolve issues where the same schema
        exists in two or more locations under `schema_dir` by keeping a
        record of visited target namespaces and filenames. If the same
        filename:targetNS (not file path) pair has been visited already,
        the file is not added to the schemalocation dictionary.

    Returns:
        A dictionary of schema ``targetNamespace`` values to a list of
        schema file paths.
    """
    # A set gives O(1) membership tests; the original list made this
    # walk accidentally quadratic in the number of schema files.
    seen = set()
    schemalocs = collections.defaultdict(list)

    for top, _, files in os.walk(schema_dir):
        for fn in files:
            if not fn.endswith('.xsd'):
                continue
            fp = os.path.abspath(os.path.join(top, fn))
            target_ns = utils.get_target_ns(fp)
            if (target_ns, fn) in seen:
                continue
            schemalocs[target_ns].append(fp)
            seen.add((target_ns, fn))

    # Hard-coded overrides always replace discovered locations.
    for ns, loc in iteritems(self.OVERRIDE_SCHEMALOC):
        schemalocs[ns] = [loc]

    return schemalocs
def print_json_results(results):
    """Prints `results` to stdout in JSON format.

    Args:
        results: An instance of ``ValidationResults`` which contains the
            results to print.
    """
    json_results = {}
    for fn, result in iteritems(results):
        entry = {}
        # Include each optional result section only when it is present.
        if result.schema_results is not None:
            entry['schema validation'] = result.schema_results.as_dict()
        if result.profile_results is not None:
            entry['profile results'] = result.profile_results.as_dict()
        if result.best_practice_results is not None:
            entry['best practice results'] = \
                result.best_practice_results.as_dict()
        if result.fatal is not None:
            entry['fatal'] = result.fatal.as_dict()
        json_results[fn] = entry

    print(json.dumps(json_results))
def _finalize_binding_namespaces(self):
    """Returns a namespace-to-prefix dictionary view of the
    finalized_namespaces (which are mapped prefix-to-namespace).

    The bindings expect an NS-to-prefix mapping, while our ns processing
    code builds dictionaries that map prefix-to-Namespace(s). Because of
    this, we need to flip our dictionaries before handing them off to the
    bindings for serialization.
    """
    if not self.finalized_namespaces:
        return {}  # TODO: Should this return the DEFAULT_STIX_NAMESPACES?

    binding_namespaces = {
        ns: alias for alias, ns in iteritems(self.finalized_namespaces)
    }

    # Always use the default STIX prefixes for STIX namespaces.
    # This is because of xsi:type prefixes used by the STIX/CybOX
    # user-level API classes.
    binding_namespaces.update(DEFAULT_STIX_NAMESPACES)

    return binding_namespaces
def _process_includes(self, imports):
    """Attempts to resolve cases where multiple schemas declare the same
    ``targetNamespace`` value. This is due to the use of the
    ``xs:include`` directive, which can be found in OASIS CIQ schemas
    along with others. This is done by building an ``xs:include`` graph,
    and returning the root of that graph.

    Note:
        This method is flawed! This assumes that the ``xs:include`` graph
        is really a tree, and has a root which can be imported and used to
        validate all instance data which belongs to its namespace. A
        better way may be to programatically combine all "split" schemas
        within a single schema document and map the targetNamespace to
        that combined schema document.

    Args:
        imports: A dictionary of namespaces to a list of schema file
            paths. Most often, this list will have only one file path
            in it.

    Returns:
        A dictionary of schema targetNamespaces to a single schema file
        path.
    """
    processed = {}
    for ns, schemas in iteritems(imports):
        # More than one schema for a namespace means xs:include splitting;
        # collapse those down to the root of the include graph.
        if len(schemas) > 1:
            processed[ns] = self._get_include_root(ns, schemas)
        else:
            processed[ns] = schemas[0]
    return processed
def _check_namespaces(self, ns_dict):
    """Check that all the prefixes in `ns_dict` are mapped to only one
    namespace.

    Args:
        ns_dict: A ``prefix: [namespaces]`` dictionary.

    Raises:
        .DuplicatePrefixError: If a prefix is mapped to more than one
            namespace.
    """
    for prefix, namespaces in iteritems(ns_dict):
        # Exactly one namespace per prefix is the only valid state.
        if len(namespaces) == 1:
            continue

        error = "Namespace prefix '{0}' mapped to multiple namespaces: {1}"
        raise DuplicatePrefixError(
            message=error.format(prefix, namespaces),
            prefix=prefix,
            namespaces=tuple(namespaces)
        )
def _set_var(self, klass, try_cast=True, arg=None, **kwargs):
    """Sets an instance property value.

    * If the input value is ``None``, the property is set to ``None``.
    * If the input value is an instance of `klass`, the property is set
      to the input value.
    * If the input value is not an instance of `klass` and `try_cast` is
      ``True``, an attempt will be made to cast the input value to an
      instance of `klass`.

    Args:
        klass: The expected input value class.
        try_cast: If ``True`` attempt to cast the input value to `klass`
            if it is not an instance of `klass`.
        arg: The __init__ parameter name to use when casting the input
            value to `klass`. E.g., StructuredText(value=input), the `arg`
            is `value`. If ``None``, it is assumed that the first __init__
            parameter will accept the value.
        **kwargs: The field name and value. The field name is the key and
            the field value is the value.
    """
    name, value = next(iteritems(kwargs))
    attr = utils.private_name(name)  # 'title' => '_title'

    if value is None:
        setattr(self, attr, None)
        return
    if isinstance(value, klass):
        setattr(self, attr, value)
        return
    if try_cast:
        setattr(self, attr, utils.cast_var(value, klass, arg=arg))
        return

    error = "'{0}' expects an instance of {1}. Received: {2}."
    raise TypeError(error.format(name, klass, type(value)))
def _collect_namespaces(self, node):
    # Snapshot the namespace map declared on the parsed node.
    self.__input_namespaces__ = dict(node.nsmap)
def to_xml(self, include_namespaces=True, include_schemalocs=False,
           ns_dict=None, schemaloc_dict=None, pretty=True,
           auto_namespace=True, encoding='utf-8'):
    """Serializes a :class:`Entity` instance to an XML string.

    The default character encoding is ``utf-8`` and can be set via the
    `encoding` parameter. If `encoding` is ``None``, a string (unicode in
    Python 2, str in Python 3) is returned.

    Args:
        auto_namespace: Automatically discover and export XML namespaces
            for a STIX :class:`Entity` instance.
        include_namespaces: Export namespace definitions in the output
            XML. Default is ``True``.
        include_schemalocs: Export ``xsi:schemaLocation`` attribute in the
            output document. This will attempt to associate namespaces
            declared in the STIX document with schema locations. If a
            namespace cannot be resolved to a schemaLocation, a Python
            warning will be raised. Schemalocations will only be exported
            if `include_namespaces` is also ``True``.
        ns_dict: Dictionary of XML definitions (namespace is key, alias is
            value) to include in the exported document. This must be
            passed in if `auto_namespace` is ``False``.
        schemaloc_dict: Dictionary of XML ``namespace: schema location``
            mappings to include in the exported document. These will only
            be included if `auto_namespace` is ``False``.
        pretty: Pretty-print the XML.
        encoding: The output character encoding. Default is ``utf-8``. If
            `encoding` is set to ``None``, a string (unicode in Python 2,
            str in Python 3) is returned.

    Returns:
        An XML string for this :class:`Entity` instance. Default character
        encoding is ``utf-8``.
    """
    # Imported here (not at module level), presumably to avoid a circular
    # import — TODO confirm.
    from .utils import nsparser
    parser = nsparser.NamespaceParser()

    # Only allocate namespace-collection state when auto-namespacing;
    # to_obj() populates ns_info as a side effect of serialization.
    if auto_namespace:
        ns_info = nsparser.NamespaceInfo()
    else:
        ns_info = None

    obj = self.to_obj(ns_info=ns_info)

    if (not auto_namespace) and (not ns_dict):
        raise Exception(
            "Auto-namespacing was disabled but ns_dict was empty "
            "or missing."
        )

    if auto_namespace:
        # Merge collected namespaces with any caller-supplied mappings.
        ns_info.finalize(ns_dict=ns_dict, schemaloc_dict=schemaloc_dict)
        obj_ns_dict = ns_info.binding_namespaces
    else:
        # Manual mode: trust the caller's dictionaries, but always layer
        # in the default STIX namespaces for the generated bindings.
        ns_info = nsparser.NamespaceInfo()
        ns_info.finalized_namespaces = ns_dict or {}
        ns_info.finalized_schemalocs = schemaloc_dict or {}
        obj_ns_dict = dict(
            itertools.chain(
                iteritems(ns_dict),
                iteritems(nsparser.DEFAULT_STIX_NAMESPACES)
            )
        )

    # Build the xmlns/schemaLocation attribute string for the root tag.
    namespace_def = ""
    if include_namespaces:
        xmlns = parser.get_xmlns_str(ns_info.finalized_namespaces)
        namespace_def += ("\n\t" + xmlns)

    if include_schemalocs and include_namespaces:
        schemaloc = parser.get_schemaloc_str(ns_info.finalized_schemalocs)
        namespace_def += ("\n\t" + schemaloc)

    if not pretty:
        # Collapse the newline/tab layout to spaces for compact output.
        namespace_def = namespace_def.replace('\n\t', ' ')

    with save_encoding(encoding):
        sio = StringIO()
        obj.export(
            sio.write,                     # output buffer
            0,                             # output level
            obj_ns_dict,                   # namespace dictionary
            pretty_print=pretty,           # pretty printing
            namespacedef_=namespace_def    # namespace/schemaloc def string
        )

    # Ensure that the StringIO buffer is unicode
    s = text_type(sio.getvalue())

    if encoding:
        return s.encode(encoding)

    return s
def _finalize_namespaces(self, ns_dict=None):
    """Returns a dictionary of namespaces to be exported with an XML
    document.

    This loops over all the namespaces that were discovered and built
    during the execution of ``collect()`` and
    ``_parse_collected_classes()`` and attempts to merge them all.

    Returns:
        An ``alias: namespace`` dictionary containing all namespaces
        required to be present on an exported document.

    Raises:
        .DuplicatePrefixError: If namespace prefix was mapped to more than
            one namespace.
    """
    # Flip the caller's dict from ns=>alias into alias=>ns form.
    user_namespaces = {}
    if ns_dict:
        for ns, alias in iteritems(ns_dict):
            user_namespaces[alias] = ns

    # Working map of alias => {namespaces}; sets let us detect prefixes
    # bound to more than one namespace below.
    ns_dict = collections.defaultdict(set)

    # Add the ID namespaces
    ns_dict[idgen.get_id_namespace_alias()].add(idgen.get_id_namespace())

    # Build namespace dictionaries from the collected Entity objects.
    collected_prefixed = dict(iteritems(self._collected_namespaces))

    # Pop the unprefixed entries and resolve aliases for them.
    no_prefix = collected_prefixed.pop(None, set())
    collected_unprefixed = self._resolve_unprefixed(no_prefix)

    # Remap the example namespace to the one expected by the APIs if the
    # sample example namespace is found.
    self._fix_example_namespace()

    # Merge every namespace source, in precedence order, into the
    # alias => {namespaces} map.
    sources = (
        self._BASELINE_NAMESPACES,
        self._input_namespaces,
        collected_prefixed,
        collected_unprefixed,
        user_namespaces,
    )
    for source in sources:
        for alias, ns in iteritems(source):
            ns_dict[alias].add(ns)

    # Check that all the prefixes are mapped to only one namespace
    self._check_namespaces(ns_dict)

    # Flatten by unwrapping each (now known single-element) set.
    return {alias: ns_set.pop() for alias, ns_set in iteritems(ns_dict)}
def get_xmlns_str(self, ns_dict):
    """Render `ns_dict` (alias => namespace) as xmlns attribute
    declarations, sorted by alias and joined with newline+tab.
    """
    declarations = [
        'xmlns:%s="%s"' % (alias, ns)
        for alias, ns in sorted(iteritems(ns_dict))
    ]
    return "\n\t".join(declarations)