def OnFundamental(element, *args, **kwargs):  # <Unused argument> pylint: disable = W0613
    """Emits, into the generated module, OrderedDict lookup tables for an enum
    element with friendly values (value -> friendly value, plus the reverse
    mapping) along with max key/value lengths used for column alignment.

    No-ops for non-enum elements or enums without friendly values.
    Sets ``nonlocals.wrote_value`` when content is written.
    """
    if not isinstance(element.TypeInfo, EnumTypeInfo):
        return
    if not element.TypeInfo.FriendlyValues:
        return

    name = element.Name
    reversed_name = "{}_reversed".format(name)

    prefix = "{} = OrderedDict".format(name)
    reversed_prefix = "{} = OrderedDict".format(reversed_name)

    # BUG FIX: the generated code previously read `len(max(<iterable>, len))`,
    # which passes the builtin `len` as a second *value* for max to compare
    # (a TypeError at runtime under Python 3) rather than as the comparison
    # key. The correct spelling is `max(<iterable>, key=len)`.
    f.write(
        textwrap.dedent(
            """\
            {prefix}{assignments}

            {name}_max_key_length = len(max({name}.keys(), key=len))
            {name}_max_value_length = len(max({name}.values(), key=len))

            {reversed_prefix}{reversed_assignments}

            {reversed_name}_max_key_length = len(max({reversed_name}.keys(), key=len))
            {reversed_name}_max_value_length = len(max({reversed_name}.values(), key=len))

            """,
        ).format(
            prefix=prefix,
            assignments=StringHelpers.LeftJustify(
                "([ {}\n ])".format(
                    "\n ".join([
                        '( "{}", "{}" ),'.format(v, fv)
                        for v, fv in six.moves.zip(
                            element.TypeInfo.Values,
                            element.TypeInfo.FriendlyValues)
                    ]),
                ),
                len(prefix),
            ),
            name=name,
            reversed_prefix=reversed_prefix,
            reversed_assignments=StringHelpers.LeftJustify(
                "([ {}\n ])".format(
                    "\n ".join([
                        '( "{}", "{}" ),'.format(fv, v)
                        for v, fv in six.moves.zip(
                            element.TypeInfo.Values,
                            element.TypeInfo.FriendlyValues)
                    ]),
                ),
                len(reversed_prefix),
            ),
            reversed_name=reversed_name,
        ),
    )

    nonlocals.wrote_value = True
def OnSimple_VisitedChildren(element):
    # Writes the XSD complexType for a "simple" element: a fundamental value
    # (simpleContent) extended with the element's attributes.

    # Resolve the XSD representation of the element's fundamental attribute type.
    content = fundamental_type_info_visitor.Accept(
        element.TypeInfo.Items[element.FundamentalAttributeName])

    if content.startswith("<xsd:restriction"):
        # A restriction cannot be used inline as an extension base; emit a
        # named simpleType wrapper and reference it by name instead.
        f.write(
            textwrap.dedent(
                """\
                <xsd:simpleType name="_{}_Item_content">
                  {}
                </xsd:simpleType>

                """,
            ).format(
                element.DottedName,
                StringHelpers.LeftJustify(content.strip(), 2)),
        )

        content = "_{}_Item_content".format(element.DottedName)

    f.write(
        textwrap.dedent(
            """\
            <xsd:complexType name="_{name}_Item">
              <xsd:simpleContent>
                <xsd:extension base="{base}">
                  {attributes}
                </xsd:extension>
              </xsd:simpleContent>
            </xsd:complexType>

            """,
        ).format(
            name=element.DottedName,
            base=content,
            attributes=StringHelpers.LeftJustify(
                "".join([
                    textwrap.dedent(
                        """\
                        <xsd:attribute name="{name}" use="{use}" type="{type}" />
                        """,
                    ).format(
                        name=attribute.Name,
                        # Optional arity maps to use="optional"; everything else is required.
                        use="optional" if attribute.TypeInfo.Arity.IsOptional else "required",
                        type=GetBaseTypeName(attribute.Resolve()),
                    ) for attribute in element.Attributes
                ],
                ),
                6,
            ).rstrip(),
        ),
    )
def Execute(invoke_reason, context, status_stream, verbose_stream, verbose):  # <unused argument> pylint: disable = W0613
    """Writes a debug dump of the invocation reason and context to the single
    configured output file."""
    output_filenames = context["output_filenames"]
    assert len(output_filenames) == 1, output_filenames
    output_filename = output_filenames[0]

    status_stream.write("Writing '{}'...".format(output_filename))
    with status_stream.DoneManager():
        # Capture a human-readable description of the context in memory.
        description_sink = six.StringIO()
        CommonEnvironment.Describe(
            context,
            output_stream=description_sink,
        )
        described_context = description_sink.getvalue()

        with open(output_filename, "w") as f:
            f.write(
                textwrap.dedent(
                    """\
                    # ----------------------------------------------------------------------
                    # |
                    # |  Debug Output - Execute
                    # |
                    # ----------------------------------------------------------------------
                    invoke_reason: {invoke_reason}

                    context:
                        {context}
                    """,
                ).format(
                    invoke_reason=invoke_reason,
                    context=StringHelpers.LeftJustify(described_context.strip(), 4),
                ),
            )
def OnFundamental(self, element):
    # Emits a per-element classmethod (`_<name>_Item`) that serializes a single
    # fundamental item via the configured type-info serialization helper.
    python_name = ToPythonName(element)

    statement = "{type_info}.{method_prefix}Item({python_name}_TypeInfo, {item_statement}, **{serialize_args})".format(
        type_info=self._type_info_serialization_name,
        method_prefix=self._method_prefix,
        python_name=python_name,
        item_statement=self._source_writer.GetFundamental("item", element),
        serialize_args=self._custom_serialize_item_args,
    )

    # Attribute values are emitted as-is; non-attribute elements are wrapped
    # in a destination-specific fundamental element.
    if not element.IsAttribute:
        statement = self._dest_writer.CreateFundamentalElement(element, statement)

    self._output_stream.write(
        textwrap.dedent(
            """\
            # ----------------------------------------------------------------------
            @classmethod
            def _{python_name}_Item(cls, item):
                return {statement}

            """,
        ).format(
            python_name=python_name,
            statement=StringHelpers.LeftJustify(statement, 4).strip(),
        ),
    )
def Write(name, items, is_os_specific_func):
    """Writes a banner plus a column-aligned table of items; os-specific
    entries are suffixed with ' *'."""
    widths = [
        40,
        9,
        120,
    ]
    row_format = "{name:<%d} {type:<%d} {fullpath:<%d}" % tuple(widths)

    banner = '=' * len(name)
    header_row = row_format.format(
        name="Name",
        type="Type",
        fullpath="Path",
    )
    underline_row = row_format.format(
        name='-' * widths[0],
        type='-' * widths[1],
        fullpath='-' * widths[2],
    )

    if not items:
        body = "No items"
    else:
        rows = []
        for item in items:
            marker = ' *' if is_os_specific_func(item) else ''
            rows.append(
                row_format.format(
                    name="{}{}".format(os.path.basename(item), marker),
                    type="Directory" if os.path.isdir(item) else "File",
                    fullpath=item,
                ),
            )
        body = StringHelpers.LeftJustify('\n'.join(rows), 2)

    output_stream.write(textwrap.dedent(
        """\
        {sep}
        {name}
        {sep}

          {header}
          {underline}
          {content}

        """).format(
        sep=banner,
        name=name,
        header=header_row,
        underline=underline_row,
        content=body,
    ))
def GenerateContent(root, is_root):
    """Recursively renders a repo/group mapping as XML fragments: string
    values become <repo /> entries; nested dicts become <group>/<allgroup>
    blocks (the top level uses 'allgroup')."""
    fragments = []

    for key, value in six.iteritems(root):
        if isinstance(value, six.string_types):
            # Leaf: the value is the repository root path.
            fragments.append(
                '<repo root="{}" shortname="{}" />\n'.format(
                    value,
                    os.path.basename(key),
                ))
            continue

        # Nested group; recurse and indent the child content under this tag.
        tag = "allgroup" if is_root else "group"
        nested = StringHelpers.LeftJustify(
            GenerateContent(value, False), 2).rstrip()

        fragments.append(
            textwrap.dedent("""\
                <{tag_name} name="{name}">
                  {content}
                </{tag_name}>
                """).format(
                tag_name=tag,
                name=key,
                content=nested,
            ))

    return ''.join(fragments)
def GetDefOutputStatementsConstraintsAndSuffix(self):
    # Builds the ONNX op-schema pieces for this custom struct:
    # (.Output(...) statements, template-type constraints, and the
    # TypeAndShapeInferenceFunction suffix).

    # ----------------------------------------------------------------------
    def ToOrtTypeString(value):
        # Map a C++ type spelling (e.g. "std::int64_t") to its ORT spelling ("int64").
        return value.replace("std::", "").replace("_t", "")

    # ----------------------------------------------------------------------

    output_statements = []

    for index, member in enumerate(self._custom_struct.members):
        output_statements.append(
            '.Output({index}, "{name}", "{desc}", "{type}")'.format(
                index=index,
                name=member.name,
                desc=member.description or "No information available",
                type=self._type_to_template_lookup[member.type],
            ),
        )

    return (
        output_statements,
        # Constraints: template name -> single permitted ORT type string.
        OrderedDict(
            [
                (k, [ToOrtTypeString(v)])
                for k, v in six.iteritems(self._template_to_types)
            ],
        ),
        # Inference function: propagate each member's element type to its
        # output, and the input shape when one is available.
        textwrap.dedent(
            """\
            .TypeAndShapeInferenceFunction(
                [](ONNX_NAMESPACE::InferenceContext& ctx) {{
                    const bool has_shape = hasInputShape(ctx, 1);

                    {statements}
                }}
            )
            """,
        ).format(
            statements=StringHelpers.LeftJustify(
                "\n".join(
                    [
                        textwrap.dedent(
                            """\
                            propagateElemTypeFromDtypeToOutput(ctx, ONNX_NAMESPACE::TensorProto_DataType_{type}, {index});

                            if(has_shape) {{
                                propagateShapeFromInputToOutput(ctx, 1, {index});
                            }}
                            """,
                        ).format(
                            index=index,
                            type=ToOrtTypeString(member.type).upper(),
                        ) for index, member in enumerate(self._custom_struct.members)
                    ],
                ),
                6,
            ),
        )
    )
def FormatInlineFuncDesc(content):
    """Wraps a description so it fits beside a 37-column prefix, then indents
    every wrapped line to that column."""
    indent = 37
    assert MAX_COLUMN_WIDTH > indent

    wrapped = StringHelpers.Wrap(content, MAX_COLUMN_WIDTH - indent)
    return StringHelpers.LeftJustify(wrapped, indent)
def OnFundamental(element):
    """For a fundamental element: restriction content is written as a named
    simpleType; any other content is recorded on the element as its XSD base
    type for later reference."""
    xsd_content = fundamental_type_info_visitor.Accept(element.TypeInfo)

    if not xsd_content.startswith("<xsd:restriction"):
        # Plain base types are referenced directly when the element is written.
        element._xsd_base_type = xsd_content
        return

    f.write(
        textwrap.dedent(
            """\
            <xsd:simpleType name="_{}_Item">
              {}
            </xsd:simpleType>

            """,
        ).format(
            element.DottedName,
            StringHelpers.LeftJustify(xsd_content, 2).strip(),
        ),
    )
def Write(content, prefix, result):
    # Writes a prefixed status message through the DoneManager's stream and
    # folds `result` into the aggregate result.

    # Remove any transient status content currently displayed.
    ClearTempStatus()

    message = StringHelpers.LeftJustify(
        "{}: {}\n".format(prefix, content.strip()),
        len(prefix),
    )

    if nonlocals.first_write:
        # Nothing has been written through the decorated stream yet, so the
        # line prefix must be prepended manually for the first message.
        message = "{}{}".format(dm.stream._line_prefix(0), message)
        nonlocals.first_write = False

    nonlocals.dm_write_ref(message)
    nonlocals.reset_content = False

    # Keep the most severe result: errors (< 0) override warnings (> 0),
    # which override success (0/None).
    if (
        result is not None
        and (dm.result in [None, 0] or (dm.result > 0 and result < 0))
    ):
        dm.result = result
def WriteExtensions(name, items):
    """Writes a banner plus a single-column table listing extension paths."""
    widths = [
        120,
    ]
    row_format = "{fullpath:<%d}" % tuple(widths)

    banner = '=' * len(name)

    if not items:
        body = "No items"
    else:
        body = StringHelpers.LeftJustify('\n'.join(items), 2)

    output_stream.write(textwrap.dedent(
        """\
        {sep}
        {name}
        {sep}

          {header}
          {underline}
          {content}

        """).format(
        sep=banner,
        name=name,
        header=row_format.format(fullpath="Path"),
        underline=row_format.format(fullpath='-' * widths[0]),
        content=body,
    ))
def _GenerateUsageInformation(self, entry_point):
    # Builds two strings for an entry point: the compact command-line usage
    # string and a column-aligned verbose parameter table.
    # Returns (command_line, verbose).
    cols = OrderedDict([
        ("Name", 30),
        ("Type", 15),
        ("Arity", 8),
        ("Default", 20),
        ("Description", 80),
    ])

    # Calculate the verbose template and the left padding associated with verbose
    # descriptions.
    col_padding = 2
    verbose_template = []
    verbose_desc_offset = 0

    for index, width in enumerate(six.itervalues(cols)):
        verbose_template.append("{{{}:<{}}}".format(index, width))
        verbose_desc_offset += width + col_padding

    # Remove the description size from the verbose offset
    verbose_desc_offset -= width

    assert verbose_desc_offset < MAX_COLUMN_WIDTH, (verbose_desc_offset, MAX_COLUMN_WIDTH)

    verbose_template = (col_padding * ' ').join(verbose_template)

    # Gather the command line and verbose parts
    command_line = []
    verbose = []

    if entry_point.Parameters:
        # Header row and underline row for the verbose table.
        verbose.append(verbose_template.format(*six.iterkeys(cols)))
        verbose.append(
            verbose_template.format(
                *['-' * col_width for col_width in six.itervalues(cols)]))

    # Long parameter lists are rendered one argument per line.
    is_multi_line = len(entry_point.Parameters) > 4

    for index, parameter in enumerate(entry_point.Parameters):
        arg = parameter.Name

        # Render the argument according to its kind (switch, dictionary,
        # keyword, or positional).
        if parameter.IsSwitch:
            arg = "{}{}".format(
                self.CommandLineArgPrefix,
                arg,
            )
        elif isinstance(parameter.TypeInfo, DictTypeInfo):
            if not parameter.IsPositional:
                prefix = "{}{}{}".format(
                    self.CommandLineArgPrefix,
                    arg,
                    self.CommandLineKeywordSeparator,
                )
            else:
                prefix = ''

            arg = "{}<tag>{}<value>".format(
                prefix,
                self.CommandLineDictTagValueSeparator,
            )
        elif not parameter.IsPositional:
            arg = "{}{}{}<value>".format(
                self.CommandLineArgPrefix,
                arg,
                self.CommandLineKeywordSeparator,
            )

        # Required args are wrapped in <>, optional ones in [].
        if parameter.IsRequired:
            arg = "<{}>".format(arg)
        else:
            arg = "[{}]".format(arg)

        if parameter.DisplayArity in ['*', '+']:
            arg += parameter.DisplayArity

        if is_multi_line:
            arg = "\n {}".format(arg)
        elif index:
            arg = " {}".format(arg)

        command_line.append(arg)

        # Verbose
        if parameter.DefaultValue is not EntryPointInformation.ParameterInfo.NoDefault:
            if parameter.IsSwitch:
                default_value = "on" if parameter.DefaultValue else "off"
            else:
                default_value = parameter.DefaultValue
        else:
            default_value = ''

        verbose.append(
            verbose_template.format(
                parameter.Name,
                "switch" if parameter.IsSwitch
                else "Dictionary" if isinstance(parameter.TypeInfo, DictTypeInfo)
                else parameter.TypeInfo.Desc,
                parameter.DisplayArity,
                str(default_value),
                # Wrap the description to the space remaining after the other
                # columns, then indent wrapped lines to the description column.
                StringHelpers.LeftJustify(
                    StringHelpers.Wrap(
                        parameter.Description,
                        MAX_COLUMN_WIDTH - verbose_desc_offset),
                    verbose_desc_offset,
                ).rstrip(),
            ))

        constraints = parameter.TypeInfo.ConstraintsDesc
        if constraints:
            verbose.append(" - {}".format(constraints))

    return ''.join(command_line), '\n'.join(verbose)
def Move(
    no_move=False,
    ignore_warnings=False,
    output_stream=sys.stdout,
):
    """Moves any new python libraries to the appropriate Libraries folder associated with the activated repository."""
    with StreamDecorator(output_stream).DoneManager(
        line_prefix='',
        prefix="\nResults: ",
        suffix='\n',
    ) as dm:
        if no_move:
            # Dry-run mode: report what would happen but move nothing.
            dm.stream.write("***** Output is for information only; nothing will be moved. *****\n\n")
            move_func = lambda *args, **kwargs: None
        else:
            # ----------------------------------------------------------------------
            def Impl(source_dir_or_filename, dest_dir):
                # shutil.move won't overwrite files, so use distutils (which will)
                if os.path.isdir(source_dir_or_filename):
                    import distutils.dir_util

                    distutils.dir_util.copy_tree(source_dir_or_filename, os.path.join(dest_dir, os.path.basename(source_dir_or_filename)))
                    FileSystem.RemoveTree(source_dir_or_filename)
                else:
                    FileSystem.MakeDirs(dest_dir)
                    shutil.move(source_dir_or_filename, dest_dir)

            # ----------------------------------------------------------------------

            move_func = Impl

        dm.stream.write("Calculating new library content...")
        with dm.stream.DoneManager():
            new_content = _NewLibraryContent.Create(_EnvironmentSettings())

        # Group libraries and distinfo bundles

        # ----------------------------------------------------------------------
        class PythonLibrary(object):
            # Aggregates a library directory/file with its metadata bundle,
            # extracted version, and associated scripts.
            def __init__(self, fullpath):
                self.Fullpath = fullpath
                self.metadata_path = None
                self.version = None
                self.scripts = []

        # ----------------------------------------------------------------------

        libraries = OrderedDict()

        dm.stream.write("Grouping libraries...")
        with dm.stream.DoneManager(
            done_suffix=lambda: "{} found".format(inflect.no("library", len(libraries))),
            suffix='\n',
        ) as this_dm:
            # First pass: collect library entries (everything that is not a
            # metadata bundle).
            for library_path in new_content.Libraries:
                basename = os.path.basename(library_path)

                if (
                    not basename.endswith(".dist-info")
                    and not basename.endswith(".egg-info")
                ):
                    libraries[basename] = PythonLibrary(library_path)

            # Case-insensitive lookup for library names.
            lowercase_map = { k.lower() : k for k in six.iterkeys(libraries) }

            # Extract library metadata
            for library_path in new_content.Libraries:
                if os.path.isfile(library_path):
                    continue

                basename = os.path.basename(library_path)

                if not (
                    basename.endswith(".dist-info")
                    or basename.endswith(".egg-info")
                ):
                    continue

                # Bundle names look like "<name>-<version>.dist-info".
                index = basename.find('-')
                if index == -1:
                    this_dm.result = this_dm.result or 1
                    this_dm.stream.write("WARNING: The library name for '{}' could not be extracted.\n".format(library_path))
                    continue

                potential_name = basename[:index]
                potential_names = [
                    potential_name,
                    "{}.py".format(potential_name),
                ]

                # Try to find the python library based on the potential names
                python_library = None

                for potential_name in potential_names:
                    python_library = libraries.get(potential_name, None)
                    if python_library is None:
                        # Fall back to a case-insensitive match.
                        library_key = lowercase_map.get(potential_name.lower(), None)
                        if library_key is not None:
                            python_library = libraries.get(library_key, None)

                    if python_library is not None:
                        break

                if python_library is None:
                    this_dm.result = this_dm.result or 1
                    this_dm.stream.write("WARNING: The library name '{}' was not found ({}).\n".format(potential_names[0], library_path))
                    continue

                if basename.endswith(".dist-info"):
                    python_library.metadata_path = library_path

                    version = None

                    # Prefer metadata.json, then the METADATA file.
                    if version is None:
                        metadata_filename = os.path.join(library_path, "metadata.json")
                        if os.path.isfile(metadata_filename):
                            with open(metadata_filename) as f:
                                data = json.load(f)

                            if "version" not in data:
                                this_dm.result = -1
                                this_dm.stream.write("ERROR: 'version' was not found in '{}'.\n".format(library_path))
                                continue

                            version = data["version"]

                    if version is None:
                        metadata_filename = os.path.join(library_path, "METADATA")
                        if os.path.isfile(metadata_filename):
                            for line in open(metadata_filename).readlines():
                                if line.startswith("Version:"):
                                    version = line[len("Version:"):].strip()
                                    break

                    if version is None:
                        this_dm.result = -1
                        this_dm.stream.write("ERROR: Metadata was not found for '{}'.\n".format(library_path))
                        continue

                    python_library.version = version

                elif basename.endswith(".egg-info"):
                    python_library.metadata_path = library_path

                    metadata_filename = os.path.join(library_path, "PKG-INFO")
                    if not os.path.isfile(metadata_filename):
                        this_dm.result = -1
                        this_dm.stream.write("ERROR: Metadata was not found for '{}'.\n".format(metadata_filename))
                        continue

                    version = None

                    for line in open(metadata_filename).readlines():
                        if line.startswith("Version:"):
                            version = line[len("Version:"):].strip()
                            break

                    if version is None:
                        this_dm.result = -1
                        this_dm.stream.write("ERROR: 'Version:' was not found in '{}'.\n".format(metadata_filename))
                        continue

                    python_library.version = version
                else:
                    assert False, basename

            # Eliminate all library info where we couldn't extract the version
            for library_name in list(six.iterkeys(libraries)):
                if libraries[library_name].version is None:
                    this_dm.result = this_dm.result or 1
                    this_dm.stream.write("WARNING: Version information was not found for '{}'.\n".format(library_name))
                    libraries.pop(library_name)

            # Associate scripts with the known library info
            for script_fullpath in new_content.Scripts:
                if os.path.isdir(script_fullpath):
                    this_dm.result = this_dm.result or 1
                    this_dm.stream.write("WARNING: '{}' is a directory and will not be processed.\n".format(script_fullpath))
                    continue

                script_name_lower = os.path.splitext(os.path.basename(script_fullpath))[0].lower()

                found = False

                # A script belongs to the first library whose name appears in
                # the script's name.
                for potential_library_name, potential_library_info in six.iteritems(libraries):
                    if potential_library_name.lower() in script_name_lower:
                        potential_library_info.scripts.append(script_fullpath)
                        found = True
                        break

                if not found:
                    this_dm.result = this_dm.result or 1
                    this_dm.stream.write("WARNING: The library for the script '{}' could not be found.\n".format(script_fullpath))

        if dm.result < 0:
            return dm.result

        if not ignore_warnings and dm.result > 0:
            dm.stream.write("\nWarnings were encountered. To continue execution even with warnings, specify 'ignore_warnings' on the command line.\n")
            return dm.result

        if not libraries:
            return

        dm.stream.write("Moving content...")
        with dm.stream.DoneManager(
            suffix='\n',
        ) as move_dm:
            # BugBug: Handle case where existing dest dir isn't os-specific but now needs to be with new changes

            # ----------------------------------------------------------------------
            def DestinationIsOSSpecific(dest_dir):
                # BugBug: I don't think that this works. Should be looking for os-specific names and then python names.
                if not os.path.isdir(dest_dir):
                    return False

                found_one = False

                for item in os.listdir(dest_dir):
                    if not item.startswith("python"):
                        return False

                    fullpath = os.path.join(dest_dir, item)
                    if not os.path.isdir(fullpath):
                        return False

                    found_one = True

                return found_one

            # ----------------------------------------------------------------------

            python_version_dir = "python{}".format(os.getenv("DEVELOPMENT_ENVIRONMENT_PYTHON_VERSION").split('.')[0])

            display_template = "{0:<50} -> {1}\n"

            for index, (library_name, library_info) in enumerate(six.iteritems(libraries)):
                move_dm.stream.write("Processing '{}' ({} of {})...".format(
                    library_name,
                    index + 1,
                    len(libraries),
                ))
                with move_dm.stream.DoneManager(
                    suffix='\n',
                ) as this_dm:
                    try:
                        if library_name.endswith(".py"):
                            library_name = library_name[:-len(".py")]

                        dest_dir = os.path.join(os.getenv("DEVELOPMENT_ENVIRONMENT_REPOSITORY"), RepositoryBootstrapConstants.LIBRARIES_SUBDIR, PythonActivationActivity.Name, library_name, "v{}".format(library_info.version))

                        has_os_specific_dest_dir = False

                        # OS-specific libraries are nested under <os>/<python version>.
                        if new_content.HasOSSpecificLibraryExtensions(library_info.Fullpath) or DestinationIsOSSpecific(dest_dir):
                            dest_dir = os.path.join(dest_dir, CurrentShell.CategoryName, python_version_dir)
                            has_os_specific_dest_dir = True

                        # Copy the library
                        library_dest_dir = dest_dir
                        this_dm.stream.write(display_template.format(os.path.basename(library_info.Fullpath), library_dest_dir))

                        move_func(library_info.Fullpath, library_dest_dir)

                        # Copy the metadata
                        assert library_info.metadata_path
                        metadata_dest_dir = dest_dir

                        this_dm.stream.write(display_template.format(os.path.basename(library_info.metadata_path), metadata_dest_dir))

                        move_func(library_info.metadata_path, metadata_dest_dir)

                        # Copy the scripts
                        scripts_dest_dir = os.path.join(dest_dir, SCRIPTS_DIR_NAME)

                        # Shell scripts/executables are os-specific even when
                        # the library itself is not.
                        if not has_os_specific_dest_dir and bool(next((script for script in library_info.scripts if os.path.splitext(script)[1] in [ CurrentShell.ScriptExtension, CurrentShell.ExecutableExtension, ]), None)):
                            scripts_dest_dir = os.path.join(scripts_dest_dir, CurrentShell.CategoryName)

                        for script in library_info.scripts:
                            this_dm.stream.write(display_template.format("{} [Script]".format(os.path.basename(script)), scripts_dest_dir))

                            if not no_move:
                                if PythonActivationActivity.NormalizeScript(script) == PythonActivationActivity.NormalizeScriptResult_Modified:
                                    this_dm.stream.write(" ** The script '{}' was normalized.\n".format(script))

                            move_func(script, scripts_dest_dir)
                    except Exception as ex:
                        this_dm.result = -1
                        this_dm.stream.write("ERROR: {}\n".format(StringHelpers.LeftJustify(
                            str(ex),
                            len("ERROR: "),
                        )))

        return dm.result
def _Impl(
    display_sentinel,
    json_filename,
    result_filename,
    first,
    output_stream,
    method_name,
    parser,
):
    # Invokes a repository-customization hook method for each prioritized
    # dependency and writes a tri-state result ('-1' error / '1' config-specific
    # / '0' otherwise) to result_filename. Returns the aggregate result code.
    output_stream = StreamDecorator(
        output_stream,
        line_prefix=display_sentinel,
    )

    with open(json_filename) as f:
        try:
            data = parser(f.read(), is_root=True)
        except Exception as ex:
            output_stream.write("ERROR: {} ({})\n".format(str(ex), ex.stack))
            return -1

    output_stream.write("Parsing dependencies...")
    with output_stream.DoneManager():
        dependencies = ActivationData.Load(None, None, None).PrioritizedRepositories

    has_config_specific = False

    output_stream.write("Validating...")
    with output_stream.DoneManager() as dm:
        for index, repository_info in enumerate(dependencies):
            dm.stream.write("Processing '{}' ({} of {})...".format(
                repository_info.Name,
                index + 1,
                len(dependencies),
            ))
            with dm.stream.DoneManager() as this_dm:
                with Utilities.CustomMethodManager(os.path.join(repository_info.Root, Constants.HOOK_ENVIRONMENT_CUSTOMIZATION_FILENAME), method_name) as method:
                    if not method:
                        continue

                    args = OrderedDict([
                        ( "data", data ),
                        ( "output_stream", this_dm.stream ),
                    ])

                    # Get the method args to see if a configuration is required
                    func_code = six.get_function_code(method)
                    if "configuration" in func_code.co_varnames[:func_code.co_argcount]:
                        args["configuration"] = repository_info.Configuration
                        has_config_specific = True
                    elif not first:
                        # Don't call a config-agnostic method more than once
                        continue

                    try:
                        this_dm.result = Interface.CreateCulledCallable(method)(args) or 0
                    except Exception as ex:
                        this_dm.stream.write(StringHelpers.LeftJustify(
                            "ERROR: {}\n".format(str(ex).rstrip()),
                            len("ERROR: "),
                        ))
                        this_dm.result = -1

    # Persist the tri-state outcome for the calling process to read.
    with open(result_filename, 'w') as f:
        f.write('-1' if dm.result != 0 else '1' if has_config_specific else '0')

    return dm.result
def _Invoke(cls, context, status_stream, verbose):
    """Handles the complexities of compiler invocation, ultimately calling _InvokeImpl."""
    assert context
    status_stream = StreamDecorator(status_stream)

    # Determine whether anything actually needs to be done.
    invoke_reason = cls._GetInvokeReason(
        context,
        StreamDecorator(status_stream if verbose else None))
    if invoke_reason is None:
        status_stream.write("No changes were detected.\n")
        return 0

    input_items = cls.GetInputItems(context)
    assert input_items

    status_stream.write(
        cls._GetStatusText(cls.InvokeVerb, context, input_items))
    with status_stream.DoneManager() as dm:
        if verbose:
            output_items = cls.GetOutputItems(context)

            # Deeper indentation when multiple unnamed inputs are displayed.
            if "display_name" in context or len(input_items) == 1:
                indentation = 4
            else:
                indentation = 8

            # Decorate all verbose output with a banner and consistent indent.
            verbose_stream = StreamDecorator(
                dm.stream,
                prefix=StringHelpers.LeftJustify(
                    textwrap.dedent(
                        # <Wrong hanging indentation> pylint: disable = C0330
                        """\
                        ========================================
                        VERBOSE Output

                        {}
                            -> {}
                        ========================================
                        """
                    ).format(
                        '\n'.join(input_items),
                        StringHelpers.LeftJustify(
                            '\n'.join(output_items) if output_items else "[None]",
                            4),
                    ),
                    2,
                    skip_first_line=False,
                ),
                suffix='\n',
                line_prefix=' ' * indentation,
            )

            status_stream = verbose_stream
        else:
            status_stream = dm.stream
            verbose_stream = StreamDecorator(None)

        dm.result = cls._InvokeImpl(
            invoke_reason,
            context,
            status_stream,
            verbose_stream,
            verbose,
        ) or 0

        # Persist context on success (or warnings) so that unchanged inputs
        # are skipped next time.
        if dm.result >= 0:
            cls._PersistContext(context)

        return dm.result
def CreateAdditionalDataItem(cls, dest_writer, name_var_name, source_var_name):
    # Generates the python source for a `_CreateAdditionalDataItem`-style
    # method: it converts an arbitrary source object (primitive, dict, or
    # object) into the destination writer's element representation.
    temporary_element = cls.CreateTemporaryElement(name_var_name, "1")
    temporary_children_element = cls.CreateTemporaryElement("k", "+")

    return textwrap.dedent(
        """\
        # The following types should be returned directly without additional conversion
        if isinstance({source_var_name}, (int, float, str, bool)):
            return {source_var_name}

        assert not isinstance({source_var_name}, list), {source_var_name}

        if not isinstance({source_var_name}, dict):
            {source_var_name} = {source_var_name}.__dict__

        source_attribute_names = {source_var_name}.get("{attribute_names}", set())

        attributes = OrderedDict()
        items = OrderedDict()

        for k, v in six.iteritems(source):
            if k.startswith("_"):
                continue

            if k in source_attribute_names:
                attributes[k] = v
            else:
                items[k] = v

        if len(items) == 1 and next(six.iterkeys(items)) == {source_var_name}.get("{fundamental_name}", None):
            return {simple_statement}

        result = {compound_statement}

        for k, v in six.iteritems(items):
            try:
                if isinstance(v, list):
                    new_items = []

                    for index, child in enumerate(v):
                        try:
                            new_items.append(cls._CreateAdditionalDataItem("item", child))
                        except:
                            _DecorateActiveException("Index {{}}".format(index))

                    {append_children}
                else:
                    new_item = cls._CreateAdditionalDataItem(k, v)

                    {append_child}
            except:
                _DecorateActiveException(k)

        return result
        """,
    ).format(
        source_var_name=source_var_name,
        attribute_names=cls.ATTRIBUTES_ATTRIBUTE_NAME,
        fundamental_name=cls.SIMPLE_ELEMENT_FUNDAMENTAL_ATTRIBUTE_NAME,
        # Single-item objects whose item matches the fundamental attribute are
        # rendered as a simple element.
        simple_statement=StringHelpers.LeftJustify(
            dest_writer.CreateSimpleElement(
                temporary_element,
                "attributes",
                '{}[{}["{}"]]'.format(source_var_name, source_var_name, cls.SIMPLE_ELEMENT_FUNDAMENTAL_ATTRIBUTE_NAME),
            ),
            4,
        ).strip(),
        compound_statement=dest_writer.CreateCompoundElement(temporary_element, "attributes").strip(),
        append_children=StringHelpers.LeftJustify(
            dest_writer.AppendChild(temporary_children_element, "result", dest_writer.CreateCollection(temporary_children_element, "new_items")),
            12,
        ).strip(),
        append_child=StringHelpers.LeftJustify(dest_writer.AppendChild(cls.CreateTemporaryElement("k", "1"), "result", "new_item"), 8).strip(),
    )
def EntryPoint(
    plugin,
    input_filename,
    output_dir,
    include=None,
    exclude=None,
    output_stream=sys.stdout,
):
    """Generates content based on a configuration file according to the specified plugin"""
    plugin = PLUGINS[plugin]

    # ----------------------------------------------------------------------
    def ToRegex(value):
        # Converts a user-supplied pattern into a compiled regex, surfacing
        # bad input as a usage error rather than a traceback.
        try:
            return re.compile(value)
        except:
            raise CommandLine.UsageException(
                "'{}' is not a valid regular expression".format(value),
            )

    # ----------------------------------------------------------------------

    includes = [ToRegex(arg) for arg in include]
    del include

    excludes = [ToRegex(arg) for arg in exclude]
    del exclude

    with StreamDecorator(output_stream).DoneManager(
        line_prefix="",
        prefix="\nResults: ",
        suffix="\n",
    ) as dm:
        dm.stream.write("Reading input data...")
        with dm.stream.DoneManager() as this_dm:
            try:
                data = Serialization.Deserialize(input_filename)
            except Exception as e:
                this_dm.stream.write(
                    textwrap.dedent(
                        """\
                        ERROR: {}

                        {}
                        """,
                    ).format(
                        StringHelpers.LeftJustify(str(e), len("ERROR: ")),
                        str(getattr(e, "stack", None)),
                    ),
                )
                this_dm.result = -1
                return this_dm.result

        nonlocals = CommonEnvironment.Nonlocals(
            skipped=0,
        )

        dm.stream.write("Preprocessing data...")
        with dm.stream.DoneManager(
            done_suffix=lambda: "{} were skipped".format(
                inflect.no("file", nonlocals.skipped),
            ),
            suffix=lambda: "\n" if nonlocals.skipped else None,
        ) as this_dm:
            # ----------------------------------------------------------------------
            def NormalizeEnum(enum):
                # Simplify the provided enum structure be creating an ordered dictionary with names and values
                if hasattr(enum, "integer_values"):
                    if len(enum.integer_values) != len(enum.values):
                        raise Exception(
                            "When integer values are specified for an enum, the number of integers must match the number of enums ('{}', '{}')"
                            .format(enum.values, enum.integer_values))

                    integer_values = enum.integer_values
                    del enum.integer_values
                else:
                    # No explicit values: number sequentially from starting_index.
                    integer_values = list(
                        range(enum.starting_index, enum.starting_index + len(enum.values)))
                    del enum.starting_index

                assert len(enum.values) == len(integer_values), (enum.values, integer_values)

                enum.values = OrderedDict([
                    (k, v) for k, v in zip(enum.values, integer_values)
                ])

                return enum

            # ----------------------------------------------------------------------

            # Get the global custom structs
            global_custom_struct_names = set()
            global_custom_structs = []

            for item in data.custom_structs:
                if item.name in global_custom_struct_names:
                    raise Exception(
                        "The custom struct '{}' has already been defined".format(item.name))

                global_custom_struct_names.add(item.name)
                global_custom_structs.append(item)

            # Get the global custom enums
            global_custom_enum_names = set()
            global_custom_enums = []

            for item in data.custom_enums:
                if item.name in global_custom_enum_names:
                    raise Exception(
                        "The custom enum '{}' has already been defined".format(item.name))

                global_custom_enum_names.add(item.name)
                global_custom_enums.append(NormalizeEnum(item))

            # If there are templates at play, preprocess the content and expand the values
            new_data = []

            for item in data.featurizers:
                # Filter by status and by the include/exclude patterns.
                if item.status != "Available":
                    this_dm.stream.write(
                        "The status for '{}' is set to '{}' and will not be processed.\n".format(
                            item.name,
                            item.status,
                        ),
                    )
                    nonlocals.skipped += 1
                    continue

                if excludes and any(exclude.match(item.name) for exclude in excludes):
                    this_dm.stream.write(
                        "'{}' has been explicitly excluded.\n".format(item.name),
                    )
                    nonlocals.skipped += 1
                    continue

                if includes and not any(include.match(item.name) for include in includes):
                    this_dm.stream.write(
                        "'{}' has not been included.\n".format(item.name),
                    )
                    nonlocals.skipped += 1
                    continue

                for enum in getattr(item, "custom_enums", []):
                    NormalizeEnum(enum)

                if not hasattr(item, "templates"):
                    # No templates: expand each type mapping into its own item.
                    assert item.type_mappings

                    for mapping in item.type_mappings:
                        new_item = copy.deepcopy(item)

                        new_item.input_type = mapping.input_type
                        new_item.output_type = mapping.output_type

                        new_data.append([new_item])

                    continue

                new_data_items = []

                for template in item.templates:
                    # Substitute the template name (as a whole word) everywhere
                    # it appears with each concrete type.
                    regex = re.compile(r"\b{}\b".format(template.name))

                    for template_type in template.types:
                        new_item = copy.deepcopy(item)
                        new_item.template = template_type

                        # Remove the template mapping and list of templates
                        del new_item.templates
                        del new_item.type_mappings

                        for configuration_param in getattr(
                            new_item,
                            "configuration_params",
                            [],
                        ):
                            configuration_param.type = regex.sub(
                                template_type,
                                configuration_param.type,
                            )

                        for custom_struct in getattr(new_item, "custom_structs", []):
                            if any(gcs for gcs in global_custom_structs if gcs.name == custom_struct.name):
                                raise Exception(
                                    "The custom structure '{}' in '{}' has already been defined as a global custom struct.\n"
                                    .format(custom_struct.name, item.name))

                            for member in custom_struct.members:
                                member.type = regex.sub(template_type, member.type)

                        for custom_enum in getattr(new_item, "custom_enums", []):
                            if any(gce for gce in global_custom_enums if gce.name == custom_enum.name):
                                raise Exception(
                                    "The custom enum '{}' in '{}' has already been defined as a global custom enum.\n"
                                    .format(custom_enum.name, item.name))

                            custom_enum.underlying_type = regex.sub(
                                template_type, custom_enum.underlying_type)

                        for mapping in item.type_mappings:
                            # TODO: sub all types (for example: map<K, V>
                            if not regex.search(mapping.input_type) and not regex.search(mapping.output_type):
                                continue

                            new_item.input_type = regex.sub(template_type, mapping.input_type)
                            if new_item.input_type != mapping.input_type:
                                # Remember which template produced this concrete type.
                                new_item.input_type_template_mapping = OrderedDict(
                                    [
                                        (template_type, template.name),
                                    ],
                                )

                            new_item.output_type = regex.sub(template_type, mapping.output_type)
                            if new_item.output_type != mapping.output_type:
                                new_item.output_type_template_mapping = OrderedDict(
                                    [
                                        (template_type, template.name),
                                    ],
                                )

                            # This will end up copying one more time than needed, but I couldn't think of a better way for now.
                            new_data_items.append(copy.deepcopy(new_item))

                new_data.append(new_data_items)

            data = new_data

        # Validate parameters
        dm.stream.write("Validating types...")
        with dm.stream.DoneManager():
            for items in data:
                for item in items:
                    # ----------------------------------------------------------------------
                    def IsSupportedType(typename):
                        # SUPPORTED_TYPES entries may be literal names or regexes.
                        for potential_type in SUPPORTED_TYPES:
                            if hasattr(potential_type, "match"):
                                if potential_type.match(typename):
                                    return True
                            elif typename == potential_type:
                                return True

                        return False

                    # ----------------------------------------------------------------------
                    def IsCustomStructType(typename):
                        return any(
                            custom_struct
                            for custom_struct in itertools.chain(
                                getattr(item, "custom_structs", []),
                                global_custom_structs)
                            if custom_struct.name == typename)

                    # ----------------------------------------------------------------------
                    def IsCustomEnumType(typename):
                        return any(
                            custom_enum
                            for custom_enum in itertools.chain(
                                getattr(item, "custom_enums", []),
                                global_custom_enums)
                            if custom_enum.name == typename)

                    # ----------------------------------------------------------------------

                    # A trailing '?' marks an optional type; strip it before validating.
                    input_type = item.input_type
                    if input_type.endswith("?"):
                        input_type = input_type[:-1]

                    if (not IsSupportedType(input_type)
                            and not IsCustomStructType(input_type)
                            and not IsCustomEnumType(input_type)):
                        raise Exception(
                            "The input type '{}' defined in '{}' is not valid.".format(
                                input_type,
                                item.name,
                            ),
                        ) from None

                    output_type = item.output_type
                    if output_type.endswith("?"):
                        output_type = output_type[:-1]

                    if (not IsSupportedType(output_type)
                            and not IsCustomStructType(output_type)
                            and not IsCustomEnumType(output_type)):
                        raise Exception(
                            "The output type '{}' defined in '{}' is not valid.".format(
                                output_type,
                                item.name,
                            ),
                        ) from None

        dm.stream.write("Generating content...")
        with dm.stream.DoneManager() as this_dm:
            FileSystem.MakeDirs(output_dir)

            # ----------------------------------------------------------------------
            def CalcHash(filename):
                # SHA-256 of the file's content, read in blocks.
                hash = hashlib.sha256()

                with open(filename, "rb") as f:
                    while True:
                        block = f.read(4096)
                        if not block:
                            break

                        hash.update(block)

                return hash.digest()

            # ----------------------------------------------------------------------
            @contextlib.contextmanager
            def FileWriter(filename, mode):
                """\
                Method that writes to a temporary location and only copies to the intended destination if there are changes.
                This prevents full rebuilds (which are triggered based on timestamps) on files that haven't changed.
                """
                temp_filename = CurrentShell.CreateTempFilename()
                with open(temp_filename, mode) as f:
                    yield f

                if not os.path.isfile(filename) or CalcHash(temp_filename) != CalcHash(filename):
                    FileSystem.RemoveFile(filename)
                    shutil.move(temp_filename, filename)
                else:
                    FileSystem.RemoveFile(temp_filename)

            # ----------------------------------------------------------------------

            this_dm.result = plugin.Generate(
                FileWriter,
                global_custom_structs,
                global_custom_enums,
                data,
                output_dir,
                this_dm.stream,
            )
            if this_dm.result != 0:
                return this_dm.result

        return dm.result
def OnCompound_VisitedChildren(element):
    # Emits an <xsd:complexType> definition for a compound element once all of
    # its children have been visited. Children flagged as attributes become
    # xsd:attribute entries; all others become xsd:element entries inside an
    # xsd:sequence. Relies on closure variables: `cls`, `f` (output stream),
    # `process_additional_data`, and `GetBaseTypeName`.
    attributes = []
    elements = []

    for child in cls._EnumerateChildren(
        element,
        include_definitions=False,
    ):
        # NOTE: getattr is used because not every child type defines IsAttribute.
        if getattr(child, "IsAttribute", False):
            attributes.append(
                textwrap.dedent(
                    """\
                    <xsd:attribute name="{name}" use="{use}" type="{type}"{default} />
                    """,
                ).format(
                    name=child.Name,
                    use="optional" if child.TypeInfo.Arity.IsOptional else "required",
                    type=GetBaseTypeName(child.Resolve()),
                    default="" if not hasattr(child, "default") else ' default="{}"'.format(child.default),
                ),
            )
        else:
            elements.append(
                textwrap.dedent(
                    """\
                    <xsd:element name="{name}" type="_{type}" minOccurs="{min}" maxOccurs="1"{default} />
                    """,
                ).format(
                    name=child.Name,
                    type=child.DottedName,
                    min="0" if child.TypeInfo.Arity.Min == 0 else "1",
                    default="" if not hasattr(child, "default") else ' default="{}"'.format(child.default),
                ),
            )

    # Element-level setting overrides the closure-level default when present.
    element_process_additional_data = getattr(element, "process_additional_data", None)
    if element_process_additional_data is None:
        element_process_additional_data = process_additional_data

    if element_process_additional_data:
        # Allow arbitrary extra elements when additional data is permitted.
        elements.append('<xsd:any minOccurs="0" maxOccurs="unbounded" processContents="skip" />\n')

    content = textwrap.dedent(
        """\
        <xsd:sequence>
          {elements}
        </xsd:sequence>
        {attributes}
        """,
    ).format(
        elements=StringHelpers.LeftJustify("".join(elements), 2).strip(),
        attributes="".join(attributes).strip(),
    )

    f.write(
        textwrap.dedent(
            """\
            <xsd:complexType name="_{name}_Item"{mixed}>
              {content}
            </xsd:complexType>

            """,
        ).format(
            name=element.DottedName,
            content=StringHelpers.LeftJustify(content, 2).strip(),
            # mixed="true" permits interleaved character data when additional
            # data processing is enabled.
            mixed=' mixed="true"' if element_process_additional_data else "",
        ),
    )
def GetDestroyOutputInfo(
    self,
    arg_name="result",
):
    """Generate C++ parameter lists, validation code, and invocation code for
    a Destroy function that releases buffers previously returned as output.

    The standard output parameters (pointer-to-pointer style) are converted
    into plain pointer inputs plus a trailing ``size_t .._items`` count; the
    generated code walks the arrays, destroys each item (when the underlying
    type requires per-item destruction), and finally ``delete []``s the arrays.
    Returns a ``self.Result`` of (input_parameters, validation_statements,
    invocation_statements).
    """
    result = self.GetOutputInfo(arg_name, )

    # Output params are pointer-to-X; the destroy function receives pointer-to-X
    # by value, so strip one level of indirection.
    input_parameters = [
        self.Type(self._StripPointer(p.Type), p.Name) for p in result.Parameters
    ]

    # By convention, the last parameter is always the item count.
    assert input_parameters[-1].Type == "size_t", input_parameters[-1].Type
    assert input_parameters[-1].Name.endswith("_items"), input_parameters[-1].Name

    pointer_parameters = input_parameters[:-1]

    # Create the destroy statements
    destroy_result = self._type_info.GetDestroyOutputInfo("{}_destroy_item".format(arg_name))
    if destroy_result is not None:
        # Per-item destruction is required: emit a loop that dereferences each
        # array slot, destroys the item, then advances the working pointers.
        assert len(destroy_result.Parameters) == len(result.Parameters) - 1

        destroy_statements = textwrap.dedent(
            """\
            {variable_statements}

            while({name}_items--) {{
                {assignment_statements}

                {delete_statements}

                {increment_statements}
            }}
            """,
        ).format(
            name=arg_name,
            # Working copies so the originals stay valid for the final delete [].
            variable_statements="\n".join([
                "{type} this_{name}({name});".format(
                    type=p.Type,
                    name=p.Name,
                ) for p in pointer_parameters
            ],
            ),
            assignment_statements=StringHelpers.LeftJustify(
                "\n".join([
                    """{destroy_type} const & {destroy_name}(*this_{parameter_name});""".format(
                        destroy_type=destroy_p.Type,
                        destroy_name=destroy_p.Name,
                        parameter_name=standard_p.Name,
                    ) for destroy_p, standard_p in zip(destroy_result.Parameters, pointer_parameters)
                ]),
                4,
            ),
            delete_statements=StringHelpers.LeftJustify(
                textwrap.dedent(
                    """\
                    {}

                    {}
                    """,
                ).format(
                    destroy_result.ValidationStatements.rstrip() if destroy_result.ValidationStatements else "// No validation statements",
                    destroy_result.InvocationStatements.rstrip(),
                ),
                4,
            ),
            increment_statements=StringHelpers.LeftJustify(
                "\n".join(["++this_{};".format(p.Name) for p in pointer_parameters]),
                4,
            ),
        )
    else:
        destroy_statements = "// No destroy statements"

    return self.Result(
        input_parameters,
        # Validation: pointers and the item count must be consistent (all null
        # with count 0, or all non-null with count != 0).
        textwrap.dedent(
            """\
            if({initial_ptr_name} != nullptr && {name}_items == 0) throw std::invalid_argument("'{name}_items' is 0");
            if({initial_ptr_name} == nullptr && {name}_items != 0) throw std::invalid_argument("'{name}_items' is not 0");
            {ptr_validations}
            """,
        ).format(
            initial_ptr_name=input_parameters[0].Name,
            name=arg_name,
            ptr_validations="\n".join([
                """if(bool({name}) != bool({initial_ptr_name})) throw std::invalid_argument("'{name}' is not internally consistent");""".format(
                    initial_ptr_name=input_parameters[0].Name,
                    name=p.Name,
                ) for p in input_parameters[1:]
            ]),
        ),
        # Invocation: destroy items (if needed), then release the arrays.
        textwrap.dedent(
            """\
            if({initial_ptr_name} != nullptr) {{
                {statements}

                {delete_ptrs}
            }}
            """,
        ).format(
            initial_ptr_name=input_parameters[0].Name,
            statements=StringHelpers.LeftJustify(destroy_statements, 4).rstrip(),
            delete_ptrs=StringHelpers.LeftJustify(
                "\n".join(["delete [] {};".format(p.Name) for p in pointer_parameters]),
                4,
            ),
        ),
    )
def DoneManager(
    self,
    line_prefix="  ",
    prefix=None,                            # string or def Func() -> string
    suffix=None,                            # string or def Func() -> string
    done_suffix=None,                       # string or def Func() -> string
    done_suffixes=None,                     # List of string or def Func() -> string
    display=True,                           # Display done information; will sometimes be False for nested DoneManagers created to manage error propagation
    display_result=True,                    # Display the result
    display_time=True,                      # Display the time delta
    display_exceptions=True,                # Display exception information
    display_exception_callstacks=True,      # Display exception callstack info
    suppress_exceptions=False,              # Do not let exception propagate
    associated_stream=None,
    associated_streams=None,                # Streams that should be adjusted in conjunction with this stream. Most of the time, this is used
                                            # to manage verbose streams that are aligned with status streams, where the status stream is self
                                            # and the verbose stream content is interleaved with it.
                                            #
                                            # Example:
                                            #   import sys
                                            #
                                            #   from StringIO import StringIO
                                            #   from CommonEnvironment.StreamDecorator import StreamDecorator
                                            #
                                            #   sink = StringIO()
                                            #
                                            #   verbose_stream = StreamDecorator(sink)
                                            #   status_stream = StreamDecorator([ sys.stdout, verbose_stream, ])
                                            #
                                            #   status_stream.write("0...")
                                            #   with status_stream.DoneManager(associated_stream=verbose_stream) as ( dm1, verbose_stream1 ):
                                            #       verbose_stream1.write("Verbose 0\n----")
                                            #
                                            #       dm1.stream.write("1...")
                                            #       with dm1.stream.DoneManager(associated_stream=verbose_stream1) as ( dm2, verbose_stream2 ):
                                            #           verbose_stream2.write("Verbose 1\n----")
                                            #
                                            #           dm2.stream.write("2...")
                                            #           with dm2.stream.DoneManager(associated_stream=verbose_stream2) as ( dm3, verbose_stream3 ):
                                            #               verbose_stream3.write("Verbose 2\n----")
                                            #
                                            #               dm3.stream.write("3...")
                                            #               with dm3.stream.DoneManager(associated_stream=verbose_stream3) as ( dm4, verbose_stream4 ):
                                            #                   verbose_stream4.write("Verbose 3\n----")
                                            #                   verbose_stream4.flush()
                                            #
                                            #   sys.stdout.write("\n**\n{}\n**\n".format(sink.getvalue()))
):
    """Displays done information upon completion.

    Yields a ``_DoneManagerInfo`` object (or, when associated streams are
    provided, a tuple of that object plus decorated associated streams).
    On exit, writes result/timing information and propagates a non-zero
    result to nested DoneManagers.
    """
    assert display_exceptions or not suppress_exceptions, "It isn't wise to not display exceptions while also suppressing them"

    # ----------------------------------------------------------------------
    def ToFunctor(value):
        # Normalize a value that may be a callable, a plain value, or None
        # into a zero-argument callable.
        if callable(value):
            return value

        if value is None:
            value = ''

        return lambda: value

    # ----------------------------------------------------------------------

    prefix_func = ToFunctor(prefix)
    suffix_func = ToFunctor(suffix)

    if not display:
        line_prefix = ''

    nonlocals = Nonlocals(time_delta=None)

    info = self._DoneManagerInfo(self, line_prefix)

    # Done suffixes are evaluated lazily at cleanup time; result and time are
    # prepended so they render first.
    done_suffixes_funcs = [ToFunctor(ds) for ds in (done_suffixes or [])]

    if done_suffix:
        done_suffixes_funcs.append(ToFunctor(done_suffix))

    if display_time:
        done_suffixes_funcs.insert(0, lambda: str(nonlocals.time_delta))

    if display_result:
        done_suffixes_funcs.insert(0, lambda: str(info.result))

    # ----------------------------------------------------------------------
    def Cleanup():
        # Writes the completion line and propagates a non-zero result up the
        # chain of enclosing DoneManagers.
        assert info.result is not None

        if display:
            prefix = prefix_func()
            suffix = suffix_func()

            suffixes = []

            for done_suffix in done_suffixes_funcs:
                result = done_suffix()
                if result:
                    suffixes.append(result)

            if suffixes:
                content = ', '.join(suffixes)

                if prefix.strip():
                    content = "({})".format(content)
                elif not line_prefix:
                    pass
                else:
                    # Single-line results
                    content = "DONE! ({})".format(content)
            else:
                content = ''

            self.write("{}{}{}\n".format(
                prefix,
                content,
                suffix,
            ))
            self.flush()

        # Propogate the result
        if info.result != 0:
            for index, dm in enumerate(info.Enumerate()):
                if index == 0:
                    continue

                # Only overwrite a parent result that is "less severe":
                # zero, or positive when this result is negative.
                if (
                    dm.result != info.result
                    and (dm.result == 0 or (dm.result > 0 and info.result < 0))
                ):
                    dm.result = info.result

    # ----------------------------------------------------------------------

    with CallOnExit(Cleanup):
        start_time = time.time()

        try:
            # NOTE(review): when a caller passes `associated_streams`, the
            # caller's list is mutated by the append below — confirm intended.
            associated_streams = associated_streams or []
            if associated_stream:
                associated_streams.append(associated_stream)

            if not associated_streams:
                yield info
            else:
                yield tuple([info, ] + [
                    StreamDecorator(
                        stream,
                        line_prefix=' ' * len(line_prefix),
                        one_time_prefix='\n',
                        one_time_suffix="\n<flush>",
                        is_associated_stream=True,
                    ) for stream in associated_streams
                ])

            if info.result is None:
                info.result = 0

        except Exception:
            if info.result is None or info.result >= 0:
                info.result = -1

            if display_exceptions:
                ex = sys.exc_info()[1]

                # Only display an exception once, and never on an associated
                # stream (the primary stream will display it).
                if (
                    not getattr(ex, "_DisplayedException", False)
                    and not getattr(info.stream, "IsAssociatedStream", False)
                ):
                    ex._DisplayedException = True

                    if display_exception_callstacks:
                        import traceback

                        info.stream.write("ERROR: {}\n".format(StringHelpers.LeftJustify(traceback.format_exc(), len("ERROR: ")).rstrip()))
                    else:
                        info.stream.write("ERROR: {}\n".format(StringHelpers.LeftJustify(str(ex), len("ERROR: ")).rstrip()))

            if not suppress_exceptions:
                raise

        finally:
            current_time = time.time()

            # There is a bug (and I have seen it) where the value calculated
            # in the past will be greater than the value calculated now. This is
            # wrong in theory, but apparently there is a BIOS bug that causes the
            # behavior on multicore machines (I have a hunch that virtualized machines
            # contribute to the problem as well). More info at http://bytes.com/topic/python/answers/527849-time-clock-going-backwards.
            # Regardless, asserting here is causing problems and this method is
            # only used for scripts. If we encounter the scenario, populate with
            # bogus data.
            if start_time > current_time:
                # This is a total lie, but hopefully the value is unique enough to
                # generate a double take. This is preferable to causing a long-
                # running process to fail.
                current_time = start_time + (12 * 60 * 60) + (34 * 60) + 56  # 12:34:56

            assert current_time >= start_time, (current_time, start_time)
            nonlocals.time_delta = str(datetime.timedelta(seconds=current_time - start_time))
def Execute(
    cls,
    on_status_update,
    compiler,
    context,
    command_line,
    includes=None,
    excludes=None,
    verbose=False,
):
    """Run a test under code-coverage instrumentation and collect results.

    Pipeline (each stage short-circuits on failure by setting a non-zero
    result on ``execute_result`` and returning): instrument binaries,
    start the coverage monitor, run the test command, stop the monitor,
    extract per-binary coverage, then aggregate totals. Returns an
    ``ExecuteResult`` with test/coverage output, timing, and percentages.

    NOTE(review): the ``compiler``, ``includes``, and ``excludes`` parameters
    are not referenced in this body — confirm they are intentionally unused
    (filters are instead derived per-binary via CodeCoverageFilter).
    """
    execute_result = cls.ExecuteResult(
        test_result=None,
        test_output=None,
        test_time=None,
    )

    coverage_start_time = time.time()
    coverage_output = OrderedDict()

    code_coverage_executor = cls._CodeCoverageExecutor()

    # ----------------------------------------------------------------------
    def Impl():
        # Mutates `execute_result` and `coverage_output` in the enclosing
        # scope; uses early returns to stop on the first failing stage.

        # Instrument the binaries
        on_status_update("Instrumenting Binaries")

        # ----------------------------------------------------------------------
        def Invoke(task_index, output_stream):
            output_filename = context["output_filenames"][task_index]
            return code_coverage_executor.PreprocessBinary(output_filename, output_stream)

        # ----------------------------------------------------------------------

        sink = six.moves.StringIO()

        execute_result.CoverageResult = TaskPool.Execute(
            [TaskPool.Task(output_filename, Invoke) for output_filename in context["output_filenames"]],
            sink,
            verbose=True,
        )

        coverage_output["Instrumenting Binaries"] = sink.getvalue()

        if execute_result.CoverageResult != 0:
            return

        # Start coverage
        coverage_output_filename = os.path.join(context["output_dir"], code_coverage_executor.DefaultFileName)

        on_status_update("Starting Coverage Monitor")

        sink = six.moves.StringIO()
        execute_result.CoverageResult = code_coverage_executor.StartCoverage(coverage_output_filename, sink)
        coverage_output["Starting Coverage Monitor"] = sink.getvalue()

        if execute_result.CoverageResult != 0:
            return

        # Execute the test(s)
        on_status_update("Testing")

        test_start_time = time.time()

        sink = six.moves.StringIO()
        execute_result.TestResult = code_coverage_executor.Execute(command_line, sink)
        execute_result.TestOutput = sink.getvalue()
        execute_result.TestTime = datetime.timedelta(
            seconds=(time.time() - test_start_time),
        )

        if execute_result.TestResult != 0:
            return

        # Stop code coverage monitoring and extract the results
        on_status_update("Stopping Coverage Monitor")

        sink = six.moves.StringIO()
        execute_result.CoverageResult = code_coverage_executor.StopCoverage(sink)
        coverage_output["Stopping Coverage Monitor"] = sink.getvalue()

        if execute_result.CoverageResult != 0:
            return

        # Process the results
        output_names = [os.path.basename(output_filename) for output_filename in context["output_filenames"]]

        all_results = [None] * len(output_names)

        nonlocals = CommonEnvironment.Nonlocals(
            remaining=len(output_names),
        )
        nonlocals_lock = threading.Lock()

        status_template = "Extracting Coverage Results ({} remaining)"

        on_status_update(status_template.format(nonlocals.remaining))

        # ----------------------------------------------------------------------
        def Invoke(task_index, output_stream):
            # Extract coverage for a single binary; runs concurrently, so the
            # shared countdown is protected by nonlocals_lock.
            output_filename = context["output_filenames"][task_index]

            # This is a filename that can be used to specify includes and excludes. Note that this
            # does not correspond to an actual file, as we don't have that information available.
            mock_filter_filename = os.path.join(
                context["input"],
                os.path.splitext(os.path.basename(output_filename))[0],
            )

            includes, excludes = CodeCoverageFilter.GetFilters(mock_filter_filename)

            this_result = code_coverage_executor.ExtractCoverageInfo(
                coverage_output_filename,
                output_filename,
                includes,
                excludes,
                output_stream,
            )
            if not isinstance(this_result, tuple):
                # Non-tuple is an error code from the executor.
                return this_result

            covered, not_covered = this_result

            all_results[task_index] = (covered, not_covered)

            with nonlocals_lock:
                nonlocals.remaining -= 1
                on_status_update(status_template.format(nonlocals.remaining))

            return 0

        # ----------------------------------------------------------------------

        sink = six.moves.StringIO()

        execute_result.CoverageResult = TaskPool.Execute(
            [TaskPool.Task(output_name, Invoke) for output_name in output_names],
            sink,
            verbose=True,
        )

        coverage_output["Extracting Coverage Results"] = sink.getvalue()

        if execute_result.CoverageResult != 0:
            return

        # Concatenate the results
        on_status_update("Finalizing Results")

        total_covered = 0
        total_not_covered = 0

        all_percentages = OrderedDict()

        for output_name, (covered, not_covered) in zip(output_names, all_results):
            total_covered += covered
            total_not_covered += not_covered

            result_blocks = covered + not_covered

            # (percentage or None, human-readable description)
            all_percentages[output_name] = (
                None if not result_blocks else ((float(covered) / result_blocks) * 100.0),
                "{} of {} {} covered".format(covered, result_blocks, code_coverage_executor.Units),
            )

        total_blocks = total_covered + total_not_covered

        execute_result.CoverageDataFilename = coverage_output_filename
        execute_result.CoveragePercentage = (float(total_covered) / total_blocks if total_blocks else 0.0) * 100.0
        execute_result.CoveragePercentages = all_percentages

    # ----------------------------------------------------------------------

    Impl()

    # Render each captured stage's output under an underlined header.
    execute_result.CoverageOutput = "".join([
        textwrap.dedent(
            """\
            {}
            {}
                {}

            """,
        ).format(header, "-" * len(header), StringHelpers.LeftJustify(content.strip(), 4)) for header, content in six.iteritems(coverage_output)
    ],
    )

    execute_result.CoverageTime = datetime.timedelta(
        seconds=(time.time() - coverage_start_time),
    )

    # Subtract the time spent testing (if it exists)
    if execute_result.TestTime is not None:
        assert execute_result.CoverageTime >= execute_result.TestTime
        execute_result.CoverageTime -= execute_result.TestTime

        execute_result.TestTime = str(execute_result.TestTime)

    execute_result.CoverageTime = str(execute_result.CoverageTime)

    return execute_result
def CreateCMakeFile(
    output_filename,
    data,
    output_name,
    input_filenames,
    cpp_wrapper_filename,
    generate_header_func,
    binary_version="1.0.0",
):
    """Write a CMakeLists-style file that builds a shared library from the
    given sources plus the generated C++ wrapper.

    ``generate_header_func("# ")`` produces the banner comment at the top of
    the file. Returns 0 on success.

    NOTE(review): the ``data`` parameter is not referenced in this body —
    confirm it is intentionally unused (likely kept for interface parity
    with sibling Create* functions).
    """
    output = [generate_header_func("# ")]

    output.append(
        textwrap.dedent(
            """\
            cmake_minimum_required(VERSION 3.5.0)

            project({output_name} VERSION {binary_version} LANGUAGES CXX)

            set(CMAKE_MODULE_PATH "$ENV{{DEVELOPMENT_ENVIRONMENT_CMAKE_MODULE_PATH}}")
            set(_includes "$ENV{{INCLUDE}}")
            set(_libs "$ENV{{LIB}}")

            if(NOT WIN32)
                string(REPLACE ":" ";" CMAKE_MODULE_PATH "${{CMAKE_MODULE_PATH}}")
                string(REPLACE ":" ";" _includes "${{_includes}}")
                string(REPLACE ":" ";" _libs "${{_libs}}")
            endif()

            include(CppCommon OPTIONAL)

            set(CMAKE_CXX_STANDARD 17)
            set(CMAKE_CXX_STANDARD_REQUIRED ON)
            set(CMAKE_CXX_EXTENSIONS OFF)

            if(CMAKE_CXX_COMPILER_ID MATCHES Clang)
                foreach(_flag IN ITEMS
                    -Wno-missing-prototypes
                )
                    string(APPEND CMAKE_CXX_FLAGS " ${{_flag}}")
                endforeach()
            endif()

            add_library(
                {output_name}
                SHARED
                {filenames}
            )

            target_include_directories({output_name} PRIVATE ${{_includes}})
            target_link_directories({output_name} PRIVATE ${{_libs}})
            """,
        ).format(
            output_name=output_name,
            binary_version=binary_version,
            # NOTE(review): all filenames are joined with '\n' into a single
            # Path so that as_posix() converts Windows backslashes to forward
            # slashes in one pass; each line is then re-indented by 4. Confirm
            # this hack behaves as intended on non-Windows platforms.
            filenames=StringHelpers.LeftJustify(
                Path("\n".join(input_filenames + [cpp_wrapper_filename])).as_posix(),
                4).rstrip(),
        ),
    )

    with open(output_filename, "w") as f:
        f.write("".join(output))

    return 0
def EntryPoint(
    root_dir,
    output_stream=sys.stdout,
):
    """Find all Mercurial repositories under ``root_dir`` and write them to
    the TortoiseHg repository-registry file (thg-reporegistry.xml), grouped
    by their directory structure relative to the common path prefix.
    """
    with StreamDecorator(output_stream).DoneManager(
        line_prefix='',
        prefix="\nResults: ",
        suffix='\n',
    ) as dm:
        repositories = []

        dm.stream.write("\nSearching for repositories in '{}'...".format(root_dir))
        with dm.stream.DoneManager(done_suffix=lambda: inflect.no("repository", len(repositories)), ):
            for scm, directory in EnumSCMs(root_dir):
                # Only Mercurial repositories belong in the TortoiseHg registry.
                if scm.Name != "Mercurial":
                    continue

                repositories.append(directory)

        # Organize the repos
        dm.stream.write("Organizing...")
        with dm.stream.DoneManager():
            # Build a nested OrderedDict keyed by path components; leaves map
            # repo name -> full repository path.
            repo_dict = OrderedDict()

            common_prefix = FileSystem.GetCommonPath(*repositories)
            common_prefix_len = len(common_prefix)

            for repository in repositories:
                suffix = repository[common_prefix_len:]

                parts = suffix.split(os.path.sep)

                repo_name = parts[-1]
                prefixes = parts[:-1]

                rd = repo_dict

                for prefix in prefixes:
                    rd.setdefault(prefix, OrderedDict())
                    rd = rd[prefix]

                rd[repo_name] = repository

        # Write the content
        dm.stream.write("Writing TortoiseHg content...")
        with dm.stream.DoneManager():
            filename = os.path.join(os.getenv("APPDATA"), "TortoiseHg", "thg-reporegistry.xml")
            assert os.path.isfile(filename), filename

            with open(filename, 'w') as f:
                # ----------------------------------------------------------------------
                def GenerateContent(root, is_root):
                    # Recursively render the nested dict as XML: string leaves
                    # become <repo .../> entries, nested dicts become groups.
                    items = []

                    for k, v in six.iteritems(root):
                        if isinstance(v, six.string_types):
                            items.append(
                                '<repo root="{}" shortname="{}" />\n'.format(
                                    v,
                                    os.path.basename(k),
                                ))
                        else:
                            # The top level uses "allgroup"; nested levels use "group".
                            tag_name = "allgroup" if is_root else "group"

                            items.append(
                                textwrap.dedent("""\
                                    <{tag_name} name="{name}">
                                      {content}
                                    </{tag_name}>
                                    """).format(
                                    tag_name=tag_name,
                                    name=k,
                                    content=StringHelpers.LeftJustify(GenerateContent(v, False), 2).rstrip(),
                                ))

                    return ''.join(items)

                # ----------------------------------------------------------------------

                f.write(
                    textwrap.dedent("""\
                        <?xml version="1.0" encoding="UTF-8"?>
                        <reporegistry>
                          <treeitem>
                        {}
                          </treeitem>
                        </reporegistry>
                        """).format(
                        StringHelpers.LeftJustify(
                            GenerateContent(repo_dict, True).rstrip(),
                            4,
                            skip_first_line=False,
                        )))

        return dm.result
def OnVariant(self, element):
    """Emit a ``_<name>_Item`` classmethod that converts a value by trying
    each variation of a variant element in turn.

    Reference variations resolve to existing ``cls._<name>_Item`` methods
    (wrapped in a lambda plus class name when they resolve to compound/simple
    elements, so the class-name attribute can be applied to the result);
    non-reference variations are collected and visited afterwards so their
    item methods get generated too.
    """
    statements = []
    new_types = []

    for variation in element.Variations:
        if isinstance(variation, Elements.ReferenceElement):
            statement = "cls._{}_Item".format(ToPythonName(variation.Reference))

            resolved_element = variation.Reference.Resolve()
            if isinstance(resolved_element, (Elements.CompoundElement, Elements.SimpleElement)):
                # Pair the converter with the class name so the generated code
                # can stamp the variant-class-type attribute on the result.
                statement = '((lambda item: {}(item, process_additional_data=False, always_include_optional=False)), "{}")'.format(statement, resolved_element.DottedName)

            statements.append(statement)
        else:
            assert not isinstance(variation, (Elements.CompoundElement, Elements.SimpleElement)), variation

            new_types.append(variation)
            statements.append("cls._{}_Item".format(ToPythonName(variation)))

    python_name = ToPythonName(element)

    self._output_stream.write(
        textwrap.dedent(
            """\
            # ----------------------------------------------------------------------
            @classmethod
            def _{python_name}_Item(cls, item):
                for method_info in [
                    {statements}
                ]:
                    if isinstance(method_info, tuple):
                        potential_method, class_name = method_info
                    else:
                        potential_method = method_info
                        class_name = None

                    try:
                        result = potential_method(item)

                        if class_name is not None:
                            {apply_class_name_statement}

                        return result
                    except:
                        pass

                raise {exception_type}Exception("The value cannot be converted to any of the supported variations")

            """,
        ).format(
            python_name=python_name,
            statements=StringHelpers.LeftJustify(
                "\n".join(["{},".format(statement) for statement in statements]),
                8).rstrip(),
            apply_class_name_statement=StringHelpers.LeftJustify(
                self._dest_writer.AppendChild(
                    self._dest_writer.CreateTemporaryElement(
                        '"{}"'.format(self._dest_writer.VARIANT_CLASS_TYPE_ATTRIBUTE_NAME),
                        "1"),
                    "result",
                    "class_name",
                ),
                16,
            ).rstrip(),
            exception_type="Serialize" if self._is_serializer else "Deserialize",
        ),
    )

    # Generate item methods for any variations not already covered by references.
    self.Accept(new_types)
def GetInputBufferInfo(
    self,
    arg_name,
    invocation_template,
    items_var_name=None,
):
    """Generate C++ parameter lists, validation code, and invocation code for
    an input buffer of items of this (vector-of-items) type.

    Each underlying parameter becomes a ``T const *`` array plus a trailing
    ``size_t`` count. The generated validation code walks the arrays, validates
    and converts each item, and accumulates into a ``std::vector`` of
    (begin, end) tuples that is finally passed to the featurizer.

    NOTE(review): the incoming ``items_var_name`` argument is immediately
    overwritten (see comment below) — confirm the parameter exists only for
    interface parity with sibling implementations.
    """
    # Don't reuse the items var (if it exists)
    items_var_name = "{}_items".format(arg_name)

    result = self._type_info.GetInputBufferInfo(
        "{}_item".format(arg_name),
        self._InvocationTemplate,
        items_var_name=items_var_name,
    )

    assert result.InputBufferType is not None, self._type_info

    input_parameters = [
        self.Type("{} const *".format(p.Type), "{}_ptr".format(p.Name)) for p in result.Parameters
    ]

    invocation_statements, invocation_tuple = self._ExtractDecoratedInvocationStatements(result.InvocationStatements)
    # The nested type is expected to produce only the decorated (ptr, size)
    # tuple, with no leading invocation statements.
    assert not invocation_statements, invocation_statements

    # If the input buffer type is a pointer, it means that we don't
    # have to transform the input prior to passing it on. If it is not
    # a pointer, transformation is required.
    if self._IsPointer(result.InputBufferType.Type):
        # No transformation is required
        buffer_type = self.Type(
            "std::vector<std::tuple<{type}, {type}>>".format(
                type=result.InputBufferType.Type,
            ),
            "{}_buffer".format(arg_name),
        )

        buffer_assignment = "{name}_buffer.emplace_back({invocation_ptr}, {invocation_ptr} + {invocation_size});".format(
            name=arg_name,
            invocation_ptr=invocation_tuple[0],
            invocation_size=invocation_tuple[1],
        )

        validation_suffix = ""
    else:
        # Transformation is required
        buffer_type = self.Type(
            "std::vector<{}>".format(result.InputBufferType.Type),
            "{}_temp_buffer".format(arg_name),
        )

        buffer_assignment = "{buffer_name}.emplace_back(std::move({item}));".format(
            buffer_name=buffer_type.Name,
            item=result.InputBufferType.Name,
        )

        # We have a vector of the concrete types, but need to pass a vector of tuples
        # to the featurizer itself. Create a new vector that has that info.
        validation_suffix = textwrap.dedent(
            """\
            std::vector<std::tuple<{type}, {type}>> {name}_buffer;

            {name}_buffer.reserve({temp_buffer}.size());

            for(auto const & {temp_buffer}_item : {temp_buffer})
                {name}_buffer.emplace_back({temp_buffer}_item.data(), {temp_buffer}_item.data() + {temp_buffer}_item.size());
            """,
        ).format(
            name=arg_name,
            type="typename {}::const_pointer".format(result.InputBufferType.Type),
            temp_buffer=buffer_type.Name,
        )

    validation_statements = textwrap.dedent(
        """\
        {parameter_validation}
        if({items_var_name} == 0) throw std::invalid_argument("'{items_var_name}' is 0");

        {buffer_type} {buffer_name};

        {buffer_name}.reserve({items_var_name});

        while({buffer_name}.size() < {items_var_name}) {{
            {references}

            {validation_statements}
            {invocation_statements}
            {buffer_assignment}

            {increment_pointers}
        }}{validation_suffix}
        """,
    ).format(
        parameter_validation="\n".join([
            """if({name} == nullptr) throw std::invalid_argument("'{name}' is null");""".format(name=p.Name, ) for p in input_parameters
        ]),
        name=arg_name,
        items_var_name=items_var_name,
        buffer_type=buffer_type.Type,
        buffer_name=buffer_type.Name,
        # Dereference each array slot into a local (by const & for non-pointer
        # types) so the nested validation/invocation code can refer to it by name.
        references=StringHelpers.LeftJustify(
            "\n".join([
                "{type}{const_and_ref}{name}(*{name}_ptr);".format(
                    type=p.Type,
                    name=p.Name,
                    const_and_ref=" const &" if not self._IsPointer(p.Type) else "",
                ) for p in result.Parameters
            ]),
            4,
        ),
        validation_statements=StringHelpers.LeftJustify(
            result.ValidationStatements.rstrip(),
            4,
        ),
        invocation_statements=StringHelpers.LeftJustify(
            invocation_statements.rstrip(),
            4,
        ),
        buffer_assignment=buffer_assignment,
        increment_pointers=StringHelpers.LeftJustify(
            "\n".join(["++{};".format(p.Name) for p in input_parameters]),
            4,
        ),
        validation_suffix="" if not validation_suffix else "\n\n{}".format(validation_suffix),
    )

    return self.Result(
        input_parameters + [self.Type("size_t", items_var_name)],
        validation_statements,
        invocation_template.format(
            "{name}_buffer.data(), {name}_buffer.size()".format(
                name=arg_name,
            ),
        ),
        # NOTE(review): `buffer_type` here is a self.Type instance rather than
        # a type *string* (as passed elsewhere) — confirm whether this should
        # be `buffer_type.Type`.
        input_buffer_type=self.Type(buffer_type, "{}_buffer".format(arg_name)),
    )
def _GenerateClass(self, element):
    """Emit a ``_<name>_Item`` classmethod that (de)serializes a single item
    of a compound or simple element.

    For each child, builds either an attribute-assignment statement or a
    child-application statement (optional children go through the
    ``_ApplyOptionalChild(ren)`` helpers), wraps each in a try/except that
    decorates raised exceptions with the child name, and finally writes the
    assembled method (including additional-data handling and type-info
    validation) to ``self._output_stream``.
    """
    if getattr(element, "as_dictionary", False):
        # Dictionary-like elements are generated by a dedicated helper.
        return self._GenerateDictionary(element)

    attributes = []
    statements = []
    attribute_names = []

    for child in self._enumerate_children_func(
        element,
        include_definitions=False,
    ):
        child_python_name = ToPythonName(child)
        resolved_child_python_name = ToPythonName(child.Resolve())

        attribute_names.append(child.Name)

        is_compound_like = isinstance(child.Resolve(), (Elements.CompoundElement, Elements.SimpleElement))

        # Note that we have to use getattr here, as Compound- and SimpleElements don't support IsAttribute
        if getattr(child, "IsAttribute", False):
            is_attribute = True

            assert not child.TypeInfo.Arity.IsCollection

            if child.TypeInfo.Arity.IsOptional:
                if hasattr(child, "default"):
                    default_value = ', default_value_func=lambda: StringSerialization.DeserializeItem({}_TypeInfo, "{}")'.format(resolved_child_python_name, child.default)
                else:
                    default_value = ""

                self.IncludeApplyOptionalAttribute = True
                # Double braces keep {name}/{python_name} for the second format pass.
                statement_template = 'cls._ApplyOptionalAttribute(item, "{{name}}", attributes, cls.{{python_name}}, always_include_optional{})'.format(default_value)
            else:
                statement_template = textwrap.dedent(
                    """\
                    attributes["{name}"] = cls.{python_name}(
                        {get_child_statement},
                    )
                    """,
                )

            statement = statement_template.format(
                name=child.Name,
                python_name=child_python_name,
                get_child_statement=StringHelpers.LeftJustify(self._source_writer.GetChild("item", child), 4).strip(),
            )
        else:
            is_attribute = False

            if child.TypeInfo.Arity.Min == 0:
                # Optional child: build the converter expression, then wrap it
                # in the appropriate _ApplyOptionalChild(ren) helper call.
                if is_compound_like:
                    statement = "lambda value: cls.{}(value, always_include_optional, process_additional_data)".format(child_python_name)
                else:
                    statement = "cls.{}".format(child_python_name)

                if child.TypeInfo.Arity.Max == 1:
                    self.IncludeApplyOptionalChild = True
                    function_name = "_ApplyOptionalChild"

                    if hasattr(child, "default"):
                        default_value = ', default_value_func=lambda: StringSerialization.DeserializeItem({}_TypeInfo, "{}")'.format(resolved_child_python_name, child.default)
                    else:
                        default_value = ""
                else:
                    self.IncludeApplyOptionalChildren = True
                    function_name = "_ApplyOptionalChildren"
                    default_value = ""

                statement = 'cls.{function_name}(item, "{name}", result, {statement}, always_include_optional{default_value})'.format(
                    function_name=function_name,
                    name=child.Name,
                    statement=statement,
                    default_value=default_value,
                )
            else:
                # Required child: convert and append directly onto `result`.
                if is_compound_like:
                    extra_params = StringHelpers.LeftJustify(
                        textwrap.dedent(
                            """\

                            always_include_optional,
                            process_additional_data
                            """,
                        ),
                        4,
                    )
                else:
                    extra_params = ""

                statement = self._dest_writer.AppendChild(
                    child,
                    "result",
                    textwrap.dedent(
                        """\
                        cls.{python_name}(
                            {get_child},{extra_params}
                        )
                        """,
                    ).format(
                        python_name=ToPythonName(child),
                        get_child=StringHelpers.LeftJustify(self._source_writer.GetChild("item", child), 4).strip(),
                        name=child.Name,
                        extra_params=extra_params,
                    ),
                )

        # Wrap every statement so that failures identify the offending child.
        (attributes if is_attribute else statements).append(
            textwrap.dedent(
                """\
                # {name}
                try:
                    {statement}
                except:
                    _DecorateActiveException("{name}")
                """,
            ).format(
                name=child.Name,
                statement=StringHelpers.LeftJustify(statement, 4).strip(),
            ),
        )

    if isinstance(element, Elements.SimpleElement):
        # Simple elements also carry a fundamental value alongside attributes.
        attribute_names.append(element.FundamentalAttributeName)

        statement = textwrap.dedent(
            """\
            # <fundamental value>
            try:
                fundamental_value = {}
            except:
                _DecorateActiveException("value type")

            result = {}

            {}
            """,
        ).format(
            StringHelpers.LeftJustify(
                "{type_info}.{method_prefix}Item({python_name}__value__TypeInfo, {value}, **{serialize_args})".format(
                    type_info=self._type_info_serialization_name,
                    method_prefix=self._method_prefix,
                    python_name=ToPythonName(element),
                    value=self._source_writer.GetChild(
                        "item",
                        self._source_writer.CreateTemporaryElement('"{}"'.format(element.FundamentalAttributeName), "1"),
                        is_simple_schema_fundamental=True,
                    ),
                    serialize_args=self._custom_serialize_item_args,
                ),
                4,
            ),
            self._dest_writer.CreateSimpleElement(element, "attributes" if attributes else None, "fundamental_value"),
            "".join(statements).strip(),
        )
    else:
        statement = textwrap.dedent(
            """\
            result = {}

            {}
            """,
        ).format(
            self._dest_writer.CreateCompoundElement(element, "attributes" if attributes else None).strip(),
            "".join(statements).strip())

    python_name = ToPythonName(element)

    validation_statement_template = textwrap.dedent(
        """\
        {}_TypeInfo.ValidateItem(
            {{}},
            recurse=False,
            require_exact_match=not process_additional_data,
        )
        """,
    ).format(python_name)

    if self._is_serializer:
        # Serialization validates the incoming item up front.
        prefix = validation_statement_template.format("item")
        suffix = ""
    else:
        # Deserialization rejects unknown data (when not processing additional
        # data) and validates the constructed result afterwards.
        prefix = ""
        suffix = textwrap.dedent(
            """\
            else:
                cls._RejectAdditionalData(
                    item,
                    exclude_names=[{attribute_names}],
                )

            {validation_statement}
            """,
        ).format(
            attribute_names=", ".join(['"{}"'.format(attribute_name) for attribute_name in attribute_names]),
            validation_statement=validation_statement_template.format("result"),
        )

    self._output_stream.write(
        textwrap.dedent(
            """\
            # ----------------------------------------------------------------------
            @classmethod
            def _{python_name}_Item(cls, item, always_include_optional, process_additional_data):
                {prefix}{prefix_whitespace}{attributes_decl}{attributes}{statement}

                # Additional data
                if process_additional_data:
                    cls._ApplyAdditionalData(
                        item,
                        result,
                        exclude_names={{{attribute_names}}},
                    )
                {suffix}
                return result

            """,
        ).format(
            python_name=python_name,
            prefix=StringHelpers.LeftJustify("{}\n\n".format(prefix.strip()) if prefix else prefix, 4),
            prefix_whitespace="    " if prefix else "",
            suffix=StringHelpers.LeftJustify("\n{}\n".format(suffix.strip()) if suffix else suffix, 4),
            attributes_decl="" if not attributes else "attributes = OrderedDict()\n\n    ",
            attributes="" if not attributes else "{}\n\n    ".format(StringHelpers.LeftJustify("".join(attributes), 4).strip()),
            statement=StringHelpers.LeftJustify(statement, 4).strip(),
            attribute_names=", ".join(['"{}"'.format(attribute_name) for attribute_name in attribute_names]),
        ),
    )
def Execute(
    root_dir,
    output_dir,
    mode=None,
    debug_only=False,
    release_only=False,
    output_stream=sys.stdout,
    verbose=False,
):
    """Recursively calls Build files with the desired mode(s).

    Args:
        root_dir: Directory searched (recursively) for Build files.
        output_dir: Root for per-build output directories.
        mode: Optional list of modes; defaults to ["clean", "build"].
        debug_only / release_only: Restrict which configurations are built.
        output_stream: Status output destination.
        verbose: Echo each build's output even on success.

    Returns the aggregated DoneManager result (0 on success).
    """

    assert os.path.isdir(root_dir), root_dir
    assert output_dir

    modes = mode or ["clean", "build"]
    del mode

    assert output_stream

    with StreamDecorator(output_stream).DoneManager(
        line_prefix='',
        prefix="\nResults: ",
        suffix='\n',
    ) as dm:
        build_infos = _GetBuildInfos(root_dir, dm.stream)
        if not build_infos:
            return dm.result

        # Find all the build files that have configurations that we can process
        build_configurations = []

        dm.stream.write("Processing build files...")
        with dm.stream.DoneManager(
            done_suffix=lambda: "{} found".format(inflect.no("configuration", len(build_configurations))),
        ) as this_dm:
            # ----------------------------------------------------------------------
            def GetSupportedConfigurations(configurations):
                # If there is a configuration that indicates completeness, execute that
                # and skip everything else.
                if COMPLETE_CONFIGURATION_NAME in configurations:
                    yield COMPLETE_CONFIGURATION_NAME
                    return

                # BUG FIX: this loop previously iterated `build_configurations`
                # (the outer accumulator of result tuples) instead of the
                # `configurations` parameter, so debug/release filtering never
                # saw the build file's configurations.
                for config in configurations:
                    config_lower = config.lower()

                    if (
                        (debug_only and "debug" in config_lower)
                        or (release_only and "release" in config_lower)
                        or (not debug_only and not release_only)
                    ):
                        yield config

            # ----------------------------------------------------------------------

            for build_info in build_infos:
                if not build_info.configuration.Configurations:
                    # Configuration-less build file: run once with no config.
                    build_configurations.append((
                        build_info.filename,
                        build_info.configuration,
                        None,
                    ))
                else:
                    for config in GetSupportedConfigurations(build_info.configuration.Configurations):
                        build_configurations.append((
                            build_info.filename,
                            build_info.configuration,
                            config,
                        ))

        if not build_configurations:
            return dm.result

        dm.stream.write('\n')

        for mode_index, mode in enumerate(modes):
            dm.stream.write("Invoking '{}' ({} of {})...".format(
                mode,
                mode_index + 1,
                len(modes),
            ))
            with dm.stream.DoneManager() as mode_dm:
                for build_index, (build_filename, config, configuration) in enumerate(build_configurations):
                    mode_dm.stream.write("Processing '{}'{} ({} of {})...".format(
                        build_filename,
                        " - '{}'".format(configuration) if configuration else '',
                        build_index + 1,
                        len(build_configurations),
                    ))
                    with mode_dm.stream.DoneManager() as build_dm:
                        build_output_dir = os.path.join(output_dir, config.SuggestedOutputDirLocation, configuration or "Build")
                        FileSystem.MakeDirs(build_output_dir)

                        command_line = 'python "{build_filename}" {mode}{configuration}{output_dir}' \
                            .format(
                                build_filename=build_filename,
                                mode=mode,
                                configuration=' "{}"'.format(configuration) if configuration else '',
                                output_dir=' "{}"'.format(build_output_dir) if config.RequiresOutputDir else '',
                            )

                        build_dm.result, output = Process.Execute(command_line)

                        # It is possible that the cleaning process deleted the output directory. Recreate it
                        # if necessary to store the log file.
                        FileSystem.MakeDirs(build_output_dir)

                        with open(os.path.join(build_output_dir, BUILD_LOG_TEMPLATE.format(mode=mode)), 'w') as f:
                            f.write(output)

                        if build_dm.result != 0:
                            build_dm.stream.write(output)
                        elif verbose:
                            build_dm.stream.write(StringHelpers.LeftJustify("INFO: {}".format(output), len("INFO: ")))

        return dm.result
def Invoke(
    self,
    output_stream=sys.stdout,
    verbose=False,
    print_results=False,
    allow_exceptions=False,
):
    """Parses self.Args, resolves the requested entry point, and invokes it.

    Args:
        output_stream: Stream that receives debug-, info-, result-, and
            error output.
        verbose: When True, request verbose usage on errors and echo the
            parsed arguments before invoking the entry point.
        print_results: When True, write the entry point's result to
            output_stream (generators are enumerated) and return 0 for the
            invocation.
        allow_exceptions: When True, re-raise unexpected exceptions instead
            of converting them to a -1 result.

    Returns:
        An integer exit code: the entry point's result (None maps to 0),
        1 when debug info was requested, -1 on interrupt or unexpected error,
        or the result of self.Usage when there is a command line problem.
    """

    arg_strings = list(self.Args)

    # Consume the special debug argument if it is present.
    debug_mode = False

    if len(arg_strings) > 1 and arg_strings[1].lower() == DEBUG_COMMAND_LINE_ARG:
        debug_mode = True
        del arg_strings[1]

    # Is there a request for verbose help?
    if any(
        arg.startswith(self.CommandLineArgPrefix)
        and arg[len(self.CommandLineArgPrefix):].lower() in ["?", "help", "h"]
        for arg in arg_strings
    ):
        # Is the request for a specific method?
        if len(self.EntryPoints) > 1 and len(arg_strings) > 2:
            potential_method_name = arg_strings[1]
        else:
            potential_method_name = None

        return self.Usage(
            verbose=True,
            potential_method_name=potential_method_name,
        )

    # Get the function to call
    if len(self.EntryPoints) == 1:
        # If there is only 1 entry point, don't make the user provide the name
        # on the command line
        entry_point = self.EntryPoints[0]
        arg_strings = arg_strings[1:]
    else:
        # The first arg is the entry point name
        if len(arg_strings) < 2:
            return self.Usage(verbose=verbose)

        name = arg_strings[1]
        name_lower = name.lower()

        arg_strings = arg_strings[2:]

        entry_point = next((ep for ep in self.EntryPoints if ep.Name.lower() == name_lower), None)
        if entry_point is None:
            return self.Usage(
                error="'{}' is not a valid command".format(name),
                verbose=verbose,
            )

    assert entry_point

    if debug_mode:
        output_stream.write(
            textwrap.dedent(
                """\
                DEBUG INFO:
                {}
                """).format('\n'.join([str(parameter) for parameter in entry_point.Parameters])))

        return 1

    # Read the arguments from a file if necessary
    if len(arg_strings) == 1 and arg_strings[0].startswith(self.ArgsInAFilePrefix):
        filename = os.path.join(os.getcwd(), arg_strings[0][len(self.ArgsInAFilePrefix):])
        if not os.path.isfile(filename):
            return self.Usage(
                error="'{}' is not a valid filename".format(filename),
                verbose=verbose,
            )

        arg_strings = []

        with open(filename) as f:
            for line in f:
                line = line.strip()

                # Skip empty lines and comments.
                if not line or line.startswith('#'):
                    continue

                arg_strings.append(line)

    # Parse the command line
    result = self._ParseCommandLine(entry_point, arg_strings)
    if isinstance(result, six.string_types):
        # A string result is an error message describing the parse failure.
        return self.Usage(
            error=result,
            verbose=verbose,
        )

    kwargs = result

    if verbose:
        # BUGFIX: message previously read "was the arguments".
        output_stream.write(
            textwrap.dedent(
                """\
                INFO: Calling '{name}' with the arguments:
                {args}
                """).format(
                    name=entry_point.Name,
                    args='\n'.join([
                        "    {k:<20} {v}".format(
                            k="{}:".format(k),
                            v=v,
                        ) for k, v in six.iteritems(kwargs)
                    ]),
                ))

    # Invoke the method
    try:
        result = entry_point(**kwargs)

        if print_results:
            if isinstance(result, types.GeneratorType):
                result = '\n'.join(["{}) {}".format(index, str(item)) for index, item in enumerate(result)])

            output_stream.write(
                textwrap.dedent(
                    """\
                    ** Result **

                    {}
                    """).format(result))

            result = 0

        if result is None:
            result = 0

    except (UsageException, ValidationException) as ex:
        # Both exception types were handled identically; a single merged
        # handler replaces the duplicated clauses.
        result = self.Usage(
            error=str(ex),
            verbose=verbose,
        )

    except KeyboardInterrupt:
        result = -1

    except:
        # Intentionally broad so script invocation always yields an exit code;
        # re-raised verbatim when the caller opted into exceptions.
        if allow_exceptions:
            raise

        # Exceptions that already displayed themselves are not re-printed.
        if not getattr(sys.exc_info()[1], "_DisplayedException", False):
            import traceback

            output_stream.write(
                "ERROR: {}".format(
                    StringHelpers.LeftJustify(traceback.format_exc(), len("ERROR: "))))

        result = -1

    return result
def CreateAdditionalDataItem(cls, dest_writer, name_var_name, source_var_name):
    """Returns generated source code (a string) that converts the "additional
    data" of an element — its non-underscore attributes, its text, and its
    children — into items created through *dest_writer*.

    The returned text is itself a template: the {source_var_name}-style fields
    are substituted below before the code is emitted.

    NOTE(review): name_var_name is never referenced in this body — confirm
    whether callers still need to pass it.
    """

    # Stand-in element whose name is resolved at runtime from the source
    # element's tag; arity "1" marks it as a single item.
    temporary_element = cls.CreateTemporaryElement(
        "{}.tag".format(source_var_name), "1")

    return textwrap.dedent(
        """\
        attributes = OrderedDict()

        for k, v in six.iteritems({source_var_name}.attrib):
            if k.startswith("_"):
                continue

            attributes[k] = v

        if {source_var_name}.text and {source_var_name}.text.strip() and not {source_var_name}:
            return {simple_element}

        result = {compound_statement}

        for child_name, child_or_children in cls._GenerateAdditionalDataChildren({source_var_name}, set()):
            try:
                if isinstance(child_or_children, list):
                    new_items = []

                    for index, child in enumerate(child_or_children):
                        try:
                            new_items.append(cls._CreateAdditionalDataItem("{collection_item_name}", child))
                        except:
                            _DecorateActiveException("Index {{}}".format(index))

                    if child_name is None:
                        result = new_items
                        break

                    {append_children}
                else:
                    assert child_name is not None

                    new_item = cls._CreateAdditionalDataItem(child_name, child_or_children)

                    {append_child}
            except:
                _DecorateActiveException(child_name)

        return result
        """,
    ).format(
        source_var_name=source_var_name,
        collection_item_name=Plugin.COLLECTION_ITEM_NAME,
        # Writer-specific statement that creates the compound result element
        # from the collected attributes.
        compound_statement=dest_writer.CreateCompoundElement(
            temporary_element, "attributes").strip(),
        # Writer-specific expression for a text-only element; LeftJustify
        # indents continuation lines to match the placeholder's column.
        simple_element=StringHelpers.LeftJustify(
            dest_writer.CreateSimpleElement(
                temporary_element, "attributes",
                "{}.text".format(source_var_name)),
            4).strip(),
        # Appends the list of converted children ("+" arity) to the result.
        append_children=StringHelpers.LeftJustify(
            dest_writer.AppendChild(
                cls.CreateTemporaryElement("child_name", "+"),
                "result",
                "new_items"),
            12).strip(),
        # Appends a single converted child ("1" arity) to the result.
        append_child=StringHelpers.LeftJustify(
            dest_writer.AppendChild(
                cls.CreateTemporaryElement("child_name", "1"),
                "result",
                "new_item"),
            8).strip(),
    )
def Usage(
    self,
    error=None,
    error_stream=sys.stderr,
    verbose=False,
    potential_method_name=None,
):
    """Writes usage information for this script to error_stream.

    Args:
        error: Optional error message appended after the usage text
            (rendered in red/bright when colorama is importable).
        error_stream: Stream that receives the usage output.
        verbose: When True, include each entry point's verbose description;
            when False, append a hint on how to request verbose help.
        potential_method_name: NOTE(review): as written this parameter is
            overwritten from self.Args[1] before it is ever read, so it is
            effectively unused — confirm intent.

    Returns:
        -1, so callers can use ``return self.Usage(...)`` as an error exit.
    """

    error_stream.write(
        textwrap.dedent("""\
            {desc}{prefix}

            Usage:
            """).format(
            desc=StringHelpers.Wrap(self.ScriptDescription, MAX_COLUMN_WIDTH),
            prefix='' if not self.ScriptDescriptionPrefix else "\n\n{}".format(self.ScriptDescriptionPrefix),
        ))

    # Everything after the header is indented under "Usage:".
    indented_stream = StreamDecorator(error_stream, line_prefix=" ")

    # Narrow the list down if help was requested for a single method
    entry_points = self.EntryPoints

    if len(self.EntryPoints) > 1 and len(self.Args) >= 2:
        # NOTE(review): clobbers the potential_method_name parameter — the
        # comparison below only ever sees self.Args[1].
        potential_method_name = self.Args[1].lower()

        for entry_point in entry_points:
            if entry_point.Name.lower() == potential_method_name:
                entry_points = [
                    entry_point,
                ]
                break

    # Display a single item or multiple items
    if len(entry_points) == 1:
        standard, verbose_desc = self._GenerateUsageInformation(
            entry_points[0])

        # Add the method name if necessary
        if len(self.EntryPoints) > 1:
            if '\n' in standard:
                standard = "\n {}{}".format(
                    entry_points[0].Name,
                    standard,
                )
            else:
                standard = "{} {}".format(
                    entry_points[0].Name,
                    standard,
                )

        if verbose:
            standard = "{}\n\n{}".format(standard, verbose_desc)

        indented_stream.write(" {} {}\n\n".format(
            self.ScriptName,
            StringHelpers.LeftJustify(standard, 4),
        ))
    else:
        # ----------------------------------------------------------------------
        def FormatInlineFuncDesc(content):
            # Wraps an entry point description so continuation lines align
            # with the description column of the command list below.
            initial_whitespace = 37

            assert MAX_COLUMN_WIDTH > initial_whitespace
            content = StringHelpers.Wrap(
                content, MAX_COLUMN_WIDTH - initial_whitespace)

            return StringHelpers.LeftJustify(content, initial_whitespace)

        # ----------------------------------------------------------------------

        indented_stream.write(
            textwrap.dedent("""\
                {script_name} <command> [args]

                Where '<command>' can be one of the following:
                ----------------------------------------------

                """).format(script_name=self.ScriptName, ))

        # Summary line for each command.
        for entry_point in entry_points:
            indented_stream.write(" - {name:<30} {desc}\n".format(
                name=entry_point.Name,
                desc=FormatInlineFuncDesc(entry_point.Description),
            ))

        indented_stream.write('\n')

        # Detailed usage for each command.
        for entry_point in entry_points:
            intro = "When '<command>' is '{}':".format(entry_point.Name)

            standard, verbose_desc = self._GenerateUsageInformation(
                entry_point)

            # Insert the function name as an argument
            if '\n' in standard:
                multi_line_args = True
                standard = " {}{}".format(
                    entry_point.Name,
                    StringHelpers.LeftJustify(standard, 4))
            else:
                multi_line_args = False
                standard = "{} {}".format(entry_point.Name, standard)

            if verbose:
                standard = "{}\n\n{}".format(
                    standard,
                    StringHelpers.LeftJustify(verbose_desc, 4, skip_first_line=False),
                )

            indented_stream.write(
                textwrap.dedent("""\
                    {intro}
                    {sep}
                    {script_name}{newline}{standard}

                    """).format(
                    intro=intro,
                    sep='-' * len(intro),
                    script_name=self.ScriptName,
                    newline='\n' if multi_line_args else ' ',
                    standard=standard,
                ))

    if self.ScriptDescriptionSuffix:
        error_stream.write("\n{}\n".format(
            self.ScriptDescriptionSuffix.strip()))

    if not verbose:
        # Tell the user how to get verbose help.
        error_stream.write(
            textwrap.dedent("""\
                Run "{script_name} {prefix}?" for additional information.

                """).format(
                script_name=self.ScriptName,
                prefix=self.CommandLineArgPrefix,
            ))

    if error:
        error = "\n\nERROR: {}\n".format(
            StringHelpers.LeftJustify(error, len("ERROR: ")))

        # Prefer colored output when colorama is available; fall back to
        # plain text on ImportError.
        try:
            import colorama

            colorama.init(autoreset=True)
            # NOTE(review): rebinds error_stream to sys.stderr here,
            # ignoring a caller-supplied stream for the error text —
            # confirm this is intentional.
            error_stream = sys.stderr

            error_stream.write("{}{}{}".format(
                colorama.Fore.RED,
                colorama.Style.BRIGHT,
                error,
            ))
        except ImportError:
            error_stream.write(error)

    return -1