Example #1
def Print_Messages():
    'Prints all pending messages.'
    while messages:
        message = messages.pop(0)
        # TODO: maybe allow this if Settings are set up, otherwise
        # might give an error on eg. a missing x4 path.
        #Plugin_Log.Print(message)
        if verbose:
            Print(message)
Example #2
def Safe_Update_MD(xml_root, xpath, attr, old_text, new_text):
    'Helper function for editing md nodes.'
    # Note: add some safety in case the lookups fail.
    nodes = xml_root.xpath(xpath)
    if not nodes:
        msg = 'Scale_Sector_Size failed to find a target MD script node; skipping this node.'
        Plugin_Log.Print(msg)
        Print(msg)
    else:
        nodes[0].set(attr, nodes[0].get(attr).replace(old_text, new_text))
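A hypothetical usage sketch of the helper above; the sample MD-style xml and the lxml import are assumptions for illustration, not part of the original module:

from lxml import etree

# Made-up MD-style xml for illustration only.
xml_root = etree.fromstring(
    '<mdscript><cues><cue name="Start">'
    '<set_value name="$radius" exact="5000m"/>'
    '</cue></cues></mdscript>')

# Swap a literal embedded in an attribute value.
Safe_Update_MD(xml_root,
               xpath='.//set_value[@name="$radius"]',
               attr='exact',
               old_text='5000m',
               new_text='10000m')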
Example #3
def Write_Modified_Binaries():
    '''
    Write out any modified binaries.  These are placed in the main x4
    folder, not in an extension.
    '''
    # Return early if settings have writeback disabled.
    if Settings.disable_cleanup_and_writeback:
        Print('Skipping Write_Extension; writeback disabled in Settings.')
        return
    
    # Trigger writeback.
    # Don't clear old stuff; this has no associated log, and just overwrites.
    File_System.Write_Non_Ext_Files()
    return
Example #4
def Match_Trees(original_root, modified_root, text_based_node_matches):
    '''
    Manually compare nodes between the xml trees, and try to find matches.
    Updates modified_root tail ids directly.
    '''
    # Gather hashes, with and without attributes included.
    attr_hash_dict, no_attr_hash_dict = Fill_Element_Hashes(original_root)
    Fill_Element_Hashes(modified_root, attr_hash_dict, no_attr_hash_dict)

    # The top level node should always match, so do that directly.
    if original_root.tag != modified_root.tag:
        Print('Generate_Diffs error: root tag mismatch, {} vs {}'.format(
            original_root.tag, modified_root.tag))
    modified_root.tail = original_root.tail

    # Fill in child node matches, recursively.
    Match_Children(original_root, modified_root, attr_hash_dict,
                   no_attr_hash_dict, text_based_node_matches)
    return
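Fill_Element_Hashes is defined elsewhere in the codebase; the following is a minimal sketch of the idea it implements, not the real helper, and the hashing details are assumptions (plain ElementTree-style elements assumed, no comment nodes):

import hashlib

def Sketch_Fill_Element_Hashes(root, attr_hash_dict=None, no_attr_hash_dict=None):
    'Map each element to a hash of its subtree, with and without attributes.'
    if attr_hash_dict is None:
        attr_hash_dict = {}
    if no_attr_hash_dict is None:
        no_attr_hash_dict = {}

    # Hash children first, so a parent hash covers its whole subtree.
    for child in root:
        Sketch_Fill_Element_Hashes(child, attr_hash_dict, no_attr_hash_dict)

    attrs = ','.join('{}={}'.format(k, v) for k, v in sorted(root.items()))
    attr_hash_dict[root] = hashlib.md5(
        (root.tag + attrs
         + ''.join(attr_hash_dict[c] for c in root)).encode()).hexdigest()
    no_attr_hash_dict[root] = hashlib.md5(
        (root.tag
         + ''.join(no_attr_hash_dict[c] for c in root)).encode()).hexdigest()
    return attr_hash_dict, no_attr_hash_dict

Subtrees with identical content then hash identically, which is what lets Match_Children pair off nodes across the two trees.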
Example #5
    def Logging_Function(message):

        # Detect if this extension has its name in the message.
        this_ext_name_in_message = re.search(re_name, message)

        # Want to skip messages based on diff patches by other
        # extensions. Can check the ext_currently_patching attribute
        # of the source_reader for this.
        if (source_reader.ext_currently_patching != None
                and source_reader.ext_currently_patching != extension_name
                # As a backup, don't skip if this extension's name is in
                # the message for some reason (though that case isn't really
                # expected currently).
                and not this_ext_name_in_message):
            return

        # Skip dependency errors from other extensions.
        # TODO: think of a smarter way to do this that can safely ignore
        # messages like this without ignoring those caused by this extension.
        # (Perhaps don't rely on the extension resort to catch these,
        #  but a specific extension checker.)
        if not this_ext_name_in_message:
            for skip_string in [
                    'duplicated extension id', 'missing hard dependency',
                    'multiple dependency matches'
            ]:
                if skip_string in message:
                    return

        if message in messages_seen:
            return
        if 'Error' in message or 'error' in message:
            messages_seen.add(message)
            nonlocal success
            success = False

            # Record the message, if requested.
            if return_log_messages:
                logged_messages.append(message)

            # Print with an indent for visual niceness.
            Print('  ' + message)
        return
Example #6
def Write_To_Extension(skip_content = False):
    '''
    Write all currently modified game files to the extension
    folder. Existing files at the location written on a prior
    call will be cleared out. Content.xml will have dependencies
    added for files modified from existing extensions.

    * skip_content
      - Bool, if True then the content.xml file will not be written.
      - Content is automatically skipped if Make_Extension_Content_XML
        was already called.
      - Defaults to False.
    '''
    # TODO: maybe make path and extension name into arguments,
    # though for now they should be set up in Settings since
    # this is the location log files were written to.

    ## If no name given, use the one from settings.
    #if not extension_name:
    #    extension_name = Settings.extension_name

    # Return early if settings have writeback disabled.
    if Settings.disable_cleanup_and_writeback:
        Print('Skipping Write_Extension; writeback disabled in Settings.')
        return

    # Clean old files, based on whatever old log is there.
    File_System.Cleanup()

    # Create a content.xml game file.
    if not skip_content:
        Make_Extension_Content_XML()
    
    # Trigger writeback.
    File_System.Write_Files()
    return
Example #7
def Run():

    # Test if the gui window is open; error message if so.
    from Plugins.GUI import Main_Window
    if Main_Window.qt_application != None:
        Print('Error: Generate_Diffs standalone script is only supported by'
              ' command line calls, not from the GUI.')
        return

    # To avoid errors that print to the Plugin_Log trying to then load Settings
    # which may not be set (eg. where to write the plugin_log to), monkey
    # patch the log to do a pass through.
    from Framework import Plugin_Log
    Plugin_Log.logging_function = lambda line: print(str(line))

    # Set up command line arguments.
    argparser = argparse.ArgumentParser(
        description=('XML Diff Patch Generator, part of X4 Customizer v{}.'
                     ' Generates xml diff patch files by comparing an original'
                     ' to a modified full xml file. Works on a single pair of'
                     ' files, or on a pair of directories.').format(
                         Get_Version()),
        epilog=
        ('Example (files)  : Generate_Diffs.bat original.xml modded.xml diff.xml\n'
         'Example (folders): Generate_Diffs.bat originals modded diffs'),
    )

    argparser.add_argument(
        'base',
        help='Path to the original unmodified base file, or directory of files.'
    )

    argparser.add_argument(
        'mod',
        help='Path to the modified file, or directory of files where all'
        ' have name matches in the -base directory.')

    argparser.add_argument(
        'out',
        help='Path name of the output diff file, or directory to write files.')

    argparser.add_argument(
        '-f',
        '--force-attr',
        default=None,
        # Consume all following plain args.
        nargs='*',
        help='Element attributes that xpaths are forced to use, even when'
        ' not necessary, as a space separated list.')

    argparser.add_argument(
        '-s',
        '--skip-unchanged',
        action='store_true',
        help='Produce nothing for files that are unchanged (removing any prior diff);'
        ' default is to create an empty diff file.')

    argparser.add_argument(
        '-v',
        '--verbose',
        action='store_true',
        help='Print extra messages on each diff file generation.')

    # TODO: pattern matching rules for the files to include or exclude.

    args = argparser.parse_args(sys.argv[1:])

    # Make the source a Path, and convert to absolute to fill in the parents.
    base = (Path.cwd() / Path(args.base)).resolve()
    mod = (Path.cwd() / Path(args.mod)).resolve()
    out = (Path.cwd() / Path(args.out)).resolve()

    # Reprint the args for clarity, in case a bad path was given, to make
    # it more obvious.
    print()
    print('Base : {}'.format(base))
    print('Mod  : {}'.format(mod))
    print('Out  : {}'.format(out))
    print()

    # Check that the inputs exist.
    for path in [base, mod]:
        if not path.exists():
            print('Error: No folder or file found on path {}'.format(path))
            return

    # Stick any forced attributes in Settings.
    # TODO: maybe move to a Generate_Diff(s) arg.
    # If not given, leave Settings alone; it may already have some generic
    # attributes set.
    if args.force_attr:
        Settings(forced_xpath_attributes=args.force_attr)

    # Determine if this is file or directory mode.
    mode = 'file' if base.is_file() else 'dir'

    if mode == 'file':

        # TODO: maybe verify xml files were given, though perhaps someone
        # might use this for other xml-style types (xsd, etc), so skip for now.

        # All args should be files (output can be non-existing, but
        # shouldn't be a folder).
        if not base.is_file() or not mod.is_file() or out.is_dir():
            print('Error: mixed files and directories.')
            return

        # Create a single diff.
        Generate_Diff(
            original_file_path=base,
            modified_file_path=mod,
            output_file_path=out,
            skip_unchanged=args.skip_unchanged,
            verbose=args.verbose,
        )

    else:

        # All args should be dirs (output can be non-existing, but
        # shouldn't be a file).
        if not base.is_dir() or not mod.is_dir() or out.is_file():
            print('Error: mixed files and directories.')
            return

        # Hand off to the multi-diff generator, which will handle
        # globbing and such.
        Generate_Diffs(
            original_dir_path=base,
            modified_dir_path=mod,
            output_dir_path=out,
            skip_unchanged=args.skip_unchanged,
            verbose=args.verbose,
        )
Example #8
def Check_Extension(extension_name,
                    check_other_orderings=False,
                    return_log_messages=False):
    '''
    Checks an extension for xml diff patch errors and dependency errors.
    Problems are printed to the console.
    Returns True if no errors found, else False.

    Performs up to three passes that adjust extension loading order:
    in alphabetical folder order, as early as possible (after its
    dependencies), and as late as possible (after all other extensions 
    that can go before it).

    * extension_name
      - Name (folder) of the extension being checked.
      - May be given in original or lower case.
      - This should match an enabled extension name findable on
        the normal search paths set in Settings.
    * check_other_orderings
      - Bool, if True then the 'earliest' and 'latest' loading orders
        will be checked, else only 'alphabetical' is checked.
      - These are recommended to identify where dependencies should
        be added to extensions, to protect against other extensions
        changing their folder name and thereby their loading order.
      - Defaults to False, to avoid printing errors that won't be
        present with the current extension order.
    * return_log_messages
      - Bool, if True then instead of the normal True/False return,
        this will instead return a list of logged lines that
        contain any error messages.
      - Does not stop the normal message Prints.
    '''
    # TODO: think about also checking later extensions to see if they
    #  might overwrite this extension.

    Print('')
    Print('Checking extension: {}'.format(extension_name))

    # Lowercase the name to standardize it for lookups.
    extension_name = extension_name.lower()

    # Success flag will be set false on any unexpected message.
    success = True

    # Pull out the source_reader; this also initializes it if needed.
    source_reader = File_Manager.File_System.Get_Source_Reader()

    # Verify the extension name is valid.
    if extension_name not in source_reader.extension_source_readers:
        raise AssertionError(
            'Extension "{}" not found in enabled extensions: {}'.format(
                extension_name, sorted(source_reader.Get_Extension_Names())))

    # Look up the display name of the extension, which might be used
    # in some messages being listened to.
    extension_display_name = source_reader.extension_source_readers[
        extension_name].extension_summary.display_name

    # Handle logging messages during the loading tests.
    # Do this by overriding the normal log function.
    # Keep a history of messages seen, to avoid reprinting them when
    # the loading order is switched.
    messages_seen = set()

    # Keep a list of lines seen, to possibly return.
    logged_messages = []

    # For name checks, use re to protect against one extension name
    # being inside another longer name by using '\b' as word edges;
    # also add a (?<!/) check to avoid matching when the extension
    # name is in a virtual_path (always preceded by a '/').
    # Note: the extension_name could have special re characters in it;
    #  can use re.escape to pre-format it.
    # Use (a|b) style to match both forms of the extension name.
    re_name = r'(?<!/)\b({}|{})\b'.format(re.escape(extension_name),
                                          re.escape(extension_display_name))

    def Logging_Function(message):

        # Detect if this extension has its name in the message.
        this_ext_name_in_message = re.search(re_name, message)

        # Want to skip messages based on diff patches by other
        # extensions. Can check the ext_currently_patching attribute
        # of the source_reader for this.
        if (source_reader.ext_currently_patching != None
                and source_reader.ext_currently_patching != extension_name
                # As a backup, don't skip if this extension's name is in
                # the message for some reason (though that case isn't really
                # expected currently).
                and not this_ext_name_in_message):
            return

        # Skip dependency errors from other extensions.
        # TODO: think of a smarter way to do this that can safely ignore
        # messages like this without ignoring those caused by this extension.
        # (Perhaps don't rely on the extension resort to catch these,
        #  but a specific extension checker.)
        if not this_ext_name_in_message:
            for skip_string in [
                    'duplicated extension id', 'missing hard dependency',
                    'multiple dependency matches'
            ]:
                if skip_string in message:
                    return

        if message in messages_seen:
            return
        if 'Error' in message or 'error' in message:
            messages_seen.add(message)
            nonlocal success
            success = False

            # Record the message, if requested.
            if return_log_messages:
                logged_messages.append(message)

            # Print with an indent for visual niceness.
            Print('  ' + message)
        return

    # Connect the custom logging function.
    Plugin_Log.logging_function = Logging_Function

    # Set up the loading orders by adjusting priority.
    # -1 will put this first, +1 will put it last, after satisfying
    # other dependencies. 0 will be used for standard alphabetical,
    # which some mods may rely on.
    priorities = [0]
    if check_other_orderings:
        priorities += [-1, 1]

    # Loop over sorting priorities.
    for priority in priorities:
        if priority == 0:
            Print('  Loading alphabetically...')
        elif priority == -1:
            Print('  Loading at earliest...')
        else:
            Print('  Loading at latest...')

        # Resort the extensions.
        # This will also check dependencies and for unique extension ids.
        source_reader.Sort_Extensions(priorities={extension_name: priority})

        # TODO: maybe think about doing a dependency version check as well,
        # but that isn't very important since x4 will catch those problems,
        # so this tool can somewhat safely assume they will get dealt with
        # by the user.

        # Loop over all files in the extension.
        for virtual_path in source_reader.Gen_Extension_Virtual_Paths(
                extension_name):

            # Caught exception.
            exception = None

            # The path could be to an original file, or to a patch on an
            # existing file.  Without knowing, need to try out both cases
            # and see if either works.
            # Start by assuming this is an original file.
            try:
                Load_File(virtual_path,
                          test_load=True,
                          error_if_unmatched_diff=True)

            # If it was a diff with no base file, catch the error.
            except Unmatched_Diff_Exception:

                # Pop off the extensions/mod_name part of the path.
                _, _, test_path = virtual_path.split('/', 2)

                # Note: some mods may try to patch files from other mods that
                # aren't enabled. This could be an error or intentional.
                # Here, only consider it a warning; explicit dependencies
                # should be caught in the content.xml dependency check.
                # Check if this path is to another extension.
                error_if_not_found = True
                if test_path.startswith('extensions/'):
                    error_if_not_found = False

                # Do a test load; this preserves any prior loads that
                # may have occurred before this plugin was called.
                try:
                    game_file = Load_File(
                        test_path,
                        test_load=True,
                        error_if_not_found=error_if_not_found)
                    if game_file == None:
                        Print(('  Warning: could not find file "{}";'
                               ' skipping diff').format(test_path))

                # Some loading problems will be printed to the log and then
                # ignored, but others can be passed through as an exception;
                # catch the exceptions.
                # TODO: maybe in developer mode reraise the exception to
                # get the stack trace.
                except Exception as ex:
                    exception = ex

            except Exception as ex:
                exception = ex

            # Did either attempt get an exception?
            if exception != None:
                # Pass it to the logging function.
                Logging_Function(
                    ('Error when loading file {}; returned exception: {}'
                     ).format(virtual_path, exception))

    Print('  Overall result: ' + ('Success' if success else 'Error detected'))

    # Detach the logging function override.
    Plugin_Log.logging_function = None

    # Return the messages if requested, else the success flag.
    if return_log_messages:
        return logged_messages
    return success
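A hypothetical call, with a placeholder extension name:

# 'my_extension' must match an enabled extension folder on the search paths.
if not Check_Extension('my_extension', check_other_orderings=True):
    Print('Extension has load errors; see messages above.')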
Example #9
def Print_Object_Stats(
        category, 
        file_name,
        version = None,
    ):
    '''
    Print out statistics for objects of a given category.
    This output will be similar to that viewable in the gui live editor
    pages, except formed into one or more tables.
    Produces csv and html output.
    Will include changes from enabled extensions.

    * category
      - String, category name of the objects, eg. 'weapons'.
    * file_name
      - String, name to use for generated files, without extension.
    * version
      - Optional string, version of the objects to use.
      - One of ['vanilla','patched','current','edited'].
      - Defaults to 'current'.
    '''
    try:
        tree_view = Live_Editor.Get_Tree_View(category)
        assert tree_view != None
    except Exception as ex:
        Print(('Could not print objects, error obtaining tree view for'
               ' category "{}".').format(category))
        if Settings.developer:
            raise ex
        return

    if version == None:
        version = 'current'

    # Convert it to an edit table group.
    table_group = tree_view.Convert_To_Table_Group()
    table_list = []

    for edit_table in table_group.Get_Tables():

        # This returns a 2d list of lists holding edit items, that
        # still need to be converted to strings.
        # These will use the selected version for filling out references.
        item_table = edit_table.Get_Table(version = version)
        
        # Prep a clean table of strings.
        new_table = []
        table_list.append(new_table)

        # Copy over the headers.
        new_table.append(item_table[0])

        for row in item_table[1:]:
            new_row = []
            new_table.append(new_row)
            for item in row:

                # Get its current value, or an empty string if no
                # item was available.
                value = '' if item == None else item.Get_Value(version)
                new_row.append( value )

    # Write results.
    Write_Tables(file_name, *table_list)
    return
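A hypothetical call; the category and output file name are placeholders:

# Write weapon stats as they are after extensions, before live edits.
Print_Object_Stats('weapons', 'weapon_stats', version='patched')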
Example #10
def Apply_Binary_Patch_Group(patch_list):
    '''
    Applies a group of patches as a single unit.
    If any patch runs into an error, no patch in the group will be applied.
    Returns True on success, False on failure.
    '''
    # Start with a search for matches.
    # These calls may raise an exception on error, or could return None
    #  in dev mode.
    matches_list = []
    for patch in patch_list:
        matches_list.append(_Get_Matches(patch))

    # Return early if a None was returned.
    if None in matches_list:

        # Get the indices of the patches that had errors or no errors.
        correct_patches = []
        failed_patches = []
        for index, matches in enumerate(matches_list):
            if matches == None:
                failed_patches.append(index)
            else:
                correct_patches.append(index)

        # Print them.
        if Settings.developer:
            Print('Correct patches : {}.'.format(correct_patches))
            Print('Failed patches  : {}.'.format(failed_patches))
        return False

    # It should now be safe to apply all patches in the group.
    # Note: this assumes all patches in the group target the same file.
    file_contents = File_Manager.Load_File(patch_list[0].file)

    for patch, matches in zip(patch_list, matches_list):
        # With at least one match, set as modified.
        # (If the patch defined 0 matches as okay, this may be skipped.)
        file_contents.Set_Modified()

        # Loop over the matches to apply each of them.
        for match in matches:

            # Grab the offset of the match.
            offset = match.start()

            # Verify there are a matched number of insertions and
            #  deletions in the new_code.
            if patch.new_code.count('+') != patch.new_code.count('-'):
                raise Exception('Error: Binary patch changes code size.')

            #-Removed; old style before insert/delete characters were
            #  added in. Was this unsafe on the wildcards anyway, which
            #  could take the same value as normal bytes? May have just
            #  gotten lucky that this case didn't come up.
            ## Get the wildcard char, as an int (since the loop below unpacks
            ##  the byte string into ints automatically, and also pulls ints
            ##  from the original binary).
            #wildcard = str.encode('.')[0]
            ## Apply the patch, leaving wildcard entries unchanged.
            ## This will edit in place on the bytearray.
            #new_bytes = _String_To_Bytes(patch.new_code)
            #for index, new_byte in enumerate(new_bytes):
            #    if new_byte == wildcard:
            #        continue
            #    file_contents.binary[offset + index] = new_byte

            # Stride through the new code.
            # For convenience, this will work on char pairs (for byte
            #  conversion when needed), and so a pre-pass will duplicate
            #  all control characters (+-) accordingly. '.' is not
            #  duplicated since it is already doubled in the original
            #  string.
            new_code = patch.new_code
            for control_char in ['+', '-']:
                new_code = new_code.replace(control_char, control_char * 2)

            # Note the code length before starting, for error check later.
            start_length = len(file_contents.binary)

            # Loop over the pairs, using even indices.
            for even_index in range(0, len(new_code), 2):
                char_pair = new_code[even_index:even_index + 2]

                # If this is a wildcard, advance the offset with no change.
                if char_pair == '..':
                    offset += 1

                # If this is a deletion, remove the byte from the
                #  file_contents and do not advance the offset (which will
                #  then be pointing at the post-deletion byte automatically).
                elif char_pair == '--':
                    file_contents.binary.pop(offset)

                # If this is an addition, insert a 0.
                elif char_pair == '++':
                    file_contents.binary.insert(offset, 0)

                else:
                    # This is a replacement byte.
                    # Convert it, insert, and inc the offset.
                    # Note: bytearray requires an int version of this value,
                    #  and hex2bin returns a byte string version.
                    #  Indexing into a bytearray of byte strings
                    #  returns an int, not a string.
                    new_byte = hex2bin(char_pair)[0]
                    file_contents.binary[offset] = new_byte
                    offset += 1

            # Error check.
            assert len(file_contents.binary) == start_length
    return True
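The '..'/'++'/'--' char-pair walk above can be exercised in isolation. A minimal standalone sketch, with a made-up buffer and code string, and int(pair, 16) standing in for hex2bin:

def apply_pairs(binary, offset, new_code):
    'Walk a hex/wildcard/insert/delete code string over a bytearray.'
    # Double the control chars so everything strides in char pairs.
    for control_char in ['+', '-']:
        new_code = new_code.replace(control_char, control_char * 2)
    for even_index in range(0, len(new_code), 2):
        char_pair = new_code[even_index:even_index + 2]
        if char_pair == '..':
            offset += 1                     # Wildcard: keep byte, advance.
        elif char_pair == '--':
            binary.pop(offset)              # Deletion: remove, don't advance.
        elif char_pair == '++':
            binary.insert(offset, 0)        # Insertion: placeholder 0 byte.
        else:
            binary[offset] = int(char_pair, 16)  # Replacement byte.
            offset += 1
    return binary

print(apply_pairs(bytearray(b'\x01\x02\x03\x04'), 1, '..ff').hex())
# -> '0102ff04': the byte at offset 1 is kept, the one at offset 2 replaced.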
Example #11
def _Get_Matches(patch):
    '''
    Find locations in the binary code where a patch can be applied.
    Returns a list of re match objects.

    This will search for the ref_code, using regex, and applies
    the patch where a match is found.
    Error if the number of matches is not what the patch expects, or if
    the match location doesn't match the reference code.
    '''
    file_contents = File_Manager.Load_File(patch.file)

    # Get a match pattern from the ref_code, using a bytes pattern.
    # This needs to convert the given ref_code into a suitable
    #  regex pattern that will match bytes.
    ref_bytes = _String_To_Bytes(patch.ref_code, add_escapes=True)
    pattern = re.compile(
        ref_bytes,
        # Need to set . to match newline, just in case a newline character
        #  is in the wildcard region (which came up for hired TLs).
        flags=re.DOTALL)

    # Get all match points.
    # Need to use finditer for this, as it is the only one that will
    #  return multiple matches.
    # Note: does not capture overlapped matches; this is not expected
    #  to be a problem.
    matches = [x for x in re.finditer(pattern, file_contents.binary)]

    # Look up the calling transform's name for any debug printout.
    try:
        caller_name = inspect.stack()[1][3]
    except:
        caller_name = '?'

    # Do the error check if a non-expected number of matches found.
    if len(matches) not in patch.expected_matches:
        # Can raise a hard or soft error depending on mode.
        # Message will be customized based on error type.
        if Settings.developer:
            Print('Error: Binary patch reference code found {} matches,'
                  ' expected {}, in {}.'.format(
                      len(matches),
                      patch.expected_matches,
                      caller_name,
                  ))
            Print('Pattern in use:')
            Print(pattern)
        else:
            raise Binary_Patch_Exception()
        return

    # Loop over the matches to check each of them.
    for match in matches:

        # Grab the offset of the match.
        offset = match.start()
        #print(hex(offset))

        # Get the wildcard char, as an int (since the loop below unpacks
        #  the byte string into ints automatically, and also pulls ints
        #  from the original binary).
        wildcard = str.encode('.')[0]

        # Quick verification of the ref_code, to ensure re was used correctly.
        # This will not add escapes, since they confuse the values.
        for index, ref_byte in enumerate(_String_To_Bytes(patch.ref_code)):

            # Don't check wildcards.
            if ref_byte == wildcard:
                continue

            # Check mismatch.
            # This exists as a redundant verification added during
            #  code development to make sure the regex match location was
            #  correct.
            original_byte = file_contents.binary[offset + index]
            if ref_byte != original_byte:
                if Settings.developer:
                    Print('Error: Binary patch regex verification mismatch')
                    return
                else:
                    raise Binary_Patch_Exception()

    return matches
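A minimal illustration of the bytes-regex idea; this simplified converter is a stand-in for _String_To_Bytes with add_escapes, and assumes plain hex pairs with '..' wildcards:

import re

def sketch_ref_to_pattern(ref_code):
    'Convert a "8b..90" style hex/wildcard string into a bytes regex.'
    pattern = b''
    for i in range(0, len(ref_code), 2):
        pair = ref_code[i:i + 2]
        if pair == '..':
            pattern += b'.'                 # Wildcard byte.
        else:
            pattern += re.escape(bytes([int(pair, 16)]))
    # DOTALL so the wildcard also matches newline bytes.
    return re.compile(pattern, flags=re.DOTALL)

binary = bytearray(b'\x8b\x41\x90\x8b\x42\x90')
print([m.start() for m in sketch_ref_to_pattern('8b..90').finditer(binary)])
# -> [0, 3]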
Example #12
def Run(*args):
    '''
    Run the customizer.
    This expects a single argument: the name of the .py file to load,
    which specifies file paths and the transforms to run. Some other
    command line args are supported. Excess args will be placed in
    sys.argv for the called script to parse with argparse if desired.
    '''
    # Rename the settings for convenience.
    Settings = Framework.Settings

    # Set up command line arguments.
    argparser = argparse.ArgumentParser(
        description='Main function for running X4 Customizer version {}.'.
        format(Framework.Change_Log.Get_Version()),
        # Special setting to add default values to help messages.
        # -Removed; doesn't work on positional args.
        #formatter_class = argparse.ArgumentDefaultsHelpFormatter,

        # To better support nested scripts doing their own argparsing,
        #  prevent abbreviation support at this level.
        allow_abbrev=False,
    )

    # Set this to default to None, which will be caught manually.
    argparser.add_argument(
        'control_script',
        # Consume 0 or 1 argument.
        # This prevents an error message when an arg not given,
        # and the default is used instead.
        nargs='?',
        help='Python control script which will run directly instead of'
        ' launching the gui; path may be given relative to the'
        ' Scripts folder; .py extension is optional.')

    # Flag to clean out old files.
    argparser.add_argument(
        '-clean',
        action='store_true',
        help='Cleans out any files created on the prior run,'
        ' and reverts any file renamings back to their original names.'
        ' Files in the user source folder will be moved to the game'
        ' folders without modifications.'
        ' Still requires a user_transform file which specifies'
        ' the necessary paths, but transforms will not be run.')

    argparser.add_argument(
        '-dev',
        action='store_true',
        help='Enables developer mode, which makes some changes to'
        ' exception handling.')

    argparser.add_argument('-quiet',
                           action='store_true',
                           help='Hides some status messages.')

    argparser.add_argument(
        '-test',
        action='store_true',
        help='Performs a test run of the transforms, behaving like'
        ' a normal run but not writing out modified files.')

    argparser.add_argument(
        '-argpass',
        action='store_true',
        help='Indicates the control script has its own arg parsing;'
        ' extra args and "-h" are passed through sys.argv.')

    argparser.add_argument(
        '-nogui',
        action='store_true',
        help='Suppresses the gui from launching; a default script'
        ' will attempt to run if no script was given.')

    # Capture leftover args.
    # Note: when tested, this appears to be buggy, and was grabbing
    # "-dev" even though that has no ambiguity; parse_known_args
    # works better.
    #argparser.add_argument('args', nargs=argparse.REMAINDER)

    # Parsing behavior will change depending on if args are being
    # passed downward.
    if '-argpass' not in args:
        # Do a normal arg parsing.
        args = argparser.parse_args(args)

    else:
        # Pick out the -h flag, so help can be printed in the
        # control script instead of here. Also catch --help.
        pass_help_arg = None
        for help_str in ['-h', '--help']:
            if help_str in args:
                pass_help_arg = help_str
                # Need to swap from tuple to list to remove an item.
                args = list(args)
                args.remove(help_str)

        # Do a partial parsing.
        args, remainder = argparser.parse_known_args(args)

        # Put the remainder in sys.argv so called scripts can use it;
        # these should go after the first argv (always the called script name,
        # eg. Main.py).
        sys.argv = [sys.argv[0]] + remainder
        # Add back in the -h flag.
        if pass_help_arg:
            sys.argv.append(pass_help_arg)

    # Check for a gui launch.
    # This has been changed to act as the default when no script is given.
    if not args.nogui and not args.control_script:
        # In this case, the gui takes over and no script is expected.
        # TODO: maybe pass an input script path to the gui, but it
        # isn't important.
        Plugins.GUI.Start_GUI()
        # Return when the gui closes.
        return

    # Set the input script to default if one wasn't given.
    if not args.control_script:
        args.control_script = 'Default_Script'
    # Convenience flag for when the default script is in use.
    using_default_script = args.control_script == 'Default_Script'

    # Convert the script to a Path, for convenience.
    args.control_script = Path(args.control_script)

    # Add the .py extension if needed.
    if args.control_script.suffix != '.py':
        args.control_script = args.control_script.with_suffix('.py')

    # If the given script isn't found, try finding it in the scripts folder
    # or its subdirectories.
    # Only support this switch for relative paths.
    if not args.control_script.exists(
    ) and not args.control_script.is_absolute():
        # Recursive search for a matching script name.
        for path in scripts_dir.glob(f'**/{args.control_script.name}'):
            # Use the first one found.
            args.control_script = path
            break
        #alt_path = scripts_dir / args.control_script
        #if alt_path.exists():
        #    args.control_script = alt_path

    # Handle if the script isn't found.
    if not args.control_script.exists():
        # If the default script is in use, Main may have been called with
        # no args, in which case print the argparse help.
        if using_default_script:
            argparser.print_help()

        # Follow up with an error on the control script name.
        Print('Error: {} not found.'.format(args.control_script))

        # Print some extra help text if the user tried to run the default
        #  script from the bat file.
        if using_default_script:
            # Avoid word wrap in the middle of a word by using an explicit
            #  newline.
            Print('For new users, please open Scripts/'
                  'Default_Script_template.py\n'
                  'for first time setup instructions.')
        return

    # Add the script location to the search path, so it can include
    # other scripts at that location.
    # This will often just be the Scripts folder, which is already in
    # the sys.path, but for cases when it is not, put this path
    # early in the search order.
    # TODO: remove this path when done, for use in gui when it might
    # switch between multiple scripts.
    control_script_dir = args.control_script.parent
    if str(control_script_dir) not in sys.path:
        sys.path.insert(0, str(control_script_dir))

    # Handle other args.
    if args.quiet:
        Settings.verbose = False

    if args.clean:
        Print('Enabling cleanup mode; plugins will be skipped.')
        Settings.skip_all_plugins = True

    if args.dev:
        Print('Enabling developer mode.')
        Settings.developer = True

    if args.test:
        Print('Performing test run.')
        # This uses the disable_cleanup flag.
        Settings.disable_cleanup_and_writeback = True

    Print('Calling {}'.format(args.control_script))
    try:
        # Attempt to load/run the module.
        # TODO: some way to detect if this is not a valid script, other
        # than whatever possible error occurring, eg. if the user tried
        # to run one of the plugin files.

        import importlib
        module = importlib.machinery.SourceFileLoader(
            # Provide the name sys will use for this module.
            # Use the basename to get rid of any path, and prefix
            #  to ensure the name is unique (don't want to collide
            #  with other loaded modules).
            'control_script_' + args.control_script.name.replace(' ', '_'),
            # Just grab the name; it should be found on included paths.
            str(args.control_script)).load_module()

        #Print('Run complete')

        # Since a plugin normally handles file cleanup and writeback,
        #  cleanup needs to be done manually here when needed.
        if args.clean:
            Framework.File_System.Cleanup()

    except Exception as ex:
        # Make a nice message, to prevent a full stack trace being
        #  dropped on the user.
        Print('Exception of type "{}" encountered.\n'.format(
            type(ex).__name__))
        ex_text = str(ex)
        if ex_text:
            Print(ex_text)

        # Close the plugin log safely (in case of raising another
        #  exception).
        Framework.Common.Logs.Plugin_Log.Close()

        # In dev mode, print the exception traceback.
        if Settings.developer:
            Print(traceback.format_exc())
            # Raise it again, just in case vs can helpfully break
            # at the problem point. (This won't help with the gui up.)
            raise ex
        #else:
        #    Print('Enable developer mode for exception stack trace.')

    return
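Hypothetical invocations, with placeholder script names; the argument list mirrors what the launcher bat file forwards from the command line:

Run('my_transforms.py', '-dev')           # Run a control script in dev mode.
Run('my_transforms', '-test', '-quiet')   # Test run; .py suffix is optional.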
Example #13
def Cat_Pack(
    source_dir_path,
    dest_cat_path,
    include_pattern=None,
    exclude_pattern=None,
    generate_sigs=True,
    separate_sigs=False,
):
    '''
    Packs all files in subdirectories of the given directory into a
    new catalog file.  Only subdirectories matching those used
    in the X4 file system are considered.

    * source_dir_path
      - Path to the directory holding subdirectories to pack.
      - Subdirectories are expected to match typical X4 folder names,
        eg. 'aiscripts','md', etc.
    * dest_cat_path
      - Path and name for the catalog file being generated.
      - Prefix the cat file name with 'ext_' when patching game files,
        or 'subst_' when overwriting game files.
    * include_pattern
      - String or list of strings, optional, wildcard patterns for file
        names to include in the packed output.
      - Eg. "*.xml" to pack only xml files, "md/*" to pack only
        mission director files, etc.
      - Case is ignored.
    * exclude_pattern
      - String or list of strings, optional, wildcard patterns for file
        names to exclude from the packed output.
      - Eg. "['*.lua','*.dae']" to skip lua and dae files.
    * generate_sigs
      - Bool, if True then dummy signature files will be created.
    * separate_sigs
      - Bool, if True then any signatures will be moved to a second
        cat/dat pair suffixed with .sig.
    '''
    # Do some error checking on the paths.
    try:
        source_dir_path = Path(source_dir_path)
        assert source_dir_path.exists()
    except Exception:
        raise AssertionError(
            'Error in the source path ({})'.format(source_dir_path))

    try:
        dest_cat_path = Path(dest_cat_path)
        # Error if it is an existing folder (and not a file).
        assert not dest_cat_path.is_dir()
        # Error if it doesn't end in '.cat'.
        assert dest_cat_path.suffix == '.cat'
        # Make the dest dir if needed.
        dest_cat_path.parent.mkdir(parents=True, exist_ok=True)
    except Exception:
        raise AssertionError(
            'Error in the dest path ({})'.format(dest_cat_path))

    # Pack up the patterns given to always be lists or None.
    if isinstance(include_pattern, str):
        include_pattern = [include_pattern]
    if isinstance(exclude_pattern, str):
        exclude_pattern = [exclude_pattern]

    # Prepare a new catalog.
    cat_writer = File_Manager.Cat_Writer.Cat_Writer(cat_path=dest_cat_path)

    # Set up a reader for the source location.
    # Assume in the general case that this is an extension, and will
    # want to grab stuff from a nested "extensions" subfolder.
    source_reader = File_Manager.Source_Reader.Location_Source_Reader(
        location=source_dir_path, is_extension=True)

    # Pick out the subfolders to be included.
    subfolder_names = File_Manager.Source_Reader_Local.valid_virtual_path_prefixes

    num_writes = 0
    num_pattern_skips = 0
    num_folder_skips = 0

    # Pull out all of the files.
    for virtual_path, abs_path in sorted(
            source_reader.Get_All_Loose_Files().items()):

        # Skip if a pattern given and this doesn't match.
        if not _Pattern_Match(virtual_path, include_pattern, exclude_pattern):
            num_pattern_skips += 1
            continue

        # Skip all that do not match an expected X4 subfolder.
        if not any(virtual_path.startswith(x) for x in subfolder_names):
            num_folder_skips += 1
            continue

        # Get the file binary; skip the Read_File function since that
        #  returns a semi-processed game file (eg. stripping off xml
        #  headers and such), and just want pure binary here.
        (file_path, file_binary) = source_reader.Read_Loose_File(virtual_path)
        # Pack into a game_file, expected by the cat_writer.
        game_file = File_Manager.File_Types.Misc_File(
            virtual_path=virtual_path, binary=file_binary)
        cat_writer.Add_File(game_file)

        # Be verbose for now.
        num_writes += 1
        Print('Packed {}'.format(virtual_path))

    # If no files found, skip cat creation.
    if num_writes != 0:
        # Generate the actual cat file.
        cat_writer.Write(
            generate_sigs=generate_sigs,
            separate_sigs=separate_sigs,
        )

    Print('Files written                    : {}'.format(num_writes))
    Print('Files skipped (pattern mismatch) : {}'.format(num_pattern_skips))
    Print('Files skipped (not x4 subdir)    : {}'.format(num_folder_skips))
    return
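A hypothetical call; the paths and patterns are placeholders:

Cat_Pack('my_extension_src', 'my_extension/ext_01.cat',
         include_pattern=['md/*', 'aiscripts/*'],
         generate_sigs=True)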
Example #14
def Cat_Unpack(source_cat_path,
               dest_dir_path,
               include_pattern=None,
               exclude_pattern=None,
               allow_md5_errors=False):
    '''
    Unpack a single catalog file, or a group if a folder is given.
    When a file is in multiple catalogs, the latest one in the list
    will be used. If a file is already present at the destination,
    it is compared to the catalog version and skipped if the same.

    * source_cat_path
      - Path to the catalog file, or to a folder.
      - When a folder is given, catalogs are read in X4 priority order
        according to their expected names.
    * dest_dir_path
      - Path to the folder where unpacked files are written.
    * include_pattern
      - String or list of strings, optional, wildcard patterns for file
        names to include in the unpacked output.
      - Eg. "*.xml" to unpack only xml files
      - Case is ignored.
    * exclude_pattern
      - String or list of strings, optional, wildcard patterns for file
        names to exclude from the unpacked output.
      - Eg. "['*.lua']" to skip lua files.
    * allow_md5_errors
      - Bool, if True then files with md5 errors will be unpacked, otherwise
        they are skipped.
      - Such errors may arise from poorly constructed catalog files.
    '''
    # Do some error checking on the paths.
    try:
        source_cat_path = Path(source_cat_path).resolve()
        assert source_cat_path.exists()
    except Exception:
        raise AssertionError(
            'Error in the source path ({})'.format(source_cat_path))

    try:
        dest_dir_path = Path(dest_dir_path).resolve()
        # Make the dest dir if needed.
        # -Removed; create it only when a file gets unpacked, so that it
        # doesn't make a spurious folder if the bat file is launched
        # directly from the customizer folder.
        #dest_dir_path.mkdir(parents = True, exist_ok = True)
    except Exception:
        raise AssertionError(
            'Error in the dest path ({})'.format(dest_dir_path))

    # Pack up the patterns given to always be lists or None.
    if isinstance(include_pattern, str):
        include_pattern = [include_pattern]
    if isinstance(exclude_pattern, str):
        exclude_pattern = [exclude_pattern]

    # Sourcing behavior depends on whether a folder or file was given.
    if source_cat_path.is_dir():

        # Set up a reader for the source location.
        # If this is an extension, it needs some more annotation; can
        # test for the content.xml at the path.
        extension_summary = None
        content_xml_path = source_cat_path / 'content.xml'
        if content_xml_path.exists():
            extension_summary = File_Manager.Extension_Finder.Extension_Summary(
                content_xml_path)

        source_reader = File_Manager.Source_Reader.Location_Source_Reader(
            location=source_cat_path, extension_summary=extension_summary)

        # Print how many catalogs were found.
        Print((
            '{} catalog files found using standard naming convention.').format(
                len(source_reader.catalog_file_dict)))
    else:
        # Set up an empty reader.
        source_reader = File_Manager.Source_Reader.Location_Source_Reader(
            location=None)
        # Manually add the cat path to it.
        source_reader.Add_Catalog(source_cat_path)

    # Some counts for printout at the end.
    num_writes = 0
    num_pattern_skips = 0
    num_hash_skips = 0
    num_md5_skips = 0

    # TODO:
    # Record a json record of already extracted file hashes, for fast
    # checking them instead of re-hashing every time.

    # TODO:
    # Switch to pulling out all virtual_paths first, then use fnmatch.filter
    # on them for each pattern, then use some set operations to merge the
    # results down to the desired set of paths.
    # This would mostly be useful if switching to storing hashes from
    # prior extractions for fast comparison, as currently the hashing
    # takes far more time than the fnmatching.

    # Loop over the Cat_Entry objects; the reader takes care of
    #  cat priorities.
    # Note: virtual_path is lowercase, but cat_entry.cat_path has
    #  original case.
    for virtual_path, cat_entry in source_reader.Get_Cat_Entries().items():

        # Skip if a pattern given and this doesn't match.
        if not _Pattern_Match(virtual_path, include_pattern, exclude_pattern):
            num_pattern_skips += 1
            continue

        dest_path = dest_dir_path / cat_entry.cat_path

        # To save some effort, check if the file already exists at
        #  the dest, and if so, get its md5 hash.
        if dest_path.exists():
            existing_binary = dest_path.read_bytes()
            dest_hash = File_Manager.Cat_Reader.Get_Hash_String(
                existing_binary)
            # If hashes match, skip.
            # Ego uses 0's instead of a proper hash for empty files, so also
            # check that case.
            if (dest_hash == cat_entry.hash_str
                    or (not existing_binary and cat_entry.hash_str
                        == '00000000000000000000000000000000')):
                num_hash_skips += 1
                continue

        # Make a folder for the dest if needed.
        dest_path.parent.mkdir(parents=True, exist_ok=True)

        # Get the file binary, catching any md5 error.
        # This will only throw the exception if allow_md5_errors is False.
        try:
            cat_path, file_binary = source_reader.Read_Catalog_File(
                virtual_path, allow_md5_error=allow_md5_errors)
        except Cat_Hash_Exception:
            num_md5_skips += 1
            continue

        # Write it back out to the destination.
        with open(dest_path, 'wb') as file:
            file.write(file_binary)

        # Be verbose for now.
        num_writes += 1
        Print('Extracted {}'.format(virtual_path))

    Print('Files written                    : {}'.format(num_writes))
    Print('Files skipped (pattern mismatch) : {}'.format(num_pattern_skips))
    Print('Files skipped (hash match)       : {}'.format(num_hash_skips))
    Print('Files skipped (md5 hash failure) : {}'.format(num_md5_skips))

    return
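A hypothetical call; the paths are placeholders:

Cat_Unpack('C:/Games/X4', 'x4_extracted',
           include_pattern='*.xml',
           exclude_pattern=['*.lua'])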
Example #15
def Generate_Diffs(
    original_dir_path,
    modified_dir_path,
    output_dir_path,
    skip_unchanged=False,
    verbose=False,
):
    '''
    Generate diffs for changes between two xml-containing folders,
    creating diff patches.

    * original_dir_path
      - Path to the directory of original xml files that act as the baseline.
    * modified_dir_path
      - Path to the directory holding the modified versions of the files.
    * output_dir_path
      - Path to the directory to write the diff patches to.
    * skip_unchanged
      - Bool, skip output for files that are unchanged (removing any
        existing diff patch).
      - Default will generate empty diff patches.
    * verbose
      - Bool, print the path of the outputs on successful writes.
    '''
    # Cast to paths to be safe.
    original_dir_path = Path(original_dir_path).resolve()
    modified_dir_path = Path(modified_dir_path).resolve()
    output_dir_path = Path(output_dir_path).resolve()

    # Gather all xml files from the input directories.
    # Make dicts for ease of use, keyed by relative path from the
    # base folder.
    #original_paths = {x.relative_to(original_dir_path) : x for x in original_dir_path.glob('**/*.xml')}
    modified_paths = {
        x.relative_to(modified_dir_path): x
        for x in modified_dir_path.glob('**/*.xml')
    }

    # Pair off the modified files with originals by name.
    # If an original is not found, print a message and skip the file.
    # Ignore excess originals.
    for rel_path, mod_path in modified_paths.items():

        orig_path = original_dir_path / rel_path
        if not orig_path.is_file():
            Print('No matching original file found for {}'.format(rel_path))
            continue

        # Set up the output.
        out_path = output_dir_path / rel_path

        if verbose:
            Print('Generating diff for {}'.format(rel_path.name))

        # Generate the diff. If this errors, the file will be skipped
        # (due to plugin wrapper).
        Generate_Diff(original_file_path=orig_path,
                      modified_file_path=mod_path,
                      output_file_path=out_path,
                      skip_unchanged=skip_unchanged,
                      verbose=verbose)

    return
Example #16
def Update_Content_XML_Dependencies():
    '''
    Update the dependencies in the content xml file, based on which other
    extensions touched files modified by the current script.
    If applied to an existing content.xml (not one created here), existing
    dependencies are kept, and only customizer dependencies are updated.

    Note: an existing xml file may lose custom formatting.
    '''
    # TODO: framework needs more development to handle cases with an
    # existing content.xml cleanly, since currently the output extension is
    # always ignored, and there is no particular method of dealing with
    # output-ext new files not having an extensions/... path.

    # Try to load a locally created content.xml.
    content_file = Load_File('content.xml', error_if_not_found = False)

    # If not found, then search for an existing content.xml on disk.
    if not content_file:
        # Manually load it.
        content_path = Settings.Get_Output_Folder() / 'content.xml'
        # Verify the file exists.
        if not content_path.exists():
            Print('Error in Update_Content_XML_Dependencies: could not find an existing content.xml file')
            return

        content_file = File_System.Add_File( XML_File(
            # Plain file name as path, since this will write back to the
            # extension folder.
            virtual_path = 'content.xml',
            binary = content_path.read_bytes(),
            # Edit the existing file.
            edit_in_place = True,
            ))

    root = content_file.Get_Root()
    
    # Set the ID based on replacing spaces.
    this_id = Settings.extension_name.replace(' ','_')
    
    # Remove old dependencies from the customizer, and record others.
    existing_deps = []
    for dep in root.xpath('./dependency'):
        if dep.get('from_customizer'):
            dep.getparent().remove(dep)
        else:
            existing_deps.append(dep.get('id'))

    # Add in dependencies to existing extensions.
    # These should be limited to only those extensions which sourced
    #  any of the files which were modified.
    # TODO: future work can track specific node edits, and set dependencies
    #  only where transform modified nodes might overlap with extension
    #  modified nodes.
    # Dependencies use extension ids, so this will do name to id
    #  translation.
    # Note: multiple dependencies may share the same ID if those extensions
    #  have conflicting ids; don't worry about that here.
    source_extension_ids = set()
    for file_name, game_file in File_System.game_file_dict.items():
        if not game_file.modified:
            continue
        
        for ext_name in game_file.source_extension_names:
            # Translate extension names to ids.
            ext_id = File_System.source_reader.extension_source_readers[
                                ext_name].extension_summary.ext_id
            source_extension_ids.add(ext_id)

    # Add the elements; keep alphabetical for easy reading.
    for ext_id in sorted(source_extension_ids):

        # Omit self, just in case; shouldn't come up, but might.
        if ext_id == this_id:
            Print('Error: output extension appears in its own dependencies,'
                  ' indicating it transformed its own prior output.')
            continue

        # Skip if already defined.
        if ext_id in existing_deps:
            continue
        
        # Add it, with a tag to indicate it came from the customizer.
        # TODO: optional dependencies?
        root.append( ET.Element('dependency', id = ext_id, from_customizer = 'true' ))

    content_file.Update_Root(root)
    return
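For reference, the appended dependency element serializes along these lines, assuming ET is lxml.etree as elsewhere in the codebase (the id is a placeholder):

from lxml import etree as ET

dep = ET.Element('dependency', id='some_ext_id', from_customizer='true')
print(ET.tostring(dep).decode())
# -> <dependency id="some_ext_id" from_customizer="true"/>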
Example #17
def _Build_Ware_Objects():
    '''
    Returns a list of Edit_Objects for all found wares.
    Meant for calling from the Live_Editor.
    '''
    #t_file = Load_File('t/0001-L044.xml')
    # Look up the ware file.
    wares_file = Load_File('libraries/wares.xml')
    xml_root = wares_file.Get_Root_Readonly()

    # Get the ware nodes; only first level children.
    ware_nodes = wares_file.Get_Root_Readonly().findall('./ware')

    start_time = time.time()

    # TODO: maybe condition this on if Settings.disable_threading is
    # set or not.
    if 1:
        '''
        Try out multiprocessing to speed this up.

        Observations, letting python split up ware nodes:
        - Time goes from ~20 to ~30 seconds with 1 worker.
        - Down to ~10 seconds with 6 workers; not much gain.

        To possibly reduce data copy overhead, split up the ware nodes
        manually into lists, and send a single full list to each worker.
        - Down to 7-8 seconds from doing this.
        - Still not great, but more than 2x speedup, so it's something.
        - Note: for different process counts, best was at system
          max threads, with higher counts not losing much time.
        - Can let the Pool handle the thread counting automatically,
          and it does get close, though that doesn't help with picking
          the work unit size.
        - Update: after making production nodes conditional, normal
          runs went 20 to ~4.5 seconds, and this went down to ~2.5.
        
        Later observations:
        - The node ids tagged onto xml element tails seem to be
          transferred okay through pickling, except the one on the
          root node that has to be pruned.

        - The file system appears to get completely replicated for
          every thread, such that the wares file gets node ids
          applied once globally and once per thread.
          The global node ids are higher than the threads since they
          are offset somewhat by any prior loaded xml, while the threads
          all start from 0.

        - This node id discrepancy means the loaded elements mess up
          the live editor patch matching, where editing maja snails
          ends up changing marines.

        - How can the stupid python threading be prevented from making
          such a dumb complete system copy that doesn't even catch
          everything? Eg. it should at least be copying the original
          node ids, not starting from scratch.
          - It seems like it goes:
            - Item gets created with paths
            - Item runs value init
            - Value init calls Load_File, expecting it to be a quick
              dict lookup.
            - Multiprocessing barfs on itself and makes a new copy
              of the file system that does not have the wanted
              file loaded, and has to reload it from disk (with
              diff patching).

        - Workaround: change how object building works, such that
          items are linked directly to their source game file and
          do not have to do a file system load.
          Further, tweak the pickler to keep the tag on the top
          element copied.
          Result: things seem to work okay now.
        '''

        # Pick the process runs needed to do all the work.
        # Leave 1 thread free for system stuff.
        num_processes = max(1, cpu_count() - 1)
        max_nodes_per_worker = len(ware_nodes) // num_processes + 1
        slices = []
        start = 0
        while start < len(ware_nodes):
            # Compute the end point, limiting to the last node.
            end = start + max_nodes_per_worker
            if end > len(ware_nodes):
                end = len(ware_nodes)
            # Get the slice and store it.
            slices.append(ware_nodes[start:end])
            # Update the start.
            start = end

        # Use a starmap for this, since it needs to pass both the
        # wares file and the ware node. Starmap will splat out
        # the iterables.
        inputs = [(node_slice, wares_file) for node_slice in slices]

        pool = Pool()  # processes = num_processes
        ware_edit_objects = sum(pool.starmap(
            _Create_Objects,
            inputs,
        ), [])

    else:
        # Single thread style.
        ware_edit_objects = _Create_Objects(ware_nodes, wares_file)

    Print(
        'Ware Edit_Objects creation took {:0.2f} seconds'.format(time.time() -
                                                                 start_time))

    return ware_edit_objects
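The manual slicing plus starmap pattern above, reduced to a standalone sketch with a stand-in worker and data:

from multiprocessing import Pool, cpu_count

def _work(items, tag):
    'Stand-in worker; the real one builds Edit_Objects from ware nodes.'
    return ['{}:{}'.format(tag, i) for i in items]

if __name__ == '__main__':
    data = list(range(100))
    num_processes = max(1, cpu_count() - 1)
    per_worker = len(data) // num_processes + 1
    slices = [data[i:i + per_worker] for i in range(0, len(data), per_worker)]

    with Pool() as pool:
        # starmap splats each (slice, tag) tuple into _work's arguments;
        # sum(..., []) flattens the per-worker lists back into one list.
        results = sum(pool.starmap(_work, [(s, 'ware') for s in slices]), [])
    print(len(results))  # -> 100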