Esempio n. 1
0
    def get_backup_paths_by_timestamps(self,
                                       filepath,
                                       ignore_future_dates=False):
        '''
        Return a dict mapping modification timestamps to the Paths of
        every backup of 'filepath' found under its backup directory.

        A file counts as a backup when its basename is the source
        basename, optionally followed by an underscore/space separated
        integer suffix.  When ignore_future_dates is True, files whose
        mtime lies in the future are skipped.
        '''
        results = {}
        backup_dir = self.get_backup_dir(filepath)
        real_path = Path(path_normalize(os.path.realpath(str(filepath))))
        base_name = real_path.stem.lower()

        for dirpath, _, filenames in os.walk(str(backup_dir)):
            for name in filenames:
                full_path = path_normalize(join(dirpath, name))

                # split the candidate basename around the source
                # basename.  anything left of the match means the names
                # don't match; anything right of it must parse as an
                # integer(after stripping separators) to be a backup
                try:
                    stem = os.path.splitext(name)[0].lower()
                    left, right = stem.split(base_name)
                    if left:
                        continue
                    if right:
                        int(right.lstrip("_ "))

                    mtime = os.path.getmtime(full_path)
                    if not ignore_future_dates or mtime <= time.time():
                        results[mtime] = Path(full_path)
                except Exception:
                    # not a backup of this tag, or the file vanished
                    pass

        return results
Esempio n. 2
0
    def populate_dependency_tree(self):
        '''
        Load the tag named in the filepath field and point the
        dependency frame at it, then refresh the frame's display.

        Bails out with a console message if no path is set, the
        current tag set is not tags-dir-relative, the path falls
        outside the tags directory, or the tag fails to load.
        '''
        tag_path = self.tag_filepath.get()
        if not tag_path:
            return

        app = self.app_root
        handler = self.handler = app.handler
        curr_name = app.handler_names[app._curr_handler_index]
        if curr_name not in app.tags_dir_relative:
            print("Change the current tag set.")
            return

        tag_path = path_normalize(tag_path)
        tags_dir = handler.tagsdir

        if not is_in_dir(tag_path, tags_dir):
            print("%s\nis not in tagsdir\n%s" % (tag_path, tags_dir))
            return

        rel_path = Path(tag_path).relative_to(tags_dir)
        tag = self.get_tag(rel_path)
        if tag is None:
            print("Could not load tag:\n    %s" % tag_path)
            return

        frame = self.dependency_frame
        frame.handler = handler
        frame.tags_dir = tags_dir
        frame.root_tag_path = tag.filepath
        frame.root_tag_text = rel_path
        frame.reload()
Esempio n. 3
0
    def get_unique_filename(self, filepath, dest, src=(), rename_tries=None):
        '''
        Attempts to rename the string 'filepath' to a name that
        does not already exist in 'dest' or 'src'. This is done by
        incrementing a number on the end of the filepath(if it's a
        valid integer), or appending one if one doesnt already exist.

        Raises RuntimeError if 'rename_tries' is exceeded.

        Required arguments:
            filepath(str)
            dest(iterable)
        Optional arguments:
            src(iterable)
            rename_tries(int)

        src and dest are iterables which contain the filepaths to
        check against to see if the generated filename is unique.
        '''
        filepath = str(filepath)
        splitpath, ext = splitext(path_normalize(filepath))
        newpath = splitpath

        # sets are MUCH faster for testing membership than lists
        src = set(src)
        dest = set(dest)

        # if the name already ends with '_<integer>', resume counting
        # from that integer and strip the suffix off.  otherwise start
        # counting at 0 and append a fresh '_' separator.
        i = 0
        oldpath = splitpath + '_'
        last_us = splitpath.rfind('_')
        if last_us != -1:
            try:
                i = int(splitpath[last_us + 1:])
                oldpath = splitpath[:last_us] + '_'
            except ValueError:
                # whatever follows the last underscore isn't an
                # integer, so treat it as if there's no underscore
                pass

        # default the try limit to the number of names we must avoid
        if rename_tries is None:
            rename_tries = len(src) + len(dest)

        # increase rename_tries by the number we are starting at
        rename_tries += i

        # keep generating candidates until the name
        # doesnt already exist in both src or dest
        while (newpath + ext) in dest or (newpath + ext) in src:
            newpath = oldpath + str(i)
            if i > rename_tries:
                raise RuntimeError("Maximum attempts exceeded while " +
                                   "trying to find a unique name for " +
                                   "the tag:\n    %s" % filepath)
            i += 1

        return newpath + ext
    def jma_dir_browse(self):
        '''
        Prompt the user for the folder of animations to compile and
        store the chosen directory in self.jma_dir.  Also attempts to
        derive the model_animations tag path from the chosen folder,
        and fills in self.tags_dir if it is unset.

        Does nothing while a compile, load, or save is in progress.
        '''
        if self._compiling or self._loading or self._saving:
            return

        tags_dir = self.tags_dir.get()
        # Add data to the path and then use path_replace to match the case of any
        # data directory that might already be here.
        data_dir = str(
            path_replace(
                Path(tags_dir).parent.joinpath("data"), "data", "data"))

        # start browsing from the last animations dir,
        # falling back to the data directory
        start_dir = self.jma_dir.get()
        if not start_dir and tags_dir:
            start_dir = data_dir

        dirpath = path_normalize(
            askdirectory(
                initialdir=start_dir,
                parent=self,
                title="Select the folder of animations to compile..."))

        if not dirpath:
            return

        dirpath = str(Path(dirpath))
        if (tags_dir and data_dir and
                os.path.basename(dirpath).lower() == "animations"):
            object_dir = os.path.dirname(dirpath)

            if object_dir and is_in_dir(object_dir, data_dir):
                # the tag goes at <tags_dir>/<object rel path>/<object name>
                tag_path = os.path.join(object_dir,
                                        os.path.basename(object_dir))
                tag_path = os.path.join(tags_dir,
                                        os.path.relpath(tag_path, data_dir))
                self.model_animations_path.set(tag_path + ".model_animations")

        self.app_root.last_load_dir = os.path.dirname(dirpath)
        self.jma_dir.set(dirpath)
        if not self.tags_dir.get():
            # guess the tags dir as a 'tags' sibling of the data root
            self.tags_dir.set(
                os.path.join(path_split(self.app_root.last_load_dir, "data"),
                             "tags"))
    def tags_dir_browse(self):
        '''
        Prompt the user for the root of the tags directory and store
        it in self.tags_dir, rebasing the gbxmodel path onto the new
        directory when it no longer falls inside it.

        Does nothing while a compile, load, or save is in progress.
        '''
        if self._compiling or self._loading or self._saving:
            return

        prev_dir = self.tags_dir.get()
        new_dir = askdirectory(initialdir=prev_dir,
                               parent=self,
                               title="Select the root of the tags directory")
        if not new_dir:
            return

        new_dir = path_normalize(new_dir)

        mod2_path = self.gbxmodel_path.get()
        if prev_dir and mod2_path and not is_in_dir(mod2_path, new_dir):
            # adjust mod2 filepath to be relative to the new tags directory
            rebased = os.path.join(new_dir,
                                   os.path.relpath(mod2_path, prev_dir))
            self.gbxmodel_path.set(rebased)

        self.app_root.last_load_dir = os.path.dirname(new_dir)
        self.tags_dir.set(new_dir)
Esempio n. 6
0
    def scan(self):
        '''
        Scan the selected directory for tags of the selected def_ids,
        checking each for broken dependencies and tag-specific errors,
        and write the results to a logfile (or the console on failure).

        Reads self.logfile_path / self.directory_path / the def_id
        listbox selection for its inputs, and honors self.stop_scanning
        as a cancellation flag checked throughout the walk and scan.
        '''
        handler = self.handler
        self.stop_scanning = False

        logpath = path_normalize(self.logfile_path.get())
        dirpath = path_normalize(self.directory_path.get())

        # the scan dir must live under the tags dir so tag paths
        # can be made relative to it
        if not is_in_dir(dirpath, self.handler.tagsdir):
            print(
                "Specified directory is not located within the tags directory")
            return

        #this is the string to store the entire debug log
        log_name = "HEK Tag Scanner log"
        debuglog = "\n%s%s%s\n\n" % ("-" * 30, log_name, "-" *
                                     (50 - len(log_name)))
        debuglog += "tags directory = %s\nscan directory = %s\n\n" % (
            self.handler.tagsdir, dirpath)
        debuglog += "Broken dependencies are listed below.\n"
        # maps def_id -> accumulated error text from tag_specific_scan
        tag_specific_errors = {}

        get_nodes = handler.get_nodes_by_paths
        get_tagref_invalid = handler.get_tagref_invalid

        # s_time: scan start; c_time: last progress print;
        # p_int: minimum seconds between progress prints
        s_time = time()
        c_time = s_time
        p_int = self.print_interval

        # one empty path list per def_id selected in the listbox
        all_tag_paths = {
            self.listbox_index_to_def_id[int(i)]: []
            for i in self.def_ids_listbox.curselection()
        }
        ext_id_map = handler.ext_id_map
        id_ext_map = handler.id_ext_map

        print("Locating tags...")

        # first pass: walk the scan dir and bucket every tag filepath
        # (relative to the tags dir) under its def_id
        for root, directories, files in os.walk(dirpath):
            root = path_normalize(os.path.join(root, ""))

            rel_root = Path(root).relative_to(self.handler.tagsdir)

            for filename in files:
                filepath = rel_root.joinpath(filename)

                # throttled progress output so the GUI stays responsive
                if time() - c_time > p_int:
                    c_time = time()
                    print(' ' * 4, filepath, sep="")
                    self.app_root.update_idletasks()

                if self.stop_scanning:
                    print('Tag scanning operation cancelled.\n')
                    return

                # files whose extension maps to an unselected (or
                # unknown) def_id get a None bucket and are skipped
                tag_paths = all_tag_paths.get(
                    ext_id_map.get(os.path.splitext(filename)[-1].lower()))

                if tag_paths is not None:
                    tag_paths.append(filepath)

        # make the debug string by scanning the tags directory
        for def_id in sorted(all_tag_paths.keys()):
            # paths within this def_id's tag structure that hold
            # dependencies; None means the def has no dependencies
            tag_ref_paths = handler.tag_ref_cache.get(def_id)

            self.app_root.update_idletasks()
            print("Scanning '%s' tags..." % id_ext_map[def_id][1:])
            tags_coll = all_tag_paths[def_id]

            # always display the first tag's filepath
            c_time = time() - (p_int + 100)

            for filepath in sorted(tags_coll):
                if self.stop_scanning:
                    print('Tag scanning operation cancelled.\n')
                    break

                if time() - c_time > p_int:
                    c_time = time()
                    print(' ' * 4, filepath, sep="")
                    self.app_root.update_idletasks()

                tag = self.get_tag(self.handler.tagsdir.joinpath(filepath))
                if tag is None:
                    print("    Could not load '%s'" % filepath)
                    continue

                # find tag specific errors
                self.tag_specific_scan(tag, tag_specific_errors)

                try:
                    if tag_ref_paths is None:
                        # no dependencies for this tag. continue on
                        continue

                    # collect every dependency node whose referenced
                    # tag is missing or invalid
                    missed = get_nodes(tag_ref_paths, tag.data,
                                       get_tagref_invalid)

                    if not missed:
                        continue

                    debuglog += "\n\n%s\n" % filepath
                    block_name = None

                    # group the broken refs under their block name,
                    # printing each block header only once
                    for block in missed:
                        if block.NAME != block_name:
                            debuglog += '%s%s\n' % (' ' * 4, block.NAME)
                            block_name = block.NAME
                        try:
                            ext = '.' + block.tag_class.enum_name
                        except Exception:
                            # no usable tag_class on this ref
                            ext = ''
                        debuglog += '%s%s\n' % (' ' * 8, block.STEPTREE + ext)

                except Exception:
                    # scanning one tag must not abort the whole run
                    print(format_exc())
                    print("    Could not scan '%s'" % tag.filepath)
                    continue

            if self.stop_scanning:
                break

        if tag_specific_errors:
            debuglog += "\nTag specific errors are listed below.\n"

        for def_id in sorted(tag_specific_errors.keys()):
            debuglog += "\n\n%s specific errors:\n%s" % (
                def_id, tag_specific_errors[def_id])

        print("\nScanning took %s seconds." % int(time() - s_time))
        print("Writing logfile to %s..." % logpath)
        self.app_root.update_idletasks()

        # make and write to the logfile
        try:
            handler.make_log_file(debuglog, logpath)
            try:
                print("Scan completed.\n")
                if self.open_logfile.get():
                    open_in_default_program(logpath)

            except Exception:
                print("Could not open written log.")
            return
        except Exception:
            # fall back to dumping the log to the console, line by
            # line so one unprintable line can't kill the dump
            print("Could not create log. Printing log to console instead.\n\n")
            for line in debuglog.split('\n'):
                try:
                    print(line)
                except Exception:
                    print("<COULD NOT PRINT THIS LINE>")

            print("Scan completed.\n")
Esempio n. 7
0
    def reload_defs(self, **kwargs):
        """ this function is used to dynamically load and index
        all tag definitions for all valid tags. This allows
        functionality to be extended simply by creating a new
        definition and dropping it into the defs folder.

        Keyword arguments:
            valid_def_ids -- iterable of def_ids to load; all others
                             are skipped. Non-iterables are ignored.
            defs_path ------ import path to the definitions package.
            import_rootpath- root path used when importing defs.
            imp_paths ------ explicit module paths to import, which
                             bypasses the defs-folder search entirely.
        """
        ######################################################
        #
        # This whole function is pretty much a mess and needs
        # to be rewritten, especially since it wont work if a
        # definitions directory is given that isnt within an
        # already loaded module.
        #
        ######################################################

        # drop every currently loaded definition before re-indexing
        self.defs.clear()

        if not self.defs_path:
            self.defs_path = self.default_defs_path

        valid_ids = kwargs.get("valid_def_ids")
        if not hasattr(valid_ids, '__iter__'):
            # non-iterable means "no filter": load everything
            valid_ids = None
        elif not valid_ids:
            # an empty iterable means nothing is valid. nothing to do
            return

        # get the filepath or import path to the tag definitions module
        self.defs_path = kwargs.get("defs_path", self.defs_path)
        self.import_rootpath = kwargs.get("import_rootpath",
                                          self.import_rootpath)

        # if the defs_path is an empty string, return
        if not self.defs_path:
            return

        # cut off the trailing '.' if it exists
        if self.defs_path.endswith('.'):
            self.defs_path = self.defs_path[:-1]

        # import the root definitions module to get its absolute path
        defs_module = import_module(self.defs_path)

        # try to get the absolute folder path of the defs module
        try:
            # Try to get the filepath of the module
            self.defs_filepath = split(defs_module.__file__)[0]
        except Exception:
            # If the module doesnt have an __init__.py in the folder
            # then an exception will occur trying to get '__file__'
            # in the above code. This method must be used(which I
            # think looks kinda hacky)
            self.defs_filepath = tuple(defs_module.__path__)[0]
        self.defs_filepath = path_normalize(self.defs_filepath)

        if 'imp_paths' in kwargs:
            # caller supplied the module paths explicitly
            imp_paths = kwargs['imp_paths']
        elif is_main_frozen():
            # frozen executables can't walk the filesystem for defs,
            # so use the precompiled list of import paths
            imp_paths = self.frozen_imp_paths
        else:
            # Log the location of every python file in the defs root
            # search for possibly valid definitions in the defs folder
            imp_paths = []
            for root, _, files in os.walk(str(self.defs_filepath)):
                for module_path in files:
                    base, ext = splitext(module_path)

                    # do NOT use relpath here
                    fpath = Path(root.split(str(self.defs_filepath))[-1])

                    # make sure the file name ends with .py and isnt already loaded
                    if ext.lower() in (".py",
                                       ".pyw") and base not in imp_paths:
                        parts = fpath.parts + (base, )
                        if parts[0] == fpath.root:
                            # drop the leading path separator component
                            parts = parts[1:]

                        imp_paths.append('.'.join(parts))

        # load the defs that were found
        for mod_name in imp_paths:
            # try to import the definition module
            try:
                # build the dotted import path relative to defs_path
                fpath = Path(self.defs_path, mod_name)
                parts = fpath.parts
                if parts[0] == fpath.root:
                    parts = parts[1:]

                def_module = import_module('.'.join(parts))
            except Exception:
                def_module = None
                if self.debug >= 1:
                    print(format_exc() + "\nThe above exception occurred " +
                          "while trying to import a tag definition.\n\n")
                    continue

            # make sure this is a valid tag module by making a few checks
            if hasattr(def_module, 'get'):
                # finally, try to add the definition
                # and its constructor to the lists
                try:
                    tagdefs = def_module.get()

                    # a module's get() may return one def or a
                    # collection of them; normalize to a tuple
                    if not hasattr(tagdefs, '__iter__'):
                        tagdefs = (tagdefs, )

                    for tagdef in tagdefs:
                        try:
                            # if a def doesnt have a usable def_id, skip it
                            def_id = tagdef.def_id
                            if not bool(def_id):
                                continue

                            if def_id in self.defs:
                                raise KeyError(
                                    ("The def_id '%s' already " +
                                     "exists in the loaded defs " + "dict.") %
                                    def_id)

                            # if it does though, add it to the definitions
                            if valid_ids is None or def_id in valid_ids:
                                self.add_def(tagdef)
                        except Exception:
                            if self.debug >= 3:
                                raise

                except Exception:
                    if self.debug >= 2:
                        print(format_exc() +
                              "\nThe above exception occurred " +
                              "while trying to load a tag definition.")
Esempio n. 8
0
    def __init__(self, **kwargs):
        '''
        Initializes a Handler with the supplied keyword arguments.

        Keyword arguments:

        # bool
        allow_corrupt ---- Enables returning corrupt tags rather than discard
                           them and reporting the exception. Instead, the
                           exception will be printed to the console and the tag
                           will be returned like normal. For debug use only.
        check_extension -- Whether or not(when indexing tags) to make sure a
                           tag's extension also matches the extension for that
                           def_id. The main purpose is to prevent loading temp
                           files. This is only useful when overloading the
                           constructors 'get_def_id' function since the default
                           constructor verifies tags by their extension.
        write_as_temp ---- Whether or not to keep tags as temp files when
                           calling self.write_tags. Overridden by supplying
                           'temp' as a keyword when calling self.write_tags.
        backup ----------- Whether or not to backup a file that exists
                           with the same name as a tag that is being saved.
                           The file will be renamed with the extension
                           '.backup'. If a backup already exists then
                           the oldest backup will be kept.

        # dict
        tags ------------- A dict of dicts which holds every loaded tag.
                           Nested dicts inside the tags dict each hold all of
                           one def_id of tag, with each of the tags keyed by
                           their tag path(which is relative to self.tagsdir).
                           Accessing a tag is done like so:
                               tags[def_id][filepath] = Tag

        # int
        debug ------------ The level of debugging information to show. 0 to 10.
                           The higher the number, the more information shown.
                           Currently this is of very limited use.
        tags_loaded ------ This is the number of tags that were loaded when
                           self.load_tags() was run.

        # iterable
        valid_def_ids ---- Some form of iterable containing the def_id
                           strings that this Handler will be working with.
                           You may instead provide a single def_id string
                           if working with just one kind of tag.

        # str
        tagsdir ---------- A filepath string pointing to the working directory
                           which all our tags are loaded from and written to.
                           When adding a tag to tags[def_id][filepath]
                           the filepath key is the path to the tag relative to
                           this tagsdir string. So if the tagsdir
                           string were 'c:/tags/' and a tag were located in
                           'c:/tags/test/a.tag', filepath would be 'test/a.tag'
        log_filename ----- The name of the file all logs will be written to.
                           The file will be created in the tagsdir folder
                           if it doesn't exist. If it does exist, the file will
                           be opened and any log writes will be appended to it.
        '''

        # this is the filepath to the tag currently being constructed
        self.current_tag = ''
        self.tags_loaded = 0
        self.tags = {}

        self.import_rootpath = ''
        self.defs_filepath = ''
        self.defs_path = ''
        self.id_ext_map = {}
        self.defs = {}

        # valid_def_ids will determine which tag types are possible to load.
        # a lone def_id string is normalized to a 1-tuple here.
        if isinstance(kwargs.get("valid_def_ids"), str):
            kwargs["valid_def_ids"] = (kwargs["valid_def_ids"], )

        # simple flags and settings, with class-level values as defaults
        self.debug = kwargs.pop("debug", 0)
        self.log_filename = kwargs.pop("log_filename", self.log_filename)
        self.backup = bool(kwargs.pop("backup", True))
        self.int_test = bool(kwargs.pop("int_test", True))
        self.allow_corrupt = bool(kwargs.pop("allow_corrupt", False))
        self.write_as_temp = bool(kwargs.pop("write_as_temp", True))
        self.check_extension = bool(kwargs.pop("check_extension", True))
        self.case_sensitive = bool(kwargs.pop("case_sensitive", False))

        self.import_rootpath = kwargs.pop("import_rootpath",
                                          self.import_rootpath)
        self.defs_filepath = kwargs.pop("defs_filepath", self.defs_filepath)
        self.defs_path = kwargs.pop("defs_path", self.defs_path)

        self.tagsdir = path_normalize(kwargs.pop("tagsdir", self.tagsdir))
        self.tags = kwargs.pop("tags", self.tags)

        # either scan the defs folder for definitions, or
        # take them directly from the 'defs' keyword argument
        if kwargs.get("reload_defs", True):
            self.reload_defs(**kwargs)
        elif kwargs.get("defs"):
            defs = kwargs["defs"]
            if isinstance(defs, dict):
                defs = defs.values()
            for tagdef in defs:
                self.add_def(tagdef)

        # make slots in self.tags for the types we want to load
        if kwargs.get("reset_tags", True):
            self.reset_tags(self.defs.keys())