Example #1
    def project_closed(self, project):
        """
        Called when a project is closed.

        :param project: Project instance
        """
        yield from super().project_closed(project)
        # delete useless Dynamips files
        project_dir = project.module_working_path(self.module_name.lower())

        files = glob.glob(os.path.join(glob.escape(project_dir), "*.ghost"))
        files += glob.glob(os.path.join(glob.escape(project_dir), "*_lock"))
        files += glob.glob(os.path.join(glob.escape(project_dir), "ilt_*"))
        files += glob.glob(os.path.join(glob.escape(project_dir), "c[0-9][0-9][0-9][0-9]_i[0-9]*_rommon_vars"))
        files += glob.glob(os.path.join(glob.escape(project_dir), "c[0-9][0-9][0-9][0-9]_i[0-9]*_log.txt"))
        for file in files:
            try:
                log.debug("Deleting file {}".format(file))
                if file in self._ghost_files:
                    self._ghost_files.remove(file)
                yield from wait_run_in_executor(os.remove, file)
            except OSError as e:
                log.warn("Could not delete file {}: {}".format(file, e))
                continue

        # Release the dynamips ids if we want to reload the same project
        # later
        if project.id in self._dynamips_ids:
            del self._dynamips_ids[project.id]
Example #2
File: items.py Project: szsdk/symP
    def __init__(self, file_name, command=None, show_string=None, match_string=None):
        if not os.path.isfile(file_name):
            raise FileNotFoundError('%s does not exist' % file_name)
        self.file_name=file_name
        if show_string:
            self.show_string = show_string
        else:
            self.show_string = file_name

        if match_string:
            self.match_string = match_string
        else:
            self.match_string = self.show_string

        userpath = os.path.expanduser('~')
        if self.show_string[:len(userpath)] == userpath:
            self.show_string = '~'+self.show_string[len(userpath):]

        if command:
            self.command=command
        else:
            self.command=choose_program(file_name)

        self.rating = 1
Example #3
def _convert_2_0_0_beta_2(topo, topo_path):
    """
    Convert topologies from GNS3 2.0.0 beta 2 to beta 3.

    Changes:
     * Node id folders for dynamips
    """
    topo_dir = os.path.dirname(topo_path)
    topo["revision"] = 7

    for node in topo.get("topology", {}).get("nodes", []):
        if node["node_type"] == "dynamips":
            node_id = node["node_id"]
            dynamips_id = node["properties"]["dynamips_id"]

            dynamips_dir = os.path.join(topo_dir, "project-files", "dynamips")
            node_dir = os.path.join(dynamips_dir, node_id)
            try:
                os.makedirs(os.path.join(node_dir, "configs"), exist_ok=True)
                for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "*_i{}_*".format(dynamips_id))):
                    shutil.move(path, os.path.join(node_dir, os.path.basename(path)))
                for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))):
                    shutil.move(path, os.path.join(node_dir, "configs", os.path.basename(path)))
            except OSError as e:
                raise aiohttp.web.HTTPConflict(text="Can't convert project {}: {}".format(topo_path, str(e)))
    return topo
Example #4
    def _rename_nvram_file(self):
        """
        Before starting the VM, rename the nvram and vlan.dat files with the correct IOU application identifier.
        """

        destination = self._nvram_file()
        for file_path in glob.glob(os.path.join(glob.escape(self.working_dir), "nvram_*")):
            shutil.move(file_path, destination)
        destination = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id))
        for file_path in glob.glob(os.path.join(glob.escape(self.working_dir), "vlan.dat-*")):
            shutil.move(file_path, destination)
Example #5
    def addMissingImage(self, filename, server, vm_type):
        """
        Add a missing image to the queue of images that need to be uploaded to a remote server.

        :param filename: Filename of the image
        :param server: Server where image should be uploaded
        :param vm_type: Type of the image
        """

        if self._asked_for_this_image.setdefault(server.id(), {}).setdefault(filename, False):
            return
        self._asked_for_this_image[server.id()][filename] = True

        if server.isLocal():
            return
        path = os.path.join(self.getDirectoryForType(vm_type), filename)
        if os.path.exists(path):
            if self._askForUploadMissingImage(filename, server):

                if filename.endswith(".vmdk"):
                    # A vmdk file could be split into multiple vmdk files
                    search = glob.escape(path).replace(".vmdk", "-*.vmdk")
                    for file in glob.glob(search):
                        self._uploadImageToRemoteServer(file, server, vm_type)

                self._uploadImageToRemoteServer(path, server, vm_type)
                del self._asked_for_this_image[server.id()][filename]
Example #6
def find_file(file_dir, file_name):
    glob_pattern = '*{}*'.format(glob.escape(file_name))
    glob_dir = glob.escape(file_dir)
    glob_path = os.path.join(glob_dir, glob_pattern)
    debug_print(1, 'Glob pattern: {}'.format(glob_path))
    file_list = glob.glob(glob_path)
    if len(file_list) > 1:
        print('Found more than one file matching pattern {}'\
                .format(glob_path))
        return None
    elif len(file_list) < 1:
        print('Could not find any files matching pattern {}'\
                .format(glob_path))
        return None
    else:
        return file_list[0]
Example #7
def load_scripts(script_directory, entry_point, logger=None):
    if logger is None:
        logger = log
    entry_points = []
    found_errors = False
    glob_pattern = os.path.join(glob.escape(script_directory),
                                "[0-9][0-9]*.py")
    for filename in sorted(glob.glob(glob_pattern)):
        try:
            with open(filename, "r") as scriptfile:
                my_globals = {}
                exec(compile(scriptfile.read(), filename, "exec"),
                     my_globals)
                entry = my_globals.get(entry_point)
                if entry is not None:
                    entry_points.append(Script(filename, entry))
                else:
                    found_errors = True
                    logger.error("Cannot find entry point %r in %r",
                                 entry_point, filename)
        except Exception:
            found_errors = True
            logger.exception("Exception while trying to find entry point %r in %r",
                             entry_point, filename)
    if found_errors:
        raise RuntimeError("Errors found while loading scripts."
                           " See log file for details")
    return entry_points
Example #8
	def execute_repair(self):
		repairedfiles=[]
		recreatedfiles=[]
		if self.len_verified_actions>0:
			for f,retcode in self.verifiedfiles_repairable:
				yield f
				retval = self.runpar([self.par_cmd,"r",f])
				if retval == 0:
					if not self.keep_old and os.path.isfile(f+".1"):
						send2trash(f+".1")
					repairedfiles.append([ f , retval ])
			for f,retcode in self.verifiedfiles_err:
				yield f
				pars = glob.glob(glob.escape(f)+'*.par2')
				for p in pars:
					send2trash(p)
				recreatedfiles.append([ f , self.runpar([self.par_cmd,"c","-r"+self.percentage,"-n"+self.nr_parfiles,f]) ])

		self.recreate = sorted(recreatedfiles)
		self.recreate_err = sorted([f for f,err in recreatedfiles if err !=0])
		self.fixes = sorted([f for f,err in repairedfiles if err ==0])
		self.fixes_err = sorted([f for f,err in repairedfiles if err !=0])

		self.len_all_err = self.len_all_err + len(self.recreate_err) + len(self.fixes_err)

		return
Example #9
def get_albumart(song):
    albumArt = None
    if(song != "STOPPED"):
        aaDir = re.sub(r"[^/]*$", "", song["file"])
        for albumArt in glob.glob(glob.escape(MUSICDIR + aaDir) + PATTERN):
            break
    return(albumArt)
Example #10
def complete_path(prefix, line, start, end, ctx, cdpath=True, filtfunc=None):
    """Completes based on a path name."""
    # string stuff for automatic quoting
    path_str_start = ''
    path_str_end = ''
    append_end = True
    p = _path_from_partial_string(line, end)
    lprefix = len(prefix)
    if p is not None:
        lprefix = len(p[0])
        prefix = p[1]
        path_str_start = p[2]
        path_str_end = p[3]
        if len(line) >= end + 1 and line[end] == path_str_end:
            append_end = False
    tilde = '~'
    paths = set()
    env = builtins.__xonsh_env__
    csc = env.get('CASE_SENSITIVE_COMPLETIONS')
    glob_sorted = env.get('GLOB_SORTED')
    prefix = glob.escape(prefix)
    for s in xt.iglobpath(prefix + '*', ignore_case=(not csc),
                          sort_result=glob_sorted):
        paths.add(s)
    if len(paths) == 0 and env.get('SUBSEQUENCE_PATH_COMPLETION'):
        # this block implements 'subsequence' matching, similar to fish and zsh.
        # matches are based on subsequences, not substrings.
        # e.g., ~/u/ro completes to ~/lou/carcolh
        # see above functions for details.
        p = _splitpath(os.path.expanduser(prefix))
        if len(p) != 0:
            if p[0] == '':
                basedir = ('', )
                p = p[1:]
            else:
                basedir = None
            matches_so_far = {basedir}
            for i in p:
                matches_so_far = _expand_one(matches_so_far, i, csc)
            paths |= {_joinpath(i) for i in matches_so_far}
    if len(paths) == 0 and env.get('FUZZY_PATH_COMPLETION'):
        threshold = env.get('SUGGEST_THRESHOLD')
        for s in xt.iglobpath(os.path.dirname(prefix) + '*',
                              ignore_case=(not csc),
                              sort_result=glob_sorted):
            if xt.levenshtein(prefix, s, threshold) < threshold:
                paths.add(s)
    if tilde in prefix:
        home = os.path.expanduser(tilde)
        paths = {s.replace(home, tilde) for s in paths}
    if cdpath:
        _add_cdpaths(paths, prefix)
    paths = set(filter(filtfunc, paths))
    paths, _ = _quote_paths({_normpath(s) for s in paths},
                            path_str_start,
                            path_str_end,
                            append_end)
    paths.update(filter(filtfunc, _dots(prefix)))
    paths.update(filter(filtfunc, _env(prefix)))
    return paths, lprefix
Example #11
    def update_srcdir(self, db, source, repo):
        need_update = True

        # Ensure that every file that Git touches has a timestamp
        # which is strictly greater than this, even if filesystem
        # timestamps are imprecise.
        before_update = time.time()
        time.sleep(2)

        if not os.path.exists(source):
            sys.stderr.write("Cloning {} into {}...\n"
                             .format(repo, source))
            subprocess.check_call(["git", "clone", repo, source])
            need_update = False

        if not os.path.isdir(os.path.join(source, ".git")):
            raise RuntimeError("{!r} exists but is not a Git checkout"
                               .format(source))
        if not os.path.isdir(os.path.join(source, "csv")):
            raise RuntimeError("{!r} exists but its contents are "
                               "not as expected"
                               .format(source))

        if need_update:
            sys.stderr.write("Updating {}...\n".format(source))
            subprocess.check_call(["git", "pull"], cwd=source)

        to_import = []

        with db, db.cursor() as cur:
            # for f in ${source}/csv/*.csv
            for f in glob.iglob(os.path.join(glob.escape(source),
                                             "csv", "*.csv")):
                b = os.path.basename(f)
                if b.startswith("00-LEGEND-"):
                    continue
                country_code = os.path.splitext(b)[0].upper()
                # for the database's sake, we replace 'global' and
                # 'cis' with two-letter codes in the "user-assigned"
                # ISO 3166-1a2 space.
                if country_code == "GLOBAL": country_code = "ZZ"
                elif country_code == "CIS":  country_code = "XC"
                elif len(country_code) != 2:
                    sys.stderr.write("{!r}: name doesn't contain a 2-letter "
                                     "country code or recognized exception\n"
                                     .format(f))
                    self.delayed_failure = True
                    continue

                cur.execute("SELECT 1 AS one FROM urls_citizenlab "
                            "WHERE country = %s "
                            "LIMIT 1", (country_code,))
                prev_import = (cur.fetchone() is not None)

                st = os.lstat(f)
                if (stat.S_ISREG(st.st_mode) and
                    (not prev_import or st.st_mtime_ns > before_update)):
                    to_import.append((f, country_code))

        return to_import
Example #12
 def update_file_index(self, filename, index):
     pattern = self.dir_to_dl + '*{}.*'.format(glob.escape(filename))
     file_to_rename = glob.glob(pattern)[0]
     count_len = len(str(index))
     new_index = '0'*(6-count_len) + str(index)
     old_index = file_to_rename.rpartition('\\')[2][:6]
     new_filename = file_to_rename[:].replace(old_index, new_index)
     os.rename(file_to_rename, new_filename)
Example #13
 def revisions(self):
     revision_to_timestamp = collections.OrderedDict()
     prefix = self._path_revision('')
     for filename in sorted(glob.glob(glob.escape(prefix) + '*Z')):
         revision = filename[len(prefix):]
         timestamp = revision[:-1]
         revision_to_timestamp[revision] = timestamp
     return revision_to_timestamp
Example #14
    def find_fixtures(self, fixture_label):
        """Find fixture files for a given label."""
        if fixture_label == READ_STDIN:
            return [(READ_STDIN, None, READ_STDIN)]

        fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
        databases = [self.using, None]
        cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
        ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]

        if self.verbosity >= 2:
            self.stdout.write("Loading '%s' fixtures..." % fixture_name)

        if os.path.isabs(fixture_name):
            fixture_dirs = [os.path.dirname(fixture_name)]
            fixture_name = os.path.basename(fixture_name)
        else:
            fixture_dirs = self.fixture_dirs
            if os.path.sep in os.path.normpath(fixture_name):
                fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
                                for dir_ in fixture_dirs]
                fixture_name = os.path.basename(fixture_name)

        suffixes = (
            '.'.join(ext for ext in combo if ext)
            for combo in product(databases, ser_fmts, cmp_fmts)
        )
        targets = {'.'.join((fixture_name, suffix)) for suffix in suffixes}

        fixture_files = []
        for fixture_dir in fixture_dirs:
            if self.verbosity >= 2:
                self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
            fixture_files_in_dir = []
            path = os.path.join(fixture_dir, fixture_name)
            for candidate in glob.iglob(glob.escape(path) + '*'):
                if os.path.basename(candidate) in targets:
                    # Save the fixture_dir and fixture_name for future error messages.
                    fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))

            if self.verbosity >= 2 and not fixture_files_in_dir:
                self.stdout.write("No fixture '%s' in %s." %
                                  (fixture_name, humanize(fixture_dir)))

            # Check kept for backwards-compatibility; it isn't clear why
            # duplicates are only allowed in different directories.
            if len(fixture_files_in_dir) > 1:
                raise CommandError(
                    "Multiple fixtures named '%s' in %s. Aborting." %
                    (fixture_name, humanize(fixture_dir)))
            fixture_files.extend(fixture_files_in_dir)

        if not fixture_files:
            raise CommandError("No fixture named '%s' found." % fixture_name)

        return fixture_files
Example #15
 def remove_level_tiles_before(self, level, timestamp):
     level_cache = self._get_level(level)
     if timestamp == 0:
         level_cache.cleanup()
         os.unlink(level_cache.mbtile_file)
         for file in glob.glob("%s-*" % glob.escape(level_cache.mbtile_file)):
             os.unlink(file)
         return True
     else:
         return level_cache.remove_level_tiles_before(level, timestamp)
Example #16
def iter_paths(paths):
    """Yield paths, expanding globbing patterns if present"""
    for path in paths:
        if path == escape(path):
            # path does not contain globbing patterns
            yield path
        else:
            for filepath in iglob(path, recursive=True):
                if not isdir(filepath):
                    yield filepath
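The path == escape(path) comparison above is a compact way to ask whether a string contains any glob metacharacters (*, ? or [). A minimal standalone sketch of that check, with a hypothetical helper name chosen for illustration:

from glob import escape

def has_glob_chars(pattern):
    # glob.escape() wraps *, ? and [ in brackets, so an unchanged result
    # means the string contains no glob metacharacters
    return escape(pattern) != pattern

print(has_glob_chars("logs/*.txt"))    # True
print(has_glob_chars("logs/app.txt"))  # False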
Example #17
 def _parse_line(self, line):
     m = re.match(r'.*\[info\] GalleryDownloader: Finished download of gallery: (.+)\n', line)
     if not m:
         return
     name = m.group(1)
     paths = self._download_path.glob('{0}*'.format(glob.escape(name)))
     paths = list(paths)
     if len(paths) != 1:
         ERROR('acdul') << '(hah)' << name << 'has multiple target' << paths
         return
     self._loop.add_callback(self._upload, paths[0])
Example #18
def sizeChecker(folderSize):
    oldFolderSize = folderSize
    print("Folder size (old size):" , oldFolderSize , "Elements" )
    time.sleep(10) #in seconds
    newFolderSize = len([file for file in glob.glob(glob.escape(pathToSearchIn) + '/**/*', recursive=True)])
    print("Folder size (new size):" , newFolderSize , "Elements")
    if newFolderSize > oldFolderSize:
        print ("Changes detected")
        reOrderFiles()
    else: 
        print ("No changes")
    sizeChecker(newFolderSize) # run the function again after the sleep interval
Example #19
 def _load_from_data(self, irc, channel, m):
     base_path = os.path.join(conf.supybot.directories.data(), 'Markovgen', channel)
     if not os.path.isdir(base_path):
         return
     for extracter_name in os.listdir(base_path):
         extracter = get_extracter(extracter_name)
         path = os.path.join(base_path, extracter_name)
         path = glob.escape(path)
         filenames = rec_list_files(path)
         for filename in filenames:
             with open(filename, 'rb') as fd:
                 m.feed_from_file(fd, extracter)
Example #20
def find_species(output, specie_spec):
    if not specie_spec:
        return output.model.species()

    have_globs = any(glob.escape(pat) != pat for pat in specie_spec)
    if not have_globs:
        # preserve the specified order
        return specie_spec

    # use the order in the file (globs can match more than once, so glob order is not useful)
    matches = [sp for sp in output.model.species()
               if any(fnmatch.fnmatchcase(sp, pat)
                      for pat in specie_spec)]
    if not matches:
        raise ValueError('no species matched by {}'.format(specie_spec))
    return matches
Example #21
def get_mp3_files(path):
    """Finds all music files in a directory non-recursively.

    :param path: The path in which to look for music files.
    :type path: str

    :returns: A list of all music files that were found.
    :rtype: list
    """

    # TODO: Account for the missing glob.escape() in Python versions < 3.4.
    # - Before Python 3.4, the glob module had no escape() helper, so ?, * and [ must be escaped by hand by
    #   wrapping each one in brackets ([?], [*], [[]); ] needs no escaping. When using a regex to make the
    #   substitution, make sure not to touch the brackets added by earlier substitutions, which would result
    #   in "double escaped" brackets. (A sketch of this replacement follows this example.)

    return glob.glob(glob.escape(path) + "/*.mp3")
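The replacement described in the TODO above is what glob.escape() performs automatically on Python 3.4 and later. A hedged sketch of the manual pre-3.4 fallback (the helper name is illustrative; compare Example #26 below, which uses the same regex):

import re

_magic_check = re.compile(r'([*?[])')

def manual_glob_escape(pathname):
    # wrap each glob metacharacter in brackets, e.g. "a*b" -> "a[*]b";
    # ']' never needs escaping because it is only special inside a bracket set
    return _magic_check.sub(r'[\1]', pathname)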
Example #22
    def _randomize_id(self, topology):
        """
        Iterate over all keys and replace each UUID with a new one. Used by
        "save as" when creating a new topology. It also renames the VM folders on disk.

        """
        topology["project_id"] = str(uuid.uuid4())
        if "nodes" in topology["topology"]:
            for key, node in enumerate(topology["topology"]["nodes"]):
                old_uuid = topology["topology"]["nodes"][key].get("vm_id", None)
                new_uuid = str(uuid.uuid4())
                topology["topology"]["nodes"][key]["vm_id"] = new_uuid
                if old_uuid:
                    for path in glob.glob(os.path.join(glob.escape(self.project.filesDir()), "project-files", "*", old_uuid)):
                        new_path = path.replace(old_uuid, new_uuid)
                        shutil.move(path, new_path)
        return topology
Example #23
def rglob(dirname, pattern, dirs=False, sort=True):
    """recursive glob, gets all files that match the pattern within the directory tree"""
    fns = []
    if os.path.isdir(dirname):
        fns = glob(os.path.join(escape(dirname), pattern))
        # collect subdirectories; escaping is only needed for globbing,
        # not for the os.path.isdir() checks
        dns = [fn for fn
                in [os.path.join(dirname, fn)
                    for fn in os.listdir(dirname)]
                if os.path.isdir(fn)]
        if dirs:
            fns += dns
        for d in dns:
            fns += rglob(d, pattern)
        if sort:
            fns.sort()
    return fns
Example #24
    def _kasp_import_keys(self, keydir, bind_keydir, zone_name):
        Keymgr.run(keydir, "init")

        # add zone if not exists
        exitcode, _, _ = Keymgr.run(keydir, "zone", "show", zone_name)
        if exitcode != 0:
            Keymgr.run_check(keydir, "zone", "add", zone_name)

        # retrieve existing keys
        _, stdout, _ = Keymgr.run_check(keydir, "zone", "key", "list", zone_name)
        tags = [int(re.search(r'\bkeytag\s+(\d+)\b', x).group(1)) for x in stdout.splitlines()]

        # import new keys, ignore existing (compare keytag)
        assert(zone_name.endswith("."))
        for pkey_path in glob.glob("%s/K*.private" % glob.escape(bind_keydir)):
            pkey = os.path.basename(pkey_path)
            m = re.match(r'K(?P<name>[^+]+)\+(?P<algo>\d+)\+(?P<tag>\d+)\.private', pkey)
            if m and m.group("name") == zone_name.lower() and int(m.group("tag")) not in tags:
                Keymgr.run_check(keydir, "zone", "key", "import", zone_name, pkey_path)
Example #25
    def _load(self):
        """Load text to memory"""

        corpus_directory = glob.escape(self.corpus_directory)
        file_list = sorted(glob.glob(os.path.join(corpus_directory, "*.txt")))

        for path in file_list:
            with open(path, "r", encoding="utf8") as text:
                # Read content from text file
                content = text.read()

                # Preprocessing
                content = self._preprocessing(content)

                # Create text instance
                text = Text(path, os.path.basename(path), content)

                # Add text to corpus
                self.__corpus.append(text)
Example #26
def glob_escape(pathname):
    """
    Escape all special chars for glob.
    For Python 3.4 and later we use the glob.escape method.

    :returns: Escaped path
    """

    if sys.version_info < (3, 4):
        # Extracted from Python 3.4 source code
        # Escaping is done by wrapping any of "*?[" between square brackets.
        # Metacharacters do not work in the drive part and shouldn't be escaped.
        magic_check = re.compile('([*?[])')
        magic_check_bytes = re.compile(b'([*?[])')
        drive, pathname = os.path.splitdrive(pathname)
        if isinstance(pathname, bytes):
            pathname = magic_check_bytes.sub(br'[\1]', pathname)
        else:
            pathname = magic_check.sub(r'[\1]', pathname)
        return drive + pathname
    else:
        return glob.escape(pathname)
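A brief usage note on the example above: only *, ? and [ are wrapped, the ] character is left alone, and any drive prefix is kept untouched. An illustrative call with a made-up path:

path = "reports/2024 [final]/summary?.txt"
escaped = glob_escape(path)
# -> 'reports/2024 [[]final]/summary[?].txt'
matches = glob.glob(escaped)  # matches only the literal path, if it exists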
Example #27
    def check_state(self):
        # apply any argument values that were changed in the GUI
        for k, v in self.args.items():
            setattr(self, k, v)
        self.percentage = str(self.percentage)

        if self.runpar([self.par_cmd]) == 200:
            # a return code of 200 means the par2 command doesn't exist
            return 200

        allfiles = [
            f for f in glob.glob(os.path.join(glob.escape(self.directory), "**", "*"),
                                 recursive=True) if os.path.isfile(f)
        ]  # the recursive glob also returns directories, so keep regular files only

        if 'root' in self.excludes:
            allfiles = [
                f for f in allfiles if os.path.dirname(f) != self.directory
            ]
            self.excludes.remove('root')
        for excl in self.excludes:
            allfiles = [
                f for f in allfiles
                if not f.startswith(os.path.join(self.directory, excl))
            ]
        for ext in self.extexcludes:
            allfiles = [f for f in allfiles if not f.endswith(ext)]

        parrables = [f for f in allfiles if not f.endswith(".par2")]

        pattern = r'.+vol[0-9]+\+[0-9]+\.par2'
        par2corrfiles = [f for f in allfiles if re.search(pattern, f)]
        par2files = [
            f for f in allfiles
            if f.endswith(".par2") and not re.search(pattern, f)
        ]

        par2errcopies = [
            f for f in allfiles if f.endswith(".1") or f.endswith(".2")
        ]  #remove copies with errors fixed previously by par.

        create = []
        verify = []
        incomplete = []
        #print("Checking files for parrability ...")
        for f in parrables:
            # check if both or one of the par files is missing
            ispar = os.path.isfile(f + ".par2")
            isvolpar = len(glob.glob(glob.escape(f) + ".vol*.par2")) > 0
            if self.overwrite:
                create.append(f)
            elif not ispar and not isvolpar:
                #both missing
                create.append(f)
            elif not self.noverify and ispar and isvolpar:
                #both present
                verify.append(f)
            elif self.noverify and ispar and isvolpar:
                #both present, but noverify is on, so no action
                pass
            else:
                #one of them is missing but not both
                incomplete.append(f)

        unused = []
        if not self.keep_old:
            #print("Checking for unused par2 files ...")
            for f in par2files:
                if not os.path.isfile(f[:-5]):
                    unused.append(f)
            for f in par2corrfiles:
                if not os.path.isfile(f.split('.vol')[0]):
                    unused.append(f)

        self.create = sorted(create)
        self.incomplete = sorted(incomplete)
        self.verify = sorted(verify)
        self.unused = sorted(unused)
        self.par2errcopies = sorted(par2errcopies)

        self.parrables = sorted(parrables)
        self.par2corrfiles = sorted(par2corrfiles)
        self.par2files = sorted(par2files)

        self.len_all_actions = len(create) + len(incomplete) + len(
            verify) + len(unused) + len(par2errcopies)

        return
Example #28
    def read_ensemble_preds(self):
        """
            reading predictions on ensemble building data set;
            populates self.read_preds
        """
        self.logger.debug("Read ensemble data set predictions")

        if self.y_true_ensemble is None:
            try:
                self.y_true_ensemble = self.backend.load_targets_ensemble()
            except FileNotFoundError:
                self.logger.debug(
                    "Could not find true targets on ensemble data set: %s",
                    traceback.format_exc(),
                )
                return False

        # no validation predictions so far -- no dir
        if not os.path.isdir(self.dir_ensemble):
            self.logger.debug("No ensemble dataset prediction directory found")
            return False

        if self.shared_mode is False:
            pred_path = os.path.join(
                glob.escape(self.dir_ensemble),
                'predictions_ensemble_%s_*.npy' % self.seed,
            )
        # pSMAC
        else:
            pred_path = os.path.join(
                glob.escape(self.dir_ensemble),
                'predictions_ensemble_*_*.npy',
            )

        y_ens_files = glob.glob(pred_path)
        # no validation predictions so far -- no files
        if len(y_ens_files) == 0:
            self.logger.debug("Found no prediction files on ensemble data set:"
                              " %s" % pred_path)
            return False

        n_read_files = 0
        for y_ens_fn in y_ens_files:

            if self.read_at_most and n_read_files >= self.read_at_most:
                # limit the number of files that will be read
                # to limit memory consumption
                break

            if not y_ens_fn.endswith(".npy"):
                self.logger.info('Error loading file (not .npy): %s', y_ens_fn)
                continue

            match = self.model_fn_re.search(y_ens_fn)
            _seed = int(match.group(1))
            _num_run = int(match.group(2))

            if not self.read_preds.get(y_ens_fn):
                self.read_preds[y_ens_fn] = {
                    "ens_score": -1,
                    "mtime_ens": 0,
                    "mtime_valid": 0,
                    "mtime_test": 0,
                    "seed": _seed,
                    "num_run": _num_run,
                    Y_ENSEMBLE: None,
                    Y_VALID: None,
                    Y_TEST: None,
                    # Lazy keys so far:
                    # 0 - not loaded
                    # 1 - loaded and in memory
                    # 2 - loaded but dropped again
                    "loaded": 0
                }

            if self.read_preds[y_ens_fn]["mtime_ens"] == os.path.getmtime(
                    y_ens_fn):
                # same time stamp; nothing changed;
                continue

            # actually read the predictions and score them
            try:
                with open(y_ens_fn, 'rb') as fp:
                    y_ensemble = self._read_np_fn(fp=fp)
                    score = calculate_score(
                        solution=self.y_true_ensemble,  # y_ensemble = y_true for ensemble set
                        prediction=y_ensemble,
                        task_type=self.task_type,
                        metric=self.metric,
                        all_scoring_functions=False)

                    if self.read_preds[y_ens_fn]["ens_score"] > -1:
                        self.logger.critical(
                            'Changing ensemble score for file %s from %f to %f '
                            'because file modification time changed? %f - %f',
                            y_ens_fn,
                            self.read_preds[y_ens_fn]["ens_score"],
                            score,
                            self.read_preds[y_ens_fn]["mtime_ens"],
                            os.path.getmtime(y_ens_fn),
                        )

                    self.read_preds[y_ens_fn]["ens_score"] = score
                    self.read_preds[y_ens_fn][Y_ENSEMBLE] = y_ensemble
                    self.read_preds[y_ens_fn]["mtime_ens"] = os.path.getmtime(
                        y_ens_fn)
                    self.read_preds[y_ens_fn]["loaded"] = 1

                    n_read_files += 1

            except:
                self.logger.warning(
                    'Error loading %s: %s',
                    y_ens_fn,
                    traceback.format_exc(),
                )
                self.read_preds[y_ens_fn]["ens_score"] = -1

        self.logger.debug(
            'Done reading %d new prediction files. Loaded %d predictions in '
            'total.', n_read_files,
            np.sum([pred["loaded"] > 0 for pred in self.read_preds.values()]))
        return True
Example #29
def parse_directory(
    directory: str,
    verbose: bool,
    excluded_files: List[str],
    tree_arg: int,
    short: bool,
    mode: int,
    oldparser: bool,
) -> int:
    if tree_arg:
        assert mode == PARSE, "Mode should be 1 (parse), when comparing the generated trees"

    if oldparser and tree_arg:
        print("Cannot specify tree argument with the cpython parser.", file=sys.stderr)
        return 1

    # For a given directory, traverse files and attempt to parse each one
    # - Output success/failure for each file
    errors = 0
    files = []
    trees = {}  # Trees to compare (after everything else is done)
    total_seconds = 0

    for file in sorted(glob(os.path.join(escape(directory), "**/*.py"), recursive=True)):
        # Only attempt to parse Python files and files that are not excluded
        if any(PurePath(file).match(pattern) for pattern in excluded_files):
            continue

        with tokenize.open(file) as f:
            source = f.read()

        try:
            result, dt = parse_file(source, file, mode, oldparser)
            total_seconds += dt
            if tree_arg:
                trees[file] = result
            report_status(succeeded=True, file=file, verbose=verbose, short=short)
        except SyntaxError as error:
            if is_parsing_failure(source):
                print(f"File {file} cannot be parsed by either parser.")
            else:
                report_status(
                    succeeded=False, file=file, verbose=verbose, error=error, short=short
                )
                errors += 1
        files.append(file)

    t1 = time.time()

    generate_time_stats(files, total_seconds)
    if short:
        print_memstats()

    if errors:
        print(f"Encountered {errors} failures.", file=sys.stderr)

    # Compare trees (the dict is empty unless -t is given)
    compare_trees_errors = 0
    for file, tree in trees.items():
        if not short:
            print("Comparing ASTs for", file)
        if compare_trees(tree, file, verbose, tree_arg >= 2) == 1:
            compare_trees_errors += 1

    if errors or compare_trees_errors:
        return 1

    return 0
Example #30
 def test_glob_escape(self):
     self.assertEqual(glob.escape('S01E01 - Show Name [SickChill].avi'), 'S01E01 - Show Name [[]SickChill].avi')
Example #31
def scan(path, name):
    pattern = os.path.join(glob.escape(path), '**', name)
    return sorted(glob.glob(pattern, recursive=True))
Example #32
def escapePathWildcards(path):
	"""Escape wildcards in the path"""
	# TODO: Implement this manually if it is not supported by the current version of Python.
	# Though it is not very important, because this occurs extremely seldom.
	return glob.escape(path) if hasattr(glob, 'escape') else path
Example #33
def complete_path(prefix, line, start, end, ctx, cdpath=True, filtfunc=None):
    """Completes based on a path name."""
    # string stuff for automatic quoting
    path_str_start = ""
    path_str_end = ""
    append_end = True
    p = _path_from_partial_string(line, end)
    lprefix = len(prefix)
    if p is not None:
        lprefix = len(p[0])
        prefix = p[1]
        path_str_start = p[2]
        path_str_end = p[3]
        if len(line) >= end + 1 and line[end] == path_str_end:
            append_end = False
    tilde = "~"
    paths = set()
    env = builtins.__xonsh_env__
    csc = env.get("CASE_SENSITIVE_COMPLETIONS")
    glob_sorted = env.get("GLOB_SORTED")
    prefix = glob.escape(prefix)
    for s in xt.iglobpath(prefix + "*",
                          ignore_case=(not csc),
                          sort_result=glob_sorted):
        paths.add(s)
    if len(paths) == 0 and env.get("SUBSEQUENCE_PATH_COMPLETION"):
        # this block implements 'subsequence' matching, similar to fish and zsh.
        # matches are based on subsequences, not substrings.
        # e.g., ~/u/ro completes to ~/lou/carcolh
        # see above functions for details.
        p = _splitpath(os.path.expanduser(prefix))
        if len(p) != 0:
            if p[0] == "":
                basedir = ("", )
                p = p[1:]
            else:
                basedir = None
            matches_so_far = {basedir}
            for i in p:
                matches_so_far = _expand_one(matches_so_far, i, csc)
            paths |= {_joinpath(i) for i in matches_so_far}
    if len(paths) == 0 and env.get("FUZZY_PATH_COMPLETION"):
        threshold = env.get("SUGGEST_THRESHOLD")
        for s in xt.iglobpath(
                os.path.dirname(prefix) + "*",
                ignore_case=(not csc),
                sort_result=glob_sorted,
        ):
            if xt.levenshtein(prefix, s, threshold) < threshold:
                paths.add(s)
    if tilde in prefix:
        home = os.path.expanduser(tilde)
        paths = {s.replace(home, tilde) for s in paths}
    if cdpath and cd_in_command(line):
        _add_cdpaths(paths, prefix)
    paths = set(filter(filtfunc, paths))
    paths, _ = _quote_paths({_normpath(s)
                             for s in paths}, path_str_start, path_str_end,
                            append_end)
    paths.update(filter(filtfunc, _dots(prefix)))
    paths.update(filter(filtfunc, _env(prefix)))
    return paths, lprefix
Example #34
 def list_all_models(self, seed: int) -> List[str]:
     runs_directory = self.get_runs_directory()
     model_files = glob.glob(
         os.path.join(glob.escape(runs_directory), '%d_*' % seed,
                      '%s.*.*.model' % seed))
     return model_files
Example #35
 def _cleanup(self):
     for tmp in glob.iglob(glob.escape(self._path_vid) + '*.img*~*'):
         _remove_file(tmp)
     _remove_file(self._path_import)
Example #36
def test_prefix_cleanup():
    # regular
    assert wr.s3._list._prefix_cleanup("foo*") == "foo"
    assert wr.s3._list._prefix_cleanup("*foo") == ""
    assert wr.s3._list._prefix_cleanup("foo*boo") == "foo"
    assert wr.s3._list._prefix_cleanup("foo?") == "foo"
    assert wr.s3._list._prefix_cleanup("?foo") == ""
    assert wr.s3._list._prefix_cleanup("foo?boo") == "foo"
    assert wr.s3._list._prefix_cleanup("[]foo") == ""
    assert wr.s3._list._prefix_cleanup("foo[]") == "foo"
    assert wr.s3._list._prefix_cleanup("foo[]boo") == "foo"

    # escaped
    assert wr.s3._list._prefix_cleanup(glob.escape("foo*")) == "foo"
    assert wr.s3._list._prefix_cleanup(glob.escape("*foo")) == ""
    assert wr.s3._list._prefix_cleanup(glob.escape("foo*boo")) == "foo"
    assert wr.s3._list._prefix_cleanup(glob.escape("foo?")) == "foo"
    assert wr.s3._list._prefix_cleanup(glob.escape("?foo")) == ""
    assert wr.s3._list._prefix_cleanup(glob.escape("foo?boo")) == "foo"
    assert wr.s3._list._prefix_cleanup(glob.escape("[]foo")) == glob.escape("")
    assert wr.s3._list._prefix_cleanup(
        glob.escape("foo[]")) == glob.escape("foo")
    assert wr.s3._list._prefix_cleanup(
        glob.escape("foo[]boo")) == glob.escape("foo")
Example #37
def init():
    folderSize = len([
        file for file in glob.glob(glob.escape(pathToSearchIn) + '/**/*',
                                   recursive=True)
    ])
    return folderSize
Example #38
    def get_valid_test_preds(self, selected_keys: list):
        """
        get valid and test predictions from disc
        and store them in self.read_preds

        Parameters
        ---------
        selected_keys: list
            list of selected keys of self.read_preds

        Return
        ------
        success_keys:
            all keys in selected keys for which we could read the valid and test predictions
        """
        success_keys_valid = []
        success_keys_test = []

        for k in selected_keys:
            valid_fn = glob.glob(
                os.path.join(
                    glob.escape(self.dir_valid),
                    'predictions_valid_%d_%d.npy' %
                    (self.read_preds[k]["seed"],
                     self.read_preds[k]["num_run"])))
            test_fn = glob.glob(
                os.path.join(
                    glob.escape(self.dir_test), 'predictions_test_%d_%d.npy' %
                    (self.read_preds[k]["seed"],
                     self.read_preds[k]["num_run"])))

            # TODO don't read valid and test if not changed
            if len(valid_fn) == 0:
                # self.logger.debug("Not found validation prediction file "
                #                   "(although ensemble predictions available): "
                #                   "%s" % valid_fn)
                pass
            else:
                valid_fn = valid_fn[0]
                if self.read_preds[k]["mtime_valid"] == os.path.getmtime(valid_fn) \
                        and self.read_preds[k][Y_VALID] is not None:
                    success_keys_valid.append(k)
                    continue
                try:
                    with open(valid_fn, 'rb') as fp:
                        y_valid = self._read_np_fn(fp)
                        self.read_preds[k][Y_VALID] = y_valid
                        success_keys_valid.append(k)
                        self.read_preds[k]["mtime_valid"] = os.path.getmtime(
                            valid_fn)
                except Exception as e:
                    self.logger.warning('Error loading %s: %s', valid_fn,
                                        traceback.format_exc())

            if len(test_fn) == 0:
                # self.logger.debug("Not found test prediction file (although "
                #                   "ensemble predictions available):%s" %
                #                   test_fn)
                pass
            else:
                test_fn = test_fn[0]
                if self.read_preds[k]["mtime_test"] == \
                        os.path.getmtime(test_fn) \
                        and self.read_preds[k][Y_TEST] is not None:
                    success_keys_test.append(k)
                    continue
                try:
                    with open(test_fn, 'rb') as fp:
                        y_test = self._read_np_fn(fp)
                        self.read_preds[k][Y_TEST] = y_test
                        success_keys_test.append(k)
                        self.read_preds[k]["mtime_test"] = os.path.getmtime(
                            test_fn)
                except Exception as e:
                    self.logger.warning('Error loading %s: %s', test_fn,
                                        traceback.format_exc())

        return success_keys_valid, success_keys_test
Example #39
 def _remove_incomplete_files(self):
     for tmp in glob.iglob(glob.escape(self._path_vid) + '*.img*~*'):
         _remove_file(tmp)
     _remove_file(self._path_import)
Example #40
    def execute(self):
        create = self.create
        incomplete = self.incomplete
        verify = self.verify
        unused = self.unused

        create.extend(incomplete)
        unused.extend(self.par2errcopies)

        errorcodes = {
            0: "Succes.",  #can mean no error, but also succesfully repaired!
            1: "Repairable damage found.",
            2: "Irreparable damage found.",
            3: "Invalid commandline arguments.",
            4: "Parity file unusable.",
            5: "Repair failed.",
            6: "IO error.",
            7: "Internal error",
            8: "Out of memory.",
            100: "os.remove succeeded.",
            101: "os.remove did not succeed.",
            200: "par2 command not found."
        }

        createdfiles = []
        createdfiles_err = []
        if len(create) > 0:
            #print('Creating ...')
            for f in create:
                yield f
                pars = glob.glob(glob.escape(f) + '*.par2')
                for p in pars:
                    os.remove(p)
                createdfiles.append([
                    f,
                    self.runpar([
                        self.par_cmd, "c", "-r" + self.percentage,
                        "-n" + self.nr_parfiles, f
                    ])
                ])
            createdfiles_err = [[i, j] for i, j in createdfiles
                                if j != 0 and j != 100]

        verifiedfiles = []
        verifiedfiles_succes = []
        verifiedfiles_err = []
        verifiedfiles_repairable = []
        if not self.noverify and not self.overwrite and len(verify) > 0:
            #print('Verifying ...')
            for f in verify:
                yield f
                verifiedfiles.append([f, self.runpar([self.par_cmd, "v", f])])
            verifiedfiles_err = [[i, j] for i, j in verifiedfiles
                                 if j != 0 and j != 100 and j != 1]
            verifiedfiles_repairable = [[i, j] for i, j in verifiedfiles
                                        if j == 1]
            verifiedfiles_succes = [[i, j] for i, j in verifiedfiles if j == 0]

        removedfiles = []
        removedfiles_err = []
        if not self.keep_old and len(unused) > 0:
            #print('Removing ...')
            for f in unused:
                yield f
                if os.path.isfile(
                        f):  # so os.remove always succeeds and returns None
                    os.remove(f)
                    removedfiles.append([f, 100])
                else:
                    removedfiles.append([f, 101])
            removedfiles_err = [[i, j] for i, j in removedfiles
                                if j != 0 and j != 100]

        self.createdfiles = createdfiles
        self.verifiedfiles_succes = verifiedfiles_succes
        self.removedfiles = removedfiles

        self.createdfiles_err = createdfiles_err
        self.verifiedfiles_err = verifiedfiles_err
        self.verifiedfiles_repairable = verifiedfiles_repairable
        self.removedfiles_err = removedfiles_err

        self.len_all_err = len(self.createdfiles_err) + len(
            self.verifiedfiles_err) + len(self.verifiedfiles_repairable) + len(
                self.removedfiles_err)
        self.len_verified_actions = len(self.verifiedfiles_err) + len(
            self.verifiedfiles_repairable)

        return
Example #41
def getExcelLinks(args):

    print("Getting .xlsx Links")

    # Handle arguments and flags
    parser = argparse.ArgumentParser(usage=instructions, add_help=False)
    parser.add_argument("--help", "-h", action="store_true")
    parser.add_argument("-r", action="store_true")
    parser.add_argument("-l", action="store_true")
    parser.add_argument("-o", action="store")
    parser.add_argument("file_names", nargs="*")

    args = parser.parse_args(args)

    # Run each argument through glob with its metacharacters escaped, so the
    # name is matched literally; glob only returns names that actually exist.
    # Mostly important if we run this on Windows systems.
    file_names = []

    for name in args.file_names:
        file_names += glob.glob(glob.escape(name))

    # If the filenames don't exist, say so and quit.
    if file_names == []:
        sys.exit("No file or directory found by that name.")

    # Don't run the script on itself.
    if sys.argv[0] in file_names:
        file_names.remove(sys.argv[0])

    if args.help:
        sys.exit(instructions)

    filecount = 0
    linklist = []
    target_is_folder = False

    for name in file_names:
        # Make sure single files exist.
        assert os.path.exists(name), "File or directory not found."

        # If it's just a file...
        if os.path.isfile(name):
            # Make sure this is an Excel file (just check extension)
            if name.lower().endswith(".xlsx"):
                # Get the links from that file.
                newlinks = getLinks(name, args, False)
                if newlinks != []:
                    linklist.extend(newlinks)
                    filecount += 1

        # If it's a directory:
        if os.path.isdir(name):
            target_is_folder = True
            # Recursive version using os.walk for all levels.
            if args.r:
                for dirpath, dirnames, files in os.walk(name):
                    for eachfile in files:
                        # Get links from every file in that directory.
                        if eachfile.lower().endswith(".xlsx"):
                            linklist.extend(getLinks(eachfile, args, dirpath))
                            filecount += 1
            # Non-recursive version breaks os.walk after the first level.
            else:
                topfiles = []
                for (dirpath, dirnames, files) in os.walk(name):
                    topfiles.extend(files)
                    break
                for eachfile in topfiles:
                    if eachfile.lower().endswith(".xlsx"):
                        linklist.extend(getLinks(eachfile, args, dirpath))
                        filecount += 1

    if args.l:
        # When asked to return a list, quietly return one and stop.
        return linklist
    else:
        # Otherwise, create output file as sibling to the original target of the script.
        outFileName = args.o if args.o else "Excel_Doc_Links.csv"
        if target_is_folder:
            outFileFolder = os.path.abspath(
                os.path.join(file_names[0], os.pardir))
            outFilePath = os.path.join(outFileFolder, outFileName)
        else:
            outFilePath = os.path.join(os.path.dirname(file_names[0]),
                                       outFileName)

        writeFile(linklist, filecount, outFileName, outFilePath, args)
Example #42
 def revisions(self):
     prefix = self._path_clean + '.'
     paths = glob.iglob(glob.escape(prefix) + '*@*Z')
     items = (path[len(prefix):-1].split('@') for path in paths)
     return collections.OrderedDict(
         sorted(items, key=lambda item: int(item[0])))
Example #43
 def check_dimension(self):
     if os.path.getsize(self.file_name) > 50 * 1024 * 1023:
         os.system('split -b 49M "{0}" "{1}"'.format(
             self.file_name, self.file_name))
         os.remove(self.file_name)
     return glob(escape(self.file_name) + '*')
Example #44
 def check_escape(self, arg, expected):
     self.assertEqual(glob.escape(arg), expected)
     self.assertEqual(glob.escape(os.fsencode(arg)), os.fsencode(expected))
Example #45
    def _load(self,
              names=None,
              variables=None,
              iterations=None,
              repetition=None,
              sample_index=None,
              configs=None,
              aliases=None,
              use_alias=True,
              concat_config=False,
              drop_columns=True,
              **kwargs):
        self.configs = []
        for filename in glob.glob(os.path.join(self.path, 'configs', '*')):
            with open(filename, 'rb') as f:
                self.configs.append(dill.load(f))

        if len(kwargs) > 0:
            if configs is None:
                configs = kwargs
            else:
                configs.update(kwargs)

        if configs is not None:
            self._filter_configs(config=configs, repetition=repetition)
        elif aliases is not None:
            self._filter_configs(alias=aliases, repetition=repetition)
        elif repetition is not None:
            self._filter_configs(repetition=repetition)

        if names is None:
            names = list(self.description['executables'].keys())

        if variables is None:
            variables = [
                variable for unit in self.description['executables'].values()
                for variable in unit['variables']
            ]

        names = self._get_list(names)
        variables = self._get_list(variables)
        iterations = self._get_list(iterations)

        all_results = []
        for config_alias in self.configs:
            alias_str = config_alias.alias(as_string=True)
            _repetition = config_alias.pop_config('repetition')
            _update = config_alias.pop_config('update')
            path = os.path.join(self.path, 'results', alias_str)

            for unit in names:
                sample_folders = glob.glob(
                    os.path.join(glob.escape(path), sample_index or '*'))
                for sample_folder in sample_folders:
                    files = glob.glob(
                        glob.escape(os.path.join(sample_folder, unit)) +
                        '_[0-9]*')
                    files = self._sort_files(files, iterations)
                    if len(files) != 0:
                        res = []
                        for filename, iterations_to_load in files.items():
                            with open(filename, 'rb') as file:
                                res.append(
                                    self._slice_file(dill.load(file),
                                                     iterations_to_load,
                                                     variables))
                        res = self._concat(res, variables)
                        self._fix_length(res)

                        config_alias.pop_config('_dummy')
                        if concat_config:
                            res['config'] = config_alias.alias(as_string=True)
                        if use_alias:
                            if not concat_config or not drop_columns:
                                res.update(config_alias.alias(as_string=False))
                        else:
                            res.update(config_alias.config())
                        res.update(
                            {'repetition': _repetition.config()['repetition']})
                        res.update({'update': _update.config()['update']})
                        all_results.append(pd.DataFrame({'name': unit, **res}))
        return pd.concat(all_results, sort=False).reset_index(
            drop=True) if len(all_results) > 0 else pd.DataFrame(None)
Example #46
 def safe_glob(self, pathname):
     return glob.iglob(glob.escape(pathname) + '*')
Example #47
def delete_files():
    # we don't know the precise name the underlying database uses
    # so we use glob to locate all names
    for f in glob.glob(glob.escape(_fname) + "*"):
        os_helper.unlink(f)
Example #48
 def check_escape(self, arg, expected):
     self.assertEqual(glob.escape(arg), expected)
     self.assertEqual(glob.escape(os.fsencode(arg)), os.fsencode(expected))
Example #49
    def run(self):
        logger = self.get_logger()

        with self._appimage.mount() as mountpoint:
            # find desktop file, get name of icon and look for it in AppDir root
            desktop_files = glob.glob(op.join(mountpoint, "*.desktop"))

            # we can of course check the validity of all icon files we find, but there's always one main icon that is
            # referenced from the desktop file
            main_icon_name = None

            if not desktop_files:
                logger.error("Could not find desktop file in root directory")

            else:
                logger.debug("Found desktop files: %s", desktop_files)

                desktop_file = desktop_files[0]
                logger.info("Extracting icon name from desktop file: %s",
                            desktop_file)

                with open(desktop_file) as f:
                    # find Icon= entry and get the name of the icon file to look for
                    # we don't need to check things like "is there just one Icon entry" etc., that's the job of another
                    # test
                    desktop_file_contents = f.read()

                    # note for self: Python's re doesn't do multiline unless explicitly asked for with re.MULTILINE
                    match = re.search(r"Icon=(.+)", desktop_file_contents)

                    if not match:
                        logger.error(
                            "Could not find Icon= entry in desktop file")
                    else:
                        main_icon_name = match.group(1)

            # to be able to filter out non-icon files with the same prefix in the AppDir root
            known_image_exts = ("png", "xpm", "svg", "jpg")

            # assuming test broke
            # now prove me wrong!
            root_icon_valid = False

            if main_icon_name is not None:
                if "/" in main_icon_name:
                    logger.error(
                        "main icon name is a path, not a filename (contains /)"
                    )
                else:
                    # properly escape some "magic" characters in the original filename so they won't be interpreted by glob
                    fixed_main_icon_name = glob.escape(main_icon_name)

                    # build glob pattern
                    pattern = "{}.*".format(fixed_main_icon_name)

                    logger.debug(
                        "Trying to find main icon in AppDir root, pattern: {}".
                        format(repr(pattern)))

                    appdir_root_icons = glob.glob(op.join(mountpoint, pattern))

                    if not appdir_root_icons:
                        logger.error(
                            "Could not find suitable icon for desktop file's Icon= entry"
                        )

                    else:
                        # filter out all files with a not-well-known extension
                        appdir_root_icons = [
                            i for i in appdir_root_icons if op.splitext(i)
                            [-1].lstrip(".") in known_image_exts
                        ]

                        if len(appdir_root_icons) > 1:
                            logger.warning(
                                "Multiple matching icons found in AppDir root, checking all"
                            )

                        main_icon_check_results = []
                        for icon in appdir_root_icons:
                            valid = self._check_icon_for_valid_resolution(icon)
                            main_icon_check_results.append(valid)

                        # if only one of the checks failed, we can't guarantee a working root icon
                        root_icon_valid = all(main_icon_check_results)

            yield TestResult(root_icon_valid, "icons.valid_appdir_root_icon",
                             "Valid icon in AppDir root")

            # next, check that .DirIcon is available and valid
            dotdiricon_valid = self._check_icon_for_valid_resolution(
                op.join(mountpoint, ".DirIcon"))
            yield TestResult(dotdiricon_valid, "icons.valid_dotdiricon",
                             "Valid icon file in .DirIcon")

            # now check all remaining icons in usr/share/icons/...
            other_icons_root_path = op.join(mountpoint,
                                            "usr/share/icons/**/*.*")
            other_icons = glob.glob(other_icons_root_path, recursive=True)

            # assume everything works
            # prove me wrong!
            other_icons_checks_success = True

            for abs_path in other_icons:
                # check if this icon even belongs to here
                rel_path = op.relpath(abs_path,
                                      op.join(mountpoint, "usr/share/icons"))
                filename = op.basename(abs_path)

                split_fname = op.splitext(filename)

                # not an error, but means we don't have to process that file any further
                if split_fname[0] != main_icon_name:
                    logger.warning(
                        "Icon found whose file name doesn't match the Icon= entry in desktop file: %s",
                        rel_path)

                else:
                    # also just a warning
                    if split_fname[1].lstrip(".") not in known_image_exts:
                        logger.warning("Icon has invalid extension: %s",
                                       split_fname[1])

                    logger.debug(
                        "checking whether icon has good resolution in general")
                    if not self._check_icon_for_valid_resolution(abs_path):
                        logger.warning("icon %s has invalid resolution",
                                       abs_path)
                        other_icons_checks_success = False

                    logger.debug(
                        "checking whether icon is in correct location")

                    # split path into the interesting components: icon theme, resolution and actual filename
                    split_path = rel_path.split("/")

                    # find resolution component in split path
                    path_res = None

                    def extract_res_from_path_component(s):
                        if s == "scalable":
                            return s
                        return tuple([int(i) for i in s.split("x")])

                    if len(split_path) != 4 or split_path[2] != "apps":
                        logger.warning("Icon %s is in non-standard location",
                                       rel_path)
                    else:
                        try:
                            path_res = extract_res_from_path_component(
                                split_path[1])
                        except:
                            pass

                    if not path_res:
                        # something's definitely broken
                        other_icons_checks_success = False

                        logger.warning(
                            "Could not find icon resolution at expected position in path, "
                            "trying to guess from entire path")
                        for comp in split_path:
                            try:
                                path_res = extract_res_from_path_component(
                                    comp)
                            except:
                                pass
                            else:
                                break

                    if not path_res:
                        other_icons_checks_success = False
                        logger.error(
                            "Could not extract resolution from icon path,"
                            "should be usr/share/icons/<theme>/<res>/apps/<name>.<ext>"
                        )

                    else:
                        # make sure extracted resolution corresponds to the file's resolution
                        actual_res = self._get_icon_res(abs_path)
                        if actual_res != path_res:
                            other_icons_checks_success = False
                            logger.error(
                                "Icon resolution doesn't match resolution in path: %s (file resolution is %s)",
                                path_res, actual_res)

            if not other_icons_checks_success:
                logger.warning("one or more icons in usr/share/icons failed validation")

            yield TestResult(other_icons_checks_success,
                             "icons.valid_other_icons",
                             "Other integration icons valid")
Example #50
0
                return
            yaml_data = yaml_data['settings']

    for k, v in dict_attrs(yaml_data, autoconfig=autoconfig):
        if xresources and isinstance(v, str):
            v = v.format_map(xresources)
        yield k, v


config.load_autoconfig(True)

xresources = read_xresources('*')

config_files = (
    fn
    for fn in glob.iglob(os.path.join(glob.escape(config.configdir), '*.yml')))

config_data = {}
for filename in config_files:
    for k, v in read_yml(filename, xresources=xresources):
        if k in CONFIG_KEYS_DICT:
            if k not in config_data:
                config_data[k] = config.get(k)

            if all(isinstance(x, dict) for x in config_data[k].values()):
                v = {
                    subkey: {
                        **config_data[k].get(subkey, {}),
                        **subval
                    }
                    for subkey, subval in v.items()
Example #51
0
        dot are special cases that are not matched by '*' and '?'
        patterns.
        
        If recursive is true, the pattern '**' will match any files and
        zero or more directories and subdirectories.
DATA
    __all__ = ['glob', 'iglob', 'escape']
FILE
    c:\python37\lib\glob.py
'''
print(dir(glob))
'''
'escape', 'fnmatch', 'glob', 'glob0', 'glob1', 'has_magic', 'iglob', 'magic_check', 'magic_check_bytes', 'os', 're']
'''

# glob.escape()

'''
Finds all pathnames matching a given pattern according to the rules used by the Unix shell.
The results are returned in arbitrary order.
The wildcards *, ?, and character ranges written with [] are matched correctly.
For a literal match, wrap the metacharacter in brackets; for example, '[?]'
matches the character '?'.

glob.glob(pathname, *, recursive=False) :
Returns a possibly empty list of path names matching pathname, which must be a string containing a path specification.
pathname may be either an absolute or a relative path.

glob.iglob(pathname, *, recursive=False)
Yields the same values as glob(), but returns an iterator instead of a list.
'''
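A minimal sketch of the calls described above, run against a hypothetical directory; the directory name notes_dir and the file names are invented for illustration and are not part of the original example.

import glob
import os

notes_dir = '/tmp/glob-demo'                      # hypothetical directory
literal_name = os.path.join(notes_dir, 'Viajar?.txt')

# glob.glob() returns a list; here '?' acts as a wildcard.
print(glob.glob(os.path.join(notes_dir, '*.txt')))

# glob.iglob() yields the same matches lazily, one at a time.
for path in glob.iglob(os.path.join(notes_dir, '*.txt')):
    print(path)

# glob.escape() rewrites 'Viajar?.txt' as 'Viajar[?].txt', so the pattern
# matches only the file whose name literally contains '?'.
print(glob.glob(glob.escape(literal_name)))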
Example #52
0
    def on_build_gen(
        self, cxx_gen_dir, missing_reporter: Optional[MissingReporter] = None
    ):

        if not self.cfg.generate:
            return

        cxx_gen_dir = join(cxx_gen_dir, self.name)

        if missing_reporter:
            report_only = True
        else:
            report_only = False
            missing_reporter = MissingReporter()

        thisdir = abspath(dirname(__file__))

        hppoutdir = join(self.rpy_incdir, "rpygen")
        tmpl_dir = join(thisdir, "templates")
        cpp_tmpl = join(tmpl_dir, "gen_pybind11.cpp.j2")
        hpp_tmpl = join(tmpl_dir, "gen_cls_trampoline.hpp.j2")
        classdeps_tmpl = join(tmpl_dir, "gen_classdeps.json.j2")

        pp_includes = self._all_includes(False)

        # TODO: only regenerate files if the generated files
        #       have changed
        if not report_only:

            if self.dev_config.only_generate is None:
                shutil.rmtree(cxx_gen_dir, ignore_errors=True)
                shutil.rmtree(hppoutdir, ignore_errors=True)

            os.makedirs(cxx_gen_dir, exist_ok=True)
            os.makedirs(hppoutdir, exist_ok=True)

        per_header = False
        data_fname = self.cfg.generation_data
        if self.cfg.generation_data:
            datapath = join(self.setup_root, normpath(self.cfg.generation_data))
            per_header = isdir(datapath)
            if not per_header:
                data = self._load_generation_data(datapath)
        else:
            data = HooksDataYaml()

        sources = self.cfg.sources[:]
        pp_defines = [self._cpp_version] + self.platform.defines + self.cfg.pp_defines
        casters = self._all_casters()

        # These are written to file to make it easier for dev mode to work
        classdeps = {}

        processor = ConfigProcessor(tmpl_dir)

        if self.dev_config.only_generate is not None:
            only_generate = {n: True for n in self.dev_config.only_generate}
        else:
            only_generate = None

        generation_search_path = [self.root] + self._all_includes(False)

        for gen in self.cfg.generate:
            for name, header in gen.items():

                header = normpath(header)
                for path in generation_search_path:
                    header_path = join(path, header)
                    if exists(header_path):
                        break
                else:
                    import pprint

                    pprint.pprint(generation_search_path)
                    raise ValueError("could not find " + header)

                if report_only:
                    templates = []
                    class_templates = []
                else:
                    cpp_dst = join(cxx_gen_dir, f"{name}.cpp")
                    sources.append(cpp_dst)
                    classdeps_dst = join(cxx_gen_dir, f"{name}.json")
                    classdeps[name] = classdeps_dst

                    hpp_dst = join(
                        hppoutdir,
                        "{{ cls['namespace'] | replace(':', '_') }}__{{ cls['name'] }}.hpp",
                    )

                    templates = [
                        {"src": cpp_tmpl, "dst": cpp_dst},
                        {"src": classdeps_tmpl, "dst": classdeps_dst},
                    ]
                    class_templates = [{"src": hpp_tmpl, "dst": hpp_dst}]

                if only_generate is not None and not only_generate.pop(name, False):
                    continue

                if per_header:
                    data_fname = join(datapath, name + ".yml")
                    if not exists(data_fname):
                        print("WARNING: could not find", data_fname)
                        data = HooksDataYaml()
                    else:
                        data = self._load_generation_data(data_fname)

                # for each thing, create a h2w configuration dictionary
                cfgd = {
                    # generation code depends on this being just one header!
                    "headers": [header_path],
                    "templates": templates,
                    "class_templates": class_templates,
                    "preprocess": True,
                    "pp_retain_all_content": False,
                    "pp_include_paths": pp_includes,
                    "pp_defines": pp_defines,
                    "vars": {"mod_fn": name},
                }

                cfg = Config(cfgd)
                cfg.validate()
                cfg.root = self.incdir

                hooks = Hooks(data, casters)
                processor.process_config(cfg, data, hooks)

                hooks.report_missing(data_fname, missing_reporter)

        if only_generate:
            unused = ", ".join(sorted(only_generate))
            # raise ValueError(f"only_generate specified unused headers! {unused}")
            # TODO: make this a warning

        if not report_only:
            for name, contents in missing_reporter.as_yaml():
                print("WARNING: some items not in generation yaml for", basename(name))
                print(contents)

        # generate an inline file that can be included + called
        if not report_only:
            self._write_wrapper_hpp(cxx_gen_dir, classdeps)
            gen_includes = [cxx_gen_dir]
        else:
            gen_includes = []

        # Add the root to the includes (but only privately)
        root_includes = [self.root]

        # update the build extension so that build_ext works
        self.extension.sources = sources
        # use normpath to get rid of .. otherwise gcc is weird
        self.extension.include_dirs = [
            normpath(p)
            for p in (self._all_includes(True) + gen_includes + root_includes)
        ]
        self.extension.library_dirs = self._all_library_dirs()
        self.extension.libraries = self._all_library_names()
        self.extension.extra_objects = self._all_extra_objects()

        for f in glob.glob(join(glob.escape(hppoutdir), "*.hpp")):
            self._add_generated_file(f)
Example #53
0
 def __init__(self):
     self._handler = Handler()
     # See ydl_opts['forcejson']
     self._expect_info_dict_json = False
     self._info_dict = None
     self._allow_authentication_request = True
     self._skip_authentication = False
     self._skipped_count = 0
     self.ydl_opts = {
         'logger': self,
         'logtostderr': True,
         'no_color': True,
         'progress_hooks': [self._on_progress],
         'fixup': 'detect_or_warn',
         'ignoreerrors': True,  # handled via logger error callback
         'retries': 10,
         'fragment_retries': 10,
         'postprocessors': [{
             'key': 'XAttrMetadata'
         }]
     }
     url = self._handler.get_url()
     download_dir = os.path.abspath(self._handler.get_download_dir())
     with tempfile.TemporaryDirectory() as temp_dir:
         self.ydl_opts['cookiefile'] = os.path.join(temp_dir, 'cookies')
         # Collect info without downloading videos
         testplaylist_dir = os.path.join(temp_dir, 'testplaylist')
         noplaylist_dir = os.path.join(temp_dir, 'noplaylist')
         fullplaylist_dir = os.path.join(temp_dir, 'fullplaylist')
         for path in [testplaylist_dir, noplaylist_dir, fullplaylist_dir]:
             os.mkdir(path)
         self.ydl_opts['writeinfojson'] = True
         self.ydl_opts['writethumbnail'] = True
         self.ydl_opts['skip_download'] = True
         self.ydl_opts['playlistend'] = 2
         self.ydl_opts['outtmpl'] = '%(autonumber)s.%(ext)s'
         # Test playlist
         info_testplaylist, skipped_testplaylist = self._load_playlist(
             testplaylist_dir, url)
         self.ydl_opts['noplaylist'] = True
         if len(info_testplaylist) + skipped_testplaylist > 1:
             info_noplaylist, skipped_noplaylist = self._load_playlist(
                 noplaylist_dir, url)
         else:
             info_noplaylist = info_testplaylist
             skipped_noplaylist = skipped_testplaylist
         del self.ydl_opts['noplaylist']
         del self.ydl_opts['playlistend']
         if (len(info_testplaylist) + skipped_testplaylist >
                 len(info_noplaylist) + skipped_noplaylist):
             self.ydl_opts['noplaylist'] = (
                 not self._handler.on_playlist_request())
             if not self.ydl_opts['noplaylist']:
                 info_playlist, _ = self._load_playlist(
                     fullplaylist_dir, url)
             else:
                 info_playlist = info_noplaylist
         elif len(info_testplaylist) + skipped_testplaylist > 1:
             info_playlist, _ = self._load_playlist(fullplaylist_dir, url)
         else:
             info_playlist = info_testplaylist
         # Download videos
         self._allow_authentication_request = False
         del self.ydl_opts['writeinfojson']
         del self.ydl_opts['writethumbnail']
         del self.ydl_opts['skip_download']
         # Include id and format_id in outtmpl to prevent youtube-dl
         # from continuing wrong file
         self.ydl_opts['outtmpl'] = '%(id)s.%(format_id)s.%(ext)s'
         # Output info_dict as JSON handled via logger debug callback
         self.ydl_opts['forcejson'] = True
         mode = self._handler.get_mode()
         if mode == 'audio':
             resolution = MAX_RESOLUTION
             prefer_mpeg = False
             self.ydl_opts['format'] = 'bestaudio/best'
             self.ydl_opts['postprocessors'].insert(
                 0, {
                     'key': 'FFmpegExtractAudio',
                     'preferredcodec': 'mp3',
                     'preferredquality': '192'
                 })
             self.ydl_opts['postprocessors'].insert(
                 1, {
                     'key': 'EmbedThumbnail',
                     'already_have_thumbnail': True
                 })
         else:
             resolution = self._handler.get_resolution()
             prefer_mpeg = self._handler.get_prefer_mpeg()
         try:
             os.makedirs(download_dir, exist_ok=True)
         except OSError as e:
             self._handler.on_error(
                 'ERROR: Failed to create download folder: %s' % e)
             raise
         for i, info_path in enumerate(info_playlist):
             with open(info_path) as f:
                 info = json.load(f)
             title = info.get('title') or info.get('id') or 'video'
             output_title = self._get_output_title(title)
             thumbnail_paths = list(
                 filter(
                     lambda p: os.path.splitext(p)[1][1:] != 'json',
                     glob.iglob(
                         glob.escape(info_path[:-len('info.json')]) + '*')))
             thumbnail_path = thumbnail_paths[0] if thumbnail_paths else ''
             if thumbnail_path:
                 # Convert thumbnail to JPEG and limit resolution
                 new_thumbnail_path = thumbnail_path + '-converted.jpg'
                 try:
                     subprocess.run([
                         FFMPEG_EXE, '-i',
                         os.path.abspath(thumbnail_path), '-vf',
                         ('scale=\'min({0},iw):min({0},ih):'
                          'force_original_aspect_ratio=decrease\''
                          ).format(MAX_THUMBNAIL_RESOLUTION),
                         os.path.abspath(new_thumbnail_path)
                     ],
                                    check=True,
                                    stdin=subprocess.DEVNULL,
                                    stdout=sys.stderr)
                 except FileNotFoundError:
                     self._handler.on_error('ERROR: %r not found' %
                                            FFMPEG_EXE)
                     raise
                 except subprocess.CalledProcessError:
                     traceback.print_exc(file=sys.stderr)
                     sys.stderr.flush()
                     new_thumbnail_path = ''
                 # No longer needed
                 os.remove(thumbnail_path)
                 thumbnail_path = new_thumbnail_path
             self._handler.on_progress_start(i, len(info_playlist), title,
                                             thumbnail_path)
             for thumbnail in info.get('thumbnails') or []:
                 thumbnail['filename'] = thumbnail_path
             sort_formats(
                 info.get('formats') or [], resolution, prefer_mpeg)
             with open(info_path, 'w') as f:
                 json.dump(info, f)
             # Check if we already got the file
             existing_filename = self._find_existing_download(
                 download_dir, output_title, mode)
             if existing_filename is not None:
                 self._handler.on_progress_end(existing_filename)
                 continue
             # Download into separate directory because youtube-dl generates
             # many temporary files
             temp_download_dir = os.path.join(download_dir,
                                              output_title + '.part')
             # Lock download directory to prevent other processes from
             # writing to the same files
             temp_download_dir_cm = contextlib.ExitStack()
             try:
                 temp_download_dir_cm.enter_context(
                     self._create_and_lock_dir(temp_download_dir))
             except OSError as e:
                 self._handler.on_error(
                     'ERROR: Failed to lock download folder: %s' % e)
                 raise
             with temp_download_dir_cm:
                 # Check if the file got downloaded in the meantime
                 existing_filename = self._find_existing_download(
                     download_dir, output_title, mode)
                 if existing_filename is not None:
                     filename = existing_filename
                 else:
                     # See ydl_opts['forcejson']
                     self._expect_info_dict_json = True
                     self._load_playlist(temp_download_dir,
                                         info_path=info_path)
                     if self._expect_info_dict_json:
                         raise RuntimeError('info_dict not received')
                     # Move finished download from download to target dir
                     temp_filename = self._info_dict['_filename']
                     if mode == 'audio':
                         temp_filename = (
                             os.path.splitext(temp_filename)[0] + '.mp3')
                     output_ext = os.path.splitext(temp_filename)[1]
                     filename = output_title + output_ext
                     try:
                         os.replace(
                             os.path.join(temp_download_dir, temp_filename),
                             os.path.join(download_dir, filename))
                     except OSError as e:
                         self._handler.on_error(
                             ('ERROR: Failed to move finished download to '
                              'download folder: %s') % e)
                         raise
                 # Delete download directory
                 with contextlib.suppress(OSError):
                     shutil.rmtree(temp_download_dir)
             self._handler.on_progress_end(filename)
             self._info_dict = None
Example #54
0
    def find_fixtures(self, fixture_label):
        """Find fixture files for a given label."""
        if fixture_label == READ_STDIN:
            return [(READ_STDIN, None, READ_STDIN)]

        fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
        databases = [self.using, None]
        cmp_fmts = list(self.compression_formats) if cmp_fmt is None else [cmp_fmt]
        ser_fmts = (
            serializers.get_public_serializer_formats()
            if ser_fmt is None
            else [ser_fmt]
        )

        if self.verbosity >= 2:
            self.stdout.write("Loading '%s' fixtures..." % fixture_name)

        if os.path.isabs(fixture_name):
            fixture_dirs = [os.path.dirname(fixture_name)]
            fixture_name = os.path.basename(fixture_name)
        else:
            fixture_dirs = self.fixture_dirs
            if os.path.sep in os.path.normpath(fixture_name):
                fixture_dirs = [
                    os.path.join(dir_, os.path.dirname(fixture_name))
                    for dir_ in fixture_dirs
                ]
                fixture_name = os.path.basename(fixture_name)

        suffixes = (
            ".".join(ext for ext in combo if ext)
            for combo in product(databases, ser_fmts, cmp_fmts)
        )
        targets = {".".join((fixture_name, suffix)) for suffix in suffixes}

        fixture_files = []
        for fixture_dir in fixture_dirs:
            if self.verbosity >= 2:
                self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
            fixture_files_in_dir = []
            path = os.path.join(fixture_dir, fixture_name)
            for candidate in glob.iglob(glob.escape(path) + "*"):
                if os.path.basename(candidate) in targets:
                    # Save the fixture_dir and fixture_name for future error messages.
                    fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))

            if self.verbosity >= 2 and not fixture_files_in_dir:
                self.stdout.write(
                    "No fixture '%s' in %s." % (fixture_name, humanize(fixture_dir))
                )

            # Check kept for backwards-compatibility; it isn't clear why
            # duplicates are only allowed in different directories.
            if len(fixture_files_in_dir) > 1:
                raise CommandError(
                    "Multiple fixtures named '%s' in %s. Aborting."
                    % (fixture_name, humanize(fixture_dir))
                )
            fixture_files.extend(fixture_files_in_dir)

        if not fixture_files:
            raise CommandError("No fixture named '%s' found." % fixture_name)

        return fixture_files
			        print(ruta)
			 
			# glob with a pattern that uses recursion; the matching directories and files are returned
			 
			raiz = '/home/ant/img/2015/**'
			rutas = glob.glob(raiz, recursive=True)
			print('\nrutas:', len(rutas))
			for ruta in rutas:
			    print(ruta)

	glob.iglob(pathname, recursive=False) # returns an iterator instead of a list, which lets you process the results directly without storing them first.
		raiz = '/home/ant/img/2015/**'
		for ruta in glob.iglob(raiz, recursive=True):
		    print(ruta)

		glob.escape(pathname) # (since Python 3.4) lets pathname contain special characters that would otherwise be treated as glob wildcards.
		# glob.escape('/Viajar?.txt') returns '/Viajar[?].txt', i.e. it is equivalent to calling glob.glob('/Viajar[?].txt')


# FNMATCH MODULE: checks whether a file name matches a pattern.

	# fnmatch.fnmatch(filename, pattern) > returns True if the name matches the pattern; case is distinguished only if the operating system distinguishes it. '''
		# example:
		import os, fnmatch 
		for archivo in os.listdir('.'):
		    if fnmatch.fnmatch(archivo.upper(), '*.MD'): # compares the file name in upper case.
		        print(archivo)

	# fnmatch.fnmatchcase(filename, pattern) > True if it matches; always case-sensitive, regardless of the operating system.

	# fnmatch.translate(pattern) > translates the wildcards into a regular expression (see the short sketch below).
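A short, self-contained sketch of the fnmatch calls listed in these notes, together with glob.escape(); the sample file names are invented for illustration.

import fnmatch
import glob

names = ['README.md', 'notes.MD', 'Viajar?.txt']

# fnmatch.fnmatch(): case handling follows the operating system's convention.
print([n for n in names if fnmatch.fnmatch(n, '*.md')])

# fnmatch.fnmatchcase(): always case-sensitive.
print([n for n in names if fnmatch.fnmatchcase(n, '*.md')])

# fnmatch.translate(): turns the wildcard pattern into a regular expression,
# e.g. '(?s:.*\\.md)\\Z' on recent Python versions.
print(fnmatch.translate('*.md'))

# glob.escape(): neutralises the '?' so the pattern matches the literal name.
print(glob.escape('Viajar?.txt'))   # 'Viajar[?].txt'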
Example #56
0
                num = int(num)
                # e.g. ("SSL_R_BAD_DATA", ("ERR_LIB_SSL", "BAD_DATA", 390))
                codes.append((code, (libcode, name, num)))
    assert codes, f"no codes found in {h_file}"
    return codes


if __name__ == "__main__":
    openssl_inc = sys.argv[1]
    outfile = sys.argv[2]
    use_stdout = outfile == '-'
    f = sys.stdout if use_stdout else open(outfile, "w")
    # mnemonic -> (library code, error prefix, header file)
    error_libraries = {}
    for error_header in glob.glob(
            os.path.join(glob.escape(openssl_inc), 'include/openssl/*err.h')):
        base = os.path.basename(error_header)
        if base in ('buffererr.h', 'objectserr.h', 'storeerr.h'):
            # Deprecated in 3.0.
            continue
        mnemonic = base[:-5].upper()
        if mnemonic == "":
            # err.h
            lib_codes = {
                code: num
                for (code, (
                    _, _,
                    num)) in parse_error_codes(error_header, 'ERR_LIB_', None)
            }
        else:
            error_libraries[mnemonic] = (f'ERR_LIB_{mnemonic}',
Example #57
0
 def safe_glob(self, pathname):
     return glob.iglob(glob.escape(pathname) + '*')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("results_dir", type=str)
    parser.add_argument(
        "--method_dirs",
        type=str,
        nargs='+',
        help='directories in results_dir (all of them by default)')
    parser.add_argument("--method_names",
                        type=str,
                        nargs='+',
                        help='method names for the header')
    parser.add_argument("--web_dir",
                        type=str,
                        help='default is results_dir/web')
    parser.add_argument(
        "--sort_by",
        type=str,
        nargs=2,
        help='task and metric name to sort by, e.g. prediction mse')
    parser.add_argument("--no_ffmpeg", action='store_true')
    parser.add_argument("--batch_size",
                        type=int,
                        default=1,
                        help="number of samples in batch")
    parser.add_argument(
        "--num_samples",
        type=int,
        help=
        "number of samples for the table of sequence (all of them by default)")
    parser.add_argument("--show_se",
                        action='store_true',
                        help="show standard error in the table metrics")
    parser.add_argument("--only_metrics", action='store_true')
    args = parser.parse_args()

    if args.web_dir is None:
        args.web_dir = os.path.join(args.results_dir, 'web')
    webpage = html.HTML(args.web_dir,
                        'Experiment name = %s' %
                        os.path.normpath(args.results_dir),
                        reflesh=1)
    webpage.add_header1(os.path.normpath(args.results_dir))

    if args.method_dirs is None:
        unsorted_method_dirs = os.listdir(args.results_dir)
        # exclude web_dir and all directories that starts with web
        if args.web_dir in unsorted_method_dirs:
            unsorted_method_dirs.remove(args.web_dir)
        unsorted_method_dirs = [
            method_dir for method_dir in unsorted_method_dirs
            if not os.path.basename(method_dir).startswith('web')
        ]
        # put ground_truth and repeat in the front (if any)
        method_dirs = []
        for first_method_dir in ['ground_truth', 'repeat']:
            if first_method_dir in unsorted_method_dirs:
                unsorted_method_dirs.remove(first_method_dir)
                method_dirs.append(first_method_dir)
        method_dirs.extend(sorted(unsorted_method_dirs))
    else:
        method_dirs = list(args.method_dirs)
    if args.method_names is None:
        method_names = list(method_dirs)
    else:
        method_names = list(args.method_names)
    method_dirs = [
        os.path.join(args.results_dir, method_dir)
        for method_dir in method_dirs
    ]

    if args.sort_by:
        task_name, metric_name = args.sort_by
        sort_criterion = []
        for method_id, (method_name,
                        method_dir) in enumerate(zip(method_names,
                                                     method_dirs)):
            metric = load_metrics(
                os.path.join(method_dir, task_name, 'metrics', metric_name))
            sort_criterion.append(np.mean(metric))
        sort_criterion, method_ids, method_names, method_dirs = \
            zip(*sorted(zip(sort_criterion, range(len(method_names)), method_names, method_dirs)))
        webpage.add_header3('sorted by %s, %s' % tuple(args.sort_by))
    else:
        method_ids = range(len(method_names))

    # infer task and metric names from first method
    metric_fnames = sorted(
        glob.glob('%s/*/metrics/*.csv' % glob.escape(method_dirs[0])))
    task_names = []
    metric_names = []
    for metric_fname in metric_fnames:
        head, tail = os.path.split(metric_fname)
        task_name = head.split('/')[-2]
        metric_name, _ = os.path.splitext(tail)
        task_names.append(task_name)
        metric_names.append(metric_name)

    # save metrics
    webpage.add_table()
    header_txts = ['']
    header_colspans = [2]
    for task_name in task_names:
        if task_name != header_txts[-1]:
            header_txts.append(task_name)
            header_colspans.append(2 if args.show_se else
                                   1)  # mean and standard error for each task
        else:
            # group consecutive task names that are the same
            header_colspans[-1] += 2 if args.show_se else 1
    webpage.add_row(header_txts, header_colspans)
    subheader_txts = ['id', 'method']
    for task_name, metric_name in zip(task_names, metric_names):
        subheader_txts.append('%s (mean)' % metric_name)
        if args.show_se:
            subheader_txts.append('%s (se)' % metric_name)
    webpage.add_row(subheader_txts)
    all_metric_means = []
    for method_id, method_name, method_dir in zip(method_ids, method_names,
                                                  method_dirs):
        metric_txts = [method_id, method_name]
        metric_means = []
        for task_name, metric_name in zip(task_names, metric_names):
            metric = load_metrics(
                os.path.join(method_dir, task_name, 'metrics', metric_name))
            metric_mean = np.mean(metric)
            num_samples = len(metric)
            metric_se = np.std(metric) / np.sqrt(num_samples)
            metric_txts.append('%.4f' % metric_mean)
            if args.show_se:
                metric_txts.append('%.4f' % metric_se)
            metric_means.append(metric_mean)
        webpage.add_row(metric_txts)
        all_metric_means.append(metric_means)
    webpage.save()

    if args.only_metrics:
        return

    # infer task names from first method
    outputs_dirs = sorted(
        glob.glob('%s/*/outputs' % glob.escape(method_dirs[0])))
    task_names = [outputs_dir.split('/')[-2] for outputs_dir in outputs_dirs]

    # save image sequences
    image_dir = os.path.join(args.web_dir, 'images')
    webpage.add_table()
    header_txts = ['']
    subheader_txts = ['id']
    methods_subheader_txts = ['']
    header_colspans = [1]
    subheader_colspans = [1]
    methods_subheader_colspans = [1]
    num_samples = args.num_samples or num_samples
    for sample_ind in range(num_samples):
        if sample_ind % args.batch_size == 0:
            print("saving samples from %d to %d" %
                  (sample_ind, sample_ind + args.batch_size))
        ims = [None]
        txts = [sample_ind]
        links = [None]
        colspans = [1]
        for task_name in task_names:
            # load input images from first method
            input_fnames = sorted(
                glob.glob('%s/inputs/*_%05d_??.png' % (glob.escape(
                    os.path.join(method_dirs[0], task_name)), sample_ind)))
            input_images = load_images(input_fnames)
            # save input images as image sequence
            input_fnames = [
                os.path.join(task_name, 'inputs',
                             os.path.basename(input_fname))
                for input_fname in input_fnames
            ]
            save_images([
                os.path.join(image_dir, input_fname)
                for input_fname in input_fnames
            ], input_images)
            # infer output names from first method
            output_fnames = sorted(
                glob.glob('%s/outputs/*_%05d_??.png' % (glob.escape(
                    os.path.join(method_dirs[0], task_name)), sample_ind)))
            output_names = sorted(
                set(
                    os.path.splitext(os.path.basename(output_fname))[0][:-9]
                    for output_fname in output_fnames))  # remove _?????_??.png
            # load output images
            all_output_images = []
            for output_name in output_names:
                for method_name, method_dir in zip(method_names, method_dirs):
                    output_fnames = sorted(
                        glob.glob(
                            '%s/outputs/%s_%05d_??.png' %
                            (glob.escape(os.path.join(method_dir, task_name)),
                             output_name, sample_ind)))
                    output_images = load_images(output_fnames)
                    all_output_images.append(output_images)
            # concatenate output images of all the methods
            all_output_images = concat_images(all_output_images)
            # save output images as image sequence or as gif clip
            output_fname = os.path.join(
                task_name, 'outputs',
                '%s_%05d.gif' % ('_'.join(output_names), sample_ind))
            if args.no_ffmpeg:
                save_gif(os.path.join(image_dir, output_fname),
                         all_output_images,
                         fps=4)
            else:
                ffmpeg_save_gif(os.path.join(image_dir, output_fname),
                                all_output_images,
                                fps=4)

            if sample_ind == 0:
                header_txts.append(task_name)
                subheader_txts.extend(['inputs', 'outputs'])
                header_colspans.append(
                    len(input_fnames) + len(method_ids) * len(output_names))
                subheader_colspans.extend(
                    [len(input_fnames),
                     len(method_ids) * len(output_names)])
                method_id_strs = [
                    '%02d' % method_id for method_id in method_ids
                ]
                methods_subheader_txts.extend([''] + list(
                    itertools.chain(*[method_id_strs] * len(output_names))))
                methods_subheader_colspans.extend(
                    [len(input_fnames)] + [1] *
                    (len(method_ids) * len(output_names)))
            ims.extend(input_fnames + [output_fname])
            txts.extend([None] * (len(input_fnames) + 1))
            links.extend(input_fnames + [output_fname])
            colspans.extend([1] * len(input_fnames) +
                            [len(method_ids) * len(output_names)])

        if sample_ind == 0:
            webpage.add_row(header_txts, header_colspans)
            webpage.add_row(subheader_txts, subheader_colspans)
            webpage.add_row(methods_subheader_txts, methods_subheader_colspans)
        webpage.add_images(ims, txts, links, colspans, height=64, width=None)
        if (sample_ind + 1) % args.batch_size == 0:
            webpage.save()
    webpage.save()
 def get_checkpoint_fnames():
     for checkpoint_fname in glob.glob(
             os.path.join(glob.escape(log_dir), '*.ckpt.pth')):
         epoch = int(os.path.basename(checkpoint_fname).split('.')[0])
         if epoch < args.num_epochs:
             yield epoch, checkpoint_fname
Example #60
0
import glob, pathlib

def path_walk(p):
    # Escape glob metacharacters in the root, then match everything below it.
    pattern = pathlib.Path(glob.escape(p)) / "**"
    # str() keeps compatibility with Python versions where glob.iglob() requires a string pattern.
    paths = glob.iglob(str(pattern), recursive=True)
    return (pathlib.Path(path) for path in paths)