def run(args):
  if not args: args = [ '.' ]
  work = set()
  arg_filenames = []
  for arg in args:
    if os.path.isdir(arg):
      for dirpath, dirnames, filenames in os.walk(arg):
        work.update( os.path.join(dirpath, f)
                     for f in fnmatch.filter(filenames, '*.py') )
    else:
      arg_filenames.append(arg)
  work.update(fnmatch.filter(arg_filenames, '*.py'))
  for filename in work:
    try:
      old_style = python_code_parsing.find_old_style_classes(
        python_source_filename=filename)
    except Exception, e:
      import traceback
      print filename
      print traceback.format_exc()
      continue
    if old_style:
      print 'In file %s:' % filename
      print old_style
      print
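A standalone sketch (not from the project above) of the core call used throughout these examples: fnmatch.filter(names, pattern) keeps only the names matching a shell-style pattern, equivalent to a list comprehension over fnmatch.fnmatch but faster.

import fnmatch

# equivalent to [n for n in names if fnmatch.fnmatch(n, '*.py')]
print(fnmatch.filter(['a.py', 'b.txt', 'sub.py'], '*.py'))  # ['a.py', 'sub.py']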
Example #2
File: __init__.py Project: himito/madmom
def filter_files(files, suffix):
    """
    Filter the list to contain only files matching the given `suffix`.

    Parameters
    ----------
    files : list
        List of files to be filtered.
    suffix : str
        Return only files matching this suffix.

    Returns
    -------
    list
        List of files.

    """
    import fnmatch
    # make sure files is a list
    if not isinstance(files, list):
        files = [files]
    # no suffix given, return the list unaltered
    if suffix is None:
        return files
    # filter the files with the given suffix
    file_list = []
    if isinstance(suffix, list):
        # a list of suffixes is given
        for s in suffix:
            file_list.extend(fnmatch.filter(files, "*%s" % s))
    else:
        # a single suffix is given
        file_list.extend(fnmatch.filter(files, "*%s" % suffix))
    # return the filtered list
    return file_list
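A usage sketch for filter_files (the file names are hypothetical; the results follow from the code above):

filter_files(['a.wav', 'b.txt', 'c.wav'], '.wav')              # ['a.wav', 'c.wav']
filter_files(['a.wav', 'b.txt', 'c.flac'], ['.wav', '.flac'])  # ['a.wav', 'c.flac']
filter_files(['a.wav', 'b.txt'], None)                         # returned unaltered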
Example #3
def files(self):
    if self.is_dir:
        if self.recurse:
            for root_dir, dirnames, filenames in os.walk(self.scan_path):
                if self.filetype_filter != '*':
                    filenames = fnmatch.filter(filenames, self.filetype_filter)
                for filename in filenames:
                    processing_callbacks = [callback for type_glob_string, callback
                                            in self.filetype_handlers.iteritems()
                                            if fnmatch.fnmatch(filename, type_glob_string)]
                    yield ScannerJobFile(os.path.join(root_dir, filename), processing_callbacks, self.data_callback, self.context)
        else:
            file_paths = os.listdir(self.scan_path)
            if self.filetype_filter != '*':
                file_paths = fnmatch.filter(file_paths, self.filetype_filter)
            for file_path in file_paths:
                processing_callbacks = [callback for type_glob_string, callback
                                        in self.filetype_handlers.iteritems()
                                        if fnmatch.fnmatch(file_path, type_glob_string)]
                yield ScannerJobFile(os.path.join(self.scan_path, file_path), processing_callbacks, self.data_callback, self.context)
    else:  # single file
        processing_callbacks = [callback for type_glob_string, callback
                                in self.filetype_handlers.iteritems()
                                if fnmatch.fnmatch(self.scan_path, type_glob_string)]
        yield ScannerJobFile(self.scan_path, processing_callbacks, self.data_callback, self.context)
Example #4
def am_2_cmake_dir(directory):
    for root, dirnames, filenames in os.walk(directory):
        for filename in fnmatch.filter(filenames, 'Makefile.am'):
            print "root=%s" % root
            print "filename=%s" % filename
            amfile=os.path.join(root, filename)
            cmfile=os.path.join(root, 'CMakeLists.txt')
            is_module_root = len(fnmatch.filter(filenames, 'dune.module'))
            
            print ''.join(['Converting ', amfile, ' -> ', cmfile])
            print is_module_root
            am_2_cmake(amfile, cmfile, is_module_root)
    
    # Add doxygen target
    doxygendir=os.path.join(directory, 'doc', 'doxygen')
    if os.path.isdir(doxygendir) and os.path.exists(os.path.join(doxygendir, 'Doxylocal')):
        output=open(os.path.join(doxygendir, 'CMakeLists.txt'),'a')
        output.write('\n'.join(['# Create Doxyfile.in and Doxyfile, and doxygen documentation',
'add_doxygen_target()']))
        output.close()
    # Add directives to create CMake package configuration files with autotools
    output=open("Makefile.am", "a")
    output.write('\n'.join(['', '# Generate package configuration files for finding',
                            '# installed modules with CMake',
                            'include $(top_srcdir)/am/cmake-pkg-config\n']))
    output.close()
Example #5
def run_toggle(self, homedir):
    loop = 0
    # Record original log files.  There should never be overlap
    # with these even after they're removed.
    orig_logs = fnmatch.filter(os.listdir(homedir), "*gerLog*")
    while loop < 3:
        # Reopen with logging on to run recovery first time
        on_conn = self.wiredtiger_open(homedir, self.conn_on)
        on_conn.close()
        if loop > 0:
            # Get current log files.
            cur_logs = fnmatch.filter(os.listdir(homedir), "*gerLog*")
            scur = set(cur_logs)
            sorig = set(orig_logs)
            # There should never be overlap with the log files that
            # were there originally.  Mostly this checks that after
            # opening with logging disabled and then re-enabled, we
            # don't see log file 1.
            self.assertEqual(scur.isdisjoint(sorig), True)
            if loop > 1:
                # We should be creating the same log files each time.
                for l in cur_logs:
                    self.assertEqual(l in last_logs, True)
                for l in last_logs:
                    self.assertEqual(l in cur_logs, True)
            last_logs = cur_logs
        loop += 1
        # Remove all log files before opening without logging.
        cur_logs = fnmatch.filter(os.listdir(homedir), "*gerLog*")
        for l in cur_logs:
            path = homedir + "/" + l
            os.remove(path)
        off_conn = self.wiredtiger_open(homedir, self.conn_off)
        off_conn.close()
Example #6
def generate_complete_lists(mfcc_dir, trans_dir, list_dir):
    """
    Create list of all audio and transcription files

    @param mfcc_dir:     Directory with processed audio files (.mfc)
    @param trans_dir:    Directory with processed transcriptions (.txt)
    @param list_dir:     Output directory for audio_all.lst and trans_all.lst
    """

    audio_out_name = os.path.join(list_dir, "audio_all.lst")
    trans_out_name = os.path.join(list_dir, "trans_all.lst")

    try:
        audio_out = open(audio_out_name, "w")
    except IOError:
        print "Error: unable to write to " + audio_out_name
        return
    for (dir_path, dir_names, file_names) in os.walk(mfcc_dir):
        for file_name in fnmatch.filter(file_names, '*.mfc'):
            audio_out.write(os.path.join(dir_path, file_name) + "\n")
    audio_out.close()

    try:
        trans_out = open(trans_out_name, "w")
    except IOError:
        print "Error: unable to write to " + trans_out_name
        return
    for (dir_path, dir_names, file_names) in os.walk(trans_dir):
        for file_name in fnmatch.filter(file_names, '*.txt'):
            trans_out.write(os.path.join(dir_path, file_name) + "\n")
    trans_out.close()
Example #7
def make_histo_pngs():
    """
    """
    dire = "/local/home/data/S1/DN_analysis/WV/DN_histodb"
    files = []
    for root, dirnames, filenames in os.walk(dire):
        for filename in fnmatch.filter(filenames, "*.pkl"):
            files.append(os.path.join(root, filename))
    outdir = '/local/home/data/S1/DN_analysis/WV/DN_histo/'
    # WV1 HH
    f = fnmatch.filter(files, '*/s1a-wv1-slc-hh-*')
    tit = 'WV1 HH %i imagettes' % len(f)
    print tit
    make_histo_png(f, os.path.join(outdir, 's1a-wv1-slc-hh'), title=tit)
    # WV1 VV
    f = fnmatch.filter(files, '*/s1a-wv1-slc-vv-*')
    tit = 'WV1 VV %i imagettes' % len(f)
    print tit
    make_histo_png(f, os.path.join(outdir, 's1a-wv1-slc-vv'), title=tit)
    # WV2 HH
    f = fnmatch.filter(files, '*/s1a-wv2-slc-hh-*')
    tit = 'WV2 HH %i imagettes' % len(f)
    print tit
    make_histo_png(f, os.path.join(outdir, 's1a-wv2-slc-hh'), title=tit)
    # WV2 VV
    f = fnmatch.filter(files, '*/s1a-wv2-slc-vv-*')
    tit = 'WV2 VV %i imagettes' % len(f)
    print tit
    make_histo_png(f, os.path.join(outdir, 's1a-wv2-slc-vv'), title=tit)
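Note that fnmatch patterns are matched against the whole string and '*' also crosses path separators (unlike glob), which is why patterns such as '*/s1a-wv1-slc-hh-*' work on full paths here. A minimal check:

import fnmatch

paths = ['/data/s1a-wv1-slc-hh-001.pkl', '/data/s1a-wv2-slc-vv-002.pkl']
print(fnmatch.filter(paths, '*/s1a-wv1-slc-hh-*'))  # ['/data/s1a-wv1-slc-hh-001.pkl']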
Example #8
File: test.py Project: abigagli/CxxProf
def main():
    #set the PATH to find Thirdparty
    os.environ['PATH'] = THIRDPARTY_PATH + '/boost/bin/'
    os.environ['PATH'] += ';' + THIRDPARTY_PATH + '/pluma/bin/'
    os.environ['PATH'] += ';' + THIRDPARTY_PATH + '/cmake/'
    
    #find our own components, set the PATH for them
    matches = []
    for root, dirnames, filenames in os.walk( INSTALL_PATH ):
        for filename in fnmatch.filter(dirnames, 'bin'):
            matches.append(os.path.join(root, filename))
    for path in matches:
        os.environ['PATH'] = os.environ['PATH'] + ';' + path
    
    #search for projects which need to be tested
    for root, dirnames, filenames in os.walk( BUILD_PATH ):
        for filename in fnmatch.filter(filenames, 'CTestTestfile.cmake'):            
            os.chdir( root )
            
            #run the tests
            testCmd = []
            testCmd.append(CTEST_EXE)
            testCmd.append("--no-compress-output")
            testCmd.append("-T")
            testCmd.append("Test")
            testCmd.append(".")
            process = subprocess.Popen(testCmd)
            process.wait()
            print "Tests executed with errorcode: " + str(process.returncode)
Example #9
    def _load_i18n_dir(self, basepath):
        """
        Loads an i18n directory (Zope3 PTS format)
        Format:
            Products/MyProduct/i18n/*.po
        The language and domain are stored in the po file
        """
        log('looking into ' + basepath, logging.DEBUG)
        if not os.path.isdir(basepath):
            log('it does not exist', logging.DEBUG)
            return

        # print deprecation warning for mo files
        depr_names = fnmatch.filter(os.listdir(basepath), '*.mo')
        if depr_names:
            import warnings
            warnings.warn(
                'Compiled po files (*.mo) found in %s. '
                'PlacelessTranslationService now compiles '
                'mo files automatically. All mo files have '
                'been ignored.' % basepath, DeprecationWarning, stacklevel=4)

        # load po files
        names = fnmatch.filter(os.listdir(basepath), '*.po')
        if not names:
            log('nothing found', logging.DEBUG)
            return
        for name in names:
            self._load_catalog_file(name, basepath)

        log('Initialized:', detail = repr(names) + (' from %s\n' % basepath))
Example #10
  def _link_current_reports(self, report_dir, link_dir, preserve):
    # Kill everything not preserved.
    for name in os.listdir(link_dir):
      path = os.path.join(link_dir, name)
      if name not in preserve:
        if os.path.isdir(path):
          safe_rmtree(path)
        else:
          os.unlink(path)

    # Link ~all the isolated run/ dir contents back up to the stable workdir
    # NB: When batching is enabled, files can be emitted under different subdirs. If those files
    # have the same name, the last one encountered wins. This may
    # result in a loss of information from the ignored files. We're OK with this because:
    # a) We're planning on deprecating this loss of information.
    # b) It is the same behavior as existed before batching was added.
    for root, dirs, files in safe_walk(report_dir, topdown=True):
      dirs.sort()  # Ensure a consistent walk order for sanity sake.
      for f in itertools.chain(fnmatch.filter(files, '*.err.txt'),
                               fnmatch.filter(files, '*.out.txt'),
                               fnmatch.filter(files, 'TEST-*.xml')):
        src = os.path.join(root, f)
        dst = os.path.join(link_dir, f)
        safe_delete(dst)
        os.symlink(src, dst)

    for path in os.listdir(report_dir):
      if path in ('coverage', 'reports'):
        src = os.path.join(report_dir, path)
        dst = os.path.join(link_dir, path)
        os.symlink(src, dst)
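The itertools.chain call above applies several patterns in a single loop over the combined match lists; reduced to a standalone sketch with made-up names:

import fnmatch
import itertools

names = ['run.err.txt', 'run.out.txt', 'TEST-foo.xml', 'notes.md']
matched = list(itertools.chain(fnmatch.filter(names, '*.err.txt'),
                               fnmatch.filter(names, '*.out.txt'),
                               fnmatch.filter(names, 'TEST-*.xml')))
print(matched)  # ['run.err.txt', 'run.out.txt', 'TEST-foo.xml']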
Example #11
def __init__(self, src, dest, destsubdir, exclude=["CVS", ".svn"], include=None):
    """creates the fileset by walking through src directory"""
    self.src = src
    self.dest = dest
    self.destsubdir = destsubdir
    self.exclude = exclude
    self.include = include
    for root, dirs, files in os.walk(self.src):
        filenames = []
        for exclude in self.exclude:
            for exdir in fnmatch.filter(dirs, exclude):
                dirs.remove(exdir)
            for f in fnmatch.filter(files, exclude):
                files.remove(f)
        if self.include is not None:
            filtered_names = set()
            for include in self.include:
                filtered_names.update(fnmatch.filter(files, include))
            files = sorted(filtered_names)
        for name in files:
            filenames.append(os.path.join(root, name))
        if len(filenames) > 0:
            destsubdirname = root.replace(self.src, self.destsubdir, 1)
            destpath = os.path.join(self.dest, destsubdirname)
            self.append((destpath, filenames))
Example #12
def package_output(outDir, dirMatch, fileMatch, newLabel):
	for root, dirnames, fnames in os.walk(pipeOutputDir):	
		for dirname in fnmatch.filter(dirnames, dirMatch):
				
			subject = os.path.basename(os.path.normpath(root))
	
			for r, d, filenames in os.walk(os.path.join(root,dirname)):
				for filename in fnmatch.filter(filenames, fileMatch):
					filetitle, fileExtension = os.path.splitext(filename)
			
					oldFile = os.path.join(r,filename)
					print oldFile

					newFile = os.path.join(outDir,(dataSet + '_' + subject + '_' + newLabel + fileExtension))
					print newFile

					shutil.copyfile(oldFile,newFile)

					if dirMatch == 'biginvariant' or dirMatch == 'smallinvariant':
					
						outZip = newFile + '.zip'
						make_zipfile(outZip, newFile)
						os.remove(newFile)
					
	return 'Success'
Example #13
def walk(self, path=None, filter=None, blacklist=None, recursive=None):
    path = path or self.path
    filter = filter or self.filter
    recursive = recursive or self.recursive
    blacklist = blacklist or self.blacklist

    if isinstance(filter, basestring):
        filter = [filter]

    for root, dirnames, filenames in os.walk(path):
        if filter:
            filtered_files = []
            for f in filter:
                filtered_files += fnmatch.filter(filenames, f)
            filenames = set(filtered_files)

        # filter blacklist
        if blacklist:
            filtered_files = []
            for b in blacklist:
                filtered_files += fnmatch.filter(filenames, b)

            # now remove all paths in filenames that are in filtered_files
            filenames = [f for f in filenames if f not in filtered_files]

        for filename in filenames:
            yield os.path.join(root, filename)
        if not recursive:
            break
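The include-then-blacklist order used above can be reduced to a list-level sketch:

import fnmatch

names = ['a.py', 'a.txt', 'test_a.py']
kept = fnmatch.filter(names, '*.py')          # include patterns first
banned = set(fnmatch.filter(kept, 'test_*'))  # then the blacklist
kept = [n for n in kept if n not in banned]
print(kept)  # ['a.py']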
Example #14
def main():

    print("Validating Config Style")

    sqf_list = []
    bad_count = 0

    parser = argparse.ArgumentParser()
    parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default="")
    args = parser.parse_args()

    # Allow running from root directory as well as from inside the tools directory
    rootDir = "../addons"
    if (os.path.exists("addons")):
        rootDir = "addons"

    for root, dirnames, filenames in os.walk(rootDir + '/' + args.module):
        for filename in fnmatch.filter(filenames, '*.cpp'):
            sqf_list.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, '*.hpp'):
            sqf_list.append(os.path.join(root, filename))

    for filename in sqf_list:
        bad_count = bad_count + check_config_style(filename)

    print("------\nChecked {0} files\nErrors detected: {1}".format(len(sqf_list), bad_count))
    if (bad_count == 0):
        print("Config validation PASSED")
    else:
        print("Config validation FAILED")

    return bad_count
Example #15
def locate(pattern, root):
    """Searches for filenames specified by the pattern.

       Args:
           pattern: The file pattern(s) to search for. Can be a single
              glob string or a list of glob strings.
           root: A string containing the relevant folder to search.

       Returns:
           A list containing the files found.
    """

    matches = []

    # If the pattern to search is a list do:
    if type(pattern) is list:
        for path, dirs, files in os.walk(os.path.abspath(root)):
            for ext in pattern:
                for filename in fnmatch.filter(files, ext):
                    matches.append(os.path.join(path, filename))
        return matches


    # If the pattern is a single string do:
    else:
        for path, dirs, files in os.walk(os.path.abspath(root)):
            for filename in fnmatch.filter(files, pattern):
                matches.append(os.path.join(path, filename))
        return matches
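A usage sketch (the paths are hypothetical):

locate('*.txt', '/tmp')            # all .txt files anywhere under /tmp
locate(['*.py', '*.pyc'], 'src')   # union of both patterns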
Example #16
def file_generator(dir_path, random_order=False, limit=None):
    """Generates full file paths to all xml files in `dir_path`.

    :param dir_path: The path to get files from.
    :param random_order: If True, will generate file names randomly (possibly with repeats) and will never stop
        generating file names.
    :param limit: If not None, will limit the number of files generated to this integer.
    """
    count = 0
    if not random_order:
        for root, dir_names, file_names in os.walk(dir_path):
            for file_name in fnmatch.filter(file_names, '*.xml'):
                yield os.path.join(root, file_name).replace('\\', '/')
                count += 1
                if count == limit:
                    return
    else:
        while True:
            for root, dir_names, file_names in os.walk(dir_path):
                shuffle(dir_names)
                names = fnmatch.filter(file_names, '*.xml')
                if names:
                    shuffle(names)
                    yield os.path.join(root, names[0]).replace('\\', '/')
                    break
            count += 1
            if count == limit:
                return
Example #17
def get_model_param_file_from_directory(model_folder, iteration=None):
    """
    Gets the 003500.pth and 003500.pth.opt files from the specified folder

    :param model_folder: location of the folder containing the param files 001000.pth. Can be absolute or relative path. If relative then it is relative to pdc/trained_models/
    :type model_folder:
    :param iteration: which index to use, e.g. 3500, if None it loads the latest one
    :type iteration:
    :return: model_param_file, optim_param_file, iteration
    :rtype: str, str, int
    """

    if not os.path.isdir(model_folder):
        pdc_path = getPdcPath()
        model_folder = os.path.join(pdc_path, "trained_models", model_folder)

    # find idx.pth and idx.pth.opt files
    if iteration is None:
        files = os.listdir(model_folder)
        model_param_file = sorted(fnmatch.filter(files, '*.pth'))[-1]
        iteration = int(model_param_file.split(".")[0])
        optim_param_file = sorted(fnmatch.filter(files, '*.pth.opt'))[-1]
    else:
        prefix = getPaddedString(iteration, width=6)
        model_param_file = prefix + ".pth"
        optim_param_file = prefix + ".pth.opt"

    print "model_param_file", model_param_file
    model_param_file = os.path.join(model_folder, model_param_file)
    optim_param_file = os.path.join(model_folder, optim_param_file)

    return model_param_file, optim_param_file, iteration
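Picking the latest checkpoint with sorted(...)[-1] relies on the zero-padded, fixed-width file names; note also that '*.pth' does not match the '.pth.opt' files, since fnmatch patterns must match the whole name:

import fnmatch

files = ['000500.pth', '003500.pth', '003500.pth.opt']
print(sorted(fnmatch.filter(files, '*.pth'))[-1])  # '003500.pth'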
Example #18
def start_project(copy_to=None, copy_from=None, no_prompt=False, no_git=False):
    if not copy_from:
        copy_from = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                    'templates', 'project', 'ff0000'))

    if not copy_to:
        copy_to = os.getcwd()
        copy_tree(copy_from, copy_to)
        matches = []
        for root, dirnames, filenames in os.walk(copy_to):
            for filename in fnmatch.filter(filenames, '*.pyc') + \
                            fnmatch.filter(filenames, 'red_start*'):
                matches.append(os.path.join(root, filename))

        for m in matches:
            if os.path.exists(m):
                os.remove(m)
    else:
        if os.path.exists(copy_to):
            print "%s already exists" % copy_to
            return
        shutil.copytree(copy_from, copy_to, ignore=shutil.ignore_patterns('red_start*','*.pyc'))

    # 2. If template has a settings file, run its after_copy method
    settings_path = os.path.join(copy_from, 'red_start_settings.py')
    if os.path.exists(settings_path):
        sys.path.append(copy_from)
        import red_start_settings
        if callable(getattr(red_start_settings, 'after_copy', None)):
            # First change current directory to copy_to
            os.chdir(copy_to)
            red_start_settings.after_copy(no_prompt=no_prompt, no_git=no_git)
        sys.path.remove(copy_from)
Example #19
File: Monitor.py Project: junhe/chopper
def get_all_my_files(target):
    matches = []
    for root, dirnames, filenames in os.walk(target):
        for filename in fnmatch.filter(filenames, "*.file"):
            matches.append(os.path.join(root, filename))
        dirnames[:] = fnmatch.filter(dirnames, "dir.*")
    return matches
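The dirnames[:] slice assignment above is the standard way to prune os.walk: mutating the list in place controls which subdirectories get visited, whereas rebinding dirnames would have no effect on the walk. For example:

import fnmatch
import os

for root, dirnames, filenames in os.walk('.'):
    # descend only into directories whose names do not start with a dot
    dirnames[:] = fnmatch.filter(dirnames, '[!.]*')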
Example #20
def generateDatabaseJson():
    databaseJson = {
        'checksum': [],
        'matching': [],
        'signature_database_version': '1.0.0',
        'wms_version': '1.0'
    }

    for root, dirnames, filenames in os.walk("./signatures/checksum/"):
        for filename in fnmatch.filter(filenames, '*.json'):
            filepath = os.path.join(root, filename)
            filedata = open(filepath, 'rb').read()  # bytes for hashlib
            hash = hashlib.md5()
            hash.update(filedata)
            filechecksum = hash.hexdigest()
            databaseJson["checksum"].append({
                'filename': filepath,
                'md5': filechecksum
            })

    for root, dirnames, filenames in os.walk("./signatures/matching/"):
        for filename in fnmatch.filter(filenames, '*.json'):
            filepath = os.path.join(root, filename)
            filedata = open(filepath, 'rb').read()  # bytes for hashlib
            hash = hashlib.md5()
            hash.update(filedata)
            filechecksum = hash.hexdigest()
            databaseJson["rules"].append({
                'filename': filepath,
                'md5': filechecksum
            })

    with open('./signatures/database.json', 'w') as outfile:
        json.dump(databaseJson, outfile)
Example #21
def RefTestFile(EngineDumpExe, file, refdir):
	# create an XML dump and bitmap renderings of all pages
	base = os.path.splitext(os.path.split(file)[1])[0]
	tgaPath = pjoin(refdir, base + "-%d.cmp.tga")
	xmlDump = EngineDump(EngineDumpExe, file, tgaPath)
	
	# compare the XML dumps (remove the dump if it's the same as the reference)
	xmlRefPath = pjoin(refdir, base + ".ref.xml")
	xmlCmpPath = pjoin(refdir, base + ".cmp.xml")
	if not os.path.isfile(xmlRefPath):
		open(xmlRefPath, "wb").write(xmlDump)
	elif open(xmlRefPath, "rb").read() != xmlDump:
		open(xmlCmpPath, "wb").write(xmlDump)
		print "  FAIL!", xmlCmpPath
	elif os.path.isfile(xmlCmpPath):
		os.remove(xmlCmpPath)
	
	# compare all bitmap renderings (and create diff bitmaps where needed)
	for file in fnmatch.filter(os.listdir(refdir), base + "-[0-9]*.ref.tga"):
		tgaRefPath = pjoin(refdir, file)
		tgaCmpPath, tgaDiffPath = tgaRefPath[:-8] + ".cmp.tga", tgaRefPath[:-8] + ".diff.tga"
		if BitmapDiff(tgaRefPath, tgaCmpPath, tgaDiffPath):
			print "  FAIL!", tgaCmpPath
		else:
			os.remove(tgaCmpPath)
			if os.path.isfile(tgaDiffPath):
				os.remove(tgaDiffPath)
	for file in fnmatch.filter(os.listdir(refdir), base + "-[0-9]*.cmp.tga"):
		tgaCmpPath = pjoin(refdir, file)
		tgaRefPath = tgaCmpPath[:-8] + ".ref.tga"
		if not os.path.isfile(tgaRefPath):
			os.rename(tgaCmpPath, tgaRefPath)
Example #22
def apply_filter(manifest_fp, files):
    pats = open(manifest_fp, 'rt').readlines()

    new_pats = []
    for pat in pats:
        pat = re.sub('#.*', '', pat)
        pat = pat.strip()
        if pat:
            new_pats.append(pat)
    pats = new_pats

    includes = []
    excludes = []
    for pat in pats:
        if pat.startswith('-'):
            pat = re.sub('^[-]\s*', '', pat)
            excludes.append(pat)
        elif pat.startswith('+'):
            pat = re.sub('^[+]\s*', '', pat)
            includes.append(pat)

    new_files = set()
    for pat in includes:
        new_files = new_files.union(fnmatch.filter(files, pat))

    for pat in excludes:
        new_files = new_files - set(fnmatch.filter(new_files, pat))
    files = new_files

    return files
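A usage sketch with an assumed manifest (the patterns and file names here are hypothetical):

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fh:
    fh.write('+ *.py\n- test_*\n# a comment\n')
print(apply_filter(fh.name, ['a.py', 'test_a.py', 'README']))  # {'a.py'}
os.unlink(fh.name)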
Example #23
	def __getSourceFiles(self, target):
		""" This functions gathers and returns all .cpp files for the given target. """
		source = []

		# walk source directory and find ONLY .cpp files
		for (dirpath, dirnames, filenames) in os.walk(self.__src_dir):
			for name in fnmatch.filter(filenames, "*.cpp"):
				source.append(os.path.join(dirpath, name))

		# exclude files depending on target, executables will be added later
		xpatterns = ["*-X.cpp"]
		excluded = []

		# only the target "Tests" requires Benchmark and GTest files
		if (target not in ["Tests"]):
			# exclude files matching following patterns
			xpatterns += ["*GTest.cpp","*Benchmark.cpp"]

		for pattern in xpatterns:
			for name in fnmatch.filter(source, pattern):
				excluded.append(name)

		#print("excluded source files: {0}".format(excluded))
		source = [name for name in source if name not in excluded]

		# add executable
		if target == "Tests":
			source.append(os.path.join(self.__src_dir, "Unittests-X.cpp"))
		elif target in ["Core","Lib"]:
			pass # no executable
		else:
			print("Unknown target: {0}".format(target))
			exit(1)
		return source
Example #24
def evaluateStrings(mergedStringDict):
    weightedDict = {}
    stringDict = {}
    for string in mergedStringDict:
        c = string.count('*')
        weight = pow(2, len(string)-c)
        #weight = len(string)-c
        weightedDict[string] = mergedStringDict[string] * weight 
    #TODO 
    stringList = mergedStringDict.keys()

    #print '\nmergedStringDict'
    #for string in sorted(mergedStringDict, key = mergedStringDict.get, reverse=True):
        #print string, mergedStringDict[string], fnmatch.filter(stringList, string)
    
    #print '\nweightedDict'
    for string in sorted(weightedDict, key = weightedDict.get, reverse=True):
        #print string, weightedDict[string], '(',mergedStringDict[string], ") :", fnmatch.filter(stringList, string) 
        stringDict[string] = 0
        for matchedString in fnmatch.filter(stringList, string):
            stringDict[string] += weightedDict[matchedString]
        stringDict[string] /= len(fnmatch.filter(stringList, string))

    totalScore = sum(stringDict.values())
    #print '\nstringDict'
    #print 'totalScore: ', totalScore
    #for string in sorted(stringDict, key = stringDict.get, reverse=True):
        #print string, stringDict[string], '(', str(stringDict[string]*100/totalScore), '%)', fnmatch.filter(stringList, string)
    
    return stringDict
Example #25
def remove_thumbnails(pic_url, root=settings.MEDIA_ROOT, url_root=settings.MEDIA_URL):
    if not pic_url:
        return  # empty url

    file_name = get_path_from_url(pic_url, root, url_root)
    base, ext = os.path.splitext(os.path.basename(file_name))
    basedir = os.path.dirname(file_name)
    for file in fnmatch.filter(os.listdir(str(basedir)), _THUMBNAIL_GLOB % (base, ext)):
        path = os.path.join(basedir, file)
        try:
            os.remove(path)
        except OSError:
            pass
    for file in fnmatch.filter(os.listdir(str(basedir)), _THUMBNAIL_ASPECT % (base, ext)):
        path = os.path.join(basedir, file)
        try:
            os.remove(path)
        except OSError:
            pass
    for file in fnmatch.filter(os.listdir(str(basedir)), _WATERMARK % (base, ext)):
        path = os.path.join(basedir, file)
        try:
            os.remove(path)
        except OSError:
            pass
Example #26
File: utils.py Project: Aulla/eneboo-tools
def find_files(basedir, glob_pattern = "*", abort_on_match = False):
    ignored_files = [
        "*~",
        ".*",
        "*.bak",
        "*.bakup",
        "*.tar.gz",
        "*.tar.bz2",
        "*.BASE.*",
        "*.LOCAL.*",
        "*.REMOTE.*",
        "*.*.rej",
        "*.*.orig",
    ]
    retfiles = []
    
    for root, dirs, files in os.walk(basedir):
        baseroot = os.path.relpath(root,basedir)
        for pattern in ignored_files:
            delfiles = fnmatch.filter(files, pattern)
            for f in delfiles: files.remove(f)
            deldirs = fnmatch.filter(dirs, pattern)
            for f in deldirs: dirs.remove(f)
        pass_files = [ os.path.join( baseroot, filename ) for filename in fnmatch.filter(files, glob_pattern) ]
        if pass_files and abort_on_match:
            dirs[:] = [] 
        retfiles += pass_files
    return retfiles
Example #27
File: utils.py Project: Aulla/eneboo-tools
def get_max_mtime(path, filename):
    ignored_files = [
        "*~",
        ".*",
        "*.bak",
        "*.bakup",
        "*.tar.gz",
        "*.tar.bz2",
        "*.BASE.*",
        "*.LOCAL.*",
        "*.REMOTE.*",
        "*.*.rej",
        "*.*.orig",
    ]
    
    basedir = os.path.join(path, os.path.dirname(filename))
    max_mtime = 0
    for root, dirs, files in os.walk(basedir):
        for pattern in ignored_files:
            delfiles = fnmatch.filter(files, pattern)
            for f in delfiles: files.remove(f)
            deldirs = fnmatch.filter(dirs, pattern)
            for f in deldirs: dirs.remove(f)
        for filename in files:
            filepath = os.path.join(root,filename)
            file_stat = os.stat(filepath)
            if file_stat.st_mtime > max_mtime:
                max_mtime = file_stat.st_mtime
    return max_mtime
Example #28
File: json_utils.py Project: Jorges1000/TS
    def __init__(self, libraryName):
        
        self.fastaDir = "%s/%s" % (TMAP_REFERENCE_BASE_LOCATION, libraryName)
        self.fasta = None
        import fnmatch
        try:
            self.fasta = str( fnmatch.filter( os.listdir( self.fastaDir ), "*.fasta" )[-1] )
        except OSError:
            print "[ReferenceLibrary]: searching for reference library.."
            ref_base = None
            for i in xrange(7):
                ref_base = "/results%s/referenceLibrary/tmap-f%s/%s" % (str(i), TMAP_FILE_INDEX_VERSION, libraryName)
                try:
                    self.fasta = str(fnmatch.filter( os.listdir( ref_base ), "*.fasta" )[-1])
                    break
                except:
                    print "[ReferenceLibrary] not found in ", ref_base
                    
                    
            if not self.fasta:
                try:
                    ref_base = "/results/referenceLibrary/tmap-f%s/%s" % (TMAP_FILE_INDEX_VERSION, libraryName)
                    self.fasta = str( fnmatch.filter( os.listdir( ref_base ), "*.fasta" )[-1] )
                except:
                    print "[ReferenceLibrary] not found in ", ref_base
                    ref_base = None

            self.fastaDir = ref_base
Example #29
def SelectRuns(globs, shard, shards):
  index = 0
  results = []
  all_configs = set()
  all_runs = {}

  for glob in globs:
    all_configs.update(fnmatch.filter(CONFIGS.keys(), glob))
    for fileset_name, fileset_data in FILE_SETS.iteritems():
      if fnmatch.fnmatch(fileset_name, glob):
        matching_files = fileset_data['files']
      else:
        matching_files = fnmatch.filter(fileset_data['files'], glob)
      for f in matching_files:
        all_runs.setdefault(f, set()).update(fileset_data['bitrates'])

  for config in sorted(all_configs):
    for f, bitrates in all_runs.iteritems():
      for b in sorted(bitrates):
        if index % shards == shard:
          result = dict(CONFIGS[config])
          result.update({'config': config, 'filename': f, 'bitrate': b})
          results.append(result)
        index += 1
  return sorted(results, key=lambda x: x['configure_flags'])
Example #30
    def parseDirectoryName(self, path):
        # verify directory name structure
        directory_name = os.path.basename(path)
        parts = directory_name.split(' - ')
        if len(parts) != 3:
            # check directory contains mp3s..
            if len(fnmatch.filter(os.listdir(path), '*.mp3')) == 0:
                raise AppException("No MP3s found in '{}'".format(directory_name))
            else:
                raise AppException("Badly formed directory name '{}'".format(directory_name))

        self.meta = {}
        self.meta['artist'] = parts[0]
        self.meta['year'] = int(parts[1])
        self.meta['album'] = parts[2]

        # in compilation mode, artist will vary per track
        self.meta['album_artist'] = self.meta['artist']

        # clean '(Disc *)' from album name; don't want it in ID3 tag
        if '(Disc' in self.meta['album']:
            self.meta['album'] = self.meta['album'][0:self.meta['album'].find('(Disc')-1]

        # count mp3's for track total
        self.meta['total_num_tracks'] = len(fnmatch.filter(os.listdir(path), '*.mp3'))

        self.meta['image'] = None
        if os.path.exists(os.path.join(path, 'folder.jpg')):
            # extract existing 'FRONT_COVER' image
            # check new/existing has largest height
            # set largest as folder.jpg
            with open(os.path.join(path, 'folder.jpg'), 'rb') as f:
                self.meta['image'] = f.read()
Example #31
    def __init__(self, site, config={}):
        """ Load specified site, exit out if site isn't found. """

        # webchomp configuration
        self.config = config

        # set site
        self.site = site

        # get logger
        self.logger = logging.getLogger('webchomp')

        # Store site path
        if "path" in self.config and 'site' in self.config['path']:
            conf_site_path = os.path.normpath(self.config['path']['site'])
            self.site_path = os.path.normpath(
                os.path.join(conf_site_path, site))
        else:
            self.site_path = os.path.normpath("site/%s" % site)

        # Store other useful paths
        self.site_page_path = "%s/page" % self.site_path
        self.site_template_path = "%s/template" % self.site_path
        self.site_asset_path = "%s/asset" % self.site_path

        if "path" in self.config and 'extension' in self.config['path']:
            self.extension_path = os.path.normpath(
                self.config['path']['extension'])
        else:
            self.extension_path = os.path.normpath("extension")

        self.site_extension_path = os.path.normpath("%s/extension" %
                                                    self.site_path)

        if "path" in self.config and 'output' in self.config['path']:
            conf_output_path = os.path.normpath(self.config['path']['output'])
            self.site_output_path = os.path.normpath(
                os.path.join(conf_output_path, self.site))
        else:
            self.site_output_path = os.path.normpath("output/%s" % self.site)

        # site pages cache
        self.site_pages_cache = {}

        # list of pages for site, includes dynamic ones
        self.pages = self.get_site_pages()

        # page info cache
        self.page_info = {}

        # verify site exists
        if not os.path.exists(self.site_path):
            self.logger.critical("Site '%s' does not exist" % site)
            return sys.exit()

        # Found, yay!
        self.logger.info("Found site '%s'" % (site))

        # create site output if not exist
        if not os.path.exists("output"):
            os.mkdir("output")
        if not os.path.exists("output/%s" % self.site):
            os.mkdir("output/%s" % self.site)
            os.mkdir("output/%s/asset" % self.site)
            os.mkdir("output/%s/asset/css" % self.site)

        # load site conf yml
        self.site_conf = {}
        if os.path.exists("%s/site.yml" % self.site_path):
            f_io = open("%s/site.yml" % self.site_path, "r")
            self.site_conf = yaml.safe_load(f_io.read())
            f_io.close()

        # load all jinja extensions
        self.extensions = {}
        for root, dirnames, filenames in itertools.chain(
                os.walk(self.extension_path),
                os.walk(self.site_extension_path)):
            for filename in fnmatch.filter(filenames, '*.py'):
                extension = imp.load_source(
                    "extension_%s" % os.path.splitext(filename)[0],
                    os.path.join(root, filename))
                if hasattr(extension, 'jinja_extension'):
                    self.extensions["extension_%s" % os.path.splitext(filename)
                                    [0]] = extension.jinja_extension(self)

                self.logger.debug("Load extension: %s" %
                                  os.path.splitext(filename)[0])

        # load jinja functions
        self.jinja_functions = {}
        for extension in self.extensions:
            # append functions
            if hasattr(self.extensions[extension], "get_jinja_functions"):
                self.jinja_functions[extension.replace(
                    "extension_", "")] = dict(self.extensions[extension].
                                              get_jinja_functions().items())

        # load dynamic pages
        for extension in self.extensions:
            # append pages to page_info
            if hasattr(self.extensions[extension], "get_dynamic_pages"):
                dynamic_pages = self.extensions[extension].get_dynamic_pages()
                for key in dynamic_pages:
                    self.page_info[key] = dynamic_pages[key]
                    self.pages.append(key)
                    self.logger.debug("Generate dynamic page: %s" % key)
Example #32
File: train.py Project: rcornall/ezGO
def get_files(dataType="train"):
    inputDataFilesList = []
    for root, dirnames, filenames in os.walk(DATA_DIRECTORY):
        for filename in fnmatch.filter(filenames, '%s*.npz' % dataType):
            inputDataFilesList.append(os.path.join(root, filename))
    return inputDataFilesList
Example #33
# Extract to path
subprocess.call(
    [
        "unzip", ipa_path,
        "-d", working_path
    ]
)

# Look for file
import fnmatch
import plistlib

plist_matches = []
app = ""
for root, dirnames, filenames in os.walk(working_path):
    for filename in fnmatch.filter(filenames, 'Info.plist'):
        plist_matches.append(os.path.join(root, filename))
    for dirname in dirnames:
        if dirname.endswith(".app"):
            app = os.path.join(root, dirname)

# Read XML
bundle_identifier = ""
for elem in plist_matches:
    xml = plistlib.readPlistFromString(read_bin_xml(elem))
    if 'CFBundleIdentifier' in xml:
        bundle_identifier = xml['CFBundleIdentifier']
        break

subprocess.call(["open", "/Applications/Xcode.app/Contents/Developer/Applications/Simulator.app"])
Example #34
#Fetch default LO run_card.dat and set parameters
extras = { 'lhe_version':'2.0', 
           'cut_decays':'F', 
           'pdlabel':"'cteq6l1'"}
build_run_card(run_card_old=get_default_runcard(process_dir),run_card_new='run_card.dat',
               nevts=nevents,rand_seed=runArgs.randomSeed,beamEnergy=beamEnergy,extras=extras)

print_cards()
    
runName='run_01'     


str_param_card='MadGraph_2HDM_for_multitops_paramcard_400_new.dat'
for root, dirnames, filenames in os.walk('.'):
    for filename in fnmatch.filter(filenames, str_param_card):
        param_grid_location=(os.path.join(root, filename))

#generate(run_card_loc='run_card.dat',param_card_loc=None,mode=mode,proc_dir=process_dir,run_name=runName)
generate(run_card_loc='run_card.dat',param_card_loc=param_grid_location,mode=mode,proc_dir=process_dir,run_name=runName)
arrange_output(run_name=runName,proc_dir=process_dir,outputDS=runName+'._00001.events.tar.gz')  

   


#### Shower 
evgenConfig.description = 'MadGraph_ttbb'
evgenConfig.keywords+=['Higgs','jets']
evgenConfig.inputfilecheck = runName
runArgs.inputGeneratorFile=runName+'._00001.events.tar.gz'
Example #35
def find_files(directory, pattern):
    for root, dirs, files in os.walk(directory):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(root, filename)
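Usage, plus a rough modern alternative: on Python 3.5+ a similar recursive search can be written with glob's '**' (not identical — glob skips dotfiles by default):

for path in find_files('/var/log', '*.log'):
    print(path)

# import glob
# glob.glob('/var/log/**/*.log', recursive=True)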
Example #36
def FindInDirectory(directory, filename_filter):
  files = []
  for root, _dirnames, filenames in os.walk(directory):
    matched_files = fnmatch.filter(filenames, filename_filter)
    files.extend((os.path.join(root, f) for f in matched_files))
  return files
Example #37
    help="File which represents last time msggen generated templates")
p.add_argument("-s",
               "--messages",
               nargs="*",
               help="Messages to generate. Defaults to all")

options = p.parse_args()

messages_file = os.path.abspath(options.input)

#build speedup. This essentially tracks its own dependencies since it gets run
#every build anyway
if options.marker_file and os.path.exists(options.marker_file):
    dep_files = [messages_file]
    for root, dirnames, filenames in os.walk(options.template_dir):
        for filename in fnmatch.filter(filenames, "*.template*"):
            dep_files.append(os.path.join(root, filename))
    for root, dirnames, filenames in os.walk(os.path.dirname(sys.argv[0])):
        for filename in fnmatch.filter(filenames, "*.py"):
            dep_files.append(os.path.join(root, filename))
    marker_mtime = os.path.getmtime(options.marker_file)
    for dep_file in dep_files:
        if marker_mtime < os.path.getmtime(dep_file):
            break
    else:
        exit()

import hashlib

from msggenyacc import parser
from Cheetah.Template import Template
Example #38
def find_auth_file(dir):
    key_from_home = fnmatch.filter(os.listdir(dir), "cs202-auth-*")
    key_paths = [os.path.join(dir, f) for f in key_from_home]
    return key_paths
Example #39
#!/usr/bin/python
import os
import sys
import fnmatch

for root, dirnames, filenames in os.walk('.'):
    if fnmatch.filter(filenames, '*.py'):
        sys.path.append(root)
Example #40
    #################
    if runsig:

        ### set dataset loader
        if args.private: sdml = SigDatasetMapLoader()
        else: sdml = CentralSignalMapLoader()

        ### set sample name
        # sampleSig = 'mXX-150_mA-0p25_lxy-300|mXX-500_mA-1p2_lxy-300|mXX-800_mA-5_lxy-300'.split('|')
        # sampleSig.extend( 'mXX-100_mA-5_lxy-0p3|mXX-1000_mA-0p25_lxy-0p3'.split('|') )
        sampleSig = []
        if args.sigparam:
            for s in args.sigparam:
                if '*' in s or '?' in s:
                    sampleSig.extend(
                        fnmatch.filter(sdml.get_datasets('4mu').keys(), s))
                else:
                    sampleSig.append(s)
            sampleSig = list(set(sampleSig))

        print 'Signal samples to process:'
        print ', '.join(sampleSig)

        if '4mu' in args.channel:
            sigDS_4mu_inc, sigSCALE_4mu_inc = sdml.fetch('4mu')

            outnames = []
            for s in sampleSig:
                files, scale = sigDS_4mu_inc[s], sigSCALE_4mu_inc[s]
                _outname = outname[:-5] + '__' + s + outname[-5:]
                events_ = imp.MyEvents(files=files,
Example #41
import os
import fnmatch

from setuptools import setup

package_name = 'webots_ros2_universal_robot'
worlds = [
    'worlds/universal_robot_multiple.wbt', 'worlds/universal_robot_rviz.wbt',
    'worlds/universal_robot.wbt', 'worlds/universal_robot_lidar.wbt',
    'worlds/.universal_robot_multiple.wbproj',
    'worlds/.universal_robot_rviz.wbproj', 'worlds/.universal_robot.wbproj',
    'worlds/.universal_robot_lidar.wbproj'
]
textures = []
for rootPath, dirNames, fileNames in os.walk('worlds/textures'):
    for fileName in fnmatch.filter(fileNames, '*.jpg'):
        filePath = os.path.relpath(os.path.join(rootPath, fileName))
        textures.append(filePath)
launchers = [
    'launch/universal_robot.launch.py',
    'launch/universal_robot_multiple.launch.py',
    'launch/universal_robot_rviz.launch.py',
    'launch/universal_robot_rviz_dynamic.launch.py'
]

data_files = []
data_files.append(('share/ament_index/resource_index/packages',
                   ['resource/' + package_name]))
data_files.append(('share/' + package_name, launchers))
data_files.append(('share/' + package_name + '/worlds', worlds))
data_files.append(('share/' + package_name + '/worlds/textures', textures))
Example #42
def recursive_glob(treeroot, pattern):
    results = []
    for base, dirs, files in os.walk(treeroot):
        goodfiles = fnmatch.filter(files, pattern)
        results.extend(os.path.join(base, f) for f in goodfiles)
    return results
Example #43
    return os.path.join("src", "runtime", interop_file)


if __name__ == "__main__":
    setupdir = os.path.dirname(__file__)
    if setupdir:
        os.chdir(setupdir)

    sources = []
    for ext in (".sln", ".snk", ".config"):
        sources.extend(glob("*" + ext))

    for root, dirnames, filenames in os.walk("src"):
        for ext in (".cs", ".csproj", ".sln", ".snk", ".config", ".il", ".py",
                    ".c", ".h", ".ico"):
            for filename in fnmatch.filter(filenames, "*" + ext):
                sources.append(os.path.join(root, filename))

    for root, dirnames, filenames in os.walk("tools"):
        for ext in (".exe"):
            for filename in fnmatch.filter(filenames, "*" + ext):
                sources.append(os.path.join(root, filename))

    setup_requires = []
    interop_file = _get_interop_filename()
    if not os.path.exists(interop_file):
        setup_requires.append("pycparser")

    setup(name="pythonnet",
          version="2.1.0",
          description=".Net and Mono integration for Python",
Example #44
    parser.add_argument('work_dir', type=Path, help='Sets working directory')
    args = parser.parse_args()

    render_path = Path('../../target/debug/rendersvg').resolve()
    if not render_path.exists():
        raise RuntimeError('rendersvg executable not found')

    with open('allow-{}.txt'.format(args.backend), 'r') as f:
        allowed_files_list = f.read().splitlines()
    allowed_files_list.extend(CRASH_ALLOWED)

    prev_render_path = build_prev_version()

    start_idx = load_last_pos()
    files = os.listdir(args.in_dir)
    files = fnmatch.filter(files, '*.svg')
    files = sorted(files)
    for idx, file in enumerate(files):
        svg_path = args.in_dir / file
        png_path_prev = args.work_dir / change_ext(file, '_prev', 'png')
        png_path_curr = args.work_dir / change_ext(file, '_curr', 'png')
        diff_path = args.work_dir / change_ext(file, '_diff', 'png')

        # remove leftovers
        rm_file(png_path_prev)
        rm_file(png_path_curr)
        rm_file(diff_path)

        if idx < start_idx:
            continue
Example #45
File: apt.py Project: villgust/ansible
def package_status(m, pkgname, version, cache, state):
    try:
        # get the package from the cache, as well as the
        # low-level apt_pkg.Package object which contains
        # state fields not directly accessible from the
        # higher-level apt.package.Package object.
        pkg = cache[pkgname]
        ll_pkg = cache._cache[pkgname]  # the low-level package object
    except KeyError:
        if state == 'install':
            try:
                provided_packages = cache.get_providing_packages(pkgname)
                if provided_packages:
                    is_installed = False
                    upgradable = False
                    # when virtual package providing only one package, look up status of target package
                    if cache.is_virtual_package(pkgname) and len(
                            provided_packages) == 1:
                        package = provided_packages[0]
                        installed, upgradable, has_files = package_status(
                            m, package.name, version, cache, state='install')
                        if installed:
                            is_installed = True
                    return is_installed, upgradable, False
                m.fail_json(msg="No package matching '%s' is available" %
                            pkgname)
            except AttributeError:
                # python-apt version too old to detect virtual packages
                # mark as upgradable and let apt-get install deal with it
                return False, True, False
        else:
            return False, False, False
    try:
        has_files = len(pkg.installed_files) > 0
    except UnicodeDecodeError:
        has_files = True
    except AttributeError:
        has_files = False  # older python-apt cannot be used to determine non-purged

    try:
        package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
    except AttributeError:  # python-apt 0.7.X has very weak low-level object
        try:
            # might not be necessary as python-apt post-0.7.X should have current_state property
            package_is_installed = pkg.is_installed
        except AttributeError:
            # assume older version of python-apt is installed
            package_is_installed = pkg.isInstalled

    if version:
        versions = package_versions(pkgname, pkg, cache._cache)
        avail_upgrades = fnmatch.filter(versions, version)

        if package_is_installed:
            try:
                installed_version = pkg.installed.version
            except AttributeError:
                installed_version = pkg.installedVersion

            # Only claim the package is installed if the version is matched as well
            package_is_installed = fnmatch.fnmatch(installed_version, version)

            # Only claim the package is upgradable if a candidate matches the version
            package_is_upgradable = False
            for candidate in avail_upgrades:
                if package_version_compare(candidate, installed_version) > 0:
                    package_is_upgradable = True
                    break
        else:
            package_is_upgradable = bool(avail_upgrades)
    else:
        try:
            package_is_upgradable = pkg.is_upgradable
        except AttributeError:
            # assume older version of python-apt is installed
            package_is_upgradable = pkg.isUpgradable

    return package_is_installed, package_is_upgradable, has_files
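Because the requested version is passed through fnmatch.filter(versions, version), it may itself be a shell-style wildcard; a standalone illustration:

import fnmatch

versions = ['1.0.1', '1.0.2', '2.0.0']
print(fnmatch.filter(versions, '1.0*'))  # ['1.0.1', '1.0.2']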
Example #46
def find_files(root, pattern='*'):
    matches = []
    for root, _, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(root, filename))
    return matches
Example #47
def moose_docs_import(root_dir=None,
                      include=None,
                      exclude=None,
                      base=None,
                      extensions=None):
    """
    Cretes a list of files to "include" from files, include, and/or exclude lists. All paths should
    be defined with respect to the repository base directory.

    Args:
        root_dir[str]: The directory which all other paths should be relative to.
        base[str]: The path to the base directory, this is the directory that is walked
                   to search for files that exists. It should be defined relative to the root_dir.
        include[list]: List of file/path globs to include, relative to the 'base' directory.
        exclude[list]: List of file/path glob patterns to exclude (do not include !), relative
                       to the 'base' directory.
        extension[str]: Limit the search to an extension (e.g., '.md')
    """

    # Define the include/exclude/extensions lists
    if include is None:
        include = []
    if exclude is None:
        exclude = []
    if extensions is None:
        extensions = ''  # str.endswith('') is True for every filename

    # Define root and base directories
    if root_dir is None:
        root_dir = MooseDocs.ROOT_DIR
    if not os.path.isabs(root_dir):
        root_dir = os.path.join(MooseDocs.ROOT_DIR, root_dir)

    # Check types
    if not isinstance(exclude, list) or any(not isinstance(x, str)
                                            for x in exclude):
        LOG.error('The "exclude" must be a list of str items.')
        return None
    if not isinstance(include, list) or any(not isinstance(x, str)
                                            for x in include):
        LOG.error('The "include" must be a list of str items.')
        return None

    # Loop through the base directory and create a set of matching filenames
    matches = set()
    for root, _, files in os.walk(os.path.join(root_dir, base)):
        filenames = [
            os.path.join(root, fname) for fname in files
            if fname.endswith(extensions) and not fname.startswith('.')
        ]
        for pattern in include:
            for filename in fnmatch.filter(filenames,
                                           os.path.join(root_dir, pattern)):
                matches.add(filename)

    # Create a remove list
    remove = set()
    for pattern in exclude:
        for filename in fnmatch.filter(matches,
                                       os.path.join(root_dir, pattern)):
            remove.add(filename)

    # Return a sorted lists of matches
    matches -= remove
    return sorted(matches)
Example #48
    def run(self, context):
        try:
            swift = self.get_object_client(context)
            map_file = swift.get_object(self.container,
                                        'capabilities-map.yaml')
            capabilities = yaml.safe_load(map_file[1])
        except Exception:
            err_msg = ("Error parsing capabilities-map.yaml.")
            LOG.exception(err_msg)
            return actions.Result(error=err_msg)
        try:
            container_files = swift.get_container(self.container)
            container_file_list = [
                entry['name'] for entry in container_files[1]
            ]
        except Exception as swift_err:
            err_msg = ("Error retrieving plan files: %s" % swift_err)
            LOG.exception(err_msg)
            return actions.Result(error=err_msg)

        try:
            env = plan_utils.get_env(swift, self.container)
        except swiftexceptions.ClientException as err:
            err_msg = ("Error retrieving environment for plan %s: %s" %
                       (self.container, err))
            LOG.exception(err_msg)
            return actions.Result(error=err_msg)

        selected_envs = [
            item['path'] for item in env['environments'] if 'path' in item
        ]

        # extract environment files
        plan_environments = []
        for env_group in capabilities['topics']:
            for envs in env_group['environment_groups']:
                for files in envs['environments']:
                    file = files.get('file')
                    if file:
                        plan_environments.append(file)

        # parse plan for environment files
        env_files = fnmatch.filter(container_file_list, '*environments/*.yaml')
        env_user_files = fnmatch.filter(container_file_list,
                                        '*user-environment.yaml')

        outstanding_envs = list(
            set(env_files).union(env_user_files) - set(plan_environments))

        # change capabilities format
        data_to_return = {}

        for topic in capabilities['topics']:
            title = topic.get('title', '_title_holder')
            data_to_return[title] = topic
            for eg in topic['environment_groups']:
                for env in eg['environments']:
                    if selected_envs and env.get('file') in selected_envs:
                        env['enabled'] = True
                    else:
                        env['enabled'] = False

        # add custom environment files
        other_environments = []
        for env in outstanding_envs:
            flag = selected_envs and env in selected_envs
            new_env = {
                "description": "Enable %s environment" % env,
                "enabled": flag,
                "file": env,
                "title": env,
            }
            other_environments.append(new_env)
        other_environments.sort(key=lambda x: x['file'])

        other_environment_groups = []
        for group in other_environments:
            new_group = {
                "description": None,
                "environments": [group],
                "title": group['file'],
            }
            other_environment_groups.append(new_group)

        other_environments_topic_dict = {
            "description": None,
            "title": "Other",
            "environment_groups": other_environment_groups
        }

        other_environments_topic = {"Other": other_environments_topic_dict}
        data_to_return.update(other_environments_topic)

        return data_to_return
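
# Toy sketch of the "outstanding environments" computation above: container
# files that match the environment globs but are unknown to the capabilities
# map (all names hypothetical).
import fnmatch

container_file_list = ['environments/ssl.yaml', 'user-environment.yaml', 'plan.yaml']
env_files = fnmatch.filter(container_file_list, '*environments/*.yaml')
env_user_files = fnmatch.filter(container_file_list, '*user-environment.yaml')
plan_environments = ['environments/ssl.yaml']
print(sorted(set(env_files).union(env_user_files) - set(plan_environments)))
# ['user-environment.yaml']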
Example #49
File: tables.py Project: tsarjak/sunpy
def entries_from_dir(fitsdir,
                     recursive=False,
                     pattern='*',
                     default_waveunit=None):
    """Search the given directory for FITS files and use the corresponding FITS
    headers to generate instances of :class:`DatabaseEntry`. FITS files are
    detected by reading the content of each file; the `pattern` argument may be
    used to avoid reading entire directories if one knows that all FITS files
    have the same filename extension.

    Parameters
    ----------
    fitsdir : string
        The directory where to look for FITS files.

    recursive : bool, optional
        If True, the given directory will be searched recursively. Otherwise,
        only the given directory and no subdirectories are searched. The
        default is `False`, i.e. the given directory is not searched
        recursively.

    pattern : string, optional
        The pattern can be used to filter the list of filenames before any
        files are read. The default is to collect all files.
        This value is passed to the function :func:`fnmatch.filter`, see its
        documentation for more information on the supported syntax.

    default_waveunit : str, optional
        See
        :meth:`sunpy.database.tables.DatabaseEntry.add_fits_header_entries_from_file`.

    Returns
    -------
    generator of (DatabaseEntry, str) pairs
        A generator where each item is a tuple consisting of a
        :class:`DatabaseEntry` instance and the absolute path to the filename
        which was used to make the database entry.

    Examples
    --------
    >>> from sunpy.data.test import rootdir as fitsdir
    >>> from sunpy.database.tables import entries_from_dir
    >>> entries = list(entries_from_dir(fitsdir, default_waveunit='angstrom'))
    >>> len(entries)
    38
    >>> # and now search `fitsdir` recursively
    >>> entries = list(entries_from_dir(fitsdir, True, default_waveunit='angstrom'))
    >>> len(entries)
    59

    """
    for dirpath, dirnames, filenames in os.walk(fitsdir):
        filename_paths = (os.path.join(dirpath, name) for name in filenames)
        for path in fnmatch.filter(filename_paths, pattern):
            try:
                filetype = sunpy_filetools._detect_filetype(path)
            except (sunpy_filetools.UnrecognizedFileTypeError,
                    sunpy_filetools.InvalidJPEG2000FileExtension):
                continue
            if filetype == 'fits':
                for entry in entries_from_file(path, default_waveunit):
                    yield entry, path
        if not recursive:
            break
Example #50
    def __init__(self, settings, ncpu=None):
        self.settings = settings
        self.logger = logging.getLogger(__name__)
        self.stats = defaultdict(int)
        self.init_pool(ncpu)
        check_or_create_dir(settings['destination'])

        # Build the list of directories with images
        albums = self.albums = {}
        src_path = self.settings['source']

        ignore_dirs = settings['ignore_directories']
        ignore_files = settings['ignore_files']

        progressChars = cycle(["/", "-", "\\", "|"])
        show_progress = (self.logger.getEffectiveLevel() >= logging.WARNING
                         and os.isatty(sys.stdout.fileno()))
        self.progressbar_target = None if show_progress else Devnull()

        for path, dirs, files in os.walk(src_path,
                                         followlinks=True,
                                         topdown=False):
            if show_progress:
                print("\rCollecting albums " + next(progressChars), end="")
            relpath = os.path.relpath(path, src_path)

            # Test if the directory matches the ignore_dirs setting
            if ignore_dirs and any(
                    fnmatch.fnmatch(relpath, ignore)
                    for ignore in ignore_dirs):
                self.logger.info('Ignoring %s', relpath)
                continue

            # Remove files that match the ignore_files settings
            if ignore_files:
                files_path = {join(relpath, f) for f in files}
                for ignore in ignore_files:
                    files_path -= set(fnmatch.filter(files_path, ignore))

                self.logger.debug('Files before filtering: %r', files)
                files = [os.path.split(f)[1] for f in files_path]
                self.logger.debug('Files after filtering: %r', files)

            # Remove sub-directories that were ignored in a previous
            # iteration (with topdown=False, sub-directories are processed
            # before their parents)
            for d in dirs[:]:
                path = join(relpath, d) if relpath != '.' else d
                if path not in albums.keys():
                    dirs.remove(d)

            album = Album(relpath, settings, dirs, files, self)

            if not album.medias and not album.albums:
                self.logger.info('Skip empty album: %r', album)
            else:
                album.create_output_directories()
                albums[relpath] = album

        with progressbar(albums.values(),
                         label="%16s" % "Sorting albums",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_subdirs(settings['albums_sort_attr'])

        with progressbar(albums.values(),
                         label="%16s" % "Sorting media",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_medias(settings['medias_sort_attr'])

        self.logger.debug('Albums:\n%r', albums.values())
        signals.gallery_initialized.send(self)
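
# Distilled sketch of the ignore_files handling above, with toy values:
# patterns are matched against paths relative to the source root, and any
# hit is dropped from the file set.
import fnmatch

files_path = {'album/keep.jpg', 'album/skip.jpg'}
for ignore in ['*/skip.*']:
    files_path -= set(fnmatch.filter(files_path, ignore))
print(files_path)  # {'album/keep.jpg'}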
Example #51
File: shutil.py Project: syurk738/labpin
    def _ignore_patterns(path, names):
        # `patterns` is the tuple of glob patterns captured from the
        # enclosing shutil.ignore_patterns() call; every name matching any
        # pattern is reported to copytree as ignored.
        ignored_names = []
        for pattern in patterns:
            ignored_names.extend(fnmatch.filter(names, pattern))
        return set(ignored_names)
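
# The closure above is the callable returned by shutil.ignore_patterns();
# copytree invokes it once per directory and skips the reported names.
# A typical call, with hypothetical paths:
import shutil

shutil.copytree('project', 'project_backup',
                ignore=shutil.ignore_patterns('*.pyc', 'tmp*'))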
Example #52
File: setup.py Project: escalab/TPUPoint
def find_files(pattern, root):
    """Return all the files matching pattern below root dir."""
    for dirpath, _, files in os.walk(root):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(dirpath, filename)
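
# Example call (pattern and root hypothetical): lazily gather every .proto
# file below the current directory.
proto_files = list(find_files('*.proto', '.'))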
Example #53
def main():
    arguments = docopt(__doc__, version='0.1.3')
    if arguments['--dir'] is not None:
        static_path = arguments['--dir']
    else:
        static_path = os.path.join(os.getcwd(), 'static')

    if arguments['generate']:
        command = (
            "wget "
            "--recursive "  # follow links to download entire site
            "--convert-links "  # make links relative
            "--page-requisites "  # grab everything: css / inlined images
            "--no-parent "  # don't go to parent level
            "--directory-prefix {1} "  # download contents to static/ folder
            "--no-host-directories "  # don't create domain named folder
            "--restrict-file-name=unix "  # don't escape query string
            "{0}").format(arguments['--domain'], static_path)
        os.system(command)

        if arguments['--domain']:
            domain = arguments['--domain']
        else:
            domain = 'http://localhost:2368'
        target_domain = arguments['--target_domain']

        # remove query string since Ghost 0.4
        file_regex = re.compile(r'.*?(\?.*)')
        for root, dirs, filenames in os.walk(static_path):
            for filename in filenames:
                if file_regex.match(filename):
                    newname = re.sub(r'\?.*', '', filename)
                    print "Rename", filename, "=>", newname
                    os.rename(os.path.join(root, filename),
                              os.path.join(root, newname))

        # remove superfluous "index.html" from relative hyperlinks found in text
        abs_url_regex = re.compile(r'^(?:[a-z]+:)?//', flags=re.IGNORECASE)

        def fixLinks(text, parser):
            d = PyQuery(bytes(bytearray(text, encoding='utf-8')),
                        parser=parser)
            for element in d('a'):
                e = PyQuery(element)
                href = e.attr('href')
                if not abs_url_regex.search(href):
                    new_href = re.sub(r'rss/index\.html$', 'rss/index.rss',
                                      href)
                    new_href = re.sub(r'/index\.html$', '/', new_href)
                    e.attr('href', new_href)
                    print "\t", href, "=>", new_href
            if parser == 'html':
                return d.html(method='html').encode('utf8')
            return d.__unicode__().encode('utf8')

        def fix_share_links(text, parser):
            filetext = text.decode('utf8')
            td_regex = re.compile(target_domain + '|')

            assert target_domain, "target domain must be specified --target_domain=<http://your-host-url>"
            d = PyQuery(bytes(bytearray(filetext, encoding='utf-8')),
                        parser=parser)
            for share_class in ['.share_links a']:
                print "share_class : ", share_class
                for element in d(share_class):
                    e = PyQuery(element)
                    print "element : ", e
                    href = e.attr('href')
                    print "href : ", href
                    print "domain : ", domain
                    print "target_domain : ", target_domain
                    new_href = re.sub(domain, target_domain, href)
                    e.attr('href', new_href)
                    print "\t", href, "=>", new_href
            if parser == 'html':
                return d.html(method='html').encode('utf8')
            return d.__unicode__().encode('utf8')

        def fix_meta_url_links(text, parser):
            filetext = text.decode('utf8')
            td_regex = re.compile(target_domain + '|')

            assert target_domain, "target domain must be specified --target_domain=<http://your-host-url>"
            d = PyQuery(bytes(bytearray(filetext, encoding='utf-8')),
                        parser=parser)
            for share_class in [
                    'meta[property="og:url"], meta[name="twitter:url"]'
            ]:
                print "share_class : ", share_class
                for element in d(share_class):
                    e = PyQuery(element)
                    print "element : ", e
                    href = e.attr('content')
                    print "href : ", href
                    print "domain : ", domain
                    print "target_domain : ", target_domain
                    new_href = re.sub(domain, target_domain, href)
                    e.attr('content', new_href)
                    print "\t", href, "=>", new_href
            if parser == 'html':
                return d.html(method='html').encode('utf8')
            return d.__unicode__().encode('utf8')

        def fix_meta_image_links(text, parser):
            filetext = text.decode('utf8')
            td_regex = re.compile(target_domain + '|')

            assert target_domain, "target domain must be specified --target_domain=<http://your-host-url>"
            d = PyQuery(bytes(bytearray(filetext, encoding='utf-8')),
                        parser=parser)
            for share_class in [
                    'meta[property="og:image"], meta[name="twitter:image"]'
            ]:
                print "share_class : ", share_class
                for element in d(share_class):
                    e = PyQuery(element)
                    print "element : ", e
                    href = e.attr('content')
                    print "href : ", href
                    print "domain : ", domain
                    content_target_domain = target_domain.replace(
                        "/static", "")
                    print "target_domain : ", content_target_domain
                    new_href = re.sub(domain, content_target_domain, href)
                    e.attr('content', new_href)
                    print "\t", href, "=>", new_href
            if parser == 'html':
                return d.html(method='html').encode('utf8')
            return d.__unicode__().encode('utf8')

        # fix links in all html files
        for root, dirs, filenames in os.walk(static_path):
            for filename in fnmatch.filter(filenames, "*.html"):
                filepath = os.path.join(root, filename)
                parser = 'html'
                if root.endswith("/rss"):  # rename rss index.html to index.rss
                    parser = 'xml'
                    newfilepath = os.path.join(
                        root,
                        os.path.splitext(filename)[0] + ".rss")
                    os.rename(filepath, newfilepath)
                    filepath = newfilepath
                with open(filepath) as f:
                    filetext = f.read().decode('utf8')
                print "fixing links in ", filepath
                newtext = fixLinks(filetext, parser)
                newtext = fix_share_links(newtext, parser)
                newtext = fix_meta_url_links(newtext, parser)
                newtext = fix_meta_image_links(newtext, parser)
                with open(filepath, 'w') as f:
                    f.write(newtext)

    elif arguments['preview']:
        os.chdir(static_path)

        Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
        httpd = SocketServer.TCPServer(("", 9000), Handler)

        print "Serving at port 9000"
        # gracefully handle interrupt here
        httpd.serve_forever()

    elif arguments['setup']:
        if arguments['--gh-repo']:
            repo_url = arguments['--gh-repo']
        else:
            repo_url = raw_input("Enter the Github repository URL:\n").strip()

        # Create a fresh new static files directory
        if os.path.isdir(static_path):
            confirm = raw_input(
                "This will destroy everything inside static/."
                " Are you sure you want to continue? (y/N)").strip()
            if confirm != 'y' and confirm != 'Y':
                sys.exit(0)
            shutil.rmtree(static_path)

        # User/Organization page -> master branch
        # Project page -> gh-pages branch
        branch = 'gh-pages'
        regex = re.compile(".*[\w-]+\.github\.(?:io|com).*")
        if regex.match(repo_url):
            branch = 'master'

        # Prepare git repository
        repo = Repo.init(static_path)
        git = repo.git

        if branch == 'gh-pages':
            git.checkout(b='gh-pages')
        repo.create_remote('origin', repo_url)

        # Add README
        file_path = os.path.join(static_path, 'README.md')
        with open(file_path, 'w') as f:
            f.write(
                '# Blog\nPowered by [Ghost](http://ghost.org) and [Buster](https://github.com/axitkhurana/buster/).\n'
            )

        print "All set! You can generate and deploy now."

    elif arguments['deploy']:
        repo = Repo(static_path)
        repo.git.add('.')

        current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
        repo.index.commit('Blog update at {}'.format(current_time))

        origin = repo.remotes.origin
        repo.git.execute(
            ['git', 'push', '-u', origin.name, repo.active_branch.name])
        print "Good job! Deployed to Github Pages."

    elif arguments['add-domain']:
        repo = Repo(static_path)
        custom_domain = arguments['<domain-name>']

        file_path = os.path.join(static_path, 'CNAME')
        with open(file_path, 'w') as f:
            f.write(custom_domain + '\n')

        print "Added CNAME file to repo. Use `deploy` to deploy"

    else:
        print __doc__
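
# Invocation sketch based on the docopt branches above (URLs hypothetical):
#   buster setup --gh-repo=https://github.com/user/user.github.io
#   buster generate --domain=http://localhost:2368 --target_domain=https://user.github.io
#   buster preview
#   buster deploy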
Example #54
fieldValues = multenterbox(msg='Fill in values for the fields.',
                           title='Enter',
                           fields=fieldNames,
                           values=["", r"C:\Data", "400", "25", "0", "1"])

serialNum = str(fieldValues[0])
dircIm = str(fieldValues[1]) + '\\*.csv'
dircIm2 = str(fieldValues[1])
focalDistance = float((fieldValues[2]))
lowerThresh = float(fieldValues[3])
bPlotImages = float(fieldValues[4])
rBall = float(fieldValues[5])

# count number of valid files
print('Number of files:')
print(len(fnmatch.filter(os.listdir(dircIm2), '*.csv')))

criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# grid size
objp = np.zeros((7 * 7, 3), np.float32)
objp[:, :2] = (1 / 3) * 128 * np.mgrid[0:7, 0:7].T.reshape(-1, 2)

objpoints = []  # 3D point in real world space
imgpoints = []  # 2D points in image plane.

# load images from the given working directory
images = glob.glob(dircIm)

firstImage = images[0]
filePath = os.path.dirname(firstImage)
Example #55
def get_test_cases(args):
    """
    If a test case file is specified, retrieve tests from that file.
    Otherwise, glob for all json files in subdirectories and load from
    each one.
    Also, if requested, filter by category, and add tests matching
    certain ids.
    """
    import fnmatch

    flist = []
    testdirs = ['tc-tests']

    if args.file:
        # at least one file was specified - remove the default directory
        testdirs = []

        for ff in args.file:
            if not os.path.isfile(ff):
                print("IGNORING file " + ff + "\n\tBECAUSE does not exist.")
            else:
                flist.append(os.path.abspath(ff))

    if args.directory:
        testdirs = args.directory

    for testdir in testdirs:
        for root, dirnames, filenames in os.walk(testdir):
            for filename in fnmatch.filter(filenames, '*.json'):
                candidate = os.path.abspath(os.path.join(root, filename))
                if candidate not in testdirs:
                    flist.append(candidate)

    alltestcases = list()
    for casefile in flist:
        alltestcases = alltestcases + (load_from_file(casefile))

    allcatlist = get_test_categories(alltestcases)
    allidlist = get_id_list(alltestcases)

    testcases_by_cats = get_categorized_testlist(alltestcases, allcatlist)
    idtestcases = filter_tests_by_id(args, alltestcases)
    cattestcases = filter_tests_by_category(args, alltestcases)

    cat_ids = [x['id'] for x in cattestcases]
    if args.execute:
        if args.category:
            alltestcases = cattestcases + [
                x for x in idtestcases if x['id'] not in cat_ids
            ]
        else:
            alltestcases = idtestcases
    else:
        if cat_ids:
            alltestcases = cattestcases
        else:
            # just accept the existing value of alltestcases,
            # which has been filtered by file/directory
            pass

    return allcatlist, allidlist, testcases_by_cats, alltestcases
Example #56
# -*- coding: utf-8 -*-

import json
import os
import random
from progress.bar import ChargingBar
from fnmatch import filter  # note: shadows the builtin filter()

processed_count = 0
num_of_files = 0

for path, dirs, files in os.walk("processed"):
    for f in filter(files, '*.json'):
        num_of_files += 1

train_from = open("new_data/train.from", "w", encoding="utf-8")
train_to = open("new_data/train.to", "w", encoding="utf-8")

tst2012_from = open("new_data/tst2012.from", "w", encoding="utf-8")
tst2012_to = open("new_data/tst2012.to", "w", encoding="utf-8")

tst2013_from = open("new_data/tst2013.from", "w", encoding="utf-8")
tst2013_to = open("new_data/tst2013.to", "w", encoding="utf-8")

bar = ChargingBar("Progress", max=num_of_files)
for path, dirs, files in os.walk("processed"):

    for f in filter(files, '*.json'):
        processed_count += 1

        fullpath = os.path.abspath(os.path.join(path, f))
Example #57
    def from_directory(cls, directory, progress_callback=None, use_tqdm=False, exiftool_path=None):
        """
        Create an ImageSet recursively from the files in a directory.
        :param directory: str system file path
        :param progress_callback: function to report progress to
        :param use_tqdm: boolean True to use tqdm progress bar
        :param exiftool_path: str system file path to exiftool location
        :return: ImageSet instance
        """

        # progress_callback deprecation warning
        if progress_callback is not None:
            warnings.warn(message='The progress_callback parameter will be deprecated in favor of use_tqdm',
                          category=PendingDeprecationWarning)

        # ensure exiftoolpath is found per MicaSense setup instructions
        if exiftool_path is None and os.environ.get('exiftoolpath') is not None:
            exiftool_path = os.path.normpath(os.environ.get('exiftoolpath'))

        cls.basedir = directory
        matches = []
        for root, _, filenames in os.walk(directory):
            matches.extend(os.path.join(root, filename)
                           for filename in fnmatch.filter(filenames, '*.tif'))

        images = []

        with exiftool.ExifTool(exiftool_path) as exift:
            if use_tqdm:  # to use tqdm progress bar instead of progress_callback
                kwargs = {
                    'total': len(matches),
                    'unit': ' Files',
                    'unit_scale': False,
                    'leave': True
                }
                for path in tqdm(iterable=matches, desc='Loading ImageSet', **kwargs):
                    images.append(image.Image(path, exiftool_obj=exift))
            else:
                print('Loading ImageSet from: {}'.format(directory))
                for i, path in enumerate(matches):
                    images.append(image.Image(path, exiftool_obj=exift))
                    if progress_callback is not None:
                        progress_callback(float(i) / float(len(matches)))

        # create a dictionary to index the images so we can sort them into captures
        # {
        #     "capture_id": [img1, img2, ...]
        # }
        captures_index = {}
        for img in images:
            c = captures_index.get(img.capture_id)
            if c is not None:
                c.append(img)
            else:
                captures_index[img.capture_id] = [img]
        captures = []
        for imgs in captures_index.values():
            newcap = capture.Capture(imgs)
            captures.append(newcap)
        if progress_callback is not None:
            progress_callback(1.0)
        return cls(captures)
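
# Typical call (path hypothetical; requires the micasense package and a
# working exiftool installation):
#   imgset = ImageSet.from_directory('data/0000SET', use_tqdm=True)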
Example #58
File: unix.py Project: zhongyibill/gpdb
    def execute(self):
        return fnmatch.filter(os.listdir(self.path), self.pattern)
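
# Equivalent standalone form of the method above (directory and glob
# hypothetical): list a single directory, non-recursively, and keep the
# names that match the pattern.
import fnmatch
import os

print(fnmatch.filter(os.listdir('.'), '*.log'))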
Example #59
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "source_paths",
        nargs="+",
        help=
        "Path(s) to directory or directories with simulation generated files.")
    parser.add_argument(
        "-u",
        "--unmanaged-runs",
        action="store_true",
        default=False,
        help="Not a managed simulation (no 'run-manifest.json' available);"
        " simply summarize results of all tree files found in source path(s).")
    args = parser.parse_args()
    args.quiet = False

    param_keys = collections.OrderedDict()
    param_keys["disp.model"] = "dispersal_model"
    param_keys["s0"] = "s0"
    param_keys["e0"] = "e0"
    param_keys["disp.rate"] = "dispersal_rate"
    param_keys["niche.shift.prob"] = "niche_evolution_prob"

    summaries = []
    out = sys.stdout
    float_template = "{:1.1e}"
    if args.unmanaged_runs:
        tree_filepaths = []
        pattern = "*.trees"
        for source_path in args.source_paths:
            root_dir = os.path.expanduser(os.path.expandvars(source_path))
            for root, dirs, files in os.walk(root_dir):
                for filename in fnmatch.filter(files, pattern):
                    tree_filepaths.append(os.path.join(root, filename))
        if not tree_filepaths:
            sys.exit("No tree files found")
        col_width = max(len(tree_filepath) for tree_filepath in tree_filepaths)
        text_template = "{{:{}}}".format(col_width)
        row_template = text_template + ":   " + float_template
        for tree_filepath in tree_filepaths:
            trees = dendropy.TreeList.get_from_path(tree_filepath, "newick")
            value = get_species_index_max_and_min(trees)[1]
            out.write(row_template.format(tree_filepath, value))
            out.write("\n")
    else:
        for source_path in args.source_paths:
            source_dir = os.path.abspath(
                os.path.expanduser(os.path.expandvars(source_path)))
            run_manifest_path = os.path.join(source_dir, "run-manifest.json")
            if not os.path.exists(run_manifest_path):
                sys.exit(
                    "Manifest file not found: {}".format(run_manifest_path))
            with open(run_manifest_path, "r") as run_manifest_f:
                run_manifest = json.load(run_manifest_f)
            jobs = list(run_manifest.keys())
            header = list(param_keys.keys()) + ["num.lin"]
            col_width = max(len(h) for h in header)
            text_template = "{{:{}}}".format(col_width)
            header = [text_template.format(h) for h in header]
            out.write(" | ".join(header))
            out.write("\n")
            for job_idx, job in enumerate(jobs):
                params = {}
                for param_key in param_keys:
                    params[param_key] = run_manifest[job][
                        param_keys[param_key]]
                run_data = run_manifest[job]
                tree_filepath = os.path.join(source_dir, run_data["treefile"])
                if not os.path.exists(tree_filepath):
                    sys.stderr.write(
                        "Skipping job {} of {} (missing): {}\n".format(
                            job_idx + 1, len(jobs), job))
                    continue
                if not args.quiet:
                    sys.stderr.write("Processing job {} of {}: {}\n".format(
                        job_idx + 1, len(jobs), job))
                values = [
                    params["s0"], params["e0"], params["disp.rate"],
                    params["niche.shift.prob"]
                ]
                trees = dendropy.TreeList.get_from_path(
                    tree_filepath, "newick")
                values.append(get_species_index_max_and_min(trees)[1])
                values = [float_template.format(v) for v in values]
                values.insert(0, params["disp.model"])
                values = [text_template.format(v) for v in values]
                out.write(" | ".join(values))
                out.write("\n")
Example #60
if not path.isdir(blur_path):
    try:
        os.mkdir(blur_path)
    except OSError:
        print("Creation of the directory %s failed" % blur_path)
    else:
        print("Successfully created the directory %s " % blur_path)

if not path.isdir(not_blur_path):
    try:
        os.mkdir(not_blur_path)
    except OSError:
        print("Creation of the directory %s failed" % not_blur_path)
    else:
        print("Successfully created the directory %s " % not_blur_path)

file_count = len(fnmatch.filter(os.listdir(target_dir), "*.jpg"))

print("Checking " + str(file_count) + " files")

Iter = 0

print("Executing...")

for image_path in os.listdir(target_dir):
    file_name = os.path.join(target_dir, image_path)
    if os.path.isfile(file_name):
        Iter += 1
        print("\r" + str(Iter) + '/' + str(file_count), end='')

        file_name = os.path.join(target_dir, image_path)