Example #1
def CheckMissingHeaders(subdir, doxfiles, ignores):

    found = []
    # print "Subdir: ", subdir
    # print "Files: ", doxfiles
    # print "Ignores: ", ignores
    for root, dirs, files in os.walk(subdir):
        files = filter(lambda f: 
                       not fnmatch(f, "moc_*")
                       and not fnmatch(f, "ui_*")
                       and not fnmatch(f, "*.ui*")
                       and not fnmatch(f, "uic_*")
                       and fnmatch(f, "*.h"), files)
        found += [os.path.normpath(os.path.join(root, f)) for f in files]
        if '.svn' in dirs:
            dirs.remove('.svn')

    known = [ os.path.normpath(os.path.join(subdir, p))
              for p in doxfiles+ignores ]
    missing = [ f for f in found if f not in known ]
    missing.sort()
    if len(missing) > 0:
        print("Header files missing in "+subdir+":")
        print("  "+"\n  ".join(missing))
    return missing
Example #2
def parse_lpstat_l( buffer, filter = '*', key = 'printer' ):
	printers = {}
	current = None
	for prt in buffer:
		if not prt:
			continue
		if prt.startswith( 'printer ' ):
			dummy, printer, status = prt.split( ' ', 2 )
			printers[ printer ] = {}
			current = printer
			if not status.startswith( 'disabled' ):
				printers[ printer ][ 'state' ] = _( 'active' )
			else:
				printers[ printer ][ 'state' ] = _( 'inactive' )
			continue

		if not current:
			continue
		prt = prt.strip()
		for attribute in ( 'Description', 'Location' ):
			pattern = '%s:' % attribute
			if prt.startswith( pattern ):
				value = prt[ prt.find( pattern ) + len( pattern )  + 1 : ]
				printers[ printer ][ attribute.lower() ] = unicode( value, 'utf8' )

	filtered = {}
	for printer, attrs in printers.items():
		if key == 'printer' and not fnmatch( printer, filter ):
			continue
		elif attrs.get(key) and not fnmatch(attrs[key], filter):
			continue
		filtered[ printer ] = attrs
	return filtered
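A minimal usage sketch (Python 2, to match the unicode() call above); the
lpstat -l lines and the stub for the module's gettext hook _ are invented for
illustration:

_ = lambda s: s  # stand-in for the module's gettext hook

sample = [
    'printer office1 is idle.  enabled since Jan 01 00:00',
    '\tDescription: Front office laser',
    '\tLocation: Building A',
    'printer lab2 disabled since Jan 01 00:00 -',
]
print parse_lpstat_l(sample)
# {'office1': {'state': 'active', 'description': u'Front office laser',
#              'location': u'Building A'},
#  'lab2': {'state': 'inactive'}}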
Example #3
def get_file_list(pattern, avoid, root):  #Done
    """
    

    Parameters
    ----------
    pattern : list of strings
        contains the text pattern that should be in the file name.
    avoid : list of strings
        contains the text parttern that must not be in the file name.
    root : string
        file path containing the data. Subdirectrories are also taken into account

    Returns
    -------
    result : TYPE
        DESCRIPTION.

    """

    if root == '':
        root = os.getcwd()

    root = root.replace('\\', '/')

    file_list = []
    name_list = []
    final_list = []
    """Get list of all files in directory and the subdirectories with *.txt pattern"""
    for path, subdirs, files in os.walk(root):
        for name in files:
            if fnmatch(name, '*' + pattern + '*.txt'):
                spec_file = os.path.join(path, name)
                name_list.append(name)
                file_list.append(spec_file)

    for i in range(0, len(file_list)):

        file = file_list[i]
        name = name_list[i]

        # keep the file only if none of the avoid patterns match its name
        if not any(fnmatch(name, '*' + item + '*') for item in avoid):
            final_list.append(file)

    #return file list (paths) as dataframe

    result = pd.DataFrame({'file_path': final_list})

    os.chdir(root)

    return result
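A hypothetical call (the pattern, avoid list, and data root are all invented):

df = get_file_list('laser', ['calibration', 'test'], 'C:/data')
print(df['file_path'].head())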
Example #4
def find_packages(*args, **kwrds):
    """Find all packages and sub-packages and return a list of them.

    The function accept any number of directory names that will be
    searched to find packages. Packages are identified as
    sub-directories containing an __init__.py file.  All duplicates
    will be removed from the list and it will be sorted
    alphabetically.

    Packages can be excluded by pattern using the 'exclude' keyword,
    which accepts a list of patterns.  All packages with names that
    match the beginning of an exclude pattern will be excluded.
    """
    from fnmatch import fnmatch
    excludes = kwrds.get('exclude', [])
    pkgs = {}
    for base_path in args:
        for root, _dirs, files in os.walk(base_path):
            if '__init__.py' in files:
                assert root.startswith(base_path)
                pkg = root[len(base_path)+1:].replace(os.sep, '.')
                pkgs[pkg] = root

    result = list(pkgs.keys())
    for excl in excludes:
        # We exclude packages that *begin* with an exclude pattern.
        result = [pkg for pkg in result if not fnmatch(pkg, excl + "*")]
    result.sort()
    return result
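A hedged usage sketch, assuming a hypothetical tree with lib/mypkg/__init__.py
and lib/mypkg/tests/__init__.py on disk:

pkgs = find_packages('lib', exclude=['mypkg.tests'])
# -> ['mypkg']: sorted, de-duplicated, and with every package whose name
#    begins with 'mypkg.tests' dropped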
Example #5
def filter_files(filenodes, exclude_filenodes=None, exclude_dirnodes=None, exclude_filename_pattern=None):
    """
    Filter file nodes
    :param filenodes: A list of file nodes (list of File)
    :param exclude_filenodes: A list of file nodes to filter out (list of File)
    :param exclude_dirnodes: A list of directory nodes to filter out (list of Dir)
    :param exclude_filename_pattern: A file name pattern to filter out files with a matching file name pattern (str)
    :return: A list of filtered file nodes (list of File)
    """
    exclude_filenodes = exclude_filenodes if (exclude_filenodes is not None) else []
    exclude_dirnodes = exclude_dirnodes if (exclude_dirnodes is not None) else []

    filenodes = list(map(File, filenodes))
    exclude_filenodes = list(map(File, exclude_filenodes))
    exclude_dirnodes = list(map(Dir, exclude_dirnodes))

    filtered_filenodes = []
    filtered_filenodes.extend(list(filter(lambda filenode: filenode not in exclude_filenodes, filenodes)))

    if exclude_filename_pattern is not None:
        # keep only the nodes whose name does not match the exclude pattern
        filtered_filenodes = [filenode for filenode in filtered_filenodes
                              if not fnmatch(filenode.name, exclude_filename_pattern)]

    new_filtered_filenodes = []
    if exclude_dirnodes:
        for filenode in filtered_filenodes:
            for dirnode in exclude_dirnodes:
                if dirnode.abspath in filenode.abspath:
                    break
            else:
                new_filtered_filenodes.append(filenode)
        filtered_filenodes = new_filtered_filenodes

    return filtered_filenodes
Example #6
def compile_all_layouts_in_path(path):
    # Add the resource directory to QT for the zMayaTools prefix.  See fixup_ui_source.
    Qt.QDir.setSearchPaths('zMayaTools', [path + '/qt_resources'])

    qt_path = path + '/qt/'
    qt_generated_path = path + '/qt_generated/'

    # If qt_generated/__init__.py doesn't exist, create it so the directory is treated
    # as a module.  This isn't checked into the source tree so that all of the files in
    # that directory can be safely deleted.
    init_file = '%s/__init__.py' % qt_generated_path
    if not os.access(init_file, os.R_OK):
        open(init_file, 'w').close()

    # Compile *.ui layout files.
    for fn in os.listdir(qt_path):
        if not fnmatch(fn, '*.ui'):
            continue

        input_file = qt_path + fn
        output_file = qt_generated_path + fn.replace('.ui', '.py')
        if mtime(input_file) < mtime(output_file):
            continue

        with open(input_file) as input:
            input_xml = input.read()

        input_xml = fixup_ui_source(input_xml)

        with open(output_file, 'w') as output:
            Qt.pysideuic.compileUi(StringIO(input_xml), output)
Example #7
def regex_walk(dirname, regex_input):
    """
    Adds all the directories under the specified dirname to the system path to 
    be able to import the modules.
    
    Args:
    dirname: The tensorflow_path passed as an argument; the path to the
    tensorflow source code.

    regex_input: Regular expression input string to filter and list/run tests.
    Few examples of accepted regex_input are:
    math_ops_test
    math_ops_test.DivNanTest
    math_ops_test.DivNoNanTest.testBasic
    math_ops_test.DivNoNanTest.*
    math_ops_test.D*
    math_ops_test.*
    math_*_test
    math_*_*_test
    math*_test
    """
    if re.search(r"\.", regex_input) is None:
        test = regex_input + '.py'
    else:
        test = re.split(r"\.", regex_input)[0] + '.py'
    module_list = []
    for path, subdirs, files in os.walk(dirname):
        for name in files:
            if fnmatch(name, test):
                sys.path.append(path)
                name = os.path.splitext(name)[0]
                module_list.append(name)
    if not module_list:
        sys.exit(1)
    return module_list
Example #9
File: utils.py  Project: pombredanne/wms
def rl_glob(pattern, glob=glob.glob, fnmatch=fnmatch.fnmatch, _RL_DIR=_RL_DIR, pjoin=os.path.join):
    c, pfn = __startswith_rl(pattern)
    r = glob(pfn)
    if c or r == []:
        r += map(
            lambda x, D=_archivepfx, pjoin=pjoin: pjoin(_archivepfx, x),
            filter(lambda x, pfn=pfn, fnmatch=fnmatch: fnmatch(x, pfn), __loader__._files.keys()),
        )
    return r
Example #10
    def filter_target(self, target):
        for ignore in self.ignores:
            if fnmatch(target, ignore):
                self.logger.debug("Drop: user ignores: {0}".format(target))
                return False

        if not os.path.lexists(target):
            self.logger.error(
                "Drop: the pathname doesn't exist: {0}".format(target))
            return False

        return True
Example #11
def fqToFasta():  #for creating fqToFasta_job file
    data = json.loads(open('Config.json').read())

    for opt, arg in options:  #checking the options for input folder provided
        if opt == '--input':
            inFolder = str(arg)  #input folder

    for path, subdirs, files in os.walk(inFolder):
        for name in files:
            if fnmatch(name, '*fastq.gz*'):
                baseFile = name  #baseFile is filename without extensions
                while fnmatch(baseFile, '*.*'):
                    baseFile = os.path.splitext(baseFile)[0]

                #constructing completeArg
                print('fastq to fasta: ' + str(baseFile))
                bbduk_dir = (data['bbduk']['filesPath']
                             + path[path.find('/SP'):] + '/bbduk/')
                completeArg = (data['fastx']['fastxFormatterPath']
                               + ' -i ' + bbduk_dir + str(baseFile) + '_SE_final.fq'
                               + ' -o ' + bbduk_dir + str(baseFile) + '_SE_final.fasta')

                #if the jobfile folder doesn't exist, create one
                if not os.path.exists('jobFiles/'):
                    os.makedirs('jobFiles/')
                #if the jobfile doesn't exist, create one
                if not os.path.exists('jobFiles/fqToFasta_job.py'):
                    f = open('jobFiles/fqToFasta_job.py', 'w+')
                    f.write('from subprocess import *' + '\n\n')
                    f.close()

                #open and append completeArg to the jobfile
                f = open('jobFiles/fqToFasta_job.py', 'a')
                f.write("call(['" + completeArg + "'],shell=True);" + '\n')
                f.close()

    print('\n*** Fastx Format Complete ***\n')
Example #12
File: fileSource.py  Project: twdb/txhis
    def getMsg(self):
        'Get msg names and how many there are to process in the poll'
        fileList = os.listdir(self.filePath)

        for i in range(len(fileList)):
            try:
                if fnmatch(fileList[i], self.fileFilter):
                    #raw_input("find matched file %s" % fileList[i] )
                    self.addMsg(fileList[i])
            except:
                pass
        #the return just for debugging
        return self.nbMsg
Example #13
File: ftpSource.py  Project: twdb/txhis
    def getMsg (self):
        'Get msg names and how many there are to process in the poll'
        
        fileList = self.ftpConn.nlst()
       
        for i in range(len(fileList)):
            try:
                if fnmatch(fileList[i], self.fileFilter):
                    self.addMsg(fileList[i])
            except: 
                pass

        return self.nbMsg
Example #15
File: __init__.py  Project: B-Rich/smart
	def list_printers(self,request):
		""" Lists the printers for the overview grid. """

		# ----------- DEBUG -----------------
		MODULE.info("printers/query invoked with:")
		pp = pprint.PrettyPrinter(indent=4)
		st = pp.pformat(request.options).split("\n")
		for s in st:
			MODULE.info("   << %s" % s)
		# -----------------------------------

		key = request.options.get('key','printer')
		pattern = request.options.get('pattern','*')

		quota = self._quota_enabled()		# we need it later

		result = []
		plist = self._list_printers()
		for element in plist:
			try:
				printer = element['printer']
				data = self._printer_details(printer)
				for field in data:
					element[field] = data[field]
				# filter according to query
				if fnmatch(element[key],pattern):
					if printer in quota:
						element['quota'] = quota[printer]
					else:
						element['quota'] = False
					result.append(element)
			except:
				pass

		# ---------- DEBUG --------------
		MODULE.info("printers/query returns:")
		pp = pprint.PrettyPrinter(indent=4)
		st = ''
		if len(result) > 5:
			tmp = result[0:5]
			MODULE.info("   >> %d entries, first 5 are:" % len(result))
			st = pp.pformat(tmp).split("\n")
		else:
			st = pp.pformat(result).split("\n")
		for s in st:
			MODULE.info("   >> %s" % s)
		# --------------------------------

		self.finished(request.id,result)
Example #17
    def readText(self, infile):
        status = True
        tfile = '%s.txt' % infile
        qf = QFile(tfile)
        if not qf.exists():
            return False
        qf.open(QFile.ReadOnly)
        qts = QTextStream(qf)
        while True:
            line = qts.readLine()
            if len(line) == 0:
                break
            tokenize = line.split('=')
            if fnmatch(tokenize[0], 'mega0'):
                self.omega0 = tokenize[1].trimmed()
            if fnmatch(tokenize[0], 'megaR'):
                self.omegaR = tokenize[1].trimmed()
            if fnmatch(tokenize[0], 'chi'):
                self.chi = tokenize[1].trimmed()
            if fnmatch(tokenize[0], 'detector'):
                self.detector = tokenize[1].trimmed()
            if fnmatch(tokenize[0], 'exp'):
                self.exposureT = tokenize[1].trimmed()
        return True
Example #18
def finderPlus():
    import os
    from fnmatch import fnmatch

    d = []
    for root, dirr, files in os.walk(r"c:\Google Drive"):
        for aux in files:
            for aux2 in range(4, 11):
                if fnmatch(aux, '*(%d)*' % aux2):
                    #print(os.path.join(root, aux))
                    d.append(os.path.join(root, aux))

    import shutil as sh
    for aux in d:
        sh.move(aux, 'c:/Google Drive/ZZZ/' + os.path.basename(aux))
Example #19
def regex_walk(dirname, regex_input):
    """
    Adds all the directories under the specified dirname to the system path to 
    be able to import the modules.
    
    Args:
    dirname: The tensorflow_path passed as an argument; the path to the
    tensorflow source code.
    
    regex_input: Regular expression input string to filter and list/run tests.
    Few examples of accepted regex_input are:
    math_ops_test.DivNoNanTest.testBasic
    math_ops_test.DivNoNanTest.*
    math_ops_test.D*
    math_ops_test.*
    math_*_test
    math_*_*_test
    math*_test
    """
    if (re.search(r'\.', regex_input) is None):
        # a module name regex was given
        test = regex_input + '.py'
    else:
        # regex has dot(s) e.g. module.class.testfunc
        test = re.split(r"\.", regex_input)[0] + '.py'
    module_list = []
    for path, subdirs, files in os.walk(dirname):
        for name in files:
            if fnmatch(name, test):
                if path not in sys.path:
                    sys.path.append(os.path.abspath(path))
                name = os.path.splitext(name)[0]
                module_list.append(name)
    if not module_list:
        print("Test pattern/name does not exist:", regex_input, "dirname",
              dirname)

    return module_list
def gen_find(filepat, top):
    for path, dirlist, filelist in os.walk(top):
        # fnmatch.filter() selects the names in filelist that match filepat;
        # fnmatch() itself only tests a single name.
        for filename in fnmatch.filter(filelist, filepat):
            yield os.path.join(path, filename)
The fnmatch module compares file names against glob-style patterns such as those
used by Unix/Linux shells.

These are not the same as the more sophisticated regular expression rules.

It's purely a string matching operation.

If you find it more convenient to use a different pattern style, for example
regular expressions, then simply use regex operations to match your filenames.
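
If you need the regular-expression form of a glob pattern, fnmatch.translate()
will produce it. A minimal sketch (the file names are invented; the exact regex
string varies across Python versions):

import fnmatch
import re

regex = fnmatch.translate('*.txt')                   # e.g. '(?s:.*\\.txt)\\Z'
print(re.match(regex, 'notes.txt') is not None)      # True
print(re.match(regex, 'notes.txt.bak') is not None)  # False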

"""****************************************************"""
# What does it do?

The fnmatch module is used for wild-card pattern matching.

# Simple Matching
fnmatch() compares a single file name against a pattern and
returns a boolean indicating whether or not they match. 

The comparison is case-sensitive when the operating system uses a case-sensitive
file system.
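
A minimal illustration of fnmatch() and its strictly case-sensitive sibling
fnmatchcase() (file names invented):

from fnmatch import fnmatch, fnmatchcase

print(fnmatch('report.TXT', '*.txt'))      # True on Windows/macOS, False on Linux
print(fnmatchcase('report.TXT', '*.txt'))  # False everywhere: no case folding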

# Filtering
To test a sequence of filenames, you can use fnmatch.filter().

It returns a list of the names that match the pattern argument.
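
For example (names invented):

import fnmatch

names = ['a.py', 'b.txt', 'c.py', 'd.csv']
print(fnmatch.filter(names, '*.py'))  # ['a.py', 'c.py']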

"""****************************************************"""
# Find all mp3 files
# A script that searches for *.mp3 files from a root path is sketched after
# this __main__ block.
if __name__ == '__main__':

    # File list
    all_names = os.listdir('.')
    # print(all_names)

    # Get all regular files
    file_names = [name for name in os.listdir('.')
                  if os.path.isfile(os.path.join('.', name))]
    # print(file_names)

    # Get all dirs
    dir_names = [name for name in os.listdir('.')
                 if os.path.isdir(os.path.join('.', name))]
    # print(dir_names)

    # The string startswith() and endswith() methods are also handy for
    # filtering the contents of a directory.
    pyfiles = [name for name in os.listdir('.') if name.endswith('.py')]

    # print(help(glob))
    pyfiles2 = glob.glob('somedir/*.py')

    pyfiles3 = [name for name in os.listdir('somedir')
                if fnmatch(name, '*.py')]

    # Get file metadata
    file_metadata = [(name, os.stat(name)) for name in pyfiles]
    for name, meta in file_metadata:
        print(name, meta.st_size, meta.st_mtime)
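
A sketch of the *.mp3 search that the comment above describes, assuming a
hypothetical rootPath of '/':

import os
from fnmatch import fnmatch

rootPath = '/'
for path, subdirs, files in os.walk(rootPath):
    for name in files:
        if fnmatch(name, '*.mp3'):
            print(os.path.join(path, name))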
Example #23
def preAnalysis():
    remove()

    #GENERATE STAT FILES FOR ALL LAYERS

    #generate stat files for all layers
    for path, subdirs, files in os.walk('.', topdown=False):  #walk for files
        if fnmatch(path, '*layer*'):
            for name in files:
                if fnmatch(name, '*_result.hit.txt'):  #catch result.hit files
                    #create stat file
                    if os.path.isfile(path + name[6:9] + 'stat.txt'):
                        f = open(path + name[6:9] + 'stat.txt', 'a')
                    else:
                        f = open(path + name[6:9] + 'stat.txt', 'w')
                        f.write('query_name\tsubject_description\tE value\tbit score\n')  #add header on creation
                    #add all desired data to memory
                    with open(os.path.join(path, name)) as tsv:
                        for column in zip(*[
                                line
                                for line in csv.reader(tsv,
                                                       dialect="excel-tab")
                        ]):  #grab tsv by column
                            if column[0] == 'query_name':
                                query = []

                                for i in range(1, len(column)):
                                    query.append(column[i])

                            if column[0] == 'subject_description':
                                subject = []

                                for i in range(1, len(column)):
                                    subject.append(column[i])

                            if column[0] == 'E value':
                                eValue = []

                                for i in range(1, len(column)):
                                    eValue.append(column[i])

                            if column[0] == 'bit score':
                                bitScore = []

                                for i in range(1, len(column)):
                                    bitScore.append(column[i])

                    #Write to tsv
                    for i in range(
                            0,
                            min(len(query), len(subject), len(eValue),
                                len(bitScore))):
                        f.write(query[i] + '\t' + subject[i] + '\t' + eValue[i] + '\t' + bitScore[i] + '\n')
                    f.close()

    #GENERATE STAT FILES FOR ALL EXPERIMENTS

    #find stat
    for path, subdirs, files in os.walk('.', topdown=False):  #walk for files
        if fnmatch(path, '*Experiment*'):
            #print files
            for name in files:
                if fnmatch(name, '*stat*'):  #catch stat files

                    #create stat files
                    if os.path.isfile(
                            os.path.dirname(path) + '/' +
                            os.path.basename(path) +
                            'stat.txt'):  #append if exist
                        f = open(
                            os.path.dirname(path) + '/' +
                            os.path.basename(path) + 'stat.txt', 'a')
                    else:
                        f = open(
                            os.path.dirname(path) + '/' +
                            os.path.basename(path) + 'stat.txt',
                            'w')  #create if not exist
                        f.write('query_name\tsubject_description\tE value\tbit score\n')  #add header on creation

                    #add all desired data to memory
                    with open(os.path.join(path, name)) as tsv:
                        for column in zip(*[
                                line
                                for line in csv.reader(tsv,
                                                       dialect="excel-tab")
                        ]):  #grab tsv by column
                            if column[0] == 'query_name':
                                query = []

                                for i in range(1, len(column)):
                                    query.append(column[i])

                            if column[0] == 'subject_description':
                                subject = []

                                for i in range(1, len(column)):
                                    subject.append(column[i])

                            if column[0] == 'E value':
                                eValue = []

                                for i in range(1, len(column)):
                                    eValue.append(column[i])

                            if column[0] == 'bit score':
                                bitScore = []

                                for i in range(1, len(column)):
                                    bitScore.append(column[i])

                    #Write to tsv
                    for i in range(
                            0,
                            min(len(query), len(subject), len(eValue),
                                len(bitScore))):
                        f.write(query[i] + '\t' + subject[i] + '\t' + eValue[i] + '\t' + bitScore[i] + '\n')
                    f.close()

    #GENERATE STAT FILES FOR ALL SAMPLE DATES

    #find stat
    for path, subdirs, files in os.walk('.', topdown=False):  #walk for files
        if fnmatch(path, '*SP*'):
            #print files
            for name in files:
                if fnmatch(name, '*Experiment*'):  #catch stat files
                    if fnmatch(name, '*stat*'):  #catch stat files
                        #create stat files

                        if os.path.isfile(
                                os.path.dirname(path) + '/' +
                                os.path.basename(path) +
                                'stat.txt'):  #append if exist
                            f = open(
                                os.path.dirname(path) + '/' +
                                os.path.basename(path) + 'stat.txt', 'a')
                        else:
                            f = open(
                                os.path.dirname(path) + '/' +
                                os.path.basename(path) + 'stat.txt',
                                'w')  #create if not exist
                            f.write('query_name\tsubject_description\tE value\tbit score\n')  #add header on creation

                        #add all desired data to memory
                        with open(os.path.join(path, name)) as tsv:
                            for column in zip(*[
                                    line
                                    for line in csv.reader(tsv,
                                                           dialect="excel-tab")
                            ]):  #grab tsv by column
                                if column[0] == 'query_name':
                                    query = []

                                    for i in range(1, len(column)):
                                        query.append(column[i])

                                if column[0] == 'subject_description':
                                    subject = []

                                    for i in range(1, len(column)):
                                        subject.append(column[i])

                                if column[0] == 'E value':
                                    eValue = []

                                    for i in range(1, len(column)):
                                        eValue.append(column[i])

                                if column[0] == 'bit score':
                                    bitScore = []

                                    for i in range(1, len(column)):
                                        bitScore.append(column[i])

                        #Write to tsv
                        for i in range(
                                0,
                                min(len(query), len(subject), len(eValue),
                                    len(bitScore))):
                            f.write(query[i] + '\t' + subject[i] + '\t' + eValue[i] + '\t' + bitScore[i] + '\n')
                        f.close()

    #GENERATE SUBJECT LIST FOR RETURN ANALYSIS

    #Populate subject for analysis
    subject = []

    #Retrieve data from all hit files
    for path, subdirs, files in os.walk('.', topdown=False):  #walk for files
        for name in files:
            if fnmatch(name, '*layer*'):
                if fnmatch(name, '*_result.hit.txt'):

                    #add all desired data to memory
                    with open(os.path.join(path, name)) as tsv:
                        for column in zip(*[
                                line
                                for line in csv.reader(tsv,
                                                       dialect="excel-tab")
                        ]):  #grab tsv by column
                            if column[0] == 'subject_description':
                                for i in range(1, len(column)):
                                    subject.append(column[i])
    #return to functions one, two, or three
    return subject
Example #24
def remove():
    for path, subdirs, files in os.walk('.', topdown=False):  #walk for files
        #print files
        for name in files:
            if fnmatch(name, '*stat*'):  #catch stat files
                os.remove(os.path.join(path, name))
Example #25
File: Project.py  Project: comptech/atrex
    def getImageBase(self, filename):
        """
        getImageBase returns the prefix which will be used for subsequent file naming based upon
        an input filename
        :param filename:
        :return:
        """
        self.imFile = filename
        ind_of_suffx = find_last_index_of(filename, '.')
        ind_of_start_num = find_last_index_of(filename, '_') + 1
        extension = os.path.splitext(filename)[1]
        if "h5" in extension:
            self.h5Flag = True
        else:
            self.h5Flag = False

        newbase = filename[0:ind_of_start_num]
        if not self.h5Flag:
            imstringFilt = '%s*.tif' % newbase
        else:
            imstringFilt = '%s*.h5' % newbase
        curimage = filename
        self.filenum = self.getFileNumber(filename)
        #print 'Debug : %s'%self.filenum
        qfinfo = QFileInfo(filename)
        apath = qfinfo.absolutePath()
        fileonly = qfinfo.fileName()
        tmpind = find_last_index_of(fileonly, '_') + 1
        tmpbase = fileonly[0:tmpind]
        qd = QDir(apath)

        #qd.setNameFilters ([imstringFilt])
        imfiles0 = qd.entryList()
        imfiles = []
        for nm in imfiles0:
            nm = '%s/%s' % (apath, nm)
            if fnmatch(nm, imstringFilt):
                imfiles.append(nm)

        #print qd.entryList()
        n = len(imfiles)
        self.minImageNum = 1E10
        self.maxImageNum = -1
        for i in imfiles:
            if i.rfind(tmpbase) < 0:
                continue
            #if (i.contains(tmpbase) == False) :
            #continue

            startInd = find_last_index_of(i, '_') + 1
            endInd = find_last_index_of(i, '.')
            self.numDigits = endInd - startInd

            num = int(i[startInd:startInd + self.numDigits])
            print num, '   ', i
            if (num < self.minImageNum):
                self.minImageNum = num
            if (num > self.maxImageNum):
                self.maxImageNum = num
        print 'min image : ', self.minImageNum
        print 'max image : ', self.maxImageNum
        self.numImages = self.maxImageNum - self.minImageNum + 1
        print 'num images : ', self.numImages
        if (self.base != newbase):
            self.base = newbase
            self.checkForFiles()

        else:
            setfile = filename + '.txt'
            qf = QFile(setfile)
            if (qf.exists()):
                self.readFileSettings(setfile)
        return self.base
Example #27
def add_package_to_params(package_name, params):
    assert(isinstance(params,SetupParams))
    package_name = package_name.lower()
    if package_name == "opengl":
        opengl_includes = build_opengl_plugin_list() + opengl_accelerate_includes + opengl_platform_includes
        params.includes.extend(opengl_includes)
        return
    if package_name == "ipython":
        '''
        ipython requires a patch in load_qt / monkey patch is done in startup script
        '''
        warning = """
            Using Ipython frozen console requires, and setting right backend with matplotlib
            Sourcecode : IPython/external/qt_loaders.py

                commit_api(api)
                return result
            else:
                #Append data here
                #if getattr(sys,"frozen",False):
                #    api = loaded_api()
                #    result = loaders[api]()
                #    api = result[-1]  # changed if api = QT_API_PYQT_DEFAULT
                #    commit_api(loaded_api())
                #    return result
            raise ImportError("""
        params.includes.extend(pygments_includes)
        params.includes.extend(zmq_includes)
        params.packages.extend(zmq_packages)
        params.includes.extend(matplotlib_includes)
        params.excludes.extend(matplotlib_excludes)
        params.excludes.extend(qt_excludes)

        ipython_include_files = build_ipython_dir_list()
        params.includefiles.extend(ipython_include_files)
        return
    if package_name in ["zmq", "pizco"]:
        params.includes.extend(zmq_includes)
        params.packages.extend(zmq_packages)
        return
    if package_name in ["numpy","scipy"]:
        params.includes.extend(numpy_scipy_includes)
        params.includefiles.extend(numpy_scipy_includefiles)
        params.packages.extend(numpy_scipy_packages)
        params.includes.extend(numba_includes)
        params.packages.extend(numba_packages)

        return
    if package_name in ["guiqwt","guidata"]:
        params.includes.extend(qt_includes)
        params.excludes.extend(qt_excludes)
        params.packages.extend(qt_packages)
        from guidata import configtools
        guidata_add_source=configtools.get_module_data_path("guidata","images")
        import guiqwt
        guiqwt_mod_path = guiqwt.__path__[0]
        guiqwt_add_source = guiqwt_mod_path + "\\images"
        params.includefiles.extend([(guidata_add_source,"guidata/images/")])
        params.includefiles.extend([(guiqwt_add_source,"guiqwt/images")])
        return
    if package_name in ["pyqt"]:
        params.includes.extend(qt_includes)
        params.excludes.extend(qt_excludes)
        params.packages.extend(qt_packages)
        return
    if package_name in ["local_svg"]:
        list_files_in_cur = os.listdir(os.getcwd())
        svg_list = []
        for file in list_files_in_cur:
            if fnmatch(file,"*.svg"):
                svg_list.append(file)
        params.includefiles.extend(svg_list)
Example #28
import glob
import fnmatch
import os.path

if __name__ == "__main__":
    dir_path = '/root/tmp/test'
    path = '/root/tmp/test/*.py'
    pyfiles = glob.glob(path)
    pyfiles2 = [name for name in os.listdir(dir_path) if fnmatch.fnmatch(name, '*.py')]
Example #29
def main(argv=None):
    if argv is None:
        argv = sys.argv[1:]

    # Gather all files to process.
    eprint(
        "+++ :female-detective: Looking for BUILD, BUILD.bazel and *.bzl files"
    )
    files = []
    build_bazel_found = False
    for root, dirnames, filenames in os.walk("."):
        for filename in filenames:
            if fnmatch.fnmatch(filename, "BUILD.bazel"):
                build_bazel_found = True
            for pattern in ("BUILD", "BUILD.bazel", "*.bzl"):
                if fnmatch.fnmatch(filename, pattern):
                    files.append(os.path.relpath(os.path.join(root, filename)))
    if build_bazel_found:
        eprint(
            "Found BUILD.bazel files in the workspace, thus ignoring BUILD files without suffix."
        )
        files = [
            fname for fname in files
            if not fnmatch.fnmatch(os.path.basename(fname), "BUILD")
        ]
    if not files:
        eprint("No files found, exiting.")
        return 0

    # Run buildifier.
    eprint("+++ :bazel: Running buildifier")
    result = subprocess.run(["buildifier", "--lint=warn"] + sorted(files),
                            capture_output=True,
                            universal_newlines=True)

    # If buildifier was happy, there's nothing left to do for us.
    if result.returncode == 0:
        eprint("+++ :tada: Buildifier found nothing to complain about")
        return result.returncode

    # Parse output.
    eprint("+++ :gear: Parsing buildifier output")
    findings = []
    for line in result.stderr.splitlines():
        # Skip empty lines.
        line = line.strip()
        if not line:
            continue

        # Try to parse as structured data.
        match = regex.match(line)
        if match:
            findings.append(match)
        else:
            output = "##### :bazel: buildifier: error while parsing output\n"
            output += "<pre><code>" + html.escape(
                result.stderr) + "</code></pre>"
            if "BUILDKITE_JOB_ID" in os.environ:
                output += "\n\nSee [job {job}](#{job})\n".format(
                    job=os.environ["BUILDKITE_JOB_ID"])
            upload_output(output)
            return result.returncode

    output = "##### :bazel: buildifier: found {} problems in your BUILD and *.bzl files\n".format(
        len(findings))
    output += "<pre><code>"
    for finding in findings:
        file_url = get_file_url(finding["filename"], finding["line"])
        if file_url:
            output += '<a href="{}">{}:{}</a>:'.format(file_url,
                                                       finding["filename"],
                                                       finding["line"])
        else:
            output += "{}:{}:".format(finding["filename"], finding["line"])
        if finding["column"]:
            output += "{}:".format(finding["column"])
        output += ' <a href="{}">{}</a>: {}\n'.format(finding["message_url"],
                                                      finding["message_id"],
                                                      finding["message"])
    output = output.strip() + "</code></pre>"
    upload_output(output)

    # Preserve buildifier's exit code.
    return result.returncode
Example #30
def bbduk():  #for creating bbduk_job file
    data = json.loads(open('Config.json').read())

    for opt, arg in options:  #checking the options for input folder provided
        if opt == '--input':
            inFolder = str(arg)  #input folder

    for path, subdirs, files in os.walk(inFolder):
        for name in files:
            if fnmatch(name, '*.fastq.gz*'):
                baseFile = name  #baseFile is filename without extensions
                while fnmatch(baseFile, '*.*'):
                    baseFile = os.path.splitext(baseFile)[0]

                #TRIM#
                #constructing completeArg
                print('TRIM: ' + str(baseFile))
                sp_dir = data['bbduk']['filesPath'] + path[path.find('/SP'):]
                bbduk_dir = sp_dir + '/bbduk/'
                trim_opts = ' '.join(data['bbduk'][key] for key in
                                     ('ktrim', 'k', 'mink', 'hdist', 'misc',
                                      'qtrim', 'trimq', 'maq', 'entropy',
                                      'minlen'))
                completeArg = (data['bbduk']['bbdukPath']
                               + ' in=' + sp_dir + '/' + name
                               + ' out=' + bbduk_dir + baseFile + '_PE_good.fq'
                               + ' outm=' + bbduk_dir + baseFile + '_PE_fail.fq'
                               + ' outs=' + bbduk_dir + baseFile + '_SE_pass.fq'
                               + ' ref=' + data['bbduk']['adapterPWD']
                               + ' ' + trim_opts)

                #if the jobfile folder doesn't exist, create one
                if not os.path.exists('jobFiles/'):
                    os.makedirs('jobFiles/')
                #if the jobfile doesn't exist, create one
                if not os.path.exists('jobFiles/bbduk_job.py'):
                    f = open('jobFiles/bbduk_job.py', 'w+')
                    f.write('from subprocess import *' + '\n\n')
                    f.close()

                #open and append completeArg to the jobfile
                f = open('jobFiles/bbduk_job.py', 'a')
                f.write("call(['" + completeArg + "'],shell=True);" + '\n')
                f.close()

                #PHIX#
                #constructing completeArg
                print('PHIX: ' + str(baseFile))
                completeArg = (data['bbduk']['bbdukPath']
                               + ' in=' + bbduk_dir + baseFile + '_SE_pass.fq'
                               + ' out=' + bbduk_dir + baseFile + '_SE_final.fq'
                               + ' ref=' + data['bbduk']['phixPWD']
                               + ' k=31 ' + data['bbduk']['hdist'])

                #open and append completeArg to the jobfile
                f = open('jobFiles/bbduk_job.py', 'a')
                f.write("call(['" + completeArg + "'],shell=True);" + '\n')
                f.close()

    print('\n*** BBDuk Cleanup Complete ***\n')
Example #31
def convert(input_dir, output_dir):
    # fnmatch.fnmatch() only tests a single name against a pattern; to collect
    # the matching files, filter the directory listing with fnmatch.filter().
    input_files = fnmatch.filter(os.listdir(input_dir), '*.nef')
    print(input_files)
Example #32
def spreadSheet():

    #current row,col,layer and samplepoint
    row, col, layer, experiment, samplePoint = 0, 0, 0, 0, 0

    #create excel file
    workbook = xlsxwriter.Workbook('SampleReport.xlsx')
    worksheet = workbook.add_worksheet()

    #generate all stat files required
    totalSubject = preAnalysis()

    #populate list of variants
    totalVariants = []
    for i in range(0, len(totalSubject)):
        split = totalSubject[i].split(';')
        totalVariants.append(split[0])

    #get count and set of unique variants
    totalCount = Counter(totalVariants)
    #count = [Decimal(n) for n in count]
    totalUnique = set(totalVariants)

    #for each layers
    for i in range(1, 6):
        layer += 1
        worksheet.write(col, 0, 'LAYER ' + str(layer))

        #for all files in the current layer
        for path, subdirs, files in os.walk('.',
                                            topdown=False):  #walk for files
            if fnmatch(path, '*-' + str(layer) + '*'):
                for name in files:
                    if fnmatch(name,
                               '*_result.hit.txt'):  #catch result.hit files
                        # Add output to Excel
                        #add all desired data to memory
                        with open(os.path.join(path, name)) as tsv:
                            for column in zip(*[
                                    line
                                    for line in csv.reader(tsv,
                                                           dialect="excel-tab")
                            ]):  #grab tsv by column
                                if column[0] == 'subject_description':
                                    subject = []

                                    for i in range(1, len(column)):
                                        subject.append(column[i])

                                    #populate list of variants in each file
                                    variants = []
                                    for i in range(0, len(subject)):
                                        split = subject[i].split(';')
                                        variants.append(split[0])

                                    #get count and set of unique variants
                                    count = Counter(variants)
                                    #count = [Decimal(n) for n in count]
                                    unique = set(variants)

                                    #for each unique organism
                                    for i in totalUnique:
                                        col += 1
                                        worksheet.write(col, 0, i)

                                        for j in unique:
                                            if i == j:
                                                row += 1
                                                worksheet.write(col, row, i)

                                        #print count
                                        #print('YEET')

    # for obj in unique:
    # 	div=count[obj]
    # 	print '%s : %d : %f' % (obj, count[obj],(count[obj]/sum((count).values()))*100)

        #adding a skip line between layers
        col += 2

def gen_find(filepat, top):
    for path, dirlist, filelist in os.walk(top):
        # fnmatch.filter() selects the names in filelist that match filepat;
        # fnmatch() itself only tests a single name.
        for filename in fnmatch.filter(filelist, filepat):
            yield os.path.join(path, filename)
__author__ = 'andrew.H'
import sys,os,os.path,json,fnmatch,shutil,getopt,re
from fnmatch import fnmatch
from subprocess import *

inFolder,baseFile,clean = '/data/userdata/ahowden/dataForPipelineOrganized','',''

for opt, arg in options:#checking the options for input folder provided
	if opt == '--input':
		inFolder = str(arg)#input folder


#find xml files by layer#_#
for path, subdirs, files in os.walk(inFolder):
	for name in files:
		if fnmatch(name,'*.xml'):
			baseFile = name #baseFile without pwd
			while fnmatch(baseFile, '*.*'):
				baseFile = os.path.splitext(baseFile)[0]
			filename = str(baseFile[:8])
			#create folder layer#_# in same location as file if not already created
			if not os.path.exists(os.path.join(path,filename)):
				os.makedirs(os.path.join(path,filename))
			#move file to the folder (newly created or pre-existing)
			os.rename(path +'/'+ name, path +'/'+ filename +'/'+ name)
                     

#deploy parsemerge, input.json, xmlparser.pl