Example #1
def _should_include_path(path, includes, excludes):
    """Return True iff the given path should be included."""
    from os.path import basename
    from fnmatch import fnmatch

    base = basename(path)
    if includes:
        for include in includes:
            if fnmatch(base, include):
                try:
                    log.debug("include `%s' (matches `%s')", path, include)
                except (NameError, AttributeError):
                    pass
                break
        else:
            try:
                log.debug("exclude `%s' (matches no includes)", path)
            except (NameError, AttributeError):
                pass
            return False
    for exclude in excludes:
        if fnmatch(base, exclude):
            try:
                log.debug("exclude `%s' (matches `%s')", path, exclude)
            except (NameError, AttributeError):
                pass
            return False
    return True
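A quick sanity check of the precedence above (hypothetical patterns; with no module-level log bound, the guarded debug calls are simply skipped):

_should_include_path("src/main.py", ["*.py"], ["test_*"])       # True: matches an include, no exclude
_should_include_path("src/test_main.py", ["*.py"], ["test_*"])  # False: included, then excluded
_should_include_path("README.md", ["*.py"], [])                 # False: matches no include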
Example #2
    def files(self):
        if self.is_dir:
            if self.recurse:
                for root_dir, dirnames, filenames in os.walk(self.scan_path):
                    if self.filetype_filter != '*':
                        filenames = fnmatch.filter(filenames, self.filetype_filter)
                    for filename in filenames:
                        processing_callbacks = [callback for type_glob_string, callback
                                                in self.filetype_handlers.iteritems()
                                                if fnmatch.fnmatch(filename, type_glob_string)]
                        yield ScannerJobFile(os.path.join(root_dir, filename), processing_callbacks, self.data_callback, self.context)
            else:
                file_paths = os.listdir(self.scan_path)
                if self.filetype_filter != '*':
                    file_paths = fnmatch.filter(file_paths, self.filetype_filter)
                for file_path in file_paths:
                    processing_callbacks = [callback for type_glob_string, callback
                                            in self.filetype_handlers.iteritems()
                                            if fnmatch.fnmatch(file_path, type_glob_string)]
                    yield ScannerJobFile(os.path.join(self.scan_path, file_path), processing_callbacks, self.data_callback, self.context)
        else:  # single file
            processing_callbacks = [callback for type_glob_string, callback
                                    in self.filetype_handlers.iteritems()
                                    if fnmatch.fnmatch(self.scan_path, type_glob_string)]
            yield ScannerJobFile(self.scan_path, processing_callbacks, self.data_callback, self.context)
Example #3
    def expect_content(self, name, content, exact=False):
        actual = self.__read_file(name, exact)
        content = string.replace(content, "$toolset", self.toolset+"*")

        matched = False
        if exact:
            matched = fnmatch.fnmatch(actual, content)
        else:
            def sorted_(x):
                x.sort()
                return x
            actual_ = map(lambda x: sorted_(x.split()), actual.splitlines())
            content_ = map(lambda x: sorted_(x.split()), content.splitlines())
            if len(actual_) == len(content_):
                matched = map(
                    lambda x, y: map(lambda n, p: fnmatch.fnmatch(n, p), x, y),
                    actual_, content_)
                matched = reduce(
                    lambda x, y: x and reduce(
                        lambda a, b: a and b,
                    y),
                    matched)

        if not matched:
            print "Expected:\n"
            print content
            print "Got:\n"
            print actual
            self.fail_test(1)
Example #4
def build_archive_list(pattern_list, target_dir):
  glob_file_list = []

  for pattern in pattern_list:
    # Extract patterns
    subdir = pattern[0]
    file_pattern = pattern[1]
    date_pattern = pattern[2]

    # Build list of files
    if len(date_pattern) > 0:
      file_list = [ (datetime.strptime(file_name, date_pattern).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), file_name, subdir)
                      for file_name in os.listdir(target_dir) if fnmatch.fnmatch(file_name, file_pattern) ]
    else:
      file_list = [ (date.fromtimestamp(tardate(file_name)).strftime("%Y-%m-%dT%H:%M:%S.%fZ"), file_name, subdir)
                      for file_name in os.listdir(target_dir) if fnmatch.fnmatch(file_name, file_pattern) ]

    assert len(file_list) > 0

    # Append files to global list
    glob_file_list.extend(file_list)

  # Sort the global list by date and return it
  glob_file_list.sort(key=lambda x: x[0])
  return glob_file_list
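Each entry of pattern_list is a (subdir, file_pattern, date_pattern) triple; a hypothetical call, assuming tardate() supplies a timestamp when no date pattern is given, might look like:

pattern_list = [
    ('logs', 'app-*.log', 'app-%Y%m%d.log'),  # date parsed from the file name
    ('dumps', '*.tar.gz', ''),                # date taken from tardate(file_name)
]
archive_list = build_archive_list(pattern_list, '/var/backups')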
Example #5
    def add_extra_headers(self, headers, path, url):
        if fnmatch.fnmatch(url, '*.xpi'):
            headers['Content-Type'] = 'application/x-xpinstall'
        if fnmatch.fnmatch(url, '*.rdf'):
            headers['Content-Type'] = 'text/rdf'
        if fnmatch.fnmatch(url, '*.json'):
            headers['Content-Type'] = 'application/json'
Example #6
    def allow(self, include=None, exclude=None):
        """
        Given a set of wildcard patterns in the include and exclude arguments,
        tests whether the patterns allow this item for processing.

        The exclude parameter is processed first as a broader filter and then
        include is used as a narrower filter to override the results for more
        specific files.

        Example:
        exclude = (".*", "*~")
        include = (".htaccess",)

        """
        if not include:
            include = ()
        if not exclude:
            exclude = ()

        if reduce(lambda result, pattern:
                  result or fnmatch.fnmatch(self.name, pattern),
                  include, False):
            return True

        if reduce(lambda result, pattern:
                  result and not fnmatch.fnmatch(self.name, pattern),
                  exclude, True):
            return True

        return False
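On Python 3, where reduce has moved to functools, the same include-overrides-exclude logic reads more directly with any() and all(); a minimal standalone sketch:

import fnmatch

def allow(name, include=(), exclude=()):
    # Any matching include pattern admits the item outright.
    if any(fnmatch.fnmatch(name, p) for p in include):
        return True
    # Otherwise the item passes only if no exclude pattern matches.
    return all(not fnmatch.fnmatch(name, p) for p in exclude)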
Example #7
def _CheckLinks(input_api, output_api, results):
  for affected_file in input_api.AffectedFiles():
    name = affected_file.LocalPath()
    absolute_path = affected_file.AbsoluteLocalPath()
    if not os.path.exists(absolute_path):
      continue
    if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
        fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
        fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH) or
        fnmatch.fnmatch(name, '%s*' % API_PATH)):
      contents = _ReadFile(absolute_path)
      args = []
      if input_api.platform == 'win32':
        args = [input_api.python_executable]
      args.extend([os.path.join('docs', 'server2', 'link_converter.py'),
                   '-o',
                   '-f',
                   absolute_path])
      output = input_api.subprocess.check_output(
          args,
          cwd=input_api.PresubmitLocalPath(),
          universal_newlines=True)
      if output != contents:
        changes = ''
        for i, (line1, line2) in enumerate(
            zip(contents.split('\n'), output.split('\n'))):
          if line1 != line2:
            changes = ('%s\nLine %d:\n-%s\n+%s\n' %
                (changes, i + 1, line1, line2))
        if changes:
          results.append(output_api.PresubmitPromptWarning(
              'File %s may have an old-style <a> link to an API page. Please '
              'run docs/server2/link_converter.py to convert the link[s], or '
              'convert them manually.\n\nSuggested changes are: %s' %
              (name, changes)))
Example #8
def compare(list, values, attrName, subattrName=None, otherList=None, linkProperties=None):

    if len(values) == 0:
        raise ValueError("values must not be empty")

    # List to return
    returnList = []

    for value in values:
        for item in list:
            if otherList is None:
                # Get attribute "attrName" at the first level
                compareName = getattr(item, attrName)
                if subattrName is None:
                    if fnmatch.fnmatch(compareName, value):
                        returnList.append(item)
                # If sub attribute requested, get it
                else:
                    compareName2 = getattr(compareName, subattrName)
                    if fnmatch.fnmatch(compareName2, value):
                        returnList.append(item)
            else:
                for otherItem in otherList:
                    if getattr(item, linkProperties[0]) == getattr(otherItem, linkProperties[1]):
                        compareName = getattr(otherItem, attrName)
                        if subattrName is None:
                            if fnmatch.fnmatch(compareName, value):
                                returnList.append(item)
                        else:
                            compareName2 = getattr(compareName, subattrName)
                            if fnmatch.fnmatch(compareName2, value):
                                returnList.append(item)
    return returnList
Example #9
def find_fit_speed_log(out_file, path):
    pattern = "fitlog_time_speed.dat"
    fitlog = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                fitlog.append(os.path.join(root, name))

    pattern = "fitlog_time_speed.dat"
    fitlog_time_speed = []
    for root, dirs, files in os.walk(path):
        for name in files:
            if fnmatch.fnmatch(name, pattern):
                fitlog_time_speed.append(os.path.join(root, name))

    string = "plot "
    for my_file in fitlog:
        string = string + "'" + my_file + "' using ($1/60/60):($2) with lp,"

        # for my_file in fitlog_time_speed:
        # string=string+"'"+my_file+"' using ($2) axis x1y2 with lp,"

    string = string[:-1]
    with open(out_file, "w") as text_file:
        text_file.write(string)
Example #10
def write_test_for_class(item, out):
    all_screenshots = _recursive_glob('*.png', SCREENSHOTS_ROOT)

    always_screenshots = []
    try_screenshots = []

    for filename in all_screenshots:
        if any(fnmatch.fnmatch(filename, x) for x in item.screenshots):
            always_screenshots.append(filename)
        elif any(fnmatch.fnmatch(filename, x) for x in item.try_screenshots):
            try_screenshots.append(filename)

    if len(always_screenshots) + len(try_screenshots) == 0:
        return 0

    always_screenshots.sort()
    try_screenshots.sort()
    out.write(dedent('''\


        def auto_selftest_{name}():
            r"""
        ''').format(name=item.name))

    for expr in item.expressions:
        for s in always_screenshots:
            out.write("    >>> %s\n" % expr.format(frame='f("%s")' % s))
        for s in try_screenshots:
            out.write("    >>> %s # remove-if-false\n" % expr.format(
                frame='f("%s")' % s))

    out.write('    """\n    pass\n')
    return len(always_screenshots) + len(try_screenshots)
Example #11
File: common.py Project: paskma/py
    def visit(self, fil=None, rec=None, ignore=_dummyclass):
        """ yields all paths below the current one

            fil is a filter (glob pattern or callable), if not matching the
            path will not be yielded, defaulting to None (everything is
            returned)

            rec is a filter (glob pattern or callable) that controls whether
            a node is descended, defaulting to None

            ignore is an Exception class that is ignored when calling dirlist()
            on any of the paths (by default, all exceptions are reported)
        """
        if isinstance(fil, str):
            fil = fnmatch(fil)
        if rec:
            if isinstance(rec, str):
                rec = fnmatch(rec)
            elif not callable(rec):
                rec = lambda x: True
        reclist = [self]
        while reclist: 
            current = reclist.pop(0)
            try:
                dirlist = current.listdir() 
            except ignore:
                return
            for p in dirlist:
                if fil is None or fil(p):
                    yield p
                if p.check(dir=1) and (rec is None or rec(p)):
                    reclist.append(p)
Example #12
def recurse_path(root, excludes=None):
    if excludes is None:
        excludes = []

    basepath = os.path.realpath(root)
    pathlist = [basepath]

    patterns = [e for e in excludes if os.path.sep not in e]
    absolute = [e for e in excludes if os.path.isabs(e)]
    relative = [e for e in excludes if os.path.sep in e and
                not os.path.isabs(e)]
    for root, dirs, files in os.walk(basepath, topdown=True):
        dirs[:] = [
            d for d in dirs
            if not any([fnmatch.fnmatch(d, pattern) for pattern in patterns])
            if not any([fnmatch.fnmatch(os.path.abspath(os.path.join(root, d)),
                                        path)
                        for path in absolute])
            if not any([fnmatch.fnmatch(os.path.relpath(os.path.join(root, d)),
                                        path)
                        for path in relative])
        ]
        pathlist.extend([os.path.join(root, path) for path in dirs])

    return pathlist
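A hypothetical invocation mixing the three exclude forms the function distinguishes (bare name patterns, absolute paths, and relative paths); pruning works because dirs is modified in place while topdown=True:

paths = recurse_path('/home/user/project',
                     excludes=['.git',                        # name pattern
                               '/home/user/project/vendor',   # absolute path
                               'docs/_build'])                # relative path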
Example #13
def main(argv):
    api_files = rst_files = [rst for rst in os.listdir('doc/api')
                             if fnmatch.fnmatch(rst, '*.rst')
                             and rst not in excluded_docs]
    cmd = argv.pop(0)
    def has(*options):
        for opt in options:
            if opt in argv:
                return argv.pop(argv.index(opt))
    if has('-h', '--help'):
        usage(cmd)
    verbose = has('-v', '--verbose')
    only_documented = not has('-a', '--all')
    if argv:
        given_files = []
        for arg in argv:
            arg = arg.replace('\\', '/').replace(api_doc + '/', '')
            arg = arg.replace('.rst', '') + '.rst'
            if '*' in arg: # glob pattern
                given_files += [rst for rst in api_files
                                if fnmatch.fnmatch(rst, arg)]
            elif arg in api_files:
                given_files.append(arg)
        api_files = given_files
    rst_basenames = sorted(f[:-4] for f in rst_files)
    for rst in api_files:
        basename = rst.replace('.rst', '')
        if verbose or len(api_files) > 1:
            print("== Checking %s ... " % rst)
        check_api_doc(basename, verbose, only_documented,
                      any(f.startswith(basename) and f != basename
                          for f in rst_basenames))
Example #14
def write_mol(mol, filename):
    """
    Write a molecule to a file based on file extension. For example, anything
    ending in "xyz" is assumed to be an XYZ file. Supported formats include
    xyz, Gaussian input (gjf|g03|g09|com|inp), and pymatgen's JSON serialized
    molecules.

    Args:
        mol (Molecule/IMolecule): Molecule to write
        filename (str): A filename to write to.
    """
    fname = os.path.basename(filename)
    if fnmatch(fname.lower(), "*.xyz*"):
        return XYZ(mol).write_file(filename)
    elif any([fnmatch(fname.lower(), "*.{}*".format(r))
              for r in ["gjf", "g03", "g09", "com", "inp"]]):
        return GaussianInput(mol).write_file(filename)
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        with zopen(filename, "wt") as f:
            return f.write(str2unicode(json.dumps(mol, cls=MontyEncoder)))
    else:
        m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
                      filename.lower())
        if m:
            return BabelMolAdaptor(mol).write_file(filename, m.group(1))

    raise ValueError("Unrecognized file extension!")
Example #15
def filter_models(request, models, exclude):
    """
    Returns (model, perm,) for all models that match models/exclude patterns
    and are visible by current user.
    """
    items = get_avail_models(request)
    included = []
    full_name = lambda model: '%s.%s' % (model.__module__, model.__name__)
    
    # I believe the implemented
    # O(len(patterns)*len(matched_patterns)*len(all_models))
    # algorithm is fine for model lists because they are small and admin
    # performance is not a bottleneck. If that is not the case, the code
    # should be optimized.
    
    if len(models) == 0:
        included = items
    else:
        for pattern in models:
            pattern_items = []
            for item in items:
                model, perms = item
                if fnmatch(full_name(model), pattern) and item not in included:
                    pattern_items.append(item)
            pattern_items.sort(key=lambda x:x[0]._meta.verbose_name_plural)
            included.extend(pattern_items)
    
    result = included[:]
    for pattern in exclude:
        for item in included:
            model, perms = item
            if fnmatch(full_name(model), pattern):
                result.remove(item)
    return result
Example #16
    def test_run_sql_test_template(self):
        test_case = MockSQLConcurrencyTestCaseTemplate('test_template_query02')
        test_case.__class__.__unittest_skip__ = False
        test_result = unittest.TestResult()

        # Cleanup
        if os.path.exists(test_case.get_out_dir()):
            for file in os.listdir(test_case.get_out_dir()):
                if fnmatch.fnmatch(file, 'template_query02**.out'):
                    os.remove(os.path.join(test_case.get_out_dir(),file))

        test_case.run(test_result)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 0)
        count = 0
        for file in os.listdir(test_case.get_out_dir()):
            if fnmatch.fnmatch(file, 'template_query02*.out'):
                count = count + 1
        self.assertEqual(count, 12)

        # Cleanup
        if os.path.exists(test_case.get_out_dir()):
            for file in os.listdir(test_case.get_out_dir()):
                path = os.path.join(test_case.get_out_dir(), file)
                if fnmatch.fnmatch(file, 'template_query02*.*'):
                    os.remove(os.path.join(test_case.get_out_dir(), file))
                if fnmatch.fnmatch(file, 'regress_sql_concurrency_test_case'):
                    shutil.rmtree(path)
Example #17
    def test_run_sql_test_failure_with_gpdiff(self):
        test_case = MockSQLConcurrencyTestCase('test_query03')

        # As explained above, we want MockSQLTestCase to run if and only if
        # it's being invoked by our unit tests. So, it's skipped if discovered
        # directly by unit2. Here, bearing in mind that SQLConcurrencyTestCaseTests is itself
        # triggered by unit2, we override MockSQLTestCase's skip decorator to allow
        # this explicit construction of MockSQLTestCase to proceed.
        test_case.__class__.__unittest_skip__ = False
        test_result = unittest.TestResult()
        current_dir = os.path.dirname(inspect.getfile(test_case.__class__))
        i = 1
        for file in os.listdir(test_case.get_sql_dir()):
            if fnmatch.fnmatch(file, 'query03_part*.sql'):
                i += 1
        self.assertTrue(test_case.gpdiff)
        test_case.run(test_result)
        self.assertEqual(test_result.testsRun, 1)
        self.assertEqual(len(test_result.errors), 0)
        self.assertEqual(len(test_result.skipped), 0)
        self.assertEqual(len(test_result.failures), 1)
        # Cleanup
        if os.path.exists(test_case.get_out_dir()):
            for file in os.listdir(test_case.get_out_dir()):
                if fnmatch.fnmatch(file, 'query03*.out'):
                    os.remove(os.path.join(test_case.get_out_dir(),file))
Example #18
def get_type_by_name(path):
    """Returns type of file by its name, or None if not known"""
    if not _cache_uptodate:
        _cache_database()

    leaf = os.path.basename(path)
    if leaf in literals:
        return literals[leaf]

    lleaf = leaf.lower()
    if lleaf in literals:
        return literals[lleaf]

    ext = leaf
    while 1:
        p = ext.find('.')
        if p < 0: break
        ext = ext[p + 1:]
        if ext in exts:
            return exts[ext]
    ext = lleaf
    while 1:
        p = ext.find('.')
        if p < 0: break
        ext = ext[p+1:]
        if ext in exts:
            return exts[ext]
    for (glob, mime_type) in globs:
        if fnmatch.fnmatch(leaf, glob):
            return mime_type
        if fnmatch.fnmatch(lleaf, glob):
            return mime_type
    return None
Example #19
    def included(self, event, base=None):
        path = ''
        if not base:
            if hasattr(event, 'dest_path'):
                base = os.path.basename(event.dest_path)
                path = self.remove_prefix(self.get_unicode_path(event.dest_path))
            else:
                base = os.path.basename(event.src_path)
                path = self.remove_prefix(self.get_unicode_path(event.src_path))
        try:
            #logging.info(" 1 : " + str(type(base)) + " " + str(type(path)))
            if isinstance(path, str):
                #logging.info('ENCODING PATH')
                path = unicode(path, 'utf-8')
            if isinstance(base, str):
                #logging.info('ENCODING BASE')
                base = unicode(base, 'utf-8')
            #logging.info("2 : " + str(type(base)) + " " + str(type(path)))
            #logging.info(base + u" " + path)
        except Exception as e:
            logging.exception(e)
        if path == '.':
            return False
        for i in self.includes:
            if not fnmatch.fnmatch(base, i):
                return False
        for e in self.excludes:
            if fnmatch.fnmatch(base, e):
                return False
        for e in self.excludes:
            if (e.startswith('/') or e.startswith('*/')) and fnmatch.fnmatch(path, e):
                return False
        return True
Example #20
	def filter(self, value):
		""" Sets a filter for items present in the tree. Only shows tree items 
		that match the specified file extension(s) and hides the others.

		value : None, str or list
			If None is passed, this clears the filter, making all items present
			in the tree visible again.

			If a string is passed, it will be used as a single file extension
			to compare the items against.

			If a list of file extensions is passed, then items will be shown if
			they match any of the extensions present in the list.
		"""
		# Check if supplied a valid value
		if not isinstance(value, list) and \
		not isinstance(value, basestring) and \
		value is not None:
			raise ValueError('Supplied filter invalid, needs to be list, string'
				' or None')

		# Store the filter for later reference
		self._filter = value

		# Iterate over the items
		iterator = QtWidgets.QTreeWidgetItemIterator(self)
		while(iterator.value()):
			item = iterator.value()
			# Check if item is of type 'file'
			# Filters are only applicable to files
			item_type = item.data(1, QtCore.Qt.DisplayRole)
			if item_type == "file":
				# If filter is None, it means everything should be
				# visible, so set this item to visible and continue.
				if self._filter is None:
					item.setHidden(False)
					iterator += 1
					continue

				# Check if filter extension is contained in filename
				item_data = item.data(0, QtCore.Qt.UserRole)
				filename = item_data['attributes']['name']

				# Assume no match by default
				typematch = False
				# If filter is a single string, just check directly
				if isinstance(self._filter, basestring):
					typematch = fnmatch.fnmatch(filename, self._filter)
				# If filter is a list, compare to each item in it
				if isinstance(self._filter, list):
					for ext in self._filter:
						if fnmatch.fnmatch(filename, ext):
							typematch = True
							break
				# Set item's visibility according to value of typematch
				if typematch:
					item.setHidden(False)
				else:
					item.setHidden(True)
			iterator += 1
Example #21
    def _getLanguage(self, document):
        """Get language name by file path
        """
        fileName = document.fileName()

        if not fileName:
            return

        for languageName, fileNameGlobs, firstLineGlobs, iconPath in self.iterLanguages():  # pylint: disable=W0612
            for fileNameGlob in fileNameGlobs:
                # Empty patterns are ignored
                if fileNameGlob and \
                    fnmatch.fnmatch(fileName, fileNameGlob):
                    return languageName

        firstLine = document.line(0).strip()  # first line without \n and other spaces
        if firstLine is not None:
            for languageName, fileNameGlobs, firstLineGlobs, iconPath in self.iterLanguages():
                for firstLineGlob in firstLineGlobs:
                    # Empty patterns are ignored
                    if firstLineGlob and \
                       fnmatch.fnmatch(firstLine, firstLineGlob):
                        return languageName

        return None
Example #22
def step_back(io_manager):
    '''
    deletes the output files from the most recent runs
    :type io_manager: IOManager
    '''
    current_iteration = None
    target_iteration = None

    # Figure out the current iteration and modify the scan specs file appropriately
    parameter_scan_specs_xml_file_path = io_manager.parameter_scan_specs_xml_file_path
    xml_file = parse(parameter_scan_specs_xml_file_path)
    xml_root = xml_file.getroot()
    for parameter_element in xml_root.iter('Parameter'):
        current_iteration = int(parameter_element.attrib['CurrentIteration'])
        target_iteration = current_iteration - 1

        print('Stepping back files to redo batch run {}'.format(target_iteration))

        parameter_element.set('CurrentIteration', str(target_iteration))
    ElementTree(xml_root).write(parameter_scan_specs_xml_file_path)
    
    # Remove screenshots
    for root, dirs, files in os.walk(io_manager.screenshot_output_path):
        for dir in dirs:
            if dir == str(target_iteration):
                shutil.rmtree(os.path.join(root, dir))
    
    # Remove most recent .csv and .txt in output folder
    for root, dirs, files in os.walk(io_manager.output_folder):
        for file in files:
            if fnmatch(file, '*output{}.txt'.format(target_iteration)) or fnmatch(file, '*output{}.csv'.format(target_iteration)):
                os.remove(os.path.join(root, file))
Example #23
def print_result_summaries_list(topnum=5):
    print('\n<(^_^<)\n')
    # Print out some summary of all results you have
    hs = ld2.HotSpotter()
    hs.load_tables(ld2.DEFAULT)
    result_file_list = os.listdir(hs.dirs.result_dir)

    sorted_rankres = []
    for result_fname in iter(result_file_list):
        if fnmatch.fnmatch(result_fname, 'rankres_str*.csv'):
            print(result_fname)
            with open(join(hs.dirs.result_dir, result_fname), 'r') as file:

                metaline = file.readline()
                toprint = metaline
                # skip 4 metalines
                [file.readline() for _ in xrange(4)]
                top5line = file.readline()
                top1line = file.readline()
                toprint += top5line + top1line
                line = read_until(file, '# NumData')
                num_data = int(line.replace('# NumData', ''))
                file.readline()  # header
                res_data_lines = [file.readline() for _ in xrange(num_data)]
                res_data_str = np.array([line.split(',') for line in res_data_lines])
                tt_scores = np.array(res_data_str[:, 5], dtype=np.float)
                bt_scores = np.array(res_data_str[:, 6], dtype=np.float)
                tf_scores = np.array(res_data_str[:, 7], dtype=np.float)

                tt_score_sum = sum([score for score in tt_scores if score > 0])
                bt_score_sum = sum([score for score in bt_scores if score > 0])
                tf_score_sum = sum([score for score in tf_scores if score > 0])

                toprint += ('tt_scores = %r; ' % tt_score_sum)
                toprint += ('bt_scores = %r; ' % bt_score_sum)
                toprint += ('tf_scores = %r; ' % tf_score_sum)
                if topnum == 5:
                    sorted_rankres.append(top5line + metaline)
                else:
                    sorted_rankres.append(top1line + metaline)
                print(toprint + '\n')

    print('\n(>^_^)>\n')

    sorted_mapscore = []
    for result_fname in iter(result_file_list):
        if fnmatch.fnmatch(result_fname, 'oxsty_map_csv*.csv'):
            print(result_fname)
            with open(join(hs.dirs.result_dir, result_fname), 'r') as file:
                metaline = file.readline()
                scoreline = file.readline()
                toprint = metaline + scoreline

                sorted_mapscore.append(scoreline + metaline)
                print(toprint)

    print('\n'.join(sorted(sorted_rankres)))
    print('\n'.join(sorted(sorted_mapscore)))

    print('\n^(^_^)^\n')
Example #24
def _ignore_patterns(path, names):
    ignored_names = []
    for pattern in patterns:
        for name in names:
            if fnmatch.fnmatch(name, pattern) or fnmatch.fnmatch(os.path.join(path, name), pattern):
                ignored_names.append(name)
    return set(ignored_names)
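This helper has the same (path, names) shape as the callable returned by shutil.ignore_patterns, except that it also matches patterns against the joined path; assuming patterns is bound in the enclosing scope, it can be handed straight to shutil.copytree:

import shutil

# Hypothetical usage: copy a tree while skipping whatever `patterns` names.
shutil.copytree('src', 'dst', ignore=_ignore_patterns)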
Example #25
File: ec2.py Project: pior/awstools
def filter_instances(specifiers, instances):
    targets = set()

    for instance in instances:
        for specifier in specifiers:

            if re.match(RE_INSTANCE_ID, specifier):
                if instance.id == specifier:
                    targets.add(instance)

            elif re.match(RE_PRIVATE_IP, specifier):
                if instance.private_ip_address == specifier:
                    targets.add(instance)

            elif re.match(RE_PRIVATE_HOSTNAME_1, specifier):
                if instance.private_dns_name.startswith(specifier):
                    targets.add(instance)

            elif re.match(RE_PRIVATE_HOSTNAME_2, specifier):
                if instance.private_dns_name.startswith(specifier):
                    targets.add(instance)

            else:
                name = instance.tags.get('Name', '').lower()
                altname = instance.tags.get('altName', '').lower()

                if fnmatch(name, specifier.lower()):
                    targets.add(instance)
                elif fnmatch(altname, specifier.lower()):
                    targets.add(instance)

    return list(targets)
Example #26
    def _get(genre):
        response = []
        for f in os.listdir("%s/%s" % (options.store, genre)):
            if not fnmatch.fnmatch(f, '*_thumbnail.jpg') and not fnmatch.fnmatch(f, '*_meta.json'):
                response.append(MediaManager._build_response_for(genre, f))

        return response
Example #27
    def get_index(self, requestContext):
        matches = []

        for root, _, files in walk(settings.WHISPER_DIR):
          root = root.replace(settings.WHISPER_DIR, '')
          for base_name in files:
            if fnmatch.fnmatch(base_name, '*.wsp'):
              match = join(root, base_name).replace('.wsp', '').replace('/', '.').lstrip('.')
              bisect.insort_left(matches, match)

        # unlike 0.9.x, we're going to use os.walk with followlinks
        # since we require Python 2.7 and newer that supports it
        if RRDReader.supported:
          for root, _, files in walk(settings.RRD_DIR, followlinks=True):
            root = root.replace(settings.RRD_DIR, '')
            for base_name in files:
              if fnmatch.fnmatch(base_name, '*.rrd'):
                absolute_path = join(settings.RRD_DIR, root, base_name)
                base_name = splitext(base_name)[0]
                metric_path = join(root, base_name)
                rrd = RRDReader(absolute_path, metric_path)
                for datasource_name in rrd.get_datasources(absolute_path):
                  match = join(metric_path, datasource_name).replace('.rrd', '').replace('/', '.').lstrip('.')
                  if match not in matches:
                    bisect.insort_left(matches, match)

        return matches
Example #28
File: Util.py Project: pjz/TMDAng
def findmatch(list, addrs):
    """Determine whether any of the passed e-mail addresses match a
    Unix shell-style wildcard pattern contained in list.  The
    comparison is case-insensitive.  Also, return the second half of
    the string if it exists (for exp and ext addresses only)."""
    for address in addrs:
        if address:
            address = address.lower()
            for p in list:
                stringparts = p.split()
                p = stringparts[0]
                # Handle special @=domain.dom syntax.
                try:
                    at = p.rindex('@')
                    atequals = p[at+1] == '='
                except (ValueError, IndexError):
                    atequals = None
                if atequals:
                    p1 = p[:at+1] + p[at+2:]
                    p2 = p[:at+1] + '*.' + p[at+2:]
                    match = (fnmatch.fnmatch(address,p1)
                             or fnmatch.fnmatch(address,p2))
                else:
                    match = fnmatch.fnmatch(address,p)
                if match:
                    try:
                        return stringparts[1]
                    except IndexError:
                        return 1
Example #29
    def get_obs(self, bins, excludebin=None):

        if isinstance(bins, basestring):
            bins = [bins]
        if isinstance(excludebin, basestring):
            excludebin = [excludebin]

        matching_bins = set()

        for binpattern in bins:
            for realbin in self.card.exp.keys():
                # First check if we explicitly exclude it
                excluded = False
                if excludebin is not None:
                    for excludepattern in excludebin:
                        if fnmatch.fnmatch(realbin, excludepattern):
                            excluded = True
                            break
                if excluded:
                    continue
                # otherwise check if it matches a desired bin
                if fnmatch.fnmatch(realbin, binpattern) or fnmatch.fnmatch(realbin, 'bin' + binpattern):
                    matching_bins.add(realbin)

        obs = 0
        for bin in matching_bins:
            obs += self.card.obs[bin]
        return int(obs)
Example #30
def filter_out(strings, patterns):
    '''Filter out any string that matches any of the specified patterns.'''
    for s in strings:
        if all(not fnmatch.fnmatch(s, p) for p in patterns):
            yield s
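For instance, since filter_out is a generator it needs to be materialized:

files = ['a.py', 'b.txt', 'c.pyc']
print(list(filter_out(files, ['*.txt', '*.pyc'])))  # ['a.py']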
Example #31
def vcftools():
    directories = args.pop1 + '/*/Bcftools/' + filename
    file_list = glob.glob(directories)
    for f in file_list:
        cmd1 = ['bcftools', 'index', '-c', '-f', f]
        process1 = subprocess.Popen(cmd1, \
         stdout=subprocess.PIPE)
        while process1.wait() is None:
            pass
        process1.stdout.close()

    directories1 = args.pop2 + '/*/Bcftools/' + filename
    file_list1 = glob.glob(directories1)
    for f1 in file_list1:
        cmd_1 = ['bcftools', 'index', '-c', '-f', f1]
        process_1 = subprocess.Popen(cmd_1, \
         stdout=subprocess.PIPE)
        while process_1.wait() is None:
            pass
        process_1.stdout.close()

    # Make directory for the merged vcf-files
    # for population1 and population2.
    if not os.path.exists('Populations'):
        os.makedirs('Populations')

    # Making a list of vcf-files that will be input to bcftools merge
    # and then merge population1.
    directories2 = args.pop1 + '/*/Bcftools/' + filename
    name_list1 = glob.glob(directories2)
    myfile = open("name_1_list.txt", "w")
    for n1 in name_list1:
        myfile.write(add + "%s\n" % n1)

    myfile.close()
    cmd2 = ['bcftools', 'merge', \
     '-l', add+names1, \
     '-Oz', '-o', merged_vcf_pop1]
    process2 = subprocess.Popen(cmd2, \
     stdout=subprocess.PIPE, \
     cwd='Populations')
    while process2.wait() is None:
        pass
    process2.stdout.close()

    # Making a list of vcf-files that will be input to bcftools merge
    # and then merge population2.
    directories3 = args.pop2 + '/*/Bcftools/' + filename
    name_list2 = glob.glob(directories3)
    myfile2 = open("name_2_list.txt", "w")
    for n2 in name_list2:
        myfile2.write(add + "%s\n" % n2)

    myfile2.close()
    cmd3 = ['bcftools', 'merge', \
     '-l', add+names2, \
     '-Oz', '-o', merged_vcf_pop2]
    process3 = subprocess.Popen(cmd3, \
     stdout=subprocess.PIPE, \
     cwd='Populations')
    while process3.wait() is None:
        pass
    process3.stdout.close()

    # Making a txt file of the names of the individuals in the populations
    # that is needed for vcftools --weir-fst-pop and indexing
    # the merged files for population1 and population2.
    for file in os.listdir('Populations'):
        if fnmatch.fnmatch(file, '*_merged_pop1.vcf.gz'):
            cmd4 = ['bcftools', 'index', '-c', '-f', merged_vcf_pop1]
            process4 = subprocess.Popen(cmd4, \
             stdout=subprocess.PIPE, \
             cwd='Populations')
            while process4.wait() is None:
                pass
            process4.stdout.close()

            cmd5 = ('bcftools query -l %s > %s') \
             % (merged_vcf_pop1, indv_txt_pop1)
            process5 = subprocess.Popen(cmd5, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Populations')
            while process5.wait() is None:
                pass
            process5.stdout.close()

        elif fnmatch.fnmatch(file, '*_merged_pop2.vcf.gz'):
            cmd6 = ['bcftools', 'index', '-c', '-f', merged_vcf_pop2]
            process6 = subprocess.Popen(cmd6, \
             stdout=subprocess.PIPE, \
             cwd='Populations')
            while process6.wait() is None:
                pass
            process6.stdout.close()

            cmd7 = ('bcftools query -l %s > %s') \
             % (merged_vcf_pop2, indv_txt_pop2)
            process7 = subprocess.Popen(cmd7, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Populations')
            while process7.wait() is None:
                pass
            process7.stdout.close()

    # Making a list of vcf-files that will be input to bcftools merge
    # and then merge population1 and population2 to an "all_merged"
    # vcf file, this file will be the input file to
    # vcftools --weir-fst-pop.
    directories4 = 'Populations/*_merged_*.vcf.gz'
    pop_list = glob.glob(directories4)
    myfile3 = open("pop_list.txt", "w")
    for p in pop_list:
        myfile3.write(add + "%s\n" % p)

    myfile3.close()
    cmd8 = ['bcftools', 'merge', \
     '-l', add+population_list, \
     '-Oz', '-o', all_pop_merged]
    process8 = subprocess.Popen(cmd8, \
     stdout=subprocess.PIPE, \
     cwd='Populations')
    while process8.wait() is None:
        pass
    process8.stdout.close()
Example #32
def build_site_packages():
    """Use PEX to resolve dependencies in a virtual environment,
    with some customizations to reduce the size of our build.

    https://www.pantsbuild.org/pex.html
    """
    # Remove flywheel_cli from cache
    # If you skip this step, it doesn't automatically update the python code
    if os.path.isdir(PEX_BUILD_CACHE_DIR):
        for name in os.listdir(PEX_BUILD_CACHE_DIR):
            if fnmatch.fnmatch(name, 'flywheel_cli*.whl'):
                path = os.path.join(PEX_BUILD_CACHE_DIR, name)
                print('Removing {} from cache...'.format(name))
                os.remove(path)

    # Read ignore list
    # See package-ignore.txt, largely we're removing test files and
    # Multi-megabyte dicoms from the dicom folder
    ignore_patterns = read_ignore_patterns()

    # Create resolver
    # Loosely based on: https://github.com/pantsbuild/pex/blob/982cb9a988949ffff3348b9bca98ae72a0bf8847/pex/bin/pex.py#L577
    resolver_option_builder = ResolverOptionsBuilder()
    resolvables = [
        Resolvable.get('flywheel-cli=={}'.format(PYTHON_CLI_VERSION),
                       resolver_option_builder)
    ]
    resolver = CachingResolver(PEX_BUILD_CACHE_DIR, None)

    # Effectively we resolve (possibly from cache) The source and all of the dependency packages
    # Then create the virtual environment, which contains those files
    print('Resolving distributions')
    resolved = resolver.resolve(resolvables)

    print('Building package lists')
    builder = PEXBuilder()
    for dist in resolved:
        builder.add_distribution(dist)
        builder.add_requirement(dist.as_requirement())

    # After this point, builder.chroot contains a full list of the files
    print('Compiling package')
    builder.freeze(bytecode_compile=False)

    site_packages_path = os.path.join(BUILD_DIR, 'site-packages.zip')

    # Create an uncompressed site-packages.zip and add all of the discovered files
    # (Except those that are filtered out)
    with open(site_packages_path, 'wb') as f:
        added_files = set()
        with zipfile.ZipFile(f, 'w') as zf:
            for filename in sorted(builder.chroot().files()):
                if is_ignored_file(ignore_patterns, filename):
                    continue

                if not filename.startswith('.deps'):
                    continue

                # Determine new path
                src_path = os.path.join(builder.chroot().chroot, filename)
                dst_path = '/'.join(filename.split('/')[2:])

                # Optionally, compile the file
                _, ext = os.path.splitext(src_path)
                if ext == '.py':
                    cfile_path = src_path + 'c'
                    dst_path += 'c'

                    print('Compiling: {}'.format(dst_path))
                    py_compile.compile(src_path,
                                       cfile=cfile_path,
                                       dfile=dst_path,
                                       optimize=1)
                    src_path = cfile_path

                if dst_path not in added_files:
                    zf.write(src_path, dst_path)
                    added_files.add(dst_path)

    return site_packages_path
Example #33
def wildcard_match(word, wildcard):
    return word and fnmatch.fnmatch(word, wildcard)
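One caveat for thin wrappers like this: fnmatch.fnmatch normalizes both arguments with os.path.normcase, so it is case-insensitive on Windows, while fnmatch.fnmatchcase compares exactly on every platform:

import fnmatch

fnmatch.fnmatch('Report.TXT', '*.txt')      # True on Windows, False on POSIX
fnmatch.fnmatchcase('Report.TXT', '*.txt')  # False everywhere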
Example #34
def main():
    try:

        usage = "[>] ./ioshook %prog [options] arg\n\n\r[>] Example for spawn or attach app with -s(--script) options:\n./ioshook -p com.apple.AppStore / [-n 'App Store'] -s trace_class.js\n\n\r[>] Example for spawn or attach app with -m(--method) options:\n./ioshook -p com.apple.AppStore / [-n 'App Store'] -m app-static\n\n\r[>] Example dump decrypt ipa with -d(--dump) and -o(--output) options:\n./ioshook -p com.apple.AppStore / [-n 'App Store'] -d -o App_dump_name\n\n\r[>] Example dump memory of application with --dump-memory and -s(--string) options:\n./ioshook -n 'App Store' --dump-memory '-s(--string)'\n\n\r[>] Example Scan IPA with file task:\n./ioshook --hexbyte-scan 'scan AppStore.ipa' -t /hexbyscan-tasks/openssl_hook.json"
        parser = optparse.OptionParser(usage, add_help_option=False)
        info = optparse.OptionGroup(parser, "Information")
        quick = optparse.OptionGroup(parser, "Quick Method")
        dump = optparse.OptionGroup(parser, "Dump decrypt IPA")
        hexscan = optparse.OptionGroup(parser, "HexByte Scan IPA")
        dumpmemory = optparse.OptionGroup(parser, "Dump memory of Application")

        parser.add_option('-h',
                          "--help",
                          action="help",
                          dest="help",
                          help="Show basic help message and exit")
        #Using options -p(--package) for spawn application and load script
        parser.add_option("-p",
                          "--package",
                          dest="package",
                          help="Identifier of the target app",
                          metavar="PACKAGE",
                          action="store",
                          type="string")
        #Using options -n(--name) for attach script to application is running
        parser.add_option("-n",
                          "--name",
                          dest="name",
                          help="Name of the target app",
                          metavar="NAME",
                          action="store",
                          type="string")

        parser.add_option("-s",
                          "--script",
                          dest="script",
                          help="Frida Script Hooking",
                          metavar="SCIPRT.JS")

        parser.add_option("-c",
                          "--check-version",
                          action="store_true",
                          help="Check iOS hook for the newest version",
                          dest="checkversion")
        parser.add_option("-u",
                          "--update",
                          action="store_true",
                          help="Update iOS hook to the newest version",
                          dest="update")

        quick.add_option(
            "-m",
            "--method",
            dest="method",
            type="choice",
            choices=[
                'app-static', 'bypass-jb', 'bypass-ssl', 'i-url-req',
                'i-crypto'
            ],
            help=
            "__app-static: Static Ananlysis Application(-n)\n\n\r\r__bypass-jb: Bypass Jailbreak Detection(-p)\n\n\r\r\r\r\r\r__bypass-ssl: Bypass SSL Pinning(-p)\n\n\n\n\n\n\n\n\n\r\r\r\r\r\r__i-url-req: Intercept URLRequest in App(-n)\n\n\n\n\n\n\n\n\n\r\r\r\r\r\r__i-crypto: Intercept Crypto in App(-p)",
            metavar="app-static / bypass-jb / bypass-ssl / i-url-req / i-crypto"
        )
        #Some options to get info from device and applications
        info.add_option("--list-devices",
                        action="store_true",
                        help="List All Devices",
                        dest="listdevices")
        #Listapp option using the code of the AloneMonkey's repo frida-ios-dump - Link: https://github.com/AloneMonkey/frida-ios-dump
        info.add_option("--list-apps",
                        action="store_true",
                        help="List The Installed apps",
                        dest="listapps")
        info.add_option("--list-appinfo",
                        action="store_true",
                        help="List Info of Apps on Itunes",
                        dest="listappinfo")
        info.add_option("--list-scripts",
                        action="store_true",
                        help="List All Scripts",
                        dest="listscripts")
        #Dump decrypt IPA using the code of the AloneMonkey's repo frida-ios-dump - Link: https://github.com/AloneMonkey/frida-ios-dump
        dump.add_option("-d",
                        "--dump",
                        action="store_true",
                        help="Dump decrypt application.ipa",
                        dest="dumpapp")
        dump.add_option("-o",
                        "--output",
                        action="store",
                        dest="output_ipa",
                        help="Specify name of the decrypted IPA",
                        metavar="OUTPUT_IPA",
                        type="string")

        #Dump memory of application using the code of Nightbringer21's repo fridump - Link: https://github.com/Nightbringer21/fridump
        dumpmemory.add_option("--dump-memory",
                              action="store",
                              help="Dump memory of application",
                              dest="dumpmemory")

        #Hexbytescan of application using the code of karek314's repo hexbytescanner - Link: https://github.com/karek314/hexbytescanner
        hexscan.add_option("--hexbyte-scan",
                           action="store",
                           help="Scan or Patch IPA with byte patterns",
                           dest="hexscan")
        hexscan.add_option("-t",
                           "--task",
                           action="store",
                           help="Task for hexbytescan",
                           dest="task")

        parser.add_option_group(dump)
        parser.add_option_group(dumpmemory)
        parser.add_option_group(hexscan)
        parser.add_option_group(info)
        parser.add_option_group(quick)

        options, args = parser.parse_args()

        methods = [
            "methods/ios_list_apps.js",  #0
            "methods/static_analysis.js",  #1
            "methods/bypass_ssl.js",  #2
            "methods/bypass_jailbreak.js",  #3
            "methods/intercept_url_request.js",  #4
            "methods/intercept_crypto.js",  #5
            "methods/dump.js"  #6
        ]

        utils = [
            "core/utils/dump.py"  #0
        ]

        if options.listdevices:
            logger.info('[*] List All Devices: ')
            os.system('frida-ls-devices')

        elif options.listapps:
            logger.info('[*] List All Apps on Devices: ')
            device = get_usb_iphone()
            list_applications(device)

        elif options.listappinfo:
            method = methods[0]
            if os.path.isfile(method):
                logger.info('[*] List Info of Apps on Itunes: ')
                process = 'itunesstored'
                os.system('frida -U -n ' + process + ' -l ' + method)
                #sys.stdin.read()
            else:
                logger.error('[?] Script not found!')

        elif options.listscripts:
            path = 'frida-scripts/'
            if os.path.exists(path):
                logger.info('[*] List All Scripts: ')
                for file_name in os.listdir(path):
                    if fnmatch.fnmatch(file_name, '*.js'):
                        print('[*] ' + file_name)
            else:
                logger.error('[?] Path frida-script not exists!')

        #Spawning application and load script
        elif options.package and options.script:
            if os.path.isfile(options.script):
                logger.info('[*] Spawning: ' + options.package)
                logger.info('[*] Script: ' + options.script)
                time.sleep(2)
                pid = frida.get_usb_device().spawn(options.package)
                session = frida.get_usb_device().attach(pid)
                hook = open(options.script, 'r')
                script = session.create_script(hook.read())
                script.load()
                frida.get_usb_device().resume(pid)
                sys.stdin.read()
            else:
                logger.error('[?] Script not found!')

        #Spawning application and load script with output

        #Attaching script to application
        elif options.name and options.script:
            if os.path.isfile(options.script):
                logger.info('[*] Attaching: ' + options.name)
                logger.info('[*] Script: ' + options.script)
                time.sleep(2)
                process = frida.get_usb_device().attach(options.name)
                hook = open(options.script, 'r')
                script = process.create_script(hook.read())
                script.load()
                sys.stdin.read()
            else:
                logger.error('[?] Script not found!')

        #Static Analysis Application
        elif options.name and options.method == "app-static":
            method = methods[1]
            if os.path.isfile(method):
                logger.info('[*] Attaching: ' + options.name)
                logger.info('[*] Method: ' + options.method)
                time.sleep(2)
                process = frida.get_usb_device().attach(options.name)
                method = open(method, 'r')
                script = process.create_script(method.read())
                script.load()
                sys.stdin.read()
            else:
                logger.error('[?] Script not found!')

        #Bypass jailbreak
        elif options.package and options.method == "bypass-jb":
            method = methods[3]
            if os.path.isfile(method):
                logger.info('[*] Bypass Jailbreak: ')
                logger.info('[*] Spawning: ' + options.package)
                logger.info('[*] Script: ' + method)
                time.sleep(2)
                pid = frida.get_usb_device().spawn(options.package)
                session = frida.get_usb_device().attach(pid)
                hook = open(method, 'r')
                script = session.create_script(hook.read())
                script.load()
                frida.get_usb_device().resume(pid)
                sys.stdin.read()
            else:
                logger.error('[?] Script for method not found!')

        #Bypass SSL Pinning
        elif options.package and options.method == "bypass-ssl":
            method = methods[2]
            if os.path.isfile(method):
                logger.info('[*] Bypass SSL Pinning: ')
                logger.info('[*] Spawning: ' + options.package)
                logger.info('[*] Script: ' + method)
                os.system('frida -U -f ' + options.package + ' -l ' + method +
                          ' --no-pause')
                #sys.stdin.read()
            else:
                logger.error('[?] Script for method not found!')

        #Intercept url request in app
        elif options.name and options.method == "i-url-req":
            method = methods[4]
            if os.path.isfile(method):
                logger.info('[*] Intercept UrlRequest: ')
                logger.info('[*] Attaching: ' + options.name)
                logger.info('[*] Script: ' + method)
                time.sleep(2)
                process = frida.get_usb_device().attach(options.name)
                method = open(method, 'r')
                script = process.create_script(method.read())
                script.load()
                sys.stdin.read()
            else:
                logger.error('[?] Script for method not found!')

        #Intercept Crypto Operations
        elif options.package and options.method == "i-crypto":
            method = methods[5]
            if os.path.isfile(method):
                logger.info('[*] Intercept Crypto Operations: ')
                logger.info('[*] Spawning: ' + options.package)
                logger.info('[*] Script: ' + method)
                os.system('frida -U -f ' + options.package + ' -l ' + method +
                          ' --no-pause')
                #sys.stdin.read()
            else:
                logger.error('[?] Script for method not found!')

        #check newversion
        elif options.checkversion:
            logger.info('[*] Checking for updates...')
            is_newest = check_version(speak=True)
            # if not is_newest:
            #     logger.info('[*] There is an update available for iOS hook')

        #update newversion
        elif options.update:
            logger.info('[*] Update in progress...')
            cmd = shlex.split("git reset --hard & git pull origin master")
            subprocess.call(cmd)
            sys.exit(0)

        #dump decrypt application
        elif (options.package or options.name) and options.dumpapp:
            logger.info('[*] Dumping...')
            util = utils[0]
            if options.name is None:
                if options.output_ipa is None:
                    cmd = shlex.split("python3 " + util + " " +
                                      options.package)
                else:
                    cmd = shlex.split("python3 " + util + " " +
                                      options.package + " -o " +
                                      options.output_ipa)
            else:
                if options.output_ipa is None:
                    cmd = shlex.split("python3 " + util + " " + "'" +
                                      options.name + "'")
                else:
                    cmd = shlex.split("python3 " + util + " " + "'" +
                                      options.name + "'" + " -o " +
                                      options.output_ipa)
            subprocess.call(cmd)
            sys.exit(0)

        #dump memory application
        elif options.name and options.dumpmemory:
            dump_memory(options.dumpmemory, options.name)

        #hexbytescan ipa
        elif options.hexscan:
            hexbyte_scan(options.hexscan, options.task)

        else:
            logger.warning("[!] Specify the options. use (-h) for more help!")
            # sys.exit(0)

    #EXCEPTION FOR FRIDA
    except frida.ServerNotRunningError:
        logger.error("Frida server is not running.")
    except frida.TimedOutError:
        logger.error("Timed out while waiting for device to appear.")
    except frida.TransportError:
        logger.error("[x_x] The application may crash or lose connection.")
    except (frida.ProcessNotFoundError, frida.InvalidOperationError):
        logger.error("[x_x] Unable to find process with name " + options.name +
                     ". You need to run the app first!")
    #EXCEPTION FOR OPTIONPARSING

    #EXCEPTION FOR SYSTEM
    except Exception as e:
        logger.error(
            "[x_x] Something went wrong, please check your error message.\n Message - {0}"
            .format(e))

    except KeyboardInterrupt:
        logger.info("Bye bro!!")
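
A minimal sketch of the attach-and-inject pattern the branches above
repeat ('target' and 'script_path' are illustrative names, not from the
source):

import sys
import frida

def inject(target, script_path):
    # Attach to the running app over USB and load the hook script.
    session = frida.get_usb_device().attach(target)
    with open(script_path) as f:
        script = session.create_script(f.read())
    script.load()
    # Block on stdin so the process stays alive and the hooks stay installed.
    sys.stdin.read()
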
Example #35
0
    return self.distribution.headers or []

  def get_outputs(self):
    return self.outfiles


def find_files(pattern, root):
  """Return all the files matching pattern below root dir."""
  for dirpath, _, files in os.walk(root):
    for filename in fnmatch.filter(files, pattern):
      yield os.path.join(dirpath, filename)


so_lib_paths = [
    i for i in os.listdir('.')
    if os.path.isdir(i) and fnmatch.fnmatch(i, '_solib_*')
]

matches = []
for path in so_lib_paths:
  matches.extend(
      ['../' + x for x in find_files('*', path) if '.py' not in x]
  )

if os.name == 'nt':
  EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.pyd'
else:
  EXTENSION_NAME = 'python/_pywrap_tensorflow_internal.so'

headers = (
    list(find_files('*.h', 'tensorflow/core')) + list(
Example #36
0
def handle_request(proxied_request):
    """Proxy the given request to the URL in the Forward-Host header with an
    Authorization header set using an OIDC bearer token for the Cloud
    Function's service account. If the header is not present, return a 400
    error.
    """

    host = TARGET_HOST

    scheme = proxied_request.headers.get('X-Forwarded-Proto', 'https')
    url = '{}://{}{}'.format(scheme, host, proxied_request.path)
    headers = dict(proxied_request.headers)

    # Check path against whitelist.
    path = proxied_request.path
    if not path:
        path = '/'

    if '*' not in _whitelist:
        _is_match = False
        for entry in _whitelist:
            if fnmatch.fnmatch(path, entry):
                _is_match = True
                break
        if not _is_match:
            logging.warning('Rejected {} {}, not in whitelist'.format(
                proxied_request.method, url))
            return 'Requested path {} not in whitelist'.format(path), 403

    global _oidc_token
    if not _oidc_token or _oidc_token.is_expired():
        _oidc_token = _get_google_oidc_token()
        logging.info('Renewed OIDC bearer token for {}'.format(
            _adc_credentials.service_account_email))

    # Add the Authorization header with the OIDC token.
    headers['Authorization'] = 'Bearer {}'.format(_oidc_token)

    webhook_body = proxied_request.data
    gitlab_secret = proxied_request.headers.get(SECRET_HEADER, '')
    if SECRET_KEY:
        try:
            _body = json.loads(webhook_body)
            # Add the Gitlab secret token into the request body
            _body[SECRET_KEY] = gitlab_secret
            webhook_body = json.dumps(_body)
        except json.JSONDecodeError as e:
            return 'Failed to decode webhook body', 500

    print('(%s)' % webhook_body)
    # We don't want to forward the Host header.
    headers.pop('Host', None)
    request = Request(proxied_request.method,
                      url,
                      headers=headers,
                      data=webhook_body)

    # Send the proxied request.
    prepped = request.prepare()
    logging.info('{} {}'.format(prepped.method, prepped.url))
    resp = _session.send(prepped)

    # Strip hop-by-hop headers and Content-Encoding.
    headers = _strip_hop_by_hop_headers(resp.headers)
    headers.pop('Content-Encoding', None)

    return resp.content, resp.status_code, headers.items()
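
The whitelist check above reduces to a small helper; a sketch, with the
whitelist passed in explicitly ('*' allows everything, as above):

import fnmatch

def path_allowed(path, whitelist):
    if '*' in whitelist:
        return True
    # An empty path is normalised to '/', matching the code above.
    return any(fnmatch.fnmatch(path or '/', entry) for entry in whitelist)

# path_allowed('/hooks/gitlab', ['/hooks/*'])  -> True
# path_allowed('/admin', ['/hooks/*'])         -> False
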
Example #37
0
def sliding_window():
    # If using sliding window.
    for file in os.listdir('Populations'):
        if fnmatch.fnmatch(file, 'all_pop_merged.vcf.gz'):
            cmda = ['vcftools', \
             '--gzvcf', all_pop_merged, \
             '--weir-fst-pop', indv_txt_pop1, \
             '--weir-fst-pop', indv_txt_pop2, \
             '--fst-window-size', args.window, \
             '--fst-window-step', args.step, \
             '--out', fst_out]
            processa = subprocess.Popen(cmda, \
             stdout=subprocess.PIPE, \
             cwd='Populations')
            # wait() blocks until the child exits, so no polling loop is needed.
            processa.wait()
            processa.stdout.close()

    for file in os.listdir('Populations'):
        if fnmatch.fnmatch(file, '*.weir.fst'):
            cmdb = ('cat %s | grep -v "nan" > %s') \
             % (fst_out_window_in, fst_out_flt)
            processb = subprocess.Popen(cmdb, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processb.wait()
            processb.stdout.close()

    # Removing the results below zero.
    for file in os.listdir('Fst_stats'):
        if fnmatch.fnmatch(file, '*flt.table'):
            cmdc = ("awk '{if ($6 >0) print}' %s > %s") \
             % (fst_out_flt, fst_out_flt_results)
            processc = subprocess.Popen(cmdc, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processc.wait()
            processc.stdout.close()
            # Calculating the midpoint position when using sliding window,
            # column (2+3)/2 and the output in column 2.
            cmde = ('''awk '{a=int(($2+$3)/2); $2=a; print}' %s > %s''') \
             % (fst_out_flt_results, fst_out_flt2_results)
            processe = subprocess.Popen(cmde, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processe.wait()
            processe.stdout.close()

            # Rearrange columns (if needed) and keep midpoint value in column 2.
            cmdd = ('''awk '{print $1 "\\t" $2 "\\t" $6}' %s > %s''') \
             % (fst_out_flt2_results, fst_calculated)
            processd = subprocess.Popen(cmdd, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processd.wait()
            processd.stdout.close()

            # Remove the header provided when using sliding window.
            cmdf = ('echo "$(tail -n +2 %s)" > %s') \
             % (fst_calculated, rm_headers)
            processf = subprocess.Popen(cmdf, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processf.wait()
            processf.stdout.close()

            # Add headers.
            cmdg = ('echo -e "CHROM\\tPOS\\tWEIR_AND_COCKERHAM_FST" '
                    '| cat - %s > %s') \
             % (rm_headers, fst_headers)
            processg = subprocess.Popen(cmdg, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processg.wait()
            processg.stdout.close()

            # Sorting the POS column (needed for x-axis in highcharts).
            cmdi = ("cat %s | sort -n > %s") \
             % (fst_headers, fst_results_sorted)
            processi = subprocess.Popen(cmdi, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processi.wait()
            processi.stdout.close()

            # Making a csv file.
            cmdj = ('cat %s | tr "\\t" ","  > %s') \
             % (fst_results_sorted, fst_results_sorted_csv)
            processj = subprocess.Popen(cmdj, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            processj.wait()
            processj.stdout.close()
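
The grep and awk filters above could also run in Python without
shell=True; a minimal sketch, assuming the windowed .weir.fst layout in
which column 6 (1-based) holds the Fst value:

def filter_fst_rows(src, dst, fst_col=5):
    # Keep rows whose Fst value is a number greater than zero; the header,
    # "nan" rows and negative estimates are all dropped in one pass
    # (float('nan') > 0 is False).
    with open(src) as fin, open(dst, 'w') as fout:
        for line in fin:
            fields = line.split()
            try:
                if float(fields[fst_col]) > 0:
                    fout.write(line)
            except (IndexError, ValueError):
                continue
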
Example #38
0
    except subprocess.TimeoutExpired:
        stats.timeout += 1
        fail("Timeout")
        continue
    except subprocess.CalledProcessError:
        stats.crashed += 1
        fail("Crashed")
        continue

    if "expect_sha1" in cmd:
        actual = hashlib.sha1(output).hexdigest()
        if actual != cmd['expect_sha1']:
            fail(f"Actual sha1: {actual}")

    if "expect_pattern" in cmd:
        if not fnmatch.fnmatch(output.decode("utf-8"), cmd['expect_pattern']):
            fail("Output does not match pattern")

    print()

pprint(stats)

if stats.failed:
    print(f"{ansi.FAIL}=======================")
    print(f" FAILED: {stats.failed}/{stats.total_run}")
    print(f"======================={ansi.ENDC}")
    sys.exit(1)

else:
    print(f"{ansi.OKGREEN}=======================")
    print(f" All {stats.total_run} tests OK")
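
The pattern check above in isolation, with a hypothetical test spec:

import fnmatch

cmd = {'expect_pattern': '*All tests passed*'}   # hypothetical spec
output = b'Run complete. All tests passed.\n'
if not fnmatch.fnmatch(output.decode('utf-8'), cmd['expect_pattern']):
    print('Output does not match pattern')
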
Example #39
0
def plot():
    for file in os.listdir('Fst_stats'):
        if fnmatch.fnmatch(file, 'pop1_pop2_flt_results_sorted.csv'):
            # Import csv file with Fst results.
            gl = pd.read_csv('Fst_stats/pop1_pop2_flt_results_sorted.csv')

            # Optimize memory usage.
            gl_int = gl.select_dtypes(include=['int'])
            converted_int = gl_int.apply(pd.to_numeric, downcast='unsigned')
            gl_float = gl.select_dtypes(include=['float'])
            converted_float = gl_float.apply(pd.to_numeric, downcast='float')
            optimized_gl = gl.copy()
            optimized_gl[converted_int.columns] = converted_int
            optimized_gl[converted_float.columns] = converted_float

            # Convert CHROM column from object to category.
            gl_obj = gl.select_dtypes(include=['object']).copy()
            chrom = gl_obj.CHROM
            chrom_cat = chrom.astype('category')
            converted_obj = pd.DataFrame()

            # If unique values are more than 50% of the data do not
            # convert to category, it will not optimize memory usage.
            for col in gl_obj.columns:
                num_unique_values = len(gl_obj[col].unique())
                num_total_values = len(gl_obj[col])
                if num_unique_values / num_total_values < 0.5:
                    converted_obj.loc[:, col] = gl_obj[col].astype('category')
                else:
                    converted_obj.loc[:, col] = gl_obj[col]

            # Apply on the csv file.
            optimized_gl[converted_obj.columns] = converted_obj
            dtypes_col = optimized_gl.dtypes.index
            dtypes_type = [i.name for i in optimized_gl.dtypes.values]
            column_types = dict(zip(dtypes_col, dtypes_type))
            read_and_optimized = pd.read_csv('Fst_stats/pop1_pop2_flt_results_sorted.csv', \
                 dtype=column_types)

            # Rename the read and optimized csv file
            # from the Fst analysis to "df".
            df = read_and_optimized
            df['code'] = chrom_cat.cat.codes

            df['ind'] = range(len(df))
            df_grouped = df.groupby(('code'))

            # Dict for the contig names and index number.
            names = dict(enumerate(df['CHROM'].cat.categories))

            # Make plot of data.
            fig = plt.figure(figsize=(80, 20))
            ax = fig.add_subplot(111)
            colors = ['green', 'turquoise', \
             'blue', 'purple', \
             'red', 'orange', \
             'yellow']
            x_labels = []
            x_labels_pos = []
            for num, (name, group) in enumerate(df_grouped):
                group.plot(kind='scatter', x='ind', y='WEIR_AND_COCKERHAM_FST', \
                color=colors[num % len(colors)], ax=ax)
                x_labels.append(name)
                x_labels_pos.append((group['ind'].iloc[-1] \
                - (group['ind'].iloc[-1] - group['ind'].iloc[0])/2))
                ax.set_xticks(x_labels_pos)
                ax.set_xticklabels(x_labels, rotation='vertical', fontsize=10)
                ax.set_xlim([0, len(df)])
                ax.set_ylim([0, 1])
                ax.set_xlabel('contigs', fontsize=24)
                ax.set_ylabel('Fst value', fontsize=24)
                ax.set_title('Weir and Cockerham Fst', fontsize=40)
                plt.tick_params(axis='x', length=0.01)

            # Add legend with key values paired with the name of the contig.
            legend_list = []
            for key, value in names.items():
                temp = [key, value]
                legend_list.append(temp)

            plt.legend(legend_list,bbox_to_anchor=(1.01, 1), \
               ncol=5, \
               borderaxespad=0)
            plt.tight_layout(pad=7)

            # Save plot as pdf.
            plt.savefig("Fst_stats/Fst_plot_vcftools.pdf")
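
The 50% uniqueness heuristic above reflects how the category dtype only
saves memory when values repeat; a minimal sketch:

import pandas as pd

s = pd.Series(['contig_1', 'contig_2'] * 50_000)      # few unique values
print(s.memory_usage(deep=True))                      # object dtype: large
print(s.astype('category').memory_usage(deep=True))   # category: much smaller
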
Example #40
0
def get_files_recursive(dir_name, exclude_mask=''):
    for root, dirs, files in os.walk(dir_name):
        for _file in files:
            if not fnmatch.fnmatch(_file, exclude_mask):
                yield os.path.join(root, _file)
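
Usage is a one-liner; for example, every file under a hypothetical
'src' tree except compiled bytecode:

paths = list(get_files_recursive('src', exclude_mask='*.pyc'))
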
Example #41
0
def fst():
    # Fst_statistics using vcftools --weir-fst-pop, input files are a
    # vcf file with all merged populations one txt file with names
    # of the individuals from population1 and one txt file with
    # names of the individulas from population2, output is a table
    # of Fst values and a log file of the results.
    for file in os.listdir('Populations'):
        if fnmatch.fnmatch(file, 'all_pop_merged.vcf.gz'):
            cmd9 = ['vcftools', \
             '--gzvcf', all_pop_merged, \
             '--weir-fst-pop', indv_txt_pop1, \
             '--weir-fst-pop', indv_txt_pop2, \
             '--out', fst_out]
            process9 = subprocess.Popen(cmd9, \
             stdout=subprocess.PIPE, \
             cwd='Populations')
            # wait() blocks until the child exits, so no polling loop is needed.
            process9.wait()
            process9.stdout.close()

    # Filtering the resulting files from vcftools and making a new
    # directory called 'Fst_stats' with the resulting files,
    # output is a csv file and a tab separated table, the csv file
    # will be the input file to pandas, matplotlib and highcharts.
    for file in os.listdir('Populations'):
        if fnmatch.fnmatch(file, '*.weir.fst'):
            cmd10 = ('cat %s | grep -v "nan" > %s') \
             % (fst_out_in, fst_out_flt)
            process10 = subprocess.Popen(cmd10, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            process10.wait()
            process10.stdout.close()

    # Removing the results below zero.
    for file in os.listdir('Fst_stats'):
        if fnmatch.fnmatch(file, '*flt.table'):
            cmd11 = ("awk '{if ($3 >0) print}' %s > %s") \
             % (fst_out_flt, fst_out_flt_results)
            process11 = subprocess.Popen(cmd11, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            process11.wait()
            process11.stdout.close()

            # Rearrange columns (if needed).
            cmd12 = ('''awk '{print $1 "\\t" $2 "\\t" $3}' %s > %s''') \
            % (fst_out_flt_results, fst_out_flt2_results)
            process12 = subprocess.Popen(cmd12, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            process12.wait()
            process12.stdout.close()

            # Sorting the POS column (needed for x-axis in highcharts).
            cmd13 = ("cat %s | sort -n > %s") \
             % (fst_out_flt2_results, fst_results_sorted)
            process13 = subprocess.Popen(cmd13, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            process13.wait()
            process13.stdout.close()

            # Making a csv file.
            cmd14 = ('cat %s | tr "\\t" ","  > %s') \
             % (fst_results_sorted, fst_results_sorted_csv)
            process14 = subprocess.Popen(cmd14, \
             stdout=subprocess.PIPE, \
             shell=True, \
             cwd='Fst_stats')
            process14.wait()
            process14.stdout.close()
Example #42
0
        print("Successfully sent email")
    except Exception:
        print("Error: unable to send email")


walz_failed_files = []
print("Processing...")
if not os.path.exists(corrupt_archive_path):
    os.makedirs(corrupt_archive_path)
if not os.path.exists(pipe_delimiter_archive_path):
    os.makedirs(pipe_delimiter_archive_path)
if not os.path.exists(duplicates_archive_path):
    os.makedirs(duplicates_archive_path)

for f in os.listdir(search_dir):
    if fnmatch.fnmatch(f, '*.zip'):
        single_file = {}
        single_file['Filename'] = f
        single_file['Reason'] = ''
        single_file['Content'] = ''
        walz_failed_files.append(single_file)
        del single_file

print("No. of failed files:", len(walz_failed_files))

#Search reason from outlook
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
search_folder = "App Trace"
outlook = outlook.Folders

#Fetch messages from outlook
Example #43
0
def is_compressable(filename, gzip_globs):
    """
    Determine if a filename is a gzippable type
    by comparing to a known list.
    """
    return any([fnmatch(filename, glob) for glob in gzip_globs])
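
For example, with a hypothetical glob list (the snippet assumes
'from fnmatch import fnmatch'):

gzip_globs = ['*.css', '*.js', '*.html']
is_compressable('static/app.min.js', gzip_globs)   # True
is_compressable('img/logo.png', gzip_globs)        # False
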
Example #44
0
#!/usr/bin/python2
#
# This is a simple script that we use to check for files in git
# and not in the distribution. It was previously written in cinnamon
# and inlined in the Makefile.am, but 'git ls-files --exclude=<pattern>'
# was changed to no longer do anything useful, which made that
# too challenging to be worthwhile.

import fnmatch, os, subprocess, sys

srcdir=sys.argv[1]
distdir=sys.argv[2]
excludes=sys.argv[3:]

os.chdir(srcdir)

status=0
for f in subprocess.Popen(["git", "ls-files"], stdout=subprocess.PIPE).stdout:
    f = f.strip()
    if (not os.path.exists(os.path.join(distdir, f)) and
            not any((fnmatch.fnmatch(f, p) for p in excludes))):
        print "File missing from distribution:", f
        status=1

sys.exit(status)
Example #45
0
def find_package_data():
    """
    Find package_data.
    """
    # This is not enough for these things to appear in an sdist.
    # We need to muck with the MANIFEST to get this to work

    # exclude components and less from the walk;
    # we will build the components separately
    excludes = [
        pjoin('static', 'components'),
        pjoin('static', '*', 'less'),
        pjoin('static', '*', 'node_modules')
    ]

    # walk notebook resources:
    cwd = os.getcwd()
    os.chdir('notebook')
    static_data = []
    for parent, dirs, files in os.walk('static'):
        if any(fnmatch(parent, pat) for pat in excludes):
            # prevent descending into subdirs
            dirs[:] = []
            continue
        for f in files:
            static_data.append(pjoin(parent, f))

    # for verification purposes, explicitly add main.min.js
    # so that installation will fail if they are missing
    for app in ['auth', 'edit', 'notebook', 'terminal', 'tree']:
        static_data.append(pjoin('static', app, 'js', 'main.min.js'))

    components = pjoin("static", "components")
    # select the components we actually need to install
    # (there are lots of resources we bundle for sdist-reasons that we don't actually use)
    static_data.extend([
        pjoin(components, "backbone", "backbone-min.js"),
        pjoin(components, "bootstrap", "js", "bootstrap.min.js"),
        pjoin(components, "bootstrap-tour", "build", "css",
              "bootstrap-tour.min.css"),
        pjoin(components, "bootstrap-tour", "build", "js",
              "bootstrap-tour.min.js"),
        pjoin(components, "font-awesome", "css", "*.css"),
        pjoin(components, "es6-promise", "*.js"),
        pjoin(components, "font-awesome", "fonts", "*.*"),
        pjoin(components, "google-caja", "html-css-sanitizer-minified.js"),
        pjoin(components, "jed", "jed.js"),
        pjoin(components, "jquery", "jquery.min.js"),
        pjoin(components, "jquery-typeahead", "dist",
              "jquery.typeahead.min.js"),
        pjoin(components, "jquery-typeahead", "dist",
              "jquery.typeahead.min.css"),
        pjoin(components, "jquery-ui", "ui", "minified", "jquery-ui.min.js"),
        pjoin(components, "jquery-ui", "themes", "smoothness",
              "jquery-ui.min.css"),
        pjoin(components, "jquery-ui", "themes", "smoothness", "images", "*"),
        pjoin(components, "marked", "lib", "marked.js"),
        pjoin(components, "preact", "index.js"),
        pjoin(components, "preact-compat", "index.js"),
        pjoin(components, "proptypes", "index.js"),
        pjoin(components, "requirejs", "require.js"),
        pjoin(components, "requirejs-plugins", "src", "json.js"),
        pjoin(components, "requirejs-text", "text.js"),
        pjoin(components, "underscore", "underscore-min.js"),
        pjoin(components, "moment", "moment.js"),
        pjoin(components, "moment", "min", "*.js"),
        pjoin(components, "xterm.js", "dist", "xterm.js"),
        pjoin(components, "xterm.js", "dist", "xterm.css"),
        pjoin(components, "text-encoding", "lib", "encoding.js"),
    ])

    # Ship all of Codemirror's CSS and JS
    for parent, dirs, files in os.walk(pjoin(components, 'codemirror')):
        for f in files:
            if f.endswith(('.js', '.css')):
                static_data.append(pjoin(parent, f))

    # Trim mathjax
    mj = lambda *path: pjoin(components, 'MathJax', *path)
    static_data.extend([
        mj('MathJax.js'),
        mj('config', 'TeX-AMS-MML_HTMLorMML-full.js'),
        mj('config', 'Safe.js'),
    ])

    trees = []
    mj_out = mj('jax', 'output')

    if os.path.exists(mj_out):
        for output in os.listdir(mj_out):
            path = pjoin(mj_out, output)
            static_data.append(pjoin(path, '*.js'))
            autoload = pjoin(path, 'autoload')
            if os.path.isdir(autoload):
                trees.append(autoload)

    for tree in trees + [
            mj('localization'),  # limit to en?
            mj('fonts', 'HTML-CSS', 'STIX-Web', 'woff'),
            mj('extensions'),
            mj('jax', 'input', 'TeX'),
            mj('jax', 'output', 'HTML-CSS', 'fonts', 'STIX-Web'),
            mj('jax', 'output', 'SVG', 'fonts', 'STIX-Web'),
            mj('jax', 'element', 'mml'),
    ]:
        for parent, dirs, files in os.walk(tree):
            for f in files:
                static_data.append(pjoin(parent, f))

    os.chdir(os.path.join('tests', ))
    js_tests = glob('*.js') + glob('*/*.js')

    os.chdir(cwd)

    package_data = {
        'notebook': ['templates/*'] + static_data,
        'notebook.tests':
        js_tests,
        'notebook.bundler.tests':
        ['resources/*', 'resources/*/*', 'resources/*/*/.*'],
        'notebook.services.api': ['api.yaml'],
    }

    return package_data
Example #46
0
def search_file(pattern, f):
    """
    Function to search a single file for a single search pattern.
    """

    fn_matched = False
    contents_matched = False

    # Use mimetypes to exclude binary files where possible
    (ftype, encoding) = mimetypes.guess_type(os.path.join(f['root'], f['fn']))
    if encoding is not None:
        return False
    if ftype is not None and ftype.startswith('image'):
        return False

    # Search pattern specific filesize limit
    if pattern.get('max_filesize') is not None and 'filesize' in f:
        if f['filesize'] > pattern.get('max_filesize'):
            return False

    # Search by file name (glob)
    if pattern.get('fn') is not None:
        if fnmatch.fnmatch(f['fn'], pattern['fn']):
            fn_matched = True
            if pattern.get('contents') is None and pattern.get(
                    'contents_re') is None:
                return True

    # Search by file name (regex)
    if pattern.get('fn_re') is not None:
        if re.match(pattern['fn_re'], f['fn']):
            fn_matched = True
            if pattern.get('contents') is None and pattern.get(
                    'contents_re') is None:
                return True

    # Search by file contents
    if pattern.get('contents') is not None or pattern.get(
            'contents_re') is not None:
        if pattern.get('contents_re') is not None:
            repattern = re.compile(pattern['contents_re'])
        try:
            with io.open(os.path.join(f['root'], f['fn']),
                         "r",
                         encoding='utf-8') as f:
                l = 1
                for line in f:
                    # Search by file contents (string)
                    if pattern.get('contents') is not None:
                        if pattern['contents'] in line:
                            contents_matched = True
                            if pattern.get('fn') is None and pattern.get(
                                    'fn_re') is None:
                                return True
                            break
                    # Search by file contents (regex)
                    elif pattern.get('contents_re') is not None:
                        if re.search(repattern, line):
                            contents_matched = True
                            if pattern.get('fn') is None and pattern.get(
                                    'fn_re') is None:
                                return True
                            break
                    # Break if we've searched enough lines for this pattern
                    if pattern.get(
                            'num_lines') and l >= pattern.get('num_lines'):
                        break
                    l += 1
        except (IOError, OSError, ValueError, UnicodeDecodeError):
            if config.report_readerrors:
                logger.debug(
                    "Couldn't read file when looking for output: {}".format(
                        f['fn']))
                return False

    return fn_matched and contents_matched
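
The mimetypes guard above is a cheap binary-file filter; the same idea
standalone, with hypothetical paths:

import mimetypes

def looks_searchable(path):
    ftype, encoding = mimetypes.guess_type(path)
    # Compressed files report an encoding (e.g. 'gzip'); skip those and images.
    if encoding is not None:
        return False
    if ftype is not None and ftype.startswith('image'):
        return False
    return True

# looks_searchable('report.txt')      -> True
# looks_searchable('reads.fastq.gz')  -> False
# looks_searchable('plot.png')        -> False
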
Example #47
0
def find_package_data():
    """
    Find IPython's package_data.
    """
    # This is not enough for these things to appear in an sdist.
    # We need to muck with the MANIFEST to get this to work

    # exclude components and less from the walk;
    # we will build the components separately
    excludes = [
        pjoin('static', 'components'),
        pjoin('static', '*', 'less'),
    ]

    # walk notebook resources:
    cwd = os.getcwd()
    os.chdir(os.path.join('IPython', 'html'))
    static_data = []
    for parent, dirs, files in os.walk('static'):
        if any(fnmatch(parent, pat) for pat in excludes):
            # prevent descending into subdirs
            dirs[:] = []
            continue
        for f in files:
            static_data.append(pjoin(parent, f))

    components = pjoin("static", "components")
    # select the components we actually need to install
    # (there are lots of resources we bundle for sdist-reasons that we don't actually use)
    static_data.extend([
        pjoin(components, "backbone", "backbone-min.js"),
        pjoin(components, "bootstrap", "bootstrap", "js", "bootstrap.min.js"),
        pjoin(components, "bootstrap-tour", "build", "css",
              "bootstrap-tour.min.css"),
        pjoin(components, "bootstrap-tour", "build", "js",
              "bootstrap-tour.min.js"),
        pjoin(components, "font-awesome", "font", "*.*"),
        pjoin(components, "google-caja", "html-css-sanitizer-minified.js"),
        pjoin(components, "highlight.js", "build", "highlight.pack.js"),
        pjoin(components, "jquery", "jquery.min.js"),
        pjoin(components, "jquery-ui", "ui", "minified", "jquery-ui.min.js"),
        pjoin(components, "jquery-ui", "themes", "smoothness",
              "jquery-ui.min.css"),
        pjoin(components, "marked", "lib", "marked.js"),
        pjoin(components, "requirejs", "require.js"),
        pjoin(components, "underscore", "underscore-min.js"),
    ])

    # Ship all of Codemirror's CSS and JS
    for parent, dirs, files in os.walk(pjoin(components, 'codemirror')):
        for f in files:
            if f.endswith(('.js', '.css')):
                static_data.append(pjoin(parent, f))

    os.chdir(os.path.join('tests', ))
    js_tests = glob('*.js') + glob('*/*.js')

    os.chdir(os.path.join(cwd, 'IPython', 'nbconvert'))
    nbconvert_templates = [
        os.path.join(dirpath, '*.*') for dirpath, _, _ in os.walk('templates')
    ]

    os.chdir(cwd)

    package_data = {
        'IPython.config.profile': ['README*', '*/*.py'],
        'IPython.core.tests': ['*.png', '*.jpg'],
        'IPython.lib.tests': ['*.wav'],
        'IPython.testing.plugin': ['*.txt'],
        'IPython.html': ['templates/*'] + static_data,
        'IPython.html.tests':
        js_tests,
        'IPython.qt.console': ['resources/icon/*.svg'],
        'IPython.nbconvert':
        nbconvert_templates + ['tests/files/*.*', 'exporters/tests/files/*.*'],
        'IPython.nbconvert.filters': ['marked.js'],
        'IPython.nbformat': ['tests/*.ipynb']
    }

    return package_data
Example #48
0
    print("----This is the layer info template ----")
    print(layer_info)
except Exception:
    print("Unexpected error:", sys.exc_info()[0])

#-------------------------------------------------------
# Get the list of all available csv files in wide format
#-------------------------------------------------------

wide_files = []

listOfFiles = os.listdir(data_dir)  
pattern = '*_wide.csv'
for entry in listOfFiles:  
    if fnmatch.fnmatch(entry, pattern):
        wide_files.append(entry)
       
#============================================
# START PROCESSING EACH DATA SERIES
#============================================
        
# Set parameters:
        
property_update_only = False
update_symbology = True

wide_files = ['1.1.1-SI_POV_DAY1_wide.csv']
for i in range(len(wide_files)):

    #----------------------------------------
Example #49
0
def get_filelist(run_module_names):
    """
    Go through all supplied search directories and assembly a master
    list of files to search. Then fire search functions for each file.
    """
    # Prep search patterns
    spatterns = [{}, {}, {}, {}, {}, {}, {}]
    epatterns = [{}, {}]
    ignored_patterns = []
    for key, sps in config.sp.items():
        mod_name = key.split('/', 1)[0]
        if mod_name.lower() not in [m.lower() for m in run_module_names]:
            ignored_patterns.append(key)
            continue
        files[key] = list()
        if not isinstance(sps, list):
            sps = [sps]

        # Warn if we have any unrecognised search pattern keys
        expected_sp_keys = [
            'fn', 'fn_re', 'contents', 'contents_re', 'num_lines', 'shared',
            'max_filesize', 'exclude_fn', 'exclude_fn_re', 'exclude_contents',
            'exclude_contents_re'
        ]
        unrecognised_keys = [
            y for x in sps for y in x.keys() if y not in expected_sp_keys
        ]
        if len(unrecognised_keys) > 0:
            logger.warning("Unrecognised search pattern keys for '{}': {}".format(
                key, ', '.join(unrecognised_keys)))

        # Split search patterns according to speed of execution.
        if any([x for x in sps if 'contents_re' in x]):
            if any([x for x in sps if 'num_lines' in x]):
                spatterns[4][key] = sps
            elif any([x for x in sps if 'max_filesize' in x]):
                spatterns[5][key] = sps
            else:
                spatterns[6][key] = sps
        elif any([x for x in sps if 'contents' in x]):
            if any([x for x in sps if 'num_lines' in x]):
                spatterns[1][key] = sps
            elif any([x for x in sps if 'max_filesize' in x]):
                spatterns[2][key] = sps
            else:
                spatterns[3][key] = sps
        else:
            spatterns[0][key] = sps

    if len(ignored_patterns) > 0:
        logger.debug(
            "Ignored search patterns as they didn't match running modules: {}".
            format(', '.join(ignored_patterns)))

    def add_file(fn, root):
        """
        Function applied to each file found when walking the analysis
        directories. Runs through all search patterns and returns True
        if a match is found.
        """
        f = {'fn': fn, 'root': root}

        # Check that this is a file and not a pipe or anything weird
        if not os.path.isfile(os.path.join(root, fn)):
            return None

        # Check that we don't want to ignore this file
        i_matches = [
            n for n in config.fn_ignore_files if fnmatch.fnmatch(fn, n)
        ]
        if len(i_matches) > 0:
            logger.debug(
                "Ignoring file as matched an ignore pattern: {}".format(fn))
            return None

        # Limit search to small files, to avoid 30GB FastQ files etc.
        try:
            f['filesize'] = os.path.getsize(os.path.join(root, fn))
        except (IOError, OSError, ValueError, UnicodeDecodeError):
            logger.debug(
                "Couldn't read file when checking filesize: {}".format(fn))
        else:
            if f['filesize'] > config.log_filesize_limit:
                return False

        # Test file for each search pattern
        for patterns in spatterns:
            for key, sps in patterns.items():
                for sp in sps:
                    if search_file(sp, f):
                        # Check that we shouldn't exclude this file
                        if not exclude_file(sp, f):
                            # Looks good! Remember this file
                            files[key].append(f)
                        # Don't keep searching this file for other modules
                        if not sp.get('shared', False):
                            return
                        # Don't look at other patterns for this module
                        else:
                            break

    # Go through the analysis directories and get file list
    for path in config.analysis_dir:
        if os.path.islink(path) and config.ignore_symlinks:
            continue
        elif os.path.isfile(path):
            searchfiles.append([os.path.basename(path), os.path.dirname(path)])
        elif os.path.isdir(path):
            for root, dirnames, filenames in os.walk(
                    path, followlinks=(not config.ignore_symlinks),
                    topdown=True):
                bname = os.path.basename(root)

                # Skip any sub-directories matching ignore params
                orig_dirnames = dirnames[:]
                for n in config.fn_ignore_dirs:
                    dirnames[:] = [
                        d for d in dirnames
                        if not fnmatch.fnmatch(d, n.rstrip(os.sep))
                    ]
                    if len(orig_dirnames) != len(dirnames):
                        removed_dirs = [
                            os.path.join(root, d)
                            for d in set(orig_dirnames).symmetric_difference(
                                set(dirnames))
                        ]
                        logger.debug(
                            "Ignoring directory as matched fn_ignore_dirs: {}".
                            format(", ".join(removed_dirs)))
                        orig_dirnames = dirnames[:]
                for n in config.fn_ignore_paths:
                    dirnames[:] = [
                        d for d in dirnames if not fnmatch.fnmatch(
                            os.path.join(root, d), n.rstrip(os.sep))
                    ]
                    if len(orig_dirnames) != len(dirnames):
                        removed_dirs = [
                            os.path.join(root, d)
                            for d in set(orig_dirnames).symmetric_difference(
                                set(dirnames))
                        ]
                        logger.debug(
                            "Ignoring directory as matched fn_ignore_paths: {}"
                            .format(", ".join(removed_dirs)))

                # Skip *this* directory if matches ignore params
                d_matches = [
                    n for n in config.fn_ignore_dirs
                    if fnmatch.fnmatch(bname, n.rstrip(os.sep))
                ]
                if len(d_matches) > 0:
                    logger.debug(
                        "Ignoring directory as matched fn_ignore_dirs: {}".
                        format(bname))
                    continue
                p_matches = [
                    n for n in config.fn_ignore_paths
                    if fnmatch.fnmatch(root, n.rstrip(os.sep))
                ]
                if len(p_matches) > 0:
                    logger.debug(
                        "Ignoring directory as matched fn_ignore_paths: {}".
                        format(root))
                    continue
                # Search filenames in this directory
                for fn in filenames:
                    searchfiles.append([fn, root])
    # Search through collected files
    with click.progressbar(searchfiles,
                           label="Searching {} files..".format(
                               len(searchfiles))) as sfiles:
        for sf in sfiles:
            add_file(sf[0], sf[1])
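
The in-place dirnames[:] assignments above are what make os.walk skip
ignored directories; a minimal sketch of the idiom (ignore list
hypothetical):

import fnmatch
import os

ignore_dirs = ['.git', '__pycache__', 'tmp*']

for root, dirnames, filenames in os.walk('.', topdown=True):
    # Slice assignment mutates the list os.walk holds, pruning the walk
    # itself; rebinding with dirnames = [...] would have no effect.
    dirnames[:] = [d for d in dirnames
                   if not any(fnmatch.fnmatch(d, pat) for pat in ignore_dirs)]
    for fn in filenames:
        pass  # process os.path.join(root, fn)
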
Example #50
0
def assemble_files_to_ship(complete_file_list):
    """
    This looks for all files which should be shipped in the sdist
    """
    # All files which are in the repository except these:
    ignore_patterns = (
        # Developer-only tools
        '.github/*',
        '.github/*/*',
        'changelogs/fragments/*',
        'hacking/backport/*',
        'hacking/shippable/*',
        'hacking/tests/*',
        'hacking/ticket_stubs/*',
        'test/sanity/code-smell/botmeta.*',
        'test/utils/*',
        'test/utils/*/*',
        'test/utils/*/*/*',
        '.git*',
    )
    ignore_files = frozenset((
        # Developer-only tools
        'changelogs/config.yaml',
        'changelogs/.changes.yaml',
        'hacking/README.md',
        'hacking/ansible-profile',
        'hacking/cgroup_perf_recap_graph.py',
        'hacking/create_deprecated_issues.py',
        'hacking/deprecated_issue_template.md',
        'hacking/fix_test_syntax.py',
        'hacking/get_library.py',
        'hacking/metadata-tool.py',
        'hacking/report.py',
        'hacking/return_skeleton_generator.py',
        'hacking/test-module',
        'hacking/test-module.py',
        'test/support/README.md',
        '.cherry_picker.toml',
        '.mailmap',
        # Possibly should be included
        'examples/scripts/uptime.py',
        'examples/DOCUMENTATION.yml',
        'examples/play.yml',
        'examples/hosts.yaml',
        'examples/hosts.yml',
        'examples/inventory_script_schema.json',
        'examples/plugin_filters.yml',
        'hacking/env-setup',
        'hacking/env-setup.fish',
        'MANIFEST',
    ))

    # These files are generated and then intentionally added to the sdist

    # Manpages
    manpages = ['docs/man/man1/ansible.1']
    for dirname, dummy, files in os.walk('bin'):
        for filename in files:
            path = os.path.join(dirname, filename)
            if os.path.islink(path):
                if os.readlink(path) == 'ansible':
                    manpages.append('docs/man/man1/%s.1' % filename)

    # Misc
    misc_generated_files = [
        'SYMLINK_CACHE.json',
        'PKG-INFO',
    ]

    shipped_files = manpages + misc_generated_files

    for path in complete_file_list:
        if path not in ignore_files:
            for ignore in ignore_patterns:
                if fnmatch.fnmatch(path, ignore):
                    break
            else:
                shipped_files.append(path)

    return shipped_files
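
The loop above relies on Python's for/else: the else clause runs only
when no ignore pattern matched (i.e. the loop finished without break).
A standalone sketch with a trimmed pattern list:

import fnmatch

ignore_patterns = ('.git*', 'test/utils/*')
shipped = []
for path in ['lib/ansible/cli.py', '.gitignore', 'test/utils/run.sh']:
    for ignore in ignore_patterns:
        if fnmatch.fnmatch(path, ignore):
            break
    else:
        shipped.append(path)
# shipped == ['lib/ansible/cli.py']
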
Example #51
0
def include(module):
    return not any(fnmatch.fnmatch(module, pat)
                   for pat in excluded_modules)
Example #52
0
def ignore_file_filter(path):
    for exc in ignore_file_pattern:
        if fnmatch.fnmatch(path, exc):
            return False
    return True
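
Treated as a module-level function over a module-level
ignore_file_pattern, this shape drops straight into filter(); patterns
hypothetical:

ignore_file_pattern = ['*.pyc', '*~']
paths = ['main.py', 'main.pyc', 'notes.txt~']
kept = list(filter(ignore_file_filter, paths))
# kept == ['main.py']
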
Example #53
0
    def __init__(self):

        BASE_DIR = "/sys/bus/usb/drivers/robotic_arm/"

        for file in os.listdir(BASE_DIR):
            if fnmatch.fnmatch(file, '*:*'):
                self.file = file

        self.base_motor = open("%s%s/basemotor" % (BASE_DIR, self.file), "w")
        self.arm_motor = open("%s%s/motor4" % (BASE_DIR, self.file), "w")
        self.elbow_motor = open("%s%s/motor3" % (BASE_DIR, self.file), "w")
        self.wrist_motor = open("%s%s/motor2" % (BASE_DIR, self.file), "w")
        self.jaw_motor = open("%s%s/gripmotor" % (BASE_DIR, self.file), "w")
        self.led_device = open("%s%s/led" % (BASE_DIR, self.file), "w")

        Gtk.Window.__init__(self, title="Robotic Arm Manual Control Program")
        self.connect("delete_event", self.delete_event)
        self.connect("destroy", self.destroy)
        self.set_border_width(10)

        self.base_label = Gtk.Label("Base: ")
        self.base_label.set_alignment(0, 0)
        self.base_label.show()

        self.base_clockwise = Gtk.Button("Clockwise")
        self.base_clockwise.connect("clicked", self.init_base_clockwise, None)
        self.base_clockwise.show()

        self.base_anticlockwise = Gtk.Button("AntiClockwise")
        self.base_anticlockwise.connect("clicked",
                                        self.init_base_anticlockwise, None)
        self.base_anticlockwise.show()

        self.base_stop = Gtk.Button("Stop")
        self.base_stop.connect("clicked", self.init_base_stop, None)
        self.base_stop.show()

        self.arm_label = Gtk.Label("Arm: ")
        self.arm_label.set_alignment(0, 0)
        self.arm_label.show()

        self.arm_up = Gtk.Button("Up")
        self.arm_up.connect("clicked", self.init_arm_up, None)
        self.arm_up.show()

        self.arm_down = Gtk.Button("Down")
        self.arm_down.connect("clicked", self.init_arm_down, None)
        self.arm_down.show()

        self.arm_stop = Gtk.Button("Stop")
        self.arm_stop.connect("clicked", self.init_arm_stop, None)
        self.arm_stop.show()

        self.elbow_label = Gtk.Label("Elbow: ")
        self.elbow_label.set_alignment(0, 0)
        self.elbow_label.show()

        self.elbow_up = Gtk.Button("Up")
        self.elbow_up.connect("clicked", self.init_elbow_up, None)
        self.elbow_up.show()

        self.elbow_down = Gtk.Button("Down")
        self.elbow_down.connect("clicked", self.init_elbow_down, None)
        self.elbow_down.show()

        self.elbow_stop = Gtk.Button("Stop")
        self.elbow_stop.connect("clicked", self.init_elbow_stop, None)
        self.elbow_stop.show()

        self.wrist_label = Gtk.Label("Wrist: ")
        self.wrist_label.set_alignment(0, 0)
        self.wrist_label.show()

        self.wrist_up = Gtk.Button("Up")
        self.wrist_up.connect("clicked", self.init_wrist_up, None)
        self.wrist_up.show()

        self.wrist_down = Gtk.Button("Down")
        self.wrist_down.connect("clicked", self.init_wrist_down, None)
        self.wrist_down.show()

        self.wrist_stop = Gtk.Button("Stop")
        self.wrist_stop.connect("clicked", self.init_wrist_stop, None)
        self.wrist_stop.show()

        self.jaw_label = Gtk.Label("Jaw: ")
        self.jaw_label.set_alignment(0, 0)
        self.jaw_label.show()

        self.jaw_open = Gtk.Button("Open")
        self.jaw_open.connect("clicked", self.init_jaw_open, None)
        self.jaw_open.show()

        self.jaw_close = Gtk.Button("Close")
        self.jaw_close.connect("clicked", self.init_jaw_close, None)
        self.jaw_close.show()

        self.jaw_stop = Gtk.Button("Stop")
        self.jaw_stop.connect("clicked", self.init_jaw_stop, None)
        self.jaw_stop.show()

        self.led_label = Gtk.Label("LED: ")
        self.led_label.set_alignment(0, 0)
        self.led_label.show()

        self.led = Gtk.Button("Off")
        self.led.connect("clicked", self.switch_led, None)
        self.led.show()

        self.controls = Gtk.Grid()
        self.controls.attach(self.base_label, 0, 0, 1, 1)
        self.controls.attach(self.base_clockwise, 1, 0, 1, 1)
        self.controls.attach(self.base_anticlockwise, 2, 0, 1, 1)
        self.controls.attach(self.base_stop, 3, 0, 1, 1)
        self.controls.attach(self.arm_label, 0, 1, 1, 1)
        self.controls.attach(self.arm_up, 1, 1, 1, 1)
        self.controls.attach(self.arm_down, 2, 1, 1, 1)
        self.controls.attach(self.arm_stop, 3, 1, 1, 1)
        self.controls.attach(self.elbow_label, 0, 2, 1, 1)
        self.controls.attach(self.elbow_up, 1, 2, 1, 1)
        self.controls.attach(self.elbow_down, 2, 2, 1, 1)
        self.controls.attach(self.elbow_stop, 3, 2, 1, 1)
        self.controls.attach(self.wrist_label, 0, 3, 1, 1)
        self.controls.attach(self.wrist_up, 1, 3, 1, 1)
        self.controls.attach(self.wrist_down, 2, 3, 1, 1)
        self.controls.attach(self.wrist_stop, 3, 3, 1, 1)
        self.controls.attach(self.jaw_label, 0, 4, 1, 1)
        self.controls.attach(self.jaw_open, 1, 4, 1, 1)
        self.controls.attach(self.jaw_close, 2, 4, 1, 1)
        self.controls.attach(self.jaw_stop, 3, 4, 1, 1)
        self.controls.attach(self.led_label, 0, 5, 1, 1)
        self.controls.attach(self.led, 1, 5, 3, 1)
        self.controls.show()

        self.add(self.controls)
        self.show()
Example #54
0
def prune_final_dir_for_clang_tidy(final_dir, osx_cross_compile):
    # Make sure we only have what we expect.
    dirs = [
        "bin",
        "include",
        "lib",
        "lib32",
        "libexec",
        "msbuild-bin",
        "share",
        "tools",
    ]
    if is_linux():
        dirs.append("x86_64-unknown-linux-gnu")
    for f in glob.glob("%s/*" % final_dir):
        if os.path.basename(f) not in dirs:
            raise Exception("Found unknown file %s in the final directory" % f)
        if not os.path.isdir(f):
            raise Exception("Expected %s to be a directory" % f)

    kept_binaries = [
        "clang-apply-replacements",
        "clang-format",
        "clang-tidy",
        "clangd",
        "clang-query",
    ]
    re_clang_tidy = re.compile(r"^(" + "|".join(kept_binaries) + r")(\.exe)?$",
                               re.I)
    for f in glob.glob("%s/bin/*" % final_dir):
        if re_clang_tidy.search(os.path.basename(f)) is None:
            delete(f)

    # Keep include/ intact.

    # Remove the target-specific files.
    if is_linux():
        if os.path.exists(os.path.join(final_dir, "x86_64-unknown-linux-gnu")):
            shutil.rmtree(os.path.join(final_dir, "x86_64-unknown-linux-gnu"))

    # In lib/, only keep lib/clang/N.M.O/include and the LLVM shared library.
    re_ver_num = re.compile(r"^\d+\.\d+\.\d+$", re.I)
    for f in glob.glob("%s/lib/*" % final_dir):
        name = os.path.basename(f)
        if name == "clang":
            continue
        if osx_cross_compile and name in [
                "libLLVM.dylib", "libclang-cpp.dylib"
        ]:
            continue
        if is_linux() and (fnmatch.fnmatch(name, "libLLVM*.so")
                           or fnmatch.fnmatch(name, "libclang-cpp.so*")):
            continue
        delete(f)
    for f in glob.glob("%s/lib/clang/*" % final_dir):
        if re_ver_num.search(os.path.basename(f)) is None:
            delete(f)
    for f in glob.glob("%s/lib/clang/*/*" % final_dir):
        if os.path.basename(f) != "include":
            delete(f)

    # Completely remove libexec/, msbuild-bin and tools, if it exists.
    shutil.rmtree(os.path.join(final_dir, "libexec"))
    for d in ("msbuild-bin", "tools"):
        d = os.path.join(final_dir, d)
        if os.path.exists(d):
            shutil.rmtree(d)

    # In share/, only keep share/clang/*tidy*
    re_clang_tidy = re.compile(r"format|tidy", re.I)
    for f in glob.glob("%s/share/*" % final_dir):
        if os.path.basename(f) != "clang":
            delete(f)
    for f in glob.glob("%s/share/clang/*" % final_dir):
        if re_clang_tidy.search(os.path.basename(f)) is None:
            delete(f)
Example #55
0
    def __init__(self, settings, ncpu=None):
        self.settings = settings
        self.logger = logging.getLogger(__name__)
        self.stats = defaultdict(int)
        self.init_pool(ncpu)
        check_or_create_dir(settings['destination'])

        # Build the list of directories with images
        albums = self.albums = {}
        src_path = self.settings['source']

        ignore_dirs = settings['ignore_directories']
        ignore_files = settings['ignore_files']

        progressChars = cycle(["/", "-", "\\", "|"])
        show_progress = (self.logger.getEffectiveLevel() >= logging.WARNING and
                         os.isatty(sys.stdout.fileno()))
        self.progressbar_target = None if show_progress else Devnull()

        for path, dirs, files in os.walk(src_path, followlinks=True,
                                         topdown=False):
            if show_progress:
                print("\rCollecting albums " + next(progressChars), end="")
            relpath = os.path.relpath(path, src_path)

            # Test if the directory match the ignore_dirs settings
            if ignore_dirs and any(fnmatch.fnmatch(relpath, ignore)
                                   for ignore in ignore_dirs):
                self.logger.info('Ignoring %s', relpath)
                continue

            # Remove files that match the ignore_files settings
            if ignore_files:
                files_path = {join(relpath, f) for f in files}
                for ignore in ignore_files:
                    files_path -= set(fnmatch.filter(files_path, ignore))

                self.logger.debug('Files before filtering: %r', files)
                files = [os.path.split(f)[1] for f in files_path]
                self.logger.debug('Files after filtering: %r', files)

            # Remove sub-directories that have been ignored in a previous
            # iteration (as topdown=False, sub-directories are processed before
            # their parent).
            for d in dirs[:]:
                path = join(relpath, d) if relpath != '.' else d
                if path not in albums.keys():
                    dirs.remove(d)

            album = Album(relpath, settings, dirs, files, self)

            if not album.medias and not album.albums:
                self.logger.info('Skip empty album: %r', album)
            else:
                album.create_output_directories()
                albums[relpath] = album

        with progressbar(albums.values(), label="%16s" % "Sorting albums",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_subdirs(settings['albums_sort_attr'])

        with progressbar(albums.values(), label="%16s" % "Sorting media",
                         file=self.progressbar_target) as progress_albums:
            for album in progress_albums:
                album.sort_medias(settings['medias_sort_attr'])

        self.logger.debug('Albums:\n%r', albums.values())
        signals.gallery_initialized.send(self)
Example #56
0
def iter_filenames(paths, exclude):
    exclude = filter(None, (exclude or '').split(','))
    for path in walk_paths(paths):
        if all(not fnmatch.fnmatch(path, pattern) for pattern in exclude):
            yield path
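
Typical call shape (directory names hypothetical; walk_paths is the
project's own helper):

for path in iter_filenames(['src', 'tests'], exclude='*.pyc,.git*'):
    print(path)
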
Example #57
0
def _matches(cls, hit):
    # pylint: disable=protected-access
    if cls._index._name is None:
        return True
    return fnmatch(hit.get('_index', ''), '{}*'.format(cls._index._name))
Example #58
0
    def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
        self._play = play
        self._blocks = []
        self._variable_manager = variable_manager

        self._task_uuid_cache = dict()

        # Default options to gather
        gather_subset = play_context.gather_subset
        gather_timeout = play_context.gather_timeout
        fact_path = play_context.fact_path

        # Retrieve subset to gather
        if self._play.gather_subset is not None:
            gather_subset = self._play.gather_subset
        # Retrieve timeout for gather
        if self._play.gather_timeout is not None:
            gather_timeout = self._play.gather_timeout
        # Retrieve fact_path
        if self._play.fact_path is not None:
            fact_path = self._play.fact_path

        setup_block = Block(play=self._play)
        setup_task = Task(block=setup_block)
        setup_task.action = 'setup'
        setup_task.name = 'Gathering Facts'
        setup_task.tags = ['always']
        setup_task.args = {
            'gather_subset': gather_subset,
        }
        if gather_timeout:
            setup_task.args['gather_timeout'] = gather_timeout
        if fact_path:
            setup_task.args['fact_path'] = fact_path
        setup_task.set_loader(self._play._loader)
        # short circuit fact gathering if the entire playbook is conditional
        if self._play._included_conditional is not None:
            setup_task.when = self._play._included_conditional[:]
        setup_block.block = [setup_task]

        setup_block = setup_block.filter_tagged_tasks(play_context, all_vars)
        self._blocks.append(setup_block)
        self.cache_block_tasks(setup_block)

        for block in self._play.compile():
            new_block = block.filter_tagged_tasks(play_context, all_vars)
            if new_block.has_tasks():
                self.cache_block_tasks(new_block)
                self._blocks.append(new_block)

        for handler_block in self._play.handlers:
            self.cache_block_tasks(handler_block)

        self._host_states = {}
        start_at_matched = False
        for host in inventory.get_hosts(self._play.hosts):
            self._host_states[host.name] = HostState(blocks=self._blocks)
            # if we're looking to start at a specific task, iterate through
            # the tasks for this host until we find the specified task
            if play_context.start_at_task is not None and not start_at_done:
                while True:
                    (s, task) = self.get_next_task_for_host(host, peek=True)
                    if s.run_state == self.ITERATING_COMPLETE:
                        break
                    if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
                       task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
                        start_at_matched = True
                        break
                    else:
                        self.get_next_task_for_host(host)

                # finally, reset the host's state to ITERATING_SETUP
                if start_at_matched:
                    self._host_states[host.name].did_start_at_task = True
                    self._host_states[host.name].run_state = self.ITERATING_SETUP

        if start_at_matched:
            # we have our match, so clear the start_at_task field on the
            # play context to flag that we've started at a task (and future
            # plays won't try to advance)
            play_context.start_at_task = None
Example #59
0
    def _diff(self):
        if self.ignore_blank_cards:
            cardsa = [c for c in self.a.cards if str(c) != BLANK_CARD]
            cardsb = [c for c in self.b.cards if str(c) != BLANK_CARD]
        else:
            cardsa = list(self.a.cards)
            cardsb = list(self.b.cards)

        # build dictionaries of keyword values and comments
        def get_header_values_comments(cards):
            values = {}
            comments = {}
            for card in cards:
                value = card.value
                if self.ignore_blanks and isinstance(value, string_types):
                    value = value.rstrip()
                values.setdefault(card.keyword, []).append(value)
                comments.setdefault(card.keyword, []).append(card.comment)
            return values, comments

        valuesa, commentsa = get_header_values_comments(cardsa)
        valuesb, commentsb = get_header_values_comments(cardsb)

        # Normalize all keyword to upper-case for comparison's sake;
        # TODO: HIERARCH keywords should be handled case-sensitively I think
        keywordsa = set(k.upper() for k in valuesa)
        keywordsb = set(k.upper() for k in valuesb)

        self.common_keywords = sorted(keywordsa.intersection(keywordsb))
        if len(cardsa) != len(cardsb):
            self.diff_keyword_count = (len(cardsa), len(cardsb))

        # Any other diff attributes should exclude ignored keywords
        keywordsa = keywordsa.difference(self.ignore_keywords)
        keywordsb = keywordsb.difference(self.ignore_keywords)
        if self.ignore_keyword_patterns:
            for pattern in self.ignore_keyword_patterns:
                keywordsa = keywordsa.difference(fnmatch.filter(keywordsa,
                                                                pattern))
                keywordsb = keywordsb.difference(fnmatch.filter(keywordsb,
                                                                pattern))

        if '*' in self.ignore_keywords:
            # Any other differences between keywords are to be ignored
            return

        left_only_keywords = sorted(keywordsa.difference(keywordsb))
        right_only_keywords = sorted(keywordsb.difference(keywordsa))

        if left_only_keywords or right_only_keywords:
            self.diff_keywords = (left_only_keywords, right_only_keywords)

        # Compare count of each common keyword
        for keyword in self.common_keywords:
            if keyword in self.ignore_keywords:
                continue
            if self.ignore_keyword_patterns:
                skip = False
                for pattern in self.ignore_keyword_patterns:
                    if fnmatch.fnmatch(keyword, pattern):
                        skip = True
                        break
                if skip:
                    continue

            counta = len(valuesa[keyword])
            countb = len(valuesb[keyword])
            if counta != countb:
                self.diff_duplicate_keywords[keyword] = (counta, countb)

            # Compare keywords' values and comments
            for a, b in zip(valuesa[keyword], valuesb[keyword]):
                if diff_values(a, b, tolerance=self.tolerance):
                    self.diff_keyword_values[keyword].append((a, b))
                else:
                    # If there are duplicate keywords we need to be able to
                    # index each duplicate; if the values of a duplicate
                    # are identical use None here
                    self.diff_keyword_values[keyword].append(None)

            if not any(self.diff_keyword_values[keyword]):
                # No differences found; delete the array of Nones
                del self.diff_keyword_values[keyword]

            if '*' in self.ignore_comments or keyword in self.ignore_comments:
                continue
            if self.ignore_comment_patterns:
                skip = False
                for pattern in self.ignore_comment_patterns:
                    if fnmatch.fnmatch(keyword, pattern):
                        skip = True
                        break
                if skip:
                    continue

            for a, b in zip(commentsa[keyword], commentsb[keyword]):
                if diff_values(a, b):
                    self.diff_keyword_comments[keyword].append((a, b))
                else:
                    self.diff_keyword_comments[keyword].append(None)

            if not any(self.diff_keyword_comments[keyword]):
                del self.diff_keyword_comments[keyword]
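
Worth isolating from the diff above: removing glob-style ignore patterns from a keyword set is just set.difference over fnmatch.filter. A small sketch with made-up FITS-like keywords:

import fnmatch

# hypothetical keywords, already upper-cased as in the code above
keywords = {"NAXIS", "NAXIS1", "NAXIS2", "DATE-OBS", "EXPTIME"}

for pattern in ["NAXIS*"]:
    # fnmatch.filter returns the subset of names matching the glob
    keywords = keywords.difference(fnmatch.filter(keywords, pattern))

assert keywords == {"DATE-OBS", "EXPTIME"}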
Example #60
File: EvE.py Project: moka-guys/EvE
    def find_FEfiles(self):
        '''Read the list built above of filename patterns and replace each
        pattern with the full file name it matches.'''
        # each row of the text file has already been split into fields
        for i in self.files_to_find:
            file1_pattern = i[0]
            file1_dye = i[1]
            file2_pattern = i[2]
            file2_dye = i[3]
            out_file_prefix = i[4]

            # set both filenames to None so we can tell below when no match was found
            file1_filename = None
            file2_filename = None

            # search for a FE file which matches the filename pattern
            for afile in os.listdir(self.chosenfolder):
                # file 1
                if fnmatch.fnmatch(afile, file1_pattern):
                    file1_filename = afile
                # file 2
                if fnmatch.fnmatch(afile, file2_pattern):
                    file2_filename = afile

            # if both files have been identified, add them to the new list; otherwise raise
            if file1_filename is not None and file2_filename is not None:
                self.list_of_files.append((file1_filename, file1_dye, file2_filename, file2_dye, out_file_prefix))
            else:
                raise ValueError("no match for " + file1_pattern + " and " + file2_pattern)