Example #1
def nodes_files_of(node, recurse, includes, excludes):
	"""
	Recursively returns subnodes of this node. All node filenames that
	match a pattern in includes but do not match a pattern in
	excludes are returned.

	@param includes: list of wildcards to include.
	@param excludes: list of wildcards to exclude.
	@param recurse: whether to recurse or not.
	"""
	# perform the listdir in the source directory, once
	node.__class__.bld.rescan(node)

	# doxygen looks at the files under the source directory
	buf = []
	for x in node.__class__.bld.cache_dir_contents[node.id]:
		filename = node.abspath() + os.sep + x
		if any(fnmatchcase(filename, pat) for pat in excludes):
			continue
		st = os.stat(filename)
		if stat.S_ISREG(st[stat.ST_MODE]):
			k = node.find_resource(x)
			if not any(fnmatchcase(k.abspath(), pat) for pat in includes):
				continue
			buf.append(k)
		elif stat.S_ISDIR(st[stat.ST_MODE]) and recurse:
			nd = node.find_dir(x)
			if nd.id != nd.__class__.bld.bldnode.id:
				buf += nodes_files_of(nd, recurse, includes, excludes)
	return buf
Example #2
    def get_search_results(self, req, terms, filters):
        if 'filename' not in filters: return # Early bailout if we aren't active
        
        db = self.env.get_db_cnx()
        cursor = db.cursor()
        repo = self.env.get_repository(req.authname)
        youngest_rev = repo.get_youngest_rev()
        # ???: Ask cboos about this. <NPK>
        if isinstance(youngest_rev, basestring) and youngest_rev.isdigit():
            youngest_rev = int(youngest_rev)        

        cursor.execute("""SELECT max("""+db.cast('rev','int')+"""), path FROM node_change
                          WHERE node_type = %s AND change_type != 'D'
                          GROUP BY path""", ('F',))

        all_files = [(r,p) for r,p in cursor]# if repo.has_node(p, youngest_rev)]
        cset_cache = {}
        
        for term in terms:
            for rev, path in all_files:
                match = None
                if '/' in term:
                    match = fnmatchcase(path, term.lstrip('/'))
                else:
                    match = sum([fnmatchcase(x, term) for x in path.split('/')])
                if match:
                    cset = cset_cache.setdefault(rev, repo.get_changeset(rev))
                    msg = ''
                    if self.check_gone and not repo.has_node(path, youngest_rev):
                        if self.show_gone:
                            msg = 'Not in the youngest revision, file has possibly been moved.'
                        else:
                            continue
                    yield (req.href.browser(path, rev=rev), path, cset.date, cset.author, msg)
Example #3
    def get_lines_matching_pattern(self, string, pattern, case_insensitive=False):
        """Returns lines of the given ``string`` that match the ``pattern``.

        The ``pattern`` is a _glob pattern_ where:
        | ``*``        | matches everything |
        | ``?``        | matches any single character |
        | ``[chars]``  | matches any character inside square brackets (e.g. ``[abc]`` matches either ``a``, ``b`` or ``c``) |
        | ``[!chars]`` | matches any character not inside square brackets |

        A line matches only if it matches the ``pattern`` fully.

        The match is case-sensitive by default, but giving ``case_insensitive``
        a true value makes it case-insensitive. The value is considered true
        if it is a non-empty string that is not equal to ``false``, ``none`` or
        ``no``. If the value is not a string, its truth value is got directly
        in Python. Considering ``none`` false is new in RF 3.0.3.

        Lines are returned as one string catenated back together with
        newlines. Possible trailing newline is never returned. The
        number of matching lines is automatically logged.

        Examples:
        | ${lines} = | Get Lines Matching Pattern | ${result} | Wild???? example |
        | ${ret} = | Get Lines Matching Pattern | ${ret} | FAIL: * | case_insensitive=true |

        See `Get Lines Matching Regexp` if you need more complex
        patterns and `Get Lines Containing String` if searching
        literal strings is enough.
        """
        if is_truthy(case_insensitive):
            pattern = pattern.lower()
            matches = lambda line: fnmatchcase(line.lower(), pattern)
        else:
            matches = lambda line: fnmatchcase(line, pattern)
        return self._get_matching_lines(string, matches)
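
The lowercasing trick above works outside Robot Framework too; a minimal standalone sketch:

from fnmatch import fnmatchcase

lines = ['FAIL: timeout', 'PASS: ok', 'fail: crash']
pattern = 'FAIL: *'.lower()
# lowercase both the pattern and each candidate line for a case-insensitive match
print([line for line in lines if fnmatchcase(line.lower(), pattern)])
# -> ['FAIL: timeout', 'fail: crash']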
Example #4
    def handle_msg (self, cmsg):
        """ Got message from client -- process it.
        """
        dest = cmsg.get_dest()
        size = cmsg.get_size()
        stat = '?'

        for exc, wild in self.excludes:
            if (not wild and dest == exc) or fnmatch.fnmatchcase (dest, exc):
                stat = 'dropped'
                break
        else:
            if self.includes:
                for inc, wild in self.includes:
                    if (not wild and dest == inc) or fnmatch.fnmatchcase (dest, inc):
                        break
                else:
                    stat = 'dropped'
            if stat != 'dropped':
                try:
                    self.fwd_handler.handle_msg (cmsg)
                    stat = 'ok'
                except Exception:
                    self.log.exception ('crashed, dropping msg: %s', dest)
                    stat = 'crashed'

        self.stat_inc ('filter.count')
        self.stat_inc ('filter.bytes', size)
        self.stat_inc ('filter.count.%s' % stat)
        self.stat_inc ('filter.bytes.%s' % stat, size)
Example #5
def wildcard_dict_lookup(key, dictionary):
    """
    Given a dictionary where the keys are strings with Unix-style wildcards
    (i.e. *, ? and []) find the most specific match for the key in the dictionary.
    Raises KeyError if there is no match.
    Raises AmbiguousMatchException if there are multiple matches and none of them
    is most specific.
    """
    matching_keys = [k for k in dictionary.keys() if fnmatch.fnmatchcase(key, k)]
    # All of matching_keys are either equal to key
    # or match it by means of wildcards.
    if len(matching_keys) == 0:
        raise KeyError(key)
    for i, candidate_key in enumerate(matching_keys):
        # Determine if candidate_key is strictly a better match than every other
        # matched key.
        # E.g. if both "Linux-*" and "*" are matches, "Linux-*" is a strictly
        # better match than "*". However, if both "Linux-*" and "*-x86" are
        # matches, neither is better than the other.
        if all(
                fnmatch.fnmatchcase(candidate_key, other_key)
                for (j,other_key) in enumerate(matching_keys)
                if j!=i):
            return dictionary[candidate_key]
    raise AmbiguousMatchException(key=key, candidates=matching_keys)
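
A usage sketch for the lookup above (the dictionary contents are illustrative):

config = {'Linux-*': 'linux defaults', '*-x86': 'x86 defaults', '*': 'generic'}
print(wildcard_dict_lookup('Linux-arm', config))   # 'Linux-*' beats '*' -> 'linux defaults'
print(wildcard_dict_lookup('Darwin-arm', config))  # only '*' matches -> 'generic'
# wildcard_dict_lookup('Linux-x86', config) raises AmbiguousMatchException,
# since neither 'Linux-*' nor '*-x86' is strictly more specific than the other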
Example #6
def find_package_data(where='.', package='', exclude=standard_exclude, exclude_directories=standard_exclude_directories):
    out = {}
    stack = [(convert_path(where), '', package)]
    while stack:
        where, prefix, package = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package))
                else:
                    stack.append((fn, prefix + name + '/', package))
            else:
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix+name)
    return out
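
A quick way to exercise the function above, fabricating a layout in a temporary directory (assumes convert_path and standard_exclude from the surrounding module are in scope):

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'mypkg', 'templates'))
open(os.path.join(root, 'mypkg', '__init__.py'), 'w').close()
open(os.path.join(root, 'mypkg', 'templates', 'base.html'), 'w').close()

print(find_package_data(root))
# -> {'mypkg': ['templates/base.html']}, assuming the conventional standard_exclude
#    that filters out *.py sources and leaves only the data files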
Example #7
    def check_issue(self, trace):
        directories = []
        image_naughty = os.path.join(testinfra.TEST_DIR, "verify", "naughty-" + testinfra.DEFAULT_IMAGE)
        if os.path.exists(image_naughty):
            directories.append(image_naughty)
        trace = self.normalize_traceback(trace)
        number = 0
        for naughty in list_directories(directories):
            (prefix, unused, name) = os.path.basename(naughty).partition("-")
            try:
                n = int(prefix)
            except:
                continue
            with open(naughty, "r") as fp:
                match = "*" + self.normalize_traceback(fp.read()) + "*"
            # Match as in a file name glob, albeit multi line, and account for literal pastes with '[]'
            if fnmatch.fnmatchcase(trace, match) or fnmatch.fnmatchcase(trace, match.replace("[", "?")):
                number = n
        if not number:
            return False

        sys.stderr.write("Ignoring known issue #{0}\n{1}\n".format(number, trace))
        try:
            self.post_github(number, trace)
        except:
            sys.stderr.write("Failed to post known issue to GitHub\n")
            traceback.print_exc()
        return True
Example #8
def is_forbidden(cfg, cvsroot_name, module):
    '''Return 1 if MODULE in CVSROOT_NAME is forbidden; return 0 otherwise.'''

    # CVSROOT_NAME might be None here if the data comes from an
    # unconfigured root.  This interface doesn't care that the root
    # isn't configured, but if that's the case, it will consult only
    # the base and per-vhost configuration for authorizer and
    # authorizer parameters.
    if cvsroot_name:
        authorizer, params = cfg.get_authorizer_and_params_hack(cvsroot_name)
    else:
        authorizer = cfg.options.authorizer
        params = cfg.get_authorizer_params()
        
    # If CVSROOT_NAME isn't configured to use an authorizer, nothing
    # is forbidden.  If it's configured to use something other than
    # the 'forbidden' authorizer, complain.  Otherwise, check for
    # forbiddenness per the PARAMS as expected.
    if not authorizer:
        return 0
    if authorizer != 'forbidden':    
        raise Exception("The 'forbidden' authorizer is the only one supported "
                        "by this interface.  The '%s' root is configured to "
                        "use a different one." % (cvsroot_name))
    forbidden = params.get('forbidden', '')
    forbidden = map(lambda x: x.strip(), filter(None, forbidden.split(',')))
    default = 0
    for pat in forbidden:
        if pat[0] == '!':
            default = 1
            if fnmatch.fnmatchcase(module, pat[1:]):
                return 0
        elif fnmatch.fnmatchcase(module, pat):
            return 1
    return default
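
The '!' prefix inverts the default: a standalone restatement of just the pattern loop above, with illustrative patterns:

import fnmatch

def forbidden_demo(module, patterns):
    default = 0
    for pat in patterns:
        if pat[0] == '!':
            default = 1  # a negated pattern means "forbid everything else"
            if fnmatch.fnmatchcase(module, pat[1:]):
                return 0
        elif fnmatch.fnmatchcase(module, pat):
            return 1
    return default

print(forbidden_demo('secret-tools', ['secret*']))  # 1: explicitly forbidden
print(forbidden_demo('website', ['!public*']))      # 1: not in the allowed set
print(forbidden_demo('public-site', ['!public*']))  # 0: allowed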
Example #9
 def allowed(self, path):
     return (
         (not self.include_patterns
         or any(fnmatchcase(path, x) for x in self.include_patterns))
             and
         (not self.exclude_patterns
         or all(not fnmatchcase(path, x) for x in self.exclude_patterns)))
Example #10
def _match(path, patterns):
    """
    Return a message if `path` is matched by a pattern from the `patterns` map
    or False.
    """
    if not path or not patterns:
        return False

    path = fileutils.as_posixpath(path).lower()
    pathstripped = path.lstrip(POSIX_PATH_SEP)
    if not pathstripped:
        return False
    segments = paths.split(pathstripped)
    if DEBUG:
        logger.debug('_match: path: %(path)r patterns:%(patterns)r.' % locals())
    mtch = False
    for pat, msg in patterns.items():
        if not pat or not pat.strip():  # skip empty or whitespace-only patterns
            continue
        msg = msg or EMPTY_STRING
        pat = pat.lstrip(POSIX_PATH_SEP).lower()
        is_plain = POSIX_PATH_SEP not in pat
        if is_plain:
            if any(fnmatch.fnmatchcase(s, pat) for s in segments):
                mtch = msg
                break
        elif (fnmatch.fnmatchcase(path, pat)
              or fnmatch.fnmatchcase(pathstripped, pat)):
            mtch = msg
            break
    if DEBUG:
        logger.debug('_match: match is %(mtch)r' % locals())
    return mtch
Example #11
	def find_iter(self, in_pat='*', ex_pat=None, prunes=None, build_dir=True):
		"find nodes recursively, this returns the build nodes by default"
		if self.id & 3 != DIR:
			raise StopIteration

		self.__class__.bld.rescan(self)
		for name in self.__class__.bld.cache_dir_contents[self.id]:
			if (ex_pat is None or not fnmatch.fnmatchcase(name, ex_pat)) and fnmatch.fnmatchcase(name, in_pat):
				node = self.find_resource(name)
				if node:
					yield node
				else:
					node = self.find_dir(name)
					if node.id != self.__class__.bld.bldnode.id:
						yield node
			elif self.find_resource(name) is None:
				if prunes is not None and name in prunes:
					continue
				dir = self.find_dir(name)
				if dir:
					if dir.id == self.__class__.bld.bldnode.id:
						continue
					for node in dir.find_iter(in_pat, ex_pat, prunes):
						yield node
		if build_dir:
			for node in self.childs.values():
				if node.id & 3 == BUILD:
					yield node
		raise StopIteration
Example #12
    def get_lines_matching_pattern(self, string, pattern, case_insensitive=False):
        """Returns lines of the given `string` that match the `pattern`.

        The `pattern` is a _glob pattern_ where:
        | *        | matches everything |
        | ?        | matches any single character |
        | [chars]  | matches any character inside square brackets (e.g. '[abc]' matches either 'a', 'b' or 'c') |
        | [!chars] | matches any character not inside square brackets |

        A line matches only if it matches the `pattern` fully.  By
        default the match is case-sensitive, but setting
        `case_insensitive` to any value makes it case-insensitive.

        Lines are returned as one string catenated back together with
        newlines. Possible trailing newline is never returned. The
        number of matching lines is automatically logged.

        Examples:
        | ${lines} = | Get Lines Matching Pattern | ${result} | Wild???? example |
        | ${ret} = | Get Lines Matching Pattern | ${ret} | FAIL: * | case-insensitive |

        See `Get Lines Matching Regexp` if you need more complex
        patterns and `Get Lines Containing String` if searching
        literal strings is enough.
        """
        if case_insensitive:
            pattern = pattern.lower()
            matches = lambda line: fnmatchcase(line.lower(), pattern)
        else:
            matches = lambda line: fnmatchcase(line, pattern)
        return self._get_matching_lines(string, matches)
Example #13
    def test_emitted(self, qtbot, signaller):
        with pytest.raises(SignalEmittedError) as excinfo:
            with qtbot.assertNotEmitted(signaller.signal):
                signaller.signal.emit()

        assert fnmatch.fnmatchcase(str(excinfo.value),
                                   "Signal * unexpectedly emitted.")
Example #14
def listFiles(path, pattern=None, full=True, recurse=True):
    """
    Recurses into the given directory and searches for all files matching the given wildcard pattern.

    :param str path: Path to the directory that shall be searched
    :param str pattern: Wildcard pattern that file names must match
    :param bool full: Full path and file name (True; default) or file name only (False)
    :param bool recurse: Recurse into subfolders (True; default) or not (False)
    :return: List of files
    :rtype: list

    :example: shapes = listFiles(export_path, pattern='*.shp', full=True, recurse=True)
    """

    filelist = []
    if recurse is True:
        for root, subdirs, files in os.walk(path):
            for filename in files:
                if fnmatch.fnmatchcase(filename, pattern):
                    if full is True:
                        filelist.append(os.path.join(root, filename))
                    else:
                        filelist.append(filename)
    else:
        for f in os.listdir(path):
            if fnmatch.fnmatchcase(f, pattern):
                if full is True:
                    filename = os.path.join(path, f)
                else:
                    filename = f
                filelist.append(filename)
    return filelist
Example #15
    def on_process_start(self, task):
        # If --task hasn't been specified don't do anything
        if not task.manager.options.onlytask:
            return

        # Make a list of the specified tasks to run, and those available
        onlytasks = task.manager.options.onlytask.split(',')

        # Make sure the specified tasks exist
        enabled_tasks = [f.name.lower() for f in task.manager.tasks.itervalues() if f.enabled]
        for onlytask in onlytasks:
            if any(i in onlytask for i in '*?['):
                # Try globbing
                if not any(fnmatch.fnmatchcase(f.lower(), onlytask.lower()) for f in enabled_tasks):
                    task.manager.disable_tasks()
                    raise PluginError('No match for task pattern \'%s\'' % onlytask, log)
            elif onlytask.lower() not in enabled_tasks:
                # If any of the tasks do not exist, exit with an error
                task.manager.disable_tasks()
                raise PluginError('Could not find task \'%s\'' % onlytask, log)

        # If current task is not among the specified tasks, disable it
        if not any(task.name.lower() == f.lower() or fnmatch.fnmatchcase(task.name.lower(), f.lower())
                for f in onlytasks):
            task.enabled = False
Example #16
    def on_process_start(self, feed):
        # If --feed hasn't been specified don't do anything
        if not feed.manager.options.onlyfeed:
            return

        # Make a list of the specified feeds to run, and those available
        onlyfeeds = feed.manager.options.onlyfeed.split(',')

        # Make sure the specified feeds exist
        enabled_feeds = [f.name.lower() for f in feed.manager.feeds.itervalues() if f.enabled]
        for onlyfeed in onlyfeeds:
            if any(i in onlyfeed for i in '*?['):
                # Try globbing
                if not any(fnmatch.fnmatchcase(f.lower(), onlyfeed.lower()) for f in enabled_feeds):
                    feed.manager.disable_feeds()
                    raise PluginError('No match for feed pattern \'%s\'' % onlyfeed, log)
            elif onlyfeed.lower() not in enabled_feeds:
                # If any of the feeds do not exist, exit with an error
                feed.manager.disable_feeds()
                raise PluginError('Could not find feed \'%s\'' % onlyfeed, log)

        # If current feed is not among the specified feeds, disable it
        if not any(feed.name.lower() == f.lower() or fnmatch.fnmatchcase(feed.name.lower(), f.lower())
                for f in onlyfeeds):
            feed.enabled = False
Example #17
  def _FindPackageMatches(self, cpv_pattern):
    """Returns list of binpkg (CP, slot) pairs that match |cpv_pattern|.

    This breaks |cpv_pattern| into its C, P and V components, each of
    which may be absent or contain wildcards. It then scans the
    binpkgs database to find all atoms that match these components, returning a
    list of (CP, slot) pairs. When the pattern does not specify a version,
    or when a CP has only one slot in the binpkgs database, the slot
    qualifier is omitted from the result.

    Args:
      cpv_pattern: A CPV pattern, potentially partial and/or having wildcards.

    Returns:
      A list of (CP, slot) pairs of packages in the binpkgs database that
      match the pattern.
    """
    attrs = portage_util.SplitCPV(cpv_pattern, strict=False)
    cp_pattern = os.path.join(attrs.category or '*', attrs.package or '*')
    matches = []
    for cp, cp_slots in self.binpkgs_db.iteritems():
      if not fnmatch.fnmatchcase(cp, cp_pattern):
        continue

      # If no version attribute was given or there's only one slot, omit the
      # slot qualifier.
      if not attrs.version or len(cp_slots) == 1:
        matches.append((cp, None))
      else:
        cpv_pattern = '%s-%s' % (cp, attrs.version)
        for slot, pkg_info in cp_slots.iteritems():
          if fnmatch.fnmatchcase(pkg_info.cpv, cpv_pattern):
            matches.append((cp, slot))

    return matches
Example #18
 def is_ignored(path, ignore_patterns):
     """
     Check if the given path should be ignored or not.
     """
     filename = os.path.basename(path)
     ignore = lambda pattern: (fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern))
     return any(ignore(pattern) for pattern in ignore_patterns)
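
For example, assuming the function above is in scope (patterns are illustrative):

print(is_ignored('src/cache/page.html~', ['*~', '*.pyc']))  # True: the basename matches '*~'
print(is_ignored('docs/conf.py', ['*~', '*.pyc']))          # False: no pattern matches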
Example #19
def index_files(tree, conn):
  """ Index all files from the source directory """
  print "Indexing files from the '%s' tree" % tree.name
  started = datetime.now()
  cur = conn.cursor()
  # Walk the directory tree top-down, this allows us to modify folders to
  # exclude folders matching an ignore_pattern
  for root, folders, files in os.walk(tree.source_folder, True):
    # Find relative path
    rel_path = os.path.relpath(root, tree.source_folder)
    if rel_path == '.':
      rel_path = ""

    # List of files we indexed (i.e. added to the folder listing)
    indexed_files = []
    for f in files:
      # Ignore file if it matches an ignore pattern
      if any((fnmatch.fnmatchcase(f, e) for e in tree.ignore_patterns)):
        continue # Ignore the file

      # file_path and path
      file_path = os.path.join(root, f)
      path = os.path.join(rel_path, f)

      # the file
      with open(file_path, "r") as source_file:
        data = source_file.read()

      # Discard non-text files
      if not dxr.mime.is_text(file_path, data):
        continue

      # Find an icon (ideally dxr.mime should use magic numbers, etc.)
      # that's why it makes sense to save this result in the database
      icon = dxr.mime.icon(path)

      # Insert this file
      cur.execute("INSERT INTO files (path, icon) VALUES (?, ?)", (path, icon))
      # Index this file
      sql = "INSERT INTO trg_index (id, text) VALUES (?, ?)"
      cur.execute(sql, (cur.lastrowid, data))

      # Okay, this file was indexed
      indexed_files.append(f)

    # Exclude folders that match an ignore pattern
    # (The top-down walk allows us to do this)
    # Iterate over a copy: removing entries from the list being iterated would skip folders
    for folder in folders[:]:
      if any((fnmatch.fnmatchcase(folder, e) for e in tree.ignore_patterns)):
        folders.remove(folder)

    # Now build folder listing and folders for indexed_files
    build_folder(tree, conn, rel_path, indexed_files, folders)

  # Okay, let's commit everything
  conn.commit()

  # Print time
  print "(finished in %s)" % (datetime.now() - started)
Example #20
 def instrument(self, targets, tests, compute_junit_classpath):
   junit_classpath = compute_junit_classpath()
   cobertura_cp = self._task_exports.tool_classpath('cobertura-instrument')
   aux_classpath = os.pathsep.join(relativize_paths(junit_classpath, get_buildroot()))
   safe_delete(self._coverage_datafile)
   classes_by_target = self._context.products.get_data('classes_by_target')
   for target in targets:
     if self.is_coverage_target(target):
       classes_by_rootdir = classes_by_target.get(target)
       if classes_by_rootdir:
         for root, products in classes_by_rootdir.rel_paths():
           self._rootdirs[root].update(products)
   # Cobertura uses regular expressions for filters, and even then there are still problems
   # with filtering. It turned out to be easier to just select which classes to instrument
   # by filtering them here.
   # TODO(ji): Investigate again how we can use cobertura's own filtering mechanisms.
   if self._coverage_filters:
     for basedir, classes in self._rootdirs.items():
       updated_classes = []
       for cls in classes:
         does_match = False
         for positive_filter in self._include_filters:
           if fnmatch.fnmatchcase(_classfile_to_classname(cls), positive_filter):
             does_match = True
         for negative_filter in self._exclude_filters:
           if fnmatch.fnmatchcase(_classfile_to_classname(cls), negative_filter):
             does_match = False
         if does_match:
           updated_classes.append(cls)
       self._rootdirs[basedir] = updated_classes
   for basedir, classes in self._rootdirs.items():
     if not classes:
       continue  # No point in running instrumentation if there is nothing to instrument!
     self._nothing_to_instrument = False
     args = [
       '--basedir',
       basedir,
       '--datafile',
       self._coverage_datafile,
       '--auxClasspath',
       aux_classpath,
       ]
     with temporary_file_path(cleanup=False) as instrumented_classes_file:
       with file(instrumented_classes_file, 'wb') as icf:
         icf.write(('\n'.join(classes) + '\n').encode('utf-8'))
       self._context.log.debug('instrumented classes in {0}'.format(instrumented_classes_file))
       args.append('--listOfFilesToInstrument')
       args.append(instrumented_classes_file)
       main = 'net.sourceforge.cobertura.instrument.InstrumentMain'
       execute_java = self.preferred_jvm_distribution_for_targets(targets).execute_java
       result = execute_java(classpath=cobertura_cp,
                             main=main,
                             jvm_options=self._coverage_jvm_options,
                             args=args,
                             workunit_factory=self._context.new_workunit,
                             workunit_name='cobertura-instrument')
     if result != 0:
       raise TaskError("java {0} ... exited non-zero ({1})"
                       " 'failed to instrument'".format(main, result))
Example #21
  def is_forbidden(self, root, path_parts, pathtype):
    # If we don't have a root and path to check, get outta here.
    if not (root and path_parts):
      return 0

    # Give precedence to the new 'forbiddenre' stuff first.
    if self.general.forbiddenre:

      # Join the root and path-parts together into one path-like thing.
      root_and_path = string.join([root] + path_parts, "/")
      if pathtype == vclib.DIR:
        root_and_path = root_and_path + '/'
      
      # If we still have a list of strings, replace those suckers with
      # lists of (compiled_regex, negation_flag)
      if type(self.general.forbiddenre[0]) == type(""):
        for i in range(len(self.general.forbiddenre)):
          pat = self.general.forbiddenre[i]
          if pat[0] == '!':
            self.general.forbiddenre[i] = (re.compile(pat[1:]), 1)
          else:
            self.general.forbiddenre[i] = (re.compile(pat), 0)

      # Do the forbiddenness test.
      default = 0
      for (pat, negated) in self.general.forbiddenre:
        match = pat.search(root_and_path)
        if negated:
          default = 1
          if match:
            return 0
        elif match:
          return 1
      return default

    # If no 'forbiddenre' is in use, we check 'forbidden', which only
    # looks at the top-most directory.
    elif self.general.forbidden:

      # A root and a single non-directory path component?  That's not
      # a module.
      if len(path_parts) == 1 and pathtype != vclib.DIR:
        return 0
      
      # Do the forbiddenness test.
      module = path_parts[0]
      default = 0
      for pat in self.general.forbidden:
        if pat[0] == '!':
          default = 1
          if fnmatch.fnmatchcase(module, pat[1:]):
            return 0
        elif fnmatch.fnmatchcase(module, pat):
          return 1
      return default

    # No forbiddenness configuration?  Just allow it.
    else:
      return 0
Example #22
 def _include(self, path):
     for pattern in TEST_FILTER['exclude']:
         if fnmatchcase(path, pattern):
             return False
     for pattern in TEST_FILTER['include']:
         if fnmatchcase(path, pattern):
             return True
     return False
Example #23
 def is_included(self, eid):
     for iglob in self.includeids:
         if fnmatch.fnmatchcase(eid, iglob):
             return True
     for eglob in self.excludeids:
         if fnmatch.fnmatchcase(eid, eglob):
             return False
     return True
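
Note the opposite precedence from the previous example: there the exclude list was consulted first, here a matching include wins. A toy sketch of the difference (patterns are illustrative):

from fnmatch import fnmatchcase

def exclude_first(name, includes, excludes):  # Example #22's order
    if any(fnmatchcase(name, p) for p in excludes):
        return False
    return any(fnmatchcase(name, p) for p in includes)

def include_first(name, includes, excludes):  # Example #23's order
    if any(fnmatchcase(name, p) for p in includes):
        return True
    return not any(fnmatchcase(name, p) for p in excludes)

print(exclude_first('util_test.py', ['*_test.py'], ['util_*']))  # False
print(include_first('util_test.py', ['*_test.py'], ['util_*']))  # True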
Example #24
		def accept_name(node,name):
			for pat in ex_pat:
				if fnmatch.fnmatchcase(name,pat):
					return False
			for pat in in_pat:
				if fnmatch.fnmatchcase(name,pat):
					return True
			return False
Example #25
 def is_indexed(path):
     if any(fnmatchcase(basename(path), e)
            for e in self.tree.ignore_filenames):
         return False
     if any(fnmatchcase('/' + path.replace(os.sep, '/'), e)
            for e in self.tree.ignore_paths):
         return False
     return True
Example #26

def test_fnmatch():
	from fnmatch import fnmatch, fnmatchcase
	print fnmatch('foo.txt', '*.txt')
	print fnmatch('foo.txt', '?oo.txt')
	print fnmatch('Dat45.csv', 'Dat[0-9]*')
	# case-sensitivity
	print fnmatch('foo.txt', '*.TXT')
	print fnmatchcase('foo.txt', '*.TXT')
Example #27
    def test_emitted_args(self, qtbot, signaller):
        with pytest.raises(SignalEmittedError) as excinfo:
            with qtbot.assertNotEmitted(signaller.signal_args):
                signaller.signal_args.emit('foo', 123)

        assert fnmatch.fnmatchcase(str(excinfo.value),
                                   "Signal * unexpectedly emitted with arguments "
                                   "['foo', 123]")
Example #28
 def find_import_files(self):
     """ return a list of files to import """
     if self.files_to_tests:
         pyfiles = self.files_to_tests.keys()
     else:
         pyfiles = []
 
         for base_dir in self.files_or_dirs:
             if os.path.isdir(base_dir):
                 if hasattr(os, 'walk'):
                     for root, dirs, files in os.walk(base_dir):
                         self.__add_files(pyfiles, root, files)
                 else:
                     # jython2.1 is too old for os.walk!
                     os.path.walk(base_dir, self.__add_files, pyfiles)
 
             elif os.path.isfile(base_dir):
                 pyfiles.append(base_dir)
 
     if self.configuration.exclude_files or self.configuration.include_files:
         ret = []
         for f in pyfiles:
             add = True
             basename = os.path.basename(f)
             if self.configuration.include_files:
                 add = False
                 
                 for pat in self.configuration.include_files:
                     if fnmatch.fnmatchcase(basename, pat):
                         add = True
                         break
             
             if not add:
                 if self.verbosity > 3:
                     sys.stdout.write('Skipped file: %s (did not match any include_files pattern: %s)\n' % (f, self.configuration.include_files))
             
             elif self.configuration.exclude_files:
                 for pat in self.configuration.exclude_files:
                     if fnmatch.fnmatchcase(basename, pat):
                         if self.verbosity > 3:
                             sys.stdout.write('Skipped file: %s (matched exclude_files pattern: %s)\n' % (f, pat))
                             
                         elif self.verbosity > 2:
                             sys.stdout.write('Skipped file: %s\n' % (f,))
                             
                         add = False
                         break
                     
             if add:
                 if self.verbosity > 3:
                     sys.stdout.write('Adding file: %s for test discovery.\n' % (f,))
                 ret.append(f)
                     
         pyfiles = ret
             
             
     return pyfiles
Example #29
    def _ls_detailed(self, path_glob):
        """Recursively list files on GCS and includes some metadata about them:
        - object name
        - size
        - md5 hash
        - _uri

        *path_glob* can include ``?`` to match single characters or
        ``*`` to match 0 or more characters. Both ``?`` and ``*`` can match
        ``/``.
        """

        scheme = urlparse(path_glob).scheme

        bucket_name, base_name = _path_glob_to_parsed_gcs_uri(path_glob)

        # allow subdirectories of the path/glob
        if path_glob and not path_glob.endswith('/'):
            dir_glob = path_glob + '/*'
        else:
            dir_glob = path_glob + '*'

        list_request = self.api_client.objects().list(
            bucket=bucket_name, prefix=base_name, fields=_LS_FIELDS_TO_RETURN)

        uri_prefix = '%s://%s' % (scheme, bucket_name)
        while list_request:
            try:
                resp = list_request.execute()
            except google_errors.HttpError as e:
                if e.resp.status == 404:
                    return

                raise

            resp_items = resp.get('items') or []
            for item in resp_items:
                # We generate the item URI by adding the "gs://" prefix
                uri = "%s/%s" % (uri_prefix, item['name'])

                # enforce globbing
                if not (fnmatch.fnmatchcase(uri, path_glob) or
                        fnmatch.fnmatchcase(uri, dir_glob)):
                    continue

                # filter out folders
                if uri.endswith('/'):
                    continue

                item['_uri'] = uri
                item['bucket'] = bucket_name
                item['size'] = int(item['size'])
                yield item

            list_request = self.api_client.objects().list_next(
                list_request, resp)
Example #30
	def matchHostmask(self, user, banmask):
		banmask = ircLower(banmask)
		userMask = ircLower(user.hostmask())
		if fnmatchcase(userMask, banmask):
			return True
		userMask = ircLower(user.hostmaskWithRealHost())
		if fnmatchcase(userMask, banmask):
			return True
		userMask = ircLower(user.hostmaskWithIP())
		return fnmatchcase(userMask, banmask)
Example #31

import fnmatch
import os
from fnmatch import fnmatchcase


# the opening of this example was lost; a minimal reconstruction
# (the '.' directory argument is an assumption)
def list_files():
    for file in os.listdir('.'):
        if fnmatch.fnmatch(file, '*.txt'):  # find the text files
            print(
                'Text files:', file, "\b"
            )  # now print the Text Files, pass in the second argument file

        if fnmatch.fnmatch(file, '*.rb'):  # find the ruby file
            print('Ruby files:', file)

        if fnmatch.fnmatch(file, '*.yml'):  # find the yaml files
            print('Yaml files:', file)

        if fnmatch.fnmatch(file, '*.py'):  # find python file
            print('Python files:', file)


list_files()
# wildcard (glob) patterns allow you to match strings
# pulled in the fnmatch library to pass in glob-style patterns

players = [  # create a list; go through the players list and keep the elements where 2B is found
    "Jose Altuve 2B", "Carlos Correa SS", "Alex Bregman 3B",
    "Scooter Gennett 2B"
]

second_base_players = [
    player for player in players if fnmatchcase(player, "* 2B")
]
# above we created a variable named second_base_players and built the list dynamically:
# loop through the players list, grabbing each player item,
# and keep it only if fnmatchcase(player, "* 2B") matches

print(second_base_players)
Example #32
def check_skins(mixxx_path, skins, ignore_patterns=()):
    """
    Yields error messages for skins using class/object names from mixxx_path.

    By providing a list of ignore_patterns, you can ignore certain class or
    object names (e.g. #Test, #*Debug).
    """
    classnames, objectnames = get_global_names(mixxx_path)

    # Check default stylesheets
    default_styles_path = os.path.join(mixxx_path, "res", "skins")
    for qss_path, stylesheet in get_stylesheets(default_styles_path):
        for error in stylesheet.errors:
            yield "%s:%d:%d: %s - %s" % (
                qss_path,
                error.line,
                error.column,
                error.__class__.__name__,
                error.reason,
            )
        for token, message in check_stylesheet(stylesheet, classnames,
                                               objectnames, []):
            if any(
                    fnmatch.fnmatchcase(token.value, pattern)
                    for pattern in ignore_patterns):
                continue
            yield "%s:%d:%d: %s" % (
                qss_path,
                token.line,
                token.column,
                message,
            )

    # Check skin stylesheets
    for skin_name, skin_path in sorted(skins):
        # If the skin objectname is something like 'Deck<Variable name="i">',
        # then replace it with 'Deck*' and use glob-like matching
        skin_objectnames = objectnames.copy()
        skin_objectnames_fuzzy = set()
        for objname in get_skin_objectnames(skin_path):
            new_objname = RE_OBJNAME_VARTAG.sub("*", objname)
            if "*" in new_objname:
                skin_objectnames_fuzzy.add(new_objname)
            else:
                skin_objectnames.add(new_objname)

        for qss_path, stylesheet in get_stylesheets(skin_path):
            for error in stylesheet.errors:
                yield "%s:%d:%d: %s - %s" % (
                    qss_path,
                    error.line,
                    error.column,
                    error.__class__.__name__,
                    error.reason,
                )
            for token, message in check_stylesheet(
                    stylesheet,
                    classnames,
                    skin_objectnames,
                    skin_objectnames_fuzzy,
            ):
                if any(
                        fnmatch.fnmatchcase(token.value, pattern)
                        for pattern in ignore_patterns):
                    continue
                yield "%s:%d:%d: %s" % (
                    qss_path,
                    token.line,
                    token.column,
                    message,
                )
Example #33
    def test_find_unused_macros(self):
        '''
        Find macros intended for the templates that are never actually used in
        any template.
        '''

        # First obtain a set of available macros by parsing the source of
        # macros.py.
        macrospy = os.path.join(MY_DIR, '../macros.py')
        with open(macrospy) as f:
            source = f.read()

        node = ast.parse(source, filename=macrospy)

        macros = set()
        for i in ast.iter_child_nodes(node):
            if isinstance(i, ast.FunctionDef):
                macros.add(i.name)

        # Next get a set of gitignored globs.

        # First up, ignore the tests.
        ignored = set(('%s/**' % MY_DIR, ))

        # Now look at all .gitignores from three directories up.
        for stem in ('../../..', '../..', '..'):
            gitignore = os.path.join(MY_DIR, stem, '.gitignore')
            if os.path.exists(gitignore):
                with open(gitignore) as f:
                    for line in (x.strip() for x in f
                                 if x.strip() != '' and not x.startswith('#')):
                        pattern = os.path.join(
                            os.path.abspath(os.path.dirname(gitignore)), line)
                        ignored.add(pattern)

        # Now let's look at all the templates and note macro calls.

        # A regex to match macro calls from the template context. Note that it is
        # imprecise, so the resulting analysis could generate false negatives.
        call = re.compile(r'/\*[-\?].*?\bmacros\.([a-zA-Z][a-zA-Z0-9_]*)\b')

        used = set()
        for root, _, files in os.walk(
                os.path.abspath(os.path.join(MY_DIR, '..'))):
            for f in (os.path.join(root, f) for f in files):
                for pattern in ignored:
                    try:
                        if fnmatch.fnmatchcase(f, pattern):
                            break
                    except Exception:
                        # Suppress any errors resulting from invalid lines in
                        # .gitignore.
                        pass
                else:
                    # This file did not match any of the ignore patterns; scan
                    # it for macro calls.

                    with open(f) as input:
                        source = input.read()
                    for m in call.finditer(source):
                        used.add(m.group(1))

        unused = macros - used - NO_CHECK_UNUSED
        if len(unused) > 0:
            [print("Unused macro: %s" % u) for u in unused]
        self.assertSetEqual(unused, set())
Example #34
def match_glob(val, pat):
    ok = fnmatch.fnmatchcase(val, pat)
    if not ok:
        assert pat in val
Example #35
    def clone_shallow_local(self, ud, dest, d):
        """Clone the repo and make it shallow.

        The upstream url of the new clone isn't set at this time, as it'll be
        set correctly when unpacked."""
        runfetchcmd(
            "%s clone %s %s %s" %
            (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)

        to_parse, shallow_branches = [], []
        for name in ud.names:
            revision = ud.revisions[name]
            depth = ud.shallow_depths[name]
            if depth:
                to_parse.append('%s~%d^{}' % (revision, depth - 1))

            # For nobranch, we need a ref, otherwise the commits will be
            # removed, and for non-nobranch, we truncate the branch to our
            # srcrev, to avoid keeping unnecessary history beyond that.
            branch = ud.branches[name]
            if ud.nobranch:
                ref = "refs/shallow/%s" % name
            elif ud.bareclone:
                ref = "refs/heads/%s" % branch
            else:
                ref = "refs/remotes/origin/%s" % branch

            shallow_branches.append(ref)
            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision),
                        d,
                        workdir=dest)

        # Map srcrev+depths to revisions
        parsed_depths = runfetchcmd("%s rev-parse %s" %
                                    (ud.basecmd, " ".join(to_parse)),
                                    d,
                                    workdir=dest)

        # Resolve specified revisions
        parsed_revs = runfetchcmd(
            "%s rev-parse %s" %
            (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)),
            d,
            workdir=dest)
        shallow_revisions = parsed_depths.splitlines(
        ) + parsed_revs.splitlines()

        # Apply extra ref wildcards
        all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' %
                               ud.basecmd,
                               d,
                               workdir=dest).splitlines()
        for r in ud.shallow_extra_refs:
            if not ud.bareclone:
                r = r.replace('refs/heads/', 'refs/remotes/origin/')

            if '*' in r:
                matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
                shallow_branches.extend(matches)
            else:
                shallow_branches.append(r)

        # Make the repository shallow
        shallow_cmd = [self.make_shallow_path, '-s']
        for b in shallow_branches:
            shallow_cmd.append('-r')
            shallow_cmd.append(b)
        shallow_cmd.extend(shallow_revisions)
        runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
Example #36
from fnmatch import fnmatch, fnmatchcase

print(fnmatch('Dat45.csv', 'Dat[0-9]*'))
names = ['Dat1.csv', 'Dat2.csv', 'config.ini', 'foo.py']
print([name for name in names if fnmatch(name, 'Dat*.csv')])

# fnmatch() matches patterns using the same case-sensitivity rules as
# the system's underlying filesystem (which varies based on operating system)

# On OS X (Mac)
print(fnmatch('foo.txt', '*.TXT'))
# On Windows
print(fnmatch('foo.txt', '*.TXT'))

# If this distinction matters, use fnmatchcase() instead.
# It matches exactly based on the lower- and uppercase conventions that you supply:

print(fnmatchcase('foo.txt', '*.TXT'))

# An often overlooked feature of these functions is their potential use
# with data processing of nonfilename strings
from fnmatch import fnmatchcase
addresses = [
    '5412 N CLARK ST',
    '1060 W ADDISON ST',
    '1039 W GRANVILLE AVE',
    '2122 N CLARK ST',
    '4802 N BROADWAY',
]
tmp1 = [addr for addr in addresses if fnmatchcase(addr, '* ST')]
print(tmp1)
tmp2 = [
    addr for addr in addresses if fnmatchcase(addr, '54[0-9][0-9] *CLARK*')
]
print(tmp2)
Example #37
 def is_path_downloadable(self, repos, path):
     if repos.reponame:
         path = repos.reponame + '/' + path
     return any(
         fnmatchcase(path, dp.strip('/')) for dp in self.downloadable_paths)
Example #38
def heur(physinfo, run=''):
    """
    Set of if .. elif statements to fill BIDS names.

    It requires the user (you!) to adjust it accordingly!
    It needs an ``if`` or ``elif`` statement for each file that
    needs to be processed.
    The statement will test whether ``physinfo``:
        - is similar to a string (first case), or
        - exactly matches a string (second case).

    Parameters
    ----------
    physinfo: str
        Name of an input file that should be bidsified (See Notes)

    Returns
    -------
    info: dictionary of str
        Dictionary containing BIDS keys

    Notes
    -----
    The `if ..` structure should always be similar to
    ```
    if physinfo == 'somepattern':
        info['var'] = 'somethingelse'
    ```
    or, in case it's a partial match
    ```
    if fnmatch.fnmatchcase(physinfo, '*somepattern?'):
        info['var'] = 'somethingelse'
    ```
    Where:
        - `physinfo` and `info` are dedicated keywords,
        - 'somepattern' is the name of the file,
        - 'var' is a bids key in the list below
        - 'somethingelse' is the value of the key
    """
    info = {}
    # ################################# #
    # ##        Modify here!         ## #
    # ##                             ## #
    # ##  Possible variables are:    ## #
    # ##    -info['task'] (required) ## #
    # ##    -info['run']             ## #
    # ##    -info['rec']             ## #
    # ##    -info['acq']             ## #
    # ##    -info['dir']             ## #
    # ##                             ## #
    # ##  Remember that they are     ## #
    # ##  dictionary keys            ## #
    # ##  See example below          ## #
    # ################################# #

    if fnmatch.fnmatchcase(physinfo, '*onescan*'):
        info['task'] = 'test'
        info['run'] = '01'
        info['rec'] = 'biopac'
    elif physinfo == 'Example':
        info['task'] = 'rest'
        info['run'] = '01'
        info['acq'] = 'resp'

    # ############################## #
    # ## Don't modify below this! ## #
    # ############################## #
    return info
Example #39
from fnmatch import fnmatch, fnmatchcase

fileList = [
    'neat.CSV',
    'nice.txt',
    'scripts.py',
    'simple.py'
]

#Now we can use the basic expression '*.csv' to match whether the file is csv or not
for file in fileList:
    if fnmatch(file, '*.csv'):
        print(file)

#Although the above gives us the correct result, assume that the company accepts lowercase extension files only
#In such case, we have to also look for the case instead of just string matching.

for file in fileList:
    if fnmatchcase(file, '*.csv'):
        print(file)
        
#As another example, let's assume we have been provided with a list of phone numbers belonging to Pakistani citizens
#As per the code 033- and the fact that the number is 11 digits long, we need to find all the numbers that use the Ufone SIM card

phoneList = [
    '03311111111',
    '03122222222',
    '03033333333',
    '0334444444444',
    '03455555555',
    '03266666666666',
    '902946656575676756',
    '03399999999',
    '03100000000'
]
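
# completing the example: a Ufone number is '033' followed by 8 more digits (11 total)
ufoneNumbers = [num for num in phoneList if fnmatchcase(num, '033????????')]
print(ufoneNumbers)  # -> ['03311111111', '03399999999']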
Example #40
 def blacklisted(keychain):
     return any(
         fnmatch.fnmatchcase(keychain, glob)
         for glob in config.val.keyhint.blacklist)
Example #41
from fnmatch import fnmatch, fnmatchcase

result1 = fnmatch('foo.txt', '*.txt')
print('result1 :', result1)

result2 = fnmatch('foo.txt', '?oo.txt')
print('result2 :', result2)

result3 = fnmatch('Dat45.csv', 'Dat[0-9]*')
print('result3:', result3)

names = ['Dat1.csv', 'Dat2.csv', 'config.ini', 'foo.py']
targetNames = [name for name in names if fnmatch(name, 'Dat*.csv')]
print('targetNames:', targetNames)

# ==========================================================

result4 = fnmatchcase('foo.txt', '*.TXT')
print('result4:', result4)

addresses = [
    '5412 N CLARK ST',
    '1060 W ADDISON ST',
    '1039 W GRANVILLE AVE',
    '2122 N CLARK ST',
    '4802 N BROADWAY',
]

targetAddr1 = [addr for addr in addresses if fnmatchcase(addr, '* ST')]
print('targetAddr1:', targetAddr1)

targetAddr2 = [
    addr for addr in addresses if fnmatchcase(addr, '54[0-9][0-9] *CLARK*')
]
print('targetAddr2:', targetAddr2)
Example #42
    def with_dependencies(self, dag_collection):
        """Perfom a dry_run to get upstream dependencies."""
        dependencies = []

        for table in self._get_referenced_tables():
            upstream_task = dag_collection.task_for_table(table[0], table[1])
            task_schedule_interval = dag_collection.dag_by_name(
                self.dag_name
            ).schedule_interval

            if upstream_task is not None:
                # ensure there are no duplicate dependencies
                # manual dependency definitions overwrite automatically detected ones
                if not any(
                    d.dag_name == upstream_task.dag_name
                    and d.task_id == upstream_task.task_name
                    for d in self.depends_on
                ):
                    upstream_schedule_interval = dag_collection.dag_by_name(
                        upstream_task.dag_name
                    ).schedule_interval

                    execution_delta = schedule_interval_delta(
                        upstream_schedule_interval, task_schedule_interval
                    )

                    if execution_delta == "0s":
                        execution_delta = None

                    dependencies.append(
                        TaskRef(
                            dag_name=upstream_task.dag_name,
                            task_id=upstream_task.task_name,
                            execution_delta=execution_delta,
                        )
                    )
            else:
                # see if there are some static dependencies
                for task, patterns in EXTERNAL_TASKS.items():
                    if any(fnmatchcase(f"{table[0]}.{table[1]}", p) for p in patterns):
                        # ensure there are no duplicate dependencies
                        # manual dependency definitions overwrite automatically detected
                        if not any(
                            d.dag_name == task.dag_name and d.task_id == task.task_id
                            for d in self.depends_on + dependencies
                        ):
                            execution_delta = schedule_interval_delta(
                                task.schedule_interval, task_schedule_interval
                            )

                            if execution_delta:
                                dependencies.append(
                                    TaskRef(
                                        dag_name=task.dag_name,
                                        task_id=task.task_id,
                                        execution_delta=execution_delta,
                                    )
                                )
                        break  # stop after the first match

        self.dependencies = dependencies
Example #43
def find_package_data(where='.',
                      package='',
                      exclude=standard_exclude,
                      exclude_directories=standard_exclude_directories,
                      only_in_packages=True,
                      show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.
    """

    out = {}
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print(("Directory %s ignored by pattern %s" %
                                   (fn, pattern)),
                                  file=sys.stderr)
                        break
                if bad_name:
                    continue
                if (os.path.isfile(os.path.join(fn, '__init__.py'))
                        and not prefix):
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    stack.append(
                        (fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print(("File %s ignored by pattern %s" %
                                   (fn, pattern)),
                                  file=sys.stderr)
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out
Example #44
# FIXME: only processes first file
rfileconf = rfiles[0]

# guess session from file name
from utils import session_from_path
session = session_from_path(rfileconf[0]['file'])
prefix = 'plots/{}'.format(session)

from config import classifiers

if options.clnameglob:
    # only process matching classifiers
    from fnmatch import fnmatchcase
    for key in classifiers:
        if not fnmatchcase(key, options.clnameglob):
            del classifiers[key]

from fixes import ROOT
ROOT.gROOT.SetBatch(options.batch)

from utils import get_hists
from rplot.rplot import Rplot, arrange


def _filter(string):
    matches = ['MVA_{}{}'.format(cl, string) for cl in classifiers]
    return lambda k: k.GetName() in matches


_filter1 = lambda string: lambda k: _filter(string + '_S')(k) or _filter(
    string + '_B')(k)  # assumption: '_B' (background) pairs with the '_S' (signal) suffix
Example #45
 def ignore(pattern):
     return fnmatch.fnmatchcase(path.name, pattern) or fnmatch.fnmatchcase(
         str(path), pattern)
Example #46
def find_target_file(targetpath, d, pkglist=None):
    """Find the recipe installing the specified target path, optionally limited to a select list of packages"""
    import json

    pkgdata_dir = d.getVar('PKGDATA_DIR')

    # The mix between /etc and ${sysconfdir} here may look odd, but it is just
    # being consistent with usage elsewhere
    invalidtargets = {
        '${sysconfdir}/version':
        '${sysconfdir}/version is written out at image creation time',
        '/etc/timestamp':
        '/etc/timestamp is written out at image creation time',
        '/dev/*':
        '/dev is handled by udev (or equivalent) and the kernel (devtmpfs)',
        '/etc/passwd':
        '/etc/passwd should be managed through the useradd and extrausers classes',
        '/etc/group':
        '/etc/group should be managed through the useradd and extrausers classes',
        '/etc/shadow':
        '/etc/shadow should be managed through the useradd and extrausers classes',
        '/etc/gshadow':
        '/etc/gshadow should be managed through the useradd and extrausers classes',
        '${sysconfdir}/hostname':
        '${sysconfdir}/hostname contents should be set by setting hostname:pn-base-files = "value" in configuration',
    }

    for pthspec, message in invalidtargets.items():
        if fnmatch.fnmatchcase(targetpath, d.expand(pthspec)):
            raise InvalidTargetFileError(d.expand(message))

    targetpath_re = re.compile(r'\s+(\$D)?%s(\s|$)' % targetpath)

    recipes = defaultdict(list)
    for root, dirs, files in os.walk(os.path.join(pkgdata_dir, 'runtime')):
        if pkglist:
            filelist = pkglist
        else:
            filelist = files
        for fn in filelist:
            pkgdatafile = os.path.join(root, fn)
            if pkglist and not os.path.exists(pkgdatafile):
                continue
            with open(pkgdatafile, 'r') as f:
                pn = ''
                # This does assume that PN comes before other values, but that's a fairly safe assumption
                for line in f:
                    if line.startswith('PN:'):
                        pn = line.split(':', 1)[1].strip()
                    elif line.startswith('FILES_INFO:'):
                        val = line.split(':', 1)[1].strip()
                        dictval = json.loads(val)
                        for fullpth in dictval.keys():
                            if fnmatch.fnmatchcase(fullpth, targetpath):
                                recipes[targetpath].append(pn)
                    elif line.startswith('pkg_preinst:') or line.startswith(
                            'pkg_postinst:'):
                        scriptval = line.split(':', 1)[1].strip().encode(
                            'utf-8').decode('unicode_escape')
                        if 'update-alternatives --install %s ' % targetpath in scriptval:
                            recipes[targetpath].append('?%s' % pn)
                        elif targetpath_re.search(scriptval):
                            recipes[targetpath].append('!%s' % pn)
    return recipes
Exemplo n.º 47
0
 def _build_filter(*patterns):
     """copy from setuptools/__init__.py:
     Given a list of patterns, return a callable that will be true only if
     the input matches one of the patterns.
     """
     return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
Exemplo n.º 48
0
 def _build_filter(*patterns):
     """
     Given a list of patterns, return a callable that will be true only if
     the input matches at least one of the patterns.
     """
     return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
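# A quick usage sketch of the filter factory above (the pattern choices here
# are illustrative, not from the source):
from fnmatch import fnmatchcase

is_source = _build_filter('*.py', '*.pyx')
print(is_source('module.py'))   # True: matches the first pattern
print(is_source('README.rst'))  # False: matches neither pattern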
Exemplo n.º 49
0
# Normally, fnmatch() matches patterns using the same case-sensitivity rules
# as the system's underlying filesystem, so results vary by OS.
# On Mac OS X:
#     fnmatch('example.txt', '*.TXT')  # returns False
# On Windows:
#     fnmatch('example.txt', '*.TXT')  # returns True

# Another feature of these functions is their potential use in data
# processing of non-filename strings:
addresses = [
    '6969 N Bone St', '420 S Yolo Rd', '12 W AllFollows Blvd.',
    '12345 E Example Ave'
]

# You could use them in list comprehensions like so:
from fnmatch import fnmatchcase

[addr for addr in addresses if fnmatchcase(addr, '* Rd')]
# -> ['420 S Yolo Rd']
[addr for addr in addresses if fnmatchcase(addr, '123[0-9][0-9] *Example*')]
# -> ['12345 E Example Ave']

# The matching performed by fnmatch sits somewhere between the functionality
# of simple string methods and the full power of regular expressions.
# If you're trying to provide a simple mechanism for allowing wildcards in
# data processing operations, it's a useful and reasonable solution.

#---------------------------------------------------------------------------

#2.4 - Matching and Searching for Text Patterns

#Problem: You want to match or search text for a specific pattern
#Solution: If the text you're trying to match is a literal, you can usually just use the basic string methods, like str.find(), str.endswith(), str.startswith(), etc.

text = 'where are all of my friends at, yo'

#If we want an exact match
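# A minimal sketch of the literal-match approach named in the solution above:
print(text == 'where are all of my friends at, yo')  # True: exact literal match
print(text.startswith('where'))                      # True
print(text.endswith('yo'))                           # True
print(text.find('friends'))                          # 20: index of the first occurrence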
Exemplo n.º 50
0
    def expand_macro(self, formatter, name, content):
        args, kw = parse_args(content)
        prefix = args[0].strip() if args else None
        hideprefix = args and len(args) > 1 and args[1].strip() == 'hideprefix'
        minsize = _arg_as_int(kw.get('min', 1), 'min', min=1)
        minsize_group = max(minsize, 2)
        depth = _arg_as_int(kw.get('depth', -1), 'depth', min=-1)
        format = kw.get('format', '')

        def parse_list(name):
            return [inc.strip() for inc in kw.get(name, '').split(':')
                    if inc.strip()]

        includes = parse_list('include') or ['*']
        excludes = parse_list('exclude')

        wiki = formatter.wiki
        resource = formatter.resource
        if prefix and resource and resource.realm == 'wiki':
            prefix = wiki.resolve_relative_name(prefix, resource.id)

        start = prefix.count('/') if prefix else 0

        if hideprefix:
            omitprefix = lambda page: page[len(prefix):]
        else:
            omitprefix = lambda page: page

        pages = sorted(page for page in wiki.get_pages(prefix)
                       if (depth < 0 or depth >= page.count('/') - start)
                       and 'WIKI_VIEW' in formatter.perm('wiki', page)
                       and any(fnmatchcase(page, inc) for inc in includes)
                       and not any(fnmatchcase(page, exc) for exc in excludes))

        if format == 'compact':
            return tag(
                separated((tag.a(wiki.format_page_name(omitprefix(p)),
                                 href=formatter.href.wiki(p)) for p in pages),
                          ', '))

        # the function definitions for the different format styles

        # the different page split formats, each corresponding to its rendering
        def split_pages_group(pages):
            """Return a list of (path elements, page_name) pairs,
            where path elements correspond to the page name (without prefix)
            split at camel case word boundaries, numbers and '/'.
            """
            page_paths = []
            for page in pages:
                path = [elt.strip() for elt in self.SPLIT_RE.split(
                        self.NUM_SPLIT_RE.sub(r" \1 ",
                        wiki.format_page_name(omitprefix(page), split=True)))]
                page_paths.append(([elt for elt in path if elt], page))
            return page_paths

        def split_pages_hierarchy(pages):
            """Return a list of (path elements, page_name) pairs,
            where path elements correspond to the page name (without prefix)
            split according to the '/' hierarchy.
            """
            return [(wiki.format_page_name(omitprefix(page)).split("/"), page)
                    for page in pages]

        # the different tree structures, each corresponding to its rendering
        def tree_group(entries):
            """Transform a flat list of entries into a tree structure.

            `entries` is a list of `(path_elements, page_name)` pairs

            Return a list organized in a tree structure, in which:
              - a leaf is a page name
              - a node is a `(key, nodes)` pair, where:
                - `key` is the leftmost of the path elements, common to the
                  grouped (path element, page_name) entries
                - `nodes` is a list of nodes or leaves
            """
            def keyfn(args):
                elts, name = args
                return elts[0] if elts else ''
            groups = []

            for key, grouper in groupby(entries, keyfn):
                # remove key from path_elements in grouped entries for further
                # grouping
                grouped_entries = [(path_elements[1:], page_name)
                                   for path_elements, page_name in grouper]

                if key and len(grouped_entries) >= minsize_group:
                    subnodes = tree_group(sorted(grouped_entries))
                    if len(subnodes) == 1:
                        subkey, subnodes = subnodes[0]
                        node = (key + subkey, subnodes)
                        groups.append(node)
                    elif self.SPLIT_RE.match(key):
                        for elt in subnodes:
                            if isinstance(elt, tuple):
                                subkey, subnodes = elt
                                elt = (key + subkey, subnodes)
                            groups.append(elt)
                    else:
                        node = (key, subnodes)
                        groups.append(node)
                else:
                    for path_elements, page_name in grouped_entries:
                        groups.append(page_name)
            return groups

        def tree_hierarchy(entries):
            """Transform a flat list of entries into a tree structure.

            `entries` is a list of `(path_elements, page_name)` pairs

            Return a list organized in a tree structure, in which:
              - a leaf is a `(rest, page)` pair, where:
                - `rest` is the rest of the path to be shown
                - `page` is a page name
              - a node is a `(key, nodes, page)` pair, where:
                - `key` is the leftmost of the path elements, common to the
                  grouped (path element, page_name) entries
                - `page` is a page name (if one exists for that node)
                - `nodes` is a list of nodes or leaves
            """
            def keyfn(args):
                elts, name = args
                return elts[0] if elts else ''
            groups = []

            for key, grouper in groupby(entries, keyfn):
                grouped_entries = [e for e in grouper]
                sub_entries = [e for e in grouped_entries if len(e[0]) > 1]
                key_entries = [e for e in grouped_entries if len(e[0]) == 1]
                key_entry = key_entries[0] if key_entries else None
                key_page = key_entry[1] if key_entries else None

                if key and len(sub_entries) >= minsize:
                    # remove key from path_elements in grouped entries for
                    # further grouping
                    sub_entries = [(path_elements[1:], page)
                                   for path_elements, page in sub_entries]

                    subnodes = tree_hierarchy(sorted(sub_entries))
                    node = (key, key_page, subnodes)
                    groups.append(node)
                else:
                    if key_entry:
                        groups.append(key_entry)
                    groups.extend(sub_entries)
            return groups

        # the different rendering formats
        def render_group(group):
            return tag.ul(
                tag.li(tag(tag.strong(elt[0].strip('/')), render_group(elt[1]))
                       if isinstance(elt, tuple) else
                       tag.a(wiki.format_page_name(omitprefix(elt)),
                             href=formatter.href.wiki(elt)))
                for elt in group)

        def render_hierarchy(group):
            return tag.ul(
                tag.li(tag(tag.a(elt[0], href=formatter.href.wiki(elt[1]))
                           if elt[1] else tag(elt[0]),
                           render_hierarchy(elt[2]))
                       if len(elt) == 3 else
                       tag.a('/'.join(elt[0]),
                             href=formatter.href.wiki(elt[1])))
                for elt in group)

        transform = {
            'group': lambda p: render_group(tree_group(split_pages_group(p))),
            'hierarchy': lambda p: render_hierarchy(
                                    tree_hierarchy(split_pages_hierarchy(p))),
            }.get(format)

        if transform:
            titleindex = transform(pages)
        else:
            titleindex = tag.ul(
                tag.li(tag.a(wiki.format_page_name(omitprefix(page)),
                             href=formatter.href.wiki(page)))
                for page in pages)

        return tag.div(titleindex, class_='titleindex')
Exemplo n.º 51
0
 def ignore(pattern):
     return fnmatch.fnmatchcase(
         filename, pattern) or fnmatch.fnmatchcase(path, pattern)
Exemplo n.º 52
0
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""Force a case-sensitive test of a filename with a pattern.
"""

#end_pymotw_header
import fnmatch
import os

pattern = 'FNMATCH_*.PY'
print('Pattern :', pattern)
print()

files = os.listdir('.')

for name in sorted(files):
    print('Filename: {:<25} {}'.format(name,
                                       fnmatch.fnmatchcase(name, pattern)))
Exemplo n.º 53
0
 def includeOk(self, path):
     for pattern in self.patterns:
         if fnmatch.fnmatchcase(path, pattern):
             self.usedPatterns.add(pattern)
             return True
     return False
Exemplo n.º 54
0
from fnmatch import fnmatchcase


def matchesGlob(globStr: str, path: str) -> bool:
	return any(fnmatchcase(path, glob) for glob in globStr.split(','))
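# Usage sketch for the comma-separated glob matcher above (inputs are illustrative):
print(matchesGlob('*.py,*.txt', 'notes.txt'))  # True: the second glob matches
print(matchesGlob('*.py,*.txt', 'image.png'))  # False: no glob matches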
Exemplo n.º 55
0
from fnmatch import fnmatch, fnmatchcase

print(fnmatch('foo.txt', '*.txt'))
print(fnmatch('foo.txt', '?oo.txt'))
print(fnmatch('dat45.txt', 'dat[0-9][0-9].txt'))

names = ['Dat1.csv', 'Dat2.csv', 'config.ini', 'foo.py']

print([name for name in names if fnmatch(name, 'Dat*.csv')])

print(fnmatch('foo.txt', '*.TXT'))
print(fnmatchcase('foo.txt', '*.TXT'))

addresses = [
    '5412 N CLARK ST',
    '1060 W ADDISON ST',
    '1039 W GRANVILLE AVE',
    '2122 N CLARK ST',
    '4802 N BROADWAY',
]

print([addr for addr in addresses if fnmatchcase(addr, '* ST')])
print([addr for addr in addresses if fnmatchcase(addr, '54[0-9][0-9] *CLARK*')])
Exemplo n.º 56
0
 def is_prune(node, name):
     for pat in prune_pat:
         if fnmatch.fnmatchcase(name, pat):
             return True
     return False
Exemplo n.º 57
0
 def connect(self, pattern, callback):
     """Uses glob style strings to match the attribute name"""
     matcher = lambda attr: fnmatch.fnmatchcase(attr, pattern)
     return self.register(matcher, callback)
Exemplo n.º 58
0
    def test_list_pkg_files(self):
        def splitoutput(output):
            files = {}
            curpkg = None
            for line in output.splitlines():
                if line.startswith('\t'):
                    self.assertTrue(curpkg,
                                    'Unexpected non-package line:\n%s' % line)
                    files[curpkg].append(line.strip())
                else:
                    self.assertTrue(
                        line.rstrip().endswith(':'),
                        'Invalid package line in output:\n%s' % line)
                    curpkg = line.split(':')[0]
                    files[curpkg] = []
            return files

        base_libdir = get_bb_var('base_libdir')
        libdir = get_bb_var('libdir')
        includedir = get_bb_var('includedir')
        mandir = get_bb_var('mandir')
        # Test recipe-space package name
        result = runCmd('oe-pkgdata-util list-pkg-files zlib-dev zlib-doc')
        files = splitoutput(result.output)
        self.assertIn('zlib-dev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-doc', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn(os.path.join(includedir, 'zlib.h'), files['zlib-dev'])
        self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['zlib-doc'])
        # Test runtime package name
        result = runCmd('oe-pkgdata-util list-pkg-files -r libz1 libz-dev')
        files = splitoutput(result.output)
        self.assertIn('libz1', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-dev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertGreater(len(files['libz1']), 1)
        libspec = os.path.join(base_libdir, 'libz.so.1.*')
        found = False
        for fileitem in files['libz1']:
            if fnmatch.fnmatchcase(fileitem, libspec):
                found = True
                break
        self.assertTrue(
            found,
            'Could not find zlib library file %s in libz1 package file list: %s'
            % (libspec, files['libz1']))
        self.assertIn(os.path.join(includedir, 'zlib.h'), files['libz-dev'])
        # Test recipe
        result = runCmd('oe-pkgdata-util list-pkg-files -p zlib')
        files = splitoutput(result.output)
        self.assertIn('zlib-dbg', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-doc', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-dev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-staticdev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertNotIn('zlib-locale', list(files.keys()),
                         "listed pkgs. files: %s" % result.output)
        # (ignore ptest, might not be there depending on config)
        self.assertIn(os.path.join(includedir, 'zlib.h'), files['zlib-dev'])
        self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['zlib-doc'])
        self.assertIn(os.path.join(libdir, 'libz.a'), files['zlib-staticdev'])
        # Test recipe, runtime
        result = runCmd('oe-pkgdata-util list-pkg-files -p zlib -r')
        files = splitoutput(result.output)
        self.assertIn('libz-dbg', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-doc', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-dev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-staticdev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz1', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertNotIn('libz-locale', list(files.keys()),
                         "listed pkgs. files: %s" % result.output)
        self.assertIn(os.path.join(includedir, 'zlib.h'), files['libz-dev'])
        self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['libz-doc'])
        self.assertIn(os.path.join(libdir, 'libz.a'), files['libz-staticdev'])
        # Test recipe, unpackaged
        result = runCmd('oe-pkgdata-util list-pkg-files -p zlib -u')
        files = splitoutput(result.output)
        self.assertIn('zlib-dbg', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-doc', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-dev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-staticdev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('zlib-locale', list(files.keys()),
                      "listed pkgs. files: %s" %
                      result.output)  # this is the key one
        self.assertIn(os.path.join(includedir, 'zlib.h'), files['zlib-dev'])
        self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['zlib-doc'])
        self.assertIn(os.path.join(libdir, 'libz.a'), files['zlib-staticdev'])
        # Test recipe, runtime, unpackaged
        result = runCmd('oe-pkgdata-util list-pkg-files -p zlib -r -u')
        files = splitoutput(result.output)
        self.assertIn('libz-dbg', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-doc', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-dev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-staticdev', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz1', list(files.keys()),
                      "listed pkgs. files: %s" % result.output)
        self.assertIn('libz-locale', list(files.keys()),
                      "listed pkgs. files: %s" %
                      result.output)  # this is the key one
        self.assertIn(os.path.join(includedir, 'zlib.h'), files['libz-dev'])
        self.assertIn(os.path.join(mandir, 'man3/zlib.3'), files['libz-doc'])
        self.assertIn(os.path.join(libdir, 'libz.a'), files['libz-staticdev'])
Exemplo n.º 59
0
    def expand_macro(self, formatter, name, content):
        args, kwargs = parse_args(content)
        format = kwargs.get('format', 'compact')
        glob = kwargs.get('glob', '*')
        order = kwargs.get('order')
        desc = as_bool(kwargs.get('desc', 0))

        rm = RepositoryManager(self.env)
        all_repos = dict(rdata for rdata in rm.get_all_repositories().items()
                         if fnmatchcase(rdata[0], glob))

        if format == 'table':
            repo = self._render_repository_index(formatter.context, all_repos,
                                                 order, desc)

            add_stylesheet(formatter.req, 'common/css/browser.css')
            wiki_format_messages = self.config['changeset'] \
                                       .getbool('wiki_format_messages')
            data = {
                'repo': repo,
                'order': order,
                'desc': 1 if desc else None,
                'reponame': None,
                'path': '/',
                'stickyrev': None,
                'wiki_format_messages': wiki_format_messages
            }
            return Chrome(self.env).render_template(formatter.req,
                                                    'repository_index.html',
                                                    data,
                                                    None,
                                                    fragment=True)

        def get_repository(reponame):
            try:
                return rm.get_repository(reponame)
            except TracError:
                return

        all_repos = [(reponame, get_repository(reponame))
                     for reponame in all_repos]
        all_repos = sorted(
            ((reponame, repos) for reponame, repos in all_repos
             if repos and not as_bool(repos.params.get('hidden'))
             and repos.is_viewable(formatter.perm)),
            reverse=desc)

        def repolink(reponame, repos):
            label = reponame or _('(default)')
            return Markup(
                tag.a(label,
                      title=_('View repository %(repo)s', repo=label),
                      href=formatter.href.browser(repos.reponame or None)))

        if format == 'list':
            return tag.dl([
                tag(tag.dt(repolink(reponame, repos)),
                    tag.dd(repos.params.get('description')))
                for reponame, repos in all_repos
            ])
        else:  # compact
            return Markup(', ').join(
                [repolink(reponame, repos) for reponame, repos in all_repos])
Exemplo n.º 60
0
 def is_op_ok(op):
     for op_match in op_blacklist:
         if fnmatchcase(op, op_match):
             print("    skipping: %s (%s)" % (op, op_match))
             return False
     return True
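# Usage sketch for the blacklist check above; `op_blacklist` is a free
# variable in the fragment, so an illustrative one is supplied here:
from fnmatch import fnmatchcase

op_blacklist = ['*.debug_*', 'wm.console_toggle']
print(is_op_ok('mesh.subdivide'))     # True: no pattern matches
print(is_op_ok('wm.console_toggle'))  # False: blacklisted, gets skipped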