Example #1
  def ConvertGlobIntoPathComponents(self, pattern):
    r"""Converts a glob pattern into a list of pathspec components.

    Wildcards are also converted to regular expressions. The pathspec components
    do not span directories, and are marked as a regex or a literal component.

    We also support recursion into directories using the ** notation.  For
    example, /home/**2/foo.txt will find all files named foo.txt recursed 2
    directories deep. If the directory depth is omitted, it defaults to 3.

    Example:
     /home/test/* -> ['home', 'test', '.*\\Z(?ms)']

    Args:
      pattern: A glob expression with wildcards.

    Returns:
      A list of PathSpec instances for each component.

    Raises:
      ValueError: If the glob is invalid.
    """

    components = []
    for path_component in pattern.split("/"):
      # A ** in the path component means recurse into directories that match the
      # pattern.
      m = rdf_paths.GlobExpression.RECURSION_REGEX.search(path_component)
      if m:
        path_component = path_component.replace(m.group(0), "*")

        component = rdf_paths.PathSpec(
            path=fnmatch.translate(path_component),
            pathtype=self.state.pathtype,
            path_options=rdf_paths.PathSpec.Options.RECURSIVE)

        # Allow the user to override the recursion depth.
        if m.group(1):
          component.recursion_depth = int(m.group(1))

      elif self.GLOB_MAGIC_CHECK.search(path_component):
        component = rdf_paths.PathSpec(
            path=fnmatch.translate(path_component),
            pathtype=self.state.pathtype,
            path_options=rdf_paths.PathSpec.Options.REGEX)
      else:
        pathtype = self.state.pathtype
        # TODO(user): This is a backwards compatibility hack. Remove when
        # all clients reach 3.0.0.2.
        if (pathtype == rdf_paths.PathSpec.PathType.TSK and
            re.match("^.:$", path_component)):
          path_component = "%s\\" % path_component
        component = rdf_paths.PathSpec(
            path=path_component,
            pathtype=pathtype,
            path_options=rdf_paths.PathSpec.Options.CASE_INSENSITIVE)

      components.append(component)

    return components
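
The docstring example above shows translate output in its pre-3.8 form ('.*\Z(?ms)'). As a minimal sketch of what fnmatch.translate actually returns, note that the exact output format varies by Python version:

import fnmatch
import re

print(fnmatch.translate('*'))        # '(?s:.*)\Z' on Python 3.8+, '.*\Z(?ms)' on older releases
print(fnmatch.translate('foo.txt'))  # '(?s:foo\.txt)\Z' on Python 3.8+

# The translated pattern is anchored at the end, so it matches whole
# component names rather than prefixes:
assert re.match(fnmatch.translate('*.txt'), 'notes.txt')
assert not re.match(fnmatch.translate('*.txt'), 'notes.txt.bak')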
Example #2
def compare_target_list(target_list, file, commit):
	# iterate files_to_look_for.txt
	for target in helper.checks():
		if ("*" in target) and not (target in no_fly):
			try:
				regex = re.compile(fnmatch.translate(target))
				if regex.search(file):
					f = finding.Finding()
					f.file = file+"_NO_DOWNLOAD"
					add_to_commits(str(commit), f)
			except Exception:
				pass  # ignore invalid patterns
		if target in file:
			f = finding.Finding()
			f.file = file
			add_to_commits(str(commit), f)

	# iterate the gitignore file
	for target in target_list:
		if ("*" in target) and not (target in no_fly):
			try:
				regex = re.compile(fnmatch.translate(target))
				if regex.search(file):
					f = finding.Finding()
					f.is_gitignore = True
					f.file = file+"_NO_DOWNLOAD"
					add_to_commits(str(commit), f)
			except Exception:
				pass  # ignore invalid patterns
		if target in file:
			f = finding.Finding()
			f.file = file
			f.is_gitignore = True
			add_to_commits(str(commit), f)
Example #3
def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
    ''' find files matching file_regex '''
    found = []
    exclude_regex = ''
    include_regex = ''

    if exclude_dirs is not None:
        exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'

    if include_dirs is not None:
        include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'

    for root, dirs, files in os.walk(base_dir):
        if exclude_dirs is not None:
            # filter out excludes for dirs
            dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]

        if include_dirs is not None:
            # filter for includes for dirs
            dirs[:] = [d for d in dirs if re.match(include_regex, d)]

        matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
        found.extend(matches)

    return found
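
A note on the "or r'$.'" fallback used above: joining an empty pattern list yields the empty string, and an empty regex matches everything, which would exclude every directory. The sentinel r'$.' requires a character after the end of the string, so it can never match. A minimal standalone sketch:

import re

empty = r'|'.join([])              # '' -- as a regex this matches anything
sentinel = r'|'.join([]) or r'$.'  # never matches

assert re.match(empty, 'anything')
assert re.match(sentinel, 'anything') is None
assert re.match(sentinel, '') is None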
Example #4
    def return_environments(self, env_pattern, case_sensitive=False):
        """return all environments which match either by glob or exact match"""
        returns = {}

        for item in env_pattern.split(','):
            item = item.strip()
            if item in self._environments:
                env = self._environments[item]
                returns[env.environmentid] = env
                continue

            if case_sensitive:
                match = re.compile(fnmatch.translate(item)).match
            else:
                match = re.compile(fnmatch.translate(item), flags=re.I).match

            done = False
            for env in self.environments:
                for name in env.name, env.environmentid, env.ui_name:
                    if match(name):
                        done = True
                        returns[env.environmentid] = env
                        break
            if done:
                continue

            # If we didn't match to anything in the current locale, try others
            for env in self.environments:
                for name in env.translated_name.values():
                    if match(name):
                        returns[env.environmentid] = env
                        break

        return returns.values()
Example #5
	def __init__(self, dirpaths, filepaths, file_counter):
		self.dirpaths = dirpaths
		self.filepaths = filepaths
		self.patterns = settings.get('patterns', {})
		self.file_counter = file_counter
		self.ignored_files = [fnmatch.translate(patt) for patt in settings.get('exclude_files', [])]
		self.ignored_folders = [fnmatch.translate(patt) for patt in settings.get('exclude_folders', [])]
Example #6
 def test_consumer_matching(self, _load_consumers):
     access_consumer = mock.Mock()
     error_consumer = mock.Mock()
     system_consumer = mock.Mock()
     _load_consumers.return_value = [
         ([re.compile(fnmatch.translate('*access.log*'))],
          access_consumer),
         ([re.compile(fnmatch.translate('*error.log*'))],
          error_consumer),
         ([re.compile(fnmatch.translate('*/cron')),
           re.compile(fnmatch.translate('*/messages'))],
          system_consumer),
         ]
     conf = slurp.Conf(
         state_path=TMP_PATH,
         consumer_paths=['/etc/slurp.d'],
         locking=True,
         lock_timeout=30,
         tracking=True,
         event_sink=None,
         batch_size=256,
         )
     consumers = conf.get_matching_consumers(
         '/my/logs/2012-05-28/access.log')
     self.assertItemsEqual(consumers, [access_consumer])
     consumers = conf.get_matching_consumers(
         '/my/logs/2012-05-28/error.log')
     self.assertItemsEqual(consumers, [error_consumer])
     consumers = conf.get_matching_consumers(
         '/my/logs/2012-05-28/cron')
     self.assertItemsEqual(consumers, [system_consumer])
     consumers = conf.get_matching_consumers(
         '/my/logs/2012-05-28/nada')
     self.assertItemsEqual(consumers, [])
Example #7
 def test_consumer_grouping(self, _load_consumers):
     access_consumer = mock.Mock(group='g1')
     error_consumer = mock.Mock(group='g1')
     system_consumer = mock.Mock(group='g2')
     _load_consumers.return_value = [
         ([re.compile(fnmatch.translate('*access.log*'))],
          access_consumer),
         ([re.compile(fnmatch.translate('*error.log*'))],
          error_consumer),
         ([re.compile(fnmatch.translate('*/cron')),
           re.compile(fnmatch.translate('*/messages'))],
          system_consumer),
         ]
     conf = slurp.Conf(
         state_path=TMP_PATH,
         consumer_paths=['/etc/slurp.d'],
         locking=True,
         lock_timeout=30,
         tracking=True,
         event_sink=None,
         batch_size=256,
         )
     self.assertItemsEqual(conf.get_consumer_groups(), ['g1', 'g2'])
     conf.filter_consumers(['g1', 'g2'])
     self.assertItemsEqual(conf.get_consumer_groups(), ['g1', 'g2'])
     conf.filter_consumers(['g1'])
     self.assertItemsEqual(conf.get_consumer_groups(), ['g1'])
     conf.filter_consumers(['g2'])
     self.assertItemsEqual(conf.get_consumer_groups(), [])
Example #8
def get_files_to_commit(autooptions):
    """
    Look through the local directory to pick up files to check
    """
    workingdir = autooptions['working-directory']
    includes = autooptions['track']['includes']
    excludes = autooptions['track']['excludes']

    # transform glob patterns to regular expressions
    # print("Includes ", includes) 
    includes = r'|'.join([fnmatch.translate(x) for x in includes])
    excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'

    matched_files = []
    for root, dirs, files in os.walk(workingdir):

        # print("Looking at ", files)

        # exclude dirs
        # dirs[:] = [os.path.join(root, d) for d in dirs]
        dirs[:] = [d for d in dirs if not re.match(excludes, d)]

        # exclude/include files
        files = [f for f in files if not re.match(excludes, f)]
        #print("Files after excludes", files)
        #print(includes) 
        files = [f for f in files if re.match(includes, f)]
        #print("Files after includes", files) 
        files = [os.path.join(root, f) for f in files]

        matched_files.extend(files)

    return matched_files
Example #9
	def __init__(self, dirpaths, filepaths, view):
		self.view = view
		self.dirpaths = dirpaths
		self.filepaths = filepaths

		if settings.get('case_sensitive', False):
			case = 0
		else:
			case = re.IGNORECASE

		patt_patterns = settings.get('patterns', {})
		patt_files = settings.get('exclude_files', [])
		patt_folders = settings.get('exclude_folders', [])

		match_patterns = '|'.join(patt_patterns.values())
		match_files = [fnmatch.translate(p) for p in patt_files]
		match_folders = [fnmatch.translate(p) for p in patt_folders]

		self.patterns = re.compile(match_patterns, case)
		self.priority = re.compile(r'\(([0-9]{1,2})\)')
		self.exclude_files = [re.compile(p) for p in match_files]
		self.exclude_folders = [re.compile(p) for p in match_folders]

		self.open = self.view.window().views()
		self.open_files = [v.file_name() for v in self.open if v.file_name()]
Example #10
    def match_files(self, prefix, includes, excludes):
        """ Filters os.walk() with include and exclude patterns."""

        #translate glob patterns to regular expressions
        includes_pattern = r"|".join([fnmatch.translate(x) for x in includes])
        excludes_pattern = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."
        hash_pattern = r".*\.[0-9a-f]{12}\..*"
        matches = []

        for root, dirs, files in os.walk(prefix, topdown=True):
            #exclude dirs which match by modifying 'dirs' in
            #place using the slice notation. This is necessary
            #for os.walk() to pick up the changes.
            dirs[:] = [os.path.join(root, d) for d in dirs]
            dirs[:] = [d for d in dirs if not re.match(excludes_pattern,
                                                       d.split(root)[1])]
            # exclude/include files
            files = [os.path.join(root, f) for f in files]
            files = [f for f in files if not re.match(excludes_pattern, f)]
            files = [f for f in files if re.match(includes_pattern, f.split(prefix)[1])]

            #only allow files with md5 hash preceding file extension
            #to avoid issues where non-hashed files are uploaded
            #to the cdn and cached and later modified. These modifications
            #will not be visible until the CDN ttl elapses and
            #will cause issues.
            files = [f for f in files if re.match(hash_pattern, f)]

            for fname in files:
                matches.append(fname)
        return matches
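
For illustration, hash_pattern above admits only names that carry a 12-hex-digit segment between two dots (the file names here are hypothetical):

import re

hash_pattern = r".*\.[0-9a-f]{12}\..*"

assert re.match(hash_pattern, "static/app.3f2a91c04be7.js")
assert re.match(hash_pattern, "css/site.0123456789ab.min.css")
assert not re.match(hash_pattern, "static/app.js")        # no hash segment
assert not re.match(hash_pattern, "img/logo.ABC123.png")  # wrong length and case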
Example #11
 def update_gitignore_test(self, _get_all_re):
     _get_all_re.return_value = {
         "/a/b/.gitignore": {
             1: [],
             2: [],
             3: []
         }
     }
     _handler_gitignore_list = list(self.handler.gitignore_list)
     _handler_gitignore_dict = dict(self.handler.gitignore_dict)
     self.handler.update_gitignore("/a/b/.gitignore")
     self.assertEqual(
         self.handler.gitignore_list,
         ["/a/b/", "/a/"]
     )
     self.assertEqual(
         self.handler.gitignore_dict,
         {
             "/a/.gitignore": {
                 1: [],
                 2: [re.compile(fnmatch.translate("1.py"))],
                 3: [re.compile(fnmatch.translate("test.py"))]
             },
             "/a/b/.gitignore": {
                 1: [],
                 2: [],
                 3: []
             }
         }
     )
     self.handler.gitignore_list = _handler_gitignore_list
     self.handler.gitignore_dict = _handler_gitignore_dict
Example #12
    def bgSearch(self, pat):

        #self.clear()

        if self.frozen:
            return
            
        if not pat.startswith('r:'):
            hpat = fnmatch.translate('*'+ pat + '*').replace(r"\Z(?ms)","")
            bpat = fnmatch.translate(pat).rstrip('$').replace(r"\Z(?ms)","")
            flags = re.IGNORECASE
        else:
            hpat = pat[2:]
            bpat = pat[2:]
            flags = 0
        combo = self.widgetUI.comboBox.currentText()
        if combo == "All":
            hNodes = self.c.all_positions()
        elif combo == "Subtree":
            hNodes = self.c.p.self_and_subtree()
        else:
            hNodes = [self.c.p]
        hm = self.find_h(hpat, hNodes, flags)
        #self.addHeadlineMatches(hm)
        #bm = self.c.find_b(bpat, flags)
        #self.addBodyMatches(bm)
        return hm, []
Example #13
 def update_list(self):
     includes = [
         re.compile(fnmatch.translate(x))
         for x in (
             self.props['include'].split(';')
             if self.props.get('include')
             else []
         )
     ]
     excludes = [
         re.compile(fnmatch.translate(x))
         for x in (
             self.props['exclude'].split(';')
             if self.props.get('exclude')
             else []
         )
     ]
     roms = sorted(filter(
         lambda x: (not any(y.match(x) for y in excludes) or
             any(y.match(x) for y in includes)),
         os.listdir(os.path.expanduser(self.props['path']))))
     self.set_state({
         'roms': roms,
         'last_page': ceil(len(roms) / self.props['page_size']) - 1,
         'select': -1,
         'page': 0,
         'error': None,
     })
Example #14
    def return_categories(self, pattern, ignore_case=True):
        """return all categories which match either by glob or exact match"""
        returns = {}

        for item in pattern.split(','):
            item = item.strip()
            if item in self._categories:
                cat = self._categories[item]
                returns[cat.categoryid] = cat
                continue

            if not ignore_case:
                match = re.compile(fnmatch.translate(item)).match
            else:
                match = re.compile(fnmatch.translate(item), flags=re.I).match

            done = False
            for cat in self.categories:
                for name in cat.name, cat.categoryid, cat.ui_name:
                    if match(name):
                        done = True
                        returns[cat.categoryid] = cat
                        break
            if done:
                continue

            for cat in self.categories:
                for name in cat.translated_name.values():
                    if match(name):
                        returns[cat.categoryid] = cat
                        break

        return returns.values()
Example #15
def iterate_presentation_files(path=None, excludes=None, includes=None):
    """Iterates the repository presentation files relative to 'path',
    not including themes. Note that 'includes' take priority."""

    # Defaults
    if includes is None:
        includes = []
    if excludes is None:
        excludes = []

    # Transform glob patterns to regular expressions
    includes_pattern = r'|'.join([fnmatch.translate(x) for x in includes]) or r'$.'
    excludes_pattern = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'
    includes_re = re.compile(includes_pattern)
    excludes_re = re.compile(excludes_pattern)

    def included(root, name):
        """Returns True if the specified file is a presentation file."""
        full_path = os.path.join(root, name)
        # Explicitly included files take priority
        if includes_re.match(full_path):
            return True
        # Ignore special and excluded files
        return (not specials_re.match(name)
            and not excludes_re.match(full_path))

    # Get a filtered list of paths to be built
    for root, dirs, files in os.walk(path):
        dirs[:] = [d for d in dirs if included(root, d)]
        files = [f for f in files if included(root, f)]
        for f in files:
            yield os.path.relpath(os.path.join(root, f), path)
Example #16
def find_dirs(base_dir, exclude_dirs, include_dirs, dir_name):
    ''' find directories matching dir_name '''
    found = []
    exclude_regex = ''
    include_regex = ''

    if exclude_dirs is not None:
        exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'

    if include_dirs is not None:
        include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'

    for root, dirs, _files in os.walk(base_dir):
        if exclude_dirs is not None:
            # filter out excludes for dirs
            dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]

        if include_dirs is not None:
            # filter for includes for dirs
            dirs[:] = [d for d in dirs if re.match(include_regex, d)]

        if dir_name in dirs:
            found.append(os.path.join(root, dir_name))
            dirs[:] = []  # prune in place so os.walk stops descending; rebinding 'dirs' has no effect

    return found
Example #17
    def return_groups(self, group_pattern, case_sensitive=False):
        """return all groups which match either by glob or exact match"""
        returns = {}

        for item in group_pattern.split(','):
            item = item.strip()
            if item in self._groups:
                thisgroup = self._groups[item]
                returns[thisgroup.groupid] = thisgroup
                continue
            
            if case_sensitive:
                match = re.compile(fnmatch.translate(item)).match
            else:
                match = re.compile(fnmatch.translate(item), flags=re.I).match

            done = False
            for group in self.groups:
                for name in group.name, group.groupid, group.ui_name:
                    if match(name):
                        done = True
                        returns[group.groupid] = group
                        break
            if done:
                continue

            # If we didn't match to anything in the current locale, try others
            for group in self.groups:
                for name in group.translated_name.values():
                    if match(name):
                        returns[group.groupid] = group
                        break

        return returns.values()
Example #18
def list_files(includes, excludes, start="."):
    """
    Returns a list of files matching the glob expressions of the included
    parameter and excluding the files and directories matching the parameter
    excludes.

    :param includes: the files to match, using glob's format.
    :type includes: list of str
    :param excludes: the files and directories to exclude, using glob's format.
    :type excludes: list of str
    """
    # transform glob patterns to regular expressions
    includes = r"|".join([fnmatch.translate(x) for x in includes])
    excludes = r"|".join([fnmatch.translate(x) for x in excludes]) or r"$."

    files_list = []

    for root, dirs, files in os.walk(start):
        # exclude dirs
        if excludes:
            dirs[:] = [d for d in dirs if not re.match(excludes, d)]

        # exclude/include files
        if excludes:
            files = [f for f in files if not re.match(excludes, f)]
        files = [f for f in files if re.match(includes, f)]
        files = [os.path.join(root, f) for f in files]

        for fname in files:
            files_list.append(fname)

    return files_list
Example #19
def filter(l, a, pattern="", case_sensitive=True, regex=False):
    """ Return a subset of the a collection (l) whose item's string 
    attribute (a) matches 'pattern'. 
    
    Uses Unix shell-style wildcards when regex False.
    
    """
    if pattern == "":
        return [getattr(i, a) for i in l]
    import re

    if regex:
        if not case_sensitive:
            return [getattr(i, a) for i in l if re.search(pattern, getattr(i, a), re.IGNORECASE) is not None]
        else:
            return [getattr(i, a) for i in l if re.search(pattern, getattr(i, a)) is not None]
    else:
        import fnmatch

        if not case_sensitive:
            return [
                getattr(i, a) for i in l if re.search(fnmatch.translate(pattern), getattr(i, a), re.IGNORECASE) is not None
            ]
        else:
            return [getattr(i, a) for i in l if re.search(fnmatch.translate(pattern), getattr(i, a)) is not None]
Example #20
    def run(self, caller, request, inventory):
        site_names = None
        data_names = None
        if 'node' in request:
            site_names = []
            nodepat = re.compile(fnmatch.translate(request['node']))
            for site in inventory.sites:
                if nodepat.match(site):
                    site_names.append(site)
            if len(site_names) < 1: site_names = None

        if 'dataset' in request:
            data_names = []
            dset_name = request['dataset']

            if '*' in dset_name:
                pattern = re.compile(fnmatch.translate(dset_name))
                for thename in inventory.datasets.iterkeys():
                    if pattern.match(thename):
                        data_names.append(thename)
            elif dset_name in inventory.datasets:
                data_names.append(dset_name)
            if len(data_names) < 1: data_names = None


        cpquests = self.copy_manager.get_requests(sites=site_names, items=data_names)
        dequests = self.dele_manager.get_requests(sites=site_names, items=data_names)

        a1 = self.pro_requests(cpquests,request,inventory)
        a2 = self.pro_requests(dequests,request,inventory)
        response = a1 + a2

        return {'request': response}
Example #21
def find_files(directory, includes=None, excludes=None):
    """
    Find files given a starting directory and optionally a set of includes and/or excludes patterns.

    The patterns should be globs (ex: ['*.py', '*.rst'])

    :param directory: the starting directory for the find
    :type directory: str
    :param includes: list of file glob patterns to find
    :type includes: list
    :param excludes: list of file or directory glob patterns to exclude
    :type excludes: list
    :return: iterator of found file paths as strings
    :rtype: iterator(str)
    """
    if includes is None:
        includes = []
    if excludes is None:
        excludes = []
    # transform glob patterns to regular expressions
    includes = r'|'.join([fnmatch.translate(x) for x in includes])
    excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'

    for root, dirs, files in os.walk(directory):
        # exclude dirs
        dirs[:] = [os.path.join(root, d) for d in dirs]
        dirs[:] = [d for d in dirs if not re.match(excludes, d)]

        # exclude/include files
        files = [os.path.join(root, f) for f in files]
        files = [f for f in files if not re.match(excludes, f)]
        files = [f for f in files if re.match(includes, f)]

        for filename in files:
            yield filename
Example #22
    def handle_attributes(cls, patterns: str, input_attributes: dict) -> dict:
        """Handle `template-attributes`.

        Pattern is glob style and `-` prefix means exclude pattern.

        :param str patterns: handling attribute value
        :param dict input_attributes: source attributes
        :return: filtered attributes
        :rtype: dict
        """
        includes = []
        excludes = []

        patterns = patterns.split(',') if ',' in patterns else patterns.split()
        for pattern in patterns:
            pattern = pattern.strip()
            if not pattern.startswith('-'):
                includes.append(fnmatch.translate(pattern))
            else:
                excludes.append(fnmatch.translate(pattern[1:]))

        includes = re.compile('^(' + '|'.join(includes) + ')$', flags=re.IGNORECASE)
        excludes = re.compile('^(' + '|'.join(excludes) + ')$', flags=re.IGNORECASE)

        result = {}

        for name, value in input_attributes.items():
            if excludes.match(name):
                continue
            elif includes.match(name):
                result[name] = value

        return result
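
A standalone sketch of the include/exclude splitting above, with hypothetical attribute names (note that an empty include list would compile to '^()$', which matches only the empty string):

import fnmatch
import re

patterns = 'data-*, -data-internal*'
attrs = {'data-id': 1, 'data-name': 'x', 'data-internal-key': 'y', 'role': 'z'}

includes, excludes = [], []
for pattern in (patterns.split(',') if ',' in patterns else patterns.split()):
    pattern = pattern.strip()
    if pattern.startswith('-'):
        excludes.append(fnmatch.translate(pattern[1:]))
    else:
        includes.append(fnmatch.translate(pattern))

includes_re = re.compile('^(' + '|'.join(includes) + ')$', flags=re.IGNORECASE)
excludes_re = re.compile('^(' + '|'.join(excludes) + ')$', flags=re.IGNORECASE)

result = {name: value for name, value in attrs.items()
          if not excludes_re.match(name) and includes_re.match(name)}
print(result)  # {'data-id': 1, 'data-name': 'x'}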
Example #23
	def __init__(self, dir, filter = "*", exclude = "", ssh=None):
		self.__list = []
		self.__dir = os.path.abspath(dir)
		self.__filter = ""
		self.__ssh = ssh.open_sftp() if ssh is not None else False

		if dir.startswith('http') or dir.startswith('ftp'):
			self.__list.append((dir,''))
			return


		for tmp_f in filter.split(','):
			tmp_f = tmp_f.strip(' ')
			self.__filter = "%s|(%s)" % (self.__filter, fnmatch.translate(tmp_f))
		
		self.__filter = self.__filter.strip('|')
		self.__filter = re.compile(self.__filter)
		
		self.__exclude = FileList.__gExclude
		for tmp_f in exclude.split(','):
			tmp_f = tmp_f.strip(' ')
			self.__exclude = "%s|(%s)" % (self.__exclude, fnmatch.translate(tmp_f))
		
		self.__exclude = self.__exclude.strip('|')
		self.__exclude = re.compile(self.__exclude)



		self.__explore(dir)
Example #24
 def poll(self):
     while gtk.events_pending():
         gtk.main_iteration()
     if not gtk3:
       screen = wnck.screen_get_default()
     else:
       screen = wnck.Screen.get_default()
     screen.force_update()
     window_list = screen.get_windows()
     for w in window_list:
         if self._frame_name:
             current_window = w.get_name()
             if re.search( \
                 fnmatch.translate(self._frame_name), current_window,
                 re.U | re.M | re.L) \
                 or re.search(fnmatch.translate(re.sub("(^frm)|(^dlg)", "",
                                                       self._frame_name)),
                              re.sub(" *(\t*)|(\n*)", "", current_window),
                              re.U | re.M | re.L):
                 # If window name specified, then activate just that window
                 w.activate(int(time.time()))
                 self.success = True
                 break
         else:
             break
Example #25
 def poll(self):
     while gtk.events_pending():
         gtk.main_iteration()
     if not gtk3:
       screen = wnck.screen_get_default()
     else:
       screen = wnck.Screen.get_default()
     # Added screen.force_update() based on
     # http://stackoverflow.com/questions/5794309/how-can-i-get-a-list-of-windows-with-wnck-using-pygi
     screen.force_update()
     window_list = screen.get_windows()
     for w in window_list:
         if self._frame_name:
             current_window = w.get_name()
             if re.search( \
                 fnmatch.translate(self._frame_name), current_window,
                 re.U | re.M | re.L) \
                 or re.search(fnmatch.translate(re.sub("(^frm)|(^dlg)", "",
                                                       self._frame_name)),
                              re.sub(" *(\t*)|(\n*)", "", current_window),
                              re.U | re.M | re.L):
                 # If window name specified, then close just that window
                 w.close(int(time.time()))
                 self.success = True
                 break
         else:
             # Close all window
             w.close(int(time.time()))
             self.success = True
Example #26
def DepotToolsPylint(input_api, output_api):
  """Gather all the pylint logic into one place to make it self-contained."""
  white_list = [
    r'^[^/]*\.py$',
    r'^testing_support/[^/]*\.py$',
    r'^tests/[^/]*\.py$',
    r'^recipe_modules/.*\.py$',  # Allow recursive search in recipe modules.
  ]
  black_list = list(input_api.DEFAULT_BLACK_LIST)
  if os.path.exists('.gitignore'):
    with open('.gitignore') as fh:
      lines = [l.strip() for l in fh.readlines()]
      black_list.extend([fnmatch.translate(l) for l in lines if
                         l and not l.startswith('#')])
  if os.path.exists('.git/info/exclude'):
    with open('.git/info/exclude') as fh:
      lines = [l.strip() for l in fh.readlines()]
      black_list.extend([fnmatch.translate(l) for l in lines if
                         l and not l.startswith('#')])
  disabled_warnings = [
    'R0401',  # Cyclic import
    'W0613',  # Unused argument
  ]
  return input_api.canned_checks.GetPylint(
      input_api,
      output_api,
      white_list=white_list,
      black_list=black_list,
      disabled_warnings=disabled_warnings)
Example #27
  def sieve(self, ifos=None, description=None, segment=None, duration=None,
    exact_match=False):
    """
    Return a FrameCache object with those FrameCacheEntries that contain the
    given patterns (or overlap, in the case of segment).  If
    exact_match is True, then non-None ifos, description, and
    segment patterns must match exactly.
    
    Bash-style wildcards (*?) are allowed for ifos and description.
    """
    if exact_match:
      segment_func = lambda e: e.segment == segment
    else:
      if ifos is not None: ifos = "*" + ifos + "*"
      if description is not None: description = "*" + description + "*"
      segment_func = lambda e: segment.intersects(e.segment)
    
    c = self
    
    if ifos is not None:
      ifos_regexp = re.compile(fnmatch.translate(ifos))
      c = [entry for entry in c if ifos_regexp.match(entry.observatory) is not None]
    
    if description is not None:
      descr_regexp = re.compile(fnmatch.translate(description))
      c = [entry for entry in c if descr_regexp.match(entry.description) is not None]
    
    if segment is not None:
      c = [entry for entry in c if segment_func(entry)]
    
    if duration is not None:
      c = [entry for entry in c if entry.duration==duration]

    return self.__class__(c)
Example #28
def getTreeFiles(baseDir, includePattern=None, excludePattern=None):
    import fnmatch
    import re
    def walkFunc(arg, dir, names):
        (files, includeRegexp, excludeRegexp) = arg
        for f in names:
            filePath = os.path.join(dir, f)
            if not os.path.isfile(filePath):
                continue
            if includeRegexp and not includeRegexp.match(f):
                continue
            if excludeRegexp and excludeRegexp.match(f):
                continue
            files.append(filePath)
    files = []
    if includePattern:
        try:
            includeRegexp = re.compile(fnmatch.translate(includePattern))
        except:
            print >>sys.stderr, "Invalid include file pattern %s" % includePattern
            sys.exit(1)
    else:
        includeRegexp = None
    if excludePattern:
        try:
            excludeRegexp = re.compile(fnmatch.translate(excludePattern))
        except:
            print >>sys.stderr, "Invalid exclude file pattern %s" % excludePattern
            sys.exit(1)
    else:
        excludeRegexp = None
    os.path.walk(baseDir, walkFunc, (files, includeRegexp, excludeRegexp))
    return files
Example #29
    def get_deps_graph(self) -> dag.DAG:
        """Build dependency graph for test methods.

        Returns:
            the dependency graph built from _internal_deps. Nodes in the
            graph are 2-tuples (method_name, method).

        Attributes:
            _internal_deps: maps a target to its prerequisite(s). Glob
            wildcards are supported for targets and prerequisites.

        Raises:
            Py3DAGError: When cyclic dependency is detected, with the exception
            of self-dependency.
        """
        graph = dag.DAG()
        for test in self._get_all_tests():
            graph.add_node(*test)
        for src_pattern, dst_pattern_list in self._internal_deps.items():
            src_reg = fnmatch.translate(src_pattern)
            for dst_pattern in dst_pattern_list:
                dst_reg = fnmatch.translate(dst_pattern)
                # Add edges (src -> dst) to graph
                for src in graph._nodes.keys():
                    for dst in graph._nodes.keys():
                        if src == dst: # Ignore self dependency.
                            continue
                        if re.match(src_reg, src) and re.match(dst_reg, dst):
                            graph.add_edge(src, dst)
        return graph
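
A self-contained sketch of the glob-to-edge expansion above, using a hypothetical _internal_deps mapping and hypothetical test names:

import fnmatch
import re

# every test matching 'test_*_query' depends on 'test_setup_db'
internal_deps = {'test_setup_db': ['test_*_query']}
tests = ['test_setup_db', 'test_user_query', 'test_order_query', 'test_misc']

for src_pattern, dst_pattern_list in internal_deps.items():
    src_reg = fnmatch.translate(src_pattern)
    for dst_pattern in dst_pattern_list:
        dst_reg = fnmatch.translate(dst_pattern)
        for src in tests:
            for dst in tests:
                if src == dst:  # ignore self dependency
                    continue
                if re.match(src_reg, src) and re.match(dst_reg, dst):
                    print('%s -> %s' % (src, dst))
# test_setup_db -> test_user_query
# test_setup_db -> test_order_query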
Example #30
 def find_mnames(self, qp):
     ''' given string, return array of un-aliased strings'''
     if (not qp)   : return []
     cacheData = self.getQpFromCache(qp)
     if cacheData:
         return cacheData
     first, star, last = self.splitOnStars(qp)
     #self.log.info("find_mnames(): qp: %s, fsl: f=%s, s=%s, l=%s" % (qp, first, star, last))
     if not first:   # starts with a wildcard, get root docs.
         rdocs = self.getRootDocs()
         rdocNames = [x.get('metricname', None) for x in rdocs if x is not None]
         tlist = []
         for name in rdocNames:
             pat = fnmatch.translate(star)  # get regex pattern
             gotMatch = bool(re.match(pat, name))
             #self.log.info("find_mnames(): First: %s, Star: '%s', last: '%s', regex: '%s', gotMatch: %s" % (first, star, last, pat, gotMatch))
             if not gotMatch:
                 continue
             fname = '%s.%s' % (name, last)
             if not last:
                 fname = name
             if gotMatch:
                 tlist.extend(self.find_mnames(fname))
         return tlist
     realName, linkpart, realpart = self.translateLinkedPath(first)
     #self.log.info("find_mnames(): tlp returned rn: %s, lp: %s, rp: %s." % (realName, linkpart, realpart))
     realMt = self.getByName(realName, saveNew=False)
     #self.log.info("find_mnames(): realname returned: %s" % (realMt))
     if not realMt:
         #self.log.info("find_mnames(): non, returning empty.")
         return []
     if not star:
         # all of it is literal, just return self.
         #self.log.info("find_mnames(): no star, all literal, returning first.")
         return [first]
     # have star.  
     tlist = []
     children = realMt['children']
     linkedKids = []
     pat = fnmatch.translate(star)  # get regex pattern
     pat = r'%s.%s' % (first, pat)
     for childName in children:
         realName = realMt.get('metricname', "")
         if (not linkpart):
             cname = childName
         else:
             cname = childName.replace(linkpart, realpart)
         gotMatch = bool(re.match(pat, cname))
         #self.log.info("find_mnames(): CHILD: %s, rn: %s, cname: %s, gotMatch: %s" % (childName, realName, cname, gotMatch))
         if gotMatch:
             linkedKids.append(cname)
     # self.log.info("find_mnames(): created linkedKids list: %s" % (linkedKids))
     ret = []
     for n in linkedKids:
         if last:
             ret.extend(self.find_mnames("%s.%s" % (n, last)))
         else:
             ret.extend(self.find_mnames(n))
     #self.log.info("find_mnames(): returning: %s" % (ret))
     return ret
Example #31
 def __init__(self, glob):
     super(GlobComponent, self).__init__()
     self.regex = re.compile(fnmatch.translate(glob), re.I)
Example #32
# find out whether any file within library_path is newer than timestamp_file
reset_stamp = False
for root, dirnames, filenames in os.walk(library_path):
    if reset_stamp:
        break
    for filename in fnmatch.filter(filenames, '*'):
        f = os.path.join(root, filename)
        if os.path.getmtime(f) > stamp_time:
            reset_stamp = True
            break

# list all files in the stamp directory
f = []
for (dirpath, dirnames, filenames) in os.walk(timestamp_path):
    f.extend(filenames)
    break
# exclude the *-test.cmake files from the list
regex = fnmatch.translate('*-test.cmake')
reobj = re.compile(regex)
f = [file for file in f if not reobj.match(file)]

if reset_stamp:
    # sanity check
    if os.path.dirname(timestamp_file) == timestamp_path:
        # remove anything BUT the *-test.cmake files
        for file in f:
            os.remove(os.path.join(timestamp_path, file))
Example #33
def happi_cli(args):
    parser = get_parser()
    # print happi usage if no arguments are provided
    if not args:
        parser.print_usage()
        return
    args = parser.parse_args(args)

    # Logging Level handling
    if args.verbose:
        shown_logger = logging.getLogger()
        level = "DEBUG"
    else:
        shown_logger = logging.getLogger('happi')
        level = "INFO"
    coloredlogs.install(level=level, logger=shown_logger,
                        fmt='[%(asctime)s] - %(levelname)s -  %(message)s')
    logger.debug("Set logging level of %r to %r", shown_logger.name, level)

    # Version endpoint
    if args.version:
        print(f'Happi: Version {happi.__version__} from {happi.__file__}')
        return
    logger.debug('Command line arguments: %r' % args)

    client = happi.client.Client.from_config(cfg=args.path)
    logger.debug("Happi client: %r" % client)
    logger.debug('Happi command: %r' % args.cmd)

    if args.cmd == 'search':
        logger.debug("We're in the search block")

        # Get search criteria into dictionary for use by client
        client_args = {}
        range_list = []
        regex_list = []
        is_range = False
        for user_arg in args.search_criteria:
            is_range = False
            if '=' in user_arg:
                criteria, value = user_arg.split('=', 1)
            else:
                criteria = 'name'
                value = user_arg
            if criteria in client_args:
                logger.error(
                    'Received duplicate search criteria %s=%r (was %r)',
                    criteria, value, client_args[criteria]
                )
                return
            if value.replace('.', '').isnumeric():
                logger.debug('Changed %s to float', value)
                value = str(float(value))

            if is_a_range(value):
                start, stop = value.split(',')
                start = float(start)
                stop = float(stop)
                is_range = True
                if start < stop:
                    range_list = client.search_range(criteria, start, stop)
                else:
                    logger.error('Invalid range, make sure start < stop')

            # skip the criteria for range values
            # it won't be a valid criteria for search_regex()
            if not is_range:
                client_args[criteria] = fnmatch.translate(value)

        regex_list = client.search_regex(**client_args)
        results = regex_list + range_list

        # find the repeated items
        res_size = len(results)
        repeated = []
        for i in range(res_size):
            k = i + 1
            for j in range(k, res_size):
                if results[i] == results[j] and results[i] not in repeated:
                    repeated.append(results[i])

        # we only want to return the ones that have been repeated when
        # they have been matched with both search_regex() & search_range()
        if repeated:
            final_results = repeated
        elif regex_list and not is_range:
            # only matched with search_regex()
            final_results = regex_list
        elif range_list and is_range:
            # only matched with search_range()
            final_results = range_list
        else:
            final_results = []

        if args.json:
            json.dump([dict(res.item) for res in final_results], indent=2,
                      fp=sys.stdout)
        else:
            for res in final_results:
                res.item.show_info()

        if not final_results:
            logger.error('No devices found')
        return final_results
    elif args.cmd == 'add':
        logger.debug('Starting interactive add')
        registry = happi.containers.registry
        if args.clone:
            clone_source = client.find_device(name=args.clone)
            # Must use the same container if cloning
            response = registry.entry_for_class(clone_source.__class__)
        else:
            # Keep Device at registry for backwards compatibility but filter
            # it out of new devices options
            options = os.linesep.join(
                [k for k, _ in registry.items() if k != "Device"]
            )
            logger.info(
                'Please select a container, or press enter for generic '
                'Ophyd Device container: %s%s', os.linesep, options
            )
            response = input()
            if response and response not in registry:
                logger.info(f'Invalid device container {response}')
                return
            elif not response:
                response = 'OphydItem'

        container = registry[response]
        kwargs = {}
        for info in container.entry_info:
            valid_value = False
            while not valid_value:
                if args.clone:
                    default = getattr(clone_source, info.key)
                else:
                    default = info.default
                logger.info(f'Enter value for {info.key}, default={default}, '
                            f'enforce={info.enforce}')
                item_value = input()
                if not item_value:
                    if info.optional or args.clone:
                        logger.info(f'Selecting default value {default}')
                        item_value = default
                    else:
                        logger.info('Not an optional field!')
                        continue
                try:
                    info.enforce_value(item_value)
                    valid_value = True
                    kwargs[info.key] = item_value
                except Exception:
                    logger.info(f'Invalid value {item_value}')

        device = client.create_device(container, **kwargs)
        logger.info('Please confirm the following info is correct:')
        device.show_info()
        ok = input('y/N\n')
        if 'y' in ok:
            logger.info('Adding device')
            device.save()
        else:
            logger.info('Aborting')
    elif args.cmd == 'edit':
        logger.debug('Starting edit block')
        device = client.find_device(name=args.name)
        is_invalid_field = False
        for edit in args.edits:
            field, value = edit.split('=', 1)
            try:
                getattr(device, field)
                logger.info('Setting %s.%s = %s', args.name, field, value)
                setattr(device, field, value)
            except Exception as e:
                is_invalid_field = True
                logger.error('Could not edit %s.%s: %s', args.name, field, e)
        if is_invalid_field:
            sys.exit(1)
        device.save()
        device.show_info()
    elif args.cmd == 'load':
        logger.debug('Starting load block')
        logger.info(f'Creating shell with devices {args.device_names}')
        devices = {}
        for name in args.device_names:
            devices[name] = client.load_device(name=name)

        from IPython import start_ipython  # noqa
        start_ipython(argv=['--quick'], user_ns=devices)
    elif args.cmd == "update":
        # parse input
        input_ = " ".join(args.json).strip()
        if input_ == "-":
            items_input = json.load(sys.stdin)
        else:
            items_input = json.loads(input_)
        # insert
        for item in items_input:
            item = client.create_device(device_cls=item["type"], **item)
            exists = item["_id"] in [c["_id"] for c in client.all_items]
            client._store(item, insert=not exists)
Example #34
 def __init__(self, field_name, is_allowed_to_be_empty, length, rule, data_format, empty_value=''):
     super(PatternFieldFormat, self).__init__(
         field_name, is_allowed_to_be_empty, length, rule, data_format, empty_value)
     self.pattern = fnmatch.translate(rule)
     self.regex = re.compile(self.pattern, re.IGNORECASE | re.MULTILINE)
Example #35
 def __init__(self, pat, child_parts):
     self.pat = re.compile(fnmatch.translate(pat))
     _Selector.__init__(self, child_parts)
Example #36
def MakePatternRE(patterns):
    if patterns:
        pattern_re = '|'.join(fnmatch.translate('*%s*' % p) for p in patterns)
    else:
        pattern_re = '.*'
    return re.compile(pattern_re)
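
A brief usage sketch (assuming fnmatch and re are imported alongside the function): each pattern is wrapped in '*...*', so it matches anywhere in the string, and an empty pattern list falls back to matching everything.

matcher = MakePatternRE(['foo', 'bar'])
assert matcher.match('path/to/foo.txt')
assert matcher.match('rebar.c')
assert not matcher.match('baz.py')
assert MakePatternRE([]).match('anything')  # '.*' fallback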
Example #37
# @Date:   11-10-2017
# @Last modified time: 11-10-2017
# @License: GNU Public License v3

#from: https://stackoverflow.com/questions/5141437/filtering-os-walk-dirs-and-files
import fnmatch
import os
import os.path
import re

includes = ['*.jpg', '*.png'] # for files only
#excludes = ['faces'] # for dirs and files

# transform glob patterns to regular expressions
includes = r'|'.join([fnmatch.translate(x) for x in includes])
#excludes = r'|'.join([fnmatch.translate(x) for x in excludes]) or r'$.'

for root, dirs, files in os.walk('.'):

    # exclude dirs
    #dirs[:] = [os.path.join(root, d) for d in dirs]
    #dirs[:] = [d for d in dirs if not re.match(excludes, d)]

    # exclude/include files
    files = [os.path.join(root, f) for f in files]
    #files = [f for f in files if not re.match(excludes, f)]
    files = [f for f in files if re.match(includes, f)]

    for fname in files:
        print(fname)
Example #38
def diskusage(*args):
    '''
    Return the disk usage for this minion

    Usage::

        salt '*' status.diskusage [paths and/or filesystem types]

    CLI Example:

    .. code-block:: bash

        salt '*' status.diskusage         # usage for all filesystems
        salt '*' status.diskusage / /tmp  # usage for / and /tmp
        salt '*' status.diskusage ext?    # usage for ext[234] filesystems
        salt '*' status.diskusage / ext?  # usage for / and all ext filesystems
    '''
    selected = set()
    fstypes = set()
    if not args:
        # select all filesystems
        fstypes.add('*')
    else:
        for arg in args:
            if arg.startswith('/'):
                # select path
                selected.add(arg)
            else:
                # select fstype
                fstypes.add(arg)

    if fstypes:
        # determine which mount points host the specified fstypes
        regex = re.compile(
            '|'.join(
                fnmatch.translate(fstype).format('(%s)') for fstype in fstypes
            )
        )
        # ifile source of data varies with OS, otherwise all the same
        if __grains__['kernel'] == 'Linux':
            try:
                with salt.utils.fopen('/proc/mounts', 'r') as fp_:
                    ifile = fp_.read().splitlines()
            except OSError:
                return {}
        elif __grains__['kernel'] in ('FreeBSD', 'SunOS'):
            ifile = __salt__['cmd.run']('mount -p').splitlines()
        else:
            ifile = []

        for line in ifile:
            comps = line.split()
            if __grains__['kernel'] == 'SunOS':
                if len(comps) >= 4:
                    mntpt = comps[2]
                    fstype = comps[3]
                    if regex.match(fstype):
                        selected.add(mntpt)
            else:
                if len(comps) >= 3:
                    mntpt = comps[1]
                    fstype = comps[2]
                    if regex.match(fstype):
                        selected.add(mntpt)

    # query the filesystems disk usage
    ret = {}
    for path in selected:
        fsstats = os.statvfs(path)
        blksz = fsstats.f_bsize
        available = fsstats.f_bavail * blksz
        total = fsstats.f_blocks * blksz
        ret[path] = {"available": available, "total": total}
    return ret
Example #39
    'src/secp256k1/src/java/org_bitcoin_Secp256k1Context.h',
    # univalue:
    'src/univalue/test/object.cpp',
    'src/univalue/lib/univalue_escapes.h',
    # auto generated:
    'src/qt/bitcoinstrings.cpp',
    'src/chainparamsseeds.h',
    # other external copyrights:
    'src/tinyformat.h',
    'src/leveldb/util/env_win.cc',
    'src/crypto/ctaes/bench.c',
    'test/functional/test_framework/bignum.py',
    # python init:
    '*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m)
                                        for m in EXCLUDE]))

INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.py']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m)
                                        for m in INCLUDE]))


def applies_to_file(filename):
    return ((EXCLUDE_COMPILED.match(filename) is None)
            and (INCLUDE_COMPILED.match(filename) is not None))


################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
Example #40
        return GafferUI.StandardNodule(plug)
    else:
        return None


GafferUI.Nodule.registerNodule(Gaffer.ParameterisedHolderNode.staticTypeId(),
                               "parameters", GafferUI.CompoundNodule)
GafferUI.Nodule.registerNodule(
    Gaffer.ParameterisedHolderComputeNode.staticTypeId(), "parameters",
    GafferUI.CompoundNodule)
GafferUI.Nodule.registerNodule(
    Gaffer.ParameterisedHolderDependencyNode.staticTypeId(), "parameters",
    GafferUI.CompoundNodule)

GafferUI.Nodule.registerNodule(Gaffer.ParameterisedHolderNode.staticTypeId(),
                               fnmatch.translate("parameters.*"),
                               __parameterNoduleCreator)
GafferUI.Nodule.registerNodule(
    Gaffer.ParameterisedHolderComputeNode.staticTypeId(),
    fnmatch.translate("parameters.*"), __parameterNoduleCreator)
GafferUI.Nodule.registerNodule(
    Gaffer.ParameterisedHolderDependencyNode.staticTypeId(),
    fnmatch.translate("parameters.*"), __parameterNoduleCreator)

##########################################################################
# Metadata
##########################################################################


def __plugDescription(plug):
Example #41
def get_matcher(pattern):
    if pattern not in _CACHED:
        _CACHED[pattern] = re.compile(fnmatch.translate(pattern)).match
    return _CACHED[pattern]
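
get_matcher relies on a module-level cache that the excerpt does not show; a minimal sketch of the full pattern, with _CACHED assumed to be a plain dict:

import fnmatch
import re

_CACHED = {}

def get_matcher(pattern):
    if pattern not in _CACHED:
        _CACHED[pattern] = re.compile(fnmatch.translate(pattern)).match
    return _CACHED[pattern]

m = get_matcher('*.log')
assert m('server.log')
assert m('server.txt') is None
assert get_matcher('*.log') is m  # second lookup hits the cache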
Example #42
 def _register_event(callback):
     self.patterns.append(
         (pattern, re.compile(fnmatch.translate(pattern))))
     self.callbacks[pattern].append(callback)
     return callback
Example #43
 def __init__(self, globs_to_ignore):
     self.patterns_to_ignore = \
         [re.compile(fnmatch.translate(glob), re.IGNORECASE)
          for glob in globs_to_ignore]
     self.result_by_path = {}
Example #44
def compute_up(expr, df, **kwargs):
    arrs = [df[name].str.contains('^%s$' % fnmatch.translate(pattern))
            for name, pattern in expr.patterns.items()]
    return df[np.logical_and.reduce(arrs)]
Example #45
chosenFormats = args.format
dateLimit = args.dateLimit

gd_client = oauthLogin()
verbose = args.verbose
rootDirs = args.directory  # set the directory you want to start from

albumNaming = args.naming
mode = args.mode
noupdatealbummetadata = args.noupdatealbummetadata
for comparison in Comparisons:
    r = getattr(args, "override:%s" % comparison, None)
    if r:
        mode[comparison] = r

excludes = r'|'.join([fnmatch.translate(x) for x in args.skip]) or r'$.'
server_excludes = r'|'.join([fnmatch.translate(x)
                             for x in args.skipserver]) or r'$.'

print("Excluding %s on client and %s on server" % (excludes, server_excludes))

albums = Albums(rootDirs, albumNaming, excludes, args.replace,
                args.namingextract)
albums.scanWebAlbums(args.owner, args.deletedups, server_excludes)
albums.uploadMissingAlbumsAndFiles(args.compareattributes, mode, args.test,
                                   args.allowDelete)

if args.purge:
    albums.deleteEmptyWebAlbums(args.owner)
Example #46
 def addDescription(self, s):
     s = _stripeol(fnmatch.translate(s)).replace(r"\ ", " ")
     p = re.compile(r"\s+".join(s.split()), self.ignorecase and re.I or 0)
     self.description.append(p)
Example #47
def unittest(args):
    """run the JUnit tests"""

    junit_arg_actions = []
    junit_args = []

    class MxJUnitWrapperArg(Action):
        def __init__(self, **kwargs):
            kwargs['required'] = False
            Action.__init__(self, **kwargs)
            junit_arg_actions.append(self)

        def __call__(self, parser, namespace, values, option_string=None):
            junit_args.append('-' + self.dest)
            junit_args.append(values)

    class MxJUnitWrapperBoolArg(Action):
        def __init__(self, **kwargs):
            kwargs['required'] = False
            kwargs['nargs'] = 0
            Action.__init__(self, **kwargs)
            junit_arg_actions.append(self)

        def __call__(self, parser, namespace, values, option_string=None):
            junit_args.append('-' + self.dest)

    parser = ArgumentParser(
        prog='mx unittest',
        description='run the JUnit tests',
        formatter_class=RawDescriptionHelpFormatter,
        epilog=unittestHelpSuffix,
    )

    parser.add_argument('--blacklist',
                        help='run all testcases not specified in <file>',
                        metavar='<file>')
    parser.add_argument('--whitelist',
                        help='run testcases specified in <file> only',
                        metavar='<file>')
    parser.add_argument('--verbose',
                        help='enable verbose JUnit output',
                        dest='JUnitVerbose',
                        action=MxJUnitWrapperBoolArg)
    parser.add_argument('--very-verbose',
                        help='enable very verbose JUnit output',
                        dest='JUnitVeryVerbose',
                        action=MxJUnitWrapperBoolArg)
    parser.add_argument(
        '--max-class-failures',
        help=
        'stop after N test classes that have a failure (default is no limit)',
        metavar='<N>',
        dest='JUnitMaxClassFailures',
        action=MxJUnitWrapperArg)
    parser.add_argument('--fail-fast',
                        help='alias for --max-class-failures=1',
                        dest='JUnitFailFast',
                        action=MxJUnitWrapperBoolArg)
    parser.add_argument(
        '--enable-timing',
        help='enable JUnit test timing (requires --verbose/--very-verbose)',
        dest='JUnitEnableTiming',
        action=MxJUnitWrapperBoolArg)
    parser.add_argument(
        '--regex',
        help='run only testcases matching a regular expression',
        metavar='<regex>')
    parser.add_argument('--color',
                        help='enable color output',
                        dest='JUnitColor',
                        action=MxJUnitWrapperBoolArg)
    parser.add_argument('--gc-after-test',
                        help='force a GC after each test',
                        dest='JUnitGCAfterTest',
                        action=MxJUnitWrapperBoolArg)
    parser.add_argument(
        '--record-results',
        help='record test class results to passed.txt and failed.txt',
        dest='JUnitRecordResults',
        action=MxJUnitWrapperBoolArg)
    record_help_msg_shared = 'If <file> is "-", the tests will be printed to stdout. ' +\
                  'In contrast to --record-results this prints not only the test class but also the test method.'
    parser.add_argument('--print-passed',
                        metavar="<file>",
                        dest='JUnitRecordPassed',
                        action=MxJUnitWrapperArg,
                        help='record passed test class results in <file>. ' +
                        record_help_msg_shared)
    parser.add_argument('--print-failed',
                        metavar="<file>",
                        dest='JUnitRecordFailed',
                        action=MxJUnitWrapperArg,
                        help='record failed test class results in <file>.  ' +
                        record_help_msg_shared)
    parser.add_argument('--suite',
                        help='run only the unit tests in <suite>',
                        metavar='<suite>')
    parser.add_argument('--repeat',
                        help='run each test <n> times',
                        dest='JUnitRepeat',
                        action=MxJUnitWrapperArg,
                        type=is_strictly_positive,
                        metavar='<n>')
    parser.add_argument(
        '--open-packages',
        dest='JUnitOpenPackages',
        action=MxJUnitWrapperArg,
        metavar='<module>/<package>[=<target-module>(,<target-module>)*]',
        help=
        "export and open packages regardless of module declarations (see more detail and examples below)"
    )
    eagerStacktrace = parser.add_mutually_exclusive_group()
    eagerStacktrace.add_argument(
        '--eager-stacktrace',
        action='store_const',
        const=True,
        dest='eager_stacktrace',
        help='print test errors as they occur (default)')
    eagerStacktrace.add_argument(
        '--no-eager-stacktrace',
        action='store_const',
        const=False,
        dest='eager_stacktrace',
        help='print test errors after all tests have run')

    # Augment usage text to mention test filters and options passed to the VM
    usage = parser.format_usage().strip()
    if usage.startswith('usage: '):
        usage = usage[len('usage: '):]
    parser.usage = usage + ' [test filters...] [VM options...]'

    ut_args = []
    delimiter = False
    # check for delimiter
    while len(args) > 0:
        arg = args.pop(0)
        if arg == '--':
            delimiter = True
            break
        ut_args.append(arg)

    if delimiter:
        # all arguments before '--' must be recognized
        parsed_args = parser.parse_args(ut_args)
    else:
        # parse all known arguments
        parsed_args, args = parser.parse_known_args(ut_args)

    # Remove junit_args values from parsed_args
    for a in junit_arg_actions:
        parsed_args.__dict__.pop(a.dest)

    if parsed_args.whitelist:
        try:
            with open(parsed_args.whitelist) as fp:
                parsed_args.whitelist = [
                    re.compile(fnmatch.translate(l.rstrip()))
                    for l in fp.readlines() if not l.startswith('#')
                ]
        except IOError:
            mx.log('warning: could not read whitelist: ' +
                   parsed_args.whitelist)
    if parsed_args.blacklist:
        try:
            with open(parsed_args.blacklist) as fp:
                parsed_args.blacklist = [
                    re.compile(fnmatch.translate(l.rstrip()))
                    for l in fp.readlines() if not l.startswith('#')
                ]
        except IOError:
            mx.log('warning: could not read blacklist: ' +
                   parsed_args.blacklist)

    if parsed_args.eager_stacktrace is None:
        junit_args.append('-JUnitEagerStackTrace')
    parsed_args.__dict__.pop('eager_stacktrace')

    _unittest(args, ['@Test', '@Parameters'], junit_args,
              **parsed_args.__dict__)
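# A minimal sketch (the helper and test names below are hypothetical, not part
# of mx) of how the compiled whitelist patterns built above can be applied to
# fully qualified test class names: a class is kept if any regex produced by
# fnmatch.translate() matches it.
def filter_test_classes(class_names, whitelist):
    # whitelist is a list of compiled regexes, one per glob line in the file
    return [c for c in class_names if any(r.match(c) for r in whitelist)]

# filter_test_classes(['com.oracle.FooTest', 'com.oracle.BarTest'],
#                     [re.compile(fnmatch.translate('*FooTest'))])
# -> ['com.oracle.FooTest']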
Exemple #48
0

# SOURCE: https://docs.python.org/3.7/library/fnmatch.html#fnmatch.translate


import fnmatch
import os
import re


wildcard = 'get*r.py'

items = fnmatch.filter(os.listdir('../'), wildcard)
print(*items, sep='\n')
# get_current_script_dir.py
# get_quarter.py
# get_random_hex_color.py

print()

def is_match(regex, text) -> bool:
    m = re.match(regex, text)
    return bool(m)

regex = fnmatch.translate(wildcard)
items = list(filter(lambda text: is_match(regex, text), os.listdir('../')))
print(*items, sep='\n')
# get_current_script_dir.py
# get_quarter.py
# get_random_hex_color.py
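# For reference, this is the regex fnmatch.translate() produces for the
# wildcard above (output from CPython 3.7; the exact form varies slightly
# across Python versions):
print(fnmatch.translate(wildcard))
# (?s:get.*r\.py)\Z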
Exemple #49
0
            found = 0
            for filepat in args[1:]:
                found += do_grep(filepat, textpat, ".")

            sys.exit(found)
        except Exception as ex:
            report_exception("GREP mode error", ex, -1)

    if _run_mode == RunMode.GLOB:
        try:
            assert len(args) > 0, "Arguments are required"
            if len(args) == 1:
                """ Adding default dir if necessary """
                args.append(".")

            filepat_re = args[0] if _filepat_re else fnmatch.translate(args[0])
            if _verbosity > 3:
                print("Searching for: r'%s'" % filepat_re)

            found = 0
            compiled_re = re.compile(filepat_re, _re_flags)
            for dirname in args[1:]:
                found += do_glob(compiled_re, dirname)
            sys.exit(found)
        except Exception as ex:
            report_exception("GLOB mode error", ex, -1)

    if _run_mode == RunMode.TAG:
        """ args should be tagfile filepattern - mytags.tag f:main"""
        try:
            assert len(args) == 1 or len(
Exemple #50
0
# Module-level cache of compiled glob regexes (needed by _fn_matches below).
_pattern_cache = {}

def _fn_matches(fn, glob):
    """Return whether the supplied file name fn matches the given glob pattern."""
    if glob not in _pattern_cache:
        _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
    return _pattern_cache[glob].match(fn)
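# A quick usage sketch (the file names are illustrative): the second call with
# '*.txt' reuses the regex cached in _pattern_cache instead of recompiling it.
for name in ('report.csv', 'notes.txt', 'todo.txt'):
    print(name, bool(_fn_matches(name, '*.txt')))
# report.csv False
# notes.txt True
# todo.txt True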
Exemple #51
0
 def compile_pattern(self, pattern):
     return re.compile(fnmatch.translate(pattern)).fullmatch
Exemple #52
0
def add_global_ignore_glob(ignore):
    _global_ignore_globs.append(re.compile(fnmatch.translate(ignore)))
Exemple #53
0
def _compile_patterns(pattern_list):
    return [re.compile(fnmatch.translate(p)) for p in pattern_list]
Exemple #54
0
 def parse(patterns_str):
     if patterns_str:
         regexes = (fnmatch.translate(pat) for pat in patterns_str.split())
         return re.compile(r"^(" + r"|".join(regexes) + r")$")
Exemple #55
0
			wd = wd + '/' + cmd[1]
			display = wd
	if cmd[0] == 'get':
		fnam = wd + '/' + cmd[1]
		dbx.files_download_to_file(os.path.abspath(cmd[1]), fnam)
	if cmd[0] == 'mkdir':
		dbx.files_create_folder(wd + '/' + cmd[1])
	if cmd[0] == 'put':
		with open(cmd[1]) as f:
			dbx.files_upload(f.read(), wd + '/' + cmd[1], mute=True)
	if cmd[0] == 'mput':
		for filename in glob.glob(cmd[1]):
			with open(filename) as f:
				dbx.files_upload(f.read(), wd + '/' + filename, mute=True)
	if cmd[0] == 'mget':
		# Translate the shell wildcard into a regex and download every
		# entry in the current Dropbox folder whose name matches it.
		reobj = re.compile(fnmatch.translate(cmd[1]))
		for entry in dbx.files_list_folder(wd).entries:
			if reobj.match(entry.name):
				fnam = wd + '/' + entry.name
				print(entry.name)
				dbx.files_download_to_file(os.path.abspath(entry.name), fnam)

	cmd = input(display + '$ ')
Exemple #56
0
 def compile_pattern(self, pattern):
     return re.compile(fnmatch.translate(pattern), re.IGNORECASE).fullmatch
Exemple #57
0
    def __init__(self, pat: AnyStr, *, period_special: bool = True):
        """
		Arguments
		---------
		pat
			The glob pattern to use for matching
		period_special
			Whether a leading period in file/directory names should be matchable by
			``*``, ``?`` and ``[…]`` – traditionally they are not, but many modern
			shells allow one to disable this behaviour
		"""
        super().__init__(isinstance(pat, bytes))

        self.period_special = period_special

        self._sep = utils.maybe_fsencode(os.path.sep, pat)
        dblstar = utils.maybe_fsencode("**", pat)
        dot = utils.maybe_fsencode(".", pat)
        pat_ndot = utils.maybe_fsencode(r"(?![.])", pat)

        # Normalize path separator
        if os.path.altsep:
            pat = pat.replace(utils.maybe_fsencode(os.path.altsep, pat),
                              self._sep)

        # Sanity checks for stuff that will definitely NOT EVER match
        # (there is another one in the loop below)
        assert not os.path.isabs(
            pat), "Absolute matching patterns will never match"

        # Note the extra final slash for its effect of only matching directories
        #
        # (TBH, I find it hard to see how that is useful, but everybody does it
        #  and it keeps things consistent overall – something to only match files
        #  would be nice however.)
        self._dir_only = pat.endswith(self._sep)

        self._pat = []
        for label in pat.split(self._sep):
            # Skip over useless path components
            if len(label) < 1 or label == dot:
                continue

            assert label != dot + dot, 'Matching patterns containing ".." will never match'

            if label == dblstar:
                self._pat.append(None)
            elif dblstar in label:
                raise NotImplementedError(
                    "Using double-star (**) and other characters in the same glob "
                    "path label ({0}) is not currently supported – please do file "
                    "an issue if you need this!".format(os.fsdecode(label)))
            else:
                if not isinstance(label, bytes):
                    re_expr = fnmatch.translate(label)
                else:
                    re_expr = fnmatch.translate(
                        label.decode("latin-1")).encode("latin-1")

                if period_special and not label.startswith(dot):
                    re_expr = pat_ndot + re_expr
                self._pat.append(re.compile(re_expr))
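# A standalone illustration of the pat_ndot trick used above: prefixing the
# translated regex with (?![.]) keeps '*' from matching names that start with
# a period (the traditional shell behaviour).
import fnmatch
import re

rx = re.compile(r"(?![.])" + fnmatch.translate("*.txt"))
print(bool(rx.match("notes.txt")))    # True
print(bool(rx.match(".hidden.txt")))  # False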
Exemple #58
0
    def setup(self):
        self.add_copy_spec("/etc/kubernetes")
        self.add_copy_spec("/run/flannel")

        self.add_env_var([
            'KUBECONFIG',
            'KUBERNETES_HTTP_PROXY',
            'KUBERNETES_HTTPS_PROXY',
            'KUBERNETES_NO_PROXY'
        ])

        svcs = [
            'kubelet',
            'kube-apiserver',
            'kube-proxy',
            'kube-scheduler',
            'kube-controller-manager'
        ]

        for svc in svcs:
            self.add_journal(units=svc)

        # We can only grab kubectl output from the master
        if not self.check_is_master():
            return

        kube_get_cmd = "get -o json "
        for subcmd in ['version', 'config view']:
            self.add_cmd_output('%s %s' % (self.kube_cmd, subcmd))

        # get all namespaces in use
        kn = self.collect_cmd_output('%s get namespaces' % self.kube_cmd)
        # namespace is the 1st word on line, until the line has spaces only
        kn_output = kn['output'].splitlines()[1:]
        knsps = [n.split()[0] for n in kn_output if n and len(n.split())]

        resources = [
            'deployments',
            'ingresses',
            'limitranges',
            'pods',
            'policies',
            'pvc',
            'rc',
            'resourcequotas',
            'routes',
            'services'
        ]

        # these are not namespaced, must pull separately.
        global_resources = [
            'namespaces',
            'nodes',
            'projects',
            'pvs'
        ]
        self.add_cmd_output([
            "%s get %s" % (self.kube_cmd, res) for res in global_resources
        ])
        # Also collect master metrics
        self.add_cmd_output("%s get --raw /metrics" % self.kube_cmd)

        # CNV is not part of the base installation, but can be added
        if self.is_installed('kubevirt-virtctl'):
            resources.extend(['vms', 'vmis'])
            self.add_cmd_output('virtctl version')

        for n in knsps:
            knsp = '--namespace=%s' % n
            if self.get_option('all'):
                k_cmd = '%s %s %s' % (self.kube_cmd, kube_get_cmd, knsp)

                self.add_cmd_output('%s events' % k_cmd)

                for res in resources:
                    self.add_cmd_output('%s %s' % (k_cmd, res), subdir=res)

            if self.get_option('describe'):
                # need to drop json formatting for this
                k_cmd = '%s %s' % (self.kube_cmd, knsp)
                for res in resources:
                    r = self.exec_cmd('%s get %s' % (k_cmd, res))
                    if r['status'] == 0:
                        k_list = [k.split()[0] for k in
                                  r['output'].splitlines()[1:]]
                        for k in k_list:
                            k_cmd = '%s %s' % (self.kube_cmd, knsp)
                            self.add_cmd_output(
                                '%s describe %s %s' % (k_cmd, res, k),
                                subdir=res
                            )

            if self.get_option('podlogs'):
                k_cmd = '%s %s' % (self.kube_cmd, knsp)
                r = self.exec_cmd('%s get pods' % k_cmd)
                if r['status'] == 0:
                    pods = [p.split()[0] for p in
                            r['output'].splitlines()[1:]]
                    # allow shell-style regex
                    reg = (translate(self.get_option('podlogs-filter')) if
                           self.get_option('podlogs-filter') else None)
                    for pod in pods:
                        if reg and not re.match(reg, pod):
                            continue
                        self.add_cmd_output('%s logs %s' % (k_cmd, pod),
                                            subdir='pods')

        if not self.get_option('all'):
            k_cmd = '%s get --all-namespaces=true' % self.kube_cmd
            for res in resources:
                self.add_cmd_output('%s %s' % (k_cmd, res), subdir=res)
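# A standalone sketch of the podlogs-filter logic above (the pod names are
# made up): the shell-style glob is translated once, then matched per pod.
import re
from fnmatch import translate

pods = ['etcd-master-0', 'kube-proxy-abc12', 'coredns-xyz34']
reg = translate('kube-*')  # anchored regex derived from the glob
print([p for p in pods if re.match(reg, p)])
# ['kube-proxy-abc12']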
Exemple #59
0
                         pinnumber_update_function=series_params.pin_number_generator[1],
                         pinname_update_function=pinname_update_function))

    if series_params.mirror:
        drawing.mirrorHorizontal()


if __name__ == '__main__':
    modelfilter = ""
    libname = 'conn_new'
    for arg in sys.argv[1:]:
        if arg.startswith("sf="):
            modelfilter = arg[len("sf="):]
        if arg.startswith("o="):
            libname = arg[len("o="):]

    if len(modelfilter) == 0:
        modelfilter = "*"

    model_filter_regobj = re.compile(fnmatch.translate(modelfilter))

    generator = SymbolGenerator(libname)
    for series_name, series_params in connector_params.items():
        if model_filter_regobj.match(series_name):
            for num_pins_per_row in series_params.pin_per_row_range:
                generateSingleSymbol(generator, series_params,
                                     num_pins_per_row)

    generator.writeFiles()
Exemple #60
0
 def KEYS(self, pattern: bytes = None) -> List[bytes]:
     re_pattern = fnmatch.translate(pattern.decode() if pattern else "*")
     rgx = re.compile(re_pattern.encode())
     return [key for key in self.data if rgx.match(key)]
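# A usage sketch: the surrounding FakeStore class is hypothetical; only the
# KEYS body comes from the snippet above. It shows glob-style key lookup over
# a byte-keyed in-memory store, with the translated regex re-encoded to bytes.
import fnmatch
import re
from typing import List

class FakeStore:
    def __init__(self, data):
        self.data = data

    def KEYS(self, pattern: bytes = None) -> List[bytes]:
        re_pattern = fnmatch.translate(pattern.decode() if pattern else "*")
        rgx = re.compile(re_pattern.encode())
        return [key for key in self.data if rgx.match(key)]

store = FakeStore({b'user:1': b'a', b'user:2': b'b', b'session:9': b'c'})
print(store.KEYS(b'user:*'))  # [b'user:1', b'user:2']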