Code example #1
File: build_modules_gui.py Project: erik-morgan/code
    def iter_dir(self, *args, pat='*'):
        dirs = [self.dirs[a] for a in args]
        steps = (step for d in dirs for step in os.walk(d))
        for path, folds, files in steps:
            files = fnfilter(files, '[!.]*')
            folds[:] = [fold for fold in folds if not fold.startswith('.')]
            yield from ((path, f) for f in fnfilter(files, pat))
Code example #2
File: test_manager.py Project: leorenc/ldap2pg
def test_diff_acls(mocker):
    from ldap2pg.acl import Acl, AclItem
    from ldap2pg.manager import SyncManager

    acl = Acl(name='connect', revoke='REVOKE {role}', grant='GRANT {role}')
    nogrant = Acl(name='nogrant', revoke='REVOKE')
    norvk = Acl(name='norvk', grant='GRANT')
    m = SyncManager(acl_dict={a.name: a for a in [acl, nogrant, norvk]})

    item0 = AclItem(acl=acl.name, dbname='backend', role='daniel')
    pgacls = set([
        item0,
        AclItem(acl=acl.name, dbname='backend', role='alice'),
        AclItem(acl=norvk.name, role='torevoke'),
    ])
    ldapacls = set([
        item0,
        AclItem(acl=acl.name, dbname='backend', role='david'),
        AclItem(acl=nogrant.name, role='togrant'),
    ])

    queries = [q.args[0] for q in m.diff(pgacls=pgacls, ldapacls=ldapacls)]

    assert not fnfilter(queries, "REVOKE daniel*")
    assert fnfilter(queries, "REVOKE alice*")
    assert fnfilter(queries, "GRANT david*")
Code example #3
def get_deployment_files():
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    
    ignore_file = next((os.path.join(root, f) for f in ['.gitignore', '.hgignore', '.tfignore', '.ignore']
                       if os.path.isfile(os.path.join(root, f))), None)
    if ignore_file:
        with open(ignore_file, 'r', encoding='utf-8') as f:
            ignore = set(line.strip() for line in f
                         if line and not line.lstrip().startswith('#') and not line.lstrip().startswith('//'))
        ignore.add(os.path.relpath(ignore_file, root))
    else:
        ignore = set()
    ignore.add(os.path.basename(os.path.dirname(os.path.abspath(__file__))))

    all_files = []
    for dirpath, dirnames, filenames in os.walk(root):
        reldirs = [os.path.relpath(os.path.join(dirpath, d), root) for d in dirnames]
        relfiles = [os.path.relpath(os.path.join(dirpath, f), root) for f in filenames]
        if ignore:
            remove = set(os.path.basename(d) for pat in ignore for d in fnfilter(reldirs, pat))
            if VERBOSE and remove:
                print('Excluding:\n  ' + '\n  '.join(remove))
            dirnames[:] = (d for d in dirnames if d not in remove)
            remove = set(os.path.basename(f) for pat in ignore for f in fnfilter(relfiles, pat))
            if VERBOSE and remove:
                print('Excluding:\n  ' + '\n  '.join(remove))
            filenames[:] = (f for f in filenames if f not in remove)
        all_files.extend((f, os.path.relpath(f, root).replace('\\', '/')) for f in 
            (os.path.join(dirpath, basef) for basef in filenames))

    return all_files
Code example #4
File: test_privileges.py Project: rveede/ldap2pg
def test_diff(mocker):
    from ldap2pg.privilege import Privilege, Grant, Acl

    priv = Privilege(name='priv', revoke='REVOKE {role}', grant='GRANT {role}')
    nogrant = Privilege(name='nogrant', revoke='REVOKE')
    norvk = Privilege(name='norvk', grant='GRANT')
    privileges = {p.name: p for p in [priv, nogrant, norvk]}

    item0 = Grant(privilege=priv.name, dbname='backend', role='daniel')
    pgacl = Acl([
        item0,
        Grant(privilege=priv.name, dbname='backend', role='alice'),
        Grant(priv.name, dbname='backend', role='irrelevant', full=None),
        Grant(privilege=norvk.name, role='torevoke'),
    ])
    ldapacl = Acl([
        item0,
        Grant(privilege=priv.name, dbname='backend', role='david'),
        Grant(privilege=nogrant.name, role='togrant'),
    ])

    queries = [q.args[0] for q in pgacl.diff(ldapacl, privileges)]

    assert not fnfilter(queries, 'REVOKE "daniel"*')
    assert fnfilter(queries, 'REVOKE "alice"*')
    assert fnfilter(queries, 'GRANT "david"*')
Code example #5
File: test_role.py Project: codeaditya/ldap2pg
def test_diff_not_rename():
    from ldap2pg.role import Role, RoleSet

    pgmanagedroles = RoleSet()
    pgallroles = pgmanagedroles.union({
        Role('ambigue_from'),
        Role('AMBIGUE_FROM'),
        Role('Ambigue_To'),
        Role('bothmin'),
        Role('BothMix'),
    })
    ldaproles = RoleSet([
        Role('Ambigue_From'),
        Role('ambigue_to'),
        Role('AMBIGUE_TO'),
        Role('bothmin'),
        Role('Bothmin'),
        Role('BothMix'),
        Role('bothmix'),
    ])
    queries = [q.args[0] for q in pgmanagedroles.diff(ldaproles, pgallroles)]

    assert not fnfilter(queries, '* RENAME TO *')
    assert fnfilter(queries, '*CREATE ROLE "Ambigue_From"*')
    assert fnfilter(queries, '*CREATE ROLE "ambigue_to"*')
    assert fnfilter(queries, '*CREATE ROLE "AMBIGUE_TO"*')
Code example #6
File: test_role.py Project: zurikus/ldap2pg
def test_drop():
    from ldap2pg.role import Role

    role = Role(name='toto', members=['titi'])

    queries = [q.args[0] for q in role.drop()]

    assert fnfilter(queries, '*REASSIGN OWNED*DROP OWNED BY "toto";*')
    assert fnfilter(queries, 'DROP ROLE "toto";')
Code example #7
File: test_role.py Project: jrjsmrtn/ldap2pg
def test_create():
    from ldap2pg.manager import Role

    role = Role(name='toto', members=['titi'])

    queries = [q.args[0] for q in role.create()]

    assert fnfilter(queries, 'CREATE ROLE "toto" *;')
    assert fnfilter(queries, 'GRANT "toto" TO "titi";')
Code example #8
def test_drop():
    from ldap2pg.manager import Role

    role = Role(name='toto', members=['titi'])

    queries = [q.args[0] for q in role.drop()]

    assert fnfilter(queries, "*REASSIGN OWNED*DROP OWNED BY toto;*")
    assert fnfilter(queries, "DROP ROLE toto;")
Code example #9
def test_create():
    from ldap2pg.manager import Role

    role = Role(name='toto', members=['titi'])

    queries = [q.args[0] for q in role.create()]

    assert fnfilter(queries, "CREATE ROLE toto *;")
    assert fnfilter(queries, "GRANT toto TO titi;")
Code example #10
def test_create():
    from ldap2pg.manager import Role

    role = Role(name='toto', members=['titi'])

    queries = [q.args[0] for q in role.create()]

    assert fnfilter(queries, "CREATE ROLE toto *;")
    assert fnfilter(queries, "INSERT INTO pg_catalog.pg_auth_members*")
Code example #11
def test_drop():
    from ldap2pg.manager import Role

    role = Role(name='toto', members=['titi'])

    queries = [q.args[0] for q in role.drop()]

    assert fnfilter(queries, "DROP ROLE toto;")
    assert fnfilter(queries, "*DELETE FROM pg_catalog.pg_auth_members*")
Code example #12
File: build_modules.py Project: erik-morgan/code
def get_path(idnum):
    # draw test without using re:
    # if len(max(''.join(str(int(c.isdigit())) for c in idnum).split('0'))) > 4:
    pattern = '*/{0}*'.format
    if re.search(r'[0-9]{5}', idnum):
        paths = fnfilter(draws, pattern(idnum + '.'))
        if not paths:
            paths = fnfilter(draws, pattern(idnum.rsplit('-', 1)[0] + '.'))
    else:
        paths = fnfilter(pdfs, pattern(idnum))
    return paths[0] if paths else idnum
Code example #13
File: searcher.py Project: nicr9/greptools
def glob_recursive(ptrn):
    """Returns a list of paths under ./ that match a given glob pattern."""
    ptrn = ptrn[:-1] if ptrn[-1] == '/' else ptrn

    dir_matches = []
    file_matches = []
    for root, dirnames, filenames in walk(getcwd()):
        for dirname in fnfilter(dirnames, ptrn):
            dir_matches.append(os.path.relpath(os.path.join(root, dirname)))
        for filename in fnfilter(filenames, ptrn):
            file_matches.append(os.path.join(root, filename))

    return dir_matches, file_matches
Code example #14
def test_alter():
    from ldap2pg.manager import Role

    a = Role(name='toto', members=['titi'], options=dict(LOGIN=True))
    b = Role(name='toto', members=['tata'], options=dict(LOGIN=False))

    queries = [q.args[0] for q in a.alter(a)]
    assert not queries

    queries = [q.args[0] for q in a.alter(b)]

    assert fnfilter(queries, "ALTER ROLE toto *;")
    assert fnfilter(queries, "GRANT toto TO tata;")
    assert fnfilter(queries, "REVOKE toto FROM titi;")
Code example #15
def test_alter():
    from ldap2pg.manager import Role

    a = Role(name='toto', members=['titi'], options=dict(LOGIN=True))
    b = Role(name='toto', members=['tata'], options=dict(LOGIN=False))

    queries = [q.args[0] for q in a.alter(a)]
    assert not queries

    queries = [q.args[0] for q in a.alter(b)]

    assert fnfilter(queries, "ALTER ROLE toto *;")
    assert fnfilter(queries, "INSERT INTO pg_catalog.pg_auth_members*")
    assert fnfilter(queries, "*DELETE FROM pg_catalog.pg_auth_members*")
Code example #16
File: test_role.py Project: zurikus/ldap2pg
def test_alter():
    from ldap2pg.role import Role

    a = Role(name='toto', members=['titi'], options=dict(LOGIN=True))
    b = Role(name='toto', members=['tata'], options=dict(LOGIN=False))

    queries = [q.args[0] for q in a.alter(a)]
    assert not queries

    queries = [q.args[0] for q in a.alter(b)]

    assert fnfilter(queries, 'ALTER ROLE "toto" *;')
    assert fnfilter(queries, 'GRANT "toto" TO "tata";')
    assert fnfilter(queries, 'REVOKE "toto" FROM "titi";')
Code example #17
File: push.py Project: georgeyumnam/pylada
def remote_iglob(path, ssh, sftp):
  """ Globs remote files. """
  from os.path import dirname, basename, join
  from stat import S_ISDIR
  from fnmatch import filter as fnfilter
  dir, suffix = dirname(path), basename(path)
  for dir in ssh.exec_command("ls -d " + dir)[1].read()[:-1].split('\n'):
    paths = [join(dir, u) for u in sftp.listdir(dir)]
    if len(suffix) != 0:
      if '~' not in path: paths = fnfilter(paths, path)
      else: paths = fnfilter(paths, path.replace('~/', '*/'))
    for path in paths:
      try: attr = sftp.stat(path)
      except: continue
      yield path, S_ISDIR(attr.st_mode)
Code example #18
File: cc.py Project: getsmap/smap411
def find_files(ext):
    paths = []
    for base_dir in BASE_DIRECTORIES:
        for root, directories, filenames in walk(base_dir):
            paths.extend(map(lambda f: join(root, f), fnfilter(filenames, ext)))
    print 'Found %d files matching pattern %s' % (len(paths), ext)
    return paths
Code example #19
        def all_python_files_recursively(directory):
            """Get all python files in this directory and subdirectories."""
            py_files = []
            for root, _, files in os.walk(directory):
                py_files += fnfilter([os.path.join(root, f) for f in files], "*.py")

            return py_files
Code example #20
File: procset.py Project: adi0321/PyPPL
    def __getitem__(self, item, _ignore_default=True):
        """@API:
		Process selector, always return Proxy object
		@params:
			item (any): The process selector.
		@returns:
			(Proxy): The processes match the item."""
        if item in ('starts', 'ends'):
            return self.__getattr__(item)
        if hasattr(item, 'id') and hasattr(
                item, 'tag') and not isinstance(item, ProcSet):
            return Proxy([self.procs[item.id]])
        if isinstance(item, slice):
            return Proxy(
                self.__getattr__(procid) for procid in self.procs.keys()[item])
        if isinstance(item, int):
            return self[self.procs.keys()[item]]
        if isinstance(item, (tuple, list, GeneratorType)):
            ret = Proxy()
            ret.add(Proxy(it for itm in item for it in self[itm]))
            return ret
        if item in self.procs:
            return Proxy([self.procs[item]])
        if ',' in item:
            return self[(it.strip() for it in item.split(','))]

        return self[fnfilter(self.procs.keys(), item)]
Code example #21
def lookup_installed_services(app_options):
    """
    Returns list of installed Zoomdata services like: ['zoomdata', 'query-engine', 'scheduler', 'edc-impala']
    :param app_options: options passed to application
    :return: list on names
    """

    services_list = []

    if not isfile(path_join(app_options.install_location, "bin/zoomdata")):
        # main Zoomdata startup script not found - failing by returning empty list
        print(
            "[Error] No Zoomdata executable found while looking up for installed services."
        )
        return services_list
    else:
        services_list.append("zoomdata")

    services_list.extend([
        filename.split('-', 1)[1] for filename in fnfilter(
            listdir(path_join(app_options.install_location, "bin/")),
            "zoomdata-*")
    ])
    if app_options.verbose:
        print("Found {} installed Zoomdata services: {}".format(
            len(services_list), services_list))

    return services_list
Code example #22
File: resample.py Project: EOxServer/autotest
def resample(srid, origin, offsets, raw_imagery_path, output_path, image_pattern):
    data_path = os.path.join(settings.PROJECT_DIR, "examples", "data")
    
    filenames = os.listdir(os.path.join(data_path, raw_imagery_path))
    imgs = fnfilter(filenames, image_pattern)

    
    for img in imgs:
        src_path = os.path.join(data_path, raw_imagery_path, img)
        src = gdal.Open(src_path)
        
        src_grid = getGridFromFile(src_path)
        src_minx, src_miny, src_maxx, src_maxy = src_grid.getExtent2D()
        
        src_x_min = _roundint(math.floor((src_minx - origin[0]) / offsets[0][0]))
        src_y_min = _roundint(math.floor((src_maxy - origin[1]) / offsets[1][1]))
        src_x_max = _roundint(math.ceil((src_maxx - origin[0]) / offsets[0][0]))
        src_y_max = _roundint(math.ceil((src_miny - origin[1]) / offsets[1][1]))
        
        x_size = src_x_max - src_x_min + 1
        y_size = src_y_max - src_y_min + 1
        
        dst_path = os.path.join(data_path, output_path, "mosaic_%s" % img)
        driver = gdal.GetDriverByName('GTiff')
        dst = driver.Create(dst_path, x_size, y_size, 3)
        gt = [origin[0] + float(src_x_min) * offsets[0][0], offsets[0][0], 0.0,
              origin[1] + float(src_y_min) * offsets[1][1], 0.0, offsets[1][1]]
        dst.SetGeoTransform(gt)
        dst.SetProjection(src.GetProjection())
        
        gdal.ReprojectImage(src, dst)
Code example #23
File: clean.py Project: pymontecarlo/pymontecarlo
    def run(self):
        _clean.run(self)

        # Remove egg-info directory
        if self.all:
            dirs = self.distribution.package_dir
            egg_basedir = (dirs or {}).get('', os.curdir)

            for egg_infodir in fnfilter(os.listdir(egg_basedir), '*.egg-info'):
                if os.path.exists(egg_infodir):
                    remove_tree(egg_infodir, dry_run=self.dry_run)
                else:
                    log.warn("'%s' does not exist -- can't clean it", egg_infodir)

        # Remove build directories
        if self.all:
            for directory in (self.build_exe,):
                if os.path.exists(directory):
                    remove_tree(directory, dry_run=self.dry_run)
                else:
                    log.warn("'%s' does not exist -- can't clean it",
                             directory)

        # Purge
        if self.purge:
            if os.path.exists(self.dist_dir):
                remove_tree(self.dist_dir, dry_run=self.dry_run)
            else:
                log.warn("'%s' does not exist -- can't clean it", self.dist_dir)
Code example #24
File: cc.py Project: getsmap/smap411
def find_files(ext):
    paths = []
    for base_dir in BASE_DIRECTORIES:
        for root, directories, filenames in walk(base_dir):
            paths.extend(map(lambda f: join(root, f), fnfilter(filenames,
                                                               ext)))
    print 'Found %d files matching pattern %s' % (len(paths), ext)
    return paths
Code example #25
def _all_files_matching_ext(start, ext):
    """Get all files matching :ext: from :start: directory."""
    md_files = []
    for root, _, files in os.walk(start):
        md_files += fnfilter([os.path.join(root, f) for f in files],
                             "*." + ext)

    return md_files
Code example #27
File: test_role.py Project: zurikus/ldap2pg
def test_diff():
    from ldap2pg.role import Role, RoleSet

    pgmanagedroles = RoleSet([
        Role('drop-me'),
        Role('alter-me', members=['rename-me']),
        Role('nothing'),
        Role('public'),
        Role('rename-me'),
    ])
    pgallroles = pgmanagedroles.union({
        Role('reuse-me'),
        Role('dont-touch-me'),
    })
    ldaproles = RoleSet([
        Role('reuse-me'),
        Role('alter-me', options=dict(LOGIN=True), members=['Rename-Me']),
        Role('nothing'),
        Role('create-me'),
        Role('Rename-Me'),
    ])
    queries = [q.args[0] for q in pgmanagedroles.diff(ldaproles, pgallroles)]

    assert fnfilter(queries, 'ALTER ROLE "alter-me" WITH* LOGIN*;')
    assert fnfilter(queries, 'CREATE ROLE "create-me" *;')
    assert fnfilter(queries, '*DROP ROLE "drop-me";*')
    assert not fnfilter(queries, 'CREATE ROLE "reuse-me" *')
    assert not fnfilter(queries, '*nothing*')
    assert not fnfilter(queries, '*dont-touch-me*')
    assert not fnfilter(queries, '*public*')
Code example #28
def get_clients():
    """
    Returns a list of filenames(modules) in clients/ minus the .py extension
    """
    cmps = []
    modules = fnfilter(os.listdir('clients'), '*.py')
    modules.remove('__init__.py')
    cmps.extend(map(lambda x: os.path.splitext(x)[0], modules))
    return cmps
Code example #29
File: sftp.py Project: jonatkinson/fabric
    def glob(self, path):
        dirpart, pattern = os.path.split(path)
        rlist = self.ftp.listdir(dirpart)

        names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
        if len(names):
            return [os.path.join(dirpart, name) for name in names]
        else:
            return [path]
Code example #30
File: example-ext.py Project: mmattice/tx-ts6
def get_clients():
    """
    Returns a list of filenames(modules) in clients/ minus the .py extension
    """
    cmps = []
    modules = fnfilter(os.listdir('clients'), '*.py')
    modules.remove('__init__.py')
    cmps.extend(map(lambda x: os.path.splitext(x)[0], modules))
    return cmps
Code example #31
File: jack.py Project: gjcoombes/jack
    def _has_project(self):
        """Return project dir if the locdata has a project in it"""
        assert self.locdata_dirs
        results = []
        for d in self.locdata_dirs:
            dirs = [osp.join(d, f, "modelout")
                    for f in fnfilter(os.listdir(d), self.project)
                    if osp.isdir(osp.join(d, f, "modelout"))]
            results.extend(dirs)
        return results
Code example #32
def test_roles_diff_queries():
    from ldap2pg.manager import Role, RoleSet

    a = RoleSet([
        Role('drop-me'),
        Role('alter-me'),
        Role('nothing'),
    ])
    b = RoleSet([
        Role('alter-me', options=dict(LOGIN=True)),
        Role('nothing'),
        Role('create-me')
    ])
    queries = [q.args[0] for q in a.diff(b)]

    assert fnfilter(queries, "ALTER ROLE alter-me WITH* LOGIN*;")
    assert fnfilter(queries, "CREATE ROLE create-me *;")
    assert 'DROP ROLE drop-me;' in queries
    assert not fnfilter(queries, '*nothing*')
Code example #33
File: test_role.py Project: codeaditya/ldap2pg
def test_diff_rename_members():
    from ldap2pg.role import Role, RoleSet

    pgmanagedroles = RoleSet()
    pgallroles = pgmanagedroles.union({
        Role('parent', members=['min2mix', 'min2min']),
        Role('min2mix'),
        Role('min2min'),
    })
    ldaproles = RoleSet([
        Role('parent', members=['Min2Mix', 'min2min']),
        Role('Min2Mix'),
        Role('min2min'),
    ])
    queries = [q.args[0] for q in pgmanagedroles.diff(ldaproles, pgallroles)]

    # Don't modify membership.
    assert not fnfilter(queries, '*GRANT*')
    assert not fnfilter(queries, '*REVOKE*')
Code example #34
File: jack.py Project: gjcoombes/jack
    def _has_results(self):
        """Return files that match the pattern"""
        assert self.project_dirs
        assert self.patterns
        results = []
        for d in self.project_dirs:
            for p in self.patterns:
                files = [osp.join(d, f) for f in fnfilter(os.listdir(d), p)
                         if osp.splitext(f)[1].lower() in self.EXTENSIONS]
                results.extend(files)
        return results
Code example #35
def read_image_stack(fn, *args, **kwargs):
    """Read a 3D volume of images in image or .h5 format into a numpy.ndarray.

    The format is automatically detected from the (first) filename.

    A 'crop' keyword argument is supported, as a list of 
    [xmin, xmax, ymin, ymax, zmin, zmax]. Use 'None' for no crop in that
    coordinate.

    If reading in .h5 format, keyword arguments are passed through to
    read_h5_stack().
    """
    #pdb.set_trace()
    if os.path.isdir(fn):
        fn += '/'
    d, fn = split_path(os.path.expanduser(fn))
    if len(d) == 0: d = '.'
    crop = kwargs.get('crop', [None] * 6)
    if len(crop) == 4: crop.extend([None] * 2)
    elif len(crop) == 2: crop = [None] * 4 + crop
    kwargs['crop'] = crop
    if any([fn.endswith(ext) for ext in supported_image_extensions]):
        # image types, such as a set of pngs or a multi-page tiff
        xmin, xmax, ymin, ymax, zmin, zmax = crop
        if len(args) > 0 and type(args[0]) == str and args[0].endswith(
                fn[-3:]):
            # input is a list of filenames
            fns = [fn] + [split_path(f)[1] for f in args]
        else:
            # input is a filename pattern to match
            fns = fnfilter(os.listdir(d), '*' + fn[-4:])
        if len(fns) == 1 and fns[0].endswith('.tif'):
            stack = read_multi_page_tif(join_path(d, fns[0]), crop)
        else:
            fns.sort(key=alphanumeric_key)  # sort filenames numerically
            fns = fns[zmin:zmax]
            im0 = pil_to_numpy(Image.open(join_path(d, fns[0])))
            ars = (pil_to_numpy(Image.open(join_path(d, fn))) for fn in fns)
            im0 = im0[xmin:xmax, ymin:ymax]
            dtype = im0.dtype
            stack = zeros((len(fns), ) + im0.shape, dtype)
            for i, im in enumerate(ars):
                stack[i] = im[xmin:xmax, ymin:ymax]
    elif fn.endswith('_boundpred.h5') or fn.endswith('_prediction.h5'):
        # Ilastik batch prediction output file
        stack = read_prediction_from_ilastik_batch(os.path.join(d, fn),
                                                   **kwargs)
    elif fn.endswith('.h5'):
        # other HDF5 file
        stack = read_h5_stack(join_path(d, fn), *args, **kwargs)
    elif os.path.isfile(os.path.join(d, 'superpixel_to_segment_map.txt')):
        # Raveler export
        stack = raveler_to_labeled_volume(d, *args, **kwargs)
    return squeeze(stack)
Code example #36
    def dirlist(self, dirpath, pattern=None):
        def fileGen(path):
            # Walk the tree with scandir, recursing into subdirectories and
            # yielding full file paths.
            for entry in scandir(path):
                if entry.is_file():
                    yield entry.path
                else:
                    yield from fileGen(entry.path)

        if pattern:
            return fnfilter(list(fileGen(dirpath)), pattern)
        return list(fileGen(dirpath))
Code example #37
File: test_manager.py Project: leorenc/ldap2pg
def test_diff_roles(mocker):
    from ldap2pg.manager import SyncManager, Role, RoleSet

    m = SyncManager()

    pgroles = RoleSet([
        Role('drop-me'),
        Role('alter-me'),
        Role('nothing'),
    ])
    ldaproles = RoleSet([
        Role('alter-me', options=dict(LOGIN=True)),
        Role('nothing'),
        Role('create-me')
    ])
    queries = [q.args[0] for q in m.diff(pgroles, set(), ldaproles, set())]

    assert fnfilter(queries, "ALTER ROLE alter-me WITH* LOGIN*;")
    assert fnfilter(queries, "CREATE ROLE create-me *;")
    assert fnfilter(queries, '*DROP ROLE drop-me;*')
    assert not fnfilter(queries, '*nothing*')
Code example #38
File: sftp.py Project: nicktimko/fabric
    def glob(self, path):
        from fabric.state import win32
        dirpart, pattern = os.path.split(path)
        rlist = self.ftp.listdir(dirpart)

        names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
        ret = [path]
        if len(names):
            s = '/'
            ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
            if not win32:
                ret = [posixpath.join(dirpart, name) for name in names]
        return ret
Code example #39
    def glob(self, path):
        from fabric.state import win32
        dirpart, pattern = os.path.split(path)
        rlist = self.ftp.listdir(dirpart)

        names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
        ret = [path]
        if len(names):
            s = '/'
            ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
            if not win32:
                ret = [posixpath.join(dirpart, name) for name in names]
        return ret
Code example #40
File: formic.py Project: scottbelden/formic
    def __init__(self, elements):
        self.sections = []
        self.str = []

        self.bound_start = elements[0] != "**"

        if elements[-1] != "**":
            # Patterns like "constant", "cons*" or "c?nst?nt"
            self.file_pattern = path.normcase(elements[-1])
            del elements[-1]
        else:
            self.file_pattern = "*"

        # Optimization: Set self.file_filter to be a specific pattern
        # validating algorithm for the specific pattern
        if self.file_pattern == "*":
            # The pattern matches everything
            self.file_filter = lambda files: files
        elif "*" in self.file_pattern or "?" in self.file_pattern:
            # The pattern is a glob. Use fnmatch.filter
            self.file_filter = lambda files: fnfilter(files, self.file_pattern)
        else:
            # This is a 'constant' pattern - use comprehension
            self.file_filter = lambda files: [
                file for file in files
                if path.normcase(file) == self.file_pattern
            ]

        if elements:
            self.bound_end = elements[-1] != "**"
        else:
            self.bound_end = self.bound_start

        fragment = []
        for element in elements:
            if element == '**':
                if fragment:
                    self.sections.append(Section(fragment))
                fragment = []
            else:
                fragment.append(element)
        if fragment:
            self.sections.append(Section(fragment))

        # Propagate the bound start/end to the sections
        if self.bound_start and self.sections:
            self.sections[0].bound_start = True
        if self.bound_end and self.sections:
            self.sections[-1].bound_end = True
Code example #41
File: formic.py Project: galman33/GalDevKit
    def __init__(self, glob):
        glob = glob.replace('\\', '/').replace('//', '/')
        self.sections = []
        self.str      = []

        elements = Pattern._simplify(glob.split('/'))
        self.bound_start = elements[0]  != "**"

        if elements[-1] != "**":
            self.file_pattern = elements[-1]
            del elements[-1]
        else:
            self.file_pattern = "*"

        # Optimization: Set self.file_filter to be a specific pattern
        # validating algorithm for the specific pattern
        if self.file_pattern == "*":
            # The pattern matches everything
            self.file_filter = lambda files: files
        elif "*" in self.file_pattern or "?" in self.file_pattern:
            # The pattern is a glob. Use fnmatch.filter
            self.file_filter = lambda files: fnfilter(files, self.file_pattern)
        else:
            # This is a 'constant' pattern - use comprehension
            self.file_filter = lambda files: [ file for file in files if file == self.file_pattern ]

        if elements:
            self.bound_end = elements[-1] != "**"
        else:
            self.bound_end = self.bound_start

        fragment = []
        for element in elements:
            if element == '**':
                if fragment:
                    self.sections.append(Section(fragment))
                fragment = []
            else:
                fragment.append(element)
        if fragment:
            self.sections.append(Section(fragment))

        # Propagate the bound start/end to the sections
        if self.bound_start and self.sections:
            self.sections[0].bound_start = True
        if self.bound_end and self.sections:
            self.sections[-1].bound_end = True
Code example #42
File: test_lint_peps.py Project: samtx/OpenMDAO
def _get_files():
    """
    A generator of files to check for pep8/pep257 violations.
    """
    topdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

    for dirpath, dnames, fnames in os.walk(topdir):
        for dpattern in dir_excludes:
            newdn = [d for d in dnames if not fnmatch(d, dpattern)]
            if len(newdn) != len(dnames):
                dnames[:] = newdn  # replace contents of dnames to cause pruning

        for fpattern in file_excludes:
            fnames = [f for f in fnames if not fnmatch(f, fpattern)]

        for name in fnfilter(fnames, '*.py'):
            yield os.path.join(dirpath, name)
Code example #43
File: depana.py Project: letoh/depana
def create_walker(root_dir = ".", depth = 0, inc_pattern = ["*.o", "*.pico"], check_hidden = False):
	root_dir_depth = root_dir.count(os.path.sep)
	for root, dirs, files in os.walk(root_dir):
#		print "path:", root_dir, ", root:", root, ", dirs:", dirs, root.count(os.path.sep) - root_dir_depth, depth
		if depth and depth <= (root.count(os.path.sep) - root_dir_depth): continue
		if not files: continue
		if not check_hidden and fnmatch(root[len(root_dir):], "/.[!.]*"): continue

		n = []
		for pat in inc_pattern:
			n.extend(fnfilter(files, pat))
		if not n: continue

#		print "\x1b[1;33m", root, "\x1b[0m", len(n), "items"
		for file in n:
#			print "\t\t", os.path.join(root, file)
			yield os.path.join(root, file)
	pass
Code example #44
File: command.py Project: nco/swamp
    def mapInput(self, scriptFilename):
        temps = fnfilter(self.scriptOuts, scriptFilename)
        # FIXME: should really match against filemap, since
        # logicals may be renamed
        if temps:
            temps.sort()  # sort script wildcard expansion.
            # we need to convert script filenames to logicals
            # to handle real dependencies
            return map(lambda s: self.logicalOutByScript[s], temps)
        else:
            inList = self.expandConcreteInput(scriptFilename)
            if inList:
                self.scriptIns.update(inList)
                return inList
            else:
                log.error("%s is not allowed as an input filename" % (scriptFilename))
                raise StandardError("Input illegal or nonexistent %s" % (scriptFilename))
        pass
Code example #45
File: EM_normalization.py Project: azadis/EMISAC
def read_image_stack(fn):
    """Read a 3D volume of images in .tif or .h5 formats into a numpy.ndarray.
    This function attempts to automatically determine input file types and
    wraps specific image-reading functions.
    Adapted from gala.imio (https://github.com/janelia-flyem/gala)
    """
    if os.path.isdir(fn):
        fn += '/'
    d, fn = split_path(os.path.expanduser(fn))
    if len(d) == 0: d = '.'
    fns = fnfilter(os.listdir(d), fn)
    if len(fns) == 1 and fns[0].endswith('.tif'):
        stack = read_multi_page_tif(join_path(d, fns[0]))
    elif fn.endswith('.h5'):
        data = h5py.File(join_path(d, fn), 'r')
        stack = data[group_name].value
    return squeeze(stack)
Code example #46
File: util.py Project: Clam-/Misc-Utilities
def buildpathlist(args, paths=None):
	if not paths:
		paths = []
	cwd = getcwdu()
	
	if not args:
		paths.append(cwd)

	elif platform == "win32":
		from fnmatch import filter as fnfilter
		#make list of paths here
		
		dircontents = None
			
		for arg in args:
			if isabs(arg):
				if exists(arg):
					paths.append(unicode(arg))
				else:
					log.warn("Path not found: %s" % arg)
			
			elif exists(abspath(join(cwd,arg))):
				paths.append(join(cwd,arg))
			
			else:
				if not dircontents:
					dircontents = listdir(cwd)
				for fname in fnfilter(dircontents, arg):
					paths.append(join(cwd,fname))
		
	else:
		for arg in args:
			if isabs(arg):
				if exists(arg):
					paths.append(unicode(arg))
				else:
					log.warn("Path not found: %s" % arg)
			elif exists(abspath(join(cwd,arg))):
				paths.append(join(cwd,arg))

	return paths
Code example #47
File: imio.py Project: DerThorsten/ray
def read_image_stack(fn, *args, **kwargs):
    """Read a 3D volume of images in image or .h5 format into a numpy.ndarray.

    The format is automatically detected from the (first) filename.

    A 'crop' keyword argument is supported, as a list of 
    [xmin, xmax, ymin, ymax, zmin, zmax]. Use 'None' for no crop in that
    coordinate.

    If reading in .h5 format, keyword arguments are passed through to
    read_h5_stack().
    """
    d, fn = split_path(os.path.expanduser(fn))
    if len(d) == 0: d = '.'
    crop = kwargs.get('crop', [None]*6)
    if len(crop) == 4: crop.extend([None]*2)
    elif len(crop) == 2: crop = [None]*4 + crop
    kwargs['crop'] = crop
    if any([fn.endswith(ext) for ext in supported_image_extensions]):
        xmin, xmax, ymin, ymax, zmin, zmax = crop
        if len(args) > 0 and type(args[0]) == str and args[0].endswith(fn[-3:]):
            # input is a list of filenames
            fns = [fn] + [split_path(f)[1] for f in args]
        else:
            # input is a filename pattern to match
            fns = fnfilter(os.listdir(d), fn)
        if len(fns) == 1 and fns[0].endswith('.tif'):
            stack = read_multi_page_tif(join_path(d,fns[0]), crop)
        else:
            fns.sort(key=alphanumeric_key) # sort filenames numerically
            fns = fns[zmin:zmax]
            im0 = pil_to_numpy(Image.open(join_path(d,fns[0])))
            ars = (pil_to_numpy(Image.open(join_path(d,fn))) for fn in fns)
            im0 = im0[xmin:xmax,ymin:ymax]
            dtype = im0.dtype
            stack = zeros((len(fns),)+im0.shape, dtype)
            for i, im in enumerate(ars):
                stack[i] = im[xmin:xmax,ymin:ymax]
    if fn.endswith('.h5'):
        stack = read_h5_stack(join_path(d,fn), *args, **kwargs)
    return squeeze(stack)
Code example #48
File: analysis.py Project: jordan-stone/ARIES
def grid_veil_fit(obs_spec, model_spec_path, shift_stepsize, shift_total_size,
                  template_glob='phoenix_model*.dat',windows=[(0,np.inf)]):
    '''
    Determine the shift in wavelength necessary to align a template spectrum
    and an observed spectrum
    Inputs:
    obs_spec        -[str] filename of observed spectrum (col0 is wavelength, col1 is flux)
    model_spec_path -[str] the path to the photospheric spectra files. Each file in this 
                           dir should have col0 wavelength and col1 flux
    shift_stepsize  -[float] the step size to use while gridding (only steps in one 
                             direction...)
    shift_total_size-[float] the total amount to shift during the fitting process.
    template_glob   -[str] a glob string that identifies all of the desired template
                           datafiles in model_spec_path
    windows         -[list of tuples] to be fed to analysis.make_inds. These are 
                                      windows around spectral features that the fitter will use
                                      while generating its figure of merit value...
    Returns:
    None            - prints the best fit model name and shift
    '''
    spec=readcol(obs_spec,colNames=['w','f'])
    inds=make_inds(spec['w'],windows)
    model_photospheres={}
    fnames=sorted(fnfilter(listdir(model_spec_path),template_glob))
    print len(fnames)
    for f in fnames:
        model_photospheres[f]=readcol(model_spec_path+f,colNames=['w','f'])
        pm=np.polyfit(model_photospheres[f]['w'],model_photospheres[f]['f'],3)
        polynomial=np.polyval(pm,model_photospheres[f]['w'])
        polynomial-=polynomial.mean()
        model_photospheres[f]['f']-=polynomial
    fit_dict={}
    for shift in np.arange(0,shift_total_size,shift_stepsize):
        for model in fnames:
            model_func=interp1d(model_photospheres[model]['w']+shift,model_photospheres[model]['f'])
            diff=(spec['f']-model_func(spec['w']))**2
            fit_dict[(model,shift)]=diff[inds].sum()
    skeys=sorted(fit_dict,key=fit_dict.get)
    print skeys[0]
Code example #49
def spotlight_json(path):
    for root, dirnames, filenames in os.walk(path):
        for filename in fnfilter(filenames, '*.json'):
            with open(os.path.join(root, filename)) as jsonfile:
                yield (filename, json.loads(jsonfile.read()))
Code example #50
File: setup.py Project: fajran/kivy
with open(join(dirname(__file__), 'kivy', 'graphics', 'config.pxi'), 'w') as fd:
    fd.write('# Autogenerated file for Kivy Cython configuration\n')
    for k, v in c_options.iteritems():
        fd.write('DEF %s = %d\n' % (k.upper(), int(v)))

# extension modules
ext_modules = []

# list all files to compile
pyx_files = []
kivy_libs_dir = realpath(kivy.kivy_libs_dir)
for root, dirnames, filenames in walk(join(dirname(__file__), 'kivy')):
    # ignore lib directory
    if realpath(root).startswith(kivy_libs_dir):
        continue
    for filename in fnfilter(filenames, '*.pyx'):
        pyx_files.append(join(root, filename))

# check for cython
try:
    have_cython = True
    from Cython.Distutils import build_ext
except:
    have_cython = False

# create .c for every module
if 'sdist' in argv and have_cython:
    from Cython.Compiler.Main import compile
    print 'Generating C files...',
    compile(pyx_files)
    print 'Done !'
Code example #51
File: misc.py Project: dhaffner/dhaffner.py
def files(directory, pattern):
    """Return all filenames from directory which match given pattern."""
    join = partial(path.join, directory)
    return map(join, fnfilter(listdir(directory), pattern))
Code example #52
File: archive.py Project: groutr/conda-tools
    def get_member(self, pattern):
        """
        Return a tuple of the member(s) whose path matches pattern (empty if none match).
        """
        _files = {m.path: m for m in self.files()}
        return tuple(_files[m] for m in fnfilter(_files.keys(), pattern))
Code example #53
File: imio.py Project: elhuhdron/gala
def read_image_stack(fn, *args, **kwargs):
    """Read a 3D volume of images in image or .h5 format into a numpy.ndarray.

    This function attempts to automatically determine input file types and
    wraps specific image-reading functions.

    Parameters
    ----------
    fn : filename (string)
        A file path or glob pattern specifying one or more valid image files.
        The file format is automatically determined from this argument.

    *args : filenames (string, optional)
        More than one positional argument will be interpreted as a list of
        filenames pointing to all the 2D images in the stack.

    **kwargs : keyword arguments (optional)
        Arguments to be passed to the underlying functions. A 'crop'
        keyword argument is supported, as a list of length 6:
        [xmin, xmax, ymin, ymax, zmin, zmax]. Use 'None' for no crop in
        that coordinate.

    Returns
    -------
    stack : 3-dimensional numpy ndarray

    Notes
    -----
        If reading in .h5 format, keyword arguments are passed through to
        read_h5_stack().

        Automatic file type detection may be deprecated in the future.
    """
    # TODO: Refactor.  Rather than have implicit designation of stack format
    # based on filenames (*_boundpred.h5, etc), require explicit parameters
    # in config JSON files.
    if os.path.isdir(fn):
        fn += '/'
    d, fn = split_path(os.path.expanduser(fn))
    if len(d) == 0: d = '.'
    crop = kwargs.get('crop', [None]*6)
    if crop is None:
        crop = [None]*6
    if len(crop) == 4: crop.extend([None]*2)
    elif len(crop) == 2: crop = [None]*4 + crop
    kwargs['crop'] = crop
    if any([fn.endswith(ext) for ext in supported_image_extensions]):
        # image types, such as a set of pngs or a multi-page tiff
        xmin, xmax, ymin, ymax, zmin, zmax = crop
        if len(args) > 0 and type(args[0]) == str and args[0].endswith(fn[-3:]):
            # input is a list of filenames
            fns = [fn] + [split_path(f)[1] for f in args]
        else:
            # input is a filename pattern to match
            fns = fnfilter(os.listdir(d), fn)
        if len(fns) == 1 and fns[0].endswith('.tif'):
            stack = read_multi_page_tif(join_path(d,fns[0]), crop)
        else:
            fns.sort(key=alphanumeric_key) # sort filenames numerically
            fns = fns[zmin:zmax]
            im0 = imread(join_path(d, fns[0]))
            ars = (imread(join_path(d, fn)) for fn in fns)
            im0 = im0[xmin:xmax, ymin:ymax]
            dtype = im0.dtype
            stack = zeros((len(fns),)+im0.shape, dtype)
            for i, im in enumerate(ars):
                stack[i] = im[xmin:xmax,ymin:ymax]
    elif fn.endswith('_boundpred.h5') or fn.endswith('_processed.h5'):
        # Ilastik batch prediction output file
        stack = read_prediction_from_ilastik_batch(os.path.join(d,fn), **kwargs)
    elif fn.endswith('.h5'):
        # other HDF5 file
        stack = read_h5_stack(join_path(d,fn), *args, **kwargs)
    elif os.path.isfile(os.path.join(d, 'superpixel_to_segment_map.txt')):
        # Raveler export
        stack = raveler_to_labeled_volume(d, *args, **kwargs)
    return squeeze(stack)
Code example #54

cmdclass["build_ext"] = KivyBuildExt

# extension modules
ext_modules = []

# list all files to compile
pyx_files = []
pxd_files = []
kivy_libs_dir = realpath(join(kivy.kivy_base_dir, "libs"))
for root, dirnames, filenames in walk(join(dirname(__file__), "kivy")):
    # ignore lib directory
    if realpath(root).startswith(kivy_libs_dir):
        continue
    for filename in fnfilter(filenames, "*.pxd"):
        pxd_files.append(join(root, filename))
    for filename in fnfilter(filenames, "*.pyx"):
        pyx_files.append(join(root, filename))

# add cython core extension modules if cython is available

if True:
    libraries = ["m"]
    include_dirs = []
    extra_link_args = []
    if platform == "win32":
        libraries.append("opengl32")
    elif platform == "darwin":
        # On OSX, it's not -lGL, but -framework OpenGL...
        extra_link_args = ["-framework", "OpenGL"]
Code example #55
File: example-ext.py Project: elly/twisted-ts6
def get_clients():
    cmps = []
    modules = fnfilter(os.listdir('clients'), '*.py')
    modules.remove('__init__.py')
    cmps.extend(map(lambda x: os.path.splitext(x)[0], modules))
    return cmps
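
Most of the snippets above follow the same core idiom: collect candidate names with os.walk or os.listdir, then narrow them with fnmatch.filter (imported as fnfilter). The following is a minimal, self-contained sketch of that pattern; it is not taken from any of the projects listed, and the function name find_matching is hypothetical.

import os
from fnmatch import filter as fnfilter

def find_matching(root, pattern='*.py'):
    """Yield paths under root whose basename matches the given glob pattern."""
    for dirpath, dirnames, filenames in os.walk(root):
        # Prune hidden directories in place so os.walk does not descend into them.
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for name in fnfilter(filenames, pattern):
            yield os.path.join(dirpath, name)

# Example usage (hypothetical directory):
# for path in find_matching('.', '*.py'):
#     print(path)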