Code Example #1
File: setup.py Project: Lekensteyn/Solaar
def _data_files():
	from os.path import dirname as _dirname

	yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
	yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
	yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']

	for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
		yield _dirname(mo), [mo]

	yield 'share/applications', ['share/applications/solaar.desktop']
	yield autostart_path, ['share/applications/solaar.desktop']

	del _dirname
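
Each yielded pair above is a (target_directory, [source_files]) tuple, which is exactly the shape setuptools expects for its data_files argument. A minimal sketch of that wiring, assuming the usual `from glob import glob as _glob` alias and a hypothetical autostart_path value (the real one is defined elsewhere in Solaar's setup.py):

# Sketch only: how a _data_files() generator typically feeds setuptools.
from glob import glob as _glob
from setuptools import setup

autostart_path = 'etc/xdg/autostart'  # hypothetical value for illustration

def _data_files():
    yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
    yield 'share/applications', ['share/applications/solaar.desktop']
    yield autostart_path, ['share/applications/solaar.desktop']

setup(
    name='solaar-example',
    version='0.0.0',
    data_files=list(_data_files()),
)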
Code Example #2
File: _main.py Project: Duroktar/nedry_pass
    def _gif_spam():
        """
            Opens a tkLabel frame that animates the Nedry_0*.gif files onto
            the screen, simulating an animated gif.

        :return: returns on window close
        """
        pics = sorted(_glob(_join(_PATH, '*.gif')))  # glob takes one joined pattern, not (dir, pattern)
        root = _tk.Tk()
        label = _tk.Label(root)
        label.pack(padx=10, pady=10)
        # store as tk img_objects
        pictures = _it.cycle(
            _tk.PhotoImage(file=img_name) for img_name in pics)
        # milliseconds
        delay = 150

        def animate():
            """ cycle through """
            img = next(pictures)
            label["image"] = img
            root.after(delay, animate)

        animate()
        root.mainloop()
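
The trick here is tkinter's after(), which schedules the next frame without blocking the event loop, while itertools.cycle keeps a reference to every PhotoImage (Tk images are dropped if nothing holds them). A condensed, self-contained sketch of the same pattern, assuming a directory of GIF frame files:

import itertools as _it
import tkinter as _tk
from glob import glob as _glob
from os.path import join as _join

def animate_frames(frame_dir, delay=150):
    root = _tk.Tk()
    label = _tk.Label(root)
    label.pack(padx=10, pady=10)
    # cycle() retains each PhotoImage, keeping Tk from garbage-collecting them
    pictures = _it.cycle(
        _tk.PhotoImage(file=name)
        for name in sorted(_glob(_join(frame_dir, '*.gif'))))

    def animate():
        label["image"] = next(pictures)
        root.after(delay, animate)  # reschedule instead of blocking

    animate()
    root.mainloop()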
Code Example #3
File: smartopen.py Project: lirazsiri/vim-smartopen
    def glob(path):
        def f(p):
            if isdir(p):
                return p + "/"
            return p

        return [p for p in map(f, _glob(path)) if not re.search(r"\.py[co]$", p)]
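
Restated as a self-contained sketch with the imports the snippet assumes (re, os.path.isdir and the _glob alias), plus a hypothetical call showing the two behaviors, directories marked with a trailing slash and compiled Python files filtered out:

import re
from glob import glob as _glob
from os.path import isdir

def glob(path):
    def f(p):
        return p + "/" if isdir(p) else p
    return [p for p in map(f, _glob(path)) if not re.search(r"\.py[co]$", p)]

print(glob("src/*"))  # e.g. ['src/main.py', 'src/pkg/'] -- never 'src/main.pyc'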
Code Example #4
File: Dependencies.py Project: daveuu/baga
def chmod_xr(pattern):
    print('Setting as executable')
    files = _glob(pattern)
    for f in files:
        print(f)
        st = _os.stat(f)
        _os.chmod(f, st.st_mode | _stat.S_IXUSR | _stat.S_IRUSR)
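
The chmod call ORs new permission bits into the file's existing mode, so nothing already set is disturbed. A small illustration of the arithmetic (mask values per the stat module docs):

import stat

mode = 0o640                          # rw-r-----
mode |= stat.S_IXUSR | stat.S_IRUSR   # add user execute + user read
print(oct(mode))                      # 0o740 -> rwxr-----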
Code Example #5
File: setup.py Project: shundhammer/solaar
def _data_files():
    from os.path import dirname as _dirname

    yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
    yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
    yield 'share/icons/hicolor/scalable/apps', [
        'share/solaar/icons/solaar.svg'
    ]

    for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
        yield _dirname(mo), [mo]

    yield 'share/applications', ['share/applications/solaar.desktop']
    yield autostart_path, ['share/applications/solaar.desktop']

    del _dirname
Code Example #6
def verify_conversion(glob_pattern,
                      target_dir,
                      reference_ext,
                      tmpdir_root=_tempfile.gettempdir(),
                      processes=None):
    from .validate import validate_converted_files_match
    if not glob_pattern.endswith(reference_ext):
        glob_pattern = glob_pattern + reference_ext
    file_sets_by_ext = {
        ext: _glob(_os.path.join(target_dir, glob_pattern + ext))
        for ext in SUPPORTED_EXTENSIONS
    }
    matches = {
        path.replace(reference_ext, "")
        for path in file_sets_by_ext[reference_ext]
    }
    for ext, paths in file_sets_by_ext.items():
        if ext == reference_ext:
            continue
        matches &= {path.replace(ext, "") for path in paths}
    other_exts = set(SUPPORTED_EXTENSIONS) - {
        reference_ext,
    }

    errors = {}
    with tqdm.tqdm(total=len(matches) * (len(SUPPORTED_EXTENSIONS) - 1),
                   leave=False) as t:
        with _Executor(max_workers=processes) as executor:
            for other_ext in other_exts:
                # Bind other_ext via a default argument so each lambda keeps
                # its own extension (guards against late-binding surprises).
                verify_fn = lambda fn, other_ext=other_ext: validate_converted_files_match(
                    ref_ext=reference_ext, subject=fn + other_ext)
                for fn, missing, mismatching in executor.map(verify_fn, matches):
                    if missing or mismatching:
                        errors[fn] = str(ConversionError(missing, mismatching))
    return errors
Code Example #7
File: Dependencies.py Project: daveuu/baga
def prep_python_2(pattern):
    print('Ensuring python2 is called . . .')
    # could try here?
    files = _glob(pattern)
    for f in files:
        print(f)
        fixed = open(f).read().replace('python ','python2 ')
        open(f,'w').write(fixed)
Code Example #8
File: config_parser.py Project: gabis-precog/pyhocon
def glob(pathname, recursive=False):
    if recursive and '**' in pathname:
        import warnings
        warnings.warn(
            'This version of python (%s) does not support recursive import'
            % sys.version)
    from glob import glob as _glob
    return _glob(pathname)
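
This shim exists because recursive '**' matching only arrived in the standard library with Python 3.5. On a modern interpreter the equivalent call is simply:

from glob import glob

# '**' crosses directory boundaries only when recursive=True (Python 3.5+)
matches = glob('conf/**/*.conf', recursive=True)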
Code Example #9
def _data_files():
    from os.path import dirname as _dirname

    yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
    yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
    yield 'share/icons/hicolor/scalable/apps', [
        'share/solaar/icons/solaar.svg'
    ]

    for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
        yield _dirname(mo), [mo]

    yield 'share/applications', ['share/applications/solaar.desktop']
    yield autostart_path, ['share/autostart/solaar.desktop']
    yield '/etc/udev/rules.d', ['rules.d/42-logitech-unify-permissions.rules']

    del _dirname
Code Example #10
def get_file_or_nothing(path, glob_pattern) -> _typing.Union[str, None]:
    prev_path = get_cur_dir_path()
    _os.chdir(path)
    file = (list(_glob(glob_pattern)) or [None])[0]
    if file is not None:
        file = f"{path}/{file}"
    _os.chdir(prev_path)
    return file
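
Note that os.chdir mutates process-global state, which is risky in threaded code. A chdir-free sketch with the same "first match or None" contract, assuming a joined pattern is acceptable:

import os as _os
from glob import glob as _glob

def get_file_or_nothing(path, glob_pattern):
    # Joining the pattern onto the directory avoids changing the CWD.
    matches = _glob(_os.path.join(path, glob_pattern))
    return matches[0] if matches else None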
Code Example #11
def msh_completer(text, state):
    options, matches = [], []

    ## Files Completions
    for files in _glob(text + "*"): options.append(files)

    ## Programs Completions
    for progsDirectory in _msh_os.environ["PATH"].split(":"):
        for programs in _glob(progsDirectory + "/*"): options.append(_msh_os.path.basename(programs))

    ## Builtin Programs Completions
    for builtins in programlist: options.append(builtins)

    if text:
        for chars in options:
            if chars[:len(text)] == text: matches += [chars]

    # readline calls the completer with state = 0, 1, 2, ... until None is returned
    return matches[state] if state < len(matches) else None
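
readline drives such a completer by calling it repeatedly with state = 0, 1, 2, ... until it returns None, which is why the function indexes into matches. A sketch of the registration (the surrounding shell setup code is assumed):

import readline

readline.set_completer(msh_completer)
readline.parse_and_bind('tab: complete')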
Code Example #12
def resubmit(sample, dryrun=False, quiet=False):
    """
    Check the dag status file of the sample for failed jobs. If any, submit
    the rescue dag files to farmoutAnalysisJobs.
    Sample should be a path to the submit directory.
    """
    statusDag = '%s/dags/dag.status' % sample

    pattern = _reCompile(r'Nodes(?P<status>[A-Za-z]+) = (?P<nNodes>\d+)')

    results = {}
    try:
        with open(statusDag, 'r') as f:
            for line in f:
                # we only care about the summary block, which is first
                if ']' in line:
                    break

                match = pattern.search(line)
                if match:
                    results[match.group('status')] = int(match.group("nNodes"))
    except IOError:
        _log.error(
            "Unable to find DAG status file for {} -- did it get submitted?".
            format(sample))
        raise

    try:
        total = results['Total']
        succeeded = results['Done']
        failed = results['Failed']
        inProgress = results['Pre'] + results['Post'] + results['Queued'] + \
            results['Ready']
        ignore = results['Unready']  # email job or something
    except KeyError:
        _log.error("DAG status file {} is broken somehow".format(statusDag))
        raise

    if failed or not quiet:
        _log.info('    ' + sample)
        _log.info(
            "        Total: {0} Done: {1} Queued: {2} Failed: {3}".format(
                total - ignore, succeeded, inProgress, failed))

    if inProgress and (failed or not quiet):
        _log.info("        Not done, try again later")
    elif failed:
        _log.info("        Resubmitting...")
        rescue_dag = max(
            _glob('{}/dags/*dag.rescue[0-9][0-9][0-9]'.format(sample)))
        _log.info('        Rescue file: {0}'.format(rescue_dag))
        if not dryrun:
            cmd = 'farmoutAnalysisJobs --rescue-dag-file={}'.format(rescue_dag)
            _bash(cmd)

    return succeeded, failed, inProgress
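
max() on the glob results picks the newest rescue file because the zero-padded three-digit suffix makes lexicographic order agree with numeric order:

files = ['job.dag.rescue001', 'job.dag.rescue002', 'job.dag.rescue010']
assert max(files) == 'job.dag.rescue010'  # padding keeps string comparison numeric-safe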
Code Example #13
def ReadData(paths, delim=None):
    """
    ReadData takes a list of wildcard paths, reads each set of files and
    stores the results in a master data frame.
    """
    merged = _DataFrame()
    for indexa, elem in enumerate(paths):
        files = sorted(_glob(elem))
        allR, m1 = ReadFilesToDF(files, delim=delim)
        merged[indexa] = m1[0]
    return merged
Code Example #14
def list():
    """Return a list of the available nodes."""

    # Glob all Python scripts in the _nodes directory.
    nodes = _glob("%s/*.py" % _node_dir)

    # Strip the extension.
    nodes = [_os.path.basename(x).split(".py")[0] for x in nodes]

    return nodes
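
An equivalent sketch with pathlib, where Path.stem strips the extension directly instead of the basename/split dance (illustrative rewrite, not the project's code):

from pathlib import Path

def list_nodes(node_dir):
    # Path.stem is the filename without its final suffix
    return [p.stem for p in Path(node_dir).glob('*.py')]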
Code Example #15
File: workflow.py Project: jbethune/gwf
    def glob(self, pathname, *args, **kwargs):
        """Return a list of paths matching `pathname`.

        This method is equivalent to :func:`python:glob.glob`, but searches with
        relative paths will be performed relative to the working directory
        of the workflow.
        """
        if not os.path.isabs(pathname):
            pathname = os.path.join(self.working_dir, pathname)
        return _glob(pathname, *args, **kwargs)
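
Hypothetical usage: with a working directory set on the workflow, relative patterns resolve against it rather than against the process CWD, while absolute patterns pass through unchanged.

from gwf import Workflow  # assumed import; see the gwf docs

workflow = Workflow(working_dir='/projects/demo')   # hypothetical instance
fastq_files = workflow.glob('data/*.fastq')         # matches /projects/demo/data/*.fastq
shared = workflow.glob('/scratch/shared/*.fastq')   # absolute pattern, used as-is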
Code Example #16
File: __init__.py Project: kartikye/PyGranSim
    def __init__(self):
        _dir, _ = __file__.split("__init__.py")
        pyFiles = _glob(_dir + "*.py")

        for file in pyFiles:
            _, fileName = file.split(_dir)

            if fileName.startswith("engine_"):
                engine, _ = fileName.split(".py")
                setattr(self, engine.split("engine_")[1], engine)
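
The same engine_*.py discovery convention, sketched with pathlib instead of string-splitting __file__ (an illustrative rewrite; the class name Engines is hypothetical):

from pathlib import Path

class Engines:
    def __init__(self):
        for py in Path(__file__).parent.glob('engine_*.py'):
            module = py.stem                         # e.g. 'engine_liggghts'
            setattr(self, module[len('engine_'):], module)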
Code Example #17
File: cdcs.py Project: tkphd/NexusLIMS
def upload_record_files(files_to_upload, progress=False):
    """
    Upload a list of .xml files (or all .xml files in the current directory)
    to the NexusLIMS CDCS instance using :py:meth:`upload_record_content`

    Parameters
    ----------
    files_to_upload : list or None
        The list of .xml files to upload. If ``None``, all .xml files in the
        current directory will be used instead.
    progress : bool
        Whether or not to show a progress bar for uploading

    Returns
    -------
    files_uploaded : list of str
        A list of the files that were successfully uploaded
    record_ids : list of str
        A list of the record id values (on the server) that were uploaded
    """
    if files_to_upload is None:
        _logger.info('Using all .xml files in this directory')
        files_to_upload = _glob('*.xml')
    else:
        _logger.info('Using .xml files from command line')

    _logger.info(f'Found {len(files_to_upload)} files to upload\n')
    if len(files_to_upload) == 0:
        msg = 'No .xml files were found (please specify on the ' \
              'command line, or run this script from a directory ' \
              'containing one or more .xml files)'
        _logger.error(msg)
        raise ValueError(msg)

    files_uploaded = []
    record_ids = []

    for f in _tqdm(files_to_upload) if progress else files_to_upload:
        with open(f, 'r') as xml_file:
            xml_content = xml_file.read()

        title = _os.path.basename(f)
        r, record_id = upload_record_content(xml_content, title)

        if r.status_code != 201:
            _logger.warning(f'Could not upload {title}')
            continue
        else:
            files_uploaded.append(f)
            record_ids.append(record_id)

    _logger.info(f'Successfully uploaded {len(files_uploaded)} of '
                 f'{len(files_to_upload)} files')

    return files_uploaded, record_ids
Code Example #18
File: render_segmented_views.py Project: won21kr/up
def sample_shots(  # pylint: disable=too-many-arguments
        input_folder,
        out_folder,
        num_shots_per_body=7,
        only_missing=False,
        num_threads=-1,
        use_light=False,
        factor=1.):
    """Sample body images with visibilities."""
    _LOGGER.info("Sampling 3D body shots.")
    if num_threads == -1:
        num_threads = available_cpu_count()
    else:
        assert num_threads > 0
    if not _path.exists(out_folder):
        _os.mkdir(out_folder)
    _np.random.seed(1)
    bodies = _glob(_path.join(input_folder, '*.pkl'))
    _LOGGER.info("%d bodies detected.", len(bodies))
    with _pymp.Parallel(num_threads, if_=num_threads > 1) as p:
        for body_idx in p.iterate(tqdm.tqdm(range(len(bodies)))):
            body_filename = bodies[body_idx]
            # the plain visualization is preferred; fall back to variants below
            vis_filename = body_filename + '_vis.png'
            if not _os.path.exists(vis_filename):
                vis_filename = body_filename + '_vis_0.png'
            if not _os.path.exists(vis_filename):
                # Try something else.
                vis_filename = body_filename[:-9]
            out_names = [
                _os.path.join(
                    out_folder,
                    _path.basename(body_filename) + '.' + str(map_idx) +
                    '.png') for map_idx in range(num_shots_per_body)
            ]
            if only_missing:
                all_exist = True
                for fname in out_names:
                    if not _path.exists(fname):
                        all_exist = False
                        break
                if all_exist:
                    continue
            vis_im = _cv2.imread(vis_filename)
            assert vis_im is not None, 'visualization not found: %s' % (
                vis_filename)
            renderings = render_body_impl(body_filename,
                                          [vis_im.shape[1], vis_im.shape[0]],
                                          num_shots_per_body,
                                          quiet=False,
                                          use_light=use_light,
                                          factor=factor)
            for map_idx, vmap in enumerate(renderings):
                _cv2.imwrite(out_names[map_idx], vmap[:, :, ::-1])
    _LOGGER.info("Done.")
Code Example #19
def get_files_by_pattern(pattern: str, path: _typing.Optional[str] = None):
    prev_path = None
    if path is not None:
        prev_path = get_cur_dir_path()
        _os.chdir(path)

    files = _glob(pattern)

    if prev_path is not None:
        _os.chdir(prev_path)
    return files
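
Since Python 3.10, glob accepts a root_dir keyword that gives the same relative results without touching the working directory; a sketch:

from glob import glob

def get_files_by_pattern(pattern, path=None):
    # root_dir searches inside `path` and returns names relative to it (3.10+)
    return glob(pattern) if path is None else glob(pattern, root_dir=path)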
Code Example #20
def show_id_results(idname):
    idlist = search_IDlist(idname)
    if len(idlist) == 0:
        return
    bdb = _glob('BeDB/*.bdb')
    for db in bdb:
        idb = _np.loadtxt(db, dtype=str, delimiter=', ')
        for i in range(len(idb)):
            if idb[i, 0] in idlist:
                print('{0}:\n{1}\n'.format(db, idb[i]))
    return
Code Example #21
    def __init__(self, img_dir, file_extension="jpg"):
        """
        Create a new image loader that reads all the images with specified file extension in a given directory.

        Args:
            img_dir: Directory to be searched.
            file_extension: Desired extension of files to be loaded.
        """
        local = locals().copy()
        paths = sorted(_glob(_os.path.join(img_dir, f"*.{file_extension}")))
        BasicImageFileLoader.__init__(self, paths)
        self.args = self._prepare_args(local)
Code Example #22
    def __init__(self, gt_dir, getter, file_extension="mat"):
        """
        Create a loader that searches for files with specified extension in a given directory and loads them.

        Args:
            gt_dir: Directory to be searched.
            getter: Callable that extracts the data of interest from each loaded file.
            file_extension: Desired file extension of Matlab files.
        """
        local = locals().copy()
        paths = sorted(_glob(_os.path.join(gt_dir, f"*.{file_extension}")))
        BasicGTPointsMatFileLoader.__init__(self, paths, getter)
        self.args = self._prepare_args(local)
Code Example #23
    def __init__(self, den_map_dir, file_extension="csv"):
        """
        Create a loader that searches for files with the given extension in the given directory and loads them.

        Args:
            den_map_dir: Directory to be searched.
            file_extension: Desired extension of files to be loaded.
        """
        local = locals().copy()
        paths = sorted(_glob(_os.path.join(den_map_dir, f"*.{file_extension}")))
        BasicDensityMapCSVFileLoader.__init__(self, paths)
        self.args = self._prepare_args(local)
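
The three loaders above share one idiom worth noting: sorting the glob results makes file order deterministic, since raw glob order is filesystem-dependent. Distilled into a hypothetical helper:

import os as _os
from glob import glob as _glob

def sorted_paths(directory, file_extension):
    # sorted() pins down an order that glob alone does not guarantee
    return sorted(_glob(_os.path.join(directory, f"*.{file_extension}")))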
Code Example #24
File: setup.py Project: valderaplets/Solaar
def _data_files():
    from os.path import dirname as _dirname

    yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')
    yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')
    yield 'share/icons/hicolor/scalable/apps', [
        'share/solaar/icons/solaar.svg'
    ]

    for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):
        yield _dirname(mo), [mo]

    yield 'share/applications', ['share/applications/solaar.desktop']
    yield 'share/solaar/udev-rules.d', [
        'rules.d/42-logitech-unify-permissions.rules'
    ]
    yield 'share/metainfo', [
        'share/solaar/io.github.pwr_solaar.solaar.metainfo.xml'
    ]

    del _dirname
Code Example #25
def readFiles(path, spectra, frmt="spc", **kwargs):
    files = _glob(f"{path}/*.{frmt}")
    for file_pathname in files:
        # If it's the same file change the configuration
        if (file_pathname == files[0]):
            _setPreValues()

        readFile(file_pathname, spectra, **kwargs)

        # Otherwise
        if (file_pathname == files[-1]):
            _removePreValues()
Code Example #26
File: resubmitFailedJobs.py Project: kdlong/UWVV
def resubmit(sample, dryrun=False, quiet=False):
    """
    Check the dag status file of the sample for failed jobs. If any, submit
    the rescue dag files to farmoutAnalysisJobs.
    Sample should be a path to the submit directory.
    """
    statusDag = '%s/dags/dag.status' % sample

    pattern = _reCompile(r'Nodes(?P<status>[A-Za-z]+) = (?P<nNodes>\d+)')

    results = {}
    try:
        with open(statusDag, 'r') as f:
            for line in f:
                # we only care about the summary block, which is first
                if ']' in line:
                    break

                match = pattern.search(line)
                if match:
                    results[match.group('status')] = int(match.group("nNodes"))
    except IOError:
        _log.error("Unable to find DAG status file for {} -- did it get submitted?".format(sample))
        raise

    try:
        total = results['Total']
        succeeded = results['Done']
        failed = results['Failed']
        inProgress = results['Pre'] + results['Post'] + results['Queued'] + \
            results['Ready']
        ignore = results['Unready'] # email job or something
    except KeyError:
        _log.error("DAG status file {} is broken somehow".format(statusDag))
        raise

    if failed or not quiet:
        _log.info('    ' + sample)
        _log.info("        Total: {0} Done: {1} Queued: {2} Failed: {3}".format(total-ignore,succeeded,inProgress,failed))


    if inProgress and (failed or not quiet):
        _log.info("        Not done, try again later")
    elif failed:
        _log.info("        Resubmitting...")
        rescue_dag = max(_glob('{}/dags/*dag.rescue[0-9][0-9][0-9]'.format(sample)))
        _log.info('        Rescue file: {0}'.format(rescue_dag))
        if not dryrun:
            cmd = 'farmoutAnalysisJobs --rescue-dag-file={}'.format(rescue_dag)
            _bash(cmd)

    return succeeded, failed, inProgress
Code Example #27
File: locking.py Project: noku5/inProject
def read_only_average_all(measurepath):
    averdict = {}
    for output in _glob(measurepath+'/*average*'):
        split = _os.path.split(output)[-1].split('.npy')[0].split('_')
        with open(output, 'rb') as f:  # _np.load needs a binary-mode handle
            myarr = _np.load(f)
        if '2x' in split[0]:
            print(split[0][2:])
            beta=2*float(split[0][2:])
        else:
            beta=float(split[0])
        averdict[(beta,split[-1])] = myarr
    return averdict
Code Example #28
File: api.py Project: katietz/conda-package-handling
def transmute(in_file, out_ext, out_folder=None, processes=None, **kw):
    if not out_folder:
        out_folder = _os.path.dirname(in_file) or _os.getcwd()

    flist = set(_glob(in_file))
    if in_file.endswith('.tar.bz2'):
        flist = flist - set(_glob(in_file.replace('.tar.bz2', out_ext)))
    elif in_file.endswith('.conda'):
        flist = flist - set(_glob(in_file.replace('.conda', out_ext)))

    failed_files = {}
    with _tqdm.tqdm(total=len(flist), leave=False) as t:
        with _Executor(max_workers=processes) as executor:
            convert_f = _functools.partial(_convert, out_ext=out_ext,
                                          out_folder=out_folder, **kw)
            for fn, out_fn, errors in executor.map(convert_f, flist):
                t.set_description("Converted: %s" % fn)
                t.update()
                if errors:
                    failed_files[fn] = errors
                    _rm_rf(out_fn)
    return failed_files
Code Example #29
File: i18n.py Project: 3v1n0/Solaar
def _find_locale_path(lc_domain):
	import os.path as _path

	import sys as _sys
	prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))
	src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))
	del _sys

	from glob import glob as _glob

	for location in prefix_share, src_share:
		mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))
		if mo_files:
			return _path.join(location, 'locale')
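
What the returned locale directory is typically used for (a sketch of standard gettext wiring, not necessarily Solaar's exact code):

import gettext

locale_path = _find_locale_path('solaar')
if locale_path:
    gettext.bindtextdomain('solaar', locale_path)  # point the domain at the .mo tree
    gettext.textdomain('solaar')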
Code Example #30
File: i18n.py Project: saue0/Solaar
def _find_locale_path(lc_domain):
    import os.path as _path

    import sys as _sys
    prefix_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..'))
    src_share = _path.normpath(_path.join(_path.realpath(_sys.path[0]), '..', 'share'))
    del _sys

    from glob import glob as _glob

    for location in prefix_share, src_share:
        mo_files = _glob(_path.join(location, 'locale', '*', 'LC_MESSAGES', lc_domain + '.mo'))
        if mo_files:
            return _path.join(location, 'locale')
Code Example #31
File: __init__.py Project: t20100/hdf5plugin
def _init_filters():
    """Initialise and register HDF5 filters with h5py

    Generator of tuples: (filename, library handle)
    """
    hdf5_version = _h5py.h5.get_libversion()

    for name, filter_id in FILTERS.items():
        # Skip filters that were not embedded
        if name not in config.embedded_filters:
            _logger.debug("%s filter not available in this build of hdf5plugin.", name)
            continue

        # Check if filter is already loaded (not on buggy HDF5 versions)
        if (1, 8, 20) <= hdf5_version < (1, 10) or hdf5_version >= (1, 10, 2):
            if _h5py.h5z.filter_avail(filter_id):
                _logger.info("%s filter already loaded, skip it.", name)
                continue

        # Load DLL
        filename = _glob(_os.path.join(
            PLUGIN_PATH, 'libh5' + name + '*' + config.filter_file_extension))
        if len(filename):
            filename = filename[0]
        else:
            _logger.error("Cannot initialize filter %s: File not found", name)
            continue
        try:
            lib = _ctypes.CDLL(filename)
        except OSError:
            _logger.error("Failed to load filter %s: %s", name, filename)
            continue

        if _sys.platform.startswith('win'):
            # Use register_filter function to register filter
            lib.register_filter.restype = _ctypes.c_int
            retval = lib.register_filter()
        else:
            # Use init_filter function to initialize DLL and register filter
            lib.init_filter.argtypes = [_ctypes.c_char_p]
            lib.init_filter.restype = _ctypes.c_int
            retval = lib.init_filter(
                bytes(_h5py.h5z.__file__, encoding='utf-8'))

        if retval < 0:
            _logger.error("Cannot initialize filter %s: %d", name, retval)
            continue

        yield filename, lib
Code Example #32
File: __init__.py Project: Andrew-AbiMansour/PyDEM
	def __init__(self):
		# Any engine module *must* follow the naming convention: engine_foo.py
		# If the engine is found, it will be linked via setattr to be imported
		# in DEM.py as PyGran.simulation.engine_foo. The engine is set by the user
		# as DEM.simulation.engines.foo

		_dir, _ = __file__.split('__init__.py')
		pyFiles = _glob(_dir + '*.py')

		for file in pyFiles:
			_, fileName = file.split(_dir)

			if fileName.startswith('engine_'):	
				engine, _ = fileName.split('.py')
				setattr(self, engine.split('engine_')[1], engine)
Code Example #33
def generate_submit_dirs(jobids):
    '''
    Make a list of submit directories from an input argument.
    If two or more forward slashes ('/') appear in a jobid, it is interpreted
    as a path to a submit directory (which is resubmitted) or directory
    containing submit directories, all of which are resubmitted.
    If there are no forward slashes, it is interpreted as a jobid, and its
    submit directories are found in /<submit_base>/<username>/jobid, where
    <submit_base> is '/data' on uwlogin and '/nfs_scratch' on login0*, and
    all subdirectories are resubmitted.
    If there is exactly one forward slash, it is considered a jobid/sample pair
    and the sample is resubmitted.
    Either way, UNIX-style wildcards are allowed.
    '''
    dirs = []

    if 'uwlogin' in _hostname():
        scratch = '/data'
    else:
        scratch = '/nfs_scratch'

    user = _env['USER']

    for job in jobids:
        if job.count('/') > 1:  # full path
            unixPath = job
        else:  # jobid or jobid/sample
            unixPath = _join(scratch, user, job)

        subdirs = _glob('%s/*' % unixPath)
        if any('dags' in s for s in subdirs):  # this is a sample
            dirs += _glob(unixPath)
        else:
            dirs += subdirs

    return dirs
Code Example #34
def run(input_files=[], target=None, force_conversion=True, verbose=True):
    if target is None:
        target = DEFAULT_TARGET
    for pathpat in input_files:
        for path in _glob(pathpat):
            try:
                process_file(path, target, force_conversion=force_conversion,
                             verbose=verbose)
            except ProcessingError as e:
                print(f"***{e}", file=_sys.stderr, flush=True)
                continue
            except:
                _print_exc()
                print(f"***failed to process: {path}", file=_sys.stderr, flush=True)
                continue
Code Example #35
File: resubmitFailedJobs.py Project: kdlong/UWVV
def generate_submit_dirs(jobids):
    '''
    Make a list of submit directories from an input argument.
    If two or more forward slashes ('/') appear in a jobid, it is interpreted
    as a path to a submit directory (which is resubmitted) or directory
    containing submit directories, all of which are resubmitted.
    If there are no forward slashes, it is interpreted as a jobid, and its
    submit directories are found in /<submit_base>/<username>/jobid, where
    <submit_base> is '/data' on uwlogin and '/nfs_scratch' on login0*, and
    all subdirectories are resubmitted.
    If there is exactly one forward slash, it is considered a jobid/sample pair
    and the sample is resubmitted.
    Either way, UNIX-style wildcards are allowed.
    '''
    dirs = []

    if 'uwlogin' in _hostname():
        scratch = '/data'
    else:
        scratch = '/nfs_scratch'

    user = _env['USER']

    for job in jobids:
        if job.count('/') > 1: # full path
            unixPath = job
        else: # jobid or jobid/sample
            unixPath = _join(scratch, user, job)

        subdirs = _glob('%s/*' % unixPath)
        if any('dags' in s for s in subdirs): # this is a sample
            dirs += _glob(unixPath)
        else:
            dirs += subdirs

    return dirs
Code Example #36
File: locking.py Project: noku5/inProject
def read_only_correlation_with_all(measurepath):
    averdict = {}
    for output in _glob(measurepath+'/*correlation*with_all*'):
        filename = _os.path.split(output)[-1].split('.npy')[0]
        splitting = filename.split('_correlation_')
        beta = float(splitting[0])
        splitting = splitting[-1].split('_with_all_')
        obsnameB = splitting[-1]
        splitting = splitting[0].split('_')
        obsnameA = splitting[0]
        posiA    = int(splitting[1])
        timeA    = float(splitting[2])
        with open(output, 'rb') as f:  # _np.load needs a binary-mode handle
            myarr = _np.load(f)
        averdict[(beta,obsnameA,posiA,timeA,obsnameB)] = myarr
    return averdict
Code Example #37
def _common_loader(names, folder, info):
    """
    Outsourced some common loader code.

    Parameters
    ----------
    names : list of str, None or 'all'
        See explicit loaders.
    folder : string
        Full path to folder from where to load the data.
    info : str
        Info for print.

    Returns
    -------
    data : dict
        See explicit loader returns.
    """
    files = sorted(_glob(_os.path.join(folder, "*")))
    # Materialize as a list: a map() iterator could not be returned, reused
    # and searched with .index() below.
    file_names = [_os.path.splitext(_os.path.basename(s))[0] for s in files]

    if names is None:
        return file_names
    else:
        if names == "all":
            names = file_names
        elif not isinstance(names, list):
            names = [names]

    data = {}
    for name in names:
        idx = file_names.index(name)
        fname = files[idx]
        print("Load {} for sample {} from:\n  {}".format(info, name, fname))
        ext = _os.path.splitext(fname)[1]
        if ext == ".npy":
            data[name] = _np.load(fname)
        elif ext == ".json":
            with open(fname) as json_file:
                data[name] = _json.load(json_file)
        else:
            raise ValueError(
                "Couldn't load unknown datatype: '{}'".format(ext))

    return data
Code Example #38
File: beatlas.py Project: danmoser/pyhdust
def createBAsed(fsedlist, xdrpath, lbdarr, param=True, savetxt=False,
    ignorelum=False, pol=False, saveextra=None):
    """ Create the BeAtlas SED XDR release.

    WARNING: The file names must be in this format: 
    `mod01_PLn3.5_sig0.00_h072_Rd000.0_Be_M14.60_ob1.45_H0.77_Z0.014_bE_Ell`

    | The file structure:
    | -n_quantities, n_lbd, n_models,
    | -n_qt_vals1, n_qt_vals2, .. n_qt_valsn
    | -quantities values =  M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
    | -(Unique) lbd array
    | -Loop:
    |   *model values
    |   *model SED

    | Definitions:
    | -photospheric models: sig0 = 0.00
    | -Parametric disk model default (`param` == True)
    | -VDD-ST models: n excluded (alpha and R0 fixed. Confirm?)
    | -The flux will be given in ergs/s/um2/um. If ignorelum==True, the usual
    |   F_lbda/F_bol unit will be given.

    Since the grid is not symmetric, there is no index to jump directly to the
    desired model. The suggestion is therefore to use the index matrix, or to
    read the file line by line until the model is found (if it exists).

    :Example: 

    def genxdr(xdrname='PL.xdr', param=True, pol=False):
        fs2l = glob('fullsed/*.sed2')
        print('# Using {0} as reference!'.format(fs2l[0]))
        lbdarr = hdt.readfullsed2(fs2l[0])
        lbdarr = lbdarr[0, :, 2]
        nm, listpar = bat.fsedList(fs2l)
        bat.createBAsed(fs2l, xdrname, lbdarr, param=param, savetxt=False, 
            pol=pol, saveextra=xdrname.replace('xdr', 'txt'))
        return

    genxdr(xdrname='Yudin_PL.xdr')
    """
    fsedlist.sort()
    nq = 9
    if not param:
        nq = 8
    nm, listpar = fsedList(fsedlist, param=param)
    header2 = []
    for vals in listpar:
        header2 += [len(vals)]
    nlb = len(lbdarr)
    header1 = [nq, nlb, nm]
    models = _np.zeros((nm, nlb))
    minfo = _np.zeros((nm, nq))
    k = 0
    iflx = 3
    if pol:
        iflx = 7
    for i in range(len(fsedlist)):
        mod = BAmod(fsedlist[i])
        # Select only `param` matching cases:
        if mod.param == param:
            sed2data = _hdt.readfullsed2(fsedlist[i])
            iL = 1.
            dist = 1/_np.sqrt(4 * _np.pi)
            if not ignorelum and not pol:
                j = fsedlist[i].find('fullsed_mod')
                # modn = fsedlist[i][j + 11:j + 13]
                modn = _re.match(r'.*mod(\d+)_', fsedlist[i]).group(1)
                log = fsedlist[i].replace('fullsed_mod', '../mod{0}/mod'.
                    format(modn)).replace('.sed2', '.log')
                if not _os.path.exists(log):
                    log = _glob(log.replace('../mod{0}/mod'.format(modn),
                    '../mod{0}/*mod'.format(modn)))
                    if len(log) >= 1:
                        log = log[0]
                    else:
                        raise LookupError('# No log file found for {0}'.
                            format(fsedlist[i]))
                f0 = open(log)
                lines = f0.readlines()
                f0.close()
                iL = _phc.fltTxtOccur('L =', lines) * _phc.Lsun.cgs
                if saveextra is not None:
                    R_pole = _phc.fltTxtOccur('R_pole =', lines)
                    Vrot = _phc.fltTxtOccur('Vrot', lines)
                    f0 = open(saveextra, 'a')
                    f0.writelines('{0}\t{1}\t{2}\n'.format(R_pole, Vrot, iL))
                    f0.close()
                dist = 10. * _phc.pc.cgs
            for j in range(header2[-1]):
                #  M, ob(W), Z, H, sig, Rd, h, *n*, cos(i).
                if param:
                    minfo[k * header2[-1] + j] = _np.array([ mod.M, mod.ob, 
                    mod.Z, mod.H, mod.sig, mod.Rd, mod.h, mod.n, 
                    listpar[-1][j] ]).astype(float)
                else:
                    minfo[k * header2[-1] + j] = _np.array([ mod.M, mod.ob, 
                    mod.Z, mod.H, mod.sig, mod.Rd, mod.h,
                    listpar[-1][j] ]).astype(float)
                if len(sed2data[j, :, 2]) != nlb:
                    models[k * header2[-1] + j] = _np.interp(lbdarr, 
                        sed2data[j, :, 2], sed2data[j, :, iflx]) * iL / 4 / \
                        _np.pi / dist**2
                else:
                    models[k * header2[-1] + j] = sed2data[j, :, iflx] * \
                        iL / 4 / _np.pi / dist**2
                if _np.sum(_np.isnan(models[k * header2[-1] + j])) > 0:
                    nans, x = _phc.nan_helper(models[k * header2[-1] + j]) 
                    models[k * header2[-1] + j][nans] = _np.interp(x(nans), 
                        x(~nans), models[k * header2[-1] + j][~nans])
            k += 1
    #
    f0 = open(xdrpath, 'wb')
    stfmt = '>{0}l'.format(3)
    f0.write(_struct.pack(stfmt, *header1))
    stfmt = '>{0}l'.format(nq)
    f0.write(_struct.pack(stfmt, *header2))
    for vals in listpar:
        stfmt = '>{0}f'.format(len(vals))
        f0.write(_struct.pack(stfmt, *_np.array(vals).astype(float)))
    stfmt = '>{0}f'.format(nlb)
    f0.write(_struct.pack(stfmt, *_np.array(lbdarr).astype(float)))
    for i in range(nm):
        stfmt = '>{0}f'.format(nq)
        f0.write(_struct.pack(stfmt, *minfo[i]))
        stfmt = '>{0}f'.format(nlb)
        f0.write(_struct.pack(stfmt, *_np.array(models[i]).astype(float)))
    f0.close()
    print('# XDR file {0} saved!'.format(xdrpath))

    if savetxt:
        f0 = open(xdrpath + '.txt', 'w')
        f0.writelines('{0} \n'.format(header1))
        f0.writelines('{0} \n'.format(header2))
        for vals in listpar:
            f0.writelines('{0} \n'.format(vals))
        f0.writelines('{0} \n'.format(lbdarr))
        for i in range(nm):
            f0.writelines('{0} \n'.format(minfo[i]))
            f0.writelines('{0} \n'.format(models[i]))
        f0.close()
        print('# TXT file {0} saved!'.format(xdrpath + '.txt'))
    return
Code Example #39
File: CollectData.py Project: pauruihu/baga
    def getFromPath(self, path_to_fastq):
        '''
        Given a path to pairs of fastq short read files, parse them ready for analysis 
        with the Bacteria and Archaea Genome (BAG) Analyser.
        '''    

        use_files = []
        if isinstance(path_to_fastq, str):
            use_paths = [path_to_fastq]
        else:
            use_paths = path_to_fastq

        for path in use_paths:
            if _os.path.isdir(path):
                print('Checking in {}'.format(path))
                # supplied with path to folder - need to check contents
                path1 = _os.path.sep.join([path, '*.fastq'])
                file_list = _glob(path1)
                path2 = _os.path.sep.join([path, '*.fq'])
                file_list += _glob(path2)
                file_list.sort()
                
                path3 = _os.path.sep.join([path, '*.fastq.gz'])
                file_list_gz = _glob(path3)
                path4 = _os.path.sep.join([path, '*.fq.gz'])
                file_list_gz += _glob(path4)
                file_list_gz.sort()
                
                if len(file_list) == 0 and len(file_list_gz) == 0:
                    print('WARNING: did not find any files at {}, {}, {}, nor {}'.format(path1, path2, path3, path4))
                    
                elif len(file_list) == 0 and len(file_list_gz) > 0:
                    print('Found {} total gzipped fastq files'.format(len(file_list_gz)))
                    use_files += file_list_gz
                    
                elif len(file_list) > 0 and len(file_list_gz) == 0:
                    print('Found {} total uncompressed fastq files'.format(len(file_list)))
                    use_files += file_list
                    
                else:
                    print('Found compressed and uncompressed fastq files.\nUsing {} gzipped files'.format(len(file_list_gz)))
                    # could select from a combination without doubling up . . .
                    # preference for uncompressed:
                    # use_files = sorted(list(set(file_list_gz) - set([f+'.gz' for f in file_list])) + file_list)
                    use_files += file_list_gz
            else:
                try:
                    test = open(path, 'r')
                    test.close()
                    # part of a list of reads or shell expansion
                    use_files += [path]
                except IOError:
                    print('WARNING: did not find any files at {}'.format(path))

        use_files.sort()


        # check potential non-pair files
        keep_use_files = []
        for f in use_files:
            if 'singletons' in f:
                print('Assuming {} is not part of a pair: ignoring'.format(f))
                continue
            
            keep_use_files += [f]

        use_files = keep_use_files

        # check filenames for inclusion of known baga downstream files
        keep_use_files = []
        for f in use_files:
            this_suffix = ''
            for suffix in ('_subsmp','_adpt','_qual')[::-1]:
                this_suffix = suffix + this_suffix
                for f2 in use_files:
                    if f2 != f:
                        if this_suffix in f2 and f2.replace(this_suffix,'') == f:
                            error = 'ERROR: {} appears to be a file from a previous baga run that included {}. Try being more specific with the supplied path expansion to read VCFs (i.e., without baga suffixes allowed, e.g. "reads/*_[12].*"), or remove files generated in previous analyses'.format(f2, f)
                            _sys.exit(error)
            
            keep_use_files += [f]

        use_files = keep_use_files

        if len(use_files) == 0:
            print('Error: could not find any files at {}'.format(', '.join(path_to_fastq)))
            print('Please check paths and try again . . .')
            _sys.exit(1)

        if len(use_files) % 2 != 0:
            print('Please supply an even number of paired files. Found {}:\n{}'.format(len(use_files), '\n'.join(use_files)))
            _sys.exit(1)

        error_explanation = 'Problem parsing read files: ensure pairs are numbered '\
        '1 and 2\n'\
        'BAGA looks for a "1" or "2" labelling in read pair filenames and takes '\
        'the last digit in the filename (excluding the set number if present e.g., '\
        '_001.fastq).\n E.g. *R1.fastq.gz and *R2.fastq.gz would be OK, 1_thesereads1'\
        '.fastq.gz and 2_thesereads1.fastq.gz would not. (Leading digits OK for sample '\
        'numbering: 1_* 2_* 3_* etc but must each have 1 or 2 elsewhere in file '\
        'name)\n . . else please report as bug'

        # Illumina filename scheme:
        # <sample name>_<barcode sequence>_L<lane (0-padded to 3 digits)>_R<read number>_<set number (0-padded to 3 digits>.fastq.gz
        # http://support.illumina.com/help/SequencingAnalysisWorkflow/Content/Vault/Informatics/Sequencing_Analysis/CASAVA/swSEQ_mCA_FASTQFiles.htm

        # match pairs
        filepairs = {}
        for path in use_files:
            path_bits = path.split(_os.path.sep)
            filename_ext = path_bits[-1]
            # for now dump set number (if present? not always present?)
            # really need to deal with multiple sets and all likely versions of CASAVA filename schemes
            # _<set number (0-padded to 3 digits>.f
            use_filename_ext = _re.sub(r'(_[0-9]{3})(\.[fF])', r'\2', filename_ext)
            filename, ext = _re.findall(r'(.+)(\.fastq\.gz|\.fastq|\.fq\.gz|\.fq)$', use_filename_ext)[0]
            ones_and_twos = list(_re.finditer('[12]', filename))
            assert len(ones_and_twos) > 0, '{}. Problem filename: {}'.format(
                                                                        error_explanation, 
                                                                        filename)
            # make a name for each pair from the consistent parts of the file name
            # joining with space caused problems when incorporating into a filename downstream
            # and joining with underscore risks introducing double underscore which would cause splitting on __ later to fail
            s,e = ones_and_twos[-1].span()
            pairmember = ones_and_twos[-1].group()
            # omit the 1 or 2 relevant to pairing from the name
            part1,part2 = filename[:s],filename[e:]
            if len(part1) and len(part2):
                pairname = '-'.join([part1,part2])
            elif len(part1) and not len(part2):
                pairname = part1
            else:
                pairname = part2
            for known_suffix in ['.fastq.gz','.fq.gz','.fastq','.fq']:
                thismatch = _re.findall('('+known_suffix+')$', pairname)
                if thismatch:
                    pairnamenew = _re.sub('('+thismatch[0]+')$', '', pairname)
                    #print('Removed {} from {} == {}'.format(thismatch, pairname, pairnamenew))
                    pairname = pairnamenew.rstrip(' ')
                    continue
            
            # store with keys 1 or 2
            try:
                filepairs[pairname][int(pairmember)] = path
            except KeyError:
                filepairs[pairname] = {int(pairmember): path}

        # check pairs are accessible
        checked_read_files = {}
        for pairname,files in filepairs.items():
            assert len(files) == 2, '{}. Problem filename(s): {}'.format(
                    error_explanation, ', '.join(files.values()))
            
            print('Collected pair "{}": {} and {}'.format(
                    pairname, files[1], files[2]))
            
            try:
                if _os.path.getsize(files[1]) == 0:
                    print('File access fail (empty file): {}'.format(files[1]))
                    _sys.exit(1)
            except OSError:
                print('File access fail: {}'.format(files[1]))
                _sys.exit(1)
            
            try:
                if _os.path.getsize(files[2]) == 0:
                    print('File access fail (empty file): {}'.format(files[2]))
                    _sys.exit(1)
            except OSError:
                print('File access fail: {}'.format(files[2]))
                _sys.exit(1)
            
            checked_read_files[pairname] = files

        print('Total read pairs: {}'.format(len(checked_read_files)))

        self.read_files = checked_read_files
Code Example #40
File: setup.py Project: Lekensteyn/Solaar
		author='Daniel Pavel',
		author_email='*****@*****.**',
		license='GPLv2',
		url='http://pwr.github.io/Solaar/',
		classifiers=[
			'Development Status :: 4 - Beta',
			'Environment :: X11 Applications :: GTK',
			'Environment :: Console',
			'Intended Audience :: End Users/Desktop',
			'License :: DFSG approved',
			'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
			'Natural Language :: English',
			'Programming Language :: Python :: 2.7',
			'Programming Language :: Python :: 3.2',
			'Operating System :: POSIX :: Linux',
			'Topic :: Utilities',
			],

		platforms=['linux'],

		# sudo apt install python-gi python3-gi \
		#        gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-appindicator3-0.1
		# os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],

		install_requires=['pyudev (>= 0.13)', ],
		package_dir={'': 'lib'},
		packages=['hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
		data_files=list(_data_files()),
		scripts=_glob('bin/*'),
	)
Code Example #41
def get_valid_filename(pathstr,experiment,verbose=False):
	# File doesn't exist
	# This case must terminate in us trying again.
	if verbose:
		print 'Input is: pathstr={}, experiment={}'.format(pathstr,experiment)
	if not _path.exists(pathstr):
		if verbose:
			print 'Path does not exist'
		# If file didn't exist, maybe it needs a prefix
		# Append prefix
		prefix = get_remoteprefix()
		pathstr = _path.join(prefix,pathstr)
		# print prefix

		# If it didn't work, we need to find the file somehow
		# The prefix may be wrong, the file may not be where we thought,
		# or the drive might not have been mounted and we can mount and
		# try again.
		if not _path.exists(pathstr):
			if verbose:
				print "Adding prefix didn't help, new pathstr={}".format(pathstr)
			title = 'File not found!'
			maintext="File doesn't exist:\n\n{}".format(pathstr)
			subtext='(External drive may not be mounted.)'
			buttons=_np.array([
				_mtqt.Button('Try again',_QtGui.QMessageBox.AcceptRole),
				_mtqt.Button('Change prefix',_QtGui.QMessageBox.ActionRole,default=True),
				_mtqt.Button('Locate file',_QtGui.QMessageBox.ActionRole),
				_QtGui.QMessageBox.Abort
				])

			buttonbox=_mtqt.ButtonMsg(maintext,title=title,infotext=subtext,buttons=buttons)
			clicked = buttonbox.clickedArray

			# Change prefix
			if clicked[0]:
				if verbose:
					print 'Trying again'
			elif clicked[1]:
				if verbose: print 'Changing prefix'
				prefix = _mt.E200.choose_remoteprefix(pathstart=prefix)
			# Locate file
			elif clicked[2]:
				if verbose: print 'Locating file'
				# Try to open the path by chopping of the end
				while not _path.isdir(pathstr):
					lastpath=pathstr
					pathstr=_path.dirname(pathstr)
					# If you've chopped all you can chop, look in the base data folder
					if lastpath==pathstr:
						pathstr=_get_datapath()
						break
				pathstr = _mtqt.getOpenFileName(directory=pathstr,caption='Locate file',filter='Matlab Files (*.mat)')
			elif clicked[3]:
				if verbose: print 'Aborting'
				raise IOError('Aborting')
			else:
				print 'Thar be dragons!'
		if verbose: print 'Trying new pathstr={}'.format(pathstr)
		return get_valid_filename(pathstr,experiment)
	elif _path.isdir(pathstr):
		if verbose: print 'Path is a directory'
		patterns=['*scan_info.mat','*filenames.mat','{}_*.mat'.format(experiment)]
		files = _np.array([])
		for i,val in enumerate(patterns):
			gfiles = _glob(_path.join(pathstr,val))
			files = _np.append(files,gfiles)
		if verbose: print 'Files match patterns\npatterns: {}\nfiles: {}'.format(patterns,files)

		if files.size == 1:
			if verbose: print 'Found one file.'
			pathstr=files[0]
		elif files.size == 0:
			if verbose: print 'Found no files.'
			title = 'File Not Found'
			maintext = 'No valid file found in folder.'
			subtext = 'Locate manually?'
			buttons = _np.array([
				_QtGui.QMessageBox.Ok,
				_QtGui.QMessageBox.Abort
				],dtype=object)
			buttonbox = _mtqt.ButtonMsg(maintext,title=title,infotext=subtext,buttons=buttons)
			clicked=buttonbox.clickedArray

			if clicked[0]:
				if verbose: print 'Trying to locate manually.'
				pathstr = _mtqt.getOpenFileName(directory=pathstr,caption='Locate file',filter='Matlab Files (*.mat)')
			elif clicked[1]:
				if verbose: print 'Aborting.'
				raise IOError('Aborting')
		if verbose: print 'Trying new pathstr={}'.format(pathstr)
		return get_valid_filename(pathstr,experiment)

	elif _path.isfile(pathstr):
		if verbose: print 'Path is a file.'
		# Check for string endings
		strends = _np.array(['scan_info.mat$','filenames.mat$','{}_[0-9]*\.mat'.format(experiment)])
		strmatch = _np.empty(strends.size,dtype=object)
		for i,val in enumerate(strends):
			strmatch[i] = _re.search(val,pathstr)

		# True for matches
		strmatch_bool = _np.not_equal(strmatch,None)

		# Warn if there are no matches
		if not _np.any(strmatch_bool):
			if verbose: print "File doesn't match patterns={}".format(strends)
			_warnings.warn('Neither a 2014+ data file, scan_info.mat file, nor a filenames.mat file: {}'.format(pathstr))

		if strmatch_bool[2]:
			data_source_type='2014'
		else:
			data_source_type='2013'
		if verbose: print 'data_source_type is {}'.format(data_source_type)

		# Check that this is a data file
		# Must have /nas/nas-li20-pm01
		match = _re.search('/nas/nas-li20-pm0',pathstr)
		if match is None:
			raise IOError('Does not point to a file with /nas/nas-li20-pm01 or /nas/nas-li20-pm00 in its path.')

		# Get the directories
		dirstr   = _path.dirname(pathstr)
		filename = _path.basename(pathstr)
		
		dir_beg = dirstr[:match.start()]
		dir_mid = dirstr[match.start()+1:]

		output = (dir_beg,dir_mid,filename,data_source_type)
		if verbose: print 'Output is: {}'.format(output)
		return output
Code Example #42
File: __init__.py Project: mingxuli/datatools
import os.path as _path, re as _re, sys as _sys, imp as _imp, warnings as _warn
from glob import glob as _glob
import __fields__
import utilities

#Private
__formats__={}

#Public
format_regex=[]
'''List of file path regular expressions defined by format drivers'''
fields=__fields__.fields
'''List of metadata fields that can be populated'''

debug=False
#Dynamically load all formats
for _lib in _glob(_path.join(__path__[0],'[a-z]*.py')):
    _lib=_path.splitext(_path.basename(_lib))[0]
    try:
        #import custom format and add to the list of formats
        __formats__[_lib]=__import__('%s.%s'%(__name__,_lib), fromlist=[__name__])
        
        #append module _format_regex & fields to lists
        format_regex.extend([r for r in __formats__[_lib].format_regex if not r in format_regex])
    #except:pass 
    except:
        _warn.warn('Unable to import %s\n%s' % (_lib, utilities.ExceptionInfo()))
 
#import generic formats (eg. GeoTiff, JP2, etc...)
import __default__
#append module _format_regex to list of format regexes
format_regex.extend([_r for _r in __default__.format_regex if not _r in format_regex])
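
A modern sketch of the same "import every driver module in this package" idea using importlib and pathlib (illustrative only; it assumes it runs inside a package __init__ where __path__ and __name__ are defined):

import importlib
import warnings
from pathlib import Path

__formats__ = {}
for _py in sorted(Path(__path__[0]).glob('[a-z]*.py')):
    try:
        __formats__[_py.stem] = importlib.import_module('%s.%s' % (__name__, _py.stem))
    except Exception as _exc:
        warnings.warn('Unable to import %s\n%s' % (_py.stem, _exc))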
Code Example #43
File: __init__.py Project: simonaoliver/metageta
# from metageta import

# ++++++++++++++++++++++++
# Public vars
# ++++++++++++++++++++++++
transforms = {}
"""Pre-defined XSL transforms"""

xslfiles = {}
"""Pre-defined XSL files"""

# ++++++++++++++++++++++++
# Initialise pub/priv properties
# ++++++++++++++++++++++++
# load known XSL transforms
for _f in _glob(_path.join(__path__[0], "*.xml")):
    # _xml=_Parse('file:%s'%_f)
    _xml = _etree.parse(_f)
    _name = str(_xml.xpath("string(/stylesheet/@name)"))
    _file = str(_xml.xpath("string(/stylesheet/@file)"))
    _desc = str(_xml.xpath("string(/stylesheet/@description)"))
    xslfiles[_name] = _file
    transforms[_name] = _desc

# Load config
config = _etree.parse("%s/config/config.xml" % _mpath[0])
categories = {
    "default": config.xpath("string(/config/geonetwork/categories/@default)"),
    "categories": config.xpath("/config/geonetwork/categories/category/@name"),
}
if not categories["default"] and not categories["categories"]:
Code Example #44
#from metageta import

#++++++++++++++++++++++++
#Public vars
#++++++++++++++++++++++++
transforms={}
'''Pre-defined XSL transforms'''

xslfiles={}
'''Pre-defined XSL files'''

#++++++++++++++++++++++++
#Initialise pub/priv properties
#++++++++++++++++++++++++
#load known XSL transforms
for _f in _glob(_path.join(__path__[0],'*.xml')):
    #_xml=_Parse('file:%s'%_f)
    _xml=_etree.parse(_f)
    _name = str(_xml.xpath('string(/stylesheet/@name)'))
    _file = str(_xml.xpath('string(/stylesheet/@file)'))
    _desc = str(_xml.xpath('string(/stylesheet/@description)'))
    xslfiles[_name]=_file
    transforms[_name]=_desc

#Load config
config=_etree.parse('%s/config/config.xml'%_mpath[0])
categories={'default':config.xpath('string(/config/geonetwork/categories/@default)'),
             'categories':config.xpath('/config/geonetwork/categories/category/@name')
             }
if not categories['default'] and not categories['categories']:
    categories = {'default': 'datasets', 'categories': ['datasets']}
Code Example #45
File: __init__.py Project: pegaucher/metageta
import os.path as _path, re as _re, sys as _sys, imp as _imp, warnings as _warn
from glob import glob as _glob
import __fields__
from metageta import utilities

#Private
__formats__={}

#Public
format_regex=[]
'''List of file path regular expressions defined by format drivers'''
fields=__fields__.fields
'''List of metadata fields that can be populated'''

debug=False
#Dynamically load all formats
for _lib in sorted(_glob(_path.join(__path__[0],'[a-z]*.py'))):
    _lib=_path.splitext(_path.basename(_lib))[0]
    try:
        #import custom format and add to the list of formats
        __formats__[_lib]=__import__('%s.%s'%(__name__,_lib), fromlist=[__name__])

        #append module _format_regex & fields to lists
        format_regex.extend([r for r in __formats__[_lib].format_regex if not r in format_regex])
    #except:pass
    except:
        _warn.showwarning=_warn._show_warning #Fix Ft overwrite
        _warn.warn('Unable to import %s\n%s' % (_lib, utilities.ExceptionInfo()))

#import generic formats (eg. GeoTiff, JP2, etc...)
import __default__
#append module _format_regex to list of format regexes
Code Example #46
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, unicode_literals

import ConfigParser
import datetime
from glob import glob as _glob
from os.path import realpath
from StringIO import StringIO

from .utils import storage, _decode

glob = lambda f: map(realpath, _glob(f))


class Setting(object):
    def __init__(self, settings_file='settings.ini'):
        # 读取配置文件
        self.conf = ConfigParser.RawConfigParser()
        with open(settings_file, 'rb') as f:
            content = _decode(f.read()).encode('utf8')
            self.conf.readfp(StringIO(content))

    def _get_option(self, key, default=None):
        try:
            value = self.conf.get('General', key)
            return _decode(value)
        except ConfigParser.Error:
            if default is not None:
                return default