Example #1
    def parse_command_for_scantree(self, cmd):
        """Given the user choice for --ignore get the corresponding value"""

        parser = argparse.ArgumentParser(description="parse scantree options")
        parser.add_argument('--ignore', type=str, default=None)

        # the first name on the list is 'scantree.py'
        args = parser.parse_args(
            cmd[1:] if cmd and cmd[0] == 'scantree.py' else cmd)
        if args.ignore is None:
            ignore = self.vim.options['wildignore']
        else:
            ignore = args.ignore

        current_folder, _ = path.split(__file__)
        scantree_py = normpath(join(current_folder,
                                    pardir, pardir, 'scantree.py'))

        if shutil.which('python3') is not None:
            python_exe = 'python3'
        else:
            python_exe = 'python'
        if shutil.which(python_exe) is None:
            raise FileNotFoundError("Coudn't find {} executable!".format(
                                    python_exe))

        return [python_exe, scantree_py, '--ignore', ignore,
                '--path', ':directory']
Example #2
def present_string_diff(a, di, path):
    "Pretty-print a nbdime diff."
    header = ["patch {}:".format(path)]

    if _base64.match(a):
        return header + ['<base64 data changed>']

    b = patch(a, di)
    td = tempfile.mkdtemp()
    cmd = None
    try:
        with open(os.path.join(td, 'before'), 'w') as f:
            f.write(a)
        with open(os.path.join(td, 'after'), 'w') as f:
            f.write(b)
        if which('git'):
            cmd = _git_diff_print_cmd.split()
            heading_lines = 4
        elif which('diff'):
            cmd = ['diff']
            heading_lines = 0
        else:
            dif = ''.join(unified_diff(a.splitlines(keepends=True),
                                       b.splitlines(keepends=True)))
            heading_lines = 2

        if cmd is not None:
            p = Popen(cmd + ['before', 'after'], cwd=td, stdout=PIPE)
            out, _ = p.communicate()
            dif = out.decode('utf8')

    finally:
        shutil.rmtree(td)
    return header + dif.splitlines()[heading_lines:]
Example #3
 def check_pkgconfig(self):
     evar = 'PKG_CONFIG'
     if evar in os.environ:
         pkgbin = os.environ[evar].strip()
     else:
         pkgbin = 'pkg-config'
     try:
         p, out = Popen_safe([pkgbin, '--version'])[0:2]
         if p.returncode != 0:
             # Set to False instead of None to signify that we've already
             # searched for it and not found it
             pkgbin = False
     except (FileNotFoundError, PermissionError):
         pkgbin = False
     if pkgbin and not os.path.isabs(pkgbin) and shutil.which(pkgbin):
         # Sometimes shutil.which fails where Popen succeeds, so
         # only find the abs path if it can be found by shutil.which
         pkgbin = shutil.which(pkgbin)
     if not self.silent:
         if pkgbin:
             mlog.log('Found pkg-config:', mlog.bold(pkgbin),
                      '(%s)' % out.strip())
         else:
             mlog.log('Found pkg-config:', mlog.red('NO'))
     return pkgbin
Example #4
def start_daemon():
    """ Starts the zerotier daemon if it is installed on your system

    Returns:
       str: output of the subprocess call to check_output

    Raises:
        EnvironmentError: if your system is not yet supported.
        CalledProcessError: if the command to start the daemon failed.
    """
    if not is_installed():
        logger.info(uxstring.UxString.install_zerotier)
        sys.exit(1)

    if platform.system() in "Linux":
        if shutil.which("systemctl"):
            cmd = ('sudo', 'systemctl', 'start', 'zerotier-one.service')
        elif shutil.which("service"):
            cmd = ('sudo', 'service', 'zerotier-one', 'start')
        else:
            raise EnvironmentError("Do not know how to start zerotier deamon on your system")
    elif platform.system() in "Darwin":
        # ZT post install for Macs already load the daemon
        return ""
    else:
        raise EnvironmentError("Do not know how to start zerotier deamon on your system")

    return subprocess.check_output(cmd)
Example #5
    def build_dist(self):
        for sdir in self.staging_dirs:
            if os.path.exists(sdir):
                shutil.rmtree(sdir)
        main_stage, ninja_stage = self.staging_dirs
        modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob('mesonbuild/modules/*')]
        modules = ['mesonbuild.modules.' + x for x in modules if not x.startswith('_')]
        modules += ['distutils.version']
        modulestr = ','.join(modules)
        python = shutil.which('python')
        cxfreeze = os.path.join(os.path.dirname(python), "Scripts", "cxfreeze")
        if not os.path.isfile(cxfreeze):
            print("ERROR: This script requires cx_freeze module")
            sys.exit(1)

        subprocess.check_call([python,
                               cxfreeze,
                               '--target-dir',
                               main_stage,
                               '--include-modules',
                               modulestr,
                               'meson.py'])
        if not os.path.exists(os.path.join(main_stage, 'meson.exe')):
            sys.exit('Meson exe missing from staging dir.')
        os.mkdir(ninja_stage)
        shutil.copy(shutil.which('ninja'), ninja_stage)
        if not os.path.exists(os.path.join(ninja_stage, 'ninja.exe')):
            sys.exit('Ninja exe missing from staging dir.')
Example #6
def get_docker_path():
    global DOCKER
    if DOCKER is None:
        DOCKER = shutil.which("docker.io")
        if not DOCKER:
            DOCKER = shutil.which("docker")
    return DOCKER
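
The lookup above is a common "first name that resolves" fallback chain; a minimal generic sketch of the same pattern, with illustrative names not taken from the source:

import shutil

def first_on_path(*names):
    # Return the resolved path of the first candidate found on PATH, else None.
    for name in names:
        path = shutil.which(name)
        if path:
            return path
    return None

# Equivalent to the lookup above: first_on_path("docker.io", "docker")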
Example #7
    def detect_d_compiler(self, want_cross):
        is_cross = False
        # Search for a D compiler.
        # We prefer LDC over GDC unless overridden with the DC
        # environment variable because LDC has a much more
        # up-to-date language version at the time of writing (2016).
        if 'DC' in os.environ:
            exelist = shlex.split(os.environ['DC'])
        elif self.is_cross_build() and want_cross:
            exelist = mesonlib.stringlistify(self.cross_info.config['binaries']['d'])
            is_cross = True
        elif shutil.which("ldc2"):
            exelist = ['ldc2']
        elif shutil.which("ldc"):
            exelist = ['ldc']
        elif shutil.which("gdc"):
            exelist = ['gdc']
        elif shutil.which("dmd"):
            exelist = ['dmd']
        else:
            raise EnvironmentException('Could not find any supported D compiler.')

        try:
            p, out = Popen_safe(exelist + ['--version'])[0:2]
        except OSError:
            raise EnvironmentException('Could not execute D compiler "%s"' % ' '.join(exelist))
        version = search_version(out)
        full_version = out.split('\n', 1)[0]
        if 'LLVM D compiler' in out:
            return compilers.LLVMDCompiler(exelist, version, is_cross, full_version=full_version)
        elif 'gdc' in out:
            return compilers.GnuDCompiler(exelist, version, is_cross, full_version=full_version)
        elif 'The D Language Foundation' in out or 'Digital Mars' in out:
            return compilers.DmdDCompiler(exelist, version, is_cross, full_version=full_version)
        raise EnvironmentException('Unknown compiler "' + ' '.join(exelist) + '"')
Example #8
def detect_tests_to_run():
    # Name, subdirectory, skip condition.
    all_tests = [
        ('common', 'common', False),
        ('failing-meson', 'failing', False),
        ('failing-build', 'failing build', False),
        ('failing-tests', 'failing tests', False),

        ('platform-osx', 'osx', not mesonlib.is_osx()),
        ('platform-windows', 'windows', not mesonlib.is_windows() and not mesonlib.is_cygwin()),
        ('platform-linux', 'linuxlike', mesonlib.is_osx() or mesonlib.is_windows()),

        ('java', 'java', backend is not Backend.ninja or mesonlib.is_osx() or not have_java()),
        ('C#', 'csharp', skip_csharp(backend)),
        ('vala', 'vala', backend is not Backend.ninja or not shutil.which('valac')),
        ('rust', 'rust', backend is not Backend.ninja or not shutil.which('rustc')),
        ('d', 'd', backend is not Backend.ninja or not have_d_compiler()),
        ('objective c', 'objc', backend not in (Backend.ninja, Backend.xcode) or mesonlib.is_windows() or not have_objc_compiler()),
        ('objective c++', 'objcpp', backend not in (Backend.ninja, Backend.xcode) or mesonlib.is_windows() or not have_objcpp_compiler()),
        ('fortran', 'fortran', backend is not Backend.ninja or not shutil.which('gfortran')),
        ('swift', 'swift', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('swiftc')),
        ('python3', 'python3', backend is not Backend.ninja),
        ('fpga', 'fpga', shutil.which('yosys') is None),
        ('frameworks', 'frameworks', False),
    ]
    gathered_tests = [(name, gather_tests('test cases/' + subdir), skip) for name, subdir, skip in all_tests]
    return gathered_tests
Example #9
 def __init__(self, environment, kwargs):
     Dependency.__init__(self)
     self.name = 'qt5'
     self.root = '/usr'
     mods = kwargs.get('modules', [])
     self.cargs = []
     self.largs = []
     self.is_found = False
     if isinstance(mods, str):
         mods = [mods]
     if len(mods) == 0:
         raise DependencyException('No Qt5 modules specified.')
     type_text = 'native'
     if environment.is_cross_build() and kwargs.get('native', False):
         type_text = 'cross'
         self.pkgconfig_detect(mods, environment, kwargs)
     elif not environment.is_cross_build() and shutil.which('pkg-config') is not None:
         self.pkgconfig_detect(mods, environment, kwargs)
     elif shutil.which('qmake') is not None:
         self.qmake_detect(mods, kwargs)
     else:
         self.version = 'none'
     if not self.is_found:
         mlog.log('Qt5 %s dependency found: ' % type_text, mlog.red('NO'))
     else:
         mlog.log('Qt5 %s dependency found: ' % type_text, mlog.green('YES'))
Example #10
def _find_mac(command, args, hw_identifiers, get_index):
    import os, shutil
    executable = shutil.which(command)
    if executable is None:
        path = os.pathsep.join(('/sbin', '/usr/sbin'))
        executable = shutil.which(command, path=path)
        if executable is None:
            return None

    try:
        # LC_ALL to ensure English output, 2>/dev/null to
        # prevent output on stderr
        cmd = 'LC_ALL=C %s %s 2>/dev/null' % (executable, args)
        with os.popen(cmd) as pipe:
            for line in pipe:
                words = line.lower().split()
                for i in range(len(words)):
                    if words[i] in hw_identifiers:
                        try:
                            return int(
                                words[get_index(i)].replace(':', ''), 16)
                        except (ValueError, IndexError):
                            # Virtual interfaces, such as those provided by
                            # VPNs, do not have a colon-delimited MAC address
                            # as expected, but a 16-byte HWAddr separated by
                            # dashes. These should be ignored in favor of a
                            # real MAC address
                            pass
    except IOError:
        pass
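
A hedged usage sketch for the helper above, modeled on how CPython's uuid module drives it; the exact arguments are an assumption based on that usage:

# Hypothetical call: scan `ifconfig` output for the token that follows
# an 'ether' or 'hwaddr' identifier and print it as 12 hex digits.
mac = _find_mac('ifconfig', '', ['ether', 'hwaddr'], lambda i: i + 1)
if mac is not None:
    print('%012x' % mac)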
Example #11
def detect_tests_to_run():
    # Name, subdirectory, skip condition.
    all_tests = [
        ('common', 'common', False),
        ('failing-meson', 'failing', False),
        ('failing-build', 'failing build', False),
        ('failing-tests', 'failing tests', False),

        ('platform-osx', 'osx', not mesonlib.is_osx()),
        ('platform-windows', 'windows', not mesonlib.is_windows() and not mesonlib.is_cygwin()),
        ('platform-linux', 'linuxlike', mesonlib.is_osx() or mesonlib.is_windows()),

        ('java', 'java', backend is not Backend.ninja or mesonlib.is_osx() or not have_java()),
        ('C#', 'csharp', backend is not Backend.ninja or not shutil.which('mcs')),
        ('vala', 'vala', backend is not Backend.ninja or not shutil.which('valac')),
        ('rust', 'rust', backend is not Backend.ninja or not shutil.which('rustc')),
        ('d', 'd', backend is not Backend.ninja or not have_d_compiler()),
        ('objective c', 'objc', backend not in (Backend.ninja, Backend.xcode) or mesonlib.is_windows() or not have_objc_compiler()),
        ('fortran', 'fortran', backend is not Backend.ninja or not shutil.which('gfortran')),
        ('swift', 'swift', backend not in (Backend.ninja, Backend.xcode) or not shutil.which('swiftc')),
        ('python3', 'python3', backend is not Backend.ninja),
    ]
    gathered_tests = [(name, gather_tests('test cases/' + subdir), skip) for name, subdir, skip in all_tests]
    if mesonlib.is_windows():
        # TODO: Set BOOST_ROOT in .appveyor.yml
        gathered_tests += [('framework', ['test cases/frameworks/1 boost'], 'BOOST_ROOT' not in os.environ)]
    elif mesonlib.is_osx() or mesonlib.is_cygwin():
        gathered_tests += [('framework', gather_tests('test cases/frameworks'), True)]
    else:
        gathered_tests += [('framework', gather_tests('test cases/frameworks'), False)]
    return gathered_tests
Example #12
    def findVmrun():
        """
        Finds the vmrun path.

        :return: path to vmrun
        """

        vmrun_path = None
        if sys.platform.startswith("win"):
            vmrun_path = shutil.which("vmrun")
            if vmrun_path is None:
                # look for vmrun.exe using the VMware Workstation directory listed in the registry
                vmrun_path = VMware._findVmrunRegistry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
                if vmrun_path is None:
                    # look for vmrun.exe using the VIX directory listed in the registry
                    vmrun_path = VMware._findVmrunRegistry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware VIX")
        elif sys.platform.startswith("darwin"):
            vmware_fusion_vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
            if os.path.exists(vmware_fusion_vmrun_path):
                vmrun_path = vmware_fusion_vmrun_path
        else:
            vmrun_path = shutil.which("vmrun")

        if vmrun_path is None:
            return ""
        return os.path.abspath(vmrun_path)
Example #13
def diff_render(a, b):
    if use_git and which('git'):
        return diff_render_with_git(a, b)
    elif use_diff and which('diff'):
        return diff_render_with_diff(a, b)
    else:
        return diff_render_with_difflib(a, b)
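
The difflib fallback itself is not shown; a minimal sketch of what diff_render_with_difflib could look like (an assumption — the real helper may differ):

import difflib

def diff_render_with_difflib(a, b):
    # Pure-Python fallback used when neither git nor diff is on PATH.
    lines = difflib.unified_diff(a.splitlines(), b.splitlines(),
                                 fromfile='before', tofile='after', lineterm='')
    return '\n'.join(lines)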
Example #14
def init_venv(name, location=None, py_3=True, path_to_rqes='',
              options=None, **kwargs):
    """Initialize a virtualenv with the given name on the location if given,
    if not, location is $WORKON_HOME env variable.
    """
    command_line = 'virtualenv --python={pyversion}'
    if py_3:
        command_line = command_line.format(pyversion=which('python3'))
    else:
        command_line = command_line.format(pyversion=which('python2.7'))
    try:
        if location and os.path.isdir(location):
            command_line += ' ' + os.path.join(location, name)
        else:
            location = ' ' + os.path.join(os.getenv('WORKON_HOME'), name)
            command_line += location
    except TypeError:
        raise Exception("Location or WORKON_HOME env variable does not exists")

    if options:
        for option in options.split(','):
            if option in available_options:
                command_line += ' --' + option
            else:
                raise SchemeConfigWrong("options for virtualenv are wrongly defined")

    call(command_line, shell=True)
    if path_to_rqes:
        try:
            venv = os.path.join(location, 'bin/activate_this.py').strip()
            with open(venv) as activate_file:
                exec(activate_file.read())
                call(['pip', 'install', '-r', path_to_rqes])
        except FileNotFoundError:
            print("Requirements not found.")
Example #15
    def _determineHostType():

        if sys.platform.startswith("win"):
            import winreg
            try:
                # the Core key indicates which VMware core product is installed (VMware Player vs VMware Workstation)
                hkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Wow6432Node\VMware, Inc.")
                output, _ = winreg.QueryValueEx(hkey, "Core")
                winreg.CloseKey(hkey)
            except OSError:
                return "ws"
        elif sys.platform.startswith("darwin"):
            return "fusion"
        else:
            vmware_path = shutil.which("vmware")
            if vmware_path is None:
                vmware_path = shutil.which("vmplayer")
                if vmware_path is not None:
                    return "player"
            if vmware_path:
                command = [vmware_path, "-v"]
                log.debug("Executing vmware with command: {}".format(command))
                try:
                    output = subprocess.check_output(command).decode("utf-8", errors="ignore").strip()
                except (OSError, subprocess.SubprocessError) as e:
                    log.error("Could not execute {}: {}".format("".join(command), e))
                    return "ws"
            else:
                log.error("vmware command not found")
                return "ws"
        if "VMware Player" in output:
            return "player"
        # Workstation is the default
        return "ws"
Example #16
def create_project(parser, options, args):
    # Validate args
    if len(args) < 2:
        parser.error("Please specify a name for your Alliance Auth installation.")
    elif len(args) > 3:
        parser.error("Too many arguments.")

    # First find the path to Alliance Auth
    import allianceauth
    allianceauth_path = os.path.dirname(allianceauth.__file__)
    template_path = os.path.join(allianceauth_path, 'project_template')

    # Determine locations of commands to render the supervisor conf
    command_options = {
        'template': template_path,
        'python': shutil.which('python'),
        'gunicorn': shutil.which('gunicorn'),
        'celery': shutil.which('celery'),
        'extensions': ['py', 'conf', 'json'],
    }

    # Strip 'start' out of the arguments, leaving project name (and optionally destination dir)
    args = args[1:]

    # Call the command with extra context
    call_command(StartProject(), *args, **command_options)

    print("Success! %(project_name)s has been created." % {'project_name': args[0]})  # noqa
Example #17
def isDockerInstalledOnOSX():
    if shutil.which('docker') is None:
        return False
    if (shutil.which('boot2docker') is None
            and shutil.which('docker-machine') is None):
        return False
    return True
Example #18
def find_vnetlib_on_windows():

    # look for vnetlib in PATH
    vnetlib_path = shutil.which("vnetlib64")

    if vnetlib_path is None:
        vnetlib_path = shutil.which("vnetlib")

    if vnetlib_path is None:
        # look for vnetlib using the directory listed in the registry (for VMware Workstation)
        vnetlib_path = find_vnetlib_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")

    if vnetlib_path is None:
        # look for vnetlib using the directory listed in the registry (for VMware Player)
        vnetlib_path = find_vnetlib_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")

    if vnetlib_path is None:
        # look for vnetlib in default VMware Workstation directory
        vnetlib_ws = os.path.expandvars(r"%PROGRAMFILES(X86)%\VMware\VMware Workstation\vnetlib64.exe")
        if not os.path.exists(vnetlib_ws):
            vnetlib_ws = os.path.expandvars(r"%PROGRAMFILES(X86)%\VMware\VMware Workstation\vnetlib.exe")
        if os.path.exists(vnetlib_ws):
            vnetlib_path = vnetlib_ws

    if vnetlib_path is None:
        # look for vnetlib in default VMware VIX directory
        vnetlib_vix = os.path.expandvars(r"%PROGRAMFILES(X86)%\VMware\VMware VIX\vnetlib64.exe")
        if not os.path.exists(vnetlib_vix):
            vnetlib_vix = os.path.expandvars(r"%PROGRAMFILES(X86)%\VMware\VMware VIX\vnetlib.exe")
        if os.path.exists(vnetlib_vix):
            vnetlib_path = vnetlib_vix

    return vnetlib_path
Example #19
def run_install_script(d):
    env = {'MESON_SOURCE_ROOT' : d.source_dir,
           'MESON_BUILD_ROOT' : d.build_dir,
           'MESON_INSTALL_PREFIX' : d.prefix
          }
    child_env = os.environ.copy()
    child_env.update(env)

    for i in d.install_scripts:
        final_command = i.cmd_arr
        script = i.cmd_arr[0]
        print('Running custom install script %s' % script)
        suffix = os.path.splitext(script)[1].lower()
        if platform.system().lower() == 'windows' and suffix != '.bat':
            first_line = open(script, encoding='latin_1', errors='ignore').readline().strip()
            if first_line.startswith('#!'):
                if shutil.which(first_line[2:]):
                    commands = [first_line[2:]]
                else:
                    commands = first_line[2:].split('#')[0].strip().split()
                    commands[0] = shutil.which(commands[0].split('/')[-1])
                    if commands[0] is None:
                        raise RuntimeError("Don't know how to run script %s." % script)
                final_command = commands + [script] + i.cmd_arr[1:]
        try:
            rc = subprocess.call(final_command, env=child_env)
            if rc != 0:
                sys.exit(rc)
        except Exception:
            print('Failed to run install script:', *i.cmd_arr)
            sys.exit(1)
Example #20
    def find_vmrun(self):
        """
        Searches for vmrun.

        :returns: path to vmrun
        """

        # look for vmrun
        vmrun_path = self.config.get_section_config("VMware").get("vmrun_path")
        if not vmrun_path:
            if sys.platform.startswith("win"):
                vmrun_path = shutil.which("vmrun")
                if vmrun_path is None:
                    # look for vmrun.exe using the VMware Workstation directory listed in the registry
                    vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
                    if vmrun_path is None:
                        # look for vmrun.exe using the VIX directory listed in the registry
                        vmrun_path = self._find_vmrun_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware VIX")
            elif sys.platform.startswith("darwin"):
                vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
            else:
                vmrun_path = shutil.which("vmrun")

        if not vmrun_path:
            raise VMwareError("Could not find vmrun")
        if not os.path.isfile(vmrun_path):
            raise VMwareError("vmrun {} is not accessible".format(vmrun_path))
        if not os.access(vmrun_path, os.X_OK):
            raise VMwareError("vmrun is not executable")
        if os.path.basename(vmrun_path) not in ["vmrun", "vmrun.exe"]:
            raise VMwareError("Invalid vmrun executable name {}".format(os.path.basename(vmrun_path)))

        self._vmrun_path = vmrun_path
        return vmrun_path
Example #21
    def open_file(cls, path):
        launch_editor_command_template = get_config_value('launch_editor_command', None)
        if launch_editor_command_template:
            if isinstance(launch_editor_command_template, str) and launch_editor_command_template.strip():
                launch_editor_command_template = launch_editor_command_template.strip()
            else:
                launch_editor_command_template = None
            if not launch_editor_command_template:
                raise WorkspaceError('configuration parameter "launch_editor_command" must be a non-empty string')
        else:
            try:
                # Windows:
                # Start a file with its associated application.
                os.startfile(path)
                return
            except AttributeError:
                if shutil.which('xdg-open'):
                    # Unix Desktops
                    # xdg-open opens a file or URL in the user's preferred application.
                    launch_editor_command_template = 'xdg-open "{file}"'
                elif shutil.which('open'):
                    # Mac OS X
                    # Open a file or folder. The open command opens a file (or a folder or URL), just as
                    # if you had double-clicked the file's icon.
                    launch_editor_command_template = 'open "{file}"'
                else:
                    print('warning: don\'t know how to open %s' % path)
                    return

        launch_editor_command = launch_editor_command_template.format(file=path)
        try:
            # print('launch_editor_command:', launch_editor_command)
            subprocess.call(launch_editor_command, shell=True)
        except (IOError, OSError) as error:
            raise WorkspaceError(str(error))
Example #22
 def __init__(self, environment, kwargs):
     Dependency.__init__(self)
     self.name = "qt5"
     self.root = "/usr"
     mods = kwargs.get("modules", [])
     self.cargs = []
     self.largs = []
     self.is_found = False
     if isinstance(mods, str):
         mods = [mods]
     if len(mods) == 0:
         raise DependencyException("No Qt5 modules specified.")
     type_text = "native"
     if environment.is_cross_build() and kwargs.get("native", False):
         type_text = "cross"
         self.pkgconfig_detect(mods, environment, kwargs)
     elif not environment.is_cross_build() and shutil.which("pkg-config") is not None:
         self.pkgconfig_detect(mods, environment, kwargs)
     elif shutil.which("qmake") is not None:
         self.qmake_detect(mods, kwargs)
     else:
         self.version = "none"
     if not self.is_found:
         mlog.log("Qt5 %s dependency found: " % type_text, mlog.red("NO"))
     else:
         mlog.log("Qt5 %s dependency found: " % type_text, mlog.green("YES"))
Example #23
    def findVmrun():
        """
        Finds the vmrun path.

        :return: path to vmrun
        """

        vmrun_path = None
        if sys.platform.startswith("win"):
            vmrun_path = shutil.which("vmrun")
            if vmrun_path is None:
                # look for vmrun.exe using the VMware Workstation directory listed in the registry
                vmrun_path = VMware._findVmrunRegistry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Workstation")
                if vmrun_path is None:
                    # look for vmrun.exe using the VIX directory listed in the registry
                    vmrun_path = VMware._findVmrunRegistry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware VIX")
        elif sys.platform.startswith("darwin"):
            vmware_fusion_vmrun_path = None
            try:
                output = subprocess.check_output(["mdfind", "kMDItemCFBundleIdentifier == 'com.vmware.fusion'"]).decode("utf-8", errors="ignore").strip()
                if len(output):
                    vmware_fusion_vmrun_path = os.path.join(output, "Contents/Library/vmrun")
            except (OSError, subprocess.SubprocessError):
                pass
            if vmware_fusion_vmrun_path is None:
                vmware_fusion_vmrun_path = "/Applications/VMware Fusion.app/Contents/Library/vmrun"
            if os.path.exists(vmware_fusion_vmrun_path):
                vmrun_path = vmware_fusion_vmrun_path
        else:
            vmrun_path = shutil.which("vmrun")

        if vmrun_path is None:
            return ""
        return os.path.abspath(vmrun_path)
Example #24
	def checkCopasiSE(self, copasiPath):
		"""
		Checks whether a given CopasiSE program exists and whether it is the right version. If the program specified by the user does not exist, the standard names in the $PATH are checked (i.e. copasise and CopasiSE).

		:param copasiPath: A user-given path to Copasi (may also contain just the name for calling, like "copasise", if the program is in the PATH)
		:returns: A valid Copasi path or name
		"""

		# Check for existence of a given CopasiSE path or the standard names.
		if which(copasiPath) is None:
			if which('CopasiSE') is None:
				if which('copasise') is None:
					self._errorReport('CopasiSE not found. Neither in the given path ({}), nor as "CopasiSE".'.format(copasiPath), fatal = True)
				else:
					self._errorReport('"{}" not found, switching to "copasise"'.format(copasiPath))
					copasiPath = 'copasise'
			else:
				self._errorReport('"{}" not found, switching to "CopasiSE"'.format(copasiPath))
				copasiPath = 'CopasiSE'

		# Check the program version of the given CopasiSE. Call e.g. copasise -h and only keep the stdout, not stderr
		output = subprocess.check_output([copasiPath, "-h"], universal_newlines = True, stderr=subprocess.DEVNULL)
		if self.version not in output:
			self._errorReport('The version of the given CopasiSE ({}) is not the same as for the given Copasi file ({}).'.format(output.split('\n')[0][7:], self.version))

		return copasiPath
Example #25
    def get_python_binary(cls, minor_versions):
        python_path = bpy.app.binary_path_python

        # temporary OSX fix
        if sys.platform == "darwin" and python_path == "/usr/bin/python":

            # 1) try to find python in the distribution
            for mv in minor_versions:
                for suff in ["", "m"]:
                    path = normpath(os.path.join(
                        os.path.dirname(bpy.app.binary_path), "../Resources",
                        bpy.app.version_string[:4], "python", "bin", "python3.%s%s" % (mv, suff)))

                    if shutil.which(path):
                        return path

            # 2) try to find installed
            for mv in minor_versions:
                for suff in ["", "m"]:
                    path = "/Library/Frameworks/Python.framework/Versions/3.%s/bin/python3%s" % (minor_version, suff)
                    if shutil.which(path):
                        return path
        else:
            return python_path
        return None
Example #26
    def __init__(self, config, **kwargs):
        super(PanIndiDevice, self).__init__(config, **kwargs)

        self.logger = get_logger(self)
        name = getattr(self, 'name', 'INDI_DEVICE')
        driver = config.get('driver', 'indi_simulator_ccd')
        port = config.get('port')

        self.logger.info('Creating device {} ({})'.format(name, driver))

        self._getprop = shutil.which('indi_getprop')
        self._setprop = shutil.which('indi_setprop')

        assert self._getprop is not None, error.PanError("Can't find indi_getprop")
        assert self._setprop is not None, error.PanError("Can't find indi_setprop")

        self.name = name
        self.driver = driver
        self.port = port

        self.status_delay = kwargs.get('status_delay', 1.3)  # Why not
        self._status_thread = None

        self._driver_loaded = False
        self._properties = {}
        self._states = {}

        self.config = config
Example #27
def process_command_line(argv):
    config = Config()

    import inspect
    import os.path

    i = 1

    ipython_exec_set = False
    maxima_jupyter_exec_set = False

    while i < len(argv):
        #print("cmd line option #{}: {}".format(i, argv[i]))

        if argv[i].startswith("--ipython-exec="):
            if ipython_exec_set:
                halt("Error: --ipython-exec option set twice")
            config.ipython_executable = shutil.which(argv[i][len("--ipython-exec="):])
            ipython_exec_set = True
        elif argv[i].startswith("--maxima-jupyter-exec="):
            if maxima_jupyter_exec_set:
                halt("Error: --maxima-jupyter-exec option set twice")
            config.maxima_jupyter_executable = shutil.which(argv[i][len("--maxima-jupyter-exec="):])
            maxima_jupyter_exec_set = True
        else:
            halt("Error: unexpected option '{}'".format(argv[i]))

        i += 1

    return config
Example #28
    def run(self):
        # Sync the database
        if not os.path.exists("config.ini"):
            raise Exception("You must create the project first")

        pg_dump = shutil.which('pg_dump')
        if pg_dump is None:
            raise Exception("Make sure the pg_dump command is available on this system")

        psql = shutil.which('psql')
        if psql is None:
            raise Exception("Make sure the psql command is available on this system")

        parser = ConfigParser()
        parser.read("config.ini")
        dumps = self.dump(parser)
        content = SQL_TPL.format(up=dumps)

        if not os.path.exists("migrations"):
            os.makedirs("migrations")

        with open("migrations/001_init.sql", "w") as handle:
            handle.write(content)

        self.create_migration_table(parser)
        self.insert_init_record(parser)
Example #29
 def which_exec(self, cmd):
   path = ''
   if sublime.version() < '3000':
     path = os.popen('which ' + cmd).read().split('\n')[0]
   else:
     path = shutil.which(cmd) or shutil.which(cmd, path='/usr/local/bin')
   return path
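
This helper leans on the path= parameter of shutil.which, which searches an explicit directory list instead of the inherited PATH; a small standalone illustration ('node' and the directories are placeholders, not from the source):

import os
import shutil

# Try the regular PATH first, then fall back to an explicit location.
exe = shutil.which('node') or shutil.which('node', path='/usr/local/bin')

# Several directories can be combined with os.pathsep, as in any PATH string.
search = os.pathsep.join(['/opt/local/bin', '/usr/local/bin'])
exe = exe or shutil.which('node', path=search)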
Example #30
def check_windows():
    """Checks Windows-specific requirements"""
    print("Checking bison command...")
    result = subprocess.run(["bison", "--version"], stdout=subprocess.PIPE,
                            universal_newlines=True)
    if result.returncode != 0:
        raise Exception("bison command returned non-zero exit code {}".format(
            result.returncode))
    result_which = shutil.which("bison")
    if result_which:
        if " " in result_which:
            raise Exception("Spaces are not allowed in the path to bison: {}".format(
                result_which))
    else:
        raise Exception("shutil.which returned unexpected value: {}".format(
            result_which))
    print("Using bison command '{!s}'".format(result.stdout.split("\n")[0]))

    print("Checking gperf command...")
    result = subprocess.run(["gperf", "--version"], stdout=subprocess.PIPE,
                            universal_newlines=True)
    if result.returncode != 0:
        raise Exception("gperf command returned non-zero exit code {}".format(
            result.returncode))
    result_which = shutil.which("gperf")
    if result_which:
        if " " in result_which:
            raise Exception("Spaces are not allowed in the path to gperf: {}".format(
                result_which))
    else:
        raise Exception("shutil.which returned unexpected value: {}".format(
            result_which))
    print("Using gperf command '{!s}'".format(result.stdout.split("\n")[0]))
Example #31
def main(args):
    parser = argparse.ArgumentParser('Clustering with KlustaKwik')
    parser.add_argument('target', help='Target path, either path containing tetrode files, or single tetrodeXX.mat')
    parser.add_argument('--KK', help='Path to KlustaKwik executable')
    parser.add_argument('--features', nargs='*', help='list of features to use for clustering')
    parser.add_argument('--config', help='Path to configuration file')
    parser.add_argument('--skip', help='Skip if clu file exists already', action='store_true')
    parser.add_argument('--no_spread', help='Shade report plots without static spread', action='store_true')
    parser.add_argument('--kkargs', help='Additional KK parameters, default: {-MaxPossibleClusters 35 -MaxIter 2000}',
                        type=str, default='-MaxPossibleClusters 35 -MaxIter 2000')
    parser.add_argument('-N', '--num_proc',
                        help='Number of KlustaKwik instances to run in parallel, defaults to 0 (all)', type=int,
                        default=0)
    parser.add_argument('--KKv3', action='store_true',
                        help='Running KlustaKwik v3 requires additional parameters for the call.')
    cli_args = parser.parse_args(args)

    # Load default configuration yaml file
    default_cfg_path = Path(pkg_resources.resource_filename(__name__, '../resources/cluster_defaults.yml')).resolve()
    if not default_cfg_path.exists():
        logging.error('Could not find default config file.')
        raise FileNotFoundError
    logger.debug('Loading default configuration')
    cfg = load_yaml(default_cfg_path)

    # Load local config file if it exists
    local_cfg_path = Path(
        pkg_resources.resource_filename(__name__, '../resources/cluster_defaults_local.yml')).resolve()
    if local_cfg_path.exists():
        logger.debug('Loading and updating with local configuration')
        local_cfg = load_yaml(local_cfg_path)
        cfg.update(local_cfg)

    # Load custom config path
    custom_cfg_path = Path(cli_args.config).resolve() if cli_args.config else None
    if custom_cfg_path:
        if custom_cfg_path.exists():
            logger.debug('Loading and updating with custom configuration')
            cfg.update(load_yaml(custom_cfg_path))
        else:
            raise FileNotFoundError(f"Could not load configuration file {custom_cfg_path}")

    # Load parameters from command line
    logger.debug('Parsing and updating configuration with CLI arguments')
    cfg.update(vars(cli_args))

    # try to find KlustaKwik executable if necessary...
    if cli_args.KK is None:
        cli_args.KK = shutil.which('KlustaKwik') or shutil.which('klustakwik') or shutil.which('Klustakwik')
    if cli_args.KK is None:
        raise FileNotFoundError('Could not find the KlustaKwik executable on the path, and none given.')

    cfg['kk_executable'] = cli_args.KK
    cfg['kk_additional_args'] = cli_args.kkargs

    logger.debug(cfg)

    # 1) Find target file stem
    target_path = Path(cli_args.target).resolve()
    if target_path.is_file():
        tetrode_files = [target_path]
        logger.debug(f'Using single file mode with {str(tetrode_files)}')
    else:
        tetrode_files = sorted([tf.resolve() for tf in target_path.glob(cfg['TARGET_FILE_GLOB'])])
    logger.debug(f'Targets found: {tetrode_files}')

    from multiprocessing.pool import ThreadPool
    num_threads = cli_args.num_proc if cli_args.num_proc > 0 else len(tetrode_files)

    logger.info('Launching ThreadPool')
    pool = ThreadPool(processes=num_threads)

    params = [(cfg, tfp) for tfp in tetrode_files]

    results = pool.map_async(run_kk, params)

    pool.close()
    pool.join()

    print(results)
Example #32
if util.find_spec("PyQt5") is None:
    exit_with_error("Missing package 'PyQt5'")

# check for matplot lib
try:
    import matplotlib  # type: ignore
    matplotlib.use("Qt5Agg")
    import matplotlib.pyplot as matplot  # type: ignore
except ImportError:
    # satisfy undefined variable warnings
    matplotlib = None
    matplot = None
    exit_with_error("Missing package 'python3-matplotlib'")

# check for ffprobe in path
if not shutil.which("ffprobe"):
    exit_with_error("Missing ffprobe from package 'ffmpeg'")


def parse_arguments() -> argparse.Namespace:
    """ Parses all arguments and returns them as an object. """
    
    if sys.version_info >= (3, 6):
        supported_filetypes = matplotlib.figure.Figure().canvas \
            .get_supported_filetypes().keys()
    else:
        fig = matplot.figure()
        supported_filetypes = fig.canvas.get_supported_filetypes().keys()
        matplot.close(fig)

    # get list of supported matplotlib formats
Example #33
    def grassBin():
        """
        Find GRASS binary path on the operating system.
        Sets global variable Grass7Utils.command
        """
        def searchFolder(folder):
            """
            Inline function to search for grass binaries into a folder
            with os.walk
            """
            if os.path.exists(folder):
                for root, dirs, files in os.walk(folder):
                    for cmd in cmdList:
                        if cmd in files:
                            return os.path.join(root, cmd)
            return None

        if Grass7Utils.command:
            return Grass7Utils.command

        path = Grass7Utils.grassPath()
        command = None

        vn = os.path.join(path, "etc", "VERSIONNUMBER")
        if os.path.isfile(vn):
            with open(vn, "r") as f:
                major, minor, patch = f.readlines()[0].split(' ')[0].split('.')
                if patch != 'svn':
                    patch = ''
                cmdList = [
                    "grass{}{}{}".format(major, minor, patch),
                    "grass",
                    "grass{}{}{}.sh".format(major, minor, patch),
                    "grass.sh"
                ]
        else:
            cmdList = [
                "grass76", "grass74", "grass72", "grass70", "grass",
                "grass76.sh", "grass74.sh", "grass72.sh", "grass70.sh", "grass.sh"
            ]

        # For MS-Windows there is a difference between GRASS Path and GRASS binary
        if isWindows():
            # If nothing found, use OSGEO4W or QgsPrefix:
            if "OSGEO4W_ROOT" in os.environ:
                testFolder = str(os.environ['OSGEO4W_ROOT'])
            else:
                testFolder = str(QgsApplication.prefixPath())
            testFolder = os.path.join(testFolder, 'bin')
            command = searchFolder(testFolder)
        elif isMac():
            # Search in grassPath
            command = searchFolder(path)

        # If everything has failed, use shutil
        if not command:
            for cmd in cmdList:
                testBin = shutil.which(cmd)
                if testBin:
                    command = os.path.abspath(testBin)
                    break

        if command:
            Grass7Utils.command = command
            if path == '':
                Grass7Utils.path = os.path.dirname(command)

        return command
Example #34
file_logger.setFormatter(formatter)
root_logger.addHandler(file_logger)

logging.getLogger('googleapiclient').setLevel(logging.CRITICAL)
logging.getLogger('googleapiclient.discover').setLevel(logging.CRITICAL)
logging.getLogger('googleapiclient.discovery_cache').setLevel(logging.CRITICAL)
logging.getLogger('google.auth.transport.requests').setLevel(logging.INFO)

logging.getLogger('telegram.bot').setLevel(logging.INFO)
logging.getLogger('telegram.ext.dispatcher').setLevel(logging.INFO)
logging.getLogger(
    'telegram.vendor.ptb_urllib3.urllib3.connectionpool').setLevel(
        logging.INFO)

if not os.path.isfile(path_to_gclone):
    path_to_gclone = shutil.which('gclone')
    if not path_to_gclone:
        logger.warning('gclone executable is not found.')
        input("Press Enter to continue...")
        sys.exit(0)
logger.info('Found gclone: ' + path_to_gclone)

if not gclone_remote_name:
    logger.warning('gclone remote name is not found.')
    input("Press Enter to continue...")
    sys.exit(0)

if not os.path.isfile(path_to_gclone_config):
    path_to_gclone_config = None
    logger.debug(
        'Cannot find gclone config. Use system gclone config instead.')
Example #35
        return json.loads(out.decode("utf-8"))
    else:
        raise GCloudError(err.decode("utf-8"))


class GCloud:
    def __init__(self):
        pass

    def __getattribute__(self, attr):
        attributes = attr.split("_")
        if attributes[-1] == "":
            attributes.pop(-1)

        def run_gcloud(*args, **kwargs):
            return gcloud_proxy(*attributes, *args, **kwargs)

        return run_gcloud


if which("gcloud") == None:
    raise FileNotFoundError(
        "\'gcloud\' command was not found in your environment. Ensure it is installed and set up properly."
    )

if __name__ == '__main__':
    gc = GCloud()
    print(gc.projects_list())

__all__ = ["GCloud", "GCloudError"]
Example #36
def run(args):
    '''
    filter and subsample a set of sequences into an analysis set
    '''
    #Set flags if VCF
    is_vcf = False
    is_compressed = False
    if any([args.sequences.lower().endswith(x) for x in ['.vcf', '.vcf.gz']]):
        is_vcf = True
        if args.sequences.lower().endswith('.gz'):
            is_compressed = True

    ### Check the user has vcftools. If they don't, a one-blank-line file is created which
    #   allows the next step to run but errors out very badly.
    if is_vcf:
        from shutil import which
        if which("vcftools") is None:
            print("ERROR: 'vcftools' is not installed! This is required for VCF data. "
                  "Please see the augur install instructions to install it.")
            return 1

    ####Read in files

    #If VCF, open and get sequence names
    if is_vcf:
        seq_keep, all_seq = read_vcf(args.sequences)

    #if Fasta, read in file to get sequence names and sequences
    else:
        try:
            seqs = SeqIO.to_dict(SeqIO.parse(args.sequences, 'fasta'))
        except ValueError as error:
            print("ERROR: Problem reading in {}:".format(args.sequences))
            print(error)
            return 1
        seq_keep = list(seqs.keys())
        all_seq = seq_keep.copy()

    try:
        meta_dict, meta_columns = read_metadata(args.metadata)
    except ValueError as error:
        print("ERROR: Problem reading in {}:".format(args.metadata))
        print(error)
        return 1


    #####################################
    #Filtering steps
    #####################################

    # remove sequences without meta data
    tmp = [ ]
    for seq_name in seq_keep:
        if seq_name in meta_dict:
            tmp.append(seq_name)
        else:
            print("No meta data for %s, excluding from all further analysis."%seq_name)
    seq_keep = tmp

    # remove strains explicitly excluded by name
    # read list of strains to exclude from file and prune seq_keep
    num_excluded_by_name = 0
    if args.exclude:
        try:
            with open(args.exclude, 'r', encoding='utf-8') as ifile:
                to_exclude = set()
                for line in ifile:
                    if line[0] != comment_char:
                        # strip whitespace and remove all text following comment character
                        exclude_name = line.split(comment_char)[0].strip()
                        to_exclude.add(exclude_name)
            tmp = [seq_name for seq_name in seq_keep if seq_name not in to_exclude]
            num_excluded_by_name = len(seq_keep) - len(tmp)
            seq_keep = tmp
        except FileNotFoundError as e:
            print("ERROR: Could not open file of excluded strains '%s'" % args.exclude, file=sys.stderr)
            sys.exit(1)

    # exclude strains by metadata field, like 'host=camel'
    # match using lowercase
    num_excluded_by_metadata = {}
    if args.exclude_where:
        for ex in args.exclude_where:
            try:
                col, val = re.split(r'!?=', ex)
            except (ValueError,TypeError):
                print("invalid --exclude-where clause \"%s\", should be of from property=value or property!=value"%ex)
            else:
                to_exclude = set()
                for seq_name in seq_keep:
                    if "!=" in ex: # i.e. property!=value requested
                        if meta_dict[seq_name].get(col,'unknown').lower() != val.lower():
                            to_exclude.add(seq_name)
                    else: # i.e. property=value requested
                        if meta_dict[seq_name].get(col,'unknown').lower() == val.lower():
                            to_exclude.add(seq_name)
                tmp = [seq_name for seq_name in seq_keep if seq_name not in to_exclude]
                num_excluded_by_metadata[ex] = len(seq_keep) - len(tmp)
                seq_keep = tmp

    # exclude strains by metadata, using Pandas querying
    num_excluded_by_query = 0
    if args.query:
        filtered = filter_by_query(seq_keep, args.metadata, args.query)
        num_excluded_by_query = len(seq_keep) - len(filtered)
        seq_keep = filtered

    # filter by sequence length
    num_excluded_by_length = 0
    if args.min_length:
        if is_vcf: #doesn't make sense for VCF, ignore.
            print("WARNING: Cannot use min_length for VCF files. Ignoring...")
        else:
            seq_keep_by_length = []
            for seq_name in seq_keep:
                sequence = seqs[seq_name].seq
                length = sum(map(lambda x: sequence.count(x), ["a", "t", "g", "c", "A", "T", "G", "C"]))
                if length >= args.min_length:
                    seq_keep_by_length.append(seq_name)
            num_excluded_by_length = len(seq_keep) - len(seq_keep_by_length)
            seq_keep = seq_keep_by_length

    # filter by date
    num_excluded_by_date = 0
    if (args.min_date or args.max_date) and 'date' in meta_columns:
        dates = get_numerical_dates(meta_dict, fmt="%Y-%m-%d")
        tmp = [s for s in seq_keep if dates[s] is not None]
        if args.min_date:
            tmp = [s for s in tmp if (np.isscalar(dates[s]) or all(dates[s])) and np.max(dates[s])>args.min_date]
        if args.max_date:
            tmp = [s for s in tmp if (np.isscalar(dates[s]) or all(dates[s])) and np.min(dates[s])<args.max_date]
        num_excluded_by_date = len(seq_keep) - len(tmp)
        seq_keep = tmp

    # exclude sequences with non-nucleotide characters
    num_excluded_by_nuc = 0
    if args.non_nucleotide:
        good_chars = {'A', 'C', 'G', 'T', '-', 'N', 'R', 'Y', 'S', 'W', 'K', 'M', 'D', 'H', 'B', 'V', '?'}
        tmp = [s for s in seq_keep if len(set(str(seqs[s].seq).upper()).difference(good_chars))==0]
        num_excluded_by_nuc = len(seq_keep) - len(tmp)
        seq_keep = tmp

    # subsampling. This will sort sequences into groups by meta data fields
    # specified in --group-by and then take at most --sequences-per-group
    # from each group. Within each group, sequences are optionally sorted
    # by a priority score specified in a file --priority
    # Fix seed for the RNG if specified
    if args.subsample_seed:
        random.seed(args.subsample_seed)
    num_excluded_subsamp = 0
    if args.group_by and args.sequences_per_group:
        spg = args.sequences_per_group
        seq_names_by_group = defaultdict(list)

        for seq_name in seq_keep:
            group = []
            m = meta_dict[seq_name]
            # collect group specifiers
            for c in args.group_by:
                if c in m:
                    group.append(m[c])
                elif c in ['month', 'year'] and 'date' in m:
                    try:
                        year = int(m["date"].split('-')[0])
                    except:
                        print("WARNING: no valid year, skipping",seq_name, m["date"])
                        continue
                    if c=='month':
                        try:
                            month = int(m["date"].split('-')[1])
                        except:
                            month = random.randint(1,12)
                        group.append((year, month))
                    else:
                        group.append(year)
                else:
                    group.append('unknown')
            seq_names_by_group[tuple(group)].append(seq_name)

        # If we didn't find any of the specified categories, all seqs will be in 'unknown' - but don't sample this!
        if len(seq_names_by_group)==1 and ('unknown' in seq_names_by_group or ('unknown',) in seq_names_by_group):
            print("WARNING: The specified group-by categories (%s) were not found."%args.group_by,
                  "No sequences-per-group sampling will be done.")
            if any([x in args.group_by for x in ['year','month']]):
                print("Note that using 'year' or 'year month' requires a column called 'date'.")
            print("\n")
        else:
            # Check to see if some categories are missing to warn the user
            group_by = set(['date' if cat in ['year','month'] else cat
                            for cat in args.group_by])
            missing_cats = [cat for cat in group_by if cat not in meta_columns]
            if missing_cats:
                print("WARNING:")
                if any([cat != 'date' for cat in missing_cats]):
                    print("\tSome of the specified group-by categories couldn't be found: ",
                          ", ".join([str(cat) for cat in missing_cats if cat != 'date']))
                if any([cat == 'date' for cat in missing_cats]):
                    print("\tA 'date' column could not be found to group-by year or month.")
                print("\tFiltering by group may behave differently than expected!\n")

            if args.priority: # read priorities
                priorities = read_priority_scores(args.priority)

            # subsample each group, either by taking the spg highest-priority strains or
            # sampling at random from the sequences in the group
            seq_subsample = []
            for group, sequences_in_group in seq_names_by_group.items():
                if args.priority: #sort descending by priority
                    seq_subsample.extend(sorted(sequences_in_group, key=lambda x:priorities[x], reverse=True)[:spg])
                else:
                    seq_subsample.extend(sequences_in_group if len(sequences_in_group)<=spg
                                         else random.sample(sequences_in_group, spg))

            num_excluded_subsamp = len(seq_keep) - len(seq_subsample)
            seq_keep = seq_subsample

    # force include sequences specified in file.
    # Note that this might re-add previously excluded sequences
    # Note that we are also not checking for existing meta data here
    num_included_by_name = 0
    if args.include and os.path.isfile(args.include):
        with open(args.include, 'r', encoding='utf-8') as ifile:
            to_include = set(
                [
                    line.strip()
                    for line in ifile
                    if line[0]!=comment_char and len(line.strip()) > 0
                ]
            )

        for s in to_include:
            if s not in seq_keep:
                seq_keep.append(s)
                num_included_by_name += 1

    # add sequences with particular meta data attributes
    num_included_by_metadata = 0
    if args.include_where:
        to_include = []
        for ex in args.include_where:
            try:
                col, val = ex.split("=")
            except (ValueError,TypeError):
                print("invalid include clause %s, should be of from property=value"%ex)
                continue

            # loop over all sequences and re-add sequences
            for seq_name in all_seq:
                if seq_name in meta_dict:
                    if meta_dict[seq_name].get(col)==val:
                        to_include.append(seq_name)
                else:
                    print("WARNING: no metadata for %s, skipping"%seq_name)
                    continue

        for s in to_include:
            if s not in seq_keep:
                seq_keep.append(s)
                num_included_by_metadata += 1

    ####Write out files

    if is_vcf:
        #get the samples to be deleted, not to keep, for VCF
        dropped_samps = list(set(all_seq) - set(seq_keep))
        if len(dropped_samps) == len(all_seq): #All samples have been dropped! Stop run, warn user.
            print("ERROR: All samples have been dropped! Check filter rules and metadata file format.")
            return 1
        write_vcf(args.sequences, args.output, dropped_samps)

    else:
        seq_to_keep = [seq for id,seq in seqs.items() if id in seq_keep]
        if len(seq_to_keep) == 0:
            print("ERROR: All samples have been dropped! Check filter rules and metadata file format.")
            return 1
        SeqIO.write(seq_to_keep, args.output, 'fasta')

    print("\n%i sequences were dropped during filtering" % (len(all_seq) - len(seq_keep),))
    if args.exclude:
        print("\t%i of these were dropped because they were in %s" % (num_excluded_by_name, args.exclude))
    if args.exclude_where:
        for key,val in num_excluded_by_metadata.items():
            print("\t%i of these were dropped because of '%s'" % (val, key))
    if args.query:
        print("\t%i of these were filtered out by the query:\n\t\t\"%s\"" % (num_excluded_by_query, args.query))
    if args.min_length:
        print("\t%i of these were dropped because they were shorter than minimum length of %sbp" % (num_excluded_by_length, args.min_length))
    if (args.min_date or args.max_date) and 'date' in meta_columns:
        print("\t%i of these were dropped because of their date (or lack of date)" % (num_excluded_by_date))
    if args.non_nucleotide:
        print("\t%i of these were dropped because they had non-nucleotide characters" % (num_excluded_by_nuc))
    if args.group_by and args.sequences_per_group:
        seed_txt = ", using seed {}".format(args.subsample_seed) if args.subsample_seed else ""
        print("\t%i of these were dropped because of subsampling criteria%s" % (num_excluded_subsamp, seed_txt))

    if args.include and os.path.isfile(args.include):
        print("\n\t%i sequences were added back because they were in %s" % (num_included_by_name, args.include))
    if args.include_where:
        print("\t%i sequences were added back because of '%s'" % (num_included_by_metadata, args.include_where))

    print("%i sequences have been written out to %s" % (len(seq_keep), args.output))
Example #37
0
 def _default_nodejs(self):
     return (shutil.which("node") or shutil.which("nodejs")
             or shutil.which("nodejs.exe"))
Example #38
0
import os
import re
import shutil
import subprocess as sp
from datetime import datetime
import time

from snakemake.remote import AbstractRemoteObject, AbstractRemoteProvider
from snakemake.exceptions import WorkflowError
from snakemake.common import lazy_property
from snakemake.logging import logger
from snakemake.utils import os_sync


if not shutil.which("gfal-copy"):
    raise WorkflowError(
        "The gfal-* commands need to be available for " "gfal remote support."
    )


class RemoteProvider(AbstractRemoteProvider):

    supports_default = True
    allows_directories = True

    def __init__(
        self,
        *args,
        keep_local=False,
        stay_on_remote=False,
Example #39
0
def sway():
    """Reload sway colors."""
    if shutil.which("swaymsg") and util.get_pid("sway"):
        util.disown(["swaymsg", "reload"])
Example #40
0
 def cmd_exists(cmd):
     return shutil.which(cmd) is not None
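
A quick usage sketch (the command names are illustrative; results depend on the host system):

print(cmd_exists("git"))           # True only if git is on PATH
print(cmd_exists("no-such-tool"))  # False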
Example #41
0
def kitty():
    """ Reload kitty colors. """
    if shutil.which("kitty") and util.get_pid("kitty"):
        util.disown(["kitty", "@", "set-colors", "--all"])
Example #42
0
# and saves them in individual dot files (one for each plan). Optionally, and
# providing 'dot' is installed, it can also render the dot into a PNG file.

from __future__ import print_function

import sys
import re
import argparse
import shutil
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument('--png', action='store_true')
args = parser.parse_args()

dot = shutil.which('dot')
if args.png and not dot:
    raise RuntimeError("Can't export to PNG without 'dot' in the system")

pattern = re.compile(r"(digraph VPlan {.*?\n})", re.DOTALL)
matches = re.findall(pattern, sys.stdin.read())

for vplan in matches:
    m = re.search("graph \[.+(VF=.+,UF.+)", vplan)
    if not m:
        raise ValueError("Can't get the right VPlan name")
    name = re.sub('[^a-zA-Z0-9]', '', m.group(1))

    if args.png:
        filename = 'VPlan' + name + '.png'
        print("Exporting " + name + " to PNG via dot: " + filename)
Example #43
0
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


from shutil import which

SELENIUM_DRIVER_NAME = 'chrome'
SELENIUM_DRIVER_EXECUTABLE_PATH = which('chromedriver')
SELENIUM_DRIVER_ARGUMENTS = []  # '--headless' if using chrome instead of firefox


CLOSESPIDER_ITEMCOUNT = 1000
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 3600
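
Since which('chromedriver') silently yields None when the driver binary is missing, a defensive check at settings-import time can surface the problem early. A hypothetical guard, not part of the original settings:

if SELENIUM_DRIVER_EXECUTABLE_PATH is None:
    raise RuntimeError("chromedriver not found on PATH; install it or set "
                       "SELENIUM_DRIVER_EXECUTABLE_PATH to an explicit path")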
Example #44
0
def polybar():
    """Reload polybar colors."""
    if shutil.which("polybar") and util.get_pid("polybar"):
        util.disown(["pkill", "-USR1", "polybar"])
Example #45
0
BOT_NAME = 'reuters_crawler'

SPIDER_MODULES = ['reuters_crawler.spiders']
NEWSPIDER_MODULE = 'reuters_crawler.spiders'

FEED_FORMAT = 'json'
FEED_URI = 'tmp/%(file_name)s.json'
FEED_EXPORT_ENCODING = 'utf-8'

# Selenium
from shutil import which

SELENIUM_COMMAND_EXECUTOR = 'http://localhost:4444/wd/hub'
SELENIUM_DRIVER_NAME = 'firefox'
SELENIUM_DRIVER_EXECUTABLE_PATH = which('geckodriver')
SELENIUM_DRIVER_ARGUMENTS = [
    '-headless'
]  # '--headless' if using chrome instead of firefox

DOWNLOADER_MIDDLEWARES = {'scrapy_selenium.SeleniumMiddleware': 800}

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'reuters_crawler (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
Example #46
0
def i3():
    """Reload i3 colors."""
    if shutil.which("i3-msg") and util.get_pid("i3"):
        util.disown(["i3-msg", "reload"])
Example #47
0
def run_individual_python_test(target_dir, test_name, pyspark_python, params):
    env = dict(os.environ)
    env.update({
        'PYTHONPATH': params["REPAIR_MODULE_PATH"],
        'REPAIR_API_LIB': params["REPAIR_API_LIB"],
        'REPAIR_TESTDATA': params["REPAIR_TESTDATA"],
        'SPARK_TESTING': '1',
        'SPARK_PREPEND_CLASSES': '1',
        'PYSPARK_PYTHON': which(pyspark_python),
        'PYSPARK_DRIVER_PYTHON': which(pyspark_python),
        # Preserve legacy nested timezone behavior for pyarrow>=2, remove after SPARK-32285
        'PYARROW_IGNORE_TIMEZONE': '1',
    })

    # Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
    # recognized by the tempfile module to override the default system temp directory.
    tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    while os.path.isdir(tmp_dir):
        tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
    os.mkdir(tmp_dir)
    env["TMPDIR"] = tmp_dir
    metastore_dir = os.path.join(tmp_dir, str(uuid.uuid4()))
    while os.path.isdir(metastore_dir):
        metastore_dir = os.path.join(metastore_dir, str(uuid.uuid4()))
    os.mkdir(metastore_dir)

    # Also override the JVM's temp directory by setting driver and executor options.
    java_options = "-Djava.io.tmpdir={0} -Dio.netty.tryReflectionSetAccessible=true".format(
        tmp_dir)
    spark_args = [
        "--conf", "spark.driver.extraJavaOptions='{0}'".format(java_options),
        "--conf", "spark.executor.extraJavaOptions='{0}'".format(java_options),
        "--conf", "spark.sql.warehouse.dir='{0}'".format(metastore_dir),
        "pyspark-shell"
    ]
    env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)

    LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
    start_time = time.time()
    try:
        per_test_output = tempfile.TemporaryFile()
        retcode = subprocess.Popen(["pyspark"] + test_name.split(),
                                   stderr=per_test_output,
                                   stdout=per_test_output,
                                   env=env).wait()
        rmtree(tmp_dir, ignore_errors=True)
    except:
        LOGGER.exception("Got exception while running %s with %s", test_name,
                         pyspark_python)
        # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
        # this code is invoked from a thread other than the main thread.
        os._exit(1)
    duration = time.time() - start_time
    # Exit on the first failure.
    if retcode != 0:
        try:
            with FAILURE_REPORTING_LOCK:
                with open(LOG_FILE, 'ab') as log_file:
                    per_test_output.seek(0)
                    log_file.writelines(per_test_output)
                per_test_output.seek(0)
                for line in per_test_output:
                    decoded_line = line.decode("utf-8", "replace")
                    if not re.match('[0-9]+', decoded_line):
                        print(decoded_line, end='')
                per_test_output.close()
        except:
            LOGGER.exception(
                "Got an exception while trying to print failed test output")
        finally:
            print_red("\nHad test failures in %s with %s; see logs." %
                      (test_name, pyspark_python))
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
    else:
        skipped_counts = 0
        try:
            per_test_output.seek(0)
            # Here expects skipped test output from unittest when verbosity level is
            # 2 (or --verbose option is enabled).
            decoded_lines = map(lambda line: line.decode("utf-8", "replace"),
                                iter(per_test_output))
            skipped_tests = list(
                filter(
                    lambda line: re.search(r'test_.* \(.*\) ... (skip|SKIP)',
                                           line), decoded_lines))
            skipped_counts = len(skipped_tests)
            if skipped_counts > 0:
                key = (pyspark_python, test_name)
                assert SKIPPED_TESTS is not None
                SKIPPED_TESTS[key] = skipped_tests
            per_test_output.close()
        except:
            import traceback
            print_red("\nGot an exception while trying to store "
                      "skipped test output:\n%s" % traceback.format_exc())
            # Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
            # this code is invoked from a thread other than the main thread.
            os._exit(-1)
        if skipped_counts != 0:
            LOGGER.info(
                "Finished test(%s): %s (%is) ... %s tests were skipped",
                pyspark_python, test_name, duration, skipped_counts)
        else:
            LOGGER.info("Finished test(%s): %s (%is)", pyspark_python,
                        test_name, duration)
Example #48
0
    assert type(dumps) is dict


def test_compile_tflite_module(tflite_mobilenet_v1_1_quant):
    # some CI environments won't offer tflite, so skip in case it is not present
    pytest.importorskip("tflite")
    # Check default compilation.
    verify_compile_tflite_module(tflite_mobilenet_v1_1_quant)
    # Check with manual shape override
    shape_string = "input:[1,224,224,3]"
    shape_dict = tvmc.common.parse_shape_string(shape_string)
    verify_compile_tflite_module(tflite_mobilenet_v1_1_quant, shape_dict)


# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(not shutil.which("aarch64-linux-gnu-gcc"),
                    reason="cross-compilation toolchain not installed")
def test_cross_compile_aarch64_tflite_module(tflite_mobilenet_v1_1_quant):
    pytest.importorskip("tflite")

    mod, params = tvmc.load(tflite_mobilenet_v1_1_quant)
    graph, lib, params, dumps = tvmc.compile(
        mod,
        params,
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr='+neon'",
        dump_code="asm",
    )

    # check for output types
    assert type(graph) is str
    assert type(lib) is tvm.runtime.module.Module
Example #49
0
def _binary_install_check(binary):
    '''Checks if the given binary is installed. Otherwise we exit with return code 10.'''
    if not which(binary):
        exit_invoke(10, "Couldn't find {}. Please install to proceed.", binary)
Example #50
0
def adjustjob(acct, jobid):
    """Move job from one account to another."""
    subprocess.Popen([shutil.which('scontrol'),
                      'update',
                      'Account=%s_cpu' % acct,
                      'JobId=%s' % str(jobid)])
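
Note that shutil.which('scontrol') returns None when Slurm is not installed, which would make Popen fail with a confusing TypeError. A hardened sketch (the _checked function name is hypothetical):

def adjustjob_checked(acct, jobid):
    """Like adjustjob, but fails clearly if scontrol is missing."""
    scontrol = shutil.which('scontrol')
    if scontrol is None:
        raise FileNotFoundError("scontrol not found; is Slurm installed?")
    subprocess.Popen([scontrol, 'update',
                      'Account=%s_cpu' % acct,
                      'JobId=%s' % str(jobid)])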
Example #51
0
#!/usr/bin/env python
import os
import sys
import requests
import datetime
import shutil

clear = 'cls' if os.name == 'nt' else 'clear'

if shutil.which('ffmpeg') is None:
    print('To use ruv-dl you must install ffmpeg.')
    sys.exit()


def main():
    os.system(clear)
    content_type()


def content_type():
    choice = input(
        '\n\n    1: Link to old content\n    2: RÚV live\n\n    Enter a link to old content or download the RÚV live broadcast?: '
    )
    if choice == '1':
        os.system(clear)
        link()
    elif choice == '2':
        os.system(clear)
        resolution()
Example #52
0
smartctl_type_dict = {
    'ata': 'ata',
    'csmi': 'ata',
    'nvme': 'nvme',
    'sas': 'scsi',
    'sat': 'sat',
    'sata': 'ata',
    'scsi': 'scsi',
    'atacam': 'atacam'
}
"""
**(dict of str):** Contains actual interface types (ie: sas, csmi) as keys and
the corresponding smartctl interface type (ie: scsi, ata) as values.
"""

SMARTCTL_PATH = which('smartctl')


def smartctl_isvalid_type(interface_type: str) -> bool:
    """Tests if the interface_type is supported

    Args:
        interface_type (str): An internal interface_type

    Returns:
        bool: True if the type is supported, False otherwise
    """
    if interface_type in smartctl_type_dict:
        return True
    elif 'megaraid,' in interface_type:
        return True
    return False
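
Illustrative checks against the table above (the 'megaraid,N' form is smartctl's RAID pass-through syntax):

assert smartctl_isvalid_type('sata')        # maps to smartctl type 'ata'
assert smartctl_isvalid_type('megaraid,0')  # matched by the 'megaraid,' rule
assert not smartctl_isvalid_type('floppy')  # unknown interface type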
Example #53
0
 def __check_if_tor_installed(self) -> None:
     if bool(shutil.which("tor")) is False:
         raise TORNotInstalled("TOR not installed... Please install TOR to get it on your system")
Example #54
0
    # errorMessages.append("ERROR: Cython not installed. Please install Cython and rerun")

import multiprocessing
import os
import shutil
import subprocess
import sys

from argparse import ArgumentParser

##################################
# check whether SCons is available
##################################
scons_available = None
try:
    if shutil.which("scons") is None:
        # errorMessages.append("ERROR: Build system SCons is not installed. Please install and rerun")
        # abort_installation = True
        scons_available = False
    else:
        scons_available = True
except Exception:
    print("WARNING: unable to check whether build system SCons is installed")
    scons_available = False

#
if sys.platform == 'win32' and not scons_available:
    abort_installation = True
    errorMessages.append(
        "ERROR: Build system SCons is not installed. Please install and rerun."
    )
Example #55
0
def is_ffmpeg_available():
    """
    Returns true if ffmpeg is available in the operating system
    """
    return shutil.which("ffmpeg") is not None
Example #56
0
def main():
    parser = argparse.ArgumentParser(
        description="FLIP Fluids Addon build and compile script")
    parser.add_argument("-build-directory",
                        help="Path to destination build directory")
    parser.add_argument("-cmake-path",
                        help="Specify path to CMake binary (www.cmake.org)")
    parser.add_argument(
        "-make-path",
        help="Specify path to GNU Make binary (www.gnu.org/software/make)")
    parser.add_argument(
        '--clean',
        action="store_true",
        help="Clear generated files in the build directory before building")
    parser.add_argument('-no-compile',
                        action="store_true",
                        help="Do not compile libraries")
    args = parser.parse_args()

    root_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(root_dir, "build")
    if args.build_directory:
        build_dir = process_path(args.build_directory)

    cmake_path = "cmake"
    if args.cmake_path:
        cmake_path = process_path(args.cmake_path)
        if not os.path.isfile(cmake_path):
            print("\n***ERROR: Could not find file: <" + cmake_path + ">***\n")
            return
    else:
        if shutil.which(cmake_path) is None:
            parser.print_help()
            print(
                "\n***ERROR: Could not find CMake (cmake) on your system path. See above for help.***\n"
            )
            return

    make_path = "make"
    if args.make_path:
        make_path = process_path(args.make_path)
        if not os.path.isfile(make_path):
            print("\n***ERROR: Could not find file: <" + make_path + ">***\n")
            return
    else:
        if shutil.which(make_path) is None:
            parser.print_help()
            print(
                "\n***ERROR: Could not find GNU Make (make) on your system path. See above for help.***\n"
            )
            return

    try:
        original_cwd = os.getcwd()
        cmakelists_file = os.path.join(root_dir, "cmake", "CMakeLists.txt")
        temp_cmakelists_file = os.path.join(root_dir, "CMakeLists.txt")

        shutil.copyfile(cmakelists_file, temp_cmakelists_file)
        os.makedirs(build_dir, exist_ok=True)
        os.chdir(build_dir)

        if args.clean:
            clean_build_directory(build_dir)

        cmake_make(root_dir,
                   cmake_path,
                   make_path,
                   build_debug=True,
                   make_build=not args.no_compile)
        cmake_make(root_dir,
                   cmake_path,
                   make_path,
                   build_debug=False,
                   make_build=not args.no_compile)

        lib_dir = os.path.join(build_dir, "bl_flip_fluids",
                               "flip_fluids_addon", "pyfluid", "lib")
        if os.path.isdir(lib_dir):
            for filename in os.listdir(lib_dir):
                if filename.endswith(".dll.a"):
                    os.remove(os.path.join(lib_dir, filename))

    except Exception as e:
        if os.path.isfile(temp_cmakelists_file):
            os.remove(temp_cmakelists_file)
        os.chdir(original_cwd)
        raise e

    os.chdir(original_cwd)

    addon_dir = os.path.join(build_dir, "bl_flip_fluids", "flip_fluids_addon")
    print("\n" + "-" * 80)
    print("FLIP Fluids addon successfully built and compiled to:")
    print("\t<" + addon_dir + ">")
Example #57
0
def run(args):
    '''
    filter and subsample a set of sequences into an analysis set
    '''
    #Set flags if VCF
    is_vcf = False
    is_compressed = False
    if any([args.sequences.lower().endswith(x) for x in ['.vcf', '.vcf.gz']]):
        is_vcf = True
        if args.sequences.lower().endswith('.gz'):
            is_compressed = True

    ### Check that the user has vcftools installed. If they don't, a one-blank-line
    #   file is created, which lets the next step run but fail very badly.
    if is_vcf:
        from shutil import which
        if which("vcftools") is None:
            print(
                "ERROR: 'vcftools' is not installed! This is required for VCF data. "
                "Please see the augur install instructions to install it.")
            return 1

    ####Read in files

    #If VCF, open and get sequence names
    if is_vcf:
        seq_keep, all_seq = read_vcf(args.sequences)
    else:
        # If FASTA, try to load the sequence composition details and strain
        # names to be filtered.
        index_is_autogenerated = False
        sequence_index_path = args.sequence_index

        # Generate the sequence index on the fly, for backwards compatibility
        # with older workflows that don't generate the index ahead of time.
        if sequence_index_path is None:
            # Create a temporary index using a random filename to avoid
            # collisions between multiple filter commands.
            index_is_autogenerated = True
            with NamedTemporaryFile(delete=False) as sequence_index_file:
                sequence_index_path = sequence_index_file.name

            print(
                f"WARNING: A sequence index was not provided, so we are generating one.",
                "Generate your own index ahead of time with `augur index` and pass it with `augur filter --sequence-index`.",
                file=sys.stderr)
            index_sequences(args.sequences, sequence_index_path)

        sequence_index = pd.read_csv(sequence_index_path, sep="\t")

        # Remove temporary index file, if it exists.
        if index_is_autogenerated:
            os.unlink(sequence_index_path)

        # Calculate summary statistics needed for filtering.
        sequence_index["ACGT"] = sequence_index.loc[:,
                                                    ["A", "C", "G", "T"]].sum(
                                                        axis=1)
        seq_keep = sequence_index["strain"].values
        all_seq = seq_keep.copy()

    try:
        meta_dict, meta_columns = read_metadata(args.metadata)
    except ValueError as error:
        print("ERROR: Problem reading in {}:".format(args.metadata))
        print(error)
        return 1

    #####################################
    #Filtering steps
    #####################################

    # remove sequences without meta data
    tmp = []
    for seq_name in seq_keep:
        if seq_name in meta_dict:
            tmp.append(seq_name)
        else:
            print("No meta data for %s, excluding from all further analysis." %
                  seq_name)
    seq_keep = tmp

    # remove strains explicitly excluded by name
    # read list of strains to exclude from file and prune seq_keep
    num_excluded_by_name = 0
    if args.exclude:
        try:
            with open(args.exclude, 'r', encoding='utf-8') as ifile:
                to_exclude = set()
                for line in ifile:
                    if line[0] != comment_char:
                        # strip whitespace and remove all text following comment character
                        exclude_name = line.split(comment_char)[0].strip()
                        to_exclude.add(exclude_name)
            tmp = [
                seq_name for seq_name in seq_keep if seq_name not in to_exclude
            ]
            num_excluded_by_name = len(seq_keep) - len(tmp)
            seq_keep = tmp
        except FileNotFoundError as e:
            print("ERROR: Could not open file of excluded strains '%s'" %
                  args.exclude,
                  file=sys.stderr)
            sys.exit(1)

    # exclude strains by metadata field, like 'host=camel'
    # matching is done in lowercase
    num_excluded_by_metadata = {}
    if args.exclude_where:
        for ex in args.exclude_where:
            try:
                col, val = re.split(r'!?=', ex)
            except (ValueError, TypeError):
                print(
                    "invalid --exclude-where clause \"%s\", should be of from property=value or property!=value"
                    % ex)
            else:
                to_exclude = set()
                for seq_name in seq_keep:
                    if "!=" in ex:  # i.e. property!=value requested
                        if meta_dict[seq_name].get(
                                col, 'unknown').lower() != val.lower():
                            to_exclude.add(seq_name)
                    else:  # i.e. property=value requested
                        if meta_dict[seq_name].get(
                                col, 'unknown').lower() == val.lower():
                            to_exclude.add(seq_name)
                tmp = [
                    seq_name for seq_name in seq_keep
                    if seq_name not in to_exclude
                ]
                num_excluded_by_metadata[ex] = len(seq_keep) - len(tmp)
                seq_keep = tmp

    # exclude strains by metadata, using Pandas querying
    num_excluded_by_query = 0
    if args.query:
        filtered = filter_by_query(seq_keep, args.metadata, args.query)
        num_excluded_by_query = len(seq_keep) - len(filtered)
        seq_keep = filtered

    # filter by sequence length
    num_excluded_by_length = 0
    if args.min_length:
        if is_vcf:  #doesn't make sense for VCF, ignore.
            print("WARNING: Cannot use min_length for VCF files. Ignoring...")
        else:
            is_in_seq_keep = sequence_index["strain"].isin(seq_keep)
            is_gte_min_length = sequence_index["ACGT"] >= args.min_length

            seq_keep_by_length = sequence_index[
                (is_in_seq_keep) & (is_gte_min_length)]["strain"].tolist()

            num_excluded_by_length = len(seq_keep) - len(seq_keep_by_length)
            seq_keep = seq_keep_by_length

    # filter by ambiguous dates
    num_excluded_by_ambiguous_date = 0
    if args.exclude_ambiguous_dates_by and 'date' in meta_columns:
        seq_keep_by_date = []
        for seq_name in seq_keep:
            if not is_date_ambiguous(meta_dict[seq_name]['date'],
                                     args.exclude_ambiguous_dates_by):
                seq_keep_by_date.append(seq_name)

        num_excluded_by_ambiguous_date = len(seq_keep) - len(seq_keep_by_date)
        seq_keep = seq_keep_by_date

    # filter by date
    num_excluded_by_date = 0
    if (args.min_date or args.max_date) and 'date' in meta_columns:
        dates = get_numerical_dates(meta_dict, fmt="%Y-%m-%d")
        tmp = [s for s in seq_keep if dates[s] is not None]
        if args.min_date:
            tmp = [
                s for s in tmp if (np.isscalar(dates[s]) or all(dates[s]))
                and np.max(dates[s]) > args.min_date
            ]
        if args.max_date:
            tmp = [
                s for s in tmp if (np.isscalar(dates[s]) or all(dates[s]))
                and np.min(dates[s]) < args.max_date
            ]
        num_excluded_by_date = len(seq_keep) - len(tmp)
        seq_keep = tmp

    # exclude sequences with non-nucleotide characters
    num_excluded_by_nuc = 0
    if args.non_nucleotide:
        is_in_seq_keep = sequence_index["strain"].isin(seq_keep)
        no_invalid_nucleotides = sequence_index["invalid_nucleotides"] == 0
        seq_keep_by_valid_nucleotides = sequence_index[
            (is_in_seq_keep) & (no_invalid_nucleotides)]["strain"].tolist()

        num_excluded_by_nuc = len(seq_keep) - len(
            seq_keep_by_valid_nucleotides)
        seq_keep = seq_keep_by_valid_nucleotides

    # subsampling. This will sort sequences into groups by meta data fields
    # specified in --group-by and then take at most --sequences-per-group
    # from each group. Within each group, sequences are optionally sorted
    # by a priority score specified in a file --priority
    # Fix seed for the RNG if specified
    if args.subsample_seed:
        random.seed(args.subsample_seed)
    num_excluded_subsamp = 0
    if args.group_by and (args.sequences_per_group
                          or args.subsample_max_sequences):
        spg = args.sequences_per_group
        seq_names_by_group = defaultdict(list)

        for seq_name in seq_keep:
            group = []
            m = meta_dict[seq_name]
            # collect group specifiers
            for c in args.group_by:
                if c in m:
                    group.append(m[c])
                elif c in ['month', 'year'] and 'date' in m:
                    try:
                        year = int(m["date"].split('-')[0])
                    except ValueError:
                        print("WARNING: no valid year, skipping", seq_name,
                              m["date"])
                        continue
                    if c == 'month':
                        try:
                            month = int(m["date"].split('-')[1])
                        except (ValueError, IndexError):
                            month = random.randint(1, 12)
                        group.append((year, month))
                    else:
                        group.append(year)
                else:
                    group.append('unknown')
            seq_names_by_group[tuple(group)].append(seq_name)

        #If none of the specified categories were found, all seqs will be in 'unknown' - but don't sample this!
        if len(seq_names_by_group) == 1 and ('unknown' in seq_names_by_group or
                                             ('unknown', )
                                             in seq_names_by_group):
            print(
                "WARNING: The specified group-by categories (%s) were not found."
                % args.group_by,
                "No sequences-per-group sampling will be done.")
            if any([x in args.group_by for x in ['year', 'month']]):
                print(
                    "Note that using 'year' or 'year month' requires a column called 'date'."
                )
            print("\n")
        else:
            # Check to see if some categories are missing to warn the user
            group_by = set([
                'date' if cat in ['year', 'month'] else cat
                for cat in args.group_by
            ])
            missing_cats = [cat for cat in group_by if cat not in meta_columns]
            if missing_cats:
                print("WARNING:")
                if any([cat != 'date' for cat in missing_cats]):
                    print(
                        "\tSome of the specified group-by categories couldn't be found: ",
                        ", ".join([
                            str(cat) for cat in missing_cats if cat != 'date'
                        ]))
                if any([cat == 'date' for cat in missing_cats]):
                    print(
                        "\tA 'date' column could not be found to group-by year or month."
                    )
                print(
                    "\tFiltering by group may behave differently than expected!\n"
                )

            if args.priority:  # read priorities
                priorities = read_priority_scores(args.priority)

            if spg is None:
                # this is only possible if we have imposed a maximum number of samples
                # to produce; we binary-search for the correct spg (see the sketch
                # after this example).
                try:
                    length_of_sequences_per_group = [
                        len(sequences_in_group)
                        for sequences_in_group in seq_names_by_group.values()
                    ]

                    if args.probabilistic_sampling:
                        spg = _calculate_fractional_sequences_per_group(
                            args.subsample_max_sequences,
                            length_of_sequences_per_group)
                    else:
                        spg = _calculate_sequences_per_group(
                            args.subsample_max_sequences,
                            length_of_sequences_per_group)
                except TooManyGroupsError as ex:
                    print(f"ERROR: {ex}", file=sys.stderr)
                    sys.exit(1)
                print("sampling at {} per group.".format(spg))

            if args.probabilistic_sampling:
                random_generator = np.random.default_rng()

            # subsample each group, either by taking the spg highest-priority strains or
            # sampling at random from the sequences in the group
            seq_subsample = []
            subsampling_attempts = 0

            # Attempt to subsample with the given constraints for a fixed number
            # of times. For small values of maximum sequences, subsampling can
            # randomly select zero sequences to keep. When this happens, we can
            # usually find a non-zero number of samples by repeating the
            # process.
            while len(
                    seq_subsample
            ) == 0 and subsampling_attempts < MAX_NUMBER_OF_PROBABILISTIC_SAMPLING_ATTEMPTS:
                subsampling_attempts += 1

                for group, sequences_in_group in seq_names_by_group.items():
                    if args.probabilistic_sampling:
                        tmp_spg = random_generator.poisson(spg)
                    else:
                        tmp_spg = spg

                    if tmp_spg == 0:
                        continue

                    if args.priority:  #sort descending by priority
                        seq_subsample.extend(
                            sorted(sequences_in_group,
                                   key=lambda x: priorities[x],
                                   reverse=True)[:tmp_spg])
                    else:
                        seq_subsample.extend(sequences_in_group if len(
                            sequences_in_group) <= tmp_spg else random.sample(
                                sequences_in_group, tmp_spg))

            num_excluded_subsamp = len(seq_keep) - len(seq_subsample)
            seq_keep = seq_subsample

    # force include sequences specified in file.
    # Note that this might re-add previously excluded sequences
    # Note that we are also not checking for existing meta data here
    num_included_by_name = 0
    if args.include and os.path.isfile(args.include):
        with open(args.include, 'r', encoding='utf-8') as ifile:
            to_include = set([
                line.strip() for line in ifile
                if line[0] != comment_char and len(line.strip()) > 0
            ])

        for s in to_include:
            if s not in seq_keep:
                seq_keep.append(s)
                num_included_by_name += 1

    # add sequences with particular meta data attributes
    num_included_by_metadata = 0
    if args.include_where:
        to_include = []
        for ex in args.include_where:
            try:
                col, val = ex.split("=")
            except (ValueError, TypeError):
                print(
                    "invalid include clause %s, should be of from property=value"
                    % ex)
                continue

            # loop over all sequences and re-add sequences
            for seq_name in all_seq:
                if seq_name in meta_dict:
                    if meta_dict[seq_name].get(col) == val:
                        to_include.append(seq_name)
                else:
                    print("WARNING: no metadata for %s, skipping" % seq_name)
                    continue

        for s in to_include:
            if s not in seq_keep:
                seq_keep.append(s)
                num_included_by_metadata += 1

    ####Write out files

    if is_vcf:
        #get the samples to be deleted, not to keep, for VCF
        dropped_samps = list(set(all_seq) - set(seq_keep))
        if len(dropped_samps) == len(
                all_seq):  #All samples have been dropped! Stop run, warn user.
            print(
                "ERROR: All samples have been dropped! Check filter rules and metadata file format."
            )
            return 1
        write_vcf(args.sequences, args.output, dropped_samps)

    else:
        # It should not be possible to have ids in the list of sequences to keep
        # that do not exist in the original input sequences, since we built this
        # list of ids from the sequence index. Just to be safe though, we find
        # the intersection of these two lists of ids to determine if all samples
        # were dropped or not. This final list of ids is in the same order as
        # the input sequences such that output sequences are always in the same
        # order for a given set of filters.
        sequences = SeqIO.parse(args.sequences, "fasta")
        sequences_to_write = (sequence for sequence in sequences
                              if sequence.id in seq_keep)

        # Write out sequences that passed all filters using an iterator to
        # ensure that sequences are streamed to disk without being read into
        # memory first.
        sequences_written = SeqIO.write(sequences_to_write, args.output,
                                        'fasta')

        if sequences_written == 0:
            print(
                "ERROR: All samples have been dropped! Check filter rules and metadata file format.",
                file=sys.stderr)
            return 1

    print("\n%i sequences were dropped during filtering" %
          (len(all_seq) - len(seq_keep), ))
    if args.exclude:
        print("\t%i of these were dropped because they were in %s" %
              (num_excluded_by_name, args.exclude))
    if args.exclude_where:
        for key, val in num_excluded_by_metadata.items():
            print("\t%i of these were dropped because of '%s'" % (val, key))
    if args.query:
        print("\t%i of these were filtered out by the query:\n\t\t\"%s\"" %
              (num_excluded_by_query, args.query))
    if args.min_length:
        print(
            "\t%i of these were dropped because they were shorter than minimum length of %sbp"
            % (num_excluded_by_length, args.min_length))
    if args.exclude_ambiguous_dates_by and num_excluded_by_ambiguous_date:
        print(
            "\t%i of these were dropped because of their ambiguous date in %s"
            %
            (num_excluded_by_ambiguous_date, args.exclude_ambiguous_dates_by))
    if (args.min_date or args.max_date) and 'date' in meta_columns:
        print(
            "\t%i of these were dropped because of their date (or lack of date)"
            % (num_excluded_by_date))
    if args.non_nucleotide:
        print(
            "\t%i of these were dropped because they had non-nucleotide characters"
            % (num_excluded_by_nuc))
    if args.group_by and args.sequences_per_group:
        seed_txt = ", using seed {}".format(
            args.subsample_seed) if args.subsample_seed else ""
        print("\t%i of these were dropped because of subsampling criteria%s" %
              (num_excluded_subsamp, seed_txt))

    if args.include and os.path.isfile(args.include):
        print("\n\t%i sequences were added back because they were in %s" %
              (num_included_by_name, args.include))
    if args.include_where:
        print("\t%i sequences were added back because of '%s'" %
              (num_included_by_metadata, args.include_where))

    print("%i sequences have been written out to %s" %
          (len(seq_keep), args.output))
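
The spg is None branch above delegates to _calculate_sequences_per_group; the binary search its comment mentions can be sketched as follows (a minimal illustration under assumed semantics, not augur's implementation):

def sequences_per_group_sketch(target_max, group_sizes):
    """Largest per-group cap whose total sample stays within target_max."""
    def total(cap):
        return sum(min(size, cap) for size in group_sizes)
    if total(1) > target_max:
        # analogous to augur's TooManyGroupsError
        raise ValueError("more groups than requested sequences")
    lo, hi = 1, max(group_sizes)
    while lo < hi:
        mid = (lo + hi + 1) // 2  # round up so the loop always terminates
        if total(mid) <= target_max:
            lo = mid
        else:
            hi = mid - 1
    return lo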
Example #58
0
 def __sel(self, en: int) -> bool:
     if bool(shutil.which("setenforce")) is not False:
         subprocess.call(shlex.split("setenforce {}".format(en)))
         return True
     return False
Example #59
0
def find_executable(file_name, additional_paths=None):
    path = None
    if additional_paths:
        path = os.getenv('PATH', os.defpath)
        path += os.path.pathsep + os.path.pathsep.join(additional_paths)
    return which(file_name, path=path)
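
An illustrative call, searching a project-local directory in addition to PATH (the path and tool name are hypothetical):

tool = find_executable('protoc', additional_paths=['/opt/myproject/bin'])
if tool is None:
    print('protoc not found on PATH or in /opt/myproject/bin')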
Example #60
0
    def run(self):
        if force_bundled_libcapnp:
            need_build = True
        elif force_system_libcapnp:
            need_build = False
        else:
            # Try to use capnp executable to find include and lib path
            capnp_executable = shutil.which("capnp")
            if capnp_executable:
                self.include_dirs += [
                    os.path.join(os.path.dirname(capnp_executable), '..',
                                 'include')
                ]
                self.library_dirs += [
                    os.path.join(os.path.dirname(capnp_executable), '..',
                                 'lib{}'.format(8 * struct.calcsize("P")))
                ]
                self.library_dirs += [
                    os.path.join(os.path.dirname(capnp_executable), '..',
                                 'lib')
                ]

            # Look for capnproto using pkg-config (and minimum version)
            try:
                if pkgconfig.installed('capnp', '>= 0.8.0'):
                    need_build = False
                else:
                    need_build = True
            except EnvironmentError:
                # pkg-config not available in path
                need_build = True

        if need_build:
            print(
                "*WARNING* no libcapnp detected or rebuild forced. "
                "Attempting to build it from source now. "
                "If you have C++ Cap'n Proto installed, it may be out of date or is not being detected. "
                "This may take a while...")
            bundle_dir = os.path.join(_this_dir, "bundled")
            if not os.path.exists(bundle_dir):
                os.mkdir(bundle_dir)
            build_dir = os.path.join(
                _this_dir, "build{}".format(8 * struct.calcsize("P")))
            if not os.path.exists(build_dir):
                os.mkdir(build_dir)

            # Check if we've already built capnproto
            capnp_bin = os.path.join(build_dir, 'bin', 'capnp')
            if os.name == 'nt':
                capnp_bin = os.path.join(build_dir, 'bin', 'capnp.exe')

            if not os.path.exists(capnp_bin):
                # Not built, fetch and build
                fetch_libcapnp(bundle_dir, libcapnp_url)
                build_libcapnp(bundle_dir, build_dir)
            else:
                print("capnproto already built at {}".format(build_dir))

            self.include_dirs += [os.path.join(build_dir, 'include')]
            self.library_dirs += [
                os.path.join(build_dir,
                             'lib{}'.format(8 * struct.calcsize("P")))
            ]
            self.library_dirs += [os.path.join(build_dir, 'lib')]

            # Copy .capnp files from source
            src_glob = glob.glob(
                os.path.join(build_dir, 'include', 'capnp', '*.capnp'))
            dst_dir = os.path.join(self.build_lib, "capnp")
            for file in src_glob:
                print("copying {} -> {}".format(file, dst_dir))
                shutil.copy(file, dst_dir)

        return build_ext_c.run(self)