def deploy_or_upgrade_concourse(
        config_name: CliHint(typehint=str, help="the cfg_set to use"),
        deployment_name: CliHint(
            typehint=str, help="namespace and deployment name") = 'concourse',
        timeout_seconds: CliHint(
            typehint=int, help="how long to wait for concourse startup") = 180,
        dry_run: bool = True,
):
    '''Deploys a new concourse-instance using the given deployment name and config-directory.'''
    # Fail early if helm is not usable (presumably which() aborts when the
    # binary is missing -- TODO confirm against the project's which()).
    which("helm")
    _display_info(
        dry_run=dry_run,
        operation="DEPLOYED",
        deployment_name=deployment_name,
    )
    # Dry runs only report what would happen; no cluster interaction.
    if dry_run:
        return
    setup_concourse.deploy_concourse_landscape(
        config_name=config_name,
        deployment_name=deployment_name,
        timeout_seconds=timeout_seconds,
    )
def checks(self):
    """Verify the local working directory and dictionary tooling.

    Updates self.available: the server ('dictd') alone can enable
    availability, but a usable client ('dict') is always required.
    """
    # Ensure the local 'dict' working directory exists.
    if not os.path.exists('dict'):
        os.makedirs('dict')
    # available = (available or server-found) and client-found
    server_found = which('dictd')
    self.available = self.available or server_found
    client_found = which('dict')
    self.available = self.available and client_found
def init(proj, root, exists, site_packages, template, template_vars, vcs, force):
    """Initialize a KnightOS project at *root*: install kernel, packages,
    VCS repository, and template files.

    FIXES over the original:
    - `== None` -> `is None`.
    - Template and output files are now closed via `with` (they leaked).
    - devnull handles for subprocess were opened and never closed; use
      subprocess.DEVNULL instead.
    - Removed the dead `if ofile == "gitignore" and vcs != "git": pass`
      branch: it compared a *file object* to a string (always False) and
      its body was `pass` anyway.
    """
    print("Installing SDK...")
    # Only install a kernel when the user did not provide one.
    if template_vars['kernel_path'] is None:
        install_kernel(os.path.join(root, ".knightos"), template_vars['platform'])
        shutil.move(
            os.path.join(root, ".knightos",
                         "kernel-" + template_vars['platform'] + ".rom"),
            os.path.join(root, ".knightos", "kernel.rom"))
    print("Installing packages...")
    packages = proj.get_config("dependencies")
    packages = packages.split(' ') if packages else []
    for pkg in template["install"]:
        if pkg not in packages:
            packages.append(pkg)
    # A user-supplied kernel ships its own headers.
    if "core/kernel-headers" in packages and template_vars['kernel_path']:
        packages.remove("core/kernel-headers")
    template_vars['packages'] = cmd_install(packages, site_only=True, init=True)
    if site_packages:
        print("Installing site packages...")
        cmd_install(site_packages, site_only=True, init=True)
    # Initialize the requested VCS if its tool is available.
    if which('git') and vcs == "git":
        if not os.path.exists(os.path.join(root, ".git")):
            print("Initializing new git repository...")
            subprocess.call(["git", "init", root],
                            stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    elif which('hg') and vcs == "hg":
        if not os.path.exists(os.path.join(root, ".hg")):
            print("Initializing new hg repository...")
            subprocess.call(["hg", "init", root],
                            stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
    print("Installing template...")
    for entry in template["files"]:
        target = os.path.join(root, entry["path"])
        if os.path.exists(target):
            continue
        # On re-init only rewrite files explicitly marked re-initializable.
        if exists and not entry["reinit"]:
            continue
        is_binary = "binary" in entry and entry["binary"]
        source = os.path.join(get_resource_root(), "templates",
                              template["name"], entry["template"])
        with open(source, "rb" if is_binary else "r") as tmpl:
            if is_binary:
                with open(target, "wb") as out:
                    out.write(tmpl.read())
            else:
                # Text templates are rendered through mustache.
                with open(target, "w") as out:
                    out.write(pystache.render(tmpl.read(), template_vars))
    # TODO: Check for software listed in template['requries']
    print("All done! You can use `make help` to find out what to do next.")
def assertConnectivity(self, result):
    """make sure we have a running local server or network connectivity"""
    if self.useSocket or self.dlg.url in ('localhost', '127.0.0.1'):
        # Local play: start a server process if none is listening yet.
        if not self.serverListening():
            if os.name == 'nt':
                # On Windows pick a free TCP port (no socket file there);
                # elsewhere None is passed -- presumably a default/socket
                # transport is used. TODO confirm against startLocalServer().
                port = self.findFreePort()
            else:
                port = None
            self.startLocalServer(port)
            # give the server up to 5 seconds time to start
            for loop in range(50):
                if self.serverListening():
                    break
                time.sleep(0.1)
    elif which('qdbus'):
        # the state of QtDBus is unclear to me.
        # riverbank.computing says module dbus is deprecated
        # for Python 3. And Ubuntu has no package with
        # PyQt4.QtDBus. So we use good old subprocess.
        answer = subprocess.Popen(
            ['qdbus', 'org.kde.kded', '/modules/networkstatus',
             'org.kde.Solid.Networking.status'],
            stdout=subprocess.PIPE).communicate()[0].strip()
        # '4' is treated as "connected" -- NOTE(review): confirm against
        # the Solid.Networking status codes.
        if answer != '4':
            # pylint: disable=W0710
            raise Failure(m18n('You have no network connectivity: %1', answer))
    return result
def assertConnectivity(self, result):
    """make sure we have a running local server or network connectivity"""
    if self.useSocket or self.dlg.url in ('localhost', '127.0.0.1'):
        # Playing locally: make sure a server process is listening.
        if not self.serverListening():
            if os.name == 'nt':
                # Windows: reserve a free TCP port; elsewhere pass None
                # (default transport -- TODO confirm in startLocalServer).
                port = self.findFreePort()
            else:
                port = None
            self.startLocalServer(port)
            # give the server up to 5 seconds time to start
            for loop in range(50):
                if self.serverListening():
                    break
                time.sleep(0.1)
    elif which('qdbus'):
        # the state of QtDBus is unclear to me.
        # riverbank.computing says module dbus is deprecated
        # for Python 3. And Ubuntu has no package with
        # PyQt4.QtDBus. So we use good old subprocess.
        answer = subprocess.Popen(
            [
                'qdbus', 'org.kde.kded', '/modules/networkstatus',
                'org.kde.Solid.Networking.status'
            ],
            stdout=subprocess.PIPE).communicate()[0].strip()
        # NOTE(review): '4' is assumed to mean "connected" -- confirm.
        if answer != '4':
            # pylint: disable=W0710
            raise Failure(
                m18n('You have no network connectivity: %1', answer))
    return result
def find_docker(self):
    """find_docker searches paths and common directores to find docker."""
    # Resolve the docker binary via PATH; None means it is not installed.
    docker_path = which("docker")
    if docker_path is None:
        raise DockerNotFoundError("Please make sure docker is installed "
                                  "and in your path")
    return docker_path
def find_docker(self):
    """find_docker searches paths and common directores to find docker.

    Also records docker-machine environment variables and the local
    docker socket (if any) on self. Raises DockerNotFoundError when no
    usable docker environment is found.

    FIXES: the canonical socket path is /var/run/docker.sock (the
    original probed "/var/run/docker.socket", which never exists); the
    sanity check's boolean was inverted (it raised only when the socket
    DID exist but env vars were missing); docker_socket_file is now
    always defined.
    """
    # Find docker command line location
    location = which("docker")
    if location is None:
        raise DockerNotFoundError("Please make sure docker is installed "
                                  "and in your path")
    # Find docker-machine environment variables
    self.docker_host = os.getenv("DOCKER_HOST")
    self.docker_cert_path = os.getenv("DOCKER_CERT_PATH")
    self.docker_machine_name = os.getenv("DOCKER_MACHINE_NAME")
    # Look for linux docker socket file
    self.docker_socket_file = None  # always defined, even without a socket
    socket_path = "/var/run/docker.sock"
    has_docker_socket_file = os.path.exists(socket_path)
    if has_docker_socket_file:
        mode = os.stat(socket_path).st_mode
        if stat.S_ISSOCK(mode):
            self.docker_socket_file = "unix://" + socket_path
    # Sanity check docker environment: we need EITHER the docker-machine
    # env vars OR a local socket file.
    if not ((self.docker_host and self.docker_machine_name and
             self.docker_cert_path) or has_docker_socket_file):
        raise DockerNotFoundError("Make docker server is started or env"
                                  "variables for docker-machine are set.")
    self.location = location
    return self.location
def __init__(self, exec_progA):
    """Record name, type, mtime, linked libraries and hash of the first
    real program in *exec_progA* (wrappers like `env`/`time` are skipped).

    FIX: the original unconditionally called os.stat()/hash on
    self.__execName, crashing with TypeError when which() returned None;
    those steps are now guarded and all attributes get safe defaults.
    """
    # Wrapper programs that never are the real command.
    ignoreT = {
        'env': True,
        'time': True,
    }
    cmd = None
    for prog in exec_progA:
        bare = os.path.basename(prog)
        if bare not in ignoreT:
            cmd = prog
            break
    self.__execName = which(cmd)
    # Safe defaults so every attribute exists even when the executable
    # cannot be resolved.
    self.__execType = None
    self.__modify = None
    self.__hash = None
    ldd = None
    if self.__execName:
        outStr = capture(["file", self.__execName])
        if outStr.find("ASCII text") > 0:
            self.__execType = "script"
        elif outStr.find("executable") > 0:
            self.__execType = "binary"
            ldd = capture(["ldd", self.__execName])
        info = os.stat(self.__execName)
        self.__modify = info.st_mtime
        self.__hash = self.__computeHash(self.__execName)
    # __parseLDD is assumed to tolerate None (no ldd output).
    self.__libA = self.__parseLDD(ldd)
def _echo_module(self, command, *arguments): """A function to execute module commands from a python script, and return the exit code, stdout, and stderr to the caller.""" # If the modulecmd executable wasn't found when this script was # initialized, or if MODULESHOME is no longer in the environment, look # for it in PATH. This is needed because in 3.1.6 a 'module purge' # deletes the MODULESHOME variable, so we can't always use it to find # modulecmd. try: cmd = os.environ.get('LMOD_CMD') if not cmd: cmd = '%s/bin/%s' % ( os.environ['MODULESHOME'], self.command_name) except Exception: # pylint: disable=broad-except cmd = self.command if not cmd or not os.path.exists(cmd): cmd = self.command if not which(cmd): print("Unable to load modules; no {} found.".format( self.command_name), file=sys.stderr) return '', '', '' cmdline = '%s python %s %s' % (cmd, command, ' '.join(arguments)) # Get all the output at once. subp = subprocess.Popen(cmdline, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) (stdout, stderr) = subp.communicate() errcode = subp.wait() return errcode, stdout, stderr
def startServer(self, result, waiting=0):
    """make sure we have a running local server or network connectivity"""
    if self.isLocalHost:
        # just wait for that server to appear
        if self.__serverListening():
            return result
        else:
            if waiting == 0:
                # First attempt: launch the server process.
                self.__startLocalServer()
            elif waiting > 30:
                logDebug(
                    'Game %s: Server %s not available after 30 seconds, aborting'
                    % (SingleshotOptions.game, self))
                raise CancelledError
            # Re-check in one second, counting elapsed attempts.
            return deferLater(Internal.reactor, 1,
                              self.startServer, result, waiting + 1)
    elif which('qdbus'):
        try:
            stdoutdata, stderrdata = subprocess.Popen(
                [
                    'qdbus', 'org.kde.kded', '/modules/networkstatus',
                    'org.kde.Solid.Networking.status'
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate(timeout=1)
        except subprocess.TimeoutExpired:
            # qdbus hung: treat as no connectivity.
            raise twisted.internet.error.ConnectError()
        stdoutdata = stdoutdata.strip()
        stderrdata = stderrdata.strip()
        # NOTE(review): status '4' is assumed to mean "connected" --
        # confirm against Solid.Networking status codes.
        if stderrdata == '' and stdoutdata != '4':
            # pylint: disable=nonstandard-exception
            raise twisted.internet.error.ConnectError()
        # if we have stderrdata, qdbus probably does not provide the
        # service we want, so ignore it
    return result
def find_docker(self):
    """find_docker searches paths and common directores to find docker.

    Records docker-machine env vars and the docker socket on self;
    raises DockerNotFoundError when no usable environment exists.

    FIXES: probe /var/run/docker.sock (the original used the
    non-existent "/var/run/docker.socket"); un-invert the sanity check,
    which previously raised only when the socket existed but env vars
    were missing; always define docker_socket_file.
    """
    # Find docker command line location
    location = which("docker")
    if location is None:
        raise DockerNotFoundError("Please make sure docker is installed "
                                  "and in your path")
    # Find docker-machine environment variables
    self.docker_host = os.getenv("DOCKER_HOST")
    self.docker_cert_path = os.getenv("DOCKER_CERT_PATH")
    self.docker_machine_name = os.getenv("DOCKER_MACHINE_NAME")
    # Look for linux docker socket file
    self.docker_socket_file = None  # defined even when no socket exists
    socket_path = "/var/run/docker.sock"
    has_docker_socket_file = os.path.exists(socket_path)
    if has_docker_socket_file:
        mode = os.stat(socket_path).st_mode
        if stat.S_ISSOCK(mode):
            self.docker_socket_file = "unix://" + socket_path
    # We need either the docker-machine env vars or a local socket.
    if not ((self.docker_host and self.docker_machine_name and
             self.docker_cert_path) or has_docker_socket_file):
        raise DockerNotFoundError("Make docker server is started or env"
                                  "variables for docker-machine are set.")
    self.location = location
    return self.location
def __init__(self, exec_progA):
    """Classify the first non-wrapper program in *exec_progA*.

    Records the resolved path, whether it is a script or a binary, its
    linked libraries (binaries only), mtime and content hash.
    """
    # argv wrappers that are never the real command.
    ignoreT = {
        'env': True,
        'time': True,
    }
    cmd = None
    for prog in exec_progA:
        if os.path.basename(prog) not in ignoreT:
            cmd = prog
            break
    self.__execType = None
    self.__execName = which(cmd)
    self.__libA = []
    if self.__execName:
        fileInfo = capture(["file", self.__execName])
        looksLikeScript = (fileInfo.find("script") > 0
                           or fileInfo.find("text") > 0)
        if looksLikeScript:
            self.__execType = "script"
        else:
            self.__execType = "binary"
            # Only binaries have dynamic library dependencies.
            lddOut = capture(["ldd", self.__execName])
            self.__libA = self.__parseLDD(lddOut)
        statInfo = os.stat(self.__execName)
        self.__modify = statInfo.st_mtime
        self.__hash = self.__computeHash(self.__execName)
def destroy_concourse_landscape(config_name: str, release_name: str):
    """Delete the Concourse helm release *release_name* and its namespace.

    Kubernetes/Concourse access is resolved from the config set named
    *config_name*.
    """
    # Fetch concourse and kubernetes config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)
    context = kube_ctx
    context.set_kubecfg(kubernetes_config.kubeconfig())
    # Delete helm release
    helm_cmd_path = which("helm")
    KUBECONFIG_FILE_NAME = 'kubecfg'
    # Point helm at a throw-away kubeconfig rendered from our config set
    # (written into a temp dir that is helm's cwd).
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME
    with tempfile.TemporaryDirectory() as temp_dir:
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)
        try:
            subprocess.run(
                [helm_cmd_path, "delete", release_name, "--purge"],
                env=helm_env,
                check=True,
                cwd=temp_dir)
        except CalledProcessError:
            # ignore sporadic connection timeouts from infrastructure
            warning(
                "Connection to K8s cluster lost. Continue with deleting namespace {ns}"
                .format(ns=release_name))
    # delete namespace
    namespace_helper = context.namespace_helper()
    namespace_helper.delete_namespace(namespace=release_name)
def _echo_module(self, command, *arguments): """A function to execute module commands from a python script, and return the exit code, stdout, and stderr to the caller.""" # If the modulecmd executable wasn't found when this script was # initialized, or if MODULESHOME is no longer in the environment, look # for it in PATH. This is needed because in 3.1.6 a 'module purge' # deletes the MODULESHOME variable, so we can't always use it to find # modulecmd. try: cmd = os.environ.get('LMOD_CMD') if not cmd: cmd = '%s/bin/%s' % (os.environ['MODULESHOME'], self.command_name) except Exception: # pylint: disable=broad-except cmd = self.command if not cmd or not os.path.exists(cmd): cmd = self.command if not which(cmd): print("Unable to load modules; no {} found.".format( self.command_name), file=sys.stderr) return '', '', '' cmdline = '%s python %s %s' % (cmd, command, ' '.join(arguments)) # Get all the output at once. subp = subprocess.Popen( cmdline, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) (stdout, stderr) = subp.communicate() errcode = subp.wait() return errcode, stdout, stderr
def buildSubvoice(self, oggName, side):
    """side is 'left' or 'right'.

    Returns the path of the one-sided (remixed) variant of *oggName*,
    building and caching it with sox on first use; falls back to the
    original stereo file when sox is unavailable or fails.
    """
    angleDirectory = os.path.join(cacheDir(), 'angleVoices', self.md5sum, side)
    stdName = os.path.join(self.directory, oggName)
    angleName = os.path.join(angleDirectory, oggName)
    needsBuild = os.path.exists(stdName) and not os.path.exists(angleName)
    if needsBuild:
        sox = which('sox')
        if not sox:
            # Cannot remix without sox; use the stereo file as-is.
            return stdName
        if not os.path.exists(angleDirectory):
            os.makedirs(angleDirectory)
        remixArgs = [sox, stdName, angleName, 'remix']
        # Keep both input channels on one side, silence the other.
        if side == 'left':
            remixArgs.extend(['1,2', '0'])
        elif side == 'right':
            remixArgs.extend(['0', '1,2'])
        callResult = subprocess.call(remixArgs)
        if callResult:
            if Debug.sound:
                logDebug('failed to build subvoice %s: return code=%s'
                         % (angleName, callResult))
            return stdName
        if Debug.sound:
            logDebug('built subvoice %s' % angleName)
    return angleName
def ensure_helm_setup():
    """Ensure up-to-date helm installation. Return the path to the found
    Helm executable.

    FIX: the original opened os.devnull in *read* mode ('r') and passed
    it as the child's stdout, so helm's writes went to a read-only fd;
    subprocess.DEVNULL discards output correctly and needs no cleanup.
    """
    helm_executable = which('helm')
    # Refresh the repo index; output is irrelevant, failures are not.
    subprocess.run([helm_executable, 'repo', 'update'],
                   check=True,
                   stdout=subprocess.DEVNULL)
    return helm_executable
def ensure_helm_setup():
    """Ensure that Helm is installed and its repo-list is up-to-date.
    Return the path to the found Helm executable.

    FIX: os.devnull was opened in read mode yet used as the child's
    stdout; subprocess.DEVNULL is the correct way to discard output.
    """
    helm_executable = util.which('helm')
    subprocess.run([helm_executable, 'repo', 'update'],
                   check=True,
                   stdout=subprocess.DEVNULL)
    return helm_executable
def _setup_command(self):
    """Locate the module command on PATH; on success remember its
    directory in self.paths and strip command_name down to the bare
    file name. Returns the resolved path (falsy when not found)."""
    located = which(self.command_name)
    if located:
        self.paths.append(os.path.dirname(located))
        self.command_name = os.path.basename(self.command_name)
    return located
def deploy_or_upgrade_concourse(
        config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
        deployment_name: CliHint(
            typehint=str, help="namespace and deployment name") = 'concourse',
        timeout_seconds: CliHint(
            typehint=int, help="how long to wait for concourse startup") = 180,
):
    '''Deploys a new concourse-instance using the given deployment name and config-directory.'''
    # Fail early when helm is missing (presumably which() aborts on
    # absence -- TODO confirm against the project's which()).
    which("helm")
    cfg_factory = ctx().cfg_factory()
    config_set = cfg_factory.cfg_set(config_set_name)
    setup_concourse.deploy_concourse_landscape(
        config_set=config_set,
        deployment_name=deployment_name,
        timeout_seconds=timeout_seconds,
    )
def __init__(self):
    """Resolve the tool's binary on PATH; abort the process when it
    cannot be found."""
    self.binpath = util.which(self.command)
    if self.binpath is not None:
        sys.stdout.write("Found '%s' at '%s'\n" % (self.name, self.binpath))
        return
    sys.stderr.write(
        "Could not find program '%s'. Ensure that it is on the path. Exiting.\n"
        % self.command)
    sys.exit(1)
def __init__(self):
    """Init has no arguments. The __init__ method will attempt to find
    the docker command line and attempt to setup docker-py to connect to
    the docker client backend. The connection variables will be stored
    in self.

    FIX: docker_socket_file (and docker_machine / dcli) are now always
    initialized; previously `elif (self.docker_socket_file)` raised
    AttributeError whenever no local socket file existed.
    """
    # Find docker command line location
    location = which("docker")
    if location is None:
        raise DockerNotFoundError("Please make sure docker is installed "
                                  "and in your path")
    self.location = location
    # Find docker-machine environment variables
    self.docker_host = os.getenv("DOCKER_HOST")
    self.docker_cert_path = os.getenv("DOCKER_CERT_PATH")
    self.docker_machine_name = os.getenv("DOCKER_MACHINE_NAME")
    # Defaults so every attribute exists regardless of environment.
    self.docker_socket_file = None
    self.docker_machine = False
    self.dcli = None
    # Look for linux docker socket file
    socket_path = "/var/run/docker.sock"
    has_docker_socket_file = os.path.exists(socket_path)
    if has_docker_socket_file:
        mode = os.stat(socket_path).st_mode
        if stat.S_ISSOCK(mode):
            self.docker_socket_file = "unix://" + socket_path
    if not ((self.docker_host and self.docker_cert_path and
             self.docker_machine_name) or has_docker_socket_file):
        raise DockerNotFoundError("Couldn't find socket file or"
                                  "Environment variables for docker.")
    # Setup the docker client connections based on what we've found.
    if (self.docker_host and self.docker_machine_name and
            self.docker_cert_path):
        tls_config = tls.TLSConfig(
            client_cert=(os.path.join(self.docker_cert_path, 'cert.pem'),
                         os.path.join(self.docker_cert_path, 'key.pem')),
            ca_cert=os.path.join(self.docker_cert_path, 'ca.pem'),
            verify=True,
            assert_hostname=False
        )
        # Replace tcp: with https: in docker host.
        docker_host_https = self.docker_host.replace("tcp", "https")
        self.dcli = client.scClient(base_url=docker_host_https,
                                    tls=tls_config, version="auto")
        self.docker_machine = True
    elif self.docker_socket_file:
        self.dcli = client.scClient(base_url=self.docker_socket_file,
                                    version="auto")
    # Assert dcli is not none;
    if self.dcli is None:
        raise DockerNotFoundError("Docker Client cannot find server.")
def undirected(graph, scaled=False):
    """
    Compute the eigenvector centrality. If scaled is True, the values will
    be set such that the maximum is 1. The graph must be an undirected
    signed graph or two unsigned graphs. If there are two graphs, the first
    one represent the positive weights and the second one defines the
    negative ones.

    :param graph: the graph
    :type graph: igraph.Graph or tuple
    :param scaled: indicates if the centrality must be scaled
    :type scaled: bool
    :return: the eigenvector centrality
    :rtype: list
    """
    matrix = get_matrix(graph).toarray()
    # eigs() treats the (real) matrix as complex, so eigenvalues come
    # back as complex numbers.
    eigenvalues, eigenvectors = eigs(matrix)
    # FIX: the original selected the leading eigenvalue with max() over
    # complex values, which has no well-defined ordering (builtin max()
    # raises TypeError on complex); pick the eigenvalue with the largest
    # real part, which is the intended "leading" one here.
    max_indx = int(np.argmax(eigenvalues.real))
    # Only the real part of the eigenvector is meaningful.
    leading_eigenvector = np.real(
        eigenvectors[:, max_indx]).reshape(-1).tolist()
    centrality = leading_eigenvector
    scale = 1  # if scaled is False the values are returned as-is
    if scaled:
        # Sets the values such that the maximum is 1
        scale = get_scale(centrality)
    if sum(centrality) < 0:
        # Makes the first cluster values positive if they aren't; the
        # sign flip is folded into the scale factor (more efficient).
        scale *= -1
    return [value * scale for value in centrality]
def __init__(self, preprocessor):
    """Register the xsd2html plugin and locate its XSLT processor.

    Windows uses the bundled msxsl.exe; other platforms rely on a
    system-wide xsltproc found on PATH.
    """
    self.pp = preprocessor
    self.token = "xsd2html"
    self.pp.register_plugin(self)
    self.msxsl = (self.pp.toolpath("plugins/xsd2html/msxsl/msxsl.exe")
                  if sys.platform == "win32"
                  else util.which("xsltproc"))
    # Stylesheet used to render the schema documentation.
    self.xs3p = self.pp.toolpath("plugins/xsd2html/msxsl/xs3p.xsl")
def find_exec(ignoreT, argT, cmdArg, argA):
    """Resolve the executable a command line actually runs.

    Walks the argument list, skipping options (and the values they
    consume, per argT), until the real command is reached; cmdArg marks
    an option whose *next* token is itself a command line.
    """
    total = len(argA)
    idx = 0
    while idx < total:
        token = argA[idx]
        if token == cmdArg:
            # The following token is a nested command line.
            return which(find_cmd(ignoreT, 0, argA[idx + 1].split()))
        consumed = argT.get(token, -1)
        if consumed > 0:
            # Option taking `consumed` values: skip option + values.
            idx += consumed + 1
            continue
        if token[0:1] == "-":
            idx += 1
            continue
        # First non-option token: the command starts here.
        break
    return which(find_cmd(ignoreT, idx, argA))
def find_exec(ignoreT, argT, cmdArg, argA):
    """Find the executable embedded in an argument vector.

    Options (and their argT-declared value counts) are skipped; cmdArg
    names an option whose next token is a nested command line.
    """
    pos = 0
    limit = len(argA)
    while pos < limit:
        current = argA[pos]
        if current == cmdArg:
            # Next token holds a whole nested command line.
            return which(find_cmd(ignoreT, 0, argA[pos + 1].split()))
        valueCount = argT.get(current, -1)
        if valueCount > 0:
            pos += valueCount + 1
            continue
        if current[0:1] == "-":
            pos += 1
            continue
        break  # first non-option token: command starts here
    return which(find_cmd(ignoreT, pos, argA))
def _module_setup(self, name=None):
    """Set up modules for the Sierra nightly processes.

    Returns the path to the modules `init/<name>` file (and exports
    MODULESHOME when it was unset); raises RuntimeError when no init
    file can be found.
    """
    if not name:
        name = 'python'
    # Look for the modules init files for python.
    # Look for a modulecmd script that we can execute on this machine,
    # and thatis paired with an init/python script.
    modules_init_file = None
    if os.environ.get('LMOD_CMD') and name == 'python':
        self.command = os.environ.get('LMOD_CMD')
    else:
        self.command = os.environ.get('LMOD_CMD',
                                      find_first_binary("modulecmd",
                                                        self.paths, '-V'))
    # NOTE(review): when self.command is falsy this returns None without
    # raising -- confirm callers tolerate that.
    if self.command:
        # Look for the executable module init file relative to the
        # command binary.
        fname = os.path.join('init', name)
        grandparent_dir = os.path.dirname(
            os.path.dirname(self.command))
        modules_init_file = which(fname, grandparent_dir)
        if not modules_init_file:
            modules_init_file = which(fname + '.py', grandparent_dir)
        if not modules_init_file:
            # If we didn't find it relative to the command binary, look
            # for it in the standard paths. Again, this is a hack for
            # Modules/3.1.6
            modules_init_file = find_file_in_list(fname, self.paths)
            if not modules_init_file:
                modules_init_file = find_file_in_list(
                    fname + '.py', self.paths)
            if not modules_init_file:
                raise RuntimeError('Unable to find modules '
                                   'init/python file in {}.'.
                                   format(' '.join(self.paths)))
    # We need to set the MODULESHOME environment variable if
    # it is not yet set.
    if modules_init_file and 'MODULESHOME' not in os.environ:
        os.environ['MODULESHOME'] = os.path.dirname(
            os.path.dirname(modules_init_file))
    return modules_init_file
def findOgg():
    """sets __hasogg to True or False"""
    if Sound.__hasogg is None:
        # Windows uses a well-known decoder path; elsewhere ogg123
        # must be on PATH.
        player = r'c:\vorbis\oggdec.exe' if os.name == 'nt' else 'ogg123'
        if not which(player):
            Sound.enabled = False  # checks again at next reenable
            logWarning(m18n('No voices will be heard because the program %1 is missing', player))
            return
        Sound.__hasogg = True
    return Sound.__hasogg
def gc(self):
    """Run `git gc --auto` on the repository, serialized by self.lock,
    and re-open the repo afterwards so cached state is fresh."""
    with self.lock:
        if not which('git'):
            # No git binary available: nothing to do.
            return
        repo_dir = self.repo.path
        try:
            log.info("starting gc on repo %s" % repo_dir)
            subprocess.check_call("git gc --auto", cwd=repo_dir, shell=True)
            log.info("finished gc on repo %s" % repo_dir)
            self.repo = Repo(self.repo.path)
        except subprocess.CalledProcessError:
            log.exception("git gc failed for repo %s" % repo_dir)
def test_which_ExecutableNotFound(self):
    """util.which must return None for a binary that does not exist.

    FIXES: isinstance() instead of `type(x) == T` (handles platform
    subclasses), logging's deprecated `warn` -> `warning`, and the more
    diagnostic assertIsNone over assertTrue(... is None).
    """
    platform = util.getPlatform()
    if isinstance(platform, util.WindowsPlatform):
        exe = "bogus_executable_name.exe"
    elif isinstance(platform, util.UnixPlatform):
        exe = "bogus_executable_name"
    else:
        log.warning("Skipping test. Platform not supported")
        return
    exepath = util.which(exe)
    self.assertIsNone(exepath)
def executable_sanity_checks(executables):
    """
    Check for availability of executables specified in the list of strings
    EXECUTABLES.  Logs an error and exits the process for the first
    executable that is not accessible.
    """
    # FIX: removed leftover `lg.debug(util)`, which logged the util
    # module object itself -- a stray debugging statement.
    for executable_spec in executables:
        if not util.which(executable_spec):
            msg = json1.json_msg_executable_not_accessible(executable_spec)
            lg.error(msg)
            lg.info(json1.json_last_log_msg())
            sys.exit(msg)
def _module_setup(self, name=None):
    """Set up modules for the Sierra nightly processes.

    Locates and returns the modules `init/<name>` file, exporting
    MODULESHOME when unset; raises RuntimeError when nothing is found.
    """
    if not name:
        name = 'python'
    # Look for the modules init files for python.
    # Look for a modulecmd script that we can execute on this machine,
    # and thatis paired with an init/python script.
    modules_init_file = None
    if os.environ.get('LMOD_CMD') and name == 'python':
        self.command = os.environ.get('LMOD_CMD')
    else:
        self.command = os.environ.get(
            'LMOD_CMD', find_first_binary("modulecmd", self.paths, '-V'))
    # NOTE(review): a falsy self.command returns None without raising --
    # confirm callers tolerate that.
    if self.command:
        # Look for the executable module init file relative to the
        # command binary.
        fname = os.path.join('init', name)
        grandparent_dir = os.path.dirname(os.path.dirname(
            self.command))
        modules_init_file = which(fname, grandparent_dir)
        if not modules_init_file:
            modules_init_file = which(fname + '.py', grandparent_dir)
        if not modules_init_file:
            # If we didn't find it relative to the command binary, look
            # for it in the standard paths. Again, this is a hack for
            # Modules/3.1.6
            modules_init_file = find_file_in_list(fname, self.paths)
            if not modules_init_file:
                modules_init_file = find_file_in_list(
                    fname + '.py', self.paths)
            if not modules_init_file:
                raise RuntimeError('Unable to find modules '
                                   'init/python file in {}.'.format(
                                       ' '.join(self.paths)))
    # We need to set the MODULESHOME environment variable if
    # it is not yet set.
    if modules_init_file and 'MODULESHOME' not in os.environ:
        os.environ['MODULESHOME'] = os.path.dirname(
            os.path.dirname(modules_init_file))
    return modules_init_file
def test_which_ExecutableFound(self):
    """util.which must resolve a binary known to exist on this platform.

    FIXES: isinstance() instead of `type(x) == T`, deprecated
    `log.warn` -> `log.warning`, assertIsNotNone for a clearer failure
    message.
    """
    platform = util.getPlatform()
    if isinstance(platform, util.WindowsPlatform):
        exe = "cmd.exe"
    elif isinstance(platform, util.UnixPlatform):
        exe = "true"
    else:
        log.warning("Skipping test. Platform not supported")
        return
    exepath = util.which(exe)
    log.debug('which found %s' % exepath)
    self.assertIsNotNone(exepath)
def find_exec(ignoreT, argT, cmdArg, argA, *n, **kw):
    """Resolve the executable an argument vector runs.

    With keyword 'dot', the current directory is appended to PATH so
    programs can also be resolved from ".". Options (and the values
    they consume per argT) are skipped; cmdArg marks an option whose
    next token is a nested command line.
    """
    if 'dot' in kw:
        # Also allow resolving programs from the current directory.
        os.environ['PATH'] = os.environ['PATH'] + ":."
    limit = len(argA)
    pos = 0
    while pos < limit:
        token = argA[pos]
        if token == cmdArg:
            return which(find_cmd(ignoreT, 0, argA[pos + 1].split()))
        valueCount = argT.get(token, -1)
        if valueCount > 0:
            pos += valueCount + 1
            continue
        if token[0:1] == "-":
            pos += 1
            continue
        break  # first non-option token
    return which(find_cmd(ignoreT, pos, argA))
def findOgg():
    """sets __hasogg to True or False"""
    if Sound.__hasogg is not None:
        return Sound.__hasogg
    # Known decoder location on Windows; ogg123 from PATH elsewhere.
    decoder = r'c:\vorbis\oggdec.exe' if os.name == 'nt' else 'ogg123'
    if not which(decoder):
        Sound.enabled = False  # checks again at next reenable
        logWarning(
            m18n(
                'No voices will be heard because the program %1 is missing',
                decoder))
        return
    Sound.__hasogg = True
    return Sound.__hasogg
def jobdaemon(self):
    """Return the configured job-daemon path.

    On first use (option absent from the config) the script is resolved
    from the shell's $PATH and cached back into the config; LookupError
    is raised when it cannot be found anywhere.
    """
    try:
        return self._config.get(self._section, 'jobdaemon')
    except ConfigParser.NoOptionError:
        # search it on the shell's $PATH
        located = which('tm_jobdaemon.py')
        if located is None:
            raise LookupError(
                "No value specified for configuration option `[{0}]{1}`,"
                " and cannot find `tm_jobdaemon.py` on the shell search PATH."
                .format(self._section, 'jobdaemon'))
        # remember it for next invocation
        self._config.set(self._section, 'jobdaemon', located)
        return self._config.get(self._section, 'jobdaemon')
def deploy_or_upgrade_concourse(
        config_name: CliHint(
            typehint=str,
            help="Which of the configurations contained in --config-dir to use."),
        deployment_name: CliHint(
            typehint=str,
            help="Name under which Concourse will be deployed. Will also be the identifier of the namespace into which it is deployed.") = 'concourse',
        timeout_seconds: CliHint(
            typehint=int,
            help="Maximum time (in seconds) to wait after deploying for the Concourse-webserver to become available.") = 180,
        dry_run: bool = True,
):
    '''Deploys a new concourse-instance using the given deployment name and config-directory.'''
    # Fail early when helm is missing (presumably which() aborts on
    # absence -- TODO confirm).
    which("helm")
    # The deployment name doubles as the target namespace.
    namespace = deployment_name
    _display_info(
        dry_run=dry_run,
        operation="DEPLOYED",
        deployment_name=deployment_name,
    )
    # Dry runs only report; no cluster interaction.
    if dry_run:
        return
    setup.deploy_concourse_landscape(
        config_name=config_name,
        deployment_name=deployment_name,
        timeout_seconds=timeout_seconds,
    )
def astrometry_net_version():
    """ Return the Astrometry.net version as a tuple, e.g. (0, 78).

    Raises AstrometryNetNotInstalled when the solver binary is absent.

    FIXES: decode check_output() (bytes on Python 3) before regex
    matching against a str pattern; make the pattern a raw string to
    avoid the invalid `\\d` escape warning; drop the stray quote from
    the error-message template.
    """
    # For example: "Revision 0.78, date Mon_Apr_22_12:25:30_2019_-0400."
    PATTERN = r"^Revision (\d\.\d{1,2}), date.*"
    emsg = "{!r} not found in the current environment"
    if not util.which(ASTROMETRY_COMMAND):
        raise AstrometryNetNotInstalled(emsg.format(ASTROMETRY_COMMAND))
    args = [ASTROMETRY_COMMAND, '--help']
    output = subprocess.check_output(args).decode()
    version = re.search(PATTERN, output, re.MULTILINE).group(1)
    # From, for example, '0.78' to (0, 78)
    return tuple(int(x) for x in version.split('.'))
def aplay_sound(self, snd=None):
    """Play *snd* (or the recorded wavefile) through aplay.

    Returns [stdout, stderr, returncode]; raises Exception with full
    command diagnostics when aplay exits non-zero.
    """
    cmd_arr = ([binaries.aplay, snd] if snd
               else [util.which('aplay'), self.state.wavefile])
    # p = Popen(cmd_arr, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    proc = Popen(cmd_arr, stdout=PIPE, stderr=PIPE)
    output, err = proc.communicate()
    rc = proc.returncode
    if rc != 0:
        details = [
            "Error running command: {}".format(" ".join(cmd_arr)),
            "Return code: {}".format(rc),
            "STDERR:\n{}".format(err),
            "STDOUT:\n{}".format(output),
        ]
        raise Exception("\n".join(details))
    return [output, err, rc]
def fetch_tools():
    """Download and unpack the closure/soy/gss build tools into the
    project tree, installing scss via gem when it is missing.

    FIX: `subprocss.call` was a NameError typo for `subprocess.call`,
    so the scss install path always crashed.
    """
    tmp = PROJ_ROOT / 'tmp'
    logger.info('Creating temporary directory at %s', tmp)
    try:
        os.mkdir(tmp)
    except OSError:
        # Directory already exists -- fine.
        pass
    # Downloading
    download_if_not_exist(CLOSURE_ZIP_URL, tmp / CLOSURE_ZIP)
    download_if_not_exist(JS_COMPILER_ZIP_URL, tmp / JS_COMPILER_ZIP)
    download_if_not_exist(SOY_ZIP_URL, tmp / SOY_ZIP)
    download_if_not_exist(CSS_COMPILER_URL, tmp / CSS_COMPILER)
    # Installing
    if not util.which('scss'):
        logger.info('Installing scss')
        subprocess.call(maybe_add_sudo(['gem', 'install', 'scss']))
    with ZipFile(tmp / CLOSURE_ZIP) as f:
        logger.info('Extracting closure library')
        f.extractall(tmp)
    with ZipFile(tmp / JS_COMPILER_ZIP) as f:
        logger.info('Extracting closure compiler')
        f.extractall(tmp)
    with ZipFile(tmp / SOY_ZIP) as f:
        logger.info('Extracting closure template compiler')
        f.extractall(tmp)
    logger.info('Copying closure library into project tree')
    shutil.rmtree(PROJ_ROOT / 'third_party', True)
    shutil.copytree(tmp / 'third_party', PROJ_ROOT / 'third_party')
    shutil.rmtree(PROJ_ROOT / 'js' / 'goog', True)
    shutil.copytree(tmp / 'closure' / 'goog', PROJ_ROOT / 'js' / 'goog')
    soyutilroot = PROJ_ROOT / 'js' / 'goog' / 'soyutil'
    try:
        os.makedirs(soyutilroot)
    except OSError:
        pass
    for filename in glob(tmp / 'soy*.js'):
        shutil.copy2(filename, soyutilroot)
    logger.info(util.colorize('Done', 'green'))
def init_settings_file(filepath):
    """Create a fresh configure.yaml at *filepath* from the bundled
    template, substituting project name, source dir and compiler."""
    critical_error_if_file_exists(filepath)
    if util.which('ninja') is None:
        logging.warning('cannot find ninja')
    substitutions = {
        'project_name': os.path.basename(os.getcwd()),
        'sourcedir': 'source',
        'cxx': get_compiler()
    }
    rendered = util.get_resource('defaults/configure.yaml')
    # Replace each %key% placeholder with its value.
    for key, value in substitutions.items():
        rendered = re.sub(r'(%%%s%%)' % key, value, rendered)
    with open(filepath, 'w+') as fd:
        fd.write(rendered)
def build_css():
    """Regenerate project CSS: clean stale sass output, then compile
    .scss via sass and .gss via the closure stylesheet compiler."""
    logger.info('Cleaning up css generated from sass')
    scss_glob = PROJ_ROOT / 'css' / '*.scss'
    for scss_name in glob(scss_glob):
        generated = scss_name.replace('.scss', '.css')
        try:
            os.unlink(generated)
        except OSError:
            pass  # no stale output for this source file
    logger.info('Building up css from sass')
    subprocess.call(['ruby', util.which('sass'), '--update'] + glob(scss_glob))
    logger.info('Building up css from gss')
    gss_glob = PROJ_ROOT / 'css' / '*.gss'
    for gss_name in glob(gss_glob):
        subprocess.call(['java', '-jar', PROJ_ROOT / 'tmp' / CSS_COMPILER,
                         '--pretty-print', gss_name,
                         '--output-file', gss_name.replace('.gss', '-gen.css')])
    logger.info(util.colorize('Done', 'green'))
def run(self, pacman):
    """Execute the pacman invocation for this test case.

    Builds the command line from the `pacman` option dict (keys used
    here: bin, gdb, valgrind, debug, manual-confirm, nolog), runs it
    with subprocess.call and stores the exit status in self.retcode.
    Skips entirely if another pacman session holds the lock file.
    Python 2 code (print statements).
    """
    # Refuse to run concurrently with another pacman session.
    if os.path.isfile(util.PM_LOCK):
        print "\tERROR: another pacman session is on-going -- skipping"
        return
    print "==> Running test"
    vprint("\tpacman %s" % self.args)
    cmd = []
    if os.geteuid() != 0:
        # Not running as root: wrap the call in fakeroot/fakechroot so
        # package operations can pretend to have full privileges.
        fakeroot = util.which("fakeroot")
        if not fakeroot:
            print "WARNING: fakeroot not found!"
        else:
            cmd.append("fakeroot")
        fakechroot = util.which("fakechroot")
        if not fakechroot:
            print "WARNING: fakechroot not found!"
        else:
            cmd.append("fakechroot")
    if pacman["gdb"]:
        cmd.extend(["libtool", "execute", "gdb", "--args"])
    if pacman["valgrind"]:
        # Suppression file lives two directories above this module.
        suppfile = os.path.join(os.path.dirname(__file__),
                                '..', '..', 'valgrind.supp')
        cmd.extend(["libtool", "execute", "valgrind", "-q",
                    "--tool=memcheck", "--leak-check=full",
                    "--show-reachable=yes",
                    "--suppressions=%s" % suppfile])
    # Point pacman at this test's sandboxed root/db/cache.
    cmd.extend([pacman["bin"],
                "--config", os.path.join(self.root, util.PACCONF),
                "--root", self.root,
                "--dbpath", os.path.join(self.root, util.PM_DBPATH),
                "--cachedir", os.path.join(self.root, util.PM_CACHEDIR)])
    if not pacman["manual-confirm"]:
        cmd.append("--noconfirm")
    if pacman["debug"]:
        cmd.append("--debug=%s" % pacman["debug"])
    cmd.extend(shlex.split(self.args))
    if not (pacman["gdb"] or pacman["valgrind"] or pacman["nolog"]):
        output = open(os.path.join(self.root, util.LOGFILE), 'w')
    else:
        # gdb/valgrind need the terminal; nolog disables capture.
        output = None
    vprint("\trunning: %s" % " ".join(cmd))
    # Change to the tmp dir before running pacman, so that local package
    # archives are made available more easily.
    time_start = time.time()
    # LC_ALL=C pins message language/formatting for output checks.
    self.retcode = subprocess.call(cmd, stdout=output, stderr=output,
                                   cwd=os.path.join(self.root, util.TMPDIR),
                                   env={'LC_ALL': 'C'})
    time_end = time.time()
    vprint("\ttime elapsed: %.2fs" % (time_end - time_start))
    if output:
        output.close()
    vprint("\tretcode = %s" % self.retcode)
    # Check if the lock is still there
    if os.path.isfile(util.PM_LOCK):
        print "\tERROR: %s not removed" % util.PM_LOCK
        os.unlink(util.PM_LOCK)
    # Look for a core file
    if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
        print "\tERROR: pacman dumped a core file"
# NOTE(review): this chunk begins mid-method -- the enclosing `def` is not
# visible in this view.  The fragment below removes a finished sound
# handle from the bookkeeping dict.
if handle in self.runningSounds:
    # Instead of using a lock, just catch the error if someone else got
    # there first (EAFP).
    try:
        del self.runningSounds[handle]
    except KeyError:
        pass

def isPlaying(self, channel="PRIMARY"):
    """Return True if a sound is currently playing on *channel*."""
    try:
        return self.runningSounds[channel].isPlaying()
    except KeyError:
        # No sound was ever started on this channel.
        return False

# See if we have mpg123 installed at all and if not use the dummy driver.
if util.which('mpg123'):
    backend = Mpg123Wrapper()
else:
    backend = SoundWrapper()

# Make fake module functions mapping to the bound methods.
playSound = backend.playSound
stopSound = backend.stopSound
isPlaying = backend.isPlaying
stopAllSounds = backend.stopAllSounds
def test_which():
    """Smoke-test which(): resolves a ubiquitous binary, None otherwise."""
    resolved = which('ls')
    assert resolved == '/bin/ls'
    missing = which('THIS-IS-SPARTA')
    assert missing is None
def _check_prince():
    """Report whether the `prince` converter is available on PATH."""
    return bool(which('prince'))
def __init__(self):
    """Resolve self.command to an absolute path on PATH.

    Exits the process with status 1 (after printing to stderr) when the
    program cannot be found; otherwise records the path in self.binpath
    and reports it on stdout.
    """
    self.binpath = util.which(self.command)
    if self.binpath is None:
        message = ("Could not find program '%s'. "
                   "Ensure that it is on the path. Exiting.\n" % self.command)
        sys.stderr.write(message)
        sys.exit(1)
    sys.stdout.write("Found '%s' at '%s'\n" % (self.name, self.binpath))
import string
import random
import subprocess

from util import which

# Shared sink for discarding child-process output.
DEVNULL = open(os.devnull, 'wb')

# Equation rendering modes.
INLINE = 'mode-inline'
DISPLAY = 'mode-display'

# SVG fill style applied to markers (rgb components as percentages).
MARKER_STYLE = 'fill:rgb(72.898865%,85.499573%,33.299255%);fill-opacity:1;'

# Strips every character except digits and dots -- presumably used to
# pull numeric values out of SVG attribute strings (body not visible
# here).  NOTE(review): os/re/sys are used below but not imported in
# this chunk; assumed imported earlier in the file.
_keep_numbers_regex = re.compile("[^\d\.]")

# Check dependencies -- exit immediately if a required external tool is
# missing from PATH.
if not which("pdf2svg"):
    print("Please install pdf2svg (apt-get/yum/brew install pdf2svg)")
    sys.exit()
if not which("xelatex"):
    print("Please install xelatex and the stanalone style (apt-get install texlive-xelatex & texlive-latex-extra)")
    sys.exit()
if not which("pdfcrop"):
    print("Could not find the pdfcrop binary.")
    sys.exit()


# NOTE(review): render()'s body runs past this view; signature only.
# preamble=[] is a mutable default argument -- worth confirming the body
# never mutates it.
def render(equation, mode=INLINE, font_family="Helvetica Neue", preamble=[],
           font_size=12, x_height=None):
def _check_pdfreactor():
    """Report whether the `pdfreactor` converter is available on PATH."""
    return bool(which('pdfreactor'))
def _check_calibre():
    """Report whether calibre's `ebook-convert` is available on PATH."""
    return bool(which('ebook-convert'))
# NOTE(review): fragment from inside an unseen function (note the bare
# `return 0`) -- tokens preserved, comments added only.
scs = ScreenSaver(pid, projectsdir, savedir)
if idle:
    # Flags whose value (or the flag itself) must be stripped when the
    # current argv is replayed as the idle command.
    d_args_d = ('-I', '-i', '--current-session', '--idle', '--in')
    nargv = []
    bSkipNext = False
    for arg in sys.argv:
        if arg in d_args_d:
            # Drop this flag and remember to drop its value too.
            bSkipNext = True
        elif bSkipNext:
            bSkipNext = False
        else:
            if not arg.startswith('logpipe'):
                nargv.append(arg)
    # Replace argv[0] with the resolved screen-session binary
    # (this project's which() apparently returns a sequence -- note the
    # [0] index; TODO confirm against util.which).
    nargv[0] = util.which('screen-session')[0]
    scscall = nargv.pop(0)
    scscall += ' ' + nargv.pop(0)
    for arg in nargv:
        scscall += " " + arg
    scscall += " --in " + input
    # Wrap the replay in a detached screen invocation.
    command = 'exec sh -c \"screen ' + scscall + ' > /dev/null\"'
    scs.idle(idle, command)
    out(':idle %s %s' % (idle, command))
    return 0
if not scs.exists():
    out('No such session: %s' % pid)
    doexit(1, waitfor)
def run(self, pacman):
    """Execute the pacman invocation for this test case (TAP variant).

    Builds the command line from the `pacman` option dict (keys used
    here: bin, bindir, gdb, valgrind, debug, manual-confirm, nolog),
    runs it via subprocess.call and stores the exit status in
    self.retcode.  Bails the TAP session when another pacman instance
    holds the lock file or the binary cannot be located.
    """
    # Refuse to run concurrently with another pacman session.
    if os.path.isfile(util.PM_LOCK):
        tap.bail("\tERROR: another pacman session is on-going -- skipping")
        return
    tap.diag("==> Running test")
    vprint("\tpacman %s" % self.args)
    cmd = []
    if os.geteuid() != 0:
        # Not running as root: wrap the call in fakeroot/fakechroot so
        # package operations can pretend to have full privileges.
        fakeroot = util.which("fakeroot")
        if not fakeroot:
            tap.diag("WARNING: fakeroot not found!")
        else:
            cmd.append("fakeroot")
        fakechroot = util.which("fakechroot")
        if not fakechroot:
            tap.diag("WARNING: fakechroot not found!")
        else:
            cmd.append("fakechroot")
    if pacman["gdb"]:
        cmd.extend(["libtool", "execute", "gdb", "--args"])
    if pacman["valgrind"]:
        # Suppression file lives two directories above this module.
        suppfile = os.path.join(os.path.dirname(__file__),
                                '..', '..', 'valgrind.supp')
        cmd.extend(["libtool", "execute", "valgrind", "-q",
                    "--tool=memcheck", "--leak-check=full",
                    "--show-reachable=yes",
                    "--gen-suppressions=all",
                    "--child-silent-after-fork=yes",
                    "--log-file=%s" % os.path.join(self.root,
                                                   "var/log/valgrind"),
                    "--suppressions=%s" % suppfile])
        # The valgrind log must stay empty for the test to pass.
        self.addrule("FILE_EMPTY=var/log/valgrind")
    # replace program name with absolute path
    prog = pacman["bin"]
    if not prog:
        prog = util.which(self.cmd[0], pacman["bindir"])
    if not prog or not os.access(prog, os.X_OK):
        if not prog:
            tap.bail("could not locate '%s' binary" % (self.cmd[0]))
        # NOTE(review): indentation reconstructed -- `return` is taken to
        # cover both "not found" and "not executable"; confirm upstream.
        return
    cmd.append(os.path.abspath(prog))
    cmd.extend(self.cmd[1:])
    if pacman["manual-confirm"]:
        cmd.append("--confirm")
    if pacman["debug"]:
        cmd.append("--debug=%s" % pacman["debug"])
    cmd.extend(shlex.split(self.args))
    if not (pacman["gdb"] or pacman["nolog"]):
        output = open(os.path.join(self.root, util.LOGFILE), 'w')
    else:
        # gdb needs the terminal; nolog disables capture.
        output = None
    vprint("\trunning: %s" % " ".join(cmd))
    # Change to the tmp dir before running pacman, so that local package
    # archives are made available more easily.
    time_start = time.time()
    # LC_ALL=C pins message language/formatting for output checks.
    self.retcode = subprocess.call(cmd, stdout=output, stderr=output,
                                   cwd=os.path.join(self.root, util.TMPDIR),
                                   env={'LC_ALL': 'C'})
    time_end = time.time()
    vprint("\ttime elapsed: %.2fs" % (time_end - time_start))
    if output:
        output.close()
    vprint("\tretcode = %s" % self.retcode)
    # Check if the lock is still there
    if os.path.isfile(util.PM_LOCK):
        tap.diag("\tERROR: %s not removed" % util.PM_LOCK)
        os.unlink(util.PM_LOCK)
    # Look for a core file
    if os.path.isfile(os.path.join(self.root, util.TMPDIR, "core")):
        tap.diag("\tERROR: pacman dumped a core file")