def make_bootable(self):
    """Make the target USB device bootable.

    Normalises the Windows boot files to lowercase names, installs GRUB
    onto ``self.device`` and writes a minimal grub.cfg that chainloads
    ``bootmgr``.
    """
    # self.uuid = re.search("UUID=\"(\w*)\"", str(sh.blkid(self.device + "1"))).group(1)
    # print("Device UUID:", self.uuid)

    # In some Windows releases these boot files are shipped with
    # uppercase names; rename them to the lowercase forms GRUB expects.
    def try_move(old_file, new_file):
        # Best-effort: the file may already have the desired name.
        # Catch sh's failure type instead of the original bare `except:`,
        # which also swallowed KeyboardInterrupt/SystemExit.
        try:
            sh.mv(old_file, new_file)
        except sh.ErrorReturnCode:
            print("File {} already exists, nothing to move".format(new_file))

    self.boot_folder = self.destination_mount + "/boot"
    try_move(self.destination_mount + "/BOOT", self.boot_folder)
    try_move(self.destination_mount + "/BOOTMGR", self.destination_mount + "/bootmgr")

    # Install the bootloader, equivalent to:
    # grub-install --target=i386-pc --boot-directory="/<USB_mount_folder>/boot" /dev/sdX
    installer = sh.Command(sh.which("grub-install") or sh.which("grub2-install"))
    installer(self.device, target="i386-pc", skip_fs_probe=True, force=True,
              boot_directory=self.destination_mount + "/boot")

    # Write the GRUB configuration; grub2 installs use a "grub2" directory.
    with open(
            "{}/{}/grub.cfg".format(
                self.boot_folder,
                "grub2" if str(installer).find("grub2") != -1 else "grub"),
            "wt") as config:
        config.write("""
set menu_color_normal=white/black
set menu_color_highlight=black/light-gray

menuentry 'Install Windows' {
    ntldr /bootmgr
}
""")
def check_required_programs():
    """Verify that mediainfo and a libx264-capable ffmpeg are installed.

    Exits with status 1 when a requirement is missing; otherwise selects
    ffmpeg audio-encoder options on the module-level ``config`` object.
    """
    # Check that mediainfo is installed.
    if sh.which("mediainfo") is None:
        # Bug fix: the original wrote `print(...) % (PROG_NAME)`, which
        # applies `%` to print()'s return value (None) and raises
        # TypeError instead of printing. Format inside the call.
        print("%s: Cannot find mediainfo, please install before continuing."
              % PROG_NAME)
        exit(1)

    # Check that ffmpeg is installed.
    if sh.which("ffmpeg") is None:
        print("%s: Cannot find ffmpeg. "
              "Please install ffmpeg version 1.0 or later." % PROG_NAME)
        # Bug fix: without this exit the probe below would still try to
        # run the missing ffmpeg binary and crash.
        exit(1)

    out = StringIO()
    try:
        sh.ffmpeg("-encoders", _out=out)
    except sh.ErrorReturnCode:
        print("%s: unsupported version of ffmpeg installed. "
              "Install ffmpeg version 1.0 or higher" % PROG_NAME)

    if "libx264" not in out.getvalue():
        print("%s: Installed version of ffmeg doesn't include libx264 support. "
              "Install version of ffmpeg that supports libx264." % PROG_NAME)
        exit(1)

    config.extra_opts = ["-strict", "experimental"]
    # Prefer libfaac; fall back to the builtin aac encoder when absent.
    config.audio_encoder = "libfaac"
    if "libfaac" not in out.getvalue():
        config.audio_encoder = "aac"
def setUp(self):
    """Install the test GPG key, skipping when sops/gpg are unavailable."""
    super().setUp()
    have_sops = sh.which('sops') is not None
    have_gpg = sh.which('gpg') is not None
    if have_sops and have_gpg:
        self.install_gpg_key()
    else:
        self.skipTest('sops and/or pgp not installed')
def tearDown(self):
    """Delete the test GPG key, skipping when sops/gpg are unavailable."""
    super().tearDown()
    have_sops = sh.which('sops') is not None
    have_gpg = sh.which('gpg') is not None
    if have_sops and have_gpg:
        self.delete_gpg_key()
    else:
        self.skipTest('sops and/or pgp not installed')
def get_virtualenv_executable():
    """Return the path of the first available virtualenv executable.

    Probes ``virtualenv2``, ``virtualenv-2.7`` and ``virtualenv`` in that
    order; returns None when none of them is on $PATH.
    """
    # The original initialised `virtualenv = None` and immediately tested
    # `if virtualenv is None` — a dead check; a candidate loop removes the
    # repetition as well.
    for candidate in ('virtualenv2', 'virtualenv-2.7', 'virtualenv'):
        path = sh.which(candidate)
        if path is not None:
            return path
    return None
def __init__(self, *args, **kwargs):
    """Resolve the package-manager binary and bind its command wrappers.

    When the manager named by ``self.name`` is on $PATH, ``self.pkm`` and
    ``self.sudo_pkm`` become sh command objects and ``is_available`` is
    set; otherwise both fall back to ``self.__manager_status__``.
    """
    # Hoist the lookup: the original called sh.which twice, spawning
    # one `which` subprocess per call.
    path = sh.which(self.name) if self.name else None
    if path:
        self.path = path
        self.pkm = getattr(sh, self.name)
        self.sudo_pkm = getattr(getattr(sh.contrib, "sudo"), self.name)
        self.is_available = True
    else:
        self.pkm = self.__manager_status__
        self.sudo_pkm = self.__manager_status__
def run_mux(self):
    """Generate a tmuxinator session for the cell's roles and launch it.

    Requires both ``tmux`` and ``mux`` (tmuxinator) on $PATH. Renders a
    mustache template into ``~/.tmuxinator/<cell>.yml`` — one tiled
    window per role, one ssh pane per instance — then runs ``mux <cell>``.
    """
    self.ensure_config()
    # Bail out early if either required tool is missing.
    if not sh.which('tmux'):
        print("You need tmux for this subcommand (brew install tmux).")
        return
    if not sh.which('mux'):
        print("You need mux for this subcommand (gem install tmuxinator).")
        return
    # Copy so that narrowing to a single role never mutates ROLES itself.
    roles = ROLES[:]
    if self.arguments["<role>"]:
        roles = [self.arguments["<role>"]]
    # Mustache template for the tmuxinator YAML config.
    cfg_template = '''
name: {{cell_name}}
root: ~/
windows:
{{#roles}}
  - {{name}}:
      layout: tiled
      panes:
{{#instances}}
        - {{ip_addr}}:
          - {{ssh_cmd}}
          - clear
{{/instances}}
{{/roles}}
'''
    cfg = {
        'cell_name': self.full_cell,
        'roles': [],
    }
    for role in sorted(roles):
        print("ROLE: {}".format(role))
        print(
            self.backend.instances(role=role, format=self.get_ssh_ip_type()))
        # One window per role; each pane ssh'es into one instance.
        cfg['roles'].append({
            'name': role,
            'instances': [{
                'ip_addr': instance[0],
                'ssh_cmd': self.ssh_cmd(instance[0]),
            } for instance in self.backend.instances(
                role=role, format=self.get_ssh_ip_type())]
        })
    with open(
            os.path.join(os.path.expanduser('~/.tmuxinator'),
                         '{}.yml'.format(self.full_cell)),
            'w') as f:
        f.write(pystache.render(cfg_template, cfg))
        f.flush()
    # Hand off to tmuxinator to actually create/attach the session.
    subprocess.call('mux {}'.format(self.full_cell), shell=True)
def test_command_wrapper(self):
    """`wc -l` over `ls -A1` must equal the entry count of the cwd."""
    from sh import Command, which

    wc = Command(which("wc"))
    ls = Command(which("ls"))
    counted = int(wc(ls("-A1"), l=True))
    expected = len(os.listdir("."))
    self.assertEqual(counted, expected)
def chkcom(command):
    """Ensure `command` is installed, installing it when absent.

    "haproxy" is installed via homebrew, "ghost" via gem. Returns the
    installer's exit code, an error string when brew itself is missing,
    or None when nothing needed doing.
    """
    if command == "haproxy":
        # Bug fix: check that brew exists *before* invoking it — the
        # original ran `sh.brew("list", ...)` first and crashed when brew
        # was not installed.
        if not sh.which('brew'):
            # Bug fix: interpolate the command name; the original returned
            # the raw string with an unfilled "%s" placeholder.
            return ("# ERROR: cannot find command %s and brew isn't "
                    "installed, aborting" % command)
        if not sh.brew("list", command):
            output = sh.brew("install", command)
            return (output.exit_code)
    elif command == "ghost":
        if not sh.which('ghost'):
            output = sh.sudo("gem", "install", command)
            return (output.exit_code)
def _get_python_path(self):
    """Discover usable Python interpreters and let the user pick one.

    Probes $PATH (python/python3/python2), a fixed custom install path
    and the Homebrew cellar, de-duplicates candidates by literal and
    real (symlink-resolved) path, then prompts interactively.

    Returns:
        tuple: ``(path, version)`` for the selected interpreter.
        Exits the process when no interpreter is found.
    """
    _python_paths = [
        sh.which('python'),
        sh.which('python3'),
        sh.which('python2')
    ]
    # Drop None entries for interpreters that are not installed.
    python_paths = [str(path) for path in _python_paths if path]
    if os.path.isfile('/usr/local/python-3.6.5/bin/python'):
        python_paths.append('/usr/local/python-3.6.5/bin/python')
    if os.path.isdir('/usr/local/Cellar/python'):
        # Homebrew layout: pick the first python3 binary under the cellar
        # (find | sort | head -n1, piped through sh).
        out = sh.find('/usr/local/Cellar/python', '-regex',
                      '.*/bin/python3[0-9.]*$', '-type', 'f', _piped=True)
        out = sh.sort(out, _piped=True)
        python_paths.append(sh.head(out, '-n1').strip())
    useable_pythons = []
    python_paths_set = set()
    for python_path in python_paths:
        # Skip duplicates — both the literal path and its symlink target,
        # so e.g. /usr/bin/python and the python3 it points at collapse
        # into one menu entry.
        if python_path in python_paths_set:
            continue
        python_paths_set.add(python_path)
        if os.path.realpath(python_path) in python_paths_set:
            continue
        python_paths_set.add(os.path.realpath(python_path))
        useable_pythons.append(
            (python_path, self._get_python_version(python_path)))
    if len(useable_pythons) == 0:
        print('Not found python!!')
        sys.exit(1)
    error = ''
    while True:
        # Re-prompt until the user enters an index inside the menu range.
        # NOTE(review): a non-numeric reply makes int() raise ValueError
        # and aborts the prompt loop — presumably acceptable here.
        message = '{}\n{}select python path [{}]: '.format(
            '\n'.join([
                '{}. {} (v{})'.format(i, *e)
                for i, e in enumerate(useable_pythons)
            ]),
            error,
            ','.join([str(i) for i in range(len(useable_pythons))]))
        num = int(input(message))
        if num < 0 or num >= len(useable_pythons):
            error = 'error: invalid input, try again!! '
            continue
        return useable_pythons[num]
def _local_version(self, executable):
    """Return the locally-installed version of `executable` as a semver.

    Each supported tool has its own version-output format, hence the
    per-tool parsing below. Raises KeyError for unknown executables.
    """
    if executable == "fy":
        # `fy --version` prints e.g. "fycli <version>".
        output = fy(version=True).stdout.decode("UTF-8").strip()
        version = self._remove_prefix(output, "fycli ")
    elif executable == "gcloud":
        # gcloud can emit JSON; the SDK version lives under this key.
        output = json.loads(
            gcloud.version(format="json").stdout.decode("UTF-8").strip())
        version = output["Google Cloud SDK"]
    elif executable == "kubectl":
        # Client-only JSON version; gitVersion looks like "v1.2.3[-dispatcher]".
        output = json.loads(
            kubectl.version(client=True,
                            output="json").stdout.decode("UTF-8").strip())
        version = self._remove_suffix(
            self._remove_prefix(output["clientVersion"]["gitVersion"], "v"),
            "-dispatcher",
        )
    elif executable == "terraform":
        # Check if terraform is a symlink or tfenv bash script.. if so then run
        # tfenv install first to ensure that the correct version of terraform is
        # installed.
        #
        # This is necessary because otherwise if the terraform version isn't
        # installed but tfenv is in use then the terraform version check will fail
        # due to stdout being filled with garbage from tfenv during terraform binary
        # install.
        terraform_path = which("terraform")
        if Path(terraform_path).is_symlink():
            if "tfenv" in Path(terraform_path).resolve().parts:
                tfenv.install()
        elif "tfenv" in Path(terraform_path).parts:
            tfenv.install()
        output = json.loads(
            terraform.version(json=True).stdout.decode("UTF-8").strip())
        version = output["terraform_version"]
    elif executable == "vault":
        # `vault version` prints "Vault vX.Y.Z"; strip ANSI reset first.
        output = vault.version().stdout.decode("UTF-8").strip()
        version = self._remove_prefix(self._strip_ansi_reset(output),
                                      "Vault v")
    elif executable == "kube-score":
        # Version is the third whitespace-separated token, trailing comma
        # removed.
        output = kube_score.version().stdout.decode("UTF-8").strip()
        version = output.split(" ")[2].rstrip(",")
    elif executable == "tfsec":
        output = tfsec(version=True).stdout.decode("UTF-8").strip()
        version = self._remove_prefix(output, "v")
    else:
        raise KeyError(
            f"Executable not found in fy lockfile: {executable}")
    semver = parse_version(version)
    return semver
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
    """Assemble the cross-compilation environment for this recipe."""
    env = environ.copy()
    env["HOSTARCH"] = arch.command_prefix
    env["CC"] = arch.get_clang_exe(with_target=True)
    hostpython_dir = self.get_recipe(
        "host" + self.name, self.ctx).get_path_to_python()
    env["PATH"] = "{hostpython_dir}:{old_path}".format(
        hostpython_dir=hostpython_dir, old_path=env["PATH"])
    cflags = [
        "-fPIC",
        "-DANDROID",
        "-D__ANDROID_API__={}".format(self.ctx.ndk_api),
    ]
    env["CFLAGS"] = " ".join(cflags)
    env["LDFLAGS"] = env.get("LDFLAGS", "")
    if sh.which("lld") is None:
        warning("lld not found, linking without it. "
                "Consider installing lld if linker errors occur.")
    else:
        # Note: The -L. is to fix a bug in python 3.7.
        # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=234409
        env["LDFLAGS"] += " -L. -fuse-ld=lld"
    return env
def new(cls, filename):
    """Build a Ledger wrapper for `filename`, resolving the ledger binary.

    Uses ``settings.LEDGER_BIN`` when configured, otherwise whatever
    ``ledger`` is found on $PATH.
    """
    if hasattr(settings, "LEDGER_BIN"):
        binary = settings.LEDGER_BIN
    else:
        binary = sh.which("ledger")
    command = sh.Command(binary).bake(
        _tty_out=False, no_color=True, file=filename)
    return Ledger(command, filename=filename)
def get_executable_path(executable):
    """Return the absolute path of `executable`, raising when absent."""
    located = sh.which(executable)
    if located is None:
        raise Exception(
            "Can't find '{}' executable, is it in your $PATH?".format(
                executable))
    return str(located)
def exists():
    """
    Determines whether or not ser2sock exists in our path.

    :returns: Whether or not ser2sock exists in the path.
    """
    found = sh.which('ser2sock')
    return found is not None
def zsh_linux(password):
    """Install build prerequisites and zsh via apt, then make zsh default.

    Runs everything under sudo (password supplied by the caller) and
    echoes progress through typer.
    """
    with sh.contrib.sudo(password=password, _with=True):
        apt('update', '-y')
        # Large batch of build/runtime dependencies (pyenv-style toolchain).
        apt(
            'install',
            '-y',
            'make',
            'build-essential',
            'libssl-dev',
            'zlib1g-dev',
            'libbz2-dev',
            'libreadline-dev',
            'libsqlite3-dev',
            'wget',
            'curl',
            'llvm',
            'libncurses5-dev',
            'libncursesw5-dev',
            'xz-utils',
            'tk-dev',
            'libffi-dev',
            'liblzma-dev',
            'python-openssl',
            'git',
        )
        typer.echo("Installing zsh through apt-get")
        for line in apt_get("install", "zsh", _iter=True):
            typer.echo(line)
        typer.echo("Setting zsh as the default cli.")
        # NOTE(review): this looks like it was meant to be
        # `chsh -s $(which zsh)` — as written it runs `csh` on the output
        # of `which -s zsh` (which prints nothing with -s). Confirm intent
        # before relying on this line.
        csh(which("-s", "zsh"))
        typer.secho("You've successfully installed zsh.", fg=colors.BRIGHT_GREEN)
def install():
    """Register borgmacator as a GNOME autostart app and seed its config."""
    desktop_path = os.path.join(
        user_config_dir("autostart"), "borgmacator.desktop")
    with open(desktop_path, "w") as f:
        f.write("""[Desktop Entry]
Type=Application
Exec=%s
Hidden=false
X-GNOME-Autostart-enabled=true
Name=Borgmacator
""" % sh.which("borgmacator"))

    config = user_config_dir("borgmacator.json")
    if os.path.exists(config):
        return
    # First run: write the default configuration skeleton.
    defaults = {
        "healthchecks": {
            "api_key": "TODO",
            "filter": []
        },
        "terminal": {
            "path": "gnome-terminal",
            "args": ["--"],
            "kwargs": {}
        },
        "log_lines": 10,
        "update_interval": 15
    }
    with open(config, "w") as f:
        json.dump(defaults, f)
def pip(self):
    """Exec pip against the project's site-packages with a locked-down env.

    Initialises every recipe that has been fully built, then replaces the
    current process with pip targeting ``ctx.site_packages_dir``.
    """
    ctx = Context()
    for name in Recipe.list_recipes():
        key = "{}.build_all".format(name)
        if key not in ctx.state:
            continue
        recipe = Recipe.get_recipe(name, ctx)
        recipe.init_with_ctx(ctx)
    # Bug fix: guard with hasattr *before* touching the attribute — the
    # original printed ctx.site_packages_dir first, raising AttributeError
    # instead of the intended error message when python wasn't built yet.
    if not hasattr(ctx, "site_packages_dir"):
        print("ERROR: python must be compiled before using pip")
        sys.exit(1)
    print(ctx.site_packages_dir)
    # CC/CXX are disabled so pip cannot try to build native extensions
    # with the host toolchain.
    pip_env = {
        "CC": "/bin/false",
        "CXX": "/bin/false",
        "PYTHONPATH": ctx.site_packages_dir,
        "PYTHONOPTIMIZE": "2",
        "PIP_INSTALL_TARGET": ctx.site_packages_dir
    }
    print(pip_env)
    pip_path = sh.which("pip")
    if not pip_path:
        print("ERROR: pip not found")
        sys.exit(1)
    args = [pip_path] + sys.argv[2:]
    import os
    print("-- execute pip with: {}".format(args))
    # execve replaces this process; nothing below this line runs.
    os.execve(pip_path, args, pip_env)
def parallel_blast(inputfile, outfile, ninst, db, blasttype, task, blastoptions):
    '''
    Runs blast commands in parallel on a given fasta file

    :param str inputfile: Input fasta path
    :param str outfile: Output file path
    :param int ninst: number of cpus to use if not in PBS or SGE job
    :param str db: Database path to blast against
    :param str blasttype: Blast exe to use(blastn, blastx, blastp)
    :param str task: Blast task to run with -task option for blasttype or None if blastx/blastp
    :param str blastoptions: other options to pass to blast
    '''
    if set(STATIC_BLAST_ARGS).intersection(shlex.split(blastoptions)):
        raise ValueError("You cannot supply any of the arguments inside of {0} as" \
            " optional arguments to blast".format(STATIC_BLAST_ARGS))
    blast_path = sh.which(blasttype)
    # Bug fix: fail before building the command line — the original put
    # blast_path into the command list before testing it for None.
    if blast_path is None:
        raise ValueError("{0} is not in your path(Maybe not installed?)".format(
            blasttype
        ))
    args = list(PARALLEL_ARGS)
    args += generate_sshlogins(ninst)
    # Build the blast invocation that GNU parallel will run; parallel
    # replaces '{}' with each chunk of the input.
    blast_cmd = [blast_path]
    if task is not None:
        blast_cmd += ['-task', task]
    blast_cmd += ['-db', db]
    blast_cmd += [blastoptions]
    blast_cmd += ['-query', '{}']
    args += [' '.join(blast_cmd)]
    cmd = sh.Command('parallel')
    # Context managers ensure both handles are closed even if run() raises
    # (the original leaked them).
    with open(inputfile) as fin, open(outfile, 'w') as fout:
        run(cmd, *args, _in=fin, _out=fout)
def get_options():
    """Gets the command-line options."""
    parser = argparse.ArgumentParser(description=PROG_DESC)
    parser.add_argument(
        "-b", "--branch", required=True,
        help="Branch name of platform-complete.")
    parser.add_argument(
        "-l", "--loglevel",
        choices=["debug", "info", "warning", "error", "critical"],
        default="info",
        help="Logging level. (Default: %(default)s)")
    args = parser.parse_args()
    logger.setLevel(LOGLEVELS.get(args.loglevel.lower(), logging.NOTSET))
    # Display a version string
    logger.info("Using version: %s", SCRIPT_VERSION)
    # Ensure that required external commands are available before running.
    bin_extra_dirs = "/home/linuxbrew/.linuxbrew/bin"
    if not sh.which("jd", bin_extra_dirs):
        logger.critical("'jd' is not available.")
        sys.exit(2)
    return args.branch
def new_ws(cmd, args):
    """Create a new workspace by using the first free number > 0."""
    # Workspace numbers currently in use (named workspaces may yield None).
    nums = (w["num"] for w in i3.get_workspaces())
    nums = filter(lambda n: n is not None and n >= 0, nums)
    # Optional `--exec CMD`: grab the token that follows the flag, if any.
    try:
        exe = args[args.index("--exec")+1]
    except (IndexError, ValueError):
        exe = None
    i = -1  # fallback if `nums` happens to be empty
    # Scan the sorted numbers for the first gap: enumerate() supplies the
    # candidate number i, so the first position where i != n is unused.
    for i,n in enumerate(sorted(nums)):
        if i != n:
            cmd(str(i))
            break
    else:
        # No gap found — use the next number after the highest one seen
        # (or 0 via the fallback when there were no workspaces at all).
        cmd(str(i+1))
    if exe:
        # We use i3.exec_ here instead of sh.Command, as we do not want the
        # exe to be a child of this script's process
        # Also we get startup notification support for free :-)
        if sh.which(exe):
            # i3 exec always yields 'success'
            i3.exec_(exe)
        else:
            nag("Command '%s' not found!" % exe)
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
    """Return the environment used to cross-compile this recipe."""
    env = environ.copy()
    env['HOSTARCH'] = arch.command_prefix
    env['CC'] = arch.get_clang_exe(with_target=True)
    host_recipe = self.get_recipe('host' + self.name, self.ctx)
    env['PATH'] = '{hostpython_dir}:{old_path}'.format(
        hostpython_dir=host_recipe.get_path_to_python(),
        old_path=env['PATH'])
    env['CFLAGS'] = ' '.join(
        ['-fPIC', '-DANDROID',
         '-D__ANDROID_API__={}'.format(self.ctx.ndk_api)])
    env['LDFLAGS'] = env.get('LDFLAGS', '')
    has_lld = sh.which('lld') is not None
    if has_lld:
        # Note: The -L. is to fix a bug in python 3.7.
        # https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=234409
        env['LDFLAGS'] += ' -L. -fuse-ld=lld'
    else:
        warning('lld not found, linking without it. '
                'Consider installing lld if linker errors occur.')
    return env
def create_weights(src_grid, dest_grid, method='conserve', ignore_unmapped=False,
                   unmasked_src=True, unmasked_dest=False):
    """Generate an ESMF regridding-weights file between two grids.

    Writes both grids in SCRIP format, runs ESMF_RegridWeightGen (under
    mpirun when available, preferring the bundled contrib binary) and
    returns the path of the weights file, or None on failure.
    """
    # Bug fix: mkstemp returns an *open* OS-level fd; close each one so
    # the handles are not leaked (the original discarded the fds).
    fd, src_grid_scrip = tempfile.mkstemp(suffix='.nc')
    os.close(fd)
    fd, dest_grid_scrip = tempfile.mkstemp(suffix='.nc')
    os.close(fd)
    fd, regrid_weights = tempfile.mkstemp(suffix='.nc')
    os.close(fd)

    # An all-zero mask means "nothing masked" for ESMF.
    if unmasked_src:
        src_grid.write_scrip(src_grid_scrip,
                             mask=np.zeros_like(src_grid.mask_t, dtype=int))
    else:
        src_grid.write_scrip(src_grid_scrip)
    if unmasked_dest:
        dest_grid.write_scrip(dest_grid_scrip,
                              mask=np.zeros_like(dest_grid.mask_t, dtype=int))
    else:
        dest_grid.write_scrip(dest_grid_scrip)

    if ignore_unmapped:
        ignore_unmapped = ['--ignore_unmapped']
    else:
        ignore_unmapped = []

    mpirun = []
    if sh.which('mpirun') is not None:
        import multiprocessing as mp
        mpirun = ['mpirun', '-np', str(mp.cpu_count() // 2)]

    # Prefer the bundled ESMF binary; fall back to one on $PATH.
    my_dir = os.path.dirname(os.path.realpath(__file__))
    esmf = os.path.join(my_dir, 'contrib', 'bin', 'ESMF_RegridWeightGen')
    if not os.path.exists(esmf):
        esmf = 'ESMF_RegridWeightGen'

    try:
        cmd = mpirun + [esmf] + [
            '-s', src_grid_scrip, '-d', dest_grid_scrip, '-m', method,
            '-w', regrid_weights
        ] + ignore_unmapped
        sp.check_output(cmd)
    except sp.CalledProcessError as e:
        print("Error: ESMF_RegridWeightGen failed ret {}".format(e.returncode),
              file=sys.stderr)
        print(e.output, file=sys.stderr)
        log = 'PET0.RegridWeightGen.Log'
        if os.path.exists(log):
            print('Contents of {}:'.format(log), file=sys.stderr)
            with open(log) as f:
                print(f.read(), file=sys.stderr)
        return None
    finally:
        # Bug fix: remove the SCRIP inputs on the failure path as well —
        # the original only cleaned up after success.
        os.remove(src_grid_scrip)
        os.remove(dest_grid_scrip)

    return regrid_weights
def do_dev_update(settings, log_func):
    """Do a code update and install."""
    log_func("DEBUG", "Will perform a Python package update")
    os.chdir(settings.get('me_dir'))
    pip_bin = sh.which('pip')
    # Upgrade pip itself first, then the pinned requirements.
    for cmdline in ([pip_bin, 'install', '-U', 'pip'],
                    [pip_bin, 'install', '-U', '-r', 'pip-requirements.txt']):
        run_command(settings, log_func, cmdline)
def check_mmseqs_installation():
    '''Check mmseqs installation'''
    # NOTE(review): despite its name this function probes `git`, not
    # `mmseqs` — it appears copy-pasted from check_git(). Behaviour is
    # preserved here; confirm the intent before changing the probe.
    print('Checking git installation...')
    shOut = which('git')
    print(shOut)
    # Idiom fix: `x is not None` instead of the original `not x is None`.
    if shOut is not None:
        from sh import git
        print(git('--version'))
def parallel_diamond(inputfile, outfile, ninst, db, task, diamondoptions):
    '''
    Runs diamond commands in parallel on a given fasta file
    Will not run more than 1 diamond process per host as diamond utilizes
    threads better than blast
    Since diamond v0.7.9 produces a daa file, diamond view is required to output
    the tsv format that is similar to blast's output format.
    diamond view is automatically called on the produced .daa file so that
    GNU Parallel can combine all output into a single stream.

    :param str inputfile: Input fasta path
    :param str outfile: Output file path
    :param int ninst: number of threads to use if not in PBS or SGE job
    :param str db: Database path to blast against
    :param str task: blastx or blastp
    :param str diamondoptions: other options to pass to blast
    '''
    if set(STATIC_DIAMOND_ARGS).intersection(shlex.split(diamondoptions)):
        raise ValueError("You cannot supply any of the arguments inside of {0} as" \
            " optional arguments to diamond".format(STATIC_DIAMOND_ARGS))
    # This seems kinda stupid that we are just replacing cpu count for each
    # node with 1, but it is easier than refactoring other code to be better
    sshlogins = generate_sshlogins(ninst)
    for i in range(0, len(sshlogins), 2):
        _cpu, host = sshlogins[i+1].split('/')
        sshlogins[i+1] = '1/{0}'.format(host)
    dmnd_path = sh.which('diamond')
    if dmnd_path is None:
        raise ValueError("diamond is not in your path(Maybe not installed?)")
    # Diamond base command arguments
    # parallel replaces {} with the temporary file it is using
    # and replaces {#} with the current file segment it is using
    # After diamond is finished, diamond view will be used to output the tsv
    # format of the file
    diamond_cmd = [
        dmnd_path, task, '--threads', str(ninst), '--db', db,
        '--query', '{}', '--daa', '{}.{#}', ';',
        dmnd_path, 'view', '--daa', '{}.{#}.daa'
    ]
    if len(sshlogins) > 2:
        args = list(PARALLEL_ARGS)
        args += sshlogins
        # Bug fix: join with an explicit separator — the original did
        # `' '.join(diamond_cmd) + diamondoptions`, fusing the first
        # option token onto "...{}.{#}.daa".
        diamond_cmd_str = ' '.join(diamond_cmd) + ' ' + diamondoptions
        args += [diamond_cmd_str]
        cmd = sh.Command('parallel')
        # Close the handles deterministically (the original leaked them).
        with open(inputfile) as fin, open(outfile, 'w') as fout:
            run(cmd, *args, _in=fin, _out=fout)
    else:
        # Single host: run diamond directly, then convert the .daa output.
        dcmd = sh.Command('diamond')
        args = [task]
        if diamondoptions:
            args += shlex.split(diamondoptions)
        p = run(dcmd, *args, threads=ninst, db=db, query=inputfile,
                daa=outfile)
        with open(outfile, 'w') as fout:
            p = run(dcmd, 'view', daa=outfile + '.daa', _out=fout)
def get_env(self):
    """Build the compile/link environment for this iOS arch.

    Resolves clang/ar/ld through xcrun for the arch's SDK and, when
    ccache is installed and USE_CCACHE != '0', generates a tiny wrapper
    script so ccache fronts the compiler.
    """
    include_dirs = [
        "-I{}/{}".format(
            self.ctx.include_dir,
            d.format(arch=self))
        for d in self.ctx.include_dirs]

    env = {}
    ccache = sh.which('ccache')
    # Locate the SDK's clang via xcrun.
    cc = sh.xcrun("-find", "-sdk", self.sdk, "clang").strip()
    if ccache:
        ccache = ccache.strip()
        use_ccache = environ.get("USE_CCACHE", "1")
        if use_ccache != '1':
            env["CC"] = cc
        else:
            # Write (once, then cache on self) a shim script that invokes
            # `ccache <clang> "$@"`, because build systems want a single
            # CC executable.
            if not self._ccsh:
                self._ccsh = ccsh = sh.mktemp().strip()
                with open(ccsh, 'w') as f:
                    f.write('#!/bin/sh\n')
                    f.write(ccache + ' ' + cc + ' "$@"\n')
                sh.chmod('+x', ccsh)
            else:
                ccsh = self._ccsh
            env["USE_CCACHE"] = '1'
            env["CCACHE"] = ccache
            env["CC"] = ccsh
            # Pass through any CCACHE_* settings from the caller's env,
            # then apply defaults tuned for Xcode-style builds.
            env.update({k: v for k, v in environ.items()
                        if k.startswith('CCACHE_')})
            env.setdefault('CCACHE_MAXSIZE', '10G')
            env.setdefault('CCACHE_HARDLINK', 'true')
            env.setdefault('CCACHE_SLOPPINESS',
                           ('file_macro,time_macros,'
                            'include_file_mtime,include_file_ctime,file_stat_matches'))
    else:
        env["CC"] = cc
    env["AR"] = sh.xcrun("-find", "-sdk", self.sdk, "ar").strip()
    env["LD"] = sh.xcrun("-find", "-sdk", self.sdk, "ld").strip()
    env["OTHER_CFLAGS"] = " ".join(include_dirs)
    env["OTHER_LDFLAGS"] = " ".join([
        "-L{}/{}".format(self.ctx.dist_dir, "lib"),
    ])
    env["CFLAGS"] = " ".join([
        "-arch", self.arch,
        "-pipe", "-no-cpp-precomp",
        "--sysroot", self.sysroot,
        #"-I{}/common".format(self.ctx.include_dir),
        #"-I{}/{}".format(self.ctx.include_dir, self.arch),
        "-O3",
        self.version_min
    ] + include_dirs)
    env["LDFLAGS"] = " ".join([
        "-arch", self.arch,
        "--sysroot", self.sysroot,
        "-L{}/{}".format(self.ctx.dist_dir, "lib"),
        "-lsqlite3",
        self.version_min
    ])
    return env
def stop_wireless():
    '''
    Try official ways to stop wireless such as nmcli and rfkill.

    These often leave the service enabled, or the service is
    re-enabled on boot.

    To do: check rmcomm piconets
    '''
    # NOTE(review): `sh.which('nm')` tests for the binutils `nm` tool, and
    # 'nmcli' is not an apt package name — this block looks suspect;
    # confirm before relying on it.
    if not sh.which('nm'):
        sh.aptitude('install', 'nmcli')
        assert sh.which('nm')
    if not sh.which('service'):
        # Install a local `service` shim when the platform lacks one
        # (service_script_text is defined elsewhere in this module).
        service_path = '/usr/local/sbin/service'
        with open(service_path, 'w') as service_file:
            service_file.write(service_script_text)
        os.chmod(service_path, 0o755)
        assert sh.which('service')
    # Best-effort: each mechanism below may be absent on a given distro,
    # hence the deliberate swallow-everything excepts.
    try:
        sh.nmcli('nm', 'wifi', 'off')
        sh.nmcli('nm', 'wwan', 'off')
    except:
        pass
    # rfkill block all
    try:
        #if not sh.which ('rfkill'):
        #    sh.aptitude('install', 'rfkill')
        #assert sh.which ('rfkill')
        sh.rfkill('block', 'all')
    except:
        # some variants of linux don't have /dev/rfkill,
        # so there's no program rfkill
        pass
    # /etc/init.d/bluetooth stop
    # NOTE(review): `Bluetooth` (capitalised) must be a module-level name
    # defined elsewhere — presumably the service name string; verify.
    try:
        sh.service(Bluetooth, 'stop')
    except:
        try:
            sh.service(Bluetooth+'-unused', 'stop')
        except:
            pass
def setup(self):
    """Spawn youtube-dl (locally or over ssh) and expose its stdout pipe."""
    if self.get_local:
        self.log.info("Locally fetching %s..." % self.url)
        cmd = [which("youtube-dl"), "-4"]
    else:
        self.log.info("Remotely fetching %s..." % self.url)
        cmd = [which("ssh"), "-C", "theaterproxy@xxx", "youtube-dl"]
    if self.format:
        cmd.extend(["-f", str(self.format)])
    cmd.extend(["-o", "-", self.url])
    # Fetcher writes the media stream to stdout; stderr goes to our log.
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    PipeRedirector(proc.stderr,
                   LoggerPipe("mediajv.WebVideo.youtube-dl")).start()
    proc.stdin.close()
    self.input = proc.stdout
def zsh_windows(password):
    """Install zsh via apt-get under sudo and make it the default shell.

    NOTE(review): despite the name, this uses apt-get (a Linux package
    manager) and mirrors zsh_linux() — presumably intended for a
    WSL-style environment; confirm.
    """
    with sh.contrib.sudo(password=password, _with=True):
        typer.echo("Installing zsh through apt-get")
        for line in apt_get("install", "zsh", _iter=True):
            typer.echo(line)
        typer.echo("Setting zsh as the default cli.")
        # NOTE(review): looks like it was meant to be `chsh -s $(which zsh)`;
        # as written it runs `csh` on the output of `which -s zsh` — verify.
        csh(which("-s", "zsh"))
        typer.secho("You've successfully installed zsh.", fg=colors.BRIGHT_GREEN)
def sh_command(self, program): ''' return sh.Command(program), or None if program not found. ''' # sh.Command requires an absolute path program = sh.which(program) if not program: raise CliException('program not found: {}'.format(program)) return sh.Command(program)
def shell(container_id=None):
    """Exec an interactive bash inside the given (or most recent) container."""
    target = container_id or work.last_container_id
    # use os.execv so that docker gets the tty; there's no need for the
    # python side to wait anyway
    argv = ['docker']
    if configuration.docker_host:
        argv.extend(['-H', configuration.docker_host])
    argv.extend(['exec', '-it', target, '/bin/bash'])
    os.execv(sh.which('docker'), argv)
def check_git(os='Linux'):
    '''Check git installation'''
    print('Checking git installation...')
    from sh import which
    location = which('git')
    print(location)
    if location is None:
        return
    from sh import git
    return str(git('--version'))
def find_aria2c(self):
    """Return the first resolvable aria2c executable, or None with an error."""
    for candidate in (self.aria2c_path, 'aria2c', 'aria2c.exe'):
        if candidate is None:
            continue
        resolved = sh.which(candidate)
        if resolved:
            return resolved
    logging.error('aria2c is missing from the current configuration!')
    return None
def run(self, pipeline):
    """Run the ffmpeg stage: scale, filter and stream MPEG to stdout.

    Consumes ``self.input`` (a file-like object or a pipe), applies the
    configured video/audio filters and writes film-dvd MPEG to the
    process's stdout until ffmpeg exits.
    """
    # scale video
    self.add_vf("scale", "%dx%d" % pipeline.video_dim)
    self.setup()
    # Let each configured filter register itself on this stage.
    # (`filter` shadows the builtin — kept for byte-compatibility.)
    for filter in self.filters:
        filter.apply(self)

    # get the input: real files are passed by name, anything without a
    # fileno is handed to ffmpeg as "-" with the object wired to stdin.
    if not hasattr(self.input, 'fileno'):
        input_pipe = PIPE
        input_arg = self.input
    else:
        # assume that it's a pipe
        input_pipe = self.input
        input_arg = "-"

    input_args = []
    if self.skip_time:
        input_args.append("-ss")
        input_args.append(self.skip_time)
    for input in self.extra_inputs:
        input_args.append("-i")
        input_args.append(input)
    input_args += ['-i', input_arg]

    cmd = [
        which("ffmpeg"),
    ] + (["-re"] if self.realtime else []) + input_args + [
        "-vf", ", ".join(self.video_filters),
    ] + (["-af", ", ".join(self.audio_filters)]
         if len(self.audio_filters) else []) + [
        "-shortest",
        "-target", "film-dvd",
        "-q:v", "0", "-q:a", "0",
        "-f", "mpeg",
        "-loglevel", "error",
        "-"
    ]
    self.log.debug(" ".join(cmd))
    self.proc = Popen(cmd, stdin=input_pipe, stdout=PIPE, stderr=PIPE)
    try:
        # Mirror ffmpeg's stderr into our logger and its stdout to ours.
        PipeRedirector(self.proc.stderr,
                       LoggerPipe("mediajv.FFmpegSource.ffmpeg")).start()
        PipeRedirector(self.proc.stdout, sys.stdout.buffer).start()
        # wait until it dies
        self.proc.wait()
    finally:
        self.proc = None
        # Best-effort cleanup of any temp files the filters created.
        for file in self.temp_files:
            try:
                os.remove(file.name)
            except:
                pass
def test_execute_raises(patched_print_error, shell_instance):
    """execute() must exit with code 1 and print sh's failure report."""
    shell_instance._command = sh.false.bake()
    with pytest.raises(SystemExit) as excinfo:
        shell_instance.execute()
    assert 1 == excinfo.value.code
    expected = "\n\n RAN: {}\n\n STDOUT:\n\n\n STDERR:\n".format(
        sh.which('false'))
    patched_print_error.assert_called_once_with(expected)
def test_run_command_with_debug(mocker, patched_print_debug):
    """Debug mode prints the ANSIBLE/MOLECULE env sections and the command."""
    cmd = sh.ls.bake(_env={'ANSIBLE_FOO': 'foo', 'MOLECULE_BAR': 'bar'})
    util.run_command(cmd, debug=True)
    expected = [
        mocker.call('ANSIBLE ENVIRONMENT', '---\nANSIBLE_FOO: foo\n'),
        mocker.call('MOLECULE ENVIRONMENT', '---\nMOLECULE_BAR: bar\n'),
        mocker.call('COMMAND', sh.which('ls')),
    ]
    assert patched_print_debug.mock_calls == expected
def test_execute_raises(patched_print_error, shell_instance):
    """execute() exits with code 1 and reports the quoted command path."""
    shell_instance._command = sh.false.bake()
    with pytest.raises(SystemExit) as excinfo:
        shell_instance.execute()
    assert 1 == excinfo.value.code
    expected = "\n\n RAN: '{}'\n\n STDOUT:\n\n\n STDERR:\n".format(
        sh.which('false'))
    patched_print_error.assert_called_once_with(expected)
def test_run_command_with_debug_handles_no_env(mocker, patched_print_debug):
    """Without a baked _env, the env sections render as empty documents."""
    cmd = sh.ls.bake()
    util.run_command(cmd, debug=True)
    expected = [
        mocker.call('ANSIBLE ENVIRONMENT', '--- {}\n'),
        mocker.call('MOLECULE ENVIRONMENT', '--- {}\n'),
        mocker.call('COMMAND', sh.which('ls')),
    ]
    assert patched_print_debug.mock_calls == expected
def check_cmake3():
    """Check that cmake3 is installed.

    Returns:
        tuple: ``(version_string, True)`` when cmake3 is found,
        ``(None, False)`` otherwise.
    """
    from sh import which
    # Guard clause; also replaces the non-idiomatic `not x is None`.
    if which('cmake3') is None:
        return (None, False)
    from sh import cmake3
    version = str(cmake3('--version'))
    return (version, True)
def IsInstalled(self):
    """Probe $PATH for the digitemp binary, cache and return the result."""
    logging.debug("def IsInstalled(self):")
    location = sh.which(self.digitemp_cmd_str)
    if location is None:
        logging.error('digitemp_DS2490 not found on the system, use sudo apt-get install digitemp')
        self._installed = False
    else:
        logging.info("Found digitemp_DS2490 in : %s" % location)
        self._installed = True
    return self._installed
def test_execute_exits_with_return_code_and_logs(patched_print_error,
                                                 ansible_playbook_instance):
    """A failing playbook returns (1, None) and logs sh's error report."""
    ansible_playbook_instance._ansible = sh.false.bake()
    outcome = ansible_playbook_instance.execute()
    expected = "\n\n RAN: {}\n\n STDOUT:\n\n\n STDERR:\n".format(
        sh.which('false'))
    patched_print_error.assert_called_once_with(expected)
    assert outcome == (1, None)
def run_test(version):
    """Run test.py under python<version>, skipping when it is not installed."""
    interpreter = "python%s" % version
    binary = sh.which(interpreter)
    if not binary:
        print("Couldn't find %s, skipping" % interpreter.capitalize())
        return
    print("Testing %s" % interpreter.capitalize())
    # Forward any extra CLI arguments through to the test script.
    proc = subprocess.Popen([binary, "test.py"] + sys.argv[2:])
    proc.wait()
def exec_expander(exefile):
    """Determine the full path of an executable.

    Returns `exefile` unchanged when it already exists as a path or
    cannot be resolved; otherwise the first $PATH match.
    """
    if os.path.exists(exefile):
        return exefile
    located = sh.which(exefile)
    # Bug fix: sh.which returns None when nothing matches; the original
    # called .strip() on it and raised AttributeError instead of falling
    # back to the raw name.
    if located is None:
        return exefile
    fp = located.strip().split("\n")
    if len(fp) > 0:
        return fp[0]
    else:
        return exefile
def main():
    """Provision the Adence app: pip3, flask/rsa, supervisor, then deploy.

    Installs missing system packages via apt-get, the Python deps via
    pip3, and optionally copies the app to /root and registers it with
    supervisor.
    """
    # Bootstrap pip3 itself if absent.
    if not sh.which('pip3'):
        print('installing pip3')
        sh.apt_get("install", "python3-pip", "-y")
        if not sh.which('pip3'):
            print('pip3 install failed.')
            return
        print('pip3 installed')
    print('installing python package')
    print('flask.')
    sh.pip3("install", "flask")
    print('rsa.')
    sh.pip3("install", "rsa")
    # Supervisor keeps the app running and restarts it on failure.
    if not sh.which('supervisorctl'):
        print('installing supervisor')
        sh.apt_get("install", "supervisor", "-y")
        if not sh.which('supervisorctl'):
            print('supervisor install failed')
            return
        print('supervisor installed')
    ans = input('Do you want to copy files to /root? [y/N]')
    if ans == 'Y' or ans == 'y':
        print('copying files to /root/Adence')
        sh.cp('../Adence', '-R', '/root')
        print('config supervisor')
        # Supervisor program definition for the deployed app.
        cmd = r'''[program:Adence]
command=python3 /root/Adence/main.py
autostart=true
autorestart=true
stderr_logfile=/var/log/Adence.err.log
stdout_logfile=/var/log/Adence.out.log
'''
        with open('/etc/supervisor/conf.d/Adence.conf', 'w') as fp:
            fp.write(cmd)
        sh.service('supervisor', 'restart')
        print('done. you can visit http://localhost:9000 now.')
    else:
        print('environment settled.you need to run the main.py manually')
def main(argv):
    """
    `argv`: command line arguments without the name of the program
    (poped $0).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-j", "--json", help="Dump as json.",
        default=False, action='store_true')
    parser.add_argument(
        "--pretty-json", help="Try to make json output pretty.",
        default=False, action='store_true')
    parser.add_argument(
        "--stdin", help="Generate a control for stdin.",
        default=False, action='store_true')
    parser.add_argument(
        "-x", "--extra-arguments",
        help="Extra argument you need to pass to the program.",
        default=[], action='append')
    parser.add_argument(
        "-c", "--help-command",
        help="What needs to be given to make the program display help.",
        default="--help")
    parser.add_argument("program")
    arguments = parser.parse_args(argv)

    help_text = get_help(
        arguments.program, arguments.extra_arguments, arguments.help_command)

    import os
    program = os.path.basename(arguments.program)
    path = os.path.abspath(arguments.program)
    if not os.path.exists(path):
        # Fall back to a $PATH lookup when the literal path doesn't exist.
        import sh
        path = sh.which(arguments.program)

    if arguments.json:
        print(dump_json(
            program, arguments.extra_arguments, path, help_text,
            arguments.stdin, arguments.pretty_json))
        return
    for item in parse_help(help_text, arguments.stdin):
        print(item)
def create_base(folder):
    """
    Create multisite Plone hosting infrastructure on a server..

    Host sites at /srv/plone or chosen cache_folder

    Each folder has a file called buildout.cfg which is the production
    buildout file for this site. This might not be a real file, but a
    symlink to a version controlled file under
    /srv/plone/xxx/src/yoursitecustomization.policy/production.cfg.

    Log rotate is performed using a global UNIX log rotate script:
    http://opensourcehacker.com/2012/08/30/autodiscovering-log-files-for-logrotate/

    :param folder: Base installation folder for all the sites e.g. /srv/plone

    NOTE(review): this function uses Python 2 print statements — it can
    only run under a Python 2 interpreter, unlike most of this file.
    """
    from sh import apt_get
    with sudo:
        # Return software we are going to need in any case
        # Assumes Ubuntu / Debian
        # More info: https://github.com/miohtama/ztanesh
        if (not which("zsh")) or (not which("git")) or (not which("gcc")):
            # Which returs zero on success
            print "Installing OS packages"
            apt_get("update")
            apt_get("install", "-y", *PACKAGES)
        # Create base folder
        if not os.path.exists(folder):
            print "Creating installation base %s" % folder
            install(folder, "-d")
        # Create nightly restart cron job
        if os.path.exists("/etc/cron.d"):
            print "(Re)setting all sites nightly restart cron job"
            echo(CRON_TEMPLATE, _out=CRON_JOB)
    create_python_env(folder)
def test_execute_exits_when_command_fails_and_exit_flag_set(
        mocker, patched_ansible_lint, patched_trailing, patched_ssh_config,
        patched_print_error, molecule_instance):
    """Verify.execute() exits when testinfra fails, logging sh's report."""
    patched_testinfra = mocker.patch('molecule.verifier.testinfra.Testinfra')
    patched_testinfra.side_effect = sh.ErrorReturnCode_1(sh.ls, None, None)
    verifier = verify.Verify({}, {}, molecule_instance)
    with pytest.raises(SystemExit):
        verifier.execute()
    expected = ("\n\n RAN: <Command '{}'>\n\n "
                "STDOUT:\n<redirected>\n\n STDERR:\n<redirected>").format(
                    sh.which('ls'))
    patched_print_error.assert_called_once_with(expected)
def main(args=None):
    """Find stale git index.lock files and remove or process them.

    By default each lock is removed with os.remove; a custom command may
    be given via ``-e`` (with ``{}`` substituted for the lock path).
    """
    parser = gitboots.argparse_setup(globals())
    parser.add_argument("-e", metavar="COMMAND", default="rm -f",
                        help="execute command on each found lock")
    opts = parser.parse_args(args)
    path = git_dir()
    for lock in sh.find(path, "-type", "f", "-name", "index.lock"):
        lock = lock.strip()
        if opts.e == "rm -f":
            # Default action: delete in-process, no shell needed.
            logger.info("rm -f %s", lock)
            os.remove(lock)
            continue
        if "{}" in opts.e:
            cmd = opts.e.replace("{}", lock)
        else:
            cmd = "{cmd} {lock}".format(cmd=opts.e, lock=lock)
        logger.info(cmd)
        if sh.which("sh"):
            sh.sh("-c", cmd)
        elif sh.which("cmd.exe"):
            # Bug fix: cmd.exe's run-and-exit switch is "/c"; the original
            # passed "\c", which cmd.exe does not recognise.
            sh.cmd("/c", cmd)
        else:
            raise ValueError("Do not know how to run this command: %s" % cmd)
def tabix_index(in_file, preset="vcf", config=None):
    """index a file using tabix"""
    # Resolve the tabix binary: explicit config entry wins, otherwise
    # fall back to whatever is found on the PATH.
    tabix_path = (config["program"].get("tabix", "tabix")
                  if config else sh.which("tabix"))
    tabix = sh.Command(tabix_path)

    out_file = in_file + ".tbi"
    if file_exists(out_file):
        # Index already present; nothing to do.
        return out_file

    tabix("-p", preset, in_file)
    return out_file
def install_homebrew():
    """ Installs or upgrades homebrew on mac.

    If homebrew is not installed, this command will install it, otherwise
    it will update homebrew to the latest version. Additionally, it will
    offer to upgrade all homebrew packages. Upgrading all packages can take
    a long time, so the user is given the choice to skip the upgrade.
    """
    print("Checking homebrew install")
    if sh.which("brew"):
        spinner = Halo(
            text="Updating homebrew", spinner="dots", placement="right"
        )
        spinner.start()
        sh.brew("update")
        spinner.succeed()

        print(
            "Before using homebrew to install packages, we can upgrade "
            "any outdated packages."
        )
        response = user_input("Run brew upgrade? [y|N] ")
        # BUG FIX: response[0] raised IndexError on empty input (plain Enter);
        # response[:1] is "" in that case and safely falls through to "No".
        if response[:1].lower() == "y":
            spinner = Halo(
                text="Upgrade brew packages", spinner="dots", placement="right"
            )
            spinner.start()
            sh.brew("upgrade")
            spinner.succeed()
        else:
            print("Skipped brew package upgrades")
    else:
        # TODO (phillip): Currently, this homebrew installation does not work on a fresh
        # computer. It works from the command line, but not when run from the script. I
        # need to figure out what is going on. It could be because user input is needed.
        spinner = Halo(
            text="Installing homebrew", spinner="dots", placement="right"
        )
        spinner.start()
        try:
            # Fetch the official install script and run it through ruby.
            script = sh.curl("-fsSL", BREW_GITHUB).stdout
            sh.ruby("-e", script)
            spinner.succeed()
        except sh.ErrorReturnCode:
            logging.error("Unable to install homebrew. Aborting...")
            spinner.fail()
            exit(1)
def build_arch(self, arch):
    """ Creates expected build and symlinks system Python version. """
    self.ctx.hostpython = '/usr/bin/false'
    # creates the sub buildir (used by other recipes)
    # https://github.com/kivy/python-for-android/issues/1154
    shprint(sh.mkdir, '-p', join(self.get_build_dir(), 'build'))

    # Locate the locally installed interpreter matching the crystax recipe.
    crystax_recipe = self.get_recipe('python3crystax', self.ctx)
    wanted_version = crystax_recipe.version
    host_interpreter = sh.which("python" + wanted_version)
    if host_interpreter is None:
        raise OSError(
            ('Trying to use python3crystax=={} but this Python version '
             'is not installed locally.').format(wanted_version))

    # Symlink the system interpreter into the build dir as "hostpython".
    shprint(sh.ln, '-sf', host_interpreter,
            join(self.get_build_dir(), 'hostpython'))
def get_system_info(self):
    """Collect host platform details and tool availability.

    Populates ``self.resources['platform']`` with :mod:`platform` facts and
    ``self.resources['exe']`` with the resolved path (or ``None``) of a few
    common executables, then delegates to ``self.update_system_info()``.
    """
    self.resources['platform'] = {
        'uname': ', '.join(platform.uname()),
        'machine': platform.machine(),
        'system': platform.system(),
        'processor': platform.processor(),
        'node': platform.node()}

    # FIX: the previous "except Exception as e: raise" was a no-op re-raise
    # with an unused binding — any import/lookup error propagates either way,
    # so the try/except is removed as dead code.
    from sh import which
    exe_checklist = ['nvcc', 'java', 'lua', 'qstat', 'squeue', 'python']
    # which() returns the executable's path, or None when not on PATH.
    self.resources['exe'] = {exe: which(exe) for exe in exe_checklist}

    self.update_system_info()
def default_shell(name):
    """ Sets default shell for the current user.

    Resolves *name* to its full path with ``which`` and runs ``chsh`` for the
    current user under elevated privileges, with a spinner for feedback.

    :param name: shell executable name, e.g. ``"zsh"``
    """
    spinner = Halo(
        text="Default shell `{}`".format(name),
        spinner="dots",
        placement="right"
    )
    spinner.start()
    try:
        path = sh.which(name).strip()
        user = sh.whoami().strip()
        with Authentication():
            sh.chsh("-s", path, user)
        spinner.succeed()
    except sh.ErrorReturnCode as err:
        # BUG FIX: sh exposes stderr as bytes under Python 3; calling
        # .replace() with str arguments raised TypeError inside this handler.
        stderr = err.stderr
        if isinstance(stderr, bytes):
            stderr = stderr.decode("utf-8", errors="replace")
        err_message = "\n\t" + stderr.replace("\n", "\n\t")
        logging.error(
            "Error changing default shell to %s: %s", name, err_message
        )
        spinner.fail()
def test_missing_tools(self, mock_urlopen):
    """ Test that we send the right event when the ARM tools are missing"""
    self._printTestHeader()

    # Rename the tools directory so that it can't be found
    # (save_tools_dir doubles as the "was renamed" flag for restore below)
    tools_dir = os.path.join(root_dir, os.pardir, 'arm-cs-tools')
    save_tools_dir = tools_dir + ".bck"
    if os.path.exists(tools_dir):
        os.rename(tools_dir, save_tools_dir)
    else:
        save_tools_dir = None

    # If we can still find it, remove it from the path
    # Repeatedly strip every PATH entry that resolves arm-none-eabi-size;
    # PATH must be rewritten each iteration so the next sh.which() lookup
    # sees the pruned value and the loop terminates.
    save_os_environ = os.environ['PATH']
    paths = save_os_environ.split(':')
    while True:
        where = sh.which('arm-none-eabi-size')
        if where is None:
            break
        dir = os.path.split(where)[0]
        paths.remove(dir)
        os.environ['PATH'] = ":".join(paths)

    # Copy the desired project to temp location
    working_dir = self.use_project('good_c_app')
    with temp_chdir(working_dir):
        sys.argv = self.pebble_cmd_line + ['build']
        retval = self.p_sh.main()  # NOTE(review): retval unused — build is expected to fail here

    # Verify that we sent missing tools event
    # 'el' is presumably matched as a regex by assert_evt — TODO confirm
    self.assert_evt(mock_urlopen, {'ec': 'install', 'ea': 'tools',
                                   'el': 'fail: The compiler.*'})

    # Restore environment
    if save_tools_dir is not None:
        os.rename(save_tools_dir, tools_dir)
    os.environ['PATH'] = save_os_environ