def get_disabled():
    '''
    Return the disabled services

    CLI Example:

    .. code-block:: bash

        salt '*' service.get_disabled
    '''
    if has_powershell():
        cmd = 'Get-WmiObject win32_service | where {$_.startmode -ne "Auto"} | select-object name'
        lines = __salt__['cmd.run'](cmd, shell='POWERSHELL').splitlines()
        return sorted([line.strip() for line in lines[3:]])
    else:
        ret = set()
        services = []
        cmd = list2cmdline(['sc', 'query', 'type=', 'service',
                            'state=', 'all', 'bufsize=', str(BUFFSIZE)])
        lines = __salt__['cmd.run'](cmd).splitlines()
        for line in lines:
            if 'SERVICE_NAME:' in line:
                comps = line.split(':', 1)
                if not len(comps) > 1:
                    continue
                services.append(comps[1].strip())
        for service in services:
            cmd2 = list2cmdline(['sc', 'qc', service])
            lines = __salt__['cmd.run'](cmd2).splitlines()
            for line in lines:
                if 'DEMAND_START' in line:
                    ret.add(service)
                elif 'DISABLED' in line:
                    ret.add(service)
        return sorted(ret)
def run_tophat2_paired(project_dir, sample, index_basename, fastq_r1, fastq_r2, logger):
    '''Run tophat2 in paired-end mode on a pair of fastq files.'''
    logger.info('***Running tophat2 on paired-end reads; aligned to ref %s' % index_basename)
    filepath = os.path.join(project_dir, sample)
    args = [
        '/Applications/tophat-2.1.0.OSX_x86_64/tophat2',  # temp path for testing
        #'/home/seqproc/tophat2/'
        '--num-threads', '10',
        '--mate-inner-dist', '200',
        '--max-multihits', '1',
        '--splice-mismatches', '1',
        index_basename,
        os.path.join(filepath, fastq_r1),
        os.path.join(filepath, fastq_r2)
    ]
    print subprocess.list2cmdline(args)
    top2_process = subprocess.call(args)
    if not top2_process:  # return code of 0 is success
        logger.info('***tophat2 alignment completed successfully for %s' % filepath)
    else:
        logger.info('***Error in tophat2 alignment. Return code: %d' % top2_process)
def run_pgdump(dbname, output_stream, connection_params, format='custom', env=None):
    """Run pg_dump for the given database and write to the specified output
    stream.

    :param dbname: database name
    :type dbname: str
    :param output_stream: a file-like object - must have a fileno attribute
                          that is a real, open file descriptor
    """
    args = ['pg_dump'] + connection_params + ['--format', format, dbname]

    LOG.info('%s > %s', subprocess.list2cmdline(args), output_stream.name)

    stderr = tempfile.TemporaryFile()
    returncode = subprocess.call(args,
                                 stdout=output_stream,
                                 stderr=stderr,
                                 env=env,
                                 close_fds=True)
    stderr.flush()
    stderr.seek(0)
    for line in stderr:
        LOG.error('%s', line.rstrip())
    stderr.close()
    if returncode != 0:
        raise OSError("%s failed." % subprocess.list2cmdline(args))
def run_pgdump(dbname, output_stream, connection_params, out_format="custom", env=None):
    """Run pg_dump for the given database and write to the specified output
    stream.

    :param dbname: database name
    :type dbname: str
    :param output_stream: a file-like object - must have a fileno attribute
                          that is a real, open file descriptor
    """
    args = ["pg_dump"] + connection_params + ["--format", out_format, dbname]
    LOG.info("%s > %s", subprocess.list2cmdline(args), output_stream.name)

    stderr = tempfile.TemporaryFile()
    try:
        try:
            returncode = subprocess.call(
                args, stdout=output_stream, stderr=stderr, env=env, close_fds=True
            )
        except OSError as exc:
            raise PgError("Failed to execute '%s': [%d] %s" % (args[0], exc.errno, exc.strerror))

        stderr.flush()
        stderr.seek(0)
        for line in stderr:
            LOG.error("%s", line.rstrip())
    finally:
        stderr.close()
    if returncode != 0:
        raise PgError("%s failed." % subprocess.list2cmdline(args))
def cmd(cmd, args=None, raw=False, show=False):
    """ executes a command returning the output as a string """
    # prepend the base command; copy instead of mutating the caller's list
    # (and avoid the classic mutable-default-argument bug)
    args = [cmd] + list(args or [])

    # check for a raw command (no interpreted arguments)
    if raw == True:
        cmd_args = ' '.join(args)
    # normal command (list of arguments); since check_output is invoked
    # with shell=True, the list must be flattened to a single string
    else:
        cmd_args = subprocess.list2cmdline(args)
    if show == True:
        print cmd_args

    # call the command expecting to see string output
    output = subprocess.check_output(cmd_args,
                                     stderr=subprocess.STDOUT,
                                     shell=True,
                                     universal_newlines=True)

    # return the output of the command
    return output.strip()
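# A minimal usage sketch for the cmd() helper above; the commands and
# paths are placeholders, not part of the original snippet.
files = cmd('ls', ['-l', '/tmp'], show=True)                # args quoted via list2cmdline
branch = cmd('git rev-parse --abbrev-ref HEAD', raw=True)   # raw string passed to the shell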
def etl2csv(xperf_path, etl_filename, debug=False):
    """
    Convert etl_filename to etl_filename.csv (temp file) which is the .csv
    representation of the .etl file.  Etlparser will read this .csv and parse
    the information we care about into the final output.  This is done to
    keep things simple and to preserve resources on talos machines
    (large files == high memory + cpu).
    """
    xperf_cmd = [xperf_path,
                 '-merge',
                 '%s.user' % etl_filename,
                 '%s.kernel' % etl_filename,
                 etl_filename]
    if debug:
        print("executing '%s'" % subprocess.list2cmdline(xperf_cmd))
    subprocess.call(xperf_cmd)

    csv_filename = '%s.csv' % etl_filename
    xperf_cmd = [xperf_path, '-i', etl_filename, '-o', csv_filename]
    if debug:
        print("executing '%s'" % subprocess.list2cmdline(xperf_cmd))
    subprocess.call(xperf_cmd)
    return csv_filename
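# Hypothetical invocation of etl2csv() above; the xperf path and .etl file
# name are placeholders.
csv_file = etl2csv(r'C:\Program Files (x86)\Windows Kits\10\Windows Performance Toolkit\xperf.exe',
                   r'C:\talos\profile.etl',
                   debug=True)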
def main(sys_args):
    sys_args, jython_opts = decode_args(sys_args)
    args, jython_args = parse_launcher_args(sys_args)
    jython_command = JythonCommand(args, jython_opts + jython_args)
    command = jython_command.command

    if args.profile and not args.help:
        try:
            os.unlink("profile.txt")
        except OSError:
            pass
    if args.print_requested and not args.help:
        if jython_command.uname == "windows":
            print subprocess.list2cmdline(jython_command.command)
        else:
            print " ".join(pipes.quote(arg) for arg in jython_command.command)
    else:
        if not (is_windows or not hasattr(os, "execvp") or args.help or
                jython_command.uname == "cygwin"):
            # Replace this process with the java process.
            #
            # NB such replacements actually do not work under Windows,
            # but if tried, they also fail very badly by hanging.
            # So don't even try!
            #
            # Pass the full argv, including the program name, so that the
            # java process sees a correct argv[0].
            os.execvp(command[0], command)
        else:
            result = 1
            try:
                result = subprocess.call(command)
                if args.help:
                    print_help()
            except KeyboardInterrupt:
                pass
            sys.exit(result)
def run(cls, cmd, *args, **argd):
    extraflags = argd.get('extraflags', [])
    if type(extraflags) not in (list, tuple):
        extraflags = [extraflags]
    cmd = [g_drive_bin] + [cmd] + list(extraflags) + list(args)
    if argd.get('input') is not None:
        # echo the input if it is printable ASCII, otherwise elide it
        if re.match(r'^[\x20-\x7e\n]+$', argd.get('input')):
            print 'echo "%s" |' % argd.get('input'),
        else:
            print 'echo ... |',
    print subprocess.list2cmdline(cmd)
    cwd = os.getcwd()
    try:
        os.chdir(g_testdir)
        if argd.get('input') is None:
            p = subprocess.Popen(cmd,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        else:
            p = subprocess.Popen(cmd,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
    finally:
        os.chdir(cwd)
    out, err = p.communicate(argd.get('input'))
    return p.returncode, out, err
def _do_mock_command(self, func, mock_target, command, cwd=None, env=None, **kwargs):
    """Internal helper for preparing commands to run under mock.  Used by
    run_mock_command and get_mock_output_from_command."""
    cmd = ['mock_mozilla', '-r', mock_target, '-q']
    if cwd:
        cmd += ['--cwd', cwd]
    cmd += ['--unpriv', '--shell']

    if not isinstance(command, basestring):
        command = subprocess.list2cmdline(command)

    # XXX - Hack - gets around AB_CD=%(locale)s type arguments
    command = command.replace("(", "\\(")
    command = command.replace(")", "\\)")

    if env:
        env_cmd = ['/usr/bin/env']
        for key, value in env.items():
            # $HOME won't work inside the mock chroot
            if key == 'HOME':
                continue
            value = value.replace(";", "\\;")
            env_cmd += ['%s=%s' % (key, value)]
        cmd.append(subprocess.list2cmdline(env_cmd) + " " + command)
    else:
        cmd.append(command)
    return func(cmd, cwd=cwd, **kwargs)
def exec_commands(cmds):
    ''' Exec commands in parallel in multiple process
    (as much as we have CPU)
    '''
    if not cmds:
        return  # empty list

    def done(p):
        return p.poll() is not None

    def success(p):
        return p.returncode == 0

    def fail():
        sys.exit(1)

    max_task = cpu_count()
    processes = []
    while True:
        while cmds and len(processes) < max_task:
            task = cmds.pop()
            print list2cmdline(task)
            processes.append(Popen(task))

        # iterate over a copy so finished processes can be removed safely
        for p in processes[:]:
            if done(p):
                if success(p):
                    processes.remove(p)
                else:
                    fail()

        if not processes and not cmds:
            break
        else:
            time.sleep(0.05)
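# A small usage sketch for exec_commands() above; each task is an argv
# list handed straight to Popen, and the file names are placeholders.
exec_commands([
    ['gzip', '-k', 'logs/a.log'],
    ['gzip', '-k', 'logs/b.log'],
    ['gzip', '-k', 'logs/c.log'],
])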
def backup(self):
    if self.dry_run:
        return
    if not os.path.exists(self.config['tar']['directory']) \
            or not os.path.isdir(self.config['tar']['directory']):
        raise BackupError('{0} is not a directory!'.format(self.config['tar']['directory']))
    out_name = "{0}.tar".format(
        self.config['tar']['directory'].lstrip('/').replace('/', '_'))
    outfile = os.path.join(self.target_directory, out_name)
    args = ['tar', 'c', self.config['tar']['directory']]
    errlog = TemporaryFile()
    stream = self._open_stream(outfile, 'w')
    LOG.info("Executing: %s", list2cmdline(args))
    pid = Popen(
        args,
        stdout=stream.fileno(),
        stderr=errlog.fileno(),
        close_fds=True)
    status = pid.wait()
    try:
        errlog.flush()
        errlog.seek(0)
        for line in errlog:
            LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
    finally:
        errlog.close()
    if status != 0:
        raise BackupError('tar failed (status={0})'.format(status))
def matlab_despike_command(func):
    import os
    import subprocess

    # make sure your nifti is unzipped
    cur_dir = os.getcwd()

    matlab_command = ['matlab',
                      '-nodesktop',
                      '-nosplash',
                      '-r "WaveletDespike(\'%s\',\'%s/rest_dn\', \'wavelet\', \'d4\', \'LimitRAM\', 17) ; quit;"'
                      % (func, cur_dir)]

    print ''
    print 'Running matlab through python...........'
    print ''
    print subprocess.list2cmdline(matlab_command)
    print ''

    subprocess.call(matlab_command)

    spike_percent = [os.path.join(cur_dir, i) for i in os.listdir(cur_dir) if 'SP' in i][0]
    noise_img = [os.path.join(cur_dir, i) for i in os.listdir(cur_dir) if 'noise' in i][0]
    despiked_img = [os.path.join(cur_dir, i) for i in os.listdir(cur_dir) if 'wds' in i][0]

    return despiked_img, noise_img, spike_percent
def _exec_command_line(command, prefix):
    """ Executes a command (from a string), allowing all output to pass
    to STDOUT. """
    # split the arguments
    arguments = shlex.split(command)

    # attempt to detect generic commands (not a relative command)
    proc = arguments[0]

    # this is probably a generic/system command
    if proc[0] != '.':
        check = _which(proc)
        if check is None:
            raise RuntimeError('Unable to locate "{}" in path.'.format(proc))
        arguments[0] = check

    # this needs to be executed relative to the prefix
    else:
        arguments[0] = prefix + arguments[0]

    # print the statement we're about to execute
    print subprocess.list2cmdline(arguments)

    # attempt to execute the requested command
    result = subprocess.call(arguments)

    # return the result of the command
    return result
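# A hedged usage sketch for _exec_command_line() above; the commands and
# prefix are placeholders. A leading '.' marks a command resolved against
# the prefix, anything else is looked up on the PATH.
_exec_command_line('make all', '/opt/project/')               # system command
_exec_command_line('./bin/build --release', '/opt/project/')  # prefix-relative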
def dryrun(self, binary_xtrabackup):
    """Perform test backup"""
    from subprocess import Popen, list2cmdline, PIPE, STDOUT

    xb_cfg = self.config["xtrabackup"]
    args = util.build_xb_args(
        xb_cfg, self.target_directory, self.defaults_path, binary_xtrabackup
    )
    LOG.info("* xtrabackup command: %s", list2cmdline(args))
    args = ["xtrabackup", "--defaults-file=" + self.defaults_path, "--help"]
    cmdline = list2cmdline(args)
    LOG.info("* Verifying generated config '%s'", self.defaults_path)
    LOG.debug("* Verifying via command: %s", cmdline)
    try:
        process = Popen(args, stdout=PIPE, stderr=STDOUT, close_fds=True)
    except OSError:
        raise BackupError("Failed to find xtrabackup binary")
    stdout = process.stdout.read()
    process.wait()
    # Note: xtrabackup --help will exit with 1 usually
    if process.returncode != 1:
        LOG.error("! %s failed. Output follows below.", cmdline)
        for line in stdout.splitlines():
            LOG.error("! %s", line)
        raise BackupError("%s exited with failure status [%d]"
                          % (cmdline, process.returncode))
def Config_and_build_own(self):
    ''' Build all the own libraries that are used for SaRoMaN '''
    # digi_ND
    # run configure and autogen in that context.
    command = self.exec_base + '/digi_ND/autogen.sh'
    print command
    subprocess.call('bash %s' % command, shell=True, cwd=self.exec_base + '/digi_ND')
    subprocess.call('bash %s' % command, shell=True, cwd=self.exec_base + '/digi_ND')
    command = self.exec_base + '/digi_ND/configure'
    print command
    subprocess.call('bash %s' % command, shell=True, cwd=self.exec_base + '/digi_ND')
    subprocess.call('make', shell=True, cwd=self.exec_base + '/digi_ND')

    # mind_rec
    # run configure and autogen in that context.
    command = self.exec_base + '/mind_rec/autogen.sh'
    print command
    subprocess.call('bash %s' % command, shell=True, cwd=self.exec_base + '/mind_rec')
    subprocess.call('bash %s' % command, shell=True, cwd=self.exec_base + '/mind_rec')
    command = self.exec_base + '/mind_rec/configure'
    print command
    subprocess.call('bash %s' % command, shell=True, cwd=self.exec_base + '/mind_rec')
    subprocess.call('make', shell=True, cwd=self.exec_base + '/mind_rec')

    # sciNDG4
    command = [self.third_party_support + '/bin/scons']
    print subprocess.list2cmdline(command)
    subprocess.call(command, cwd=self.exec_base + '/sciNDG4', env=os.environ)
def _exec_wrapper(self, type, args, root=None, arch=None, outputlogger=None,
                  timeout=None, ignoreerrors=False, interactive=False,
                  quiet=False, ignorestderr=False, remount=False):
    assert not (interactive and outputlogger)
    basecmd = self.jurtrootcmd[:]
    basecmd.extend(("--type", type))
    basecmd.extend(("--target", self.targetname))
    if timeout is not None:
        basecmd.extend(("--timeout", str(timeout)))
    if root is not None:
        basecmd.extend(("--root", root))
    if remount:
        basecmd.append("--remount")
    if arch is not None:
        basecmd.extend(("--arch", arch))
    if ignoreerrors:
        basecmd.append("--ignore-errors")
    if quiet:
        basecmd.append("--quiet")
    if ignorestderr:
        basecmd.append("--ignore-stderr")
    basecmd.extend(args)
    if interactive:
        fullcmd = self.sucmd[:]
        fullcmd.extend(basecmd)
        cmdline = subprocess.list2cmdline(fullcmd)
        proc = subprocess.Popen(args=fullcmd, shell=False, bufsize=-1)
        proc.wait()
        returncode = proc.returncode
        output = "(interactive command, no output)"
    else:
        cmdline = subprocess.list2cmdline(basecmd)
        if outputlogger and not quiet:
            outputlogger.write(">>>> running privileged agent: %s\n" % (cmdline))
            outputlogger.flush()
        if not self.agentrunning:
            self.start()
        logger.debug("sending command to agent: %s", cmdline)
        self.agentproc.stdin.write(cmdline + "\n")
        self.agentproc.stdin.flush()
        if outputlogger:
            targetfile = outputlogger
        else:
            targetfile = StringIO()
        returncode = self._collect_from_agent(targetfile, outputlogger)
        if outputlogger:
            output = "(error output available in the log files)"
        else:
            output = targetfile.getvalue()
    # check for error:
    if returncode != 0:
        if timeout is not None and returncode == 124:
            # command timeout
            raise CommandTimeout, ("command timed out:\n%s\n" % (cmdline))
        raise CommandError(returncode, cmdline, output)
    return output
def enable(self):
    proxies = ('%s=%s' % (_, self.__get_proxy(_)) for _ in self.__env_names)
    print(list2cmdline(['export'] + list(proxies)))
    aliases = ('%s=%s' % (_, self.__get_proxy('GIT_SSH')) for _ in self.__ssh_aliases)
    print(list2cmdline(['alias'] + list(aliases)))
def DamnSpawner(cmd, shell=False, stderr=None, stdout=None, stdin=None, cwd=None, bufsize=0):
    if cwd is None:
        cwd = DV.curdir
    cwd = DamnUnicode(cwd)
    if type(cmd) in (type(''), type(u'')):
        cmd = DamnUnicode(cmd)
    else:
        for i in range(len(cmd)):
            cmd[i] = DamnUnicode(cmd[i])
    if DV.os == 'nt':
        import win32process
        if type(cmd) in (type([]), type(())):
            tempcmd = []
            for i in cmd:
                tempcmd.append(DamnUnicode(i).encode('windows-1252'))
            Damnlog('Spawning subprocess on NT:', tempcmd)
            Damnlog('Actual command:', subprocess.list2cmdline(tempcmd))
            # Yes, ALL std's must be PIPEd, otherwise it doesn't work on win32
            # (see http://www.py2exe.org/index.cgi/Py2ExeSubprocessInteractions)
            return subprocess.Popen(tempcmd, shell=shell,
                                    creationflags=win32process.CREATE_NO_WINDOW,
                                    stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                                    stdin=subprocess.PIPE,
                                    cwd=cwd.encode('windows-1252'),
                                    executable=None, bufsize=bufsize)
        else:
            Damnlog('Spawning subprocess on NT:', cmd)
            Damnlog('Actual command:', subprocess.list2cmdline(cmd))
            return subprocess.Popen(cmd.encode('windows-1252'), shell=shell,
                                    creationflags=win32process.CREATE_NO_WINDOW,
                                    stderr=subprocess.PIPE, stdout=subprocess.PIPE,
                                    stdin=subprocess.PIPE,
                                    cwd=cwd.encode('windows-1252'),
                                    executable=None, bufsize=bufsize)
    else:
        Damnlog('Spawning subprocess on UNIX:', cmd)
        Damnlog('Actual command:', subprocess.list2cmdline(cmd))
        return subprocess.Popen(cmd, shell=shell, stderr=stderr, stdout=stdout,
                                stdin=stdin, cwd=cwd, executable=None, bufsize=bufsize)
def run_task(task_name, task, config):
    if task is None:
        return
    target = task["target"]
    positional_args = parse_list_arg(
        task.get("args", {}).get("positional", []),
        config,
        config.get("mipmip.arg_policy." + task_name, {}).get("positional", {}),
        False,
    )
    joined_named_args = parse_dict_arg(
        task.get("args", {}).get("named", {}),
        config,
        config.get("mipmip.arg_policy." + task_name, {}).get("named", {}),
        False,
    )
    stdout_pipe = None
    for artifact_name, artifact in task.get("artifacts", {}).viewitems():
        if artifact["type"] == "stdout":
            stdout_pipe = subprocess.PIPE
    args = [target] + positional_args + joined_named_args
    print subprocess.list2cmdline(args)
    p = subprocess.Popen(args, stdout=stdout_pipe)
    returncode = p.wait()
    if returncode == 0:
        for artifact_name, artifact in task.get("artifacts", {}).viewitems():
            if artifact["type"] == "stdout":
                config["mipmip.artifacts." + task_name + "." + artifact_name] = p.stdout.read()
    return p.returncode
def lvsnapshot(orig_lv_path, snapshot_name, snapshot_extents, chunksize=None):
    """Create a snapshot of an existing logical volume

    :param orig_lv_path: path to the logical volume being snapshotted
    :param snapshot_name: name of the snapshot
    :param snapshot_extents: size to allocate to snapshot volume in extents
    :param chunksize: (optional) chunksize of the snapshot volume
    """
    lvcreate_args = [
        "lvcreate",
        "--snapshot",
        "--name", snapshot_name,
        "--extents", "%d" % snapshot_extents,
        orig_lv_path,
    ]

    if chunksize:
        lvcreate_args.insert(-1, "--chunksize")
        lvcreate_args.insert(-1, chunksize)

    LOG.debug("%s", list2cmdline(lvcreate_args))

    process = Popen(lvcreate_args,
                    stdout=PIPE,
                    stderr=PIPE,
                    preexec_fn=os.setsid,
                    close_fds=True)

    stdout, stderr = process.communicate()

    for line in stdout.splitlines():
        if not line:
            continue
        LOG.debug("lvcreate: %s", line)

    if process.returncode != 0:
        raise LVMCommandError(list2cmdline(lvcreate_args),
                              process.returncode,
                              str(stderr).strip())
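# A rough usage sketch for lvsnapshot() above; the volume path, snapshot
# name, extent count and chunk size are placeholders (the call needs root
# privileges and a real LVM volume to do anything).
lvsnapshot('/dev/vg00/mysql', 'mysql-snapshot', 256, chunksize='256K')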
def main():
    if (len(sys.argv) != 2):
        print "Usage: ./submit_problem.py problem_file"
        exit(1)
    if (str.find(sys.argv[1], '-') == -1):
        print "Solution file format: id-infostring.txt"
        exit(1)

    solution_file = sys.argv[1]
    if (str.find(solution_file, '/') != -1):
        id_start_idx = len(solution_file) - 1 - str.index(solution_file[::-1], '/')
    else:
        # no directory part: start one before index 0, so the +1 below
        # lands on the first character of the file name
        id_start_idx = -1
    id_end_idx = str.index(solution_file, '-')
    problem_id = solution_file[id_start_idx+1:id_end_idx]

    cmd_list = [
        "curl",
        "--compressed",
        "-L",
        "-H", "Expect:",
        "-H", "X-API-Key: " + API_KEY,
        "-F", "problem_id=" + problem_id,
        "-F", "solution_spec=@" + solution_file,
        SUBMIT_ENDPOINT
    ]
    print subprocess.list2cmdline(cmd_list)
    out_json = subprocess.check_output(cmd_list)
    print out_json
def ffmpeg_encode(threads=1):
    cmd = ['ffmpeg', '-y', '-vcodec', 'ppm', '-r', '23.97', '-f', 'image2pipe', '-i', '-']
    cmd.extend(['-vcodec', 'libx264', '-pix_fmt', 'yuv420p',
                '-profile', 'baseline', '-vb', '15M', '-crf', '16'])
    cmd.extend([os.path.expanduser('~/out.mov')])
    print subprocess.list2cmdline(cmd)

    p = None
    pool = Pool(threads)
    #with ThreadPoolExecutor(max_workers=threads) as e:
    for result in pool.imap(rotate, xrange(360)):
        if p is None:
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
        p.stdin.write(result)
        p.stdin.flush()

    p.stdin.close()
    p.wait()
    pool.close()
    pool.join()
def spawnProcess(self, cmdList):
    if not self.dryRun:
        self.makeGroupNote('Spawning a process with command '
                           + subprocess.list2cmdline(cmdList) + ' at ' + os.getcwd())
        return subprocess.Popen(cmdList)
    else:
        self.makeGroupNote('Doing a dry run. Would spawn a process with command '
                           + subprocess.list2cmdline(cmdList) + ' at ' + os.getcwd())
        return None
def run_sync_get_output(args, cwd=None, env=None, stdout=None, stderr=None,
                        none_on_error=False, log_success=False, log_initiation=False):
    if log_initiation:
        log("running: %s" % (subprocess.list2cmdline(args),))
    env_copy = _get_env_for_cwd(cwd, env)
    f = open("/dev/null", "r")
    if stderr is None:
        stderr_target = sys.stderr
    else:
        stderr_target = stderr
    proc = subprocess.Popen(args, stdin=f, stdout=subprocess.PIPE,
                            stderr=stderr_target, close_fds=True,
                            cwd=cwd, env=env_copy)
    f.close()
    output = proc.communicate()[0].strip()
    if proc.returncode != 0 and not none_on_error:
        logfn = fatal
    elif log_success:
        logfn = log
    else:
        logfn = None
    if logfn is not None:
        logfn("cmd '%s' (cwd=%s) exited with code %d, %d bytes of output"
              % (subprocess.list2cmdline(args), cwd, proc.returncode, len(output)))
    if proc.returncode == 0:
        return output
    return None
def backup(self):
    """ Create backup """
    if self.dry_run:
        return
    if not os.path.exists(self.config["tar"]["directory"]) or not os.path.isdir(
        self.config["tar"]["directory"]
    ):
        raise BackupError("{0} is not a directory!".format(self.config["tar"]["directory"]))
    out_name = "{0}.tar".format(self.config["tar"]["directory"].lstrip("/").replace("/", "_"))
    outfile = os.path.join(self.target_directory, out_name)
    args = ["tar", "c", self.config["tar"]["directory"]]
    errlog = TemporaryFile()
    stream = open_stream(outfile, "w", **self.config["compression"])
    LOG.info("Executing: %s", list2cmdline(args))
    pid = Popen(args, stdout=stream.fileno(), stderr=errlog.fileno(), close_fds=True)
    status = pid.wait()
    try:
        errlog.flush()
        errlog.seek(0)
        for line in errlog:
            LOG.error("%s[%d]: %s", list2cmdline(args), pid.pid, line.rstrip())
    finally:
        errlog.close()
    if status != 0:
        raise BackupError("tar failed (status={0})".format(status))
def run_shell(*args, **kw):
    check_for_interrupt = kw.get('check_for_interrupt', False)
    fail_silently = kw.get('fail_silently', False)
    command_log_level = kw.get("command_log_level", logging.DEBUG)
    filter = kw.get("filter", False)

    state = RunnerState()
    state.done = False
    state.output = StringIO.StringIO()
    state.proc = None
    state.error = None

    def runner():
        try:
            state.proc = lib.PopenWithoutNewConsole(args, stdout=subprocess.PIPE,
                                                    stderr=subprocess.STDOUT,
                                                    env=kw.get('env'))
            for line in iter(state.proc.stdout.readline, ''):
                if not filter or filter(line):
                    state.output.write(line)
                    LOG.log(command_log_level, line.rstrip('\r\n'))
            state.done = True
        except Exception as e:
            state.done = True
            state.error = e

    LOG.debug('Running: {cmd}'.format(cmd=subprocess.list2cmdline(args)))

    if check_for_interrupt:
        try:
            call = lib.current_call()
            runner_thread = threading.Thread(target=runner)
            runner_thread.daemon = True
            runner_thread.start()
            while not state.done:
                time.sleep(1)
                call.assert_not_interrupted()
        finally:
            # if interrupted, kill child process
            if state.proc and not state.done:
                lib.progressive_kill(state.proc.pid)
    else:
        runner()

    if state.error:
        raise state.error

    if state.proc.wait() != 0:
        if fail_silently:
            LOG.debug('Failed to run %s, but was told to carry on anyway'
                      % subprocess.list2cmdline(args))
        else:
            raise ShellError(
                message="Failed when running {command}".format(command=args[0]),
                output=state.output.getvalue()
            )

    return state.output.getvalue()
def run_cmd(cmd, sudo=False):
    if sudo:
        cmd = ['/usr/bin/sudo'] + cmd
    print subprocess.list2cmdline(cmd)
    p = subprocess.Popen(cmd)
    p.communicate()
    if p.returncode:
        # stdout/stderr were not piped, so there is no captured error text;
        # report the failing command and return code instead
        raise Exception('%s failed with return code %d'
                        % (subprocess.list2cmdline(cmd), p.returncode))
def Print_outdata_file(self, filename, command):
    ''' Print the command to stdout, then write the output of the call
    to the file named filename. '''
    print subprocess.list2cmdline(command)
    outfile = open(filename, 'w+')
    subprocess.call(command, stdout=outfile)
    outfile.close()
def RunCmd(cmd, dryRun):
    print subprocess.list2cmdline(cmd)
    sys.stdout.flush()
    if not dryRun:
        status = subprocess.call(cmd)
        if status != 0:
            print "Build failed."
            sys.exit(1)  # exit non-zero so callers see the failure
def __init__(self, args, origin):
    from .event import post_event

    Signal.__init__(self)
    self.origin = origin
    if env.flags["no_op_print"]:
        print subprocess.list2cmdline(args)
    post_event(self.emit, self)
def run_command(self, command_list, filename, count, log_function,
                raise_on_failure=None):
    """Run command_list count times.

    Parameters
    ----------
    command_list : list
        A list of args to provide to Popen. Each element of this
        list will be interpolated with the filename to convert.
    filename : unicode
        The name of the file to convert.
    count : int
        How many times to run the command.
    raise_on_failure : Exception class (default None)
        If provided, raise the given exception on command failure
        instead of returning False.

    Returns
    -------
    success : bool
        A boolean indicating if the command was successful (True)
        or failed (False).
    """
    command = [c.format(filename=filename) for c in command_list]

    # On windows with python 2.x there is a bug in subprocess.Popen and
    # unicode commands are not supported
    if sys.platform == 'win32' and sys.version_info < (3, 0):
        # We must use cp1252 encoding for calling subprocess.Popen
        # Note that sys.stdin.encoding and encoding.DEFAULT_ENCODING
        # could be different (cp437 in case of dos console)
        command = [c.encode('cp1252') for c in command]

    # This will throw a clearer error if the command is not found
    cmd = which(command_list[0])
    if cmd is None:
        link = "https://nbconvert.readthedocs.io/en/latest/install.html#installing-tex"
        raise OSError(
            "{formatter} not found on PATH, if you have not installed "
            "{formatter} you may need to do so. Find further instructions "
            "at {link}.".format(formatter=command_list[0], link=link))

    times = 'time' if count == 1 else 'times'
    self.log.info("Running %s %i %s: %s", command_list[0], count, times, command)

    shell = (sys.platform == 'win32')
    if shell:
        command = subprocess.list2cmdline(command)
    env = os.environ.copy()
    prepend_to_env_search_path('TEXINPUTS', self.texinputs, env)
    prepend_to_env_search_path('BIBINPUTS', self.texinputs, env)
    prepend_to_env_search_path('BSTINPUTS', self.texinputs, env)

    with open(os.devnull, 'rb') as null:
        stdout = subprocess.PIPE if not self.verbose else None
        for index in range(count):
            p = subprocess.Popen(command, stdout=stdout,
                                 stderr=subprocess.STDOUT,
                                 stdin=null, shell=shell, env=env)
            out, _ = p.communicate()
            if p.returncode:
                if self.verbose:
                    # verbose means I didn't capture stdout with PIPE,
                    # so it's already been displayed and `out` is None.
                    out = u''
                else:
                    out = out.decode('utf-8', 'replace')
                log_function(command, out)
                self._captured_output.append(out)
                if raise_on_failure:
                    raise raise_on_failure(
                        'Failed to run "{command}" command:\n{output}'.format(
                            command=command, output=out))
                return False  # failure
    return True  # success
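# A hedged usage sketch for run_command() above (it would run inside the
# exporter class; the xelatex invocation is illustrative, not part of the
# snippet). Each element of command_list is passed through str.format with
# the filename, so '{filename}' expands to 'notebook.tex' before Popen runs:
#
#     ok = self.run_command(['xelatex', '--interaction=batchmode', '{filename}'],
#                           'notebook.tex', 1, self.log.error)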
def main():
    options, prefix, requested_paths, excluded_paths = parse_args()
    if options.js_shell is not None and not (isfile(options.js_shell) and
                                             os.access(options.js_shell, os.X_OK)):
        if (platform.system() != "Windows" or
                isfile(options.js_shell) or not
                isfile(options.js_shell + ".exe") or not
                os.access(options.js_shell + ".exe", os.X_OK)):
            print("Could not find executable shell: " + options.js_shell)
            return 1

    test_count, test_gen = load_tests(options, requested_paths, excluded_paths)
    test_environment = get_environment_overlay(options.js_shell)

    if test_count == 0:
        print("no tests selected")
        return 1

    test_dir = dirname(abspath(__file__))

    if options.debug:
        if test_count > 1:
            print("Multiple tests match command line arguments,"
                  " debugger can only run one")
            for tc in test_gen:
                print(" {}".format(tc.path))
            return 2

        cmd = next(test_gen).get_command(prefix)
        if options.show_cmd:
            print(list2cmdline(cmd))
        with changedir(test_dir), change_env(test_environment):
            call(cmd)
        return 0

    # The test_gen generator is converted into a list in
    # run_all_tests. Go ahead and do it here so we can apply
    # chunking.
    #
    # If chunking is enabled, determine which tests are part of this chunk.
    # This code was adapted from testing/mochitest/runtestsremote.py.
    if options.total_chunks > 1:
        tests_per_chunk = math.ceil(test_count / float(options.total_chunks))
        start = int(round((options.this_chunk - 1) * tests_per_chunk))
        end = int(round(options.this_chunk * tests_per_chunk))
        test_gen = list(test_gen)[start:end]

    if options.remote:
        results = ResultsSink("jstests", options, test_count)
        try:
            from lib.remote import init_remote_dir, init_device
            device = init_device(options)
            jtd_tests = posixpath.join(options.remote_test_root, "tests", "tests")
            init_remote_dir(device, jtd_tests)
            device.push(test_dir, jtd_tests, timeout=600)
            device.chmod(jtd_tests, recursive=True)
            prefix[0] = options.js_shell
            for test in test_gen:
                out = run_test_remote(test, device, prefix, options)
                results.push(out)
            results.finish(True)
        except KeyboardInterrupt:
            results.finish(False)

        return 0 if results.all_passed() else 1

    with changedir(test_dir), change_env(test_environment):
        results = ResultsSink("jstests", options, test_count)
        try:
            for out in run_all_tests(test_gen, prefix, results.pb, options):
                results.push(out)
            results.finish(True)
        except KeyboardInterrupt:
            results.finish(False)

        return 0 if results.all_passed() else 1

    return 0
def run_cgi(self):
    """Execute a CGI script."""
    dir, rest = self.cgi_info
    path = dir + '/' + rest
    i = path.find('/', len(dir) + 1)
    while i >= 0:
        nextdir = path[:i]
        nextrest = path[i + 1:]

        scriptdir = self.translate_path(nextdir)
        if os.path.isdir(scriptdir):
            dir, rest = nextdir, nextrest
            i = path.find('/', len(dir) + 1)
        else:
            break

    # find an explicit query string, if present.
    rest, _, query = rest.partition('?')

    # dissect the part after the directory name into a script name &
    # a possible additional path, to be stored in PATH_INFO.
    i = rest.find('/')
    if i >= 0:
        script, rest = rest[:i], rest[i:]
    else:
        script, rest = rest, ''

    scriptname = dir + '/' + script
    scriptfile = self.translate_path(scriptname)
    if not os.path.exists(scriptfile):
        self.send_error(404, "No such CGI script (%r)" % scriptname)
        return
    if not os.path.isfile(scriptfile):
        self.send_error(403, "CGI script is not a plain file (%r)" % scriptname)
        return
    ispy = self.is_python(scriptname)
    if not ispy:
        if not (self.have_fork or self.have_popen2 or self.have_popen3):
            self.send_error(
                403, "CGI script is not a Python script (%r)" % scriptname)
            return
        if not self.is_executable(scriptfile):
            self.send_error(
                403, "CGI script is not executable (%r)" % scriptname)
            return

    # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
    # XXX Much of the following could be prepared ahead of time!
    env = copy.deepcopy(os.environ)
    env['SERVER_SOFTWARE'] = self.version_string()
    env['SERVER_NAME'] = self.server.server_name
    env['GATEWAY_INTERFACE'] = 'CGI/1.1'
    env['SERVER_PROTOCOL'] = self.protocol_version
    env['SERVER_PORT'] = str(self.server.server_port)
    env['REQUEST_METHOD'] = self.command
    uqrest = urllib.unquote(rest)
    env['PATH_INFO'] = uqrest
    env['PATH_TRANSLATED'] = self.translate_path(uqrest)
    env['SCRIPT_NAME'] = scriptname
    if query:
        env['QUERY_STRING'] = query
    host = self.address_string()
    if host != self.client_address[0]:
        env['REMOTE_HOST'] = host
    env['REMOTE_ADDR'] = self.client_address[0]
    authorization = self.headers.getheader("authorization")
    if authorization:
        authorization = authorization.split()
        if len(authorization) == 2:
            import base64, binascii
            env['AUTH_TYPE'] = authorization[0]
            if authorization[0].lower() == "basic":
                try:
                    authorization = base64.decodestring(authorization[1])
                except binascii.Error:
                    pass
                else:
                    authorization = authorization.split(':')
                    if len(authorization) == 2:
                        env['REMOTE_USER'] = authorization[0]
    # XXX REMOTE_IDENT
    if self.headers.typeheader is None:
        env['CONTENT_TYPE'] = self.headers.type
    else:
        env['CONTENT_TYPE'] = self.headers.typeheader
    length = self.headers.getheader('content-length')
    if length:
        env['CONTENT_LENGTH'] = length
    referer = self.headers.getheader('referer')
    if referer:
        env['HTTP_REFERER'] = referer
    accept = []
    for line in self.headers.getallmatchingheaders('accept'):
        if line[:1] in "\t\n\r ":
            accept.append(line.strip())
        else:
            accept = accept + line[7:].split(',')
    env['HTTP_ACCEPT'] = ','.join(accept)
    ua = self.headers.getheader('user-agent')
    if ua:
        env['HTTP_USER_AGENT'] = ua
    co = filter(None, self.headers.getheaders('cookie'))
    if co:
        env['HTTP_COOKIE'] = ', '.join(co)
    # XXX Other HTTP_* headers
    # Since we're setting the env in the parent, provide empty
    # values to override previously set values
    for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
              'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
        env.setdefault(k, "")

    self.send_response(200, "Script output follows")

    decoded_query = query.replace('+', ' ')

    if self.have_fork:
        # Unix -- fork as we should
        args = [script]
        if '=' not in decoded_query:
            args.append(decoded_query)
        nobody = nobody_uid()
        self.wfile.flush()  # Always flush before forking
        pid = os.fork()
        if pid != 0:
            # Parent
            pid, sts = os.waitpid(pid, 0)
            # throw away additional data [see bug #427345]
            while select.select([self.rfile], [], [], 0)[0]:
                if not self.rfile.read(1):
                    break
            if sts:
                self.log_error("CGI script exit status %#x", sts)
            return
        # Child
        try:
            try:
                os.setuid(nobody)
            except os.error:
                pass
            os.dup2(self.rfile.fileno(), 0)
            os.dup2(self.wfile.fileno(), 1)
            os.execve(scriptfile, args, env)
        except:
            self.server.handle_error(self.request, self.client_address)
            os._exit(127)

    else:
        # Non Unix - use subprocess
        import subprocess
        cmdline = [scriptfile]
        if self.is_python(scriptfile):
            interp = sys.executable
            if interp.lower().endswith("w.exe"):
                # On Windows, use python.exe, not pythonw.exe
                interp = interp[:-5] + interp[-4:]
            cmdline = [interp, '-u'] + cmdline
        if '=' not in query:
            cmdline.append(query)

        self.log_message("command: %s", subprocess.list2cmdline(cmdline))
        try:
            nbytes = int(length)
        except (TypeError, ValueError):
            nbytes = 0
        p = subprocess.Popen(cmdline,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             env=env)
        if self.command.lower() == "post" and nbytes > 0:
            data = self.rfile.read(nbytes)
        else:
            data = None
        # throw away additional data [see bug #427345]
        while select.select([self.rfile._sock], [], [], 0)[0]:
            if not self.rfile._sock.recv(1):
                break
        stdout, stderr = p.communicate(data)
        self.wfile.write(stdout)
        if stderr:
            self.log_error('%s', stderr)
        p.stderr.close()
        p.stdout.close()
        status = p.returncode
        if status:
            self.log_error("CGI script exit status %#x", status)
        else:
            self.log_message("CGI script exited OK")
def run_command(self, command_list, filename, count, log_function):
    """Run command_list count times.

    Parameters
    ----------
    command_list : list
        A list of args to provide to Popen. Each element of this
        list will be interpolated with the filename to convert.
    filename : unicode
        The name of the file to convert.
    count : int
        How many times to run the command.

    Returns
    -------
    success : bool
        A boolean indicating if the command was successful (True)
        or failed (False).
    """
    command = [c.format(filename=filename) for c in command_list]

    # On windows with python 2.x there is a bug in subprocess.Popen and
    # unicode commands are not supported
    if sys.platform == 'win32' and sys.version_info < (3, 0):
        # We must use cp1252 encoding for calling subprocess.Popen
        # Note that sys.stdin.encoding and encoding.DEFAULT_ENCODING
        # could be different (cp437 in case of dos console)
        command = [c.encode('cp1252') for c in command]

    # This will throw a clearer error if the command is not found
    cmd = which(command_list[0])
    if cmd is None:
        raise OSError("%s not found on PATH" % command_list[0])

    times = 'time' if count == 1 else 'times'
    self.log.info("Running %s %i %s: %s", command_list[0], count, times, command)

    shell = (sys.platform == 'win32')
    if shell:
        command = subprocess.list2cmdline(command)
    env = os.environ.copy()
    env['TEXINPUTS'] = os.pathsep.join([
        cast_bytes_py2(self.texinputs),
        env.get('TEXINPUTS', ''),
    ])

    with open(os.devnull, 'rb') as null:
        stdout = subprocess.PIPE if not self.verbose else None
        for index in range(count):
            p = subprocess.Popen(command, stdout=stdout,
                                 stderr=subprocess.STDOUT,
                                 stdin=null, shell=shell, env=env)
            out, _ = p.communicate()
            if p.returncode:
                if self.verbose:
                    # verbose means I didn't capture stdout with PIPE,
                    # so it's already been displayed and `out` is None.
                    out = u''
                else:
                    out = out.decode('utf-8', 'replace')
                log_function(command, out)
                self._captured_output.append(out)
                return False  # failure
    return True  # success
def _get_popen_args(self):
    return subprocess.list2cmdline(self._to_args_list())
def cmdline(self):
    return subprocess.list2cmdline(
        self.bake(self._check_cmd(), self.parameters, self.arguments))
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
    '''Base command for oc '''
    cmds = [self.oc_binary]

    if oadm:
        cmds.append('adm')

    cmds.extend(cmd)

    if self.all_namespaces:
        cmds.extend(['--all-namespaces'])
    elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
        cmds.extend(['-n', self.namespace])

    rval = {}
    results = ''
    err = None

    if self.verbose:
        print(' '.join(cmds))

    try:
        returncode, stdout, stderr = self._run(cmds, input_data)
    except OSError as ex:
        returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(
            subprocess.list2cmdline(cmds), ex)

    rval = {"returncode": returncode,
            "results": results,
            "cmd": ' '.join(cmds)}

    if returncode == 0:
        if output:
            if output_type == 'json':
                try:
                    rval['results'] = json.loads(stdout)
                except ValueError as err:
                    if "No JSON object could be decoded" in err.args:
                        err = err.args
            elif output_type == 'raw':
                rval['results'] = stdout

        if self.verbose:
            print("STDOUT: {0}".format(stdout))
            print("STDERR: {0}".format(stderr))

        if err:
            rval.update({"err": err,
                         "stderr": stderr,
                         "stdout": stdout,
                         "cmd": cmds})
    else:
        rval.update({"stderr": stderr,
                     "stdout": stdout,
                     "results": {}})

    return rval
def __str__(self):
    return subprocess.list2cmdline(self.visible_cmd)
def scan_parents_of(config_cache, hosts, silent=False, settings=None):
    # type: (config.ConfigCache, List[HostName], bool, Optional[Dict[str, int]]) -> Gateways
    if settings is None:
        settings = {}

    if config.monitoring_host:
        nagios_ip = ip_lookup.lookup_ipv4_address(config.monitoring_host)
    else:
        nagios_ip = None

    os.putenv("LANG", "")
    os.putenv("LC_ALL", "")

    # Start processes in parallel
    procs = []  # type: List[Tuple[HostName, Optional[HostAddress], Union[str, subprocess.Popen]]]
    for host in hosts:
        console.verbose("%s " % host)
        try:
            ip = ip_lookup.lookup_ipv4_address(host)
            if ip is None:
                raise RuntimeError()
            command = [
                "traceroute",
                "-w", "%d" % settings.get("timeout", 8),
                "-q", "%d" % settings.get("probes", 2),
                "-m", "%d" % settings.get("max_ttl", 10),
                "-n", ip
            ]
            console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

            procs.append((host, ip,
                          subprocess.Popen(command,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT,
                                           close_fds=True,
                                           encoding="utf-8")))
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            procs.append((host, None, "ERROR: %s" % e))

    # Output marks with status of each single scan
    def dot(color, dot='o'):
        # type: (str, str) -> None
        if not silent:
            out.output(tty.bold + color + dot + tty.normal)

    # Now all run and we begin to read the answers. For each host
    # we add a triple to gateways: the gateway, a scan state and a diagnostic output
    gateways = []  # type: Gateways
    for host, ip, proc_or_error in procs:
        if isinstance(proc_or_error, six.string_types):
            lines = [proc_or_error]
            exitstatus = 1
        else:
            exitstatus = proc_or_error.wait()
            if proc_or_error.stdout is None:
                raise RuntimeError()
            lines = [l.strip() for l in proc_or_error.stdout.readlines()]

        if exitstatus:
            dot(tty.red, '*')
            gateways.append(
                (None, "failed", 0, "Traceroute failed with exit code %d" % (exitstatus & 255)))
            continue

        if len(lines) == 1 and lines[0].startswith("ERROR:"):
            message = lines[0][6:].strip()
            console.verbose("%s: %s\n", host, message, stream=sys.stderr)
            dot(tty.red, "D")
            gateways.append((None, "dnserror", 0, message))
            continue

        if len(lines) == 0:
            if cmk.utils.debug.enabled():
                raise MKGeneralException(
                    "Cannot execute %s. Is traceroute installed? Are you root?" % command)
            dot(tty.red, '!')
            continue

        if len(lines) < 2:
            if not silent:
                console.error("%s: %s\n" % (host, ' '.join(lines)))
            gateways.append((None, "garbled", 0, "The output of traceroute seem truncated:\n%s" %
                             ("".join(lines))))
            dot(tty.blue)
            continue

        # Parse output of traceroute:
        # traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 40 byte packets
        #  1  * * *
        #  2  10.0.0.254  0.417 ms  0.459 ms  0.670 ms
        #  3  172.16.0.254  0.967 ms  1.031 ms  1.544 ms
        #  4  217.0.116.201  23.118 ms  25.153 ms  26.959 ms
        #  5  217.0.76.134  32.103 ms  32.491 ms  32.337 ms
        #  6  217.239.41.106  32.856 ms  35.279 ms  36.170 ms
        #  7  74.125.50.149  45.068 ms  44.991 ms  *
        #  8  *  66.249.94.86  41.052 ms  66.249.94.88  40.795 ms
        #  9  209.85.248.59  43.739 ms  41.106 ms  216.239.46.240  43.208 ms
        # 10  216.239.48.53  45.608 ms  47.121 ms  64.233.174.29  43.126 ms
        # 11  209.85.255.245  49.265 ms  40.470 ms  39.870 ms
        # 12  8.8.8.8  28.339 ms  28.566 ms  28.791 ms
        routes = []  # type: List[Optional[str]]
        for line in lines[1:]:
            parts = line.split()
            route = parts[1]
            if route.count('.') == 3:
                routes.append(route)
            elif route == '*':
                routes.append(None)  # No answer from this router
            else:
                if not silent:
                    console.error(
                        "%s: invalid output line from traceroute: '%s'\n" % (host, line))

        if len(routes) == 0:
            error = "incomplete output from traceroute. No routes found."
            console.error("%s: %s\n" % (host, error))
            gateways.append((None, "garbled", 0, error))
            dot(tty.red)
            continue

        # Only one entry -> host is directly reachable and gets nagios as parent -
        # if nagios is not the parent itself. Problem here: How can we determine
        # if the host in question is the monitoring host? The user must configure
        # this in monitoring_host.
        if len(routes) == 1:
            if ip == nagios_ip:
                gateways.append((None, "root", 0, ""))  # We are the root-monitoring host
                dot(tty.white, 'N')
            elif config.monitoring_host:
                gateways.append(((config.monitoring_host, nagios_ip, None), "direct", 0, ""))
                dot(tty.cyan, 'L')
            else:
                gateways.append((None, "direct", 0, ""))
            continue

        # Try far most route which is not identical with host itself
        ping_probes = settings.get("ping_probes", 5)
        skipped_gateways = 0
        this_route = None  # type: Optional[HostAddress]
        for r in routes[::-1]:
            if not r or (r == ip):
                continue
            # Do (optional) PING check in order to determine if that
            # gateway can be monitored via the standard host check
            if ping_probes:
                if not gateway_reachable_via_ping(r, ping_probes):
                    console.verbose("(not using %s, not reachable)\n", r, stream=sys.stderr)
                    skipped_gateways += 1
                    continue
            this_route = r
            break

        if not this_route:
            error = "No usable routing information"
            if not silent:
                console.error("%s: %s\n" % (host, error))
            gateways.append((None, "notfound", 0, error))
            dot(tty.blue)
            continue

        # TTLs already have been filtered out
        gateway_ip = this_route
        gateway = _ip_to_hostname(config_cache, this_route)
        if gateway:
            console.verbose("%s(%s) ", gateway, gateway_ip)
        else:
            console.verbose("%s ", gateway_ip)

        # Try to find DNS name of host via reverse DNS lookup
        dns_name = _ip_to_dnsname(gateway_ip)
        gateways.append(((gateway, gateway_ip, dns_name), "gateway", skipped_gateways, ""))
        dot(tty.green, 'G')
    return gateways
def main():
    usage = "usage: %prog [options] -- command line"
    description = """
The %%prog launches the given command line and parses its output in the
following format:

1. Text format (default):
   mandatory fields:
       tag: $STR; score: $FLOAT;
       tag: $STR; score: $FLOAT;
       ...
   optional fields:
       time: $FLOAT; metrics: $STR; loops: $INT; cmdline: $STR;
       less_better: true; errors: $INT; warnings: $INT;
       group: $STR; category: $STR;

2. JSON format (-j):
[
    {
        /* Mandatory fields */
        "name": "string",
        "score": float,
        /* Optional fields */
        "time": float, /* seconds */
        "metrics": "string",
        "loops": int,
        "cmdline": "string",
        "less_better": true,
        "errors": int,
        "warnings": int,
        "group": "string",
        "category": "string"
    },
    { ... }
]
"""
    op = OptionParser(description=description, usage=usage, formatter=formatter())
    op.add_option("-v", "--verbose", action="store_true", help="enable verbose mode")
    op.add_option("-j", "--json", action="store_true", help="treat input data as json")
    op.add_option("-f", "--file", help="get results from file, not from command line")

    suite = ptSuite()
    suite.addOptions(op)

    opts, args = op.parse_args()

    loglevel = logging.DEBUG if opts.verbose else logging.INFO
    logging.basicConfig(
        level=loglevel,
        format="%(asctime)s - %(module)17s - %(levelname).3s - %(message)s",
        datefmt='%H:%M:%S')

    suite.handleOptions(opts)

    if not opts.file and len(args) == 0:
        op.print_usage()
        print("error: command line is required")
        sys.exit(-1)

    run(suite, opts.json, opts.file, subprocess.list2cmdline(args))
def check_mk_local_automation(command: str,
                              args: Optional[Sequence[str]] = None,
                              indata: Any = "",
                              stdin_data: Optional[str] = None,
                              timeout: Optional[int] = None) -> Any:
    if args is None:
        args = []
    new_args = [ensure_str(a) for a in args]

    if stdin_data is None:
        stdin_data = repr(indata)

    if timeout:
        new_args = ["--timeout", "%d" % timeout] + new_args

    cmd = ['check_mk', '--automation', command] + new_args

    if command in ['restart', 'reload']:
        call_hook_pre_activate_changes()

    cmd = [ensure_str(a) for a in cmd]
    try:
        # This debug output makes problems when doing bulk inventory, because
        # it garbles the non-HTML response output
        # if config.debug:
        #     html.write("<div class=message>Running <tt>%s</tt></div>\n" % subprocess.list2cmdline(cmd))
        auto_logger.info("RUN: %s" % subprocess.list2cmdline(cmd))
        p = subprocess.Popen(cmd,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=True,
                             encoding="utf-8")
    except Exception as e:
        raise _local_automation_failure(command=command, cmdline=cmd, exc=e)

    assert p.stdin is not None
    assert p.stdout is not None
    assert p.stderr is not None

    auto_logger.info("STDIN: %r" % stdin_data)
    p.stdin.write(stdin_data)
    p.stdin.close()

    outdata = p.stdout.read()
    exitcode = p.wait()
    auto_logger.info("FINISHED: %d" % exitcode)
    auto_logger.debug("OUTPUT: %r" % outdata)

    errdata = p.stderr.read()
    if errdata:
        auto_logger.warning("'%s' returned '%s'" % (" ".join(cmd), errdata))

    if exitcode != 0:
        auto_logger.error("Error running %r (exit code %d)" %
                          (subprocess.list2cmdline(cmd), exitcode))
        raise _local_automation_failure(command=command,
                                        cmdline=cmd,
                                        code=exitcode,
                                        out=outdata,
                                        err=errdata)

    # On successful "restart" command execute the activate changes hook
    if command in ['restart', 'reload']:
        call_hook_activate_changes()

    try:
        return ast.literal_eval(outdata)
    except SyntaxError as e:
        raise _local_automation_failure(command=command, cmdline=cmd, out=outdata, exc=e)
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--clang-format-executable',
        metavar='EXECUTABLE',
        help='path to the clang-format executable',
        default='clang-format')
    parser.add_argument(
        '--extensions',
        help='comma separated list of file extensions (default: {})'.format(
            DEFAULT_EXTENSIONS),
        default=DEFAULT_EXTENSIONS)
    parser.add_argument(
        '-r',
        '--recursive',
        action='store_true',
        help='run recursively over directories')
    parser.add_argument('files', metavar='file', nargs='+')
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_true',
        help="disable output, useful for the exit code")
    parser.add_argument(
        '-j',
        metavar='N',
        type=int,
        default=0,
        help='run N clang-format jobs in parallel'
        ' (default number of cpus + 1)')
    parser.add_argument(
        '--color',
        default='auto',
        choices=['auto', 'always', 'never'],
        help='show colored diff (default: auto)')
    parser.add_argument(
        '-e',
        '--exclude',
        metavar='PATTERN',
        action='append',
        default=[],
        help='exclude paths matching the given glob-like pattern(s)'
        ' from recursive search')
    parser.add_argument(
        '--style',
        help='Formatting style to use (default: file)',
        default='file')

    args = parser.parse_args()

    # use default signal handling, like diff return SIGINT value on ^C
    # https://bugs.python.org/issue14229#msg156446
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    try:
        signal.SIGPIPE
    except AttributeError:
        # compatibility, SIGPIPE does not exist on Windows
        pass
    else:
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    colored_stdout = False
    colored_stderr = False
    if args.color == 'always':
        colored_stdout = True
        colored_stderr = True
    elif args.color == 'auto':
        colored_stdout = sys.stdout.isatty()
        colored_stderr = sys.stderr.isatty()

    version_invocation = [args.clang_format_executable, str("--version")]
    try:
        subprocess.check_call(version_invocation, stdout=DEVNULL)
    except subprocess.CalledProcessError as e:
        print_trouble(parser.prog, str(e), use_colors=colored_stderr)
        return ExitStatus.TROUBLE
    except OSError as e:
        print_trouble(
            parser.prog,
            "Command '{}' failed to start: {}".format(
                subprocess.list2cmdline(version_invocation), e),
            use_colors=colored_stderr,
        )
        return ExitStatus.TROUBLE

    retcode = ExitStatus.SUCCESS

    excludes = excludes_from_file(DEFAULT_CLANG_FORMAT_IGNORE)
    excludes.extend(args.exclude)

    files = list_files(
        args.files,
        recursive=args.recursive,
        exclude=excludes,
        extensions=args.extensions.split(','))

    if not files:
        print_trouble(parser.prog, 'No files found', use_colors=colored_stderr)
        return ExitStatus.TROUBLE

    if not args.quiet:
        print('Processing %s files: %s' % (len(files), ', '.join(files)))

    njobs = args.j
    if njobs == 0:
        njobs = multiprocessing.cpu_count() + 1
    njobs = min(len(files), njobs)

    if njobs == 1:
        # execute directly instead of in a pool,
        # less overhead, simpler stacktraces
        it = (run_clang_format_diff_wrapper(args, file) for file in files)
        pool = None
    else:
        pool = multiprocessing.Pool(njobs)
        it = pool.imap_unordered(
            partial(run_clang_format_diff_wrapper, args), files)
    while True:
        try:
            outs, errs = next(it)
        except StopIteration:
            break
        except DiffError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            retcode = ExitStatus.TROUBLE
            sys.stderr.writelines(e.errs)
        except UnexpectedError as e:
            print_trouble(parser.prog, str(e), use_colors=colored_stderr)
            sys.stderr.write(e.formatted_traceback)
            retcode = ExitStatus.TROUBLE
            # stop at the first unexpected error,
            # something could be very wrong,
            # don't process all files unnecessarily
            if pool:
                pool.terminate()
            break
        else:
            sys.stderr.writelines(errs)
            if outs == []:
                continue
            if not args.quiet:
                print_diff(outs, use_color=colored_stdout)
            if retcode == ExitStatus.SUCCESS:
                retcode = ExitStatus.DIFF
    return retcode
def run_verbose(args, **kwargs):
    print("+ {}".format(subprocess.list2cmdline(args)))
    subprocess.check_call(args, **kwargs)
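# Hypothetical call to run_verbose() above; the git command is only an
# example, and extra keyword arguments go straight through to check_call.
run_verbose(["git", "status", "--short"], cwd=".")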
def devserver(
    reload,
    watchers,
    workers,
    experimental_spa,
    styleguide,
    prefix,
    pretty,
    environment,
    debug_server,
    bind,
):
    "Starts a lightweight web server for development."
    if bind is None:
        bind = "127.0.0.1:8000"

    if ":" in bind:
        host, port = bind.split(":", 1)
        port = int(port)
    else:
        host = bind
        port = None

    import os

    os.environ["SENTRY_ENVIRONMENT"] = environment
    # NODE_ENV *must* use production for any prod-like environment as third party libraries look
    # for this magic constant
    os.environ["NODE_ENV"] = "production" if environment.startswith("prod") else environment

    from django.conf import settings

    from sentry import options
    from sentry.services.http import SentryHTTPServer

    url_prefix = options.get("system.url-prefix", "")
    parsed_url = urlparse(url_prefix)
    # Make sure we're trying to use a port that we can actually bind to
    needs_https = parsed_url.scheme == "https" and (parsed_url.port or 443) > 1024
    has_https = False

    if needs_https:
        from subprocess import check_output

        try:
            check_output(["which", "https"])
            has_https = True
        except Exception:
            has_https = False
            from sentry.runner.initializer import show_big_error

            show_big_error(
                [
                    "missing `https` on your `$PATH`, but https is needed",
                    "`$ brew install mattrobenolt/stuff/https`",
                ]
            )

    uwsgi_overrides = {
        "http-keepalive": True,
        # Make sure we reload really quickly for local dev in case it
        # doesn't want to shut down nicely on its own, NO MERCY
        "worker-reload-mercy": 2,
        # We need stdin to support pdb in devserver
        "honour-stdin": True,
        # accept ridiculously large files
        "limit-post": 1 << 30,
        # do something with chunked
        "http-chunked-input": True,
        "thunder-lock": False,
        "timeout": 600,
        "harakiri": 600,
    }

    if reload:
        uwsgi_overrides["py-autoreload"] = 1

    daemons = []

    if experimental_spa:
        os.environ["SENTRY_UI_DEV_ONLY"] = "1"
        if not watchers:
            click.secho(
                "Using experimental SPA mode without watchers enabled has no effect",
                err=True,
                fg="yellow",
            )

    # We proxy all requests through webpacks devserver on the configured port.
    # The backend is served on port+1 and is proxied via the webpack
    # configuration.
    if watchers:
        daemons += settings.SENTRY_WATCHERS

        proxy_port = port
        port = port + 1

        uwsgi_overrides["protocol"] = "http"

        os.environ["FORCE_WEBPACK_DEV_SERVER"] = "1"
        os.environ["SENTRY_WEBPACK_PROXY_PORT"] = "%s" % proxy_port
        os.environ["SENTRY_BACKEND_PORT"] = "%s" % port

        # webpack and/or typescript is causing memory issues
        os.environ["NODE_OPTIONS"] = (
            os.environ.get("NODE_OPTIONS", "") + " --max-old-space-size=4096"
        ).lstrip()

        # Replace the webpack watcher with the drop-in webpack-dev-server
        webpack_config = next(w for w in daemons if w[0] == "webpack")[1]
        webpack_config[0] = os.path.join(
            *os.path.split(webpack_config[0])[0:-1] + ("webpack-dev-server",)
        )

        daemons = [w for w in daemons if w[0] != "webpack"] + [("webpack", webpack_config)]
    else:
        # If we are the bare http server, use the http option with uwsgi protocol
        # See https://uwsgi-docs.readthedocs.io/en/latest/HTTP.html
        uwsgi_overrides.update(
            {
                # Make sure uWSGI spawns an HTTP server for us as we don't
                # have a proxy/load-balancer in front in dev mode.
                "http": f"{host}:{port}",
                "protocol": "uwsgi",
                # This is needed to prevent https://git.io/fj7Lw
                "uwsgi-socket": None,
            }
        )

    if workers:
        if settings.CELERY_ALWAYS_EAGER:
            raise click.ClickException(
                "Disable CELERY_ALWAYS_EAGER in your settings file to spawn workers."
            )

        daemons += [_get_daemon("worker"), _get_daemon("cron")]

        from sentry import eventstream

        if eventstream.requires_post_process_forwarder():
            daemons += [_get_daemon("post-process-forwarder")]

        if settings.SENTRY_EXTRA_WORKERS:
            daemons.extend([_get_daemon(name) for name in settings.SENTRY_EXTRA_WORKERS])

        if settings.SENTRY_DEV_PROCESS_SUBSCRIPTIONS:
            if not settings.SENTRY_EVENTSTREAM == "sentry.eventstream.kafka.KafkaEventStream":
                raise click.ClickException(
                    "`SENTRY_DEV_PROCESS_SUBSCRIPTIONS` can only be used when "
                    "`SENTRY_EVENTSTREAM=sentry.eventstream.kafka.KafkaEventStream`."
                )
            for name, topic in settings.KAFKA_SUBSCRIPTION_RESULT_TOPICS.items():
                daemons += [_get_daemon("subscription-consumer", "--topic", topic, suffix=name)]

    if settings.SENTRY_USE_RELAY:
        daemons += [_get_daemon("ingest")]

    if needs_https and has_https:
        https_port = str(parsed_url.port)
        https_host = parsed_url.hostname

        # Determine a random port for the backend http server
        import socket

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind((host, 0))
        port = s.getsockname()[1]
        s.close()
        bind = "%s:%d" % (host, port)

        daemons += [
            ("https", ["https", "-host", https_host, "-listen", host + ":" + https_port, bind])
        ]

    from sentry.runner.commands.devservices import _prepare_containers

    for name, container_options in _prepare_containers("sentry", silent=True).items():
        if container_options.get("with_devserver", False):
            daemons += [(name, ["sentry", "devservices", "attach", "--fast", name])]

    # A better log-format for local dev when running through honcho,
    # but if there aren't any other daemons, we don't want to override.
    if daemons:
        uwsgi_overrides["log-format"] = "%(method) %(status) %(uri) %(proto) %(size)"
    else:
        uwsgi_overrides["log-format"] = "[%(ltime)] %(method) %(status) %(uri) %(proto) %(size)"

    server = SentryHTTPServer(
        host=host, port=port, workers=1, extra_options=uwsgi_overrides, debug=debug_server
    )

    # If we don't need any other daemons, just launch a normal uwsgi webserver
    # and avoid dealing with subprocesses
    if not daemons:
        return server.run()

    import sys
    from subprocess import list2cmdline

    from honcho.manager import Manager
    from honcho.printer import Printer

    os.environ["PYTHONUNBUFFERED"] = "true"

    if debug_server:
        threading.Thread(target=server.run).start()
    else:
        # Make sure that the environment is prepared before honcho takes over
        # This sets all the appropriate uwsgi env vars, etc
        server.prepare_environment()
        daemons += [_get_daemon("server")]

    if styleguide:
        daemons += [_get_daemon("storybook")]

    cwd = os.path.realpath(os.path.join(settings.PROJECT_ROOT, os.pardir, os.pardir))

    honcho_printer = Printer(prefix=prefix)

    if pretty:
        from sentry.runner.formatting import monkeypatch_honcho_write

        honcho_printer.write = types.MethodType(monkeypatch_honcho_write, honcho_printer)

    manager = Manager(honcho_printer)
    for name, cmd in daemons:
        manager.add_process(name, list2cmdline(cmd), quiet=False, cwd=cwd)

    manager.loop()
    sys.exit(manager.returncode)
def install_module(self, module=None, module_url=None, install_method=None, requirements=(), optional=False, global_options=[], no_deps=False, editable=False): """ Install module via pip. module_url can be a url to a python package tarball, a path to a directory containing a setup.py (absolute or relative to work_dir) or None, in which case it will default to the module name. requirements is a list of pip requirements files. If specified, these will be combined with the module_url (if any), like so: pip install -r requirements1.txt -r requirements2.txt module_url """ c = self.config dirs = self.query_abs_dirs() venv_path = self.query_virtualenv_path() self.info("Installing %s into virtualenv %s" % (module, venv_path)) if not module_url: module_url = module if install_method in (None, 'pip'): if not module_url and not requirements: self.fatal("Must specify module and/or requirements") pip = self.query_python_path("pip") if c.get("verbose_pip"): command = [pip, "-v", "install"] else: command = [pip, "install"] if no_deps: command += ["--no-deps"] virtualenv_cache_dir = c.get("virtualenv_cache_dir", os.path.join(venv_path, "cache")) if virtualenv_cache_dir: command += ["--download-cache", virtualenv_cache_dir] # To avoid timeouts with our pypi server, increase default timeout: # https://bugzilla.mozilla.org/show_bug.cgi?id=1007230#c802 command += ['--timeout', str(c.get('pip_timeout', 120))] for requirement in requirements: command += ["-r", requirement] if c.get('find_links') and not c["pip_index"]: command += ['--no-index'] for opt in global_options: command += ["--global-option", opt] elif install_method == 'easy_install': if not module: self.fatal( "module parameter required with install_method='easy_install'" ) if requirements: # Install pip requirements files separately, since they're # not understood by easy_install. self.install_module(requirements=requirements, install_method='pip') # Allow easy_install to be overridden by # self.config['exes']['easy_install'] default = 'easy_install' if self._is_windows(): # Don't invoke `easy_install` directly on windows since # the 'install' in the executable name hits UAC # - http://answers.microsoft.com/en-us/windows/forum/windows_7-security/uac-message-do-you-want-to-allow-the-following/bea30ad8-9ef8-4897-aab4-841a65f7af71 # - https://bugzilla.mozilla.org/show_bug.cgi?id=791840 default = [ self.query_python_path(), self.query_python_path('easy_install-script.py') ] command = self.query_exe('easy_install', default=default, return_type="list") else: self.fatal( "install_module() doesn't understand an install_method of %s!" % install_method) # Add --find-links pages to look at proxxy = Proxxy(self.config, self.log_obj) for link in proxxy.get_proxies_and_urls(c.get('find_links', [])): command.extend(["--find-links", link]) # module_url can be None if only specifying requirements files if module_url: if editable: if install_method in (None, 'pip'): command += ['-e'] else: self.fatal( "editable installs not supported for install_method %s" % install_method) command += [module_url] # If we're only installing a single requirements file, use # the file's directory as cwd, so relative paths work correctly. cwd = dirs['abs_work_dir'] if not module and len(requirements) == 1: cwd = os.path.dirname(requirements[0]) quoted_command = subprocess.list2cmdline(command) # Allow for errors while building modules, but require a # return status of 0. 
self.retry( self.run_command, # None will cause default value to be used attempts=1 if optional else None, good_statuses=(0, ), error_level=WARNING if optional else FATAL, error_message='Could not install python package: ' + quoted_command + ' failed after %(attempts)d tries!', args=[ command, ], kwargs={ 'error_list': VirtualenvErrorList, 'cwd': cwd, # WARNING only since retry will raise final FATAL if all # retry attempts are unsuccessful - and we only want # an ERROR of FATAL if *no* retry attempt works 'error_level': WARNING, })
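# The retry call above encodes a policy worth naming: per-attempt failures are
# WARNING-level, and only the final failure escalates. A generic stdlib sketch
# of that policy (names are illustrative, not the mozharness API):
import logging
import subprocess

log = logging.getLogger(__name__)

def retry_command(command, attempts=3, good_statuses=(0,)):
    """Run `command`, warning on each failure; raise only after the last try."""
    quoted = subprocess.list2cmdline(command)
    for attempt in range(1, attempts + 1):
        status = subprocess.call(command)
        if status in good_statuses:
            return status
        log.warning("attempt %d/%d of %s failed with status %d",
                    attempt, attempts, quoted, status)
    raise RuntimeError("%s failed after %d tries" % (quoted, attempts))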
def build_pod( self, task_run: TaskRun, cmds: List[str], args: Optional[List[str]] = None, labels: Optional[Dict[str, str]] = None, try_number: Optional[int] = None, include_system_secrets: bool = False, ) -> k8s.V1Pod: if not self.container_tag: raise DatabandConfigError( "Your container tag is None, please check your configuration", help_msg="Container tag should be assigned", ) pod_name = self.get_pod_name(task_run=task_run, try_number=try_number) image = self.full_image labels = combine_mappings(labels, self.labels) labels["pod_name"] = pod_name labels["dbnd_run_uid"] = task_run.run.run_uid labels["dbnd_task_run_uid"] = task_run.task_run_uid labels["dbnd_task_run_attempt_uid"] = task_run.task_run_attempt_uid labels[ "dbnd_task_family"] = task_run.task.task_definition.full_task_family_short labels["dbnd_task_name"] = task_run.task.task_name labels["dbnd_task_af_id"] = task_run.task_af_id # for easier pod deletion (kubectl delete pod -l dbnd=task_run -n <my_namespace>) if task_run.task.task_is_system: labels["dbnd"] = "dbnd_system_task_run" else: labels["dbnd"] = "task_run" # we need to be sure that the values meet the dns label names RFC # https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names labels = { label_name: clean_label_name_dns1123(str(label_value)) for label_name, label_value in six.iteritems(labels) } if is_verbose(): logger.info("Build pod with kubernetes labels {}".format(labels)) annotations = self.annotations.copy() if self.gcp_service_account_keys: annotations[ "iam.cloud.google.com/service-account"] = self.gcp_service_account_keys annotations["dbnd_tracker"] = task_run.task_tracker_url from dbnd_docker.kubernetes.vendorized_airflow.dbnd_extended_resources import ( DbndExtendedResources, ) resources = DbndExtendedResources( requests=self.requests, limits=self.limits, request_memory=self.request_memory, request_cpu=self.request_cpu, limit_memory=self.limit_memory, limit_cpu=self.limit_cpu, ) env_vars = { ENV_DBND_POD_NAME: pod_name, ENV_DBND_POD_NAMESPACE: self.namespace, ENV_DBND_USER: task_run.task_run_env.user, ENV_DBND__ENV_IMAGE: image, ENV_DBND_ENV: task_run.run.env.task_name, ENV_DBND__ENV_MACHINE: "%s at %s" % (pod_name, self.namespace), } if AIRFLOW_VERSION_2: env_vars[ "AIRFLOW__CORE__TASK_RUNNER"] = "dbnd_airflow.compat.dbnd_task_runner.DbndStandardTaskRunner" if self.auto_remove: env_vars[ENV_DBND_AUTO_REMOVE_POD] = "True" env_vars[self._params.get_param_env_key(self, "in_cluster")] = "True" env_vars["AIRFLOW__KUBERNETES__IN_CLUSTER"] = "True" env_vars[ "DBND__RUN_INFO__SOURCE_VERSION"] = task_run.run.context.task_run_env.user_code_version env_vars["AIRFLOW__KUBERNETES__DAGS_IN_IMAGE"] = "True" if not get_dbnd_project_config().is_tracking_mode(): env_vars[ENV_DBND__TRACKING] = "False" # we want that all next runs will be able to use the image that we have in our configuration env_vars.update( self._params.to_env_map(self, "container_repository", "container_tag")) env_vars.update(self.env_vars) env_vars.update(task_run.run.get_context_spawn_env()) secrets = self.get_secrets( include_system_secrets=include_system_secrets) if self.trap_exit_file_flag: args = [ textwrap.dedent(""" trap "touch {trap_file}" EXIT {command} """.format( trap_file=self.trap_exit_file_flag, command=subprocess.list2cmdline(cmds), )) ] # we update cmd now cmds = ["/bin/bash", "-c"] if self.debug_with_command: logger.warning( "%s replacing pod %s command with '%s', original command=`%s`", task_run, pod_name, self.debug_with_command, 
subprocess.list2cmdline(cmds), ) cmds = shlex.split(self.debug_with_command) base_pod = self._build_base_pod() pod = self._to_real_pod( cmds=cmds, args=args, namespace=self.namespace, name=pod_name, envs=env_vars, image=image, labels=labels, secrets=secrets, resources=resources, annotations=annotations, ) final_pod = reconcile_pods(base_pod, pod) return final_pod
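# The trap_exit_file_flag wrapper above is compact enough to isolate: the
# original argv is flattened with list2cmdline, embedded in a bash trap, and
# the whole script becomes the single argument to `bash -c`. A minimal sketch
# (the trap-file path is illustrative):
import subprocess
import textwrap

def wrap_with_exit_trap(cmds, trap_file):
    """Return an argv that runs `cmds` and touches `trap_file` on exit."""
    script = textwrap.dedent(
        """
        trap "touch {trap_file}" EXIT
        {command}
        """
    ).format(trap_file=trap_file, command=subprocess.list2cmdline(cmds))
    # bash -c takes the entire script as one argument
    return ["/bin/bash", "-c", script]

# e.g. subprocess.call(wrap_with_exit_trap(["echo", "done"], "/tmp/pod-exited"))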
def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env,
                   universal_newlines, startupinfo, creationflags, shell,
                   p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite):
    if not isinstance(args, str):
        args = subprocess.list2cmdline(args)

    # Always OR in the CREATE_NEW_PROCESS_GROUP flag
    creationflags |= winprocess.CREATE_NEW_PROCESS_GROUP

    if startupinfo is None:
        startupinfo = winprocess.STARTUPINFO()

    if None not in (p2cread, c2pwrite, errwrite):
        startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
        startupinfo.hStdInput = int(p2cread)
        startupinfo.hStdOutput = int(c2pwrite)
        startupinfo.hStdError = int(errwrite)

    if shell:
        startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = winprocess.SW_HIDE
        comspec = os.environ.get("COMSPEC", "cmd.exe")
        args = comspec + " /c " + args

    # determine if we can create a job
    canCreateJob = winprocess.CanCreateJobObject()

    # set process creation flags
    creationflags |= winprocess.CREATE_SUSPENDED
    creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
    if canCreateJob:
        creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB

    # create the process
    hp, ht, pid, tid = winprocess.CreateProcess(
        executable, args,
        None, None,  # No special security
        1,  # Must inherit handles!
        creationflags,
        winprocess.EnvironmentBlock(env),
        cwd, startupinfo)
    self._child_created = True
    self._handle = hp
    self._thread = ht
    self.pid = pid
    self.tid = tid

    if canCreateJob:
        # We create a new job for this process, so that we can kill
        # the process and any sub-processes
        self._job = winprocess.CreateJobObject()
        winprocess.AssignProcessToJobObject(self._job, int(hp))
    else:
        self._job = None

    winprocess.ResumeThread(int(ht))
    ht.Close()

    if p2cread is not None:
        p2cread.Close()
    if c2pwrite is not None:
        c2pwrite.Close()
    if errwrite is not None:
        errwrite.Close()
    time.sleep(.1)
def adb_call(self, *args):
    """Call adb against the configured device serial; return its exit status."""
    cmds = ['adb', '-s', self._serial] + list(args)
    logger.debug("RUN: %s", subprocess.list2cmdline(cmds))
    return subprocess.call(cmds)
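# Note that list2cmdline here is for log readability only; subprocess.call
# still receives the argv list, so no extra quoting layer is introduced.
# For example (the serial is illustrative):
import subprocess

cmds = ["adb", "-s", "emulator-5554", "shell", "getprop", "ro.build.version.sdk"]
print("RUN: %s" % subprocess.list2cmdline(cmds))  # what the log shows
# subprocess.call(cmds)  # what actually runs: the list, not the string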
def _execute(self, args, cwd): args = subprocess.list2cmdline(args) if not isinstance(args, unicode): args = args.decode('mbcs') if not isinstance(cwd, unicode): cwd = cwd.decode('mbcs') limits = JOBOBJECT_EXTENDED_LIMIT_INFORMATION() limits.JobMemoryLimit = self.memory_limit * 1024 # bytes limits.BasicLimitInformation.PerJobUserTimeLimit = int( self.time_limit * 10000000) # 100ns units limits.BasicLimitInformation.LimitFlags = ( JOB_OBJECT_LIMIT_ACTIVE_PROCESS | JOB_OBJECT_LIMIT_JOB_MEMORY | JOB_OBJECT_LIMIT_JOB_TIME) limits.BasicLimitInformation.ActiveProcessLimit = 1 self._job = job = CreateJobObject(None, None) if not job: raise WinError() self._port = CreateIoCompletionPort(INVALID_HANDLE_VALUE, None, 0, 1) if not self._port: raise WinError() if not SetInformationJobObject( job, JobObjectExtendedLimitInformation, byref(limits), sizeof(JOBOBJECT_EXTENDED_LIMIT_INFORMATION)): raise WinError() port = JOBOBJECT_ASSOCIATE_COMPLETION_PORT() port.CompletionKey = job port.CompletionPort = self._port if not SetInformationJobObject( job, JobObjectAssociateCompletionPortInformation, byref(port), sizeof(JOBOBJECT_ASSOCIATE_COMPLETION_PORT)): raise WinError() stdin_, stdin = CreatePipe() stdout, stdout_ = CreatePipe() stderr, stderr_ = CreatePipe() stdin_ = make_inheritable(stdin_) stdout_ = make_inheritable(stdout_) stderr_ = make_inheritable(stderr_) si = STARTUPINFO() si.cb = sizeof(STARTUPINFO) si.dwFlags = STARTF_USESTDHANDLES si.hStdInput = stdin_ si.hStdOutput = stdout_ si.hStdError = stderr_ pi = PROCESS_INFORMATION() if not CreateProcess(self.csbox, args, None, None, True, CREATE_SUSPENDED | CREATE_BREAKAWAY_FROM_JOB, None, cwd, byref(si), byref(pi)): raise WinError() if AssignProcessToJobObject(job, pi.hProcess) == 0: raise WinError() self._monitor_thread = Thread(target=self._monitor) self._monitor_thread.daemon = True self._monitor_thread.start() if ResumeThread(pi.hThread) == -1: raise WinError() if not CloseHandle(pi.hThread): raise WinError() self._process = pi.hProcess self.stdin = os.fdopen(msvcrt.open_osfhandle(stdin, 0), 'wb') self.stdout = os.fdopen(msvcrt.open_osfhandle(stdout, 0), 'rb') self.stderr = os.fdopen(msvcrt.open_osfhandle(stderr, 0), 'rb') if not CloseHandle(stdin_): raise WinError() if not CloseHandle(stdout_): raise WinError() if not CloseHandle(stderr_): raise WinError() self._shocker_thread = Thread(target=self._shocker) self._shocker_thread.daemon = True self._shocker_thread.start()
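# The unit conversions above are easy to get wrong: JobMemoryLimit is in
# bytes, while PerJobUserTimeLimit is in 100-nanosecond ticks. A small helper
# makes the intent explicit (assuming, as the code above does, that
# memory_limit is in KiB and time_limit in seconds):
def job_limits(memory_limit_kib, time_limit_seconds):
    """Convert human-friendly limits into Win32 job-object units."""
    job_memory_bytes = memory_limit_kib * 1024            # JobMemoryLimit
    user_time_ticks = int(time_limit_seconds * 10000000)  # 100ns per tick
    return job_memory_bytes, user_time_ticks

# job_limits(65536, 2.0) -> (67108864, 20000000)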
def _get_command_line_as_string(): import subprocess cmd_line_as_list = [click.get_current_context().parent.command_path] cmd_line_as_list.extend(click.get_os_args()) return subprocess.list2cmdline(cmd_line_as_list)
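# list2cmdline applies the Microsoft C runtime quoting rules, which is
# adequate for display purposes like the helper above. A quick illustration:
from subprocess import list2cmdline

print(list2cmdline(["streamlit", "run", "my app.py"]))
# streamlit run "my app.py"    (embedded space forces quoting)
print(list2cmdline(["echo", 'say "hi"']))
# echo "say \"hi\""            (double quotes are backslash-escaped)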
def _execute_child(self, *args_tuple):
    # workaround for bug 958609
    if sys.hexversion < 0x02070600:  # prior to 2.7.6
        (args, executable, preexec_fn, close_fds, cwd, env,
         universal_newlines, startupinfo, creationflags, shell,
         p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = args_tuple
        to_close = set()
    else:  # 2.7.6 and later
        (args, executable, preexec_fn, close_fds, cwd, env,
         universal_newlines, startupinfo, creationflags, shell, to_close,
         p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = args_tuple

    if not isinstance(args, types.StringTypes):
        args = subprocess.list2cmdline(args)

    # Always OR in the CREATE_NEW_PROCESS_GROUP flag
    creationflags |= winprocess.CREATE_NEW_PROCESS_GROUP

    if startupinfo is None:
        startupinfo = winprocess.STARTUPINFO()

    if None not in (p2cread, c2pwrite, errwrite):
        startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
        startupinfo.hStdInput = int(p2cread)
        startupinfo.hStdOutput = int(c2pwrite)
        startupinfo.hStdError = int(errwrite)

    if shell:
        startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = winprocess.SW_HIDE
        comspec = os.environ.get("COMSPEC", "cmd.exe")
        args = comspec + " /c " + args

    # determine if we can create a job
    canCreateJob = winprocess.CanCreateJobObject()

    # set process creation flags
    creationflags |= winprocess.CREATE_SUSPENDED
    creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
    if canCreateJob:
        # Uncomment the line below to discover very useful things about your environment
        # print "++++ killableprocess: releng twistd patch not applied, we can create job objects"
        creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB

    # create the process
    hp, ht, pid, tid = winprocess.CreateProcess(
        executable, args,
        None, None,  # No special security
        1,  # Must inherit handles!
        creationflags,
        winprocess.EnvironmentBlock(env),
        cwd, startupinfo)
    self._child_created = True
    self._handle = hp
    self._thread = ht
    self.pid = pid
    self.tid = tid

    if canCreateJob:
        # We create a new job for this process, so that we can kill
        # the process and any sub-processes
        self._job = winprocess.CreateJobObject()
        winprocess.AssignProcessToJobObject(self._job, int(hp))
    else:
        self._job = None

    winprocess.ResumeThread(int(ht))
    ht.Close()

    if p2cread is not None:
        p2cread.Close()
    if c2pwrite is not None:
        c2pwrite.Close()
    if errwrite is not None:
        errwrite.Close()
    time.sleep(.1)
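# Both _execute_child variants above boil down to the same recipe: create the
# child suspended, assign it to a job object, then resume it, so the child can
# never spawn grandchildren before joining the job. A Windows-only ctypes
# sketch of the job-object half (it skips CREATE_SUSPENDED for brevity and
# uses the private Popen._handle, so treat it as illustrative only):
import ctypes
import subprocess

kernel32 = ctypes.windll.kernel32

# Launch the child, then place it (and any future descendants) in a job
# object so the whole tree can be killed in one call.
proc = subprocess.Popen(["notepad.exe"])
job = kernel32.CreateJobObjectW(None, None)
kernel32.AssignProcessToJobObject(job, int(proc._handle))

# ... later: terminate the process and everything it spawned
kernel32.TerminateJobObject(job, 1)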
def conform_media(path,output_dir, start=None, end=None, duration=None, video_profile=None, audio_profile=None): if not video_profile: video_profile = 'dnx_1080p_36_23.97' if not audio_profile: audio_profile = 'pcm_48000' video_profile = Video_Profiles[video_profile] audio_profile = Audio_Profiles[audio_profile] format = probe(path) out_files = [] cmd = [FFMPEG_EXEC,'-y', '-nostdin'] if end: duration = timecode_to_seconds(end) - timecode_to_seconds(start) duration = seconds_to_timecode(duration) end = None if start: start_seconds = timecode_to_seconds(start) fast_start = max(0,int(start_seconds-30)) if fast_start: start = seconds_to_timecode(start_seconds - fast_start) cmd.extend(['-ss', seconds_to_timecode(fast_start)]) frame_rate = video_profile['frame_rate'] pix_fmt = video_profile['pix_fmt'] bitrate = video_profile['bitrate'] if format['format']['format_name'] == "image2": cmd.extend([ '-r', frame_rate]) cmd.extend(['-i', path,]) width, height = video_profile['size'].split('x') interlaced = False if height[-1] == 'i': interlaced = True width = int(width) height = int(height[:-1]) #sample_rate =44100 sample_rate = audio_profile['sample_rate'] for stream in format['streams']: pprint(stream) stream_index = stream['index'] if stream['codec_type'] == 'video': input_width = stream['width'] input_height = stream['height'] max_width = width max_height = height scale = min(max_width/ float(input_width), max_height/float(input_height) ) scale_width = int(input_width*scale) scale_height = int(input_height*scale) padding_ofs_x = (max_width - scale_width)/2 padding_ofs_y = (max_height - scale_height)/2 vfilter = "scale=%d:%d,pad=%d:%d:%d:%d" % (scale_width,scale_height, max_width,max_height, padding_ofs_x,padding_ofs_y) print vfilter cmd.extend(['-an','-vcodec', 'dnxhd', '-vb', '%dM' % bitrate, '-r', frame_rate, '-pix_fmt', pix_fmt]) if not start is None: cmd.extend(['-ss', str(start)]) if not duration is None: cmd.extend(['-t', str(duration)]) cmd.extend(['-vf', vfilter]) out_file = os.path.join(output_dir, 'out_%d.dnxhd' % (stream_index)) cmd.extend([out_file]) out_files.append({'path':out_file, 'frame_rate':frame_rate, 'type': 'video'}) elif stream['codec_type'] == 'audio': input_sample_rate = int(stream['sample_rate']) channels = stream['channels'] cmd.extend(['-vn', '-acodec', 'pcm_s16le','-f','s16le', '-ar', str(sample_rate)]) if not start is None: cmd.extend(['-ss', str(start)]) if not duration is None: cmd.extend(['-t', str(duration)]) out_file = os.path.join(output_dir, 'out_%d_%d_%d.pcm' % (stream_index, sample_rate, channels)) cmd.extend([out_file]) out_files.append({'path':out_file, 'sample_rate':sample_rate, 'channels':channels,'type': 'audio'}) print subprocess.list2cmdline(cmd) subprocess.check_call(cmd) return out_files
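# The scale-and-pad arithmetic above relies on Python 2 integer division for
# the padding offsets; a port to Python 3 needs explicit //. The same
# letterbox computation, isolated:
def letterbox(input_w, input_h, max_w, max_h):
    """Return (scaled_w, scaled_h, pad_x, pad_y) for an ffmpeg scale+pad filter."""
    scale = min(max_w / float(input_w), max_h / float(input_h))
    scaled_w, scaled_h = int(input_w * scale), int(input_h * scale)
    pad_x = (max_w - scaled_w) // 2  # explicit integer division
    pad_y = (max_h - scaled_h) // 2
    return scaled_w, scaled_h, pad_x, pad_y

# letterbox(1280, 720, 1920, 1080) -> (1920, 1080, 0, 0)
# letterbox(720, 576, 1920, 1080)  -> (1350, 1080, 285, 0)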
def __init__(self, command): self.command = subprocess.list2cmdline(command) self.returncode = None self.output = None self.error = None
def run_lcmodel_raw(voxel_name, ppmst): print '' print 'PROCESSING SPECTRA WITH LCMODEL FOR %s PPMST = %s' % ( voxel_name, ppmst) # #mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix', voxel_name, 'ppm_%s'%ppmst, 'met')) #mkdir_path(os.path.join(workspace_dir, subject, 'lcmodel_twix', voxel_name, 'ppm_%s'%ppmst, 'h2o')) #lcmodel_dir = os.path.join(workspace_dir, subject, 'lcmodel_twix',voxel_name, 'ppm_%s'%ppmst) mkdir_path( os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH', voxel_name, 'ppm_%s' % ppmst, 'met')) mkdir_path( os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH', voxel_name, 'ppm_%s' % ppmst, 'h2o')) lcmodel_dir = os.path.join(workspace_dir, subject, 'lcmodel_twix_NMEACH', voxel_name, 'ppm_%s' % ppmst) shutil.copy( os.path.join(twix_dir, '%s' % voxel_name, '%s' % voxel_name, '%s_lcm' % voxel_name), os.path.join(lcmodel_dir, 'met', 'RAW')) shutil.copy( os.path.join(twix_dir, '%s' % voxel_name, '%s_w' % voxel_name, '%s_w_lcm' % voxel_name), os.path.join(lcmodel_dir, 'h2o', 'RAW')) met = os.path.join(lcmodel_dir, 'met', 'RAW') h2o = os.path.join(lcmodel_dir, 'h2o', 'RAW') # read some data from the RDA header rda_info = [] rda_header = open( os.path.join(workspace_dir, subject, 'lcmodel_rda', voxel_name, 'ppm_%s' % ppmst, 'rda_header.txt'), 'r') for line in rda_header: rda_info.append(line) # define twix parameters nunfil = 2078 hzpppm = 123.242398 echot = 30.0 deltat = 0.000417 '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' ''' Building the control file ''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' '''''' print '...building control file' file = open(os.path.join(lcmodel_dir, 'control'), "w") file.write(" $LCMODL\n") file.write(" title= 'TWIX - %s' \n" % rda_info[0]) file.write(" srcraw= '%s' \n" % met) file.write(" srch2o= '%s' \n" % h2o) file.write(" savdir= '%s' \n" % lcmodel_dir) file.write(" ppmst= %s \n" % ppmst) file.write(" ppmend= 0.3\n") file.write(" nunfil= %s\n" % nunfil) file.write(" ltable= 7\n") file.write(" lps= 8\n") file.write(" lprint= 6\n") file.write(" lcsv= 11\n") file.write(" lcoraw= 10\n") file.write(" lcoord= 9\n") file.write(" hzpppm= %s\n" % hzpppm) file.write(" filtab= '%s/table'\n" % lcmodel_dir) file.write(" filraw= '%s/met/RAW'\n" % lcmodel_dir) file.write(" filps= '%s/ps'\n" % lcmodel_dir) file.write(" filpri= '%s/print'\n" % lcmodel_dir) file.write(" filh2o= '%s/h2o/RAW'\n" % lcmodel_dir) file.write(" filcsv= '%s/spreadsheet.csv'\n" % lcmodel_dir) file.write(" filcor= '%s/coraw'\n" % lcmodel_dir) file.write(" filcoo= '%s/coord'\n" % lcmodel_dir) file.write( " filbas= '/home/raid3/kanaan/.lcmodel/basis-sets/press_te30_3t_01a.basis'\n" ) file.write(" echot= %s \n" % echot) file.write(" dows= T \n") file.write(" NEACH= 999 \n") # export met fits #file.write(" DEGPPM =0 \n") file.write(" doecc= T\n") file.write(" deltat= %s\n" % deltat) file.write(" $END\n") file.close() if os.path.isfile(os.path.join(lcmodel_dir, 'spreadsheet.csv')): print 'Spectrum already processed .................moving on' else: print '...running standardA4pdf execution-script ' print '' lcm_command = [ '/bin/sh', '/home/raid3/kanaan/.lcmodel/execution-scripts/standardA4pdfv3', '%s' % lcmodel_dir, '30', '%s' % lcmodel_dir, '%s' % lcmodel_dir ] print '... 
running execution script' print subprocess.list2cmdline(lcm_command) subprocess.call(lcm_command) reader = open(os.path.join(lcmodel_dir, 'table'), 'r') for line in reader: if 'FWHM' in line: fwhm = float(line[9:14]) snrx = line[29:31] if 'Data shift' in line: shift = line[15:21] if 'Ph:' in line: ph0 = line[6:10] ph1 = line[19:24] fwhm_hz = fwhm * 123.24 file = open(os.path.join(lcmodel_dir, 'snr.txt'), "w") file.write('%s, %s, %s, %s, %s, %s' % (fwhm, fwhm_hz, snrx, shift, ph0, ph1)) file.close() print '###############################################################################'
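# The fixed-column slices above (line[9:14], line[29:31]) silently break if
# LCModel ever shifts its table layout; anchoring on the labels is sturdier.
# A sketch, assuming the usual "FWHM = 0.041 ppm   S/N =  25" line format:
import re

def parse_fwhm_snr(line):
    """Extract FWHM (ppm) and S/N from an LCModel table line, if present."""
    fwhm_match = re.search(r"FWHM\s*=\s*([\d.]+)", line)
    snr_match = re.search(r"S/N\s*=\s*(\d+)", line)
    fwhm = float(fwhm_match.group(1)) if fwhm_match else None
    snr = int(snr_match.group(1)) if snr_match else None
    return fwhm, snr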
def devserver(*, bootstrap: bool, workers: bool) -> None: "Starts all Snuba processes for local development." import os import sys from subprocess import list2cmdline, call from honcho.manager import Manager os.environ["PYTHONUNBUFFERED"] = "1" if bootstrap: cmd = ["snuba", "bootstrap", "--force", "--no-migrate"] if not workers: cmd.append("--no-kafka") returncode = call(cmd) if returncode > 0: sys.exit(returncode) # Run migrations returncode = call(["snuba", "migrations", "migrate", "--force"]) if returncode > 0: sys.exit(returncode) daemons = [("api", ["snuba", "api"])] if not workers: os.execvp(daemons[0][1][0], daemons[0][1]) daemons += [ ( "transaction-consumer", [ "snuba", "consumer", "--auto-offset-reset=latest", "--log-level=debug", "--storage=transactions", "--consumer-group=transactions_group", "--commit-log-topic=snuba-commit-log", ], ), ( "sessions-consumer", [ "snuba", "consumer", "--auto-offset-reset=latest", "--log-level=debug", "--storage=sessions_raw", "--consumer-group=sessions_group", ], ), ( "consumer", [ "snuba", "consumer", "--auto-offset-reset=latest", "--log-level=debug", "--storage=events", ], ), ( "replacer", [ "snuba", "replacer", "--auto-offset-reset=latest", "--log-level=debug", "--storage=events", ], ), ( "subscriptions-consumer-events", [ "snuba", "subscriptions", "--auto-offset-reset=latest", "--log-level=debug", "--max-batch-size=1", "--consumer-group=snuba-events-subscriptions-consumers", "--topic=events", "--result-topic=events-subscription-results", "--dataset=events", "--commit-log-topic=snuba-commit-log", "--commit-log-group=snuba-consumers", "--delay-seconds=1", "--schedule-ttl=10", "--max-query-workers=1", ], ), ( "subscriptions-consumer-transactions", [ "snuba", "subscriptions", "--auto-offset-reset=latest", "--log-level=debug", "--max-batch-size=1", "--consumer-group=snuba-transactions-subscriptions-consumers", "--topic=events", "--result-topic=transactions-subscription-results", "--dataset=transactions", "--commit-log-topic=snuba-commit-log", "--commit-log-group=transactions_group", "--delay-seconds=1", "--schedule-ttl=10", "--max-query-workers=1", ], ), ] manager = Manager() for name, cmd in daemons: manager.add_process( name, list2cmdline(cmd), quiet=False, ) manager.loop() sys.exit(manager.returncode)
def hg_merge_via_debugsetparents(self, cwd, old_head, new_head, preserve_tags=True, user=None): """ Merge 2 heads avoiding non-fastforward commits """ hg = self.query_exe('hg', return_type='list') cmd = hg + ['debugsetparents', new_head, old_head] self.run_command(cmd, cwd=cwd, error_list=HgErrorList, halt_on_failure=True) self.hg_commit( cwd, message="Merge old head via |hg debugsetparents %s %s|. " "CLOSED TREE DONTBUILD a=release" % (new_head, old_head), user=user ) if preserve_tags: # I don't know how to do this elegantly. # I'm reverting .hgtags to old_head, then appending the new tags # from new_head to .hgtags, and hoping nothing goes wrong. # I'd rather not write patch files from scratch, so this seems # like a slightly more complex but less objectionable method? self.info("Trying to preserve tags from before debugsetparents...") dirs = self.query_abs_dirs() patch_file = os.path.join(dirs['abs_work_dir'], 'patch_file') self.run_command( subprocess.list2cmdline(hg + ['diff', '-r', old_head, '.hgtags', '-U9', '>', patch_file]), cwd=cwd, ) self.run_command( ['patch', '-R', '-p1', '-i', patch_file], cwd=cwd, halt_on_failure=True, ) tag_diff = self.read_from_file(patch_file) with self.opened(os.path.join(cwd, '.hgtags'), open_mode='a') as (fh, err): if err: self.fatal("Can't append to .hgtags!") for n, line in enumerate(tag_diff.splitlines()): # The first 4 lines of a patch are headers, so we ignore them. if n < 5: continue # Even after that, the only lines we really care about are # additions to the file. # TODO: why do we only care about additions? I couldn't # figure that out by reading this code. if not line.startswith('+'): continue line = line.replace('+', '') (changeset, tag) = line.split(' ') if len(changeset) != 40: continue fh.write("%s\n" % line) out = self.get_output_from_command(['hg', 'status', '.hgtags'], cwd=cwd) if out: self.hg_commit( cwd, message="Preserve old tags after debugsetparents. " "CLOSED TREE DONTBUILD a=release", user=user, ) else: self.info(".hgtags file is identical, no need to commit")
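# A note on the .hgtags parsing above: line.replace('+', '') strips every '+'
# in the line, not just the leading diff marker, and the 40-character check is
# what filters out diff noise. A tighter version of the same filter
# (illustrative, not the mozharness API):
def added_tag_lines(tag_diff):
    """Yield '<changeset> <tag>' lines added in a unified diff of .hgtags."""
    for n, line in enumerate(tag_diff.splitlines()):
        if n < 5 or not line.startswith('+'):
            continue  # skip the patch headers and anything but additions
        line = line[1:]  # drop only the leading '+' marker
        changeset, _, tag = line.partition(' ')
        if len(changeset) == 40 and tag:
            yield line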
def run_tests(self):
    """
    Run the tests
    """
    self.start_time = datetime.datetime.now()
    max_per_test_time = datetime.timedelta(minutes=60)
    per_test_args = []
    suites = self._query_suites()
    minidump = self.query_minidump_stackwalk()
    for (per_test_suite, suite) in suites:
        self.test_suite = suite
        try:
            cwd = self._query_tests_dir()
        except Exception:
            self.fatal("Don't know how to run --test-suite '%s'!" % self.test_suite)
        env = self.query_env()
        if minidump:
            env['MINIDUMP_STACKWALK'] = minidump
        env['MOZ_UPLOAD_DIR'] = self.query_abs_dirs()['abs_blob_upload_dir']
        env['MINIDUMP_SAVE_PATH'] = self.query_abs_dirs()['abs_blob_upload_dir']
        env['RUST_BACKTRACE'] = 'full'
        summary = None
        for per_test_args in self.query_args(per_test_suite):
            if (datetime.datetime.now() - self.start_time) > max_per_test_time:
                # Running tests has run out of time. That is okay! Stop running
                # them so that a task timeout is not triggered, and so that
                # (partial) results are made available in a timely manner.
                self.info("TinderboxPrint: Running tests took too long: "
                          "Not all tests were executed.<br/>")
                # Signal per-test time exceeded, to break out of suites and
                # suite categories loops also.
                return
            cmd = self._build_command()
            final_cmd = copy.copy(cmd)
            if len(per_test_args) > 0:
                # in per-test mode, remove any chunk arguments from command;
                # filter into a new list rather than calling remove() while
                # iterating, which would skip the element after each removal
                final_cmd = [arg for arg in cmd
                             if 'total-chunk' not in arg and 'this-chunk' not in arg]
                final_cmd.extend(per_test_args)
            self.info("Running on %s the command %s"
                      % (self.device_name, subprocess.list2cmdline(final_cmd)))
            self.info("##### %s log begins" % self.test_suite)
            suite_category = self.test_suite
            parser = self.get_test_output_parser(suite_category,
                                                 config=self.config,
                                                 log_obj=self.log_obj,
                                                 error_list=[])
            self.run_command(final_cmd, cwd=cwd, env=env, output_parser=parser)
            tbpl_status, log_level, summary = parser.evaluate_parser(0, summary)
            parser.append_tinderboxprint_line(self.test_suite)
            self.info("##### %s log ends" % self.test_suite)
            if len(per_test_args) > 0:
                self.record_status(tbpl_status, level=log_level)
                self.log_per_test_status(per_test_args[-1], tbpl_status, log_level)
                if tbpl_status == TBPL_RETRY:
                    self.info("Per-test run abandoned due to RETRY status")
                    return
            else:
                self.record_status(tbpl_status, level=log_level)
                self.log("The %s suite: %s ran with return status: %s"
                         % (suite_category, suite, tbpl_status), level=log_level)
elif opt == "-c": # Specifies a different comment character. comment_char = settings.pop(0) elif opt == "-t": testing = True else: print("Invalid option.") sys.exit(1) # sanity check command line for setting in settings: try: name, value = setting.split("=", 1) except: import subprocess print("Invalid command line: ", subprocess.list2cmdline(sys.argv)) # create the new config file in memory found = set() buf = "" input_lines = list(open(filename)) while len(input_lines) > 0: line = input_lines.pop(0) # If this configuration file uses folded lines, append any folded lines # into our input buffer. if folded_lines and line[0] not in (comment_char, " ", ""): while len(input_lines) > 0 and input_lines[0][0] in " \t": line += input_lines.pop(0)
def mockExecuteOrExit(command): print("Mocking successful execution of: %s" % subprocess.list2cmdline(command)) return (0, '', '')
def shell_output(self, *args) -> str: return self._client.shell(self._serial, subprocess.list2cmdline(args))
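# Because the device side runs a real shell, flattening the argv with
# list2cmdline gets quoting mostly right for free; note it applies Windows
# quoting rules, which coincide with POSIX double-quoting for the common
# cases. For example:
import subprocess

args = ("ls", "-l", "/sdcard/My Photos")
print(subprocess.list2cmdline(args))  # ls -l "/sdcard/My Photos"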