def invoke_gcc_local(args):
    if which("arm-eabi-gcc") is not None:
        args.insert(0, "arm-eabi-gcc")
        os.execvp("arm-eabi-gcc", args)
    else:
        args.insert(0, "arm-none-eabi-gcc")
        os.execvp("arm-none-eabi-gcc", args)
def main(argv):
    if len(argv) > 1 and argv[1] == "-n":
        # no passphrase
        passphrase = None
    else:
        # first read a line containing the passphrase
        passphrase = sys.stdin.readline().strip()
    # fork with pty
    pid, master = pty.fork()
    assert pid != -1
    if pid == 0:
        # child. run ssh
        os.execvp("ssh", ssh)
    else:
        # parent. talk to child.
        s = parent(master, passphrase)
        # ensure child is gone
        cleanup(pid)
        # write whatever we get from child
        os.write(1, s)
        # wait for child to disappear
        qid, status = os.wait()
        assert pid == qid
        if os.WIFEXITED(status):
            # child normally exited. forward its status
            os._exit(os.WEXITSTATUS(status))
        else:
            # child was killed. return 255
            os._exit(255)
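# The snippet above relies on helpers (`parent`, `cleanup`, `ssh`) that are not
# shown. A minimal sketch of what `parent` might look like, assuming it waits
# for ssh's passphrase prompt on the pty and then feeds the passphrase; the
# prompt-detection string and buffer size are illustrative assumptions.
def parent(master, passphrase):
    output = b""
    while True:
        try:
            data = os.read(master, 4096)
        except OSError:  # pty raises EIO once the child exits
            break
        if not data:
            break
        if passphrase is not None and b"passphrase" in data.lower():
            os.write(master, passphrase.encode() + b"\n")
            continue
        output += data
    return output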
def edit_proxy():
    """ Configure proxy settings """
    res, http_proxy = d.inputbox(text="HTTP proxy string, for example http://<user>:<password>@<proxy>:<port>. Leave empty for no proxy.")
    if res != d.OK:
        return
    res, https_proxy = d.inputbox(text="HTTPS proxy string, for example http://<user>:<password>@<proxy>:<port>. Leave empty for no proxy.")
    if res != d.OK:
        return
    with open('/tmp/00proxy', 'w+') as f:
        f.write('Acquire::http::Proxy "' + http_proxy + '";')
    os.system("sudo mv /tmp/00proxy /etc/apt/apt.conf.d/00proxy")
    os.system("sudo chown root /etc/apt/apt.conf.d/00proxy")
    os.system("sudo chmod 744 /etc/apt/apt.conf.d/00proxy")
    with open('/tmp/proxy.sh', 'w+') as f:
        f.write('export http_proxy="' + http_proxy + '"\n')
        f.write('export https_proxy="' + https_proxy + '"\n')
    os.system("sudo mv /tmp/proxy.sh /etc/profile.d/proxy.sh")
    os.system("sudo chown root /etc/profile.d/proxy.sh")
    os.system("sudo chmod 744 /etc/profile.d/proxy.sh")
    os.system("sudo cp /etc/profile.d/proxy.sh /etc/default/docker")
    d.msgbox("The GNS3 VM will reboot")
    os.execvp("sudo", ['/usr/bin/sudo', "reboot"])
def main(sys_args):
    sys_args, jython_opts = decode_args(sys_args)
    args, jython_args = parse_launcher_args(sys_args)
    jython_command = JythonCommand(args, jython_opts + jython_args)
    command = jython_command.command

    if args.profile and not args.help:
        try:
            os.unlink("profile.txt")
        except OSError:
            pass
    if args.print_requested and not args.help:
        if jython_command.uname == "windows":
            print subprocess.list2cmdline(jython_command.command)
        else:
            print " ".join(pipes.quote(arg) for arg in jython_command.command)
    else:
        if not (is_windows or not hasattr(os, "execvp") or args.help or
                jython_command.uname == "cygwin"):
            # Replace this process with the java process.
            #
            # NB such replacements actually do not work under Windows,
            # but if tried, they also fail very badly by hanging.
            # So don't even try!
            os.execvp(command[0], command[1:])
        else:
            result = 1
            try:
                result = subprocess.call(command)
                if args.help:
                    print_help()
            except KeyboardInterrupt:
                pass
            sys.exit(result)
def exec_storm_class(klass, jvmtype="-server", jvmopts=[], extrajars=[],
                     args=[], fork=False, daemon=True, client=False, daemonName=""):
    global CONFFILE
    storm_log_dir = confvalue("storm.log.dir", [CLUSTER_CONF_DIR])
    if storm_log_dir == None or storm_log_dir == "null":
        storm_log_dir = os.path.join(STORM_DIR, "logs")
    all_args = [
        JAVA_CMD, jvmtype,
        "-Ddaemon.name=" + daemonName,
        get_config_opts(),
        "-Dstorm.home=" + STORM_DIR,
        "-Dstorm.log.dir=" + storm_log_dir,
        "-Djava.library.path=" + confvalue("java.library.path", extrajars, daemon),
        "-Dstorm.conf.file=" + CONFFILE,
        "-cp", get_classpath(extrajars, daemon, client=client),
    ] + jvmopts + [klass] + list(args)
    print("Running: " + " ".join(all_args))
    sys.stdout.flush()
    exit_code = 0
    if fork:
        exit_code = os.spawnvp(os.P_WAIT, JAVA_CMD, all_args)
    elif is_windows():
        # handling whitespaces in JAVA_CMD
        try:
            ret = sub.check_output(all_args, stderr=sub.STDOUT)
            print(ret)
        except sub.CalledProcessError as e:
            print(e.output)
            sys.exit(e.returncode)
    else:
        os.execvp(JAVA_CMD, all_args)
    return exit_code
def run_captured(self, cmd):
    """Run a command, capturing stdout and stderr.

    Based in part on popen2.py.  Returns (waitstatus, stdout, stderr)."""
    import os, types
    pid = os.fork()
    if pid == 0:
        # child: redirect stdout/stderr to per-pid files, then exec
        try:
            pid = os.getpid()
            openmode = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
            outfd = os.open('%d.out' % pid, openmode, 0666)
            os.dup2(outfd, 1)
            os.close(outfd)
            errfd = os.open('%d.err' % pid, openmode, 0666)
            os.dup2(errfd, 2)
            os.close(errfd)
            if isinstance(cmd, types.StringType):
                cmd = ['/bin/sh', '-c', cmd]
            os.execvp(cmd[0], cmd)
        finally:
            # only reached if execvp itself failed; a successful exec
            # replaces this process image and never returns
            os._exit(127)
    else:
        # parent
        exited_pid, waitstatus = os.waitpid(pid, 0)
        stdout = open('%d.out' % pid).read()
        stderr = open('%d.err' % pid).read()
        return waitstatus, stdout, stderr
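# A minimal usage sketch for run_captured above. It takes `self`, so assume it
# lives on some helper class; the class name is an illustrative assumption:
class Runner(object):
    run_captured = run_captured

waitstatus, out, err = Runner().run_captured(['echo', 'hello'])
assert os.WIFEXITED(waitstatus) and os.WEXITSTATUS(waitstatus) == 0
print(out)  # -> 'hello\n'; note the '<pid>.out'/'<pid>.err' files land in cwd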
def vm_information():
    """ Show IP, SSH settings.... """
    content = "Welcome to GNS3 appliance\n\n"
    version = gns3_version()
    if version is None:
        content += "GNS3 is not installed please install it with sudo pip3 install gns3-server. Or download a preinstalled VM.\n\n"
    else:
        content += "GNS3 version: {gns3_version}\nVM version: {gns3vm_version}\nKVM support available: {kvm}\n\n".format(
            gns3vm_version=gns3vm_version(),
            gns3_version=version,
            kvm=kvm_support())
    ip = get_ip()
    if ip:
        content += "IP: {ip}\n\nTo log in using SSH:\nssh gns3@{ip}\nPassword: gns3\n\nImages and projects are located in /opt/gns3".format(ip=ip)
    else:
        content += "eth0 is not configured. Please manually configure it via the Networking menu."
    content += "\n\nRelease channel: " + get_release()
    try:
        d.msgbox(content)
    # If it's an scp command or any bugs
    except:
        os.execvp("bash", ['/bin/bash'])
def test(coverage=False):
    """Run the unit tests."""
    if coverage and not os.environ.get('FLASK_COVERAGE'):
        import sys
        os.environ['FLASK_COVERAGE'] = '1'
        # restart this script so coverage is enabled from the very start
        os.execvp(sys.executable, [sys.executable] + sys.argv)
    import unittest
    print('Unit tests:')
    tests = unittest.TestLoader().discover('tests')
    result = unittest.TextTestRunner(verbosity=2).run(tests)
    if COV:
        COV.stop()
        COV.save()
        print('Coverage Summary:')
        COV.report()
        basedir = os.path.abspath(os.path.dirname(__file__))
        covdir = os.path.join(basedir, 'coverage')
        COV.html_report(directory=covdir)
        print('HTML version: file://%s/index.html' % covdir)
        COV.erase()
    if result.wasSuccessful():
        return 0
    else:
        return 1
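# The function above expects a module-level COV object created before the app
# is imported, so measurement starts as early as possible. A minimal sketch,
# assuming the `coverage` package and an `app/` source tree (both assumptions,
# not shown in the original):
COV = None
if os.environ.get('FLASK_COVERAGE'):
    import coverage
    COV = coverage.Coverage(branch=True, include='app/*')
    COV.start()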
def autocompile(ws, conf, env, **options):
    """Subcommand: autocompile -- automatically re-compiles when something in
    content-dir has changed and parallel serving files."""
    CONF_PY = './conf.py'
    mtime = -1
    cmtime = getmtime(CONF_PY)
    while True:
        ntime = max(
            max(getmtime(e) for e in readers.filelist(conf['content_dir']) if utils.istext(e)),
            max(getmtime(p) for p in readers.filelist(conf['layout_dir'])))
        if mtime != ntime:
            try:
                compile(conf, env, **options)
            except AcrylamidException as e:
                log.fatal(e.args[0])
            event.reset()
            mtime = ntime
        if cmtime != getmtime(CONF_PY):
            log.info(' * Restarting due to change in %s' % (CONF_PY))
            # Kill the webserver
            ws.shutdown()
            # Force compilation since no template was changed
            argv = sys.argv if options['force'] else sys.argv[:] + ["--force"]
            # Restart acrylamid
            os.execvp(sys.argv[0], argv)
        time.sleep(1)
def test_dispatch_cups(self):
    """Test dispatch_command dispatching to CUPS"""
    # record the expected call on the mocked-out os.execvp, then replay
    os.execvp('cups-lp', ['lp', '-dajax'])
    self.mox.ReplayAll()
    common.dispatch_command(common.SYSTEM_CUPS, 'lp', ['-dajax'])
def contain(command, image_name, image_dir, container_id, container_dir):
    linux.unshare(linux.CLONE_NEWNS)  # create a new mount namespace
    linux.mount(None, '/', None, linux.MS_PRIVATE | linux.MS_REC, None)  # TODO: we added MS_REC here. wanna guess why?

    new_root = create_container_root(image_name, image_dir, container_id, container_dir)
    print('Created a new root fs for our container: {}'.format(new_root))

    # Create mounts (/proc, /sys, /dev) under new_root
    linux.mount('proc', os.path.join(new_root, 'proc'), 'proc', 0, '')
    linux.mount('sysfs', os.path.join(new_root, 'sys'), 'sysfs', 0, '')
    linux.mount('tmpfs', os.path.join(new_root, 'dev'), 'tmpfs',
                linux.MS_NOSUID | linux.MS_STRICTATIME, 'mode=755')

    # Add some basic devices
    devpts_path = os.path.join(new_root, 'dev', 'pts')
    if not os.path.exists(devpts_path):
        os.makedirs(devpts_path)
        linux.mount('devpts', devpts_path, 'devpts', 0, '')
    makedev(os.path.join(new_root, 'dev'))

    os.chroot(new_root)  # TODO: replace with pivot_root
    os.chdir('/')
    # TODO: umount2 old root (HINT: see MNT_DETACH in man mount)

    os.execvp(command[0], command)
def start_stud(exe, front_port, back_port, hmac_key, capath, keycertfile):
    ppid = os.getpid()
    pid = os.fork()
    if pid > 0:
        return pid
    try:
        MAXFD = os.sysconf("SC_OPEN_MAX")
    except:
        MAXFD = 256
    os.closerange(0, MAXFD)
    cmd = [exe,
           "--backend", "[127.0.0.1]:%d" % back_port,
           "--frontend", "[*]:%d" % front_port,
           "--verify-depth", "10",
           "--verify-proxy", "--verify-require", "--inject-chain",
           "--ca-path", capath,
           "--hmac-key", hmac_key.encode("hex"),
           keycertfile]
    try:
        os.execvp(cmd[0], cmd)
        raise RuntimeError("execvp failed for cmd: " + repr(cmd))
    except Exception, exc:
        sys.stderr = open("/dev/tty", "w")
        sys.stderr.write("Failed to start stud: " + str(exc) + "\n")
        sys.stderr.write("Command line was:\n")
        sys.stderr.write(" ".join(cmd) + "\n")
        sys.stderr.flush()
        os.kill(ppid, 15)
        sys.exit(1)
def runshell(self):
    args = [self.executable_name, self.connection.settings_dict['NAME']]
    if os.name == 'nt':
        sys.exit(os.system(" ".join(args)))
    else:
        os.execvp(self.executable_name, args)
def command_inspect(args):
    'Implements the inspect subcommand'
    command_line = [
        'docker', 'run', '-it',
        '--entrypoint', args.shell,
    ]
    if 'submission' in args and args.submission is not None:
        command_line.append('-v')
        command_line.append(common.mk_submission_volume_str(args.submission))
    if not args.allow_network:
        command_line.append('--net')
        command_line.append('none')
    if not args.unlimited_memory:
        command_line.append('-m')
        command_line.append('1g')
    if not args.no_rm:
        command_line.append('--rm')
    if not args.super_user:
        # Note: docker run CLI doesn't support setting the group. :-(
        command_line.append('-u')
        command_line.append('1000')
    command_line.append(args.containerId)
    logging.debug("About to execute command: %s", ' '.join(command_line))
    os.execvp('docker', command_line)
def flower(args):
    broka = conf.get('celery', 'BROKER_URL')
    port = '--port={}'.format(args.port)
    api = ''
    if args.broker_api:
        api = '--broker_api=' + args.broker_api
    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "flower", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            os.execvp("flower", ['flower', '-b', broka, port, api])
        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        os.execvp("flower", ['flower', '-b', broka, port, api])
def run_bg(cmd, debug=False, cwd=''):
    '''
    run_bg(cmd)

    Works the same as ``ave.cmd.run()``, except that it is non-blocking.

    :returns: A *(PID, file descriptor)* tuple. The file descriptor is
        attached to the new process' pseudoterminal and will carry all
        messages written to ``stdout`` and ``stderr``.

    .. note:: The caller *must* eventually use ``os.wait()`` or one of its
        variations on the PID and ``os.close()`` on the file descriptor.
        Failing to perform these cleanups will lead to defunct processes
        and/or running out of pseudoterminals.
    '''
    # make sure 'cmd' is a list of strings
    if type(cmd) in [str, unicode]:
        cmd = [c for c in cmd.split() if c != '']
    if debug:
        sys.stderr.write(' '.join(cmd) + '\n')
        sys.stderr.flush()
    try:
        (child_pid, child_fd) = pty.fork()
    except OSError as e:
        raise RunError(cmd, None, message='pty.fork() failed: %s' % str(e))
    if child_pid == 0:
        try:
            if cwd != '':
                os.chdir(cwd)
            os.execvp(cmd[0], cmd)
        except Exception, e:
            raise RunError(cmd, None, 'os.execvp() failed: %s' % str(e))
def main():
    """Parse options and call the appropriate class/method."""
    CONF.register_cli_opt(category_opt)
    script_name = sys.argv[0]
    if len(sys.argv) < 2:
        print(_("\nOpenStack manila version: %(version)s\n") %
              {"version": version.version_string()})
        print(script_name + " category action [<args>]")
        print(_("Available categories:"))
        for category in CATEGORIES:
            print("\t%s" % category)
        sys.exit(2)

    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project="manila", version=version.version_string())
        log.setup(CONF, "manila")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp("sudo", ["sudo", "-u", "#%s" % st.st_uid] + sys.argv)
            except Exception:
                print(_("sudo failed, continuing as if nothing happened"))
        print(_("Please re-run manila-manage as root."))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
def find_FIFE(fife_custom_path=None):
    """Inserts path to fife engine to $LD_LIBRARY_PATH (environment variable).
    If it's already there, the function will return, else it will restart uh
    with correct $LD_LIBRARY_PATH."""
    global logfilename
    fife_path = get_fife_path(fife_custom_path)  # terminates program if fife can't be found

    os.environ['LD_LIBRARY_PATH'] = os.path.pathsep.join(
        [os.path.abspath(fife_path + '/' + a) for
         a in ('ext/minizip', 'ext/install/lib')] +
        (os.environ['LD_LIBRARY_PATH'].split(os.path.pathsep) if
         os.environ.has_key('LD_LIBRARY_PATH') else []))

    log().debug("Restarting with proper LD_LIBRARY_PATH...")
    log_paths()

    # assemble args (python run_uh.py ..)
    args = [sys.executable] + sys.argv + ["--fife-in-library-path"]

    # WORKAROUND: windows systems don't handle spaces in arguments for execvp correctly.
    if platform.system() != 'Windows':
        if logfilename:
            args += ["--logfile", logfilename]
        log().debug("Restarting with args %s", args)
        os.execvp(args[0], args)
    else:
        args[1] = "\"%s\"" % args[1]
        args += ["--logfile", "\"%s\"" % logfilename]
        log().debug("Restarting using windows workaround with args %s", args)
        os.system(" ".join(args))
        sys.exit(0)
def dispatchExternalHandler(type, **info):
    """ Execute an external script after post processing """
    if isWindows() or Hellanzb.EXTERNAL_HANDLER_SCRIPT is None:
        return  # FIXME: daemonize/os.fork/etc doesn't work on Windows

    type = type is SUCCESS and 'SUCCESS' or 'ERROR'
    # the info dict should include four params: archive name, archive dest
    # dir, elapsed time, and parMessage; parMessage may be empty.
    cmdArgs = ['', type, info['archiveName'], info['destDir'],
               info['elapsedTime'], info['parMessage']]
    errorMsg = "Dispatch of external handler: %s failed." % \
        Hellanzb.EXTERNAL_HANDLER_SCRIPT
    try:
        pid = os.fork()
        if pid < 0:
            debug(errorMsg)
        elif pid == 0:
            daemonize()
            try:
                os.execvp(Hellanzb.EXTERNAL_HANDLER_SCRIPT, cmdArgs)
            except Exception, e:
                debug(errorMsg, e)
    except:
        debug(errorMsg)
def submit(sourcejar, destjar, config, venv, name, definition, logdir,
           extrastormcp, config_override={}):
    # Build a topology jar and submit it to Storm.
    if not sourcejar:
        sourcejar = get_sourcejar()
    build_jar(
        source_jar_path=sourcejar,
        dest_jar_path=destjar,
        config=config,
        definition=definition,
        venv=venv,
        logdir=logdir)

    storm_class_path = [
        subprocess.check_output([get_storm_cmd(), 'classpath']).strip(),
        destjar
    ]
    if extrastormcp is not None:
        storm_class_path = [extrastormcp] + storm_class_path
    storm_home = os.path.dirname(os.path.dirname(
        subprocess.check_output(['which', get_storm_cmd()])))

    storm_options = ''
    if config_override:
        storm_options = ['%s=%s' % item for item in config_override.iteritems()]
        storm_options = ','.join(storm_options).replace(' ', '%%%%')

    submit_args = [
        "",
        "-client",
        "-Dstorm.options=%s" % storm_options,
        "-Dstorm.home=%s" % storm_home,
        "-cp", ":".join(storm_class_path),
        "-Dstorm.jar=%s" % destjar,
        "storm.petrel.GenericTopology",
    ]
    if name:
        submit_args += [name]
    os.execvp('java', submit_args)
def pipeline(command):
    try:
        if '&' in command:
            amper = 1
        else:
            amper = 0
        # child process
        child = os.fork()
        if child == 0:
            while '|' in command:
                # get '|' index position
                pipe_index = command.index('|')
                r, w = os.pipe()
                grand_child = os.fork()
                if grand_child == 0:
                    os.dup2(w, 1)   # make stdout (fd 1) point at the write end
                    os.close(w)
                    os.close(r)     # close the unused read end
                    os.execvp(command[0], command[0:pipe_index])  # execute command before '|'
                os.dup2(r, 0)       # this side of the pipe reads from r as stdin
                os.close(r)
                os.close(w)
                del command[:pipe_index + 1]
            os.execvp(command[0], command)
        else:
            if amper == 0:
                os.waitpid(child, 0)
    except:
        print("Unexpected error2:", sys.exc_info()[0])
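# A usage sketch for pipeline() above: the command is a flat token list in
# which '|' separates the stages (this calling convention is inferred from how
# the function indexes the list; note it mutates the list it is given):
pipeline(['ls', '-l', '|', 'grep', '.py', '|', 'wc', '-l'])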
def main():
    CONF.register_cli_opt(category_opt)
    try:
        log.register_options(CONF)
        CONF(sys.argv[1:], project='rumster',
             version=version.version_info.version_string())
        log.setup(CONF, "rumster")
    except cfg.ConfigFilesNotFoundError:
        cfgfile = CONF.config_file[-1] if CONF.config_file else None
        if cfgfile and not os.access(cfgfile, os.R_OK):
            st = os.stat(cfgfile)
            print(_LI("Could not read %s. Re-running with sudo") % cfgfile)
            try:
                os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)
            except Exception:
                print(_LI('sudo failed, continuing as if nothing happened'))
        print(_LI('Please re-run rumster-manage as root.'))
        sys.exit(2)

    fn = CONF.category.action_fn
    fn_args = fetch_func_args(fn)
    fn(*fn_args)
def main():
    # note: the flag name was inconsistent in the original
    # (sig_usr_1 vs. sig_user_1); it is unified to sig_usr_1 here
    global sig_usr_1
    if len(sys.argv) <= 2:
        print_usage_and_die()
    report_file = sys.argv[1]
    if os.path.abspath(report_file) != report_file:
        print("report-file must be an absolute path. Got %r" % (report_file,),
              file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(report_file):
        print("No such file %r" % (report_file,), file=sys.stderr)
        sys.exit(1)
    command = sys.argv[2:]
    assert command

    reporter = open(report_file, "w")

    def report_field(field, value):
        reporter.write("%s: %d\n" % (field, value))
        reporter.flush()

    report_field(CONTROLLER, os.getpid())

    r, w = os.pipe()
    child = os.fork()
    sys.stdout.flush()
    if not child:
        try:
            os.close(w)
            while True:
                time.sleep(0.01)
                data = os.read(r, 1)
                if not data:
                    continue
                progress = data == b"1"
                os.close(r)
                assert progress, data
                break
            reporter.close()
            os.execvp(command[0], command)
        except:
            traceback.print_exc()
            os._exit(COMMAND_FAILED_STATUS)

    report_field(CHILD, child)
    os.close(r)

    def awaken_child(signum, frame):
        global sig_usr_1
        sig_usr_1 = True
        os.write(w, b"1")
        os.close(w)

    signal.signal(signal.SIGUSR1, awaken_child)
    while not sig_usr_1:
        signal.pause()

    _, exit_status = os.waitpid(child, 0)
    report_field(EXIT_STATUS, exit_status)
    reporter.close()
    # Long sleep to give time to snapshot the screen
    time.sleep(1)
def _run_job_in_hadoop(self):
    self._counters = []

    for step_num in range(self._num_steps()):
        log.debug("running step %d of %d" % (step_num + 1, self._num_steps()))
        step_args = self._args_for_step(step_num)
        log.debug("> %s" % cmd_line(step_args))

        # try to use a PTY if it's available
        try:
            pid, master_fd = pty.fork()
        except (AttributeError, OSError):
            # no PTYs, just use Popen
            step_proc = Popen(step_args, stdout=PIPE, stderr=PIPE)
            self._process_stderr_from_streaming(step_proc.stderr)
            # there shouldn't be much output to STDOUT
            for line in step_proc.stdout:
                log.error("STDOUT: " + to_string(line.strip(b"\n")))
            returncode = step_proc.wait()
        else:
            # we have PTYs
            if pid == 0:
                # we are the child process
                os.execvp(step_args[0], step_args)
            else:
                with os.fdopen(master_fd, "rb") as master:
                    # reading from master gives us the subprocess's
                    # stderr and stdout (it's a fake terminal)
                    self._process_stderr_from_streaming(master)
                _, returncode = os.waitpid(pid, 0)

        if returncode == 0:
            # parsing needs step number for whole job
            self._fetch_counters([step_num + self._start_step_num])
            # printing needs step number relevant to this run of mrjob
            self.print_counters([step_num + 1])
        else:
            msg = "Job failed with return code %d: %s" % (returncode, step_args)
            log.error(msg)
            # look for a Python traceback
            cause = self._find_probable_cause_of_failure(
                [step_num + self._start_step_num])
            if cause:
                # log cause, and put it in exception
                cause_msg = []  # lines to log and put in exception
                cause_msg.append("Probable cause of failure (from %s):" %
                                 cause["log_file_uri"])
                cause_msg.extend(line.strip("\n") for line in cause["lines"])
                if cause["input_uri"]:
                    cause_msg.append("(while reading from %s)" %
                                     cause["input_uri"])
                for line in cause_msg:
                    log.error(line)
                # add cause_msg to exception message
                msg += "\n" + "\n".join(cause_msg) + "\n"
            raise CalledProcessError(returncode, step_args)
def main():
    """The command line interface for the ``pip-accel`` program."""
    arguments = sys.argv[1:]
    # If no arguments are given, the help text of pip-accel is printed.
    if not arguments:
        usage()
        sys.exit(0)
    # If no install subcommand is given we pass the command line straight
    # to pip without any changes and exit immediately afterwards.
    if 'install' not in arguments:
        # This will not return.
        os.execvp('pip', ['pip'] + arguments)
    else:
        arguments = [arg for arg in arguments if arg != 'install']
        # Initialize logging output.
        coloredlogs.install()
        # Adjust verbosity based on -v, -q, --verbose, --quiet options.
        for argument in list(arguments):
            if match_option(argument, '-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif match_option(argument, '-q', '--quiet'):
                coloredlogs.decrease_verbosity()
        # Perform the requested action(s).
        try:
            accelerator = PipAccelerator(Config())
            accelerator.install_from_arguments(arguments)
        except Exception:
            logger.exception("Caught unhandled exception!")
            sys.exit(1)
def run(valid):
    """Match requested command against list of patterns; the first to match
    is executed.  Otherwise exit with nonzero status.

    valid is a list of (re, path) 2-tuples where:

        re is a compiled regular expression object (or any other object with
        a .match(string) method accepting one argument), or a regular
        expression string to be compiled into an expression.

        path is the full path to the actual executable to be run.
    """
    try:
        cmd = os.environ['SSH_ORIGINAL_COMMAND']
    except KeyError:
        raise SystemExit("Permission denied for interactive shell.")
    for rx, c in valid:
        if isinstance(rx, basestring):
            rx = re.compile(rx)
        if rx.match(cmd):
            # TODO: support a mechanism for rewriting arguments
            # TODO: split() is not good enough; use shlex
            os.execvp(c, cmd.split())
            # os.execvp replaces this process with the new one; the script
            # essentially ends here.  Imagine a call to sys.exit().
    raise SystemExit("Permission denied for that command.")
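# A usage sketch for run() above, e.g. invoked from an authorized_keys
# command="..." forced-command wrapper; the patterns and executable paths
# here are illustrative assumptions:
if __name__ == '__main__':
    run([
        (r'^git-upload-pack ', '/usr/bin/git-upload-pack'),
        (r'^rsync --server ', '/usr/bin/rsync'),
    ])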
def spawn(self, wait=False, silent=False):
    """Fork and exec the target process."""
    if not os.path.isfile(self._file):
        raise OverlordException("%s not found" % (self._file,))
    pid = os.fork()
    if pid == 0:
        # child
        if silent:
            # O_WRONLY added: O_APPEND alone does not grant write access
            fd = os.open("/dev/null", os.O_WRONLY | os.O_APPEND)
            os.dup2(fd, sys.stdout.fileno())
            sys.stdout.flush()
        os.execvp(self._file, self._args)
        os._exit(1)  # only reached if execvp failed
    elif pid > 0:
        # parent
        if wait:
            pid, status = os.wait()
    else:
        sys.stderr.write("fork failed\n")
        pid = -1
    self._pid = pid
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--machine', help='Docker machine config to use')
    parser.add_argument('command', help='make|run')
    args, extra = parser.parse_known_args()

    docker_cmd = ['docker']
    if args.machine:
        docker_cmd.extend(machine_config(args.machine))
    docker_cmd.extend(['run', '-it',
                       '-v', kubos_root + ':/data/riotbuild',
                       '-w', '/data/riotbuild/' + cmd_relpath,
                       'riotbuild'])
    if args.command == 'make':
        docker_cmd.append('make')
        docker_cmd.extend(extra)
    elif args.command == 'run':
        elf_relpath = find_elf()
        if not elf_relpath:
            parser.error('No ELF binaries found in ./bin/native')
        docker_cmd.append(elf_relpath)
        docker_cmd.extend(extra)
    else:
        parser.error('Unknown command: "%s"' % args.command)

    print '> '.join(docker_cmd)
    os.execvp('docker', docker_cmd)
def env(parser, args):
    if not args.spec:
        tty.die("spack env requires a spec.")

    # Specs may have spaces in them, so if they do, require that the
    # caller put a '--' between the spec and the command to be
    # executed.  If there is no '--', assume that the spec is the
    # first argument.
    sep = '--'
    if sep in args.spec:
        s = args.spec.index(sep)
        spec = args.spec[:s]
        cmd = args.spec[s + 1:]
    else:
        spec = args.spec[0]
        cmd = args.spec[1:]

    specs = spack.cmd.parse_specs(spec, concretize=True)
    if len(specs) > 1:
        tty.die("spack env only takes one spec.")
    spec = specs[0]

    build_env.setup_package(spec.package)

    if not cmd:
        # If no command, act like the "env" command and print out env vars.
        for key, val in os.environ.items():
            print "%s=%s" % (key, val)
    else:
        # Otherwise execute the command with the new environment
        os.execvp(cmd[0], cmd)
def run_execve(program, args=None, env=None, do_path_lookup=False):
    if args is None:
        args = [program]
    else:
        args = [program] + args
    if env is None:
        env = {}
    # we cannot directly call execve() because it replaces the
    # current process.
    fd_read, fd_write = os.pipe()
    childpid = os.fork()
    if childpid == 0:
        # in the child
        os.close(fd_read)
        os.dup2(fd_write, 1)  # stdout
        os.close(fd_write)
        if do_path_lookup:
            os.execvp(program, args)
        else:
            rposix.execve(program, args, env)
        assert 0, "should not arrive here"
    else:
        # in the parent
        os.close(fd_write)
        child_stdout = []
        while True:
            data = os.read(fd_read, 4096)
            if not data:
                break  # closed
            child_stdout.append(data)
        pid, status = os.waitpid(childpid, 0)
        os.close(fd_read)
        return status, ''.join(child_stdout)
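# A usage sketch for run_execve above (PATH lookup via os.execvp; the helper
# returns once the child's stdout pipe hits EOF and the child has been reaped):
status, out = run_execve('echo', args=['hello'], do_path_lookup=True)
assert os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0
assert out == 'hello\n'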
def main():
    client = discord.Client()

    # command list
    Command_list = discord.Embed(title="Command List", description="명령어 목록", color=0x00ff00)
    Command_list.add_field(name="$a", value="도움말", inline=False)
    Command_list.add_field(name="$b", value="버전 정보", inline=False)
    Command_list.add_field(name="$d", value="오늘 날짜", inline=False)
    Command_list.add_field(name="$f", value="내일급식", inline=False)
    Command_list.add_field(name="$g", value="급식정보", inline=False)

    # meal-service notice
    meal_notice = ("```css\n"
                   "[안내] 날짜와 급식이 맞지 않는 경우 개발자에게 문의해주세요.\n"
                   "[주의] 2018년 3월 5일 인 경우 17035 로 보낼 것.\n"
                   "[주의] 2017년 10월 1일 인 경우 17101 로 보낼 것.\n"
                   "```")
    plus_meal_notice = ""

    @client.event
    async def on_ready():
        print('Logged in as')
        print(client.user.name)
        print(client.user.id)
        print('---------')
        await client.change_presence(game=discord.Game(name="$a for help"))

    @client.event
    async def on_message(message):
        if message.content.startswith('$a'):
            await client.send_message(message.channel, embed=Command_list)
        elif message.content.startswith('$b'):
            embed = discord.Embed(title="Bot Version", description="updated", color=0x00ff00)
            embed.add_field(name="Version", value="2.3.3", inline=False)
            await client.send_message(message.channel, embed=embed)
        elif message.content.startswith('$d'):
            dt = datetime.datetime.now()
            local_date = dt.strftime("%Y년 %m월 %d일 %H시 %M분 %S초")
            embed = discord.Embed(title="Local Time", description=local_date, color=0x00ff00)
            await client.send_message(message.channel, embed=embed)
        elif message.content.startswith('$f'):
            f_dt = datetime.datetime.today() + datetime.timedelta(days=1)
            local_date2 = f_dt.strftime("%Y.%m.%d")
            local_weekday2 = f_dt.weekday()
            l_diet = get_diet(2, local_date2, local_weekday2)
            d_diet = get_diet(3, local_date2, local_weekday2)
            if len(l_diet) == 1:
                embed = discord.Embed(title="No Meal", description="급식이 없습니다.", color=0x00ff00)
                await client.send_message(message.channel, embed=embed)
            elif len(d_diet) == 1:
                lunch = local_date2 + " 중식\n" + l_diet
                embed = discord.Embed(title="Lunch", description=lunch, color=0x00ff00)
                await client.send_message(message.channel, embed=embed)
            else:
                lunch = local_date2 + " 중식\n" + l_diet
                dinner = local_date2 + " 석식\n" + d_diet
                lunch_e = discord.Embed(title="Lunch", description=lunch, color=0x00ff00)
                await client.send_message(message.channel, embed=lunch_e)
                dinner_e = discord.Embed(title="Dinner", description=dinner, color=0x00ff00)
                await client.send_message(message.channel, embed=dinner_e)
        elif message.content.startswith('$g'):
            request = meal_notice + '\n' + plus_meal_notice + '날짜를 보내주세요...'
            request_e = discord.Embed(title="Send to Me", description=request, color=0xcceeff)
            await client.send_message(message.channel, embed=request_e)
            meal_date = await client.wait_for_message(timeout=15.0, author=message.author)
            if meal_date is None:
                longtimemsg = discord.Embed(
                    title="In 15sec",
                    description='15초내로 입력해주세요. 다시시도 : $g',
                    color=0xff0000)
                await client.send_message(message.channel, embed=longtimemsg)
                return
            else:
                meal_date = str(meal_date.content)  # 171121
                meal_date = '20' + meal_date[:2] + '.' + meal_date[2:4] + '.' + meal_date[4:6]  # 2017.11.21
                s = meal_date.replace('.', ', ')  # 2017, 11, 21
                if int(s[6:8]) < 10:
                    s = s.replace(s[6:8], s[7:8])
                ss = "datetime.datetime(" + s + ").weekday()"
                try:
                    whatday = eval(ss)
                except:
                    warnning = discord.Embed(
                        title="Plz Retry",
                        description='올바른 값으로 다시 시도하세요 : $g',
                        color=0xff0000)
                    await client.send_message(message.channel, embed=warnning)
                    return
                l_diet = get_diet(2, meal_date, whatday)
                d_diet = get_diet(3, meal_date, whatday)
                if len(l_diet) == 1:
                    l_diet = "급식이 없습니다."
                    l_e = discord.Embed(title="Lunch", description=l_diet, color=0x00ff00)
                    await client.send_message(message.channel, embed=l_e)
                elif len(d_diet) == 1:
                    lunch = meal_date + " 중식\n" + l_diet
                    lunch_e = discord.Embed(title="Lunch", description=lunch, color=0x00ff00)
                    await client.send_message(message.channel, embed=lunch_e)
                else:
                    lunch = meal_date + " 중식\n" + l_diet
                    dinner = meal_date + " 석식\n" + d_diet
                    lunch_e = discord.Embed(title="Lunch", description=lunch, color=0x00ff00)
                    await client.send_message(message.channel, embed=lunch_e)
                    dinner_e = discord.Embed(title="Dinner", description=dinner, color=0x00ff00)
                    await client.send_message(message.channel, embed=dinner_e)

    client.run('NDE0NzU0NzM2ODEwNzU0MDY5.DXlfMw.sUeh3BggTGxf_aqBsUhnGB7_2bQ')

    # client.run() returned (e.g. the connection dropped), so re-exec this
    # script to restart the bot
    print("respawn!")
    executable = sys.executable
    args = sys.argv[:]
    args.insert(0, sys.executable)
    os.execvp(executable, args)
def start_virtual_environment(arr, verbose=False):
    path_result = create_data_config_path(arr[0]['filepath'], arr[0]['configpath'])
    if 'Error' in path_result:
        sys.stderr.write(path_result['Error'])
        sys.exit(1)
    else:
        G.verbose = verbose
        start_logging()
        # Set up virtual environment under home since it should be write-able.
        output_dir = os.path.join(G.config_path, 'voltdeploy')
        # Make sure the output directory is available.
        if not os.path.isdir(output_dir):
            if os.path.exists(output_dir):
                abort('Output path "%s" exists, but is not a directory.' % output_dir,
                      'Please move or delete it before running this command again.')
            try:
                os.makedirs(output_dir)
            except (IOError, OSError), e:
                abort('Output path "%s" exists, but is not a directory.' % output_dir,
                      'Please move or delete it before running this command again.', e)
        venv_base = os.path.join(output_dir, 'venv')
        venv_dir = os.path.join(venv_base, sys.platform)
        venv_complete = False
        version = get_version(os.path.dirname(G.script_dir))
        try:
            build_venv = not os.path.isdir(venv_dir)
            if not build_venv:
                # If the virtual environment is present check that it's current.
                # If version.txt is not present leave it alone so that we don't
                # get in the situation where the virtual environment gets
                # recreated every time.
                venv_version = get_version(venv_dir, error_abort=False)
                if venv_version is None:
                    warning('Unable to read the version file:',
                            [os.path.join(venv_dir, 'version.txt')],
                            'Assuming that the virtual environment is current.',
                            'To force a rebuild delete the virtual environment base directory:',
                            [venv_base])
                else:
                    build_venv = venv_version != version
            if build_venv:
                _build_virtual_environment(venv_dir, version)
            else:
                #run_cmd(os.path.join(venv_dir, 'bin', 'pip'), '--quiet', 'install', '-r', packages)
                install_required_packages(os.path.join(venv_dir, 'bin', 'pip'))
            venv_complete = True
            # the virtual environment's Python.
            python = os.path.join(venv_dir, 'bin', 'python')
            args = [python, os.path.join(G.base_dir, 'lib/python/vdm/vdmrunner.py')]
            args.append('-p' + G.data_path)
            args.append('-c' + G.config_path)
            if arr[0]['server'] is not None:
                args.append('-s' + str(arr[0]['server']))
            os.execvp(python, args)
        except KeyboardInterrupt:
            sys.stderr.write('\n<break>\n')
        finally:
            stop_logging()
            # Avoid confusion by cleaning up incomplete virtual environments.
            if not venv_complete and os.path.exists(venv_dir):
                warning('Removing incomplete virtual environment after installation failure ...')
                shutil.rmtree(venv_dir, True)
                return {'status': 'error', 'path_venv_python': ''}
            else:
                return {'status': 'success', 'path_venv_python': python}
#!/usr/bin/env python
import os
import sys
import subprocess
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from PIL import Image

# Re-exec under sudo to obtain root privileges
if os.geteuid() != 0:
    os.execvp("sudo", ["sudo"] + ["python3"] + sys.argv)

# Code below removes the last line from GRUB config if an image has already
# been set.  (The original read readlines()[:-1] and compared that list
# against a string, so the check never matched; fixed here.)
# ------------------------------------------------------------------------------
readCurrent = open('/etc/default/grub', 'r')
lines = readCurrent.readlines()
readCurrent.close()
lastLine = lines[-1] if lines else ""

if lastLine.startswith("GRUB_BACKGROUND="):
    # rewrite the file without the last line
    writeCurrent = open('/etc/default/grub', 'w')
    writeCurrent.writelines(lines[:-1])
    writeCurrent.close()

path = ""

def set_image(data):
    to_write = "GRUB_BACKGROUND=" + data
    writeCurrent = open('/etc/default/grub', 'a+')
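# set_image is cut off above. A hypothetical sketch of how it might finish:
# append the GRUB_BACKGROUND line and regenerate the grub config (update-grub
# is the Debian/Ubuntu wrapper for grub-mkconfig; both the function body and
# the command choice are assumptions, not the original code):
def set_image_sketch(data):
    with open('/etc/default/grub', 'a') as f:
        f.write("GRUB_BACKGROUND=" + data + "\n")
    subprocess.call(["update-grub"])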
def main(args=None):  # noqa
    args = args or make_argument_parser().parse_args()
    for path in args.path:
        sys.path.insert(0, path)

    if args.use_spawn:
        multiprocessing.set_start_method("spawn")

    try:
        if args.pid_file:
            setup_pidfile(args.pid_file)
    except RuntimeError as e:
        with file_or_stderr(args.log_file) as stream:
            logger = setup_parent_logging(args, stream=stream)
            logger.critical(e)
            return RET_PIDFILE

    canteen = multiprocessing.Value(Canteen)
    worker_pipes = []
    worker_processes = []
    for worker_id in range(args.processes):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=worker_process,
            args=(args, worker_id, StreamablePipe(write_pipe), canteen),
            daemon=True,
        )
        proc.start()
        worker_pipes.append(read_pipe)
        worker_processes.append(proc)

    fork_pipes = []
    fork_processes = []
    for fork_id, fork_path in enumerate(chain(args.forks, canteen_get(canteen))):
        read_pipe, write_pipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(
            target=fork_process,
            args=(args, fork_id, fork_path, StreamablePipe(write_pipe)),
            daemon=True,
        )
        proc.start()
        fork_pipes.append(read_pipe)
        fork_processes.append(proc)

    parent_read_pipe, parent_write_pipe = multiprocessing.Pipe()
    logger = setup_parent_logging(args, stream=StreamablePipe(parent_write_pipe))
    logger.info("Dramatiq %r is booting up." % __version__)
    if args.pid_file:
        atexit.register(remove_pidfile, args.pid_file, logger)

    running, reload_process = True, False

    # To avoid issues with signal delivery to user threads on
    # platforms such as FreeBSD 10.3, we make the main thread block
    # the signals it expects to handle before spawning the file
    # watcher and log watcher threads so that those threads can
    # inherit the blocking behaviour.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_BLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    if HAS_WATCHDOG and args.watch:
        file_watcher = setup_file_watcher(args.watch, args.watch_use_polling)

    log_watcher = Thread(
        target=watch_logs,
        args=(args.log_file, [parent_read_pipe, *worker_pipes, *fork_pipes]),
        daemon=False,
    )
    log_watcher.start()

    def stop_subprocesses(signum):
        nonlocal running
        running = False

        for proc in chain(worker_processes, fork_processes):
            try:
                os.kill(proc.pid, signum)
            except OSError:  # pragma: no cover
                if proc.exitcode is None:
                    logger.warning("Failed to send %r to PID %d.", signum.name, proc.pid)

    def sighandler(signum, frame):
        nonlocal reload_process
        reload_process = signum == getattr(signal, "SIGHUP", None)
        if signum == signal.SIGINT:
            signum = signal.SIGTERM

        logger.info("Sending signal %r to subprocesses...", getattr(signum, "name", signum))
        stop_subprocesses(signum)

    # Now that the watcher threads have been started, it should be
    # safe to unblock the signals that were previously blocked.
    if hasattr(signal, "pthread_sigmask"):
        signal.pthread_sigmask(
            signal.SIG_UNBLOCK,
            {signal.SIGINT, signal.SIGTERM, signal.SIGHUP},
        )

    retcode = RET_OK
    signal.signal(signal.SIGINT, sighandler)
    signal.signal(signal.SIGTERM, sighandler)
    if hasattr(signal, "SIGHUP"):
        signal.signal(signal.SIGHUP, sighandler)
    if hasattr(signal, "SIGBREAK"):
        signal.signal(signal.SIGBREAK, sighandler)

    # Wait for all workers to terminate.  If any of the processes
    # terminates unexpectedly, then shut down the rest as well.  The
    # use of `waited' here avoids a race condition where the processes
    # could potentially exit before we even get a chance to wait on
    # them.
    waited = False
    while not waited or any(p.exitcode is None for p in worker_processes):
        waited = True
        for proc in worker_processes:
            proc.join(timeout=1)
            if proc.exitcode is None:
                continue

            if running:  # pragma: no cover
                logger.critical(
                    "Worker with PID %r exited unexpectedly (code %r). Shutting down...",
                    proc.pid, proc.exitcode)
                stop_subprocesses(signal.SIGTERM)
                retcode = proc.exitcode
                break
            else:
                retcode = max(retcode, proc.exitcode)

    for pipe in [parent_read_pipe, parent_write_pipe, *worker_pipes, *fork_pipes]:
        try:
            pipe.close()
        # If the worker process was killed, the handle may already be closed.
        except (EOFError, OSError):
            pass

    # The log watcher can't be a daemon in case we log to a file.  So
    # we have to wait for it to complete on exit.  Closing all the
    # pipes above is what should trigger said exit.
    log_watcher.join()

    if HAS_WATCHDOG and args.watch:
        file_watcher.stop()
        file_watcher.join()

    if reload_process:
        if sys.argv[0].endswith("/dramatiq/__main__.py"):
            return os.execvp(sys.executable, ["python", "-m", "dramatiq", *sys.argv[1:]])
        return os.execvp(sys.argv[0], sys.argv)

    return retcode
        f.close()
        os.chmod("/etc/veil/settings.py", 0o0755)
        print(" [I] Configuration File Written To: '/etc/veil/settings.py'\n")
    else:
        print(" [!] ERROR: PLATFORM NOT CURRENTLY SUPPORTED")
        sys.exit()


if __name__ == '__main__':
    options = {}

    # Check for root access
    if os.geteuid() != 0:
        print("\n [!] ERROR: Not root. Requesting...\n")
        os.execvp("sudo", ["sudo"] + ["python"] + sys.argv)
        sys.exit()

    if platform.system() == "Linux":
        # Check /etc/issue for the exact linux distro
        issue = open("/etc/issue").read()

        # General options
        options["METASPLOIT_PATH"] = "/opt/metasploit-framework/embedded/framework/"
        options["MSFVENOM_OPTIONS"] = ""
        options["MSFVENOM_PATH"] = "/usr/local/bin/"
        options["OPERATING_SYSTEM"] = "Linux"
        options["PYINSTALLER_PATH"] = "/var/lib/veil/PyInstaller-3.2.1/"  # via /config/setup.sh
        options["GOLANG_PATH"] = "/var/lib/veil/go/"  # via /config/setup.sh
def main():
    # Re-exec if we don't have unbuffered i/o.  This is essential to get the
    # server to output its logs synchronous to its operation, such that log
    # output does not remain buffered in the python server.  This is
    # particularly important when an infrequently accessed server redirects
    # output to 'rotatelogs'.
    if 'PYTHONUNBUFFERED' not in os.environ:
        os.environ['PYTHONUNBUFFERED'] = "1"
        os.execvp("python", ["python"] + sys.argv)

    opt = ArgumentParser(usage=__doc__)
    opt.add_argument("-q", "--quiet", action="store_true", dest="quiet", default=False,
                     help="be quiet, don't print unnecessary output")
    opt.add_argument("-v", "--verify", action="store_true", dest="verify", default=False,
                     help="verify daemon is running, restart if not")
    opt.add_argument("-s", "--status", action="store_true", dest="status", default=False,
                     help="check if the server monitor daemon is running")
    opt.add_argument("-k", "--kill", action="store_true", dest="kill", default=False,
                     help="kill any existing already running daemon")
    opt.add_argument("-r", "--restart", action="store_true", dest="restart", default=False,
                     help="restart, kill any existing running daemon first")
    opt.add_argument("-d", "--dir", dest="statedir", metavar="DIR", default=os.getcwd(),
                     help="server state directory (default: current working directory)")
    opt.add_argument("-l", "--log", dest="logfile", metavar="DEST", default=None,
                     help="log to DEST, via pipe if DEST begins with '|', otherwise a file")
    opts, args = opt.parse_known_args()

    if len(args) != 1:
        print("%s: exactly one configuration file required" % sys.argv[0], file=sys.stderr)
        sys.exit(1)
    if not os.path.isfile(args[0]) or not os.access(args[0], os.R_OK):
        print("%s: %s: invalid configuration file" % (sys.argv[0], args[0]), file=sys.stderr)
        sys.exit(1)
    if not opts.statedir or \
       not os.path.isdir(opts.statedir) or \
       not os.access(opts.statedir, os.W_OK):
        print("%s: %s: invalid state directory" % (sys.argv[0], opts.statedir), file=sys.stderr)
        sys.exit(1)

    # Create server object.
    cfg = loadConfigurationFile(args[0])
    app = cfg.main.application.lower()
    server = RESTDaemon(cfg, opts.statedir)

    # Now actually execute the task.
    if opts.status:
        # Show status of running daemon, including exit code matching the
        # daemon status: 0 = running, 1 = not running, 2 = not running but
        # there is a stale pid file.  If silent don't print out anything
        # but still return the right exit code.
        running, pid = server.daemon_pid()
        if running:
            if not opts.quiet:
                print("%s is %sRUNNING%s, PID %d"
                      % (app, COLOR_OK, COLOR_NORMAL, pid))
            sys.exit(0)
        elif pid != None:
            if not opts.quiet:
                print("%s is %sNOT RUNNING%s, stale PID %d"
                      % (app, COLOR_WARN, COLOR_NORMAL, pid))
            sys.exit(2)
        else:
            if not opts.quiet:
                print("%s is %sNOT RUNNING%s"
                      % (app, COLOR_WARN, COLOR_NORMAL))
            sys.exit(1)
    elif opts.kill:
        # Stop any previously running daemon.  If quiet squelch messages,
        # except removal of stale pid file cannot be silenced.
        server.kill_daemon(silent=opts.quiet)
    else:
        # We are handling a server start, in one of many possible ways:
        # normal start, restart (= kill any previous daemon), or verify
        # (= if daemon is running leave it alone, otherwise start).

        # Convert 'verify' to 'restart' if the server isn't running.
        if opts.verify:
            opts.restart = True
            if server.daemon_pid()[0]:
                sys.exit(0)

        # If restarting, kill any previous daemon, otherwise complain if
        # there is a daemon already running here.  Starting overlapping
        # daemons is not supported because pid file would be overwritten
        # and we'd lose track of the previous daemon.
        if opts.restart:
            server.kill_daemon(silent=opts.quiet)
        else:
            running, pid = server.daemon_pid()
            if running:
                print("Refusing to start over an already running daemon, pid %d" % pid,
                      file=sys.stderr)
                sys.exit(1)

        # If we are (re)starting and were given a log file option, convert
        # the logfile option to a list if it looks like a pipe request, i.e.
        # starts with "|", such as "|rotatelogs foo/bar-%Y%m%d.log".
        if opts.logfile:
            if opts.logfile.startswith("|"):
                server.logfile = re.split(r"\s+", opts.logfile[1:])
            else:
                server.logfile = opts.logfile

        # Actually start the daemon now.
        server.start_daemon()
import os
import sys

program = 'python3'
print('Process calling')
arguments = ('called.py',)
# execvp replaces the current process image, so the final print below is
# never reached if the exec succeeds
os.execvp(program, (program,) + arguments)
print('goodbye')
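# If the parent should survive and reach the final print, a subprocess-based
# sketch does the same job without replacing the process image (assumes the
# same called.py sits next to the script):
import subprocess
subprocess.call(['python3', 'called.py'])
print('goodbye')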
usage: arp.py [<target>] [options]

optional arguments:
    -h, --help           Show this help message and exit
    -l, --loglevel=X     logging level [default: 10]
    -m, --changemac      Change to random mac address (can take 30 secs)
    -r, --reset          Reset arp poison (should happen on exit anyway)
"""
import os
import sys

# force run as root
if not os.geteuid() == 0:
    os.execvp(
        "sudo",
        ["env", "PYTHONPATH=%s" % os.environ.get("PYTHONPATH", "")] + sys.argv)

import logging as log
log.getLogger("scapy.runtime").setLevel(log.ERROR)
from scapy.all import send, ARP, sr1, IP, ICMP
from mim.tools import su, setTitle, fwreset
from mim.bash import arp, route
import time
from docopt import docopt

FREQ = 5


def main():
def run(args):
    config, _ = load_configs(args.conffile)
    runid = config['build']['runid']

    if use_docker(config):
        if not runid:
            print("Docker was not enabled when the environment was setup. Cannot use it now!")
            return 1

        docker_path = config['config']['dockerpath']

        try:
            buildid = get_image_id(config, runid)
        except subprocess.CalledProcessError as e:
            print("Cannot verify docker image: %s\n" % e.output)
            return 1

        if buildid != config['build']['buildid']:
            sys.stderr.write("WARNING: buildid for docker image %s has changed\n" % runid)

        if config['config']['buildlocal'] == '1' and \
                config['build']['buildhash'] != get_build_hash(config):
            sys.stderr.write(
                "WARNING: The docker image source has changed and should be rebuilt.\n"
                "Try running: 'pyrex-rebuild'\n")

        # These are "hidden" keys in pyrex.ini that aren't publicized, and
        # are primarily used for testing. Use them at your own risk, they
        # may change.
        uid = int(config['run'].get('uid', os.getuid()))
        gid = int(config['run'].get('gid', os.getgid()))
        username = config['run'].get('username') or pwd.getpwuid(uid).pw_name
        groupname = config['run'].get('groupname') or grp.getgrgid(gid).gr_name

        init_command = config['run'].get('initcommand', config['build']['initcommand'])
        command_prefix = config['run'].get('commandprefix', '').splitlines()

        docker_args = [
            docker_path, 'run',
            '--rm',
            '-i',
            '--net=host',
            '-e', 'PYREX_USER=%s' % username,
            '-e', 'PYREX_UID=%d' % uid,
            '-e', 'PYREX_GROUP=%s' % groupname,
            '-e', 'PYREX_GID=%d' % gid,
            '-e', 'PYREX_HOME=%s' % os.environ['HOME'],
            '-e', 'PYREX_INIT_COMMAND=%s' % init_command,
            '-e', 'PYREX_OEROOT=%s' % config['build']['oeroot'],
            '-e', 'PYREX_CLEANUP_EXIT_WAIT',
            '-e', 'PYREX_CLEANUP_LOG_FILE',
            '-e', 'PYREX_CLEANUP_LOG_LEVEL',
            '-e', 'PYREX_COMMAND_PREFIX=%s' % ' '.join(command_prefix),
            '-e', 'TINI_VERBOSITY',
            '--workdir', os.getcwd(),
        ]

        # Run the docker image with a TTY if this script was run in a tty
        if os.isatty(1):
            docker_args.extend(['-t', '-e', 'TERM=%s' % os.environ['TERM']])

        # Configure binds
        for b in set(config['run']['bind'].split()):
            docker_args.extend(['--mount', 'type=bind,src={b},dst={b}'.format(b=b)])

        # Pass environment variables
        for e in config['run']['envvars'].split():
            docker_args.extend(['-e', e])

        # Special case: Make the user SSH authentication socket available in Docker
        if 'SSH_AUTH_SOCK' in os.environ:
            docker_args.extend([
                '--mount',
                'type=bind,src=%s,dst=/tmp/%s-ssh-agent-sock' % (
                    os.environ['SSH_AUTH_SOCK'], username),
                '-e', 'SSH_AUTH_SOCK=/tmp/%s-ssh-agent-sock' % username,
            ])

        # Pass along BB_ENV_EXTRAWHITE and anything it has whitelisted
        if 'BB_ENV_EXTRAWHITE' in os.environ:
            docker_args.extend(['-e', 'BB_ENV_EXTRAWHITE'])
            for e in os.environ['BB_ENV_EXTRAWHITE'].split():
                docker_args.extend(['-e', e])

        docker_args.extend(shlex.split(config['run'].get('args', '')))

        docker_args.append('--')
        docker_args.append(runid)
        docker_args.extend(args.command)

        stop_coverage()
        os.execvp(docker_args[0], docker_args)
        print("Cannot exec docker!")
        sys.exit(1)
    else:
        startup_args = [os.path.join(config['build']['pyrexroot'], 'docker', 'startup.sh')]
        startup_args.extend(args.command)

        env = os.environ.copy()
        env['PYREX_INIT_COMMAND'] = config['build']['initcommand']
        env['PYREX_OEROOT'] = config['build']['oeroot']

        stop_coverage()
        os.execve(startup_args[0], startup_args, env)
        print("Cannot exec startup script")
        sys.exit(1)
def maybe_reexec(
        *,
        file: str = None,
        module: str = None,
        silence: bool = False,
) -> None:
    if ARGS_ENV_VAR not in os.environ:
        return

    try:
        import pydevd  # noqa
    except ImportError:
        return
    if pydevd.SetupHolder.setup is not None:
        return

    if module is not None:
        if file is not None:
            raise ValueError
        tmpdir = tempfile.mkdtemp()
        bootstrap_path = os.path.join(tmpdir, 'bootstrap.py')
        with open(bootstrap_path, 'w') as f:
            f.write(textwrap.dedent(f"""
                import sys
                old_paths = set(sys.path)
                for new_path in {sys.path!r}:
                    if new_path not in old_paths:
                        sys.path.insert(0, new_path)
                import runpy
                runpy.run_module({module!r}, run_name='__main__')
            """))
        file = bootstrap_path
    else:
        if file is None:
            raise ValueError

    args = [sys.executable]
    args.extend(json.loads(os.environ[ARGS_ENV_VAR]))
    args.extend(['--file', file])
    args.extend(sys.argv[1:])

    if silence:
        tmpdir = tempfile.mkdtemp()
        bootstrap_path = os.path.join(tmpdir, 'bootstrap.py')
        with open(bootstrap_path, 'w') as f:
            f.write(textwrap.dedent(f"""
                import sys
                old_paths = set(sys.path)
                for new_path in {sys.path!r}:
                    if new_path not in old_paths:
                        sys.path.insert(0, new_path)
                _stderr_write = sys.stderr.write
                def stderr_write(*args, **kwargs):
                    code = sys._getframe(1).f_code
                    if code is not None and code.co_filename and code.co_filename.endswith('/pydev_log.py'):
                        return
                    _stderr_write(*args, **kwargs)
                sys.stderr.write = stderr_write
                sys.argv = {args[1:]!r}
                import runpy
                runpy.run_path({args[1]!r}, run_name='__main__')
            """))
        args = [args[0], bootstrap_path]

    os.execvp(sys.executable, args)
    ismask = 0
    # Set MASK to input DEM
    grass.run_command('r.mask', input = inmap)
    # updating raw soil fertility category numbers
    grass.mapcalc('${outmap}=if(isnull(${impacts}) && ${inmap} >= 100 - ${recovery}, 100, if(isnull(${impacts}), (${inmap} + ${recovery}), if(${inmap} >= ${impacts}, (${inmap} - ${impacts}), 0 )))',
        outmap = outmap,
        inmap = inmap,
        recovery = recovery,
        impacts = impacts)
    grass.run_command('r.colors', quiet = 'True', map = outmap, rules = sf_color)
    # checking total area of updated cells
    totarea = grass.read_command('r.stats', flags = 'an', input = impacts, fs = ',', nsteps = '1').split(',')
    grass.message('\n\nTotal area of impacted zones = %s square meters\n\n' % totarea[1])
    # creating optional output text file of stats
    if os.getenv('GIS_FLAG_s') == '1':
        f = file(txtout, 'wt')
        f.write('Stats for ' + outmap + '\n\nTotal area of impacted zones = ' +
                totarea[1] + ' square meters\n\nSoil Fertility Value,Area (sq. m)\n')
        areadict = grass.parse_command('r.stats', flags = 'an', input = impacts)
        for key in areadict:
            f.write(key + '\n')
        f.close()
    grass.message('\nCleaning up...\n\n')
    grass.run_command('r.mask', flags = 'r')
    if ismask == 1:
        grass.run_command('g.rename', quiet = "True", rast = tempmask + ',MASK')
    grass.message('DONE!\n\n')


# here is where the code in "main" actually gets executed. This way of
# programming is necessary for the way g.parser needs to run.
if __name__ == "__main__":
    if len(sys.argv) <= 1 or sys.argv[1] != "@ARGS_PARSED@":
        os.execvp("g.parser", [sys.argv[0]] + sys.argv)
    else:
        main()
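# The g.parser re-exec idiom above relies on a parameter header that g.parser
# reads from the script itself. A minimal, illustrative header sketch (the
# description and option names here are assumptions, not the original
# script's):
#%module
#% description: Update soil fertility maps after an impact event
#%end
#%option
#% key: inmap
#% type: string
#% description: Input soil fertility map
#% required: yes
#%end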
os.environ['PLACEBO_MODE'] = 'playback'
os.environ['PLACEBO_DIR'] = os.path.join(root_dir, 'test', 'unit', 'placebo')

cmd = ['pytest', '--verbose']
if args.lint:
    lint_args = [
        '--flake8',
        '--pylint',
        '--pylint-jobs={}'.format(multiprocessing.cpu_count()),
        '--pylint-rcfile={}'.format(os.path.join(root_dir, '.pylintrc')),
    ]
    cmd.extend(lint_args)

if args.coverage:
    coverage_args = [
        '--cov=aws_gate',
        '--cov-report=term-missing',
        '--cov-report=xml:reports/coverage.xml',
        '--cov-report=html:reports/coverage/aws-gate',
        '--junitxml=reports/test.xml',
    ]
    cmd.extend(coverage_args)
else:
    cmd.append('--no-cov')

cmd.extend(rest)
os.execvp(cmd[0], cmd)
def record_and_send():
    if g.recordbutton.is_pressed:
        person_leds_off()  # turns off all person LEDs
        g.recordled.on()
        g.fileCounter += 1

        # Create a child process
        pid = os.fork()
        if pid:
            print("\nIn parent process")
            time.sleep(2)
            # pushing record button a second time ends the loop
            while True:
                if g.recordbutton.is_pressed:
                    g.recordled.off()
                    break
            os.kill(pid, signal.SIGTERM)
            fileName = "audio-" + str(g.boxID) + "-" + str(g.fileCounter)
            # convert .wav to .mp3
            song = AudioSegment.from_wav(fileName + ".wav")
            song.export(fileName + ".mp3", format="mp3")
            print("Signal sent, child interrupted.")
            print("record loop")
            while True:
                if g.playbutton.is_pressed:
                    play_audio(fileName + ".mp3")
                if g.sendbutton.is_pressed:
                    g.sendled.on()
                    destIDs = []
                    # choosing who to send messages to
                    if g.person1on and g.contacts[0] != None:
                        destIDs.append(g.contacts[0])
                    if g.person2on and g.contacts[1] != None:
                        destIDs.append(g.contacts[1])
                    if g.person3on and g.contacts[2] != None:
                        destIDs.append(g.contacts[2])
                    if g.person3on and g.contacts[3] != None:
                        destIDs.append(g.contacts[3])
                    for destID in destIDs:
                        print("send to dest ", destID)
                        # send audio once you have list of destinations
                        send_audio(destID)
                    print("done recording")
                    g.sendled.off()
                    person_leds_off()
                    break
        else:
            print("In child process")
            print("Process ID:", os.getpid())
            print("Recording audio")
            # recording message
            # args is the command you would run on the command line to record
            # a message using our microphone
            fileName = "audio-" + str(g.boxID) + "-" + str(g.fileCounter)
            args = ("arecord", "-D", "plughw:2", "-c1", "-r", "48000", "-f",
                    "S32_LE", "-t", "wav", "-V", "mono", "-v",
                    fileName + ".wav")  # change audio name
            os.execvp("arecord", args)
def run(self):
    log = self.node.logfilename()
    pager = os.environ.get('PAGER', common.platform_pager())
    os.execvp(pager, (pager, log))
def launch(cmd, env=None, stdin=None, stdout=None, stderr=None,
           debug=False, cd=None):
    """!Starts the specified command (a list), with the specified environment
    (or None to copy this process's environment).

    @param stdin,stdout,stderr Specifies the stdin, stdout and stderr
      streams.  The special value PIPE means "make a pipe," and sending
      stderr=ERR2OUT requests redirection of stderr to stdout.
    @param cd The optional "cd" argument specifies a directory to cd
      into, in the child process, before executing the command.  Of course,
      you shouldn't care about any of this because you should be using the
      produtil.run package.
    @param cmd the command to run
    @param env the subprocess's environment, or None to use mine
    @param debug if True, send debug messages"""
    if cd is not None and not isinstance(cd, str):
        raise TypeError(
            "In produtil.pipeline.launch, cd must be a string or None")
    if cd == '':
        raise ValueError(
            "In produtil.pipeline.launch, cd must not be the empty string.")
    stdinP = None; stdinC = None
    stdoutP = None; stdoutC = None
    stderrP = None; stderrC = None
    logger = logging.getLogger(cmd[0])
    global pipes_to_close
    if debug:
        logger.debug("Start %s" % (repr(cmd),))
    if stdin is PIPE:
        (stdinC, stdinP) = pipe(logger)
        if debug:
            logger.debug("Pipe for stdin: %d<==%d" % (stdinC, stdinP))
    else:
        stdinC = stdin
    if stdout is PIPE:
        (stdoutP, stdoutC) = pipe(logger)
        if debug:
            logger.debug("Pipe for stdout: %d<==%d" % (stdoutP, stdoutC))
    else:
        stdoutC = stdout
    if stderr is PIPE:
        (stderrP, stderrC) = pipe(logger)
        if debug:
            logger.debug("Pipe for stderr: %d<==%d" % (stderrP, stderrC))
    elif stderr is not ERR2OUT:
        stderrC = stderr

    pid = os.fork()
    assert(pid >= 0)
    if pid > 0:
        # Parent process after successful fork.
        if stdin is not None and stdin is not PIPE:
            if debug:
                logger.debug("Close stdin %d on parent." % stdin)
            pclose(stdin)
        if stdin is PIPE and stdinC is not None:
            if debug:
                logger.debug("Close stdinC %d on parent." % stdinC)
            pclose(stdinC)
            padd(stdinP)
        if stdout is not None and stdout is not PIPE:
            if debug:
                logger.debug("Close stdout %d on parent." % stdout)
            pclose(stdout)
        if stdout is PIPE and stdoutC is not None:
            if debug:
                logger.debug("Close stdoutC %d on parent." % stdoutC)
            pclose(stdoutC)
            padd(stdoutP)
        if stderr is not None and stderr is not PIPE and stderr is not ERR2OUT:
            if debug:
                logger.debug("Close stderr %d on parent." % stderr)
            pclose(stderr)
        if stderr is PIPE and stderrC is not None:
            if debug:
                logger.debug("Close stderrC %d on parent." % stderrC)
            pclose(stderrC)
            padd(stderrP)
        if debug:
            logger.debug("On parent, returning %s" % (
                repr((pid, stdinP, stdoutP, stderrP))))
        return (pid, stdinP, stdoutP, stderrP)

    if isinstance(cd, str):
        os.chdir(cd)

    # We are in the child process
    pclose_all(i=stdin, o=stdout, e=stderr)
    if stdinP is not None:
        if debug:
            logger.debug("Close stdinP %d on child." % stdinP)
        pclose(stdinP)
    if stdinC is not None:
        if debug:
            logger.debug("Point stdin to stdinC %d on child and close original." % stdinC)
        os.dup2(stdinC, 0)
        pclose(stdinC)
    if stdoutP is not None:
        if debug:
            logger.debug("Close stdoutP %d on child." % stdoutP)
        pclose(stdoutP)
    if stdoutC is not None:
        if debug:
            logger.debug("Point stdout to stdoutC %d on child and close original." % stdoutC)
        os.dup2(stdoutC, 1)
        pclose(stdoutC)
    if stderr is ERR2OUT:
        if debug:
            logger.debug("Redirect stderr to stdout on child.")
        os.dup2(1, 2)
    if stderrP is not None:
        if debug:
            logger.debug("Close stderrP %d on child." % stderrP)
        pclose(stderrP)
    if stderrC is not None:
        if debug:
            logger.debug("Point stderr to stderrC %d on child and close original." % stderrC)
        os.dup2(stderrC, 2)
        pclose(stderrC)
    if debug:
        logger.debug("Reset signal handlers on child.")
    signal.signal(signal.SIGHUP, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGQUIT, signal.SIG_DFL)
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    assert(cmd[0])
    try:
        if debug:
            logger.debug("Run %s %s on child" % (cmd[0], " ".join(cmd[1:])))
        if env is None:
            os.execvp(cmd[0], cmd)
        else:
            os.execvpe(cmd[0], cmd, env)
    except Exception as e:
        logger.error("%s: could not exec: %s" % (cmd[0], str(e)), exc_info=True)
        sys.exit(2)
def getRootPrivileges():
    if os.geteuid() != 0:
        log("Root required, getting privileges")
        os.execvp('sudo', ['sudo', 'python3'] + sys.argv)
def main_function(test_time=0): """Takes one night worth of images with processing at sunrise. test_time -- the length for a test, 0 is no testing. (default 0) """ # Initialise variables. job_list = [] # Get program directory. prog_dir = os.path.dirname(os.path.abspath(__file__)) # Load new config file from master copy in /opt/dfn-software. config_file = os.path.join(prog_dir, r'dfnstation.cfg') config_dict = dfn.load_config(config_file) config_dict['internal']['config_file'] = config_file # Setup new data path for this night. data_path = dfn.make_data_path(config_dict['internal']['data_directory']) # TIP: Setup logger. # Set lowest log level for testing. if test_time != 0: log_level = logging.DEBUG else: log_level = logging.INFO # Set path, format and identity for logging. log_file = os.path.join(data_path, dfn.log_name() + 'interval.txt') tether_file = os.path.join(data_path, dfn.log_name() + 'tether.txt') formatter = logging.Formatter( '%(asctime)s, %(levelname)s, %(module)s, %(message)s') logger = logging.getLogger() # Remove any pre-existing handlers. if len(logger.handlers) != 0: for hdl in logger.handlers[:]: hdl.stream.close() logger.removeHandler(hdl) # Begin handler that records logs in /data0/latest. fh = logging.FileHandler(log_file) fh.setFormatter(formatter) logger.addHandler(fh) logger.setLevel(log_level) # Provide logger details and stream logging to stdout for testing. if test_time != 0: print('logger_handler_count, ' + str(len(logger.handlers))) print('logfile, ' + log_file) logger.addHandler(logging.StreamHandler()) logger.info('testing') # Report interval control version. logger.info('interval_control_version, ' + VERSION) # Initialise microcontroller. ser = leo.connect_to_leostick() leo.shutter_off() leo.wait_for_camera_ready() leo.camera_off() leo.video_off() # Set bulb mode in microcontroller based on config. if config_dict['camera']['exp_mode'] == 'BULB': leo.set_bulb_mode() logger.info('bulb_mode') else: leo.set_non_bulb_mode() logger.info('non_bulb_mode') # Find latest image mask for the dslr and make a local copy. maskfile = dfn.get_mask(config_dict['internal']['data_directory']) shutil.copy(maskfile, data_path) logger.debug('mask_copied, ' + data_path) logger.info('mask, ' + maskfile) # FIXME: Is this still needed if processing is a daemon? # Make the transfer file early to allow background event detection. transfer_status_file = os.path.join(data_path, r'transfer_status.txt') dfn.write_string_to_file('unprocessed\n', transfer_status_file, mode='wt') logger.debug('transfer_status_file_written, unprocessed') # Get new gps location, if available and report lock. (config_dict['station']['lon'], config_dict['station']['lat'], config_dict['station']['altitude'], config_dict['station']['gps_lock']) = leo.update_GPS_location( config_dict['station']['lon'], config_dict['station']['lat'], config_dict['station']['altitude']) logger.info('GPS_lonlat, ' + str(config_dict['station']['lon']) + ', ' + str(config_dict['station']['lat']) + ', ' + str(config_dict['station']['altitude']) + ', ' + str(config_dict['station']['gps_lock'])) # TIP: Calculate sunset and sunrise in localtime. 
    sunrise, sunset, moonrise, moonset = sm.generate_sun_and_moon(
        config_dict['station']['lon'], config_dict['station']['lat'])
    sunset += datetime.timedelta(
        minutes=float(config_dict['internal']['sun_leeway']))
    sunrise -= datetime.timedelta(
        minutes=float(config_dict['internal']['sun_leeway']))
    sunset_after_twilight = sunset + datetime.timedelta(minutes=10)
    sunrise_before_twilight = sunrise - datetime.timedelta(minutes=10)

    # Set dummy values for testing.
    if test_time != 0:
        sunset = datetime.datetime.now() + datetime.timedelta(seconds=30)
        sunrise = sunset + datetime.timedelta(seconds=test_time)
        sunset_after_twilight = sunset
        sunrise_before_twilight = sunrise

    logger.info('sunset, ' + str(sunset.isoformat()))
    logger.info('sunset_after_twilight, ' +
                str(sunset_after_twilight.isoformat()))
    logger.info('sunrise_before_twilight, ' +
                str(sunrise_before_twilight.isoformat()))
    logger.info('sunrise, ' + str(sunrise.isoformat()))
    logger.info('now, ' + str(datetime.datetime.now()))
    logger.info('UTCnow, ' + str(datetime.datetime.utcnow()))
    logger.info('timezone, ' + str(time.timezone))

    # Handle daylight savings.
    if time.daylight != 0:
        logger.info('altzone, ' + str(time.altzone))

    # Handle missed sunset, force an immediate start.
    if sunset >= sunrise:
        logger.info('late_start-forcing_immediate')
        sunset = datetime.datetime.now() + datetime.timedelta(seconds=30)

    # Use time.mktime, not calendar.timegm, as sunrise is a local datetime
    # object.
    sunrise_epoch = time.mktime(datetime.date.timetuple(sunrise))
    sunset_epoch = time.mktime(datetime.date.timetuple(sunset))

    # Convert back again for testing.
    sunrise_recalc_test = time.localtime(sunrise_epoch)
    logger.debug('recalc_sunrise, ' + str(sunrise_epoch) + ', ' +
                 str(sunrise_recalc_test))

    # TIP: Wait until sunset.
    while datetime.datetime.now() < sunset:
        logger.debug('waiting_for_sunset, ' +
                     datetime.datetime.now().isoformat())
        print('waiting_for_sunset, ' + datetime.datetime.now().isoformat())
        time.sleep(30)

        # If there is no GPS lock, try again.
        if config_dict['station']['gps_lock'] == 'N' and test_time == 0:
            (config_dict['station']['lon'],
             config_dict['station']['lat'],
             config_dict['station']['altitude'],
             config_dict['station']['gps_lock']) = leo.update_GPS_location(
                 config_dict['station']['lon'],
                 config_dict['station']['lat'],
                 config_dict['station']['altitude'])

            # If there is a new lock, log coordinates and recalculate timing.
            if config_dict['station']['gps_lock'] != 'N':
                logger.info('GPS_lonlat, ' +
                            str(config_dict['station']['lon']) + ', ' +
                            str(config_dict['station']['lat']) + ', ' +
                            str(config_dict['station']['altitude']) + ', ' +
                            str(config_dict['station']['gps_lock']))

                # Recalculate sunset and sunrise in localtime.
                sunrise, sunset, moonrise, moonset = sm.generate_sun_and_moon(
                    config_dict['station']['lon'],
                    config_dict['station']['lat'])
                sunset += datetime.timedelta(
                    minutes=float(config_dict['internal']['sun_leeway']))
                sunrise -= datetime.timedelta(
                    minutes=float(config_dict['internal']['sun_leeway']))
                sunset_after_twilight = sunset + datetime.timedelta(minutes=10)
                sunrise_before_twilight = sunrise - datetime.timedelta(
                    minutes=10)

                # Set dummy values for testing.
                if test_time != 0:
                    sunset = datetime.datetime.now() + datetime.timedelta(
                        seconds=30)
                    sunrise = sunset + datetime.timedelta(seconds=test_time)
                    sunset_after_twilight = sunset
                    sunrise_before_twilight = sunrise

                logger.info('sunset, ' + str(sunset.isoformat()))
                logger.info('sunset_after_twilight, ' +
                            str(sunset_after_twilight.isoformat()))
                logger.info('sunrise_before_twilight, ' +
                            str(sunrise_before_twilight.isoformat()))
                logger.info('sunrise, ' + str(sunrise.isoformat()))
                logger.info('now, ' + str(datetime.datetime.now()))
                logger.info('UTCnow, ' + str(datetime.datetime.utcnow()))
                logger.info('timezone, ' + str(time.timezone))

                # Handle daylight savings.
                if time.daylight != 0:
                    logger.info('altzone, ' + str(time.altzone))

                # Handle missed sunset, force an immediate start.
                if sunset >= sunrise:
                    logger.info('late_start-forcing_immediate')
                    sunset = datetime.datetime.now() + datetime.timedelta(
                        seconds=30)

                # Use time.mktime, not calendar.timegm, as sunrise is a
                # local datetime object.
                sunrise_epoch = time.mktime(datetime.date.timetuple(sunrise))
                sunset_epoch = time.mktime(datetime.date.timetuple(sunset))

                # Convert back again for testing.
                sunrise_recalc_test = time.localtime(sunrise_epoch)
                logger.debug('recalc_sunrise, ' + str(sunrise_epoch) + ', ' +
                             str(sunrise_recalc_test))

    # Report sunset time.
    logger.debug('sunset_now, ' + str(datetime.datetime.now()))

    # Get initial status, versions, temperature, etc.
    temperature = leo.get_temperature()
    logger.info('leostick_temperature, ' + str(temperature))
    time.sleep(1)
    leo_version = leo.get_version()
    logger.info('leostick_version, ' + str(leo_version))
    time.sleep(1)
    leo_sequence = leo.get_sequence()
    logger.info('leostick_sequence, ' + str(leo_sequence))
    time.sleep(1)
    leo_debug = leo.get_debug_codes()
    logger.info('leostick_debug, ' + str(leo_debug))
    time.sleep(1)
    logger.info('cloud_file, ' + config_dict['internal']['cloudy_img_file'])
    logger.info('HD_temperature, ' + str(dfn.disk_temperature()))
    logger.info('today_date, ' + dfn.today())
    logger.info('data_path, ' + data_path)
    logger.info('test_time, ' + str(test_time))
    mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    logger.info('memory, ' + "{:,}".format(mem_use))
    for item in dfn.get_ntp_data():
        logger.info('ntp, ' + str(item))

    # Switch on hardware.
    # FIXME: Handle Point Grey video camera.
    time.sleep(10)
    if os.path.exists(r'/dev/video0'):
        config_dict['camera']['video_device_exists'] = 1
        logger.info('video_device_found')
        if str(config_dict['camera']['video_enabled']) != '0':
            if test_time == 0:
                # Start up the video cloud daemon.
                logger.info('spawning_video')
                cc.spawn_video_command(sunrise_epoch)
                leo.video_on()
                logger.info('video_on_ok')
        else:
            logger.info('video_not_enabled')
    else:
        config_dict['camera']['video_device_exists'] = 0
        logger.info('video_not_exist')

    # Switch on camera and initialise settings.
    leo.camera_on()
    logger.info('camera_on_ok')
    os.chdir(data_path)
    cam.camera_download_images()
    dfn.rename_RAW_all(data_path, config_dict)
    cam.camera_set_time()
    time.sleep(1)
    logger.info('camera_time_get, ' + cam.camera_get_time())
    cam.camera_set_program()   # manual exposure mode
    cam.camera_set_autoiso(1)  # auto-ISO off
    cam.camera_set_highisonr()
    cam.camera_set_longexpnr()
    cam.camera_set_vignette()
    cam.camera_set_fstop(config_dict['camera']['camera_fstop'])

    # Switch on condensation fan.
    leo.cond_on()

    # FIXME: This can be removed for the DFNEXT.
    if 'firmware_control' in config_dict:
        if (config_dict['firmware_control']['heater_enabled'] == '1'
                or config_dict['firmware_control']['heater_enabled'] == 1
                or config_dict['firmware_control']['heater_enabled'] == True):
            leo.heater_on(
                config_dict['firmware_control']['heater_temperature_C'])

    config_dict['camera']['shutterspeed'] = cam.get_camera_shutterspeed()

    # Save a local copy of the config file.
    if dfn.save_config_file(os.path.join(data_path, 'dfnstation.cfg'),
                            config_dict):
        logger.info('new_conf_file_written')
    else:
        logger.warning('new_conf_file_write_error')
    logger.info('location, ' + config_dict['station']['location'])

    # Set interval time from config unless testing. The value is cast through
    # int, so no trailing 's' can survive.
    interval_time = str(int(config_dict['clouds']['time_checking_clear']))

    # Set fixed interval length; tests will run for
    # config time + n * interval_time until past test_time.
    if test_time != 0:
        interval_time = '180'

    # Set directory.
    current_dir = data_path
    os.chdir(current_dir)

    # Start with an incorrect value to force an immediate mode change.
    cloud_status_internal = -3

    # TIP: Evening Twilight _______________________________________________________________________
    if test_time == 0:
        logger.info('twilight_evening_settings')
        cam.camera_set_quality(JPG_MODE)
        # TODO: Remove config check and ensure defaults in config.
        if 'twilight_exposuretime' in config_dict['camera']:
            cam.camera_set_shutter(
                config_dict['camera']['twilight_exposuretime'])
        else:
            cam.camera_set_shutter(
                config_dict['camera']['camera_exposuretime'])
        if 'twilight_iso' in config_dict['camera']:
            cam.camera_set_iso(config_dict['camera']['twilight_iso'])
        else:
            cam.camera_set_iso(config_dict['camera']['camera_iso'])
        logger.info('twilight_evening_starting')

        # FIXME: Evening tether! This needs to be tested; it must run in the
        # background.
        with open(os.devnull, 'w') as shutup:
            try:
                # Calculate number of seconds of twilight for tethering.
                evening_twilight_seconds = int(
                    (sunset_after_twilight -
                     datetime.datetime.now()).total_seconds())
                subprocess.Popen([
                    'gphoto2', '--capture-tethered',
                    str(evening_twilight_seconds) + 's', '--force-overwrite'
                ],
                                 stderr=shutup,
                                 close_fds=True)
                logger.info('evening_twilight_tether_starting ' +
                            str(evening_twilight_seconds) + 's')
            except (OSError, subprocess.CalledProcessError) as e:
                # Popen raises OSError on failure; CalledProcessError is kept
                # for the subprocess.call() below.
                logger.warning('argh-problem_starting_tether, ' + str(e))
                print('evening_twilight_tether_err, ' +
                      str(datetime.datetime.now()) + ', ' + str(e))
                res = subprocess.call(['gphoto2', '--reset'], stderr=shutup)
                logger.info('gphoto_reset, ' + str(res))

        while datetime.datetime.now() < sunset_after_twilight:
            imgfile = high_acq(current_dir, interval_time, config_dict)
            handle_new_image(imgfile, job_list, current_dir, config_dict)
    else:
        logger.debug('evening_twilight_not_called')

    # TIP: Night __________________________________________________________________________________
    logger.info('night_settings')
    cam.camera_set_quality(RAW_MODE)
    cam.camera_set_shutter(config_dict['camera']['camera_exposuretime'])
    cam.camera_set_iso(config_dict['camera']['camera_iso'])
    logger.info('night_starting')

    # FIXME: Night tether! This needs to be tested; it must run in the
    # background.
    with open(tether_file, 'w') as shutup:
        try:
            # Calculate number of seconds of night for tethering.
            night_seconds = int((sunrise_before_twilight -
                                 datetime.datetime.now()).total_seconds())
            tether = subprocess.Popen(
                ['gphoto2', '--capture-tethered', '--force-overwrite'],
                stderr=shutup,
                close_fds=True)
            logger.info('night_tether_starting, ' + str(night_seconds) + 's')
        except (OSError, subprocess.CalledProcessError) as e:
            logger.warning('argh-problem_starting_tether, ' + str(e))
            print('night_tether_err, ' + str(datetime.datetime.now()) + ', ' +
                  str(e))
            res = subprocess.call(['gphoto2', '--reset'], stderr=shutup)
            logger.info('gphoto_reset, ' + str(res))

    while datetime.datetime.now() < sunrise_before_twilight:
        # Collect and report current cloud status.
        cloud_status = cc.read_cloud_status(
            config_dict['internal']['cloud_status_file'])
        print(datetime.datetime.now(),
              'sunrise, ' + str(sunrise) + ', ' + str(cloud_status))
        logger.debug('cloud_status, ' + str(cloud_status) + ', ' +
                     str(cloud_status_internal))

        # Force status CLEAR for testing.
        if test_time != 0:
            logger.debug('testing_force_clear')
            cloud_status = CLEAR

        # Select acquisition based on cloud status.
        if cloud_status == CLEAR:
            if cloud_status_internal != CLEAR:
                logger.info('Gone_clear, ' + str(cloud_status))
            imgfile = high_acq(current_dir, interval_time, config_dict)
        elif cloud_status == CLEARING:
            if cloud_status_internal != CLEARING:
                logger.info('Gone_clearing, ' + str(cloud_status))
            imgfile = low_acq(current_dir, cloud_status, config_dict)
        elif cloud_status == CLOUDY:
            logger.info('Gone_cloudy, ' + str(cloud_status))
            imgfile = low_acq(current_dir, cloud_status, config_dict)
        else:
            if cloud_status_internal in (CLEAR, CLEARING, CLOUDY):
                logger.info('Gone_undefined_cloud_status, ' +
                            str(cloud_status))
            else:
                logger.info('Still_undefined_cloud_status, ' +
                            str(cloud_status))
            # TODO: Incorporate uncertainty measures for undefined status.
            imgfile = high_acq(current_dir, interval_time, config_dict)
        cloud_status_internal = cloud_status
        time.sleep(5)

    # Stop the night tether; poll() returns None while it is still running.
    print(str(tether.poll()))
    if tether.poll() is None:
        logger.debug('Tether_not_killed, ' + str(tether.pid))
    else:
        logger.debug('Tether_killed, ' + str(tether.pid))
    tether.terminate()
    tether.kill()
    print(str(tether.poll()))
    if tether.poll() is None:
        logger.debug('Tether_not_killed, ' + str(tether.pid))
    else:
        logger.debug('Tether_killed, ' + str(tether.pid))

    # TIP: Morning Twilight _______________________________________________________________________
    if test_time == 0:
        logger.info('twilight_morning_settings')
        cam.camera_set_quality(JPG_MODE)
        # TODO: Remove config check and ensure defaults in config.
        if 'twilight_exposuretime' in config_dict['camera']:
            cam.camera_set_shutter(
                config_dict['camera']['twilight_exposuretime'])
        else:
            cam.camera_set_shutter(
                config_dict['camera']['camera_exposuretime'])
        if 'twilight_iso' in config_dict['camera']:
            cam.camera_set_iso(config_dict['camera']['twilight_iso'])
        else:
            cam.camera_set_iso(config_dict['camera']['camera_iso'])
        logger.info('twilight_morning_start')

        # Morning tether! Start gphoto tether in background until sunrise.
        with open(os.devnull, 'w') as shutup:
            try:
                # Calculate number of seconds of morning twilight for
                # tethering.
                morning_twilight_seconds = int(
                    (sunrise - datetime.datetime.now()).total_seconds())
                subprocess.Popen([
                    'gphoto2', '--capture-tethered',
                    str(morning_twilight_seconds) + 's', '--force-overwrite'
                ],
                                 stderr=shutup,
                                 close_fds=True)
                logger.info('morning_twilight_tether_starting ' +
                            str(morning_twilight_seconds) + 's')
            except (OSError, subprocess.CalledProcessError) as e:
                logger.warning('argh-problem_starting_tether, ' + str(e))
                print('morning_twilight_tether_err, ' +
                      str(datetime.datetime.now()) + ', ' + str(e))
                res = subprocess.call(['gphoto2', '--reset'], stderr=shutup)
                logger.info('gphoto_reset, ' + str(res))

        while datetime.datetime.now() < sunrise:
            imgfile = high_acq(current_dir, interval_time, config_dict)
            handle_new_image(imgfile, job_list, current_dir, config_dict)
    else:
        logger.debug('morning_twilight_not_called')

    # TIP: Sunrise
    logger.info('sunrise')

    # Shutdown hardware.
    leo.shutter_off()
    if str(config_dict['camera']['video_device_exists']) == '1':
        leo.video_off()
        logger.info('video_off_ok')

    # Wait for last shutter exposure to finish.
    leo.wait_for_camera_ready()

    # Wait for camera to record last image from buffer.
    time.sleep(10)
    for job in job_list:
        job.join()
    logger.info('finished_outstanding_tasks')

    os.chdir(data_path)

    # Clear any stray images from memory card.
    cam.camera_download_images()
    leo.camera_off()
    logger.info('camera_off_ok')
    leo.cond_off()
    ser.close()

    # Clean up images.
    renamed_images = dfn.rename_RAW_all(data_path, config_dict)
    logger.info('rename_RAW_all_ok, ' + str(len(renamed_images)))
    if str(config_dict['internal']['clearing_quality']) == '2':
        dfn.make_all_thumb(data_path)
        logger.info('make_all_thumb_ok')
    else:
        logger.debug('make_all_thumb_not_called')

    # Get a rough shutter count: just listdir, then sort by mtime.
    raw_list = [
        os.path.join(data_path, a) for a in os.listdir(data_path)
        if (a.lower().endswith('.nef') or a.lower().endswith('.cr2'))
    ]
    # Check there are actually images.
    if len(raw_list) > 1:
        last_image = sorted(
            raw_list,
            key=lambda x: os.stat(os.path.join(data_path, x)).st_mtime)[-1]
        logger.info('shuttercount, ' +
                    str(dfn.image_shuttercount(last_image)))

    # Report memory usage.
    mem_use = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    logger.info('memory, ' + "{:,}".format(mem_use))
    logger.info('today_disk_usage, ' +
                "{:,}".format(dfn.disk_usage(data_path)))
    for item in dfn.get_ntp_data():
        logger.info('ntp, ' + str(item))
    time.sleep(60)

    # Close down logging and exit.
    if test_time == 0:
        if ('enabled' not in config_dict['event_detect']
                or (config_dict['event_detect']['enabled'] != '0'
                    and config_dict['event_detect']['enabled'] != 'N')):
            logger.info('exiting_interval_control_calling_processing')
            # Camera systems do a reboot at 16:15 localtime.
            reboot_time = dfn.get_reboot_time()
            reboot_time_epoch = time.mktime(
                datetime.date.timetuple(reboot_time))
            os.execvp(sys.executable, [
                sys.executable, r'/opt/dfn-software/processing_wrapper.py',
                '/data0/latest',
                str(reboot_time_epoch), '012346'
            ])
        else:
            logger.info('exiting_interval_control_processing_not_enabled')
    else:
        logger.info('finished_interval_control_night_end_test')
    sys.stdout.flush()
    sys.stderr.flush()
    logging.shutdown()
    return True
def handle(self, *args, **options):
    for arg in args:
        k, v = arg.split('=')
        if k == 'http':
            if self.http_port:
                self.http_port = v
        elif k == 'socket':
            self.http_port = None
            self.socket_addr = v

    # load the Django WSGI handler
    os.environ['UWSGI_MODULE'] = '%s.wsgi' % django_project

    # DJANGO settings
    if options['settings']:
        os.environ['DJANGO_SETTINGS_MODULE'] = options['settings']
    else:
        os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % django_project

    # set protocol as uwsgi
    os.environ['UWSGI_PROTOCOL'] = 'uwsgi'

    # bind the http server to the default port
    if self.http_port:
        os.environ['UWSGI_HTTP_SOCKET'] = ':%s' % self.http_port
    elif self.socket_addr:
        os.environ['UWSGI_UWSGI_SOCKET'] = self.socket_addr
        os.environ['UWSGI_CHMOD_SOCKET'] = '664'

    # set process names
    os.environ['UWSGI_AUTO_PROCNAME'] = 'true'
    os.environ['UWSGI_PROCNAME_PREFIX_SPACED'] = ('[uWSGI %s]' %
                                                  django_project)

    # remove sockets/pidfile at exit
    os.environ['UWSGI_VACUUM'] = 'true'

    # retrieve/set the PythonHome
    os.environ['UWSGI_VIRTUALENV'] = sys.prefix

    # add project to python path
    os.environ['UWSGI_PP'] = root

    os.environ['UWSGI_POST_BUFFERING'] = '1048576'
    os.environ['UWSGI_RELOAD_ON_RSS'] = '300'

    # increase buffer size a bit
    os.environ['UWSGI_BUFFER_SIZE'] = '65535'

    # some additions required by newrelic
    os.environ['UWSGI_ENABLE_THREADS'] = 'true'
    os.environ['UWSGI_LAZY_APPS'] = 'true'
    os.environ['UWSGI_SINGLE_INTERPRETER'] = 'true'
    os.environ['UWSGI_AUTOLOAD'] = 'true'

    # set 12 workers and cheaper to number of cpus
    os.environ['UWSGI_WORKERS'] = '12'
    os.environ['UWSGI_CHEAPER'] = str(multiprocessing.cpu_count())

    # enable the master process
    os.environ['UWSGI_MASTER'] = 'true'

    os.environ['UWSGI_NO_ORPHANS'] = 'true'
    os.environ['UWSGI_MEMORY_REPORT'] = 'true'
    os.environ['UWSGI_DISABLE_LOGGING'] = 'true'

    # set harakiri
    os.environ['UWSGI_HARAKIRI'] = '60'
    os.environ['UWSGI_HARAKIRI_VERBOSE'] = 'true'

    # set uid and gid
    os.environ['UWSGI_UID'] = str(os.getuid())
    os.environ['UWSGI_GID'] = str(os.getgid())

    # TODO: Figure out cache
    os.environ['UWSGI_CACHE2'] = (
        'name=%s,items=20000,keysize=128,blocksize=4096' % django_project)

    if settings.DEBUG:
        if apps.is_installed('configurations'):
            os.environ.setdefault('DJANGO_CONFIGURATION', 'Development')
            import configurations
            configurations.setup()

        # map and serve static files
        os.environ['UWSGI_STATIC_MAP'] = '%s=%s' % (settings.STATIC_URL,
                                                    settings.STATIC_ROOT)
        os.environ['UWSGI_PY_AUTORELOAD'] = '2'

    # run spooler for mail task
    if 'django_uwsgi' in settings.EMAIL_BACKEND:
        os.environ['UWSGI_SPOOLER'] = '/tmp'
        os.environ['UWSGI_SPOOLER_IMPORT'] = 'django_uwsgi.tasks'

    # exec the uwsgi binary
    if apps.ready:
        os.execvp('uwsgi', ('uwsgi', ))
def event_keepalive(event_function,
                    log_function,
                    call_every_loop=None,
                    loop_interval=None,
                    shutdown_function=None):
    last_config_timestamp = config_timestamp()

    # Send signal that we are ready to receive the next event, but
    # not after a config-reload-restart (see below).
    if os.getenv("CMK_EVENT_RESTART") != "1":
        log_function("Starting in keepalive mode with PID %d" % os.getpid())
        sys.stdout.write("*")
        sys.stdout.flush()
    else:
        log_function("We are back after a restart.")

    while True:
        try:
            # Invalidate timeperiod caches
            cmk_base.core.cleanup_timeperiod_caches()

            # If the configuration has changed, we do a restart. But we do
            # this check just before the next event arrives. We must
            # *not* read data from stdin, just peek! There is still one
            # problem: when restarting we must *not* send the initial '*'
            # byte, because it must be sent no sooner than the pending event
            # has been handled. We do this by setting the environment
            # variable CMK_EVENT_RESTART=1.
            if event_data_available(loop_interval):
                if last_config_timestamp != config_timestamp():
                    log_function(
                        "Configuration has changed. Restarting myself.")
                    if shutdown_function:
                        shutdown_function()

                    os.putenv("CMK_EVENT_RESTART", "1")

                    # Close all unexpected file descriptors before invoking
                    # execvp() to prevent inheritance of them. In CMK-1085 we
                    # had an issue related to os.urandom() which kept FDs
                    # open. This specific issue of Python 2.7.9 should've
                    # been fixed since Python 2.7.10. Just to be sure we keep
                    # cleaning up.
                    cmk.utils.daemon.closefrom(3)

                    os.execvp("cmk", sys.argv)

                data = ""
                while not data.endswith("\n\n"):
                    try:
                        new_data = os.read(0, 32768)
                    except IOError:
                        new_data = ""
                    except Exception as e:
                        if cmk.utils.debug.enabled():
                            raise
                        log_function("Cannot read data from CMC: %s" % e)

                    if not new_data:
                        log_function(
                            "CMC has closed the connection. Shutting down.")
                        if shutdown_function:
                            shutdown_function()
                        sys.exit(0)  # closed stdin

                    data += new_data

                try:
                    context = raw_context_from_string(data.rstrip('\n'))
                    event_function(context)
                except Exception as e:
                    if cmk.utils.debug.enabled():
                        raise
                    log_function("ERROR %s\n%s" % (e, traceback.format_exc()))

                # Signal that we are ready for the next event
                sys.stdout.write("*")
                sys.stdout.flush()

        # Fix for Python 2.4:
        except SystemExit as e:
            sys.exit(e)

        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            log_function("ERROR %s\n%s" % (e, traceback.format_exc()))

        if call_every_loop:
            try:
                call_every_loop()
            except Exception as e:
                if cmk.utils.debug.enabled():
                    raise
                log_function("ERROR %s\n%s" % (e, traceback.format_exc()))
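# The restart path above works because execvp() replaces the running process
# in place, and the modified environment travels into the new incarnation.
# A minimal, self-contained sketch of the same restart-on-config-change idiom
# (not from the original; CONFIG, MYAPP_RESTART and the sleep are stand-ins):

import os
import sys
import time

CONFIG = "/etc/myapp.conf"  # illustrative path

def config_timestamp():
    return os.stat(CONFIG).st_mtime

def keepalive_loop():
    booted_with = config_timestamp()
    while True:
        time.sleep(1)  # stand-in for one unit of real work
        if config_timestamp() != booted_with:
            # Flag the restart so the next incarnation can skip one-time
            # setup, then replace this process; execv never returns.
            os.environ["MYAPP_RESTART"] = "1"
            os.execv(sys.executable, [sys.executable] + sys.argv)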
def popen(cmd, args, mode, capture_err=1):
    if sys.platform == "win32":
        command = win32popen.CommandLine(cmd, args)

        if mode.find('r') >= 0:
            hStdIn = None

            if debug.SHOW_CHILD_PROCESSES:
                dbgIn, dbgOut = None, StringIO.StringIO()
                handle, hStdOut = win32popen.MakeSpyPipe(0, 1, (dbgOut, ))
                if capture_err:
                    hStdErr = hStdOut
                    dbgErr = dbgOut
                else:
                    dbgErr = StringIO.StringIO()
                    x, hStdErr = win32popen.MakeSpyPipe(None, 1, (dbgErr, ))
            else:
                handle, hStdOut = win32popen.CreatePipe(0, 1)
                if capture_err:
                    hStdErr = hStdOut
                else:
                    hStdErr = win32popen.NullFile(1)
        else:
            if debug.SHOW_CHILD_PROCESSES:
                dbgIn, dbgOut, dbgErr = (StringIO.StringIO(),
                                         StringIO.StringIO(),
                                         StringIO.StringIO())
                hStdIn, handle = win32popen.MakeSpyPipe(1, 0, (dbgIn, ))
                x, hStdOut = win32popen.MakeSpyPipe(None, 1, (dbgOut, ))
                x, hStdErr = win32popen.MakeSpyPipe(None, 1, (dbgErr, ))
            else:
                hStdIn, handle = win32popen.CreatePipe(0, 1)
                hStdOut = None
                hStdErr = None

        phandle, pid, thandle, tid = win32popen.CreateProcess(
            command, hStdIn, hStdOut, hStdErr)

        if debug.SHOW_CHILD_PROCESSES:
            debug.Process(command, dbgIn, dbgOut, dbgErr)

        return _pipe(win32popen.File2FileObject(handle, mode), phandle)

    # flush the stdio buffers since we are about to change the FD under them
    sys.stdout.flush()
    sys.stderr.flush()

    r, w = os.pipe()
    pid = os.fork()
    if pid:
        # in the parent

        # close the descriptor that we don't need and return the other one.
        if mode.find('r') >= 0:
            os.close(w)
            return _pipe(os.fdopen(r, mode), pid)
        os.close(r)
        return _pipe(os.fdopen(w, mode), pid)

    # in the child

    # we'll need /dev/null for the discarded I/O
    null = os.open('/dev/null', os.O_RDWR)

    if mode.find('r') >= 0:
        # hook stdout/stderr to the "write" channel
        os.dup2(w, 1)
        # "close" stdin; the child shouldn't use it
        ### this isn't quite right... we may want the child to read from stdin
        os.dup2(null, 0)
        # what to do with errors?
        if capture_err:
            os.dup2(w, 2)
        else:
            os.dup2(null, 2)
    else:
        # hook stdin to the "read" channel
        os.dup2(r, 0)
        # "close" stdout/stderr; the child shouldn't use them
        ### this isn't quite right... we may want the child to write to these
        os.dup2(null, 1)
        os.dup2(null, 2)

    # don't need these FDs any more
    os.close(null)
    os.close(r)
    os.close(w)

    # the stdin/stdout/stderr are all set up. exec the target
    try:
        os.execvp(cmd, (cmd, ) + tuple(args))
    except:
        # aid debugging, if the os.execvp above fails for some reason:
        print "<h2>exec failed:</h2><pre>", cmd, ' '.join(args), "</pre>"
        raise

    # crap. shouldn't be here.
    sys.exit(127)
def launcher(arg):
    os.execvp("python", arg)
def test_a110_one(self):
    pid, fd = os.forkpty()
    #cmd = [sys.executable]
    cmd = ['coverage', 'run']
    cmd += [
        inspect.getsourcefile(run), 'one', '-i',
        inspect.getsourcefile(data_sample_handler)
    ]
    if pid == 0:
        # child
        os.execvp(cmd[0], cmd)
    else:
        # parent
        def wait_text(timeout=1):
            import select
            text = []
            while True:
                rl, wl, xl = select.select([fd], [], [], timeout)
                if not rl:
                    break
                try:
                    t = os.read(fd, 1024)
                except OSError:
                    break
                if not t:
                    break
                t = utils.text(t)
                text.append(t)
                print(t, end='')
            return ''.join(text)

        text = wait_text(3)
        self.assertIn('new task data_sample_handler:on_start', text)
        self.assertIn('pyspider shell', text)

        os.write(fd, utils.utf8('run()\n'))
        text = wait_text()
        self.assertIn('task done data_sample_handler:on_start', text)

        os.write(
            fd, utils.utf8('crawl("%s/pyspider/test.html")\n' % self.httpbin))
        text = wait_text()
        self.assertIn('/robots.txt', text)

        os.write(fd, utils.utf8('crawl("%s/links/10/0")\n' % self.httpbin))
        text = wait_text()
        if '"title": "Links"' not in text:
            os.write(fd,
                     utils.utf8('crawl("%s/links/10/1")\n' % self.httpbin))
            text = wait_text()
        self.assertIn('"title": "Links"', text)

        os.write(fd, utils.utf8('crawl("%s/404")\n' % self.httpbin))
        text = wait_text()
        self.assertIn('task retry', text)

        os.write(fd, b'quit_pyspider()\n')
        text = wait_text()
        self.assertIn('scheduler exiting...', text)
        os.close(fd)
        os.kill(pid, signal.SIGINT)
sys.exit("no arguments") print 'starting job... %s' % started print ' '.join(args) print sys.stdout.flush() childpid = os.fork() if not childpid: # Execute command sys.stdin.close() fd = os.open(jobdir.file("output"), os.O_WRONLY | os.O_CREAT | os.O_TRUNC) os.dup2(fd, sys.stdout.fileno()) os.dup2(fd, sys.stderr.fileno()) os.execvp(args[0], args) def handler(signum, frame): if childpid != 0: os.kill(childpid, signum) signal.signal(signal.SIGHUP, handler) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGQUIT, handler) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGCONT, handler) signal.signal(signal.SIGUSR1, handler) signal.signal(signal.SIGUSR2, handler) done = 0 while not done:
           if nvim.eval('buflisted(%d)' % buf.number))

# For now, treat all arguments that don't start with - or + as filenames. This
# is good enough to recognize '-f' and `+11`, which is all this script really
# needs right now.
filenames = [
    re.sub(' ', r'\ ', os.path.abspath(arg)) for arg in sys.argv[1:]
    if not arg[0] in ['-', '+']
]

try:
    nvim_socket = os.environ["NVIM_LISTEN_ADDRESS"]
except KeyError:
    # If we aren't running inside a `:terminal`, just exec nvim.
    os.execvp('nvim', sys.argv)
    sys.exit()

nvim = neovim.attach('socket', path=nvim_socket)

existing_buffers = get_listed_buffers(nvim)

nvim.command('split')
nvim.command('args %s' % ' '.join(filenames))

new_buffers = get_listed_buffers(nvim).difference(existing_buffers)

for arg in sys.argv:
    if arg[0] == '+':
        nvim.command(arg[1:])
# This is how I ensure a Python script is launched as root, and automatically
# call 'sudo' to re-launch it as root if not.
# I found it useful to check the parameters are valid *before* re-launching as
# root, so I don't have to enter the sudo password if there is a problem with
# the parameters, or I just want the help message.

import os
import sys

# At this point we may be running as root or as another user.
# - Check the parameters are valid - show an error if not.
# - Show the help message if requested.
# Don't do any work or anything time-consuming here, as it will run twice.

if os.geteuid() != 0:
    # os.execvp() replaces the running process rather than launching a child
    # process, so there's no need to exit afterwards. The extra "sudo" in the
    # second parameter is required because Python doesn't automatically set
    # argv[0] in the new process.
    os.execvp("sudo", ["sudo"] + sys.argv)

# Now we are definitely running as root.
# - Make the changes to the system settings (e.g. Apache config).
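# A sketch of the ordering the comments above recommend -- validate arguments
# (and handle --help) before paying the sudo cost. The argparse option is a
# placeholder, not from the original:

import argparse
import os
import sys

parser = argparse.ArgumentParser(description="example root-only task")
parser.add_argument("--config", required=True)  # placeholder option
args = parser.parse_args()  # exits here on bad arguments or --help

if os.geteuid() != 0:
    os.execvp("sudo", ["sudo", sys.executable] + sys.argv)

# Only reached as root, with arguments already validated.
print("running as root with config %s" % args.config)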
def main(options):
    # type: (argparse.Namespace) -> NoReturn

    # yarn and management commands expect to be run from the root of the
    # project.
    os.chdir(ZULIP_PATH)

    # hash the apt dependencies
    sha_sum = hashlib.sha1()

    for apt_dependency in SYSTEM_DEPENDENCIES:
        sha_sum.update(apt_dependency.encode('utf8'))
    if vendor in ["Ubuntu", "Debian"]:
        sha_sum.update(open('scripts/lib/setup-apt-repo', 'rb').read())
    else:
        # hash the content of setup-yum-repo and build-*
        sha_sum.update(open('scripts/lib/setup-yum-repo', 'rb').read())
        build_paths = glob.glob("scripts/lib/build-*")
        for bp in build_paths:
            sha_sum.update(open(bp, 'rb').read())

    new_apt_dependencies_hash = sha_sum.hexdigest()
    last_apt_dependencies_hash = None
    apt_hash_file_path = os.path.join(UUID_VAR_PATH, "apt_dependencies_hash")
    with open(apt_hash_file_path, 'a+') as hash_file:
        hash_file.seek(0)
        last_apt_dependencies_hash = hash_file.read()

    if new_apt_dependencies_hash != last_apt_dependencies_hash:
        try:
            install_system_deps()
        except subprocess.CalledProcessError:
            # Might be a failure due to network connection issues. Retrying...
            print(WARNING +
                  "Installing system dependencies failed; retrying..." + ENDC)
            install_system_deps()
        with open(apt_hash_file_path, 'w') as hash_file:
            hash_file.write(new_apt_dependencies_hash)
    else:
        print("No changes to apt dependencies, so skipping apt operations.")

    # Here we install node.
    proxy_env = [
        "env",
        "http_proxy=" + os.environ.get("http_proxy", ""),
        "https_proxy=" + os.environ.get("https_proxy", ""),
        "no_proxy=" + os.environ.get("no_proxy", ""),
    ]
    run_as_root(proxy_env + ["scripts/lib/install-node"], sudo_args=['-H'])

    # This is a wrapper around `yarn`, which we run last since
    # it can often fail due to network issues beyond our control.
    try:
        # Hack: We remove `node_modules` as root to work around an
        # issue with the symlinks being improperly owned by root.
        if os.path.islink("node_modules"):
            run_as_root(["rm", "-f", "node_modules"])
        run_as_root(["mkdir", "-p", NODE_MODULES_CACHE_PATH])
        run_as_root(["chown", "%s:%s" % (user_id, user_id),
                     NODE_MODULES_CACHE_PATH])
        setup_node_modules(prefer_offline=True)
    except subprocess.CalledProcessError:
        print(WARNING + "`yarn install` failed; retrying..." + ENDC)
        try:
            setup_node_modules()
        except subprocess.CalledProcessError:
            print(FAIL +
                  "`yarn install` is failing; check your network connection "
                  "(and proxy settings)." + ENDC)
            sys.exit(1)

    # Install shellcheck.
    run_as_root(["scripts/lib/install-shellcheck"])

    setup_venvs.main()

    run_as_root(["cp", REPO_STOPWORDS_PATH, TSEARCH_STOPWORDS_PATH])

    if is_circleci or (is_travis and not options.is_production_travis):
        run_as_root(["service", "rabbitmq-server", "restart"])
        run_as_root(["service", "redis-server", "restart"])
        run_as_root(["service", "memcached", "restart"])
        run_as_root(["service", "postgresql", "restart"])
    elif family == 'redhat':
        for service in ["postgresql-%s" % (POSTGRES_VERSION,),
                        "rabbitmq-server", "memcached", "redis"]:
            run_as_root(["systemctl", "enable", service], sudo_args=['-H'])
            run_as_root(["systemctl", "start", service], sudo_args=['-H'])

    # If we imported modules after activating the virtualenv in this
    # Python process, they could end up mismatching with modules we've
    # already imported from outside the virtualenv. That seems like a
    # bad idea, and empirically it can cause Python to segfault on
    # certain cffi-related imports. Instead, start a new Python
    # process inside the virtualenv.
    activate_this = "/srv/zulip-py3-venv/bin/activate_this.py"
    provision_inner = os.path.join(ZULIP_PATH, "tools", "lib",
                                   "provision_inner.py")
    exec(open(activate_this).read(), dict(__file__=activate_this))
    os.execvp(
        provision_inner,
        [
            provision_inner,
            *(["--force"] if options.is_force else []),
            *(["--production-travis"] if options.is_production_travis
              else []),
        ]
    )
def launch():
    if not configured:
        configure()
    binary = thisApp['binary']
    # str.split() without an argument drops empty tokens, so an empty or
    # multi-space 'args' string cannot inject empty argv entries.
    os.execvp(binary, [binary] + thisApp['args'].split())
from __future__ import absolute_import
import os, glob, sys

sys.path[:0] = [os.path.dirname(os.path.realpath(__file__)) + '/..']

from bup import compat, options, path

optspec = """
bup help <command>
"""
o = options.Options(optspec)
opt, flags, extra = o.parse(compat.argv[1:])

if len(extra) == 0:
    # the wrapper program provides the default usage string
    os.execvp(path.exe(), ['bup'])
elif len(extra) == 1:
    docname = (extra[0] == 'bup' and 'bup' or ('bup-%s' % extra[0]))
    manpath = os.path.join(path.exedir(),
                           '../../Documentation/' + docname + '.[1-9]')
    g = glob.glob(manpath)
    try:
        if g:
            os.execvp('man', ['man', '-l', g[0]])
        else:
            os.execvp('man', ['man', docname])
    except OSError as e:
        sys.stderr.write('Unable to run man command: %s\n' % e)
        sys.exit(1)
else:
    o.fatal("exactly one command name expected")
def run(self, cmdline):
    assert hasattr(os, 'fork')
    cmdline = cmdline.split()
    if os.fork() == 0:
        os.execvp(pypath, [pyfile] + cmdline)
def app_child():
    # "./main" contains a slash, so execvp skips the PATH lookup; the child
    # is started with a single empty string as its argv[0].
    os.execvp("./main", [""])