def mygetstatusoutput(self, cmd):
    status, output = E.getstatusoutput(cmd)
    if os.WIFEXITED(status):
        return (os.WEXITSTATUS(status), output)
    else:
        return (status, output)
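# Context sketch (not from the source): getstatusoutput() historically returned
# a raw wait status, in which the exit code lives in the high byte; WIFEXITED /
# WEXITSTATUS decode it portably. For example, a raw status of 256 corresponds
# to a child that exited with code 1.
import os

raw = 256                      # what a child exiting with code 1 looks like
assert os.WIFEXITED(raw)       # low bits are 0 => normal exit, not a signal
assert os.WEXITSTATUS(raw) == 1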
                  console_port, deployment_type, rhsm_user, rhsm_password,
                  rhsm_pool, containerized, node_type, iam_role, infra_elb_name,
                  create_key, create_vpc, route53_hosted_rootzone, existing_stack,
                  playbook)

    if verbose > 0:
        command += " -" + "".join(['v'] * verbose)

    click.echo('We are running: %s' % command)
    status = os.system(command)
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        return os.WEXITSTATUS(status)

if __name__ == '__main__':
    # check for AWS access info
    if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
        print 'AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.'
        sys.exit(1)

    launch_refarch_env(auto_envvar_prefix='OSE_REFArch')
""" wait 处理僵尸 """ import os pid = os.fork() if pid < 0: print("error") elif pid == 0: print("chuld process", os.getpid()) os._exit(3) else: p, status = os.wait() #阻塞等待子进程退出 print("p:", p) #还原退出状态 print("status", os.WEXITSTATUS(status)) while True: pass
def launch_refarch_env(region=None,
                       stack_name=None,
                       ami=None,
                       no_confirm=False,
                       master_instance_type=None,
                       node_instance_type=None,
                       app_instance_type=None,
                       bastion_instance_type=None,
                       keypair=None,
                       create_key=None,
                       key_path=None,
                       create_vpc=None,
                       vpc_id=None,
                       private_subnet_id1=None,
                       private_subnet_id2=None,
                       private_subnet_id3=None,
                       public_subnet_id1=None,
                       public_subnet_id2=None,
                       public_subnet_id3=None,
                       byo_bastion=None,
                       bastion_sg=None,
                       public_hosted_zone=None,
                       app_dns_prefix=None,
                       deployment_type=None,
                       console_port=443,
                       rhsm_user=None,
                       rhsm_password=None,
                       rhsm_pool=None,
                       containerized=None,
                       s3_bucket_name=None,
                       s3_username=None,
                       verbose=0):

    # Need to prompt for the R53 zone:
    if public_hosted_zone is None:
        public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')

    if s3_bucket_name is None:
        s3_bucket_name = stack_name + '-ocp-registry-' + public_hosted_zone.split('.')[0]

    if s3_username is None:
        s3_username = stack_name + '-s3-openshift-user'

    # Create ssh key pair in AWS if none is specified
    if create_key in 'yes' and key_path in 'no':
        key_path = click.prompt('Specify path for ssh public key')
        keypair = click.prompt('Specify a name for the keypair')

    # If no keypair is specified, fail:
    if keypair is None and create_key in 'no':
        click.echo('A SSH keypair must be specified or created')
        sys.exit(1)

    # Name the keypair if a path is defined
    if keypair is None and create_key in 'yes':
        keypair = click.prompt('Specify a name for the keypair')

    # If no VPC is defined prompt:
    if create_vpc in 'no' and vpc_id is None:
        vpc_id = click.prompt('Specify the VPC ID')

    # If no subnets are defined prompt:
    if create_vpc in 'no' and private_subnet_id1 is None:
        private_subnet_id1 = click.prompt('Specify the first Private subnet within the existing VPC')
        private_subnet_id2 = click.prompt('Specify the second Private subnet within the existing VPC')
        private_subnet_id3 = click.prompt('Specify the third Private subnet within the existing VPC')
        public_subnet_id1 = click.prompt('Specify the first Public subnet within the existing VPC')
        public_subnet_id2 = click.prompt('Specify the second Public subnet within the existing VPC')
        public_subnet_id3 = click.prompt('Specify the third Public subnet within the existing VPC')

    # Prompt for Bastion SG if byo-bastion specified
    if byo_bastion in 'yes' and bastion_sg in '/dev/null':
        bastion_sg = click.prompt('Specify the Bastion Security group (example: sg-4afdd24)')

    # If the user already provided values, don't bother asking again
    if deployment_type in ['openshift-enterprise'] and rhsm_user is None:
        rhsm_user = click.prompt("RHSM username?")
    if deployment_type in ['openshift-enterprise'] and rhsm_password is None:
        rhsm_password = click.prompt("RHSM password?", hide_input=True)
    if deployment_type in ['openshift-enterprise'] and rhsm_pool is None:
        rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name?")

    # Calculate various DNS values
    wildcard_zone = "%s.%s" % (app_dns_prefix, public_hosted_zone)

    # Display information to the user about their choices
    click.echo('Configured values:')
    click.echo('\tstack_name: %s' % stack_name)
    click.echo('\tami: %s' % ami)
    click.echo('\tregion: %s' % region)
    click.echo('\tmaster_instance_type: %s' % master_instance_type)
    click.echo('\tnode_instance_type: %s' % node_instance_type)
    click.echo('\tapp_instance_type: %s' % app_instance_type)
    click.echo('\tbastion_instance_type: %s' % bastion_instance_type)
    click.echo('\tkeypair: %s' % keypair)
    click.echo('\tcreate_key: %s' % create_key)
    click.echo('\tkey_path: %s' % key_path)
    click.echo('\tcreate_vpc: %s' % create_vpc)
    click.echo('\tvpc_id: %s' % vpc_id)
    click.echo('\tprivate_subnet_id1: %s' % private_subnet_id1)
    click.echo('\tprivate_subnet_id2: %s' % private_subnet_id2)
    click.echo('\tprivate_subnet_id3: %s' % private_subnet_id3)
    click.echo('\tpublic_subnet_id1: %s' % public_subnet_id1)
    click.echo('\tpublic_subnet_id2: %s' % public_subnet_id2)
    click.echo('\tpublic_subnet_id3: %s' % public_subnet_id3)
    click.echo('\tbyo_bastion: %s' % byo_bastion)
    click.echo('\tbastion_sg: %s' % bastion_sg)
    click.echo('\tconsole port: %s' % console_port)
    click.echo('\tdeployment_type: %s' % deployment_type)
    click.echo('\tpublic_hosted_zone: %s' % public_hosted_zone)
    click.echo('\tapp_dns_prefix: %s' % app_dns_prefix)
    click.echo('\tapps_dns: %s' % wildcard_zone)
    click.echo('\trhsm_user: %s' % rhsm_user)
    click.echo('\trhsm_password: *******')
    click.echo('\trhsm_pool: %s' % rhsm_pool)
    click.echo('\tcontainerized: %s' % containerized)
    click.echo('\ts3_bucket_name: %s' % s3_bucket_name)
    click.echo('\ts3_username: %s' % s3_username)
    click.echo("")

    if not no_confirm:
        click.confirm('Continue using these values?', abort=True)

    playbooks = ['playbooks/infrastructure.yaml', 'playbooks/openshift-install.yaml']

    for playbook in playbooks:

        # hide cache output unless in verbose mode
        devnull = '> /dev/null'
        if verbose > 0:
            devnull = ''

        # refresh the inventory cache to prevent stale hosts from
        # interfering with re-running
        command = 'inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull)
        os.system(command)

        # remove any cached facts to prevent stale data during a re-run
        command = 'rm -rf .ansible/cached_facts'
        os.system(command)

        command = 'ansible-playbook -i inventory/aws/hosts -e \'region=%s \
            stack_name=%s \
            ami=%s \
            keypair=%s \
            create_key=%s \
            key_path=%s \
            create_vpc=%s \
            vpc_id=%s \
            private_subnet_id1=%s \
            private_subnet_id2=%s \
            private_subnet_id3=%s \
            public_subnet_id1=%s \
            public_subnet_id2=%s \
            public_subnet_id3=%s \
            byo_bastion=%s \
            bastion_sg=%s \
            master_instance_type=%s \
            node_instance_type=%s \
            app_instance_type=%s \
            bastion_instance_type=%s \
            public_hosted_zone=%s \
            wildcard_zone=%s \
            console_port=%s \
            deployment_type=%s \
            rhsm_user=%s \
            rhsm_password=%s \
            rhsm_pool=%s \
            containerized=%s \
            s3_bucket_name=%s \
            s3_username=%s \' %s' % (region, stack_name, ami, keypair,
                                     create_key, key_path, create_vpc, vpc_id,
                                     private_subnet_id1, private_subnet_id2,
                                     private_subnet_id3, public_subnet_id1,
                                     public_subnet_id2, public_subnet_id3,
                                     byo_bastion, bastion_sg,
                                     master_instance_type, node_instance_type,
                                     app_instance_type, bastion_instance_type,
                                     public_hosted_zone, wildcard_zone,
                                     console_port, deployment_type, rhsm_user,
                                     rhsm_password, rhsm_pool, containerized,
                                     s3_bucket_name, s3_username, playbook)

        if verbose > 0:
            command += " -" + "".join(['v'] * verbose)

        click.echo('We are running: %s' % command)
        status = os.system(command)
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            sys.exit(os.WEXITSTATUS(status))
def RunProgram(self, program, arguments, context, result):
    """Run the 'program'.

    'program' -- The path to the program to run.

    'arguments' -- A list of the arguments to the program.  This
    list must contain a first argument corresponding to 'argv[0]'.

    'context' -- A 'Context' giving run-time parameters to the test.

    'result' -- A 'Result' object.  The outcome will be 'Result.PASS'
    when this method is called.  The 'result' may be modified by this
    method to indicate outcomes other than 'Result.PASS' or to add
    annotations."""

    # Construct the environment.
    environment = self.MakeEnvironment(context)

    # Create the executable.
    if self.timeout >= 0:
        timeout = self.timeout
    else:
        # If no timeout was specified, we will run this process in a
        # separate process group and kill the entire process group
        # when the child is done executing.  That means that
        # orphaned child processes created by the test will be
        # cleaned up.
        timeout = -2
    e = qm.executable.Filter(self.stdin, timeout)

    # Run it.
    # print arguments
    # print " path " + program
    exit_status = e.Run(arguments, environment, path=program)

    # Get the output generated by the program regardless of how the
    # program finished.
    stdout = e.stdout
    stderr = e.stderr

    # Record the results.
    # Stdout is too big; we need to just discard it, or save it as
    # stdout.out in the output directory.
    result["ExecTest.stdout"] = result.Quote(stdout)
    result["ExecTest.stderr"] = result.Quote(stderr)

    if not re.search(r'End run of OMNeT', e.stdout) and not re.search(
            r'Calling finish', e.stdout):
        return result.Fail("Simulation did not end properly")

    # If the process terminated normally, check the outputs.
    if sys.platform == "win32" or os.WIFEXITED(exit_status):
        # There are no causes of failure yet.
        causes = []
        # The target program terminated normally.  Extract the
        # exit code, if this test checks it.
        if self.exit_code is None:
            exit_code = None
        elif sys.platform == "win32":
            exit_code = exit_status
        else:
            exit_code = os.WEXITSTATUS(exit_status)
##         result["ExecTest.exit_code"] = str(exit_code)
##         # Check to see if the exit code matches.
##         if exit_code != self.exit_code:
##             causes.append("exit_code")
##             result["ExecTest.expected_exit_code"] = str(self.exit_code)
##         # Check to see if the standard output matches.
##         if not self.__CompareText(stdout, self.stdout):
##             causes.append("standard output")
##             result["ExecTest.expected_stdout"] = result.Quote(self.stdout)
##         # Check to see that the standard error matches.
##         if not self.__CompareText(stderr, self.stderr):
##             causes.append("standard error")
##             result["ExecTest.expected_stderr"] = result.Quote(self.stderr)
##         # If anything went wrong, the test failed.
##         if causes:
##             result.Fail("Unexpected %s." % string.join(causes, ", "))
    elif os.WIFSIGNALED(exit_status):
        # The target program terminated with a signal.  Construe
        # that as a test failure.
        signal_number = str(os.WTERMSIG(exit_status))
##         result.Fail("Program terminated by signal.")
        result["ExecTest.signal_number"] = signal_number
        # Get the output generated by the program.
    elif os.WIFSTOPPED(exit_status):
        # The target program was stopped.  Construe that as a
        # test failure.
        signal_number = str(os.WSTOPSIG(exit_status))
        result.Fail("Program stopped by signal.")
        result["ExecTest.signal_number"] = signal_number
    else:
        # The target program terminated abnormally in some other
        # manner.  (This shouldn't normally happen...)
        result.Fail("Program did not terminate normally.")
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD: sigchld_handler,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
    }
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
            selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            if os.WIFSIGNALED(sts):
                                returncode = -os.WTERMSIG(sts)
                            else:
                                if not os.WIFEXITED(sts):
                                    raise AssertionError(
                                        "Child {0:n} status is {1:n}".format(
                                            pid, sts))
                                returncode = os.WEXITSTATUS(sts)
                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:
                        # XXX Thing that changed - be tolerant of socket disconnects
                        try:
                            # Receive fds from client
                            fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        except EOFError:
                            # broken socket due to reconnection on client-side
                            continue
                        # XXX Thing that changed - be tolerant of socket disconnects

                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds,
                                                  unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                if e.errno != errno.ECONNABORTED:
                    raise
def fork_processes(num_processes, max_restarts=100):
    """Starts multiple worker processes.

    If ``num_processes`` is None or <= 0, we detect the number of cores
    available on this machine and fork that number of child
    processes. If ``num_processes`` is given and > 0, we fork that
    specific number of sub-processes.

    Since we use processes and not threads, there is no shared memory
    between any server code.

    Note that multiple processes are not compatible with the autoreload
    module (or the ``autoreload=True`` option to `tornado.web.Application`
    which defaults to True when ``debug=True``).
    When using multiple processes, no IOLoops can be created or
    referenced until after the call to ``fork_processes``.

    In each child process, ``fork_processes`` returns its *task id*, a
    number between 0 and ``num_processes``.  Processes that exit
    abnormally (due to a signal or non-zero exit status) are restarted
    with the same id (up to ``max_restarts`` times).  In the parent
    process, ``fork_processes`` returns None if all child processes
    have exited normally, but will otherwise only exit by throwing an
    exception.
    """
    global _task_id
    assert _task_id is None
    if num_processes is None or num_processes <= 0:
        num_processes = cpu_count()
    if ioloop.IOLoop.initialized():
        raise RuntimeError("Cannot run in multiple processes: IOLoop instance "
                           "has already been initialized. You cannot call "
                           "IOLoop.instance() before calling start_processes()")
    gen_log.info("Starting %d processes", num_processes)
    children = {}

    def start_child(i):
        pid = os.fork()
        if pid == 0:
            # child process
            _reseed_random()
            global _task_id
            _task_id = i
            return i
        else:
            children[pid] = i
            return None

    for i in range(num_processes):
        id = start_child(i)
        if id is not None:
            return id
    num_restarts = 0
    while children:
        try:
            pid, status = os.wait()
        except OSError as e:
            if errno_from_exception(e) == errno.EINTR:
                continue
            raise
        if pid not in children:
            continue
        id = children.pop(pid)
        if os.WIFSIGNALED(status):
            gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
                            id, pid, os.WTERMSIG(status))
        elif os.WEXITSTATUS(status) != 0:
            gen_log.warning("child %d (pid %d) exited with status %d, restarting",
                            id, pid, os.WEXITSTATUS(status))
        else:
            gen_log.info("child %d (pid %d) exited normally", id, pid)
            continue
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        new_id = start_child(id)
        if new_id is not None:
            return new_id
    # All child processes exited cleanly, so exit the master process
    # instead of just returning to right after the call to
    # fork_processes (which will probably just start up another IOLoop
    # unless the caller checks the return value).
    sys.exit(0)
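# A minimal usage sketch (not from the source) of the standard Tornado
# multi-process pattern the docstring describes: bind the listening socket
# before forking, then create the server and IOLoop only in the children.
# The port number and the empty route table are hypothetical.
from tornado import httpserver, ioloop, netutil, process, web

app = web.Application([])               # hypothetical, empty route table
sockets = netutil.bind_sockets(8888)    # bind before forking
task_id = process.fork_processes(0)     # 0 => one worker per CPU core
server = httpserver.HTTPServer(app)     # runs in each child process
server.add_sockets(sockets)
ioloop.IOLoop.current().start()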
def launch(cmd=None, operate=None, status=None, type=None, clusterName=None, verbose=0):
    pat = re.compile("cluster_name")
    with open('config', 'rt') as configfile:
        for line in configfile:
            if pat.search(line) != None:
                clusterName = str(line.split("=")[1]).replace("\"", "").replace("\n", "")

    if verbose > 0:
        verbosity = '-' + 'v' * verbose
    else:
        verbosity = ''

    # Construct ansible command
    if cmd == 'init':
        status = os.system(
            'ansible-playbook %s -i config prep/ansible/tasks/generate_config_files.yml --flush-cache; \
            cd prep; \
            ansible-playbook %s -i ansible/inventory ansible/tasks/cloud_init.yml --flush-cache'
            % (verbosity, verbosity)
        )

    if cmd == 'prep':
        if operate == 'apply':
            status = os.system(
                'cd prep; \
                ansible-playbook %s -i ansible/inventory ansible/playbooks/prep.yml -e @ansible/defaults/main.yml --flush-cache; \
                terraform init ; \
                terraform get ; \
                terraform apply -auto-approve' % (verbosity)
            )
        if operate == 'dtr':
            status = os.system(
                'cd prep; \
                terraform destroy -auto-approve'
            )

    if cmd == 'ocp':
        if operate == 'apply':
            status = os.system(
                'cd ocp4; \
                terraform init ; \
                terraform get ; \
                terraform apply -auto-approve'
            )
        if operate == 'dtr':
            status = os.system(
                'cd ocp4; \
                terraform destroy -auto-approve'
            )

    if cmd == 'oneshot':
        status = os.system(
            'ansible-playbook %s -i config prep/ansible/tasks/generate_config_files.yml --flush-cache; \
            cd prep; \
            ansible-playbook %s -i ansible/inventory ansible/tasks/cloud_init.yml --flush-cache; \
            ansible-playbook %s -i ansible/inventory ansible/playbooks/prep.yml -e @ansible/defaults/main.yml --flush-cache; \
            terraform init ; terraform get ; terraform apply -auto-approve; \
            cd ../ocp4; terraform init ; terraform get ; terraform apply -auto-approve; \
            cd ../prep; sudo openshift-install --dir %s wait-for bootstrap-complete; \
            echo "Waiting 5 mins"; sleep 300; \
            oc --config %s/auth/kubeconfig patch configs.imageregistry.operator.openshift.io cluster --type merge --patch \'{"spec":{"storage":{"emptydir":{}}}}\'; \
            ansible-playbook %s -i ansible/inventory ansible/tasks/lb_rm_bootstrap.yml ; \
            sudo openshift-install --dir %s/ wait-for install-complete'
            % (verbosity, verbosity, verbosity, clusterName, clusterName, verbosity, clusterName)
        )

    if cmd == 'post':
        status = os.system(
            'cd ./prep; \
            oc --config %s/auth/kubeconfig patch configs.imageregistry.operator.openshift.io cluster --type merge --patch \'{"spec":{"storage":{"emptydir":{}}}}\'; \
            ansible-playbook %s -i ansible/inventory ansible/tasks/lb_rm_bootstrap.yml ; \
            sudo openshift-install --dir %s/ wait-for install-complete'
            % (clusterName, verbosity, clusterName)
        )

    if cmd == 'update':
        if type == 'inventory':
            status = os.system(
                'ansible-playbook %s -i config prep/ansible/tasks/generate_config_files.yml --flush-cache'
                % (verbosity)
            )
        if type == 'ocp':
            status = os.system(
                'ansible-playbook %s -i config prep/ansible/tasks/generate_config_files.yml --flush-cache; \
                cd prep; \
                ansible-playbook -i ansible/inventory ansible/tasks/ocp_vm_config.yml %s -e @ansible/defaults/main.yml'
                % (verbosity, verbosity)
            )
        if type == 'ocp_module':
            status = os.system(
                'cd prep; \
                ansible-playbook -i ansible/inventory ansible/tasks/ocp_module.yml %s -e @ansible/defaults/main.yml'
                % (verbosity,)
            )

    if cmd == 'clean':
        status = os.system(
            'cd ocp4 ; terraform destroy -auto-approve ; \
            cd ../prep ; terraform destroy -auto-approve ; \
            ansible-playbook %s -i ../config ansible/tasks/clean.yml --flush-cache'
            % (verbosity)
        )

    # Exit appropriately
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
        sys.exit(os.WEXITSTATUS(status))
def main():
  EPILOG = """This script is not intended for use by end-users.  To configure
Chrome Remote Desktop, please install the app from the Chrome
Web Store: https://chrome.google.com/remotedesktop"""

  parser = optparse.OptionParser(
      usage="Usage: %prog [options] [ -- [ X server options ] ]",
      epilog=EPILOG)
  parser.add_option("-s", "--size", dest="size", action="append",
                    help="Dimensions of virtual desktop. This can be specified "
                    "multiple times to make multiple screen resolutions "
                    "available (if the Xvfb server supports this).")
  parser.add_option("-f", "--foreground", dest="foreground", default=False,
                    action="store_true",
                    help="Don't run as a background daemon.")
  parser.add_option("", "--start", dest="start", default=False,
                    action="store_true",
                    help="Start the host.")
  parser.add_option("-k", "--stop", dest="stop", default=False,
                    action="store_true",
                    help="Stop the daemon currently running.")
  parser.add_option("", "--get-status", dest="get_status", default=False,
                    action="store_true",
                    help="Prints host status")
  parser.add_option("", "--check-running", dest="check_running", default=False,
                    action="store_true",
                    help="Return 0 if the daemon is running, or 1 otherwise.")
  parser.add_option("", "--config", dest="config", action="store",
                    help="Use the specified configuration file.")
  parser.add_option("", "--reload", dest="reload", default=False,
                    action="store_true",
                    help="Signal currently running host to reload the config.")
  parser.add_option("", "--add-user", dest="add_user", default=False,
                    action="store_true",
                    help="Add current user to the chrome-remote-desktop group.")
  parser.add_option("", "--host-version", dest="host_version", default=False,
                    action="store_true",
                    help="Prints version of the host.")
  (options, args) = parser.parse_args()

  # Determine the filename of the host configuration and PID files.
  if not options.config:
    options.config = os.path.join(CONFIG_DIR, "host#%s.json" % g_host_hash)

  # Check for a modal command-line option (start, stop, etc.)
  if options.get_status:
    proc = get_daemon_proc()
    if proc is not None:
      print "STARTED"
    elif is_supported_platform():
      print "STOPPED"
    else:
      print "NOT_IMPLEMENTED"
    return 0

  # TODO(sergeyu): Remove --check-running once NPAPI plugin and NM host are
  # updated to always use get-status flag instead.
  if options.check_running:
    proc = get_daemon_proc()
    return 1 if proc is None else 0

  if options.stop:
    proc = get_daemon_proc()
    if proc is None:
      print "The daemon is not currently running"
    else:
      print "Killing process %s" % proc.pid
      proc.terminate()
      try:
        proc.wait(timeout=30)
      except psutil.TimeoutExpired:
        print "Timed out trying to kill daemon process"
        return 1
    return 0

  if options.reload:
    proc = get_daemon_proc()
    if proc is None:
      return 1
    proc.send_signal(signal.SIGHUP)
    return 0

  if options.add_user:
    user = getpass.getuser()
    try:
      if user in grp.getgrnam(CHROME_REMOTING_GROUP_NAME).gr_mem:
        logging.info("User '%s' is already a member of '%s'." %
                     (user, CHROME_REMOTING_GROUP_NAME))
        return 0
    except KeyError:
      logging.info("Group '%s' not found." % CHROME_REMOTING_GROUP_NAME)

    if os.getenv("DISPLAY"):
      sudo_command = "gksudo --description \"Chrome Remote Desktop\""
    else:
      sudo_command = "sudo"
    command = ("sudo -k && exec %(sudo)s -- sh -c "
               "\"groupadd -f %(group)s && gpasswd --add %(user)s %(group)s\"" %
               { 'group': CHROME_REMOTING_GROUP_NAME,
                 'user': user,
                 'sudo': sudo_command })
    os.execv("/bin/sh", ["/bin/sh", "-c", command])
    return 1

  if options.host_version:
    # TODO(sergeyu): Also check RPM package version once we add RPM package.
    # os.system() returns a raw wait status; shifting right by 8 extracts the
    # exit code (equivalent to os.WEXITSTATUS for a normal exit).
    return os.system(locate_executable(HOST_BINARY_NAME) + " --version") >> 8

  if not options.start:
    # If no modal command-line options specified, print an error and exit.
    print >> sys.stderr, EPILOG
    return 1

  # If a RANDR-supporting Xvfb is not available, limit the default size to
  # something more sensible.
  if get_randr_supporting_x_server():
    default_sizes = DEFAULT_SIZES
  else:
    default_sizes = DEFAULT_SIZE_NO_RANDR

  # Collate the list of sizes that XRANDR should support.
  if not options.size:
    if os.environ.has_key(DEFAULT_SIZES_ENV_VAR):
      default_sizes = os.environ[DEFAULT_SIZES_ENV_VAR]
    options.size = default_sizes.split(",")

  sizes = []
  for size in options.size:
    size_components = size.split("x")
    if len(size_components) != 2:
      parser.error("Incorrect size format '%s', should be WIDTHxHEIGHT" % size)

    try:
      width = int(size_components[0])
      height = int(size_components[1])

      # Enforce minimum desktop size, as a sanity-check.  The limit of 100 will
      # detect typos of 2 instead of 3 digits.
      if width < 100 or height < 100:
        raise ValueError
    except ValueError:
      parser.error("Width and height should be 100 pixels or greater")

    sizes.append((width, height))

  # Register an exit handler to clean up session process and the PID file.
  atexit.register(cleanup)

  # Load the initial host configuration.
  host_config = Config(options.config)
  try:
    host_config.load()
  except (IOError, ValueError) as e:
    print >> sys.stderr, "Failed to load config: " + str(e)
    return 1

  # Register handler to re-load the configuration in response to signals.
  for s in [signal.SIGHUP, signal.SIGINT, signal.SIGTERM]:
    signal.signal(s, SignalHandler(host_config))

  # Verify that the initial host configuration has the necessary fields.
  auth = Authentication()
  auth_config_valid = auth.copy_from(host_config)
  host = Host()
  host_config_valid = host.copy_from(host_config)
  if not host_config_valid or not auth_config_valid:
    logging.error("Failed to load host configuration.")
    return 1

  # Determine whether a desktop is already active for the specified host
  # configuration.
  proc = get_daemon_proc()
  if proc is not None:
    # Debian policy requires that services should "start" cleanly and return 0
    # if they are already running.
    print "Service already running."
    return 0

  # Detach a separate "daemon" process to run the session, unless specifically
  # requested to run in the foreground.
  if not options.foreground:
    daemonize()

  logging.info("Using host_id: " + host.host_id)

  desktop = Desktop(sizes)

  # Keep track of the number of consecutive failures of any child process to
  # run for longer than a set period of time. The script will exit after a
  # threshold is exceeded.
  # There is no point in tracking the X session process separately, since it is
  # launched at (roughly) the same time as the X server, and the termination of
  # one of these triggers the termination of the other.
  x_server_inhibitor = RelaunchInhibitor("X server")
  host_inhibitor = RelaunchInhibitor("host")
  all_inhibitors = [x_server_inhibitor, host_inhibitor]

  # Don't allow relaunching the script on the first loop iteration.
  allow_relaunch_self = False

  while True:
    # Set the backoff interval and exit if a process failed too many times.
    backoff_time = SHORT_BACKOFF_TIME
    for inhibitor in all_inhibitors:
      if inhibitor.failures >= MAX_LAUNCH_FAILURES:
        logging.error("Too many launch failures of '%s', exiting."
                      % inhibitor.label)
        return 1
      elif inhibitor.failures >= SHORT_BACKOFF_THRESHOLD:
        backoff_time = LONG_BACKOFF_TIME

    relaunch_times = []

    # If the session process or X server stops running (e.g. because the user
    # logged out), kill the other. This will trigger the next conditional block
    # as soon as os.waitpid() reaps its exit-code.
    if desktop.session_proc is None and desktop.x_proc is not None:
      logging.info("Terminating X server")
      desktop.x_proc.terminate()
    elif desktop.x_proc is None and desktop.session_proc is not None:
      logging.info("Terminating X session")
      desktop.session_proc.terminate()
    elif desktop.x_proc is None and desktop.session_proc is None:
      # Both processes have terminated.
      if (allow_relaunch_self and x_server_inhibitor.failures == 0 and
          host_inhibitor.failures == 0):
        # Since the user's desktop is already gone at this point, there's no
        # state to lose and now is a good time to pick up any updates to this
        # script that might have been installed.
        logging.info("Relaunching self")
        relaunch_self()
      else:
        # If there is a non-zero |failures| count, restarting the whole script
        # would lose this information, so just launch the session as normal.
        if x_server_inhibitor.is_inhibited():
          logging.info("Waiting before launching X server")
          relaunch_times.append(x_server_inhibitor.earliest_relaunch_time)
        else:
          logging.info("Launching X server and X session.")
          desktop.launch_session(args)
          x_server_inhibitor.record_started(MINIMUM_PROCESS_LIFETIME,
                                            backoff_time)
          allow_relaunch_self = True

    if desktop.host_proc is None:
      if host_inhibitor.is_inhibited():
        logging.info("Waiting before launching host process")
        relaunch_times.append(host_inhibitor.earliest_relaunch_time)
      else:
        logging.info("Launching host process")
        desktop.launch_host(host_config)
        host_inhibitor.record_started(MINIMUM_PROCESS_LIFETIME,
                                      backoff_time)

    deadline = min(relaunch_times) if relaunch_times else 0
    pid, status = waitpid_handle_exceptions(-1, deadline)
    if pid == 0:
      continue

    logging.info("wait() returned (%s,%s)" % (pid, status))

    # When a process has terminated, and we've reaped its exit-code, any Popen
    # instance for that process is no longer valid. Reset any affected instance
    # to None.
    if desktop.x_proc is not None and pid == desktop.x_proc.pid:
      logging.info("X server process terminated")
      desktop.x_proc = None
      x_server_inhibitor.record_stopped()

    if desktop.session_proc is not None and pid == desktop.session_proc.pid:
      logging.info("Session process terminated")
      desktop.session_proc = None

    if desktop.host_proc is not None and pid == desktop.host_proc.pid:
      logging.info("Host process terminated")
      desktop.host_proc = None
      desktop.host_ready = False
      host_inhibitor.record_stopped()

      # These exit-codes must match the ones used by the host.
      # See remoting/host/host_error_codes.h.
      # Delete the host or auth configuration depending on the returned error
      # code, so the next time this script is run, a new configuration
      # will be created and registered.
      if os.WIFEXITED(status):
        if os.WEXITSTATUS(status) == 100:
          logging.info("Host configuration is invalid - exiting.")
          return 0
        elif os.WEXITSTATUS(status) == 101:
          logging.info("Host ID has been deleted - exiting.")
          host_config.clear()
          host_config.save_and_log_errors()
          return 0
        elif os.WEXITSTATUS(status) == 102:
          logging.info("OAuth credentials are invalid - exiting.")
          return 0
        elif os.WEXITSTATUS(status) == 103:
          logging.info("Host domain is blocked by policy - exiting.")
          return 0
        # Nothing to do for Mac-only status 104 (login screen unsupported)
        elif os.WEXITSTATUS(status) == 105:
          logging.info("Username is blocked by policy - exiting.")
          return 0
        else:
          logging.info("Host exited with status %s." %
                       os.WEXITSTATUS(status))
      elif os.WIFSIGNALED(status):
        logging.info("Host terminated by signal %s." % os.WTERMSIG(status))
os.putenv('TERM', 'dumb')

password = vals_dict['dbpassword']
dbname = vals_dict['dbname']
webdbname = vals_dict['webdbname']
dbuser = vals_dict['dbuser']

commands = []
commands.append('sudo -E apt-get -y update')
commands.append('sudo -E apt-get -y -q install mysql-server')
commands.append('sudo -E mysqladmin -u root password %s' % (password))
commands.append('sudo -E mysqladmin --password=%s create %s' % (password, dbname))
commands.append('sudo -E mysqladmin --password=%s create %s' % (password, webdbname))
commands.append("sudo -E mysql --password=%s -e \"GRANT INDEX, Select, Insert, Update, Create, Delete, Alter ON *.* TO '%s'@'%%' IDENTIFIED BY '%s';\"" % (password, dbuser, password))
commands.append("sudo -E sed -i 's/bind-address.*/bind-address = 0.0.0.0/' /etc/mysql/my.cnf")
commands.append("sudo -E service mysql restart")

for cmd in commands:
    print cmd
    rc = os.system(cmd)
    if rc != 0:
        if os.WIFEXITED(rc):
            rc = os.WEXITSTATUS(rc)
            print "ERROR! %d" % (rc)
        else:
            print "UNKNOWN EXIT! %d" % (rc)
        sys.exit(rc)

print "SUCCESS"
sys.exit(0)
def wait_pid(pid, timeout=None, proc_name=None):
    """Wait for process with pid 'pid' to terminate and return its
    exit status code as an integer.

    If pid is not a children of os.getpid() (current process) just
    waits until the process disappears and return None.

    If pid does not exist at all return None immediately.

    Raise TimeoutExpired on timeout expired.
    """
    def check_timeout(delay):
        if timeout is not None:
            if timer() >= stop_at:
                raise TimeoutExpired(timeout, pid=pid, name=proc_name)
        time.sleep(delay)
        return min(delay * 2, 0.04)

    timer = getattr(time, 'monotonic', time.time)
    if timeout is not None:
        def waitcall():
            return os.waitpid(pid, os.WNOHANG)
        stop_at = timer() + timeout
    else:
        def waitcall():
            return os.waitpid(pid, 0)

    delay = 0.0001
    while True:
        try:
            retpid, status = waitcall()
        except InterruptedError:
            delay = check_timeout(delay)
        except ChildProcessError:
            # This has two meanings:
            # - pid is not a child of os.getpid() in which case
            #   we keep polling until it's gone
            # - pid never existed in the first place
            # In both cases we'll eventually return None as we
            # can't determine its exit status code.
            while True:
                if pid_exists(pid):
                    delay = check_timeout(delay)
                else:
                    return
        else:
            if retpid == 0:
                # WNOHANG was used, pid is still running
                delay = check_timeout(delay)
                continue
            # process exited due to a signal; return the integer of
            # that signal
            if os.WIFSIGNALED(status):
                return -os.WTERMSIG(status)
            # process exited using exit(2) system call; return the
            # integer exit(2) system call has been called with
            elif os.WIFEXITED(status):
                return os.WEXITSTATUS(status)
            else:
                # should never happen
                raise ValueError("unknown process exit status %r" % status)
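# A minimal usage sketch (not from the source): spawn a child, then reap it
# with wait_pid, treating a negative return value as "killed by that signal"
# per the function's contract above.
import os

pid = os.fork()
if pid == 0:
    os._exit(7)
ret = wait_pid(pid, timeout=5.0)
if ret is None:
    print("not our child / already gone")
elif ret < 0:
    print("killed by signal", -ret)
else:
    print("exit code", ret)   # prints: exit code 7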
def wait(procs, winfh, losefh, winners, losers):
    print "."
    sys.stdout.flush()
    try:
        (pid, stat) = os.wait()
    except OSError, err:
        print >> sys.stderr, "This shouldn't happen."
        print >> sys.stderr, err
        return  # was a bare 'next', which is a no-op in Python; bail out instead
    if pid:
        [tup] = [tup for tup in procs if tup[0].pid == pid]
        (proc, logfh, distro, distro_version, arch, spec) = tup
        procs.remove(tup)
        name = "%s %s %s" % (distro, distro_version, arch)
        if os.WIFEXITED(stat):
            if os.WEXITSTATUS(stat) == 0:
                win(name, logfh, winfh)
                winners.append(name)
            else:
                lose(name, logfh, losefh)
                losers.append(name)
        if os.WIFSIGNALED(stat):
            lose(name, logfh, losefh)
            losers.append(name)

def __main__():
    # FIXME: getopt & --help.
    print " ".join(sys.argv)
    branches = sys.argv[-1]
    makedistopts = sys.argv[1:-1]
def _status(self):
    if os.WIFEXITED(self.status):
        return os.WEXITSTATUS(self.status)
    else:
        return -1
def _spawn_posix(cmd, search_path=1, verbose=0, dry_run=0):
    log.info(' '.join(cmd))
    if dry_run:
        return
    # TODO: Begin Truffle change
    status = os.system(' '.join(cmd))
    if status != 0:
        raise DistutilsExecError(
            "command %r failed with exit status %d" % (cmd, status))
    return
    # End of Truffle change
    executable = cmd[0]
    exec_fn = search_path and os.execvp or os.execv
    env = None
    if sys.platform == 'darwin':
        global _cfg_target, _cfg_target_split
        if _cfg_target is None:
            _cfg_target = sysconfig.get_config_var(
                'MACOSX_DEPLOYMENT_TARGET') or ''
            if _cfg_target:
                _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
        if _cfg_target:
            # ensure that the deployment target of build process is not less
            # than that used when the interpreter was built. This ensures
            # extension modules are built with correct compatibility values
            cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
            if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
                my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
                          'now "%s" but "%s" during configure'
                          % (cur_target, _cfg_target))
                raise DistutilsPlatformError(my_msg)
            env = dict(os.environ,
                       MACOSX_DEPLOYMENT_TARGET=cur_target)
            exec_fn = search_path and os.execvpe or os.execve
    pid = os.fork()
    if pid == 0:  # in the child
        try:
            if env is None:
                exec_fn(executable, cmd)
            else:
                exec_fn(executable, cmd, env)
        except OSError as e:
            if not DEBUG:
                cmd = executable
            sys.stderr.write("unable to execute %r: %s\n"
                             % (cmd, e.strerror))
            os._exit(1)

        if not DEBUG:
            cmd = executable
        sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
        os._exit(1)
    else:  # in the parent
        # Loop until the child either exits or is terminated by a signal
        # (ie. keep waiting if it's merely stopped)
        while True:
            try:
                pid, status = os.waitpid(pid, 0)
            except OSError as exc:
                if not DEBUG:
                    cmd = executable
                raise DistutilsExecError(
                    "command %r failed: %s" % (cmd, exc.args[-1]))
            if os.WIFSIGNALED(status):
                if not DEBUG:
                    cmd = executable
                raise DistutilsExecError(
                    "command %r terminated by signal %d"
                    % (cmd, os.WTERMSIG(status)))
            elif os.WIFEXITED(status):
                exit_status = os.WEXITSTATUS(status)
                if exit_status == 0:
                    return  # hey, it succeeded!
                else:
                    if not DEBUG:
                        cmd = executable
                    raise DistutilsExecError(
                        "command %r failed with exit status %d"
                        % (cmd, exit_status))
            elif os.WIFSTOPPED(status):
                continue
            else:
                if not DEBUG:
                    cmd = executable
                raise DistutilsExecError(
                    "unknown error executing %r: termination status %d"
                    % (cmd, status))
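# A brief usage sketch (hypothetical caller, not from the source): the command
# is a list whose first element is argv[0], and failure surfaces as a
# DistutilsExecError rather than a raw wait status. Assumes a POSIX system
# where the 'true' binary exists.
from distutils.errors import DistutilsExecError

try:
    _spawn_posix(['true'], search_path=1)   # 'true' exits with status 0
except DistutilsExecError as e:
    print('spawn failed:', e)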
def main():
    # TODO: Implement -o switch.

    global args

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)

    group.add_argument("-s", "--source", type=str,
                       help="local source file name. Solidity by default. "
                            "Use -b to process evm instead. Use stdin to read from stdin.")
    group.add_argument("-ru", "--remoteURL", type=str,
                       help="Get contract from remote URL. Solidity by default. "
                            "Use -b to process evm instead.",
                       dest="remote_URL")

    parser.add_argument("--version", action="version",
                        version="oyente version 0.2.5-Buona Vista")
    parser.add_argument("-b", "--bytecode",
                        help="read bytecode in source instead of solidity file.",
                        action="store_true")
    parser.add_argument("-j", "--json",
                        help="Redirect results to a json file.",
                        action="store_true")
    parser.add_argument("-e", "--evm",
                        help="Do not remove the .evm file.",
                        action="store_true")
    parser.add_argument("-p", "--paths",
                        help="Print path condition information.",
                        action="store_true")
    parser.add_argument("--error",
                        help="Enable exceptions and print output. Monsters here.",
                        action="store_true")
    parser.add_argument("-t", "--timeout", type=int,
                        help="Timeout for Z3 in ms.")
    parser.add_argument("-v", "--verbose",
                        help="Verbose output, print everything.",
                        action="store_true")
    parser.add_argument("-r", "--report",
                        help="Create .report file.",
                        action="store_true")
    parser.add_argument("-gb", "--globalblockchain",
                        help="Integrate with the global ethereum blockchain",
                        action="store_true")
    parser.add_argument("-dl", "--depthlimit",
                        help="Limit DFS depth",
                        action="store", dest="depth_limit", type=int)
    parser.add_argument("-gl", "--gaslimit",
                        help="Limit Gas",
                        action="store", dest="gas_limit", type=int)
    parser.add_argument("-st", "--state",
                        help="Get input state from state.json",
                        action="store_true")
    parser.add_argument("-ll", "--looplimit",
                        help="Limit number of loops",
                        action="store", dest="loop_limit", type=int)
    parser.add_argument("-w", "--web",
                        help="Run Oyente for web service",
                        action="store_true")
    parser.add_argument("-glt", "--global-timeout",
                        help="Timeout for symbolic execution",
                        action="store", dest="global_timeout", type=int)
    parser.add_argument("-a", "--assertion",
                        help="Check assertion failures.",
                        action="store_true")
    parser.add_argument("--debug",
                        help="Display debug information",
                        action="store_true")

    args = parser.parse_args()

    if args.timeout:
        global_params.TIMEOUT = args.timeout

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    global_params.PRINT_PATHS = 1 if args.paths else 0
    global_params.REPORT_MODE = 1 if args.report else 0
    global_params.IGNORE_EXCEPTIONS = 1 if args.error else 0
    global_params.USE_GLOBAL_BLOCKCHAIN = 1 if args.globalblockchain else 0
    global_params.INPUT_STATE = 1 if args.state else 0
    global_params.WEB = 1 if args.web else 0
    global_params.STORE_RESULT = 1 if args.json else 0
    global_params.CHECK_ASSERTIONS = 1 if args.assertion else 0
    global_params.DEBUG_MODE = 1 if args.debug else 0

    if args.depth_limit:
        global_params.DEPTH_LIMIT = args.depth_limit
    if args.gas_limit:
        global_params.GAS_LIMIT = args.gas_limit
    if args.loop_limit:
        global_params.LOOP_LIMIT = args.loop_limit
    if args.global_timeout and args.global_timeout < global_params.GLOBAL_TIMEOUT:
        global_params.GLOBAL_TIMEOUT = args.global_timeout

    if not has_dependencies_installed():
        return

    if args.remote_URL:
        r = requests.get(args.remote_URL)
        code = r.text
        filename = "remote_contract.evm" if args.bytecode else "remote_contract.sol"
        args.source = filename
        with open(filename, 'w') as f:
            f.write(code)

    if args.bytecode:
        processed_evm_file = args.source + '.1'
        disasm_file = args.source + '.disasm'
        with open(args.source) as f:
            evm = f.read()
        with open(processed_evm_file, 'w') as f:
            f.write(removeSwarmHash(evm))

        analyze(processed_evm_file, disasm_file)

        remove_temporary_file(disasm_file)
        remove_temporary_file(processed_evm_file)

        if global_params.UNIT_TEST == 2 or global_params.UNIT_TEST == 3:
            exit_code = os.WEXITSTATUS(cmd)
            if exit_code != 0:
                exit(exit_code)
    else:
        contracts = compileContracts(args.source)

        for cname, bin_str in contracts:
            logging.info("Contract %s:", cname)
            processed_evm_file = cname + '.evm'
            disasm_file = cname + '.evm.disasm'

            with open(processed_evm_file, 'w') as of:
                of.write(removeSwarmHash(bin_str))

            analyze(processed_evm_file, disasm_file, SourceMap(cname, args.source))

            if args.evm:
                with open(processed_evm_file, 'w') as of:
                    of.write(bin_str)

            remove_temporary_file(processed_evm_file)
            remove_temporary_file(disasm_file)
            remove_temporary_file(disasm_file + '.log')
__all__ = ['cpu']

import sys, re, types
import os
import commands
import warnings

def getoutput(cmd, successful_status=(0,), stacklevel=1):
    try:
        status, output = commands.getstatusoutput(cmd)
    except EnvironmentError, e:
        warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
        return False, output
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
        return True, output
    return False, output

def command_info(successful_status=(0,), stacklevel=1, **kw):
    info = {}
    for key in kw:
        ok, output = getoutput(kw[key],
                               successful_status=successful_status,
                               stacklevel=stacklevel + 1)
        if ok:
            info[key] = output.strip()
    return info
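# A short usage sketch (not from the source): each keyword names a field and
# maps it to a shell command; only commands that exit with a successful status
# appear in the returned dict. The commands shown are arbitrary examples.
info = command_info(uname='uname -m', kernel='uname -r')
print info.get('uname'), info.get('kernel')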
def runParser(fileName):
    # print('worker ID for filename: ' + fileName + ' is ', os.getpid())
    status = os.WEXITSTATUS(os.system("python3 " + parserFolderPath + fileName))
    # print("worker ID for " + fileName + " exited with status ", status)
    return (fileName, status)
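# A minimal sketch of how runParser might be fanned out across a process pool,
# as the worker-ID comments suggest. Assumptions: parserFolderPath is set
# elsewhere, and the listed parser script names are hypothetical.
from multiprocessing import Pool

if __name__ == '__main__':
    parser_files = ['parse_logs.py', 'parse_events.py']  # hypothetical names
    with Pool(processes=4) as pool:
        for fileName, status in pool.map(runParser, parser_files):
            print(fileName, 'exited with status', status)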
                lockFilePid = self.getPidFromLockFile()
                if not lockFilePid or lockFilePid != pid:
                    foundPid, status = os.waitpid(pid, os.WNOHANG)
                    if foundPid:
                        os._exit(1)
                    else:
                        time.sleep(.5)
                        timeSlept += 1
                else:
                    os._exit(0)
            os._exit(1)
        else:
            time.sleep(2)
            pid, status = os.waitpid(pid, 0)
            if os.WIFEXITED(status):
                rc = os.WEXITSTATUS(status)
                return rc
            else:
                self.error('process killed with signal %s' % os.WTERMSIG(status))
                return 1
    else:
        self.daemonize()
        return 0

def daemonize(self):
    '''Call this to execute the daemon'''
    self.writePidToLockFile()
    try:
        try:
            self.doWork()
def updatedJobWorker(self):
    """
    We use the parasol results to update the status of jobs, adding them
    to the list of updated jobs.

    Results have the following structure.. (thanks Mark D!)

    int status;          /* Job status - wait() return format. 0 is good. */
    char *host;          /* Machine job ran on. */
    char *jobId;         /* Job queuing system job ID */
    char *exe;           /* Job executable file (no path) */
    int usrTicks;        /* 'User' CPU time in ticks. */
    int sysTicks;        /* 'System' CPU time in ticks. */
    unsigned submitTime; /* Job submission time in seconds since 1/1/1970 */
    unsigned startTime;  /* Job start time in seconds since 1/1/1970 */
    unsigned endTime;    /* Job end time in seconds since 1/1/1970 */
    char *user;          /* User who ran job */
    char *errFile;       /* Location of stderr file on host */

    Plus you finally have the command name.
    """
    resultsFiles = set()
    resultsFileHandles = []
    try:
        while self.running:
            # Look for any new results files that have been created, and open them
            newResultsFiles = set(os.listdir(self.parasolResultsDir)).difference(resultsFiles)
            for newFile in newResultsFiles:
                newFilePath = os.path.join(self.parasolResultsDir, newFile)
                resultsFileHandles.append(open(newFilePath, 'r'))
                resultsFiles.add(newFile)
            for fileHandle in resultsFileHandles:
                while self.running:
                    line = fileHandle.readline()
                    if not line:
                        break
                    assert line[-1] == '\n'
                    (status, host, jobId, exe, usrTicks, sysTicks, submitTime,
                     startTime, endTime, user, errFile,
                     command) = line[:-1].split(None, 11)
                    status = int(status)
                    jobId = int(jobId)
                    if os.WIFEXITED(status):
                        status = os.WEXITSTATUS(status)
                    else:
                        status = -status
                    self.cpuUsageQueue.put(jobId)
                    startTime = int(startTime)
                    endTime = int(endTime)
                    if endTime == startTime:
                        # Both start and end time are integers, so to get sub-second
                        # accuracy we use the ticks reported by Parasol as an approximation.
                        # This isn't documented but what Parasol calls "ticks" is actually a
                        # hundredth of a second. Parasol does the unit conversion early on
                        # after a job finished. Search paraNode.c for ticksToHundreths. We
                        # also cheat a little by always reporting at least one hundredth of a
                        # second.
                        usrTicks = int(usrTicks)
                        sysTicks = int(sysTicks)
                        wallTime = float(max(1, usrTicks + sysTicks)) * 0.01
                    else:
                        wallTime = float(endTime - startTime)
                    self.updatedJobsQueue.put((jobId, status, wallTime))
            time.sleep(1)
    except:
        logger.warn("Error occurred while parsing parasol results files.")
        raise
    finally:
        for fileHandle in resultsFileHandles:
            fileHandle.close()
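# A hypothetical parasol results line (fields per the docstring above) showing
# how split(None, 11) carves it into exactly 12 fields, with everything after
# the 11th split kept together as the command. All values are made up.
sample = ("0 node01 42 myExe 12 3 1500000000 1500000001 1500000005 "
          "alice /tmp/err.log ./myExe --flag input.txt\n")
fields = sample[:-1].split(None, 11)
assert len(fields) == 12
status, jobId, command = int(fields[0]), int(fields[2]), fields[11]
assert command == "./myExe --flag input.txt"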
def exited(self) -> int:
    """returns the exit status, if child exited, else -1"""
    if os.WIFEXITED(self._status):
        return os.WEXITSTATUS(self._status)
    return -1
def get_output(self, body, headers=None, include_dirs=None,
               libraries=None, library_dirs=None,
               lang="c", use_tee=None):
    """Try to compile, link to an executable, and run a program
    built from 'body' and 'headers'. Returns the exit status code
    of the program and its output.
    """
    # 2008-11-16, RemoveMe
    warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
                  "Usage of get_output is deprecated: please do not \n"
                  "use it anymore, and avoid configuration checks \n"
                  "involving running executable on the target machine.\n"
                  "+++++++++++++++++++++++++++++++++++++++++++++++++\n",
                  DeprecationWarning, stacklevel=2)
    self._check_compiler()
    exitcode, output = 255, ''
    try:
        grabber = GrabStdout()
        try:
            src, obj, exe = self._link(body, headers, include_dirs,
                                       libraries, library_dirs, lang)
            grabber.restore()
        except Exception:
            output = grabber.data
            grabber.restore()
            raise
        exe = os.path.join('.', exe)
        try:
            # specify cwd arg for consistency with
            # historic usage pattern of exec_command()
            # also, note that exe appears to be a string,
            # which exec_command() handled, but we now
            # use a list for check_output() -- this assumes
            # that exe is always a single command
            output = subprocess.check_output([exe], cwd='.')
        except subprocess.CalledProcessError as exc:
            exitstatus = exc.returncode
            output = ''
        except OSError:
            # preserve the EnvironmentError exit status
            # used historically in exec_command()
            exitstatus = 127
            output = ''
        else:
            output = filepath_from_subprocess_output(output)
        if hasattr(os, 'WEXITSTATUS'):
            exitcode = os.WEXITSTATUS(exitstatus)
            if os.WIFSIGNALED(exitstatus):
                sig = os.WTERMSIG(exitstatus)
                log.error('subprocess exited with signal %d' % (sig,))
                if sig == signal.SIGINT:
                    # control-C
                    raise KeyboardInterrupt
        else:
            exitcode = exitstatus
        log.info("success!")
    except (CompileError, LinkError):
        log.info("failure.")
    self._clean()
    return exitcode, output
def do_POST(self):

    # Get the parameters of the POST request
    params = query.Parameters(self)
    if DEBUG:
        print SEPARATOR
        print "* DEBUG: instsrv: Client POST request: POST parameters: %s" % (params)

    # Get parameter values for given keys
    user_id = params.get(query.Parameters.USER)
    action = params.get(query.Parameters.ACTION)
    description_file = params.get(query.Parameters.DESCRIPTION_FILE)
    progression_scenario = params.get(query.Parameters.PROGRESSION_SCENARIO)
    range_id = params.get(query.Parameters.RANGE_ID)
    if DEBUG:
        print SEPARATOR
        print "PARAMETERS:"
        print SEPARATOR
        print "USER: %s" % (user_id)
        print "ACTION: %s" % (action)
        print "DESCRIPTION FILE:\n%s" % (description_file)
        print "PROGRESSION_SCENARIO: %s" % (progression_scenario)
        print "RANGE_ID: %s" % (range_id)
        print SEPARATOR

    ## Handle user information

    # Get user information from YAML file
    # Note: Only reading data that is (potentially) modified externally =>
    # no need for synchronization
    user_info = userinfo.UserInfo()
    if not user_info.parse_YAML_file(DATABASE_DIR + USERS_FILE):
        self.send_error(SERVER_ERROR, "User information issue")
        return
    if DEBUG:
        user_info.pretty_print()

    # Check that user id is valid
    user_obj = user_info.get_user(user_id)
    if not user_obj:
        self.send_error(REQUEST_ERROR, "Invalid user id")
        return

    ## Handle action information

    # Check that action is valid
    if action not in self.VALID_ACTIONS:
        self.send_error(REQUEST_ERROR, "Invalid action")
        return

    # If we reached this point, it means processing was successful
    # => act according to each action

    #############################################################################
    # Instantiate the cyber range action
    if action == query.Parameters.INSTANTIATE_RANGE:

        # Check that description is not empty
        if not description_file:
            self.send_error(REQUEST_ERROR, "Invalid description file")
            return

        # Check that range id was provided
        if not range_id:
            self.send_error(REQUEST_ERROR, "Invalid range id")
            return

        # Save the description received as a file
        try:
            range_file_name = RANGE_DESCRIPTION_TEMPLATE.format(range_id)
            range_file = open(range_file_name, "w")
            range_file.write(description_file)
            range_file.close()
            print "* INFO: instsrv: Saved POSTed cyber range description to file '%s'." % (range_file_name)
        except IOError:
            print "* ERROR: instsrv: Could not write to file %s." % (range_file_name)

        print "* INFO: instsrv: Start cyber range instantiation."

        # Use CyRIS to really do cyber range instantiation
        if USE_CYRIS:
            try:
                command = "python -u " + CYRIS_PATH + "main/cyris.py " + range_file_name + " " + CYRIS_PATH + CYRIS_CONFIG_FILENAME
                return_value = os.system(command)
                exit_status = os.WEXITSTATUS(return_value)
                if exit_status != 0:
                    self.handle_cyris_error(range_id)
                    self.send_error(SERVER_ERROR, "CyRIS execution issue")
                    return

                status_filename = CYRIS_PATH + CYRIS_RANGE_DIRECTORY + str(range_id) + "/" + CYRIS_STATUS_FILENAME
                with open(status_filename, 'r') as status_file:
                    status_file_content = status_file.read()
                    if DEBUG:
                        print "* DEBUG: instsrv: Status file content=", status_file_content
                    if Storyboard.SERVER_STATUS_SUCCESS in status_file_content:

                        # Get notification text
                        notification_filename_short = CYRIS_NOTIFICATION_TEMPLATE.format(range_id)
                        notification_filename = "{0}{1}{2}/{3}".format(CYRIS_PATH, CYRIS_RANGE_DIRECTORY,
                                                                       range_id, notification_filename_short)
                        if DEBUG:
                            print "* DEBUG: instsrv: Notification file name=", notification_filename
                        message = None
                        with open(notification_filename, 'r') as notification_file:
                            notification_file_content = notification_file.read()
                            message = urllib.quote(notification_file_content)
                        response_content = self.build_response(Storyboard.SERVER_STATUS_SUCCESS, message)

                        # We try to prepare the terminal for Moodle, but
                        # errors are only considered as warnings for the
                        # moment, since this functionality is not publicly
                        # released yet in cnt2lms
                        try:
                            if USE_CNT2LMS_SCRIPT_GENERATION:
                                ssh_command = "ssh -tt -o 'ProxyCommand ssh [email protected] -W %h:%p' root@moodle"
                                python_command = "python -u " + CNT2LMS_PATH + "get_cyris_result.py " + CYRIS_MASTER_HOST + " " + CYRIS_MASTER_ACCOUNT + " " + CYRIS_PATH + CYRIS_RANGE_DIRECTORY + " " + range_id + " 1"
                                command = ssh_command + " \"" + python_command + "\""
                                print "* DEBUG: instsrv: get_cyris_result command: " + command
                                return_value = os.system(command)
                                exit_status = os.WEXITSTATUS(return_value)
                                if exit_status == 0:
                                    #response_content = RESPONSE_SUCCESS
                                    pass
                                else:
                                    #self.send_error(SERVER_ERROR, "LMS terminal preparation issue")
                                    #return
                                    print "* DEBUG: instsrv: LMS terminal preparation issue"
                        except IOError:
                            #self.send_error(SERVER_ERROR, "LMS terminal preparation I/O error")
                            #return
                            print "* DEBUG: instsrv: LMS terminal preparation I/O error"

                        # CyPROM related functionality
                        if progression_scenario:
                            print "* INFO: instsrv: Run CyPROM using scenario '{}'".format(progression_scenario)

                            # Build CyRIS details file name
                            details_filename_short = CYRIS_DETAILS_TEMPLATE.format(range_id)
                            details_filename = "{0}{1}{2}/{3}".format(CYRIS_PATH, CYRIS_RANGE_DIRECTORY,
                                                                      range_id, details_filename_short)

                            # Build CyPROM command (note the background execution!)
                            cyprom_command = "python -u {0}main/cyprom.py --scenario {1} --cyris {2} &".format(
                                CYPROM_PATH, progression_scenario, details_filename)

                            # Execute the command and handle the exit status
                            return_value = os.system(cyprom_command)
                            exit_status = os.WEXITSTATUS(return_value)
                            if exit_status != 0:
                                self.handle_cyris_error(range_id)
                                self.send_error(SERVER_ERROR, "CyPROM execution issue")
                                return
                    else:
                        # Even though CyRIS is now destroying automatically the cyber range
                        # in case of error, as this may fail, we still try to clean up here
                        self.handle_cyris_error(range_id)
                        response_content = self.build_response(Storyboard.SERVER_STATUS_ERROR,
                                                               Storyboard.INSTANTIATION_STATUS_FILE_NOT_FOUND)
            except IOError:
                self.handle_cyris_error(range_id)
                self.send_error(SERVER_ERROR, Storyboard.INSTANTIATION_CYRIS_IO_ERROR)
                return

        # Don't use CyRIS, just simulate the instantiation
        else:
            # Simulate time needed to instantiate the cyber range
            if SIMULATION_DURATION == -1:
                sleep_time = random.randint(SIMULATION_RAND_MIN, SIMULATION_RAND_MAX)
            else:
                sleep_time = SIMULATION_DURATION
            print Storyboard.SEPARATOR3
            print "* INFO: instsrv: Simulate instantiation by sleeping %d s." % (sleep_time)
            print Storyboard.SEPARATOR3
            time.sleep(sleep_time)

            # Simulate the success or failure of the instantiation
            if random.random() > 0.0:
                # Get sample notification text
                notification_filename = "{0}/{1}".format(DATABASE_DIR, CYRIS_NOTIFICATION_SIMULATED)
                if DEBUG:
                    print "* DEBUG: instsrv: Simulated notification file name=", notification_filename
                message = None
                with open(notification_filename, 'r') as notification_file:
                    notification_file_content = notification_file.read()
                    message = urllib.quote(notification_file_content)
                response_content = self.build_response(Storyboard.SERVER_STATUS_SUCCESS, message)

                # CyPROM related functionality
                if progression_scenario:
                    print "* INFO: instsrv: Simulated CyPROM execution using scenario '{}'.".format(progression_scenario)
            else:
                response_content = self.build_response(Storyboard.SERVER_STATUS_ERROR,
                                                       Storyboard.INSTANTIATION_SIMULATED_ERROR)

    #############################################################################
    # Destroy the cyber range action
    elif action == query.Parameters.DESTROY_RANGE:

        # Check that the range id is valid
        if not range_id:
            self.send_error(REQUEST_ERROR, "Invalid range id")
            return

        print "* INFO: instsrv: Start destruction of cyber range with id %s." % (range_id)

        # Use CyRIS to really do cyber range destruction
        if USE_CYRIS:
            destruction_filename = CYRIS_PATH + CYRIS_DESTRUCTION_SCRIPT
            destruction_command = "{0} {1} {2}".format(destruction_filename, range_id,
                                                       CYRIS_PATH + CYRIS_CONFIG_FILENAME)
            print "* DEBUG: instrv: destruction_command: " + destruction_command
            return_value = os.system(destruction_command)
            exit_status = os.WEXITSTATUS(return_value)
            if exit_status == 0:
                response_content = self.build_response(Storyboard.SERVER_STATUS_SUCCESS)
            else:
                response_content = self.build_response(Storyboard.SERVER_STATUS_ERROR,
                                                       "CyRIS destruction issue")

        # Don't use CyRIS, just simulate the destruction
        else:
            # Simulate time needed to destroy the cyber range
            if SIMULATION_DURATION == -1:
                sleep_time = random.randint(SIMULATION_RAND_MIN, SIMULATION_RAND_MAX)
            else:
                sleep_time = SIMULATION_DURATION
            print Storyboard.SEPARATOR3
            print "* INFO: instsrv: Simulate destruction by sleeping %d s." % (sleep_time)
            print Storyboard.SEPARATOR3
            time.sleep(sleep_time)

            # Simulate the success or failure of the destruction
            if random.random() > 0.0:
                response_content = self.build_response(Storyboard.SERVER_STATUS_SUCCESS)
            else:
                response_content = self.build_response(Storyboard.SERVER_STATUS_ERROR,
                                                       Storyboard.DESTRUCTION_SIMULATED_ERROR)

    # Catch potential unimplemented actions (if any)
    else:
        print "* WARNING: instsrv: Unknown action: %s." % (action)

    # Send response header to requester (triggers log_message())
    self.send_response(HTTP_OK_CODE)
    self.send_header("Content-type", "text/html")
    self.end_headers()

    # Send scenario database content information to requester
    self.wfile.write(response_content)

    # Output server reply
    if DEBUG:
        print "* DEBUG: instsrv: Server response content: %s" % (response_content)
def SetUp(self, context, result):
    # if not context.has_key("IPv6Suite.srcDir"):
    # By default we assume there is a compiler.
    srcDir = context["IPv6Suite.srcDir"]
    srcDir = os.path.expanduser(srcDir)
    context["IPv6Suite.srcDir"] = srcDir
    if not os.path.exists(srcDir):
        result.SetOutcome(result.ERROR,
                          "srcDir does not exist. Where will we get source "
                          "files from?")
        return

    buildDir = context["IPv6Suite.buildDir"]
    buildDir = os.path.abspath(buildDir)
    context["IPv6Suite.buildDir"] = buildDir
    buildDir = os.path.join(buildDir, IdToDir(self))

    self.wipe_build_dir = qm.parse_boolean(context["IPv6Suite.wipe_build_dir"])
    # Can't use bool() since the boolean value is a string of 'true|false', and
    # bool() treats any non-empty string as true.
    #self.wipe_build_dir = bool(self.wipe_build_dir)

    # We want clean builds, especially when previously failed ones may have
    # broken generated code.
    if self.wipe_build_dir and os.path.exists(buildDir):
        shutil.rmtree(buildDir)
    mkdirs(buildDir)
    if self.wipe_build_dir:
        print "Wiping all files"

    context["IPv6Suite.myBuildDir"] = buildDir
    self.myBuildDir = buildDir

    # srcDir/test is where we store the related input files like ini/xml etc.
    # The qmtest database would be where we store these input files, but what
    # structure underneath?

    make_executable = RedirectedExecutable()
    # We actually want a GUI to replace this value if possible?
    cmake_defines = string.split(self.ipv6suite_build_options)
    cmake_options = "-DOPP_USE_TK:BOOL=OFF"
    ## "-DLIBRARY_OUTPUT_PATH:PATH=lib -DEXECUTABLE_OUTPUT_PATH:PATH=exe"
    for o in cmake_defines:
        cmake_options = "-D" + o + ":BOOL=ON" + " " + cmake_options

    if not os.path.exists(os.path.join(buildDir, "CMakeCache.txt")):
        cmake_command = ["cmake"] + string.split(cmake_options) + [srcDir]
        #print cmake_command
        status = make_executable.Run(cmake_command, dir=buildDir)
        if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
            q_stdout = result.Quote(make_executable.stdout)
            q_stderr = result.Quote(make_executable.stderr)
            result.SetOutcome(result.ERROR,
                              "Error in cmake Makefile generation",
                              {"status": str(status),
                               "stdout": q_stdout,
                               "stderr": q_stderr,
                               "command": " ".join(cmake_command),
                               })
            return

    if os.environ.has_key("RPM_BUILD_NCPUS"):
        cpus = os.environ["RPM_BUILD_NCPUS"]
    else:
        cpus = "1"

    make_command = ["make", "-j" + cpus] + string.split(self.make_options)
    status = make_executable.Run(make_command, dir=buildDir)
    if not os.WIFEXITED(status) or os.WEXITSTATUS(status) != 0:
        q_stdout = result.Quote(make_executable.stdout)
        q_stderr = result.Quote(make_executable.stderr)
        result.SetOutcome(result.ERROR,
                          "Error building IPv6Suite",
                          {"status": str(status),
                           "stdout": q_stdout,
                           "stderr": q_stderr,
                           "command": " ".join(make_command),
                           })
        return
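The harness above has to test os.WIFEXITED()/os.WEXITSTATUS() itself because its RedirectedExecutable returns a raw wait status. For comparison, a rough sketch of the same cmake/make steps using the standard subprocess module, which decodes the status into returncode (negative values mean "killed by a signal"); this is an illustration, not the QMTest API:

import subprocess

def run_build_step(command, build_dir):
    # subprocess.run() waits for the child and exposes the decoded
    # exit status as returncode.
    proc = subprocess.run(command, cwd=build_dir,
                          capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError("%s failed with status %d:\n%s"
                           % (" ".join(command), proc.returncode, proc.stderr))
    return proc.stdout

# e.g.: run_build_step(["cmake", srcDir], buildDir)
#       run_build_step(["make", "-j2"], buildDir)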
    def put_audio(self, ch1, ch2):
        """
        Called by putOut()
        Send to the Output ch1, ch2 interleaved
        :param ch1: Force data
        :param ch2: Accel data
        :return: None
        """
        self.recall_pickled_dict()      # Load variables from the file c.pickle_txt_fullpath
        SampRate = v.SamplingFreq       # Sampling Frequency, Hz
        samples_acq = v.samples_acq     # Number of samples acquired: a c.n_blocks multiple
        FORMAT = aa.PCM_FORMAT_S32_LE   # Int 4 Bytes signed 32 bit
        CHANNELS = int(2)
        byte_width = 4
        bytes_size = CHANNELS * byte_width * c.N_FRAMES    # Bytes in each period
        """
        Create the data_out alsaaudio object instance.
        In PCM_NORMAL mode, the call will block if the kernel buffer is full, and until
        enough sound has been played to allow the sound data to be buffered. The call
        always returns the size of the data provided.
        Syntax: class alsaaudio.PCM(type=PCM_PLAYBACK, mode=PCM_NORMAL, device='default', cardindex=-1)
        To find the sound device use: aplay -L
        ATTENTION: the device full name must be without spaces!
            fuji: "sysdefault"
            Pisound direct hardware device: "hw:CARD=pisound,DEV=0"
            AudioBox 22VSL direct hardware device: "hw:CARD=VSL,DEV=0"
        """
        if c.IS_RASPBERRY:
            print("Machine is Raspberry Pi")
        else:
            print("Machine is not a Raspberry Pi")
        print("Audio device is:", c.SOUND_DEVICE)
        #
        try:    # Creation of output audio object
            data_out = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL, device=c.SOUND_DEVICE)
            data_out.setchannels(CHANNELS)
            data_out.setrate(SampRate)
            data_out.setformat(FORMAT)
            data_out.setperiodsize(c.N_FRAMES)
            print(" - Success creating the output audio object.")
        except:    # Failed: another audio process is running
            print(" - Failed to create an output audio object")
            print(" - Search an active process to be terminated.")
            #
            pid = None
            sts = 1
            cmd = "ps aux | grep get_audio.py | head -n1"   # Search it and try to terminate it
            res = subprocess.getoutput(cmd)
            pid = res[9:15].strip()
            if pid:
                sts = os.WEXITSTATUS(os.system('kill -TERM ' + pid))
            if sts == 0:    # Process closed successfully ('==', not 'is': identity is not value equality)
                print(" - Found an active process: terminated with success.")
                try:    # Creation of the output audio object, second attempt
                    print(" - Second try to create an output audio object.")
                    time.sleep(0.5)     # Indispensable, greater than 0.01
                    data_out = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL, device=c.SOUND_DEVICE)
                    data_out.setchannels(CHANNELS)
                    data_out.setrate(SampRate)
                    data_out.setformat(FORMAT)
                    data_out.setperiodsize(c.N_FRAMES)
                    print(" - Success creating the output audio object.")
                except:    # Failed
                    doBeep(on_list=bip_5_S[0], off_list=bip_5_S[1])    # One Bip of 5 s
                    print(" - Failed second attempt to create the output audio object: restart computer")
                    sys.exit()
            else:
                print(" - Old process not found: please restart the computer.")
                return
            # END if sts:
        # END try:...except:
        """
        x1x2: vector of interleaved channels, Ch1, Ch2, output signal
        """
        N = samples_acq     # Number of samples
        #
        ch1 /= (self.outGain * t.PI_INP_GAIN)   # Scale Output from ~3 [t.PI_OUT_GAIN] to ~1, and for Input Gain
        ch2 /= (self.outGain * t.PI_INP_GAIN)
        ch1 *= c.full_scale_32                  # Scale to 32 bit
        ch2 *= c.full_scale_32
        x1 = ch1.astype(np.int32)               # Convert to int32, ch1
        x2 = ch2.astype(np.int32)               # Convert to int32, ch2
        #
        pk1_32 = np.max(np.abs(x1)) * (self.outGain)
        pk2_32 = np.max(np.abs(x2)) * (self.outGain)
        rms1_32 = np.std(x1) * (self.outGain)
        rms2_32 = np.std(x2) * (self.outGain)
        #
        print("\n" + "Values refer to the acquired data and are not equal to the values put on the Output")
        print("rms1 = {:3.1f}".format((rms1_32 / c.full_scale_32) * 1000), "mV; ",
              "pk1 = {:3.1f}".format((pk1_32 / c.full_scale_32) * 1000), "mV")
        print("rms2 = {:3.1f}".format((rms2_32 / c.full_scale_32) * 1000), "mV; ",
              "pk2 = {:3.1f}".format((pk2_32 / c.full_scale_32) * 1000), "mV")
        #
        x1x2 = np.zeros(CHANNELS * samples_acq, dtype=np.int32)     # Array for interleaved Ch1, Ch2 data
        x1x2[0::2] = x1     # Scaled Output gain, fill ch1: even indexes
        x1x2[1::2] = x2     # Scaled Output gain, fill ch2: odd indexes
        #
        out_big_buffer = bytearray(bytes_size * c.N_BLOCKS)
        # Pack from numpy.int32 to bytes; "i" is a 4-byte int everywhere,
        # while "l" would be 8 bytes on 64-bit platforms
        out_big_buffer[:] = pack("i" * int(2 * c.N_OF_SAMPLES), *x1x2)
        out_short_buffer = bytearray(bytes_size)    # Output array, written one period at a time
        #
        os.system("sync")   # To empty the write queue, free CPU
        beg = 0
        end = bytes_size    # One period of interleaved data, in bytes (not samples)
        #
        for i in range(c.N_BLOCKS):
            # Write data to the output audio object, one period at a time
            out_short_buffer[:] = out_big_buffer[beg:end]
            size = data_out.write(out_short_buffer)
            #print(size)
            beg = end
            end += bytes_size
        #
        data_out.close()
    def run_cgi(self):
        """Execute a CGI script."""
        dir, rest = self.cgi_info
        path = dir + '/' + rest
        i = path.find('/', len(dir) + 1)
        while i >= 0:
            nextdir = path[:i]
            nextrest = path[i + 1:]

            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = path.find('/', len(dir) + 1)
            else:
                break

        # Find an explicit query string, if present.
        rest, _, query = rest.partition('?')

        # Dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''

        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(
                HTTPStatus.NOT_FOUND,
                "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(
                HTTPStatus.FORBIDDEN,
                "CGI script is not a plain file (%r)" % scriptname)
            return
        ispy = self.is_python(scriptname)
        if self.have_fork or not ispy:
            if not self.is_executable(scriptfile):
                self.send_error(
                    HTTPStatus.FORBIDDEN,
                    "CGI script is not executable (%r)" % scriptname)
                return

        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = copy.deepcopy(os.environ)
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.parse.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        env['REMOTE_ADDR'] = self.client_address[0]
        authorization = self.headers.get("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    try:
                        authorization = authorization[1].encode('ascii')
                        authorization = base64.decodebytes(authorization).\
                                        decode('ascii')
                    except (binascii.Error, UnicodeError):
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.get('content-type') is None:
            env['CONTENT_TYPE'] = self.headers.get_content_type()
        else:
            env['CONTENT_TYPE'] = self.headers['content-type']
        length = self.headers.get('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        referer = self.headers.get('referer')
        if referer:
            env['HTTP_REFERER'] = referer
        accept = self.headers.get_all('accept', ())
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.get('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.get_all('cookie', []))
        cookie_str = ', '.join(co)
        if cookie_str:
            env['HTTP_COOKIE'] = cookie_str
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
            env.setdefault(k, "")

        self.send_response(HTTPStatus.OK, "Script output follows")
        self.flush_headers()

        decoded_query = query.replace('+', ' ')

        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = http.server.nobody_uid()
            self.wfile.flush()  # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                exitcode = os.WEXITSTATUS(sts)
                if exitcode:
                    self.log_error(f"CGI script exit code {exitcode}")
                return
            # Child
            try:
                try:
                    os.setuid(nobody)
                except OSError:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, env)
                print(self.wfile)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)

        else:
            # Non-Unix -- use subprocess
            import subprocess
            cmdline = [scriptfile]
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = [interp, '-u'] + cmdline
            if '=' not in query:
                cmdline.append(query)
            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            p = subprocess.Popen(cmdline,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
            else:
                data = None
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            stdout, stderr = p.communicate(data)
            self.wfile.write(stdout)
            if stderr:
                self.log_error('%s', stderr)
            p.stderr.close()
            p.stdout.close()
            status = p.returncode
            if status:
                self.log_error("CGI script exit status %#x", status)
            else:
                self.log_message("CGI script exited OK")
  def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
    """Changes into a specified directory, if provided, and executes a command.

    Restores the old directory afterwards.

    Args:
      command:        The command to run, in the form of sys.argv.
      working_dir:    The directory to change into.
      capture_stderr: Determines whether to capture stderr in the output member
                      or to discard it.
      env:            Dictionary with environment to pass to the subprocess.

    Returns:
      An object that represents outcome of the executed process. It has the
      following attributes:
        terminated_by_signal   True iff the child process has been terminated
                               by a signal.
        signal                 Signal that terminated the child process.
        exited                 True iff the child process exited normally.
        exit_code              The code with which the child process exited.
        output                 Child process's stdout and stderr output
                               combined in a string.
    """

    # The subprocess module is the preferable way of running programs
    # since it is available and behaves consistently on all platforms,
    # including Windows.  But it is only available starting in python 2.4.
    # In earlier python versions, we revert to the popen2 module, which is
    # available in python 2.0 and later but doesn't provide required
    # functionality (Popen4) under Windows.  This allows us to support Mac
    # OS X 10.4 Tiger, which has python 2.3 installed.
    if _SUBPROCESS_MODULE_AVAILABLE:
      if capture_stderr:
        stderr = subprocess.STDOUT
      else:
        stderr = subprocess.PIPE

      p = subprocess.Popen(command,
                           stdout=subprocess.PIPE, stderr=stderr,
                           cwd=working_dir, universal_newlines=True, env=env)
      # communicate returns a tuple with the file object for the child's
      # output.
      self.output = p.communicate()[0]
      self._return_code = p.returncode
    else:
      old_dir = os.getcwd()

      def _ReplaceEnvDict(dest, src):
        # Changes made by os.environ.clear are not inheritable by child
        # processes until Python 2.6.  To produce inheritable changes we have
        # to delete environment items with the del statement.
        for key in dest.keys():
          del dest[key]
        dest.update(src)

      # When 'env' is not None, backup the environment variables and replace
      # them with the passed 'env'.  When 'env' is None, we simply use the
      # current 'os.environ' for compatibility with the subprocess.Popen
      # semantics used above.
      if env is not None:
        old_environ = os.environ.copy()
        _ReplaceEnvDict(os.environ, env)

      try:
        if working_dir is not None:
          os.chdir(working_dir)
        if capture_stderr:
          p = popen2.Popen4(command)
        else:
          p = popen2.Popen3(command)
        p.tochild.close()
        self.output = p.fromchild.read()
        ret_code = p.wait()
      finally:
        os.chdir(old_dir)

        # Restore the old environment variables if they were replaced.
        if env is not None:
          _ReplaceEnvDict(os.environ, old_environ)

      # Converts ret_code to match the semantics of
      # subprocess.Popen.returncode.
      if os.WIFSIGNALED(ret_code):
        self._return_code = -os.WTERMSIG(ret_code)
      else:  # os.WIFEXITED(ret_code) should return True here.
        self._return_code = os.WEXITSTATUS(ret_code)

    if self._return_code < 0:
      self.terminated_by_signal = True
      self.exited = False
      self.signal = -self._return_code
    else:
      self.terminated_by_signal = False
      self.exited = True
      self.exit_code = self._return_code
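Assuming this __init__ belongs to a class named Subprocess (the snippet does not show the class statement, so the name is a guess), typical usage would look like:

p = Subprocess(['echo', 'hello'])
if p.terminated_by_signal:
    print('child killed by signal', p.signal)
elif p.exited:
    print('exit code:', p.exit_code)
    print('output:', p.output)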
    def loop(self):
        """Main loop.

        Wait until all the runner subprocesses have exited, restarting them if
        necessary and configured to do so.
        """
        log = logging.getLogger('mailman.runner')
        log.info('Master started')
        self._pause()
        while True:
            try:
                pid, status = os.wait()
            except OSError as error:
                # No children?  We're done.
                if error.errno == errno.ECHILD:
                    break
                # If the system call got interrupted, just restart it.
                elif error.errno == errno.EINTR:
                    continue
                else:
                    raise
            # Find out why the subprocess exited by getting the signal
            # received or exit status.
            if os.WIFSIGNALED(status):
                why = os.WTERMSIG(status)
            elif os.WIFEXITED(status):
                why = os.WEXITSTATUS(status)
            else:
                why = None
            # We'll restart the subprocess if it exited with a SIGUSR1 or
            # because of a failure (i.e. no exit signal), and the no-restart
            # command line switch was not given.  This lets us better handle
            # runaway restarts (e.g. if the subprocess had a syntax error!)
            rname, slice_number, count, restarts = self._kids.pop(pid)
            config_name = 'runner.' + rname
            restart = False
            if why == signal.SIGUSR1 and self._restartable:
                restart = True
            # Have we hit the maximum number of restarts?
            restarts += 1
            max_restarts = int(getattr(config, config_name).max_restarts)
            if restarts > max_restarts:
                restart = False
            # Are we permanently non-restartable?
            log.debug("""\
Master detected subprocess exit
(pid: {0:d}, why: {1}, class: {2}, slice: {3:d}/{4:d}) {5}""".format(
                pid, why, rname, slice_number + 1, count,
                ('[restarting]' if restart else '')))
            # See if we've reached the maximum number of allowable restarts.
            if restarts > max_restarts:
                log.info("""\
Runner {0} reached maximum restart limit of {1:d}, not restarting.""",
                         rname, max_restarts)
            # Now perhaps restart the process unless it exited with a
            # SIGTERM or we aren't restarting.
            if restart:
                spec = '{0}:{1:d}:{2:d}'.format(rname, slice_number, count)
                new_pid = self._start_runner(spec)
                new_info = (rname, slice_number, count, restarts)
                self._kids.add(new_pid, new_info)
        log.info('Master stopped')
def fork_processes(num_processes, max_restarts=100):
    """Start multiple worker processes.

    Two parameters:
    1. the number of processes to start; if <= 0, use the CPU core count
    2. the maximum number of child restarts
    """
    # Default to the number of CPU cores
    if num_processes is None or num_processes <= 0:
        num_processes = multiprocessing.cpu_count()
    # Dictionary keyed by pid; the value is the child's index
    children = {}

    # Create one child process
    def start_child(i):
        # i is the index of the process being started
        pid = os.fork()
        if pid == 0:
            # Child process: return its own index
            return i
        else:
            # Parent process: record the child's pid in the dictionary
            children[pid] = i
            return None

    # Start the requested number of processes and collect their pids
    for i in range(num_processes):
        id = start_child(i)
        # The parent gets None here, so it keeps running the code below;
        # a child gets its index and returns immediately, since its work
        # in this function is done.
        if id is not None:
            return id

    # Only the parent continues from here.
    # Start counting child restarts.
    num_restarts = 0
    while children:
        try:
            # Block until a child exits; os.wait() reaps it
            pid, status = os.wait()
        except OSError as e:
            # On EINTR (the system call was interrupted by a signal),
            # just retry the loop
            if e.errno == errno.EINTR:
                continue
            # Re-raise any other OS error
            raise
        # Ignore pids that we did not start
        if pid not in children:
            continue
        # Remove the finished child from the dictionary and recover its index
        id = children.pop(pid)
        # The different exit states could be logged here.
        # WIFSIGNALED(status) is True if the process exited due to a signal.
        if os.WIFSIGNALED(status):
            pass
        # If WIFEXITED(status) is True, WEXITSTATUS(status) returns the integer
        # argument that was passed to exit(); otherwise its value is undefined.
        elif os.WEXITSTATUS(status) != 0:
            pass
        # A clean exit: don't restart this child, continue the loop
        else:
            continue
        # Enforce the maximum number of restarts; raise if it is exceeded
        num_restarts += 1
        if num_restarts > max_restarts:
            raise RuntimeError("Too many child restarts, giving up")
        # The child with this index has exited, so fork a replacement
        new_id = start_child(id)
        # If the fork succeeded, we are in the new child: return its index
        if new_id is not None:
            return new_id
    # All children have exited and the dictionary is empty: exit the parent
    sys.exit(0)
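A sketch of how such a pre-fork helper is typically driven; the worker body is illustrative. Note that the parent never returns from fork_processes(): it exits via sys.exit(0) once every child is gone, so only worker processes reach the code below the call.

import os

worker_id = fork_processes(4)  # fork 4 workers; each gets its own index
print("worker %d running in pid %d" % (worker_id, os.getpid()))
# ... do the per-worker work here; a non-zero exit or a signal makes
# the parent fork a replacement, up to max_restarts ...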
def not_found(self):
    return os.WIFEXITED(self.status) and os.WEXITSTATUS(self.status) == 127
def run_command(command):
    return os.WEXITSTATUS(os.system(command))
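Both helpers assume a normal exit: status 127 is what a POSIX shell returns when a command cannot be found (which is exactly what not_found() tests), and os.WEXITSTATUS() is only meaningful when os.WIFEXITED() is true. A sketch of a variant that makes the signal case explicit, mirroring the subprocess.returncode convention:

import os

def run_command_checked(command):
    status = os.system(command)
    if os.WIFSIGNALED(status):
        # Negative value = terminated by that signal number
        return -os.WTERMSIG(status)
    return os.WEXITSTATUS(status)

print(run_command_checked("exit 3"))                   # 3
print(run_command_checked("no_such_cmd 2>/dev/null"))  # 127: command not found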