Ejemplo n.º 1
0
    def run(self):
        """Serve pickled requests read from the original stdin.

        Duplicates fds 0/1 for the pickle channel, then points fd 0 at
        /dev/null and fd 1 at stderr so stray prints from handler code
        cannot corrupt the protocol stream.  Each request is dispatched
        to self._process on its own thread.
        """
        # Open our pipes.
        in_pipe = os.fdopen(os.dup(0), 'r')
        out_pipe = os.fdopen(os.dup(1), 'w')
        devnull = open("/dev/null", 'r')
        os.dup2(devnull.fileno(), 0)
        devnull.close()
        os.dup2(2, 1)

        # Dump our startup message (a null response announces readiness).
        robj = {
            "id": None,
            "result": None
        }
        out_pipe.write(pickle.dumps(robj))
        out_pipe.flush()
        sys.stderr.write("proxy %d: started.\n" % os.getpid())

        # Get the call from the other side.
        while True:
            try:
                obj = pickle.load(in_pipe)
                sys.stderr.write("proxy %d: <- %s\n" % (os.getpid(), obj))
            except:
                # We're done!  NOTE(review): the bare except treats any
                # unpickling error as EOF, not only a closed pipe -- confirm.
                break

            def closure(obj, out_pipe):
                # Bind the current obj/out_pipe now so each worker thread
                # sees its own request (avoids late-binding closure bugs).
                def fn():
                    self._process(obj, out_pipe)
                return fn

            t = threading.Thread(target=closure(obj, out_pipe))
            t.start()
Ejemplo n.º 2
0
    def test_is_same_output(self):
        """_is_same_output is true for dup()s of one open descriptor,
        false for descriptors of different files, and true again for
        two independent opens of the same temp file path.
        """
        fd1 = sys.stderr.fileno()
        fd2 = os.dup(fd1)
        try:
            self.assertTrue(ConfigureOutputHandler._is_same_output(fd1, fd2))
        finally:
            os.close(fd2)

        fd2, path = tempfile.mkstemp()
        try:
            self.assertFalse(ConfigureOutputHandler._is_same_output(fd1, fd2))

            fd3 = os.dup(fd2)
            try:
                self.assertTrue(ConfigureOutputHandler._is_same_output(fd2, fd3))
            finally:
                os.close(fd3)

            # A second open() of the same path must still compare equal.
            with open(path, 'a') as fh:
                fd3 = fh.fileno()
                self.assertTrue(
                    ConfigureOutputHandler._is_same_output(fd2, fd3))

        finally:
            os.close(fd2)
            os.remove(path)
Ejemplo n.º 3
0
def printfiles():
    """Write three TikZ .tex figures by temporarily stealing fd 1.

    Relies on the POSIX lowest-free-descriptor rule: after close(1),
    the next open() lands on fd 1, so print() output goes into the
    file.  Presumably dup/close/open and the O_* flags are os-level
    names imported unqualified -- confirm against the module imports.
    Reads module globals height, find, blocksize, array, bfs and veb.
    """
    print('Writing to files...')
    # Keep the real stdout so it can be restored at the end.
    old = dup(1)
    close(1)

    open('height{}_find{}_binsearch.tex'.format(height, find), O_WRONLY|O_CREAT|O_TRUNC|O_BINARY)
    print('\\begin{tikzpicture}')
    print('% height={}, finding={}, blocksize={}'.format(height, find, blocksize))
    print()
    print('% Binary search')
    printarray(array, 0)
    print('\\end{tikzpicture}')
    close(1)

    open('height{}_find{}_bfstree.tex'.format(height, find), O_WRONLY|O_CREAT|O_TRUNC|O_BINARY)
    print('\\begin{tikzpicture}')
    print('% height={}, finding={}, blocksize={}'.format(height, find, blocksize))
    print()
    print('% BFS tree')
    printtree(bfs, 0)
    print('\\end{tikzpicture}')
    close(1)

    open('height{}_find{}_vebtree.tex'.format(height, find), O_WRONLY|O_CREAT|O_TRUNC|O_BINARY)
    print('\\begin{tikzpicture}')
    print('% height={}, finding={}, blocksize={}'.format(height, find, blocksize))
    print()
    print('% vEB tree')
    printtree(veb, 0)
    print('\\end{tikzpicture}')
    close(1)

    # Fd 1 is free again, so dup(old) lands on fd 1 -- stdout restored.
    dup(old)
    close(old)
    print('Done')
Ejemplo n.º 4
0
def build_extensions(silent=False):
    """Build the SWIG `_game` extension in-place.

    With silent=True, re-invokes itself with fds 1/2 pointed at
    os.devnull so compiler/SWIG chatter is suppressed, then restores
    the real descriptors (even if the build raised).
    """
    if silent:
        devnull = open(os.devnull, 'w')
        oldstdout = os.dup(sys.stdout.fileno())
        os.dup2(devnull.fileno(), sys.stdout.fileno())
        oldstderr = os.dup(sys.stderr.fileno())
        os.dup2(devnull.fileno(), sys.stderr.fileno())
        try:
            build_extensions(silent=False)
        finally:
            os.dup2(oldstdout, sys.stdout.fileno())
            os.dup2(oldstderr, sys.stderr.fileno())
            devnull.close()
            # FIX: the duplicated descriptors were previously leaked on
            # every silent call; close them after restoring fds 1/2.
            os.close(oldstdout)
            os.close(oldstderr)
        return

    # Build from the directory containing this file, restoring the cwd.
    cur_dir = os.getcwd()
    try:
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        setup(
            name='game',
            py_modules=['game'],
            ext_modules=[
                Extension('_game',
                    ['game.i', 'game.cpp'],
                    depends=['game.h'],
                    swig_opts=['-c++'],
                    extra_compile_args = ['--std=c++0x'],
                ),
            ],
            script_args=['build_ext', '--inplace']
        )
    finally:
        os.chdir(cur_dir)
Ejemplo n.º 5
0
def worker(sock):
    """
    Called by a worker process after the fork().

    Resets inherited signal handlers to their defaults, wraps the
    socket fd in separate buffered read/write file objects, runs
    worker_main, and returns the process exit code (0 unless
    worker_main raised SystemExit).
    """
    signal.signal(SIGHUP, SIG_DFL)
    signal.signal(SIGCHLD, SIG_DFL)
    signal.signal(SIGTERM, SIG_DFL)
    # restore the handler for SIGINT,
    # it's useful for debugging (show the stacktrace before exit)
    signal.signal(SIGINT, signal.default_int_handler)

    # Read the socket using fdopen instead of socket.makefile() because the latter
    # seems to be very slow; note that we need to dup() the file descriptor because
    # otherwise writes also cause a seek that makes us miss data on the read side.
    infile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
    outfile = os.fdopen(os.dup(sock.fileno()), "a+", 65536)
    exit_code = 0
    try:
        worker_main(infile, outfile)
    except SystemExit as exc:
        exit_code = compute_real_exit_code(exc.code)
    finally:
        try:
            # Best-effort flush: the peer may already have gone away.
            outfile.flush()
        except Exception:
            pass
    return exit_code
Ejemplo n.º 6
0
def open_network_connections(port):
    """Connect to localhost on `port` and wrap the socket in buffered
    binary stream filters for each direction.

    Returns a NetworkConnections bundling the down/up stream filters,
    the raw socket and the port.
    """
    conn = socket.socket()
    conn.connect(('localhost', port))
    reader = ph_fdopen(os.dup(conn.fileno()), 'r', BUF_SIZE)
    writer = ph_fdopen(os.dup(conn.fileno()), 'w', BUF_SIZE)
    down_filter = BinaryDownStreamFilter(reader)
    up_filter = BinaryUpStreamFilter(writer)
    return NetworkConnections(down_filter, up_filter, conn, port)
Ejemplo n.º 7
0
	def do_ipy(self, args):
		"""Start an interactive Python interpreter"""
		# Local imports keep the C12.18/C12.19 dependencies out of the
		# way for commands that don't need them.
		from c1218.data import C1218Packet
		from c1219.access.general import C1219GeneralAccess
		from c1219.access.security import C1219SecurityAccess
		from c1219.access.log import C1219LogAccess
		from c1219.access.telephone import C1219TelephoneAccess
		# Namespace exposed inside the interactive console.
		vars = {
			'__version__': __version__,
			'frmwk': self.frmwk,
			'C1218Packet': C1218Packet,
			'C1219GeneralAccess': C1219GeneralAccess,
			'C1219SecurityAccess': C1219SecurityAccess,
			'C1219LogAccess': C1219LogAccess,
			'C1219TelephoneAccess': C1219TelephoneAccess
		}
		banner = 'The Framework Instance Is In The Variable \'frmwk\'' + os.linesep
		if self.frmwk.is_serial_connected():
			vars['conn'] = self.frmwk.serial_connection
			banner = banner + 'The Connection Instance Is In The Variable \'conn\'' + os.linesep
		pyconsole = code.InteractiveConsole(vars)

		# Keep duplicates of the real std fds so fresh stream objects can
		# be rebuilt from them after the console exits.
		savestdin = os.dup(sys.stdin.fileno())
		savestdout = os.dup(sys.stdout.fileno())
		savestderr = os.dup(sys.stderr.fileno())
		try:
			pyconsole.interact(banner)
		except SystemExit:
			# Rebuild unbuffered std streams (Python 2 style buffering=0).
			# NOTE(review): the saved fds leak when interact() returns
			# normally instead of raising SystemExit -- confirm intended.
			sys.stdin = os.fdopen(savestdin, 'r', 0)
			sys.stdout = os.fdopen(savestdout, 'w', 0)
			sys.stderr = os.fdopen(savestderr, 'w', 0)
Ejemplo n.º 8
0
    def daemonize(self):
        """Makeself a daemon process.

        Double fork, close standard pipes, start a new session and
        open logs.  Only the grandchild (re-parented to init, its own
        session leader detached from the terminal) survives past the
        two os._exit(0) calls.
        """
        pid = os.fork()
        if pid == 0:  # first child
            os.setsid()  # become session leader, detach from the tty
            pid = os.fork()
            if pid == 0:  # second child
                # Can't chdir to root if we have relative paths to
                # conffile and other modules
                #os.chdir('/')
                os.umask(0)
            else:
                os._exit(0)  # first child exits; grandchild carries on
        else:
            os._exit(0)  # original process returns to the caller/shell

        # close stdin, stdout and stderr ...
        for fd in range(3):
            try:
                os.close(fd)
            except OSError:
                pass
        # ... and replace them with /dev/null: open() takes the lowest
        # free fd (0), and each dup() takes the next lowest (1, then 2).
        os.open('/dev/null', os.O_RDWR)
        os.dup(0)
        os.dup(0)

        syslog.openlog('hip-mgmt-iface',
                       syslog.LOG_PID | syslog.LOG_NDELAY,
                       syslog.LOG_DAEMON)
        syslog.syslog('FirewallController started.')
Ejemplo n.º 9
0
    def execute (self):
        """Run the pending shell input with both the Python-level and
        the OS-level stdout/stderr routed into the console widget.

        Uses the swap idiom: the identical tuple assignments at the top
        and bottom exchange the streams in, then back out.
        """
        # Python stdout, stderr, stdin redirection
        sys.stdout, self.stdout = self.stdout, sys.stdout
        sys.stderr, self.stderr = self.stderr, sys.stderr
        sys.stdin,  self.stdin  = self.stdin,  sys.stdin

        # System stdout, stderr redirection: keep duplicates of the real
        # fds 1/2 so they can be re-installed afterwards.
        sys_stdout = os.dup(1)
        sys_stderr = os.dup(2)
        os.dup2 (self.pipewrite, 1)
        os.dup2 (self.pipewrite, 2)

        self.shell.eval(self)
        # Keep the view following the output while GTK catches up.
        self.view.scroll_mark_onscreen(self.buffer.get_insert())
        while Gtk.events_pending():
            Gtk.main_iteration()

        # Get system output and remove system redirection
        os.dup2 (sys_stdout, 1)
        os.dup2 (sys_stderr, 2)
        os.close (sys_stdout)
        os.close (sys_stderr)

        # Remove python redirection (second half of the swap idiom)
        sys.stdout, self.stdout = self.stdout, sys.stdout
        sys.stderr, self.stderr = self.stderr, sys.stderr
        sys.stdin,  self.stdin  = self.stdin,  sys.stdin
Ejemplo n.º 10
0
 def __init__(self, mixed_out_err = False):
     "Start capture of the Unix-level stdout and stderr."
     # Degrade to a no-op capture where the os primitives are missing
     # (or on win32, where os.tmpfile is broken).
     if (sys.platform == 'win32' or # os.tmpfile fails, cpython issue #2232
         not hasattr(os, 'tmpfile') or
         not hasattr(os, 'dup') or
         not hasattr(os, 'dup2') or
         not hasattr(os, 'fdopen')):
         self.dummy = 1
     else:
         self.dummy = 0
         # make new stdout/stderr files if needed
         self.localoutfd = os.dup(1)
         self.localerrfd = os.dup(2)
         if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1:
             self.saved_stdout = sys.stdout
             # line-buffered replacement bound to the duplicated fd
             sys.stdout = os.fdopen(self.localoutfd, 'w', 1)
         else:
             self.saved_stdout = None
         if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2:
             self.saved_stderr = sys.stderr
             # unbuffered (Python 2 style buffering=0) replacement
             sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
         else:
             self.saved_stderr = None
         self.tmpout = os.tmpfile()
         if mixed_out_err:
             # a single file interleaves both streams
             self.tmperr = self.tmpout
         else:
             self.tmperr = os.tmpfile()
         # point the real fds 1/2 at the capture files
         os.dup2(self.tmpout.fileno(), 1)
         os.dup2(self.tmperr.fileno(), 2)
Ejemplo n.º 11
0
def init_popen_io(execmodel):
    """Steal the real stdin/stdout for the gateway wire protocol.

    Returns a Popen2IO built on duplicates of fds 0/1; the real fds
    are then pointed at devnull so user code's prints cannot corrupt
    the protocol stream (fd 2 gets the same treatment on win32).
    """
    if not hasattr(os, 'dup'): # jython
        io = Popen2IO(sys.stdout, sys.stdin, execmodel)
        import tempfile
        sys.stdin = tempfile.TemporaryFile('r')
        sys.stdout = tempfile.TemporaryFile('w')
    else:
        try:
            devnull = os.devnull
        except AttributeError:
            # very old Pythons lack os.devnull
            if os.name == 'nt':
                devnull = 'NUL'
            else:
                devnull = '/dev/null'
        # stdin
        stdin  = execmodel.fdopen(os.dup(0), 'r', 1)
        fd = os.open(devnull, os.O_RDONLY)
        os.dup2(fd, 0)
        os.close(fd)

        # stdout
        stdout = execmodel.fdopen(os.dup(1), 'w', 1)
        fd = os.open(devnull, os.O_WRONLY)
        os.dup2(fd, 1)

        # stderr for win32: reuse the same devnull fd before closing it
        if os.name == 'nt':
            sys.stderr = execmodel.fdopen(os.dup(2), 'w', 1)
            os.dup2(fd, 2)
        os.close(fd)
        io = Popen2IO(stdout, stdin, execmodel)
        # sys-level streams now sit on fds 0/1, i.e. devnull
        sys.stdin = execmodel.fdopen(0, 'r', 1)
        sys.stdout = execmodel.fdopen(1, 'w', 1)
    return io
Ejemplo n.º 12
0
 def __init__(self, mixed_out_err = False):
     "Start capture of the Unix-level stdout and stderr."
     # Degrade to a no-op capture when the os primitives are missing.
     if (not hasattr(os, 'tmpfile') or
         not hasattr(os, 'dup') or
         not hasattr(os, 'dup2') or
         not hasattr(os, 'fdopen')):
         self.dummy = 1
     else:
         try:
             self.tmpout = os.tmpfile()
             if mixed_out_err:
                 # a single file interleaves both streams
                 self.tmperr = self.tmpout
             else:
                 self.tmperr = os.tmpfile()
         except OSError:     # bah?  on at least one Windows box
             self.dummy = 1
             return
         self.dummy = 0
         # make new stdout/stderr files if needed
         self.localoutfd = os.dup(1)
         self.localerrfd = os.dup(2)
         if hasattr(sys.stdout, 'fileno') and sys.stdout.fileno() == 1:
             self.saved_stdout = sys.stdout
             # line-buffered replacement bound to the duplicated fd
             sys.stdout = os.fdopen(self.localoutfd, 'w', 1)
         else:
             self.saved_stdout = None
         if hasattr(sys.stderr, 'fileno') and sys.stderr.fileno() == 2:
             self.saved_stderr = sys.stderr
             # unbuffered (Python 2 style buffering=0) replacement
             sys.stderr = os.fdopen(self.localerrfd, 'w', 0)
         else:
             self.saved_stderr = None
         # point the real fds 1/2 at the capture files
         os.dup2(self.tmpout.fileno(), 1)
         os.dup2(self.tmperr.fileno(), 2)
Ejemplo n.º 13
0
def spawn(prog, args):
    """Fork and exec `prog`, wiring its stdin/stdout/stderr to pipes.

    Returns (pid, c2pread, p2cwrite): the parent reads the child's
    output from c2pread and feeds its input via p2cwrite.
    """
    p2cread, p2cwrite = os.pipe()
    c2pread, c2pwrite = os.pipe()
    pid = os.fork()
    if pid == 0:
        # Child
        for i in 0, 1, 2:
            try:
                os.close(i)
            except os.error:
                pass
        # dup() returns the lowest free descriptor, so these land on
        # 0, 1 and 2 in order.
        if os.dup(p2cread) != 0:
            sys.stderr.write('popen2: bad read dup\n')
        if os.dup(c2pwrite) != 1:
            sys.stderr.write('popen2: bad write dup\n')
        if os.dup(c2pwrite) != 2:
            sys.stderr.write('popen2: bad write dup\n')
        # Drop every other inherited fd, incl. the spare pipe ends.
        os.closerange(3, MAXFD)
        try:
            os.execvp(prog, args)
        finally:
            # Reached only if exec failed; the child must never return.
            sys.stderr.write('execvp failed\n')
            os._exit(1)
    # Parent: close the child's ends of the pipes.
    os.close(p2cread)
    os.close(c2pwrite)
    return pid, c2pread, p2cwrite
Ejemplo n.º 14
0
  def start(self):
    """Start tee-ing all stdout and stderr output to the file."""
    # Flush and save old file descriptors.
    sys.stdout.flush()
    sys.stderr.flush()
    self._old_stdout_fd = os.dup(sys.stdout.fileno())
    self._old_stderr_fd = os.dup(sys.stderr.fileno())
    # Save file objects
    self._old_stdout = sys.stdout
    self._old_stderr = sys.stderr

    # Replace std[out|err] with unbuffered file objects (Python 2
    # style buffering=0 -- text mode with 0 would raise on Python 3).
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
    sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0)

    # Create a tee subprocess.
    self._tee = _TeeProcess([self._file], True, self._old_stderr_fd,
                            os.getpid())
    self._tee.start()

    # Redirect stdout and stderr to the tee subprocess.
    writer_pipe = self._tee.writer_pipe
    os.dup2(writer_pipe, sys.stdout.fileno())
    os.dup2(writer_pipe, sys.stderr.fileno())
    # fds 1/2 keep the pipe's write end open after this close.
    os.close(writer_pipe)
Ejemplo n.º 15
0
 def does_stuff():
     """Exercise pipe/dup/dup2/close descriptor bookkeeping.

     Creates a pipe, duplicates both ends, checks that every new
     descriptor is distinct, reuses a freed slot via dup2, opens a
     second pipe (again distinct), closes everything and returns 42.
     """
     read_a, write_b = os.pipe()
     dup_c = os.dup(read_a)
     dup_d = os.dup(write_b)
     first_four = [read_a, write_b, dup_c, dup_d]
     # All four descriptors must be pairwise distinct.
     assert len(set(first_four)) == 4
     os.close(dup_c)
     os.dup2(dup_d, dup_c)  # re-occupy the freed slot
     read_e, write_f = os.pipe()
     # The second pipe must not collide with anything earlier.
     assert len(set(first_four + [read_e, write_f])) == 6
     for descriptor in first_four + [read_e, write_f]:
         os.close(descriptor)
     return 42
Ejemplo n.º 16
0
 def redirect_streams(self):
     """Silence the library's chatter: point fds 1/2 at os.devnull.

     Duplicates of the real fds are stored in self.old_stdout and
     self.old_stderr so a matching restore can undo the redirection.
     """
     # FIX: the flags argument was the decimal literal 777, which is a
     # meaningless (platform-dependent) flag combination that only
     # worked by accident; os.O_WRONLY is what was intended.
     devnull = os.open(os.devnull, os.O_WRONLY)
     self.old_stdout = os.dup(1)
     self.old_stderr = os.dup(2)
     os.dup2(devnull, 1)
     os.dup2(devnull, 2)
     # fds 1/2 now reference devnull; the original descriptor would
     # otherwise leak on every call.
     os.close(devnull)
Ejemplo n.º 17
0
def spawn(prog, args):
	"""Fork and exec `prog`, wiring its stdin/stdout/stderr to pipes.

	Returns (pid, c2pread, p2cwrite) to the parent; the child never
	returns.  FIX: the comparison operator was the Python-2-only '<>'
	(removed in Python 3) -- replaced by the equivalent '!='; the bare
	except around os.close is narrowed to OSError.
	"""
	p2cread, p2cwrite = os.pipe()
	c2pread, c2pwrite = os.pipe()
	pid = os.fork()
	if pid == 0:
		# Child: free fds 0-2, then rely on dup() returning the lowest
		# available descriptor to rebuild them from the pipe ends.
		os.close(0)
		os.close(1)
		os.close(2)
		if os.dup(p2cread) != 0:
			sys.stderr.write('popen2: bad read dup\n')
		if os.dup(c2pwrite) != 1:
			sys.stderr.write('popen2: bad write dup\n')
		if os.dup(c2pwrite) != 2:
			sys.stderr.write('popen2: bad write dup\n')
		# Drop every other inherited fd, incl. the spare pipe ends.
		for i in range(3, MAXFD):
			try:
				os.close(i)
			except OSError:
				pass
		try:
			os.execvp(prog, args)
		finally:
			# Reached only if exec failed; the child must never return.
			sys.stderr.write('execvp failed\n')
			os._exit(1)
	# Parent: close the child's ends of the pipes.
	os.close(p2cread)
	os.close(c2pwrite)
	return pid, c2pread, p2cwrite
Ejemplo n.º 18
0
    def __enter__(self):
        """Swap fds 0/1/2 for a fresh PTY and launch the REPL thread.

        Saves the blocking state, the termios settings and duplicates
        of the real std fds on self; presumably __exit__ (not shown)
        restores them -- confirm against the rest of the class.
        """
        # prepare standard file descriptors for raw manipulation
        self.was_blocking = os.get_blocking(0)
        os.set_blocking(0, False)
        try:
            self.terminal_attr_stdin = termios.tcgetattr(0)
            self.terminal_attr_stdout = termios.tcgetattr(1)
            self.terminal_attr_stderr = termios.tcgetattr(2)
            tty.setraw(0)
            tty.setraw(1)
            tty.setraw(2)
        except termios.error:  # probably redirected
            # stdin sentinel None marks "no terminal settings saved"
            self.terminal_attr_stdin = None

        # redirect standard file descriptors to new PTY
        master, slave = pty.openpty()
        os.set_blocking(master, False)
        self.real_stdin = os.dup(0)
        self.real_stdout = os.dup(1)
        self.real_stderr = os.dup(2)
        os.close(0)
        os.close(1)
        os.close(2)
        os.dup2(slave, 0)
        os.dup2(slave, 1)
        os.dup2(slave, 2)
        # fds 0-2 keep the slave end alive after this close
        os.close(slave)
        self.terminal_pipe = master

        # start REPL in separate thread
        threading.Thread(target=repl, args=(self,), daemon=True).start()

        return self
Ejemplo n.º 19
0
    def decorated( *fargs, **fkwargs ):
        """Call f(*fargs, **fkwargs), optionally silencing fds 1/2.

        When args.suppress_output is set, fds 1/2 are pointed at
        /dev/null for the duration of the call.  FIX: the call is now
        wrapped in try/finally so the redirection is undone even when
        f raises, and the saved duplicates are closed instead of
        leaking on every call.
        """
        suppress = args.suppress_output
        if suppress:

            # Get rid of what is already there ( should be nothing for this script )
            sys.stdout.flush()

            # Save file descriptors so it can be reactivated later 
            saved_stdout = os.dup( 1 )
            saved_stderr = os.dup( 2 )

            # /dev/null is used just to discard what is being printed
            devnull = os.open( '/dev/null', os.O_WRONLY )

            # Duplicate the file descriptor for /dev/null
            # and overwrite the value for stdout (file descriptor 1)
            os.dup2( devnull, 1 )
            os.dup2( devnull, 2 )

        try:
            result = f( *fargs, **fkwargs )
        finally:
            if suppress:

                # Close devnull after duplication (no longer needed)
                os.close( devnull )

                # Reenable stdout and stderr, then release the saved
                # duplicates so they do not leak.
                os.dup2( saved_stdout, 1 )
                os.dup2( saved_stderr, 2 )
                os.close( saved_stdout )
                os.close( saved_stderr )

        return result
Ejemplo n.º 20
0
    def __exit__( self, exc_type, exc_val, exc_tb ):
        """Feed self.stdin's buffered text to the wrapped run via a pipe.

        Runs the command only when no exception is pending; always
        returns False so any exception propagates.
        """
        if exc_type is None:
            _r, _w = os.pipe( )

            def copy( ):
                # Writer side: the with-block closes _w, signalling EOF.
                with os.fdopen( _w, 'w' ) as w:
                    w.write( self.stdin.getvalue( ) )

            t = Thread( target=copy )
            t.start( )
            try:
                _stdin = sys.stdin.fileno( )
                _old_stdin = os.dup( _stdin )
                os.close( _stdin )
                # dup() grabs the lowest free fd -- the stdin slot.
                assert _stdin == os.dup( _r )
                # monkey-patch Fabric
                _input_loop = fabric.operations.input_loop
                fabric.operations.input_loop = input_loop
                try:
                    self.stdin.result = self._run( )
                finally:
                    fabric.operations.input_loop = _input_loop
                    os.close( _stdin )
                    # NOTE(review): restores stdin via the lowest-fd
                    # rule, but _old_stdin and _r are never closed --
                    # looks like an fd leak per call; confirm.
                    os.dup( _old_stdin )
            finally:
                t.join( )
        return False
Ejemplo n.º 21
0
def posix_redirect_output(filename=None, permanent=True):
    """
    Redirect stdout/stderr to a file, using posix dup2.

    Generator: yields the target filename (a temp file is created when
    filename is None).  With permanent=False the original descriptors
    are restored afterwards.  Presumably wrapped by
    contextlib.contextmanager at the decoration site -- confirm.
    """
    sys.stdout.flush()
    sys.stderr.flush()

    stdout_fd = sys.stdout.fileno()
    stderr_fd = sys.stderr.fileno()

    if not permanent:
        # Keep duplicates so the redirection can be undone on exit.
        stdout_fd_copy = os.dup(stdout_fd)
        stderr_fd_copy = os.dup(stderr_fd)

    if filename is None:
        out_fd, filename = tempfile.mkstemp()
    else:
        out_fd = os.open(filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)

    try:
        # Redirect stdout and stderr to file
        os.dup2(out_fd, stdout_fd)
        os.dup2(out_fd, stderr_fd)

        yield filename
    finally:
        sys.stdout.flush()
        sys.stderr.flush()
        os.close(out_fd)

        if not permanent:
            os.dup2(stdout_fd_copy, stdout_fd)
            os.dup2(stderr_fd_copy, stderr_fd)
            os.close(stdout_fd_copy)
            os.close(stderr_fd_copy)
Ejemplo n.º 22
0
 def grabOutput(self, flag):
     """Toggle capture of rpm's C-level output (fds 1/2) into self.rpmout.

     flag=True installs a pipe behind fds 1/2 while sys.stdout/stderr
     keep writing to duplicates of the real streams; flag=False drains
     the pipe and undoes the redirection.
     """
     if flag:
         if not self.rpmout:
             # Grab rpm output, but not the python one.
             self.stdout = sys.stdout
             self.stderr = sys.stderr
             writer = codecs.getwriter(ENCODING)
             reader = codecs.getreader(ENCODING)
             # Python-level streams follow duplicates of the real fds...
             sys.stdout = writer(os.fdopen(os.dup(1), "w"))
             sys.stderr = writer(os.fdopen(os.dup(2), "w"))
             # ...while the real fds 1/2 now feed the capture pipe.
             pipe = os.pipe()
             os.dup2(pipe[1], 1)
             os.dup2(pipe[1], 2)
             os.close(pipe[1])
             self.rpmout = reader(os.fdopen(pipe[0], "r"))
             setCloseOnExec(self.rpmout.fileno())
             # Non-blocking reads so draining the pipe cannot stall us.
             flags = fcntl.fcntl(self.rpmout.fileno(), fcntl.F_GETFL, 0)
             flags |= os.O_NONBLOCK
             fcntl.fcntl(self.rpmout.fileno(), fcntl.F_SETFL, flags)
     else:
         if self.rpmout:
             # Drain whatever is still buffered, then restore fds 1/2
             # from the (still-live) python-level stream duplicates.
             self._rpmout()
             os.dup2(sys.stdout.fileno(), 1)
             os.dup2(sys.stderr.fileno(), 2)
             sys.stdout = self.stdout
             sys.stderr = self.stderr
             del self.stdout
             del self.stderr
             self.rpmout.close()
             self.rpmout = None
Ejemplo n.º 23
0
def spawnProc(tag, cmdline, copy=[]):
    """Spawn `cmdline` as background process `tag` inside its work dir.

    stdio is redirected to /dev/null plus log files for the spawned
    child only; the parent's fds are restored afterwards.  `copy`
    lists paths copied into the work dir first (only iterated, so the
    shared mutable default is harmless here).  FIX: the saved fd
    duplicates and the three child file objects were previously leaked
    on every call; they are now closed, and restoration happens in a
    finally so a failed spawn cannot leave stdio redirected.
    """
    initWorkDir(tag)
    os.chdir(workDir(tag))

    for i in copy:
        debug("Copying %s into workdir of %s" % (i, tag))
        # NOTE(review): shell interpolation -- paths containing spaces
        # or shell metacharacters break this; consider shutil instead.
        os.system("cp -r %s %s" % (i, workDir(tag)))

    debug("Spawning '%s' as %s" % (" ".join(cmdline), tag))

    saved_stdin = os.dup(0)
    saved_stdout = os.dup(1)
    saved_stderr = os.dup(2)
    child_stdin = open("/dev/null", "r")
    child_stdout = open("stdout.log", "w")
    child_stderr = open("stderr.log", "w")
    try:
        os.dup2(child_stdin.fileno(), 0)
        os.dup2(child_stdout.fileno(), 1)
        os.dup2(child_stderr.fileno(), 2)
        pid = os.spawnvp(os.P_NOWAIT, cmdline[0], cmdline)
    finally:
        # Restore the parent's stdio and release every temporary
        # descriptor and file object.
        os.dup2(saved_stdin, 0)
        os.dup2(saved_stdout, 1)
        os.dup2(saved_stderr, 2)
        for fd in (saved_stdin, saved_stdout, saved_stderr):
            os.close(fd)
        for f in (child_stdin, child_stdout, child_stderr):
            f.close()

    Running[tag] = pid
    Running[pid] = tag
Ejemplo n.º 24
0
def check_runnable_weight_field():
    """Probe whether struct sched_entity has a `runnable_weight` member.

    Compiles a tiny BPF program with fd 2 temporarily pointed at a
    temp file so llvm diagnostics stay off the screen.  Presumably
    dup/close/open/unlink here are os-level names imported unqualified
    (open() is called with flag constants) -- confirm against the
    module imports.  Returns True iff the probe compiled.
    """
    # Define the bpf program for checking purpose
    bpf_check_text = """
#include <linux/sched.h>
unsigned long dummy(struct sched_entity *entity)
{
    return entity->runnable_weight;
}
"""

    # Get a temporary file name
    tmp_file = NamedTemporaryFile(delete=False)
    tmp_file.close();

    # Duplicate and close stderr (fd = 2)
    old_stderr = dup(2)
    close(2)

    # Open a new file, should get fd number 2 (lowest free descriptor)
    # This will avoid printing llvm errors on the screen
    fd = open(tmp_file.name, O_WRONLY)
    try:
        t = BPF(text=bpf_check_text)
        success_compile = True
    except:
        # Bare except: any failure at all (not only a compile error)
        # counts as "field absent".
        success_compile = False

    # Release the fd 2, and next dup should restore old stderr
    close(fd)
    dup(old_stderr)
    close(old_stderr)

    # remove the temporary file and return
    unlink(tmp_file.name)
    return success_compile
Ejemplo n.º 25
0
 def grabOutput(self, flag):
     """Toggle capture of rpm's fd-level output into a temp file
     exposed as self.rpmout.

     While active, sys.stdout/stderr write to duplicates of the real
     fds; the real fds 1/2 feed a mkstemp file that is unlinked
     immediately (the open handles keep it alive).
     """
     if flag:
         if not self.rpmout:
             # Grab rpm output, but not the python one.
             self.stdout = sys.stdout
             self.stderr = sys.stderr
             writer = codecs.getwriter(ENCODING)
             reader = codecs.getreader(ENCODING)
             sys.stdout = writer(os.fdopen(os.dup(1), "w"),
                                 errors="replace")
             sys.stderr = writer(os.fdopen(os.dup(2), "w"),
                                 errors="replace")
             fd, rpmoutpath = tempfile.mkstemp("-smart-rpm-out.txt")
             os.dup2(fd, 1)
             os.dup2(fd, 2)
             os.close(fd)
             self.rpmout = reader(open(rpmoutpath))
             # Unlinked while open: disk space is reclaimed once both
             # the reader and fds 1/2 are done with it.
             os.unlink(rpmoutpath)
     else:
         if self.rpmout:
             # Process what was captured, then restore fds 1/2 from the
             # (still-live) python-level stream duplicates.
             self._process_rpmout()
             os.dup2(sys.stdout.fileno(), 1)
             os.dup2(sys.stderr.fileno(), 2)
             sys.stdout = self.stdout
             sys.stderr = self.stderr
             del self.stdout
             del self.stderr
             self.rpmout.close()
             self.rpmout = None
             self.rpmoutbuffer = ""
Ejemplo n.º 26
0
    def _becomeLogSlave(self, slaveFd, loggerPid):
        """ hand over control of io to logging process, grab info
            from pseudo tty

            Points fds 1/2 (and optionally 0) at the pty slave after
            saving duplicates of the originals on self for later
            restoration.
        """
        self.loggerPid = loggerPid

        # Remember terminal settings only when stdin really is a tty.
        if self.withStdin and sys.stdin.isatty():
            self.oldTermios = termios.tcgetattr(sys.stdin.fileno())
        else:
            self.oldTermios = None

        newTermios = termios.tcgetattr(slaveFd)
        # Don't wait after receiving a character
        # NOTE(review): cc values set as one-char strings is the
        # Python 2 termios convention -- confirm the target version.
        newTermios[6][termios.VTIME] = '\x00'
        # Read at least these many characters before returning
        newTermios[6][termios.VMIN] = '\x01'

        termios.tcsetattr(slaveFd, termios.TCSADRAIN, newTermios)
        # Raw mode
        tty.setraw(slaveFd)

        # Keep duplicates of the real stdio for later restoration.
        self.oldStderr = os.dup(sys.stderr.fileno())
        self.oldStdout = os.dup(sys.stdout.fileno())
        if self.withStdin:
            self.oldStdin = os.dup(sys.stdin.fileno())
            os.dup2(slaveFd, 0)
        else:
            self.oldStdin = sys.stdin.fileno()
        os.dup2(slaveFd, 1)
        os.dup2(slaveFd, 2)
        # fds 0-2 now hold the pty slave; the original fd can go.
        os.close(slaveFd)
        self.logging = True
Ejemplo n.º 27
0
def stdouterrin_setnull():
    """ redirect file descriptors 0 and 1 (and possibly 2) to /dev/null. 
        note that this function may run remotely without py lib support. 
    """
    # complete confusion (this is independent from the sys.stdout
    # and sys.stderr redirection that gateway.remote_exec() can do)
    # note that we redirect fd 2 on win too, since for some reason that
    # blocks there, while it works (sending to stderr if possible else
    # ignoring) on *nix
    import sys, os
    try:
        devnull = os.devnull
    except AttributeError:
        # very old Pythons lack os.devnull
        if os.name == 'nt':
            devnull = 'NUL'
        else:
            devnull = '/dev/null'
    # stdin: keep a python-level handle on a duplicate of the old fd,
    # then point fd 0 at devnull
    sys.stdin  = os.fdopen(os.dup(0), 'rb', 0)
    fd = os.open(devnull, os.O_RDONLY)
    os.dup2(fd, 0)
    os.close(fd)

    # stdout: same dance for fd 1 (this devnull fd is closed after the
    # win32 stderr branch below, which reuses it)
    sys.stdout = os.fdopen(os.dup(1), 'wb', 0)
    fd = os.open(devnull, os.O_WRONLY)
    os.dup2(fd, 1)

    # stderr for win32
    if os.name == 'nt':
        sys.stderr = os.fdopen(os.dup(2), 'wb', 0)
        os.dup2(fd, 2)
    os.close(fd)
Ejemplo n.º 28
0
def uriToBrowser(uri=None):
    ''' 
        Method that launches the URI in the default browser of the system. This returns no new entity.

        :param uri:    uri to open.
    '''
    # Temporarily deactivating standard output and error:
    #   Source: <https://stackoverflow.com/questions/2323080/how-can-i-disable-the-webbrowser-message-in-python>

    # Cloning stdout (1) and stderr (2)
    savout1 = os.dup(1)
    savout2 = os.dup(2)

    # Closing them; os.open() then grabs the lowest free descriptor,
    # so /dev/null lands on fd 1 (fd 2 simply stays closed meanwhile).
    os.close(1)
    os.close(2)
    os.open(os.devnull, os.O_RDWR)

    try:
        # Opening the Tor URI using onion.cab proxy
        if ".onion" in uri:
            wb.get().open(uri.replace(".onion", ".onion.cab"), new=2)    
        else:
            wb.get().open(uri, new=2)
    finally:
        # Reopening them... dup2 onto fd 1 implicitly closes the
        # /dev/null descriptor occupying that slot.
        os.dup2(savout1, 1)
        os.dup2(savout2, 2)
        # FIX: the saved duplicates were previously leaked on every
        # call; close them now that fds 1/2 are restored.
        os.close(savout1)
        os.close(savout2)
Ejemplo n.º 29
0
def launch_client(cmd, fdin, fdout):
	cmd = cmd.split()
	pid = os.fork()
	if pid == 0:
		# child
		os.close(0)
		os.close(1)

		if os.dup(fdin) != 0:
			print "cannot duplicate fdin"

		if os.dup(fdout) != 1:
			print "cannot duplicate fdout"

		for i in range(3, MAXFD):
			try:
				os.close(i)
			except:
				pass
		try:
			os.execv(cmd[0], cmd)
		finally:
			print >>sys.stdrr, "failed to execv %s!" % cmd
			os._exit(1)
	else:
		# parent
		print "started %s %d" % (cmd, pid)
		return pid
Ejemplo n.º 30
0
def main():
    """Demonstrate fd-level redirection interacting with logging and
    print (Python 2 script: print statements, reload(sys))."""
    # Save the real stdout/stderr fds so they can be restored later.
    s_o = os.dup(1)
    s_e = os.dup(2)
    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)s %(message)s")
    logging.info('init')
    print os.environ.get('PYTHONUNBUFFERED')
    # NOTE(review): set after interpreter start, so it can only affect
    # child processes, not this one's buffering -- confirm intent.
    os.environ['PYTHONUNBUFFERED'] = '1'

    # Point fds 1/2 at a log file.  NOTE(review): `fd` is never closed.
    fd = os.open('test.log', os.O_RDWR | os.O_CREAT)
    os.dup2(fd, 1)
    os.dup2(fd, 2)

    print 'out1'
    print >> sys.stderr, 'err1'
    logging.info('init-1')
    reload(sys)  # Python 2: re-runs the sys module initialization
    print >> sys.stderr, os.environ.get('PYTHONUNBUFFERED')
    logging.info('initi-2')
    print 'out2'
    print >> sys.stderr, 'err2'

    # Restore the original stdout/stderr.
    os.dup2(s_o, 1)
    os.dup2(s_e, 2)

    print 'finish, err'
    print >> sys.stderr, 'finish err'
    logging.info('end')
Ejemplo n.º 31
0
#      to the 'Test/' directory.

from threading import Thread
import time
import cmor, numpy
import contextlib
import unittest
import signal
import sys, os
import tempfile
import cdms2

# ------------------------------------------------------
# Copy stdout and stderr file descriptor for cmor output
# ------------------------------------------------------
newstdout = os.dup(1)
newstderr = os.dup(2)
# --------------
# Create tmpfile
# --------------
# mkstemp returns a tuple: (OS-level file descriptor, absolute path).
tmpfile = tempfile.mkstemp(
)  #tempfile[0] = File number, tempfile[1] = File name.
# Point the process-wide stdout (1) and stderr (2) at the temp file so
# cmor's C-level output is captured; the real descriptors survive in
# newstdout/newstderr.  The mkstemp descriptor is redundant after dup2
# and is closed.  NOTE(review): the temp file itself is never removed
# here — confirm cleanup happens elsewhere in the test.
os.dup2(tmpfile[0], 1)
os.dup2(tmpfile[0], 2)
os.close(tmpfile[0])

# 'global' at module level is a no-op; testOK is a plain module variable.
global testOK
testOK = []


# ==============================
Ejemplo n.º 32
0
 def __enter__(self):
     """Redirect process-level stdout (fd 1) into self._new_stdout.

     A duplicate of the current stdout descriptor is saved in
     self.oldstdout_fno, presumably so __exit__ (not shown) can
     restore it.
     """
     sys.stdout.flush()  # push buffered Python-level output before swapping fds
     self.oldstdout_fno = os.dup(sys.stdout.fileno())
     os.dup2(self._new_stdout.fileno(), 1)
Ejemplo n.º 33
0
 def __init__(self, fd):
     # Store a private duplicate so this object owns its own descriptor,
     # independent of the lifetime of the caller's fd.
     duplicated_fd = os.dup(fd)
     self.fd = duplicated_fd
Ejemplo n.º 34
0
 def setUp(self):
     # Keep a duplicate of the real stdout descriptor (fd 1) and the
     # current stdout object so the test can restore both afterwards.
     self.out_fd = os.dup(1)
     self.out = sys.stdout
Ejemplo n.º 35
0
 def __enter__(self):
     # Back up the real stdout descriptor, then splice the write end of
     # our capture pipe over it: everything written to fd self.stdout_fd
     # now lands in the pipe instead of the original stream.
     self.stdout = os.dup(self.stdout_fd)
     os.dup2(self.stdout_pipe_write, self.stdout_fd)
     return self
Ejemplo n.º 36
0
    filename = url.split('/')[-1]
    filesize = int(requests.head(url).headers['Content-Length'])
    print('%s filesize:%s' % (filename, filesize))

    threadnum = 3
    # NOTE(review): the semaphore is created but never stored or acquired,
    # so it does not actually limit concurrency — confirm intent.
    threading.BoundedSemaphore(threadnum)  # number of threads allowed
    step = filesize // threadnum
    mtd_list = []
    start = 0
    end = -1

    # Create (and truncate) the destination file before reopening it rb+.
    tempf = open('./' + filename, 'w')
    tempf.close()
    mtd_list = []
    with open('./' + filename, 'rb+') as f:
        # Get the file handle
        fileno = f.fileno()  # integer file descriptor, usable for low-level OS I/O
        while end < filesize - 1:
            start = end + 1
            end = start + step - 1
            if end > filesize:
                end = filesize
            print('Start:%s,end:%s' % (start, end))
            # Duplicate the descriptor so each worker thread gets its own
            # independent file object (with its own offset).
            dup = os.dup(fileno)
            fd = os.fdopen(dup, 'rb+', -1)
            t = Mythread(url, start, end, fd)
            t.start()
            mtd_list.append(t)
        for i in mtd_list:
            i.join()
    # NOTE(review): redundant — the with-block already closed f.
    f.close()
Ejemplo n.º 37
0
    def _start_execution_in_container(self, args, stdin, stdout, stderr, env,
                                      cwd, temp_dir, cgroups, output_dir,
                                      result_files_patterns, parent_setup_fn,
                                      child_setup_fn, parent_cleanup_fn):
        """Execute the given command and measure its resource usage similarly to super()._start_execution(),
        but inside a container implemented using Linux namespaces.
        The command has no network access (only loopback),
        a fresh directory as /tmp and no write access outside of this,
        and it does not see other processes except itself.

        Returns:
            ``(grandchild_pid, wait_for_grandchild)`` where the callable
            blocks until the tool exits and returns
            ``(exitcode, ru_child, parent_cleanup)``.
        """
        assert self._use_namespaces

        args = self._build_cmdline(args, env=env)

        # We have three processes involved:
        # parent: the current Python process in which RunExecutor is executing
        # child: child process in new namespace (PID 1 in inner namespace),
        #        configures inner namespace, serves as dummy init,
        #        collects result of grandchild and passes it to parent
        # grandchild: child of child process (PID 2 in inner namespace), exec()s tool

        # We need the following communication steps between these proceses:
        # 1a) grandchild tells parent its PID (in outer namespace).
        # 1b) grandchild tells parent that it is ready and measurement should begin.
        # 2) parent tells grandchild that measurement has begun and tool should
        #    be exec()ed.
        # 3) child tells parent about return value and resource consumption of grandchild.
        # 1a and 1b are done together by sending the PID through a pipe.
        # 2 is done by sending a null byte through a pipe.
        # 3 is done by sending a pickled object through the same pipe as #2.
        # We cannot use the same pipe for both directions, because otherwise a sender might
        # read the bytes it has sent itself.

        # Error codes from child to parent
        CHILD_OSERROR = 128
        CHILD_UNKNOWN_ERROR = 129

        from_parent, to_grandchild = os.pipe(
        )  # "downstream" pipe parent->grandchild
        from_grandchild, to_parent = os.pipe(
        )  # "upstream" pipe grandchild/child->parent

        # If the current directory is within one of the bind mounts we create,
        # we need to cd into this directory again, otherwise we would not see the bind mount,
        # but the directory behind it. Thus we always set cwd to force a change of directory.
        cwd = os.path.abspath(cwd or os.curdir)

        def grandchild():
            """Setup everything inside the process that finally exec()s the tool."""
            try:
                # We know that this process has PID 2 in the inner namespace,
                # but we actually need to know its PID in the outer namespace
                # such that parent can put us into the correct cgroups.
                # According to http://man7.org/linux/man-pages/man7/pid_namespaces.7.html,
                # there are two ways to achieve this: sending a message with the PID
                # via a socket (but Python < 3.3 lacks a convenient API for sendmsg),
                # and reading /proc/self in the outer procfs instance (that's what we do).
                my_outer_pid = container.get_my_pid_from_procfs()

                container.mount_proc()
                container.drop_capabilities()
                container.reset_signal_handling()
                child_setup_fn()  # Do some other setup the caller wants.

                # Signal readiness to parent by sending our PID and wait until parent is also ready
                os.write(to_parent, str(my_outer_pid).encode())
                received = os.read(from_parent, 1)
                assert received == b'\0', received
            finally:
                # close remaining ends of pipe
                os.close(from_parent)
                os.close(to_parent)
            # here Python will exec() the tool for us

        def child():
            """Setup everything inside the container, start the tool, and wait for result."""
            try:
                logging.debug(
                    "Child: child process of RunExecutor with PID %d started",
                    container.get_my_pid_from_procfs())

                # Put all received signals on hold until we handle them later.
                container.block_all_signals()

                # We want to avoid leaking file descriptors to the executed child.
                # It is also nice if the child has only the minimal necessary file descriptors,
                # to avoid keeping other pipes and files open, e.g., those that the parent
                # uses to communicate with other containers (if containers are started in parallel).
                # Thus we do not use the close_fds feature of subprocess.Popen,
                # but do the same here manually.
                # We keep the relevant ends of our pipes, and stdin/out/err of child and grandchild.
                necessary_fds = {
                    sys.stdin, sys.stdout, sys.stderr, to_parent, from_parent,
                    stdin, stdout, stderr
                } - {None}
                container.close_open_fds(keep_files=necessary_fds)

                try:
                    if not self._allow_network:
                        container.activate_network_interface("lo")
                    self._setup_container_filesystem(temp_dir)
                except EnvironmentError as e:
                    logging.critical("Failed to configure container: %s", e)
                    return CHILD_OSERROR

                try:
                    os.chdir(cwd)
                except EnvironmentError as e:
                    logging.critical(
                        "Cannot change into working directory inside container: %s",
                        e)
                    return CHILD_OSERROR

                try:
                    # preexec_fn runs grandchild() in the forked process
                    # just before exec() replaces it with the tool.
                    grandchild_proc = subprocess.Popen(args,
                                                       stdin=stdin,
                                                       stdout=stdout,
                                                       stderr=stderr,
                                                       env=env,
                                                       close_fds=False,
                                                       preexec_fn=grandchild)
                except (EnvironmentError, RuntimeError) as e:
                    logging.critical("Cannot start process: %s", e)
                    return CHILD_OSERROR

                container.drop_capabilities()

                # Close other fds that were still necessary above.
                container.close_open_fds(
                    keep_files={sys.stdout, sys.stderr, to_parent})

                # Set up signal handlers to forward signals to grandchild
                # (because we are PID 1, there is a special signal handling otherwise).
                # cf. dumb-init project: https://github.com/Yelp/dumb-init
                # Also wait for grandchild and return its result.
                if _HAS_SIGWAIT:
                    grandchild_result = container.wait_for_child_and_forward_all_signals(
                        grandchild_proc.pid, args[0])
                else:
                    container.forward_all_signals_async(
                        grandchild_proc.pid, args[0])
                    grandchild_result = self._wait_for_process(
                        grandchild_proc.pid, args[0])

                logging.debug(
                    "Child: process %s terminated with exit code %d.", args[0],
                    grandchild_result[0])
                os.write(to_parent, pickle.dumps(grandchild_result))
                os.close(to_parent)

                return 0
            except EnvironmentError as e:
                logging.exception("Error in child process of RunExecutor")
                return CHILD_OSERROR
            except:
                # Need to catch everything because this method always needs to return a int
                # (we are inside a C callback that requires returning int).
                logging.exception("Error in child process of RunExecutor")
                return CHILD_UNKNOWN_ERROR

        try:  # parent
            try:
                child_pid = container.execute_in_namespace(
                    child, use_network_ns=not self._allow_network)
            except OSError as e:
                raise BenchExecException(
                    "Creating namespace for container mode failed: " +
                    os.strerror(e.errno))
            logging.debug(
                "Parent: child process of RunExecutor with PID %d started.",
                child_pid)

            def check_child_exit_code():
                """Check if the child process terminated cleanly and raise an error otherwise."""
                child_exitcode, unused_child_rusage = self._wait_for_process(
                    child_pid, args[0])
                child_exitcode = util.ProcessExitCode.from_raw(child_exitcode)
                logging.debug(
                    "Parent: child process of RunExecutor with PID %d terminated with %s.",
                    child_pid, child_exitcode)

                if child_exitcode:
                    if child_exitcode.value:
                        if child_exitcode.value == CHILD_OSERROR:
                            # This was an OSError in the child, details were already logged
                            raise BenchExecException(
                                "execution in container failed, check log for details"
                            )
                        elif child_exitcode.value == CHILD_UNKNOWN_ERROR:
                            raise BenchExecException(
                                "unexpected error in container")
                        raise OSError(child_exitcode.value,
                                      os.strerror(child_exitcode.value))
                    raise OSError(
                        0, "Child process of RunExecutor terminated with " +
                        str(child_exitcode))

            # Close unnecessary ends of pipes such that read() does not block forever
            # if all other processes have terminated.
            os.close(from_parent)
            os.close(to_parent)

            container.setup_user_mapping(child_pid,
                                         uid=self._uid,
                                         gid=self._gid)

            try:
                grandchild_pid = int(os.read(
                    from_grandchild, 10))  # 10 bytes is enough for 32bit int
            except ValueError:
                # probably empty read, i.e., pipe closed, i.e., child or grandchild failed
                check_child_exit_code()
                assert False, "Child process of RunExecutor terminated cleanly but did not send expected data."

            logging.debug(
                "Parent: executing %s in grand child with PID %d via child with PID %d.",
                args[0], grandchild_pid, child_pid)

            # start measurements
            cgroups.add_task(grandchild_pid)
            parent_setup = parent_setup_fn()

            # Signal grandchild that setup is finished
            os.write(to_grandchild, b'\0')

            # Copy file descriptor, otherwise we could not close from_grandchild in finally block
            # and would leak a file descriptor in case of exception.
            from_grandchild_copy = os.dup(from_grandchild)
        finally:
            os.close(from_grandchild)
            os.close(to_grandchild)

        # Deferred part of the protocol: the caller invokes this to collect
        # the tool's exit code and resource usage once the run is over.
        def wait_for_grandchild():
            # 1024 bytes ought to be enough for everyone^Wour pickled result
            try:
                received = os.read(from_grandchild_copy, 1024)
            except OSError as e:
                if self.PROCESS_KILLED and e.errno == errno.EINTR:
                    # Read was interrupted because of Ctrl+C, we just try again
                    received = os.read(from_grandchild_copy, 1024)
                else:
                    raise e

            parent_cleanup = parent_cleanup_fn(parent_setup)

            os.close(from_grandchild_copy)
            check_child_exit_code()

            if result_files_patterns:
                self._transfer_output_files(temp_dir, cwd, output_dir,
                                            result_files_patterns)

            exitcode, ru_child = pickle.loads(received)
            return exitcode, ru_child, parent_cleanup

        return grandchild_pid, wait_for_grandchild
Ejemplo n.º 38
0
 def __enter__(self):
     # Save a duplicate of the stream's descriptor (presumably restored
     # by __exit__, not shown), then silence the stream by pointing its
     # descriptor at the null device.
     self.orig_stream_dup = os.dup(self.orig_stream_fileno)
     self.devnull = open(os.devnull, 'w')
     os.dup2(self.devnull.fileno(), self.orig_stream_fileno)
     # NOTE(review): returns None, so "with ... as x" binds x to None.
Ejemplo n.º 39
0
        model_list = os.listdir(
            os.path.join(ShapeNetv1_dir, data_type, class_id))

        for model_id in model_list:
            start = time.time()
            exr_dir = os.path.join(output_dir, 'exr', model_id)
            pose_dir = os.path.join(output_dir, 'pose', model_id)
            if os.path.exists(os.path.join(exr_dir, '32.exr')):
                print("skip " + exr_dir)
                continue

            os.makedirs(exr_dir, exist_ok=True)
            os.makedirs(pose_dir, exist_ok=True)

            # Redirect output to log file
            old_os_out = os.dup(1)
            os.close(1)
            os.open('blender.log', os.O_WRONLY)

            # Import mesh model
            model_path = os.path.join(ShapeNetv1_dir, data_type, class_id,
                                      model_id, 'model.obj')
            bpy.ops.import_scene.obj(filepath=model_path)

            # Rotate model by 90 degrees around x-axis (z-up => y-up) to match ShapeNet's coordinates
            bpy.ops.transform.rotate(value=-np.pi / 2, axis=(1, 0, 0))

            # Render
            for i in range(viewspace.shape[0]):
                scene.frame_set(i)
                cam_pose = mathutils.Vector(
Ejemplo n.º 40
0
def connect(ssh_cmd, rhostport, python, stderr, options):
    """Launch the server over ssh (or locally) and feed it its own code.

    Parses *rhostport* into host and optional port (handling bracketed
    IPv6, bare IPv6, and host:port forms), builds a bootstrap python
    command that reads the zlib-packed server modules from stdin, starts
    it via ssh (or directly when the host is '-'), and sends the packed
    payload through a socketpair.

    Returns:
        (p, s2): the spawned process object and the parent's socket end.
    """
    portl = []

    rhostIsIPv6 = False
    if (rhostport or '').count(':') > 1:
        rhostIsIPv6 = True
        if rhostport.count(']') or rhostport.count('['):
            result = rhostport.split(']')
            rhost = result[0].strip('[')
            if len(result) > 1:
                result[1] = result[1].strip(':')
                # Bug fix: was "result[1] is not ''", which compares string
                # *identity* (implementation-dependent); use inequality to
                # test for a non-empty port string.
                if result[1] != '':
                    portl = ['-p', str(int(result[1]))]
        else:  # can't disambiguate IPv6 colons and a port number. pass the hostname through.
            rhost = rhostport
    else:  # IPv4
        l = (rhostport or '').split(':', 1)
        rhost = l[0]
        if len(l) > 1:
            portl = ['-p', str(int(l[1]))]

    if rhost == '-':
        rhost = None

    ipv6flag = []
    if rhostIsIPv6:
        ipv6flag = ['-6']

    z = zlib.compressobj(1)
    content = readfile('assembler.py')
    optdata = ''.join("%s=%r\n" % (k, v) for (k, v) in options.items())
    content2 = (empackage(z, 'cmdline_options.py', optdata) +
                empackage(z, 'helpers.py') +
                empackage(z, 'compat/ssubprocess.py') +
                empackage(z, 'ssnet.py') + empackage(z, 'hostwatch.py') +
                empackage(z, 'server.py') + "\n")

    pyscript = r"""
                import sys;
                skip_imports=1;
                verbosity=%d;
                exec compile(sys.stdin.read(%d), "assembler.py", "exec")
                """ % (helpers.verbose or 0, len(content))
    pyscript = re.sub(r'\s+', ' ', pyscript.strip())

    if not rhost:
        # ignore the --python argument when running locally; we already know
        # which python version works.
        argv = [sys.argv[1], '-c', pyscript]
    else:
        if ssh_cmd:
            sshl = ssh_cmd.split(' ')
        else:
            sshl = ['ssh']
        if python:
            pycmd = "'%s' -c '%s'" % (python, pyscript)
        else:
            pycmd = ("P=python2; $P -V 2>/dev/null || P=python; "
                     "\"$P\" -c '%s'") % pyscript
        argv = (sshl + portl + ipv6flag + [rhost, '--', pycmd])
    (s1, s2) = socket.socketpair()

    def setup():
        # runs in the child process
        s2.close()

    # Two independent descriptors on the same socket so the child gets
    # separate stdin/stdout handles; our copies are closed after spawn.
    s1a, s1b = os.dup(s1.fileno()), os.dup(s1.fileno())
    s1.close()
    debug2('executing: %r\n' % argv)
    p = ssubprocess.Popen(argv,
                          stdin=s1a,
                          stdout=s1b,
                          preexec_fn=setup,
                          close_fds=True,
                          stderr=stderr)
    os.close(s1a)
    os.close(s1b)
    s2.sendall(content)
    s2.sendall(content2)
    return p, s2
Ejemplo n.º 41
0
 def __init__(self):
     """Prepare descriptors for silencing stdout and stderr."""
     # Two throwaway descriptors on the null device, one each for
     # stdout and stderr.
     null_pair = []
     for _ in range(2):
         null_pair.append(os.open(os.devnull, os.O_RDWR))
     self.null_fds = null_pair
     # Duplicates of the real stdout (1) and stderr (2) for later restore.
     self.save_fds = [os.dup(fd_num) for fd_num in (1, 2)]
Ejemplo n.º 42
0
def fork(pkg, function, dirty, fake):
    """Fork a child process to do part of a spack build.

    Args:

        pkg (PackageBase): package whose environment we should set up the
            forked process for.
        function (callable): argless function to run in the child
            process.
        dirty (bool): If True, do NOT clean the environment before
            building.
        fake (bool): If True, skip package setup b/c it's not a real build

    Returns:
        Whatever ``function`` returned in the child (sent back through a
        multiprocessing pipe), or ``None`` if the child was stopped via
        ``StopIteration``.

    Usage::

        def child_fun():
            # do stuff
        build_env.fork(pkg, child_fun)

    Forked processes are run with the build environment set up by
    spack.build_environment.  This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError.  The parent is
    expected to handle (or re-raise) the ChildError.
    """
    def child_process(child_pipe, input_stream):
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_stream is not None:
            sys.stdin = input_stream

        try:
            if not fake:
                setup_package(pkg, dirty=dirty)
            return_value = function()
            child_pipe.send(return_value)
        except StopIteration as e:
            # StopIteration is used to stop installations
            # before the final stage, mainly for debug purposes
            tty.msg(e)
            child_pipe.send(None)

        except BaseException:
            # catch ANYTHING that goes wrong in the child process
            exc_type, exc, tb = sys.exc_info()

            # Need to unwind the traceback in the child because traceback
            # objects can't be sent to the parent.
            tb_string = traceback.format_exc()

            # build up some context from the offending package so we can
            # show that, too.
            package_context = get_package_context(tb)

            build_log = None
            if hasattr(pkg, 'log_path'):
                build_log = pkg.log_path

            # make a pickleable exception to send to parent.
            msg = "%s: %s" % (exc_type.__name__, str(exc))

            ce = ChildError(msg, exc_type.__module__, exc_type.__name__,
                            tb_string, build_log, package_context)
            child_pipe.send(ce)

        finally:
            child_pipe.close()

    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_stream = None
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            # Duplicate the descriptor so the parent's close() below does
            # not take the child's copy down with it.
            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))

        p = multiprocessing.Process(target=child_process,
                                    args=(child_pipe, input_stream))
        p.start()

    except InstallError as e:
        e.pkg = pkg
        raise

    finally:
        # Close the input stream in the parent process
        if input_stream is not None:
            input_stream.close()

    # Blocks until the child sends its result (or wrapped error).
    # NOTE(review): if Process(...) itself raised before binding 'p',
    # p.join() below would fail with NameError — confirm intended.
    child_result = parent_pipe.recv()
    p.join()

    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg

    # If the child process raised an error, print its output here rather
    # than waiting until the call to SpackError.die() in main(). This
    # allows exception handling output to be logged from within Spack.
    # see spack.main.SpackCommand.
    if isinstance(child_result, ChildError):
        child_result.print_context()
        raise child_result

    return child_result
Ejemplo n.º 43
0
def main():
    """
    Check starting conditions and start GUI.

    First, check command line arguments and start loggers. Set log levels. Try
    all imports and exit verbosely if a library is not found. Disable outputs
    to stdout and start the GUI.
    """

    # Set ERROR level for PyQt5 logger
    qtlogger = logging.getLogger('PyQt5')
    qtlogger.setLevel(logging.ERROR)

    parser = argparse.ArgumentParser(
        description="edclient - Espdrone graphical control client")
    parser.add_argument('--debug',
                        '-d',
                        nargs=1,
                        default='info',
                        type=str,
                        help="set debug level "
                        "[minimal, info, debug, debugfile]")
    args = parser.parse_args()
    # With nargs=1, args.debug is a one-element list when the flag is
    # given, but the plain string 'info' by default — so the 'in' checks
    # below do membership on the list and substring match on the default.
    debug = args.debug

    cflogger = logging.getLogger('')

    # Set correct logging fuctionality according to commandline
    if ("debugfile" in debug):
        logging.basicConfig(level=logging.DEBUG)
        # Add extra format options for file logger (thread and time)
        formatter = logging.Formatter('%(asctime)s:%(threadName)s:%(name)'
                                      's:%(levelname)s:%(message)s')
        filename = "debug-%s.log" % datetime.datetime.now()
        filehandler = logging.FileHandler(filename)
        filehandler.setLevel(logging.DEBUG)
        filehandler.setFormatter(formatter)
        cflogger.addHandler(filehandler)
    elif ("debug" in debug):
        logging.basicConfig(level=logging.DEBUG)
    elif ("minimal" in debug):
        logging.basicConfig(level=logging.WARNING)
    elif ("info" in debug):
        logging.basicConfig(level=logging.INFO)

    logger = logging.getLogger(__name__)

    logger.debug("Using config path {}".format(edclient.config_path))
    logger.debug("sys.path={}".format(sys.path))

    # Try all the imports used in the project here to control what happens....
    try:
        import usb  # noqa
    except ImportError:
        logger.critical("No pyusb installation found, exiting!")
        sys.exit(1)

    if not sys.platform.startswith('linux'):
        try:
            import sdl2  # noqa
        except ImportError:
            logger.critical("No pysdl2 installation found, exiting!")
            sys.exit(1)

    try:
        import PyQt5  # noqa
    except ImportError:
        logger.critical("No PyQT5 installation found, exiting!")
        sys.exit(1)

    # Disable printouts from STL: point the real fd 1 at the null device
    # and rebind sys.stdout to a duplicate of the original descriptor,
    # so Python-level prints still reach the terminal.
    # NOTE(review): the descriptor returned by os.open is left open after
    # dup2 (small fd leak) — harmless for a one-shot startup, but confirm.
    if os.name == 'posix':
        stdout = os.dup(1)
        os.dup2(os.open('/dev/null', os.O_WRONLY), 1)
        sys.stdout = os.fdopen(stdout, 'w')
        logger.info("Disabling STL printouts")

    if os.name == 'nt':
        stdout = os.dup(1)
        os.dup2(os.open('NUL', os.O_WRONLY), 1)
        sys.stdout = os.fdopen(stdout, 'w')
        logger.info("Disabling STL printouts")

    if sys.platform == 'darwin':
        try:
            import Foundation
            bundle = Foundation.NSBundle.mainBundle()
            if bundle:
                info = (bundle.localizedInfoDictionary()
                        or bundle.infoDictionary())
                if info:
                    info['CFBundleName'] = 'Espdrone'
        except ImportError:
            logger.info("Foundation not found. Menu will show python as "
                        "application name")

    # Start up the main user-interface
    from .ui.main import MainUI
    from PyQt5.QtWidgets import QApplication
    from PyQt5.QtGui import QIcon

    app = QApplication(sys.argv)

    # Create and set an event loop that combines qt and asyncio
    loop = QSelectorEventLoop(app)
    asyncio.set_event_loop(loop)

    app.setWindowIcon(QIcon(edclient.module_path + "/icon-256.png"))
    # Make sure the right icon is set in Windows 7+ taskbar
    if os.name == 'nt':
        import ctypes

        try:
            myappid = 'mycompany.myproduct.subproduct.version'
            ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(
                myappid)
        except Exception:
            pass

    main_window = MainUI()
    main_window.show()
    sys.exit(app.exec_())
 def __enter__(self):
     # Create a pipe and splice its write end over the captured fd; a
     # duplicate of the original descriptor is kept in self.copy_fd so
     # it can be restored (and the captured output read back) later.
     self.read_fd, self.write_fd = os.pipe()
     self.copy_fd = os.dup(self.capture_fd)
     os.dup2(self.write_fd, self.capture_fd)
     return self
Ejemplo n.º 45
0
 def __enter__(self):
     # Redirect self.output to self.redirected_path at the descriptor
     # level: back up the fd, flush pending data, close the original
     # descriptor, then open the target file.
     # NOTE(review): this relies on POSIX allocating the lowest free
     # descriptor number, so the new file silently takes over the fd
     # just closed — confirm no lower descriptor is free at this point.
     self.output_fd_backup = os.dup(self.output.fileno())
     self.output.flush()
     os.close(self.output.fileno())
     os.open(self.redirected_path, os.O_WRONLY)
     return self
Ejemplo n.º 46
0
def dup(fd):
    """Duplicate file descriptor *fd* and return the new descriptor.

    The new descriptor is the lowest-numbered one currently unused.
    (The original discarded os.dup()'s return value, making the call
    useless to the caller and leaking the duplicate.)
    """
    return os.dup(fd)
Ejemplo n.º 47
0
        def _execute_child(self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, to_close, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite):
            """Execute program (POSIX version)

            Forks, wires the supplied pipe descriptors onto the child's
            stdin/stdout/stderr, and exec()s the program.  Any exception
            raised in the child before exec() succeeds is pickled and sent
            back to the parent over a close-on-exec error pipe, then
            re-raised here in the parent.
            """
            # Normalize args into an argv list; with shell=True the command
            # is handed to /bin/sh -c.
            if isinstance(args, types.StringTypes):
                args = [args]
            else:
                args = list(args)
            if shell:
                args = ['/bin/sh', '-c'] + args
                if executable:
                    args[0] = executable
            if executable is None:
                executable = args[0]

            def _close_in_parent(fd):
                # Close the parent's copy of a descriptor handed to the
                # child and drop it from the cleanup list.
                os.close(fd)
                to_close.remove(fd)

            # Error pipe for reporting exec failure from child to parent.
            # It is close-on-exec, so a successful exec produces EOF.
            errpipe_read, errpipe_write = self.pipe_cloexec()
            try:
                try:
                    # Disable gc: after fork() the child shares the parent's
                    # heap, and a collection there could touch objects in an
                    # inconsistent state.
                    gc_was_enabled = gc.isenabled()
                    gc.disable()
                    try:
                        self.pid = os.fork()
                    except:
                        if gc_was_enabled:
                            gc.enable()
                        raise

                    self._child_created = True
                    if self.pid == 0:
                        # --- Child process ---
                        try:
                            # Close the parent's ends of the pipes.
                            if p2cwrite is not None:
                                os.close(p2cwrite)
                            if c2pread is not None:
                                os.close(c2pread)
                            if errread is not None:
                                os.close(errread)
                            os.close(errpipe_read)
                            # If a descriptor already occupies a standard fd
                            # that dup2() below would overwrite, duplicate it
                            # out of the way first.
                            if c2pwrite == 0:
                                c2pwrite = os.dup(c2pwrite)
                            if errwrite == 0 or errwrite == 1:
                                errwrite = os.dup(errwrite)

                            def _dup2(a, b):
                                # dup2() onto the standard fd; when it is
                                # already the right number, just clear
                                # close-on-exec so it survives exec().
                                if a == b:
                                    self._set_cloexec_flag(a, False)
                                elif a is not None:
                                    os.dup2(a, b)
                                return

                            _dup2(p2cread, 0)
                            _dup2(c2pwrite, 1)
                            _dup2(errwrite, 2)
                            # Close the originals (now duplicated onto 0-2),
                            # making sure no fd is closed twice.
                            closed = {None}
                            for fd in [p2cread, c2pwrite, errwrite]:
                                if fd not in closed and fd > 2:
                                    os.close(fd)
                                    closed.add(fd)

                            if cwd is not None:
                                os.chdir(cwd)
                            if preexec_fn:
                                preexec_fn()
                            if close_fds:
                                # Keep only the error pipe open so the exec
                                # failure (if any) can still be reported.
                                self._close_fds(but=errpipe_write)
                            if env is None:
                                os.execvp(executable, args)
                            else:
                                os.execvpe(executable, args, env)
                        except:
                            # Report the failure to the parent: pickle the
                            # exception (with formatted traceback attached)
                            # over the error pipe.
                            exc_type, exc_value, tb = sys.exc_info()
                            exc_lines = traceback.format_exception(exc_type, exc_value, tb)
                            exc_value.child_traceback = ''.join(exc_lines)
                            os.write(errpipe_write, pickle.dumps(exc_value))

                        # This exit code is never reported to callers; the
                        # parent re-raises the pickled exception instead.
                        os._exit(255)
                    # --- Parent process ---
                    if gc_was_enabled:
                        gc.enable()
                finally:
                    # The parent must close its copy of the write end so the
                    # read() below sees EOF once the child exec()s or dies.
                    os.close(errpipe_write)

                # Blocks until exec succeeds (EOF / empty read) or fails
                # (pickled exception arrives).
                data = _eintr_retry_call(os.read, errpipe_read, 1048576)
            finally:
                if p2cread is not None and p2cwrite is not None:
                    _close_in_parent(p2cread)
                if c2pwrite is not None and c2pread is not None:
                    _close_in_parent(c2pwrite)
                if errwrite is not None and errread is not None:
                    _close_in_parent(errwrite)
                os.close(errpipe_read)

            if data != '':
                # Child failed before exec(): reap it (ignoring an already-
                # reaped child) and re-raise its exception in the parent.
                try:
                    _eintr_retry_call(os.waitpid, self.pid, 0)
                except OSError as e:
                    if e.errno != errno.ECHILD:
                        raise

                child_exception = pickle.loads(data)
                raise child_exception
            return
Ejemplo n.º 48
0
 def __exit__(self, exc_type, exc_value, traceback):
     """Restore the original descriptor saved by ``__enter__``.

     Exceptions are not suppressed (nothing is returned).
     """
     # Flush first so buffered data still lands in the redirect target
     # rather than being written to the restored descriptor later.
     self.output.flush()
     # dup2() atomically closes the redirect file currently occupying
     # output's fd number and installs the saved original there.  The
     # previous close()-then-os.dup() dance only worked when the freed fd
     # happened to be the lowest unused number.
     os.dup2(self.output_fd_backup, self.output.fileno())
     os.close(self.output_fd_backup)
 def _get_next_fds(self, n=1):
     """Peek at the next *n* descriptor numbers the OS would hand out.

     dup() always returns the lowest-numbered unused descriptor, so
     duplicating fd 0 repeatedly reveals the upcoming numbers; the
     duplicates are closed again before returning.
     """
     upcoming = []
     for _ in range(n):
         upcoming.append(os.dup(0))
     for handle in upcoming:
         os.close(handle)
     return upcoming
Ejemplo n.º 50
0
    def __init__(self, argv = None,
                 shell = False,
                 executable = None,
                 cwd = None,
                 env = None,
                 stdin  = PIPE,
                 stdout = PTY,
                 stderr = STDOUT,
                 close_fds = True,
                 preexec_fn = lambda: None,
                 raw = True,
                 aslr = None,
                 setuid = None,
                 where = 'local',
                 display = None,
                 alarm = None,
                 *args,
                 **kwargs
                 ):
        """Spawn the target process, optionally attached to a new PTY.

        If *argv* is omitted, ``context.binary`` is used (a TypeError is
        raised when neither is available).  Any of *stdin*, *stdout*,
        *stderr* set to ``PTY`` is connected to a newly created
        pseudo-terminal; ``stderr=STDOUT`` aliases stderr to stdout.  On
        ``ENOEXEC`` the launch is retried with a prefix supplied by
        ``__on_enoexec`` (e.g. for foreign-architecture binaries).
        """
        super(process, self).__init__(*args,**kwargs)

        # Permit using context.binary
        if argv is None:
            if context.binary:
                argv = [context.binary.path]
            else:
                raise TypeError('Must provide argv or set context.binary')


        #: :class:`subprocess.Popen` object that backs this process
        self.proc = None

        if not shell:
            executable, argv, env = self._validate(cwd, executable, argv, env)

        # Avoid the need to have to deal with the STDOUT magic value.
        if stderr is STDOUT:
            stderr = stdout

        # Determine which descriptors will be attached to a new PTY
        handles = (stdin, stdout, stderr)

        #: Which file descriptor is the controlling TTY
        self.pty          = handles.index(PTY) if PTY in handles else None

        #: Whether the controlling TTY is set to raw mode
        self.raw          = raw

        #: Whether ASLR should be left on
        self.aslr         = aslr if aslr is not None else context.aslr

        #: Whether setuid is permitted
        self._setuid      = setuid if setuid is None else bool(setuid)

        # Create the PTY if necessary
        stdin, stdout, stderr, master, slave = self._handles(*handles)

        #: Arguments passed on argv
        self.argv = argv

        #: Full path to the executable
        self.executable = executable

        if self.executable is None:
            if shell:
                self.executable = '/bin/sh'
            else:
                self.executable = which(self.argv[0])

        #: Environment passed on envp
        self.env = os.environ if env is None else env

        self._cwd = os.path.realpath(cwd or os.path.curdir)

        #: Alarm timeout of the process
        self.alarm        = alarm

        self.preexec_fn = preexec_fn
        self.display    = display or self.program
        self._qemu      = False
        self._corefile  = None

        message = "Starting %s process %r" % (where, self.display)

        if self.isEnabledFor(logging.DEBUG):
            if self.argv != [self.executable]: message += ' argv=%r ' % self.argv
            if self.env  != os.environ:        message += ' env=%r ' % self.env

        with self.progress(message) as p:

            if not self.aslr:
                self.warn_once("ASLR is disabled!")

            # In the event the binary is a foreign architecture,
            # and binfmt is not installed (e.g. when running on
            # Travis CI), re-try with qemu-XXX if we get an
            # 'Exec format error'.
            prefixes = [([], self.executable)]
            exception = None

            for prefix, executable in prefixes:
                try:
                    args = argv
                    if prefix:
                        args = prefix + args
                    self.proc = subprocess.Popen(args = args,
                                                 shell = shell,
                                                 executable = executable,
                                                 cwd = cwd,
                                                 env = env,
                                                 stdin = stdin,
                                                 stdout = stdout,
                                                 stderr = stderr,
                                                 close_fds = close_fds,
                                                 preexec_fn = self.__preexec_fn)
                    break
                except OSError as exception:
                    if exception.errno != errno.ENOEXEC:
                        raise
                    prefixes.append(self.__on_enoexec(exception))

            p.success('pid %i' % self.pid)

        if self.pty is not None:
            if stdin is slave:
                self.proc.stdin = os.fdopen(os.dup(master), 'r+b', 0)
            if stdout is slave:
                self.proc.stdout = os.fdopen(os.dup(master), 'r+b', 0)
            if stderr is slave:
                self.proc.stderr = os.fdopen(os.dup(master), 'r+b', 0)

            os.close(master)
            os.close(slave)

        # Set in non-blocking mode so that a call to call recv(1000) will
        # return as soon as a the first byte is available
        if self.proc.stdout:
            fd = self.proc.stdout.fileno()
            fl = fcntl.fcntl(fd, fcntl.F_GETFL)
            fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

        # Save off information about whether the binary is setuid / setgid
        self.uid = os.getuid()
        self.gid = os.getgid()
        self.suid = -1
        self.sgid = -1
        st = os.stat(self.executable)
        if self._setuid:
            if (st.st_mode & stat.S_ISUID):
                # Bugfix: record the owner in self.suid (the original
                # assigned self.setuid, leaving self.suid at -1 forever).
                self.suid = st.st_uid
            if (st.st_mode & stat.S_ISGID):
                # Bugfix: likewise self.sgid, not self.setgid.
                self.sgid = st.st_gid
Ejemplo n.º 51
0
def main():
    # Our parent reads JSON from our stdout, so a stray print would corrupt
    # that stream (buck would complain with a helpful "but I wanted an
    # array!" message and quit).  Keep a private handle on the real stdout
    # and point fd 1 at stderr.  Working on the raw descriptors via
    # dup/dup2 means writes to file descriptor 1, os.system, and so on are
    # redirected too, not just sys.stdout.
    to_parent = os.fdopen(os.dup(sys.stdout.fileno()), 'a')
    os.dup2(sys.stderr.fileno(), sys.stdout.fileno())

    parser = optparse.OptionParser()
    parser.add_option('--project_root',
                      action='store',
                      type='string',
                      dest='project_root')
    parser.add_option('--build_file_name',
                      action='store',
                      type='string',
                      dest="build_file_name")
    parser.add_option(
        '--allow_empty_globs',
        action='store_true',
        dest='allow_empty_globs',
        help=
        'Tells the parser not to raise an error when glob returns no results.')
    parser.add_option('--include', action='append', dest='include')
    (options, args) = parser.parse_args()

    # project_root arrives absolute but possibly not canonical (e.g.
    # "C:\project\.\rule").  Under cygwin, buck passes a Windows-style
    # C:\path while cygwin's python works with UNIX-style paths, so convert
    # with cygpath first; otherwise abspath would treat C:\path as relative.
    options.project_root = cygwin_adjusted_path(options.project_root)
    project_root = os.path.abspath(options.project_root)

    processor = BuildFileProcessor(project_root,
                                   options.build_file_name,
                                   options.allow_empty_globs,
                                   implicit_includes=options.include or [])

    processor.install_builtins(__builtin__.__dict__)

    def emit(values):
        # Ship one evaluated build file back to the parent as JSON.
        to_parent.write(json.dumps(values, sort_keys=True))
        to_parent.flush()

    # First process the build files given on the command line...
    for build_file in args:
        emit(processor.process(cygwin_adjusted_path(build_file)))

    # ...then those streamed over stdin.  "for ... in sys.stdin" in
    # Python 2.x hangs until stdin is closed, hence the readline iterator.
    for line in iter(sys.stdin.readline, ''):
        emit(processor.process(cygwin_adjusted_path(line).rstrip()))

    # Python tries to flush/close stdout when it quits, and if there's a
    # dead pipe on the other end, it will spit some warnings to stderr.
    # This breaks tests sometimes.  Prevent that by explicitly catching
    # the error.
    try:
        to_parent.close()
    except IOError:
        pass
Ejemplo n.º 52
0
    def _start_execution_in_container(
        self,
        args,
        stdin,
        stdout,
        stderr,
        env,
        root_dir,
        cwd,
        temp_dir,
        memlimit,
        memory_nodes,
        cgroups,
        output_dir,
        result_files_patterns,
        parent_setup_fn,
        child_setup_fn,
        parent_cleanup_fn,
    ):
        """Execute the given command and measure its resource usage similarly to
        super()._start_execution(), but inside a container implemented using Linux
        namespaces.  The command has no network access (only loopback),
        a fresh directory as /tmp and no write access outside of this,
        and it does not see other processes except itself.

        @param args: command line to execute (handed to subprocess.Popen)
        @param stdin, stdout, stderr: handles for the tool's standard streams
        @param env: environment for the tool (extended with
            self._env_override when root_dir is None)
        @param root_dir: directory to use as the container's root filesystem,
            or None for the default container filesystem setup
        @param cwd: working directory inside the container
        @param temp_dir: directory backing the container filesystem
        @param memlimit, memory_nodes: forwarded to
            _setup_container_filesystem
        @param cgroups: cgroups into which the grandchild PID is put
        @param output_dir, result_files_patterns: target directory and
            patterns for result files copied out of the container
        @param parent_setup_fn, child_setup_fn, parent_cleanup_fn: hooks run
            in the parent before measurement, in the grandchild before
            exec(), and in the parent after the run, respectively
        @return: tuple (grandchild_pid, wait_for_grandchild), where calling
            wait_for_grandchild() blocks until the tool terminates and
            returns (exitcode, resource usage of grandchild, result of
            parent_cleanup_fn)
        """
        assert self._use_namespaces

        if root_dir is None:
            env.update(self._env_override)

        # We have three processes involved:
        # parent: the current Python process in which RunExecutor is executing
        # child: child process in new namespace (PID 1 in inner namespace),
        #        configures inner namespace, serves as dummy init,
        #        collects result of grandchild and passes it to parent
        # grandchild: child of child process (PID 2 in inner namespace), exec()s tool

        # We need the following communication steps between these proceses:
        # 1a) grandchild tells parent its PID (in outer namespace).
        # 1b) grandchild tells parent that it is ready and measurement should begin.
        # 2) parent tells grandchild that measurement has begun and tool should
        #    be exec()ed.
        # 3) child tells parent about return value and resource consumption of
        #    grandchild.
        # 1a and 1b are done together by sending the PID through a pipe.
        # 2 is done by sending a null byte through a pipe.
        # 3 is done by sending a pickled object through the same pipe as #2.
        # We cannot use the same pipe for both directions, because otherwise a sender
        # might read the bytes it has sent itself.

        # Error codes from child to parent
        CHILD_OSERROR = 128  # noqa: N806 local constant
        CHILD_UNKNOWN_ERROR = 129  # noqa: N806 local constant

        # "downstream" pipe parent->grandchild
        from_parent, to_grandchild = os.pipe()
        # "upstream" pipe grandchild/child->parent
        from_grandchild, to_parent = os.pipe()

        # The protocol for these pipes is that first the parent sends the marker for
        # user mappings, then the grand child sends its outer PID back,
        # and finally the parent sends its completion marker.
        # After the run, the child sends the result of the grand child and then waits
        # for the post_run marker, before it terminates.
        MARKER_USER_MAPPING_COMPLETED = b"A"  # noqa: N806 local constant
        MARKER_PARENT_COMPLETED = b"B"  # noqa: N806 local constant
        MARKER_PARENT_POST_RUN_COMPLETED = b"C"  # noqa: N806 local constant

        # If the current directory is within one of the bind mounts we create,
        # we need to cd into this directory again, otherwise we would not see the
        # bind mount, but the directory behind it.
        # Thus we always set cwd to force a change of directory.
        if root_dir is None:
            cwd = os.path.abspath(cwd or os.curdir)
        else:
            root_dir = os.path.abspath(root_dir)
            cwd = os.path.abspath(cwd)

        def grandchild():
            """Setup everything inside the process that finally exec()s the tool."""
            try:
                # We know that this process has PID 2 in the inner namespace,
                # but we actually need to know its PID in the outer namespace
                # such that parent can put us into the correct cgroups.  According to
                # http://man7.org/linux/man-pages/man7/pid_namespaces.7.html,
                # there are two ways to achieve this: sending a message with the PID
                # via a socket (but Python 2 lacks a convenient API for sendmsg),
                # and reading /proc/self in the outer procfs instance
                # (that's what we do).
                my_outer_pid = container.get_my_pid_from_procfs()

                container.mount_proc(self._container_system_config)
                container.drop_capabilities()
                container.reset_signal_handling()
                child_setup_fn()  # Do some other setup the caller wants.

                # Signal readiness to parent by sending our PID
                # and wait until parent is also ready
                os.write(to_parent, str(my_outer_pid).encode())
                received = os.read(from_parent, 1)
                assert received == MARKER_PARENT_COMPLETED, received
            finally:
                # close remaining ends of pipe
                os.close(from_parent)
                os.close(to_parent)
            # here Python will exec() the tool for us

        def child():
            """Setup everything inside the container,
            start the tool, and wait for result."""
            try:
                logging.debug(
                    "Child: child process of RunExecutor with PID %d started",
                    container.get_my_pid_from_procfs(),
                )

                # Put all received signals on hold until we handle them later.
                container.block_all_signals()

                # We want to avoid leaking file descriptors to the executed child.
                # It is also nice if the child has only the minimal necessary file
                # descriptors, to avoid keeping other pipes and files open, e.g.,
                # those that the parent uses to communicate with other containers
                # (if containers are started in parallel).
                # Thus we do not use the close_fds feature of subprocess.Popen,
                # but do the same here manually. We keep the relevant ends of our pipes,
                # and stdin/out/err of child and grandchild.
                necessary_fds = {
                    sys.stdin,
                    sys.stdout,
                    sys.stderr,
                    to_parent,
                    from_parent,
                    stdin,
                    stdout,
                    stderr,
                } - {None}
                container.close_open_fds(keep_files=necessary_fds)

                try:
                    if self._container_system_config:
                        # A standard hostname increases reproducibility.
                        libc.sethostname(container.CONTAINER_HOSTNAME)

                    if not self._allow_network:
                        container.activate_network_interface("lo")

                    # Wait until user mapping is finished,
                    # this is necessary for filesystem writes
                    received = os.read(from_parent, len(MARKER_USER_MAPPING_COMPLETED))
                    assert received == MARKER_USER_MAPPING_COMPLETED, received

                    if root_dir is not None:
                        self._setup_root_filesystem(root_dir)
                    else:
                        self._setup_container_filesystem(
                            temp_dir,
                            output_dir if result_files_patterns else None,
                            memlimit,
                            memory_nodes,
                        )

                    # Marking this process as "non-dumpable" (no core dumps) also
                    # forbids several other ways how other processes can access and
                    # influence it:
                    # ptrace is forbidden and much of /proc/<child>/ is inaccessible.
                    # We set this to prevent the benchmarked tool from messing with this
                    # process or using it to escape from the container. More info:
                    # http://man7.org/linux/man-pages/man5/proc.5.html
                    # It needs to be done after MARKER_USER_MAPPING_COMPLETED.
                    libc.prctl(libc.PR_SET_DUMPABLE, libc.SUID_DUMP_DISABLE, 0, 0, 0)
                except EnvironmentError as e:
                    logging.critical("Failed to configure container: %s", e)
                    return CHILD_OSERROR

                try:
                    os.chdir(cwd)
                except EnvironmentError as e:
                    logging.critical(
                        "Cannot change into working directory inside container: %s", e
                    )
                    return CHILD_OSERROR

                container.setup_seccomp_filter()

                try:
                    grandchild_proc = subprocess.Popen(
                        args,
                        stdin=stdin,
                        stdout=stdout,
                        stderr=stderr,
                        env=env,
                        close_fds=False,
                        preexec_fn=grandchild,
                    )
                except (EnvironmentError, RuntimeError) as e:
                    logging.critical("Cannot start process: %s", e)
                    return CHILD_OSERROR

                # keep capability for unmount if necessary later
                necessary_capabilities = (
                    [libc.CAP_SYS_ADMIN] if result_files_patterns else []
                )
                container.drop_capabilities(keep=necessary_capabilities)

                # Close other fds that were still necessary above.
                container.close_open_fds(
                    keep_files={sys.stdout, sys.stderr, to_parent, from_parent}
                )

                # Set up signal handlers to forward signals to grandchild
                # (because we are PID 1, there is a special signal handling otherwise).
                # cf. dumb-init project: https://github.com/Yelp/dumb-init
                # Also wait for grandchild and return its result.
                if _HAS_SIGWAIT:
                    grandchild_result = container.wait_for_child_and_forward_signals(
                        grandchild_proc.pid, args[0]
                    )
                else:
                    container.forward_all_signals_async(grandchild_proc.pid, args[0])
                    grandchild_result = self._wait_for_process(
                        grandchild_proc.pid, args[0]
                    )

                logging.debug(
                    "Child: process %s terminated with exit code %d.",
                    args[0],
                    grandchild_result[0],
                )

                if result_files_patterns:
                    # Remove the bind mount that _setup_container_filesystem added
                    # such that the parent can access the result files.
                    libc.umount(temp_dir.encode())

                # Re-allow access to /proc/<child>/...,
                # this is used by the parent for accessing output files
                libc.prctl(libc.PR_SET_DUMPABLE, libc.SUID_DUMP_USER, 0, 0, 0)

                os.write(to_parent, pickle.dumps(grandchild_result))
                os.close(to_parent)

                # Now the parent copies the output files, we need to wait until this is
                # finished. If the child terminates, the container file system and its
                # tmpfs go away.
                assert os.read(from_parent, 1) == MARKER_PARENT_POST_RUN_COMPLETED
                os.close(from_parent)

                return 0
            except EnvironmentError:
                logging.exception("Error in child process of RunExecutor")
                return CHILD_OSERROR
            except:  # noqa: E722
                # Need to catch everything because this method always needs to return an
                # int (we are inside a C callback that requires returning int).
                logging.exception("Error in child process of RunExecutor")
                return CHILD_UNKNOWN_ERROR

        try:  # parent
            try:
                child_pid = container.execute_in_namespace(
                    child, use_network_ns=not self._allow_network
                )
            except OSError as e:
                if (
                    e.errno == errno.EPERM
                    and util.try_read_file("/proc/sys/kernel/unprivileged_userns_clone")
                    == "0"
                ):
                    raise BenchExecException(
                        "Unprivileged user namespaces forbidden on this system, please "
                        "enable them with 'sysctl kernel.unprivileged_userns_clone=1' "
                        "or disable container mode"
                    )
                else:
                    raise BenchExecException(
                        "Creating namespace for container mode failed: "
                        + os.strerror(e.errno)
                    )
            logging.debug(
                "Parent: child process of RunExecutor with PID %d started.", child_pid
            )

            def check_child_exit_code():
                """Check if the child process terminated cleanly
                and raise an error otherwise."""
                child_exitcode, unused_child_rusage = self._wait_for_process(
                    child_pid, args[0]
                )
                child_exitcode = util.ProcessExitCode.from_raw(child_exitcode)
                logging.debug(
                    "Parent: child process of RunExecutor with PID %d"
                    " terminated with %s.",
                    child_pid,
                    child_exitcode,
                )

                if child_exitcode:
                    if child_exitcode.value:
                        if child_exitcode.value == CHILD_OSERROR:
                            # This was an OSError in the child,
                            # details were already logged
                            raise BenchExecException(
                                "execution in container failed, check log for details"
                            )
                        elif child_exitcode.value == CHILD_UNKNOWN_ERROR:
                            raise BenchExecException("unexpected error in container")
                        raise OSError(
                            child_exitcode.value, os.strerror(child_exitcode.value)
                        )
                    raise OSError(
                        0,
                        "Child process of RunExecutor terminated with "
                        + str(child_exitcode),
                    )

            # Close unnecessary ends of pipes such that read() does not block forever
            # if all other processes have terminated.
            os.close(from_parent)
            os.close(to_parent)

            container.setup_user_mapping(child_pid, uid=self._uid, gid=self._gid)
            # signal child to continue
            os.write(to_grandchild, MARKER_USER_MAPPING_COMPLETED)

            try:
                # read at most 10 bytes because this is enough for 32bit int
                grandchild_pid = int(os.read(from_grandchild, 10))
            except ValueError:
                # probably empty read, i.e., pipe closed,
                # i.e., child or grandchild failed
                check_child_exit_code()
                assert False, (
                    "Child process of RunExecutor terminated cleanly"
                    " but did not send expected data."
                )

            logging.debug(
                "Parent: executing %s in grand child with PID %d"
                " via child with PID %d.",
                args[0],
                grandchild_pid,
                child_pid,
            )

            # start measurements
            cgroups.add_task(grandchild_pid)
            parent_setup = parent_setup_fn()

            # Signal grandchild that setup is finished
            os.write(to_grandchild, MARKER_PARENT_COMPLETED)

            # Copy file descriptor, otherwise we could not close from_grandchild in
            # finally block and would leak a file descriptor in case of exception.
            from_grandchild_copy = os.dup(from_grandchild)
            to_grandchild_copy = os.dup(to_grandchild)
        finally:
            os.close(from_grandchild)
            os.close(to_grandchild)

        def wait_for_grandchild():
            """Block until the tool terminates; return its exit code,
            resource usage, and the result of parent_cleanup_fn."""
            # 1024 bytes ought to be enough for everyone^Wour pickled result
            try:
                received = os.read(from_grandchild_copy, 1024)
            except OSError as e:
                if self.PROCESS_KILLED and e.errno == errno.EINTR:
                    # Read was interrupted because of Ctrl+C, we just try again
                    received = os.read(from_grandchild_copy, 1024)
                else:
                    raise e

            if not received:
                # Typically this means the child exited prematurely because an error
                # occurred, and check_child_exitcode() will handle this.
                # We close the pipe first, otherwise child could hang infinitely.
                os.close(from_grandchild_copy)
                os.close(to_grandchild_copy)
                check_child_exit_code()
                assert False, "Child process terminated cleanly without sending result"

            exitcode, ru_child = pickle.loads(received)

            base_path = "/proc/{}/root".format(child_pid)
            parent_cleanup = parent_cleanup_fn(
                parent_setup, util.ProcessExitCode.from_raw(exitcode), base_path
            )

            if result_files_patterns:
                # As long as the child process exists
                # we can access the container file system here
                self._transfer_output_files(
                    base_path + temp_dir, cwd, output_dir, result_files_patterns
                )

            os.close(from_grandchild_copy)
            os.write(to_grandchild_copy, MARKER_PARENT_POST_RUN_COMPLETED)
            os.close(to_grandchild_copy)  # signal child that it can terminate
            check_child_exit_code()

            return exitcode, ru_child, parent_cleanup

        return grandchild_pid, wait_for_grandchild
Ejemplo n.º 53
0
def _exec_task(fn, task, d, quieterr):
    """Execute a BB 'task'

    Execution of a task involves a bit more setup than executing a function,
    running it with its own local metadata, and with some useful variables set.

    Arguments:
        fn: recipe file the task belongs to
        task: name of the task to execute
        d: datastore the task metadata is taken from
        quieterr: if true, fire TaskFailedSilent instead of TaskFailed on error

    Returns 0 on success and 1 on failure.  While the task runs, stdin is
    redirected to /dev/null and stdout/stderr to the task log file; the
    original fds are restored afterwards.
    """
    if not d.getVarFlag(task, 'task', False):
        event.fire(TaskInvalid(task, d), d)
        logger.error("No such task: %s" % task)
        return 1

    logger.debug(1, "Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T')
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    # Change nice level if we're asked to
    nice = localdata.getVar("BB_TASK_NICE_LEVEL")
    if nice:
        curnice = os.nice(0)
        # os.nice() takes a delta, so convert the requested absolute level.
        nice = int(nice) - curnice
        newnice = os.nice(nice)
        logger.debug(1, "Renice to %s " % newnice)
    ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
    if ionice:
        try:
            # Expected format is "<class>.<priority>".
            cls, prio = ionice.split(".", 1)
            bb.utils.ioprio_set(os.getpid(), int(cls), int(prio))
        except:
            bb.warn("Invalid ionice level %s" % ionice)

    bb.utils.mkdirhier(tempdir)

    # Determine the logfile to generate
    logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
    logbase = logfmt.format(task=task, pid=os.getpid())

    # Document the order of the tasks...
    logorder = os.path.join(tempdir, 'log.task_order')
    try:
        with open(logorder, 'a') as logorderfile:
            logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
    except OSError:
        # Failing to record the ordering is not fatal for the task itself.
        logger.exception("Opening log file '%s'", logorder)
        pass

    # Setup the courtesy link to the logfn
    loglink = os.path.join(tempdir, 'log.{0}'.format(task))
    logfn = os.path.join(tempdir, logbase)
    if loglink:
        bb.utils.remove(loglink)

        try:
           os.symlink(logbase, loglink)
        except OSError:
           pass

    prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
    postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)

    # Records whether any ERROR-level log message was emitted during the task,
    # so TaskFailed events can tell whether an error was already printed.
    class ErrorCheckHandler(logging.Handler):
        def __init__(self):
            self.triggered = False
            logging.Handler.__init__(self, logging.ERROR)
        def emit(self, record):
            # Records marked 'forcelog' are always shown, so they do not count
            # as "an error was already printed".
            if getattr(record, 'forcelog', False):
                self.triggered = False
            else:
                self.triggered = True

    # Handle logfiles
    si = open('/dev/null', 'r')
    try:
        bb.utils.mkdirhier(os.path.dirname(logfn))
        logfile = open(logfn, 'w')
    except OSError:
        # NOTE(review): if this open fails, 'logfile' is undefined and the
        # dup2 below will raise NameError — presumably considered unlikely
        # enough in practice; confirm before relying on this path.
        logger.exception("Opening log file '%s'", logfn)
        pass

    # Dup the existing fds so we dont lose them
    # Each list is [backup fd, original fd number].
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

    # Ensure Python logging goes to the logfile
    handler = logging.StreamHandler(logfile)
    handler.setFormatter(logformatter)
    # Always enable full debug output into task logfiles
    handler.setLevel(logging.DEBUG - 2)
    bblogger.addHandler(handler)

    errchk = ErrorCheckHandler()
    bblogger.addHandler(errchk)

    localdata.setVar('BB_LOGFILE', logfn)
    localdata.setVar('BB_RUNTASK', task)
    localdata.setVar('BB_TASK_LOGGER', bblogger)

    flags = localdata.getVarFlags(task)

    try:
        try:
            event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
        except (bb.BBHandledException, SystemExit):
            return 1
        except FuncFailed as exc:
            logger.error(str(exc))
            return 1

        try:
            # Run prefuncs, then the task itself, then postfuncs, in order.
            for func in (prefuncs or '').split():
                exec_func(func, localdata)
            exec_func(task, localdata)
            for func in (postfuncs or '').split():
                exec_func(func, localdata)
        except FuncFailed as exc:
            if quieterr:
                event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
            else:
                errprinted = errchk.triggered
                logger.error(str(exc))
                event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
            return 1
        except bb.BBHandledException:
            event.fire(TaskFailed(task, logfn, localdata, True), localdata)
            return 1
    finally:
        sys.stdout.flush()
        sys.stderr.flush()

        bblogger.removeHandler(handler)

        # Restore the backup fds
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])

        # Close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
        si.close()

        logfile.close()
        if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
            # Empty log means nothing was written — drop it and its symlink.
            logger.debug(2, "Zero size logfn %s, removing", logfn)
            bb.utils.remove(logfn)
            bb.utils.remove(loglink)
    event.fire(TaskSucceeded(task, logfn, localdata), localdata)

    if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False):
        make_stamp(task, localdata)

    return 0
Ejemplo n.º 54
0
# Make the bundled ODSReader package importable.
sys.path.append('odsreader')

from ODSReader import ODSReader


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='ODS to Arduino pins converter.')
    parser.add_argument('-i', action='store', dest="input_file", metavar='input-odf-file', required=True, help='Input ODS file')
    parser.add_argument('-o', action='store', dest="output_file", metavar='generated-header-file', type=argparse.FileType('wt'), default='arduino_pins.h', help='Output C++ header file for arduino pins.')
    parser.add_argument('-s', action='store', dest="sheet_name", required=True, help='Name of Sheet in ODS to read pins definition from.')
    parser.add_argument('-m', action='store', dest="device", required=True, help='Name of microcontroler the header is generated (example: ATmega128).')


    ns = parser.parse_args()

    # Redirect print() output into the generated header file; keep a dup of
    # the original stdout fd so it could be restored later.
    old = os.dup(1)
    os.close(1)
    sys.stdout = ns.output_file

    # Patterns that recognize ADC and TIMER pin functions in the sheet.
    cpa = re.compile(r'ADC([0-9]+)')
    cpt = re.compile(r'TIMER([0-9]+[A|B|C])')
    doc = ODSReader(ns.input_file)
    table = doc.getSheet(ns.sheet_name)
    # Accumulators for the different pin categories found in the sheet.
    ports = {}
    spi = []
    analogin = {}
    timers = {}
    pwm = []
    pins = []
    for r in table[1:]:
        if r[0] == '---':
Ejemplo n.º 55
0
 def __init__(self, fileno, encoding):
     """Wrap a duplicate of *fileno* as a binary write stream.

     The descriptor is duped so that closing this wrapper's stream
     never closes the caller's original file descriptor.
     """
     self.encoding = encoding
     self.binarystream = fdopen(dup(fileno), 'wb')
Ejemplo n.º 56
0
 def __init__(self, fd):
     """Remember *fd* as the descriptor to redirect and stash a backup of it."""
     self._into_file = None
     self._redirected_fd = fd
     # Duplicate the fd now so the original target can be restored later.
     self._saved_fd = os.dup(fd)
Ejemplo n.º 57
0
    def __execute_child(self, args, executable, preexec_fn, close_fds, cwd,
                        env, universal_newlines, startupinfo, creationflags,
                        shell, p2cread, p2cwrite, c2pread, c2pwrite, errread,
                        errwrite):
        """
        Executes the program using posix_spawn().

        This is based on the method from the superclass but the
        posix_spawn API forces a number of changes.  In particular:

        * When using fork() FDs are manipulated in the child process
          after the fork, but before the program is exec()ed.  With
          posix_spawn() this is done by passing a data-structure to
          the posix_spawn() call, which describes the FD manipulations
          to perform.

        * The fork() version waits until after the fork before
          unsetting the non-blocking flag on the FDs that the child
          has inherited.  In the posix_spawn() version, we cannot
          do that after the fork so we dup the FDs in advance and
          unset the flag on the duped FD, which we then pass to the
          child.

        Raises NotImplementedError for preexec_fn/close_fds/cwd/
        universal_newlines, which posix_spawn cannot support here.
        """

        if preexec_fn is not None:
            raise NotImplementedError("preexec_fn not supported")
        if close_fds:
            raise NotImplementedError("close_fds not implemented")
        if cwd:
            raise NotImplementedError("cwd not implemented")
        if universal_newlines:
            raise NotImplementedError()
        assert startupinfo is None and creationflags == 0

        log.debug("Pipes: p2c %s, %s; c2p %s, %s; err %s, %s", p2cread,
                  p2cwrite, c2pread, c2pwrite, errread, errwrite)

        # Python 2 code: types.StringTypes covers str and unicode.
        if isinstance(args, types.StringTypes):
            args = [args]
        else:
            args = [a.encode("ascii") for a in args]

        if shell:
            args = ["/bin/sh", "-c"] + args
            if executable:
                args[0] = executable

        if executable is None:
            executable = args[0]

        self._loop.install_sigchld()

        # The FileActions object is an ordered list of FD operations for
        # posix_spawn to do in the child process before it execs the new
        # program.
        file_actions = FileActions()

        # In the child, close parent's pipe ends.
        if p2cwrite is not None:
            file_actions.add_close(p2cwrite)
        if c2pread is not None:
            file_actions.add_close(c2pread)
        if errread is not None:
            file_actions.add_close(errread)

        # When duping fds, if there arises a situation where one of the fds
        # is either 0, 1 or 2, it is possible that it is overwritten (#12607).
        # NOTE(review): the fds stored here are closed in the finally block
        # below, yet the rebound c2pwrite/errwrite names are also passed to
        # the os.close() calls at the end of this method — looks like a
        # potential double close; confirm against the fork()-based original.
        fds_to_close_in_parent = []
        if c2pwrite == 0:
            c2pwrite = os.dup(c2pwrite)
            fds_to_close_in_parent.append(c2pwrite)
        if errwrite == 0 or errwrite == 1:
            errwrite = os.dup(errwrite)
            fds_to_close_in_parent.append(errwrite)

        # Dup stdin/out/err FDs in child.
        def _dup2(dup_from, dup_to):
            if dup_from is None:
                # Pass through the existing FD.
                dup_from = dup_to
            # Need to take a dup so we can remove the non-blocking flag
            a_dup = os.dup(dup_from)
            log.debug("Duped %s as %s", dup_from, a_dup)
            fds_to_close_in_parent.append(a_dup)
            self._remove_nonblock_flag(a_dup)
            file_actions.add_dup2(a_dup, dup_to)

        _dup2(p2cread, 0)
        _dup2(c2pwrite, 1)
        _dup2(errwrite, 2)

        # Close pipe fds in the child.  Make sure we don't close the same fd
        # more than once, or standard fds.
        for fd in set([p2cread, c2pwrite, errwrite]):
            if fd > 2:
                file_actions.add_close(fd)

        gc_was_enabled = gc.isenabled()
        # FIXME Does this bug apply to posix_spawn version?
        try:
            # Disable gc to avoid bug where gc -> file_dealloc ->
            # write to stderr -> hang.  http://bugs.python.org/issue1336
            gc.disable()
            self.pid = posix_spawnp(
                executable,
                args,
                file_actions=file_actions,
                env=env,
            )
        except:
            if gc_was_enabled:
                gc.enable()
            raise
        finally:
            # The duped fds are only needed for spawning; the child has its
            # own copies via file_actions.
            for fd in fds_to_close_in_parent:
                os.close(fd)

        # Capture the SIGCHILD.
        self._watcher = self._loop.child(self.pid)
        self._watcher.start(self._on_child, self._watcher)

        if gc_was_enabled:
            gc.enable()

        # Close the Child's pipe ends in the parent.
        if p2cread is not None and p2cwrite is not None:
            os.close(p2cread)
        if c2pwrite is not None and c2pread is not None:
            os.close(c2pwrite)
        if errwrite is not None and errread is not None:
            os.close(errwrite)
Ejemplo n.º 58
0
    def runCommand(self, params, wait=1):
        """
        This method runs a command and returns a tuple: (returncode, stdout, stderr)

        params -- argv-style list with the command and its arguments
        wait   -- seconds to sleep after spawning before the first poll

        stdout and stderr of the spawned process are captured through pipes
        that temporarily replace this process's own stdout/stderr fds.
        """
        ## @todo: Convert this into generic method and reuse it in dhcp4 and dhcp6

        print("Running command: %s" % (" ".join(params)))

        # redirect stdout to a pipe so we can check that our
        # process spawning is doing the right thing with stdout
        self.stdout_old = os.dup(sys.stdout.fileno())
        self.stdout_pipes = os.pipe()
        os.dup2(self.stdout_pipes[1], sys.stdout.fileno())
        os.close(self.stdout_pipes[1])

        # do the same trick for stderr:
        self.stderr_old = os.dup(sys.stderr.fileno())
        self.stderr_pipes = os.pipe()
        os.dup2(self.stderr_pipes[1], sys.stderr.fileno())
        os.close(self.stderr_pipes[1])

        # note that we use dup2() to restore the original stdout
        # to the main program ASAP in each test... this prevents
        # hangs reading from the child process (as the pipe is only
        # open in the child), and also insures nice pretty output

        pi = ProcessInfo('Test Process', params)
        pi.spawn()
        time.sleep(wait)
        # Restore our own stdout/stderr right away (see note above); the
        # child keeps the pipe write ends it inherited.
        os.dup2(self.stdout_old, sys.stdout.fileno())
        os.dup2(self.stderr_old, sys.stderr.fileno())
        self.assertNotEqual(pi.process, None)
        self.assertTrue(type(pi.pid) is int)

        # Set non-blocking read on pipes. Process may not print anything
        # on specific output and the we would hang without this.
        fd = self.stdout_pipes[0]
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

        fd = self.stderr_pipes[0]
        fl = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)

        # As we don't know how long the subprocess will take to start and
        # produce output, we'll loop and sleep for 250 ms between each
        # iteration.  To avoid an infinite loop, we'll loop for a maximum
        # of five seconds: that should be enough.
        for count in range(20):
            # Read something from stderr and stdout (these reads don't block).
            output = self.readPipe(self.stdout_pipes[0])
            error = self.readPipe(self.stderr_pipes[0])

            # If the process has already exited, or if it has output something,
            # quit the loop now.
            if pi.process.poll(
            ) is not None or len(error) > 0 or len(output) > 0:
                break

            # Process still running, try again in 250 ms.
            time.sleep(0.25)

        # Exited loop, kill the process if it is still running
        if pi.process.poll() is None:
            try:
                pi.process.terminate()
            except OSError:
                print("Ignoring failed kill attempt. Process is dead already.")

        # call this to get returncode, process should be dead by now
        rc = pi.process.wait()

        # Clean up our stdout/stderr munging.
        os.dup2(self.stdout_old, sys.stdout.fileno())
        os.close(self.stdout_old)
        os.close(self.stdout_pipes[0])

        os.dup2(self.stderr_old, sys.stderr.fileno())
        os.close(self.stderr_old)
        os.close(self.stderr_pipes[0])

        # Free up resources (file descriptors) from the ProcessInfo object
        # TODO: For some reason, this gives an error if the process has ended,
        #       although it does cause all descriptors still allocated to the
        #       object to be freed.
        pi = None

        print(
            "Process finished, return code=%d, stdout=%d bytes, stderr=%d bytes"
            % (rc, len(output), len(error)))

        return (rc, output, error)
Ejemplo n.º 59
0
def _exec_task(fn, task, d, quieterr):
    """Execute a BB 'task'

    Execution of a task involves a bit more setup than executing a function,
    running it with its own local metadata, and with some useful variables set.

    Older (Python 2) variant: uses the file() builtin and positional
    getVar/getVarFlag signatures.  Returns 0 on success and 1 on failure.
    While the task runs, stdin is redirected to /dev/null and stdout/stderr
    to the task log file; the original fds are restored afterwards.
    """
    if not data.getVarFlag(task, 'task', d):
        event.fire(TaskInvalid(task, d), d)
        logger.error("No such task: %s" % task)
        return 1

    logger.debug(1, "Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T', True)
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    bb.utils.mkdirhier(tempdir)
    # Courtesy symlink 'log.<task>' pointing at the pid-specific log file.
    loglink = os.path.join(tempdir, 'log.{0}'.format(task))
    logbase = 'log.{0}.{1}'.format(task, os.getpid())
    logfn = os.path.join(tempdir, logbase)
    if loglink:
        bb.utils.remove(loglink)

        try:
           os.symlink(logbase, loglink)
        except OSError:
           pass

    prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
    postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)

    # Records whether any ERROR-level log message was emitted during the task.
    class ErrorCheckHandler(logging.Handler):
        def __init__(self):
            self.triggered = False
            logging.Handler.__init__(self, logging.ERROR)
        def emit(self, record):
            self.triggered = True

    # Handle logfiles
    si = file('/dev/null', 'r')
    try:
        logfile = file(logfn, 'w')
    except OSError:
        # NOTE(review): if this open fails, 'logfile' is undefined and the
        # dup2 below raises NameError — confirm whether that is acceptable.
        logger.exception("Opening log file '%s'", logfn)
        pass

    # Dup the existing fds so we dont lose them
    # Each list is [backup fd, original fd number].
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

    # Ensure python logging goes to the logfile
    handler = logging.StreamHandler(logfile)
    handler.setFormatter(logformatter)
    # Always enable full debug output into task logfiles
    handler.setLevel(logging.DEBUG - 2)
    bblogger.addHandler(handler)

    errchk = ErrorCheckHandler()
    bblogger.addHandler(errchk)

    localdata.setVar('BB_LOGFILE', logfn)

    event.fire(TaskStarted(task, localdata), localdata)
    try:
        # Run prefuncs, then the task itself, then postfuncs, in order.
        for func in (prefuncs or '').split():
            exec_func(func, localdata)
        exec_func(task, localdata)
        for func in (postfuncs or '').split():
            exec_func(func, localdata)
    except FuncFailed as exc:
        if quieterr:
            event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
        else:
            errprinted = errchk.triggered
            logger.error(str(exc))
            event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
        return 1
    finally:
        sys.stdout.flush()
        sys.stderr.flush()

        bblogger.removeHandler(handler)

        # Restore the backup fds
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])

        # Close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
        si.close()

        logfile.close()
        if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
            # Empty log means nothing was written — drop it and its symlink.
            logger.debug(2, "Zero size logfn %s, removing", logfn)
            bb.utils.remove(logfn)
            bb.utils.remove(loglink)
    event.fire(TaskSucceeded(task, localdata), localdata)

    if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
        make_stamp(task, localdata)

    return 0
Ejemplo n.º 60
0
from __future__ import print_function
import sys
# Keep only absolute entries on sys.path, dropping relative ones.
sys.path = [p for p in sys.path if p.startswith('/')]
# Rename ourselves from '__main__' to '__bubble__' so later code can
# import this module under the new name.
__name__ = '__bubble__'
sys.modules[__name__] = sys.modules.pop('__main__')


def debug(msg):
    """Print *msg* to stderr (stdout is reserved for the transport)."""
    print(msg, file=sys.stderr)


# Reshuffle fds so that we can't break our transport by printing to stdout
import os
infd = os.dup(0)
outfd = os.dup(1)
inpipe = os.fdopen(infd, 'rb')
# bufsize=0: the outgoing transport must be unbuffered.
outpipe = os.fdopen(outfd, 'wb', 0)
# Point sys.stdin/stdout at /dev/null so stray reads/prints are harmless.
sys.stdin.close()
sys.stdin = open(os.devnull, 'rb')
sys.stdout.close()
sys.stdout = open(os.devnull, 'wb')

# Python 2/3 feature switches for the compatibility shims below.
PY2 = sys.version_info < (3, )
PY3 = not PY2
import threading
if PY2:
    __metaclass__ = type
    from Queue import Queue
    import cPickle as pickle

    def exec_(_code_, _globs_=None, _locs_=None):