Пример #1
0
 def open(self):
     """Start tailing the application log and stream each new line out."""
     tail_cmd = ["tail", "-f", "-n", "0", self.application.logfile]
     self.proc = Subprocess(tail_cmd, stdout=Subprocess.STREAM, bufsize=1)
     self.proc.set_exit_callback(self._close)
     # Kick off the first read; write_line is expected to re-arm itself.
     self.proc.stdout.read_until(b"\n", self.write_line)
Пример #2
0
    def _run_process(self, cmd, env=None):
        """
        Execute the subprocess of `docker-compose up`.

        :param cmd: argv list for the subprocess.
        :param env: optional environment mapping for the child.
        :returns: dict with decoded 'stdout' and 'stderr'.
        :raises CalledProcessError: when the child exits non-zero.

        NOTE(review): generator with a `return value` — presumably
        decorated with @gen.coroutine outside this view; confirm.
        """
        proc = Subprocess(cmd,
                          stdout=Subprocess.STREAM,
                          stderr=Subprocess.STREAM,
                          env=env)

        app_log.info('read outputs from Subprocess: %s', cmd)
        # Drain both pipes before waiting so the child cannot block on a
        # full pipe buffer.
        outputs = yield [
            proc.stdout.read_until_close(),
            proc.stderr.read_until_close()
        ]
        try:
            yield proc.wait_for_exit()
        except CalledProcessError:
            # Log captured stderr for diagnosis, then propagate.
            app_log.error('Process is exited abnormally. stderr: %s',
                          outputs[1])
            raise

        return {
            'stdout': to_unicode(outputs[0]),
            'stderr': to_unicode(outputs[1])
        }
Пример #3
0
def load_sync(context, url, callback):
    """Probe a video's duration with ffprobe and deliver the result via
    *callback* (wired through _parse_time_status as the exit callback).

    NOTE(review): `uri_scheme` comes from the enclosing module (not
    visible here) — confirm it is defined before this runs.
    """
    # Disable storage of original. These lines are useful if
    # you want your Thumbor instance to store all originals persistently
    # except video frames.
    #
    # from thumbor.storages.no_storage import Storage as NoStorage
    # context.modules.storage = NoStorage(context)

    unquoted_url = unquote(url)

    # ffprobe prints only the duration value (no wrappers, no key).
    command = BaseWikimediaEngine.wrap_command([
        context.config.FFPROBE_PATH,
        '-v',
        'error',
        '-show_entries',
        'format=duration',
        '-of',
        'default=noprint_wrappers=1:nokey=1',
        '%s%s' % (uri_scheme, unquoted_url)
    ], context)

    logger.debug('Command: %r' % command)

    process = Subprocess(command, stdout=Subprocess.STREAM)
    # Argument order must match _parse_time_status's signature.
    process.set_exit_callback(
        partial(
            _parse_time_status,
            context,
            unquoted_url,
            callback,
            process
        )
    )
Пример #4
0
class WatchFileHandler(WebSocketHandler):
    """
    Sends new data to WebSocket client while file changing.

    A ``tail -f`` subprocess follows the file; every line read from its
    stdout is forwarded over the websocket.
    """
    # Close reason used when the tail process exits on its own.
    streaming_finished_message = 'File streaming has finished up'
    # Extra tail arguments; subclasses may override.
    extra_args = []
    # How many trailing lines to emit on connect (set in initialize()).
    last_lines_limit = None
    # Path of the watched file; may be supplied per-route via initialize().
    filename = None

    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self._process = None  # the tail Subprocess, once started

    def initialize(self, filename=None, last_lines_limit=0):
        if filename is not None:
            self.filename = filename
        self.last_lines_limit = last_lines_limit

    def get_filename(self) -> str:
        """Hook for subclasses that compute the path dynamically."""
        return self.filename

    def open(self):
        cmd = ['tail']
        cmd += ['-n', str(self.last_lines_limit)]
        cmd += self.extra_args
        try:
            cmd += ['-f', self.get_filename()]
            self._process = Subprocess(cmd, stdout=Subprocess.STREAM, bufsize=1)
        except Exception as e:
            logger.error(str(e))
            self.close(reason=str(e))
        else:
            self._process.set_exit_callback(self._close)
            self._process.stdout.read_until(b'\n', self.write_line)

    def _close(self, *args) -> None:
        # BUG FIX: Subprocess.set_exit_callback invokes the callback with
        # the child's return code; the old zero-argument signature raised
        # TypeError when tail exited.  Direct `self._close()` calls still
        # work.
        self.close(reason=self.streaming_finished_message)

    def on_close(self, *args, **kwargs):
        # Tear down the tail process when the websocket goes away.
        if self._process is not None:
            self._process.proc.terminate()
            self._process.proc.wait()

    def transform_output_data(self, data: bytes) -> bytes:
        """Hook for subclasses to rewrite a line before it is sent."""
        return data

    def write_line(self, data: bytes) -> None:
        self.write_message(self.transform_output_data(data.strip()))
        # Re-arm for the next line.
        self._process.stdout.read_until(b'\n', self.write_line)

    def check_origin(self, origin):
        return True
        # TODO: configuration from settings.py
        # return super().check_origin(origin)

    def on_message(self, message):
        pass

    def data_received(self, chunk):
        pass
Пример #5
0
async def call_subprocess(
        cmd: Union[str, list], stdin_data: Optional[str] = None) \
        -> Tuple[int, Union[str, bytes], Union[str, bytes]]:
    """Call sub process async.

    :param cmd: command string (shlex-split) or argv list.
    :param stdin_data: optional text fed to the child's stdin.
    :returns: (exit code, stdout, stderr); on spawn failure,
        (errno, '', strerror).
    """

    if isinstance(cmd, str):
        cmd = shlex.split(cmd)
    try:
        sub_process = Subprocess(cmd,
                                 stdin=Subprocess.STREAM,
                                 stdout=Subprocess.STREAM,
                                 stderr=Subprocess.STREAM)
    except OSError as e:
        # Could not spawn at all: report errno and the OS error text.
        return e.errno, '', e.strerror

    if stdin_data:
        # BUG FIX: tornado iostreams accept bytes only; the declared str
        # input must be encoded before writing.
        if isinstance(stdin_data, str):
            stdin_data = stdin_data.encode()
        await sub_process.stdin.write(stdin_data)
        sub_process.stdin.close()

    # Wait and drain both pipes concurrently so the child cannot block
    # on a full pipe buffer.
    code, result, error = await multi([
        sub_process.wait_for_exit(raise_error=False),
        sub_process.stdout.read_until_close(),
        sub_process.stderr.read_until_close()
    ])

    result = result.strip()
    error = error.strip()

    return code, result, error
Пример #6
0
class GeneralSubprocess:
    """Wrap a shell command line in a tornado Subprocess and time it."""

    def __init__(self, id, cmd):
        self.pipe = None   # the running Subprocess, once started
        self.id = id       # caller-supplied identifier
        self.cmd = cmd     # command line string (shlex-split at run time)
        self.start = None  # wall-clock start time
        self.end = None    # wall-clock end time (set by stat())

    @coroutine
    def run_process(self):
        """Spawn the command and return (stdout, stderr) read to EOF."""
        self.pipe = Subprocess(shlex.split(self.cmd),
                               stdout=Subprocess.STREAM,
                               stderr=Subprocess.STREAM)
        self.start = time.time()
        (out, err) = (yield [Task(self.pipe.stdout.read_until_close),
                             Task(self.pipe.stderr.read_until_close)])
        return (out, err)

    def stat(self):
        """Poll the child; report elapsed time once it has finished."""
        self.pipe.poll()
        if self.pipe.returncode is not None:
            self.end = time.time()
            # BUG FIX: print() does no %-interpolation — the original
            # printed the literal format string and the float as a tuple.
            print('Done time : %f' % (self.end - self.start))
        else:
            print('Not done yet')
Пример #7
0
class EggHandler(BaseHandler):
    """Install a package from a git URL with pip and report, for each
    dependency parsed out of pip's output, how its pinned version
    compares to the latest release on PyPI.

    NOTE(review): Python 2-era code — `xmlrpclib` and the
    `tornado.web.asynchronous` decorator do not exist on Python 3 /
    modern Tornado; confirm the runtime before restyling.
    """

    def _handle_stdout(self, stdout):
        # REGEX (module-level, not visible here) yields (name, version)
        # pairs from pip's output.
        deps = {}
        result = REGEX.findall(stdout)
        client = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
        for dep in result:
            egg_name = dep[0]
            # releases[0] is treated as the latest published version.
            latest_version = client.package_releases(egg_name)[0]
            deps[egg_name] = string_version_compare(dep[1], latest_version)
            deps[egg_name]['current_version'] = dep[1]
            deps[egg_name]['latest_version'] = latest_version

        self.render('result.html', package_name=self.git_url.split('/')[-1], dependencies=deps)

    def _handle_pip_result(self, setup_py):
        # Exit callback: pip finished; now read everything it printed.
        self.sbp.stdout.read_until_close(self._handle_stdout)

    @tornado.web.asynchronous
    def post(self):
        self.git_url = self.get_argument('git_url')
        pip = self.find_pip()
        # '--no-install' downloads/unpacks only, so stdout lists the
        # requirements without installing them.
        self.sbp = Subprocess([pip, 'install',  'git+%s' % self.git_url, '--no-install'],
                              io_loop=self.application.main_loop,
                              stdout=Subprocess.STREAM,
                              stderr=Subprocess.STREAM)

        self.sbp.set_exit_callback(self._handle_pip_result)

    @tornado.web.asynchronous
    def get(self):
        self.render('index.html')

    def find_pip(self):
        # Assume a pip binary sits next to the running interpreter.
        return os.path.sep.join(os.path.split(sys.executable)[:-1] + ('pip',))
Пример #8
0
 def test_wait_for_exit_raise_disabled(self):
     """With raise_error=False the exit status is returned, not raised."""
     skip_if_twisted()
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
     exit_code = yield child.wait_for_exit(raise_error=False)
     self.assertEqual(exit_code, 1)
Пример #9
0
 def test_sigchild_signal(self):
     """A SIGTERM'd child must report ret == -SIGTERM via the exit
     callback; extra diagnostics distinguish two flaky-failure modes."""
     skip_if_twisted()
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     subproc = Subprocess([sys.executable, '-c',
                           'import time; time.sleep(30)'],
                          stdout=Subprocess.STREAM)
     self.addCleanup(subproc.stdout.close)
     subproc.set_exit_callback(self.stop)
     os.kill(subproc.pid, signal.SIGTERM)
     try:
         ret = self.wait(timeout=1.0)
     except AssertionError:
         # We failed to get the termination signal. This test is
         # occasionally flaky on pypy, so try to get a little more
         # information: did the process close its stdout
         # (indicating that the problem is in the parent process's
         # signal handling) or did the child process somehow fail
         # to terminate?
         subproc.stdout.read_until_close(callback=self.stop)
         try:
             self.wait(timeout=1.0)
         except AssertionError:
             raise AssertionError("subprocess failed to terminate")
         else:
             raise AssertionError("subprocess closed stdout but failed to "
                                  "get termination signal")
     self.assertEqual(subproc.returncode, ret)
     self.assertEqual(ret, -signal.SIGTERM)
    def call_process(self, cmd, stream, address, io_loop=None):
        """Run *cmd* and copy its stdout to *stream* in 100 KiB chunks.

        cmd: command in a list e.g ['ls', '-la']
        stream: destination written until the source closes
        address: peer identifier, logged and passed to self.on_exit

        NOTE(review): proc.stdout is already a STREAM-backed IOStream;
        wrapping its fileno() in a second PipeIOStream looks redundant —
        confirm intent before changing.

        TODO: add some way of calling proc.kill() if the stream is closed
        """

        stdout_stream = Subprocess.STREAM
        stderr_stream = Subprocess.STREAM
        proc = Subprocess(cmd, stdout=stdout_stream, stderr=stderr_stream)
        call_back = partial(self.on_exit, address)
        proc.set_exit_callback(call_back)

        pipe_stream = PipeIOStream(proc.stdout.fileno())

        try:
            while True:
                # partial=True: forward whatever is available, don't wait
                # for a full 102400-byte chunk.
                str_ = yield pipe_stream.read_bytes(102400, partial=True)
                yield stream.write(str_)
        except StreamClosedError:
            # Source or destination closed: normal end of streaming.
            pass
        print("end address: {}".format(address))
Пример #11
0
 def test_wait_for_exit_raise_disabled(self):
     """wait_for_exit(raise_error=False) resolves to the exit status."""
     skip_if_twisted()
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     proc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
     status = yield proc.wait_for_exit(raise_error=False)
     self.assertEqual(status, 1)
Пример #12
0
 def do_execute(self, params, callback):
     """Run self.filename with an environment built from *params* and
     invoke *callback* with (returncode, stdout) or
     (returncode, stdout, stderr) depending on self.output.

     Uses the legacy gen.Task/callback style:
     gen.Task(child.set_exit_callback) resolves with the exit code while
     the pipe reads run concurrently.
     """
     env = self.create_env(params)

     if self.output == 'combined':
         # stderr merged into stdout; result split on whitespace.
         child = Subprocess(
                 self.filename,
                 env=env,
                 stdout=Subprocess.STREAM,
                 stderr=subprocess.STDOUT,
                 io_loop=IOLoop.instance()
             )

         retcode, stdout = yield [
             gen.Task(child.set_exit_callback),
             gen.Task(child.stdout.read_until_close)
         ]

         callback((child.returncode, stdout.split()))
     else:
         # Separate pipes; both results split into lines.
         child = Subprocess(
                 self.filename,
                 env=env,
                 stdout=Subprocess.STREAM,
                 stderr=Subprocess.STREAM,
                 io_loop=IOLoop.instance()
             )

         retcode, stdout, stderr = yield [
             gen.Task(child.set_exit_callback),
             gen.Task(child.stdout.read_until_close),
             gen.Task(child.stderr.read_until_close)
         ]

         callback((child.returncode, stdout.splitlines(), stderr.splitlines()))
Пример #13
0
    async def send_to_ruby(self, request_json):
        """Pipe *request_json* into the ruby pcsd helper and return
        [stdout, stderr, exit_status]."""
        env = {"PCSD_DEBUG": "true" if self.__debug else "false"}
        if self.__gem_home is not None:
            env["GEM_HOME"] = self.__gem_home
        if self.__no_proxy is not None:
            env["NO_PROXY"] = self.__no_proxy
        if self.__https_proxy is not None:
            env["HTTPS_PROXY"] = self.__https_proxy

        ruby_cmd = [
            self.__ruby_executable,
            "-I",
            self.__pcsd_dir,
            self.__pcsd_cmdline_entry,
        ]
        pcsd_ruby = Subprocess(
            ruby_cmd,
            stdin=Subprocess.STREAM,
            stdout=Subprocess.STREAM,
            stderr=Subprocess.STREAM,
            env=env,
        )
        await Task(pcsd_ruby.stdin.write, str.encode(request_json))
        pcsd_ruby.stdin.close()
        # Gather both pipes and the exit status concurrently.
        return await multi([
            Task(pcsd_ruby.stdout.read_until_close),
            Task(pcsd_ruby.stderr.read_until_close),
            pcsd_ruby.wait_for_exit(raise_error=False),
        ])
Пример #14
0
 def _trace_done(self, returncode):
     """Exit callback for the reprozip trace step: on success start
     packing, otherwise clean up and report an error to the client."""
     self.nbapp.log.info("reprozip: tracing done, returned %d", returncode)
     if returncode == 0:
         # Pack
         if self._pack_file.exists():
             self._pack_file.remove()
         proc = Subprocess([
             'reprozip', 'pack', '-d', self._tempdir.path,
             self._pack_file.path
         ],
                           stdin=subprocess.PIPE)
         # reprozip pack needs no input; close stdin right away.
         proc.stdin.close()
         proc.set_exit_callback(self._packing_done)
         self.nbapp.log.info("reprozip: started packing...")
     else:
         self._tempdir.rmtree()
         # Exit code 3 here is treated as "the traced notebook failed".
         if returncode == 3:
             self.set_header('Content-Type', 'application/json')
             self.finish({
                 'error':
                 "There was an error running the notebook. "
                 "Please make sure that it can run from top to "
                 "bottom without error before packing."
             })
         else:
             self.send_error(500)
Пример #15
0
    class LogStreamer(tornado.websocket.WebSocketHandler):
        """
        A websocket for streaming log messages from log file to the browser.

        Lines are colour-coded by severity keyword and wrapped in HTML
        before being pushed to the client.
        """
        def open(self):
            # Follow only new content (-n 0) of the application log.
            filename = self.application.logfile
            self.proc = Subprocess(["tail", "-f", "-n", "0", filename],
                                   stdout=Subprocess.STREAM,
                                   bufsize=1)
            self.proc.set_exit_callback(self._close)
            self.proc.stdout.read_until(b"\n", self.write_line)

        def _close(self, *args, **kwargs):
            # Exit callback for the tail process: drop the websocket too.
            self.close()

        def on_close(self, *args, **kwargs):
            log.info("Trying to kill log streaming process...")
            self.proc.proc.terminate()
            self.proc.proc.wait()

        def write_line(self, data):
            """Forward one decoded log line as colour-coded HTML, then
            re-arm the next read."""
            html = data.decode()
            if "WARNING" in html:
                color = "text-warning"
            elif "ERROR" in html:
                color = "text-danger"
            else:
                color = "text-success"
            # Skip access-log noise; the appended script keeps the log
            # pane scrolled to the bottom.
            if "tornado.access" not in html and "poppy" not in html:
                html = "<samp><span class=%s>%s</span></samp>" % (color, html)
                html += "<script>$(\"#log\").scrollTop($(\"#log\")[0].scrollHeight);</script>"
                self.write_message(html.encode())
            self.proc.stdout.read_until(b"\n", self.write_line)
 def test_wait_for_exit_raise(self):
     """A non-zero exit surfaces as CalledProcessError by default."""
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, "-c", "import sys; sys.exit(1)"])
     with self.assertRaises(subprocess.CalledProcessError) as ctx:
         yield child.wait_for_exit()
     self.assertEqual(ctx.exception.returncode, 1)
Пример #17
0
    async def send_to_ruby(self, request_json):
        """Send *request_json* to the ruby pcsd process over stdin and
        return [stdout, stderr, exit_status]."""
        env = {
            "PCSD_DEBUG": "true" if self.__debug else "false"
        }
        if self.__gem_home is not None:
            env["GEM_HOME"] = self.__gem_home

        if self.__no_proxy is not None:
            env["NO_PROXY"] = self.__no_proxy
        if self.__https_proxy is not None:
            env["HTTPS_PROXY"] = self.__https_proxy

        pcsd_ruby = Subprocess(
            [
                self.__ruby_executable, "-I",
                self.__pcsd_dir,
                self.__pcsd_cmdline_entry
            ],
            stdin=Subprocess.STREAM,
            stdout=Subprocess.STREAM,
            stderr=Subprocess.STREAM,
            env=env
        )
        await Task(pcsd_ruby.stdin.write, str.encode(request_json))
        pcsd_ruby.stdin.close()
        # Gather both pipes and the exit status concurrently.
        return await multi([
            Task(pcsd_ruby.stdout.read_until_close),
            Task(pcsd_ruby.stderr.read_until_close),
            pcsd_ruby.wait_for_exit(raise_error=False),
        ])
Пример #18
0
class GeneralSubprocess:
    """Run a shell command line via tornado's Subprocess and measure its
    wall-clock duration."""

    def __init__(self, id, cmd):
        self.pipe = None   # Subprocess handle, set by run_process()
        self.id = id       # caller-supplied identifier
        self.cmd = cmd     # command line string
        self.start = None  # start timestamp
        self.end = None    # end timestamp (set by stat())

    @coroutine
    def run_process(self):
        """Spawn self.cmd and return (stdout, stderr) read to EOF."""
        self.pipe = Subprocess(shlex.split(self.cmd),
                               stdout=Subprocess.STREAM,
                               stderr=Subprocess.STREAM)
        self.start = time.time()
        (out, err) = (yield [
            Task(self.pipe.stdout.read_until_close),
            Task(self.pipe.stderr.read_until_close)
        ])
        return (out, err)

    def stat(self):
        """Poll the child and report elapsed time once it exited."""
        self.pipe.poll()
        if self.pipe.returncode is not None:
            self.end = time.time()
            # BUG FIX: print() takes no printf-style format argument; use
            # the % operator so the elapsed time is actually interpolated.
            print('Done time : %f' % (self.end - self.start))
        else:
            print('Not done yet')
Пример #19
0
 def test_h2spec(self):
     """Run the external h2spec conformance tool against this server."""
     cmd = [self.h2spec_path, "-p", str(self.get_http_port())]
     for section in options.h2spec_section:
         cmd += ["-s", section]
     proc = Subprocess(cmd)
     yield proc.wait_for_exit()
Пример #20
0
 def open(self):
     """Tail the requested process's log stream and push lines out."""
     id_ = self.get_argument('id')
     self.p = Subprocess(['tail', '-f', PROCESSES[id_][self.stream]],
                         stdout=Subprocess.STREAM,
                         stderr=Subprocess.STREAM)
     self.p.set_exit_callback(self._close)
     # BUG FIX: IOStream.read_until requires a bytes delimiter on
     # Python 3; a str delimiter raises TypeError.
     self.p.stdout.read_until(b'\n', self.write_line)
 def test_sigchild_future(self):
     """wait_for_exit() resolves with the child's (zero) exit status."""
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, "-c", "pass"])
     status = yield child.wait_for_exit()
     self.assertEqual(status, 0)
     self.assertEqual(child.returncode, status)
Пример #22
0
def run_command(cmd, input=None, env=None):
    """Run *cmd* through the shell (generator coroutine).

    Returns decoded, stripped stdout on success; raises RuntimeError
    with the captured stderr when the command exits non-zero.
    """
    process = Subprocess(cmd,
                         shell=True,
                         env=env,
                         stdin=Subprocess.STREAM,
                         stdout=Subprocess.STREAM,
                         stderr=Subprocess.STREAM)
    if input:
        try:
            yield process.stdin.write(input.encode())
        except StreamClosedError:
            # Apparently harmless
            pass
    process.stdin.close()
    out = yield process.stdout.read_until_close()
    eout = yield process.stderr.read_until_close()
    process.stdout.close()
    process.stderr.close()
    eout = eout.decode().strip()
    try:
        err = yield process.wait_for_exit()
    except CalledProcessError:
        # Surface stderr as the failure message.
        raise RuntimeError(eout)
    if err != 0:
        return err  # exit error?
    else:
        return out.decode().strip()
Пример #23
0
 def open(self):
     """Tail /tmp/simple_foobar.log and push each new line to the client."""
     filename = "/tmp/simple_foobar.log"
     self.proc = Subprocess(["tail", "-f", filename, "-n", "0"],
                            stdout=Subprocess.STREAM,
                            bufsize=1)
     self.proc.set_exit_callback(self._close)
     # BUG FIX: read_until() requires a bytes delimiter on Python 3.
     self.proc.stdout.read_until(b"\n", self.write_line)
Пример #24
0
 def _start_data_stream(self, log_path):
     """Begin tailing *log_path* and fan new lines out to clients."""
     tail_proc = Subprocess(['tail', '-f', log_path, '-n', '0'],
                            stdout=Subprocess.STREAM,
                            bufsize=1)
     tail_proc.set_exit_callback(self._close)
     self._add_proc(self.task_id, tail_proc)
     # BUG FIX: IOStream.read_until requires a bytes delimiter on
     # Python 3; a str delimiter raises TypeError.
     tail_proc.stdout.read_until(b'\n', self.write_line_to_clients)
Пример #25
0
def run_proc(port, cmd, stdout_file, stderr_file, directory):
    """Start *cmd* (formatted with the port number) in *directory* and
    stream its stdout/stderr into per-port log files.

    :returns: the tornado Subprocess handle.
    :raises Exception: when *directory* does not exist.
    """
    run_cmd = cmd.format(numproc=port)

    if directory.startswith('.'):
        directory = os.path.realpath(directory)
        # BUG FIX: Python 2 `print` statement is a syntax error on
        # Python 3 — use the print() function.
        print("Directory", directory)

    if not os.path.exists(directory):
        raise Exception('working directory doesnt exist')

    proc = Subprocess(
        shlex.split(run_cmd),
        stdout=Subprocess.STREAM,
        stderr=Subprocess.STREAM,
        cwd=directory
    )
    proc.set_exit_callback(exit_callback)

    std_out_log_file_name = get_out_file_name(directory, stdout_file.format(numproc=port))
    std_err_log_file_name = get_out_file_name(directory, stderr_file.format(numproc=port))
    # NOTE: the log handles deliberately stay open for the lifetime of
    # the streaming callbacks below.
    stdout_fhandler = open(std_out_log_file_name, 'a')
    stderr_fhandler = open(std_err_log_file_name, 'a')
    out_fn = partial(_out, filehandler=stdout_fhandler, head="%s: " % port)
    err_fn = partial(_out, filehandler=stderr_fhandler, head="%s: " % port)

    proc.stdout.read_until_close(exit_callback, streaming_callback=out_fn)
    proc.stderr.read_until_close(exit_callback, streaming_callback=err_fn)

    return proc
Пример #26
0
 def test_sigchild_future(self):
     """A trivially-exiting child yields status 0 from wait_for_exit()."""
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     proc = Subprocess([sys.executable, "-c", "pass"])
     exit_code = yield proc.wait_for_exit()
     self.assertEqual(0, exit_code)
     self.assertEqual(proc.returncode, exit_code)
Пример #27
0
 def test_wait_for_exit_raise(self):
     """By default, wait_for_exit() raises on a non-zero exit status."""
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     proc = Subprocess([sys.executable, "-c", "import sys; sys.exit(1)"])
     with self.assertRaises(subprocess.CalledProcessError) as err_ctx:
         yield proc.wait_for_exit()
     self.assertEqual(err_ctx.exception.returncode, 1)
Пример #28
0
def start(op, *args, **kw):
    """Spawn ``gnunet-<op>`` with *args*; return (Subprocess, exit Future)."""
    if anonymity:
        args = ('--anonymity', str(anonymity)) + args
    exit_future = gen.Future()
    note.cyan('gnunet-' + op + ' ' + ' '.join(args))
    proc = Subprocess(('gnunet-' + op,) + args, **kw)
    # Resolve the future with the exit code once the process terminates.
    proc.set_exit_callback(exit_future.set_result)
    return proc, exit_future
Пример #29
0
 def test_sigchild(self):
     """set_exit_callback fires with exit status 0 for a trivial child."""
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, "-c", "pass"])
     child.set_exit_callback(self.stop)
     status = self.wait()
     self.assertEqual(status, 0)
     self.assertEqual(child.returncode, status)
Пример #30
0
 def test_sigchild_future(self):
     """wait_for_exit() resolves to 0 and matches subproc.returncode."""
     skip_if_twisted()
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, '-c', 'pass'])
     status = yield child.wait_for_exit()
     self.assertEqual(status, 0)
     self.assertEqual(child.returncode, status)
Пример #31
0
 def test_sigchild_future(self):
     """The exit future of a no-op child resolves with code 0."""
     skip_if_twisted()
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     proc = Subprocess([sys.executable, '-c', 'pass'])
     exit_code = yield proc.wait_for_exit()
     self.assertEqual(0, exit_code)
     self.assertEqual(proc.returncode, exit_code)
Пример #32
0
def work1():
    """Launch 'sleep 5' and register exit-future/dummy callbacks on the
    running IOLoop without blocking."""
    proc = Subprocess(['sleep', '5'])
    exit_future = proc.wait_for_exit()

    loop = ioloop.IOLoop.instance()
    loop.add_future(exit_future, finish_callback)
    print('work1: After add_future....')

    loop.add_callback(dumy_callback)
    print('work1: After add_callback...')
Пример #33
0
 def run_process(self):
     """Spawn self.cmd and gather (stdout, stderr) until both pipes close."""
     self.pipe = Subprocess(shlex.split(self.cmd),
                            stdout=Subprocess.STREAM,
                            stderr=Subprocess.STREAM)
     self.start = time.time()
     stdout_task = Task(self.pipe.stdout.read_until_close)
     stderr_task = Task(self.pipe.stderr.read_until_close)
     out, err = yield [stdout_task, stderr_task]
     return (out, err)
Пример #34
0
 def test_sigchild_signal(self):
     """A SIGTERM'd child reports -SIGTERM through the exit callback."""
     skip_if_twisted()
     Subprocess.initialize(io_loop=self.io_loop)
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, "-c", "import time; time.sleep(30)"],
                        io_loop=self.io_loop)
     child.set_exit_callback(self.stop)
     os.kill(child.pid, signal.SIGTERM)
     status = self.wait()
     self.assertEqual(child.returncode, status)
     self.assertEqual(status, -signal.SIGTERM)
Пример #35
0
 def test_sigchild(self):
     """Exit callback delivers status 0 for a no-op child.

     Skipped under Twisted: its SIGCHLD handler and Subprocess's
     conflict with each other.
     """
     skip_if_twisted()
     Subprocess.initialize()
     self.addCleanup(Subprocess.uninitialize)
     child = Subprocess([sys.executable, '-c', 'pass'])
     child.set_exit_callback(self.stop)
     status = self.wait()
     self.assertEqual(status, 0)
     self.assertEqual(child.returncode, status)
def seek_and_screenshot(callback, context, normalized_url, seek):
    """Extract one frame of the video at *normalized_url*, *seek* seconds
    in, into a temp file; _process_done then receives the result and
    *callback*.

    NOTE(review): the partial's argument order must match
    _process_done's signature — confirm against its definition.
    """
    output_file = NamedTemporaryFile(delete=False)

    command = [
        context.config.FFMPEG_PATH,
        # Order is important, for fast seeking -ss and -headers have to be before -i
        # As explained on https://trac.ffmpeg.org/wiki/Seeking
        '-ss',
        '%d' % seek
    ]

    if hasattr(context.config, 'SWIFT_HOST'):
        # Swift-hosted originals need an auth token header.
        command += [
            '-headers',
            'X-Auth-Token: %s' % get_swift_token(context)
        ]

    command += [
        '-i',
        '%s' % normalized_url,
        '-y',
        '-vframes',
        '1',
        '-an',
        '-f',
        'image2',
        '-vf',
        'scale=iw*sar:ih',  # T198043 apply any codec-specific aspect ratio
        '-nostats',
        '-loglevel',
        'fatal',
        output_file.name
    ]

    command = ShellRunner.wrap_command(command, context)

    logger.debug('[Video] _parse_time: %r' % command)

    process = Subprocess(
        command,
        stdout=Subprocess.STREAM,
        stderr=Subprocess.STREAM
    )

    process.set_exit_callback(
        partial(
            _process_done,
            callback,
            process,
            context,
            normalized_url,
            seek,
            output_file
        )
    )
Пример #37
0
 def get(self, username):
     """Look up *username* via the external command; 404 when it fails."""
     lookup = Subprocess([options.command, username],
                         stdin=self.devnull,
                         stdout=Subprocess.STREAM,
                         stderr=self.devnull)
     body = yield lookup.stdout.read_until_close()
     status = yield lookup.wait_for_exit(raise_error=False)
     if status != 0:
         self.set_status(404)
     self.set_header('Content-Type', 'text/plain')
     self.finish(body)
Пример #38
0
 def tearDown(self):
     """Release the test IOLoop and the process-wide Subprocess state."""
     # Clean up Subprocess, so it can be used again with a new ioloop.
     Subprocess.uninitialize()
     self.loop.clear_current()
     if (not IOLoop.initialized() or self.loop is not IOLoop.instance()):
         # Try to clean up any file descriptors left open in the ioloop.
         # This avoids leaks, especially when tests are run repeatedly
         # in the same process with autoreload (because curl does not
         # set FD_CLOEXEC on its file descriptors)
         self.loop.close(all_fds=True)
     super(TornadoAPITest, self).tearDown()
Пример #39
0
 def tearDown(self):
     """Release the test IOLoop and the process-wide Subprocess state."""
     # Clean up Subprocess, so it can be used again with a new ioloop.
     Subprocess.uninitialize()
     self.loop.clear_current()
     if (not IOLoop.initialized() or
             self.loop is not IOLoop.instance()):
         # Try to clean up any file descriptors left open in the ioloop.
         # This avoids leaks, especially when tests are run repeatedly
         # in the same process with autoreload (because curl does not
         # set FD_CLOEXEC on its file descriptors)
         self.loop.close(all_fds=True)
     super(TornadoAPITest, self).tearDown()
Пример #40
0
    def get(self, command):
        """Grep the wiki markdown sources for 'python' and render the hits.

        NOTE(review): stdout/stderr use plain PIPE (not STREAM), so the
        final .read() calls are blocking and only run after the process
        exits — this assumes the output fits the pipe buffer; confirm.
        """
        process = Subprocess(['grep python ../wiki/*.md'],
                             stdout=PIPE,
                             stderr=PIPE,
                             shell=True)
        yield process.wait_for_exit(
        )  # This waits without blocking the event loop.

        out, err = process.stdout.read(), process.stderr.read()
        # Capture (filename, matched text) pairs from grep's output.
        posts = re.findall('wiki/(.*.md)(.*)', out.decode())
        self.render('{theme}/search.html'.format(theme=setting.theme),
                    posts=posts)
Пример #41
0
    def _recognize_sample_with_gracenote(self, sample_path):
        """Identify *sample_path* via the gracetune helper script and
        return its decoded JSON result (gen.Return)."""
        conf = self.settings['gn_config']
        proc = Subprocess([
            'tools/gracetune_identify.py',
            '--client-id', conf['client_id'],
            '--user-id', conf['user_id'],
            '--license', conf['license'],
            '--filename', sample_path
            ], stdout=Subprocess.STREAM)

        # BUG FIX: drain stdout before waiting for exit.  Waiting first
        # can deadlock: the child blocks writing once the pipe buffer
        # fills, so it never exits and the wait never completes.
        ret = yield proc.stdout.read_until_close()
        yield proc.wait_for_exit()
        raise Return(json.loads(ret))
Пример #42
0
 def open(self):
     """Start the tail subprocess and begin streaming lines to the socket."""
     command = ['tail', '-n', str(self.last_lines_limit)]
     command += self.extra_args
     try:
         command += ['-f', self.get_filename()]
         self._process = Subprocess(command, stdout=Subprocess.STREAM, bufsize=1)
     except Exception as exc:
         logger.error(str(exc))
         self.close(reason=str(exc))
     else:
         self._process.set_exit_callback(self._close)
         self._process.stdout.read_until(b'\n', self.write_line)
Пример #43
0
def global_signal_master(signals=SIG_TERM_DEFAULT):
    """Install the global handler for *signals*, remembering each signal's
    previous handler in GLOBAL_SIGNAL_REGISTER."""
    global GLOBAL_SIGNAL_REGISTER

    # `SIGCHLD` signal will handle by tornado.process.Subprocess, this make it as Supervisor of all it's instances.
    Subprocess.initialize()

    for sig_name in list(signals):
        previous_handler = signal.signal(SIG_FROM_NAME[sig_name],
                                         global_signal_handler)
        GLOBAL_SIGNAL_REGISTER[sig_name] = previous_handler

    logger.info('Global Safe Signal Register, tid:%s, pid:%s, dask:%s',
                threading.get_ident(), os.getpid(), IN_DASK)
Пример #44
0
    def tearDown(self) -> None:
        """Cancel leftover asyncio tasks, release Subprocess state, close
        the IOLoop, and re-raise any escaped exception.

        NOTE: restyling is risky here — `self.__rethrow` is name-mangled
        to the defining class and the cleanup order matters.
        """
        # Native coroutines tend to produce warnings if they're not
        # allowed to run to completion. It's difficult to ensure that
        # this always happens in tests, so cancel any tasks that are
        # still pending by the time we get here.
        asyncio_loop = self.io_loop.asyncio_loop  # type: ignore
        if hasattr(asyncio, "all_tasks"):  # py37
            tasks = asyncio.all_tasks(asyncio_loop)  # type: ignore
        else:
            tasks = asyncio.Task.all_tasks(asyncio_loop)
        # Tasks that are done may still appear here and may contain
        # non-cancellation exceptions, so filter them out.
        tasks = [t for t in tasks if not t.done()]  # type: ignore
        for t in tasks:
            t.cancel()
        # Allow the tasks to run and finalize themselves (which means
        # raising a CancelledError inside the coroutine). This may
        # just transform the "task was destroyed but it is pending"
        # warning into a "uncaught CancelledError" warning, but
        # catching CancelledErrors in coroutines that may leak is
        # simpler than ensuring that no coroutines leak.
        if tasks:
            done, pending = self.io_loop.run_sync(lambda: asyncio.wait(tasks))
            assert not pending
            # If any task failed with anything but a CancelledError, raise it.
            for f in done:
                try:
                    f.result()
                except asyncio.CancelledError:
                    pass

        # Clean up Subprocess, so it can be used again with a new ioloop.
        Subprocess.uninitialize()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            self.io_loop.clear_current()
        if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
            # Try to clean up any file descriptors left open in the ioloop.
            # This avoids leaks, especially when tests are run repeatedly
            # in the same process with autoreload (because curl does not
            # set FD_CLOEXEC on its file descriptors)
            self.io_loop.close(all_fds=True)
        if self.should_close_asyncio_loop:
            self.asyncio_loop.close()
        super().tearDown()
        # In case an exception escaped or the StackContext caught an exception
        # when there wasn't a wait() to re-raise it, do so here.
        # This is our last chance to raise an exception in a way that the
        # unittest machinery understands.
        self.__rethrow()
Пример #45
0
 def __init__(self):
     """Start tailing both the output and error logs; each tail's stdout
     is streamed line-by-line to the matching write callback."""
     self.sockets = {}
     filename = "./output.log"
     file_2 = './err.log'
     self.proc = Subprocess(["tail", "-f", filename, "-n", "0"],
                            stdout=Subprocess.STREAM,
                            bufsize=1)
     self.proc2 = Subprocess(["tail", "-f", file_2, "-n", "0"],
                            stdout=Subprocess.STREAM,
                            bufsize=1)
     self.proc.set_exit_callback(self._close)
     # BUG FIX: IOStream.read_until requires a bytes delimiter on
     # Python 3; str delimiters raise TypeError.
     self.proc.stdout.read_until(b"\n", self.write_output)
     self.proc2.set_exit_callback(self._close)
     self.proc2.stdout.read_until(b"\n", self.write_err)
Пример #46
0
    def dvi_to_svg(self):
        """Convert render.dvi to SVG with dvisvgm (generator coroutine);
        raises RuntimeError with the full log on failure."""
        cmd = [self.backend.dvisvgm_path, '--verbosity=1', '--no-fonts',
               'render.dvi']
        dvisvgm = Subprocess(cmd,
                             stdout=Subprocess.STREAM,
                             stderr=subprocess.STDOUT,
                             cwd=self.dir)

        log = yield dvisvgm.stdout.read_until_close()
        try:
            yield dvisvgm.wait_for_exit()
        except subprocess.CalledProcessError:
            raise RuntimeError('Failed to run dvisvgm, full log:\n' +
                               utf8text(log, errors='backslashreplace'))
Пример #47
0
    def latex_to_dvi(self):
        """Compile render.tex to DVI with latex (generator coroutine);
        raises RuntimeError with the full log on failure."""
        cmd = [self.backend.latex_path, '-halt-on-error',
               '-interaction=nonstopmode', 'render.tex']
        latex = Subprocess(cmd,
                           stdout=Subprocess.STREAM,
                           stderr=subprocess.STDOUT,
                           cwd=self.dir)

        log = yield latex.stdout.read_until_close()
        try:
            yield latex.wait_for_exit()
        except subprocess.CalledProcessError:
            raise RuntimeError('Failed to run latex, full log:\n' +
                               utf8text(log, errors='backslashreplace'))
Пример #48
0
 def tearDown(self):
     """Release Subprocess state and close the test IOLoop, then
     re-raise any exception the test machinery caught."""
     # Clean up Subprocess, so it can be used again with a new ioloop.
     Subprocess.uninitialize()
     self.io_loop.clear_current()
     # Try to clean up any file descriptors left open in the ioloop.
     # This avoids leaks, especially when tests are run repeatedly
     # in the same process with autoreload (because curl does not
     # set FD_CLOEXEC on its file descriptors)
     self.io_loop.close(all_fds=True)
     super(AsyncTestCase, self).tearDown()
     # In case an exception escaped or the StackContext caught an exception
     # when there wasn't a wait() to re-raise it, do so here.
     # This is our last chance to raise an exception in a way that the
     # unittest machinery understands.
     self.__rethrow()
Пример #49
0
def run_command(cmd, input=None, env=None):
    """Run *cmd* through a shell, optionally feeding *input* to its stdin.

    Returns the stripped stdout text (str) on success, or the non-zero
    exit status (int) on failure.
    """
    proc = Subprocess(cmd, shell=True, env=env,
                      stdin=Subprocess.STREAM, stdout=Subprocess.STREAM)
    if input:
        try:
            yield proc.stdin.write(input.encode())
        except StreamClosedError:
            # The child may exit before consuming stdin; apparently harmless
            # (same guard as the sibling run_command in this file).
            pass
    proc.stdin.close()
    out = yield proc.stdout.read_until_close()
    proc.stdout.close()
    # raise_error=False: with the default, wait_for_exit() raises
    # CalledProcessError on a non-zero status, which made the error-return
    # branch below unreachable.
    err = yield proc.wait_for_exit(raise_error=False)
    if err != 0:
        return err  # exit error?
    else:
        return out.decode().strip()
Пример #50
0
 def tearDown(self):
     """Release IOLoop resources and re-raise any exception the test swallowed."""
     # Clean up Subprocess, so it can be used again with a new ioloop.
     Subprocess.uninitialize()
     self.io_loop.clear_current()
     # Try to clean up any file descriptors left open in the ioloop.
     # This avoids leaks, especially when tests are run repeatedly
     # in the same process with autoreload (because curl does not
     # set FD_CLOEXEC on its file descriptors)
     self.io_loop.close(all_fds=True)
     super(AsyncTestCase, self).tearDown()
     # In case an exception escaped or the StackContext caught an exception
     # when there wasn't a wait() to re-raise it, do so here.
     # This is our last chance to raise an exception in a way that the
     # unittest machinery understands.
     self.__rethrow()
Пример #51
0
 def countdown_handler(self, interval, count):
     """Run the local countdown helper and stream its stdout over ZMQ.

     Each line is forwarded as a JSON message with finished=False; a final
     finished=True message is sent once the child closes its stdout.
     """
     command = '{0}/countdown -i {1} {2}'.format(os.getcwd(), interval,
                                                 count)
     proc = Subprocess(shlex.split(command), stdout=Subprocess.STREAM)

     def publish(stdout_line, done):
         # Frame 0 is the topic; the payload is UTF-8 encoded JSON.
         payload = json_encode({
             'stdout': stdout_line,
             'finished': done,
             'timestamp': datetime.now().timestamp()
         })
         self.zmq_stream.send_multipart([b'0', utf8(payload)])

     try:
         while True:
             raw = yield proc.stdout.read_until(b'\n')
             # Strip the trailing newline before forwarding.
             text = to_unicode(raw)[:-1]
             self.log.info('command read: %s', text)
             publish(text, False)
     except StreamClosedError:
         # Child closed stdout: announce completion to all subscribers.
         self.log.info('command closed')
         publish(None, True)
Пример #52
0
    def test_subprocess(self):
        """Drive an interactive Python child over stdin/stdout pipes."""
        if IOLoop.configured_class().__name__.endswith('LayeredTwistedIOLoop'):
            # This test fails non-deterministically with LayeredTwistedIOLoop.
            # (the read_until('\n') returns '\n' instead of 'hello\n')
            # This probably indicates a problem with either TornadoReactor
            # or TwistedIOLoop, but I haven't been able to track it down
            # and for now this is just causing spurious travis-ci failures.
            raise unittest.SkipTest("Subprocess tests not compatible with "
                                    "LayeredTwistedIOLoop")
        # -u: unbuffered output; -i: force the interactive >>> prompt even
        # though stdin is a pipe, not a tty.
        subproc = Subprocess([sys.executable, '-u', '-i'],
                             stdin=Subprocess.STREAM,
                             stdout=Subprocess.STREAM, stderr=subprocess.STDOUT,
                             io_loop=self.io_loop)
        # Ensure the child is reaped even if an assertion below fails.
        self.addCleanup(lambda: (subproc.proc.terminate(), subproc.proc.wait()))
        subproc.stdout.read_until(b'>>> ', self.stop)
        self.wait()
        subproc.stdin.write(b"print('hello')\n")
        subproc.stdout.read_until(b'\n', self.stop)
        data = self.wait()
        self.assertEqual(data, b"hello\n")

        subproc.stdout.read_until(b">>> ", self.stop)
        self.wait()
        subproc.stdin.write(b"raise SystemExit\n")
        subproc.stdout.read_until_close(self.stop)
        data = self.wait()
        self.assertEqual(data, b"")
Пример #53
0
class GeneratePdfExecutor(object):
    """Run an external PDF-generation command and stream its output.

    Spawns the command template from PDF_SETTINGS[pdf.setting_name],
    relays stdout/stderr lines to the logger, sends the resulting file
    through the request handler on exit, and resolves the returned Future.
    """

    def __init__(self, data_path, request_handler, pdf, logger=None):
        self.data_path = data_path
        self.logger = logger
        self.request_handler = request_handler
        self.pdf = pdf

    def run(self):
        """Start the subprocess; returns a Future resolved when it finishes."""
        self.output = '%s/%s.%s' % (self.data_path, self.pdf.id, self.pdf.format)

        if self.logger:
            self.logger.debug("GeneratePdfExecutor: Start generating %s from %s (pdf.id:%s)" % (self.output, self.pdf.url, self.pdf.id))

        args = PDF_SETTINGS[self.pdf.setting_name] % (self.pdf.url, self.output)

        if self.logger:
            self.logger.debug("GeneratePdfExecutor: executing: %s" % args)

        self.p = Subprocess(args.split(" "), stdout=Subprocess.STREAM, stderr=Subprocess.STREAM)
        self.p.set_exit_callback(self.send_end)

        # IOStream delimiters must be bytes; passing "\n" (str) raises
        # TypeError on Python 3 (cf. the b"\n" usage elsewhere in this file).
        self.p.stdout.read_until(b"\n", self.send_stdout)
        self.p.stderr.read_until(b"\n", self.send_stderr)

        self.f = Future()

        return self.f

    def send_stdout(self, data):
        # Log one stdout line, then re-arm the read for the next one.
        if self.logger:
            self.logger.debug("GeneratePdfExecutor: stdout: %s" % data.strip())

        self.p.stdout.read_until(b"\n", self.send_stdout)

    def send_stderr(self, data):
        # Log one stderr line, then re-arm the read for the next one.
        if self.logger:
            self.logger.error("GeneratePdfExecutor: stderr: %s" % data.strip())

        self.p.stderr.read_until(b"\n", self.send_stderr)

    def send_end(self, status_code):
        # NOTE(review): the file is sent even when status_code != 0 —
        # confirm whether a failing command should reject the Future instead.
        if self.logger:
            self.logger.debug("GeneratePdfExecutor: status_code: %s" % status_code)

        self.request_handler.send_file(self.output)

        self.f.set_result(True)
def load_sync(context, url, callback):
    """Asynchronously probe a video's duration with ffprobe.

    Builds the ffprobe command (adding a Swift auth header when the config
    has SWIFT_HOST), launches it, and registers _parse_time_status as the
    exit callback to continue processing.
    """
    # Disable storage of original. These lines are useful if
    # you want your Thumbor instance to store all originals persistently
    # except video frames.
    #
    # from thumbor.storages.no_storage import Storage as NoStorage
    # context.modules.storage = NoStorage(context)

    normalized_url = _normalize_url(url)

    command = [
        context.config.FFPROBE_PATH,
        '-v', 'error',
        '-show_entries', 'format=duration',
        '-of', 'default=noprint_wrappers=1:nokey=1',
    ]

    # Swift-backed originals need an auth token to be fetched by ffprobe.
    if hasattr(context.config, 'SWIFT_HOST'):
        command.extend([
            '-headers',
            'X-Auth-Token: %s' % get_swift_token(context),
        ])

    command.append('%s' % normalized_url)

    command = ShellRunner.wrap_command(command, context)

    logger.debug('[Video] load_sync: %r' % command)

    process = Subprocess(
        command,
        stdout=Subprocess.STREAM,
        stderr=Subprocess.STREAM,
    )

    process.set_exit_callback(
        partial(
            _parse_time_status,
            context,
            normalized_url,
            callback,
            process,
        )
    )
Пример #55
0
 def run_process(self):
     """Launch self.cmd, record the start time, and gather full stdout/stderr.

     Returns a (stdout_bytes, stderr_bytes) tuple once both streams close.
     """
     self.pipe = Subprocess(shlex.split(self.cmd),
                            stdout=Subprocess.STREAM,
                            stderr=Subprocess.STREAM)
     self.start = time.time()
     # Read both streams concurrently so neither pipe can fill and block.
     results = yield [Task(self.pipe.stdout.read_until_close),
                      Task(self.pipe.stderr.read_until_close)]
     return (results[0], results[1])
Пример #56
0
    def tearDown(self) -> None:
        """Cancel leftover asyncio tasks, then release the IOLoop's resources."""
        # Native coroutines tend to produce warnings if they're not
        # allowed to run to completion. It's difficult to ensure that
        # this always happens in tests, so cancel any tasks that are
        # still pending by the time we get here.
        asyncio_loop = self.io_loop.asyncio_loop  # type: ignore
        if hasattr(asyncio, "all_tasks"):  # py37
            tasks = asyncio.all_tasks(asyncio_loop)  # type: ignore
        else:
            tasks = asyncio.Task.all_tasks(asyncio_loop)
        # Tasks that are done may still appear here and may contain
        # non-cancellation exceptions, so filter them out.
        tasks = [t for t in tasks if not t.done()]
        for t in tasks:
            t.cancel()
        # Allow the tasks to run and finalize themselves (which means
        # raising a CancelledError inside the coroutine). This may
        # just transform the "task was destroyed but it is pending"
        # warning into a "uncaught CancelledError" warning, but
        # catching CancelledErrors in coroutines that may leak is
        # simpler than ensuring that no coroutines leak.
        if tasks:
            done, pending = self.io_loop.run_sync(lambda: asyncio.wait(tasks))
            assert not pending
            # If any task failed with anything but a CancelledError, raise it.
            for f in done:
                try:
                    f.result()
                except asyncio.CancelledError:
                    pass

        # Clean up Subprocess, so it can be used again with a new ioloop.
        Subprocess.uninitialize()
        self.io_loop.clear_current()
        if not isinstance(self.io_loop, _NON_OWNED_IOLOOPS):
            # Try to clean up any file descriptors left open in the ioloop.
            # This avoids leaks, especially when tests are run repeatedly
            # in the same process with autoreload (because curl does not
            # set FD_CLOEXEC on its file descriptors)
            self.io_loop.close(all_fds=True)
        super(AsyncTestCase, self).tearDown()
        # In case an exception escaped or the StackContext caught an exception
        # when there wasn't a wait() to re-raise it, do so here.
        # This is our last chance to raise an exception in a way that the
        # unittest machinery understands.
        self.__rethrow()
Пример #57
0
    def post(self):
        """Start an asynchronous `pip install git+<url>` for the posted repo.

        The git URL comes from the request's `git_url` argument; the exit
        status is handled later by _handle_pip_result.
        """
        self.git_url = self.get_argument('git_url')
        pip = self.find_pip()
        # NOTE(review): '--no-install' was removed in modern pip — confirm
        # the pip version found by find_pip() still supports it.
        self.sbp = Subprocess([pip, 'install',  'git+%s' % self.git_url, '--no-install'],
                              io_loop=self.application.main_loop,
                              stdout=Subprocess.STREAM,
                              stderr=Subprocess.STREAM)

        self.sbp.set_exit_callback(self._handle_pip_result)
Пример #58
0
class LogMonitor(object):
    """Tail output.log and err.log, broadcasting new lines to listeners.

    Each registered socket receives JSON messages of the form
    {'type': 'out'|'err', 'msg': <line>}.
    """

    def __init__(self):
        self.sockets = {}
        filename = "./output.log"
        file_2 = './err.log'
        self.proc = Subprocess(["tail", "-f", filename, "-n", "0"],
                               stdout=Subprocess.STREAM,
                               bufsize=1)
        self.proc2 = Subprocess(["tail", "-f", file_2, "-n", "0"],
                                stdout=Subprocess.STREAM,
                                bufsize=1)
        self.proc.set_exit_callback(self._close)
        # IOStream delimiters must be bytes; "\n" (str) raises TypeError
        # on Python 3.
        self.proc.stdout.read_until(b"\n", self.write_output)
        self.proc2.set_exit_callback(self._close)
        self.proc2.stdout.read_until(b"\n", self.write_err)

    @tornado.gen.coroutine
    def _close(self, *args, **kwargs):
        # If either tail exits, terminate and reap both children.
        self.proc.proc.terminate()
        self.proc.proc.wait()
        self.proc2.proc.terminate()
        self.proc2.proc.wait()

    @tornado.gen.coroutine
    def add_listener(self, _id, sock):
        """Register *sock* to receive log lines under key *_id*."""
        self.sockets[_id] = sock

    @tornado.gen.coroutine
    def remove_listener(self, _id):
        """Stop sending log lines to the listener registered as *_id*."""
        del self.sockets[_id]

    @tornado.gen.coroutine
    def write_output(self, data):
        # Decode before JSON-encoding: json.dumps cannot serialize bytes.
        msg = json.dumps({'type': 'out',
                          'msg': data.decode(errors='replace').strip()})
        for _id in self.sockets:
            self.sockets[_id].notify(msg)
        # Re-arm the read for the next stdout line.
        self.proc.stdout.read_until(b"\n", self.write_output)

    @tornado.gen.coroutine
    def write_err(self, data):
        msg = json.dumps({'type': 'err',
                          'msg': data.decode(errors='replace').strip()})
        for _id in self.sockets:
            self.sockets[_id].notify(msg)
        self.proc2.stdout.read_until(b"\n", self.write_err)
Пример #59
0
def run_command(cmd, input=None, env=None):
    """Run *cmd* through a shell, optionally feeding *input* to its stdin.

    Returns the stripped stdout text (str) on success, or the non-zero
    exit status (int) on failure.
    """
    proc = Subprocess(cmd, shell=True, env=env, stdin=Subprocess.STREAM, stdout=Subprocess.STREAM)
    if input:
        try:
            yield proc.stdin.write(input.encode())
        except StreamClosedError:
            # Apparently harmless: the child may exit before reading stdin.
            pass
    proc.stdin.close()
    out = yield proc.stdout.read_until_close()
    proc.stdout.close()
    # raise_error=False: with the default, wait_for_exit() raises
    # CalledProcessError on a non-zero status, which made the error-return
    # branch below unreachable.
    err = yield proc.wait_for_exit(raise_error=False)
    if err != 0:
        return err  # exit error?
    else:
        return out.decode().strip()