Example #1
def test_shell_background_support_setsid(both_debug_modes, setsid_enabled):
    """In setsid mode, dumb-init should suspend itself and its children when it
    receives SIGTSTP, SIGTTOU, or SIGTTIN.
    """
    proc = Popen(
        ('dumb-init', sys.executable, '-m', 'tests.lib.print_signals'),
        stdout=PIPE,
    )
    match = re.match(rb'^ready \(pid: ([0-9]+)\)\n$', proc.stdout.readline())
    pid = match.group(1).decode('ascii')

    for signum in SUSPEND_SIGNALS:
        # both dumb-init and print_signals should be running or sleeping
        assert process_state(pid) in ['running', 'sleeping']
        assert process_state(proc.pid) in ['running', 'sleeping']

        # both should now suspend
        proc.send_signal(signum)

        os.waitpid(proc.pid, os.WUNTRACED)
        assert process_state(proc.pid) == 'stopped'
        assert process_state(pid) == 'stopped'

        # and then both wake up again
        proc.send_signal(SIGCONT)
        assert (
            proc.stdout.readline() == '{0}\n'.format(SIGCONT).encode('ascii')
        )
        assert process_state(pid) in ['running', 'sleeping']
        assert process_state(proc.pid) in ['running', 'sleeping']

    for pid in pid_tree(proc.pid):
        os.kill(pid, SIGKILL)
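The test above relies on os.waitpid(proc.pid, os.WUNTRACED) so the wait also returns when the child is merely stopped, not terminated. A minimal standalone sketch of that pattern, using only the standard library; the SIGSTOP/SIGKILL choreography is purely illustrative:

import os
import signal

pid = os.fork()
if pid == 0:
    # child: block until a signal arrives
    signal.pause()
    os._exit(0)

os.kill(pid, signal.SIGSTOP)               # stop the child
_, status = os.waitpid(pid, os.WUNTRACED)  # returns for stopped children too
if os.WIFSTOPPED(status):
    print('child stopped by signal', os.WSTOPSIG(status))

os.kill(pid, signal.SIGCONT)               # resume it
os.kill(pid, signal.SIGKILL)               # then terminate it
os.waitpid(pid, 0)                         # and reap it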
Example #2
def watch_server_pids(server_pids, interval=1, **kwargs):
    """Monitor a collection of server pids yeilding back those pids that
    aren't responding to signals.

    :param server_pids: a dict, lists of pids [int,...] keyed on
                        Server objects
    """
    status = {}
    start = time.time()
    end = start + interval
    server_pids = dict(server_pids)  # make a copy
    while True:
        for server, pids in server_pids.items():
            for pid in pids:
                try:
                    # let pid stop if it wants to
                    os.waitpid(pid, os.WNOHANG)
                except OSError as e:
                    if e.errno not in (errno.ECHILD, errno.ESRCH):
                        raise  # else no such child/process
            # check running pids for server
            status[server] = server.get_running_pids(**kwargs)
            for pid in pids:
                # original pids no longer in running pids!
                if pid not in status[server]:
                    yield server, pid
            # update active pids list using running_pids
            server_pids[server] = status[server]
        if not [p for server, pids in status.items() for p in pids]:
            # no more running pids
            break
        if time.time() > end:
            break
        else:
            time.sleep(0.1)
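watch_server_pids is a generator, so a caller drives it by iterating and reacting to each (server, pid) pair it yields. A hypothetical usage sketch; the servers collection and the get_running_pids method are assumed to match the interface used above:

# hypothetical caller: map each Server object to the pids it spawned
server_pids = dict((server, server.get_running_pids()) for server in servers)
for server, pid in watch_server_pids(server_pids, interval=30):
    print('%s: pid %d is no longer running' % (server, pid))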
Example #3
def do_fork_and_wait():
    # just fork a child process and wait for it
    pid = os.fork()
    if pid > 0:
        os.waitpid(pid, 0)
    else:
        os._exit(0)
Example #4
def test_shell_background_support_without_setsid(both_debug_modes, setsid_disabled):
    """In non-setsid mode, dumb-init should forward the signals SIGTSTP,
    SIGTTOU, and SIGTTIN, and then suspend itself.
    """
    proc = Popen(
        ('dumb-init', sys.executable, '-m', 'tests.lib.print_signals'),
        stdout=PIPE,
    )

    assert re.match(rb'^ready \(pid: (?:[0-9]+)\)\n$', proc.stdout.readline())

    for signum in SUSPEND_SIGNALS:
        assert process_state(proc.pid) in ['running', 'sleeping']
        proc.send_signal(signum)
        assert proc.stdout.readline() == '{0}\n'.format(signum).encode('ascii')
        os.waitpid(proc.pid, os.WUNTRACED)
        assert process_state(proc.pid) == 'stopped'

        proc.send_signal(SIGCONT)
        assert (
            proc.stdout.readline() == '{0}\n'.format(SIGCONT).encode('ascii')
        )
        assert process_state(proc.pid) in ['running', 'sleeping']

    for pid in pid_tree(proc.pid):
        os.kill(pid, SIGKILL)
Example #5
def _helper(pid, signal_, wait):
  """Simple helper to encapsulate the kill/waitpid sequence"""
  if utils_wrapper.IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
    try:
      os.waitpid(pid, os.WNOHANG)
    except OSError:
      pass
Example #6
    def on_close(self):
        if self.fd is not None:
            self.log.info('Closing fd %d' % self.fd)

        if getattr(self, 'pid', 0) == 0:
            self.log.info('pid is 0')
            return

        try:
            ioloop.remove_handler(self.fd)
        except Exception:
            self.log.error('handler removal fail', exc_info=True)

        try:
            os.close(self.fd)
        except Exception:
            self.log.debug('closing fd fail', exc_info=True)

        try:
            os.kill(self.pid, signal.SIGKILL)
            os.waitpid(self.pid, 0)
        except Exception:
            self.log.debug('waitpid fail', exc_info=True)

        TermWebSocket.terminals.remove(self)
        self.log.info('Websocket closed')

        if self.application.systemd and not len(TermWebSocket.terminals):
            self.log.info('No more terminals, exiting...')
            sys.exit(0)
Example #7
File: worker.py  Project: Kisioj/rq
 def execute_job(self, job):
     """Spawns a work horse to perform the actual work and passes it a job.
     The worker will wait for the work horse and make sure it executes
     within the given timeout bounds, or will end the work horse with
     SIGALRM.
     """
     self.set_state('busy')
     child_pid = os.fork()
     os.environ['RQ_WORKER_ID'] = self.name
     os.environ['RQ_JOB_ID'] = job.id
     if child_pid == 0:
         self.main_work_horse(job)
     else:
         self._horse_pid = child_pid
         self.procline('Forked {0} at {1}'.format(child_pid, time.time()))
         while True:
             try:
                 os.waitpid(child_pid, 0)
                 self.set_state('idle')
                 break
             except OSError as e:
                 # In case we encountered an OSError due to EINTR (which is
                 # caused by a SIGINT or SIGTERM signal during
                 # os.waitpid()), we simply ignore it and enter the next
                 # iteration of the loop, waiting for the child to end.  In
                 # any other case, this is some other unexpected OS error,
                 # which we don't want to catch, so we re-raise those ones.
                 if e.errno != errno.EINTR:
                     raise
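The retry loop above exists because, before Python 3.5, a signal delivered during os.waitpid() made the call fail with EINTR. PEP 475 changed that: the interpreter now retries the system call automatically unless the signal handler raises. A sketch of the same wait on Python 3.5+ (child_pid stands for the forked work horse):

# Python >= 3.5: os.waitpid() is restarted automatically after EINTR (PEP 475),
# so no explicit retry loop is needed.
_, status = os.waitpid(child_pid, 0)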
Example #8
    def collect_children(self):
        """Internal routine to wait for children that have exited."""
        if self.active_children is None: return
        while len(self.active_children) >= self.max_children:
            # XXX: This will wait for any child process, not just ones
            # spawned by this library. This could confuse other
            # libraries that expect to be able to wait for their own
            # children.
            try:
                pid, status = os.waitpid(0, 0)
            except os.error:
                pid = None
            if pid not in self.active_children: continue
            self.active_children.remove(pid)

        # XXX: This loop runs more system calls than it ought
        # to. There should be a way to put the active_children into a
        # process group and then use os.waitpid(-pgid) to wait for any
        # of that set, but I couldn't find a way to allocate pgids
        # that couldn't collide.
        for child in self.active_children:
            try:
                pid, status = os.waitpid(child, os.WNOHANG)
            except os.error:
                pid = None
            if not pid: continue
            try:
                self.active_children.remove(pid)
            except ValueError as e:
                raise ValueError('%s. x=%d and list=%r' % (e.message, pid,
                                                           self.active_children))
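The XXX comment above asks for a way to wait on only this library's children. One option it hints at is a dedicated process group: if every child is moved into a common group right after fork, os.waitpid(-pgid, ...) reaps only members of that group. A rough sketch under that assumption; spawn and reap_group are illustrative names, and the pgid-reuse race the comment worries about still applies if the group leader exits early:

import os

children = []

def spawn(target):
    pid = os.fork()
    if pid == 0:
        # child: join the shared group (the first child becomes its leader)
        os.setpgid(0, children[0] if children else 0)
        target()
        os._exit(0)
    # parent sets the same pgid to close the race with the child
    os.setpgid(pid, children[0] if children else pid)
    children.append(pid)
    return pid

def reap_group():
    if not children:
        return
    pgid = children[0]
    while True:
        try:
            # negative pid: wait only for children in that process group
            pid, _ = os.waitpid(-pgid, os.WNOHANG)
        except ChildProcessError:
            break
        if pid == 0:
            break
        children.remove(pid)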
Example #9
  def waitall(self, timeout=False, killem=False):
    if killem and timeout is False:
      raise Exception("Wrong arguments")

    if timeout is not False:
      assert timeout > 0, "timeout should be positive"

    if timeout:
      signal.signal(signal.SIGALRM, lambda x,y: None)
      signal.setitimer(signal.ITIMER_REAL, timeout)

    interrupted = False
    for pid in self.get_pids():
      try:
        #print("waiting", pid)
        os.waitpid(pid, 0)
        #print("pid", pid, "gone")
      except ChildProcessError:
        pass
      except InterruptedError:
        # ALARM raised
        interrupted = True
        if killem:
          self.killall(sig=signal.SIGKILL)
          return self.waitall(timeout=10)  # again wait till all processes will die
        raise Exception("timeout")
    else:
      # All stopped
      if timeout:
        signal.setitimer(signal.ITIMER_REAL, 0)  # disable timer
Example #10
 def testKill():
   #kill all processes
   coordinatorCmd('k')
   try:
     WAITFOR(lambda: getStatus()==(0, False),
             lambda:"coordinator kill command failed")
   except CheckFailed:
     global coordinator
     coordinatorCmd('q')
     os.system("kill -9 %d" % coordinator.pid)
     print "Trying to kill old coordinator, and run new one on same port"
     coordinator = runCmd(BIN+"dmtcp_coordinator")
   for x in procs:
     #cleanup proc
     try:
       if isinstance(x.stdin,int):
         os.close(x.stdin)
       elif x.stdin:
         x.stdin.close()
       if isinstance(x.stdout,int):
         os.close(x.stdout)
       elif x.stdout:
         x.stdout.close()
       if isinstance(x.stderr,int):
         os.close(x.stderr)
       elif x.stderr:
         x.stderr.close()
     except:
        pass
     try:
       os.waitpid(x.pid, os.WNOHANG)
      except OSError as e:
       if e.errno != errno.ECHILD:
         raise e
     procs.remove(x)
Example #11
def GetArticlesFromDir(directory_name):
    """ return articles made from pdf or txt files in this directory"""
    def run_in_parallel(functions, arg=None):
        """ Should run multiple functions in thread, and wait for all of them 
            arg is list of list
        """
        max_threads = 10
        for i in range(0, len(functions)//max_threads + 1):
            function_pack = functions[i * max_threads : (i + 1) * max_threads]
            arg_pack = arg[i * max_threads : (i + 1) * max_threads]
            threads = []
            index = 0
            for function in function_pack:
                if arg:
                    thread = Thread(target=function, args=arg_pack[index])
                else:
                    thread = Thread(target=function)
                thread.start()
                threads.append(thread)
                index += 1
            for thread in threads:
                thread.join()
    text_files = []
    articles = []

    def create_article_from_text (index, text):
        """ thread function, save article in articles variable at index place """
        print("thread {}, file: {}".format(index, text_files[index]))
        norm_text = NormalizeText(text)
        article = Article(index, "fic", text_files[index], text,
                          norm_text, GetFrequencyDict(norm_text))
        articles[index] = article

    cwd = os.getcwd()
    os.chdir(directory_name)
    text_files = glob.glob('*.pdf')
    text_files.extend(glob.glob('*.txt'))
    print(text_files)
    args = [] # multple texts
    article_id = 0
    for fname in text_files:
        print("classify article: {}".format(fname))
        file_name, file_extension = os.path.splitext(fname)
        if file_extension == '.pdf':
            pdftotextcommand = ['pdftotext', '-enc', 'UTF-8']
            command = pdftotextcommand + [fname, 'tmp']
            process = Popen(command, stdout=PIPE)
            os.waitpid(process.pid, 0)
            with open('tmp', 'r') as fd:
                text = fd.read()
            os.remove('tmp')
        else:
            with open(fname, 'r') as fd:
                text = fd.read()
        args.append([article_id, text])
        article_id += 1
    articles = [None] * article_id
    run_in_parallel([create_article_from_text]*article_id, args)
    os.chdir(cwd)
    return articles
Example #12
    def rpc_run(self, pcode, user, visitor):
        uid = 61018

        # localtime = time.asctime(time.localtime(time.time()))
        # userdir = '/tmp/' + (localtime[8]+localtime[17]+localtime[18]+localtime[14]+localtime[15])
        userdir = "/tmp" + "/" + user.replace("/", "9")
        if not os.path.exists(userdir):
            os.mkdir(userdir)
            os.chmod(userdir, 0o770)
        # print "Directory created " + userdir

        db = zoodb.cred_setup()
        person = db.query(zoodb.Cred).get(user)
        if not person:
            return None
        token = person.token

        (sa, sb) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        pid = os.fork()
        if pid == 0:
            if os.fork() <= 0:
                sa.close()
                ProfileAPIServer(user, visitor, token).run_sock(sb)
                sys.exit(0)
            else:
                sys.exit(0)
        sb.close()
        os.waitpid(pid, 0)

        sandbox = sandboxlib.Sandbox(userdir, uid, "/profilesvc/lockfile")
        with rpclib.RpcClient(sa) as profile_api_client:
            return sandbox.run(lambda: run_profile(pcode, profile_api_client))
Example #13
 def call_subprocess(self, cmd):
     times = datetime.datetime.now()
     # latest 14.0.4 requires "HOME" env variable to be passed
     # copy current environment variables and add "HOME" variable
     # pass the newly created environment variable to Popen subprocess
     env_home = os.environ.copy()
     env_home['HOME'] = HOME_ENV_PATH
     # stdout and stderr are redirected.
     # stderr not used (stdout validation is done, so a stderr check is
     # not needed)
     try:
         p = Popen(cmd, stdout=PIPE, \
             stderr=PIPE, shell=True, env=env_home)
         while p.poll() is None:
             gevent.sleep(0.1)
             now = datetime.datetime.now()
             diff = now - times
             if diff.seconds > 5:
                 os.kill(p.pid, signal.SIGKILL)
                 os.waitpid(-1, os.WNOHANG)
                 message = "command:" + cmd + " ---> hanged"
                 ssdlog = StorageStatsDaemonLog(message = message)
                 self.call_send(ssdlog)
                 return None
     except:
         return None
     # stdout is used
     return p.stdout.read()
Example #14
File: psh.py  Project: easoncxz/psh
def run_one_command(command):
    '''Run the given command, and wait for it.
    The process calling this function won't be exec'ed.'''
    top_pid = os.fork()
    if top_pid == 0:
        # child
        # we'll use this process to run the actual command.
        exec_one_command(command)
    else:
        # parent
        # we'll use this process to do the blocking waiting,
        # and the signal handling.
        if command[-1] == '&':
            # We were asked to run the command in background.
            # Note that the command has *already started running*.
            # We simply don't wait for it.
            job_list.add(pid=top_pid, command=command)
        else:
            # We were asked to run the command in foreground.
            # Not only do we need to wait for it,
            # we also have to handle ^Z keypresses (i.e. SIGTSTP signals)
            def sigtstp_callback(s, f):
                pass
            signal.signal(signal.SIGTSTP, sigtstp_callback)
            try:
                os.waitpid(top_pid, 0)
            except InterruptedError as ie:  # happens upon catching SIGTSTP
                jid = job_list.add(pid=top_pid, command=command)
                print('[{}]   {}'.format(jid, top_pid))
            finally:
                del sigtstp_callback
    del top_pid
Example #15
    def collect_children(self):
        """Internal routine to wait for children that have exited."""
        if self.active_children is None:
            return

        # If we're above the max number of children, wait and reap them until
        # we go back below threshold. Note that we use waitpid(-1) below to be
        # able to collect children in size(<defunct children>) syscalls instead
        # of size(<children>): the downside is that this might reap children
        # which we didn't spawn, which is why we only resort to this when we're
        # above max_children.
        while len(self.active_children) >= self.max_children:
            try:
                pid, _ = os.waitpid(-1, 0)
                self.active_children.discard(pid)
            except InterruptedError:
                pass
            except ChildProcessError:
                # we don't have any children, we're done
                self.active_children.clear()
            except OSError:
                break

        # Now reap all defunct children.
        for pid in self.active_children.copy():
            try:
                pid, _ = os.waitpid(pid, os.WNOHANG)
                # if the child hasn't exited yet, pid will be 0 and ignored by
                # discard() below
                self.active_children.discard(pid)
            except ChildProcessError:
                # someone else reaped it
                self.active_children.discard(pid)
            except OSError:
                pass
Example #16
    def runAppCallbacks(self, reason):
        """Trigger our callbacks to run now, you should add your own callback hooks
           in your own protocol imlementation to inorder to inject your application
           process identification.
        """
        if self._pid:
            import os #work around for randomly missing sigchild
            try: os.waitpid(self._pid, os.WNOHANG)
            except: pass
        if not self.deferredResult.called:
            #give the caller some context to work with
            self.logger("Process %d is no longer running" % self._pid)
            result = {
                'description': 'Application Exited',
                'code': reason.value.exitCode,
                'pid': self.PID,
                'stdout': self.STDOUT,
                'stderr': self.STDERR,
                'screen': self.OUTPUT,
                'error': False,
            }
            if result['code'] == 0:
                self.deferredResult.callback(result)
            else:
                result['description'] = reason.getErrorMessage()
                result['error'] = True
                result['stacktrace'] = reason.getTraceback()
                self.deferredResult.errback(DroneCommandFailed(result))

        return
Example #17
  def test_vtaction_dies_hard(self):
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')

    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

    # start a 'vtctl Sleep' command, don't wait for it
    action_path, _ = utils.run_vtctl(['-no-wait', 'Sleep', tablet_62344.tablet_alias, '60s'], trap_output=True)
    action_path = action_path.strip()

    # wait for the action to be 'Running', capture its pid
    timeout = 10
    while True:
      an = utils.run_vtctl_json(['ReadTabletAction', action_path])
      if an.get('State', None) == 'Running':
        pid = an['Pid']
        logging.info("Action is running with pid %u, good", pid)
        break
      timeout = utils.wait_step('sleep action to run', timeout)

    # let's kill it hard, wait until it's gone for good
    os.kill(pid, signal.SIGKILL)
    try:
      os.waitpid(pid, 0)
    except OSError:
      # this means the process doesn't exist any more, we're good
      pass

    # Then let's make sure the next action cleans up properly and can execute.
    # If that doesn't work, this will time out and the test will fail.
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])

    tablet_62344.kill_vttablet()
Example #18
def execute(command, timeout=-1):
  start_time = time.time()
  processPid = [None]
  stdoutOutput = [None]
  stderrOutput = [None]
  def target():
    process = Popen(command, stdout=PIPE, stderr=STDOUT, close_fds=True)
    processPid[0] = process.pid
    (stdoutOutput[0], stderrOutput[0]) = process.communicate()

  thread = threading.Thread(target=target)
  thread.start()
  thread.join(timeout)
  if thread.is_alive():
    # Kill Process
    try:
      os.killpg(processPid[0], signal.SIGKILL)
    except:
      pass
    os.waitpid(-1, os.WNOHANG)
    thread.join()

  elapsed_time = time.time() - start_time
  output = stdoutOutput[0]
  return (output.strip(), elapsed_time)
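The worker-thread-plus-join trick above predates timeout support in the subprocess module. On Python 3.3+ the same behaviour can be sketched with Popen.communicate(timeout=...), which raises subprocess.TimeoutExpired when the deadline passes and lets the caller kill and reap the child; command and timeout here are placeholders:

import subprocess

def execute_with_timeout(command, timeout=None):
    process = subprocess.Popen(command, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT, close_fds=True)
    try:
        output, _ = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.kill()                      # SIGKILL the child
        output, _ = process.communicate()   # drain the pipe and reap it
    return output.strip()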
Example #19
  def _stop_gerrit(cls, gerrit_instance):
    """Stops the running gerrit instance and deletes it."""
    try:
      # This should terminate the gerrit process.
      cls.check_call(['bash', gerrit_instance.gerrit_exe, 'stop'])
    finally:
      try:
        # cls.gerrit_pid should have already terminated.  If it did, then
        # os.waitpid will raise OSError.
        os.waitpid(gerrit_instance.gerrit_pid, os.WNOHANG)
      except OSError as e:
        if e.errno == errno.ECHILD:
          # If gerrit shut down cleanly, os.waitpid will land here.
          # pylint: disable=lost-exception
          return

      # If we get here, the gerrit process is still alive.  Send the process
      # SIGKILL for good measure.
      try:
        os.kill(gerrit_instance.gerrit_pid, signal.SIGKILL)
      except OSError as e:
        if e.errno == errno.ESRCH:
          # os.kill raised an error because the process doesn't exist.  Maybe
          # gerrit shut down cleanly after all.
          # pylint: disable=lost-exception
          return

      # Announce that gerrit didn't shut down cleanly.
      msg = 'Test gerrit server (pid=%d) did not shut down cleanly.' % (
          gerrit_instance.gerrit_pid)
      sys.stderr.write(msg + '\n')
Example #20
def do_build(vehicledir, opts, frame_options):
    '''build build target (e.g. sitl) in directory vehicledir'''

    if opts.build_system == 'waf':
        return do_build_waf(vehicledir, opts, frame_options)

    old_dir = os.getcwd()

    os.chdir(vehicledir)

    if opts.clean:
        run_cmd_blocking("Building clean", ["make", "clean"])

    build_target = frame_options["make_target"]
    if opts.debug:
        build_target += "-debug"

    build_cmd = ["make", "-j"+str(opts.jobs), build_target]
    progress_cmd("Building %s" % (build_target), build_cmd)

    p = subprocess.Popen(build_cmd)
    pid, sts = os.waitpid(p.pid,0)
    if sts != 0:
        progress("Build failed; cleaning and rebuilding")
        subprocess.Popen(["make", "clean"])
        p = subprocess.Popen(["make", "-j"+str(opts.jobs), build_target])
        pid, sts = os.waitpid(p.pid,0)
        if sts != 0:
            progress("Build failed")
            sys.exit(1)

    os.chdir(old_dir)
Example #21
    def execute(self, config):
        """
        Run the build task for the given config in the chroot.
        """
        build_path = os.path.join(self.path, self.config['build-path'])

        try:
            shutil.rmtree(self.path)
        except OSError:
            pass

        shutil.copytree(".", build_path)

        for (path, sha) in config['file']:
            if os.path.isabs(path):
                path = os.path.join(self.path, path[1:])
            else:
                path = os.path.join(build_path, path)

            with open(path, 'w') as location:
                with self.server.get(sha) as data:
                    chunk = 'a'
                    while len(chunk) > 0:
                        chunk = data.read(4096)
                        location.write(chunk)

        pid = os.fork()

        if pid == 0:
            os.chroot(self.path)
            os.chdir('/')
            os.execl(config['task'], config['task'])
        else:
            os.waitpid(pid, 0)
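The parent branch above waits for the chroot'd child but discards the status os.waitpid returns, so a failing task goes unnoticed. A small sketch of checking it; raising RuntimeError is just one possible reaction:

_, status = os.waitpid(pid, 0)
if os.WIFSIGNALED(status):
    raise RuntimeError('task killed by signal %d' % os.WTERMSIG(status))
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
    raise RuntimeError('task exited with status %d' % os.WEXITSTATUS(status))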
Example #22
    def test_cache_keyword(self):
        def keyword(x=1, y=1, z=1):
            CallOnceEver().update()
            return x + y + z

        def test(fh):
            result = cache( keyword, kwargs={ 'x': 1, 'y': 2, 'z': 3 } )
            fh.write(str(result == 6))
            fh.close()
            os._exit(0)

        outputs = [ tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False) ]

        for output in outputs:
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                test(output)

        expected = ['True', 'True', 'True']
        result   = []

        for output in outputs:
            output.close()

            fh = open(output.name)
            result.append(fh.read())
            fh.close()

            os.remove(output.name)

        self.assertEqual(result, expected)
Example #23
    def test_update(self):
        o = CallOnceEver()
        test = self

        def test(fh):
            Facade( o )
            result = o.update()
            fh.write(str(result == 1))
            fh.close()
            os._exit(0)

        outputs = [ tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False) ]

        for output in outputs:
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                test(output)

        expected = ['True', 'True', 'True']
        result   = []

        for output in outputs:
            output.close()

            fh = open(output.name)
            result.append(fh.read())
            fh.close()

            os.remove(output.name)

        self.assertEqual(result, expected)
Example #24
def minion_async_run(retriever, method, args):
    """
    This is a simpler invocation for minion side async usage.
    """
    # to avoid confusion of job id's (we use the same job database)
    # minion jobs contain the string "minion".  


    job_id = "%s-minion" % pprint.pformat(time.time())
    __update_status(job_id, JOB_ID_RUNNING, -1)
    pid = os.fork()
    if pid != 0:
        os.waitpid(pid, 0)
        return job_id
    else:
        # daemonize!
        os.umask(0o77)
        os.chdir('/')
        os.setsid()
        if os.fork():
            os._exit(0)

        try:
            function_ref = retriever(method)
            rc = function_ref(*args)
        except Exception as e:
            (t, v, tb) = sys.exc_info()
            rc = cm_utils.nice_exception(t,v,tb)

        __update_status(job_id, JOB_ID_FINISHED, rc)
        os._exit(0)
Example #25
def mainname(cmd):
  cmd.append("--name")
  p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
  cmd.pop()
  os.waitpid(p.pid, 0)
  lines = p.stdout.readlines()
  return lines[-1].strip()
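Reading p.stdout only after os.waitpid works here because the output is tiny; with enough output the child would block on a full pipe before the parent ever waits. A sketch of the same helper built on subprocess.check_output, which reads and waits in one call and avoids mutating cmd:

import subprocess

def mainname(cmd):
    output = subprocess.check_output(list(cmd) + ["--name"])
    return output.splitlines()[-1].strip()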
Example #26
def fork_waitfor_timed(tmp, pid, timeout):
    """
    Waits for pid until it terminates or timeout expires.
    If timeout expires, test subprocess is killed.
    """
    timer_expired = True
    poll_time = 2
    time_passed = 0
    while time_passed < timeout:
        time.sleep(poll_time)
        (child_pid, status) = os.waitpid(pid, os.WNOHANG)
        if (child_pid, status) == (0, 0):
            time_passed = time_passed + poll_time
        else:
            timer_expired = False
            break

    if timer_expired:
        logging.info('Timer expired (%d sec.), nuking pid %d', timeout, pid)
        utils.nuke_pid(pid)
        (child_pid, status) = os.waitpid(pid, 0)
        raise error.TestError("Test timeout expired, rc=%d" % (status))
    else:
        _check_for_subprocess_exception(tmp, pid)

    if status:
        raise error.TestError("Test subprocess failed rc=%d" % (status))
Example #27
def invoke_editor(s, filename="edit.txt", descr="the file"):
    from tempfile import mkdtemp
    tempdir = mkdtemp()

    from os.path import join
    full_name = join(tempdir, filename)

    outf = open(full_name, "w")
    outf.write(str(s))
    outf.close()

    import os
    if "EDITOR" in os.environ:
        from subprocess import Popen
        p = Popen([os.environ["EDITOR"], full_name])
        os.waitpid(p.pid, 0)[1]
    else:
        print("(Set the EDITOR environment variable to be "
                "dropped directly into an editor next time.)")
        input("Edit %s at %s now, then hit [Enter]:"
                % (descr, full_name))

    inf = open(full_name, "r")
    result = inf.read()
    inf.close()

    return result
Example #28
    def test_service_call_in_nested__init__(self):
        test = self

        def test(fh):
            o = Facade( cls=CallsServiceInInit )
            result = o.nested_init().methoda()
            fh.write(str(result == 'a'))
            fh.close()
            os._exit(0)

        outputs = [ tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False) ]

        for output in outputs:
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                test(output)

        expected = ['True', 'True', 'True']
        result   = []

        for output in outputs:
            output.close()

            fh = open(output.name)
            result.append(fh.read())
            fh.close()

            os.remove(output.name)

        self.assertEqual(result, expected)
Example #29
    def clientServerUploadOptions(self, options, transmitname=None):
        """Fire up a client and a server and do an upload."""
        root = '/tmp'
        home = os.path.dirname(os.path.abspath(__file__))
        filename = '100KBFILE'
        input_path = os.path.join(home, filename)
        if transmitname:
            filename = transmitname
        server = tftpy.TftpServer(root)
        client = tftpy.TftpClient('localhost',
                                  20001,
                                  options)
        # Fork a server and run the client in this process.
        child_pid = os.fork()
        if child_pid:
            # parent - let the server start
            try:
                time.sleep(1)
                client.upload(filename,
                              input_path)
            finally:
                os.kill(child_pid, 15)
                os.waitpid(child_pid, 0)

        else:
            server.listen('localhost', 20001)
Example #30
    def test_patched_cache(self):
        def mixed(x, y, z=1):
            CallOnceEver().update()
            return x + y + z

        def test(fh):
            result = baz()
            fh.write(str(result == 'baz'))
            fh.close()
            os._exit(0)

        outputs = [ tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False),
                    tempfile.NamedTemporaryFile(delete=False) ]

        for output in outputs:
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                test(output)

        expected = ['True', 'True', 'True']
        result   = []

        for output in outputs:
            output.close()

            fh = open(output.name)
            result.append(fh.read())
            fh.close()

            os.remove(output.name)

        self.assertEqual(result, expected)
Example #31
def handle_SIGCHLD(signal, frame):
    os.waitpid(-1, os.WNOHANG)
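A single os.waitpid(-1, os.WNOHANG) reaps at most one child, and several exits can be collapsed into one SIGCHLD delivery, so exited children can be left behind. A common variant loops until nothing is left to reap; a minimal sketch:

def handle_SIGCHLD(signum, frame):
    # reap every child that has already exited, without blocking
    while True:
        try:
            pid, _ = os.waitpid(-1, os.WNOHANG)
        except ChildProcessError:   # no children left at all
            break
        if pid == 0:                # children remain, but none have exited yet
            break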
Example #32
def wait(ccd, req_t, pid, fid, plg_name, sock, dbh):
    """
    waits for a plugin to terminate via os.waitpid. if a plugin terminates, the
    fd_state is updated and a MTH_TERMINATE is sent to the client.

    Note: this is not triggered, if ccd is killed via SIGKILL

    input:
        req_t   request tuple which triggered execution of plugin
        pid     id of the project in which context the plugin is executed
        fid     id of the fd database entry which holds data about the plugin's
                execution state
        plg_name    name of the plugin which is executed
        sock    socket that is used to send the MTH_TERMINATE message in order to
                inform the client about termination
        dbh     database handler in order to update plugin's execution state in
                database

    """
    # wait for plugin to terminate
    logger.debug("waiting for %d to terminate ..", pid)
    #WAIT = 1
    result = (0, 0)

    suc, sid, rid, cat, plg, gid, mth, result = req_t
    ccd._db.conn = ccddb.connect()

    try:
        result = os.waitpid(pid, 0)

    except OSError as e:
        # If no childprocess with corresponding pid is found
        # errno 10 is raised. This indicates that the child is
        # already dead.
        if e.errno == 10:
            pass
        else:
            raise e

    # get plugin state
    try:
        state = ccddb.get_fd_state(ccd._db.conn, fid)[0]
        logger.debug("plugin terminated with %s", state)
    except Exception as e:
        raise Exception("Failed to get fd status:'%s'." % str(e))

    # a plugin state -1 indicates that the fork was correctly started
    # Note: this does not cover SIGKILLs
    # of the ccd server, which lead the plugins to be killed, too.
    if state == -1:
        state = result[1]
        try:
            ccddb.set_fd_state(ccd._db.conn, fid, state)
        except Exception as e:
            logger.exception(e)

    # terminating forwarder
    try:
        logger.debug("stopping forwarder..")
        uid = ccd.get_user_and_workgroup(sid)[0].uid
        comm.close(ccd, ccd._db.conn, sid, fid, uid)
    except Exception:
        logger.error("Failed to stop forwarder thread!")

    # communicate plugin state to client
    suc = ccdlib.OP_SUCCESS
    mth = ccdlib.MTH_TERMINATE
    result = dict(code=state)
    resp_t = suc, sid, rid, cat, plg, gid, mth, result
    logger.debug("sending plugin's state to client=%s", resp_t)
    ccdlib.send(sock, resp_t)
    projectdb.close(dbh)
    ccddb.close(ccd._db.conn)
Example #33
    def reap_process(self, pid, status=None):
        """ensure that the process is killed (and not a zombie)"""
        if pid not in self.processes:
            return
        process = self.processes.pop(pid)

        timeout = 0.001

        while status is None:
            if IS_WINDOWS:
                try:
                    # On Windows we can't use waitpid as it's blocking,
                    # so we use psutils' wait
                    status = process.wait(timeout=timeout)
                except TimeoutExpired:
                    continue
            else:
                try:
                    _, status = os.waitpid(pid, os.WNOHANG)
                except OSError as e:
                    if e.errno == errno.EAGAIN:
                        time.sleep(timeout)
                        continue
                    elif e.errno == errno.ECHILD:
                        status = None
                    else:
                        raise

            if status is None:
                # nothing to do here, we do not have any child
                # process running
                # but we still need to send the "reap" signal.
                #
                # This can happen if poll() or wait() were called on
                # the underlying process.
                logger.debug('Reaping already dead process {} [{}]'.format(
                    pid, self.name))
                process.stop()
                return

        # get return code
        if hasattr(os, 'WIFSIGNALED'):
            exit_code = 0

            if os.WIFSIGNALED(status):
                # The Python Popen object returns <-signal> in its returncode
                # property if the process exited on a signal, so emulate that
                # behavior here so that pubsub clients watching for reap can
                # distinguish between an exit with a non-zero exit code and
                # a signal'd exit. This is also consistent with the notify
                # event reap message above that uses the returncode function
                # (that ends up calling Popen.returncode)
                exit_code = -os.WTERMSIG(status)
            # process exited using exit(2) system call; return the
            # integer exit(2) system call has been called with
            elif os.WIFEXITED(status):
                exit_code = os.WEXITSTATUS(status)
            else:
                # should never happen
                raise RuntimeError("Unknown process exit status")
        else:
            # On Windows we don't have such distinction
            exit_code = status

        # if the process is dead or a zombie try to definitely stop it.
        if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
            process.stop()

        logger.debug('Reaping process {} [{}]'.format(pid, self.name))
        return exit_code
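The WIFSIGNALED / WIFEXITED branching above is the usual way to turn a raw wait status into a Popen-style return code. Python 3.9 added os.waitstatus_to_exitcode(), which performs the same conversion for the exited and signalled cases; a minimal sketch of both forms, where status is whatever os.waitpid returned:

import os

def exit_code_from_status(status):
    # hand-rolled conversion, mirroring reap_process() above
    if os.WIFSIGNALED(status):
        return -os.WTERMSIG(status)
    if os.WIFEXITED(status):
        return os.WEXITSTATUS(status)
    raise RuntimeError('unknown process exit status')

# equivalent on Python >= 3.9:
# exit_code = os.waitstatus_to_exitcode(status)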
Example #34
BLOCK = DRQUEUE_FRAME + DRQUEUE_BLOCKSIZE - 1

if BLOCK > DRQUEUE_ENDFRAME:
    BLOCK = DRQUEUE_ENDFRAME

ENGINE_PATH = "lwsn"

command = ENGINE_PATH + " -3 -c " + CONFIGDIR + " -d " + PROJECTDIR + " -q " + SCENE + " " + str(
    DRQUEUE_FRAME) + " " + str(BLOCK) + " " + str(DRQUEUE_STEPFRAME)

print(command)
sys.stdout.flush()

p = subprocess.Popen(command, shell=True)
sts = os.waitpid(p.pid, 0)

# This should requeue the frame if failed
if sts[1] != 0:
    print("Requeueing frame...")
    os.kill(os.getppid(), signal.SIGINT)
    exit(1)
else:
    #if DRQUEUE_OS != "WINDOWS" then:
    # The frame was rendered properly
    # We don't know the output image name. If we knew we could set this correctly
    # chown_block RF_OWNER RD/IMAGE DRQUEUE_FRAME BLOCK

    # change userid and groupid
    #chown 1002:1004 $SCENE:h/*
    print("Finished.")
Example #35
            if not DEBUG:
                cmd = executable
            sys.stderr.write("unable to execute %r: %s\n" %
                             (cmd, e.strerror))
            os._exit(1)

        if not DEBUG:
            cmd = executable
        sys.stderr.write("unable to execute %r for unknown reasons" % cmd)
        os._exit(1)
    else:   # in the parent
        # Loop until the child either exits or is terminated by a signal
        # (ie. keep waiting if it's merely stopped)
        while 1:
            try:
                pid, status = os.waitpid(pid, 0)
            except OSError as exc:
                import errno
                if exc.errno == errno.EINTR:
                    continue
                if not DEBUG:
                    cmd = executable
                raise DistutilsExecError(
                      "command %r failed: %s" % (cmd, exc.strerror))
            if os.WIFSIGNALED(status):
                if not DEBUG:
                    cmd = executable
                raise DistutilsExecError(
                      "command %r terminated by signal %d" %
                      (cmd, os.WTERMSIG(status)))
Example #36
def handler_enq1(qdir, jset, tmpl):
    # fork in case importing template does anything crazy
    pid = os.fork()
    if pid != 0:
        while True:
            try:
                os.waitpid(pid, 0)
            except OSError:
                time.sleep(1)
                print("!! Possible error in enqueuing jobset %s..." % jset)
                continue
            return

    try:
        jdir = os.path.join(qdir, jset)
        jtpl = os.path.join(jdir, tmpl)

        if sys.version_info >= (3, 5):
            import importlib.util
            spec = importlib.util.spec_from_file_location("template", jtpl)
            mod = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(mod)
        elif sys.version_info >= (3, 3):
            from importlib.machinery import SourceFileLoader
            mod = SourceFileLoader("template", jtpl).load_module()
        else:
            raise RuntimeError("Enqueueing requires python 3.3+!")

        jobbs = set()

        for jobn, jobd in mod.generate():
            print("- %s" % jobn)
            jobf = jobn + EXT_JOB
            jobp = os.path.join(jdir, jobf)
            jobj = Job(jobn, jobp)
            save = lambda f: json.dump(jobd, f, sort_keys=True, indent=4)

            jobb = jobj.filebase(jobd['options'])
            if jobb in jobbs:
                print("-> Job would refer to duplicate data file!: %s" % jobb)
                continue
            jobbs.add(jobb)

            try:
                with open(jobp, 'x') as f:
                    save(f)
            except FileExistsError:
                try:
                    with jobj.lock() as f:
                        f.truncate()
                        save(f)
                except Job.LockedOut:
                    with open(jobp, 'r') as f:
                        if json.load(f) != jobj:
                            print("-> Can't update -- job being executed!")

    finally:
        try:  # try to replace the process with a noop (`true`)
            os.execlp('true', 'true')

        finally:  # else, fallback to regular exiting of the interpreter
            sys.exit()
Example #37
    if (pid == 0):
        cmdline = "pgbench -n -P 5 -c " + str(clients_num / coordinators_num) + \
            " -j " + str(clients_num / coordinators_num) + \
            " --max-tries 1000 -f test.pgb -T " + str(total_time) + " -h " + str(addr)
        if (debugmsg == 0):
            debugmsg = 1
            print("DEBUG: pgbench string sample: ", cmdline)
        os.system(cmdline)
        os._exit(0)

    print("pid: ", pid, ", addr: ", addr)
    pids.append(pid)

for pid in pids:
    os.waitpid(pid, 0)

# ##############################################################################
#
# Show data and test consistency markers
#
# ##############################################################################

con = psycopg2.connect(host=address[1])
cur = con.cursor()
cur.execute("SELECT (itp > 4 AND itp < 6) AS int_transfer_percentage \
	FROM (SELECT 100*sum(nit)/sum(net) AS itp FROM accounts) AS pr;")
res = cur.fetchall()[0]
print(res)
# Sum of total current value and external transfers must be equal to <accounts number> * 1000
cur.execute("SELECT sum(etransfer)+sum(value)=1000*(SELECT count(*) FROM accounts) AS check_value FROM accounts;")
Example #38
 def platformWait(self):
     return os.waitpid(self.pid, 0)
Example #39
def fork_detached_process():
    """Fork this process, creating a subprocess detached from the current context.

    Returns a :class:`pwkit.Holder` instance with information about what
    happened. Its fields are:

    whoami
      A string, either "original" or "forked" depending on which process we are.
    pipe
      An open binary file descriptor. It is readable by the original process
      and writable by the forked one. This can be used to pass information
      from the forked process to the one that launched it.
    forkedpid
      The PID of the forked process. Note that this process is *not* a child of
      the original one, so waitpid() and friends may not be used on it.

    Example::

      from pwkit import cli

      info = cli.fork_detached_process ()
      if info.whoami == 'original':
          message = info.pipe.readline ().decode ('utf-8')
          if not len (message):
              cli.die ('forked process (PID %d) appears to have died', info.forkedpid)
          info.pipe.close ()
          print ('forked process said:', message)
      else:
          info.pipe.write ('hello world'.encode ('utf-8'))
          info.pipe.close ()

    As always, the *vital* thing to understand is that immediately after a
    call to this function, you have **two** nearly-identical but **entirely
    independent** programs that are now both running simultaneously. Until you
    execute some kind of ``if`` statement, the only difference between the two
    processes is the value of the ``info.whoami`` field and whether
    ``info.pipe`` is readable or writeable.

    This function uses :func:`os.fork` twice and also calls :func:`os.setsid`
    in between the two invocations, which creates new session and process
    groups for the forked subprocess. It does *not* perform other operations
    that you might want, such as changing the current directory, dropping
    privileges, closing file descriptors, and so on. For more discussion of
    best practices when it comes to “daemonizing” processes, see (stalled)
    `PEP 3143`_.

    .. _PEP 3143: https://www.python.org/dev/peps/pep-3143/

    """
    import os, struct
    from .. import Holder
    payload = struct.Struct('L')

    info = Holder()
    readfd, writefd = os.pipe()

    pid1 = os.fork()
    if pid1 > 0:
        info.whoami = 'original'
        info.pipe = os.fdopen(readfd, 'rb')
        os.close(writefd)

        retcode = os.waitpid(pid1, 0)[1]
        if retcode:
            raise Exception('child process exited with error code %d' %
                            retcode)

        (info.forkedpid, ) = payload.unpack(info.pipe.read(payload.size))
    else:
        # We're the intermediate child process. Start new session and process
        # groups, detaching us from TTY signals and whatnot.
        os.setsid()

        pid2 = os.fork()
        if pid2 > 0:
            # We're the intermediate process; we're all done
            os._exit(0)

        # If we get here, we're the detached child process.
        info.whoami = 'forked'
        info.pipe = os.fdopen(writefd, 'wb')
        os.close(readfd)
        info.forkedpid = os.getpid()
        info.pipe.write(payload.pack(info.forkedpid))

    return info
Example #40
def main():
    import sys
    import os
    import signal
    import resource
    # -- import from shadowsockesr-v
    import logger
    import conf
    import exit
    import shell, daemon, eventloop, tcprelay, udprelay, asyncdns, common

    # get ssr configurations
    ssr_conf = shell.get_ssr_conf(is_local=False, ssr_conf_path=conf.configuration_path())

    # start daemon
    daemon.daemon_exec(ssr_conf)

    logger.info('current process open files[cmd\'ulimit -n\'] resource: soft %d hard %d' %
                 resource.getrlimit(resource.RLIMIT_NOFILE))

    # todo Multiport -> Single port
    if not ssr_conf['port_password']:
        ssr_conf['port_password'] = {}

        server_port = ssr_conf['server_port']
        if type(server_port) == list:
            for a_server_port in server_port:
                ssr_conf['port_password'][a_server_port] = ssr_conf['password']
        else:
            ssr_conf['port_password'][str(server_port)] = ssr_conf['password']

    if not ssr_conf.get('dns_ipv6', False):
        asyncdns.IPV6_CONNECTION_SUPPORT = False

    # no used
    # if config.get('manager_address', 0):
    #     logging.info('entering manager mode')
    #     manager.run(config)
    #     return

    tcp_servers = []
    udp_servers = []
    dns_resolver = asyncdns.DNSResolver()
    if int(ssr_conf['workers']) > 1:
        stat_counter_dict = None
    else:
        stat_counter_dict = {}
    port_password = ssr_conf['port_password']
    config_password = ssr_conf.get('password', 'm')
    del ssr_conf['port_password']
    for port, password_obfs in port_password.items():
        method = ssr_conf["method"]
        protocol = ssr_conf.get("protocol", 'origin')
        protocol_param = ssr_conf.get("protocol_param", '')
        obfs = ssr_conf.get("obfs", 'plain')
        obfs_param = ssr_conf.get("obfs_param", '')
        bind = ssr_conf.get("out_bind", '')
        bindv6 = ssr_conf.get("out_bindv6", '')
        if type(password_obfs) == list:
            password = password_obfs[0]
            obfs = common.to_str(password_obfs[1])
            if len(password_obfs) > 2:
                protocol = common.to_str(password_obfs[2])
        elif type(password_obfs) == dict:
            password = password_obfs.get('password', config_password)
            method = common.to_str(password_obfs.get('method', method))
            protocol = common.to_str(password_obfs.get('protocol', protocol))
            protocol_param = common.to_str(password_obfs.get('protocol_param', protocol_param))
            obfs = common.to_str(password_obfs.get('obfs', obfs))
            obfs_param = common.to_str(password_obfs.get('obfs_param', obfs_param))
            bind = password_obfs.get('out_bind', bind)
            bindv6 = password_obfs.get('out_bindv6', bindv6)
        else:
            password = password_obfs
        a_config = ssr_conf.copy()
        ipv6_ok = False
        logger.info("server start with protocol[%s] password [%s] method [%s] obfs [%s] obfs_param [%s]" %
                (protocol, password, method, obfs, obfs_param))
        if 'server_ipv6' in a_config:
            try:
                if len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][0] == "[" and a_config['server_ipv6'][-1] == "]":
                    a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
                a_config['server_port'] = int(port)
                a_config['password'] = password
                a_config['method'] = method
                a_config['protocol'] = protocol
                a_config['protocol_param'] = protocol_param
                a_config['obfs'] = obfs
                a_config['obfs_param'] = obfs_param
                a_config['out_bind'] = bind
                a_config['out_bindv6'] = bindv6
                a_config['server'] = a_config['server_ipv6']
                logger.info("starting server at [%s]:%d" %
                             (a_config['server'], int(port)))
                tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
                udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
                if a_config['server_ipv6'] == '::':
                    ipv6_ok = True
            except Exception as e:
                logger.error(e)

        try:
            a_config = ssr_conf.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            a_config['method'] = method
            a_config['protocol'] = protocol
            a_config['protocol_param'] = protocol_param
            a_config['obfs'] = obfs
            a_config['obfs_param'] = obfs_param
            a_config['out_bind'] = bind
            a_config['out_bindv6'] = bindv6
            logger.info("starting server at %s:%d" %
                         (a_config['server'], int(port)))
            tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
            udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False, stat_counter=stat_counter_dict))
        except Exception as e:
            if not ipv6_ok:
                logger.error(e)

    def run_server():
        def child_handler(signum, _):
            logger.warning('received SIGQUIT, doing graceful shutting down..')
            list(map(lambda s: s.close(next_tick=True),
                     tcp_servers + udp_servers))
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
                      child_handler)

        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)

        try:
            loop = eventloop.EventLoop()
            dns_resolver.add_to_loop(loop)
            list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))

            daemon.set_user(ssr_conf.get('user', None))
            loop.run()
        except Exception as e:
            exit.error(e)

    if int(ssr_conf['workers']) > 1:
        if os.name == 'posix':
            children = []
            is_child = False
            for i in range(0, int(ssr_conf['workers'])):
                r = os.fork()
                if r == 0:
                    logger.info('worker started')
                    is_child = True
                    run_server()
                    break
                else:
                    children.append(r)
            if not is_child:
                def handler(signum, _):
                    for pid in children:
                        try:
                            os.kill(pid, signum)
                            os.waitpid(pid, 0)
                        except OSError:  # child may already exited
                            pass
                    sys.exit()
                signal.signal(signal.SIGTERM, handler)
                signal.signal(signal.SIGQUIT, handler)
                signal.signal(signal.SIGINT, handler)

                # master
                for a_tcp_server in tcp_servers:
                    a_tcp_server.close()
                for a_udp_server in udp_servers:
                    a_udp_server.close()
                dns_resolver.close()

                for child in children:
                    os.waitpid(child, 0)
        else:
            logger.warning('worker is only available on Unix/Linux')
            run_server()
    else:
        run_server()
Example #41
def main():
    shell.check_python()

    config = shell.get_config(False)

    shell.log_shadowsocks_version()

    daemon.daemon_exec(config)

    try:
        import resource
        logging.info(
            'current process RLIMIT_NOFILE resource: soft %d hard %d' %
            resource.getrlimit(resource.RLIMIT_NOFILE))
    except ImportError:
        pass

    if config['port_password']:
        pass
    else:
        config['port_password'] = {}
        server_port = config['server_port']
        if type(server_port) == list:
            for a_server_port in server_port:
                config['port_password'][a_server_port] = config['password']
        else:
            config['port_password'][str(server_port)] = config['password']

    if not config.get('dns_ipv6', False):
        asyncdns.IPV6_CONNECTION_SUPPORT = False

    if config.get('manager_address', 0):
        logging.info('entering manager mode')
        manager.run(config)
        return

    tcp_servers = []
    udp_servers = []
    dns_resolver = asyncdns.DNSResolver()
    if int(config['workers']) > 1:
        stat_counter_dict = None
    else:
        stat_counter_dict = {}
    port_password = config['port_password']
    config_password = config.get('password', 'm')
    del config['port_password']
    for port, password_obfs in port_password.items():
        method = config["method"]
        protocol = config.get("protocol", 'origin')
        protocol_param = config.get("protocol_param", '')
        obfs = config.get("obfs", 'plain')
        obfs_param = config.get("obfs_param", '')
        bind = config.get("out_bind", '')
        bindv6 = config.get("out_bindv6", '')
        if type(password_obfs) == list:
            password = password_obfs[0]
            obfs = password_obfs[1]
            if len(password_obfs) > 2:
                protocol = password_obfs[2]
        elif type(password_obfs) == dict:
            password = password_obfs.get('password', config_password)
            method = password_obfs.get('method', method)
            protocol = password_obfs.get('protocol', protocol)
            protocol_param = password_obfs.get('protocol_param',
                                               protocol_param)
            obfs = password_obfs.get('obfs', obfs)
            obfs_param = password_obfs.get('obfs_param', obfs_param)
            bind = password_obfs.get('out_bind', bind)
            bindv6 = password_obfs.get('out_bindv6', bindv6)
        else:
            password = password_obfs
        a_config = config.copy()
        ipv6_ok = False
        logging.info(
            "server start with protocol[%s] password [%s] method [%s] obfs [%s] obfs_param [%s]"
            % (protocol, password, method, obfs, obfs_param))
        if 'server_ipv6' in a_config:
            try:
                if len(a_config['server_ipv6']
                       ) > 2 and a_config['server_ipv6'][
                           0] == "[" and a_config['server_ipv6'][-1] == "]":
                    a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
                a_config['server_port'] = int(port)
                a_config['password'] = password
                a_config['method'] = method
                a_config['protocol'] = protocol
                a_config['protocol_param'] = protocol_param
                a_config['obfs'] = obfs
                a_config['obfs_param'] = obfs_param
                a_config['out_bind'] = bind
                a_config['out_bindv6'] = bindv6
                a_config['server'] = a_config['server_ipv6']
                logging.info("starting server at [%s]:%d" %
                             (a_config['server'], int(port)))
                tcp_servers.append(
                    tcprelay.TCPRelay(a_config,
                                      dns_resolver,
                                      False,
                                      stat_counter=stat_counter_dict))
                udp_servers.append(
                    udprelay.UDPRelay(a_config,
                                      dns_resolver,
                                      False,
                                      stat_counter=stat_counter_dict))
                if a_config['server_ipv6'] == b"::":
                    ipv6_ok = True
            except Exception as e:
                shell.print_exception(e)

        try:
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            a_config['method'] = method
            a_config['protocol'] = protocol
            a_config['protocol_param'] = protocol_param
            a_config['obfs'] = obfs
            a_config['obfs_param'] = obfs_param
            a_config['out_bind'] = bind
            a_config['out_bindv6'] = bindv6
            logging.info("starting server at %s:%d" %
                         (a_config['server'], int(port)))
            tcp_servers.append(
                tcprelay.TCPRelay(a_config,
                                  dns_resolver,
                                  False,
                                  stat_counter=stat_counter_dict))
            udp_servers.append(
                udprelay.UDPRelay(a_config,
                                  dns_resolver,
                                  False,
                                  stat_counter=stat_counter_dict))
        except Exception as e:
            if not ipv6_ok:
                shell.print_exception(e)

    def run_server():
        def child_handler(signum, _):
            logging.warn('received SIGQUIT, shutting down gracefully..')
            list(
                map(lambda s: s.close(next_tick=True),
                    tcp_servers + udp_servers))

        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
                      child_handler)

        def int_handler(signum, _):
            sys.exit(1)

        signal.signal(signal.SIGINT, int_handler)

        try:
            loop = eventloop.EventLoop()
            dns_resolver.add_to_loop(loop)
            list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))

            daemon.set_user(config.get('user', None))
            loop.run()
        except Exception as e:
            shell.print_exception(e)
            sys.exit(1)

    if int(config['workers']) > 1:
        if os.name == 'posix':
            children = []
            is_child = False
            for i in range(0, int(config['workers'])):
                r = os.fork()
                if r == 0:
                    logging.info('worker started')
                    is_child = True
                    run_server()
                    break
                else:
                    children.append(r)
            if not is_child:

                def handler(signum, _):
                    for pid in children:
                        try:
                            os.kill(pid, signum)
                            os.waitpid(pid, 0)
                        except OSError:  # child may have already exited
                            pass
                    sys.exit()

                signal.signal(signal.SIGTERM, handler)
                signal.signal(signal.SIGQUIT, handler)
                signal.signal(signal.SIGINT, handler)

                # master
                for a_tcp_server in tcp_servers:
                    a_tcp_server.close()
                for a_udp_server in udp_servers:
                    a_udp_server.close()
                dns_resolver.close()

                for child in children:
                    os.waitpid(child, 0)
        else:
            logging.warn('workers are only available on Unix/Linux')
            run_server()
    else:
        run_server()
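
The worker-management logic above reduces to a small, self-contained pattern: fork N children, have the master forward termination signals to them, and reap each one with os.waitpid. The sketch below only illustrates that pattern; worker_main is a placeholder and none of the names come from the example itself.

import os
import signal
import sys
import time


def worker_main():
    # placeholder for the real worker body (the example runs an event loop here)
    time.sleep(60)


def run_master(num_workers=2):
    children = []
    for _ in range(num_workers):
        pid = os.fork()
        if pid == 0:
            worker_main()
            os._exit(0)
        children.append(pid)

    def forward(signum, _frame):
        # pass the signal on to every worker, reap it, then exit the master
        for child in children:
            try:
                os.kill(child, signum)
                os.waitpid(child, 0)
            except OSError:
                pass  # the child may have exited already
        sys.exit(0)

    for signum in (signal.SIGTERM, signal.SIGINT):
        signal.signal(signum, forward)

    for child in children:
        os.waitpid(child, 0)


if __name__ == '__main__':
    run_master()
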
Exemplo n.º 42
0
    def run_cgi(self):
        """Execute a CGI script."""
        dir, rest = self.cgi_info
        path = dir + '/' + rest
        i = path.find('/', len(dir) + 1)
        while i >= 0:
            nextdir = path[:i]
            nextrest = path[i + 1:]

            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = path.find('/', len(dir) + 1)
            else:
                break

        # find an explicit query string, if present.
        rest, _, query = rest.partition('?')

        # dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''

        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(HTTPStatus.NOT_FOUND,
                            "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(HTTPStatus.FORBIDDEN,
                            "CGI script is not a plain file (%r)" % scriptname)
            return
        ispy = self.is_python(scriptname)
        if self.have_fork or not ispy:
            if not self.is_executable(scriptfile):
                self.send_error(
                    HTTPStatus.FORBIDDEN,
                    "CGI script is not executable (%r)" % scriptname)
                return

        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = copy.deepcopy(os.environ)
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.parse.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        env['QUERY_STRING'] = query
        env['REMOTE_ADDR'] = self.client_address[0]
        authorization = self.headers.get("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    try:
                        authorization = authorization[1].encode('ascii')
                        authorization = base64.decodebytes(authorization).\
                                        decode('ascii')
                    except (binascii.Error, UnicodeError):
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.get('content-type') is None:
            env['CONTENT_TYPE'] = self.headers.get_content_type()
        else:
            env['CONTENT_TYPE'] = self.headers['content-type']
        length = self.headers.get('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        referer = self.headers.get('referer')
        if referer:
            env['HTTP_REFERER'] = referer
        accept = self.headers.get_all('accept', ())
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.get('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.get_all('cookie', []))
        cookie_str = ', '.join(co)
        if cookie_str:
            env['HTTP_COOKIE'] = cookie_str
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
            env.setdefault(k, "")

        self.send_response(HTTPStatus.OK, "Script output follows")
        self.flush_headers()

        decoded_query = query.replace('+', ' ')

        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush()  # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                exitcode = os.waitstatus_to_exitcode(sts)
                if exitcode:
                    self.log_error(f"CGI script exit code {exitcode}")
                return
            # Child
            try:
                try:
                    os.setuid(nobody)
                except OSError:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, env)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)

        else:
            # Non-Unix -- use subprocess
            import subprocess
            cmdline = [scriptfile]
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = [interp, '-u'] + cmdline
            if '=' not in query:
                cmdline.append(query)
            self.log_message("command: %s", subprocess.list2cmdline(cmdline))
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            p = subprocess.Popen(cmdline,
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 env=env)
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
            else:
                data = None
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            stdout, stderr = p.communicate(data)
            self.wfile.write(stdout)
            if stderr:
                self.log_error('%s', stderr)
            p.stderr.close()
            p.stdout.close()
            status = p.returncode
            if status:
                self.log_error("CGI script exit status %#x", status)
            else:
                self.log_message("CGI script exited OK")
def childdeath(signum, frame):
    os.waitpid(-1, os.WNOHANG)
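
The childdeath handler above reaps at most one exited child per delivery of SIGCHLD. Because SIGCHLD is not queued, a reaping handler usually loops with os.waitpid(-1, os.WNOHANG) until nothing is left; the following is a minimal sketch of that loop (the handler name is illustrative, not from the original code).

import errno
import os
import signal


def reap_children(signum, frame):
    # reap every child that has already exited; WNOHANG keeps the handler from blocking
    while True:
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
        except OSError as e:
            if e.errno == errno.ECHILD:
                return  # no children at all
            raise
        if pid == 0:
            return  # children exist, but none have exited yet


signal.signal(signal.SIGCHLD, reap_children)
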
Exemplo n.º 44
0
def runCommand(id, cmd, stdin, logger, retry_rounds=None):
    """
    Run command in non-blocking manner.
    """
    if retry_rounds is None:
        retry_rounds = 8
    # run the command
    child = popen2.Popen3(cmd, True)
    logger.log(
        logger.DEBUG, "<%d> Running command '%s', pid %d. (rounds=%d)" %
        (id, cmd, child.pid, retry_rounds))
    if (stdin):
        child.tochild.write(stdin)
    child.tochild.close()
    outfile = child.fromchild
    outfd = outfile.fileno()
    errfile = child.childerr
    errfd = errfile.fileno()
    makeNonBlocking(outfd)
    makeNonBlocking(errfd)
    outdata = errdata = ''
    outeof = erreof = 0
    for round in range(retry_rounds):
        # wait for input at most 1 second
        ready = select.select([outfd, errfd], [], [], 1.0)
        if outfd in ready[0]:
            outchunk = outfile.read()
            if outchunk == '':
                outeof = 1
            else:
                outdata += outchunk
        if errfd in ready[0]:
            errchunk = errfile.read()
            if errchunk == '':
                erreof = 1
            else:
                errdata += errchunk
        if outeof and erreof: break
        logger.log(
            logger.WARNING, "<%d> Output of command not ready, "
            "waiting (round %d)" % (id, round))
        time.sleep(0.3)  # give a little time for buffers to fill

    child.fromchild.close()
    child.childerr.close()

    status = os.waitpid(child.pid, os.WNOHANG)

    if status[0] == 0:
        time.sleep(1)
        logger.log(logger.WARNING,
                   "<%d> Child doesn't want to exit, TERM signal sent." % (id))
        os.kill(child.pid, signal.SIGTERM)
        time.sleep(1.2)  # time to exit
        status = os.waitpid(child.pid, os.WNOHANG)

        if status[0] == 0:
            logger.log(
                logger.WARNING,
                "<%d> Child doesn't want to die, KILL signal sent." % (id))
            os.kill(child.pid, signal.SIGKILL)
            time.sleep(1.2)  # time to exit
            status = os.waitpid(child.pid, os.WNOHANG)

    stat = 2  # by default assume error
    if outeof and erreof and (status[0] == child.pid) and os.WIFEXITED(
            status[1]):
        stat = os.WEXITSTATUS(status[1])

    return stat, outdata, errdata
Exemplo n.º 45
0
 def _IsEmuStillRunning(self):
     if not self._emu_process:
         return False
     return os.waitpid(self._emu_process.pid, os.WNOHANG)[0] == 0
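
The check above relies on the fact that os.waitpid(pid, os.WNOHANG) returns (0, 0) while a direct child is still running and (pid, status) once it has exited, at which point the child is also reaped. A small illustrative poll loop, assuming Python 3.9+ for os.waitstatus_to_exitcode:

import os
import time

pid = os.fork()
if pid == 0:
    time.sleep(2)  # child: pretend to do some work
    os._exit(3)

while True:
    waited_pid, status = os.waitpid(pid, os.WNOHANG)
    if waited_pid == pid:  # the child has exited and is now reaped
        print('child exited with code', os.waitstatus_to_exitcode(status))
        break
    print('child still running')
    time.sleep(0.5)
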
Exemplo n.º 46
0
    def execute_command(self, cmd, daemonize=False):

        # Most things don't need to be daemonized
        if not daemonize:
            return self.module.run_command(cmd)

        # This is complex because daemonization is hard for people.
        # What we do is daemonize a part of this module, the daemon runs the
        # command, picks up the return code and output, and returns it to the
        # main process.
        pipe = os.pipe()
        pid = os.fork()
        if pid == 0:
            os.close(pipe[0])
            # Set stdin/stdout/stderr to /dev/null
            fd = os.open(os.devnull, os.O_RDWR)
            if fd != 0:
                os.dup2(fd, 0)
            if fd != 1:
                os.dup2(fd, 1)
            if fd != 2:
                os.dup2(fd, 2)
            if fd not in (0, 1, 2):
                os.close(fd)

            # Make us a daemon. Yes, that's all it takes.
            pid = os.fork()
            if pid > 0:
                os._exit(0)
            os.setsid()
            os.chdir("/")
            pid = os.fork()
            if pid > 0:
                os._exit(0)

            # Start the command
            if isinstance(cmd, (text_type, binary_type)):
                cmd = shlex.split(cmd)
            p = subprocess.Popen(cmd,
                                 shell=False,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 preexec_fn=lambda: os.close(pipe[1]))
            stdout = ""
            stderr = ""
            fds = [p.stdout, p.stderr]
            # Wait for all output, or until the main process is dead and its output is done.
            while fds:
                rfd, wfd, efd = select.select(fds, [], fds, 1)
                if not (rfd + wfd + efd) and p.poll() is not None:
                    break
                if p.stdout in rfd:
                    dat = os.read(p.stdout.fileno(), 4096)
                    if not dat:
                        fds.remove(p.stdout)
                    stdout += dat
                if p.stderr in rfd:
                    dat = os.read(p.stderr.fileno(), 4096)
                    if not dat:
                        fds.remove(p.stderr)
                    stderr += dat
            p.wait()
            # Return a JSON blob to parent
            os.write(pipe[1], json.dumps([p.returncode, stdout, stderr]))
            os.close(pipe[1])
            os._exit(0)
        elif pid == -1:
            self.module.fail_json(msg="unable to fork")
        else:
            os.close(pipe[1])
            os.waitpid(pid, 0)
            # Wait for data from daemon process and process it.
            data = ""
            while True:
                rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
                if pipe[0] in rfd:
                    dat = os.read(pipe[0], 4096)
                    if not dat:
                        break
                    data += dat
            return json.loads(data)
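
The middle of the example is the classic daemonization dance: fork and let the parent carry on, call setsid to start a new session and drop the controlling terminal, fork again so the session leader exits, and point the standard streams at /dev/null. A minimal sketch of just that sequence, assuming the daemon's work is handed in as a callable (daemon_body is a placeholder, not part of the original module):

import os


def daemonize(daemon_body):
    pid = os.fork()
    if pid > 0:
        os.waitpid(pid, 0)  # reap the intermediate child; the daemon keeps running
        return
    os.setsid()             # new session: no controlling terminal
    if os.fork() > 0:
        os._exit(0)         # the session leader exits straight away
    os.chdir('/')
    fd = os.open(os.devnull, os.O_RDWR)
    for target in (0, 1, 2):
        os.dup2(fd, target)  # detach stdin/stdout/stderr
    if fd > 2:
        os.close(fd)
    try:
        daemon_body()
    finally:
        os._exit(0)
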
Exemplo n.º 47
0
 def wait_done(self):
     [child] = self.active_children
     os.waitpid(child, 0)
     self.active_children.clear()
Exemplo n.º 48
0
def main(args):
    opts, args = getopt.getopt(args, 'zd:n:CDs:LMt:U')
    s = None
    compress = None
    data = sys.argv[0]
    nrep = 5
    minimize = 0
    detailed = 1
    cache = None
    domain = 'AF_INET'
    threads = 1
    for o, v in opts:
        if o == '-n': nrep = int(v)
        elif o == '-d': data = v
        elif o == '-s': s = v
        elif o == '-z':
            import zlib
            compress = zlib.compress
        elif o == '-L':
            minimize = 1
        elif o == '-M':
            detailed = 0
        elif o == '-D':
            global debug
            os.environ['STUPID_LOG_FILE'] = ''
            os.environ['STUPID_LOG_SEVERITY'] = '-999'
            debug = 1
        elif o == '-C':
            cache = 'speed'
        elif o == '-U':
            domain = 'AF_UNIX'
        elif o == '-t':
            threads = int(v)

    zeo_pipe = None
    if s:
        s = __import__(s, globals(), globals(), ('__doc__', ))
        s = s.Storage
        server = None
    else:
        s, server, pid = forker.start_zeo("FileStorage", (fs_name, 1),
                                          domain=domain)

    data = open(data).read()
    db = ZODB.DB(
        s,
        # disable cache deactivation
        cache_size=4000,
        cache_deactivate_after=6000,
    )

    print "Beginning work..."
    results = {1: [], 10: [], 100: [], 1000: []}
    if threads > 1:
        import threading
        l = []
        for i in range(threads):
            t = threading.Thread(target=work,
                                 args=(db, results, nrep, compress, data,
                                       detailed, minimize, i))
            l.append(t)
        for t in l:
            t.start()
        for t in l:
            t.join()

    else:
        work(db, results, nrep, compress, data, detailed, minimize)

    if server is not None:
        server.close()
        os.waitpid(pid, 0)

    if detailed:
        print '-' * 24
    print "num\tmean\tmin\tmax"
    for r in 1, 10, 100, 1000:
        times = []
        for time, conf in results[r]:
            times.append(time)
        t = mean(times)
        print "%d\t%.4f\t%.4f\t%.4f" % (r, t, min(times), max(times))
Exemplo n.º 49
0
    def test_server_starvation(self, sendloops=15):
        recvsize = 2 * min_buf_size()
        sendsize = 10000 * recvsize

        results = [[] for i in xrange(5)]

        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(('127.0.0.1', 0))
        port = listener.getsockname()[1]
        listener.listen(50)

        base_time = time.time()

        def server(my_results):
            (sock, addr) = listener.accept()

            datasize = 0

            t1 = None
            t2 = None
            try:
                while True:
                    data = sock.recv(recvsize)
                    if not t1:
                        t1 = time.time() - base_time
                    if not data:
                        t2 = time.time() - base_time
                        my_results.append(datasize)
                        my_results.append((t1, t2))
                        break
                    datasize += len(data)
            finally:
                sock.close()

        def client():
            pid = os.fork()
            if pid:
                return pid

            client = _orig_sock.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect(('127.0.0.1', port))

            bufsized(client, size=sendsize)

            for i in range(sendloops):
                client.sendall(s2b('*') * sendsize)
            client.close()
            os._exit(0)

        clients = []
        servers = []
        for r in results:
            servers.append(spawn(server, r))
        for r in results:
            clients.append(client())

        for s in servers:
            s.wait()
        for c in clients:
            os.waitpid(c, 0)

        listener.close()

        # now test that all of the server receive intervals overlap, and
        # that there were no errors.
        for r in results:
            assert len(r) == 2, "length is %d not 2!: %s\n%s" % (len(r), r,
                                                                 results)
            assert r[0] == sendsize * sendloops
            assert len(r[1]) == 2
            assert r[1][0] is not None
            assert r[1][1] is not None

        starttimes = sorted(r[1][0] for r in results)
        endtimes = sorted(r[1][1] for r in results)
        runlengths = sorted(r[1][1] - r[1][0] for r in results)

        # assert that the last task started before the first task ended
        # (our no-starvation condition)
        assert starttimes[-1] < endtimes[
            0], "Not overlapping: starts %s ends %s" % (starttimes, endtimes)

        maxstartdiff = starttimes[-1] - starttimes[0]

        assert maxstartdiff * 2 < runlengths[
            0], "Largest difference in starting times more than twice the shortest running time!"
        assert runlengths[0] * 2 > runlengths[
            -1], "Longest runtime more than twice as long as shortest!"
Exemplo n.º 50
0
def main():
    if len(sys.argv) < 4:
        print("[CLAM Dispatcher] ERROR: Invalid syntax, use clamdispatcher.py [pythonpath] settingsmodule projectdir cmd arg1 arg2 ... got: " + " ".join(sys.argv[1:]), file=sys.stderr)
        with open('.done','w') as f:
            f.write(str(1))
        if os.path.exists('.pid'): os.unlink('.pid')
        return 1

    offset = 0
    if '/' in sys.argv[1]:
        #os.environ['PYTHONPATH'] = sys.argv[1]
        for path in sys.argv[1].split(':'):
            print("[CLAM Dispatcher] Adding to PYTHONPATH: " + path, file=sys.stderr)
            sys.path.append(path)
        offset = 1

    settingsmodule = sys.argv[1+offset]
    projectdir = sys.argv[2+offset]
    if projectdir == 'NONE': #Actions
        tmpdir = None
        projectdir = None
    elif projectdir.startswith('tmp://'): #Used for actions with a temporary dir
        tmpdir = projectdir[6:]
        projectdir = None
    else:
        if projectdir[-1] != '/':
            projectdir += '/'
        tmpdir = os.path.join(projectdir,'tmp')

    print("[CLAM Dispatcher] Started CLAM Dispatcher v" + str(VERSION) + " with " + settingsmodule + " (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ")", file=sys.stderr)

    cmd = sys.argv[3+offset]
    cmd = clam.common.data.unescapeshelloperators(cmd) #shell operators like pipes and redirects were passed in an escaped form
    for arg in sys.argv[4+offset:]:
        arg_u = clam.common.data.unescapeshelloperators(arg)
        if arg_u != arg:
            cmd += " " + arg_u #shell operator (pipe or something)
        else:
            cmd += " " + clam.common.data.shellsafe(arg,'"')


    if not cmd:
        print("[CLAM Dispatcher] FATAL ERROR: No command specified!", file=sys.stderr)
        if projectdir:
            f = open(projectdir + '.done','w')
            f.write(str(1))
            f.close()
            if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')
        return 1
    elif projectdir and not os.path.isdir(projectdir):
        print("[CLAM Dispatcher] FATAL ERROR: Project directory "+ projectdir + " does not exist", file=sys.stderr)
        return 1

    try:
        #exec("import " + settingsmodule + " as settings")
        settings = __import__(settingsmodule , globals(), locals(),0)
        try:
            if settings.CUSTOM_FORMATS:
                clam.common.data.CUSTOM_FORMATS = settings.CUSTOM_FORMATS
                print("[CLAM Dispatcher] Dependency injection for custom formats succeeded", file=sys.stderr)
        except AttributeError:
            pass
    except ImportError as e:
        print("[CLAM Dispatcher] FATAL ERROR: Unable to import settings module, settingsmodule is " + settingsmodule + ", error: " + str(e), file=sys.stderr)
        print("[CLAM Dispatcher]      hint: If you're using the development server, check you pass the path your service configuration file is in using the -P flag. For Apache integration, verify you add this path to your PYTHONPATH (can be done from the WSGI script)", file=sys.stderr)
        if projectdir:
            f = open(projectdir + '.done','w')
            f.write(str(1))
            f.close()
        return 1

    settingkeys = dir(settings)
    if not 'DISPATCHER_POLLINTERVAL' in settingkeys:
        settings.DISPATCHER_POLLINTERVAL = 30
    if not 'DISPATCHER_MAXRESMEM' in settingkeys:
        settings.DISPATCHER_MAXRESMEM = 0
    if not 'DISPATCHER_MAXTIME' in settingkeys:
        settings.DISPATCHER_MAXTIME = 0


    try:
        print("[CLAM Dispatcher] Running " + cmd, file=sys.stderr)
    except (UnicodeDecodeError, UnicodeError, UnicodeEncodeError):
        print("[CLAM Dispatcher] Running " + repr(cmd), file=sys.stderr) #unicode-issues on Python 2

    if projectdir:
        process = subprocess.Popen(cmd,cwd=projectdir, shell=True, stderr=sys.stderr)
    else:
        process = subprocess.Popen(cmd, shell=True, stderr=sys.stderr)
    begintime = datetime.datetime.now()
    if process:
        pid = process.pid
        print("[CLAM Dispatcher] Running with pid " + str(pid) + " (" + begintime.strftime('%Y-%m-%d %H:%M:%S') + ")", file=sys.stderr)
        sys.stderr.flush()
        if projectdir:
            with open(projectdir + '.pid','w') as f:
                f.write(str(pid))
    else:
        print("[CLAM Dispatcher] Unable to launch process", file=sys.stderr)
        sys.stderr.flush()
        if projectdir:
            with open(projectdir + '.done','w') as f:
                f.write(str(1))
        return 1

    #intervalf = lambda s: min(s/10.0, 15)
    abort = False
    idle = 0
    done = False
    lastpolltime = datetime.datetime.now()
    lastabortchecktime = datetime.datetime.now()

    while not done:
        d = total_seconds(datetime.datetime.now() - begintime)
        try:
            returnedpid, statuscode = os.waitpid(pid, os.WNOHANG)
            if returnedpid != 0:
                print("[CLAM Dispatcher] Process ended (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ", " + str(d)+"s) ", file=sys.stderr)
                done = True
        except OSError: #no such process
            print("[CLAM Dispatcher] Process lost! (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ", " + str(d)+"s)", file=sys.stderr)
            statuscode = 1
            done = True

        if done:
            break

        if total_seconds(datetime.datetime.now() - lastabortchecktime) >= min(10, d* 0.5):  #every 10 seconds, faster at beginning
            if projectdir and os.path.exists(projectdir + '.abort'):
                abort = True
            if abort:
                print("[CLAM Dispatcher] ABORTING PROCESS ON SIGNAL! (" + str(d)+"s)", file=sys.stderr)
                os.system("sleep 30 && kill -9 " + str(pid) + " &") #deathtrap in case the process doesn't listen within 30 seconds
                os.kill(pid, signal.SIGTERM)
                os.waitpid(pid, 0)
                if projectdir:
                    os.unlink(projectdir + '.abort')
                    f = open(projectdir + '.aborted','w')
                    f.close()
                done = True
                break
            lastabortchecktime = datetime.datetime.now()


        if d <= 1:
            idle += 0.05
            time.sleep(0.05)
        elif d <= 2:
            idle += 0.2
            time.sleep(0.2)
        elif d <= 10:
            idle += 0.5
            time.sleep(0.5)
        else:
            idle += 1
            time.sleep(1)

        if settings.DISPATCHER_MAXRESMEM > 0 and total_seconds(datetime.datetime.now() - lastpolltime) >= settings.DISPATCHER_POLLINTERVAL:
            resmem = mem(pid)
            if resmem > settings.DISPATCHER_MAXRESMEM * 1024:
                print("[CLAM Dispatcher] PROCESS EXCEEDS MAXIMUM RESIDENT MEMORY USAGE (" + str(resmem) + ' >= ' + str(settings.DISPATCHER_MAXRESMEM) + ')... ABORTING', file=sys.stderr)
                abort = True
                statuscode = 2
            lastpolltime = datetime.datetime.now()
        elif settings.DISPATCHER_MAXTIME > 0 and d > settings.DISPATCHER_MAXTIME:
            print("[CLAM Dispatcher] PROCESS TIMED OUT.. NO COMPLETION WITHIN " + str(d) + " SECONDS ... ABORTING", file=sys.stderr)
            abort = True
            statuscode = 3

    if projectdir:
        with open(projectdir + '.done','w') as f:
            f.write(str(statuscode))
        if os.path.exists(projectdir + '.pid'): os.unlink(projectdir + '.pid')

        #update project index cache
        print("[CLAM Dispatcher] Updating project index", file=sys.stderr)
        updateindex(projectdir)


    if tmpdir and os.path.exists(tmpdir):
        print("[CLAM Dispatcher] Removing temporary files", file=sys.stderr)
        for filename in os.listdir(tmpdir):
            filepath = os.path.join(tmpdir,filename)
            try:
                if os.path.isdir(filepath):
                    shutil.rmtree(filepath)
                else:
                    os.unlink(filepath)
            except: #pylint: disable=bare-except
                print("[CLAM Dispatcher] Unable to remove " + filename, file=sys.stderr)

    d = total_seconds(datetime.datetime.now() - begintime)
    if statuscode > 127:
        print("[CLAM Dispatcher] Status code out of range (" + str(statuscode) + "), setting to 127", file=sys.stderr)
        statuscode = 127
    print("[CLAM Dispatcher] Finished (" + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "), exit code " + str(statuscode) + ", dispatcher wait time " + str(idle)  + "s, duration " + str(d) + "s", file=sys.stderr)

    return statuscode
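
The abort path above leans on a shell-level "deathtrap" (sleep 30 && kill -9) to force-kill a child that ignores SIGTERM. The same escalation can be written in-process with os.waitpid and WNOHANG; a small illustrative helper, with names that are not taken from the original:

import os
import signal
import time


def terminate_child(pid, grace=10.0, poll=0.2):
    """Send SIGTERM, wait up to `grace` seconds, then SIGKILL if the child is still alive."""
    os.kill(pid, signal.SIGTERM)
    deadline = time.time() + grace
    while time.time() < deadline:
        waited_pid, status = os.waitpid(pid, os.WNOHANG)
        if waited_pid == pid:
            return status  # child exited within the grace period
        time.sleep(poll)
    os.kill(pid, signal.SIGKILL)
    return os.waitpid(pid, 0)[1]
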
Exemplo n.º 51
0
 def _on_sigchld(self, _signum, _frame):
     # iterate over a snapshot so entries can be removed safely inside the loop
     for pid, callback in list(self.callback_by_pid.items()):
         waited_pid, status = os.waitpid(pid, os.WNOHANG)
         if waited_pid:
             callback(status)
             del self.callback_by_pid[pid]
Exemplo n.º 52
0
def callback(update: Update, context: CallbackContext):
    conn = sqlite3.connect('data/DMI_DB.db')
    keyboard2 = [[]]
    icona = ""
    number_row = 0
    number_array = 0

    update.callback_query.data = update.callback_query.data.replace(
        "Drive_", "")

    if len(update.callback_query.data) < 13:  # "Accetta" (/request command)

        array_value = update['callback_query']['message']['text'].split(" ")

        try:
            if len(array_value) == 5:
                conn.execute(
                    "INSERT INTO 'Chat_id_List' VALUES (?, ?, ?, ?, ?)",
                    (update.callback_query.data, array_value[4],
                     array_value[1], array_value[2], array_value[3]))
                context.bot.sendMessage(
                    chat_id=update.callback_query.data,
                    text=
                    "🔓 La tua richiesta è stata accettata. Leggi il file README"
                )
                context.bot.sendDocument(chat_id=update.callback_query.data,
                                         document=open('data/README.pdf',
                                                       'rb'))

                request_elimination_text = "Richiesta di " + str(
                    array_value[1]) + " " + str(array_value[2]) + " estinta"
                context.bot.editMessageText(
                    text=request_elimination_text,
                    chat_id=config_map['dev_group_chatid'],
                    message_id=update.callback_query.message.message_id)

                context.bot.sendMessage(
                    chat_id=config_map['dev_group_chatid'],
                    text=str(array_value[1]) + " " +
                    str(array_value[2] +
                        str(" è stato inserito nel database")))
            elif len(array_value) == 4:
                conn.execute(
                    "INSERT INTO 'Chat_id_List'('Chat_id','Nome','Cognome','Email') VALUES (?, ?, ?, ?)",
                    (update.callback_query.data, array_value[1],
                     array_value[2], array_value[3]))
                context.bot.sendMessage(
                    chat_id=update.callback_query.data,
                    text=
                    "🔓 La tua richiesta è stata accettata. Leggi il file README"
                )
                context.bot.sendDocument(chat_id=update.callback_query.data,
                                         document=open('data/README.pdf',
                                                       'rb'))

                request_elimination_text = "Richiesta di " + str(
                    array_value[1]) + " " + str(array_value[2]) + " estinta"
                context.bot.editMessageText(
                    text=request_elimination_text,
                    chat_id=config_map['dev_group_chatid'],
                    message_id=update.callback_query.message.message_id)
            else:
                context.bot.sendMessage(
                    chat_id=config_map['dev_group_chatid'],
                    text=str("ERRORE INSERIMENTO: ") +
                    str(update['callback_query']['message']['text']) + " " +
                    str(update['callback_query']['data']))
            conn.commit()
        except Exception as error:
            print(error)
            context.bot.sendMessage(
                chat_id=config_map['dev_group_chatid'],
                text=str("ERRORE INSERIMENTO: ") +
                str(update['callback_query']['message']['text']) + " " +
                str(update['callback_query']['data']))

        text = ""

    else:
        pid = os.fork()
        if (pid == 0):
            settings_file = "config/settings.yaml"
            gauth2 = GoogleAuth(settings_file=settings_file)
            gauth2.CommandLineAuth()
            # gauth2.LocalWebserverAuth()
            drive2 = GoogleDrive(gauth2)
            bot2 = telegram.Bot(TOKEN)

            file1 = drive2.CreateFile({'id': update.callback_query.data})
            file1.FetchMetadata()
            if file1['mimeType'] == "application/vnd.google-apps.folder":
                file_list2 = None

                try:
                    istance_file = drive2.ListFile({
                        'q':
                        "'" + file1['id'] + "' in parents and trashed=false",
                        'orderBy': 'folder,title'
                    })
                    file_list2 = istance_file.GetList()
                    with open("./logs/debugDrive.txt", "a") as debugfile:
                        debugfile.write("- Log time:\n {}".format(
                            datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                        debugfile.write("- File:\n {}".format(
                            str(json.dumps(file1))))
                        debugfile.write("- IstanceFile:\n {}".format(
                            str(json.dumps(istance_file))))
                        debugfile.write("- FileList:\n {}".format(
                            str(json.dumps(file_list2))))
                        debugfile.write("\n------------\n")
                except Exception as e:
                    with open("./logs/debugDrive.txt", "a") as debugfile:
                        debugfile.write("- Log time:\n {}".format(
                            datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                        debugfile.write("- Error:\n {}".format(e))
                        debugfile.write("\n------------\n")
                    print("- Drive error: {}".format(e))
                    bot2.sendMessage(
                        chat_id=update['callback_query']['from_user']['id'],
                        text=
                        "Si è verificato un errore, ci scusiamo per il disagio. Contatta i devs. /help"
                    )
                    sys.exit(0)

                formats = {
                    **{
                        "pdf": "📕 "
                    },
                    **dict.fromkeys(["doc", "docx", "txt"], "📘 "),
                    **dict.fromkeys(["jpg", "png", "gif"], "📷 "),
                    **dict.fromkeys(["rar", "zip"], "🗄 "),
                    **dict.fromkeys(["out", "exe"], "⚙ "),
                    **dict.fromkeys([
                        "c", "cpp", "h", "py", "java", "js", "html", "php"
                    ], "💻 ")
                }

                for file2 in file_list2:
                    file2.FetchMetadata()
                    if file2[
                            'mimeType'] == "application/vnd.google-apps.folder":
                        if number_row >= 1:
                            keyboard2.append([
                                InlineKeyboardButton("🗂 " + file2['title'],
                                                     callback_data="Drive_" +
                                                     file2['id'])
                            ])
                            number_row = 0
                            number_array += 1
                        else:
                            keyboard2[number_array].append(
                                InlineKeyboardButton("🗂 " + file2['title'],
                                                     callback_data="Drive_" +
                                                     file2['id']))
                            number_row += 1
                    else:
                        file_format = file2['title'][
                            -5:]  # get last 5 characters of strings
                        file_format = file_format.split(
                            ".")  # split file_format per "."
                        file_format = file_format[
                            len(file_format) -
                            1]  # get last element of file_format

                        icona = "📄 "

                        if file_format in formats.keys():
                            icona = formats[file_format]

                        if number_row >= 1:
                            keyboard2.append([
                                InlineKeyboardButton(icona + file2['title'],
                                                     callback_data="Drive_" +
                                                     file2['id'])
                            ])
                            number_row = 0
                            number_array += 1
                        else:
                            keyboard2[number_array].append(
                                InlineKeyboardButton(icona + file2['title'],
                                                     callback_data="Drive_" +
                                                     file2['id']))
                            number_row += 1

                if len(
                        file1['parents']
                ) > 0 and file1['parents'][0]['id'] != '0ADXK_Yx5406vUk9PVA':
                    keyboard2.append([
                        InlineKeyboardButton("🔙",
                                             callback_data="Drive_" +
                                             file1['parents'][0]['id'])
                    ])

                reply_markup3 = InlineKeyboardMarkup(keyboard2)
                bot2.sendMessage(
                    chat_id=update['callback_query']['from_user']['id'],
                    text=file1['title'] + ":",
                    reply_markup=reply_markup3)

            elif file1['mimeType'] == "application/vnd.google-apps.document":
                bot2.sendMessage(
                    chat_id=update['callback_query']['from_user']['id'],
                    text=
                    "Impossibile scaricare questo file poichè esso è un google document, Andare sul seguente link"
                )
                bot2.sendMessage(
                    chat_id=update['callback_query']['from_user']['id'],
                    text=file1['exportLinks']['application/pdf'])

            else:
                try:
                    file_d = drive2.CreateFile({'id': file1['id']})
                    file_d.FetchMetadata()
                    if int(file_d['fileSize']) < 5e+7:
                        file_d.GetContentFile('file/' + file1['title'])
                        file_s = file1['title']
                        filex = open(str("file/" + file_s), "rb")
                        bot2.sendChatAction(chat_id=update['callback_query']
                                            ['from_user']['id'],
                                            action="UPLOAD_DOCUMENT")
                        bot2.sendDocument(chat_id=update['callback_query']
                                          ['from_user']['id'],
                                          document=filex)
                        os.remove(str("file/" + file_s))
                    else:
                        bot2.sendMessage(
                            chat_id=update['callback_query']['from_user']
                            ['id'],
                            text=
                            "File troppo grande per il download diretto, scarica dal seguente link"
                        )
                        # file_d['downloadUrl']
                        bot2.sendMessage(chat_id=update['callback_query']
                                         ['from_user']['id'],
                                         text=file_d['alternateLink'])
                except Exception as e:
                    print("- Drive error: {}".format(e))
                    bot2.sendMessage(
                        chat_id=update['callback_query']['from_user']['id'],
                        text=
                        "Impossibile scaricare questo file, contattare gli sviluppatori del bot"
                    )
                    open("logs/errors.txt",
                         "a+").write(str(e) + str(file_d['title']) + "\n")

            sys.exit(0)

        os.waitpid(pid, 0)
    conn.close()
Exemplo n.º 53
0
 def kill(self):
     # Killing pid1 will kill every other process in the context
     # The context itself will implode without any references,
     # basically cleaning up everything
     os.kill(self.pid, signal.SIGKILL)
     os.waitpid(self.pid, 0)
Exemplo n.º 54
0
                s = os.read(m2, 1024)
                outbuf += s
            if m1 in sel[1]:
                n = os.write(m1, outbuf)
                outbuf = outbuf[n:]
        except OSError, e:
            if e.errno == errno.EIO:
                pass
        if r in sel[0]:
            s = os.read(r, 128)
            ret = ret + s
            if len(s) == 0:
                break
    del inbuf
    del outbuf
    os.waitpid(child, 0)
    os.close(r)
    os.close(m2)
    os.close(m1)
    if os.uname()[0] == 'SunOS' or os.uname()[0] == 'NetBSD':
        os.close(s1)
    os.unlink(fifo)

    # Re-acquire the lock to cover the changes we're about to make
    # when we return to domain creation.
    domains.domains_lock.acquire()

    if dom.bootloader_pid is None:
        msg = "Domain was died while the bootloader was running."
        log.error(msg)
        raise VmError, msg
Exemplo n.º 55
0
import os
import time

ret_val = os.fork()

if ret_val:
    print('parent process')
    result = os.waitpid(-1, 0)  # block the parent until the child has been reaped
    print(result)  # (child pid, 0)
    time.sleep(5)
else:
    print('child process')
    time.sleep(10)
    print('child done')

# watch -n1 ps a   # observe the process states while this runs
Exemplo n.º 56
0
            not environ.has_key("CMSSW_BASE")) or (
                not environ.has_key("SCRAM_ARCH")):
        print "ERROR: Unable to file the release environment, please make sure you have set the cmssw environment before calling this script"
        exit(1)

    thrds = cmsRunProcessCount
    cmssw_ver = environ["CMSSW_VERSION"]
    arch = environ["SCRAM_ARCH"]
    cmssw_base = environ["CMSSW_BASE"]
    logger = LogUpdater(dirIn=cmssw_base)

    if re.match("^CMSSW_(9_([3-9]|[1-9][0-9]+)|[1-9][0-9]+)_.*$", cmssw_ver):
        p = Popen("%s/jobs/create-relval-jobs.py %s" %
                  (SCRIPT_DIR, opts.workflow),
                  shell=True)
        e = waitpid(p.pid, 0)[1]
        if e: exit(e)

        p = None
        if cmssw_ver.find('_CLANG_') != -1:
            p = Popen(
                "python %s/rv_scheduler/relval_main.py -a %s -r %s -d 7" %
                (SCRIPT_DIR, arch, cmssw_ver.rsplit('_', 1)[0]),
                shell=True)
        else:
            p = Popen(
                "cd %s/pyRelval ; %s/jobs/jobscheduler.py -M 0 -c 175 -m 85 -o time"
                % (cmssw_base, SCRIPT_DIR),
                shell=True)

        e = waitpid(p.pid, 0)[1]
Exemplo n.º 57
0
import os
import time
def add(num=1000):
    sum=0
    for i in range(1,num+1):
        sum += i
    print(sum)

if __name__ == '__main__':
    start=time.time()

    for i in [100000000,50000000,20000000,225]:
        pid = os.fork()
        if not pid:
            add(i)
            exit()
    for i in range(4):
        os.waitpid(-1,0)

    end = time.time()
    print(end - start)



Exemplo n.º 58
0
            if a[j][0] < a[i][0]:
                a[j][0] = a[i][0]
    print("failed, i = %d, %s" % (_, str(map(lambda x: x[1], a))))


if __name__ == '__main__':
    children = []
    for _ in range(4):
        # use as many children as our cpu cores to speedup
        pid = os.fork()
        if pid == 0:
            random.seed()  # otherwise all children will end up with the same a
            t1 = time.time()
            calc(100 * 1000 * 1000, 10)
            t2 = time.time()
            print("time cost:", t2 - t1)
            sys.exit(0)
        else:
            children.append(pid)

    try:
        try:
            while True:
                print(os.waitpid(-1, 0))
        except OSError:
            print("done")
    except KeyboardInterrupt:
        # kill all children or they become zombies that eat a lot cpu
        for pid in children:
            os.kill(pid, signal.SIGKILL)
Exemplo n.º 59
0
            self.pid = self.vm.gatherDom(('image/device-model-pid', int))
            log.debug("%s device model rediscovered, pid %s sentinel fifo %s",
                      name, self.pid, sentinel_path_fifo)
            self.sentinel_thread = thread.start_new_thread(
                self._sentinel_watch, ())

    def _sentinel_watch(self):
        log.info("waiting for sentinel_fifo")
        try:
            self.sentinel_fifo.read(1)
        except OSError, e:
            pass
        self.sentinel_lock.acquire()
        if self.pid:
            try:
                (p, st) = os.waitpid(self.pid, os.WNOHANG)
                if p == self.pid:
                    message = oshelp.waitstatus_description(st)
                else:
                    # obviously it is malfunctioning, kill it now
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                        message = "malfunctioning (closed sentinel), killed"
                    except:
                        message = "malfunctioning or died ?"
                message = "pid %d: %s" % (self.pid, message)
            except Exception, e:
                message = "waitpid failed: %s" % utils.exception_string(e)
            message = "device model failure: %s" % message
            try:
                message += "; see %s " % self.logfile
Exemplo n.º 60
0
def main():
    parser = argparse.ArgumentParser(
        description='unmount a filesystem in all mount namespaces')
    parser.add_argument('source',
                        metavar='SOURCE',
                        help='source (e.g., block device) to unmount')
    args = parser.parse_args()

    success = True

    namespaces = set()
    for dir in os.scandir('/proc'):
        if not dir.name.isdigit():
            continue

        mnt_ns = -1
        pid_ns = -1
        try:
            mnt_ns = os.open(os.path.join(dir.path, 'ns', 'mnt'), os.O_RDONLY)
            mnt_ns_ino = os.fstat(mnt_ns).st_ino
            if mnt_ns_ino in namespaces:
                continue

            pid_ns = os.open(os.path.join(dir.path, 'ns', 'pid'), os.O_RDONLY)
            root = os.readlink(os.path.join(dir.path, 'root'))

            # Add this after we've gotten everything we need from /proc. If it
            # failed before this, it might be because the process we were
            # looking at disappeared, so we should still try again if we find
            # another process in that mount namespace.
            namespaces.add(mnt_ns_ino)

            # setns() with a PID namespace changes the namespace that child
            # processes will be created in.
            setns(pid_ns)

            pid = os.fork()
            if pid:
                wstatus = os.waitpid(pid, 0)[1]
                if not os.WIFEXITED(wstatus) or os.WEXITSTATUS(wstatus) != 0:
                    success = False
            else:
                try:
                    setns(mnt_ns)
                    os.chroot(root)
                    os.chdir('/')
                    for mount in mounts():
                        if mount.source == args.source:
                            umount(mount.mount_point.encode())
                except Exception as e:
                    print(e, file=sys.stderr)
                    os._exit(1)
                else:
                    os._exit(0)
        except OSError as e:
            print(e, file=sys.stderr)
        finally:
            if mnt_ns != -1:
                os.close(mnt_ns)
            if pid_ns != -1:
                os.close(pid_ns)
    sys.exit(0 if success else 1)
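
The wstatus checks above only cover the normal-exit case. The status word returned by os.waitpid can also encode termination by a signal or a stop; for reference, a small illustrative decoder:

import os


def describe_wait_status(wstatus):
    # decode the raw status word returned as the second element of os.waitpid()
    if os.WIFEXITED(wstatus):
        return 'exited with code %d' % os.WEXITSTATUS(wstatus)
    if os.WIFSIGNALED(wstatus):
        return 'killed by signal %d' % os.WTERMSIG(wstatus)
    if os.WIFSTOPPED(wstatus):
        return 'stopped by signal %d' % os.WSTOPSIG(wstatus)
    return 'unrecognised status %#x' % wstatus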