Example #1
class PooledThread(Resource):

    def __init__(self, name, release):
        Resource.__init__(self, name)
        self._release = release
        self._ready, self._queue = Event(), InterlockedQueue()
        if __name__ == "__main__": # a much shorter idle timeout is used when the module runs stand-alone (self-test)
            self._timeout = Timeout(3.0)
        else:
            self._timeout = Timeout(60.0)
        self._count = 0

    def _expired(self):
        return self._timeout.expired or Resource._expired(self)

    def connect(self):
        Resource.connect(self)
        self._thread = LightThread(target = self._thread_proc,
                                   name = "{0:s}:?".format(self.name))
        self._thread.start()
        self._ready.wait(3.0) # this may end up waiting slightly less, but that's ok
        if not self._ready.is_set():
            self._queue.push(exit) # just in case the thread has in fact started
            raise Exception("new thread failed to start in 3.0 seconds")

    def _thread_proc(self):
        self._ready.set()
        while True: # exits upon processing of exit pushed in disconnect()
            try:
                self._count += 1
                thread_name = "{0:s}:{1:d}".format(self.name, self._count)
                current_thread().name = thread_name
                work_unit = self._queue.pop()
                work_unit()
                self._timeout.reset()
            finally:
                self._release(self) # this actually invokes ThreadPool._release

    # this method may be called by an external thread (e.g. the pool sweep)
    # or by this thread itself, and posts an exit kind of work unit

    def disconnect(self):
        try:
            if current_thread() is not self._thread:
                self._queue.push(exit)
                self._thread.join(3.0)
        finally:
            Resource.disconnect(self)

    # this method is called by the thread pool to post a work unit
    # to this thread, as well as by the thread itself at disconnect

    def push(self, work_unit):
        self._queue.push(work_unit)
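
The comments above refer to a thread pool that owns these objects: the pool passes its own _release method as the release callback and feeds work units through push(). Below is a minimal, purely illustrative sketch of that interaction; the ThreadPool class shown here is hypothetical (a real pool would need locking and more bookkeeping) and relies only on the PooledThread interface defined above.

class ThreadPool:

    def __init__(self, name):
        self._name = name
        self._idle = [] # PooledThread instances waiting for work

    def _release(self, thread): # passed to each PooledThread as its release callback
        if not thread._expired(): # an exiting thread is not re-admitted
            self._idle.append(thread) # the thread returns itself after every work unit

    def submit(self, work_unit):
        if self._idle:
            thread = self._idle.pop()
        else:
            thread = PooledThread(self._name, self._release)
            thread.connect() # starts the wrapped LightThread
        thread.push(work_unit) # the work unit executes on that thread

    def sweep(self): # threads that have been idle for too long are disconnected
        idle, self._idle = self._idle, []
        for thread in idle:
            if thread._expired():
                thread.disconnect() # posts exit and joins the wrapped thread
            else:
                self._idle.append(thread)
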
Example #2
def primary_startup(node_cage: by_regex("^[A-Za-z0-9_-]{1,32}(\\.[A-Za-z0-9_-]{1,32})?$")):

    if "." in node_cage:  # node name is specified explicitly
        node, cage = node_cage.split(".")
    else:  # node name is taken from the environment
        cage = node_cage
        node = node_name().split(".")[0]

    cage_dir = os_path.join(cages_dir, cage)  # cage directory must exist
    if not os_path.isdir(cage_dir):
        raise Exception("cage directory does not exist")

    logs_dir = os_path.join(cage_dir, "logs") # while logs directory will be created if necessary
    if not os_path.isdir(logs_dir):
        try:
            mkdir(logs_dir)
        except OSError as e:
            if e.errno != EEXIST:
                raise

    # write our own pid file; this also serves as a test that the logs directory is writable

    with open(os_path.join(logs_dir, "{0:s}.pid".format(cage)), "wb") as f:
        f.write("{0:d}".format(getpid()).encode("ascii"))

    restarting_after_failure = False  # performing normal startup by default

    while True:  # keep starting secondary startup script until it exits successfully

        def drain_stream(stream):
            try:
                while stream.read(512):
                    pass
            except:
                pass  # just exit

        # pass the same arguments to the same script, only prefixing them with dash

        startup_py = popen(python, os_path.join(pmnc_dir, "startup.py"), "-", node, cage,
                           restarting_after_failure and "FAILURE" or "NORMAL")

        # any output from the secondary script is ignored

        stdout_reader = LightThread(target=drain_stream,
                                    args=(startup_py.stdout, ))
        stdout_reader.start()

        stderr_reader = LightThread(target=drain_stream,
                                    args=(startup_py.stderr, ))
        stderr_reader.start()

        # wait for the secondary script to terminate

        while startup_py.poll() is None:
            try:
                sleep(3.0) # fails with "interrupted system call" at logoff when started as win32 service
            except:
                pass

        stdout_reader.join(3.0)  # should have exited with eof
        stderr_reader.join(3.0)  # or broken pipe, but who knows

        if startup_py.wait() != 0:
            restarting_after_failure = True  # set the flag and restart the secondary script
        else:
            break  # successful exit
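
The node_cage annotation above relies on the framework's typecheck facility (the same one used via @typecheck in the next example). What follows is only a sketch of what such a by_regex checker could look like, an assumption for illustration rather than the framework's actual implementation, together with the two argument forms the pattern accepts.

import re

def by_regex(pattern): # returns a predicate usable as a parameter annotation
    rex = re.compile(pattern)
    def check(value):
        return isinstance(value, str) and rex.match(value) is not None
    return check

# with the pattern used above, both argument forms pass the check:
# primary_startup("backup.finance") -> node "backup", cage "finance"
# primary_startup("finance")        -> node taken from node_name(), cage "finance"
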
Example #3
class AdapterHost:

    @typecheck
    def __init__(self, class_name: str, *,
                 java: os_path.isfile,
                 arguments: tuple_of(str),
                 classpath: str,
                 jndi: dict_of(str, str),
                 factory: str,
                 queue: str,
                 username: str,
                 password: str):

        # random line prefix and suffix for packet serialization

        bol = b2a_hex(urandom(8)).decode("ascii").upper()
        eol = b2a_hex(urandom(8)).decode("ascii").upper()

        # compose the executable command line

        self._args = [ java ] + list(arguments) + \
                     [ "-classpath", classpath, class_name,
                       "connection.factory={0:s}".format(factory),
                       "connection.queue={0:s}".format(queue),
                       "stdout.bol={0:s}".format(bol),
                       "stdout.eol={0:s}".format(eol) ]

        if username or password:
            self._args.append("connection.username={0:s}".format(username))
            self._args.append("connection.password={0:s}".format(password))

        self._args.extend("jndi.{0:s}={1:s}".format(*t) for t in jndi.items())

        self._bol_b = bol.encode("ascii")
        self._eol_b = eol.encode("ascii")

        # this set tracks messages that have been processed but
        # not committed on the server due to a failure

        self._processed_messages = set()

    ###################################

    # adapter input from stdin consists of a sequence of Packets

    def _stdin_writer_proc(self, stdin, stdin_queue):
        try:
            try:
                pkt = stdin_queue.pop()
                while pkt is not None: # this light thread exits upon None in the queue or exception
                    pkt.save_to_stream(stdin, 128)
                    if pkt.get("XPmncResponse") == "COMMIT":
                        message_id = pkt.pop("XPmncMessageID")
                        self._processed_messages.remove(message_id)
                    pkt = stdin_queue.pop()
            finally:
                stdin.close()
        except:
            pmnc.log.error(exc_string()) # log and ignore

    ###################################

    # adapter output from stdout consists of a sequence of Packets

    def _stdout_reader_proc(self, stdout, stdin_queue):
        try:
            try:
                try:
                    pkt = Packet.load_from_stream(stdout, self._bol_b, self._eol_b)
                    while pkt is not None: # this light thread exits upon eof or exception
                        self._stdout_queue.push(pkt)
                        pkt = Packet.load_from_stream(stdout, self._bol_b, self._eol_b)
                finally:
                    stdout.close()
            finally:
                stdin_queue.push(None) # this releases the associated stdin writer
        except:
            pmnc.log.error(exc_string()) # log and ignore

    ###################################

    # adapter output from stderr is discarded (yet it has to be read)

    def _stderr_reader_proc(self, stderr):
        try:
            try:
                while stderr.read(512): # this light thread exits only upon exception
                    pass
            finally:
                stderr.close()
        except:
            pmnc.log.error(exc_string()) # log and ignore

    ###################################

    # the adapter is considered up until the process exits

    def _adapter_running(self):

        return self._adapter.poll() is None

    ###################################

    # this method starts the adapter process, creates the handling
    # threads and waits for the adapter to report its readiness

    def _start_adapter(self, adapter_usage, adapter_name, start_timeout):

        pmnc.log.info("starting adapter process for {0:s} {1:s}".\
                      format(adapter_usage, adapter_name))

        if pmnc.log.debug:
            pmnc.log.debug("adapter process command line: {0:s}".format(" ".join(self._args)))

        self._adapter = popen(*self._args) # start the java process
        try:

            if pmnc.log.debug:
                pmnc.log.debug("adapter process (pid {0:d}) has started".\
                               format(self._adapter.pid))

            # the process has started and its initialization is underway
            # create light threads for controlling stdin/out/err, these
            # threads are not stopped explicitly but exit when the adapter
            # exits and the pipes break

            self._stdin_queue = InterlockedQueue()
            self._stdin_writer = LightThread(target = self._stdin_writer_proc,
                                             args = (self._adapter.stdin, self._stdin_queue),
                                             name = "{0:s}:stdin".format(adapter_name))
            self._stdin_writer.start()

            self._stdout_queue = InterlockedQueue()
            self._stdout_reader = LightThread(target = self._stdout_reader_proc,
                                              args = (self._adapter.stdout, self._stdin_queue),
                                              name = "{0:s}:stdout".format(adapter_name))
            self._stdout_reader.start()

            self._stderr_queue = InterlockedQueue()
            self._stderr_reader = LightThread(target = self._stderr_reader_proc,
                                              args = (self._adapter.stderr, ),
                                              name = "{0:s}:stderr".format(adapter_name))
            self._stderr_reader.start()

            # wait for the adapter to come up and report readiness

            while not start_timeout.expired:
                pkt = self._stdout_queue.pop(min(0.5, start_timeout.remain))
                if pkt is None: # see whether the adapter has exited
                    if not self._adapter_running():
                        retcode = self._adapter.wait()
                        if retcode is not None:
                            raise Exception("adapter process exited with "
                                            "retcode {0:d}".format(retcode))
                elif "XPmncError" in pkt:
                    raise Exception(pkt["XPmncError"])
                elif pkt.get("XPmncStatus") == "READY":
                    break
                else:
                    raise Exception("adapter process returned invalid status")
            else:
                raise Exception("timeout waiting for adapter process to initialize")

        except:
            self._stop_adapter(start_timeout) # what remained of start timeout is used for stopping
            raise

        pmnc.log.info("adapter process (pid {0:d}) is ready".format(self._adapter.pid))

    ###################################

    # if the adapter does not exit peacefully, it has to be killed

    def _stop_adapter(self, stop_timeout):

        while self._adapter_running() and not stop_timeout.expired:
            sleep(min(0.5, stop_timeout.remain))

        if self._adapter_running():
            pmnc.log.warning("killing runaway adapter process "
                             "(pid {0:d})".format(self._adapter.pid))
            self._adapter.kill()
        else:
            pmnc.log.info("adapter process (pid {0:d}) exited with retcode {1:d}".\
                          format(self._adapter.pid, self._adapter.wait()))
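
For context, here is a sketch of how this class might be driven. All concrete values below (paths, JNDI settings, class, queue and adapter names) are made up for illustration, and the Timeout class is assumed to behave like the one used in the earlier examples; only the AdapterHost methods themselves come from the code above.

host = AdapterHost("org.example.JmsAdapter",      # illustrative adapter class name
                   java = "/usr/bin/java",        # must be an existing file (os_path.isfile check)
                   arguments = ("-Xmx64m", ),
                   classpath = "/opt/adapter/adapter.jar",
                   jndi = { "java.naming.factory.initial": "org.example.InitialContextFactory" },
                   factory = "ConnectionFactory",
                   queue = "orders",
                   username = "",                 # empty credentials are simply not passed to the adapter
                   password = "")

host._start_adapter("interface", "jms_orders", Timeout(30.0)) # blocks until the adapter reports READY or raises
try:
    pass # Packets are exchanged through host._stdin_queue and host._stdout_queue
finally:
    host._stop_adapter(Timeout(10.0)) # waits for a graceful exit, then kills the process if still running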