Пример #1
0
    def test_WorkSources_end_work():

        # a source handed out by begin_work stays unavailable until
        # end_work is called for it, at which point a blocked waiter
        # is woken up immediately

        current_thread().stopped = lambda: False

        wss = WorkSources(2, 30.0)
        wss.add_work(1)
        assert wss.begin_work(0.0) == (False, 1)
        wss.add_work(0)
        assert wss.begin_work(0.0) == (False, 0)
        assert wss.begin_work(0.0) == (False, None)
        wss.add_work(1)  # source 1 is still checked out, so nothing is returned
        assert wss.begin_work(0.0) == (False, None)

        started = Event()
        got_work = Event()

        def waiter():
            started.set()
            assert wss.begin_work(10.0) == (False, 1)
            got_work.set()

        waiter_thread = HeavyThread(target=waiter)
        waiter_thread.start()
        started.wait()
        elapsed = Timeout(30.0)
        wss.end_work(1)  # this must wake the waiter at once
        got_work.wait()
        assert elapsed.remain > 29.0, elapsed.remain
        waiter_thread.stop()
Пример #2
0
def start():
    """Start the singleton state thread, unless bsddb3 is unavailable."""
    global _state_thread
    if _no_bssdb3:
        pmnc.log.message("module bsddb3 could not be imported, "
                         "persistent state is disabled")
        return
    with _state_thread_lock:
        if _state_thread:  # already started by a concurrent call
            return
        _state_thread = HeavyThread(target=_state_thread_proc, name="state")
        _state_thread.start()
Пример #3
0
    def start(self):
        """Spawn a separate polling thread for each configured source cage."""
        # create every poller first, then start them all, so that
        # self._pollers is fully populated before any thread runs
        self._pollers = [
            HeavyThread(target=self._poller_proc,
                        args=(source_cage,),
                        name="{0:s}:{1:s}".format(self._name, source_cage))
            for source_cage in self._source_cages
        ]
        for poller in self._pollers:
            poller.start()
Пример #4
0
    def test_cross_db_deadlock():

        fake_request(30.0)

        db1 = pmnc.state.get_database("db1")
        db2 = pmnc.state.get_queue("db2", re_len=6144)

        # two transactions touch db1 and db2 in opposite order with a
        # deliberate pause in between, forcing a deadlock; the state
        # machinery must retry so that both eventually commit

        def txn_fwd(txn):
            db1.put(b"key", b"value_1", txn)
            Timeout(3.0).wait()
            db2.append(pickle("item_1"), txn)

        def txn_rev(txn):
            db2.append(pickle("item_2"), txn)
            Timeout(3.0).wait()
            db1.put(b"key", b"value_2", txn)

        th_fwd = HeavyThread(target=lambda: pmnc.state.implicit_transaction(txn_fwd))
        th_rev = HeavyThread(target=lambda: pmnc.state.implicit_transaction(txn_rev))

        th_fwd.start()
        th_rev.start()

        th_fwd.join()
        th_rev.join()

        # now see what went through

        def read_back(txn):
            value = db1.get(b"key", None, txn)
            first = _pop(txn, db2, None, unpickle)
            second = _pop(txn, db2, None, unpickle)
            return value, first, second

        value, first, second = pmnc.state.implicit_transaction(read_back)
        assert value in (b"value_1", b"value_2")
        assert (first, second) in (("item_1", "item_2"), ("item_2", "item_1"))
Пример #5
0
    def test_WorkSources_stop_thread():

        # a thread blocked in begin_work must return (True, None) promptly
        # once the thread is stopped, rather than sitting out its timeout

        started = Event()

        def waiter():
            wss = WorkSources(1, 30.0)
            started.set()
            assert wss.begin_work(10.0) == (True, None)

        waiter_thread = HeavyThread(target=waiter)
        waiter_thread.start()
        elapsed = Timeout(30.0)
        started.wait()
        sleep(1.0)
        waiter_thread.stop()
        assert elapsed.remain > 25.0
Пример #6
0
    def test_WorkSources_signal_kick():

        # a thread blocked in begin_work must be woken as soon as work
        # is added, not when its polling timeout expires

        started = Event()
        got_work = Event()
        wss = WorkSources(1, 30.0)

        def waiter():
            started.set()
            assert wss.begin_work(10.0) == (False, 0)
            got_work.set()

        waiter_thread = HeavyThread(target=waiter)
        waiter_thread.start()
        started.wait()
        elapsed = Timeout(30.0)
        wss.add_work(0)
        got_work.wait()
        assert elapsed.remain > 29.0
        waiter_thread.stop()
Пример #7
0
def start():
    """Create and launch the global performance-monitoring thread."""
    global _perf_thread
    _perf_thread = HeavyThread(target=_perf_thread_proc, name="performance")
    _perf_thread.start()
Пример #8
0
 def start(self):
     """Spin up this instance's scheduler thread."""
     thread_name = "{0:s}:sch".format(self._name)
     self._scheduler = HeavyThread(target=self._scheduler_proc, name=thread_name)
     self._scheduler.start()
Пример #9
0
    def test_execute_success():
        # End-to-end success path: a separate caller thread issues
        # execute_reverse() while this thread plays the remote cage by
        # popping the request off _rq_queues and pushing a response
        # into _rs_queues.

        fake_request(10.0)

        request_id = None  # set by the caller thread once it knows its id
        response = None    # set by the caller thread upon completion

        def caller(*args, **kwargs):
            fake_request(6.0)
            nonlocal request_id, response
            request_id = pmnc.request.unique_id
            pmnc.request.parameters["AAA"] = "BBB"
            pmnc.request.describe("my request")
            response = pmnc.__getattr__(__name__).execute_reverse(
                "good_cage", "module", "method", args, kwargs)

        assert "good_cage" not in _rq_queues

        th = HeavyThread(target=caller, args=(1, "foo"), kwargs={"biz": "baz"})
        th.start()
        try:

            sleep(2.0)  # give the caller time to enqueue its request

            assert "good_cage" in _rq_queues
            assert request_id in _rs_queues

            req_id, req = _rq_queues["good_cage"].pop()

            assert req_id == request_id
            # the caller's request had a 6.0s deadline and ~2.0s have
            # elapsed, so roughly 4.0s should remain
            assert abs(req["request"].pop("deadline") - time() - 4.0) < 1.0

            assert req == dict \
            (
                source_cage = __cage__,
                target_cage = "good_cage",
                module = "module",
                method = "method",
                args = (1, "foo"),
                kwargs = { "biz": "baz" },
                request = dict(protocol = pmnc.request.protocol,
                               interface = pmnc.request.interface,
                               unique_id = request_id,
                               description = "my request",
                               parameters = dict(auth_tokens = {}, AAA = "BBB"),
                               log_levels = []),
            )

            # deliver the response on behalf of the "remote" cage
            _rs_queues[request_id].push({"result": "RESULT"})

            sleep(2.0)  # give the caller time to pick the response up

            assert "good_cage" in _rq_queues
            assert request_id not in _rs_queues

        finally:
            th.stop()

        assert response == "RESULT"

        # the per-cage request queue persists, but the per-request
        # response queue has been removed
        assert "good_cage" in _rq_queues
        assert request_id not in _rs_queues
Пример #10
0
def secondary_startup(node, cage, mode):
    """Run a cage process to completion.

    Sets up per-cage file logging with daily rotation, starts the cage
    lifecycle thread and a stdout termination watchdog, then blocks until
    the cage shuts down.

    mode is expected to be "NORMAL" or "FAILURE" (restart after a crash);
    it only affects the startup log message.
    """

    cage_dir = os_path.join(cages_dir, cage)
    logs_dir = os_path.join(cage_dir, "logs")

    # make the cage's private lib directory importable
    lib_dir = os_path.join(cage_dir, "lib")
    sys_path.insert(0, lib_dir)

    log_abbrevs = {
        1: "ERR",
        2: "MSG",
        3: "WRN",
        4: "LOG",
        5: "INF",
        6: "DBG",
        7: "NSE"
    }
    log_encoding = "windows-1251"
    # translation table: bytes 0-31 (except tab) become spaces, the rest pass through
    log_translate = b"         \t                      " + bytes(range(
        32, 256))
    log_lock = Lock()

    log_yyyymmdd = None  # date stamp of the currently open log file
    log_file = None      # OS-level descriptor of the current log file

    # the following function will serve for all cage's logging

    def log(message, *, msg_level):
        # Format one log line and append it to the daily log file,
        # rotating to a new file when the calendar date changes.

        nonlocal log_yyyymmdd, log_file

        line_time = time()
        line_yyyymmdd, line_hhmmss = strftime("%Y%m%d %H:%M:%S",
                                              localtime(line_time)).split(" ")
        log_line = "{0:s}.{1:02d} {2:s} [{3:s}] {4:s}".format(
            line_hhmmss,
            int(line_time * 100) % 100, log_abbrevs.get(msg_level, "???"),
            current_thread().name, message)
        with log_lock:

            if line_yyyymmdd != log_yyyymmdd:  # rotate log file
                try:
                    new_log_file_name = os_path.join(
                        logs_dir,
                        "{0:s}-{1:s}.log".format(cage, line_yyyymmdd))
                    new_log_file = fopen(
                        new_log_file_name,
                        os.O_WRONLY | os.O_CREAT | os.O_APPEND)
                except:
                    pass  # if rotation fails, previous log file will still be used
                else:
                    try:
                        close(log_file)
                    except:
                        pass  # this also catches the attempt to close None
                    log_file = new_log_file
                    log_yyyymmdd = line_yyyymmdd

            if log_file is not None:
                if message:
                    write(
                        log_file,
                        log_line.encode(log_encoding,
                                        "replace").translate(log_translate) +
                        b"\n")
                if msg_level == 1:
                    fsync(log_file)  # level 1 ("ERR") lines are flushed to disk

    ###################################

    # create loader instance using initial default logging level

    pmnc = ModuleLoader(node, cage, cage_dir, log, "LOG", 2.0, 1.0)

    ###################################

    current_thread().name = "startup"

    if mode == "NORMAL":
        log("the cage is starting up", msg_level=2)
    elif mode == "FAILURE":
        log("the cage is restarting after a failure", msg_level=2)

    ###################################

    if win32_com:
        _main_thread_id = GetCurrentThreadId()

    def cage_thread_proc():
        # Drive the cage lifecycle: start it, run periodic maintenance
        # until shutdown is signalled, then stop it.
        try:
            pmnc.startup.start()
            try:
                while not pmnc.startup.wait(3.0):
                    pmnc.startup.maintenance()
            except:
                pmnc.log.error(exc_string())  # log and ignore
            finally:
                pmnc.startup.stop()
        finally:
            if win32_com:  # release the main thread blocked in PumpMessages
                PostThreadMessage(_main_thread_id, WM_QUIT)

    cage_thread = HeavyThread(target=cage_thread_proc, name="cage")
    cage_thread.start()

    ###################################

    def termination_watchdog_proc():
        # Trigger shutdown as soon as stdout becomes unwritable
        # (NOTE(review): presumably this detects the parent process
        # going away — confirm against the launcher).
        try:
            while stdout.write("\n") > 0:
                stdout.flush()
                sleep(3.0)
        except:
            pass
        finally:
            pmnc.startup.exit()

    termination_watchdog = HeavyThread(target=termination_watchdog_proc,
                                       name="stdout")
    termination_watchdog.start()

    ###################################

    # wait for the cage thread to detect shutdown and terminate

    if win32_com:
        PumpMessages()  # in the meanwhile become a message pump

    cage_thread.join()

    ###################################

    log("the cage has been properly shut down", msg_level=2)
    log("", msg_level=1)  # force flush of a log file
Пример #11
0
 def start(self):
     """Start the probe thread (the monitor is always named "health_monitor")."""
     self._probe_thread = HeavyThread(target=self._probe_thread_proc,
                                      name="health_monitor:probe")
     self._probe_thread.start()
Пример #12
0
 def start(self):
     """Create and start this instance's listener thread."""
     listener_name = "{0:s}:lsn".format(self._name)
     self._listener = HeavyThread(target=self._listener_proc, name=listener_name)
     self._listener.start()
Пример #13
0
 def start(self):
     """Create and start this instance's maintainer thread."""
     maintainer_name = "{0:s}:mnt".format(self._name)
     self._maintainer = HeavyThread(target=self._maintainer_proc, name=maintainer_name)
     self._maintainer.start()
Пример #14
0
 def _start_threads(self):
     """Start the reader thread first, then the writer thread."""
     base_name = self._name
     self._reader = HeavyThread(target=self._reader_proc,
                                name="{0:s}/rd".format(base_name))
     self._reader.start()
     self._writer = HeavyThread(target=self._writer_proc,
                                name="{0:s}/wr".format(base_name))
     self._writer.start()