Example #1
0
    def _run(self):
        """Thread main loop: create the Writer, dispatch queued messages
        until the halt event is set, then tear everything down.

        Reads: self._database_connection, self._active_segments,
        self._completions, self._halt_event, self._message_queue,
        self._dispatch_table.  Sets: self._writer.
        """
        log = logging.getLogger("WriterThread._run")

        log.debug("thread starts")

        # Validate file-space metadata before touching any value files.
        file_space_info = load_file_space_info(self._database_connection)
        file_space_sanity_check(file_space_info, _repository_path)

        # Ticket #1646 mark output value files as closed at startup
        mark_value_files_as_closed(self._database_connection)

        self._writer = Writer(self._database_connection,
                              file_space_info,
                              _repository_path,
                              self._active_segments,
                              self._completions)

        log.debug("start halt_event loop")
        while not self._halt_event.is_set():
            try:
                message, data = self._message_queue.get(
                    block=True, timeout=_queue_timeout)
            except queue.Empty:
                # Timeout just lets us re-check the halt event.
                pass
            else:
                self._dispatch_table[message["message-type"]](message, data)
        log.debug("end halt_event loop")

        # 2012-03-27 dougfort -- we stop the data writer first because it is
        # going to sync the value file and run the post_sync operations
        log.debug("stopping data writer")
        self._writer.close()

        log.debug("closing database connection")
        self._database_connection.close()

        # Logger.warn is a deprecated alias; use warning() with lazy %-args.
        if len(self._completions) > 0:
            log.warning("%s PostSyncCompletion's lost in teardown",
                        len(self._completions))

        if len(self._active_segments) > 0:
            log.warning("%s active-segments at teardown",
                        len(self._active_segments))
Example #2
0
    def _run(self):
        """Thread main loop: create the Writer, dispatch queued messages
        until the halt event is set, then tear everything down.

        Reads: self._database_connection, self._active_segments,
        self._completions, self._halt_event, self._message_queue,
        self._dispatch_table.  Sets: self._writer.
        """
        log = logging.getLogger("WriterThread._run")

        log.debug("thread starts")

        # Validate file-space metadata before touching any value files.
        file_space_info = load_file_space_info(self._database_connection)
        file_space_sanity_check(file_space_info, _repository_path)

        # Ticket #1646 mark output value files as closed at startup
        mark_value_files_as_closed(self._database_connection)

        self._writer = Writer(self._database_connection, file_space_info,
                              _repository_path, self._active_segments,
                              self._completions)

        log.debug("start halt_event loop")
        while not self._halt_event.is_set():
            try:
                message, data = self._message_queue.get(
                    block=True, timeout=_queue_timeout)
            except queue.Empty:
                # Timeout just lets us re-check the halt event.
                pass
            else:
                self._dispatch_table[message["message-type"]](message, data)
        log.debug("end halt_event loop")

        # 2012-03-27 dougfort -- we stop the data writer first because it is
        # going to sync the value file and run the post_sync operations
        log.debug("stopping data writer")
        self._writer.close()

        log.debug("closing database connection")
        self._database_connection.close()

        # Logger.warn is a deprecated alias; use warning() with lazy %-args.
        if len(self._completions) > 0:
            log.warning("%s PostSyncCompletion's lost in teardown",
                        len(self._completions))

        if len(self._active_segments) > 0:
            log.warning("%s active-segments at teardown",
                        len(self._active_segments))
def _setup(_halt_event, state):
    """Initialize the data_writer services stored in *state*.

    Creates the event-push client, resilient server, queue dispatcher,
    database connections, Writer, and stats reporter, then returns the
    list of (callable, scheduled-time) pairs to seed the run loop.

    ``_halt_event`` is accepted for signature compatibility but unused here.
    """
    log = logging.getLogger("_setup")

    # do the event push client first, because we may need to
    # push an exception event from setup
    state["event-push-client"] = EventPushClient(state["zmq-context"],
                                                 "data_writer")

    # Lazy %-args: the message is only formatted if INFO is enabled.
    log.info("binding resilient-server to %s", _data_writer_address)
    state["resilient-server"] = ResilientServer(state["zmq-context"],
                                                _data_writer_address,
                                                state["receive-queue"])
    state["resilient-server"].register(state["pollster"])

    state["queue-dispatcher"] = DequeDispatcher(state,
                                                state["receive-queue"],
                                                _dispatch_table)

    # Release the central connection even if a row lookup raises.
    central_connection = get_central_connection()
    try:
        state["cluster-row"] = get_cluster_row(central_connection)
        state["node-rows"] = get_node_rows(central_connection,
                                           state["cluster-row"].id)
    finally:
        central_connection.close()

    state["node-id-dict"] = {node_row.name: node_row.id
                             for node_row in state["node-rows"]}

    state["database-connection"] = get_node_local_connection()

    # Ticket #1646 mark output value files as closed at startup
    mark_value_files_as_closed(state["database-connection"])

    state["writer"] = Writer(state["database-connection"], _repository_path)

    state["stats-reporter"] = StatsReporter(state)

    state["event-push-client"].info("program-start", "data_writer starts")

    # (callable, when-to-run) pairs for the caller's scheduler.
    return [
        (state["pollster"].run, time.time()),
        (state["queue-dispatcher"].run, time.time()),
        (state["stats-reporter"].run, state["stats-reporter"].next_run()),
    ]