Code example #1
    def __init__(self,
                 inbox,
                 config,
                 http_client,
                 diagnostic_accumulator=None):
        self._inbox = inbox
        self._config = config
        self._http = (_http_factory(config).create_pool_manager(1, config.events_uri)
                      if http_client is None else http_client)
        self._close_http = (http_client is None)  # so we know whether to close it later
        self._disabled = False
        self._outbox = EventBuffer(config.events_max_pending)
        self._user_keys = SimpleLRUCache(config.user_keys_capacity)
        self._formatter = EventOutputFormatter(config)
        self._last_known_past_time = 0
        self._deduplicated_users = 0
        self._diagnostic_accumulator = None if config.diagnostic_opt_out else diagnostic_accumulator

        self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__,
                                              "ldclient.flush")
        self._diagnostic_flush_workers = (None if self._diagnostic_accumulator is None
                                          else FixedThreadPool(1, "ldclient.diag_flush"))
        if self._diagnostic_accumulator is not None:
            init_event = create_diagnostic_init(
                self._diagnostic_accumulator.data_since_date,
                self._diagnostic_accumulator.diagnostic_id, config)
            task = DiagnosticEventSendTask(self._http, self._config,
                                           init_event)
            self._diagnostic_flush_workers.execute(task.run)

        self._main_thread = Thread(target=self._run_main_loop)
        self._main_thread.daemon = True
        self._main_thread.start()
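
Code example #1 appears to be the constructor of the EventDispatcher shown in full in code example #3 below: it wires up the inbox it will read messages from, an HTTP pool manager (built only when the caller did not inject one, which also decides whether it gets closed on shutdown), the outbox buffer, an LRU cache used to de-duplicate user index events, and an optional diagnostic accumulator with its own single-threaded flush pool. As a rough sketch of the configuration surface this code touches, the hypothetical stand-in below lists only the attributes read directly in these snippets; the SDK's real config object carries more settings, and collaborators such as EventOutputFormatter and the HTTP factory read it as well.

# Hypothetical stand-in, not the SDK's real config class: just the attributes
# the constructors in these snippets read directly.
class StubConfig:
    events_uri = "https://events.example.com"   # placeholder endpoint handed to the pool manager
    events_max_pending = 10000                  # capacity of the outbox EventBuffer
    user_keys_capacity = 1000                   # size of the user-key LRU cache
    diagnostic_opt_out = True                   # True suppresses the diagnostic accumulator
    verify_ssl = True                           # read by the older constructors in examples #2 and #3
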
Code example #2
    def __init__(self, queue, config, http_client):
        self._queue = queue
        self._config = config
        self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) if http_client is None else http_client
        self._close_http = (http_client is None)  # so we know whether to close it later
        self._disabled = False
        self._buffer = EventBuffer(config.events_max_pending)
        self._user_keys = SimpleLRUCache(config.user_keys_capacity)
        self._formatter = EventOutputFormatter(config)
        self._last_known_past_time = 0

        self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush")

        self._main_thread = Thread(target=self._run_main_loop)
        self._main_thread.daemon = True
        self._main_thread.start()
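
Code example #2 looks like an earlier revision of the same constructor: the mailbox parameter is still named queue rather than inbox, the connection pool is built directly from config.verify_ssl instead of going through an HTTP factory keyed on config.events_uri, and there is no diagnostic-event support yet. One detail both revisions share is the ownership flag on the HTTP client; the sketch below isolates that rule (my_pool_manager is a hypothetical pre-built client standing in for something a caller or test might inject, not an SDK object).

# The dispatcher only closes an HTTP pool it created itself.
http_client = None
close_http = (http_client is None)    # True: the constructor built the pool, so shutdown will clear() it

my_pool_manager = object()            # stand-in for a pre-built pool injected by the caller
http_client = my_pool_manager
close_http = (http_client is None)    # False: the caller owns the pool, so _do_shutdown leaves it open
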
Code example #3
class EventDispatcher(object):
    def __init__(self, queue, config, http_client):
        self._queue = queue
        self._config = config
        self._http = create_http_pool_manager(num_pools=1, verify_ssl=config.verify_ssl) if http_client is None else http_client
        self._close_http = (http_client is None)  # so we know whether to close it later
        self._disabled = False
        self._buffer = EventBuffer(config.events_max_pending)
        self._user_keys = SimpleLRUCache(config.user_keys_capacity)
        self._formatter = EventOutputFormatter(config)
        self._last_known_past_time = 0

        self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush")

        self._main_thread = Thread(target=self._run_main_loop)
        self._main_thread.daemon = True
        self._main_thread.start()

    def _run_main_loop(self):
        log.info("Starting event processor")
        while True:
            try:
                message = self._queue.get(block=True)
                if message.type == 'event':
                    self._process_event(message.param)
                elif message.type == 'flush':
                    self._trigger_flush()
                elif message.type == 'flush_users':
                    self._user_keys.clear()
                elif message.type == 'test_sync':
                    self._flush_workers.wait()
                    message.param.set()
                elif message.type == 'stop':
                    self._do_shutdown()
                    message.param.set()
                    return
            except Exception:
                log.error('Unhandled exception in event processor', exc_info=True)
    
    def _process_event(self, event):
        if self._disabled:
            return

        # Always record the event in the summarizer.
        self._buffer.add_to_summary(event)

        # Decide whether to add the event to the payload. Feature events may be added twice, once for
        # the event (if tracked) and once for debugging.
        add_full_event = False
        add_debug_event = False
        add_index_event = False
        if event['kind'] == "feature":
            add_full_event = event['trackEvents']
            add_debug_event = self._should_debug_event(event)
        else:
            add_full_event = True

        # For each user we haven't seen before, we add an index event - unless this is already
        # an identify event for that user.
        if not (add_full_event and self._config.inline_users_in_events):
            user = event.get('user')
            if user and not self.notice_user(user):
                if event['kind'] != 'identify':
                    add_index_event = True

        if add_index_event:
            ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user }
            self._buffer.add_event(ie)
        if add_full_event:
            self._buffer.add_event(event)
        if add_debug_event:
            debug_event = event.copy()
            debug_event['debug'] = True
            self._buffer.add_event(debug_event)

    # Add to the set of users we've noticed, and return true if the user was already known to us.
    def notice_user(self, user):
        if user is None or 'key' not in user:
            return False
        key = user['key']
        return self._user_keys.put(key, True)

    def _should_debug_event(self, event):
        debug_until = event.get('debugEventsUntilDate')
        if debug_until is not None:
            last_past = self._last_known_past_time
            now = int(time.time() * 1000)
            if debug_until > last_past and debug_until > now:
                return True
        return False

    def _trigger_flush(self):
        if self._disabled:
            return
        payload = self._buffer.get_payload()
        if len(payload.events) > 0 or len(payload.summary.counters) > 0:
            task = EventPayloadSendTask(self._http, self._config, self._formatter, payload,
                self._handle_response)
            if self._flush_workers.execute(task.run):
                # The events have been handed off to a flush worker; clear them from our buffer.
                self._buffer.clear()
            else:
                # We're already at our limit of concurrent flushes; leave the events in the buffer.
                pass

    def _handle_response(self, r):
        server_date_str = r.getheader('Date')
        if server_date_str is not None:
            server_date = parsedate(server_date_str)
            if server_date is not None:
                timestamp = int(time.mktime(server_date) * 1000)
                self._last_known_past_time = timestamp
        if r.status > 299:
            log.error(http_error_message(r.status, "event delivery", "some events were dropped"))
            if not is_http_error_recoverable(r.status):
                self._disabled = True
                return

    def _do_shutdown(self):
        self._flush_workers.stop()
        self._flush_workers.wait()
        if self._close_http:
            self._http.clear()
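
The main loop in code example #3 only ever looks at two attributes of each message it dequeues, type and param. As a hypothetical illustration of that protocol (the real SDK has its own message class and drives the dispatcher from its own event processor; the names below are stand-ins), a caller holding the queue that was passed to the constructor could exercise each message type like this:

# Hypothetical driver for _run_main_loop. Assume dispatcher_queue is the Queue
# instance handed to the EventDispatcher constructor; Message is a stand-in for
# the SDK's own message class and carries only the two attributes the loop reads.
import threading
from collections import namedtuple

Message = namedtuple('Message', ['type', 'param'])

# Minimal illustrative event; real events carry more fields.
dispatcher_queue.put(Message('event', {'kind': 'identify', 'creationDate': 1546300800000,
                                       'user': {'key': 'user-1'}}))  # buffered for the next flush
dispatcher_queue.put(Message('flush', None))        # asks a flush worker to send the buffered payload
dispatcher_queue.put(Message('flush_users', None))  # clears the user-key dedup cache

stopped = threading.Event()
dispatcher_queue.put(Message('stop', stopped))      # stops the flush workers, then sets the event
stopped.wait()
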