Пример #1
0
def test_submission_namespace(datastore, sio):
    """Ensure the /submissions socketio namespace relays every submission
    lifecycle event (ingested/received/queued/started) to subscribed clients."""
    submission_queue = CommsQueue('submissions', private=True)
    monitoring = get_random_id()

    test_res_array = []

    @sio.on('monitoring', namespace='/submissions')
    def on_monitoring(data):
        # Confirmation that we are waiting for status messages
        test_res_array.append(('on_monitoring', data == monitoring))

    # One randomized message per lifecycle event, each paired with a handler
    # that records whether the relayed payload matches what was published.
    event_specs = [
        ("SubmissionIngested", 'on_submission_ingested'),
        ("SubmissionReceived", 'on_submission_received'),
        ("SubmissionQueued", 'on_submission_queued'),
        ("SubmissionStarted", 'on_submission_started'),
    ]
    messages = []
    for event_name, label in event_specs:
        message = random_model_obj(SubmissionMessage).as_primitives()
        message['msg_type'] = event_name
        messages.append(message)

        # Bind label/message as defaults to avoid late-binding closures.
        def handler(data, _label=label, _expected=message):
            test_res_array.append((_label, data == _expected['msg']))

        sio.on(event_name, namespace='/submissions')(handler)

    try:
        sio.emit('monitor', monitoring, namespace='/submissions')
        sio.sleep(1)

        for message in messages:
            submission_queue.publish(message)

        # Wait (up to 5s) for the monitoring ack plus the four events.
        deadline = time.time() + 5
        while len(test_res_array) < 5 and time.time() < deadline:
            sio.sleep(0.1)

        assert len(test_res_array) == 5

        for test, result in test_res_array:
            if not result:
                pytest.fail(f"{test} failed.")
    finally:
        sio.disconnect()
Пример #2
0
def save_alert(datastore, counter, logger, alert, psid):
    """Persist *alert* — updating in place when *psid* is set, creating it
    otherwise (or when the update target is missing) — then broadcast the
    outcome on the 'alerts' comms queue.

    Returns:
        'update' when an existing alert was updated, 'create' otherwise.
    """
    def _create():
        # Fresh alert: write it to the datastore and count the creation.
        datastore.alert.save(alert['alert_id'], alert)
        logger.info(f"Alert {alert['alert_id']} has been created.")
        counter.increment('created')
        return "AlertCreated", 'create'

    if not psid:
        msg_type, ret_val = _create()
    else:
        try:
            perform_alert_update(datastore, logger, alert)
            counter.increment('updated')
            msg_type, ret_val = "AlertUpdated", 'update'
        except AlertMissingError as e:
            # Update target doesn't exist — fall back to creating it.
            logger.info(
                f"{str(e)}. Creating a new alert [{alert['alert_id']}]...")
            msg_type, ret_val = _create()

    CommsQueue('alerts').publish(AlertMessage({
        "msg": alert,
        "msg_type": msg_type,
        "sender": "alerter"
    }).as_primitives())
    return ret_val
    def __init__(self, config=None):
        """Set up the heartbeat manager's datastore, queues, counters and
        optional APM instrumentation.

        Args:
            config: Optional pre-loaded Assemblyline config; loaded via forge
                when omitted.
        """
        super().__init__('assemblyline.heartbeat_manager')
        self.config = config or forge.get_config()
        self.datastore = forge.get_datastore()
        self.metrics_queue = CommsQueue(METRICS_QUEUE)
        self.scheduler = BackgroundScheduler(daemon=True)
        self.hm = HeartbeatFormatter("heartbeat_manager",
                                     self.log,
                                     config=self.config)

        # Counters are shared with scheduled jobs, hence the lock.
        self.counters_lock = Lock()
        self.counters = {}
        self.rolling_window = {}
        self.window_ttl = {}
        # Entries live for two export intervals before being considered stale.
        self.ttl = self.config.core.metrics.export_interval * 2
        # Number of export intervals that make up one minute of history.
        self.window_size = int(60 / self.config.core.metrics.export_interval)
        if self.window_size != 60 / self.config.core.metrics.export_interval:
            # The export interval does not divide a minute evenly, so any
            # per-minute numbers derived from the window will be off.
            self.log.warning(
                "Cannot calculate a proper window size for reporting heartbeats. "
                "Metrics reported during hearbeat will be wrong.")

        # Only instrument with elastic APM when a server URL is configured.
        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(
                f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}"
            )
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(
                server_url=self.config.core.metrics.apm_server.server_url,
                service_name="heartbeat_manager")
        else:
            self.apm_client = None
Пример #4
0
def test_comms_queue(redis_connection):
    """Publish mixed-type messages on one connection and read them back, in
    order, on another subscribed to the same comms queue."""
    if not redis_connection:
        # No redis available — nothing to exercise.
        return

    from assemblyline.remote.datatypes.queues.comms import CommsQueue

    msg_list = ["bob", 1, {"bob": 1}, [1, 2, 3], None, "Nice!", "stop"]

    def publish_messages(message_list):
        # Small delay so the listener has time to subscribe first.
        time.sleep(0.1)
        with CommsQueue('test-comms-queue') as cq_p:
            for message in message_list:
                cq_p.publish(message)

    t = Thread(target=publish_messages, args=(msg_list,))
    t.start()

    with CommsQueue('test-comms-queue') as cq:
        expected = iter(msg_list)
        for msg in cq.listen():
            if msg == "stop":
                break
            # Messages must arrive in publish order.
            assert msg == next(expected)

    t.join()
    assert not t.is_alive()
Пример #5
0
def test_alert_created(datastore, client):
    """Check the client relays AlertCreated/AlertUpdated messages to their
    callbacks.

    Bug fix: the callbacks compared against ``created['msg']`` /
    ``updated['msg']``, but ``created``/``updated`` are odm model objects
    (built with ``random_model_obj``) that are published via
    ``as_primitives()`` — the sibling namespace test compares against
    ``as_primitives()['msg']``. Compare against the primitive payload, which
    is what actually goes over the wire. Also joins the publisher thread so
    it cannot outlive the test.
    """
    alert_queue = CommsQueue('alerts', private=True)

    created = random_model_obj(AlertMessage)
    created.msg_type = "AlertCreated"

    updated = random_model_obj(AlertMessage)
    updated.msg_type = "AlertUpdated"

    # Primitive forms: published on the queue and received by the callbacks.
    created_data = created.as_primitives()
    updated_data = updated.as_primitives()

    test_res_array = []

    def alerter_created_callback(data):
        test_res_array.append(('created', created_data['msg'] == data))

    def alerter_updated_callback(data):
        test_res_array.append(('updated', updated_data['msg'] == data))

    def publish_thread():
        # Let the listener subscribe before publishing.
        time.sleep(1)
        alert_queue.publish(created_data)
        alert_queue.publish(updated_data)

    publisher = threading.Thread(target=publish_thread)
    publisher.start()
    client.socketio.listen_on_alerts_messages(
        alert_created_callback=alerter_created_callback,
        alert_updated_callback=alerter_updated_callback,
        timeout=2)
    publisher.join()

    assert len(test_res_array) == 2

    for test, result in test_res_array:
        if not result:
            pytest.fail("{} failed.".format(test))
    def __init__(self, sender, log, config=None, redis=None):
        """Wire up redis clients, queues and config-derived constants used to
        build heartbeat/status reports.

        Args:
            sender: Name identifying this component in emitted messages.
            log: Logger this instance reports through.
            config: Optional pre-loaded Assemblyline config; loaded via forge
                when omitted.
            redis: Optional non-persistent redis client; created from config
                when omitted.
        """
        self.sender = sender
        self.log = log

        self.config = config or forge.get_config()
        self.datastore = forge.get_datastore(self.config)

        # Volatile state lives on the non-persistent redis instance...
        self.redis = redis or get_client(
            host=self.config.core.redis.nonpersistent.host,
            port=self.config.core.redis.nonpersistent.port,
            private=False,
        )
        # ...while queues that must survive restarts use the persistent one.
        self.redis_persist = get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )
        self.status_queue = CommsQueue(STATUS_QUEUE, self.redis)
        self.dispatch_active_hash = Hash(DISPATCH_TASK_HASH,
                                         self.redis_persist)
        self.dispatcher_submission_queue = NamedQueue(SUBMISSION_QUEUE,
                                                      self.redis)
        self.ingest_scanning = Hash('m-scanning-table', self.redis_persist)
        self.ingest_unique_queue = PriorityQueue('m-unique',
                                                 self.redis_persist)
        self.ingest_queue = NamedQueue(INGEST_QUEUE_NAME, self.redis_persist)
        self.ingest_complete_queue = NamedQueue(COMPLETE_QUEUE_NAME,
                                                self.redis)
        self.alert_queue = NamedQueue(ALERT_QUEUE_NAME, self.redis_persist)

        # Priority ranges and sampling thresholds per criticality bucket.
        constants = forge.get_constants(self.config)
        self.c_rng = constants.PRIORITY_RANGES['critical']
        self.h_rng = constants.PRIORITY_RANGES['high']
        self.m_rng = constants.PRIORITY_RANGES['medium']
        self.l_rng = constants.PRIORITY_RANGES['low']
        self.c_s_at = self.config.core.ingester.sampling_at['critical']
        self.h_s_at = self.config.core.ingester.sampling_at['high']
        self.m_s_at = self.config.core.ingester.sampling_at['medium']
        self.l_s_at = self.config.core.ingester.sampling_at['low']

        self.to_expire = {k: 0 for k in metrics.EXPIRY_METRICS}
        # NOTE(review): with batch deletion the range is rounded to the DAY;
        # presumably so batched deletes cover whole-day buckets — confirm.
        if self.config.core.expiry.batch_delete:
            self.delete_query = f"expiry_ts:[* TO {self.datastore.ds.now}-{self.config.core.expiry.delay}" \
                f"{self.datastore.ds.hour}/DAY]"
        else:
            self.delete_query = f"expiry_ts:[* TO {self.datastore.ds.now}-{self.config.core.expiry.delay}" \
                f"{self.datastore.ds.hour}]"

        # Periodically refresh the expiry queue counts in the background.
        self.scheduler = BackgroundScheduler(daemon=True)
        self.scheduler.add_job(
            self._reload_expiry_queues,
            'interval',
            seconds=self.config.core.metrics.export_interval * 4)
        self.scheduler.start()
Пример #7
0
def test_status_messages(datastore, client):
    """Verify every heartbeat flavour published on the 'status' queue is
    delivered to its dedicated client callback."""
    status_queue = CommsQueue('status', private=True)
    test_res_array = []

    # (label, randomized message) per heartbeat flavour, in publish order.
    heartbeats = [
        ('alerter', random_model_obj(AlerterMessage).as_primitives()),
        ('dispatcher', random_model_obj(DispatcherMessage).as_primitives()),
        ('expiry', random_model_obj(ExpiryMessage).as_primitives()),
        ('ingest', random_model_obj(IngestMessage).as_primitives()),
        ('service', random_model_obj(ServiceMessage).as_primitives()),
        ('service_timing',
         random_model_obj(ServiceTimingMessage).as_primitives()),
    ]
    expected = dict(heartbeats)

    def make_callback(label):
        # Each callback records whether the relayed payload matches the
        # 'msg' portion of what was published for its flavour.
        def callback(data):
            test_res_array.append((label, expected[label]['msg'] == data))
        return callback

    def publish_thread():
        # Give listen_on_status_messages time to subscribe first.
        time.sleep(1)
        for _, message in heartbeats:
            status_queue.publish(message)

    threading.Thread(target=publish_thread).start()
    client.socketio.listen_on_status_messages(
        alerter_msg_callback=make_callback('alerter'),
        dispatcher_msg_callback=make_callback('dispatcher'),
        expiry_msg_callback=make_callback('expiry'),
        ingest_msg_callback=make_callback('ingest'),
        service_msg_callback=make_callback('service'),
        service_timing_msg_callback=make_callback('service_timing'),
        timeout=2)
    assert len(test_res_array) == 6

    for test, result in test_res_array:
        if not result:
            pytest.fail("{} failed.".format(test))
Пример #8
0
def test_alert_namespace(datastore, sio):
    """Ensure the /alerts socketio namespace relays AlertCreated and
    AlertUpdated messages to subscribed clients.

    Bug fix: the wait loop used ``or`` between its two conditions, so it
    kept spinning for the full 5 seconds even once all three results had
    arrived; ``and`` (matching the other namespace tests in this file)
    exits as soon as everything has been received.
    """
    alert_queue = CommsQueue('alerts', private=True)
    test_id = get_random_id()

    created = random_model_obj(AlertMessage)
    created.msg_type = "AlertCreated"

    updated = random_model_obj(AlertMessage)
    updated.msg_type = "AlertUpdated"

    test_res_array = []

    @sio.on('monitoring', namespace='/alerts')
    def on_monitoring(data):
        # Confirmation that we are waiting for alerts
        test_res_array.append(('on_monitoring', data == test_id))

    @sio.on('AlertCreated', namespace='/alerts')
    def on_alert_created(data):
        test_res_array.append(
            ('on_alert_created', data == created.as_primitives()['msg']))

    @sio.on('AlertUpdated', namespace='/alerts')
    def on_alert_updated(data):
        test_res_array.append(
            ('on_alert_updated', data == updated.as_primitives()['msg']))

    try:
        sio.emit('alert', test_id, namespace='/alerts')
        sio.sleep(1)

        alert_queue.publish(created.as_primitives())
        alert_queue.publish(updated.as_primitives())

        start_time = time.time()

        # Wait for the monitoring ack plus both alert events (up to 5s).
        while len(test_res_array) < 3 and time.time() - start_time < 5:
            sio.sleep(0.1)

        assert len(test_res_array) == 3

        for test, result in test_res_array:
            if not result:
                pytest.fail(f"{test} failed.")

    finally:
        sio.disconnect()
def test_submission_ingested(datastore, client):
    """Verify each submission lifecycle message published on the
    'submissions' queue reaches its matching client callback."""
    submission_queue = CommsQueue('submissions', private=True)
    test_res_array = []

    def _make_message(msg_type):
        # Randomized submission message tagged with the given lifecycle type.
        message = random_model_obj(SubmissionMessage).as_primitives()
        message['msg_type'] = msg_type
        return message

    completed = _make_message("SubmissionCompleted")
    ingested = _make_message("SubmissionIngested")
    received = _make_message("SubmissionReceived")
    started = _make_message("SubmissionStarted")

    def _make_callback(label, expected):
        def callback(data):
            test_res_array.append((label, expected['msg'] == data))
        return callback

    def publish_thread():
        # Let the listener subscribe before the messages go out.
        time.sleep(1)
        for message in (completed, ingested, received, started):
            submission_queue.publish(message)

    threading.Thread(target=publish_thread).start()
    client.socketio.listen_on_submissions(
        completed_callback=_make_callback('completed', completed),
        ingested_callback=_make_callback('ingested', ingested),
        received_callback=_make_callback('received', received),
        started_callback=_make_callback('started', started),
        timeout=2)

    assert len(test_res_array) == 4

    for test, result in test_res_array:
        if not result:
            pytest.fail("{} failed.".format(test))
    def try_run(self):
        """Consume metrics messages from the comms queue and aggregate them
        into per-(name, type) counters while the component is running.

        Bug fix: the debug log interpolated ``m_type.upper()`` *before* the
        ``not m_type`` validity check, so a message missing its type raised
        AttributeError (None has no .upper()) instead of being skipped as an
        invalid message. The log now happens after validation.
        """
        # If our connection to the metrics database requires a custom ca cert, prepare it
        ca_certs = None
        if self.config.core.metrics.elasticsearch.host_certificates:
            # delete=False: the file must outlive this block so the ES client
            # can read it by path.
            with tempfile.NamedTemporaryFile(delete=False) as ca_certs_file:
                ca_certs = ca_certs_file.name
                ca_certs_file.write(self.config.core.metrics.elasticsearch.host_certificates.encode())

        self.metrics_queue = CommsQueue(METRICS_QUEUE)
        self.es = elasticsearch.Elasticsearch(hosts=self.elastic_hosts,
                                              connection_class=elasticsearch.RequestsHttpConnection,
                                              ca_certs=ca_certs)

        self.scheduler.add_job(self._create_aggregated_metrics, 'interval', seconds=60)
        self.scheduler.start()

        while self.running:
            for msg in self.metrics_queue.listen():
                # APM Transaction start
                if self.apm_client:
                    self.apm_client.begin_transaction('metrics')

                m_name = msg.pop('name', None)
                m_type = msg.pop('type', None)
                # Host/instance identifiers are not part of the counter data.
                msg.pop('host', None)
                msg.pop('instance', None)

                if not m_name or not m_type:
                    # APM Transaction end
                    if self.apm_client:
                        self.apm_client.end_transaction('process_message', 'invalid_message')

                    continue

                self.log.debug(f"Received {m_type.upper()} metrics message")

                with self.counters_lock:
                    c_key = (m_name, m_type)
                    # Non-aggregated metric types always replace the counter;
                    # everything else accumulates into the existing one.
                    if c_key not in self.counters or m_type in NON_AGGREGATED:
                        self.counters[c_key] = Counter(msg)
                    else:
                        self.counters[c_key].update(Counter(msg))

                # APM Transaction end
                if self.apm_client:
                    self.apm_client.end_transaction('process_message', 'success')
Пример #11
0
def save_alert(datastore, counter, logger, alert, psid):
    """Update an existing alert when *psid* is provided, otherwise create a
    new one, then broadcast the outcome on the 'alerts' comms queue.

    Returns:
        'update' when an existing alert was updated, 'create' otherwise.
    """
    if psid:
        perform_alert_update(datastore, logger, alert)
        counter.increment('updated')
        msg_type, ret_val = "AlertUpdated", 'update'
    else:
        datastore.alert.save(alert['alert_id'], alert)
        logger.info(f"Alert {alert['alert_id']} has been created.")
        counter.increment('created')
        msg_type, ret_val = "AlertCreated", 'create'

    # Publish the saved alert so downstream consumers see the change.
    CommsQueue('alerts').publish(AlertMessage({
        "msg": alert,
        "msg_type": msg_type,
        "sender": "alerter"
    }).as_primitives())
    return ret_val
Пример #12
0
    def monitor_system_status(self):
        """Relay every 'status' queue message to all connected socketio
        clients until the stop flag is raised or the listener errors out."""
        status_feed = CommsQueue('status', private=True)
        try:
            for msg in status_feed.listen():
                if self.stop:
                    # Shutdown requested; stop relaying.
                    break

                msg_type, message = msg['msg_type'], msg['msg']
                self.socketio.emit(msg_type, message, namespace=self.namespace)
                LOGGER.info(
                    f"SocketIO:{self.namespace} - Sending {msg_type} event to all connected users."
                )

        except Exception:
            LOGGER.exception(f"SocketIO:{self.namespace}")
        finally:
            LOGGER.info(
                f"SocketIO:{self.namespace} - No more users connected to status monitoring, exiting thread..."
            )
            # Clear the task slot so a new monitor thread can be started.
            with self.connections_lock:
                self.background_task = None
Пример #13
0
def _test_message_through_queue(queue_name, test_message, redis):
    """Publish *test_message* on *queue_name* from a side thread, then assert
    the first received message reconstructs to an equal object via its
    declared loader class."""
    publisher = Thread(target=publish_message,
                       args=(queue_name, test_message, redis))

    try:
        publisher.start()

        with CommsQueue(queue_name) as cq:
            for msg in cq.listen():
                loader_path = msg.get('msg_loader', None)
                if loader_path is None:
                    raise ValueError(
                        "Message does not have a message loader class path.")

                # Rebuild the message with its declared loader class and
                # compare against what was sent.
                loader = load_module_by_path(loader_path)
                assert loader(msg) == test_message
                break

    finally:
        publisher.join()
        assert not publisher.is_alive()
Пример #14
0
    def monitor_alerts(self, user_info):
        """Stream alert messages to a single client, filtered by the user's
        classification level, until that client disconnects."""
        sid = user_info['sid']
        alert_feed = CommsQueue('alerts', private=True)
        try:
            for msg in alert_feed.listen():
                if sid not in self.connections:
                    # Client went away; stop streaming.
                    break

                alert = msg['msg']
                msg_type = msg['msg_type']
                # Only forward alerts the user is cleared to see.
                accessible = classification.is_accessible(
                    user_info['classification'],
                    alert.get('classification', classification.UNRESTRICTED))
                if not accessible:
                    continue

                self.socketio.emit(msg_type,
                                   alert,
                                   room=sid,
                                   namespace=self.namespace)
                LOGGER.info(
                    f"SocketIO:{self.namespace} - {user_info['display']} - "
                    f"Sending {msg_type} event for alert matching ID: {alert['alert_id']}"
                )

                if AUDIT:
                    AUDIT_LOG.info(
                        f"{user_info['uname']} [{user_info['classification']}]"
                        f" :: AlertMonitoringNamespace.get_alert(alert_id={alert['alert_id']})"
                    )

        except Exception:
            LOGGER.exception(
                f"SocketIO:{self.namespace} - {user_info['display']}")
        finally:
            LOGGER.info(
                f"SocketIO:{self.namespace} - {user_info['display']} - Connection to client was terminated"
            )
Пример #15
0
 def publish_messages(message_list):
     """Publish every item of *message_list* on the test comms queue after a
     short delay that lets the consumer subscribe first."""
     time.sleep(0.1)
     with CommsQueue('test-comms-queue') as channel:
         for item in message_list:
             channel.publish(item)
Пример #16
0
def publish_message(queue_name, test_message, redis):
    """Publish the primitive form of *test_message* on *queue_name* after a
    short delay so the listener can subscribe first."""
    time.sleep(0.1)
    payload = test_message.as_primitives()
    with CommsQueue(queue_name, redis) as channel:
        channel.publish(payload)
Пример #17
0
    def __init__(self,
                 datastore,
                 logger,
                 classification=None,
                 redis=None,
                 persistent_redis=None,
                 metrics_name='ingester'):
        """Wire up the ingester's datastore, redis queues, caches and helper
        objects.

        Args:
            datastore: Datastore used for lookups and by the submission client.
            logger: Logger this instance reports through.
            classification: Optional classification engine; loaded via forge
                when omitted.
            redis: Optional non-persistent redis client; created from config
                when omitted.
            persistent_redis: Optional persistent redis client; created from
                config when omitted.
            metrics_name: Name under which this component reports metrics.
        """
        self.datastore = datastore
        self.log = logger

        # Cache the user groups
        self.cache_lock = threading.RLock(
        )  # TODO are middle man instances single threaded now?
        self._user_groups = {}
        # Group cache epoch, bucketed by hour.
        self._user_groups_reset = time.time() // HOUR_IN_SECONDS
        self.cache = {}
        self.notification_queues = {}
        # Whitelist decisions, guarded by their own lock.
        self.whitelisted = {}
        self.whitelisted_lock = threading.RLock()

        # Create a config cache that will refresh config values periodically
        self.config = forge.CachedObject(forge.get_config)

        # Module path parameters are fixed at start time. Changing these involves a restart
        self.is_low_priority = load_module_by_path(
            self.config.core.ingester.is_low_priority)
        self.get_whitelist_verdict = load_module_by_path(
            self.config.core.ingester.get_whitelist_verdict)
        self.whitelist = load_module_by_path(
            self.config.core.ingester.whitelist)

        # Constants are loaded based on a non-constant path, so has to be done at init rather than load
        constants = forge.get_constants(self.config)
        self.priority_value = constants.PRIORITIES
        self.priority_range = constants.PRIORITY_RANGES
        self.threshold_value = constants.PRIORITY_THRESHOLDS

        # Connect to the redis servers
        self.redis = redis or get_client(
            host=self.config.core.redis.nonpersistent.host,
            port=self.config.core.redis.nonpersistent.port,
            private=False,
        )
        self.persistent_redis = persistent_redis or get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )

        # Classification engine
        self.ce = classification or forge.get_classification()

        # Metrics gathering factory
        self.counter = MetricsFactory(metrics_type='ingester',
                                      schema=Metrics,
                                      redis=self.redis,
                                      config=self.config,
                                      name=metrics_name)

        # State. The submissions in progress are stored in Redis in order to
        # persist this state and recover in case we crash.
        self.scanning = Hash('m-scanning-table', self.persistent_redis)

        # Input. The dispatcher creates a record when any submission completes.
        self.complete_queue = NamedQueue(_completeq_name, self.redis)

        # Internal. Dropped entries are placed on this queue.
        # self.drop_queue = NamedQueue('m-drop', self.persistent_redis)

        # Input. An external process places submission requests on this queue.
        self.ingest_queue = NamedQueue(INGEST_QUEUE_NAME,
                                       self.persistent_redis)

        # Output. Duplicate our input traffic into this queue so it may be cloned by other systems
        self.traffic_queue = CommsQueue('submissions', self.redis)

        # Internal. Unique requests are placed in and processed from this queue.
        self.unique_queue = PriorityQueue('m-unique', self.persistent_redis)

        # Internal, delay queue for retrying
        self.retry_queue = PriorityQueue('m-retry', self.persistent_redis)

        # Internal, timeout watch queue
        self.timeout_queue = PriorityQueue('m-timeout', self.redis)

        # Internal, queue for processing duplicates
        #   When a duplicate file is detected (same cache key => same file, and same
        #   submission parameters) the file won't be ingested normally, but instead a reference
        #   will be written to a duplicate queue. Whenever a file is finished, in the complete
        #   method, not only is the original ingestion finalized, but all entries in the duplicate queue
        #   are finalized as well. This has the effect that all concurrent ingestion of the same file
        #   are 'merged' into a single submission to the system.
        self.duplicate_queue = MultiQueue(self.persistent_redis)

        # Output. submissions that should have alerts generated
        self.alert_queue = NamedQueue(ALERT_QUEUE_NAME, self.persistent_redis)

        # Utility object to help submit tasks to dispatching
        self.submit_client = SubmissionClient(datastore=self.datastore,
                                              redis=self.redis)
Пример #18
0
def get_metrics_sink(redis=None):
    """Return a CommsQueue bound to the global metrics channel on *redis*."""
    from assemblyline.remote.datatypes.queues.comms import CommsQueue

    return CommsQueue('assemblyline_metrics', host=redis)
Пример #19
0
#!/usr/bin/env python
"""Dump every message published on a given comms queue to stdout.

Usage: pubsub_reader.py [queue_name]
"""

import sys

from assemblyline.remote.datatypes.queues.comms import CommsQueue
from pprint import pprint

if __name__ == "__main__":
    queue_name = sys.argv[1] if len(sys.argv) > 1 else None

    if queue_name is None:
        print(
            "\nERROR: You must specify a queue name.\n\npubsub_reader.py [queue_name]"
        )
        # Bug fix: use sys.exit() rather than the interactive-helper exit(),
        # which is injected by the `site` module and not guaranteed to exist
        # when scripts run with -S or in embedded interpreters.
        sys.exit(1)

    print(f"Listening for messages on '{queue_name}' queue.")

    q = CommsQueue(queue_name)

    try:
        while True:
            for msg in q.listen():
                pprint(msg)
    except KeyboardInterrupt:
        print('Exiting')
    finally:
        # Always release the queue connection, even on Ctrl-C.
        q.close()
Пример #20
0
def test_status_namspace(datastore, sio):
    """Ensure the /status socketio namespace relays every heartbeat type.

    NOTE(review): the function name carries a typo ('namspace'); kept as-is
    so the public pytest test id does not change.
    """
    status_queue = CommsQueue('status', private=True)
    monitoring = get_random_id()

    test_res_array = []

    @sio.on('monitoring', namespace='/status')
    def on_monitoring(data):
        # Confirmation that we are waiting for status messages
        test_res_array.append(('on_monitoring', data == monitoring))

    # One randomized heartbeat per flavour, each with a handler recording
    # whether the relayed payload matches what was published.
    heartbeats = []
    for event, label, model in (
            ('AlerterHeartbeat', 'on_alerter_heartbeat', AlerterMessage),
            ('DispatcherHeartbeat', 'on_dispatcher_heartbeat',
             DispatcherMessage),
            ('ExpiryHeartbeat', 'on_expiry_heartbeat', ExpiryMessage),
            ('IngestHeartbeat', 'on_ingest_heartbeat', IngestMessage),
            ('ServiceHeartbeat', 'on_service_heartbeat', ServiceMessage)):
        hb_msg = random_model_obj(model).as_primitives()
        heartbeats.append(hb_msg)

        # Bind label/message as defaults to avoid late-binding closures.
        def handler(data, _label=label, _expected=hb_msg):
            test_res_array.append((_label, data == _expected['msg']))

        sio.on(event, namespace='/status')(handler)

    try:
        sio.emit('monitor', monitoring, namespace='/status')
        sio.sleep(1)

        for hb_msg in heartbeats:
            status_queue.publish(hb_msg)

        # Wait (up to 5s) for the monitoring ack plus the five heartbeats.
        deadline = time.time() + 5
        while len(test_res_array) < 6 and time.time() < deadline:
            sio.sleep(0.1)

        assert len(test_res_array) == 6

        for test, result in test_res_array:
            if not result:
                pytest.fail(f"{test} failed.")
    finally:
        sio.disconnect()
Пример #21
0
def get_submission_traffic_channel():
    """Open the 'submissions' comms queue on the non-persistent redis host
    taken from the global config."""
    redis_cfg = config.core.redis.nonpersistent
    return CommsQueue('submissions',
                      host=redis_cfg.host,
                      port=redis_cfg.port)