    def run(self):
        if 'logging_config' in self.config_mgr:
            fileConfig(self.config_mgr['logging_config'])
        else:
            logging.basicConfig()
            if 'log_level' in self.config_mgr:
                level = self.config_mgr['log_level']
                level = getattr(logging, level.upper())
                logging.getLogger('winchester').setLevel(level)

        self.pm_lock = threading.Lock()
        self.pipe = PipelineManager(self.config_mgr)

        # TODO add trigger defs from the DB at startup (see the sketch after this example)

        # start threads
        self.stream_def_thread = threading.Thread(name='stream_defs_pipe',
                                                  target=pipe_stream_definition_consumer,
                                                  args=(self.kafka_config, self.pm_lock, self.pipe,))

        self.pipeline_ready_thread = threading.Thread(name='pipeline',
                                                      target=self.pipeline_ready_processor,
                                                      args=(self.pm_lock, self.pipe,))

        self.stream_def_thread.start()
        self.pipeline_ready_thread.start()

        self.stream_def_thread.join()
        self.pipeline_ready_thread.join()
        log.debug('Exiting')
    def __init__(self, conf):
        super(PipelineProcessor, self).__init__(conf)
        self._winchester_config = conf.winchester.winchester_config
        self._config_mgr = ConfigManager.load_config_file(
            self._winchester_config)
        self._group = conf.kafka.stream_def_pipe_group
        self._pm_lock = threading.Lock()
        self._pipe = PipelineManager(self._config_mgr)
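
The TODO in run() above is implemented by the fuller PipelineProcessor.run() shown later on this page. A minimal sketch of that startup step, assuming a stream_defs_from_database() helper and the attribute names used above (illustrative, not part of the original snippet):

    def _add_trigger_defs_from_db(self):
        # Hypothetical helper mirroring the DB-loading step in the
        # PipelineProcessor.run() example further down this page.
        stream_defs = self.stream_defs_from_database()
        if stream_defs:
            log.debug('Loading %d stream definitions from the DB at startup',
                      len(stream_defs))
            self.pipe.add_trigger_definition(stream_defs)
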
Example #3
def main():
    parser = argparse.ArgumentParser(description="Winchester pipeline worker")
    parser.add_argument('--config', '-c', default='winchester.yaml',
                        help='The name of the winchester config file')
    parser.add_argument('--name', '-n', default='pipeline_worker',
                        help='The name of this process for logging purposes')
    parser.add_argument('--daemon', '-d', help='Run in daemon mode.')
    args = parser.parse_args()

    conf = ConfigManager.load_config_file(args.config)
    proc_name = args.name

    if 'log_level' in conf:
        level = conf['log_level']
        level = getattr(logging, level.upper())
    else:
        level = logging.INFO

    if 'log_file' in conf:
        log_file = conf['log_file'] % dict(proc_name=proc_name)
    else:
        log_file = '%(proc_name)s.log' % dict(proc_name=proc_name)

    # This is a hack, but it's needed to pass the logfile name & default
    # loglevel into log handlers configured with a config file. (mdragon)
    logging.LOCAL_LOG_FILE = log_file
    logging.LOCAL_DEFAULT_LEVEL = level

    if 'logging_config' in conf:
        fileConfig(conf['logging_config'])
    else:
        logging.basicConfig()
        logging.getLogger('winchester').setLevel(level)
    timesync = time_sync.TimeSync(conf)
    pipe = PipelineManager(conf, time_sync=timesync, proc_name=proc_name)
    if args.daemon:
        print("Backgrounding for daemon mode.")
        with daemon.DaemonContext():
            pipe.run()
    else:
        pipe.run()
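
For reference, a minimal sketch of the configuration values this main() consults; only the keys appear in the code above, and the values here are assumptions about a typical winchester.yaml:

# Illustrative only -- ConfigManager.load_config_file() returns a mapping
# that may carry these optional keys.
example_conf = {
    'log_level': 'debug',             # upper-cased, then mapped via getattr(logging, ...)
    'log_file': '%(proc_name)s.log',  # %(proc_name)s is filled in with args.name
    'logging_config': 'logging.conf'  # if set, fileConfig() is used instead of basicConfig()
}
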
Example #4
def main():
    parser = argparse.ArgumentParser(description="Winchester pipeline worker")
    parser.add_argument('--config',
                        '-c',
                        default='winchester.yaml',
                        help='The name of the winchester config file')
    parser.add_argument('--daemon', '-d', help='Run in daemon mode.')
    args = parser.parse_args()
    conf = ConfigManager.load_config_file(args.config)

    if 'logging_config' in conf:
        fileConfig(conf['logging_config'])
    else:
        logging.basicConfig()
        if 'log_level' in conf:
            level = conf['log_level']
            level = getattr(logging, level.upper())
            logging.getLogger('winchester').setLevel(level)
    pipe = PipelineManager(conf)
    if args.daemon:
        print "Backgrounding for daemon mode."
        with daemon.DaemonContext():
            pipe.run()
    else:
        pipe.run()
Example #6
def main():
    parser = argparse.ArgumentParser(description="Winchester pipeline worker")
    parser.add_argument('--config',
                        '-c',
                        default='winchester.yaml',
                        help='The name of the winchester config file')
    parser.add_argument('--name',
                        '-n',
                        default='pipeline_worker',
                        help='The name of this process for logging purposes')
    parser.add_argument('--daemon', '-d', help='Run in daemon mode.')
    args = parser.parse_args()

    conf = ConfigManager.load_config_file(args.config)
    proc_name = args.name

    if 'log_level' in conf:
        level = conf['log_level']
        level = getattr(logging, level.upper())
    else:
        level = logging.INFO

    if 'log_file' in conf:
        log_file = conf['log_file'] % dict(proc_name=proc_name)
    else:
        log_file = '%(proc_name)s.log' % dict(proc_name=proc_name)

    # This is a hack, but it's needed to pass the logfile name & default
    # loglevel into log handlers configured with a config file. (mdragon)
    # A sketch of such a handler follows this example.
    logging.LOCAL_LOG_FILE = log_file
    logging.LOCAL_DEFAULT_LEVEL = level

    if 'logging_config' in conf:
        fileConfig(conf['logging_config'])
    else:
        logging.basicConfig()
        logging.getLogger('winchester').setLevel(level)
    timesync = time_sync.TimeSync(conf)
    pipe = PipelineManager(conf, time_sync=timesync, proc_name=proc_name)
    if args.daemon:
        print("Backgrounding for daemon mode.")
        with daemon.DaemonContext():
            pipe.run()
    else:
        pipe.run()
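
The "hack" comment above sets module-level attributes that a handler referenced from the logging_config file can pick up. A hypothetical handler illustrating that mechanism (this class is not part of winchester; it only sketches how the attributes could be consumed):

import logging
import logging.handlers


class LocalFileHandler(logging.handlers.WatchedFileHandler):
    """Hypothetical handler that reads the LOCAL_* attributes set in main()."""

    def __init__(self):
        super(LocalFileHandler, self).__init__(logging.LOCAL_LOG_FILE)
        self.setLevel(logging.LOCAL_DEFAULT_LEVEL)
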
class TriggerTest():

    """  Trigger Test

    Adds Stream Definitions to the TriggerManager and PipelineManager classes.  Adds Fake
    distilled events to the TriggerManager and ensures the Fire and expire handlers will get called.
    This test uses the winchester mysql DB.
    """

    """ test data """

    trig_def_fc1 = [{'distinguished_by': ['instance_id'],
                     'fire_criteria': [{'event_type': 'compute.instance.create.start'},
                                       {'event_type': 'compute.instance.create.end'}],
                     'match_criteria': [{'event_type': 'compute.instance.create.*'}],
                     'name': 'fc1_trigger',
                     'debug_level': 2,
                     'expiration': '$last + 1h',
                     'fire_pipeline': 'test_pipeline',
                     'expire_pipeline': 'test_expire_pipeline'}]

    trig_def_fc1_tenant406904_filter = [{'distinguished_by': ['instance_id'],
                                         'fire_criteria': [{'event_type': 'compute.instance.create.start'}, {'event_type': 'compute.instance.create.end'}],
                                         'match_criteria': [{'traits': {'tenant_id': '406904'}, 'event_type': 'compute.instance.create.*'}],
                                         'name': 'trig_def_fc1_406904',
                                         'debug_level': 2,
                                         'expiration': '$first + 10s',
                                         'fire_pipeline': 'test_pipeline',
                                         'expire_pipeline': 'test_expire_pipeline'}]

    trig_def_fc1_tenant123456_filter = [{'distinguished_by': ['instance_id'],
                                         'fire_criteria': [{'event_type': 'compute.instance.create.start'}, {'event_type': 'compute.instance.create.end'}],
                                         'match_criteria': [{'traits': {'tenant_id': '123456'},
                                                             'event_type': 'compute.instance.create.*'}],
                                         'name': 'fc1_trigger_123456',
                                         'debug_level': 2,
                                         'expiration': '$last + 24h',
                                         'fire_pipeline': 'test_pipeline',
                                         'expire_pipeline': 'test_expire_pipeline'}]

    """ test adding events to cause fire criteria """

    distilled_events_fc1_tenant_406904 = [{'os_distro': 'com.ubuntu',
                                           'event_type': 'compute.instance.create.start',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '406904',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '111-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'building',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'message_id': '19701f6c-f51f-4ecb-85fb-7db40277627d'},
                                          {'os_distro': 'com.ubuntu',
                                           'message_id': '2ae21707-70ae-48a2-89c0-b08b11dc0b1a',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '406904',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '111-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'active',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'launched_at': datetime.utcnow(),
                                           'event_type': 'compute.instance.create.end'}]

    distilled_events_fc1_tenant_406904_missing_end = [{'os_distro': 'com.ubuntu',
                                                       'event_type': 'compute.instance.create.start',
                                                       'service': 'publisher-302689',
                                                       'instance_type': '512MB Standard Instance',
                                                       'tenant_id': '406904',
                                                       'instance_flavor_id': '2',
                                                       'hostname': 'server-462185',
                                                       'host': 'publisher-302689',
                                                       'instance_flavor': '512MB Standard Instance',
                                                       'instance_id': '333-3b0f-4057-b377-b65131e8532e',
                                                       'os_version': '12.04',
                                                       'state': 'building',
                                                       'os_architecture': 'x64',
                                                       'timestamp': datetime.utcnow(),
                                                       'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                                       'message_id': '19701f6c-f51f-4ecb-85fb-7db40277627d'}]

    distilled_events_fc1_tenant_123456 = [{'os_distro': 'com.ubuntu',
                                           'event_type': 'compute.instance.create.start',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '123456',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '456-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'building',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'message_id': '19701f6c-f51f-4ecb-85fb-7db40277627d'},
                                          {'os_distro': 'com.ubuntu',
                                           'message_id': '2ae21707-70ae-48a2-89c0-b08b11dc0b1a',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '123456',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '456-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'active',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'launched_at': datetime.utcnow(),
                                           'event_type': 'compute.instance.create.end'}]

    """ trigger defs for fire criteria 2 - looking for exists"""
    trig_def_fc2_rackspace_billing = [{'distinguished_by': ['instance_id',
                                                            {'timestamp': 'day'}],
                                       'fire_criteria': [{'event_type': 'compute.instance.exists'}],
                                       'match_criteria': [{'event_type': ['compute.instance.*',
                                                                          '!compute.instance.exists']},
                                                          {'event_type': 'compute.instance.exists',
                                                            'map_distinguished_by': {'timestamp': 'audit_period_beginning'}}],
                                       'name': 'rackspace_billing',
                                       'debug_level': 2,
                                       'expiration': '$last + 1h',
                                       'fire_pipeline': 'test_pipeline',
                                       'expire_pipeline': 'test_expire_pipeline'}]

    trig_def_fc3_rackspace = [{'distinguished_by': ['instance_id', {'timestamp': 'day'}],
                               'fire_criteria': [{'traits': {'audit_period_ending': {'datetime': '$audit_period_beginning + 1d'}},
                                                  'event_type': 'compute.instance.exists'}],
                               'match_criteria': [{'event_type': ['compute.instance.*',
                                                                  'snapshot_instance',
                                                                  'keypair.import.*',
                                                                  'rebuild_instance',
                                                                  'compute.instance.*',
                                                                  '!compute.instance.exists',
                                                                  '!compute.instance.exists.failed',
                                                                  '!compute.instance.exists.warnings',
                                                                  '!compute.instance.exists.verified']},
                                                  {'event_type': 'compute.instance.exists',
                                                   'map_distinguished_by': {'timestamp': 'audit_period_beginning'}}],
                               'name': 'rackspace_test_trigger',
                               'debug_level': 2,
                               'expiration': '$last + 2d',
                               'fire_pipeline': 'test_fire_pipeline',
                               'expire_pipeline': 'test_expire_pipeline'}]

    distilled_events_fc2_tenant_222333 = [{'os_distro': 'com.ubuntu',
                                           'event_type': 'compute.instance.create.start',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '222333',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '772b2f73-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'building',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'message_id': '19701f6c-f51f-4ecb-85fb-7db40277627d'},
                                          {'os_distro': 'com.ubuntu',
                                           'message_id': '2ae21707-70ae-48a2-89c0-b08b11dc0b1a',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '222333',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '772b2f73-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'active',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'launched_at': datetime.utcnow(),
                                           'event_type': 'compute.instance.create.end'},
                                          {'os_distro': 'com.ubuntu',
                                           'message_id': '2ae21707-70ae-48a2-89c0-b08b11dc0b1a',
                                           'service': 'publisher-302689',
                                           'instance_type': '512MB Standard Instance',
                                           'tenant_id': '222333',
                                           'instance_flavor_id': '2',
                                           'hostname': 'server-462185',
                                           'host': 'publisher-302689',
                                           'instance_flavor': '512MB Standard Instance',
                                           'instance_id': '772b2f73-3b0f-4057-b377-b65131e8532e',
                                           'os_version': '12.04',
                                           'state': 'active',
                                           'os_architecture': 'x64',
                                           'timestamp': datetime.utcnow(),
                                           'request_id': 'req-d096b6de-f451-4d00-bff0-646a8c8a23c3',
                                           'launched_at': datetime.utcnow(),
                                           'event_type': 'compute.instance.exists'}]

    def __init__(self, winchester_config):
        self.winchester_config = winchester_config

        self.config = ConfigManager.load_config_file(winchester_config)
        self.trigger_manager = TriggerManager(self.config)
        self.pipe = PipelineManager(self.config)

    def _add_unique_event(self, e):
        ''' make the static test data contain unique message ids '''
        e['message_id'] = uuid.uuid4()
        self.trigger_manager.add_event(e)

    def add_test_stream_definitions(self):
        self.trigger_manager.add_trigger_definition(
            TriggerTest.trig_def_fc1_tenant406904_filter)
        self.trigger_manager.add_trigger_definition(
            TriggerTest.trig_def_fc1_tenant123456_filter)

        self.pipe.add_trigger_definition(
            TriggerTest.trig_def_fc1_tenant406904_filter)
        self.pipe.add_trigger_definition(
            TriggerTest.trig_def_fc1_tenant123456_filter)

    def add_distilled_events_to_fire(self):
        for e in TriggerTest.distilled_events_fc1_tenant_406904:
            self._add_unique_event(e)
        for e in TriggerTest.distilled_events_fc1_tenant_123456:
            self._add_unique_event(e)

    def add_distilled_events_to_expire(self):
        for e in TriggerTest.distilled_events_fc1_tenant_406904_missing_end:
            self._add_unique_event(e)

    def add_distilled_events_with_no_match(self):
        for e in TriggerTest.distilled_events_fc2_tenant_222333:
            self._add_unique_event(e)

    def check_for_expired_streams(self):
        stream_count = self.pipe.process_ready_streams(self.pipe.pipeline_worker_batch_size,
                                                       expire=True)
        return stream_count

    def check_for_fired_streams(self):
        stream_count = self.pipe.process_ready_streams(
            self.pipe.pipeline_worker_batch_size)
        return stream_count

    def test_no_match(self):
        self.add_distilled_events_with_no_match()
        time.sleep(2)
        fired_count = self.check_for_fired_streams()
        expired_count = self.check_for_expired_streams()

        if fired_count == 0 and expired_count == 0:
            print("test_no_match: Success")
        else:
            print("test_no_match: Failed")

    def test_fired(self):
        self.add_distilled_events_to_fire()
        time.sleep(3)
        fired_count = self.check_for_fired_streams()
        expired_count = self.check_for_expired_streams()
        if expired_count == 0 and fired_count == 2:
            print("test_fired: Success")
        else:
            print("test_fired: Failed")

    def test_expired(self):
        self.add_distilled_events_to_expire()
        time.sleep(11)
        fired_count = self.check_for_fired_streams()
        expired_count = self.check_for_expired_streams()
        if expired_count == 1 and fired_count == 0:
            print("test_expired: Success")
        else:
            print("test_expired: Failed")
class PipelineProcessor(EventProcessorBase):

    """Pipeline Processor

        PipelineProcessor uses the stacktach PipelineManager to
        load pipeline handlers, and process ready and expired streams.
        The PipelineManager contains a TriggerManager so that
        handlers can optionally add more events to the TriggerManager
        filtered stream. The TriggerManager within the PipelineManager
        is initialized with stream definitions dynamically.
    """

    def __init__(self, conf):
        super(PipelineProcessor, self).__init__(conf)
        self._winchester_config = conf.winchester.winchester_config
        self._config_mgr = ConfigManager.load_config_file(
            self._winchester_config)
        self._group = conf.kafka.stream_def_pipe_group
        self._pm_lock = threading.Lock()
        self._pipe = PipelineManager(self._config_mgr)

    def run(self):

        # read stream-definitions from DB at startup and add
        stream_defs = self.stream_defs_from_database()
        if len(stream_defs) > 0:
            log.debug(
                'Loading {} stream definitions from the DB at startup'.format(
                    len(stream_defs)))
            self._pipe.add_trigger_definition(stream_defs)

        # start threads
        self.stream_def_thread = threading.Thread(
            name='stream_defs_pipe',
            target=self.stream_definition_consumer,
            args=(self.conf, self._pm_lock, self._group, self._pipe,))

        self.pipeline_ready_thread = threading.Thread(
            name='pipeline',
            target=self.pipeline_ready_processor,
            args=(self._pm_lock, self._pipe,))

        log.debug('Starting stream_defs_pipe and pipeline threads')
        self.stream_def_thread.start()
        self.pipeline_ready_thread.start()

        self.stream_def_thread.join()
        self.pipeline_ready_thread.join()
        log.debug('Exiting')

    def pipeline_ready_processor(self, lock, pipe):
        statsd = monascastatsd.Client(name='monasca',
                                      dimensions=self.dimensions)
        fired_streams = statsd.get_counter('fired_streams')
        expired_streams = statsd.get_counter('expired_streams')

        while True:

            lock.acquire()
            try:
                fire_ct = pipe.process_ready_streams(
                    pipe.pipeline_worker_batch_size)
                expire_ct = pipe.process_ready_streams(
                    pipe.pipeline_worker_batch_size,
                    expire=True)
                if fire_ct > 0:
                    fired_streams.increment()
                if expire_ct > 0:
                    expired_streams.increment()
            except Exception as e:
                log.exception(e)
            finally:
                lock.release()

            if (pipe.current_time() -
                    pipe.last_status).seconds > pipe.statistics_period:
                pipe._log_statistics()

            if not fire_ct and not expire_ct:
                log.debug("No streams to fire or expire. Sleeping...")
                time.sleep(pipe.pipeline_worker_delay)
            else:
                log.debug(
                    "Fired {} streams, Expired {} streams".format(
                        fire_ct,
                        expire_ct))
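
A rough sketch of the nested configuration attributes PipelineProcessor dereferences; the SimpleNamespace and its values are illustrative stand-ins for the real conf object, which carries more than what is shown:

from types import SimpleNamespace

conf = SimpleNamespace(
    winchester=SimpleNamespace(winchester_config='winchester.yaml'),
    kafka=SimpleNamespace(stream_def_pipe_group='stream_def_pipe_group'))
processor = PipelineProcessor(conf)
processor.run()  # blocks until the stream_defs_pipe and pipeline threads exit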