Example #1
def main():
    config = load_config()
    metrics.init(config, 'iris-process-retention', stats_reset)

    retention_settings = config.get('retention', {})
    if not retention_settings.get('enabled'):
        logger.info('Retention not enabled, bailing')
        return

    engine = create_engine(config['db']['conn']['str'] % config['db']['conn']['kwargs'],
                           **config['db']['kwargs'])

    max_days = int(retention_settings['max_days'])
    if max_days < 1:
        logger.error('max_days must be at least 1')
        return

    cooldown_time = int(retention_settings['cooldown_time'])
    batch_size = int(retention_settings['batch_size'])
    run_interval = int(retention_settings['run_interval'])
    archive_path = retention_settings['archive_path']

    spawn(metrics.emit_forever)

    while True:
        logger.info('Starting retention loop (deleting messages + incidents older than %d days)', max_days)
        try:
            process_retention(engine,
                              max_days=max_days,
                              cooldown_time=cooldown_time,
                              batch_size=batch_size,
                              archive_path=archive_path)
        except Exception:
            logger.exception('Hit problem while running retention')
        logger.info('Waiting %d seconds until next iteration...', run_interval)
        sleep(run_interval)
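
A note on the engine setup above: config['db']['conn']['str'] is a %-format
template and config['db']['conn']['kwargs'] is the mapping that fills it. A
minimal sketch of what such a config block could look like (the driver and
the key names inside 'kwargs' are assumptions, not taken from these excerpts):

# Hypothetical config fragment; only the str/kwargs shape appears above.
db_config = {
    'conn': {
        'str': 'mysql+pymysql://%(user)s:%(password)s@%(host)s/%(database)s',
        'kwargs': {'user': 'iris', 'password': 'secret',
                   'host': 'localhost', 'database': 'iris'},
    },
    'kwargs': {'pool_recycle': 3600},   # passed through to create_engine
}
url = db_config['conn']['str'] % db_config['conn']['kwargs']
# create_engine(url, **db_config['kwargs']) then yields the SQLAlchemy engine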
Example #2
def main():
    config = load_config()
    metrics.init(config, 'iris-sync-targets', stats_reset)

    default_nap_time = 3600

    try:
        nap_time = int(config.get('sync_script_nap_time', default_nap_time))
    except ValueError:
        nap_time = default_nap_time

    engine = create_engine(
        config['db']['conn']['str'] % config['db']['conn']['kwargs'],
        **config['db']['kwargs'])

    # Initialize these to zero at the start of the app, and don't reset them at every
    # metrics interval
    metrics.set('users_found', 0)
    metrics.set('teams_found', 0)

    metrics_task = spawn(metrics.emit_forever)

    while True:
        if not bool(metrics_task):
            logger.error('metrics task failed, %s', metrics_task.exception)
            metrics_task = spawn(metrics.emit_forever)

        sync(config, engine)
        logger.info('Sleeping for %d seconds', nap_time)
        sleep(nap_time)
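
The "if not bool(metrics_task)" check relies on gevent greenlet semantics:
bool(greenlet) stays True while the greenlet is running and turns False once
it has finished or died, at which point .exception holds whatever killed it.
A minimal self-contained sketch of that respawn idiom, assuming gevent (the
flaky task body is a placeholder):

from gevent import sleep, spawn

def flaky():
    sleep(0.1)
    raise RuntimeError('boom')      # stand-in for a crashing background task

task = spawn(flaky)
sleep(0.2)
if not bool(task):                  # greenlet exited (here: crashed)
    print('task failed: %s' % task.exception)
    task = spawn(flaky)             # respawn it, as the loop above does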
Example #3
def main():
    global ldap_timeout
    config = load_config()
    metrics.init(config, 'iris-sync-targets', stats_reset)

    default_ldap_timeout = 20
    default_nap_time = 3600

    ldap_timeout = int(
        config.get('sync_script_ldap_timeout', default_ldap_timeout))
    try:
        nap_time = int(config.get('sync_script_nap_time', default_nap_time))
    except ValueError:
        nap_time = default_nap_time

    engine = create_engine(
        config['db']['conn']['str'] % config['db']['conn']['kwargs'],
        **config['db']['kwargs'])

    # Optionally, maintain an internal list of mailing lists from ldap that can also be
    # used as targets.
    ldap_lists = config.get('ldap_lists')

    # Initialize these to zero at the start of the app, and don't reset them at every
    # metrics interval
    metrics.set('users_found', 0)
    metrics.set('teams_found', 0)

    metrics.set('ldap_lists_found', 0)
    metrics.set('ldap_memberships_found', 0)

    metrics_task = spawn(metrics.emit_forever)

    while True:
        if not bool(metrics_task):
            logger.error('metrics task failed, %s', metrics_task.exception)
            metrics_task = spawn(metrics.emit_forever)

        sync_from_oncall(config, engine)

        # Do ldap mailing list sync *after* we do the normal sync, to ensure we have the users
        # which will be in ldap already populated.
        if ldap_lists:

            if 'ldap_cert_path' in ldap_lists:
                ldap_cert_path = ldap_lists['ldap_cert_path']
                if not os.access(ldap_cert_path, os.R_OK):
                    logger.error("Failed to read ldap_cert_path certificate")
                    raise IOError('Cannot read certificate at %s' % ldap_cert_path)
                ldap_lists['cert_path'] = ldap_cert_path
            list_run_start = time.time()
            sync_ldap_lists(ldap_lists, engine)
            logger.info('LDAP mailing list sync took %.2f seconds',
                        time.time() - list_run_start)

        logger.info('Sleeping for %d seconds', nap_time)
        sleep(nap_time)
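
The ldap_cert_path handling checks readability with os.access before handing
the certificate to the LDAP client, failing fast instead of erroring deep in
the sync. A hedged sketch of the same check pulled into a helper
(resolve_cert_path is hypothetical, not part of the project):

import os

def resolve_cert_path(ldap_lists):
    # Mirrors the inline check above: fail fast if the cert is unreadable.
    cert_path = ldap_lists.get('ldap_cert_path')
    if cert_path is None:
        return
    if not os.access(cert_path, os.R_OK):
        raise IOError('Cannot read certificate at %s' % cert_path)
    ldap_lists['cert_path'] = cert_path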
Example #4
def main():
    config = load_config()
    metrics.init(config, 'iris-application-stats', stats_reset)
    app_stats_settings = config.get('app-stats', {})
    run_interval = int(app_stats_settings['run_interval'])
    spawn(metrics.emit_forever)

    db.init(config)
    while True:
        logger.info('Starting app stats calculation loop')
        stats_task()
        logger.info('Waiting %d seconds until next iteration...', run_interval)
        sleep(run_interval)
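
Note that this loop is fixed-delay: each cycle takes the stats computation
time plus run_interval, so the effective period drifts as the work slows
down. Examples 6 and 7 below use fixed-rate pacing instead, subtracting the
elapsed work time from the interval. A tiny sketch of that form (do_work and
the 2-second interval are placeholders):

import time

def do_work():
    time.sleep(0.5)                          # stand-in for stats_task()

interval = 2
while True:
    start = time.time()
    do_work()
    elapsed = time.time() - start
    time.sleep(max(0, interval - elapsed))   # pace to ~one run per interval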
Example #5
def main():
    signal.signal(signal.SIGINT, sigint_handler)

    from iris.api import get_api, load_config

    config = load_config()
    app = get_api(config)

    server = config['server']
    print('LISTENING: %(host)s:%(port)d' % server)
    server = WSGIServer((server['host'], server['port']), app)

    if HAS_INOTIFY:
        fd = inotify.init()

        for dirname, subfolders, _ in os.walk('.'):
            if '.git' in subfolders:
                subfolders.remove('.git')
            inotify.add_watch(fd, dirname, inotify.IN_MODIFY)

        gevent.spawn(event_producer, fd, server)
    else:
        print('Missing inotify, disabling watch support.')
    server.serve_forever()
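
One subtlety in the watch setup above: removing '.git' from subfolders in
place is what stops os.walk from descending into it; rebinding subfolders to
a new list would have no effect, because os.walk only honors mutations to
the list it yielded. A standalone sketch of the same pruning:

import os

for dirname, subfolders, _ in os.walk('.'):
    if '.git' in subfolders:
        subfolders.remove('.git')    # in-place, so os.walk skips .git
    print(dirname)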
Example #6
File: sender.py Project: shoxv/iris
def main():
    global config
    config = load_config()

    is_master = config['sender'].get('is_master', False)
    logger.info('[-] bootstrapping sender (master: %s)...', is_master)
    init_sender(config)
    init_plugins(config.get('plugins', {}))
    init_vendors(config.get('vendors', []), config.get('applications', []))

    send_task = spawn(send)
    worker_tasks = [spawn(worker) for _ in range(100)]
    if is_master:
        if should_mock_gwatch_renewer:
            spawn(mock_gwatch_renewer)
        else:
            spawn(gwatch_renewer)
        spawn(prune_old_audit_logs_worker)

    rpc.init(config['sender'], dict(send_message=send_message))
    rpc.run(config['sender'])

    interval = 60
    logger.info('[*] sender bootstrapped')
    while True:
        runtime = int(time.time())
        logger.info('--> sender loop started.')

        cache.refresh()
        cache.purge()

        if is_master:
            try:
                escalate()
                deactivate()
                poll()
                aggregate(runtime)
            except Exception:
                metrics.incr('task_failure')
                logger.exception("Exception occured in main loop.")

        # check status for all background greenlets and respawn if necessary
        if not bool(send_task):
            logger.error("send task failed, %s", send_task.exception)
            metrics.incr('task_failure')
            send_task = spawn(send)
        bad_workers = []
        for i, task in enumerate(worker_tasks):
            if not bool(task):
                logger.error("worker task failed, %s", task.exception)
                metrics.incr('task_failure')
                bad_workers.append(i)
        for i in bad_workers:
            worker_tasks[i] = spawn(worker)

        spawn(metrics.emit)

        now = time.time()
        elapsed_time = now - runtime
        nap_time = max(0, interval - elapsed_time)
        logger.info(
            '--> sender loop finished in %s seconds - sleeping %s seconds',
            elapsed_time, nap_time)
        sleep(nap_time)
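
The dead-worker scan above respawns failed workers back into their original
slots, keeping the pool at a fixed size of 100. The same respawn-by-index
idea, condensed to one pass in a self-contained sketch, assuming gevent (the
crashing worker body is a placeholder):

from gevent import sleep, spawn

def worker():
    sleep(0.05)
    raise RuntimeError('worker died')    # stand-in for a real worker loop

worker_tasks = [spawn(worker) for _ in range(4)]
sleep(0.1)
for i, task in enumerate(worker_tasks):
    if not task:                         # finished or crashed
        worker_tasks[i] = spawn(worker)  # respawn into the same slot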
Example #7
def main():
    global config
    config = load_config()

    start_time = time.time()

    logger.info('[-] bootstrapping sender...')
    init_sender(config)
    init_plugins(config.get('plugins', {}))
    init_vendors(config.get('vendors', []), config.get('applications', []))

    send_task = spawn(send)
    worker_tasks = [spawn(worker) for _ in range(100)]

    rpc.init(config['sender'], dict(send_message=send_message))
    rpc.run(config['sender'])

    spawn(coordinator.update_forever)

    gwatch_renewer_task = None
    prune_audit_logs_task = None

    interval = 60
    logger.info('[*] sender bootstrapped')
    while True:
        runtime = int(time.time())
        logger.info('--> sender loop started.')

        cache.refresh()
        cache.purge()

        # If we're currently a master, ensure our master-greenlets are running
        # and we're doing the master duties
        if coordinator.am_i_master():
            if not bool(gwatch_renewer_task):
                if should_mock_gwatch_renewer:
                    gwatch_renewer_task = spawn(mock_gwatch_renewer)
                else:
                    gwatch_renewer_task = spawn(gwatch_renewer)

            if not bool(prune_audit_logs_task):
                prune_audit_logs_task = spawn(prune_old_audit_logs_worker)

            try:
                escalate()
                deactivate()
                poll()
                aggregate(runtime)
            except Exception:
                metrics.incr('task_failure')
                logger.exception("Exception occured in main loop.")

        # If we're not master, don't do the master tasks and make sure those other
        # greenlets are stopped if they're running
        else:
            logger.info('I am not the master so I am not doing master sender tasks.')

            # Stop these task greenlets if they're running. Technically this should
            # never happen because if we're the master, we'll likely only stop being the
            # master if our process exits, which would kill these greenlets anyway.
            if bool(gwatch_renewer_task):
                logger.info('I am not master anymore so stopping the gwatch renewer')
                gwatch_renewer_task.kill()

            if bool(prune_audit_logs_task):
                logger.info('I am not master anymore so stopping the audit logs worker')
                prune_audit_logs_task.kill()

        # check status for all background greenlets and respawn if necessary
        if not bool(send_task):
            logger.error("send task failed, %s", send_task.exception)
            metrics.incr('task_failure')
            send_task = spawn(send)
        bad_workers = []
        for i, task in enumerate(worker_tasks):
            if not bool(task):
                logger.error("worker task failed, %s", task.exception)
                metrics.incr('task_failure')
                bad_workers.append(i)
        for i in bad_workers:
            worker_tasks[i] = spawn(worker)

        now = time.time()
        metrics.set('sender_uptime', int(now - start_time))

        spawn(metrics.emit)

        elapsed_time = now - runtime
        nap_time = max(0, interval - elapsed_time)
        logger.info('--> sender loop finished in %s seconds - sleeping %s seconds',
                    elapsed_time, nap_time)
        sleep(nap_time)
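
Example 7 differs from Example 6 in that mastership can change at runtime,
so the master-only greenlets are spawned lazily and killed on demotion.
Greenlet.kill() is the real gevent API; everything else in this sketch
(master_only_job, the simulated is_master sequence) is a placeholder:

from gevent import sleep, spawn

def master_only_job():
    while True:
        sleep(1)                        # stand-in for gwatch_renewer etc.

task = None
for is_master in (True, True, False):   # simulated mastership transitions
    if is_master:
        if not task:                    # dead or never spawned: (re)spawn
            task = spawn(master_only_job)
    elif task:
        task.kill()                     # demoted: stop the master-only work
    sleep(0.1)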
Example #8
def main():
    global ldap_timeout
    global ldap_pagination_size
    global update_sleep
    config = load_config()
    metrics.init(config, 'iris-sync-targets', stats_reset)

    default_ldap_timeout = 60
    default_ldap_pagination_size = 400
    default_update_sleep = 0
    default_ldap_nap_time = 3600
    default_oncall_nap_time = 60

    ldap_timeout = int(config.get('sync_script_ldap_timeout', default_ldap_timeout))
    ldap_pagination_size = int(config.get('sync_script_ldap_pagination_size', default_ldap_pagination_size))
    update_sleep = float(config.get('target_update_pause', default_update_sleep))
    try:
        ldap_nap_time = int(config.get('sync_script_ldap_nap_time', default_ldap_nap_time))
        oncall_nap_time = int(config.get('sync_script_oncall_nap_time', default_oncall_nap_time))
    except ValueError:
        ldap_nap_time = default_ldap_nap_time
        oncall_nap_time = default_oncall_nap_time

    # check if we are using special connection settings for this script
    if config.get('db_target_sync'):
        engine = create_engine(config['db_target_sync']['conn']['str'] % config['db_target_sync']['conn']['kwargs'],
                               **config['db_target_sync']['kwargs'])
    else:
        engine = create_engine(config['db']['conn']['str'] % config['db']['conn']['kwargs'],
                               **config['db']['kwargs'])

    # Optionally, maintain an internal list of mailing lists from ldap that can also be
    # used as targets.
    ldap_lists = config.get('ldap_lists')

    # Initialize these to zero at the start of the app, and don't reset them at every
    # metrics interval
    metrics.set('users_found', 0)
    metrics.set('teams_found', 0)

    metrics.set('ldap_lists_found', 0)
    metrics.set('ldap_memberships_found', 0)

    metrics_task = spawn(metrics.emit_forever)
    oncall_task = spawn(oncall_sync_loop, config, engine, oncall_nap_time)

    if ldap_lists:
        if 'ldap_cert_path' in ldap_lists:
            ldap_cert_path = ldap_lists['ldap_cert_path']
            if not os.access(ldap_cert_path, os.R_OK):
                logger.error("Failed to read ldap_cert_path certificate")
                raise IOError('Cannot read certificate at %s' % ldap_cert_path)
            ldap_lists['cert_path'] = ldap_cert_path
        ldap_task = spawn(ldap_sync_loop, ldap_lists, engine, ldap_nap_time)

    while True:
        if not bool(metrics_task):
            metrics.incr('failed_tasks')
            logger.error('metrics task failed, %s', metrics_task.exception)
            metrics_task = spawn(metrics.emit_forever)

        if not bool(oncall_task):
            metrics.incr('failed_tasks')
            logger.error('oncall task failed, %s', oncall_task.exception)
            oncall_task = spawn(oncall_sync_loop, config, engine, oncall_nap_time)

        if ldap_lists:
            if not bool(ldap_task):
                metrics.incr('failed_tasks')
                logger.error('ldap task failed, %s', ldap_task.exception)
                ldap_task = spawn(ldap_sync_loop, ldap_lists, engine, ldap_nap_time)

        sleep(10)
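
Example 8 ends with the same supervision shape repeated three times: check
liveness, bump a failure metric, log the exception, respawn. A hedged
generalization of that loop, assuming gevent (supervise and the
name-to-factory table are hypothetical, and the metric bump is left out):

from gevent import sleep, spawn

def supervise(factories, poll_interval=10):
    # factories maps a task name to a zero-argument callable that starts it;
    # bind arguments like config or engine with functools.partial.
    tasks = {name: spawn(fn) for name, fn in factories.items()}
    while True:
        for name, task in tasks.items():
            if not task:                 # greenlet finished or crashed
                print('%s failed: %s' % (name, task.exception))
                tasks[name] = spawn(factories[name])
        sleep(poll_interval)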