Example #1
0
def main():
    """Entry point for the retention daemon: repeatedly purge messages and
    incidents older than the configured window, archiving them as it goes.
    """
    config = load_config()
    metrics.init(config, 'iris-process-retention', stats_reset)

    settings = config.get('retention', {})
    if not settings.get('enabled'):
        logger.info('Retention not enabled, bailing')
        return

    db_config = config['db']
    engine = create_engine(db_config['conn']['str'] % db_config['conn']['kwargs'],
                           **db_config['kwargs'])

    max_days = int(settings['max_days'])
    if max_days < 1:
        logger.error('Max days needs to at least be 1')
        return

    # The remaining knobs are required; a KeyError here indicates a bad config.
    cooldown_time = int(settings['cooldown_time'])
    batch_size = int(settings['batch_size'])
    run_interval = int(settings['run_interval'])
    archive_path = settings['archive_path']

    spawn(metrics.emit_forever)

    while True:
        logger.info('Starting retention loop (kill messages+incidents older than %d days)', max_days)
        try:
            process_retention(engine,
                              max_days=max_days,
                              cooldown_time=cooldown_time,
                              batch_size=batch_size,
                              archive_path=archive_path)
        except Exception:
            # Keep the daemon alive: one failed pass must not kill the loop.
            logger.exception('Hit problem while running retention')
        logger.info('Waiting %d seconds until next iteration..', run_interval)
        sleep(run_interval)
Example #2
0
def init_sender(config):
    """Initialize sender state: DB, cache, metrics and API caches, then apply
    the debug/skip flags from config. When sends are skipped, the vendor list
    is replaced with a dummy vendor so nothing real goes out.
    """
    db.init(config)
    cache.init(config)
    metrics.init(config, 'iris-sender', default_sender_metrics)
    api_cache.cache_priorities()
    api_cache.cache_applications()
    api_cache.cache_modes()

    global should_mock_gwatch_renewer, send_message
    debug = config['sender'].get('debug')
    if debug:
        logger.info('DEBUG MODE')
        should_mock_gwatch_renewer = True
    should_skip_send = bool(debug)
    # Top-level config switches can also force these on independently of debug.
    should_mock_gwatch_renewer = should_mock_gwatch_renewer or config.get(
        'skipgmailwatch', False)
    should_skip_send = should_skip_send or config.get('skipsend', False)

    if should_skip_send:
        # Route all messages through the no-op vendor.
        config['vendors'] = [{
            'type': 'iris_dummy',
            'name': 'iris dummy vendor'
        }]

    global quota
    quota = ApplicationQuota(db, cache.targets_for_role,
                             config['sender'].get('sender_app'))
Example #3
0
def main():
    """Entry point for the target sync daemon: periodically sync users and
    teams into the DB, restarting the metrics emitter greenlet if it dies.
    """
    config = load_config()
    metrics.init(config, 'iris-sync-targets', stats_reset)

    default_nap_time = 3600

    try:
        nap_time = int(config.get('sync_script_nap_time', default_nap_time))
    except ValueError:
        # Fall back to the default on a malformed config value.
        nap_time = default_nap_time

    engine = create_engine(
        config['db']['conn']['str'] % config['db']['conn']['kwargs'],
        **config['db']['kwargs'])

    # Initialize these to zero at the start of the app, and don't reset them at every
    # metrics interval
    metrics.set('users_found', 0)
    metrics.set('teams_found', 0)

    metrics_task = spawn(metrics.emit_forever)

    while True:
        # A dead greenlet is falsy; respawn the metrics emitter if it crashed.
        if not bool(metrics_task):
            logger.error('metrics task failed, %s', metrics_task.exception)
            metrics_task = spawn(metrics.emit_forever)

        sync(config, engine)
        # Fix: use lazy %-style logger args instead of eager interpolation,
        # consistent with the logger call above.
        logger.info('Sleeping for %d seconds', nap_time)
        sleep(nap_time)
Example #4
0
def main():
    """Entry point for the target sync daemon with optional LDAP mailing list
    support: sync oncall users/teams, then sync LDAP lists, in a loop.
    """
    global ldap_timeout
    config = load_config()
    metrics.init(config, 'iris-sync-targets', stats_reset)

    default_ldap_timeout = 20
    default_nap_time = 3600

    ldap_timeout = int(
        config.get('sync_script_ldap_timeout', default_ldap_timeout))
    try:
        nap_time = int(config.get('sync_script_nap_time', default_nap_time))
    except ValueError:
        # Fall back to the default on a malformed config value.
        nap_time = default_nap_time

    engine = create_engine(
        config['db']['conn']['str'] % config['db']['conn']['kwargs'],
        **config['db']['kwargs'])

    # Optionally, maintain an internal list of mailing lists from ldap that can also be
    # used as targets.
    ldap_lists = config.get('ldap_lists')

    # Initialize these to zero at the start of the app, and don't reset them at every
    # metrics interval
    metrics.set('users_found', 0)
    metrics.set('teams_found', 0)

    metrics.set('ldap_lists_found', 0)
    metrics.set('ldap_memberships_found', 0)

    metrics_task = spawn(metrics.emit_forever)

    while True:
        # A dead greenlet is falsy; respawn the metrics emitter if it crashed.
        if not bool(metrics_task):
            logger.error('metrics task failed, %s', metrics_task.exception)
            metrics_task = spawn(metrics.emit_forever)

        sync_from_oncall(config, engine)

        # Do ldap mailing list sync *after* we do the normal sync, to ensure we have the users
        # which will be in ldap already populated.
        if ldap_lists:

            if 'ldap_cert_path' in ldap_lists:
                ldap_cert_path = ldap_lists['ldap_cert_path']
                if not os.access(ldap_cert_path, os.R_OK):
                    logger.error("Failed to read ldap_cert_path certificate")
                    raise IOError
                else:
                    ldap_lists['cert_path'] = ldap_cert_path
            list_run_start = time.time()
            sync_ldap_lists(ldap_lists, engine)
            logger.info('Ldap mailing list sync took %.2f seconds',
                        time.time() - list_run_start)

        # Fix: use lazy %-style logger args instead of eager interpolation,
        # consistent with every other logger call in this function.
        logger.info('Sleeping for %d seconds', nap_time)
        sleep(nap_time)
Example #5
0
def main():
    """Entry point for the OWA365 mail poller: authenticate to Exchange each
    cycle, poll the mailbox, and emit metrics including process uptime.
    """
    boot_time = time.time()
    config = load_config()

    metrics.init(config, 'iris-owa-sync', default_metrics)

    owaconfig = config.get('owa')

    if not owaconfig:
        logger.critical('Missing OWA configs')
        sys.exit(1)

    api_host = owaconfig.get('api_host', 'http://localhost:16649')
    iris_client = IrisClient(api_host, 0, owaconfig['iris_app'],
                             owaconfig['iris_app_key'])

    proxies = owaconfig.get('proxies')

    # only way to configure a proxy is to monkey-patch (http adapter) a monkey-patch (baseprotocol) :/
    if proxies:
        UseProxyHttpAdapter._my_proxies = proxies
        exchangelib.protocol.BaseProtocol.HTTP_ADAPTER_CLS = UseProxyHttpAdapter

    creds = exchangelib.Credentials(**owaconfig['credentials'])

    try:
        nap_time = int(owaconfig.get('sleep_interval', 60))
    except ValueError:
        # Fall back to the default on a malformed config value.
        nap_time = 60

    while True:
        start_time = time.time()
        message_count = 0

        # Re-authenticate every cycle; a failure skips polling but keeps the loop alive.
        try:
            config = exchangelib.Configuration(credentials=creds,
                                               **owaconfig['config'])
            account = exchangelib.Account(config=config,
                                          access_type=exchangelib.DELEGATE,
                                          **owaconfig['account'])
        except (exchangelib.errors.EWSError,
                requests.exceptions.RequestException):
            logger.exception('Failed authenticating to OWA365')
            metrics.incr('owa_api_failure_count')
        else:
            logger.info('Receiving mail on behalf of %s',
                        owaconfig['account'].get('primary_smtp_address'))
            message_count = poll(account, iris_client)

        now = time.time()
        run_time = now - start_time
        # Fix: '%2.f' had an empty precision (prints zero decimals); the intent,
        # matching other log lines in this file, is two decimal places.
        logger.info(
            'Last run took %.2f seconds and processed %s messages. Waiting %s seconds until next poll..',
            run_time, message_count, nap_time)
        metrics.set('uptime', now - boot_time)
        metrics.emit()
        sleep(nap_time)
Example #6
0
def main():
    """Run the application stats calculation in an endless loop, pausing
    a configured interval between iterations."""
    config = load_config()
    metrics.init(config, 'iris-application-stats', stats_reset)
    settings = config.get('app-stats', {})
    # Required setting; a KeyError here indicates a bad config.
    run_interval = int(settings['run_interval'])
    spawn(metrics.emit_forever)

    db.init(config)
    while True:
        logger.info('Starting app stats calculation loop')
        stats_task()
        logger.info('Waiting %d seconds until next iteration..', run_interval)
        sleep(run_interval)
Example #7
0
def init_sender(config):
    """Wire up the sender process: shutdown signal handlers, DB, cache,
    metrics, API caches, debug/skip flags, quota, and the cluster coordinator
    (ZK-backed when a zookeeper cluster is configured, config-driven otherwise).
    """
    # Shut down gracefully on the usual termination signals.
    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGQUIT):
        gevent.signal(sig, sender_shutdown)

    sender_config = config['sender']
    api_host = sender_config.get('api_host', 'http://localhost:16649')
    db.init(config)
    cache.init(api_host, config)
    metrics.init(config, 'iris-sender', default_sender_metrics)
    api_cache.cache_priorities()
    api_cache.cache_applications()
    api_cache.cache_modes()

    global should_mock_gwatch_renewer, send_message
    debug = sender_config.get('debug')
    if debug:
        logger.info('DEBUG MODE')
        should_mock_gwatch_renewer = True
    should_skip_send = bool(debug)
    # Top-level config switches can also force these on independently of debug.
    should_mock_gwatch_renewer = should_mock_gwatch_renewer or config.get(
        'skipgmailwatch', False)
    should_skip_send = should_skip_send or config.get('skipsend', False)

    if should_skip_send:
        # Route all messages through the no-op vendor.
        config['vendors'] = [{
            'type': 'iris_dummy',
            'name': 'iris dummy vendor'
        }]

    global quota
    quota = ApplicationQuota(db, cache.targets_for_role,
                             sender_config.get('sender_app'))

    global coordinator
    zk_hosts = sender_config.get('zookeeper_cluster', False)

    if zk_hosts:
        logger.info('Initializing coordinator with ZK: %s', zk_hosts)
        coordinator = Coordinator(zk_hosts=zk_hosts,
                                  hostname=socket.gethostname(),
                                  port=sender_config.get('port', 2321),
                                  join_cluster=True)
    else:
        logger.info(
            'ZK cluster info not specified. Using master status from config')
        coordinator = NonClusterCoordinator(
            is_master=sender_config.get('is_master', True),
            slaves=sender_config.get('slaves', []))
Example #8
0
def main():
    """Entry point for the OWA mail poller (autodiscover variant): connect to
    the mailbox once, then poll it in a loop with a configurable nap interval.
    """
    config = load_config()

    metrics.init(config, 'iris-owa-sync', default_metrics)

    owaconfig = config.get('owa')

    if not owaconfig:
        logger.critical('Missing OWA configs')
        sys.exit(1)

    api_host = owaconfig.get('api_host', 'http://localhost:16649')
    iris_client = IrisClient(api_host, 0, owaconfig['iris_app'],
                             owaconfig['iris_app_key'])

    spawn(metrics.emit_forever)

    creds = Credentials(**owaconfig['credentials'])

    account = Account(primary_smtp_address=owaconfig['smtp_address'],
                      credentials=creds,
                      autodiscover=True,
                      access_type=DELEGATE)
    logger.info('Receiving mail on behalf of %s', owaconfig['smtp_address'])

    try:
        nap_time = int(owaconfig.get('sleep_interval', 60))
    except ValueError:
        # Fall back to the default on a malformed config value.
        nap_time = 60

    while True:
        start_time = time.time()
        message_count = poll(account, iris_client)
        run_time = time.time() - start_time
        # Fix: '%2.f' had an empty precision (prints zero decimals); the intent,
        # matching other log lines in this file, is two decimal places.
        logger.info(
            'Last run took %.2f seconds and processed %s messages. Waiting %s seconds until next poll..',
            run_time, message_count, nap_time)
        sleep(nap_time)
Example #9
0
def main():
    """Supervisor for the target-sync daemons: spawn the metrics emitter,
    the oncall sync loop, and (optionally) the LDAP list sync loop as
    greenlets, then restart any greenlet that dies.
    """
    global ldap_timeout
    global ldap_pagination_size
    global update_sleep
    config = load_config()
    metrics.init(config, 'iris-sync-targets', stats_reset)

    default_ldap_timeout = 60
    default_ldap_pagination_size = 400
    default_update_sleep = 0
    default_ldap_nap_time = 3600
    default_oncall_nap_time = 60

    ldap_timeout = int(config.get('sync_script_ldap_timeout', default_ldap_timeout))
    ldap_pagination_size = int(config.get('sync_script_ldap_pagination_size', default_ldap_pagination_size))
    update_sleep = float(config.get('target_update_pause', default_update_sleep))
    try:
        ldap_nap_time = int(config.get('sync_script_ldap_nap_time', default_ldap_nap_time))
        oncall_nap_time = int(config.get('sync_script_oncall_nap_time', default_oncall_nap_time))
    except ValueError:
        # Fall back to the defaults on a malformed config value.
        ldap_nap_time = default_ldap_nap_time
        oncall_nap_time = default_oncall_nap_time

    # check if we are using special connection settings for this script
    if config.get('db_target_sync'):
        engine = create_engine(config['db_target_sync']['conn']['str'] % config['db_target_sync']['conn']['kwargs'],
                               **config['db_target_sync']['kwargs'])
    else:
        engine = create_engine(config['db']['conn']['str'] % config['db']['conn']['kwargs'],
                               **config['db']['kwargs'])

    # Optionally, maintain an internal list of mailing lists from ldap that can also be
    # used as targets.
    ldap_lists = config.get('ldap_lists')

    # Initialize these to zero at the start of the app, and don't reset them at every
    # metrics interval
    metrics.set('users_found', 0)
    metrics.set('teams_found', 0)

    metrics.set('ldap_lists_found', 0)
    metrics.set('ldap_memberships_found', 0)

    metrics_task = spawn(metrics.emit_forever)
    oncall_task = spawn(oncall_sync_loop, config, engine, oncall_nap_time)

    if ldap_lists:
        if 'ldap_cert_path' in ldap_lists:
            ldap_cert_path = ldap_lists['ldap_cert_path']
            if not os.access(ldap_cert_path, os.R_OK):
                logger.error("Failed to read ldap_cert_path certificate")
                raise IOError
            else:
                ldap_lists['cert_path'] = ldap_cert_path
        ldap_task = spawn(ldap_sync_loop, ldap_lists, engine, ldap_nap_time)

    # Watchdog: a dead greenlet is falsy; respawn anything that crashed.
    while True:
        if not bool(metrics_task):
            metrics.incr('failed_tasks')
            logger.error('metrics task failed, %s', metrics_task.exception)
            # Fix: reassign the handle. Previously the new emitter was spawned
            # without keeping a reference, so the stale dead greenlet stayed
            # falsy and a fresh emitter was spawned every 10 seconds forever.
            metrics_task = spawn(metrics.emit_forever)

        if not bool(oncall_task):
            metrics.incr('failed_tasks')
            logger.error('oncall task failed, %s', oncall_task.exception)
            # Fix: assign to oncall_task, not metrics_task. The original
            # clobbered the metrics handle and left the dead oncall greenlet
            # in place, so the oncall loop was respawned every 10 seconds.
            oncall_task = spawn(oncall_sync_loop, config, engine, oncall_nap_time)

        if ldap_lists:
            if not bool(ldap_task):
                metrics.incr('failed_tasks')
                logger.error('ldap task failed, %s', ldap_task.exception)
                ldap_task = spawn(ldap_sync_loop, ldap_lists, engine, ldap_nap_time)

        sleep(10)