Example #1
0
        def on_client_ready(_):
            """
            Callback fired once the Kazoo client has connected: wires up
            everything that depends on ZooKeeper (full dispatcher,
            scheduler, converger, and ordered shutdown of the client).

            :param _: ignored result of the client-start Deferred
            """
            # Build the full Effect dispatcher now that kz_client exists
            dispatcher = get_full_dispatcher(reactor, authenticator, log,
                                             get_service_configs(config),
                                             kz_client, store, supervisor,
                                             cassandra_cluster)
            # Setup scheduler service after starting
            scheduler = setup_scheduler(parent, dispatcher, store, kz_client)
            health_checker.checks['scheduler'] = scheduler.health_check
            otter.scheduler = scheduler
            # Give dispatcher to Otter REST object
            otter.dispatcher = dispatcher
            # Set the client after starting
            # NOTE: There is small amount of time when the start is
            # not finished and the kz_client is not set in which case
            # policy execution and group delete will fail
            store.kz_client = kz_client
            # Setup kazoo to stop when shutting down
            # (kz_client.stop is invoked via call_after_supervisor)
            parent.addService(FunctionalService(
                stop=partial(call_after_supervisor,
                             kz_client.stop, supervisor)))

            # Start the converger with config-driven tunables,
            # falling back to defaults when unset
            setup_converger(
                parent, kz_client, dispatcher,
                config_value('converger.interval') or 10,
                config_value('converger.build_timeout') or 3600,
                config_value('converger.limited_retry_iterations') or 10,
                config_value('converger.step_limits') or {})
Example #2
0
 def test_cloudfeeds_optional(self):
     """
     No cloud feeds service config is returned when 'cloudfeeds' is
     absent from the configuration.
     """
     self.config.pop('cloudfeeds')
     configs = get_service_configs(self.config)
     self.assertNotIn(ServiceType.CLOUD_FEEDS, configs)
Example #3
0
    def test_cloudfeeds_setup(self):
        """
        Cloud feeds observer is setup if it is there in config
        """
        self.addCleanup(set_fanout, None)
        self.assertEqual(get_fanout(), None)

        config = deepcopy(test_config)
        config['cloudfeeds'] = {
            'service': 'cloudFeeds', 'tenant_id': 'tid', 'url': 'url'}
        makeService(config)

        expected_configs = get_service_configs(config)
        expected_configs[ServiceType.CLOUD_FEEDS] = {'url': 'url'}

        observers = get_fanout().subobservers
        self.assertEqual(len(observers), 1)
        cf_observer = observers[0]
        expected = CloudFeedsObserver(
            reactor=self.reactor,
            authenticator=matches(IsInstance(CachingAuthenticator)),
            tenant_id='tid',
            region='ord',
            service_configs=expected_configs)
        self.assertEqual(cf_observer, expected)

        # single tenant authenticator is created
        auth = cf_observer.authenticator
        self.assertIsInstance(
            auth._authenticator._authenticator._authenticator,
            SingleTenantAuthenticator)
Example #4
0
 def test_metrics_optional(self):
     """
     No metrics service config is returned when 'metrics' is absent
     from the configuration.
     """
     self.config.pop('metrics')
     configs = get_service_configs(self.config)
     self.assertNotIn(ServiceType.CLOUD_METRICS_INGEST, configs)
Example #5
0
 def test_metrics_optional(self):
     """
     The metrics service is left out of the returned configs when the
     config has no 'metrics' entry.
     """
     del self.config['metrics']
     result = get_service_configs(self.config)
     self.assertNotIn(ServiceType.CLOUD_METRICS_INGEST, result)
Example #6
0
 def test_takes_from_config(self):
     """
     Returns mapping based on info from config
     """
     def dfw(name, **extra):
         # Build a DFW service entry, optionally with extra keys
         entry = {'name': name, 'region': 'DFW'}
         entry.update(extra)
         return entry

     expected = {
         ServiceType.CLOUD_SERVERS: dfw('nova'),
         ServiceType.CLOUD_LOAD_BALANCERS: dfw('clb'),
         ServiceType.CLOUD_ORCHESTRATION: dfw('orch'),
         ServiceType.RACKCONNECT_V3: dfw('rc'),
         ServiceType.CLOUD_METRICS_INGEST: {'name': 'm', 'region': 'IAD'},
         ServiceType.CLOUD_FEEDS: dfw('cf', url='url'),
     }
     self.assertEqual(get_service_configs(self.config), expected)
Example #7
0
 def test_takes_from_config(self):
     """
     Returns mapping based on info from config
     """
     expected = {
         ServiceType.CLOUD_SERVERS: {'name': 'nova', 'region': 'DFW'},
         ServiceType.CLOUD_LOAD_BALANCERS: {'name': 'clb',
                                            'region': 'DFW'},
         ServiceType.CLOUD_ORCHESTRATION: {'name': 'orch',
                                           'region': 'DFW'},
         ServiceType.RACKCONNECT_V3: {'name': 'rc', 'region': 'DFW'},
         ServiceType.CLOUD_METRICS_INGEST: {'name': 'm',
                                            'region': 'IAD'},
         ServiceType.CLOUD_FEEDS: {'url': 'cf_url'},
         ServiceType.CLOUD_FEEDS_CAP: {'url': 'cap_url'},
     }
     self.assertEqual(get_service_configs(self.config), expected)
Example #8
0
    def test_cloudfeeds_setup(self):
        """
        Cloud feeds observer is setup if it is there in config
        """
        self.addCleanup(set_fanout, None)
        self.assertEqual(get_fanout(), None)

        conf = deepcopy(test_config)
        conf['cloudfeeds'] = dict(
            service='cloudFeeds', tenant_id='tid', url='url')
        makeService(conf)
        service_configs = get_service_configs(conf)
        service_configs[ServiceType.CLOUD_FEEDS] = {'url': 'url'}

        subobservers = get_fanout().subobservers
        self.assertEqual(len(subobservers), 1)
        observer = subobservers[0]
        self.assertEqual(
            observer,
            CloudFeedsObserver(
                reactor=self.reactor,
                authenticator=matches(IsInstance(CachingAuthenticator)),
                tenant_id='tid',
                region='ord',
                service_configs=service_configs))

        # single tenant authenticator is created
        self.assertIsInstance(
            observer.authenticator
            ._authenticator._authenticator._authenticator,
            SingleTenantAuthenticator)
Example #9
0
 def test_cloudfeeds_optional(self):
     """
     Does not return cloud feeds service if the config is not there
     """
     self.config.pop('cloudfeeds')
     self.assertNotIn(
         ServiceType.CLOUD_FEEDS, get_service_configs(self.config))
Example #10
0
def set_desired_to_actual(groups, reactor, store, cass_client, authenticator,
                          conf):
    """
    Run ``set_desired_to_actual_group`` for every group and gather the
    resulting Deferreds into one.
    """
    dispatcher = get_full_dispatcher(
        reactor, authenticator, mock_log(), get_service_configs(conf),
        "kzclient", store, "supervisor", cass_client)
    deferreds = [set_desired_to_actual_group(dispatcher, cass_client, group)
                 for group in groups]
    return gatherResults(deferreds)
Example #11
0
def collect_metrics(reactor, config, log, client=None, authenticator=None,
                    _print=False):
    """
    Start collecting the metrics

    :param reactor: Twisted reactor
    :param dict config: Configuration got from file containing all info
        needed to collect metrics
    :param :class:`silverberg.client.CQLClient` client:
        Optional cassandra client. A new client will be created
        if this is not given and disconnected before returing
    :param :class:`otter.auth.IAuthenticator` authenticator:
        Optional authenticator. A new authenticator will be created
        if this is not given
    :param bool _print: Should debug messages be printed to stdout?

    :return: :class:`Deferred` fired with ``list`` of `GroupMetrics`

    NOTE(review): this body is a generator using ``defer.returnValue``;
    presumably an ``inlineCallbacks``-style decorator is applied outside
    this view -- confirm at the definition site.
    """
    # Create a client only when one was not supplied; `client` being
    # falsy is also how we later know that we own the connection.
    _client = client or connect_cass_servers(reactor, config['cassandra'])
    authenticator = authenticator or generate_authenticator(reactor,
                                                            config['identity'])
    store = CassScalingGroupCollection(_client, reactor, 1000)
    dispatcher = get_dispatcher(reactor, authenticator, log,
                                get_service_configs(config), store)

    # calculate metrics on launch_server and non-paused groups
    groups = yield perform(dispatcher, Effect(GetAllValidGroups()))
    groups = [
        g for g in groups
        if json.loads(g["launch_config"]).get("type") == "launch_server" and
        (not g.get("paused", False))]
    # Bucket the remaining groups by tenant id
    tenanted_groups = groupby(lambda g: g["tenantId"], groups)
    group_metrics = yield get_all_metrics(
        dispatcher, tenanted_groups, log, _print=_print)

    # Add to cloud metrics, only when a "metrics" config section exists
    metr_conf = config.get("metrics", None)
    if metr_conf is not None:
        eff = add_to_cloud_metrics(
            metr_conf['ttl'], config['region'], group_metrics,
            len(tenanted_groups), config, log, _print)
        # Scope the effect to the configured metrics tenant
        eff = Effect(TenantScope(eff, metr_conf['tenant_id']))
        yield perform(dispatcher, eff)
        log.msg('added to cloud metrics')
        if _print:
            print('added to cloud metrics')
    if _print:
        # Most-divergent groups (largest |desired - actual|) first
        group_metrics.sort(key=lambda g: abs(g.desired - g.actual),
                           reverse=True)
        print('groups sorted as per divergence')
        print('\n'.join(map(str, group_metrics)))

    # Disconnect only if we created the client
    if not client:
        yield _client.disconnect()

    defer.returnValue(group_metrics)
Example #12
0
 def test_cloudfeeds_optional(self):
     """
     Does not return cloud feeds services if the config is not there
     """
     del self.config['cloudfeeds']
     del self.config['terminator']
     service_configs = get_service_configs(self.config)
     for service_type in (ServiceType.CLOUD_FEEDS,
                          ServiceType.CLOUD_FEEDS_CAP):
         self.assertNotIn(service_type, service_configs)
Example #13
0
def groups_steps(groups, reactor, store, cass_client, authenticator, conf):
    """
    Return [(group, steps)] list
    """
    dispatcher = get_full_dispatcher(
        reactor, authenticator, mock_log(), get_service_configs(conf),
        "kzclient", store, "supervisor", cass_client)
    steps_effect = parallel(map(group_steps, groups))
    d = perform(dispatcher, steps_effect)
    return d.addCallback(lambda all_steps: zip(groups, all_steps))
Example #14
0
def groups_steps(groups, reactor, store, cass_client, authenticator, conf):
    """
    Return [(group, steps)] list
    """
    eff = parallel([group_steps(group) for group in groups])
    disp = get_full_dispatcher(reactor, authenticator, mock_log(),
                               get_service_configs(conf), "kzclient", store,
                               "supervisor", cass_client)

    def pair_with_groups(steps):
        # Re-associate each steps result with its originating group
        return zip(groups, steps)

    return perform(disp, eff).addCallback(pair_with_groups)
Example #15
0
def collect_metrics(reactor, config, log, client=None, authenticator=None,
                    _print=False):
    """
    Start collecting the metrics

    :param reactor: Twisted reactor
    :param dict config: Configuration got from file containing all info
        needed to collect metrics
    :param :class:`silverberg.client.CQLClient` client:
        Optional cassandra client. A new client will be created
        if this is not given and disconnected before returing
    :param :class:`otter.auth.IAuthenticator` authenticator:
        Optional authenticator. A new authenticator will be created
        if this is not given
    :param bool _print: Should debug messages be printed to stdout?

    :return: :class:`Deferred` fired with ``list`` of `GroupMetrics`

    NOTE(review): this body is a generator using ``defer.returnValue``;
    presumably an ``inlineCallbacks``-style decorator is applied outside
    this view -- confirm at the definition site.
    """
    # Tenant ids configured for convergence (empty list when unset)
    convergence_tids = config.get('convergence-tenants', [])
    # Create a client only when one was not supplied; `client` being
    # falsy is also how we later know that we own the connection.
    _client = client or connect_cass_servers(reactor, config['cassandra'])
    authenticator = authenticator or generate_authenticator(reactor,
                                                            config['identity'])
    store = CassScalingGroupCollection(_client, reactor, 1000)
    dispatcher = get_dispatcher(reactor, authenticator, log,
                                get_service_configs(config), store)

    # calculate metrics
    # File remembering the last tenant processed; configurable via
    # metrics.last_tenant_fpath
    fpath = get_in(["metrics", "last_tenant_fpath"], config,
                   default="last_tenant.txt")
    tenanted_groups = yield perform(
        dispatcher,
        get_todays_scaling_groups(convergence_tids, fpath))
    group_metrics = yield get_all_metrics(
        dispatcher, tenanted_groups, log, _print=_print)

    # Add to cloud metrics, only when a "metrics" config section exists
    metr_conf = config.get("metrics", None)
    if metr_conf is not None:
        eff = add_to_cloud_metrics(
            metr_conf['ttl'], config['region'], group_metrics,
            len(tenanted_groups), log, _print)
        # Scope the effect to the configured metrics tenant
        eff = Effect(TenantScope(eff, metr_conf['tenant_id']))
        yield perform(dispatcher, eff)
        log.msg('added to cloud metrics')
        if _print:
            print('added to cloud metrics')
    if _print:
        # Most-divergent groups (largest |desired - actual|) first
        group_metrics.sort(key=lambda g: abs(g.desired - g.actual),
                           reverse=True)
        print('groups sorted as per divergence', *group_metrics, sep='\n')

    # Disconnect only if we created the client
    if not client:
        yield _client.disconnect()

    defer.returnValue(group_metrics)
Example #16
0
def makeService(config):
    """
    Set up the otter-api service.

    Builds the full service hierarchy under one ``MultiService`` parent:
    cassandra cluster client, scaling-group store, supervisor, health
    checker, REST API site, optional admin site, optional cloud-feeds
    observer, and -- when zookeeper is configured -- the Kazoo client
    with the scheduler and converger wired up once it connects.

    :param dict config: parsed configuration dictionary
    :return: the parent :class:`MultiService` containing all services
    """
    config = dict(config)
    set_config_data(config)

    parent = MultiService()

    region = config_value('region')

    seed_endpoints = [
        clientFromString(reactor, str(host))
        for host in config_value('cassandra.seed_hosts')]

    # Round-robin over the seed hosts, with per-query timeout
    # (default 30s) and query logging
    cassandra_cluster = LoggingCQLClient(
        TimingOutCQLClient(
            reactor,
            RoundRobinCassandraCluster(
                seed_endpoints,
                config_value('cassandra.keyspace'),
                disconnect_on_cancel=True),
            config_value('cassandra.timeout') or 30),
        log.bind(system='otter.silverberg'))

    store = CassScalingGroupCollection(
        cassandra_cluster, reactor, config_value('limits.absolute.maxGroups'))
    admin_store = CassAdmin(cassandra_cluster)

    # Optional bobby integration
    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    service_configs = get_service_configs(config)

    authenticator = generate_authenticator(reactor, config['identity'])
    supervisor = SupervisorService(authenticator, region, coiterate,
                                   service_configs)
    supervisor.setServiceParent(parent)

    set_supervisor(supervisor)

    health_checker = HealthChecker(reactor, {
        'store': getattr(store, 'health_check', None),
        'kazoo': store.kazoo_health_check,
        'supervisor': supervisor.health_check
    })

    # Setup cassandra cluster to disconnect when otter shuts down.
    # (The former ``if 'cassandra_cluster' in locals()`` guard was
    # removed: it was always true since the client is created
    # unconditionally above.)
    parent.addService(FunctionalService(stop=partial(
        call_after_supervisor, cassandra_cluster.disconnect, supervisor)))

    otter = Otter(store, region, health_checker.health_check)
    site = Site(otter.app.resource())
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(parent)

    # Setup admin service on its own port, when configured
    admin_port = config_value('admin')
    if admin_port:
        admin = OtterAdmin(admin_store)
        admin_site = Site(admin.app.resource())
        admin_site.displayTracebacks = False
        admin_service = service(str(admin_port), admin_site)
        admin_service.setServiceParent(parent)

    # setup cloud feed observer with a single-tenant authenticator
    cf_conf = config.get('cloudfeeds', None)
    if cf_conf is not None:
        id_conf = deepcopy(config['identity'])
        id_conf['strategy'] = 'single_tenant'
        add_to_fanout(CloudFeedsObserver(
            reactor=reactor,
            authenticator=generate_authenticator(reactor, id_conf),
            tenant_id=cf_conf['tenant_id'],
            region=region,
            service_configs=service_configs))

    # Setup Kazoo client
    if config_value('zookeeper'):
        threads = config_value('zookeeper.threads') or 10
        disable_logs = config_value('zookeeper.no_logs')
        threadpool = ThreadPool(maxthreads=threads)
        sync_kz_client = KazooClient(
            hosts=config_value('zookeeper.hosts'),
            # Keep trying to connect until the end of time with
            # max interval of 10 minutes
            connection_retry=dict(max_tries=-1, max_delay=600),
            logger=None if disable_logs else TxLogger(log.bind(system='kazoo'))
        )
        kz_client = TxKazooClient(reactor, threadpool, sync_kz_client)
        # Don't timeout. Keep trying to connect forever
        d = kz_client.start(timeout=None)

        def on_client_ready(_):
            # Fired once the Kazoo client has connected: wire up every
            # service that depends on ZooKeeper.
            dispatcher = get_full_dispatcher(reactor, authenticator, log,
                                             get_service_configs(config),
                                             kz_client, store, supervisor,
                                             cassandra_cluster)
            # Setup scheduler service after starting
            scheduler = setup_scheduler(parent, dispatcher, store, kz_client)
            health_checker.checks['scheduler'] = scheduler.health_check
            otter.scheduler = scheduler
            # Give dispatcher to Otter REST object
            otter.dispatcher = dispatcher
            # Set the client after starting
            # NOTE: There is small amount of time when the start is
            # not finished and the kz_client is not set in which case
            # policy execution and group delete will fail
            store.kz_client = kz_client
            # Setup kazoo to stop when shutting down
            parent.addService(FunctionalService(
                stop=partial(call_after_supervisor,
                             kz_client.stop, supervisor)))

            # Start the converger with config-driven tunables,
            # falling back to defaults when unset
            setup_converger(
                parent, kz_client, dispatcher,
                config_value('converger.interval') or 10,
                config_value('converger.build_timeout') or 3600,
                config_value('converger.limited_retry_iterations') or 10,
                config_value('converger.step_limits') or {})

        d.addCallback(on_client_ready)
        d.addErrback(log.err, 'Could not start TxKazooClient')

    return parent