Example #1
def webhook_migrate(reactor, conn, args):
    """
    Migrate webhook indexes to the webhook keys table
    """
    store = CassScalingGroupCollection(None, None, 3)
    eff = store.get_webhook_index_only().on(store.add_webhook_keys)
    return perform(get_working_cql_dispatcher(reactor, conn), eff)
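A note on the pattern: get_webhook_index_only() returns an Effect, .on() chains a callback onto its result (much like Deferred.addCallback), and perform() runs the chain against a dispatcher, yielding a Twisted Deferred. A minimal conceptual sketch, assuming the effect/txeffect libraries these snippets build on:

# Conceptual sketch only; the names are from the snippet above.
eff = store.get_webhook_index_only().on(store.add_webhook_keys)
d = perform(get_working_cql_dispatcher(reactor, conn), eff)
# d fires once the index rows have been read and the corresponding
# webhook keys written.
d.addCallback(lambda _: print("webhook keys migrated"))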
Example #2
def webhook_index(reactor, conn, args):
    """
    Show webhook indexes that are not yet in the webhook keys table
    """
    store = CassScalingGroupCollection(None, None, 3)
    eff = store.get_webhook_index_only()
    return perform(get_working_cql_dispatcher(reactor, conn), eff)
Example #3
@inlineCallbacks
def main(reactor):
    parser = ArgumentParser(
        description="Trigger convergence on all/some groups")
    parser.add_argument(
        "-c", dest="config", required=True,
        help="Config file containing identity and cassandra info")
    parser.add_argument(
        "-g", nargs="+", dest="group",
        help=("Group(s) to trigger. Should be in tenantId:groupId form. "
              "If not provided convergence will be triggerred on all groups "
              "in CASS"))
    parser.add_argument("-l", dest="limit", type=int, default=10,
                        help="Concurrency limit. Defaults to 10")

    parsed = parser.parse_args()
    conf = json.load(open(parsed.config))

    cass_client = connect_cass_servers(reactor, conf["cassandra"])
    authenticator = generate_authenticator(reactor, conf["identity"])
    store = CassScalingGroupCollection(cass_client, reactor, 1000)
    if parsed.group:
        groups = [g.split(":") for g in parsed.group]
        groups = [{"tenantId": tid, "groupId": gid} for tid, gid in groups]
    else:
        groups = yield store.get_all_groups()

    yield trigger_convergence_groups(authenticator, conf["region"], groups,
                                     parsed.limit)
    yield cass_client.disconnect()
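A script built around a main(reactor) coroutine like this is typically driven by Twisted's task.react, which starts the reactor, calls main, and exits once the returned Deferred fires. A minimal sketch, assuming the @inlineCallbacks decorator above:

from twisted.internet import task

if __name__ == '__main__':
    # Runs the reactor until main(reactor)'s Deferred fires, then exits.
    task.react(main)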
Example #4
@inlineCallbacks
def insert_deleting_false(reactor, conn, args):
    """
    Insert false into every group's "deleting" column
    """
    store = CassScalingGroupCollection(conn, None, 3)
    groups = yield store.get_scaling_group_rows()
    query = ('INSERT INTO scaling_group ("tenantId", "groupId", deleting) '
             'VALUES (:tenantId{i}, :groupId{i}, false);')
    queries, params = [], {}
    for i, group in enumerate(groups):
        queries.append(query.format(i=i))
        params['tenantId{}'.format(i)] = group['tenantId']
        params['groupId{}'.format(i)] = group['groupId']
    yield conn.execute(batch(queries), params, ConsistencyLevel.ONE)
    returnValue(None)
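For context, batch() here folds the per-group INSERTs into a single CQL batch statement. The real helper lives elsewhere in the codebase; a hypothetical sketch of what it does:

def batch(statements):
    # Wrap individual CQL statements in one atomic batch. The named
    # parameters (:tenantId0, :groupId0, ...) are unique per statement,
    # so a single params dict serves the whole batch.
    return 'BEGIN BATCH ' + ' '.join(statements) + ' APPLY BATCH;'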
Example #5
@defer.inlineCallbacks
def collect_metrics(reactor, config, log, client=None, authenticator=None,
                    _print=False):
    """
    Start collecting the metrics

    :param reactor: Twisted reactor
    :param dict config: Configuration loaded from the config file, containing
        all info needed to collect metrics
    :param :class:`silverberg.client.CQLClient` client:
        Optional cassandra client. A new client will be created
        if this is not given, and disconnected before returning
    :param :class:`otter.auth.IAuthenticator` authenticator:
        Optional authenticator. A new authenticator will be created
        if this is not given
    :param bool _print: Should debug messages be printed to stdout?

    :return: :class:`Deferred` fired with ``list`` of `GroupMetrics`
    """
    _client = client or connect_cass_servers(reactor, config['cassandra'])
    authenticator = authenticator or generate_authenticator(reactor,
                                                            config['identity'])
    store = CassScalingGroupCollection(_client, reactor, 1000)
    dispatcher = get_dispatcher(reactor, authenticator, log,
                                get_service_configs(config), store)

    # calculate metrics on launch_server and non-paused groups
    groups = yield perform(dispatcher, Effect(GetAllValidGroups()))
    groups = [
        g for g in groups
        if json.loads(g["launch_config"]).get("type") == "launch_server" and
        (not g.get("paused", False))]
    tenanted_groups = groupby(lambda g: g["tenantId"], groups)
    group_metrics = yield get_all_metrics(
        dispatcher, tenanted_groups, log, _print=_print)

    # Add to cloud metrics
    metr_conf = config.get("metrics", None)
    if metr_conf is not None:
        eff = add_to_cloud_metrics(
            metr_conf['ttl'], config['region'], group_metrics,
            len(tenanted_groups), config, log, _print)
        eff = Effect(TenantScope(eff, metr_conf['tenant_id']))
        yield perform(dispatcher, eff)
        log.msg('added to cloud metrics')
        if _print:
            print('added to cloud metrics')
    if _print:
        group_metrics.sort(key=lambda g: abs(g.desired - g.actual),
                           reverse=True)
        print('groups sorted as per divergence')
        print('\n'.join(map(str, group_metrics)))

    # Disconnect only if we created the client
    if not client:
        yield _client.disconnect()

    defer.returnValue(group_metrics)
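Since client and authenticator are optional, a long-running caller can pass in connections it already holds and avoid a connect/disconnect cycle per run; collect_metrics only disconnects clients it created itself. A hypothetical caller sketch:

from twisted.internet import defer, task

@defer.inlineCallbacks
def collect_every_minute(reactor, config, log, client, authenticator):
    # Hypothetical periodic driver reusing one cassandra client and
    # authenticator across runs.
    while True:
        yield collect_metrics(reactor, config, log,
                              client=client, authenticator=authenticator)
        yield task.deferLater(reactor, 60, lambda: None)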
Example #6
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')
        ]

        cassandra_cluster = LoggingCQLClient(
            RoundRobinCassandraCluster(seed_endpoints,
                                       config_value('cassandra.keyspace')),
            log.bind(system='otter.silverberg'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    cache_ttl = config_value('identity.cache_ttl')

    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(config_value('identity.username'),
                                   config_value('identity.password'),
                                   config_value('identity.url'),
                                   config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant, coiterate)

    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(
            int(config_value('scheduler.batchsize')),
            int(config_value('scheduler.interval')), cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
Example #7
def makeService(config):
    """
    Set up the otter-api service.
    """
    config = dict(config)
    set_config_data(config)

    parent = MultiService()

    region = config_value('region')

    seed_endpoints = [
        clientFromString(reactor, str(host))
        for host in config_value('cassandra.seed_hosts')]

    cassandra_cluster = LoggingCQLClient(
        TimingOutCQLClient(
            reactor,
            RoundRobinCassandraCluster(
                seed_endpoints,
                config_value('cassandra.keyspace'),
                disconnect_on_cancel=True),
            config_value('cassandra.timeout') or 30),
        log.bind(system='otter.silverberg'))

    store = CassScalingGroupCollection(
        cassandra_cluster, reactor, config_value('limits.absolute.maxGroups'))
    admin_store = CassAdmin(cassandra_cluster)

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    service_configs = get_service_configs(config)

    authenticator = generate_authenticator(reactor, config['identity'])
    supervisor = SupervisorService(authenticator, region, coiterate,
                                   service_configs)
    supervisor.setServiceParent(parent)

    set_supervisor(supervisor)

    health_checker = HealthChecker(reactor, {
        'store': getattr(store, 'health_check', None),
        'kazoo': store.kazoo_health_check,
        'supervisor': supervisor.health_check
    })

    # Setup cassandra cluster to disconnect when otter shuts down
    parent.addService(FunctionalService(stop=partial(
        call_after_supervisor, cassandra_cluster.disconnect, supervisor)))

    otter = Otter(store, region, health_checker.health_check)
    site = Site(otter.app.resource())
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(parent)

    # Setup admin service
    admin_port = config_value('admin')
    if admin_port:
        admin = OtterAdmin(admin_store)
        admin_site = Site(admin.app.resource())
        admin_site.displayTracebacks = False
        admin_service = service(str(admin_port), admin_site)
        admin_service.setServiceParent(parent)

    # Setup cloud feeds
    cf_conf = config.get('cloudfeeds', None)
    if cf_conf is not None:
        id_conf = deepcopy(config['identity'])
        id_conf['strategy'] = 'single_tenant'
        add_to_fanout(CloudFeedsObserver(
            reactor=reactor,
            authenticator=generate_authenticator(reactor, id_conf),
            tenant_id=cf_conf['tenant_id'],
            region=region,
            service_configs=service_configs))

    # Setup Kazoo client
    if config_value('zookeeper'):
        threads = config_value('zookeeper.threads') or 10
        disable_logs = config_value('zookeeper.no_logs')
        threadpool = ThreadPool(maxthreads=threads)
        sync_kz_client = KazooClient(
            hosts=config_value('zookeeper.hosts'),
            # Keep trying to connect until the end of time with
            # max interval of 10 minutes
            connection_retry=dict(max_tries=-1, max_delay=600),
            logger=None if disable_logs else TxLogger(log.bind(system='kazoo'))
        )
        kz_client = TxKazooClient(reactor, threadpool, sync_kz_client)
        # Don't timeout. Keep trying to connect forever
        d = kz_client.start(timeout=None)

        def on_client_ready(_):
            dispatcher = get_full_dispatcher(reactor, authenticator, log,
                                             get_service_configs(config),
                                             kz_client, store, supervisor,
                                             cassandra_cluster)
            # Setup scheduler service after starting
            scheduler = setup_scheduler(parent, dispatcher, store, kz_client)
            health_checker.checks['scheduler'] = scheduler.health_check
            otter.scheduler = scheduler
            # Give dispatcher to Otter REST object
            otter.dispatcher = dispatcher
            # Set the client after starting
            # NOTE: There is a small window after start() begins and before
            # kz_client is set here, during which policy execution and
            # group deletion will fail
            store.kz_client = kz_client
            # Setup kazoo to stop when shutting down
            parent.addService(FunctionalService(
                stop=partial(call_after_supervisor,
                             kz_client.stop, supervisor)))

            setup_converger(
                parent, kz_client, dispatcher,
                config_value('converger.interval') or 10,
                config_value('converger.build_timeout') or 3600,
                config_value('converger.limited_retry_iterations') or 10,
                config_value('converger.step_limits') or {})

        d.addCallback(on_client_ready)
        d.addErrback(log.err, 'Could not start TxKazooClient')

    return parent
Example #8
            "desiredCapacity": 20,
            "cooldown": 3,
            "type": "webhook"
        },
    ]


try:
    cassandra_host = os.environ.get('CASSANDRA_HOST', 'localhost')
    cassandra_port = int(os.environ.get('CASSANDRA_PORT', 9160))
    keymaster = OtterKeymaster(host=cassandra_host, port=cassandra_port)
except Exception as e:
    skip = "Cassandra unavailable: {0}".format(e)
else:
    keyspace = keymaster.get_keyspace()
    store = CassScalingGroupCollection(keyspace.client)


class CassStoreRestScalingGroupTestCase(TestCase, RequestTestMixin, LockMixin):
    """
    Test case for testing the REST API for the scaling group specific endpoints
    (not policies or webhooks) against the Cassandra model.
    """

    _launch_server_config = launch_server_config()[0]
    _policies = _policy()

    def setUp(self):
        """
        Set the Cassandra store, and also patch the controller
        """
Example #9
@inlineCallbacks
def main(reactor):
    parser = ArgumentParser(
        description="Trigger convergence on all/some groups")
    parser.add_argument(
        "-c",
        dest="config",
        required=True,
        help="Config file containing identity and cassandra info")
    parser.add_argument(
        "--steps",
        action="store_true",
        help=("Return steps that would be taken if convergence was triggered "
              "with desired set to current actual. No convergence triggered"))

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        "-g",
        nargs="+",
        dest="group",
        help="Group(s) to trigger. Should be in tenantId:groupId form")
    group.add_argument("-t",
                       nargs="+",
                       dest="tenant_id",
                       help="TenantID(s) whose group's to trigger")
    group.add_argument("--conf-conv-tenants",
                       action="store_true",
                       help=("Convergence triggered on tenants configured as "
                             "\"convergence-tenants\" setting config file"))
    group.add_argument(
        "--conf-non-conv-tenants",
        action="store_true",
        dest="disabled_tenants",
        help=("Convergence triggered on all tenants except ones in "
              "\"non-convergence-tenants\" setting in conf file"))
    group.add_argument("--all",
                       action="store_true",
                       help="Convergence will be triggered on all groups")

    parser.add_argument("-l",
                        dest="limit",
                        type=int,
                        default=10,
                        help="Concurrency limit. Defaults to 10")
    parser.add_argument("--no-error-group",
                        action="store_true",
                        help="Do not converge ERROR groups")

    parsed = parser.parse_args()
    conf = json.load(open(parsed.config))

    cass_client = connect_cass_servers(reactor, conf["cassandra"])
    authenticator = generate_authenticator(reactor, conf["identity"])
    store = CassScalingGroupCollection(cass_client, reactor, 1000)

    groups = yield get_groups(parsed, store, conf)
    if parsed.steps:
        steps = yield groups_steps(groups, reactor, store, cass_client,
                                   authenticator, conf)
        print(*steps, sep='\n')
    else:
        yield trigger_convergence_groups(authenticator, conf["region"], groups,
                                         parsed.limit, parsed.no_error_group)
    yield cass_client.disconnect()