Example #1
        def on_client_ready(_):
            dispatcher = get_full_dispatcher(reactor, authenticator, log,
                                             get_service_configs(config),
                                             kz_client, store, supervisor,
                                             cassandra_cluster)
            # Setup scheduler service after starting
            scheduler = setup_scheduler(parent, dispatcher, store, kz_client)
            health_checker.checks['scheduler'] = scheduler.health_check
            otter.scheduler = scheduler
            # Give dispatcher to Otter REST object
            otter.dispatcher = dispatcher
            # Set the client after starting
            # NOTE: There is a small window when startup has not finished
            # and kz_client is not yet set, in which case policy execution
            # and group deletion will fail
            store.kz_client = kz_client
            # Setup kazoo to stop when shutting down
            parent.addService(FunctionalService(
                stop=partial(call_after_supervisor,
                             kz_client.stop, supervisor)))

            setup_converger(
                parent, kz_client, dispatcher,
                config_value('converger.interval') or 10,
                config_value('converger.build_timeout') or 3600,
                config_value('converger.limited_retry_iterations') or 10,
                config_value('converger.step_limits') or {})
Example #2
def delete_server(log, region, service_catalog, auth_token, instance_details):
    """
    Delete the server specified by instance_details.

    TODO: Load balancer draining.

    :param str region: A rackspace region as found in the service catalog.
    :param list service_catalog: A list of services as returned by the auth
        APIs.
    :param str auth_token: The user's auth token.
    :param tuple instance_details: A 2-tuple of server_id and a list of
        load balancer Add Node responses.

        Example::

            ('da08965f-4c2d-41aa-b492-a3c02706202f',
             [('12345',
               {'nodes': [{'id': 'a', 'address': ... }]}),
              ('54321',
               {'nodes': [{'id': 'b', 'address': ... }]})])

    :return: TODO
    """
    lb_region = config_value('regionOverrides.cloudLoadBalancers') or region
    cloudLoadBalancers = config_value('cloudLoadBalancers')
    cloudServersOpenStack = config_value('cloudServersOpenStack')

    log.msg("Looking for load balancer endpoint: %(service_name)s",
            service_name=cloudLoadBalancers,
            region=lb_region)

    lb_endpoint = public_endpoint_url(service_catalog, cloudLoadBalancers,
                                      lb_region)

    log.msg("Looking for cloud servers endpoint: %(service_name)s",
            service_name=cloudServersOpenStack,
            region=region)

    server_endpoint = public_endpoint_url(service_catalog,
                                          cloudServersOpenStack, region)

    (server_id, loadbalancer_details) = instance_details

    node_info = itertools.chain(*[[(loadbalancer_id, node['id'])
                                   for node in node_details['nodes']]
                                  for (loadbalancer_id,
                                       node_details) in loadbalancer_details])

    d = gatherResults([
        remove_from_load_balancer(lb_endpoint, auth_token, loadbalancer_id,
                                  node_id)
        for (loadbalancer_id, node_id) in node_info
    ],
                      consumeErrors=True)

    def when_removed_from_loadbalancers(_ignore):
        return verified_delete(log, server_endpoint, auth_token, server_id)

    d.addCallback(when_removed_from_loadbalancers)
    return d
Example #3
 def test_update_config_existing(self):
     """
     :func:`~config.update_config_data` will update existing config
     and not remove others
     """
     config.update_config_data("baz.bax", "new")
     self.assertEqual(config.config_value("baz.bax"), "new")
     self.assertEqual(config.config_value("foo"), "bar")
Example #4
def delete_server(log, region, service_catalog, auth_token, instance_details):
    """
    Delete the server specified by instance_details.

    TODO: Load balancer draining.

    :param str region: A rackspace region as found in the service catalog.
    :param list service_catalog: A list of services as returned by the auth
        APIs.
    :param str auth_token: The user's auth token.
    :param tuple instance_details: A 2-tuple of server_id and a list of
        load balancer Add Node responses.

        Example::

            ('da08965f-4c2d-41aa-b492-a3c02706202f',
             [('12345',
               {'nodes': [{'id': 'a', 'address': ... }]}),
              ('54321',
               {'nodes': [{'id': 'b', 'address': ... }]})])

    :return: TODO
    """
    lb_region = config_value('regionOverrides.cloudLoadBalancers') or region
    cloudLoadBalancers = config_value('cloudLoadBalancers')
    cloudServersOpenStack = config_value('cloudServersOpenStack')

    log.msg("Looking for load balancer endpoint: %(service_name)s",
            service_name=cloudLoadBalancers,
            region=lb_region)

    lb_endpoint = public_endpoint_url(service_catalog,
                                      cloudLoadBalancers,
                                      lb_region)

    log.msg("Looking for cloud servers endpoint: %(service_name)s",
            service_name=cloudServersOpenStack,
            region=region)

    server_endpoint = public_endpoint_url(service_catalog,
                                          cloudServersOpenStack,
                                          region)

    (server_id, loadbalancer_details) = instance_details

    node_info = itertools.chain(
        *[[(loadbalancer_id, node['id']) for node in node_details['nodes']]
          for (loadbalancer_id, node_details) in loadbalancer_details])

    d = gatherResults(
        [remove_from_load_balancer(lb_endpoint, auth_token, loadbalancer_id, node_id)
         for (loadbalancer_id, node_id) in node_info], consumeErrors=True)

    def when_removed_from_loadbalancers(_ignore):
        return verified_delete(log, server_endpoint, auth_token, server_id)

    d.addCallback(when_removed_from_loadbalancers)
    return d
Example #5
def remove_from_load_balancer(log, endpoint, auth_token, loadbalancer_id,
                              node_id, clock=None):
    """
    Remove a node from a load balancer.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param str loadbalancer_id: The ID for a Cloud Load Balancer.
    :param str node_id: The ID for a node in that Cloud Load Balancer.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with a RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is working fine
    lb_log.msg('Removing from load balancer')
    path = append_segments(endpoint, 'loadbalancers', str(loadbalancer_id), 'nodes', str(node_id))

    def check_422_deleted(failure):
        # A LB being deleted sometimes results in a 422.  This function
        # unfortunately has to parse the body of the message to see if this is an
        # acceptable 422 (if the LB has been deleted or the node has already been
        # removed, then 'removing from load balancer' as a task should be
        # successful - if the LB is in ERROR, then nothing more can be done to
        # it except resetting it - may as well remove the server.)
        failure.trap(APIError)
        error = failure.value
        if error.code == 422:
            message = json.loads(error.body)['message']
            if ('load balancer is deleted' not in message and
                    'PENDING_DELETE' not in message):
                return failure
            lb_log.msg(message)
        else:
            return failure

    def remove():
        d = treq.delete(path, headers=headers(auth_token), log=lb_log)

        # Success is 200/202.  An LB not being found is 404.  A node not being
        # found is a 404.  But a deleted LB sometimes results in a 422.
        d.addCallback(log_on_response_code, lb_log, 'Node to delete does not exist', 404)
        d.addCallback(check_success, [200, 202, 404])
        d.addCallback(treq.content)  # To avoid https://twistedmatrix.com/trac/ticket/6751
        d.addErrback(check_422_deleted)
        d.addErrback(log_lb_unexpected_errors, path, lb_log, 'remove_node')
        return d

    d = retry(
        remove,
        can_retry=retry_times(config_value('worker.lb_max_retries') or LB_MAX_RETRIES),
        next_interval=random_interval(
            *(config_value('worker.lb_retry_interval_range') or LB_RETRY_INTERVAL_RANGE)),
        clock=clock)
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
Example #6
 def test_update_config_new(self):
     """
     :func:`~config.update_config_data` will add new config and not remove
     others
     """
     config.update_config_data("baz.new", "wha")
     config.update_config_data("baz.some.other", "who")
     self.assertEqual(config.config_value("baz.new"), "wha")
     self.assertEqual(config.config_value("baz.some.other"), "who")
     self.assertEqual(config.config_value("baz.bax"), "quux")
Example #7
def add_to_load_balancer(log, endpoint, auth_token, lb_config, ip_address, undo, clock=None):
    """
    Add an IP address to a load balancer based on the ``lb_config``.

    TODO: Handle load balancer node metadata.

    :param log: A bound logger
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict lb_config: An ``lb_config`` dictionary.
    :param str ip_address: The IP Address of the node to add to the load
        balancer.
    :param IUndoStack undo: An IUndoStack to push any reversible operations onto.

    :return: Deferred that fires with the Add Node to load balancer response
        as a dict.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    lb_log = log.bind(loadbalancer_id=lb_id, ip_address=ip_address)

    def add():
        d = treq.post(path, headers=headers(auth_token),
                      data=json.dumps({"nodes": [{"address": ip_address,
                                                  "port": port,
                                                  "condition": "ENABLED",
                                                  "type": "PRIMARY"}]}),
                      log=lb_log)
        d.addCallback(check_success, [200, 202])
        d.addErrback(log_lb_unexpected_errors, lb_log, 'add_node')
        d.addErrback(wrap_request_error, path, 'add_node')
        d.addErrback(check_deleted_clb, lb_id)
        return d

    d = retry(
        add,
        can_retry=compose_retries(
            transient_errors_except(CLBOrNodeDeleted),
            retry_times(config_value('worker.lb_max_retries') or LB_MAX_RETRIES)),
        next_interval=random_interval(
            *(config_value('worker.lb_retry_interval_range') or LB_RETRY_INTERVAL_RANGE)),
        clock=clock)

    def when_done(result):
        lb_log.msg('Added to load balancer', node_id=result['nodes'][0]['id'])
        undo.push(remove_from_load_balancer,
                  lb_log,
                  endpoint,
                  auth_token,
                  lb_id,
                  result['nodes'][0]['id'])
        return result

    return d.addCallback(treq.json_content).addCallback(when_done)
Example #8
File: api.py Project: zancas/otter
def generate_authenticator(reactor):
    """
    Generate authenticator
    """
    # REVIEW: Separating this out for testability. Any better ideas?
    cache_ttl = config_value('identity.cache_ttl')
    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    return CachingAuthenticator(
        reactor,
        WaitingAuthenticator(
            reactor,
            RetryingAuthenticator(
                reactor,
                ImpersonatingAuthenticator(
                    config_value('identity.username'),
                    config_value('identity.password'),
                    config_value('identity.url'),
                    config_value('identity.admin_url')),
                max_retries=config_value('identity.max_retries'),
                retry_interval=config_value('identity.retry_interval')),
            config_value('identity.wait') or 5),
        cache_ttl)
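
generate_authenticator reads everything from a nested 'identity' config block. A hedged sketch of a matching fragment, using set_config_data as seen in the other examples here (all values are illustrative placeholders, not real credentials or endpoints):

set_config_data({
    'identity': {
        'username': 'autoscale-service',                # placeholder
        'password': 'not-a-real-password',              # placeholder
        'url': 'https://identity.example.com/v2.0',     # placeholder
        'admin_url': 'https://identity-admin.example.com/v2.0',
        'max_retries': 10,
        'retry_interval': 5,
        'wait': 5,
        'cache_ttl': 300,
    }
})
authenticator = generate_authenticator(reactor)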
Example #9
def _remove_from_clb(log,
                     endpoint,
                     auth_token,
                     loadbalancer_id,
                     node_id,
                     clock=None):
    """
    Remove a node from a CLB load balancer.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone authentication token.
    :param str loadbalancer_id: The ID for a Cloud Load Balancer.
    :param str node_id: The ID for a node in that Cloud Load Balancer.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with a RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is working fine
    lb_log.msg('Removing from load balancer')
    path = append_segments(endpoint, 'loadbalancers', str(loadbalancer_id),
                           'nodes', str(node_id))

    def remove():
        d = treq.delete(path, headers=headers(auth_token), log=lb_log)
        d.addCallback(check_success, [200, 202])
        d.addCallback(treq.content)  # To avoid https://twistedmatrix.com/trac/ticket/6751
        d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
        d.addErrback(wrap_request_error, path, 'remove_node')
        d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
        return d

    d = retry(remove,
              can_retry=compose_retries(
                  transient_errors_except(CLBOrNodeDeleted),
                  retry_times(
                      config_value('worker.lb_max_retries')
                      or LB_MAX_RETRIES)),
              next_interval=random_interval(
                  *(config_value('worker.lb_retry_interval_range')
                    or LB_RETRY_INTERVAL_RANGE)),
              clock=clock)

    # A node or CLB deleted is considered successful removal
    d.addErrback(
        lambda f: f.trap(CLBOrNodeDeleted) and lb_log.msg(f.value.message))
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
Example #10
def add_to_load_balancer(log,
                         request_bag,
                         lb_config,
                         server_details,
                         undo,
                         clock=None):
    """
    Adds a given server to a given load balancer.

    :param log: A bound logger.
    :param request_bag: An object carrying per-request data (service catalog,
        auth token, and regions).
    :param dict lb_config: An ``lb_config`` dictionary specifying which load
        balancer to add the server to.
    :param dict server_details: The server details, as returned by Nova.
    :return: Deferred that fires with the load balancer response. The
        structure of this object depends on the load balancer type.
    """
    lb_type = lb_config.get("type", "CloudLoadBalancer")
    if lb_type == "CloudLoadBalancer":
        cloudLoadBalancers = config_value('cloudLoadBalancers')
        endpoint = public_endpoint_url(request_bag.service_catalog,
                                       cloudLoadBalancers,
                                       request_bag.lb_region)
        auth_token = request_bag.auth_token
        ip_address = _servicenet_address(server_details["server"])
        return add_to_clb(log, endpoint, auth_token, lb_config, ip_address,
                          undo, clock)
    elif lb_type == "RackConnectV3":
        lb_id = lb_config["loadBalancerId"]
        server_id = server_details["server"]["id"]
        return add_to_rcv3(request_bag, lb_id, server_id)
    else:
        raise RuntimeError(
            "Unknown cloud load balancer type! config: {}".format(lb_config))
Example #11
def scrub_otter_metadata(log,
                         auth_token,
                         service_catalog,
                         region,
                         server_id,
                         _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param list service_catalog: A list of services as returned by the auth
        APIs.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    bound_log = log.bind(region=region, server_id=server_id)
    bound_log.msg("Scrubbing otter-specific metadata")

    service_name = config_value('cloudServersOpenStack')
    endpoint = public_endpoint_url(service_catalog, service_name, region)
    url = append_segments(endpoint, 'servers', server_id, 'metadata')

    auth_hdr = headers(auth_token)

    get, put = [
        lambda data=None, method=method: _treq.request(
            method, url, headers=auth_hdr, data=data, log=bound_log)
        for method in ["GET", "PUT"]
    ]

    return (get()
            .addCallback(_treq.json_content)
            .addCallback(comp(json.dumps, _without_otter_metadata))
            .addCallback(put)
            .addCallback(_treq.content))
Example #12
    def _get_request_bag(self, log, scaling_group):
        """
        Builds :obj:`RequestBag` containing a bunch of useful stuff for making
        HTTP requests.
        """
        tenant_id = scaling_group.tenant_id
        dispatcher = get_legacy_dispatcher(reactor, self.authenticator, log,
                                           self.service_configs)
        lb_region = config_value('regionOverrides.cloudLoadBalancers')

        def authenticate():
            log.msg("Authenticating for tenant")
            d = self.authenticator.authenticate_tenant(tenant_id, log=log)

            def when_authenticated((auth_token, service_catalog)):
                bag = RequestBag(
                    lb_region=lb_region or self.region,
                    region=self.region,
                    dispatcher=dispatcher,
                    tenant_id=tenant_id,
                    auth_token=auth_token,
                    service_catalog=service_catalog,
                    re_auth=authenticate,
                )
                return bag

            return d.addCallback(when_authenticated)

        return authenticate()
Example #13
 def when_authenticated((auth_token, service_catalog)):
     return launch_server_v1.delete_server(
         log,
         config_value('region'),
         service_catalog,
         auth_token,
         (server['id'], server['lb_info']))
Example #14
File: mock.py Project: dian4554/otter
    def create_webhooks(self, policy_id, data):
        """
        see :meth:`otter.models.interface.IScalingGroup.create_webhooks`
        """
        if self.error is not None:
            return defer.fail(self.error)

        if policy_id in self.policies:
            max_webhooks = config_value('limits.absolute.maxWebhooksPerPolicy')
            curr_webhooks = len(self.webhooks.get(policy_id, []))
            if len(data) + curr_webhooks > max_webhooks:
                return defer.fail(
                    WebhooksOverLimitError(self.tenant_id, self.uuid,
                                           policy_id, max_webhooks,
                                           curr_webhooks, len(data)))

            created = []
            for webhook_input in data:
                webhook_real = {'metadata': {}}
                webhook_real.update(webhook_input)
                webhook_real['capability'] = {}

                (webhook_real['capability']['version'],
                 webhook_real['capability']['hash']) = generate_capability()

                uuid = str(uuid4())
                self.webhooks[policy_id][uuid] = webhook_real
                # return a copy so this store doesn't get mutated
                created.append(dict(id=uuid, **webhook_real))

            return defer.succeed(created)
        else:
            return defer.fail(NoSuchPolicyError(self.tenant_id,
                                                self.uuid, policy_id))
Example #15
def add_to_load_balancer(log, request_bag, lb_config, server_details, undo,
                         clock=None):
    """
    Adds a given server to a given load balancer.

    :param log: A bound logger.
    :param request_bag: An object carrying per-request data (service catalog,
        auth token, and regions).
    :param dict lb_config: An ``lb_config`` dictionary specifying which load
        balancer to add the server to.
    :param dict server_details: The server details, as returned by Nova.
    :return: Deferred that fires with the load balancer response. The
        structure of this object depends on the load balancer type.
    """
    lb_type = lb_config.get("type", "CloudLoadBalancer")
    if lb_type == "CloudLoadBalancer":
        cloudLoadBalancers = config_value('cloudLoadBalancers')
        endpoint = public_endpoint_url(request_bag.service_catalog,
                                       cloudLoadBalancers,
                                       request_bag.lb_region)
        auth_token = request_bag.auth_token
        ip_address = _servicenet_address(server_details["server"])
        return add_to_clb(log, endpoint, auth_token, lb_config, ip_address,
                          undo, clock)
    elif lb_type == "RackConnectV3":
        lb_id = lb_config["loadBalancerId"]
        server_id = server_details["server"]["id"]
        return add_to_rcv3(request_bag, lb_id, server_id)
    else:
        raise RuntimeError("Unknown cloud load balancer type! config: {}"
                           .format(lb_config))
Example #16
def scrub_otter_metadata(log, auth_token, service_catalog, region, server_id,
                         _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param list service_catalog: A list of services as returned by the auth
        APIs.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    bound_log = log.bind(region=region, server_id=server_id)
    bound_log.msg("Scrubbing otter-specific metadata")

    service_name = config_value('cloudServersOpenStack')
    endpoint = public_endpoint_url(service_catalog, service_name, region)
    url = append_segments(endpoint, 'servers', server_id, 'metadata')

    auth_hdr = headers(auth_token)

    get, put = [lambda data=None, method=method: _treq.request(
        method, url, headers=auth_hdr, data=data, log=bound_log)
        for method in ["GET", "PUT"]]

    return (get()
            .addCallback(_treq.json_content)
            .addCallback(comp(json.dumps, _without_otter_metadata))
            .addCallback(put)
            .addCallback(_treq.content))
Example #17
 def when_removed_from_loadbalancers(_ignore):
     cloudServersOpenStack = config_value('cloudServersOpenStack')
     server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                           cloudServersOpenStack,
                                           request_bag.region)
     return verified_delete(log, server_endpoint, request_bag, server_id,
                            clock=clock)
Example #18
    def _get_request_bag(self, log, scaling_group):
        """
        Builds :obj:`RequestBag` containing a bunch of useful stuff for making
        HTTP requests.
        """
        tenant_id = scaling_group.tenant_id
        dispatcher = get_legacy_dispatcher(reactor, self.authenticator, log,
                                           self.service_configs)
        lb_region = config_value('regionOverrides.cloudLoadBalancers')

        def authenticate():
            log.msg("Authenticating for tenant")
            d = self.authenticator.authenticate_tenant(tenant_id, log=log)

            def when_authenticated((auth_token, service_catalog)):
                bag = RequestBag(
                    lb_region=lb_region or self.region,
                    region=self.region,
                    dispatcher=dispatcher,
                    tenant_id=tenant_id,
                    auth_token=auth_token,
                    service_catalog=service_catalog,
                    re_auth=authenticate,
                )
                return bag

            return d.addCallback(when_authenticated)

        return authenticate()
Example #19
def remove_from_load_balancer(log, request_bag, lb_config, lb_response,
                              clock=None):
    """
    Remove a node from a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object carrying per-request data (service catalog,
        auth token, and regions).
    :param dict lb_config: An ``lb_config`` dictionary.
    :param lb_response: The response the load balancer provided when the server
        being removed was added. Its type and shape depend on the type of
        load balancer.
    :param IReactorTime clock: An optional clock, for testing. Will be passed
        on to implementations of node removal logic for specific load balancer
        APIs, if they support a clock.
    :returns: A Deferred that fires with :data:`None` if the operation
        completed successfully, or errbacks with a RequestError.
    """
    lb_type = lb_config.get("type", "CloudLoadBalancer")
    if lb_type == "CloudLoadBalancer":
        cloudLoadBalancers = config_value('cloudLoadBalancers')
        endpoint = public_endpoint_url(request_bag.service_catalog,
                                       cloudLoadBalancers,
                                       request_bag.lb_region)
        auth_token = request_bag.auth_token
        loadbalancer_id = lb_config["loadBalancerId"]
        node_id = next(node_info["id"] for node_info in lb_response["nodes"])
        return _remove_from_clb(log, endpoint, auth_token, loadbalancer_id,
                                node_id, clock)
    elif lb_type == "RackConnectV3":
        lb_id = lb_config["loadBalancerId"]
        node_id = next(pair["cloud_server"]["id"] for pair in lb_response)
        return remove_from_rcv3(request_bag, lb_id, node_id)
    else:
        raise RuntimeError("Unknown cloud load balancer type! config: {}"
                           .format(lb_config))
Example #20
 def test_set_config_None(self):
     """
     Setting `None` via :func:`config.set_config_data` also works
     and does not raise exceptions on subsequent update or get
     """
     config.set_config_data(None)
     self.assertIsNone(config.config_value("a"))
     config.update_config_data("a.b", 2)
Example #21
 def when_authenticated((auth_token, service_catalog)):
     log.msg('Validating launch server config')
     return validate_config.validate_launch_server_config(
         log,
         config_value('region'),
         service_catalog,
         auth_token,
         launch_config['args'])
Example #22
File: __init__.py Project: stephamon/otter
def _default_throttler(locks, clock, stype, method, tenant_id):
    """
    Get a throttler function with throttling policies based on configuration.
    """
    cfg_name = _CFG_NAMES.get((stype, method))
    if cfg_name is not None:
        delay = config_value('cloud_client.throttling.' + cfg_name)
        if delay is not None:
            lock = locks.get_lock((stype, method))
            return partial(lock.run, deferLater, clock, delay)

    # Could be a per-tenant lock
    cfg_name = _CFG_NAMES_PER_TENANT.get((stype, method))
    if cfg_name is not None:
        delay = config_value('cloud_client.throttling.' + cfg_name)
        if delay is not None:
            lock = locks.get_lock((stype, method, tenant_id))
            return partial(lock.run, deferLater, clock, delay)
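
The delay values come from config keys of the form cloud_client.throttling.<cfg_name>, where <cfg_name> is whatever _CFG_NAMES or _CFG_NAMES_PER_TENANT maps the (service type, method) pair to. A hedged sketch of a matching config fragment (the two key names are assumptions for illustration):

set_config_data({
    'cloud_client': {
        'throttling': {
            'create_server_delay': 1.0,   # hypothetical _CFG_NAMES entry
            'delete_server_delay': 0.4,   # hypothetical per-tenant entry
        }
    }
})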
Example #23
def _default_throttler(locks, clock, stype, method, tenant_id):
    """
    Get a throttler function with throttling policies based on configuration.
    """
    cfg_name = _CFG_NAMES.get((stype, method))
    if cfg_name is not None:
        delay = config_value('cloud_client.throttling.' + cfg_name)
        if delay is not None:
            lock = locks.get_lock((stype, method))
            return partial(lock.run, deferLater, clock, delay)

    # Could be a per-tenant lock
    cfg_name = _CFG_NAMES_PER_TENANT.get((stype, method))
    if cfg_name is not None:
        delay = config_value('cloud_client.throttling.' + cfg_name)
        if delay is not None:
            lock = locks.get_lock((stype, method, tenant_id))
            return partial(lock.run, deferLater, clock, delay)
Example #24
File: supervisor.py Project: alex/otter
 def when_authenticated((auth_token, service_catalog)):
     log.msg("Executing launch config.")
     return launch_server_v1.launch_server(
         log,
         config_value('region'),
         scaling_group,
         service_catalog,
         auth_token,
         launch_config['args'])
Example #25
def get_service_endpoint(service_catalog, region):
    """
    Get the service endpoint used to connect to cloud services
    """
    cloudServersOpenStack = config_value('cloudServersOpenStack')
    server_endpoint = public_endpoint_url(service_catalog,
                                          cloudServersOpenStack,
                                          region)
    return server_endpoint
Example #26
 def test_nonexistent_value_with_shared_key_part_at_toplevel(self):
     """
     :func:`~config.config_value` will return :data:`None` if the path does
     not exist in the nested dictionaries, even if some part of the path
     does. I.e. config_value is not allowed to ignore arbitrary path
     prefixes.
     """
     value = config.config_value('prefix.shouldnt.be.ignored.foo')
     self.assertIdentical(value, None)
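
This test, together with the other config_value tests in this collection, pins down the lookup contract: dotted paths walk nested dictionaries, a missing path yields None, and path prefixes are never skipped. A minimal sketch consistent with that contract (an illustrative reimplementation, not otter's actual code):

_config_data = {}

def config_value(path):
    """Walk the nested config dict one dotted segment at a time."""
    value = _config_data
    for key in path.split('.'):
        if not isinstance(value, dict) or key not in value:
            return None  # any miss along the path yields None
        value = value[key]
    return value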
Example #27
 def when_removed_from_loadbalancers(_ignore):
     cloudServersOpenStack = config_value('cloudServersOpenStack')
     server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                           cloudServersOpenStack,
                                           request_bag.region)
     return verified_delete(log,
                            server_endpoint,
                            request_bag,
                            server_id,
                            clock=clock)
Example #28
    def base(self, request):
        """
        base root route.

        :returns: Whatever is configured to be returned by the root
        """
        code = config_value('root.code')
        if code is not None:
            request.setResponseCode(code)

        headers = config_value('root.headers')
        if headers is not None:
            for header in headers:
                for value in headers[header]:
                    request.setHeader(str(header), str(value))

        body = config_value('root.body')
        if body is not None:
            return body

        return ''
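
A hypothetical 'root' config block this route would consume (the key names come from the lookups above; the values are illustrative assumptions):

set_config_data({
    'root': {
        'code': 302,
        'headers': {'Location': ['https://docs.example.com/autoscale/']},
        'body': '',
    }
})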
Example #29
def remove_from_load_balancer(log, endpoint, auth_token, loadbalancer_id,
                              node_id, clock=None):
    """
    Remove a node from a load balancer.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param str loadbalancer_id: The ID for a Cloud Load Balancer.
    :param str node_id: The ID for a node in that Cloud Load Balancer.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with a RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is working fine
    lb_log.msg('Removing from load balancer')
    path = append_segments(endpoint, 'loadbalancers', str(loadbalancer_id), 'nodes', str(node_id))

    def remove():
        d = treq.delete(path, headers=headers(auth_token), log=lb_log)
        d.addCallback(check_success, [200, 202])
        d.addCallback(treq.content)  # To avoid https://twistedmatrix.com/trac/ticket/6751
        d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
        d.addErrback(wrap_request_error, path, 'remove_node')
        d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
        return d

    d = retry(
        remove,
        can_retry=compose_retries(
            transient_errors_except(CLBOrNodeDeleted),
            retry_times(config_value('worker.lb_max_retries') or LB_MAX_RETRIES)),
        next_interval=random_interval(
            *(config_value('worker.lb_retry_interval_range') or LB_RETRY_INTERVAL_RANGE)),
        clock=clock)

    # A node or CLB deleted is considered successful removal
    d.addErrback(lambda f: f.trap(CLBOrNodeDeleted) and lb_log.msg(f.value.message))
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
Example #30
File: http.py Project: manishtomar/otter
def get_collection_links(collection,
                         url,
                         rel,
                         limit=None,
                         marker=None,
                         next_marker=None):
    """
    Return links `dict` for given collection.

    The links will look somewhat like this::

        [
          {
            "href": <url with api version>,
            "rel": "self"
          },
          {
            "href": <url of next link>,
            "rel": "next"
          }
        ]

    The 'next' link is added only if the number of items in `collection`
    has reached `limit`.

    :param collection: the collection whose links are required.
    :type collection: list of dict that has 'id' in it

    :param url: URL of the collection

    :param rel: What to put under 'rel'

    :param limit: pagination limit

    :param marker: the current pagination marker

    :param next_marker: a callable that takes the collection, the limit, and
        the current marker, and returns the next marker
    """
    if next_marker is None:
        next_marker = next_marker_by_id

    links = []
    limit = limit or config_value('limits.pagination') or 100
    if rel is not None:
        links.append(_pagination_link(url, rel, limit, marker))
    if len(collection) >= limit:
        links.append(
            _pagination_link(url, 'next', limit,
                             next_marker(collection, limit, marker)))
    return links
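
A hedged usage sketch, assuming the default next_marker_by_id takes the 'id' of the last item in the collection; the exact hrefs depend on _pagination_link and the configured pagination limit:

groups = [{'id': 'g1'}, {'id': 'g2'}, {'id': 'g3'}]
links = get_collection_links(
    groups, 'https://autoscale.example.com/v1.0/1234/groups/', 'self',
    limit=3)
# len(groups) >= limit, so links holds both a 'self' entry and a 'next'
# entry, the latter carrying limit=3 and marker=g3 as query parameters.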
Example #31
 def _get_policies(group):
     """
     Now that we know the group exists, get its policies
     """
     limit = config_value('limits.pagination') or 100
     d = self._naive_list_policies(limit=limit)
     d.addCallback(lambda policies: {
         'groupConfiguration': _jsonloads_data(group['group_config']),
         'launchConfiguration': _jsonloads_data(group['launch_config']),
         'scalingPolicies': policies,
         'id': self.uuid,
         'state': _unmarshal_state(group)
     })
     return d
Example #32
File: mock.py Project: dian4554/otter
    def create_scaling_group(self, log, tenant, config, launch, policies=None):
        """
        see :meth:`otter.models.interface.IScalingGroupCollection.create_scaling_group`
        """
        uuid = str(uuid4())
        max_groups = config_value('limits.absolute.maxGroups')

        if len(self.data[tenant]) >= max_groups:
            msg = 'client has reached maxGroups limit'
            log.bind(tenant_id=tenant, scaling_group_id=uuid).msg(msg)
            return defer.fail(ScalingGroupOverLimitError(tenant, max_groups))

        self.data[tenant][uuid] = MockScalingGroup(
            log, tenant, uuid, self,
            {'config': config, 'launch': launch, 'policies': policies})
        return self.data[tenant][uuid].view_manifest()
Example #33
File: http.py Project: manishtomar/otter
def _pagination_link(url, rel, limit, marker):
    """
    Generates a link dictionary where the href link has (possibly) limit
    and marker query parameters, so long as they are not None.

    :param url: URL of the collection
    :param rel: What to put under 'rel'
    :param limit: pagination limit
    :param marker: the current pagination marker

    :return: ``dict`` containing an href and the rel, the href being a link
        to the collection represented by the url, limit, and marker
    """
    query_params = {}

    if marker is not None:
        query_params = {'marker': marker, 'limit': limit}
    elif limit != (config_value('limits.pagination') or 100):
        query_params['limit'] = limit

    # split_url is a tuple that can't be modified, so listify it
    # (scheme, netloc, path, query, fragment)
    split_url = urlsplit(url)
    mutable_url_parts = list(split_url)

    # update mutable_url_parts with a scheme and netloc if either are missing
    # so that the final URI will always be an absolute URI
    if not (split_url.scheme and split_url.netloc):
        # generate a new absolute URI so that when split, its scheme, netloc,
        # and path parts can be cannibalized
        donor = urlsplit(
            append_segments(get_url_root(), split_url.path.lstrip('/')))

        mutable_url_parts[:3] = [donor.scheme, donor.netloc, donor.path]

    # update the query parameters with new query parameters if necessary
    if query_params:
        query = parse_qs(split_url.query)
        query.update(query_params)
        querystring = urlencode(query, doseq=True)

        # sort alphabetically for easier testing
        mutable_url_parts[3] = '&'.join(sorted(querystring.split('&')))

    url = urlunsplit(mutable_url_parts)
    return {'href': url, 'rel': rel}
Example #34
File: http.py Project: zancas/otter
def _pagination_link(url, rel, limit, marker):
    """
    Generates a link dictionary where the href link has (possibly) limit
    and marker query parameters, so long as they are not None.

    :param url: URL of the collection
    :param rel: What to put under 'rel'
    :param limit: pagination limit
    :param marker: the current pagination marker

    :return: ``dict`` containing an href and the rel, the href being a link
        to the collection represented by the url, limit, and marker
    """
    query_params = {}

    if marker is not None:
        query_params = {'marker': marker, 'limit': limit}
    elif limit != (config_value('limits.pagination') or 100):
        query_params['limit'] = limit

    # split_url is a tuple that can't be modified, so listify it
    # (scheme, netloc, path, query, fragment)
    split_url = urlsplit(url)
    mutable_url_parts = list(split_url)

    # update mutable_url_parts with a scheme and netloc if either are missing
    # so that the final URI will always be an absolute URI
    if not (split_url.scheme and split_url.netloc):
        # generate a new absolute URI so that when split, its scheme, netloc,
        # and path parts can be cannibalized
        donor = urlsplit(
            append_segments(get_url_root(), split_url.path.lstrip('/')))

        mutable_url_parts[:3] = [donor.scheme, donor.netloc, donor.path]

    # update the query parameters with new query parameters if necessary
    if query_params:
        query = parse_qs(split_url.query)
        query.update(query_params)
        querystring = urlencode(query, doseq=True)

        # sort alphabetically for easier testing
        mutable_url_parts[3] = '&'.join(sorted(querystring.split('&')))

    url = urlunsplit(mutable_url_parts)
    return {'href': url, 'rel': rel}
Example #35
File: http.py Project: zancas/otter
def get_collection_links(collection, url, rel, limit=None, marker=None,
                         next_marker=None):
    """
    Return links `dict` for given collection.

    The links will look somewhat like this::

        [
          {
            "href": <url with api version>,
            "rel": "self"
          },
          {
            "href": <url of next link>,
            "rel": "next"
          }
        ]

    The 'next' link is added only if the number of items in `collection`
    has reached `limit`.

    :param collection: the collection whose links are required.
    :type collection: list of dict that has 'id' in it

    :param url: URL of the collection

    :param rel: What to put under 'rel'

    :param limit: pagination limit

    :param marker: the current pagination marker

    :param next_marker: a callable that takes the collection, the limit, and
        the current marker, and returns the next marker
    """
    if next_marker is None:
        next_marker = next_marker_by_id

    links = []
    limit = limit or config_value('limits.pagination') or 100
    if rel is not None:
        links.append(_pagination_link(url, rel, limit, marker))
    if len(collection) >= limit:
        links.append(_pagination_link(url, 'next', limit,
                                      next_marker(collection, limit, marker)))
    return links
Example #36
File: api.py Project: dwcramer/otter
def setup_scheduler(parent, store, kz_client):
    """
    Setup scheduler service
    """
    # Setup scheduler service
    if not config_value('scheduler') or config_value('mock'):
        return
    buckets = range(1, int(config_value('scheduler.buckets')) + 1)
    store.set_scheduler_buckets(buckets)
    partition_path = config_value('scheduler.partition.path') or '/scheduler_partition'
    time_boundary = config_value('scheduler.partition.time_boundary') or 15
    scheduler_service = SchedulerService(int(config_value('scheduler.batchsize')),
                                         int(config_value('scheduler.interval')),
                                         store, kz_client, partition_path, time_boundary,
                                         buckets)
    scheduler_service.setServiceParent(parent)
    return scheduler_service
Example #37
def get_sempahore(operation, conf_name):
    """
    Get the global semaphore for the given operation if one is configured
    under ``conf_name``; otherwise return None.

    :param str operation: The operation for which a semaphore is required.
        Must be the same each time it is called for that operation.
    :param str conf_name: A semaphore is returned only if this config exists.

    :return: A :obj:`DeferredSemaphore` object corresponding to the operation
    """
    sem = _semaphores.get(operation)
    if sem is not None:
        return sem
    conf = config_value(conf_name)
    if conf is None:
        return None
    _semaphores[operation] = DeferredSemaphore(conf)
    return _semaphores[operation]
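
A hedged usage sketch: repeated calls with the same operation name return the cached semaphore, so concurrency is bounded across all callers. The config key here is a hypothetical example, and create_server stands in for any Deferred-returning task:

sem = get_sempahore('create_server', 'worker.create_server_limit')
if sem is not None:
    # At most the configured number of create_server calls run at once.
    d = sem.run(create_server, server_endpoint, auth_token, server_config)
else:
    d = create_server(server_endpoint, auth_token, server_config)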
Example #38
    def _(self, request, *args, **kwargs):
        paginate = {}
        hard_limit = config_value('limits.pagination')
        if 'limit' in request.args:
            try:
                paginate['limit'] = int(request.args['limit'][0])
            except (TypeError, ValueError):
                return defer.fail(
                    InvalidQueryArgument('Invalid query argument for "limit"'))

            paginate['limit'] = max(min(paginate['limit'], hard_limit), 1)
        else:
            paginate['limit'] = hard_limit

        if 'marker' in request.args:
            paginate['marker'] = request.args['marker'][0]

        kwargs['paginate'] = paginate
        return f(self, request, *args, **kwargs)
Example #39
    def _(self, request, *args, **kwargs):
        paginate = {}
        hard_limit = config_value('limits.pagination')
        if 'limit' in request.args:
            try:
                paginate['limit'] = int(request.args['limit'][0])
            except (TypeError, ValueError):
                return defer.fail(InvalidQueryArgument(
                    'Invalid query argument for "limit"'))

            paginate['limit'] = max(min(paginate['limit'], hard_limit), 1)
        else:
            paginate['limit'] = hard_limit

        if 'marker' in request.args:
            paginate['marker'] = request.args['marker'][0]

        kwargs['paginate'] = paginate
        return f(self, request, *args, **kwargs)
Example #40
def get_sempahore(operation, conf_name):
    """
    Get the global semaphore for the given operation if one is configured
    under ``conf_name``; otherwise return None.

    :param str operation: The operation for which a semaphore is required.
        Must be the same each time it is called for that operation.
    :param str conf_name: A semaphore is returned only if this config exists.

    :return: A :obj:`DeferredSemaphore` object corresponding to the operation
    """
    sem = _semaphores.get(operation)
    if sem is not None:
        return sem
    conf = config_value(conf_name)
    if conf is None:
        return None
    _semaphores[operation] = DeferredSemaphore(conf)
    return _semaphores[operation]
Example #41
File: limits.py Project: stephamon/otter
    def list_limits(self, request):
        """
        returns application limits
        """
        data = {"limits": {"absolute": config_value("limits.absolute")}}
        accept = request.getHeader("accept")

        if accept and 'xml' in accept:
            url = "http://docs.openstack.org/common/api/v1.0"

            xml = etree.Element("limits", xmlns=url)
            absolute = etree.SubElement(xml, "absolute")

            for key, val in data['limits']['absolute'].iteritems():
                etree.SubElement(absolute, "limit", name=key, value=str(val))

            request.setHeader("Content-Type", "application/xml")
            return etree.tostring(xml, encoding="UTF-8", xml_declaration=True)

        return json.dumps(data)
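
A hedged sketch of the two renderings, assuming config_value('limits.absolute') returns {'maxGroups': 100, 'maxPoliciesPerGroup': 50} (hypothetical values; attribute order may differ):

# JSON (the default):
#   {"limits": {"absolute": {"maxGroups": 100, "maxPoliciesPerGroup": 50}}}
#
# With an Accept header containing 'xml':
#   <?xml version='1.0' encoding='UTF-8'?>
#   <limits xmlns="http://docs.openstack.org/common/api/v1.0">
#     <absolute>
#       <limit name="maxGroups" value="100"/>
#       <limit name="maxPoliciesPerGroup" value="50"/>
#     </absolute>
#   </limits>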
Example #42
    def list_limits(self, request):
        """
        returns application limits
        """
        data = {"limits": {"absolute": config_value("limits.absolute")}}
        accept = request.getHeader("accept")

        if accept and 'xml' in accept:
            url = "http://docs.openstack.org/common/api/v1.0"

            xml = etree.Element("limits", xmlns=url)
            absolute = etree.SubElement(xml, "absolute")

            for key, val in data['limits']['absolute'].iteritems():
                etree.SubElement(absolute, "limit", name=key, value=str(val))

            request.setHeader("Content-Type", "application/xml")
            return etree.tostring(xml, encoding="UTF-8", xml_declaration=True)

        return json.dumps(data)
Example #43
def remove_from_load_balancer(log,
                              request_bag,
                              lb_config,
                              lb_response,
                              clock=None):
    """
    Remove a node from a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object carrying per-request data (service catalog,
        auth token, and regions).
    :param dict lb_config: An ``lb_config`` dictionary.
    :param lb_response: The response the load balancer provided when the server
        being removed was added. Its type and shape depend on the type of
        load balancer.
    :param IReactorTime clock: An optional clock, for testing. Will be passed
        on to implementations of node removal logic for specific load balancer
        APIs, if they support a clock.
    :returns: A Deferred that fires with :data:`None` if the operation
        completed successfully, or errbacks with a RequestError.
    """
    lb_type = lb_config.get("type", "CloudLoadBalancer")
    if lb_type == "CloudLoadBalancer":
        cloudLoadBalancers = config_value('cloudLoadBalancers')
        endpoint = public_endpoint_url(request_bag.service_catalog,
                                       cloudLoadBalancers,
                                       request_bag.lb_region)
        auth_token = request_bag.auth_token
        loadbalancer_id = lb_config["loadBalancerId"]
        node_id = next(node_info["id"] for node_info in lb_response["nodes"])
        return _remove_from_clb(log, endpoint, auth_token, loadbalancer_id,
                                node_id, clock)
    elif lb_type == "RackConnectV3":
        lb_id = lb_config["loadBalancerId"]
        node_id = next(pair["cloud_server"]["id"] for pair in lb_response)
        return remove_from_rcv3(request_bag, lb_id, node_id)
    else:
        raise RuntimeError(
            "Unknown cloud load balancer type! config: {}".format(lb_config))
Example #44
def setup_scheduler(parent, dispatcher, store, kz_client):
    """
    Setup scheduler service
    """
    # Setup scheduler service
    if not config_value('scheduler') or config_value('mock'):
        return
    buckets = range(1, int(config_value('scheduler.buckets')) + 1)
    store.set_scheduler_buckets(buckets)
    partition_path = (config_value('scheduler.partition.path') or
                      '/scheduler_partition')
    time_boundary = config_value('scheduler.partition.time_boundary') or 15
    partitioner_factory = partial(
        Partitioner,
        kz_client, int(config_value('scheduler.interval')), partition_path,
        buckets, time_boundary)
    scheduler_service = SchedulerService(
        dispatcher, int(config_value('scheduler.batchsize')),
        store, partitioner_factory)
    scheduler_service.setServiceParent(parent)
    return scheduler_service
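
A hypothetical 'scheduler' config block matching the lookups above (the partition defaults mirror the fallbacks in the code; the other values are illustrative):

set_config_data({
    'scheduler': {
        'buckets': 10,
        'batchsize': 100,
        'interval': 10,
        'partition': {
            'path': '/scheduler_partition',
            'time_boundary': 15,
        },
    },
    # no 'mock' key, so config_value('mock') is None and the service starts
})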
Example #45
 def when_authenticated((auth_token, service_catalog)):
     log.msg("Executing launch config.")
     return launch_server_v1.launch_server(log, config_value('region'),
                                           scaling_group,
                                           service_catalog, auth_token,
                                           launch_config['args'], undo)
Example #46
 def test_top_level_value(self):
     """
     :func:`~config.config_value` returns the value stored at the top level key.
     """
     self.assertEqual(config.config_value('foo'), 'bar')
Example #47
def launch_server(log,
                  request_bag,
                  scaling_group,
                  launch_config,
                  undo,
                  clock=None):
    """
    Launch a new server given the launch config, auth tokens and service
    catalog, possibly adding the newly launched server to a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object with a bunch of useful data on it, including
        a callable to re-auth and get a new token.
    :param IScalingGroup scaling_group: The scaling group to add the launched
        server to.
    :param dict launch_config: A launch_config args structure as defined for
        the launch_server_v1 type.
    :param IUndoStack undo: The stack that will be rewound if the launch fails.

    :return: Deferred that fires with a 2-tuple of server details and the
        list of load balancer responses from add_to_load_balancers.
    """
    launch_config = prepare_launch_config(scaling_group.uuid, launch_config)

    cloudServersOpenStack = config_value('cloudServersOpenStack')
    server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                          cloudServersOpenStack,
                                          request_bag.region)

    lb_config = launch_config.get('loadBalancers', [])
    server_config = launch_config['server']

    log = log.bind(server_name=server_config['name'])
    ilog = [None]

    def check_metadata(server):
        # sanity check to make sure the metadata didn't change - can probably
        # be removed after a while if we do not see any log messages from this
        # function
        expected = launch_config['server']['metadata']
        result = server['server'].get('metadata')
        if result != expected:
            ilog[0].msg('Server metadata has changed.',
                        sanity_check=True,
                        expected_metadata=expected,
                        nova_metadata=result)
        return server

    def wait_for_server(server, new_request_bag):
        server_id = server['server']['id']

        # NOTE: If server creation is retried, a delete for each attempted
        # server is pushed onto the undo stack even though check_error may
        # already have deleted it; that is fine, since verified_delete
        # succeeds on an already-deleted server
        undo.push(verified_delete, log, server_endpoint, new_request_bag,
                  server_id)

        ilog[0] = log.bind(server_id=server_id)
        return wait_for_active(ilog[0], server_endpoint,
                               new_request_bag.auth_token,
                               server_id).addCallback(check_metadata)

    def add_lb(server, new_request_bag):
        if lb_config:
            lbd = add_to_load_balancers(ilog[0], new_request_bag, lb_config,
                                        server, undo)
            lbd.addCallback(lambda lb_response: (server, lb_response))
            return lbd

        return (server, [])

    def _real_create_server(new_request_bag):
        auth_token = new_request_bag.auth_token
        d = create_server(server_endpoint, auth_token, server_config, log=log)
        d.addCallback(wait_for_server, new_request_bag)
        d.addCallback(add_lb, new_request_bag)
        return d

    def _create_server():
        return request_bag.re_auth().addCallback(_real_create_server)

    def check_error(f):
        f.trap(UnexpectedServerStatus)
        if f.value.status == 'ERROR':
            log.msg(
                '{server_id} errored, deleting and creating new '
                'server instead',
                server_id=f.value.server_id)
            # trigger server delete and return True to allow retry
            verified_delete(log, server_endpoint, request_bag,
                            f.value.server_id)
            return True
        else:
            return False

    d = retry(_create_server,
              can_retry=compose_retries(retry_times(3), check_error),
              next_interval=repeating_interval(15),
              clock=clock)

    return d
Example #48
 def test_nested_value(self):
     """
     :func:`~config.config_value` returns the value stored at a . separated
     path.
     """
     self.assertEqual(config.config_value('baz.bax'), 'quux')
Example #49
def add_to_clb(log,
               endpoint,
               auth_token,
               lb_config,
               ip_address,
               undo,
               clock=None):
    """
    Add an IP address to a Cloud Load Balancer based on the ``lb_config``.

    TODO: Handle load balancer node metadata.

    :param log: A bound logger
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone auth token.
    :param dict lb_config: An ``lb_config`` dictionary.
    :param str ip_address: The IP address of the node to add to the load
        balancer.
    :param IUndoStack undo: An IUndoStack to push any reversible operations
        onto.

    :return: Deferred that fires with the load balancer response.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    lb_log = log.bind(loadbalancer_id=lb_id, ip_address=ip_address)

    def add():
        d = treq.post(path,
                      headers=headers(auth_token),
                      data=json.dumps({
                          "nodes": [{
                              "address": ip_address,
                              "port": port,
                              "condition": "ENABLED",
                              "type": "PRIMARY"
                          }]
                      }),
                      log=lb_log)
        d.addCallback(check_success, [200, 202])
        d.addErrback(log_lb_unexpected_errors, lb_log, 'add_node')
        d.addErrback(wrap_request_error, path, 'add_node')
        d.addErrback(check_deleted_clb, lb_id)
        return d

    d = retry(add,
              can_retry=compose_retries(
                  transient_errors_except(CLBOrNodeDeleted),
                  retry_times(
                      config_value('worker.lb_max_retries')
                      or LB_MAX_RETRIES)),
              next_interval=random_interval(
                  *(config_value('worker.lb_retry_interval_range')
                    or LB_RETRY_INTERVAL_RANGE)),
              clock=clock)

    def when_done(result):
        node_id = result['nodes'][0]['id']
        lb_log.msg('Added to load balancer', node_id=node_id)
        undo.push(_remove_from_clb, lb_log, endpoint, auth_token, lb_id,
                  node_id)
        return result

    return d.addCallback(treq.json_content).addCallback(when_done)
Example #50
 def test_non_existent_value(self):
     """
     :func:`~config.config_value` will return :data:`None` if the path does
     not exist in the nested dictionaries.
     """
     self.assertIdentical(config.config_value('baz.blah'), None)
Example #51
def when_authenticated((auth_token, service_catalog)):
    log.msg('Deleting server')
    return launch_server_v1.delete_server(
        log, config_value('region'), service_catalog, auth_token,
        (server['id'], server['lb_info']))
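Note that the tuple-unpacking parameter above is Python 2-only syntax; PEP 3113 removed it in Python 3, where the same callback would unpack explicitly:

def when_authenticated(result):
    # Python 3 version: unpack the (auth_token, service_catalog) pair
    # in the body instead of in the signature.
    auth_token, service_catalog = result
    log.msg('Deleting server')
    return launch_server_v1.delete_server(
        log, config_value('region'), service_catalog, auth_token,
        (server['id'], server['lb_info']))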
Example #52
def makeService(config):
    """
    Set up the otter-api service.
    """
    set_config_data(dict(config))

    if not config_value('mock'):
        seed_endpoints = [
            clientFromString(reactor, str(host))
            for host in config_value('cassandra.seed_hosts')
        ]

        cassandra_cluster = LoggingCQLClient(
            RoundRobinCassandraCluster(seed_endpoints,
                                       config_value('cassandra.keyspace')),
            log.bind(system='otter.silverberg'))

        set_store(CassScalingGroupCollection(cassandra_cluster))

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    cache_ttl = config_value('identity.cache_ttl')

    if cache_ttl is None:
        # FIXME: Pick an arbitrary cache ttl value based on absolutely no
        # science.
        cache_ttl = 300

    authenticator = CachingAuthenticator(
        reactor,
        ImpersonatingAuthenticator(config_value('identity.username'),
                                   config_value('identity.password'),
                                   config_value('identity.url'),
                                   config_value('identity.admin_url')),
        cache_ttl)

    supervisor = Supervisor(authenticator.authenticate_tenant, coiterate)

    set_supervisor(supervisor)

    s = MultiService()

    site = Site(root)
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(s)

    if config_value('scheduler') and not config_value('mock'):
        scheduler_service = SchedulerService(
            int(config_value('scheduler.batchsize')),
            int(config_value('scheduler.interval')), cassandra_cluster)
        scheduler_service.setServiceParent(s)

    return s
Example #53
File: http.py  Project: manishtomar/otter
def get_url_root():
    """
    Get the URL root
    :return: string containing the URL root
    """
    return config_value('url_root')
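A hypothetical call site, only to show the intended use; append_segments is the URL-joining helper seen elsewhere in this codebase, and tenant_id is a made-up variable:

# Hypothetical: build an absolute API URL from the configured root.
url = append_segments(get_url_root(), 'v1.0', tenant_id, 'groups')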
Example #54
def makeService(config):
    """
    Set up the otter-api service.
    """
    config = dict(config)
    set_config_data(config)

    parent = MultiService()

    region = config_value('region')

    seed_endpoints = [
        clientFromString(reactor, str(host))
        for host in config_value('cassandra.seed_hosts')]

    cassandra_cluster = LoggingCQLClient(
        TimingOutCQLClient(
            reactor,
            RoundRobinCassandraCluster(
                seed_endpoints,
                config_value('cassandra.keyspace'),
                disconnect_on_cancel=True),
            config_value('cassandra.timeout') or 30),
        log.bind(system='otter.silverberg'))

    store = CassScalingGroupCollection(
        cassandra_cluster, reactor, config_value('limits.absolute.maxGroups'))
    admin_store = CassAdmin(cassandra_cluster)

    bobby_url = config_value('bobby_url')
    if bobby_url is not None:
        set_bobby(BobbyClient(bobby_url))

    service_configs = get_service_configs(config)

    authenticator = generate_authenticator(reactor, config['identity'])
    supervisor = SupervisorService(authenticator, region, coiterate,
                                   service_configs)
    supervisor.setServiceParent(parent)

    set_supervisor(supervisor)

    health_checker = HealthChecker(reactor, {
        'store': getattr(store, 'health_check', None),
        'kazoo': store.kazoo_health_check,
        'supervisor': supervisor.health_check
    })

    # Setup cassandra cluster to disconnect when otter shuts down
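    # (The guard below is always true in this version of makeService,
    # since cassandra_cluster is bound unconditionally above; it reads
    # like a holdover from the older variant in Example #52, where the
    # 'mock' branch could skip Cassandra setup entirely.)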
    if 'cassandra_cluster' in locals():
        parent.addService(FunctionalService(stop=partial(
            call_after_supervisor, cassandra_cluster.disconnect, supervisor)))

    otter = Otter(store, region, health_checker.health_check)
    site = Site(otter.app.resource())
    site.displayTracebacks = False

    api_service = service(str(config_value('port')), site)
    api_service.setServiceParent(parent)

    # Setup admin service
    admin_port = config_value('admin')
    if admin_port:
        admin = OtterAdmin(admin_store)
        admin_site = Site(admin.app.resource())
        admin_site.displayTracebacks = False
        admin_service = service(str(admin_port), admin_site)
        admin_service.setServiceParent(parent)

    # setup cloud feed
    cf_conf = config.get('cloudfeeds', None)
    if cf_conf is not None:
        id_conf = deepcopy(config['identity'])
        id_conf['strategy'] = 'single_tenant'
        add_to_fanout(CloudFeedsObserver(
            reactor=reactor,
            authenticator=generate_authenticator(reactor, id_conf),
            tenant_id=cf_conf['tenant_id'],
            region=region,
            service_configs=service_configs))

    # Setup Kazoo client
    if config_value('zookeeper'):
        threads = config_value('zookeeper.threads') or 10
        disable_logs = config_value('zookeeper.no_logs')
        threadpool = ThreadPool(maxthreads=threads)
        sync_kz_client = KazooClient(
            hosts=config_value('zookeeper.hosts'),
            # Keep trying to connect until the end of time, with a
            # maximum interval of 10 minutes between attempts
            connection_retry=dict(max_tries=-1, max_delay=600),
            logger=None if disable_logs else TxLogger(log.bind(system='kazoo'))
        )
        kz_client = TxKazooClient(reactor, threadpool, sync_kz_client)
        # Don't time out; keep trying to connect forever
        d = kz_client.start(timeout=None)

        def on_client_ready(_):
            dispatcher = get_full_dispatcher(reactor, authenticator, log,
                                             get_service_configs(config),
                                             kz_client, store, supervisor,
                                             cassandra_cluster)
            # Setup scheduler service after starting
            scheduler = setup_scheduler(parent, dispatcher, store, kz_client)
            health_checker.checks['scheduler'] = scheduler.health_check
            otter.scheduler = scheduler
            # Give dispatcher to Otter REST object
            otter.dispatcher = dispatcher
            # Set the client after starting.
            # NOTE: There is a small window of time in which startup has
            # not finished and kz_client is not yet set; policy execution
            # and group delete will fail during that window.
            store.kz_client = kz_client
            # Setup kazoo to stop when shutting down
            parent.addService(FunctionalService(
                stop=partial(call_after_supervisor,
                             kz_client.stop, supervisor)))

            setup_converger(
                parent, kz_client, dispatcher,
                config_value('converger.interval') or 10,
                config_value('converger.build_timeout') or 3600,
                config_value('converger.limited_retry_iterations') or 10,
                config_value('converger.step_limits') or {})

        d.addCallback(on_client_ready)
        d.addErrback(log.err, 'Could not start TxKazooClient')

    return parent
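For orientation, here is an illustrative config shape covering the keys this makeService reads via config_value(), plus the worker keys from add_to_clb in Example #49. Every value below is a made-up placeholder, not a recommended setting, and the seed_hosts entries use Twisted endpoint-string syntax as implied by clientFromString:

# Illustrative only: keys mirror the config_value() lookups above;
# values are placeholders.
example_config = {
    'region': 'ORD',
    'port': 'tcp:9000',                 # strports description for the API site
    'admin': 'tcp:9001',                # optional admin API endpoint
    'url_root': 'http://localhost:9000',
    'bobby_url': None,                  # optional; enables BobbyClient if set
    'cassandra': {
        'seed_hosts': ['tcp:127.0.0.1:9160'],  # Twisted client endpoint strings
        'keyspace': 'otter',
        'timeout': 30,
    },
    'limits': {'absolute': {'maxGroups': 1000}},
    'identity': {},                     # consumed by generate_authenticator
    'cloudfeeds': {'tenant_id': 'some-tenant'},  # optional
    'zookeeper': {                      # optional; enables scheduler/converger
        'hosts': '127.0.0.1:2181',
        'threads': 10,
        'no_logs': False,
    },
    'converger': {
        'interval': 10,
        'build_timeout': 3600,
        'limited_retry_iterations': 10,
        'step_limits': {},
    },
    'worker': {
        'lb_max_retries': 5,
        'lb_retry_interval_range': [10, 15],
    },
}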