def remove_from_load_balancer(log, request_bag, lb_config, lb_response,
                              clock=None):
    """
    Remove a node from a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: A request function.
    :param dict lb_config: An ``lb_config`` dictionary.
    :param lb_response: The response the load balancer provided when the
        server being removed was added. Type and shape is dependent on the
        type of load balancer.
    :param IReactorTime clock: An optional clock, for testing. Will be
        passed on to implementations of node removal logic for specific
        load balancer APIs, if they support a clock.

    :returns: A Deferred that fires with :data:`None` if the operation
        completed successfully, or errbacks with a RequestError.
    """
    # Absent "type" means the classic Cloud Load Balancer.
    lb_type = lb_config.get("type", "CloudLoadBalancer")
    if lb_type == "CloudLoadBalancer":
        cloudLoadBalancers = config_value('cloudLoadBalancers')
        endpoint = public_endpoint_url(request_bag.service_catalog,
                                       cloudLoadBalancers,
                                       request_bag.lb_region)
        auth_token = request_bag.auth_token
        loadbalancer_id = lb_config["loadBalancerId"]
        # The CLB add-response carries a "nodes" list; take the first
        # entry's id.
        node_id = next(node_info["id"] for node_info in lb_response["nodes"])
        return _remove_from_clb(log, endpoint, auth_token, loadbalancer_id,
                                node_id, clock)
    elif lb_type == "RackConnectV3":
        lb_id = lb_config["loadBalancerId"]
        # The RCv3 add-response is a list of pairs; take the first pair's
        # cloud server id.
        node_id = next(pair["cloud_server"]["id"] for pair in lb_response)
        return remove_from_rcv3(request_bag, lb_id, node_id)
    else:
        raise RuntimeError("Unknown cloud load balancer type! config: {}"
                           .format(lb_config))
def find_end_point(self, _, key, service_type, region, default_url=None):
    """Initialize the instance with the endpoint required for later test
    execution.

    :param _: Ignored; present so this method can be used directly as a
        Deferred callback (it receives the prior callback's result).
    :param str key: The dictionary key to store the resulting endpoint URL
        under. Supported keys are listed above.
    :param str service_type: The kind of service to look for in the
        service catalog. For example, "cloudServersOpenStack" or
        "autoscale".
    :param str region: The region under which to look for the endpoint.
    :param str default_url: If provided, a template that can be used to
        compute a well-known endpoint. For example, if you're developing
        an implementation of a new service and using a set of mocks,
        you'll want to hide any mock version of your service, and use the
        endpoint your daemon provides in lieu of any mock. For example,
        "http://localhost:9000/v1.0/{0}". Note that {0} expands to the
        OpenStack tenant ID.

    :return: A Deferred that has already fired with ``self``.
    """
    try:
        self.endpoints[key] = auth.public_endpoint_url(
            self.sc, service_type, region
        )
    except auth.NoSuchEndpoint:
        # No fallback template supplied (or it is empty/falsy): re-raise
        # the original lookup failure.
        if not default_url:
            raise
        self.endpoints[key] = default_url.format(self.tenant)
    d = Deferred()
    d.callback(self)
    # really wish this didn't return None.
    return d
def add_to_load_balancer(log, request_bag, lb_config, server_details, undo,
                         clock=None):
    """
    Adds a given server to a given load balancer.

    :param log: A bound logger.
    :param callable request_bag: A request function.
    :param dict lb_config: An ``lb_config`` dictionary specifying which
        load balancer to add the server to.
    :param dict server_details: The server details, as returned by Nova.
    :param undo: An undo stack, forwarded to the CLB implementation so the
        addition can be rewound on later failure.
    :param clock: An optional clock, for testing; forwarded to the CLB
        implementation.

    :return: Deferred that fires with the load balancer response. The
        structure of this object depends on the load balancer type.
    """
    # Absent "type" means the classic Cloud Load Balancer.
    lb_type = lb_config.get("type", "CloudLoadBalancer")
    if lb_type == "CloudLoadBalancer":
        cloudLoadBalancers = config_value('cloudLoadBalancers')
        endpoint = public_endpoint_url(request_bag.service_catalog,
                                       cloudLoadBalancers,
                                       request_bag.lb_region)
        auth_token = request_bag.auth_token
        # CLB is given the address from _servicenet_address, not the
        # server's public address.
        ip_address = _servicenet_address(server_details["server"])
        return add_to_clb(log, endpoint, auth_token, lb_config, ip_address,
                          undo, clock)
    elif lb_type == "RackConnectV3":
        # RCv3 is keyed on the server id; undo/clock are not passed here.
        lb_id = lb_config["loadBalancerId"]
        server_id = server_details["server"]["id"]
        return add_to_rcv3(request_bag, lb_id, server_id)
    else:
        raise RuntimeError(
            "Unknown cloud load balancer type! config: {}".format(lb_config))
def add_to_load_balancer(log, request_bag, lb_config, server_details, undo,
                         clock=None):
    """
    Add a server to the load balancer described by ``lb_config``.

    Dispatches on the configured load balancer type (defaulting to
    "CloudLoadBalancer" when no "type" key is present).

    :param log: A bound logger.
    :param callable request_bag: A request function.
    :param str lb_config: An ``lb_config`` dictionary specifying which load
        balancer to add the server to.
    :param dict server_details: The server details, as returned by Nova.

    :return: Deferred that fires with the load balancer response. The
        structure of this object depends on the load balancer type.
    """
    lb_kind = lb_config.get("type", "CloudLoadBalancer")

    if lb_kind == "CloudLoadBalancer":
        service_name = config_value('cloudLoadBalancers')
        clb_endpoint = public_endpoint_url(request_bag.service_catalog,
                                           service_name,
                                           request_bag.lb_region)
        server_ip = _servicenet_address(server_details["server"])
        return add_to_clb(log, clb_endpoint, request_bag.auth_token,
                          lb_config, server_ip, undo, clock)

    if lb_kind == "RackConnectV3":
        return add_to_rcv3(request_bag,
                           lb_config["loadBalancerId"],
                           server_details["server"]["id"])

    raise RuntimeError("Unknown cloud load balancer type! config: {}"
                       .format(lb_config))
def scrub_otter_metadata(log, auth_token, service_catalog, region, server_id,
                         _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param service_catalog: The service catalog, used to locate the compute
        endpoint.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    bound_log = log.bind(region=region, server_id=server_id)
    bound_log.msg("Scrubbing otter-specific metadata")
    service_name = config_value('cloudServersOpenStack')
    endpoint = public_endpoint_url(service_catalog, service_name, region)
    url = append_segments(endpoint, 'servers', server_id, 'metadata')
    auth_hdr = headers(auth_token)
    # Build GET and PUT callables for the same metadata URL. The
    # ``method=method`` default binds the loop variable eagerly, avoiding
    # the late-binding-closure pitfall.
    get, put = [lambda data=None, method=method: _treq.request(
        method, url, headers=auth_hdr, data=data, log=bound_log)
        for method in ["GET", "PUT"]]
    # GET the current metadata, filter it through _without_otter_metadata,
    # and PUT the re-serialized remainder back.
    return (get()
            .addCallback(_treq.json_content)
            .addCallback(comp(json.dumps, _without_otter_metadata))
            .addCallback(put)
            .addCallback(_treq.content))
def find_end_point(self, _, key, service_type, region, default_url=None):
    """Look up a service endpoint and record it for later test execution.

    :param _: Ignored chained-callback result.
    :param str key: The dictionary key to store the resulting endpoint URL
        under. Supported keys are listed above.
    :param str service_type: The kind of service to look for in the
        service catalog, e.g. "cloudServersOpenStack" or "autoscale".
    :param str region: The region under which to look for the endpoint.
    :param str default_url: Optional fallback template used when the
        catalog has no matching entry; ``{0}`` expands to the OpenStack
        tenant ID (e.g. "http://localhost:9000/v1.0/{0}").

    :return: A Deferred that has already fired with ``self``.
    """
    try:
        endpoint = auth.public_endpoint_url(self.sc, service_type, region)
    except auth.NoSuchEndpoint:
        # Without a fallback template there is nothing sensible to store.
        if not default_url:
            raise
        endpoint = default_url.format(self.tenant)
    self.endpoints[key] = endpoint

    result = Deferred()
    result.callback(self)
    # really wish this didn't return None.
    return result
def scrub_otter_metadata(log, auth_token, service_catalog, region, server_id,
                         _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    scrub_log = log.bind(region=region, server_id=server_id)
    scrub_log.msg("Scrubbing otter-specific metadata")

    compute_service = config_value('cloudServersOpenStack')
    compute_endpoint = public_endpoint_url(service_catalog, compute_service,
                                           region)
    metadata_url = append_segments(compute_endpoint, 'servers', server_id,
                                   'metadata')
    request_headers = headers(auth_token)

    def _metadata_request(method, data=None):
        # Issue a request against the server's metadata resource.
        return _treq.request(method, metadata_url, headers=request_headers,
                             data=data, log=scrub_log)

    # Fetch metadata, strip the otter keys, and write the remainder back.
    d = _metadata_request("GET")
    d.addCallback(_treq.json_content)
    d.addCallback(comp(json.dumps, _without_otter_metadata))
    d.addCallback(lambda body: _metadata_request("PUT", data=body))
    d.addCallback(_treq.content)
    return d
def when_removed_from_loadbalancers(_ignore):
    # Once the node is off its load balancers, delete the Nova server
    # itself (with verification/retries handled by verified_delete).
    nova_service = config_value('cloudServersOpenStack')
    nova_endpoint = public_endpoint_url(request_bag.service_catalog,
                                        nova_service,
                                        request_bag.region)
    return verified_delete(log, nova_endpoint, request_bag, server_id,
                           clock=clock)
def trigger_convergence(authenticator, region, group):
    """
    Trigger convergence on a scaling group.

    :param authenticator: Otter authenticator providing
        ``authenticate_tenant``.
    :param str region: Region under which the autoscale endpoint is found.
    :param dict group: Scaling group dict; "tenantId" and "groupId" are
        read from it.

    :raises ValueError: if the converge POST does not return 204.

    NOTE(review): the ``yield``s indicate this is meant to run under an
    ``@inlineCallbacks``-style decorator (not visible here) -- confirm at
    the definition site.
    """
    token, catalog = yield authenticator.authenticate_tenant(
        group["tenantId"])
    endpoint = public_endpoint_url(catalog, "autoscale", region)
    resp = yield treq.post(
        append_segments(endpoint, "groups", group["groupId"], "converge"),
        headers=headers(token), data="")
    # Anything other than 204 No Content is treated as failure.
    if resp.code != 204:
        raise ValueError("bad code", resp.code)
def test_public_endpoint_url(self):
    """
    ``public_endpoint_url`` returns the first publicURL for the named
    service within the requested region.
    """
    url = public_endpoint_url(fake_service_catalog,
                              'cloudServersOpenStack',
                              'DFW')
    self.assertEqual(url, 'http://dfw.openstack/')
def get_service_endpoint(service_catalog, region):
    """
    Get the service endpoint used to connect cloud services.

    :param service_catalog: The service catalog to search.
    :param str region: The region whose public endpoint is wanted.
    :return: The public endpoint URL of the configured
        'cloudServersOpenStack' service.
    """
    return public_endpoint_url(service_catalog,
                               config_value('cloudServersOpenStack'),
                               region)
def get_tenant_ids(token, catalog):
    """
    Fetch the tenant ids of all tenants that have ``desired`` metrics in
    Cloud Metrics.

    :param str token: Auth token used for the metrics request.
    :param catalog: Service catalog used to find the "cloudMetrics"
        endpoint. Note the region is hard-coded to "IAD".

    :return: Deferred firing with a list of tenant id strings.
    """
    endpoint = public_endpoint_url(catalog, "cloudMetrics", "IAD")
    d = treq.get(append_segments(endpoint, "metrics", "search"),
                 headers=headers(token), params={"query": "*.*.desired"})
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    # Metric names match the "*.*.desired" query, i.e. three dot-separated
    # segments; the middle segment is taken as the tenant id. (Assumed
    # from the query shape -- confirm against the metrics service.)
    d.addCallback(lambda body: [item["metric"].split(".")[1]
                                for item in body])
    return d
def get_tenant_ids(token, catalog):
    """
    Return tenant ids of tenants that have ``desired`` metrics recorded in
    Cloud Metrics (IAD region).

    :param str token: Auth token for the metrics request.
    :param catalog: Service catalog used to find the metrics endpoint.
    :return: Deferred firing with a list of tenant id strings.
    """
    metrics_endpoint = public_endpoint_url(catalog, "cloudMetrics", "IAD")
    search_url = append_segments(metrics_endpoint, "metrics", "search")

    def _tenant_ids_from(body):
        # Second dot-separated segment of each metric name.
        return [entry["metric"].split(".")[1] for entry in body]

    d = treq.get(search_url, headers=headers(token),
                 params={"query": "*.*.desired"})
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    d.addCallback(_tenant_ids_from)
    return d
def service_request(*args, **kwargs):
    """
    Perform an HTTP request like the one produced by
    :func:`get_request_func`, except bound to a specific
    Rackspace/OpenStack service, so paths may be given relative to that
    service's endpoint.
    """
    service_root = public_endpoint_url(catalog, service_name, region)
    rooted_request = add_bind_root(service_root, request_func)
    return rooted_request(*args, **kwargs)
def trigger_convergence(authenticator, region, group):
    """
    Trigger convergence on a group

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param dict group: Scaling group dict
    """
    token, catalog = yield authenticator.authenticate_tenant(
        group["tenantId"])
    autoscale_root = public_endpoint_url(catalog, "autoscale", region)
    converge_url = append_segments(autoscale_root, "groups",
                                   group["groupId"], "converge")
    response = yield treq.post(converge_url, headers=headers(token),
                               data="")
    # 204 is the only acceptable outcome.
    yield check_success(response, [204])
def trigger_convergence(authenticator, region, group, no_error_group):
    """
    Trigger convergence on a group

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param dict group: Scaling group dict
    :param bool no_error_group: If true then do not converge ERROR groups

    :return: Yields until the converge POST has succeeded with 204 (via
        the surrounding inlineCallbacks-style machinery -- decorator not
        visible here, confirm at the definition site).
    """
    token, catalog = yield authenticator.authenticate_tenant(
        group["tenantId"])
    endpoint = public_endpoint_url(catalog, "autoscale", region)
    # The query parameter is a string, not a bool: "false" asks the
    # service not to converge groups that are in ERROR.
    conv_on_error = "false" if no_error_group else "true"
    resp = yield treq.post(
        append_segments(endpoint, "groups", group["groupId"], "converge"),
        headers=headers(token), params={"on_error": conv_on_error},
        data="")
    yield check_success(resp, [204])
def trigger_convergence(authenticator, region, group, no_error_group):
    """
    Trigger convergence on a group

    :param IAuthenticator authenticator: Otter authenticator
    :param str region: Region where this is running
    :param dict group: Scaling group dict
    :param bool no_error_group: If true then do not converge ERROR groups
    """
    token, catalog = yield authenticator.authenticate_tenant(
        group["tenantId"])
    autoscale_root = public_endpoint_url(catalog, "autoscale", region)
    converge_url = append_segments(autoscale_root, "groups",
                                   group["groupId"], "converge")
    # Stringly-typed flag understood by the service.
    on_error_flag = "true" if not no_error_group else "false"
    resp = yield treq.post(converge_url,
                           headers=headers(token),
                           params={"on_error": on_error_flag},
                           data="")
    if resp.code != 204:
        raise ValueError("bad code", resp.code)
def remove_from_load_balancer(log, request_bag, lb_config, lb_response,
                              clock=None):
    """
    Remove a node from a load balancer.

    Dispatches on the load balancer type named in ``lb_config``
    (defaulting to "CloudLoadBalancer"), extracting the node identifier
    from the response recorded when the server was originally added.

    :param BoundLog log: A bound logger.
    :param request_bag: A request function.
    :param dict lb_config: An ``lb_config`` dictionary.
    :param lb_response: The response the load balancer provided when the
        server being removed was added; its shape depends on the load
        balancer type.
    :param IReactorTime clock: An optional clock, for testing; forwarded
        to the CLB removal logic.

    :returns: A Deferred that fires with :data:`None` if the operation
        completed successfully, or errbacks with a RequestError.
    """
    kind = lb_config.get("type", "CloudLoadBalancer")

    if kind == "CloudLoadBalancer":
        clb_service = config_value('cloudLoadBalancers')
        clb_endpoint = public_endpoint_url(request_bag.service_catalog,
                                           clb_service,
                                           request_bag.lb_region)
        clb_id = lb_config["loadBalancerId"]
        clb_node_id = next(entry["id"] for entry in lb_response["nodes"])
        return _remove_from_clb(log, clb_endpoint, request_bag.auth_token,
                                clb_id, clb_node_id, clock)

    if kind == "RackConnectV3":
        rcv3_node_id = next(pair["cloud_server"]["id"]
                            for pair in lb_response)
        return remove_from_rcv3(request_bag, lb_config["loadBalancerId"],
                                rcv3_node_id)

    raise RuntimeError(
        "Unknown cloud load balancer type! config: {}".format(lb_config))
def get_service_endpoint(service_name, service_catalog, region):
    """Get the service endpoint used to connect cloud services.

    ``service_name`` is a config key; its configured value names the
    service to look up in the catalog.
    """
    configured_service = config_value(service_name)
    return public_endpoint_url(service_catalog, configured_service, region)
def get_service_endpoint(service_name, service_catalog, region):
    """Get the service endpoint used to connect cloud services.

    :param str service_name: Config key whose configured value names the
        service in the catalog (resolved through ``config_value``).
    :param service_catalog: The service catalog to search.
    :param str region: The region whose public endpoint is wanted.
    :return: The public endpoint URL for the configured service.
    """
    return public_endpoint_url(service_catalog, config_value(service_name),
                               region)
def launch_server(log, request_bag, scaling_group, launch_config, undo,
                  clock=None):
    """
    Launch a new server given the launch config auth tokens and service
    catalog. Possibly adding the newly launched server to a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object with a bunch of useful data on it,
        including a callable to re-auth and get a new token.
    :param IScalingGroup scaling_group: The scaling group to add the
        launched server to.
    :param dict launch_config: A launch_config args structure as defined
        for the launch_server_v1 type.
    :param IUndoStack undo: The stack that will be rewound if undo fails.
    :param clock: An optional clock, for testing; drives the retry
        intervals.

    :return: Deferred that fires with a 2-tuple of server details and the
        list of load balancer responses from add_to_load_balancers.
    """
    launch_config = prepare_launch_config(scaling_group.uuid, launch_config)
    cloudServersOpenStack = config_value('cloudServersOpenStack')
    server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                          cloudServersOpenStack,
                                          request_bag.region)
    lb_config = launch_config.get('loadBalancers', [])
    server_config = launch_config['server']
    log = log.bind(server_name=server_config['name'])
    # One-element list so the inner callbacks can rebind the logger once
    # the server id is known.
    ilog = [None]

    def check_metadata(server):
        # sanity check to make sure the metadata didn't change - can
        # probably be removed after a while if we do not see any log
        # messages from this function
        expected = launch_config['server']['metadata']
        result = server['server'].get('metadata')
        if result != expected:
            ilog[0].msg('Server metadata has changed.',
                        sanity_check=True,
                        expected_metadata=expected,
                        nova_metadata=result)
        return server

    def wait_for_server(server, new_request_bag):
        # Wait until the created server goes active, then sanity-check its
        # metadata.
        server_id = server['server']['id']
        # NOTE: If server create is retried, each server delete will be
        # pushed to undo stack even after it will be deleted in
        # check_error which is fine since verified_delete succeeds on
        # deleted server
        undo.push(verified_delete, log, server_endpoint, new_request_bag,
                  server_id)
        ilog[0] = log.bind(server_id=server_id)
        return wait_for_active(ilog[0], server_endpoint,
                               new_request_bag.auth_token,
                               server_id).addCallback(check_metadata)

    def add_lb(server, new_request_bag):
        # With no load balancers configured, succeed synchronously with an
        # empty response list.
        if lb_config:
            lbd = add_to_load_balancers(ilog[0], new_request_bag,
                                        lb_config, server, undo)
            lbd.addCallback(lambda lb_response: (server, lb_response))
            return lbd
        return (server, [])

    def _real_create_server(new_request_bag):
        auth_token = new_request_bag.auth_token
        d = create_server(server_endpoint, auth_token, server_config,
                          log=log)
        d.addCallback(wait_for_server, new_request_bag)
        d.addCallback(add_lb, new_request_bag)
        return d

    def _create_server():
        # Re-auth before each attempt so retries do not reuse a stale
        # token.
        return request_bag.re_auth().addCallback(_real_create_server)

    def check_error(f):
        # Only an ERROR-state server justifies a retry; the failed server
        # is deleted (best-effort, result not awaited) before retrying.
        f.trap(UnexpectedServerStatus)
        if f.value.status == 'ERROR':
            log.msg('{server_id} errored, deleting and creating new '
                    'server instead',
                    server_id=f.value.server_id)
            # trigger server delete and return True to allow retry
            verified_delete(log, server_endpoint, request_bag,
                            f.value.server_id)
            return True
        else:
            return False

    # Up to 3 attempts, 15 seconds apart, retried only when check_error
    # approves.
    d = retry(_create_server,
              can_retry=compose_retries(retry_times(3), check_error),
              next_interval=repeating_interval(15),
              clock=clock)
    return d
def launch_server(log, request_bag, scaling_group, launch_config, undo,
                  clock=None):
    """
    Launch a new server given the launch config auth tokens and service
    catalog. Possibly adding the newly launched server to a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object with a bunch of useful data on it,
        including a callable to re-auth and get a new token.
    :param IScalingGroup scaling_group: The scaling group to add the
        launched server to.
    :param dict launch_config: A launch_config args structure as defined
        for the launch_server_v1 type.
    :param IUndoStack undo: The stack that will be rewound if undo fails.
    :param clock: Optional clock, for testing; used by the retry loop.

    :return: Deferred that fires with a 2-tuple of server details and the
        list of load balancer responses from add_to_load_balancers.
    """
    launch_config = prepare_launch_config(scaling_group.uuid, launch_config)
    cloudServersOpenStack = config_value('cloudServersOpenStack')
    server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                          cloudServersOpenStack,
                                          request_bag.region)
    lb_config = launch_config.get('loadBalancers', [])
    server_config = launch_config['server']
    log = log.bind(server_name=server_config['name'])
    # Mutable cell: callbacks below rebind this logger with the server id
    # as soon as one exists.
    ilog = [None]

    def check_metadata(server):
        # sanity check to make sure the metadata didn't change - can
        # probably be removed after a while if we do not see any log
        # messages from this function
        expected = launch_config['server']['metadata']
        result = server['server'].get('metadata')
        if result != expected:
            ilog[0].msg('Server metadata has changed.',
                        sanity_check=True,
                        expected_metadata=expected,
                        nova_metadata=result)
        return server

    def wait_for_server(server, new_request_bag):
        # Register cleanup, then poll until the server is active and
        # verify its metadata.
        server_id = server['server']['id']
        # NOTE: If server create is retried, each server delete will be
        # pushed to undo stack even after it will be deleted in
        # check_error which is fine since verified_delete succeeds on
        # deleted server
        undo.push(
            verified_delete, log, server_endpoint, new_request_bag,
            server_id)
        ilog[0] = log.bind(server_id=server_id)
        return wait_for_active(
            ilog[0],
            server_endpoint,
            new_request_bag.auth_token,
            server_id).addCallback(check_metadata)

    def add_lb(server, new_request_bag):
        # No LB config means an immediate (server, []) result.
        if lb_config:
            lbd = add_to_load_balancers(
                ilog[0], new_request_bag, lb_config, server, undo)
            lbd.addCallback(lambda lb_response: (server, lb_response))
            return lbd
        return (server, [])

    def _real_create_server(new_request_bag):
        auth_token = new_request_bag.auth_token
        d = create_server(server_endpoint, auth_token, server_config,
                          log=log)
        d.addCallback(wait_for_server, new_request_bag)
        d.addCallback(add_lb, new_request_bag)
        return d

    def _create_server():
        # Fresh credentials for every attempt.
        return request_bag.re_auth().addCallback(_real_create_server)

    def check_error(f):
        # Retry predicate: only UnexpectedServerStatus with status ERROR
        # allows another attempt; the broken server is deleted first
        # (fire-and-forget).
        f.trap(UnexpectedServerStatus)
        if f.value.status == 'ERROR':
            log.msg('{server_id} errored, deleting and creating new '
                    'server instead', server_id=f.value.server_id)
            # trigger server delete and return True to allow retry
            verified_delete(log, server_endpoint, request_bag,
                            f.value.server_id)
            return True
        else:
            return False

    # Creation is attempted up to 3 times at 15-second intervals, gated by
    # check_error.
    d = retry(_create_server,
              can_retry=compose_retries(retry_times(3), check_error),
              next_interval=repeating_interval(15),
              clock=clock)
    return d