def add_to_load_balancer(log, endpoint, auth_token, lb_config, ip_address, undo,
                         clock=None):
    """
    Add an IP address to a load balancer based on the ``lb_config``.

    TODO: Handle load balancer node metadata.

    :param log: A bound logger.
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict lb_config: An lb_config dictionary containing
        ``loadBalancerId`` and ``port``.
    :param str ip_address: The IP Address of the node to add to the load
        balancer.
    :param IUndoStack undo: An IUndoStack to push any reversable operations
        onto.
    :param clock: Optional reactor/clock used by the retry machinery.

    :return: Deferred that fires with the Add Node to load balancer response
        as a dict.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    lb_log = log.bind(loadbalancer_id=lb_id, ip_address=ip_address)

    # Node payload is invariant across retries, so build it once.
    payload = {"nodes": [{"address": ip_address,
                          "port": port,
                          "condition": "ENABLED",
                          "type": "PRIMARY"}]}

    def post_node():
        # One attempt: POST the node, then normalize errors so the retry
        # predicate below can classify them.
        request = treq.post(path, headers=headers(auth_token),
                            data=json.dumps(payload), log=lb_log)
        request.addCallback(check_success, [200, 202])
        request.addErrback(log_lb_unexpected_errors, lb_log, 'add_node')
        request.addErrback(wrap_request_error, path, 'add_node')
        request.addErrback(check_deleted_clb, lb_id)
        return request

    max_retries = config_value('worker.lb_max_retries') or LB_MAX_RETRIES
    interval_range = (config_value('worker.lb_retry_interval_range') or
                      LB_RETRY_INTERVAL_RANGE)

    # Retry transient failures, but stop immediately if the CLB or node is
    # gone (CLBOrNodeDeleted) or the retry budget is exhausted.
    d = retry(
        post_node,
        can_retry=compose_retries(
            transient_errors_except(CLBOrNodeDeleted),
            retry_times(max_retries)),
        next_interval=random_interval(*interval_range),
        clock=clock)

    def push_undo_and_log(result):
        node_id = result['nodes'][0]['id']
        lb_log.msg('Added to load balancer', node_id=node_id)
        # Register the inverse operation so a failed launch can be rewound.
        undo.push(remove_from_load_balancer, lb_log, endpoint, auth_token,
                  lb_id, node_id)
        return result

    d.addCallback(treq.json_content)
    d.addCallback(push_undo_and_log)
    return d
def test_compose_retries(self):
    """
    `compose_retries` returns True only if all of its functions return True.
    """
    f1 = lambda f: f % 2 == 0
    f2 = lambda f: f % 5 == 0
    can_retry = compose_retries(f1, f2)
    # True only if both f1 and f2 return True
    self.assertTrue(can_retry(10))
    # False otherwise — cover each partial-match branch explicitly:
    self.assertFalse(can_retry(8))   # f1 True, f2 False
    self.assertFalse(can_retry(5))   # f1 False, f2 True (previously untested)
    self.assertFalse(can_retry(3))   # neither predicate matches
def add_event(event, admin_tenant_id, region, log):
    """
    Add event to cloud feeds.
    """
    (event, error, timestamp,
     event_tenant_id, event_id) = sanitize_event(event)
    req = prepare_request(request_format, event, error, timestamp,
                          region, event_tenant_id, event_id)

    def _should_retry(failure):
        # Retry any non-API failure; for APIErrors retry only when the
        # status is outside the 4xx client-error range (retrying a 4xx
        # would just fail the same way again).
        if not failure.check(APIError):
            return True
        return failure.value.code < 400 or failure.value.code >= 500

    eff = retry_effect(
        publish_autoscale_event(req, log=log),
        compose_retries(_should_retry, retry_times(5)),
        exponential_backoff_interval(2))
    return Effect(TenantScope(tenant_id=admin_tenant_id, effect=eff))
def _remove_from_clb(log, endpoint, auth_token, loadbalancer_id, node_id,
                     clock=None):
    """
    Remove a node from a CLB load balancer.

    :param log: A bound logger.
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone authentication token.
    :param str loadbalancer_id: The ID for a Cloud Load Balancer.
    :param str node_id: The ID for a node in that Cloud Load Balancer.
    :param clock: Optional reactor/clock used by the retry machinery.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with a RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is working
    # fine
    lb_log.msg('Removing from load balancer')
    path = append_segments(endpoint, 'loadbalancers', str(loadbalancer_id),
                           'nodes', str(node_id))

    def remove():
        # One DELETE attempt; errbacks normalize failures so the retry
        # predicate below can classify them.
        d = treq.delete(path, headers=headers(auth_token), log=lb_log)
        d.addCallback(check_success, [200, 202])
        # Drain the response body even though it is unused.
        # To avoid https://twistedmatrix.com/trac/ticket/6751
        d.addCallback(treq.content)
        d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
        d.addErrback(wrap_request_error, path, 'remove_node')
        d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
        return d

    # Retry transient failures up to the configured limit, but stop
    # immediately when the CLB or the node is already gone.
    d = retry(remove,
              can_retry=compose_retries(
                  transient_errors_except(CLBOrNodeDeleted),
                  retry_times(
                      config_value('worker.lb_max_retries') or
                      LB_MAX_RETRIES)),
              next_interval=random_interval(
                  *(config_value('worker.lb_retry_interval_range') or
                    LB_RETRY_INTERVAL_RANGE)),
              clock=clock)

    # A node or CLB deleted is considered successful removal
    d.addErrback(
        lambda f: f.trap(CLBOrNodeDeleted) and lb_log.msg(f.value.message))
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
def add_event(event, admin_tenant_id, region, log):
    """
    Add event to cloud feeds.

    :param event: the raw event to publish (shape is whatever
        ``sanitize_event`` accepts — TODO confirm against its definition).
    :param admin_tenant_id: tenant under whose scope the publish effect runs.
    :param region: region passed through to ``prepare_request``.
    :param log: a logger passed to ``publish_autoscale_event``.

    :return: An :class:`Effect` wrapped in a :class:`TenantScope`.
    """
    event, error, timestamp, event_tenant_id, event_id = sanitize_event(event)
    req = prepare_request(request_format, event, error, timestamp,
                          region, event_tenant_id, event_id)
    # Retry up to 5 times with exponential backoff (base 2s). The predicate
    # retries any non-APIError failure, and APIErrors whose status is outside
    # the 4xx range — a 4xx client error would just fail the same way again.
    eff = retry_effect(
        publish_autoscale_event(req, log=log),
        compose_retries(
            lambda f: (not f.check(APIError) or
                       f.value.code < 400 or
                       f.value.code >= 500),
            retry_times(5)),
        exponential_backoff_interval(2))
    return Effect(TenantScope(tenant_id=admin_tenant_id, effect=eff))
def remove_from_load_balancer(log, endpoint, auth_token, loadbalancer_id,
                              node_id, clock=None):
    """
    Remove a node from a load balancer.

    :param log: A bound logger.
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param str loadbalancer_id: The ID for a cloud loadbalancer.
    :param str node_id: The ID for a node in that cloud loadbalancer.
    :param clock: Optional reactor/clock used by the retry machinery.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with a RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is working
    # fine
    lb_log.msg('Removing from load balancer')
    path = append_segments(endpoint, 'loadbalancers', str(loadbalancer_id),
                           'nodes', str(node_id))

    def remove():
        # One DELETE attempt; errbacks normalize failures so the retry
        # predicate below can classify them.
        d = treq.delete(path, headers=headers(auth_token), log=lb_log)
        d.addCallback(check_success, [200, 202])
        # Drain the (unused) response body.
        d.addCallback(treq.content)  # To avoid https://twistedmatrix.com/trac/ticket/6751
        d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
        d.addErrback(wrap_request_error, path, 'remove_node')
        d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
        return d

    # Retry transient failures up to the configured limit, but stop
    # immediately when the CLB or the node is already gone.
    d = retry(
        remove,
        can_retry=compose_retries(
            transient_errors_except(CLBOrNodeDeleted),
            retry_times(config_value('worker.lb_max_retries') or
                        LB_MAX_RETRIES)),
        next_interval=random_interval(
            *(config_value('worker.lb_retry_interval_range') or
              LB_RETRY_INTERVAL_RANGE)),
        clock=clock)

    # A node or CLB deleted is considered successful removal
    d.addErrback(lambda f: f.trap(CLBOrNodeDeleted) and
                 lb_log.msg(f.value.message))
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
def launch_server(log, request_bag, scaling_group, launch_config, undo,
                  clock=None):
    """
    Launch a new server given the launch config auth tokens and service
    catalog. Possibly adding the newly launched server to a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object with a bunch of useful data on it, including
        a callable to re-auth and get a new token.
    :param IScalingGroup scaling_group: The scaling group to add the launched
        server to.
    :param dict launch_config: A launch_config args structure as defined for
        the launch_server_v1 type.
    :param IUndoStack undo: The stack that will be rewound if undo fails.
    :param clock: Optional reactor/clock used by the retry machinery.

    :return: Deferred that fires with a 2-tuple of server details and the
        list of load balancer responses from add_to_load_balancers.
    """
    launch_config = prepare_launch_config(scaling_group.uuid, launch_config)

    cloudServersOpenStack = config_value('cloudServersOpenStack')
    server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                          cloudServersOpenStack,
                                          request_bag.region)

    lb_config = launch_config.get('loadBalancers', [])
    server_config = launch_config['server']

    log = log.bind(server_name=server_config['name'])
    # ilog is a 1-element list used as a mutable cell: inner callbacks
    # rebind it to a server_id-bound logger once the server id is known.
    ilog = [None]

    def check_metadata(server):
        # sanity check to make sure the metadata didn't change - can probably
        # be removed after a while if we do not see any log messages from this
        # function
        expected = launch_config['server']['metadata']
        result = server['server'].get('metadata')
        if result != expected:
            ilog[0].msg('Server metadata has changed.',
                        sanity_check=True,
                        expected_metadata=expected,
                        nova_metadata=result)
        return server

    def wait_for_server(server, new_request_bag):
        server_id = server['server']['id']

        # NOTE: If server create is retried, each server delete will be pushed
        # to undo stack even after it will be deleted in check_error which is
        # fine since verified_delete succeeds on deleted server
        undo.push(verified_delete, log, server_endpoint, new_request_bag,
                  server_id)

        ilog[0] = log.bind(server_id=server_id)
        return wait_for_active(ilog[0], server_endpoint,
                               new_request_bag.auth_token,
                               server_id).addCallback(check_metadata)

    def add_lb(server, new_request_bag):
        # Only touch load balancers when the launch config asks for them.
        if lb_config:
            lbd = add_to_load_balancers(ilog[0], new_request_bag, lb_config,
                                        server, undo)
            lbd.addCallback(lambda lb_response: (server, lb_response))
            return lbd
        return (server, [])

    def _real_create_server(new_request_bag):
        auth_token = new_request_bag.auth_token
        d = create_server(server_endpoint, auth_token, server_config, log=log)
        d.addCallback(wait_for_server, new_request_bag)
        d.addCallback(add_lb, new_request_bag)
        return d

    def _create_server():
        # Re-authenticate before every attempt so long retries don't
        # run with an expired token.
        return request_bag.re_auth().addCallback(_real_create_server)

    def check_error(f):
        # Retry predicate: only an UnexpectedServerStatus of 'ERROR' is
        # retriable; the broken server is deleted first.
        f.trap(UnexpectedServerStatus)

        if f.value.status == 'ERROR':
            log.msg('{server_id} errored, deleting and creating new '
                    'server instead', server_id=f.value.server_id)

            # trigger server delete and return True to allow retry
            verified_delete(log, server_endpoint, request_bag,
                            f.value.server_id)
            return True
        else:
            return False

    d = retry(_create_server,
              can_retry=compose_retries(retry_times(3), check_error),
              next_interval=repeating_interval(15), clock=clock)

    return d
def add_to_clb(log, endpoint, auth_token, lb_config, ip_address, undo,
               clock=None):
    """
    Add an IP address to a Cloud Load Balancer based on the ``lb_config``.

    TODO: Handle load balancer node metadata.

    :param log: A bound logger
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone auth token.
    :param dict lb_config: An ``lb_config`` dictionary.
    :param str ip_address: The IP address of the node to add to the load
        balancer.
    :param IUndoStack undo: An IUndoStack to push any reversable operations
        onto.
    :param clock: Optional reactor/clock used by the retry machinery.

    :return: Deferred that fires with the load balancer response.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    lb_log = log.bind(loadbalancer_id=lb_id, ip_address=ip_address)

    def add():
        # One POST attempt; errbacks normalize failures so the retry
        # predicate below can classify them.
        d = treq.post(path,
                      headers=headers(auth_token),
                      data=json.dumps({
                          "nodes": [{
                              "address": ip_address,
                              "port": port,
                              "condition": "ENABLED",
                              "type": "PRIMARY"
                          }]
                      }),
                      log=lb_log)
        d.addCallback(check_success, [200, 202])
        d.addErrback(log_lb_unexpected_errors, lb_log, 'add_node')
        d.addErrback(wrap_request_error, path, 'add_node')
        d.addErrback(check_deleted_clb, lb_id)
        return d

    # Retry transient failures up to the configured limit, but stop
    # immediately when the CLB or the node is already gone.
    d = retry(add,
              can_retry=compose_retries(
                  transient_errors_except(CLBOrNodeDeleted),
                  retry_times(
                      config_value('worker.lb_max_retries') or
                      LB_MAX_RETRIES)),
              next_interval=random_interval(
                  *(config_value('worker.lb_retry_interval_range') or
                    LB_RETRY_INTERVAL_RANGE)),
              clock=clock)

    def when_done(result):
        node_id = result['nodes'][0]['id']
        lb_log.msg('Added to load balancer', node_id=node_id)
        # Register the inverse operation so a failed launch can be rewound.
        undo.push(_remove_from_clb, lb_log, endpoint, auth_token, lb_id,
                  node_id)
        return result

    return d.addCallback(treq.json_content).addCallback(when_done)
def create_server(server_endpoint, auth_token, server_config, log=None,
                  clock=None, retries=3, create_failure_delay=5, _treq=None):
    """
    Create a new server.  If there is an error from Nova from this call,
    checks to see if the server was created anyway.  If not, will retry the
    create ``retries`` times (checking each time if a server was created).

    If the error from Nova is a 400, does not retry, because that implies that
    retrying will just result in another 400 (bad args).

    If checking to see if the server is created also results in a failure,
    does not retry because there might just be something wrong with Nova.

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.
    :param int retries: Number of tries to retry the create.
    :param int create_failure_delay: how much time in seconds to wait after
        a create server failure before checking Nova to see if a server
        was created

    :param log: logger
    :type log: :class:`otter.log.bound.BoundLog`

    :param clock: Optional reactor/clock used for delays and retries.

    :param _treq: To be used for testing - what treq object to use
    :type _treq: something with the same api as :obj:`treq`

    :return: Deferred that fires with the CreateServer response as a dict.
    """
    path = append_segments(server_endpoint, 'servers')

    if _treq is None:  # pragma: no cover
        _treq = treq
    if clock is None:  # pragma: no cover
        from twisted.internet import reactor
        clock = reactor

    def _check_results(result, propagated_f):
        """
        Return the original failure, if checking a server resulted in a
        failure too.  Returns a wrapped propagated failure, if there were no
        servers created, so that the retry utility knows that server creation
        can be retried.
        """
        if isinstance(result, Failure):
            log.msg("Attempt to find a created server in nova resulted in "
                    "{failure}. Propagating the original create error instead.",
                    failure=result)
            return propagated_f

        if result is None:
            # Wrapping the original failure lets the retry predicate below
            # recognize this case as retriable.
            raise _NoCreatedServerFound(propagated_f)

        return result

    def _check_server_created(f):
        """
        If creating a server failed with anything other than a 400, see if
        Nova created a server anyway (a 400 means that the server creation
        args were bad, and there is no point in retrying).

        If Nova created a server, just return it and pretend that the error
        never happened.  If it didn't, or if checking resulted in another
        failure response, return a failure of some type.
        """
        f.trap(APIError)

        if f.value.code == 400:
            return f

        # Give Nova a moment before asking it whether a server appeared.
        d = deferLater(clock, create_failure_delay, find_server,
                       server_endpoint, auth_token, server_config, log=log)
        d.addBoth(_check_results, f)

        return d

    def _create_with_delay(to_delay):
        d = _treq.post(path, headers=headers(auth_token),
                       data=json.dumps({'server': server_config}), log=log)
        if to_delay:
            # Add 1 second delay to space 1 second between server creations
            d.addCallback(delay, clock, 1)
        return d

    def _create_server():
        """
        Attempt to create a server, handling spurious non-400 errors from Nova
        by seeing if Nova created a server anyway in spite of the error.  If
        so then create server succeeded.

        If not, and if no further errors occur, server creation can be
        retried.
        """
        # NOTE: helper is really spelled "get_sempahore" in this module.
        # If configured, a semaphore limits concurrent server creations.
        sem = get_sempahore("create_server", "worker.create_server_limit")
        if sem is not None:
            d = sem.run(_create_with_delay, True)
        else:
            d = _create_with_delay(False)
        d.addCallback(check_success, [202], _treq=_treq)
        d.addCallback(_treq.json_content)
        d.addErrback(_check_server_created)
        return d

    def _unwrap_NoCreatedServerFound(f):
        """
        The original failure was wrapped in a :class:`_NoCreatedServerFound`
        for ease of retry, but that should not be the final error propagated
        up by :func:`create_server`.

        This errback unwraps the :class:`_NoCreatedServerFound` error and
        returns the original failure.
        """
        f.trap(_NoCreatedServerFound)
        return f.value.original

    # Retry up to `retries` times; only _NoCreatedServerFound failures are
    # retriable (every other failure is terminal).
    d = retry(_create_server,
              can_retry=compose_retries(
                  retry_times(retries),
                  terminal_errors_except(_NoCreatedServerFound)),
              next_interval=repeating_interval(15),
              clock=clock)

    # Unwrap before wrapping, so callers see the original Nova failure
    # wrapped in a request error for this path.
    d.addErrback(_unwrap_NoCreatedServerFound)
    d.addErrback(wrap_request_error, path, 'server_create')

    return d
def launch_server(log, request_bag, scaling_group, launch_config, undo,
                  clock=None):
    """
    Launch a new server given the launch config auth tokens and service
    catalog. Possibly adding the newly launched server to a load balancer.

    :param BoundLog log: A bound logger.
    :param request_bag: An object with a bunch of useful data on it, including
        a callable to re-auth and get a new token.
    :param IScalingGroup scaling_group: The scaling group to add the launched
        server to.
    :param dict launch_config: A launch_config args structure as defined for
        the launch_server_v1 type.
    :param IUndoStack undo: The stack that will be rewound if undo fails.
    :param clock: Optional reactor/clock used by the retry machinery.

    :return: Deferred that fires with a 2-tuple of server details and the
        list of load balancer responses from add_to_load_balancers.
    """
    launch_config = prepare_launch_config(scaling_group.uuid, launch_config)

    cloudServersOpenStack = config_value('cloudServersOpenStack')
    server_endpoint = public_endpoint_url(request_bag.service_catalog,
                                          cloudServersOpenStack,
                                          request_bag.region)

    lb_config = launch_config.get('loadBalancers', [])
    server_config = launch_config['server']

    log = log.bind(server_name=server_config['name'])
    # ilog is a 1-element list used as a mutable cell: inner callbacks
    # rebind it to a server_id-bound logger once the server id is known.
    ilog = [None]

    def check_metadata(server):
        # sanity check to make sure the metadata didn't change - can probably
        # be removed after a while if we do not see any log messages from this
        # function
        expected = launch_config['server']['metadata']
        result = server['server'].get('metadata')
        if result != expected:
            ilog[0].msg('Server metadata has changed.',
                        sanity_check=True,
                        expected_metadata=expected,
                        nova_metadata=result)
        return server

    def wait_for_server(server, new_request_bag):
        server_id = server['server']['id']

        # NOTE: If server create is retried, each server delete will be pushed
        # to undo stack even after it will be deleted in check_error which is
        # fine since verified_delete succeeds on deleted server
        undo.push(
            verified_delete, log, server_endpoint, new_request_bag, server_id)

        ilog[0] = log.bind(server_id=server_id)
        return wait_for_active(
            ilog[0], server_endpoint, new_request_bag.auth_token,
            server_id).addCallback(check_metadata)

    def add_lb(server, new_request_bag):
        # Only touch load balancers when the launch config asks for them.
        if lb_config:
            lbd = add_to_load_balancers(
                ilog[0], new_request_bag, lb_config, server, undo)
            lbd.addCallback(lambda lb_response: (server, lb_response))
            return lbd

        return (server, [])

    def _real_create_server(new_request_bag):
        auth_token = new_request_bag.auth_token
        d = create_server(server_endpoint, auth_token, server_config, log=log)
        d.addCallback(wait_for_server, new_request_bag)
        d.addCallback(add_lb, new_request_bag)
        return d

    def _create_server():
        # Re-authenticate before every attempt so long retries don't
        # run with an expired token.
        return request_bag.re_auth().addCallback(_real_create_server)

    def check_error(f):
        # Retry predicate: only an UnexpectedServerStatus of 'ERROR' is
        # retriable; the broken server is deleted first.
        f.trap(UnexpectedServerStatus)

        if f.value.status == 'ERROR':
            log.msg('{server_id} errored, deleting and creating new '
                    'server instead', server_id=f.value.server_id)

            # trigger server delete and return True to allow retry
            verified_delete(log, server_endpoint, request_bag,
                            f.value.server_id)
            return True
        else:
            return False

    d = retry(_create_server,
              can_retry=compose_retries(retry_times(3), check_error),
              next_interval=repeating_interval(15), clock=clock)

    return d
def create_server(server_endpoint, auth_token, server_config, log=None,
                  clock=None, retries=3, create_failure_delay=5, _treq=None):
    """
    Create a new server.  If there is an error from Nova from this call,
    checks to see if the server was created anyway.  If not, will retry the
    create ``retries`` times (checking each time if a server was created).

    If the error from Nova is a 400, does not retry, because that implies that
    retrying will just result in another 400 (bad args).

    If checking to see if the server is created also results in a failure,
    does not retry because there might just be something wrong with Nova.

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.
    :param int retries: Number of tries to retry the create.
    :param int create_failure_delay: how much time in seconds to wait after
        a create server failure before checking Nova to see if a server
        was created

    :param log: logger
    :type log: :class:`otter.log.bound.BoundLog`

    :param clock: Optional reactor/clock used for delays and retries.

    :param _treq: To be used for testing - what treq object to use
    :type _treq: something with the same api as :obj:`treq`

    :return: Deferred that fires with the CreateServer response as a dict.
    """
    path = append_segments(server_endpoint, 'servers')

    if _treq is None:  # pragma: no cover
        _treq = treq
    if clock is None:  # pragma: no cover
        from twisted.internet import reactor
        clock = reactor

    def _check_results(result, propagated_f):
        """
        Return the original failure, if checking a server resulted in a
        failure too.  Returns a wrapped propagated failure, if there were no
        servers created, so that the retry utility knows that server creation
        can be retried.
        """
        if isinstance(result, Failure):
            log.msg("Attempt to find a created server in nova resulted in "
                    "{failure}. Propagating the original create error instead.",
                    failure=result)
            return propagated_f

        if result is None:
            # Wrapping the original failure lets the retry predicate below
            # recognize this case as retriable.
            raise _NoCreatedServerFound(propagated_f)

        return result

    def _check_server_created(f):
        """
        If creating a server failed with anything other than a 400, see if
        Nova created a server anyway (a 400 means that the server creation
        args were bad, and there is no point in retrying).

        If Nova created a server, just return it and pretend that the error
        never happened.  If it didn't, or if checking resulted in another
        failure response, return a failure of some type.
        """
        f.trap(APIError)

        if f.value.code == 400:
            return f

        # Give Nova a moment before asking it whether a server appeared.
        d = deferLater(clock, create_failure_delay, find_server,
                       server_endpoint, auth_token, server_config, log=log)
        d.addBoth(_check_results, f)

        return d

    def _create_with_delay(to_delay):
        d = _treq.post(path, headers=headers(auth_token),
                       data=json.dumps({'server': server_config}), log=log)
        if to_delay:
            # Add 1 second delay to space 1 second between server creations
            d.addCallback(delay, clock, 1)
        return d

    def _create_server():
        """
        Attempt to create a server, handling spurious non-400 errors from Nova
        by seeing if Nova created a server anyway in spite of the error.  If
        so then create server succeeded.

        If not, and if no further errors occur, server creation can be
        retried.
        """
        # NOTE: helper is really spelled "get_sempahore" in this module.
        # If configured, a semaphore limits concurrent server creations.
        sem = get_sempahore("create_server", "worker.create_server_limit")
        if sem is not None:
            d = sem.run(_create_with_delay, True)
        else:
            d = _create_with_delay(False)
        d.addCallback(check_success, [202], _treq=_treq)
        d.addCallback(_treq.json_content)
        d.addErrback(_check_server_created)
        return d

    def _unwrap_NoCreatedServerFound(f):
        """
        The original failure was wrapped in a :class:`_NoCreatedServerFound`
        for ease of retry, but that should not be the final error propagated
        up by :func:`create_server`.

        This errback unwraps the :class:`_NoCreatedServerFound` error and
        returns the original failure.
        """
        f.trap(_NoCreatedServerFound)
        return f.value.original

    # Retry up to `retries` times; only _NoCreatedServerFound failures are
    # retriable (every other failure is terminal).
    d = retry(
        _create_server,
        can_retry=compose_retries(
            retry_times(retries),
            terminal_errors_except(_NoCreatedServerFound)),
        next_interval=repeating_interval(15),
        clock=clock)

    # Unwrap before wrapping, so callers see the original Nova failure
    # wrapped in a request error for this path.
    d.addErrback(_unwrap_NoCreatedServerFound)
    d.addErrback(wrap_request_error, path, 'server_create')

    return d
def launch_server(log, region, scaling_group, service_catalog, auth_token,
                  launch_config, undo, clock=None):
    """
    Launch a new server given the launch config auth tokens and service
    catalog. Possibly adding the newly launched server to a load balancer.

    :param BoundLog log: A bound logger.
    :param str region: A rackspace region as found in the service catalog.
    :param IScalingGroup scaling_group: The scaling group to add the launched
        server to.
    :param list service_catalog: A list of services as returned by the auth
        apis.
    :param str auth_token: The user's auth token.
    :param dict launch_config: A launch_config args structure as defined for
        the launch_server_v1 type.
    :param IUndoStack undo: The stack that will be rewound if undo fails.
    :param clock: Optional reactor/clock used by the retry machinery.

    :return: Deferred that fires with a 2-tuple of server details and the
        list of load balancer responses from add_to_load_balancers.
    """
    launch_config = prepare_launch_config(scaling_group.uuid, launch_config)

    # Load balancers may live in a different (overridden) region than servers.
    lb_region = config_value('regionOverrides.cloudLoadBalancers') or region
    cloudLoadBalancers = config_value('cloudLoadBalancers')
    cloudServersOpenStack = config_value('cloudServersOpenStack')

    lb_endpoint = public_endpoint_url(service_catalog,
                                      cloudLoadBalancers,
                                      lb_region)

    server_endpoint = public_endpoint_url(service_catalog,
                                          cloudServersOpenStack,
                                          region)

    lb_config = launch_config.get('loadBalancers', [])

    server_config = launch_config['server']

    log = log.bind(server_name=server_config['name'])
    # ilog is a 1-element list used as a mutable cell: inner callbacks
    # rebind it to a server_id-bound logger once the server id is known.
    ilog = [None]

    def wait_for_server(server):
        server_id = server['server']['id']

        # NOTE: If server create is retried, each server delete will be
        # pushed to undo stack even after it will be deleted in check_error
        # which is fine since verified_delete succeeds on deleted server
        undo.push(
            verified_delete, log, server_endpoint, auth_token, server_id)

        ilog[0] = log.bind(server_id=server_id)
        return wait_for_active(
            ilog[0],
            server_endpoint,
            auth_token,
            server_id)

    def add_lb(server):
        # assumes the server has at least one private IP — the first one is
        # registered with the load balancer (TODO confirm against
        # private_ip_addresses).
        ip_address = private_ip_addresses(server)[0]
        lbd = add_to_load_balancers(
            ilog[0], lb_endpoint, auth_token, lb_config, ip_address, undo)
        lbd.addCallback(lambda lb_response: (server, lb_response))
        return lbd

    def _create_server():
        d = create_server(server_endpoint, auth_token, server_config, log=log)
        d.addCallback(wait_for_server)
        d.addCallback(add_lb)
        return d

    def check_error(f):
        # Retry predicate: only an UnexpectedServerStatus of 'ERROR' is
        # retriable; the broken server is deleted first.
        f.trap(UnexpectedServerStatus)

        if f.value.status == 'ERROR':
            log.msg('{server_id} errored, deleting and creating new server instead',
                    server_id=f.value.server_id)

            # trigger server delete and return True to allow retry
            verified_delete(log, server_endpoint, auth_token,
                            f.value.server_id)
            return True
        else:
            return False

    d = retry(_create_server,
              can_retry=compose_retries(retry_times(3), check_error),
              next_interval=repeating_interval(15), clock=clock)

    return d