def test_append_segments(self):
    """
    append_segments will append an arbitrary number of path segments to
    a base url even if there is a trailing / on the base uri.
    """
    expected = 'http://example.com/foo/bar/baz'
    # Same expected URL whether or not the base has a trailing slash.
    for base_uri in ('http://example.com', 'http://example.com/'):
        self.assertEqual(
            append_segments(base_uri, 'foo', 'bar', 'baz'), expected)
def authenticate_user(auth_endpoint, username, password, tenant_id=None,
                      log=None, pool=None):
    """
    Authenticate to a Identity auth endpoint with a username and password.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str username: Username to authenticate as.
    :param str password: Password for the specified user.
    :param str tenant_id: Tenant ID to include in auth request
    :param log: If provided, a BoundLog object.
    :param twisted.web.client.HTTPConnectionPool pool: If provided, a
        connection pool which an integration test can manually clean up
        to avoid a race condition between Trial and Twisted.

    :return: Decoded JSON response as dict.
    """
    if not log:
        log = _DoNothingLogger(None, None)
    credentials = {"username": username, "password": password}
    auth_body = {"auth": {"passwordCredentials": credentials}}
    if tenant_id:
        auth_body["auth"]["tenantId"] = tenant_id
    d = treq.post(append_segments(auth_endpoint, "tokens"),
                  json.dumps(auth_body), headers=headers(), log=log,
                  pool=pool)
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_upstream_error, "identity",
                 ("authenticating", username), auth_endpoint)
    return d.addCallback(treq.json_content)
def test_append_segments(self):
    """
    append_segments will append an arbitrary number of path segments to
    a base url even if there is a trailing / on the base uri.
    """
    expected = 'http://example.com/foo/bar/baz'
    # Trailing slash on the base URI must not change the result.
    for base in ['http://example.com', 'http://example.com/']:
        self.assertEqual(
            append_segments(base, 'foo', 'bar', 'baz'),
            expected)
def authenticate_user(auth_endpoint, username, password):
    """
    Authenticate to a Identity auth endpoint with a username and password.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str username: Username to authenticate as.
    :param str password: Password for the specified user.

    :return: Decoded JSON response as dict.
    """
    request_body = {
        "auth": {
            "passwordCredentials": {
                "username": username,
                "password": password
            }
        }
    }
    d = treq.post(append_segments(auth_endpoint, 'tokens'),
                  json.dumps(request_body),
                  headers=headers())
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_request_error, auth_endpoint,
                 data=('authenticating', username))
    return d.addCallback(treq.json_content)
def add_to_load_balancer(endpoint, auth_token, lb_config, ip_address):
    """
    Add an IP addressed to a load balancer based on the lb_config.

    TODO: Handle load balancer node metadata.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict lb_config: An lb_config dictionary with 'loadBalancerId'
        and 'port' keys.
    :param str ip_address: The IP Address of the node to add to the load
        balancer.

    :return: Deferred that fires with the Add Node to load balancer
        response as a dict.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    # The new node joins enabled, as a PRIMARY node for its address/port.
    d = treq.post(path, headers=headers(auth_token),
                  data=json.dumps({"nodes": [{"address": ip_address,
                                              "port": port,
                                              "condition": "ENABLED",
                                              "type": "PRIMARY"}]}))
    # CLB acknowledges the add with 200, or 202 for an async accept.
    d.addCallback(check_success, [200, 202])
    d.addErrback(wrap_request_error, endpoint, 'add')
    return d.addCallback(treq.json_content)
def delete_and_verify(server_id):
    """
    Check the status of the server to see if it's actually been deleted.
    Succeeds only if it has been either deleted (404) or acknowledged by Nova
    to be deleted (task_state = "deleted").

    Note that ``task_state`` is in the server details key
    ``OS-EXT-STS:task_state``, which is supported by Openstack but available
    only when looking at the extended status of a server.
    """
    # NOTE: the tuple parameters and three-argument raise below are
    # Python 2-only syntax.
    def check_task_state((resp, server_blob)):
        # A 404 on the verification GET means the server is gone: success.
        if resp.code == 404:
            return
        server_details = server_blob['server']
        is_deleting = server_details.get("OS-EXT-STS:task_state", "")
        # Anything other than "deleting" means Nova has not acknowledged
        # the delete yet.
        if is_deleting.strip().lower() != "deleting":
            raise UnexpectedServerStatus(server_id, is_deleting, "deleting")

    def verify((_type, error, traceback)):
        # Only a 204 (delete accepted, server still present) warrants
        # verification; re-raise anything else with its original traceback.
        if error.code != 204:
            raise _type, error, traceback
        # GET the server to confirm it is deleting (200) or gone (404).
        ver_eff = service_request(
            ServiceType.CLOUD_SERVERS, 'GET',
            append_segments('servers', server_id),
            success_pred=has_code(200, 404))
        return ver_eff.on(check_task_state)

    # DELETE treats only 404 as direct success; other codes errback into
    # `verify` as an APIError.
    return service_request(
        ServiceType.CLOUD_SERVERS, 'DELETE',
        append_segments('servers', server_id),
        success_pred=has_code(404)).on(error=catch(APIError, verify))
def create_server(self, tenant_id, group_id, server_id):
    """
    Create a server in Bobby. This means that Bobby will look up all of the
    policies and add checks and alarms as necessary.

    You need to create the group before you can create a policy or server
    in it.

    :param tenant_id: The tenant ID of the policy to create in Bobby
    :param group_id: The group ID of the scaling group to create in Bobby
    :param server_id: The Nova server URI to create in Bobby.

    :return: a ``dict`` containing the server object on Bobby's end if
        successful. returns an errorback if the response is not a 201
        Created.
    """
    servers_url = append_segments(self.server_endpoint, tenant_id,
                                  'groups', group_id, 'servers')
    payload = {
        'serverId': server_id,
        # Bobby is going to create the entity ID.
        'entityId': 'Damnit, Bobby'
    }
    d = self.treq_client.post(servers_url, data=json.dumps(payload))
    d.addCallback(check_success, [201])
    d.addErrback(wrap_request_error, self.server_endpoint, 'create_server')
    return d.addCallback(self.treq_client.json_content)
def create_policy(self, tenant_id, group_id, policy_id, check_template,
                  alarm_template):
    """
    Create a policy in Bobby. This means that Bobby will start to roll out
    alarms and checks across all of the servers that are present.

    You need to create the group before you can create a policy or server
    in it.

    :param tenant_id: The tenant ID of the policy to create in Bobby
    :param group_id: The group ID of the scaling group to create in Bobby
    :param policy_id: The policy ID of the scaling policy we are creating
    :param check_template: A ``dict`` that contains the check check_template
    :param alarm_template: A string containing the alarm DSL

    :return: a ``dict`` containing the policy object on Bobby's end if
        successful. returns an errorback if the response is not a 201
        Created.
    """
    policies_url = append_segments(self.server_endpoint, tenant_id,
                                   'groups', group_id, 'policies')
    payload = {
        'alarmTemplate': alarm_template,
        'checkTemplate': check_template,
        'policyId': policy_id
    }
    d = self.treq_client.post(policies_url, data=json.dumps(payload))
    d.addCallback(check_success, [201])
    d.addErrback(wrap_request_error, self.server_endpoint, 'create_policy')
    return d.addCallback(self.treq_client.json_content)
def create_group(self, tenant_id, group_id):
    """
    Create a group in Bobby. This will create the notification plan and
    notification for Bobby to work against.

    Once you've created the group, you can add servers or policies.

    :param tenant_id: The tenant ID of the policy to create in Bobby
    :param group_id: The group ID of the scaling group to create in Bobby

    :return: a ``dict`` containing the group object on Bobby's end if
        successful. returns an errorback if the response is not a 201
        Created.
    """
    groups_url = append_segments(self.server_endpoint, tenant_id, 'groups')
    payload = {
        'groupId': group_id,
        # these shouldn't be passed to Bobby
        'notification': 'Damnit, Bobby',
        'notificationPlan': 'Damnit, Bobby'
    }
    d = self.treq_client.post(groups_url, data=json.dumps(payload))
    d.addCallback(check_success, [201])
    d.addErrback(wrap_request_error, self.server_endpoint, 'create_group')
    return d.addCallback(self.treq_client.json_content)
def impersonate_user(auth_endpoint, identity_admin_token, username,
                     expire_in=10800):
    """
    Acquire an auth-token for a user via impersonation.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str identity_admin_token: Auth token that has the appropriate
        permissions to impersonate other users.
    :param str username: Username to impersonate.
    :param str expire_in: Number of seconds for which the token will be
        valid.

    :return: Decoded JSON as dict.
    """
    body = {"RAX-AUTH:impersonation": {"user": {"username": username},
                                       "expire-in-seconds": expire_in}}
    url = append_segments(auth_endpoint, 'RAX-AUTH', 'impersonation-tokens')
    d = treq.post(url, json.dumps(body),
                  headers=headers(identity_admin_token))
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_request_error, auth_endpoint, data='impersonation')
    return d.addCallback(treq.json_content)
def remove_alarm(self, entity_id, alarm_id):
    """Remove an alarm."""
    alarm_url = http.append_segments(
        self._endpoint, "entities", entity_id, "alarms", alarm_id)
    d = treq.delete(alarm_url, headers=http.headers(self._auth_token))
    # The monitoring API acknowledges the delete with 204 No Content.
    d.addCallback(http.check_success, [204])
    return d
def list_servers_details_page(parameters=None):
    """
    List a single page of servers details given filtering and pagination
    parameters.

    :ivar dict parameters: A dictionary with pagination information,
        changes-since filters, and name filters.

    Succeed on 200.

    :return: a `tuple` of (:obj:`twisted.web.client.Response`,
        JSON `dict`)
    :raise: :class:`NovaRateLimitError`, :class:`NovaComputeFaultError`,
        :class:`APIError`
    """
    @_only_json_api_errors
    def _parse_known_errors(code, json_body):
        # Translate Nova's standard error bodies (rate limit, compute
        # fault) into the typed exceptions documented above.
        _match_errors(_nova_standard_errors, code, json_body)

    return (
        service_request(
            ServiceType.CLOUD_SERVERS, 'GET',
            append_segments('servers', 'detail'), params=parameters)
        .on(error=_parse_known_errors)
        # Pages can be large, so log the body without JSON serialization.
        .on(log_success_response('request-list-servers-details', identity,
                                 log_as_json=False))
    )
def list_servers_details_page(parameters=None):
    """
    List a single page of servers details given filtering and pagination
    parameters.

    :ivar dict parameters: A dictionary with pagination information,
        changes-since filters, and name filters.

    Succeed on 200.

    :return: a `tuple` of (:obj:`twisted.web.client.Response`,
        JSON `dict`)
    :raise: :class:`NovaRateLimitError`, :class:`NovaComputeFaultError`,
        :class:`APIError`
    """
    @only_json_api_errors
    def _parse_known_errors(code, json_body):
        # Translate Nova's standard error bodies (rate limit, compute
        # fault) into the typed exceptions documented above.
        match_errors(_nova_standard_errors, code, json_body)

    return (
        service_request(
            ServiceType.CLOUD_SERVERS, 'GET',
            append_segments('servers', 'detail'), params=parameters)
        .on(error=_parse_known_errors)
        # Pages can be large, so log the body without JSON serialization.
        .on(log_success_response('request-list-servers-details', identity,
                                 log_as_json=False))
    )
def get_server_details(server_id):
    """
    Get details for one particular server.

    :ivar str server_id: a Nova server ID.

    Succeed on 200.

    :return: a `tuple` of (:obj:`twisted.web.client.Response`,
        JSON `dict`)
    :raise: :class:`NoSuchServer`, :class:`NovaRateLimitError`,
        :class:`NovaComputeFaultError`, :class:`APIError`
    """
    eff = service_request(
        ServiceType.CLOUD_SERVERS, 'GET',
        append_segments('servers', server_id),
        success_pred=has_code(200))

    @only_json_api_errors
    def _parse_known_errors(code, json_body):
        # A 404 with an 'itemNotFound' body means the server doesn't
        # exist; surface it as NoSuchServerError with the requested id.
        other_errors = [
            (404, ('itemNotFound', 'message'), None,
             partial(NoSuchServerError,
                     server_id=six.text_type(server_id))),
        ]
        match_errors(_nova_standard_errors + other_errors, code, json_body)

    return eff.on(error=_parse_known_errors).on(
        log_success_response('request-one-server-details', identity))
def impersonate_user(auth_endpoint, identity_admin_token, username,
                     expire_in=10800):
    """
    Acquire an auth-token for a user via impersonation.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str identity_admin_token: Auth token that has the appropriate
        permissions to impersonate other users.
    :param str username: Username to impersonate.
    :param str expire_in: Number of seconds for which the token will be
        valid.

    :return: Decoded JSON as dict.
    """
    impersonation = {
        "user": {"username": username},
        "expire-in-seconds": expire_in,
    }
    d = treq.post(
        append_segments(auth_endpoint, 'RAX-AUTH', 'impersonation-tokens'),
        json.dumps({"RAX-AUTH:impersonation": impersonation}),
        headers=headers(identity_admin_token))
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_request_error, auth_endpoint, data='impersonation')
    d.addCallback(treq.json_content)
    return d
def authenticate_user(auth_endpoint, username, password):
    """
    Authenticate to a Identity auth endpoint with a username and password.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str username: Username to authenticate as.
    :param str password: Password for the specified user.

    :return: Decoded JSON response as dict.
    """
    credentials = {"username": username, "password": password}
    d = treq.post(
        append_segments(auth_endpoint, 'tokens'),
        json.dumps({"auth": {"passwordCredentials": credentials}}),
        headers=headers())
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_request_error, auth_endpoint,
                 data=('authenticating', username))
    d.addCallback(treq.json_content)
    return d
def delete_entity(self, entity_id):
    """Delete a monitoring entity; the API responds 204 on success."""
    d = treq.delete(
        http.append_segments(self._endpoint, 'entities', entity_id),
        headers=http.headers(self._auth_token))
    return d.addCallback(http.check_success, [204])
def scrub_otter_metadata(log, auth_token, service_catalog, region, server_id,
                         _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    bound_log = log.bind(region=region, server_id=server_id)
    bound_log.msg("Scrubbing otter-specific metadata")

    service_name = config_value('cloudServersOpenStack')
    endpoint = public_endpoint_url(service_catalog, service_name, region)
    url = append_segments(endpoint, 'servers', server_id, 'metadata')

    auth_hdr = headers(auth_token)

    # Build single-argument GET and PUT closures over the metadata URL.
    # ``method=method`` binds each verb at definition time, avoiding the
    # late-binding-closure pitfall.
    get, put = [lambda data=None, method=method: _treq.request(
        method, url, headers=auth_hdr, data=data, log=bound_log)
        for method in ["GET", "PUT"]]

    # GET current metadata, drop otter keys, and PUT the result back;
    # drain the final response body.
    return (get()
            .addCallback(_treq.json_content)
            .addCallback(comp(json.dumps, _without_otter_metadata))
            .addCallback(put)
            .addCallback(_treq.content))
def find_server(server_endpoint, auth_token, server_config, log=None):
    """
    Given a server config, attempts to find a server created with that
    config.

    Uses the Nova list server details endpoint to filter out any server that
    does not have the exact server name (the filter is a regex, so can
    filter by ``^<name>$``), image ID, and flavor ID (both of which are
    exact filters).

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.
    :param log: A bound logger

    :return: Deferred that fires with a server (in the format of a server
        detail response) that matches that server config and creation time,
        or None if none matches
    :raises: :class:`ServerCreationRetryError`
    """
    query_params = {
        'image': server_config.get('imageRef', ''),
        'flavor': server_config['flavorRef'],
        # Anchor and escape the name so only exact matches come back.
        'name': '^{0}$'.format(re.escape(server_config['name']))
    }

    # imageRef may be present but explicitly None (e.g. boot-from-volume);
    # normalize to the empty string for the query.
    if query_params['image'] is None:
        query_params['image'] = ''

    url = '{path}?{query}'.format(
        path=append_segments(server_endpoint, 'servers', 'detail'),
        query=urlencode(query_params))

    def _check_if_server_exists(list_server_details):
        nova_servers = list_server_details['servers']

        if len(nova_servers) > 1:
            raise ServerCreationRetryError(
                "Nova returned {0} servers that match the same "
                "image/flavor and name {1}.".format(
                    len(nova_servers), server_config['name']))

        elif len(nova_servers) == 1:
            nova_server = list_server_details['servers'][0]

            # Name/image/flavor matching is not enough: the metadata must
            # also match exactly, otherwise this is someone else's server.
            if nova_server['metadata'] != server_config['metadata']:
                raise ServerCreationRetryError(
                    "Nova found a server of the right name ({name}) but "
                    "wrong metadata. Expected {expected_metadata} and got "
                    "{nova_metadata}"
                    .format(expected_metadata=server_config['metadata'],
                            nova_metadata=nova_server['metadata'],
                            name=server_config['name']))

            return {'server': nova_server}

        return None

    d = treq.get(url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    d.addCallback(_check_if_server_exists)
    return d
def scrub_otter_metadata(log, auth_token, service_catalog, region, server_id,
                         _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    bound_log = log.bind(region=region, server_id=server_id)
    bound_log.msg("Scrubbing otter-specific metadata")
    service_name = config_value('cloudServersOpenStack')
    endpoint = public_endpoint_url(service_catalog, service_name, region)
    url = append_segments(endpoint, 'servers', server_id, 'metadata')
    auth_hdr = headers(auth_token)
    # Single-argument GET/PUT closures over the metadata URL;
    # ``method=method`` binds each verb at definition time (late-binding
    # closure pitfall).
    get, put = [
        lambda data=None, method=method: _treq.request(
            method, url, headers=auth_hdr, data=data, log=bound_log)
        for method in ["GET", "PUT"]
    ]
    # GET metadata, strip otter keys, PUT the remainder back, drain body.
    return (get().addCallback(_treq.json_content).addCallback(
        comp(json.dumps, _without_otter_metadata)).addCallback(put).addCallback(
            _treq.content))
def find_server(server_endpoint, auth_token, server_config, log=None):
    """
    Given a server config, attempts to find a server created with that
    config.

    Uses the Nova list server details endpoint to filter out any server that
    does not have the exact server name (the filter is a regex, so can
    filter by ``^<name>$``), image ID, and flavor ID (both of which are
    exact filters).

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.
    :param log: A bound logger

    :return: Deferred that fires with a server (in the format of a server
        detail response) that matches that server config and creation time,
        or None if none matches
    :raises: :class:`ServerCreationRetryError`
    """
    query_params = {
        'image': server_config.get('imageRef', ''),
        'flavor': server_config['flavorRef'],
        # Anchor and escape the name so only exact matches come back.
        'name': '^{0}$'.format(re.escape(server_config['name']))
    }
    # imageRef may be present but explicitly None (e.g. boot-from-volume);
    # normalize to the empty string for the query.
    if query_params['image'] is None:
        query_params['image'] = ''
    url = '{path}?{query}'.format(path=append_segments(server_endpoint,
                                                       'servers', 'detail'),
                                  query=urlencode(query_params))

    def _check_if_server_exists(list_server_details):
        nova_servers = list_server_details['servers']
        if len(nova_servers) > 1:
            raise ServerCreationRetryError(
                "Nova returned {0} servers that match the same "
                "image/flavor and name {1}.".format(len(nova_servers),
                                                    server_config['name']))
        elif len(nova_servers) == 1:
            nova_server = list_server_details['servers'][0]
            # Name/image/flavor matching is not enough: the metadata must
            # also match exactly, otherwise this is someone else's server.
            if nova_server['metadata'] != server_config['metadata']:
                raise ServerCreationRetryError(
                    "Nova found a server of the right name ({name}) but wrong "
                    "metadata. Expected {expected_metadata} and got {nova_metadata}"
                    .format(expected_metadata=server_config['metadata'],
                            nova_metadata=nova_server['metadata'],
                            name=server_config['name']))
            return {'server': nova_server}
        return None

    d = treq.get(url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    d.addCallback(_check_if_server_exists)
    return d
def change_clb_node(lb_id, node_id, condition, weight, _type="PRIMARY"):
    """
    Generate effect to change a node on a load balancer.

    :param str lb_id: The load balancer ID to add the nodes to
    :param str node_id: The node id to change.
    :param str condition: The condition to change to: one of "ENABLED",
        "DRAINING", or "DISABLED"
    :param int weight: The weight to change to.
    :param str _type: The type to change the CLB node to.

    :return: :class:`ServiceRequest` effect

    :raises: :class:`CLBImmutableError`, :class:`CLBDeletedError`,
        :class:`NoSuchCLBError`, :class:`NoSuchCLBNodeError`,
        :class:`APIError`
    """
    eff = service_request(
        ServiceType.CLOUD_LOAD_BALANCERS,
        'PUT',
        append_segments('loadbalancers', lb_id, 'nodes', node_id),
        data={'node': {'condition': condition,
                       'weight': weight,
                       'type': _type}},
        # CLB acknowledges a node update with 202 Accepted.
        success_pred=has_code(202))

    @_only_json_api_errors
    def _parse_known_errors(code, json_body):
        # Shared CLB failures first (immutable / deleted / no such LB)...
        _process_clb_api_error(code, json_body, lb_id)
        # ...then the node-specific 404 for a missing node.
        _match_errors(
            _expand_clb_matches(
                [(404, _CLB_NO_SUCH_NODE_PATTERN, NoSuchCLBNodeError)],
                lb_id=lb_id, node_id=node_id),
            code, json_body)

    return eff.on(error=_parse_known_errors)
def get_server_details(server_id):
    """
    Get details for one particular server.

    :ivar str server_id: a Nova server ID.

    Succeed on 200.

    :return: a `tuple` of (:obj:`twisted.web.client.Response`,
        JSON `dict`)
    :raise: :class:`NoSuchServer`, :class:`NovaRateLimitError`,
        :class:`NovaComputeFaultError`, :class:`APIError`
    """
    eff = service_request(
        ServiceType.CLOUD_SERVERS, 'GET',
        append_segments('servers', server_id),
        success_pred=has_code(200))

    @_only_json_api_errors
    def _parse_known_errors(code, json_body):
        # A 404 with an 'itemNotFound' body means the server doesn't
        # exist; surface it as NoSuchServerError with the requested id.
        other_errors = [
            (404, ('itemNotFound', 'message'), None,
             partial(NoSuchServerError,
                     server_id=six.text_type(server_id))),
        ]
        _match_errors(_nova_standard_errors + other_errors, code, json_body)

    return eff.on(error=_parse_known_errors).on(
        log_success_response('request-one-server-details', identity))
def verify((_type, error, traceback)):
    # Python 2-only syntax: tuple parameter unpacking and the
    # three-argument raise form.
    # Only a 204 (delete accepted, server still present) warrants
    # verification; re-raise anything else with its original traceback.
    if error.code != 204:
        raise _type, error, traceback
    # GET the server to confirm Nova reports it as deleting (200) or
    # already gone (404).
    ver_eff = service_request(
        ServiceType.CLOUD_SERVERS,
        "GET",
        append_segments("servers", server_id),
        success_pred=has_code(200, 404)
    )
    return ver_eff.on(check_task_state)
def delete_and_verify(log, server_endpoint, auth_token, server_id):
    """
    Check the status of the server to see if it's actually been deleted.
    Succeeds only if it has been either deleted (404) or acknowledged by Nova
    to be deleted (task_state = "deleted").

    Note that ``task_state`` is in the server details key
    ``OS-EXT-STS:task_state``, which is supported by Openstack but available
    only when looking at the extended status of a server.
    """
    path = append_segments(server_endpoint, 'servers', server_id)

    def delete():
        # Only a 404 is direct success; any other code errbacks into
        # `verify` below as an APIError.
        del_d = treq.delete(path, headers=headers(auth_token), log=log)
        del_d.addCallback(check_success, [404])
        # Drain the body so the connection can be reused.
        del_d.addCallback(treq.content)
        return del_d

    def check_task_state(json_blob):
        # NOTE: this local `server_details` shadows the module-level
        # function of the same name used inside `verify`.
        server_details = json_blob['server']
        is_deleting = server_details.get("OS-EXT-STS:task_state", "")
        if is_deleting.strip().lower() != "deleting":
            raise UnexpectedServerStatus(server_id, is_deleting, "deleting")

    def verify(f):
        f.trap(APIError)
        if f.value.code != 204:
            # Unexpected status code: wrap for the caller.
            return wrap_request_error(f, path, 'delete_server')
        # 204: delete accepted but server still present, so re-fetch the
        # server and confirm Nova says it is deleting.  ServerDeleted
        # raised during the re-fetch also counts as success.
        ver_d = server_details(server_endpoint, auth_token, server_id,
                               log=log)
        ver_d.addCallback(check_task_state)
        ver_d.addErrback(lambda f: f.trap(ServerDeleted))
        return ver_d

    return delete().addErrback(verify)
def test_append_segments_unicode_uri(self):
    """
    append_segments will convert a uri to an ascii bytestring if it is
    a unicode object.
    """
    result = append_segments(u'http://example.com', 'foo')
    self.assertEqual(result, 'http://example.com/foo')
def trigger_convergence(authenticator, region, group):
    # NOTE(review): generator that yields Deferreds — presumably decorated
    # with @inlineCallbacks at the definition site; confirm against caller.
    token, catalog = yield authenticator.authenticate_tenant(
        group["tenantId"])
    endpoint = public_endpoint_url(catalog, "autoscale", region)
    resp = yield treq.post(
        append_segments(endpoint, "groups", group["groupId"], "converge"),
        headers=headers(token), data="")
    # The converge endpoint acknowledges with 204 No Content.
    if resp.code != 204:
        raise ValueError("bad code", resp.code)
def test_append_segments_quote(self):
    """
    append_segments will quote all path segments.
    """
    result = append_segments('http://example.com', 'foo bar')
    self.assertEqual(result, 'http://example.com/foo%20bar')
def test_append_segments_unicode(self):
    """
    append_segments will convert to utf-8 and quote unicode path segments.
    """
    result = append_segments('http://example.com', u'\u2603')
    self.assertEqual(result, 'http://example.com/%E2%98%83')
def verify((_type, error, traceback)):
    # Python 2-only syntax: tuple parameter unpacking and the
    # three-argument raise form.
    # Only a 204 (delete accepted, server still present) warrants
    # verification; re-raise anything else with its original traceback.
    if error.code != 204:
        raise _type, error, traceback
    # GET the server to confirm Nova reports it as deleting (200) or
    # already gone (404).
    ver_eff = service_request(
        ServiceType.CLOUD_SERVERS, 'GET',
        append_segments('servers', server_id),
        success_pred=has_code(200, 404))
    return ver_eff.on(check_task_state)
def as_request(self):
    """Produce a :obj:`Request` to remove a load balancer node."""
    node_path = append_segments('loadbalancers',
                                str(self.loadbalancer_id),
                                str(self.node_id))
    return Request(service=ServiceType.CLOUD_LOAD_BALANCERS,
                   method='DELETE',
                   path=node_path)
def get_tenant_ids(token, catalog):
    """Fetch tenant ids appearing in cloud metrics "desired" metrics."""
    endpoint = public_endpoint_url(catalog, "cloudMetrics", "IAD")
    d = treq.get(append_segments(endpoint, "metrics", "search"),
                 headers=headers(token), params={"query": "*.*.desired"})
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)

    def extract_tenants(body):
        # Metric names look like "<prefix>.<tenant_id>.desired".
        return [entry["metric"].split(".")[1] for entry in body]

    return d.addCallback(extract_tenants)
def get_tenant_ids(token, catalog):
    """Return tenant ids extracted from the cloud metrics search results."""
    endpoint = public_endpoint_url(catalog, "cloudMetrics", "IAD")
    search_url = append_segments(endpoint, "metrics", "search")
    d = treq.get(search_url, headers=headers(token),
                 params={"query": "*.*.desired"})
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    # Metric names look like "<prefix>.<tenant_id>.desired".
    d.addCallback(
        lambda body: [entry["metric"].split(".")[1] for entry in body])
    return d
def remove_from_load_balancer(log, endpoint, auth_token, loadbalancer_id,
                              node_id, clock=None):
    """
    Remove a node from a load balancer.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param str loadbalancer_id: The ID for a cloud loadbalancer.
    :param str node_id: The ID for a node in that cloudloadbalancer.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with an RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is
    # working fine
    lb_log.msg('Removing from load balancer')
    path = append_segments(endpoint, 'loadbalancers', str(loadbalancer_id),
                           'nodes', str(node_id))

    def check_422_deleted(failure):
        # A LB being deleted sometimes results in a 422.  This function
        # unfortunately has to parse the body of the message to see if this
        # is an acceptable 422 (if the LB has been deleted or the node has
        # already been removed, then 'removing from load balancer' as a task
        # should be successful - if the LB is in ERROR, then nothing more
        # can be done to it except resetting it - may as well remove the
        # server.)
        failure.trap(APIError)
        error = failure.value
        if error.code == 422:
            message = json.loads(error.body)['message']
            if ('load balancer is deleted' not in message and
                    'PENDING_DELETE' not in message):
                return failure
            lb_log.msg(message)
        else:
            return failure

    def remove():
        d = treq.delete(path, headers=headers(auth_token), log=lb_log)

        # Success is 200/202.  An LB not being found is 404.  A node not
        # being found is a 404.  But a deleted LB sometimes results in a
        # 422.
        d.addCallback(log_on_response_code, lb_log,
                      'Node to delete does not exist', 404)
        d.addCallback(check_success, [200, 202, 404])
        # To avoid https://twistedmatrix.com/trac/ticket/6751
        d.addCallback(treq.content)
        d.addErrback(check_422_deleted)
        d.addErrback(log_lb_unexpected_errors, path, lb_log, 'remove_node')
        return d

    # Retry the delete a bounded number of times with a randomized
    # interval, both configurable with module defaults as fallback.
    d = retry(
        remove,
        can_retry=retry_times(
            config_value('worker.lb_max_retries') or LB_MAX_RETRIES),
        next_interval=random_interval(
            *(config_value('worker.lb_retry_interval_range') or
              LB_RETRY_INTERVAL_RANGE)),
        clock=clock)
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
def as_request(self):
    """Produce a :obj:`Request` to modify a load balancer node."""
    node_path = append_segments('loadbalancers', self.loadbalancer_id,
                                'nodes', self.node_id)
    return Request(service=ServiceType.CLOUD_LOAD_BALANCERS,
                   method='PUT',
                   path=node_path,
                   data={'condition': self.condition,
                         'weight': self.weight})
def on_listing_pools(lblist_result):
    """For each RCv3 pool listed, request its nodes in parallel."""
    _, body = lblist_result
    node_requests = []
    for lb_pool in body:
        pool_id = lb_pool['id']
        nodes_eff = service_request(
            ServiceType.RACKCONNECT_V3, 'GET',
            append_segments('load_balancer_pools', pool_id, 'nodes'))
        node_requests.append(nodes_eff.on(
            partial(on_listing_nodes, RCv3Description(lb_id=pool_id))))
    return parallel(node_requests)
def test_append_segments_unicode_uri(self):
    """
    append_segments will convert a uri to an ascii bytestring if it is
    a unicode object.
    """
    self.assertEqual(
        append_segments(u'http://example.com', 'foo'),
        'http://example.com/foo')
def get_clb_nodes(lb_id):
    """
    Fetch the nodes of the given load balancer. Returns list of node JSON.
    """
    return (
        service_request(ServiceType.CLOUD_LOAD_BALANCERS, "GET",
                        append_segments("loadbalancers", str(lb_id),
                                        "nodes"))
        # Map CLB JSON error bodies (deleted / no-such-LB / immutable)
        # to typed exceptions.
        .on(error=_only_json_api_errors(lambda c, b:
                                        _process_clb_api_error(c, b, lb_id)))
        .on(log_success_response("request-list-clb-nodes", identity))
        # Python 2-only tuple-unpacking lambda: keep just the node list.
        .on(success=lambda (response, body): body["nodes"])
    )
def add_to_load_balancer(log, endpoint, auth_token, lb_config, ip_address,
                         undo, clock=None):
    """
    Add an IP addressed to a load balancer based on the lb_config.

    TODO: Handle load balancer node metadata.

    :param log: A bound logger
    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict lb_config: An lb_config dictionary with 'loadBalancerId'
        and 'port' keys.
    :param str ip_address: The IP Address of the node to add to the load
        balancer.
    :param IUndoStack undo: An IUndoStack to push any reversable operations
        onto.

    :return: Deferred that fires with the Add Node to load balancer
        response as a dict.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    lb_log = log.bind(loadbalancer_id=lb_id, ip_address=ip_address)

    def add():
        # New node joins enabled as the PRIMARY for this address/port.
        d = treq.post(path, headers=headers(auth_token),
                      data=json.dumps({"nodes": [{"address": ip_address,
                                                  "port": port,
                                                  "condition": "ENABLED",
                                                  "type": "PRIMARY"}]}),
                      log=lb_log)
        d.addCallback(check_success, [200, 202])
        # Log unexpected errors first, then wrap for callers; a deleted
        # CLB/node is translated so the retry loop can stop on it.
        d.addErrback(log_lb_unexpected_errors, lb_log, 'add_node')
        d.addErrback(wrap_request_error, path, 'add_node')
        d.addErrback(check_deleted_clb, lb_id)
        return d

    # Retry transient failures (but never after the CLB/node is deleted),
    # at most worker.lb_max_retries times with a randomized interval.
    d = retry(
        add,
        can_retry=compose_retries(
            transient_errors_except(CLBOrNodeDeleted),
            retry_times(
                config_value('worker.lb_max_retries') or LB_MAX_RETRIES)),
        next_interval=random_interval(
            *(config_value('worker.lb_retry_interval_range') or
              LB_RETRY_INTERVAL_RANGE)),
        clock=clock)

    def when_done(result):
        # Record the inverse operation so a later failure can roll back
        # the node addition.
        lb_log.msg('Added to load balancer',
                   node_id=result['nodes'][0]['id'])
        undo.push(remove_from_load_balancer, lb_log, endpoint, auth_token,
                  lb_id, result['nodes'][0]['id'])
        return result

    return d.addCallback(treq.json_content).addCallback(when_done)
def on_listing_pools(lblist_result):
    """Kick off a parallel node listing for every RCv3 pool returned."""
    _, body = lblist_result

    def nodes_effect(pool):
        # One GET per pool, post-processed with that pool's description.
        return service_request(
            ServiceType.RACKCONNECT_V3, 'GET',
            append_segments('load_balancer_pools', pool['id'], 'nodes')
        ).on(partial(on_listing_nodes, RCv3Description(lb_id=pool['id'])))

    return parallel([nodes_effect(lb_pool) for lb_pool in body])
def get_clb_node_feed(lb_id, node_id):
    """Get the atom feed associated with a CLB node. Returns feed as str."""
    return service_request(
        ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
        append_segments('loadbalancers', str(lb_id), 'nodes',
                        '{}.atom'.format(node_id)),
        # The feed is Atom XML, so skip JSON decoding of the body.
        json_response=False).on(error=_only_json_api_errors(
            # Map CLB JSON error bodies to typed exceptions.
            lambda c, b: _process_clb_api_error(c, b, lb_id))).on(
        log_success_response('request-get-clb-node-feed', identity)).on(
        # Python 2-only tuple-unpacking lambda: drop the response object.
        success=lambda (response, body): body)
def remove_notification_and_plan(self, notification_plan_id,
                                 notification_id):
    """Delete a notification plan and notification id."""
    plan_url = http.append_segments(self._endpoint, "notification_plans",
                                    notification_plan_id)
    d = treq.delete(plan_url, headers=http.headers(self._auth_token))
    d.addCallback(http.check_success, [204])

    def delete_notification(_):
        # Only remove the notification after the plan referencing it
        # has been deleted.
        url = http.append_segments(self._endpoint, "notifications",
                                   notification_id)
        return treq.delete(url, headers=http.headers(self._auth_token))

    d.addCallback(delete_notification)
    d.addCallback(http.check_success, [204])
    return d
def get_clb_node_feed(lb_id, node_id):
    """Get the atom feed associated with a CLB node. Returns feed as str."""
    return (
        service_request(
            ServiceType.CLOUD_LOAD_BALANCERS,
            "GET",
            append_segments("loadbalancers", str(lb_id), "nodes",
                            "{}.atom".format(node_id)),
            # The feed is Atom XML, so skip JSON decoding of the body.
            json_response=False,
        )
        # Map CLB JSON error bodies to typed exceptions.
        .on(error=_only_json_api_errors(
            lambda c, b: _process_clb_api_error(c, b, lb_id)))
        .on(log_success_response("request-get-clb-node-feed", identity))
        # Python 2-only tuple-unpacking lambda: drop the response object.
        .on(success=lambda (response, body): body)
    )
def validate_flavor(log, auth_token, server_endpoint, flavor_ref):
    """
    Validate flavor by getting its information.

    :param log: A bound logger, passed to treq so the request is traced.
    :param str auth_token: Keystone auth token.
    :param str server_endpoint: Nova endpoint URI.
    :param flavor_ref: The flavor id to validate.

    :return: Deferred that fires with the raw response body on success,
        or errbacks with UnknownFlavor if Nova returns a 404.
    """
    url = append_segments(server_endpoint, 'flavors', flavor_ref)
    # Fix: the `log` parameter was accepted but never used; pass it to
    # treq so this request is logged like the other Nova calls.
    d = treq.get(url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200, 203])
    # A 404 means the flavor does not exist: raise UnknownFlavor instead.
    d.addErrback(raise_error, 404, UnknownFlavor(flavor_ref), url,
                 'get_flavor')
    # Extracting the content to avoid a strange bug in twisted/treq where
    # next subsequent call to nova hangs indefintely
    d.addCallback(treq.content)
    return d
def get_clb_nodes(lb_id):
    """
    Fetch the nodes of the given load balancer. Returns list of node JSON.
    """
    nodes_path = append_segments('loadbalancers', str(lb_id), 'nodes')
    eff = service_request(ServiceType.CLOUD_LOAD_BALANCERS, 'GET', nodes_path)
    # Map CLB API errors, log the body, then extract just the node list.
    eff = eff.on(error=only_json_api_errors(
        lambda code, body: _process_clb_api_error(code, body, lb_id)))
    eff = eff.on(log_success_response('request-list-clb-nodes', identity))
    return eff.on(success=lambda result: result[1]['nodes'])
def add_notification_and_plan(self):
    """Groups must have a Notification and Notification plan for Auto Scale.

    This should only have to be created for each group, and the ids should
    be stored in the database.
    """
    # Holds the notification id created in the first request so the final
    # callback can return it alongside the plan id.
    created_notification_ids = []

    # TODO: Finish this path to the webhook
    # TODO: Add the group to the label.
    notification_body = {
        'label': 'Auto Scale Webhook Notification',
        'type': 'webhook',
        'details': {
            'url': '/alarm'
        }
    }
    d = treq.post(
        http.append_segments(self._endpoint, 'notifications'),
        headers=http.headers(self._auth_token),
        data=json.dumps(notification_body))
    d.addCallback(http.check_success, [201])

    def _create_plan(response):
        # The id of the freshly created notification is reported via the
        # x-object-id response header.
        created = response.headers.getRawHeaders('x-object-id')[0]
        created_notification_ids.append(created)
        plan_body = {
            'label': 'Auto Scale Notification Plan',
            'critical_state': [created],
            'ok_state': [created],
            'warning_state': [created]
        }
        return treq.post(
            http.append_segments(self._endpoint, 'notification_plans'),
            headers=http.headers(self._auth_token),
            data=json.dumps(plan_body))

    d.addCallback(_create_plan)
    d.addCallback(http.check_success, [201])

    def _return_ids(response):
        plan_id = response.headers.getRawHeaders('x-object-id')[0]
        return defer.succeed((created_notification_ids[0], plan_id))

    return d.addCallback(_return_ids)
def validate_flavor(log, auth_token, server_endpoint, flavor_ref):
    """
    Validate flavor by getting its information
    """
    flavor_url = append_segments(server_endpoint, 'flavors', flavor_ref)
    d = treq.get(flavor_url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200, 203])
    # A 404 from nova means the flavor does not exist.
    d.addErrback(raise_error_on_code, 404, UnknownFlavor(flavor_ref),
                 flavor_url, 'get_flavor')
    # Drain the body; otherwise a quirk in twisted/treq makes the next
    # subsequent call to nova hang indefinitely.
    d.addCallback(treq.content)
    return d
def _remove_from_clb(log, endpoint, auth_token, loadbalancer_id, node_id,
                     clock=None):
    """
    Remove a node from a CLB load balancer.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone authentication token.
    :param str loadbalancer_id: The ID for a Cloud Load Balancer.
    :param str node_id: The ID for a node in that Cloud Load Balancer.

    :returns: A Deferred that fires with None if the operation completed
        successfully, or errbacks with an RequestError.
    """
    lb_log = log.bind(loadbalancer_id=loadbalancer_id, node_id=node_id)
    # TODO: Will remove this once LB ERROR state is fixed and it is working fine
    lb_log.msg('Removing from load balancer')
    node_path = append_segments(endpoint, 'loadbalancers',
                                str(loadbalancer_id), 'nodes', str(node_id))

    def try_delete():
        d = treq.delete(node_path, headers=headers(auth_token), log=lb_log)
        d.addCallback(check_success, [200, 202])
        # Drain the body to avoid https://twistedmatrix.com/trac/ticket/6751
        d.addCallback(treq.content)
        d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
        d.addErrback(wrap_request_error, node_path, 'remove_node')
        d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
        return d

    # Retry transient failures, but give up immediately if the CLB or node
    # has been deleted out from under us.
    max_retries = config_value('worker.lb_max_retries') or LB_MAX_RETRIES
    interval_range = (config_value('worker.lb_retry_interval_range') or
                      LB_RETRY_INTERVAL_RANGE)
    d = retry(
        try_delete,
        can_retry=compose_retries(
            transient_errors_except(CLBOrNodeDeleted),
            retry_times(max_retries)),
        next_interval=random_interval(*interval_range),
        clock=clock)

    def _deleted_is_success(failure):
        # A node or CLB deleted is considered successful removal.
        failure.trap(CLBOrNodeDeleted)
        lb_log.msg(failure.value.message)

    d.addErrback(_deleted_is_success)
    d.addCallback(lambda _: lb_log.msg('Removed from load balancer'))
    return d
def create_notification_plan(result):
    # The id of the notification just created is reported via the
    # x-object-id response header; stash it in the enclosing scope's list.
    created_id = result.headers.getRawHeaders('x-object-id')[0]
    notification_id.append(created_id)
    # Wire the one notification into every alarm state of the plan.
    plan_payload = {
        'label': 'Auto Scale Notification Plan',
        'critical_state': [created_id],
        'ok_state': [created_id],
        'warning_state': [created_id]
    }
    return treq.post(
        http.append_segments(self._endpoint, 'notification_plans'),
        headers=http.headers(self._auth_token),
        data=json.dumps(plan_payload))
def add_check(self, policy_id, entity_id, check_template):
    """Add a new check to the entity."""
    checks_url = http.append_segments(
        self._endpoint, 'entities', entity_id, 'checks')
    d = treq.post(checks_url, headers=http.headers(self._auth_token),
                  data=check_template)
    d.addCallback(http.check_success, [201])

    def _fetch_created(response):
        # The created check is only reachable via the Location header.
        location = response.headers.getRawHeaders('Location')[0]
        return treq.get(location, headers=http.headers(self._auth_token))

    d.addCallback(_fetch_created)
    d.addCallback(http.check_success, [200])
    return d.addCallback(treq.json_content)
def create_stack(self, heat_url, stack_name, parameters, timeout, template):
    """Create a stack."""
    request_body = json.dumps({
        'stack_name': stack_name,
        'parameters': parameters,
        'timeout_mins': timeout,
        'template': template
    })
    log = self.log.bind(event='create-stack', stack_name=stack_name)
    d = self.treq.post(
        append_segments(heat_url, 'stacks'),
        data=request_body,
        headers=headers(self.auth_token), log=log)
    d.addCallback(check_success, [201])
    return d.addCallback(self.treq.json_content)
def create_server(server_endpoint, auth_token, server_config):
    """
    Create a new server.

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.

    :return: Deferred that fires with the CreateServer response as a dict.
    """
    body = json.dumps({'server': server_config})
    d = treq.post(append_segments(server_endpoint, 'servers'),
                  headers=headers(auth_token), data=body)
    d.addCallback(check_success, [202])
    d.addErrback(wrap_request_error, server_endpoint, 'server_create')
    return d.addCallback(treq.json_content)
def add_alarm(self, policy_id, entity_id, notification_plan_id, check_id,
              alarm_template):
    """Add an alarm."""
    alarms_url = http.append_segments(
        self._endpoint, 'entities', entity_id, 'alarms')
    d = treq.post(alarms_url, headers=http.headers(self._auth_token),
                  data=json.dumps(alarm_template))
    d.addCallback(http.check_success, [201])

    def _fetch_created(response):
        # Follow the Location header to retrieve the alarm just created.
        location = response.headers.getRawHeaders('Location')[0]
        return treq.get(location, headers=http.headers(self._auth_token))

    d.addCallback(_fetch_created)
    d.addCallback(http.check_success, [200])
    return d.addCallback(treq.json_content)
def publish_autoscale_event(event, log=None):
    """
    Publish event dictionary to autoscale feed
    """
    # note: if we actually wanted a JSON response instead of XML,
    # we'd have to pass the header:
    # 'accept': ['application/vnd.rackspace.atom+json'],
    atom_headers = {
        'content-type': ['application/vnd.rackspace.atom+json']}
    return service_request(
        ServiceType.CLOUD_FEEDS, 'POST',
        append_segments('autoscale', 'events'),
        headers=atom_headers,
        data=event, log=log,
        success_pred=has_code(201),
        json_response=False)
def get_clb_node_feed(lb_id, node_id):
    """
    Get the atom feed associated with a CLB node.

    :param int lb_id: Cloud Load balancer ID
    :param int node_id: Node ID of in loadbalancer node

    :returns: Effect of ``list`` of atom entry :class:`Element`
    :rtype: ``Effect``
    """
    feed_path = append_segments(
        'loadbalancers', str(lb_id), 'nodes', '{}.atom'.format(node_id))
    eff = cf.read_entries(
        ServiceType.CLOUD_LOAD_BALANCERS, feed_path, {}, cf.Direction.NEXT,
        "request-get-clb-node-feed")
    # read_entries yields (entries, ...); keep only the entries, and map
    # CLB API errors into domain-specific exceptions.
    eff = eff.on(itemgetter(0))
    return eff.on(error=only_json_api_errors(
        lambda code, body: _process_clb_api_error(code, body, lb_id)))