def replace_group_config(self, rcs, replacement_config):
    """
    Replace the current group configuration with the provided config and
    update the stored groupConfiguration information.

    :param TestResources rcs: The integration test resources instance.
        This provides useful information to complete the request, like
        which endpoint to use to make the API request.
    :param dict replacement_config: A dictionary representation of the
        JSON description of a scaling group groupConfiguration.  Note
        that since this is a replacement config, all fields in the JSON
        descriptor are mandatory.
    """
    def record_results(_, replacement_config):
        # Replace the stored value in the group_config
        self.group_config["groupConfiguration"] = replacement_config
        # Find the correct group from the list (by id) and update its
        # cached configuration as well, keeping rcs.groups consistent.
        for g in rcs.groups:
            if g['group']['id'] == self.group_id:
                g['group']['groupConfiguration'] = replacement_config
        # NOTE(review): ``verbosity`` looks like a module-level debug
        # flag -- confirm where it is defined.
        if verbosity > 0:
            print('Update group_config with {}'.format(replacement_config))
        return rcs

    # PUT the replacement config; otter answers 204 (no body) on success.
    return (treq.put(
        "{0}/groups/{1}/config".format(rcs.endpoints["otter"],
                                       self.group_id),
        json.dumps(replacement_config),
        headers=headers(str(rcs.token)),
        pool=self.pool,
    ).addCallback(check_success, [204]).addCallback(record_results,
                                                    replacement_config))
def get_stack(self, stack_url):
    """Fetch a stack's metadata from Heat.

    :param str stack_url: URL of the stack resource.
    :return: Deferred firing with the decoded JSON stack details.
    """
    return (
        self.treq.get(stack_url, headers=headers(self.auth_token),
                      log=self.log)
        .addCallback(check_success, [200])
        .addCallback(self.treq.json_content))
def test_headers_sets_auth_token(self):
    """
    headers will set the x-auth-token header based on its auth_token
    argument; the header value is a one-element list.
    """
    self.assertEqual(
        headers('my-auth-token')['x-auth-token'],
        ['my-auth-token'])
def impersonate_user(auth_endpoint, identity_admin_token, username,
                     expire_in=10800):
    """
    Acquire an auth-token for a user via impersonation.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str identity_admin_token: Auth token that has the appropriate
        permissions to impersonate other users.
    :param str username: Username to impersonate.
    :param int expire_in: Number of seconds for which the token will be
        valid.  Defaults to 10800 (3 hours).

    :return: Decoded JSON as dict.
    """
    d = treq.post(
        append_segments(auth_endpoint, 'RAX-AUTH', 'impersonation-tokens'),
        json.dumps({
            "RAX-AUTH:impersonation": {
                "user": {"username": username},
                "expire-in-seconds": expire_in
            }
        }),
        headers=headers(identity_admin_token))
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_request_error, auth_endpoint, data='impersonation')
    d.addCallback(treq.json_content)
    return d
def get_scaling_group_state(self, rcs, success_codes=None):
    """Retrieve the state of the scaling group.

    :param TestResources rcs: The integration test resources instance.
        This provides useful information to complete the request, like
        which endpoint to use to make the API request.
    :param success_codes: iterable of HTTP codes to accept; defaults to
        ``[200, 404]`` so a missing group is not an error.

    :return: A :class:`Deferred` which, upon firing, returns the result
        code and, optionally, scaling group state as a 2-tuple, in that
        order.  If not found, the result code will be 404, and the state
        will be None.
    """
    success_codes = [200, 404] if success_codes is None else success_codes

    def decide(resp):
        # Only a 200 carries a useful JSON body; for anything else drain
        # the body (so the connection is reusable) and return (code, None).
        if resp.code == 200:
            return self.treq.json_content(resp).addCallback(
                lambda x: (200, x))
        return self.treq.content(resp).addCallback(
            lambda _: (resp.code, None))

    def debug_print(resp_tuple):
        # NOTE(review): ``verbosity``/``pp`` appear to be module-level
        # debug helpers -- confirm.
        if verbosity > 0:
            print('ScalingGroup.get_scaling_group_state response: ')
            pp.pprint(resp_tuple)
        return resp_tuple

    return (self.treq.get(
        "%s/groups/%s/state" % (str(rcs.endpoints["otter"]),
                                self.group_id),
        headers=headers(str(rcs.token)),
        pool=self.pool).addCallback(
            check_success, success_codes).addCallback(
            decide).addCallback(debug_print))
def _create_with_delay(to_delay):
    """POST one server-create request, optionally pacing by one second."""
    body = json.dumps({'server': server_config})
    request = _treq.post(path, headers=headers(auth_token), data=body,
                         log=log)
    if to_delay:
        # Keep a one-second gap between consecutive server creations.
        request.addCallback(delay, clock, 1)
    return request
def find_server(server_endpoint, auth_token, server_config, log=None):
    """
    Given a server config, attempts to find a server created with that
    config.

    Uses the Nova list server details endpoint to filter out any server
    that does not have the exact server name (the filter is a regex, so
    can filter by ``^<name>$``), image ID, and flavor ID (both of which
    are exact filters).

    :param str server_endpoint: Server endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict server_config: Nova server config.
    :param log: A bound logger

    :return: Deferred that fires with a server (in the format of a server
        detail response) that matches that server config and creation
        time, or None if none matches
    :raises: :class:`ServerCreationRetryError`
    """
    query_params = {
        'image': server_config.get('imageRef', ''),
        'flavor': server_config['flavorRef'],
        'name': '^{0}$'.format(re.escape(server_config['name']))
    }

    # A boot-from-volume config may carry an explicit ``imageRef`` of
    # None; normalize so urlencode does not emit the string "None".
    if query_params['image'] is None:
        query_params['image'] = ''

    url = '{path}?{query}'.format(
        path=append_segments(server_endpoint, 'servers', 'detail'),
        query=urlencode(query_params))

    def _check_if_server_exists(list_server_details):
        nova_servers = list_server_details['servers']

        if len(nova_servers) > 1:
            raise ServerCreationRetryError(
                "Nova returned {0} servers that match the same "
                "image/flavor and name {1}.".format(
                    len(nova_servers), server_config['name']))

        elif len(nova_servers) == 1:
            nova_server = nova_servers[0]

            # Name/image/flavor matched; metadata is the remaining
            # discriminator between "ours" and a look-alike server.
            if nova_server['metadata'] != server_config['metadata']:
                raise ServerCreationRetryError(
                    "Nova found a server of the right name ({name}) but "
                    "wrong metadata. Expected {expected_metadata} and "
                    "got {nova_metadata}".format(
                        expected_metadata=server_config['metadata'],
                        nova_metadata=nova_server['metadata'],
                        name=server_config['name']))

            return {'server': nova_server}

        return None

    d = treq.get(url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)
    d.addCallback(_check_if_server_exists)
    return d
def execute(self, rcs, success_codes=None):
    """Executes the scaling policy.

    :param TestResources rcs: The integration test resources instance.
        This provides useful information to complete the request, like
        which endpoint to use to make the API request.
    :param iterable success_codes: An iterable of HTTP status codes to
        expect in the success case.  Defaults to 202.

    :return: A :class:`Deferred` which, when triggered, executes the
        scaling policy.  It returns the test resources supplied, easing
        continuity of integration test code.
    """
    return (
        treq.post(
            "%sexecute" % self.link,
            headers=headers(str(rcs.token)),
            pool=self.scaling_group.pool,
        ).addCallback(check_success,
                      [202] if success_codes is None else success_codes)
        # Policy execution does not return anything meaningful,
        # per http://tinyurl.com/ndds6ap (link to docs.rackspace).
        # So, we forcefully return our resources here.
        .addCallback(lambda _, x: x, rcs))
def start(self, rcs, test):
    """Creates and registers, but does not execute, the policy.

    :param TestResources rcs: The integration test resources instance.
        This provides useful information to complete the request, like
        which endpoint to use to make the API request.
    :param twisted.trial.unittest.TestCase test: The test case running
        the integration test.

    :return: A :class:`Deferred` which, when triggered, creates the
        scaling policy and registers it with AutoScale API.  It does not
        execute the policy, however.  The policy, when created, will also
        appear in the test resources `groups` list.  The full JSON will
        be available for inspection.  In addition, this object's
        :attribute:`policy_id` member will contain the ID of the policy.

        The deferred will itself return the TestResources instance
        provided.
    """
    # Ensure the policy is deleted when the test finishes, pass or fail.
    test.addCleanup(self.stop, rcs)

    def record_results(resp):
        # Stash the new policy's id and self-link for later API calls.
        self.policy_id = resp["policies"][0]["id"]
        self.link = str(resp["policies"][0]["links"][0]["href"])
        return rcs

    return (treq.post(
        "%s/groups/%s/policies" % (str(rcs.endpoints["otter"]),
                                   self.scaling_group.group_id),
        json.dumps(self.policy),
        headers=headers(str(rcs.token)),
        pool=self.scaling_group.pool,
    ).addCallback(check_success, [201]).addCallback(
        treq.json_content).addCallback(record_results))
def scrub_otter_metadata(log, auth_token, service_catalog, region,
                         server_id, _treq=treq):
    """
    Scrub otter-specific management metadata from the server.

    :param BoundLog log: The bound logger instance.
    :param str auth_token: Keystone auth token.
    :param service_catalog: The service catalog used to find the Nova
        endpoint for ``region``.
    :param str region: The region the server is in.
    :param str server_id: The id of the server to remove metadata from.
    :param _treq: The treq instance; possibly a test double.
    """
    bound_log = log.bind(region=region, server_id=server_id)
    bound_log.msg("Scrubbing otter-specific metadata")

    service_name = config_value('cloudServersOpenStack')
    endpoint = public_endpoint_url(service_catalog, service_name, region)
    url = append_segments(endpoint, 'servers', server_id, 'metadata')

    auth_hdr = headers(auth_token)

    # Build GET and PUT callables against the same URL.  ``method=method``
    # binds the loop variable at definition time, avoiding the classic
    # late-binding-closure pitfall.
    get, put = [lambda data=None, method=method: _treq.request(
        method, url, headers=auth_hdr, data=data, log=bound_log)
        for method in ["GET", "PUT"]]

    # Pipeline: GET current metadata -> strip otter keys -> PUT back the
    # filtered mapping -> drain the response body.
    return (get()
            .addCallback(_treq.json_content)
            .addCallback(comp(json.dumps, _without_otter_metadata))
            .addCallback(put)
            .addCallback(_treq.content))
def start(self, rcs, test):
    """Create a scaling group.

    :param TestResources rcs: A set of OpenStack resources encapsulated
        in a TestResources instance.
    :param twisted.trial.unittest.TestCase test: The test case running
        the integration test; used to schedule teardown.

    :return: The same instance of TestResources.
    """
    # Tear the group down when the test finishes, pass or fail.
    test.addCleanup(self.stop, rcs)

    def record_results(resp):
        # Remember the full creation response and the new group's id.
        rcs.groups.append(resp)
        self.group_id = str(resp["group"]["id"])
        # NOTE(review): ``verbosity``/``pp`` look like module-level debug
        # helpers -- confirm.
        if verbosity > 0:
            print('Created scaling group {0} \n'.format(self.group_id))
            pp.pprint(rcs.groups)
        return rcs

    return (
        self.treq.post(
            "%s/groups" % str(rcs.endpoints["otter"]),
            json.dumps(self.group_config),
            headers=headers(str(rcs.token)),
            pool=self.pool
        )
        .addCallback(check_success, [201])
        .addCallback(self.treq.json_content)
        .addCallback(record_results)
    )
def add_to_load_balancer(endpoint, auth_token, lb_config, ip_address):
    """
    Add an IP addressed to a load balancer based on the lb_config.

    TODO: Handle load balancer node metadata.

    :param str endpoint: Load balancer endpoint URI.
    :param str auth_token: Keystone Auth Token.
    :param dict lb_config: An lb_config dictionary; must contain
        ``loadBalancerId`` and ``port``.
    :param str ip_address: The IP Address of the node to add to the
        load balancer.

    :return: Deferred that fires with the Add Node to load balancer
        response as a dict.
    """
    lb_id = lb_config['loadBalancerId']
    port = lb_config['port']
    path = append_segments(endpoint, 'loadbalancers', str(lb_id), 'nodes')
    d = treq.post(path, headers=headers(auth_token),
                  data=json.dumps({"nodes": [{"address": ip_address,
                                              "port": port,
                                              "condition": "ENABLED",
                                              "type": "PRIMARY"}]}))
    # CLB answers 200 or 202 depending on whether the change is applied
    # immediately or queued.
    d.addCallback(check_success, [200, 202])
    d.addErrback(wrap_request_error, endpoint, 'add')
    return d.addCallback(treq.json_content)
def test_throttling(self):
    """
    When the throttler function returns a bracketing function, it's used
    to throttle the request.
    """
    def throttler(stype, method, tid):
        # Only this exact (service, method, tenant) triple gets the
        # bracket; anything else implicitly returns None (no throttle).
        if (stype == ServiceType.CLOUD_SERVERS and method == 'get'
                and tid == 1):
            return bracket
    # Sentinel identity object; ``throttler`` closes over it (late
    # binding, assigned before throttler is ever called).
    bracket = object()
    svcreq = service_request(
        ServiceType.CLOUD_SERVERS, 'GET', 'servers').intent

    response = stub_pure_response({}, 200)
    # The dispatch sequence asserts the request is wrapped in _Throttle
    # with our bracket, and inside that the usual auth-then-request flow
    # happens in order.
    seq = SequenceDispatcher([
        (_Throttle(bracket=bracket, effect=mock.ANY),
         nested_sequence([
             (Authenticate(authenticator=self.authenticator,
                           tenant_id=1, log=self.log),
              lambda i: ('token', fake_service_catalog)),
             (Request(method='GET', url='http://dfw.openstack/servers',
                      headers=headers('token'), log=self.log),
              lambda i: response),
         ])),
    ])

    eff = self._concrete(svcreq, throttler=throttler)
    with seq.consume():
        result = sync_perform(seq, eff)
        self.assertEqual(result, (response[0], {}))
def remove_alarm(self, entity_id, alarm_id):
    """Delete the given alarm from the given entity.

    :return: Deferred firing once the API confirms deletion (204).
    """
    url = http.append_segments(self._endpoint, "entities", entity_id,
                               "alarms", alarm_id)
    d = treq.delete(url, headers=http.headers(self._auth_token))
    d.addCallback(http.check_success, [204])
    return d
def authenticate_user(auth_endpoint, username, password):
    """
    Authenticate against an Identity endpoint with a username/password
    pair.

    :param str auth_endpoint: Identity API endpoint URL.
    :param str username: Username to authenticate as.
    :param str password: Password for the specified user.

    :return: Decoded JSON response as dict.
    """
    body = json.dumps({
        "auth": {
            "passwordCredentials": {
                "username": username,
                "password": password
            }
        }
    })
    d = treq.post(append_segments(auth_endpoint, 'tokens'), body,
                  headers=headers())
    d.addCallback(check_success, [200, 203])
    d.addErrback(wrap_request_error, auth_endpoint,
                 data=('authenticating', username))
    d.addCallback(treq.json_content)
    return d
def start(self, rcs, test):
    """Creates the cloud load balancer and launches it in the cloud.

    :param TestResources rcs: The resources used to make appropriate API
        calls with.
    :param twisted.trial.unittest.TestCase test: The test case running
        the integration test.

    :return: A `Deferred` which, when fired, returns the resources
        provided to the `start` function.  The instance will also have
        its cloud load balancer ID (`clb_id`) set by this time.
    """
    # Ensure the load balancer is cleaned up when the test ends.
    test.addCleanup(self.stop, rcs)

    def record_results(resp):
        # Remember the full creation response and the new CLB's id.
        rcs.clbs.append(resp)
        self.clb_id = str(resp["loadBalancer"]["id"])
        return rcs

    return (self.treq.post("%s/loadbalancers" %
                           str(rcs.endpoints["loadbalancers"]),
                           json.dumps(self.config()),
                           headers=headers(str(rcs.token)),
                           pool=self.pool)
            .addCallback(check_success, [202], _treq=self.treq)
            .addCallback(self.treq.json_content)
            .addCallback(record_results))
def list_nodes(self, rcs):
    """
    Get all the nodes on the load balancer.

    :param rcs: a :class:`otter.integration.lib.resources.TestResources`
        instance

    :return: the JSON response from the load balancer, which looks like::

        {
            "nodes": [
                {"id": ...},
                {"id": ...},
                ...
            ]
        }
    """
    return (
        self.treq.get("{0}/nodes".format(self.endpoint(rcs)),
                      headers=headers(str(rcs.token)),
                      pool=self.pool)
        .addCallback(check_success, [200], _treq=self.treq)
        .addCallback(self.treq.json_content))
def execute(self, rcs, success_codes=None):
    """Executes the scaling policy.

    :param TestResources rcs: The integration test resources instance.
        This provides useful information to complete the request, like
        which endpoint to use to make the API request.
    :param iterable success_codes: An iterable of HTTP status codes to
        expect in the success case.  Defaults to 202.

    :return: A :class:`Deferred` which, when triggered, executes the
        scaling policy and returns the test resources supplied, easing
        continuity of integration test code.
    """
    return (
        treq.post(
            "%sexecute" % self.link,
            headers=headers(str(rcs.token)),
            pool=self.scaling_group.pool,
        ).addCallback(check_success,
                      [202] if success_codes is None else success_codes)
        # Policy execution does not return anything meaningful,
        # per http://tinyurl.com/ndds6ap (link to docs.rackspace).
        # So, we forcefully return our resources here.
        .addCallback(lambda _, x: x, rcs)
    )
def setUp(self):
    """
    Set up fake pool, treq, responses, and RCS.
    """
    self.pool = object()
    self.rcs = _FakeRCS()
    self.server_id = 'server_id'
    self.expected_kwargs = dict(headers=headers('token'),
                                pool=self.pool)
def try_delete():
    """Issue the DELETE and require Nova to report the server gone."""
    url = "{}/servers/{}".format(rcs.endpoints["nova"], self.id)
    d = self.treq.delete(url, headers=headers(str(rcs.token)),
                         pool=self.pool)
    # Success here means the server is already absent (404).
    d.addCallback(check_success, [404], _treq=self.treq)
    d.addCallback(self.treq.content)
    return d
def try_update():
    """PUT the health-monitor config; pending-update becomes transient."""
    url = "{}/healthmonitor".format(self.endpoint(rcs))
    body = json.dumps({"healthMonitor": config})
    d = self.treq.put(url, body, headers=headers(str(rcs.token)),
                      pool=self.pool)
    d.addCallback(check_success, [202], _treq=self.treq)
    d.addCallbacks(self.treq.content, _pending_update_to_transient)
    # Callers only care about completion, not the drained body.
    d.addCallback(lambda _: None)
    return d
def resume(self, rcs):
    """
    Resume group

    :return: Deferred firing once the API confirms with a 204.
    """
    url = "{}/resume".format(self._endpoint(rcs))
    return (self.treq.post(url, headers=headers(str(rcs.token)),
                           pool=self.pool)
            .addCallback(check_success, [204]))
def trigger_convergence(self, rcs, success_codes=None):
    """
    Trigger convergence on a group

    :param success_codes: HTTP codes to accept; a falsy value means 204.
    """
    url = "{}/converge".format(self._endpoint(rcs))
    d = self.treq.post(url, headers=headers(str(rcs.token)),
                       pool=self.pool)
    d.addCallback(check_success, success_codes or [204])
    return d
def try_delete():
    """DELETE the server and require Nova to already report it gone."""
    target = "{}/servers/{}".format(rcs.endpoints["nova"], self.id)
    return (
        self.treq.delete(target, headers=headers(str(rcs.token)),
                         pool=self.pool)
        .addCallback(check_success, [404], _treq=self.treq)
        .addCallback(self.treq.content))
def trigger_convergence(authenticator, region, group):
    """
    Trigger convergence on one group by POSTing to its ``converge``
    endpoint.

    NOTE(review): the bare ``yield`` usage suggests this is decorated
    with ``inlineCallbacks`` at the definition site -- confirm.

    :param authenticator: object with ``authenticate_tenant`` returning
        a (token, catalog) pair.
    :param str region: region used to pick the autoscale endpoint.
    :param dict group: mapping with ``tenantId`` and ``groupId`` keys.
    :raises ValueError: if the API responds with anything but 204.
    """
    token, catalog = yield authenticator.authenticate_tenant(
        group["tenantId"])
    endpoint = public_endpoint_url(catalog, "autoscale", region)
    resp = yield treq.post(
        append_segments(endpoint, "groups", group["groupId"], "converge"),
        headers=headers(token), data="")
    if resp.code != 204:
        raise ValueError("bad code", resp.code)
def trigger_convergence(self, rcs, success_codes=None):
    """
    Trigger convergence on a group

    :param success_codes: HTTP codes to accept; a falsy value means 204.
    """
    return (
        self.treq.post("{}/converge".format(self._endpoint(rcs)),
                       headers=headers(str(rcs.token)),
                       pool=self.pool)
        .addCallback(check_success, success_codes or [204]))
def remove():
    """DELETE this node from the CLB, tolerating known failure modes."""
    d = treq.delete(path, headers=headers(auth_token), log=lb_log)
    d.addCallback(check_success, [200, 202])
    # Drain the body to avoid https://twistedmatrix.com/trac/ticket/6751
    d.addCallback(treq.content)
    d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
    d.addErrback(wrap_request_error, path, 'remove_node')
    # A CLB that was itself deleted counts as the node being gone.
    d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
    return d
def resume(self, rcs):
    """
    Resume group

    :return: Deferred firing once the API acknowledges with 204.
    """
    d = self.treq.post("{}/resume".format(self._endpoint(rcs)),
                       headers=headers(str(rcs.token)),
                       pool=self.pool)
    d.addCallback(check_success, [204])
    return d
def get_tenant_ids(token, catalog):
    """Return tenant ids that have a ``desired`` metric in Cloud Metrics."""
    endpoint = public_endpoint_url(catalog, "cloudMetrics", "IAD")
    d = treq.get(append_segments(endpoint, "metrics", "search"),
                 headers=headers(token), params={"query": "*.*.desired"})
    d.addCallback(check_success, [200])
    d.addCallback(treq.json_content)

    def extract(body):
        # Metric names look like "<prefix>.<tenant>.desired"; the tenant
        # id is the second dotted component.
        return [item["metric"].split(".")[1] for item in body]

    d.addCallback(extract)
    return d
def get_tenant_ids(token, catalog):
    """List tenant ids appearing in Cloud Metrics ``desired`` entries."""
    search_url = append_segments(
        public_endpoint_url(catalog, "cloudMetrics", "IAD"),
        "metrics", "search")
    return (
        treq.get(search_url, headers=headers(token),
                 params={"query": "*.*.desired"})
        .addCallback(check_success, [200])
        .addCallback(treq.json_content)
        # Tenant id is the second dotted component of each metric name.
        .addCallback(lambda body: [item["metric"].split(".")[1]
                                   for item in body]))
def details(self, rcs):
    """Fetch this server's detail record from Nova as decoded JSON."""
    url = "{}/servers/{}".format(rcs.endpoints["nova"], self.id)
    d = self.treq.get(url, headers=headers(str(rcs.token)),
                      pool=self.pool)
    d.addCallback(check_success, [200])
    d.addCallback(self.treq.json_content)
    return d
def get_stack_list(self):
    """List Heat stacks tagged for this scaling group."""
    tag = get_stack_tag_for_group(self.group.group_id)
    d = self.helper.treq.get(
        '{}/stacks'.format(self.rcs.endpoints['heat']),
        headers=headers(str(self.rcs.token)),
        params={'tags': tag},
        pool=self.helper.pool)
    d.addCallback(check_success, [200])
    d.addCallback(self.helper.treq.json_content)
    return d
def really_add():
    """POST the nodes to the CLB; pending-update errors are transient."""
    url = "{0}/nodes".format(self.endpoint(rcs))
    body = json.dumps({"nodes": node_list})
    d = self.treq.post(url, body, headers=headers(str(rcs.token)),
                       pool=self.pool)
    d.addCallback(check_success, [202], _treq=self.treq)
    d.addCallbacks(self.treq.json_content, _pending_update_to_transient)
    return d
def really_change():
    """PUT the node update; pending-update errors are transient."""
    url = "{0}/nodes/{1}".format(self.endpoint(rcs), node_id)
    body = json.dumps({"node": data})
    d = self.treq.put(url, body, headers=headers(str(rcs.token)),
                      pool=self.pool)
    d.addCallback(check_success, [202], _treq=self.treq)
    d.addCallbacks(self.treq.content, _pending_update_to_transient)
    return d
def really_delete():
    """Issue one DELETE covering all requested node ids."""
    query = [('id', node_id) for node_id in node_ids]
    d = self.treq.delete("{0}/nodes".format(self.endpoint(rcs)),
                         params=query,
                         headers=headers(str(rcs.token)),
                         pool=self.pool)
    d.addCallback(check_success, [202], _treq=self.treq)
    d.addCallbacks(self.treq.content, _pending_update_to_transient)
    return d
def try_update():
    """PUT the health monitor; swallow the body, map pending-update."""
    d = self.treq.put(
        "{}/healthmonitor".format(self.endpoint(rcs)),
        json.dumps({"healthMonitor": config}),
        headers=headers(str(rcs.token)),
        pool=self.pool)
    d.addCallback(check_success, [202], _treq=self.treq)
    d.addCallbacks(self.treq.content, _pending_update_to_transient)
    # Normalize the result to None for callers.
    return d.addCallback(lambda _: None)
def disown(self, rcs, server_id, purge=False, replace=False):
    """
    Disown a server from the autoscaling group.

    :param TestResources rcs: The integration test resources instance.
    :param str server_id: The id of the server to remove from the group.
    :param bool purge: Whether otter should also delete the server.
    :param bool replace: Whether otter should replace the removed server.
    :return: Deferred firing when the API accepts the request (202).

    NOTE(review): the booleans are passed straight through as query
    params, so they likely serialize as "True"/"False" -- confirm the
    API accepts that representation.
    """
    d = self.treq.delete(
        "{0}/servers/{1}".format(self._endpoint(rcs), server_id),
        params={'replace': replace, 'purge': purge},
        headers=headers(str(rcs.token)),
        pool=self.pool)
    return d.addCallback(check_success, [202])
def remove():
    """DELETE this node from the CLB, with layered error handling."""
    d = treq.delete(path, headers=headers(auth_token), log=lb_log)
    d.addCallback(check_success, [200, 202])
    # Consume the body to avoid
    # https://twistedmatrix.com/trac/ticket/6751
    d.addCallback(treq.content)
    d.addErrback(log_lb_unexpected_errors, lb_log, 'remove_node')
    d.addErrback(wrap_request_error, path, 'remove_node')
    # If the whole CLB is gone, the node removal is treated as done.
    d.addErrback(check_deleted_clb, loadbalancer_id, node_id)
    return d
def list_images(rcs, pool, _treq=treq):
    """
    Get a list of all images, using an oversized page limit instead of
    pagination.
    """
    return (
        _treq.get("{}/images".format(rcs.endpoints['nova']),
                  params={'limit': 10000},
                  headers=headers(str(rcs.token)),
                  pool=pool)
        .addCallback(check_success, [200])
        .addCallback(_treq.json_content))
def setUp(self):
    """
    Set up fake pool, treq, responses, and RCS.
    """
    self.pool = object()
    self.rcs = _FakeRCS()
    expected = {'pool': self.pool}
    expected['headers'] = headers('token')
    self.expected_kwargs = expected
def add():
    """POST the node payload to the CLB nodes endpoint."""
    d = treq.post(path,
                  headers=headers(auth_token),
                  data=json.dumps({"nodes": [{"address": ip_address,
                                              "port": port,
                                              "condition": "ENABLED",
                                              "type": "PRIMARY"}]}),
                  log=lb_log)
    d.addCallback(check_success, [200, 202])
    # NOTE(review): the extra args here are (path, lb_log), while the
    # remove_node errback elsewhere passes only (lb_log) -- confirm
    # log_lb_unexpected_errors' expected signature.
    d.addErrback(log_lb_unexpected_errors, path, lb_log, 'add_node')
    return d
def details(self, rcs):
    """Return this server's Nova detail view as decoded JSON."""
    return (
        self.treq.get("{}/servers/{}".format(rcs.endpoints["nova"],
                                             self.id),
                      headers=headers(str(rcs.token)),
                      pool=self.pool)
        .addCallback(check_success, [200])
        .addCallback(self.treq.json_content))
def get_stack_list(self):
    """Fetch the Heat stacks carrying this group's tag."""
    return (
        self.helper.treq.get(
            '{}/stacks'.format(self.rcs.endpoints['heat']),
            headers=headers(str(self.rcs.token)),
            params={'tags': get_stack_tag_for_group(self.group.group_id)},
            pool=self.helper.pool)
        .addCallback(check_success, [200])
        .addCallback(self.helper.treq.json_content))
def really_delete():
    """One DELETE for the whole batch of node ids."""
    return (
        self.treq.delete(
            "{0}/nodes".format(self.endpoint(rcs)),
            params=[('id', node_id) for node_id in node_ids],
            headers=headers(str(rcs.token)),
            pool=self.pool)
        .addCallback(check_success, [202], _treq=self.treq)
        .addCallbacks(self.treq.content, _pending_update_to_transient))
def test_default_headers_win(self):
    """
    When merging headers together, the predefined set takes precedent
    over any that are passed.
    """
    request_ = self._no_reauth_client()
    eff = request_("get", "/foo", headers={"x-auth-token": "abc123"})
    # The caller's conflicting x-auth-token must have been discarded.
    self.assertEqual(eff.intent.headers, headers('my-token'))
def really_change():
    """PUT one node's new attributes; map pending-update to transient."""
    return (
        self.treq.put(
            "{0}/nodes/{1}".format(self.endpoint(rcs), node_id),
            json.dumps({"node": data}),
            headers=headers(str(rcs.token)),
            pool=self.pool)
        .addCallback(check_success, [202], _treq=self.treq)
        .addCallbacks(self.treq.content, _pending_update_to_transient))
def really_add():
    """POST the batch of nodes; map pending-update to transient."""
    return (
        self.treq.post(
            "{0}/nodes".format(self.endpoint(rcs)),
            json.dumps({"nodes": node_list}),
            headers=headers(str(rcs.token)),
            pool=self.pool)
        .addCallback(check_success, [202], _treq=self.treq)
        .addCallbacks(self.treq.json_content,
                      _pending_update_to_transient))
def get_addresses(self, rcs):
    """
    Get the network addresses for a server.

    :param rcs: an instance of
        :class:`otter.integration.lib.resources.TestResources`
    """
    url = "{}/servers/{}/ips".format(rcs.endpoints["nova"], self.id)
    d = self.treq.get(url, headers=headers(str(rcs.token)),
                      pool=self.pool)
    d.addCallback(check_success, [200])
    d.addCallback(self.treq.json_content)
    return d
def test_performs_tenant_scope(self, deferred_lock_run): """ :func:`perform_tenant_scope` performs :obj:`TenantScope`, and uses the default throttler """ # We want to ensure # 1. the TenantScope can be performed # 2. the ServiceRequest is run within a lock, since it matches the # default throttling policy set_config_data({ "cloud_client": { "throttling": { "create_server_delay": 1, "delete_server_delay": 0.4 } } }) self.addCleanup(set_config_data, {}) clock = Clock() authenticator = object() log = object() dispatcher = get_cloud_client_dispatcher(clock, authenticator, log, make_service_configs()) svcreq = service_request(ServiceType.CLOUD_SERVERS, 'POST', 'servers') tscope = TenantScope(tenant_id='111', effect=svcreq) def run(f, *args, **kwargs): result = f(*args, **kwargs) result.addCallback(lambda x: (x[0], assoc(x[1], 'locked', True))) return result deferred_lock_run.side_effect = run response = stub_pure_response({}, 200) seq = SequenceDispatcher([ (Authenticate(authenticator=authenticator, tenant_id='111', log=log), lambda i: ('token', fake_service_catalog)), (Request(method='POST', url='http://dfw.openstack/servers', headers=headers('token'), log=log), lambda i: response), ]) disp = ComposedDispatcher([seq, dispatcher]) with seq.consume(): result = perform(disp, Effect(tscope)) self.assertNoResult(result) clock.advance(1) self.assertEqual(self.successResultOf(result), (response[0], { 'locked': True }))
def list_metadata(self, rcs):
    """
    Use Nova to get the server's metadata.

    :param rcs: an instance of
        :class:`otter.integration.lib.resources.TestResources`
    """
    url = "{}/servers/{}/metadata".format(rcs.endpoints["nova"], self.id)
    d = self.treq.get(url, headers=headers(str(rcs.token)),
                      pool=self.pool)
    d.addCallback(check_success, [200])
    d.addCallback(self.treq.json_content)
    return d
def list_servers(rcs, pool, _treq=treq):
    """
    Get a list of all servers, with an optional name regex provided.

    This does not handle pagination, and instead just increases the
    limit to an absurdly high number.
    """
    d = _treq.get("{}/servers/detail".format(rcs.endpoints['nova']),
                  params={'limit': 10000},
                  headers=headers(str(rcs.token)),
                  pool=pool)
    d.addCallback(check_success, [200])
    d.addCallback(_treq.json_content)
    return d
def disown(self, rcs, server_id, purge=False, replace=False):
    """
    Disown a server from the autoscaling group.

    :param TestResources rcs: The integration test resources instance.
    :param str server_id: The id of the server to remove from the group.
    :param bool purge: Whether otter should also delete the server.
    :param bool replace: Whether otter should replace the removed server.
    :return: Deferred firing when the API accepts the request (202).

    NOTE(review): ``replace``/``purge`` go out as raw query-param
    booleans ("True"/"False") -- confirm the API's expected encoding.
    """
    d = self.treq.delete("{0}/servers/{1}".format(self._endpoint(rcs),
                                                  server_id),
                         params={'replace': replace, 'purge': purge},
                         headers=headers(str(rcs.token)),
                         pool=self.pool)
    return d.addCallback(check_success, [202])
def create_webhook(self, rcs):
    """
    Create webhook and return `Webhook` object as Deferred
    """
    payload = json.dumps([{"name": "integration-test-webhook"}])
    d = treq.post("{}/webhooks".format(self.link.rstrip("/")),
                  headers=headers(str(rcs.token)),
                  data=payload,
                  pool=self.scaling_group.pool)
    d.addCallback(check_success, [201])
    d.addCallback(treq.json_content)
    d.addCallback(lambda r: Webhook.from_json(r["webhooks"][0]))
    return d
def add_notification_and_plan(self):
    """Groups must have a Notification and Notification plan for Auto
    Scale.  This should only have to be created for each group, and the
    ids should be stored in the database.

    :return: Deferred firing with a ``(notification_id,
        notification_plan_id)`` 2-tuple.
    """
    # Mutable cell so the nested callbacks can carry the id forward.
    notification_id = []
    # TODO: Finish this path to the webhook
    # TODO: Add the group to the label.
    notification_data = {
        'label': 'Auto Scale Webhook Notification',
        'type': 'webhook',
        'details': {
            'url': '/alarm'
        }
    }
    notification_url = http.append_segments(self._endpoint,
                                            'notifications')
    d = treq.post(notification_url,
                  headers=http.headers(self._auth_token),
                  data=json.dumps(notification_data))
    d.addCallback(http.check_success, [201])

    # Get the newly created notification
    def create_notification_plan(result):
        # The new object's id is returned in the x-object-id header.
        not_id = result.headers.getRawHeaders('x-object-id')[0]
        notification_id.append(not_id)
        notification_plan_data = {
            'label': 'Auto Scale Notification Plan',
            'critical_state': [not_id],
            'ok_state': [not_id],
            'warning_state': [not_id]
        }
        notification_plan_url = http.append_segments(
            self._endpoint, 'notification_plans')
        return treq.post(notification_plan_url,
                         headers=http.headers(self._auth_token),
                         data=json.dumps(notification_plan_data))

    d.addCallback(create_notification_plan)
    d.addCallback(http.check_success, [201])

    def return_ids(result):
        notification_plan_id = result.headers.getRawHeaders(
            'x-object-id')[0]
        return defer.succeed((notification_id[0], notification_plan_id))

    return d.addCallback(return_ids)
def validate_flavor(log, auth_token, server_endpoint, flavor_ref):
    """
    Validate flavor by getting its information.

    :param log: bound logger passed through to treq.
    :param str auth_token: Keystone auth token.
    :param str server_endpoint: Nova endpoint URL.
    :param flavor_ref: flavor id to look up.
    :return: Deferred firing with the raw response body on success;
        errbacks with :class:`UnknownFlavor` when Nova returns 404.
    """
    url = append_segments(server_endpoint, 'flavors', flavor_ref)
    d = treq.get(url, headers=headers(auth_token), log=log)
    d.addCallback(check_success, [200, 203])
    d.addErrback(raise_error_on_code, 404, UnknownFlavor(flavor_ref), url,
                 'get_flavor')

    # Extracting the content to avoid a strange bug in twisted/treq where
    # the next subsequent call to nova hangs indefinitely
    d.addCallback(treq.content)
    return d
def send(image_id):
    """POST the group-create request, patching in ``image_id`` first."""
    lc_type = self.group_config["launchConfiguration"]["type"]
    # Only server-based launch configs carry an imageRef to override.
    if image_id is not None and lc_type == "launch_server":
        self.group_config = update_in(
            self.group_config,
            ["launchConfiguration", "args", "server", "imageRef"],
            lambda _: image_id)
    d = self.treq.post("%s/groups" % str(rcs.endpoints["otter"]),
                       json.dumps(self.group_config),
                       headers=headers(str(rcs.token)),
                       pool=self.pool)
    d.addCallback(check_success, [201])
    d.addCallback(self.treq.json_content)
    # ``record_results`` comes from the enclosing scope -- TODO confirm
    # it records the group id as in the sibling ``start`` methods.
    return d.addCallback(record_results)
def test_authenticates(self):
    """Auth is done before making the request."""
    eff = self._concrete(self.svcreq)
    self.assertEqual(eff.intent,
                     Authenticate(self.authenticator, 1, self.log))
    next_eff = resolve_authenticate(eff)
    # After auth resolves, the chain issues the HTTP request carrying
    # the token in its headers.
    expected_request = Request(method='GET',
                               url='http://dfw.openstack/servers',
                               headers=headers('token'),
                               log=self.log)
    self.assertEqual(next_eff.intent, expected_request)
def get_state(self, rcs):
    """Returns the current state of the cloud load balancer.

    :param TestResources rcs: The resources used to make appropriate API
        calls with.
    :return: A `Deferred` which, when fired, returns the parsed JSON for
        the current cloud load balancer state.
    """
    d = self.treq.get(self.endpoint(rcs),
                      headers=headers(str(rcs.token)),
                      pool=self.pool)
    d.addCallback(check_success, [200], _treq=self.treq)
    d.addCallback(self.treq.json_content)
    return d