def add_load_balancer(tenant_id, store, lb_info, lb_id, current_timestamp):
    """
    Create a new load balancer, register it in ``store.lbs``, and return
    its JSON representation together with a 202 status code.

    Note: ``store.lbs`` has tenant_id added as an extra key in comparison
    to the lb_example.
    """
    # Flatten the metadata list-of-{key, value} pairs into a plain dict so
    # later lookups do not have to walk the list.
    metadata = {
        item["key"]: item["value"]
        for item in lb_info.get("metadata", [])
    }
    store.meta[lb_id] = metadata
    log.msg(store.meta)

    # The "lb_building" metadata flag keeps the new LB in BUILD status.
    status = "BUILD" if "lb_building" in store.meta[lb_id] else "ACTIVE"

    timestring = seconds_to_timestamp(current_timestamp)
    lb = load_balancer_example(lb_info, lb_id, status, timestring)
    # Track tenant_id and nodeCount internally ...
    lb["tenant_id"] = tenant_id
    lb["nodeCount"] = len(lb.get("nodes", []))
    store.lbs[lb_id] = lb

    # ... but strip the tenant before returning the creation response.
    return {'loadBalancer': _lb_without_tenant(store, lb_id)}, 202
def add_load_balancer(self, lb_info, lb_id):
    """
    Create a new CLB, register it in ``self.lbs``, and return its full
    JSON representation with a 202 status code.

    :param dict lb_info: Configuration for the load balancer.  See
        Openstack docs for creating CLBs.
    :param string lb_id: Unique ID for this load balancer.
    """
    # Flatten the metadata list-of-{key, value} blobs into a dict so the
    # control-plane flags ("lb_building", etc.) can be looked up directly.
    metadata = {
        blob["key"]: blob["value"]
        for blob in lb_info.get("metadata", [])
    }
    self.meta[lb_id] = metadata
    log.msg(self.meta)

    # The "lb_building" metadata flag keeps the new CLB in BUILD status.
    status = "BUILD" if "lb_building" in self.meta[lb_id] else "ACTIVE"

    created_at = seconds_to_timestamp(self.clock.seconds())
    initial_nodes = [Node.from_json(blob)
                     for blob in lb_info.get("nodes", [])]
    self.lbs[lb_id] = CLB(
        load_balancer_example(lb_info, lb_id, status, created_at),
        nodes=initial_nodes)
    return {'loadBalancer': self.lbs[lb_id].full_json()}, 202
def send_messages(self, request):
    """
    Handle a Mailgun-style "send message" POST.

    Responds with response code 200 and a generic queued-message body.
    Two sentinel ``to`` addresses short-circuit the happy path: the first
    results in a 500 (and bumps the module-level failure counter), the
    second results in a 400.
    """
    # The request body is form-encoded; every value in ``content`` is a
    # list (parse_qs semantics).
    content = parse_qs(request.content.read().decode("utf-8"))
    to_address = content.get("to")
    # Collect Mailgun custom headers ("h:") and variables ("v:") so they
    # can be stored alongside the message.
    headers = {}
    for key, value in content.items():
        if key.startswith("h:") or key.startswith("v:"):
            headers[key] = value
    if "*****@*****.**" in to_address:
        request.setResponseCode(500)
        # Count how many times the error sentinel was hit in the
        # module-level ``count`` global.
        global count
        count += 1
        return b""
    # NOTE(review): this sentinel string is identical to the one above,
    # so this 400 branch is currently unreachable - confirm the intended
    # address for the 400 case.
    if "*****@*****.**" in to_address:
        request.setResponseCode(400)
        return b""
    request.setResponseCode(200)
    # Message IDs take the form "<timestamp>@samples.mailgun.org".
    message_id = "{0}@samples.mailgun.org".format(
        seconds_to_timestamp(time.time(), "%Y%m%d%H%M%S.%f"))
    self.core.message_store.add_to_message_store(
        message_id=message_id,
        to=content.get("to")[0],
        msg_from=content.get("from"),
        subject=content.get("subject")[0],
        body=content.get("html"),
        custom_headers=headers,
    )
    return json.dumps({"message": "Queued. Thank you.", "id": message_id})
def update_node(self, lb_id, node_id, node_updates):
    """
    Update the weight, condition, or type of a single node.

    The IP, port, status, and ID are immutable, and attempting to change
    them will cause a 400 response to be returned.

    All success and error behavior verified as of 2016-06-16.

    :param str lb_id: the load balancer ID
    :param str node_id: the node ID to update
    :param dict node_updates: The JSON dictionary containing node
        attributes to update
    :return: a `tuple` of (json response as a dict, http status code)
    """
    feed_summary = (
        "Node successfully updated with address: '{address}', port: '{port}', "
        "weight: '{weight}', condition: '{condition}'")

    # first, store whether address and port were provided - if they were
    # that's a validation error not a schema error
    # (dict comprehensions replace the older dict([...]) construction)
    things_wrong = {k: True for k in ("address", "port", "id")
                    if k in node_updates}
    node_updates = {k: v for k, v in node_updates.items()
                    if k not in ("address", "port")}

    # use the Node.from_json to check the schema; dummy address/port are
    # supplied because they were stripped above
    try:
        Node.from_json(dict(address="1.1.1.1", port=80, **node_updates))
    except (TypeError, ValueError):
        return invalid_json_schema()

    # handle the possible validation (as opposed to schema) errors
    if not 1 <= node_updates.get('weight', 1) <= 100:
        things_wrong["weight"] = True
    if things_wrong:
        return updating_node_validation_error(**things_wrong)

    # Now, finally, check if the LB exists and node exists
    if lb_id in self.lbs:
        self._verify_and_update_lb_state(lb_id, False, self.clock.seconds())

        # Nodes can only be changed while the LB itself is ACTIVE.
        if self.lbs[lb_id]["status"] != "ACTIVE":
            return considered_immutable_error(
                self.lbs[lb_id]["status"], lb_id)

        for i, node in enumerate(self.lbs[lb_id].nodes):
            if node.id == node_id:
                # Rebuild the node from its current attributes merged
                # with the requested updates, and record a feed event.
                params = attr.asdict(node)
                params.update(node_updates)
                self.lbs[lb_id].nodes[i] = Node(**params)
                self.lbs[lb_id].nodes[i].feed_events.append(
                    (feed_summary.format(**params),
                     seconds_to_timestamp(self.clock.seconds())))
                return ("", 202)

        return node_not_found()

    return loadbalancer_not_found()
def update_node(self, lb_id, node_id, node_updates):
    """
    Update the weight, condition, or type of a single node.

    The IP, port, status, and ID are immutable, and attempting to change
    them will cause a 400 response to be returned.

    All success and error behavior verified as of 2016-06-16.

    :param str lb_id: the load balancer ID
    :param str node_id: the node ID to update
    :param dict node_updates: The JSON dictionary containing node
        attributes to update
    :return: a `tuple` of (json response as a dict, http status code)
    """
    feed_summary = (
        "Node successfully updated with address: '{address}', port: '{port}', "
        "weight: '{weight}', condition: '{condition}'")

    # first, store whether address and port were provided - if they were
    # that's a validation error not a schema error
    things_wrong = {k: True for k in ("address", "port", "id")
                    if k in node_updates}
    node_updates = {k: node_updates[k] for k in node_updates
                    if k not in ("address", "port")}

    # use the Node.from_json to check the schema; dummy address/port are
    # supplied because they were stripped above
    try:
        Node.from_json(dict(address="1.1.1.1", port=80, **node_updates))
    except (TypeError, ValueError):
        return invalid_json_schema()

    # handle the possible validation (as opposed to schema) errors
    if not 1 <= node_updates.get('weight', 1) <= 100:
        things_wrong["weight"] = True
    if things_wrong:
        return updating_node_validation_error(**things_wrong)

    # Now, finally, check if the LB exists and node exists
    if lb_id in self.lbs:
        self._verify_and_update_lb_state(lb_id, False, self.clock.seconds())

        # Nodes can only be changed while the LB itself is ACTIVE.
        if self.lbs[lb_id]["status"] != "ACTIVE":
            return considered_immutable_error(
                self.lbs[lb_id]["status"], lb_id)

        for i, node in enumerate(self.lbs[lb_id].nodes):
            if node.id == node_id:
                # Rebuild the node from its current attributes merged
                # with the requested updates, and record a feed event.
                params = attr.asdict(node)
                params.update(node_updates)
                self.lbs[lb_id].nodes[i] = Node(**params)
                self.lbs[lb_id].nodes[i].feed_events.append(
                    (feed_summary.format(**params),
                     seconds_to_timestamp(self.clock.seconds())))
                return ("", 202)

        return node_not_found()

    return loadbalancer_not_found()
def detail_json(self, absolutize_url):
    """
    Long-form JSON-serializable object representation of this server, as
    returned by either a GET on this individual server or a member in
    the list returned by the list-details request.
    """
    template = self.static_defaults.copy()
    tenant_id = self.collection.tenant_id
    template.update({
        "id": self.server_id,
        "OS-DCF:diskConfig": self.disk_config,
        "OS-EXT-STS:vm_state": self.status,
        "addresses": self.addresses_json(),
        "created": seconds_to_timestamp(self.creation_time),
        "updated": seconds_to_timestamp(self.update_time),
        "flavor": {
            "id": self.flavor_ref,
            "links": [{
                "href": absolutize_url("{0}/flavors/{1}".format(
                    tenant_id, self.flavor_ref)),
                "rel": "bookmark"
            }],
        },
        "image": {
            "id": self.image_ref,
            # Bug fix: the bookmark link previously interpolated
            # self.flavor_ref; the image link must use self.image_ref.
            "links": [{
                "href": absolutize_url("{0}/images/{1}".format(
                    tenant_id, self.image_ref)),
                "rel": "bookmark"
            }]
        } if self.image_ref is not None else '',
        "links": self.links_json(absolutize_url),
        "key_name": self.key_name,
        "metadata": self.metadata,
        "name": self.server_name,
        "tenant_id": tenant_id,
        "status": self.status
    })
    return template
def advance_time(self, request):
    """
    Advance the session clock by the number of seconds given in the JSON
    request body's ``amount`` field and report the new current time.
    """
    payload = json.loads(request.content.read())
    delta = payload["amount"]
    self.clock.advance(delta)
    request.setResponseCode(200)
    response = {
        "advanced": delta,
        "now": seconds_to_timestamp(self.clock.seconds()),
    }
    return json.dumps(response)
def test_seconds_to_timestamp_default_timestamp(self):
    """
    :func:`helper.seconds_to_timestamp` returns a timestamp matching the
    seconds since the epoch given, using the default format string
    ``%Y-%m-%dT%H:%M:%S.%fZ`` when no format string is provided.
    """
    for seconds, expected in self.matches:
        actual = helper.seconds_to_timestamp(seconds)
        self.assertEqual(expected, actual)
def test_seconds_to_timestamp_provided_timestamp(self):
    """
    :func:`helper.seconds_to_timestamp` uses the provided timestamp
    format to format the seconds.
    """
    # Map each strftime-style format to its expected rendering of t=0.
    cases = {
        "%m-%d-%Y %H:%M:%S": "01-01-1970 00:00:00",
        "%Y-%m-%d": "1970-01-01",
        "%H %M %S (%f)": "00 00 00 (000000)",
    }
    for fmt, expected in cases.items():
        self.assertEqual(expected, helper.seconds_to_timestamp(0, fmt))
def get_users_details(self, request):
    """
    Return detailed account information for the requested user: email,
    name, user ID, account configuration and status information.
    """
    # ``request.args`` values are lists of bytes; take the first "name".
    username = request.args.get(b"name")[0].decode("utf-8")
    session = self.core.sessions.session_for_username_password(
        username, "test")
    user_details = {
        "RAX-AUTH:domainId": session.tenant_id,
        "id": session.user_id,
        "enabled": True,
        "username": session.username,
        "email": "*****@*****.**",
        "RAX-AUTH:defaultRegion": "ORD",
        "created": seconds_to_timestamp(time.time()),
        "updated": seconds_to_timestamp(time.time()),
    }
    return json.dumps({"user": user_details})
def _verify_and_update_lb_state(store, lb_id, set_state=True,
                                current_timestamp=None):
    """
    Based on the current state, the metadata on the lb and the time since
    the LB has been in that state, set the appropriate state in store.lbs

    Note: Reconsider if update metadata is implemented

    :param store: object holding the ``lbs`` and ``meta`` mappings for
        all load balancers.
    :param lb_id: ID of the load balancer whose state is being advanced.
    :param bool set_state: when True, an ACTIVE LB may be pushed into a
        transitional or error state by its metadata flags.
    :param current_timestamp: seconds since epoch used as "now" for the
        state-transition timing.
    """
    current_timestring = seconds_to_timestamp(current_timestamp)
    if store.lbs[lb_id]["status"] == "BUILD":
        # BUILD lasts for the "lb_building" duration (default 10s), then
        # set_resource_status flips it to the next state.
        store.meta[lb_id][
            "lb_building"] = store.meta[lb_id]["lb_building"] or 10
        store.lbs[lb_id]["status"] = set_resource_status(
            store.lbs[lb_id]["updated"]["time"],
            store.meta[lb_id]["lb_building"],
            current_timestamp=current_timestamp) or "BUILD"
    elif store.lbs[lb_id]["status"] == "ACTIVE" and set_state:
        # Metadata flags push an ACTIVE LB into a transitional/error
        # state; later flags win if several are present.
        if "lb_pending_update" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = "PENDING-UPDATE"
            log.msg("here")
            log.msg(store.lbs[lb_id]["status"])
        if "lb_pending_delete" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = "PENDING-DELETE"
        if "lb_error_state" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = "ERROR"
        # Record when the transition out of ACTIVE happened.
        store.lbs[lb_id]["updated"]["time"] = current_timestring
    elif store.lbs[lb_id]["status"] == "PENDING-UPDATE":
        # PENDING-UPDATE expires back to ACTIVE after its configured
        # duration (via set_resource_status).
        if "lb_pending_update" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = set_resource_status(
                store.lbs[lb_id]["updated"]["time"],
                store.meta[lb_id]["lb_pending_update"],
                current_timestamp=current_timestamp) or "PENDING-UPDATE"
    elif store.lbs[lb_id]["status"] == "PENDING-DELETE":
        # PENDING-DELETE lasts for the "lb_pending_delete" duration
        # (default 10s) before becoming DELETED.
        store.meta[lb_id][
            "lb_pending_delete"] = store.meta[lb_id]["lb_pending_delete"] or 10
        store.lbs[lb_id]["status"] = set_resource_status(
            store.lbs[lb_id]["updated"]["time"],
            store.meta[lb_id]["lb_pending_delete"], "DELETED",
            current_timestamp=current_timestamp) or "PENDING-DELETE"
        store.lbs[lb_id]["updated"]["time"] = current_timestring
    elif store.lbs[lb_id]["status"] == "DELETED":
        # see del_load_balancer above for an explanation of this state change.
        store.lbs[lb_id]["status"] = set_resource_status(
            store.lbs[lb_id]["updated"]["time"], 3600, "DELETING-NOW",
            current_timestamp=current_timestamp) or "DELETED"
        # Once the LB reaches DELETING-NOW it is purged from the store.
        if store.lbs[lb_id]["status"] == "DELETING-NOW":
            del store.lbs[lb_id]
def detail_json(self, absolutize_url):
    """
    Long-form JSON-serializable object representation of this server, as
    returned by either a GET on this individual server or a member in the
    list returned by the list-details request.
    """
    template = self.static_defaults.copy()
    tenant_id = self.collection.tenant_id
    template.update(
        {
            "id": self.server_id,
            "OS-DCF:diskConfig": self.disk_config,
            "OS-EXT-STS:vm_state": self.status,
            "addresses": self.addresses_json(),
            "created": seconds_to_timestamp(self.creation_time),
            "updated": seconds_to_timestamp(self.update_time),
            "flavor": {
                "id": self.flavor_ref,
                "links": [
                    {
                        "href": absolutize_url("{0}/flavors/{1}".format(tenant_id, self.flavor_ref)),
                        "rel": "bookmark",
                    }
                ],
            },
            "image": {
                "id": self.image_ref,
                # Bug fix: the bookmark link previously interpolated
                # self.flavor_ref; the image link must use self.image_ref.
                "links": [
                    {"href": absolutize_url("{0}/images/{1}".format(tenant_id, self.image_ref)), "rel": "bookmark"}
                ],
            } if self.image_ref is not None else "",
            "links": self.links_json(absolutize_url),
            "key_name": self.key_name,
            "metadata": self.metadata,
            "name": self.server_name,
            "tenant_id": tenant_id,
            "status": self.status,
        }
    )
    return template
def test_seconds_to_timestamp_provided_timestamp(self):
    """
    :func:`helper.seconds_to_timestamp` uses the provided timestamp
    format to format the seconds.
    """
    cases = [("%m-%d-%Y %H:%M:%S", "01-01-1970 00:00:00"),
             ("%Y-%m-%d", "1970-01-01"),
             ("%H %M %S (%f)", "00 00 00 (000000)")]
    # Each format renders the epoch (t=0) to its expected string.
    for fmt, expected in cases:
        self.assertEqual(expected, helper.seconds_to_timestamp(0, fmt))
def bulk_add_nodes_to_load_balancer_pools(self, request):
    """
    Add multiple nodes to multiple load balancer pools.

    Returns a 400 if the lb pool_id is not a uuid.  Returns a 409 if the
    lb pool_id does not exist or the cloud server already exists on the
    lb pool.

    http://docs.rcv3.apiary.io/#post-%2Fv3%2F%7Btenant_id%7D%2Fload_balancer_pools%2Fnodes

    TODO: blow up with a 500 and verify if the given server exists in nova.
    """
    body = json.loads(request.content.read())
    added_nodes = []
    error_response = {"errors": []}
    # Default to 201 so an empty request body does not reference an
    # uninitialized status code below.
    response_code = 201
    # First pass: validate every entry before mutating any pool.
    for each in body:
        pool_id = each['load_balancer_pool']['id']
        try:
            UUID(pool_id, version=4)
        except (ValueError, AttributeError):
            request.setResponseCode(400)
            return json.dumps(
                'The input was not in the correct format. Please reference '
                'the documentation at http://docs.rcv3.apiary.io for '
                'further assistance.')
        pool = self._pool_by_id(pool_id)
        if pool is None:
            response_code = 409
            error_response["errors"].append("Load Balancer Pool {0} does "
                                            "not exist".format(pool_id))
        elif pool.node_by_cloud_server(each['cloud_server']['id']):
            response_code = 409
            error_response["errors"].append(
                "Cloud Server {0} is already a "
                "member of Load Balancer Pool "
                "{1}".format(each['cloud_server']['id'], pool_id))
    # Second pass: only performed when every entry validated cleanly.
    if not error_response['errors']:
        for add in body:
            # Bug fix: look the pool up for *this* entry.  Previously the
            # pool left over from the last validation iteration was
            # reused, attaching every node to the wrong pool.
            pool = self._pool_by_id(add['load_balancer_pool']['id'])
            node = LoadBalancerPoolNode(
                created=seconds_to_timestamp(self.clock.seconds(),
                                             timestamp_format),
                load_balancer_pool=pool,
                cloud_server=add['cloud_server']['id'])
            pool.nodes.append(node)
            added_nodes.append(node)
            response_code = 201
    request.setResponseCode(response_code)
    if response_code == 201:
        return json.dumps([n.short_json() for n in added_nodes])
    else:
        return json.dumps(error_response)
def advance_time(self, request):
    """
    Advance the session clock by the number of seconds given in the JSON
    request body's ``amount`` field and report the new current time.
    """
    amount = json_from_request(request)['amount']
    self.clock.advance(amount)
    request.setResponseCode(200)
    return json.dumps({
        "advanced": amount,
        "now": seconds_to_timestamp(self.clock.seconds()),
    })
def get_users_details(self, request):
    """
    Returns response with detailed account information about each user
    including email, name, user ID, account configuration and status
    information.
    """
    # ``request.args`` values are lists of bytes; take the first "name".
    username = request.args.get(b"name")[0].decode("utf-8")
    # Authenticate with a fixed dummy password to obtain the session for
    # this username; presumably the mock session store ignores the
    # password here - verify against session_for_username_password.
    session = self.core.sessions.session_for_username_password(
        username, "test")
    # "created"/"updated" are both stamped with the current wall-clock
    # time on every call.
    return json.dumps(
        dict(
            user={
                "RAX-AUTH:domainId": session.tenant_id,
                "id": session.user_id,
                "enabled": True,
                "username": session.username,
                "email": "*****@*****.**",
                "RAX-AUTH:defaultRegion": "ORD",
                "created": seconds_to_timestamp(time.time()),
                "updated": seconds_to_timestamp(time.time())
            }))
def test_seconds_to_timestamp_default_timestamp(self):
    """
    :func:`helper.seconds_to_timestamp` returns a timestamp matching the
    seconds since the epoch given, using the default format string
    ``%Y-%m-%dT%H:%M:%S.%fZ`` when no format string is provided.
    """
    cases = [(0, "1970-01-01T00:00:00.000000Z"),
             (1.5, "1970-01-01T00:00:01.500000Z"),
             (121.4005, "1970-01-01T00:02:01.400500Z")]
    for seconds, expected in cases:
        self.assertEqual(expected, helper.seconds_to_timestamp(seconds))
def bulk_add_nodes_to_load_balancer_pools(self, request):
    """
    Add multiple nodes to multiple load balancer pools.

    Returns a 400 if the lb pool_id is not a uuid.  Returns a 409 if the
    lb pool_id does not exist or the cloud server already exists on the
    lb pool.

    http://docs.rcv3.apiary.io/#post-%2Fv3%2F%7Btenant_id%7D%2Fload_balancer_pools%2Fnodes

    TODO: blow up with a 500 and verify if the given server exists in nova.
    """
    body = json_from_request(request)
    added_nodes = []
    error_response = {"errors": []}
    # Default to 201 so an empty request body does not reference an
    # uninitialized status code below.
    response_code = 201
    # First pass: validate every entry before mutating any pool.
    for each in body:
        pool_id = each['load_balancer_pool']['id']
        try:
            UUID(pool_id, version=4)
        except (ValueError, AttributeError):
            request.setResponseCode(400)
            return json.dumps('The input was not in the correct format. Please reference '
                              'the documentation at http://docs.rcv3.apiary.io for '
                              'further assistance.')
        pool = self._pool_by_id(pool_id)
        if pool is None:
            response_code = 409
            error_response["errors"].append("Load Balancer Pool {0} does "
                                            "not exist".format(pool_id))
        elif pool.node_by_cloud_server(each['cloud_server']['id']):
            response_code = 409
            error_response["errors"].append("Cloud Server {0} is already a "
                                            "member of Load Balancer Pool "
                                            "{1}".format(each['cloud_server']['id'], pool_id))
    # Second pass: only performed when every entry validated cleanly.
    if not error_response['errors']:
        for add in body:
            # Bug fix: look the pool up for *this* entry.  Previously the
            # pool left over from the last validation iteration was
            # reused, attaching every node to the wrong pool.
            pool = self._pool_by_id(add['load_balancer_pool']['id'])
            node = LoadBalancerPoolNode(
                created=seconds_to_timestamp(self.clock.seconds(),
                                             timestamp_format),
                load_balancer_pool=pool,
                cloud_server=add['cloud_server']['id'])
            pool.nodes.append(node)
            added_nodes.append(node)
            response_code = 201
    request.setResponseCode(response_code)
    if response_code == 201:
        return json.dumps([n.short_json() for n in added_nodes])
    else:
        return json.dumps(error_response)
def _verify_and_update_lb_state(store, lb_id, set_state=True,
                                current_timestamp=None):
    """
    Based on the current state, the metadata on the lb and the time since
    the LB has been in that state, set the appropriate state in store.lbs

    Note: Reconsider if update metadata is implemented

    :param store: object holding the ``lbs`` and ``meta`` mappings for
        all load balancers.
    :param lb_id: ID of the load balancer whose state is being advanced.
    :param bool set_state: when True, an ACTIVE LB may be pushed into a
        transitional or error state by its metadata flags.
    :param current_timestamp: seconds since epoch used as "now" for the
        state-transition timing.
    """
    current_timestring = seconds_to_timestamp(current_timestamp)
    if store.lbs[lb_id]["status"] == "BUILD":
        # BUILD lasts for the "lb_building" duration (default 10s).
        store.meta[lb_id]["lb_building"] = store.meta[lb_id]["lb_building"] or 10
        store.lbs[lb_id]["status"] = set_resource_status(
            store.lbs[lb_id]["updated"]["time"],
            store.meta[lb_id]["lb_building"],
            current_timestamp=current_timestamp
        ) or "BUILD"
    elif store.lbs[lb_id]["status"] == "ACTIVE" and set_state:
        # Metadata flags push an ACTIVE LB into a transitional or error
        # state; later flags take precedence when several are present.
        if "lb_pending_update" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = "PENDING-UPDATE"
            log.msg("here")
            log.msg(store.lbs[lb_id]["status"])
        if "lb_pending_delete" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = "PENDING-DELETE"
        if "lb_error_state" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = "ERROR"
        # Record when the LB left the ACTIVE state.
        store.lbs[lb_id]["updated"]["time"] = current_timestring
    elif store.lbs[lb_id]["status"] == "PENDING-UPDATE":
        # PENDING-UPDATE expires back to ACTIVE after its duration.
        if "lb_pending_update" in store.meta[lb_id]:
            store.lbs[lb_id]["status"] = set_resource_status(
                store.lbs[lb_id]["updated"]["time"],
                store.meta[lb_id]["lb_pending_update"],
                current_timestamp=current_timestamp
            ) or "PENDING-UPDATE"
    elif store.lbs[lb_id]["status"] == "PENDING-DELETE":
        # PENDING-DELETE lasts for the "lb_pending_delete" duration
        # (default 10s) before becoming DELETED.
        store.meta[lb_id]["lb_pending_delete"] = store.meta[lb_id]["lb_pending_delete"] or 10
        store.lbs[lb_id]["status"] = set_resource_status(
            store.lbs[lb_id]["updated"]["time"],
            store.meta[lb_id]["lb_pending_delete"], "DELETED",
            current_timestamp=current_timestamp
        ) or "PENDING-DELETE"
        store.lbs[lb_id]["updated"]["time"] = current_timestring
    elif store.lbs[lb_id]["status"] == "DELETED":
        # see del_load_balancer above for an explanation of this state change.
        store.lbs[lb_id]["status"] = set_resource_status(
            store.lbs[lb_id]["updated"]["time"], 3600, "DELETING-NOW",
            current_timestamp=current_timestamp
        ) or "DELETED"
        # Once the LB reaches DELETING-NOW it is purged from the store.
        if store.lbs[lb_id]["status"] == "DELETING-NOW":
            del store.lbs[lb_id]
def _add_node_created_feeds(self, nodes):
    """
    Record a "Node successfully created..." feed entry for each of the
    given nodes.

    :param list nodes: List of :obj:`Node` being created
    """
    # This format is not documented publicly and was confirmed via email
    # from CLB team.  However, https://jira.rax.io/browse/CLB-132 issue
    # has been created to make it public
    summary_template = (
        "Node successfully created with address: '{address}', port: '{port}', "
        "condition: '{condition}', weight: '{weight}'")
    for node in nodes:
        entry = (summary_template.format(**node.as_json()),
                 seconds_to_timestamp(self.clock.seconds()))
        node.feed_events.append(entry)
def create_server(self, request, tenant_id):
    """
    Returns a generic create server response, with status 'ACTIVE'.
    """
    try:
        content = json.loads(request.content.read())
    except ValueError:
        request.setResponseCode(400)
        return json.dumps(invalid_resource("Invalid JSON request body"))

    # Both {0} placeholders expand to the same random serial number.
    serial = str(randrange(9999999999))
    server_id = 'test-server{0}-id-{0}'.format(serial)
    now = seconds_to_timestamp(self._session_store.clock.seconds())
    result = create_server(
        tenant_id, content['server'], server_id, self.uri_prefix,
        s_cache=self._server_cache_for_tenant(tenant_id),
        current_time=now)
    # result[1] is the HTTP status code, result[0] the response body.
    request.setResponseCode(result[1])
    return json.dumps(result[0])
def create_server(self, request, tenant_id):
    """
    Returns a generic create server response, with status 'ACTIVE'.

    :param request: request object exposing ``content.read()`` and
        ``setResponseCode``, whose body is the JSON creation payload.
    :param tenant_id: tenant that will own the new server.
    """
    try:
        content = json.loads(request.content.read())
    except ValueError:
        request.setResponseCode(400)
        return json.dumps(invalid_resource("Invalid JSON request body"))
    # Both {0} placeholders expand to the same random serial number.
    server_id = 'test-server{0}-id-{0}'.format(str(randrange(9999999999)))
    response_data = create_server(
        tenant_id, content['server'], server_id, self.uri_prefix,
        s_cache=self._server_cache_for_tenant(tenant_id),
        current_time=seconds_to_timestamp(
            self._session_store.clock.seconds()))
    # response_data[1] is the HTTP status code, response_data[0] the body.
    request.setResponseCode(response_data[1])
    return json.dumps(response_data[0])
def add_load_balancer(self, tenant_id, lb_info, lb_id, current_timestamp):
    """
    Returns response of a newly created load balancer with response
    code 202, and adds the new lb to the store's lbs.
    Note: ``store.lbs`` has tenant_id added as an extra key in comparison
    to the lb_example.

    :param string tenant_id: Tenant ID who will own this load balancer.
    :param dict lb_info: Configuration for the load balancer.  See
        Openstack docs for creating CLBs.
    :param string lb_id: Unique ID for this load balancer.
    :param float current_timestamp: The time since epoch when the CLB is
        created, measured in seconds.
    """
    status = "ACTIVE"

    # Loadbalancers metadata is a list object, creating a metadata store
    # so we dont have to deal with the list
    meta = {}
    if "metadata" in lb_info:
        for each in lb_info["metadata"]:
            meta.update({each["key"]: each["value"]})
    self.meta[lb_id] = meta
    log.msg(self.meta)

    # The "lb_building" metadata flag keeps the new LB in BUILD status.
    if "lb_building" in self.meta[lb_id]:
        status = "BUILD"

    # Add tenant_id and nodeCount to self.lbs
    current_timestring = seconds_to_timestamp(current_timestamp)
    self.lbs[lb_id] = load_balancer_example(lb_info, lb_id, status,
                                            current_timestring)
    self.lbs[lb_id].update({"tenant_id": tenant_id})
    self.lbs[lb_id].update(
        {"nodeCount": len(self.lbs[lb_id].get("nodes", []))})

    # and remove before returning response for add lb
    new_lb = _lb_without_tenant(self, lb_id)
    return {'loadBalancer': new_lb}, 202