def get(self):
    """Returns a list of virtual ips attached to a specific
    Load Balancer.

    :param load_balancer_id: id of lb

    Url:
       GET /loadbalancers/{load_balancer_id}/virtualips

    Returns: dict
    """
    tenant_id = get_limited_to_project(request.headers)
    if not self.lbid:
        response.status = 400
        return dict(
            message="Bad Request",
            details="Load Balancer ID not provided"
        )
    with db_session() as session:
        vip = (
            session.query(Vip.id, Vip.ip)
            .join(LoadBalancer.devices)
            .join(Device.vip)
            .filter(LoadBalancer.id == self.lbid)
            .filter(LoadBalancer.tenantid == tenant_id)
            .first()
        )
        if not vip:
            session.rollback()
            response.status = 404
            return dict(
                message="Not Found",
                details="Load Balancer ID not valid"
            )
        resp = {
            "virtualIps": [{
                "id": vip.id,
                "address": str(ipaddress.IPv4Address(vip.ip)),
                "type": "PUBLIC",
                "ipVersion": "IPV4"
            }]
        }
        session.rollback()
        return resp
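For reference, a minimal client sketch for this endpoint; the host, token, and IDs are placeholders, not part of the API above.

import requests

# Hypothetical deployment and credentials, for illustration only.
BASE = "http://dev.server:8080"
HEADERS = {"Accept": "application/json", "X-Auth-Token": "<token>"}

resp = requests.get(BASE + "/loadbalancers/1/virtualips", headers=HEADERS)
# Expected 200 body shape:
# {"virtualIps": [{"id": 1, "address": "15.0.0.10",
#                  "type": "PUBLIC", "ipVersion": "IPV4"}]}
print(resp.status_code, resp.json())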
def post(self, body=None):
    """Archive the load balancer's logs to object storage.

    :param load_balancer_id: id of lb

    Returns: None
    """
    if self.lbid is None:
        raise ClientSideError('Load Balancer ID has not been supplied')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        load_balancer = session.query(LoadBalancer).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.status != 'DELETED').\
            first()
        if load_balancer is None:
            session.rollback()
            raise NotFound('Load Balancer not found')
        if load_balancer.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                'Cannot get logs from a Load Balancer in a non-ACTIVE '
                'state, current state: {0}'.format(load_balancer.status)
            )
        load_balancer.status = 'PENDING_UPDATE'
        device = session.query(
            Device.id, Device.name, Device.status
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        session.commit()
        data = {
            'deviceid': device.id
        }
        if body.objectStoreType != Unset:
            data['objectStoreType'] = body.objectStoreType.lower()
        else:
            data['objectStoreType'] = 'swift'
        if body.objectStoreBasePath != Unset:
            data['objectStoreBasePath'] = body.objectStoreBasePath
        else:
            data['objectStoreBasePath'] = conf.swift.swift_basepath
        if body.objectStoreEndpoint != Unset:
            data['objectStoreEndpoint'] = body.objectStoreEndpoint
        else:
            data['objectStoreEndpoint'] = '{0}/{1}'.\
                format(conf.swift.swift_endpoint.rstrip('/'), tenant_id)
        if body.authToken != Unset:
            data['authToken'] = body.authToken
        else:
            data['authToken'] = request.headers.get('X-Auth-Token')
        submit_job('ARCHIVE', device.name, data, self.lbid)
        return
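A hedged example of driving the archive handler above; the field names come from the handler itself, while the /logs path, host, and token are assumptions for illustration.

import requests

BASE = "http://dev.server:8080"   # hypothetical deployment
TOKEN = "<token>"                 # placeholder

payload = {
    "objectStoreType": "Swift",   # handler lowercases this; defaults to swift
    "objectStoreBasePath": "lb_logs",                        # else conf value
    "objectStoreEndpoint": "https://swift.example.com/v1",   # else conf value
    "authToken": TOKEN,           # else the X-Auth-Token header is used
}
# The /logs path is an assumption about how this handler is routed.
resp = requests.post(BASE + "/loadbalancers/1/logs",
                     json=payload, headers={"X-Auth-Token": TOKEN})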
def delete(self):
    """Remove a load balancer from the account.

    :param load_balancer_id: id of lb

    Urls:
       DELETE /loadbalancers/{load_balancer_id}

    Notes:
       curl -i -H "Accept: application/json" -X DELETE
       http://dev.server:8080/loadbalancers/1

    Returns: None
    """
    load_balancer_id = self.lbid
    tenant_id = get_limited_to_project(request.headers)

    # grab the lb
    with db_session() as session:
        lb = session.query(LoadBalancer).\
            filter(LoadBalancer.id == load_balancer_id).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').first()
        if lb is None:
            session.rollback()
            raise NotFound("Load Balancer ID is not valid")
        # So we can delete ERROR, but not other Immutable states
        if lb.status in ImmutableStatesNoError:
            session.rollback()
            raise ImmutableEntity(
                'Cannot delete a Load Balancer in a non-ACTIVE state'
                ', current state: {0}'.format(lb.status))
        lb.status = 'PENDING_DELETE'
        device = session.query(
            Device.id, Device.name
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == load_balancer_id).\
            first()
        counter = session.query(Counters).\
            filter(Counters.name == 'api_loadbalancers_delete').first()
        counter.value += 1
        if device is None:
            # This can happen if a device was manually deleted from the DB
            lb.status = 'DELETED'
            session.execute(loadbalancers_devices.delete().where(
                loadbalancers_devices.c.loadbalancer == lb.id))
            session.query(Node).\
                filter(Node.lbid == lb.id).delete()
            session.query(HealthMonitor).\
                filter(HealthMonitor.lbid == lb.id).delete()
            session.commit()
        else:
            session.commit()
            submit_job('DELETE', device.name, device.id, lb.id)
        return None
def delete(self):
    """Remove the health monitor.

    :param load_balancer_id: id of lb

    Url:
       DELETE /loadbalancers/{load_balancer_id}/healthmonitor

    Returns: void
    """
    if not self.lbid:
        raise ClientSideError('Load Balancer ID has not been supplied')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        query = session.query(
            LoadBalancer, HealthMonitor
        ).outerjoin(LoadBalancer.monitors).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.status != 'DELETED').\
            first()
        if query is None:
            session.rollback()
            raise NotFound("Load Balancer not found")
        lb, monitor = query
        if lb is None:
            session.rollback()
            raise NotFound("Load Balancer not found")
        if monitor is not None:
            session.delete(monitor)
            session.flush()
        device = session.query(
            Device.id, Device.name
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        counter = session.query(Counters).\
            filter(Counters.name == 'api_healthmonitor.delete').first()
        counter.value += 1
        session.commit()
        submit_job('UPDATE', device.name, device.id, self.lbid)
        return None
def get(self):
    """Retrieve the health monitor configuration, if one exists.

    Url:
       GET /loadbalancers/{load_balancer_id}/healthmonitor

    Returns: dict
    """
    if not self.lbid:
        raise ClientSideError('Load Balancer ID has not been supplied')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        # grab the lb
        monitor = session.query(
            HealthMonitor.type, HealthMonitor.delay,
            HealthMonitor.timeout, HealthMonitor.attempts,
            HealthMonitor.path
        ).join(LoadBalancer.monitors).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').\
            first()
        response.status = 200
        if monitor is None:
            session.rollback()
            return {}
        monitor_data = {
            'type': monitor.type,
            'delay': monitor.delay,
            'timeout': monitor.timeout,
            'attemptsBeforeDeactivation': monitor.attempts
        }
        if monitor.path:
            monitor_data['path'] = monitor.path
        counter = session.query(Counters).\
            filter(Counters.name == 'api_healthmonitor_get').first()
        counter.value += 1
        session.commit()
        return monitor_data
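An illustrative response body for the handler above; the values are invented, and "path" is omitted for CONNECT monitors.

# Illustrative 200 body for a configured HTTP monitor:
example_monitor = {
    "type": "HTTP",
    "delay": 30,
    "timeout": 30,
    "attemptsBeforeDeactivation": 2,
    "path": "/healthcheck",
}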
def get(self):
    """Return the absolute API limits for the account, applying any
    per-tenant override of maxLoadBalancers.

    Returns: dict
    """
    resp = {}
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        limits = session.query(Limits).all()
        # Get per-tenant values
        tenant_lblimit = session.query(TenantLimits.loadbalancers).\
            filter(TenantLimits.tenantid == tenant_id).scalar()
        for limit in limits:
            resp[limit.name] = limit.value
        # Set per-tenant values
        if tenant_lblimit:
            resp['maxLoadBalancers'] = tenant_lblimit
        resp = {"limits": {"absolute": {"values": resp}}}
        session.rollback()
        return resp
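An illustrative shape of the returned limits document; the limit names are the ones queried elsewhere in this API, the values are invented.

example_limits = {
    "limits": {
        "absolute": {
            "values": {
                "maxLoadBalancers": 5,
                "maxNodesPerLoadBalancer": 50,
                "maxLoadBalancerNameLength": 128,
            }
        }
    }
}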
def put(self, body=None):
    """Update the settings for a health monitor.

    :param load_balancer_id: id of lb
    :param *args: holds the posted json or xml data

    Url:
       PUT /loadbalancers/{load_balancer_id}/healthmonitor

    Returns: dict
    """
    if not self.lbid:
        raise ClientSideError('Load Balancer ID has not been supplied')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        # grab the lb
        query = session.query(LoadBalancer, HealthMonitor).\
            outerjoin(LoadBalancer.monitors).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').first()
        if query is None:
            session.rollback()
            raise NotFound("Load Balancer not found")
        lb, monitor = query
        if lb is None:
            session.rollback()
            raise NotFound('Load Balancer not found')

        # Check inputs
        if (body.type == Unset or body.type is None or
                body.delay == Unset or body.delay is None or
                body.timeout == Unset or body.timeout is None or
                body.attemptsBeforeDeactivation == Unset or
                body.attemptsBeforeDeactivation is None):
            session.rollback()
            raise ClientSideError(
                "Missing field(s): {0}, {1}, {2}, and {3} are required".
                format("type", "delay", "timeout",
                       "attemptsBeforeDeactivation"))

        data = {
            "lbid": self.lbid,
            "type": body.type,
            "delay": int(body.delay),
            "timeout": int(body.timeout),
            "attempts": int(body.attemptsBeforeDeactivation)
        }

        # Path is only valid when the type is not CONNECT
        if body.path != Unset and body.path is not None:
            if body.type == "CONNECT":
                session.rollback()
                raise ClientSideError(
                    "Path argument is invalid with CONNECT type")

            # Encode everything apart from allowed characters. The
            # ignore list in the second parameter is everything in
            # RFC3986 section 2 that isn't already ignored by
            # urllib.quote()
            data["path"] = quote(body.path, "/~+*,;:!$'[]()?&=#%")

            # Path must be non-empty and begin with a leading /
            if len(data["path"]) == 0 or data["path"][0] != "/":
                session.rollback()
                raise ClientSideError("Path must begin with leading /")

            if len(data["path"]) > self.PATH_LIMIT:
                session.rollback()
                raise ClientSideError(
                    "Path must be less than {0} characters".format(
                        self.PATH_LIMIT))
        else:
            if body.type != "CONNECT":
                session.rollback()
                raise ClientSideError("Path argument is required")
            data["path"] = None

        # Check timeout limits. Must be > 0 and limited to 1 hour
        if data["timeout"] < 1 or data["timeout"] > self.TIMEOUT_LIMIT:
            session.rollback()
            raise ClientSideError(
                "timeout must be between 1 and {0} seconds".format(
                    self.TIMEOUT_LIMIT))

        # Check delay limits. Must be > 0 and limited to 1 hour
        if data["delay"] < 1 or data["delay"] > self.DELAY_LIMIT:
            session.rollback()
            raise ClientSideError(
                "delay must be between 1 and {0} seconds".format(
                    self.DELAY_LIMIT))

        if data["timeout"] > data["delay"]:
            session.rollback()
            raise ClientSideError("timeout cannot be greater than delay")

        if data["attempts"] < 1 or data["attempts"] > 10:
            session.rollback()
            raise ClientSideError(
                "attemptsBeforeDeactivation must be between 1 and 10")

        if monitor is None:
            # This is ok for LBs that already existed without
            # monitoring. Create a new entry.
            monitor = HealthMonitor(
                lbid=self.lbid, type=data["type"], delay=data["delay"],
                timeout=data["timeout"], attempts=data["attempts"],
                path=data["path"])
            session.add(monitor)
        else:
            # Modify the existing entry.
            monitor.type = data["type"]
            monitor.delay = data["delay"]
            monitor.timeout = data["timeout"]
            monitor.attempts = data["attempts"]
            monitor.path = data["path"]

        if lb.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                'Cannot modify a Load Balancer in a non-ACTIVE state'
                ', current state: {0}'.format(lb.status))
        lb.status = 'PENDING_UPDATE'
        device = session.query(
            Device.id, Device.name, Device.status
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()

        return_data = LBMonitorResp()
        return_data.type = data["type"]
        return_data.delay = str(data["delay"])
        return_data.timeout = str(data["timeout"])
        return_data.attemptsBeforeDeactivation = str(data["attempts"])
        if data["path"] is not None and len(data["path"]) > 0:
            return_data.path = data["path"]
        counter = session.query(Counters).\
            filter(Counters.name == 'api_healthmonitor_modify').first()
        counter.value += 1
        session.commit()
        submit_job('UPDATE', device.name, device.id, lb.id)
        return return_data
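A hedged example request for this endpoint; the field names mirror the validation above, while the host and token are placeholders.

import requests

BASE = "http://dev.server:8080"   # hypothetical deployment
monitor = {
    "type": "HTTP",                   # or "CONNECT" (path not allowed)
    "delay": 30,                      # seconds between checks
    "timeout": 30,                    # must not exceed delay
    "attemptsBeforeDeactivation": 2,  # 1..10
    "path": "/healthcheck",           # required unless type is CONNECT
}
resp = requests.put(BASE + "/loadbalancers/1/healthmonitor",
                    json=monitor, headers={"X-Auth-Token": "<token>"})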
def get(self):
    """List the nodes configured for the load balancer or, when
    node_id is supplied, retrieve the configuration of node
    {node_id} of load balancer {load_balancer_id}.

    :param load_balancer_id: id of lb
    :param node_id: id of node (optional)

    Urls:
       GET /loadbalancers/{load_balancer_id}/nodes
       GET /loadbalancers/{load_balancer_id}/nodes/{node_id}

    Returns: dict
    """
    tenant_id = get_limited_to_project(request.headers)
    if not self.lbid:
        raise ClientSideError('Load Balancer ID not supplied')
    with db_session() as session:
        if not self.nodeid:
            nodes = session.query(
                Node.id, Node.address, Node.port, Node.status,
                Node.enabled, Node.weight
            ).join(LoadBalancer.nodes).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(LoadBalancer.id == self.lbid).\
                filter(LoadBalancer.status != 'DELETED').\
                all()
            node_response = {'nodes': []}
            for item in nodes:
                node = item._asdict()
                if node['enabled'] == 1:
                    node['condition'] = 'ENABLED'
                else:
                    node['condition'] = 'DISABLED'
                del node['enabled']
                if node['weight'] == 1:
                    del node['weight']
                node_response['nodes'].append(node)
        else:
            node = session.query(
                Node.id, Node.address, Node.port, Node.status,
                Node.enabled, Node.weight
            ).join(LoadBalancer.nodes).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(LoadBalancer.id == self.lbid).\
                filter(Node.id == self.nodeid).\
                first()
            if node is None:
                session.rollback()
                raise NotFound('node not found')
            node_response = node._asdict()
            if node_response['enabled'] == 1:
                node_response['condition'] = 'ENABLED'
            else:
                node_response['condition'] = 'DISABLED'
            del node_response['enabled']
            if node_response['weight'] == 1:
                del node_response['weight']
        session.commit()
        response.status = 200
        return node_response
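An illustrative list response for the handler above; note that "weight" is omitted whenever it equals 1.

# Illustrative 200 body for GET /loadbalancers/1/nodes:
example_nodes = {
    "nodes": [
        {"id": 7, "address": "10.0.0.4", "port": 80,
         "status": "ONLINE", "condition": "ENABLED"},
        {"id": 8, "address": "10.0.0.5", "port": 80,
         "status": "OFFLINE", "condition": "DISABLED", "weight": 2},
    ]
}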
def post(self, body=None):
    """Add a new node to the load balancer, or modify the
    configuration of a node on the load balancer.

    :param load_balancer_id: id of lb
    :param node_id: id of node (optional); when missing a new node
                    is added.
    :param *args: holds the posted json or xml data

    Urls:
       POST /loadbalancers/{load_balancer_id}/nodes
       PUT  /loadbalancers/{load_balancer_id}/nodes/{node_id}

    Returns: dict of the full list of nodes or the details of the
             single node
    """
    tenant_id = get_limited_to_project(request.headers)
    if self.lbid is None:
        raise ClientSideError('Load Balancer ID has not been supplied')
    if body.nodes == Unset or not len(body.nodes):
        raise ClientSideError('No nodes have been supplied')
    for node in body.nodes:
        if node.address == Unset:
            raise ClientSideError('A supplied node has no address')
        if node.port == Unset:
            raise ClientSideError(
                'Node {0} is missing a port'.format(node.address))
        if node.port < 1 or node.port > 65535:
            raise ClientSideError(
                'Node {0} port number {1} is invalid'.format(
                    node.address, node.port))
        try:
            node.address = ipfilter(node.address, conf.ip_filters)
        except IPOutOfRange:
            raise ClientSideError(
                'IP Address {0} is not allowed as a backend node'.format(
                    node.address))
        except Exception:
            raise ClientSideError(
                'IP Address {0} not valid'.format(node.address))
        if node.weight != Unset:
            try:
                weight = int(node.weight)
            except ValueError:
                raise ClientSideError('Node weight must be an integer')
            if weight < 1 or weight > 256:
                raise ClientSideError(
                    'Node weight must be between 1 and 256')
    with db_session() as session:
        load_balancer = session.query(LoadBalancer).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.status != 'DELETED').\
            first()
        if load_balancer is None:
            session.rollback()
            raise NotFound('Load Balancer not found')
        if load_balancer.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                'Cannot modify a Load Balancer in a non-ACTIVE state'
                ', current state: {0}'.format(load_balancer.status))
        load_balancer.status = 'PENDING_UPDATE'
        # check if we are over limit
        nodelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxNodesPerLoadBalancer').scalar()
        nodecount = session.query(Node).\
            filter(Node.lbid == self.lbid).count()
        if (nodecount + len(body.nodes)) > nodelimit:
            session.rollback()
            raise OverLimit(
                'Command would exceed Load Balancer node limit')
        return_data = LBNodeResp()
        return_data.nodes = []
        is_galera = False
        if load_balancer.protocol.lower() == 'galera':
            is_galera = True
        for node in body.nodes:
            is_backup = False
            if node.backup != Unset and node.backup == 'TRUE':
                is_backup = True
            # Galera load balancer sanity checking. Only backup nodes
            # may be added since a primary is presumably already
            # defined.
            if is_galera and not is_backup:
                raise ClientSideError(
                    'Galera load balancer may have only one primary node')
            if node.condition == 'DISABLED':
                enabled = 0
                node_status = 'OFFLINE'
            else:
                enabled = 1
                node_status = 'ONLINE'
            weight = 1
            if node.weight != Unset:
                weight = node.weight
            new_node = Node(
                lbid=self.lbid, port=node.port, address=node.address,
                enabled=enabled, status=node_status, weight=weight,
                backup=int(is_backup))
            session.add(new_node)
            session.flush()
            if new_node.enabled:
                condition = 'ENABLED'
            else:
                condition = 'DISABLED'
            if weight == 1:
                return_data.nodes.append(
                    NodeResp(id=new_node.id, port=new_node.port,
                             address=new_node.address,
                             condition=condition,
                             status=new_node.status))
            else:
                return_data.nodes.append(
                    NodeResp(id=new_node.id, port=new_node.port,
                             address=new_node.address,
                             condition=condition,
                             status=new_node.status, weight=weight))
        device = session.query(
            Device.id, Device.name, Device.status
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        session.commit()
        submit_job('UPDATE', device.name, device.id, self.lbid)
        return return_data
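A sketch of adding a node through this endpoint, with placeholder host, token, and addresses.

import requests

BASE = "http://dev.server:8080"   # hypothetical deployment
payload = {
    "nodes": [
        {"address": "10.0.0.6", "port": 80,
         "condition": "ENABLED", "weight": 2},
    ]
}
resp = requests.post(BASE + "/loadbalancers/1/nodes",
                     json=payload, headers={"X-Auth-Token": "<token>"})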
def get(self, status=None):
    """Fetch a list of load balancers, or the details of one balancer
    if load_balancer_id is not empty.

    :param load_balancer_id: id of lb we want to get; if none, a list
                             of all load balancers is returned

    Urls:
       GET /loadbalancers
       List all load balancers configured for the account.

       GET /loadbalancers/{load_balancer_id}
       List details of the specified load balancer.

    Returns: dict
    """
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        # if we don't have an id then we want a list of the load
        # balancers owned by this tenant
        if not self.lbid:
            if status and status == 'DELETED':
                lbs = session.query(
                    LoadBalancer.name, LoadBalancer.id,
                    LoadBalancer.protocol, LoadBalancer.port,
                    LoadBalancer.algorithm, LoadBalancer.status,
                    LoadBalancer.created, LoadBalancer.updated,
                    LoadBalancer.timeout, LoadBalancer.retries
                ).filter(LoadBalancer.tenantid == tenant_id).\
                    filter(LoadBalancer.status == 'DELETED').all()
            else:
                lbs = session.query(
                    LoadBalancer.name, LoadBalancer.id,
                    LoadBalancer.protocol, LoadBalancer.port,
                    LoadBalancer.algorithm, LoadBalancer.status,
                    LoadBalancer.created, LoadBalancer.updated,
                    LoadBalancer.timeout, LoadBalancer.retries
                ).filter(LoadBalancer.tenantid == tenant_id).\
                    filter(LoadBalancer.status != 'DELETED').all()
            load_balancers = {'loadBalancers': []}
            for lb in lbs:
                lb = lb._asdict()
                lb['nodeCount'] = session.query(Node).\
                    filter(Node.lbid == lb['id']).count()
                lb['id'] = str(lb['id'])
                # Unset options get set to default values
                lb['options'] = {}
                if lb['timeout']:
                    lb['options']['timeout'] = lb['timeout']
                else:
                    lb['options']['timeout'] = self.LB_TIMEOUT_MS
                if lb['retries']:
                    lb['options']['retries'] = lb['retries']
                else:
                    lb['options']['retries'] = self.LB_RETRIES
                del lb['timeout']
                del lb['retries']
                load_balancers['loadBalancers'].append(lb)
        else:
            load_balancers = session.query(
                LoadBalancer.name, LoadBalancer.id,
                LoadBalancer.protocol, LoadBalancer.port,
                LoadBalancer.algorithm, LoadBalancer.status,
                LoadBalancer.created, LoadBalancer.updated,
                LoadBalancer.errmsg, LoadBalancer.timeout,
                LoadBalancer.retries,
                Vip.id.label('vipid'), Vip.ip
            ).join(LoadBalancer.devices).\
                outerjoin(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(LoadBalancer.id == self.lbid).\
                first()
            if not load_balancers:
                session.rollback()
                raise NotFound("Load Balancer ID not found")
            load_balancers = load_balancers._asdict()
            load_balancers['nodeCount'] = session.query(Node).\
                filter(Node.lbid == load_balancers['id']).count()
            if load_balancers['vipid']:
                load_balancers['virtualIps'] = [{
                    "id": load_balancers['vipid'],
                    "type": "PUBLIC",
                    "ipVersion": "IPV4",
                    "address": str(ipaddress.IPv4Address(
                        load_balancers['ip'])),
                }]
                del load_balancers['ip']
                del load_balancers['vipid']
            else:
                # We are still assigning a VIP
                load_balancers['virtualIps'] = [{
                    "id": None,
                    "type": "ASSIGNING",
                    "ipVersion": "IPV4",
                    "address": None
                }]
                del load_balancers['vipid']
            nodes = session.query(
                Node.id, Node.address, Node.port, Node.status,
                Node.enabled, Node.weight
            ).join(LoadBalancer.nodes).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(LoadBalancer.id == self.lbid).\
                all()
            load_balancers['id'] = str(load_balancers['id'])
            if not load_balancers['errmsg']:
                load_balancers['statusDescription'] = ''
            else:
                load_balancers['statusDescription'] = \
                    load_balancers['errmsg']
            del load_balancers['errmsg']
            load_balancers['nodes'] = []
            for item in nodes:
                node = item._asdict()
                if node['enabled'] == 1:
                    node['condition'] = 'ENABLED'
                else:
                    node['condition'] = 'DISABLED'
                del node['enabled']
                node['port'] = str(node['port'])
                node['id'] = str(node['id'])
                if node['weight'] == 1:
                    del node['weight']
                load_balancers['nodes'].append(node)
            # Unset options get set to default values
            load_balancers['options'] = {}
            if load_balancers['timeout']:
                load_balancers['options']['timeout'] = \
                    load_balancers['timeout']
            else:
                load_balancers['options']['timeout'] = self.LB_TIMEOUT_MS
            if load_balancers['retries']:
                load_balancers['options']['retries'] = \
                    load_balancers['retries']
            else:
                load_balancers['options']['retries'] = self.LB_RETRIES
            del load_balancers['timeout']
            del load_balancers['retries']
        counter = session.query(Counters).\
            filter(Counters.name == 'api_loadbalancers_get').first()
        counter.value += 1
        session.commit()
        response.status = 200
        return load_balancers
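An illustrative detail response assembled by the handler above; all values are invented.

example_lb = {
    "id": "1", "name": "my_lb", "protocol": "HTTP", "port": 80,
    "algorithm": "ROUND_ROBIN", "status": "ACTIVE",
    "created": "2014-01-01T00:00:00", "updated": "2014-01-02T00:00:00",
    "statusDescription": "",
    "nodeCount": 1,
    "virtualIps": [{"id": 5, "type": "PUBLIC",
                    "ipVersion": "IPV4", "address": "15.0.0.10"}],
    "nodes": [{"id": "7", "address": "10.0.0.4", "port": "80",
               "status": "ONLINE", "condition": "ENABLED"}],
    "options": {"timeout": 30000, "retries": 3},
}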
def put(self, body=None):
    """Update a node condition: ENABLED or DISABLED."""
    if not self.lbid:
        raise ClientSideError('Load Balancer ID has not been supplied')
    if not self.nodeid:
        raise ClientSideError('Node ID has not been supplied')
    if body.condition == Unset and body.weight == Unset:
        raise ClientSideError('Node condition or weight is required')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        # grab the lb
        lb = session.query(LoadBalancer).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').first()
        if lb is None:
            session.rollback()
            raise NotFound('Load Balancer ID is not valid')
        node = session.query(Node).\
            filter(Node.lbid == self.lbid).\
            filter(Node.id == self.nodeid).first()
        if node is None:
            session.rollback()
            raise NotFound('Node ID is not valid')
        if body.condition != Unset:
            if body.condition == 'DISABLED':
                nodecount = session.query(Node).\
                    filter(Node.lbid == self.lbid).\
                    filter(Node.enabled == 1).count()
                if nodecount <= 1:
                    session.rollback()
                    raise ClientSideError(
                        "Cannot disable the last enabled node")
                node.enabled = 0
                node.status = 'OFFLINE'
            else:
                node.enabled = 1
                node.status = 'ONLINE'
        if body.weight != Unset:
            try:
                node.weight = int(body.weight)
            except ValueError:
                raise ClientSideError('Node weight must be an integer')
            if node.weight < 1 or node.weight > 256:
                raise ClientSideError(
                    'Node weight must be between 1 and 256')
        if lb.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                'Cannot modify a Load Balancer in a non-ACTIVE state'
                ', current state: {0}'.format(lb.status))
        lb.status = 'PENDING_UPDATE'
        device = session.query(
            Device.id, Device.name, Device.status
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        session.commit()
        submit_job('UPDATE', device.name, device.id, lb.id)
        return ''
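A minimal sketch of disabling a node, with placeholder host, token, and IDs.

import requests

BASE = "http://dev.server:8080"   # hypothetical deployment
resp = requests.put(BASE + "/loadbalancers/1/nodes/7",
                    json={"condition": "DISABLED"},
                    headers={"X-Auth-Token": "<token>"})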
def put(self, body=None):
    """Update a load balancer's name, algorithm, or connection
    options (timeout, retries)."""
    if not self.lbid:
        raise ClientSideError('Load Balancer ID is required')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        # grab the lb
        lb = session.query(LoadBalancer).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').first()
        if lb is None:
            session.rollback()
            raise NotFound('Load Balancer ID is not valid')
        if lb.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                'Cannot modify a Load Balancer in a non-ACTIVE state'
                ', current state: {0}'.format(lb.status))
        if body.name != Unset:
            namelimit = session.query(Limits.value).\
                filter(Limits.name == 'maxLoadBalancerNameLength').scalar()
            if len(body.name) > namelimit:
                session.rollback()
                raise ClientSideError(
                    'Length of Load Balancer name too long')
            lb.name = body.name
        if body.algorithm != Unset:
            lb.algorithm = body.algorithm
        if body.options:
            if body.options.timeout != Unset:
                try:
                    timeout_ms = int(body.options.timeout)
                    if timeout_ms < 0 or timeout_ms > self.LB_TIMEOUT_MAX:
                        raise ClientSideError(
                            'timeout must be between 0 and {0} ms'.format(
                                self.LB_TIMEOUT_MAX))
                    lb.timeout = timeout_ms
                except ValueError:
                    raise ClientSideError('timeout must be an integer')
            if body.options.retries != Unset:
                try:
                    retries = int(body.options.retries)
                    if retries < 0 or retries > self.LB_RETRIES_MAX:
                        raise ClientSideError(
                            'retries must be between 0 and {0}'.format(
                                self.LB_RETRIES_MAX))
                    lb.retries = retries
                except ValueError:
                    raise ClientSideError('retries must be an integer')
        lb.status = 'PENDING_UPDATE'
        device = session.query(
            Device.id, Device.name, Device.status
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        counter = session.query(Counters).\
            filter(Counters.name == 'api_loadbalancers_modify').first()
        counter.value += 1
        session.commit()
        submit_job('UPDATE', device.name, device.id, lb.id)
        return ''
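A hedged example of an update request; the options keys match the handler above, while the host and token are placeholders.

import requests

BASE = "http://dev.server:8080"   # hypothetical deployment
payload = {
    "name": "my_lb",
    "algorithm": "ROUND_ROBIN",
    "options": {"timeout": 60000, "retries": 5},
}
resp = requests.put(BASE + "/loadbalancers/1",
                    json=payload, headers={"X-Auth-Token": "<token>"})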
def post(self, body=None):
    """Create a new load balancer, or add an additional load balancer
    to an existing virtual IP when virtualIps is supplied.

    :param *args: holds the posted json or xml data

    Url:
       POST /loadbalancers

    Notes:
       curl -i -H "Accept: application/json" -X POST \
       -d "data={"name": "my_lb"}" \
       http://dev.server:8080/loadbalancers

    Returns: dict
    """
    tenant_id = get_limited_to_project(request.headers)
    if body.nodes == Unset or not len(body.nodes):
        raise ClientSideError(
            'At least one backend node needs to be supplied')

    # When the load balancer is used for Galera, we need to do some
    # sanity checking of the nodes to make sure 1 and only 1 node is
    # defined as the primary node.
    if body.protocol and body.protocol.lower() == 'galera':
        is_galera = True
    else:
        is_galera = False
    num_galera_primary_nodes = 0

    for node in body.nodes:
        if node.address == Unset:
            raise ClientSideError('A supplied node has no address')
        if node.port == Unset:
            raise ClientSideError(
                'Node {0} is missing a port'.format(node.address))
        if node.port < 1 or node.port > 65535:
            raise ClientSideError(
                'Node {0} port number {1} is invalid'.format(
                    node.address, node.port))
        try:
            node.address = ipfilter(node.address, conf.ip_filters)
        except IPOutOfRange:
            raise ClientSideError(
                'IP Address {0} is not allowed as a backend node'.format(
                    node.address))
        except Exception:
            raise ClientSideError(
                'IP Address {0} not valid'.format(node.address))
        if node.weight != Unset:
            try:
                weight = int(node.weight)
            except ValueError:
                raise ClientSideError('Node weight must be an integer')
            if weight < 1 or weight > 256:
                raise ClientSideError(
                    'Node weight must be between 1 and 256')
        is_backup = False
        if node.backup != Unset and node.backup == 'TRUE':
            is_backup = True
        if is_galera and not is_backup:
            num_galera_primary_nodes += 1

    # Options defaults
    client_timeout_ms = 30000
    server_timeout_ms = 30000
    connect_timeout_ms = 30000
    connect_retries = 3
    if body.options:
        if body.options.client_timeout != Unset:
            client_timeout_ms = body.options.client_timeout
        if body.options.server_timeout != Unset:
            server_timeout_ms = body.options.server_timeout
        if body.options.connect_timeout != Unset:
            connect_timeout_ms = body.options.connect_timeout
        if body.options.connect_retries != Unset:
            connect_retries = body.options.connect_retries

    # Galera sanity checks
    if is_galera and num_galera_primary_nodes != 1:
        raise ClientSideError(
            'Galera load balancer must have exactly one primary node')

    with db_session() as session:
        lblimit = session.query(Limits.value).\
            filter(Limits.name == 'maxLoadBalancers').scalar()
        nodelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxNodesPerLoadBalancer').scalar()
        namelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxLoadBalancerNameLength').scalar()
        count = session.query(LoadBalancer).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').count()
        ports = session.query(Ports.protocol, Ports.portnum).\
            filter(Ports.enabled == 1).all()

        # Allow a per-tenant LB limit, defaulting to the global limit
        # if the per-tenant value is not set.
        tenant_lblimit = session.query(TenantLimits.loadbalancers).\
            filter(TenantLimits.tenantid == tenant_id).scalar()
        if tenant_lblimit:
            lblimit = tenant_lblimit

        if len(body.name) > namelimit:
            session.rollback()
            raise ClientSideError('Length of Load Balancer name too long')
        # TODO: this should probably be a 413, not sure how to do that yet
        if count >= lblimit:
            session.rollback()
            raise OverLimit(
                'Account has hit limit of {0} Load Balancers'.format(
                    lblimit))
        if len(body.nodes) > nodelimit:
            session.rollback()
            raise OverLimit(
                'Too many backend nodes supplied (limit is {0})'.format(
                    nodelimit))

        device = None
        old_lb = None
        # if we don't have an id then we want to create a new lb
        lb = LoadBalancer()
        lb.tenantid = tenant_id
        lb.name = body.name
        if body.protocol:
            if body.protocol.lower() in ('tcp', 'http', 'galera'):
                lb.protocol = body.protocol.upper()
            else:
                raise ClientSideError(
                    'Invalid protocol %s' % body.protocol)
        else:
            lb.protocol = 'HTTP'

        if body.port:
            if body.port < 1 or body.port > 65535:
                raise ClientSideError(
                    'Port number {0} is invalid'.format(body.port))
            # Make sure the port is valid and enabled
            valid = False
            for item in ports:
                item = item._asdict()
                if (lb.protocol == item["protocol"].upper()
                        and body.port == item["portnum"]):
                    valid = True
            if valid:
                lb.port = body.port
            else:
                raise ClientSideError(
                    'Port number {0} is invalid'.format(body.port))
        else:
            if lb.protocol == 'HTTP':
                lb.port = 80
            elif lb.protocol == 'TCP':
                lb.port = 443
            elif lb.protocol == 'GALERA':
                lb.port = 3306

        lb.status = 'BUILD'
        lb.created = None

        if body.virtualIps == Unset:
            # find a free device, locking with "for update" so
            # multiple APIs don't grab the same LB
            device = session.query(Device).\
                filter(~Device.id.in_(
                    session.query(loadbalancers_devices.c.device)
                )).\
                filter(Device.status == "OFFLINE").\
                filter(Device.pingCount == 0).\
                with_lockmode('update').\
                first()
            if device is None:
                session.rollback()
                raise ExhaustedError('No devices available')
            vip = None
        else:
            virtual_id = body.virtualIps[0].id
            # Make sure virtual ID is actually an int
            try:
                virtual_id = int(virtual_id)
            except Exception:
                session.rollback()
                raise NotFound('Invalid virtual IP provided')
            # This is an additional load balancer
            device = session.query(Device).\
                join(Device.vip).\
                filter(Vip.id == virtual_id).\
                first()
            old_lb = session.query(LoadBalancer).\
                join(LoadBalancer.devices).\
                join(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(Vip.id == virtual_id).\
                first()
            if old_lb is None:
                session.rollback()
                raise NotFound('Invalid virtual IP provided')
            if old_lb.status in ImmutableStates:
                session.rollback()
                raise ImmutableEntity(
                    'Existing Load Balancer on VIP in a non-ACTIVE state'
                    ', current state: {0}'.format(old_lb.status))
            vip = session.query(Vip).\
                filter(Vip.device == device.id).\
                first()
            old_count = session.query(LoadBalancer).\
                join(LoadBalancer.devices).\
                join(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(Vip.id == virtual_id).\
                filter(LoadBalancer.port == lb.port).\
                count()
            if old_count:
                # Error here, can have only one LB per port on a device
                session.rollback()
                raise ClientSideError(
                    'Only one load balancer per port allowed per device')

        if body.algorithm:
            lb.algorithm = body.algorithm.upper()
        else:
            lb.algorithm = 'ROUND_ROBIN'

        lb.client_timeout = client_timeout_ms
        lb.server_timeout = server_timeout_ms
        lb.connect_timeout = connect_timeout_ms
        lb.connect_retries = connect_retries

        lb.devices = [device]
        # write to database
        session.add(lb)
        session.flush()
        # refresh the lb record so we get the id back
        session.refresh(lb)

        for node in body.nodes:
            if node.condition == 'DISABLED':
                enabled = 0
                node_status = 'OFFLINE'
            else:
                enabled = 1
                node_status = 'ONLINE'
            if node.backup == 'TRUE':
                backup = 1
            else:
                backup = 0
            weight = 1
            if node.weight != Unset:
                weight = node.weight
            out_node = Node(
                lbid=lb.id, port=node.port, address=node.address,
                enabled=enabled, status=node_status, weight=weight,
                backup=backup)
            session.add(out_node)

        # now save the loadbalancer_id to the device and switch its
        # status to BUILD so the monitoring does not trigger early.
        # The gearman message code will switch to ONLINE once we know
        # everything is good
        device.status = "BUILD"
        session.flush()

        return_data = LBResp()
        return_data.id = str(lb.id)
        return_data.name = lb.name
        return_data.protocol = lb.protocol
        return_data.port = str(lb.port)
        return_data.algorithm = lb.algorithm
        return_data.status = lb.status
        return_data.created = lb.created
        return_data.updated = lb.updated
        if vip:
            vip_resp = LBVipResp(
                address=str(ipaddress.IPv4Address(vip.ip)),
                id=str(vip.id), type='PUBLIC', ipVersion='IPV4')
        else:
            vip_resp = LBVipResp(
                address=None, id=None, type='ASSIGNING',
                ipVersion='IPV4')
        return_data.virtualIps = [vip_resp]
        return_data.nodes = []
        for node in body.nodes:
            if node.weight != Unset and node.weight != 1:
                out_node = LBRespNode(
                    port=str(node.port), address=node.address,
                    condition=node.condition, weight=node.weight)
            else:
                out_node = LBRespNode(
                    port=str(node.port), address=node.address,
                    condition=node.condition)
            return_data.nodes.append(out_node)
        session.commit()
        # trigger gearman client to create the new lb
        submit_job('UPDATE', device.name, device.id, lb.id)
        return return_data
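A sketch of a create request against this endpoint; the host and token are placeholders, and the response fields noted in the comment follow LBResp above.

import requests

BASE = "http://dev.server:8080"   # hypothetical deployment
payload = {
    "name": "my_lb",
    "protocol": "HTTP",           # TCP and GALERA are also accepted
    "port": 80,
    "algorithm": "ROUND_ROBIN",
    "nodes": [{"address": "10.0.0.4", "port": 80,
               "condition": "ENABLED"}],
}
resp = requests.post(BASE + "/loadbalancers",
                     json=payload, headers={"X-Auth-Token": "<token>"})
# The returned body mirrors LBResp: id, name, protocol, port, algorithm,
# status "BUILD", created/updated timestamps, virtualIps, and nodes.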
def delete(self):
    """Remove a node from the load balancer.

    :param load_balancer_id: id of lb
    :param node_id: id of node

    Url:
       DELETE /loadbalancers/{load_balancer_id}/nodes/{node_id}

    Returns: None
    """
    node_id = self.nodeid
    if self.lbid is None:
        raise ClientSideError('Load Balancer ID has not been supplied')
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        load_balancer = session.query(LoadBalancer).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.id == self.lbid).\
            filter(LoadBalancer.status != 'DELETED').\
            first()
        if load_balancer is None:
            session.rollback()
            raise NotFound("Load Balancer not found")
        if load_balancer.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                'Cannot modify a Load Balancer in a non-ACTIVE state'
                ', current state: {0}'.format(load_balancer.status))
        load_balancer.status = 'PENDING_UPDATE'
        nodecount = session.query(Node).\
            filter(Node.lbid == self.lbid).\
            filter(Node.enabled == 1).count()
        # Can't delete the last enabled node
        if nodecount <= 1:
            session.rollback()
            raise ClientSideError(
                "Cannot delete the last enabled node in a load balancer")
        node = session.query(Node).\
            filter(Node.lbid == self.lbid).\
            filter(Node.id == node_id).\
            first()
        if not node:
            session.rollback()
            raise NotFound("Node not found in supplied Load Balancer")
        # May not delete the primary node of a Galera LB
        if load_balancer.protocol.lower() == 'galera' and node.backup == 0:
            session.rollback()
            raise ClientSideError(
                "Cannot delete the primary node in a Galera load balancer")
        session.delete(node)
        device = session.query(
            Device.id, Device.name
        ).join(LoadBalancer.devices).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        session.commit()
        submit_job('UPDATE', device.name, device.id, self.lbid)
        return None
def post(self, body=None):
    """Accepts edit if load_balancer_id isn't blank, or creates a new
    load balancer.

    :param load_balancer_id: id of lb
    :param *args: holds the posted json or xml data

    Urls:
       POST /loadbalancers/{load_balancer_id}
       PUT  /loadbalancers

    Notes:
       curl -i -H "Accept: application/json" -X POST \
       -d 'data={"name": "my_lb"}' \
       http://dev.server:8080/loadbalancers/100

    Returns: dict
    """
    tenant_id = get_limited_to_project(request.headers)
    if body.nodes == Unset or not len(body.nodes):
        raise ClientSideError(
            'At least one backend node needs to be supplied')

    # When the load balancer is used for Galera, we need to do some
    # sanity checking of the nodes to make sure one and only one node
    # is defined as the primary node.
    if body.protocol and body.protocol.lower() == 'galera':
        is_galera = True
    else:
        is_galera = False
    num_galera_primary_nodes = 0

    for node in body.nodes:
        if node.address == Unset:
            raise ClientSideError('A supplied node has no address')
        if node.port == Unset:
            raise ClientSideError(
                'Node {0} is missing a port'.format(node.address))
        if node.port < 1 or node.port > 65535:
            raise ClientSideError(
                'Node {0} port number {1} is invalid'.format(
                    node.address, node.port))
        try:
            node.address = ipfilter(node.address, conf.ip_filters)
        except IPOutOfRange:
            raise ClientSideError(
                'IP Address {0} is not allowed as a backend node'.format(
                    node.address))
        except Exception:
            raise ClientSideError(
                'IP Address {0} not valid'.format(node.address))
        if node.weight != Unset:
            try:
                weight = int(node.weight)
            except ValueError:
                raise ClientSideError('Node weight must be an integer')
            if weight < 1 or weight > 256:
                raise ClientSideError(
                    'Node weight must be between 1 and 256')
        is_backup = False
        if node.backup != Unset and node.backup == 'TRUE':
            is_backup = True
        if is_galera and not is_backup:
            num_galera_primary_nodes += 1

    # Options defaults
    client_timeout_ms = 30000
    server_timeout_ms = 30000
    connect_timeout_ms = 30000
    connect_retries = 3
    if body.options:
        if body.options.client_timeout != Unset:
            client_timeout_ms = body.options.client_timeout
        if body.options.server_timeout != Unset:
            server_timeout_ms = body.options.server_timeout
        if body.options.connect_timeout != Unset:
            connect_timeout_ms = body.options.connect_timeout
        if body.options.connect_retries != Unset:
            connect_retries = body.options.connect_retries

    # Galera sanity checks
    if is_galera and num_galera_primary_nodes != 1:
        raise ClientSideError(
            'Galera load balancer must have exactly one primary node')

    with db_session() as session:
        lblimit = session.query(Limits.value).\
            filter(Limits.name == 'maxLoadBalancers').scalar()
        nodelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxNodesPerLoadBalancer').scalar()
        namelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxLoadBalancerNameLength').scalar()
        count = session.query(LoadBalancer).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').count()
        ports = session.query(Ports.protocol, Ports.portnum).\
            filter(Ports.enabled == 1).all()

        # Allow a per-tenant LB limit, defaulting to the global limit
        # if the per-tenant value is not set.
        tenant_lblimit = session.query(TenantLimits.loadbalancers).\
            filter(TenantLimits.tenantid == tenant_id).scalar()
        if tenant_lblimit:
            lblimit = tenant_lblimit

        if len(body.name) > namelimit:
            session.rollback()
            raise ClientSideError('Load Balancer name is too long')
        # TODO: this should probably be a 413, not sure how to do that yet
        if count >= lblimit:
            session.rollback()
            raise OverLimit(
                'Account has hit limit of {0} Load Balancers'.format(
                    lblimit))
        if len(body.nodes) > nodelimit:
            session.rollback()
            raise OverLimit(
                'Too many backend nodes supplied (limit is {0})'.format(
                    nodelimit))

        device = None
        old_lb = None
        # if we don't have an id then we want to create a new lb
        lb = LoadBalancer()
        lb.tenantid = tenant_id
        lb.name = body.name
        if body.protocol:
            if body.protocol.lower() in ('tcp', 'http', 'galera'):
                lb.protocol = body.protocol.upper()
            else:
                raise ClientSideError(
                    'Invalid protocol %s' % body.protocol)
        else:
            lb.protocol = 'HTTP'

        if body.port:
            if body.port < 1 or body.port > 65535:
                raise ClientSideError(
                    'Port number {0} is invalid'.format(body.port))
            # Make sure the port is valid and enabled
            valid = False
            for item in ports:
                item = item._asdict()
                if (lb.protocol == item["protocol"].upper()
                        and body.port == item["portnum"]):
                    valid = True
            if valid:
                lb.port = body.port
            else:
                raise ClientSideError(
                    'Port number {0} is not allowed for {1} protocol'
                    .format(body.port, lb.protocol))
        else:
            if lb.protocol == 'HTTP':
                lb.port = 80
            elif lb.protocol == 'TCP':
                lb.port = 443
            elif lb.protocol == 'GALERA':
                lb.port = 3306

        lb.status = 'BUILD'
        lb.created = None

        if body.virtualIps == Unset:
            # Find a free device. Lock the row with "for update" so
            # multiple API servers don't grab the same device.
            device = session.query(Device).\
                filter(~Device.id.in_(
                    session.query(loadbalancers_devices.c.device)
                )).\
                filter(Device.status == "OFFLINE").\
                filter(Device.pingCount == 0).\
                with_lockmode('update').\
                first()
            if device is None:
                session.rollback()
                raise ExhaustedError('No devices available')
            vip = None
        else:
            virtual_id = body.virtualIps[0].id
            # Make sure the virtual ID is actually an int
            try:
                virtual_id = int(virtual_id)
            except Exception:
                session.rollback()
                raise NotFound('Invalid virtual IP provided')
            # This is an additional load balancer on an existing device
            device = session.query(
                Device
            ).join(Device.vip).\
                filter(Vip.id == virtual_id).\
                first()
            old_lb = session.query(
                LoadBalancer
            ).join(LoadBalancer.devices).\
                join(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(Vip.id == virtual_id).\
                first()
            # Check for a valid LB before dereferencing its status
            if old_lb is None:
                session.rollback()
                raise NotFound('Invalid virtual IP provided')
            if old_lb.status in ImmutableStates:
                session.rollback()
                raise ImmutableEntity(
                    'Existing Load Balancer on VIP in a non-ACTIVE state'
                    ', current state: {0}'.format(old_lb.status))
            vip = session.query(Vip).\
                filter(Vip.device == device.id).\
                first()
            old_count = session.query(
                LoadBalancer
            ).join(LoadBalancer.devices).\
                join(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(Vip.id == virtual_id).\
                filter(LoadBalancer.port == lb.port).\
                count()
            if old_count:
                session.rollback()
                # Error here, can have only one LB per port on a device
                raise ClientSideError(
                    'Only one load balancer per port allowed per device')

        if body.algorithm:
            lb.algorithm = body.algorithm.upper()
        else:
            lb.algorithm = 'ROUND_ROBIN'

        lb.client_timeout = client_timeout_ms
        lb.server_timeout = server_timeout_ms
        lb.connect_timeout = connect_timeout_ms
        lb.connect_retries = connect_retries

        lb.devices = [device]
        # write to database
        session.add(lb)
        session.flush()
        # refresh the lb record so we get the id back
        session.refresh(lb)

        for node in body.nodes:
            if node.condition == 'DISABLED':
                enabled = 0
                node_status = 'OFFLINE'
            else:
                enabled = 1
                node_status = 'ONLINE'
            if node.backup == 'TRUE':
                backup = 1
            else:
                backup = 0
            weight = 1
            if node.weight != Unset:
                weight = node.weight
            out_node = Node(
                lbid=lb.id, port=node.port, address=node.address,
                enabled=enabled, status=node_status, weight=weight,
                backup=backup)
            session.add(out_node)

        # Now save the loadbalancer_id to the device and switch its
        # status to BUILD so the monitoring does not trigger early.
        # The gearman message code will switch it to ONLINE once we
        # know everything is good.
        device.status = "BUILD"
        session.flush()

        return_data = LBResp()
        return_data.id = str(lb.id)
        return_data.name = lb.name
        return_data.protocol = lb.protocol
        return_data.port = str(lb.port)
        return_data.algorithm = lb.algorithm
        return_data.status = lb.status
        return_data.created = lb.created
        return_data.updated = lb.updated
        if vip:
            vip_resp = LBVipResp(
                address=str(ipaddress.IPv4Address(vip.ip)),
                id=str(vip.id), type='PUBLIC', ipVersion='IPV4')
        else:
            vip_resp = LBVipResp(
                address=None, id=None, type='ASSIGNING',
                ipVersion='IPV4')
        return_data.virtualIps = [vip_resp]
        return_data.nodes = []
        for node in body.nodes:
            if node.weight != Unset and node.weight != 1:
                out_node = LBRespNode(
                    port=str(node.port), address=node.address,
                    condition=node.condition, weight=str(node.weight))
            else:
                out_node = LBRespNode(
                    port=str(node.port), address=node.address,
                    condition=node.condition)
            return_data.nodes.append(out_node)
        session.commit()
        # trigger gearman client to create the new lb
        submit_job('UPDATE', device.name, device.id, lb.id)
        return return_data
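# --- Usage sketch (illustration only, not part of the API source) ---
# A hypothetical create request matching the handler above. The host,
# token, and field values are assumptions, as is the choice of POST
# /loadbalancers for creation (the docstring lists both POST and PUT
# forms).
import json
import requests

def example_create_lb(token):
    body = {
        'name': 'my_lb',
        'protocol': 'HTTP',  # defaults to HTTP (port 80) when omitted
        'nodes': [
            {'address': '10.0.0.4', 'port': 80, 'condition': 'ENABLED'},
            {'address': '10.0.0.5', 'port': 80, 'condition': 'ENABLED'},
        ],
    }
    resp = requests.post(
        'http://dev.server:8080/loadbalancers',
        headers={'X-Auth-Token': token,
                 'Content-Type': 'application/json'},
        data=json.dumps(body))
    resp.raise_for_status()
    # Response carries id, status=BUILD, and a virtualIps entry that is
    # ASSIGNING until a device VIP is bound.
    return resp.json()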
def put(self, body=None):
    """Update the settings for a health monitor.

    :param load_balancer_id: id of lb
    :param *args: holds the posted json or xml data

    Url:
       PUT /loadbalancers/{load_balancer_id}/healthmonitor

    Returns: dict
    """
    if not self.lbid:
        raise ClientSideError("Load Balancer ID has not been supplied")
    tenant_id = get_limited_to_project(request.headers)
    with db_session() as session:
        # grab the lb
        query = (
            session.query(LoadBalancer, HealthMonitor)
            .outerjoin(LoadBalancer.monitors)
            .filter(LoadBalancer.id == self.lbid)
            .filter(LoadBalancer.tenantid == tenant_id)
            .filter(LoadBalancer.status != "DELETED")
            .first()
        )
        if query is None:
            session.rollback()
            raise NotFound("Load Balancer not found")
        lb, monitor = query
        if lb is None:
            session.rollback()
            raise NotFound("Load Balancer not found")

        # Check inputs
        if (
            body.type == Unset or body.type is None
            or body.delay == Unset or body.delay is None
            or body.timeout == Unset or body.timeout is None
            or body.attemptsBeforeDeactivation == Unset
            or body.attemptsBeforeDeactivation is None
        ):
            session.rollback()
            raise ClientSideError(
                "Missing field(s): {0}, {1}, {2}, and {3} are required"
                .format("type", "delay", "timeout",
                        "attemptsBeforeDeactivation")
            )

        data = {
            "lbid": self.lbid,
            "type": body.type,
            "delay": int(body.delay),
            "timeout": int(body.timeout),
            "attempts": int(body.attemptsBeforeDeactivation),
        }

        # Path is only allowed when the type is not CONNECT
        if body.path != Unset and body.path is not None:
            if body.type == "CONNECT":
                session.rollback()
                raise ClientSideError(
                    "Path argument is invalid with CONNECT type")

            # Encode everything apart from allowed characters. The
            # ignore list in the second parameter is everything in
            # RFC 3986 section 2 that isn't already ignored by
            # urllib.quote().
            data["path"] = quote(body.path, "/~+*,;:!$'[]()?&=#%")

            # Path must be non-empty and begin with a leading /
            if len(data["path"]) == 0 or data["path"][0] != "/":
                session.rollback()
                raise ClientSideError("Path must begin with leading /")

            if len(data["path"]) > self.PATH_LIMIT:
                session.rollback()
                raise ClientSideError(
                    "Path must be less than {0} characters".format(
                        self.PATH_LIMIT))
        else:
            if body.type != "CONNECT":
                session.rollback()
                raise ClientSideError("Path argument is required")
            data["path"] = None

        # Check timeout limits. Must be > 0 and limited to 1 hour
        if data["timeout"] < 1 or data["timeout"] > self.TIMEOUT_LIMIT:
            session.rollback()
            raise ClientSideError(
                "timeout must be between 1 and {0} seconds".format(
                    self.TIMEOUT_LIMIT))
        # Check delay limits. Must be > 0 and limited to 1 hour
        if data["delay"] < 1 or data["delay"] > self.DELAY_LIMIT:
            session.rollback()
            raise ClientSideError(
                "delay must be between 1 and {0} seconds".format(
                    self.DELAY_LIMIT))
        if data["timeout"] > data["delay"]:
            session.rollback()
            raise ClientSideError("timeout cannot be greater than delay")
        if data["attempts"] < 1 or data["attempts"] > 10:
            session.rollback()
            raise ClientSideError(
                "attemptsBeforeDeactivation must be between 1 and 10")

        if monitor is None:
            # This is ok for LBs that already existed without
            # monitoring. Create a new entry.
            monitor = HealthMonitor(
                lbid=self.lbid, type=data["type"], delay=data["delay"],
                timeout=data["timeout"], attempts=data["attempts"],
                path=data["path"])
            session.add(monitor)
        else:
            # Modify the existing entry.
            monitor.type = data["type"]
            monitor.delay = data["delay"]
            monitor.timeout = data["timeout"]
            monitor.attempts = data["attempts"]
            monitor.path = data["path"]

        if lb.status in ImmutableStates:
            session.rollback()
            raise ImmutableEntity(
                "Cannot modify a Load Balancer in a non-ACTIVE state"
                ", current state: {0}".format(lb.status)
            )
        lb.status = "PENDING_UPDATE"

        device = (
            session.query(Device.id, Device.name, Device.status)
            .join(LoadBalancer.devices)
            .filter(LoadBalancer.id == self.lbid)
            .first()
        )

        return_data = LBMonitorResp()
        return_data.type = data["type"]
        return_data.delay = str(data["delay"])
        return_data.timeout = str(data["timeout"])
        return_data.attemptsBeforeDeactivation = str(data["attempts"])
        if (data["path"] is not None) and (len(data["path"]) > 0):
            return_data.path = data["path"]
        session.commit()
        submit_job("UPDATE", device.name, device.id, lb.id)
        return return_data
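# --- Usage sketch (illustration only, not part of the API source) ---
# A hypothetical monitor update matching the handler above. The host,
# token, and field values are assumptions; note that timeout must not
# exceed delay, and path is only valid for non-CONNECT types.
import json
import requests

def example_update_monitor(lb_id, token):
    body = {
        'type': 'HTTP',
        'delay': 30,                        # seconds between checks
        'timeout': 30,                      # seconds; must be <= delay
        'attemptsBeforeDeactivation': 2,    # allowed range is 1-10
        'path': '/healthcheck',             # omit for type CONNECT
    }
    resp = requests.put(
        'http://dev.server:8080/loadbalancers/{0}/healthmonitor'.format(
            lb_id),
        headers={'X-Auth-Token': token,
                 'Content-Type': 'application/json'},
        data=json.dumps(body))
    resp.raise_for_status()
    return resp.json()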
def post(self, body=None):
    """Accepts edit if load_balancer_id isn't blank, or creates a new
    load balancer.

    :param load_balancer_id: id of lb
    :param *args: holds the posted json or xml data

    Urls:
       POST /loadbalancers/{load_balancer_id}
       PUT  /loadbalancers

    Notes:
       curl -i -H "Accept: application/json" -X POST \
       -d 'data={"name": "my_lb"}' \
       http://dev.server:8080/loadbalancers/100

    Returns: dict
    """
    tenant_id = get_limited_to_project(request.headers)
    if body.nodes == Unset or not len(body.nodes):
        raise ClientSideError(
            'At least one backend node needs to be supplied')

    # When the load balancer is used for Galera, we need to do some
    # sanity checking of the nodes to make sure one and only one node
    # is defined as the primary node.
    if body.protocol and body.protocol.lower() == 'galera':
        is_galera = True
    else:
        is_galera = False
    num_galera_primary_nodes = 0

    for node in body.nodes:
        if node.address == Unset:
            raise ClientSideError('A supplied node has no address')
        if node.port == Unset:
            raise ClientSideError(
                'Node {0} is missing a port'.format(node.address))
        if node.port < 1 or node.port > 65535:
            raise ClientSideError(
                'Node {0} port number {1} is invalid'.format(
                    node.address, node.port))
        try:
            node.address = ipfilter(node.address, conf.ip_filters)
        except IPOutOfRange:
            raise ClientSideError(
                'IP Address {0} is not allowed as a backend node'.format(
                    node.address))
        except Exception:
            raise ClientSideError(
                'IP Address {0} not valid'.format(node.address))
        if node.weight != Unset:
            try:
                weight = int(node.weight)
            except ValueError:
                raise ClientSideError('Node weight must be an integer')
            if weight < 1 or weight > 256:
                raise ClientSideError(
                    'Node weight must be between 1 and 256')
        is_backup = False
        if node.backup != Unset and node.backup == 'TRUE':
            is_backup = True
        if is_galera and not is_backup:
            num_galera_primary_nodes += 1

    # Options defaults
    timeout_ms = self.LB_TIMEOUT_MS
    retries = self.LB_RETRIES
    if body.options:
        if body.options.timeout != Unset:
            try:
                timeout_ms = int(body.options.timeout)
                if timeout_ms < 0 or timeout_ms > self.LB_TIMEOUT_MAX:
                    raise ClientSideError(
                        'timeout must be between 0 and {0} ms'.format(
                            self.LB_TIMEOUT_MAX))
            except ValueError:
                raise ClientSideError('timeout must be an integer')
        if body.options.retries != Unset:
            try:
                retries = int(body.options.retries)
                if retries < 0 or retries > self.LB_RETRIES_MAX:
                    raise ClientSideError(
                        'retries must be between 0 and {0}'.format(
                            self.LB_RETRIES_MAX))
            except ValueError:
                raise ClientSideError('retries must be an integer')

    # Galera sanity checks
    if is_galera and num_galera_primary_nodes != 1:
        raise ClientSideError(
            'Galera load balancer must have exactly one primary node')

    with db_session() as session:
        lblimit = session.query(Limits.value).\
            filter(Limits.name == 'maxLoadBalancers').scalar()
        nodelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxNodesPerLoadBalancer').scalar()
        namelimit = session.query(Limits.value).\
            filter(Limits.name == 'maxLoadBalancerNameLength').scalar()
        count = session.query(LoadBalancer).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.status != 'DELETED').count()
        ports = session.query(Ports.protocol, Ports.portnum).\
            filter(Ports.enabled == 1).all()

        # Allow a per-tenant LB limit, defaulting to the global limit
        # if the per-tenant value is not set.
        tenant_lblimit = session.query(TenantLimits.loadbalancers).\
            filter(TenantLimits.tenantid == tenant_id).scalar()
        if tenant_lblimit:
            lblimit = tenant_lblimit

        if len(body.name) > namelimit:
            session.rollback()
            raise ClientSideError('Load Balancer name is too long')
        # TODO: this should probably be a 413, not sure how to do that yet
        if count >= lblimit:
            session.rollback()
            raise OverLimit(
                'Account has hit limit of {0} Load Balancers'.format(
                    lblimit))
        if len(body.nodes) > nodelimit:
            session.rollback()
            raise OverLimit(
                'Too many backend nodes supplied (limit is {0})'.format(
                    nodelimit))

        device = None
        old_lb = None
        # if we don't have an id then we want to create a new lb
        lb = LoadBalancer()
        lb.tenantid = tenant_id
        lb.name = body.name
        if body.protocol:
            if body.protocol.lower() in ('tcp', 'http', 'galera'):
                lb.protocol = body.protocol.upper()
            else:
                raise ClientSideError(
                    'Invalid protocol %s' % body.protocol)
        else:
            lb.protocol = 'HTTP'

        if body.port:
            if body.port < 1 or body.port > 65535:
                raise ClientSideError(
                    'Port number {0} is invalid'.format(body.port))
            # Make sure the port is valid and enabled
            valid = False
            for item in ports:
                item = item._asdict()
                if (lb.protocol == item["protocol"].upper()
                        and body.port == item["portnum"]):
                    valid = True
            if valid:
                lb.port = body.port
            else:
                raise ClientSideError(
                    'Port number {0} is not allowed for {1} protocol'
                    .format(body.port, lb.protocol))
        else:
            if lb.protocol == 'HTTP':
                lb.port = 80
            elif lb.protocol == 'TCP':
                lb.port = 443
            elif lb.protocol == 'GALERA':
                lb.port = 3306

        lb.status = 'BUILD'
        lb.created = None

        if body.virtualIps == Unset:
            # Find a free device. Lock the row with "for update" so
            # multiple API servers don't grab the same device.
            device = session.query(Device).\
                filter(~Device.id.in_(
                    session.query(loadbalancers_devices.c.device)
                )).\
                filter(Device.status == "OFFLINE").\
                filter(Device.pingCount == 0).\
                with_lockmode('update').\
                first()
            if device is None:
                session.rollback()
                raise ExhaustedError('No devices available')
            vip = None
        else:
            virtual_id = body.virtualIps[0].id
            # Make sure the virtual ID is actually an int
            try:
                virtual_id = int(virtual_id)
            except Exception:
                session.rollback()
                raise NotFound('Invalid virtual IP provided')
            # This is an additional load balancer on an existing device
            device = session.query(
                Device
            ).join(Device.vip).\
                filter(Vip.id == virtual_id).\
                first()
            old_lb = session.query(
                LoadBalancer
            ).join(LoadBalancer.devices).\
                join(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(Vip.id == virtual_id).\
                first()
            # Check for a valid LB before dereferencing its status
            if old_lb is None:
                session.rollback()
                raise NotFound('Invalid virtual IP provided')
            if old_lb.status in ImmutableStates:
                session.rollback()
                raise ImmutableEntity(
                    'Existing Load Balancer on VIP in a non-ACTIVE state'
                    ', current state: {0}'.format(old_lb.status))
            vip = session.query(Vip).\
                filter(Vip.device == device.id).\
                first()
            old_count = session.query(
                LoadBalancer
            ).join(LoadBalancer.devices).\
                join(Device.vip).\
                filter(LoadBalancer.tenantid == tenant_id).\
                filter(Vip.id == virtual_id).\
                filter(LoadBalancer.port == lb.port).\
                count()
            if old_count:
                session.rollback()
                # Error, can have only one LB per port on a device
                raise ClientSideError(
                    'Only one load balancer per port allowed per device')
            if lb.protocol == 'HTTP':
                protocol_count = session.query(
                    LoadBalancer
                ).join(LoadBalancer.devices).\
                    join(Device.vip).\
                    filter(LoadBalancer.tenantid == tenant_id).\
                    filter(Vip.id == virtual_id).\
                    filter(LoadBalancer.protocol == lb.protocol).\
                    count()
            else:
                # TCP or GALERA; both are TCP really
                protocol_count = session.query(
                    LoadBalancer
                ).join(LoadBalancer.devices).\
                    join(Device.vip).\
                    filter(LoadBalancer.tenantid == tenant_id).\
                    filter(Vip.id == virtual_id).\
                    filter((LoadBalancer.protocol == 'TCP') |
                           (LoadBalancer.protocol == 'GALERA')).\
                    count()
            if protocol_count:
                session.rollback()
                # Error, can have only one LB per protocol on a device
                raise ClientSideError(
                    'Only one load balancer per protocol'
                    ' allowed per device')

        if body.algorithm:
            lb.algorithm = body.algorithm.upper()
        else:
            lb.algorithm = 'ROUND_ROBIN'

        lb.timeout = timeout_ms
        lb.retries = retries

        lb.devices = [device]
        # write to database
        session.add(lb)
        session.flush()
        # refresh the lb record so we get the id back
        session.refresh(lb)

        for node in body.nodes:
            if node.condition == 'DISABLED':
                enabled = 0
                node_status = 'OFFLINE'
            else:
                enabled = 1
                node_status = 'ONLINE'
            if node.backup == 'TRUE':
                backup = 1
            else:
                backup = 0
            weight = 1
            if node.weight != Unset:
                weight = node.weight
            out_node = Node(
                lbid=lb.id, port=node.port, address=node.address,
                enabled=enabled, status=node_status, weight=weight,
                backup=backup)
            session.add(out_node)

        # Now save the loadbalancer_id to the device and switch its
        # status to BUILD so the monitoring does not trigger early.
        # The gearman message code will switch it to ONLINE once we
        # know everything is good.
        device.status = "BUILD"
        session.flush()

        return_data = LBResp()
        return_data.id = str(lb.id)
        return_data.name = lb.name
        return_data.protocol = lb.protocol
        return_data.port = str(lb.port)
        return_data.algorithm = lb.algorithm
        return_data.status = lb.status
        return_data.created = lb.created
        return_data.updated = lb.updated
        if vip:
            vip_resp = LBVipResp(
                address=str(ipaddress.IPv4Address(vip.ip)),
                id=str(vip.id), type='PUBLIC', ipVersion='IPV4')
        else:
            vip_resp = LBVipResp(
                address=None, id=None, type='ASSIGNING',
                ipVersion='IPV4')
        return_data.virtualIps = [vip_resp]

        nodes = session.query(
            Node.id, Node.address, Node.port, Node.status,
            Node.enabled, Node.weight
        ).join(LoadBalancer.nodes).\
            filter(LoadBalancer.tenantid == tenant_id).\
            filter(LoadBalancer.id == lb.id).\
            all()
        return_data.nodes = []
        for node in nodes:
            if node.enabled == 1:
                condition = 'ENABLED'
            else:
                condition = 'DISABLED'
            if node.weight == 1:
                return_data.nodes.append(
                    LBRespNode(
                        id=str(node.id), port=str(node.port),
                        address=node.address, condition=condition,
                        status=node.status))
            else:
                return_data.nodes.append(
                    LBRespNode(
                        id=str(node.id), port=str(node.port),
                        address=node.address, condition=condition,
                        status=node.status, weight=str(node.weight)))
        return_data.options = LBOptions(timeout=timeout_ms,
                                        retries=retries)

        counter = session.query(Counters).\
            filter(Counters.name == 'api_loadbalancers_create').first()
        counter.value += 1

        session.commit()
        # trigger gearman client to create the new lb
        submit_job('UPDATE', device.name, device.id, lb.id)
        return return_data
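# --- Usage sketch (illustration only, not part of the API source) ---
# A hypothetical create on an existing VIP, exercising the virtualIps
# and options handling above. The host, token, VIP id, and body values
# are assumptions; the one-LB-per-port and one-LB-per-protocol checks
# mean this succeeds only if the device has no other TCP/GALERA LB.
import json
import requests

def example_create_shared_vip_lb(vip_id, token):
    body = {
        'name': 'my_tcp_lb',
        'protocol': 'TCP',
        'port': 443,
        'virtualIps': [{'id': vip_id}],  # reuse the device behind this VIP
        'options': {'timeout': 30000, 'retries': 3},
        'nodes': [
            {'address': '10.0.0.4', 'port': 443, 'condition': 'ENABLED'},
            {'address': '10.0.0.5', 'port': 443, 'condition': 'ENABLED'},
        ],
    }
    resp = requests.post(
        'http://dev.server:8080/loadbalancers',
        headers={'X-Auth-Token': token,
                 'Content-Type': 'application/json'},
        data=json.dumps(body))
    resp.raise_for_status()
    return resp.json()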