def client_job(job_type, host, data, lbid):
    try:
        client = GearmanClientThread(host, lbid)
        LOG.info(
            "Sending Gearman job {0} to {1} for loadbalancer {2}".format(
                job_type, host, lbid))
        if job_type == 'UPDATE':
            client.send_update(data)
        if job_type == 'DELETE':
            client.send_delete(data)
        if job_type == 'ARCHIVE':
            client.send_archive(data)
        if job_type == 'ASSIGN':
            # Try the assign 5 times
            for x in xrange(0, 5):
                status = client.send_assign(data)
                if status:
                    break
            with db_session() as session:
                device = session.query(Device).\
                    filter(Device.name == data).first()
                if device is None:
                    LOG.error(
                        "Device {0} not found in ASSIGN, this shouldn't "
                        "happen".format(data))
                    return
                mnb_data = {}
                if not status:
                    LOG.error(
                        "Giving up vip assign for device {0}".format(data))
                    errmsg = 'Floating IP assign failed'
                    client._set_error(device.id, errmsg, session)
                else:
                    lbs = session.query(
                        LoadBalancer
                    ).join(LoadBalancer.nodes).\
                        join(LoadBalancer.devices).\
                        filter(Device.id == device.id).\
                        filter(LoadBalancer.status != 'DELETED').\
                        all()
                    for lb in lbs:
                        if lb.status == 'BUILD':
                            # Only send a create message to MnB if we
                            # are going from BUILD to ACTIVE, after the
                            # DB is updated.
                            mnb_data["lbid"] = lb.id
                            mnb_data["tenantid"] = lb.tenantid
                        lb.status = 'ACTIVE'
                    device.status = 'ONLINE'
                session.commit()

                # Send the MnB create if needed
                if "lbid" in mnb_data:
                    update_mnb('lbaas.instance.create',
                               mnb_data["lbid"],
                               mnb_data["tenantid"])
        if job_type == 'REMOVE':
            client.send_remove(data)
        return
    except:
        LOG.exception("Gearman thread unhandled exception")
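# Illustrative sketch, not part of the original module: client_job is
# written to run on its own thread (it logs, rather than raises, any
# unhandled exception), with one invocation per Gearman submission.
# The dispatcher below and its example arguments are assumptions, not
# the project's actual caller.
import threading


def submit_client_job_example(job_type, host, data, lbid):
    worker = threading.Thread(target=client_job,
                              args=(job_type, host, data, lbid))
    worker.daemon = True
    worker.start()

# Usage (hypothetical values):
#     submit_client_job_example('UPDATE', 'worker-host-01', device_id, lb_id)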
def main():
    CONF(project='mnbtest', version=__version__)
    logging.setup('mnbtest')

    LOG.debug('Configuration:')
    print "Starting Test"
    print "LOG FILE = {0}".format(CONF.log_file)
    LOG.info('STARTING MNBTEST')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    LOG.info("Calling update_mnb with {0} messages".format(CONF.testcount))
    update_mnb('lbaas.instance.test', CONF.testcount, 456)
    time.sleep(30)
def main(): CONF(project="mnbtest", version=__version__) logging.setup("mnbtest") LOG.debug("Configuration:") print "Starting Test" print "LOG FILE = {0}".format(CONF.log_file) LOG.info("STARTING MNBTEST") CONF.log_opt_values(LOG, std_logging.DEBUG) LOG.info("Calling update_mnb with {0} messages".format(CONF.testcount)) update_mnb("lbaas.instance.test", CONF.testcount, 456) time.sleep(30)
def _exec_usage(self):
    with db_session() as session:
        # Next check if it's time to send bandwidth usage notifications
        delta = datetime.timedelta(minutes=self.usage_freq)
        exp = timeutils.utcnow() - delta

        start, = session.query(
            Billing.last_update
        ).filter(Billing.name == "usage").\
            first()

        if start and start > exp:
            # Not time yet
            LOG.info('Not time to send usage statistics yet {0}'.
                     format(exp))
            session.rollback()
            return

        # Check the connection before sending the notifications
        if not test_mnb_connection():
            # Abort the usage notifications
            LOG.info("Aborting usage notifications. Could not connect")
            session.rollback()
            return

        # Calculate the stopping point by rounding backward to the nearest
        # N minutes, i.e. if N = 60 this rounds back to HH:00:00, and if
        # N = 15 it rounds back to HH:00:00, HH:15:00, HH:30:00, or
        # HH:45:00, whichever boundary most recently passed.
        N = cfg.CONF['admin_api'].usage_freq
        now = timeutils.utcnow()
        stop = now - datetime.timedelta(minutes=now.minute % N,
                                        seconds=now.second,
                                        microseconds=now.microsecond)

        # Release the lock
        session.query(Billing).\
            filter(Billing.name == "usage").\
            update({"last_update": stop},
                   synchronize_session='fetch')
        session.commit()

        # Send the usage notifications. Pass the timestamps to save
        # queries.
        update_mnb('lbaas.bandwidth.usage', start, stop)
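# Worked example (illustrative, not part of the original module) of the
# "round backward to the nearest N minutes" arithmetic used above to pick
# the usage-window stopping point.
import datetime

N = 15  # assume usage_freq is 15 minutes
now = datetime.datetime(2014, 6, 4, 10, 37, 42, 123456)
stop = now - datetime.timedelta(minutes=now.minute % N,
                                seconds=now.second,
                                microseconds=now.microsecond)
# now.minute % N == 37 % 15 == 7, so stop == 2014-06-04 10:30:00,
# the most recent quarter-hour boundary at or before `now`.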
def _exec_exists(self):
    with db_session() as session:
        # Check if it's time to send exists notifications
        delta = datetime.timedelta(minutes=self.exists_freq)
        exp = timeutils.utcnow() - delta
        exp_time = exp.strftime('%Y-%m-%d %H:%M:%S')

        updated = session.query(
            Billing.last_update
        ).filter(Billing.name == "exists").\
            filter(Billing.last_update > exp_time).\
            first()

        if updated is not None:
            # Not time yet
            LOG.info('Not time to send exists notifications yet {0}'.
                     format(exp_time))
            session.rollback()
            return

        # Check the connection before sending the notifications
        if not test_mnb_connection():
            # Abort the exists notifications
            LOG.info("Aborting exists notifications. Could not connect")
            session.rollback()
            return

        # Update the exists timestamp now
        session.query(Billing).\
            filter(Billing.name == "exists").\
            update({"last_update": func.now()},
                   synchronize_session='fetch')
        session.commit()

        # Send the notifications
        update_mnb('lbaas.instance.exists', None, None)
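# Rough SQL equivalent (an assumption, not generated from the code) of the
# check-and-claim queries issued above. The "exists" row in the billing
# table acts as a shared timestamp, so under normal operation only one
# admin API process sends exists notifications per exists_freq window:
#
#   SELECT last_update FROM billing
#    WHERE name = 'exists' AND last_update > :exp_time LIMIT 1;
#   -- no row returned means the window is stale; claim it:
#   UPDATE billing SET last_update = NOW() WHERE name = 'exists';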
def send_update(self, data):
    with db_session() as session:
        lbs = session.query(
            LoadBalancer
        ).join(LoadBalancer.nodes).\
            join(LoadBalancer.devices).\
            filter(Device.id == data).\
            filter(LoadBalancer.status != 'DELETED').\
            all()
        job_data = {
            'hpcs_action': 'UPDATE',
            'loadBalancers': []
        }

        degraded = []

        if lbs is None:
            LOG.error(
                'Attempting to send empty LB data for device {0} ({1}), '
                'something went wrong'.format(data, self.host))
            self._set_error(data, "LB config error", session)
            session.commit()
            return

        for lb in lbs:
            lb_data = {
                'name': lb.name,
                'protocol': lb.protocol,
                'algorithm': lb.algorithm,
                'port': lb.port,
                'nodes': [],
                'monitor': {}
            }
            for node in lb.nodes:
                if not node.enabled:
                    continue
                condition = 'ENABLED'
                backup = 'FALSE'
                if node.backup != 0:
                    backup = 'TRUE'
                node_data = {
                    'id': node.id,
                    'port': node.port,
                    'address': node.address,
                    'weight': node.weight,
                    'condition': condition,
                    'backup': backup
                }
                lb_data['nodes'].append(node_data)
                # Track if we have a DEGRADED LB
                if node.status == 'ERROR':
                    degraded.append(lb.id)

            # Add a default health monitor if one does not exist
            monitor = session.query(HealthMonitor).\
                filter(HealthMonitor.lbid == lb.id).first()
            if monitor is None:
                # Set it to a default configuration
                monitor = HealthMonitor(lbid=lb.id,
                                        type="CONNECT",
                                        delay=30,
                                        timeout=30,
                                        attempts=2,
                                        path=None)
                session.add(monitor)
                session.flush()

            monitor_data = {
                'type': monitor.type,
                'delay': monitor.delay,
                'timeout': monitor.timeout,
                'attempts': monitor.attempts
            }
            if monitor.path is not None:
                monitor_data['path'] = monitor.path

            # All new LBs created since these options were supported
            # will have default values in the DB. Pre-existing LBs will
            # not have any values, so we need to check for that.
            if any([lb.timeout, lb.retries]):
                lb_data['options'] = {
                    'client_timeout': lb.timeout,
                    'server_timeout': lb.timeout,
                    'connect_timeout': lb.timeout,
                    'connect_retries': lb.retries
                }

            lb_data['monitor'] = monitor_data
            job_data['loadBalancers'].append(lb_data)

        # Update the worker
        mnb_data = {}
        status, response = self._send_message(job_data, 'hpcs_response')
        if not status:
            self._set_error(data, response, session)
        else:
            for lb in lbs:
                if lb.id in degraded:
                    lb.status = 'DEGRADED'
                    lb.errmsg = "A node on the load balancer has failed"
                elif lb.status == 'ERROR':
                    # Do nothing because something else failed in the
                    # meantime
                    pass
                elif lb.status == 'BUILD':
                    # Do nothing if a new device, stay in BUILD state
                    # until floating IP assign finishes
                    if len(lbs) > 1:
                        lb.status = 'ACTIVE'
                        if lb.id == self.lbid:
                            # This is the new LB being added to a device.
                            # We don't have to assign a vip so we can
                            # notify billing of the LB creation (once the
                            # DB is updated)
                            mnb_data["lbid"] = lb.id
                            mnb_data["tenantid"] = lb.tenantid
                else:
                    lb.status = 'ACTIVE'
                    lb.errmsg = None

        device = session.query(Device).\
            filter(Device.id == data).\
            first()
        if device is None:
            # Shouldn't hit here, but just to be safe
            session.commit()
            return
        if device.status == 'BUILD' and len(lbs) > 1:
            device.status = 'ONLINE'
        device_name = device.name
        device_status = device.status
        counter = session.query(Counters).\
            filter(Counters.name == 'loadbalancers_updated').first()
        counter.value += 1
        session.commit()
        if device_status == 'BUILD':
            submit_vip_job('ASSIGN', device_name, None)

        # Send the MnB create if needed
        if "lbid" in mnb_data:
            update_mnb('lbaas.instance.create',
                       mnb_data["lbid"],
                       mnb_data["tenantid"])
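# Illustrative payload (field values are made up) showing the shape of the
# job_data dict that send_update() builds for the Gearman worker; the keys
# mirror the dict construction above.
EXAMPLE_UPDATE_JOB = {
    'hpcs_action': 'UPDATE',
    'loadBalancers': [{
        'name': 'lb-web',
        'protocol': 'HTTP',
        'algorithm': 'ROUND_ROBIN',
        'port': 80,
        'nodes': [{
            'id': 1,
            'port': 8080,
            'address': '10.0.0.4',
            'weight': 1,
            'condition': 'ENABLED',
            'backup': 'FALSE'
        }],
        'monitor': {
            'type': 'CONNECT',
            'delay': 30,
            'timeout': 30,
            'attempts': 2
        },
        # 'options' is present only when the LB row has timeout/retries set
        'options': {
            'client_timeout': 30,
            'server_timeout': 30,
            'connect_timeout': 30,
            'connect_retries': 2
        }
    }]
}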
def send_delete(self, data):
    with db_session() as session:
        count = session.query(
            LoadBalancer
        ).join(LoadBalancer.devices).\
            filter(Device.id == data).\
            filter(LoadBalancer.id != self.lbid).\
            filter(LoadBalancer.status != 'DELETED').\
            filter(LoadBalancer.status != 'PENDING_DELETE').\
            count()
        if count >= 1:
            # This is an update message because we want to retain the
            # remaining LB
            keep_lb = session.query(LoadBalancer).\
                join(LoadBalancer.nodes).\
                join(LoadBalancer.devices).\
                filter(Device.id == data).\
                filter(LoadBalancer.id != self.lbid).\
                filter(LoadBalancer.status != 'DELETED').\
                filter(LoadBalancer.status != 'PENDING_DELETE').\
                first()
            job_data = {
                'hpcs_action': 'UPDATE',
                'loadBalancers': [{
                    'name': keep_lb.name,
                    'protocol': keep_lb.protocol,
                    'algorithm': keep_lb.algorithm,
                    'port': keep_lb.port,
                    'nodes': []
                }]
            }
            for node in keep_lb.nodes:
                if not node.enabled:
                    continue
                condition = 'ENABLED'
                node_data = {
                    'id': node.id,
                    'port': node.port,
                    'address': node.address,
                    'weight': node.weight,
                    'condition': condition
                }
                job_data['loadBalancers'][0]['nodes'].append(node_data)
        else:
            # This is a delete
            dev = session.query(Device.name).\
                filter(Device.id == data).first()
            vip = session.query(Vip).\
                filter(Vip.device == data).first()
            if vip:
                submit_vip_job('REMOVE', dev.name,
                               str(ipaddress.IPv4Address(vip.ip)))
            job_data = {"hpcs_action": "DELETE"}

        status, response = self._send_message(job_data, 'hpcs_response')
        lb = session.query(LoadBalancer).\
            filter(LoadBalancer.id == self.lbid).\
            first()
        if not status:
            LOG.error("Failed Gearman delete for LB {0}".format(lb.id))
            self._set_error(data, response, session)
        lb.status = 'DELETED'
        tenant_id = lb.tenantid

        if count == 0:
            # Device should never be used again
            device = session.query(Device).\
                filter(Device.id == data).first()
            device.status = 'DELETED'

        # Remove LB-device join
        session.execute(loadbalancers_devices.delete().where(
            loadbalancers_devices.c.loadbalancer == lb.id))
        session.query(Node).\
            filter(Node.lbid == lb.id).delete()
        session.query(HealthMonitor).\
            filter(HealthMonitor.lbid == lb.id).delete()
        counter = session.query(Counters).\
            filter(Counters.name == 'loadbalancers_deleted').first()
        counter.value += 1
        session.commit()

        # Notify billing of the LB deletion
        update_mnb('lbaas.instance.delete', self.lbid, tenant_id)