def test_loadbalancer_destroy(self):
    """Destroying a load balancer makes subsequent lookups fail."""
    lb_ref = db_api.loadbalancer_create(self.conf, get_fake_lb('1', 'tenant1'))
    db_api.loadbalancer_destroy(self.conf, lb_ref['id'])
    # The record must be gone: the get must raise LoadBalancerNotFound
    # carrying the missing id in its kwargs.
    with self.assertRaises(exception.LoadBalancerNotFound) as cm:
        db_api.loadbalancer_get(self.conf, lb_ref['id'])
    self.assertEqual(cm.exception.kwargs, {'loadbalancer_id': lb_ref['id']})
def lb_add_vip(conf, tenant_id, lb_id, vip_dict):
    """Create a virtual IP on the tenant's load balancer.

    Packs *vip_dict* into a VirtualServer row bound to the balancer's
    server farm, pushes it to the device, and returns the unpacked dict.
    Raises exc.ServerFarmNotFound when the balancer has no server farm.
    """
    LOG.debug("Called lb_add_vip(), conf: %r, lb_id: %s, vip_dict: %r",
              conf, lb_id, vip_dict)
    balancer = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    # NOTE(akscram): server farms are really only create problems than
    # they solve multiply use of the virtual IPs.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, balancer['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    farm = farms[0]
    values = db_api.virtualserver_pack_extra(vip_dict)
    values['lb_id'] = balancer['id']
    values['sf_id'] = farm['id']
    # Default the VIP protocol from the balancer when the caller did
    # not supply one.
    extra = values.get('extra')
    if not extra:
        values['extra'] = {'protocol': balancer['protocol']}
    else:
        extra.setdefault('protocol', balancer['protocol'])
    vip = db_api.virtualserver_create(conf, values)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.create_vip(ctx, vip, farm)
    return db_api.unpack_extra(vip)
def lb_get_data(conf, lb_id):
    """Return the serialized load balancer without its VIP list.

    :param conf: service configuration passed through to the DB layer.
    :param lb_id: id of the load balancer to fetch.
    :returns: dict representation of the balancer, 'virtualIps' removed
              (VIPs are exposed through their own endpoint).
    """
    logger.debug("Getting information about loadbalancer with id: %s" % lb_id)
    lb = db_api.loadbalancer_get(conf, lb_id)
    lb_dict = db_api.unpack_extra(lb)
    # pop with a default replaces the explicit membership check.
    lb_dict.pop('virtualIps', None)
    # BUG FIX: the old code logged the builtin `list` ("Got information:
    # <type 'list'>") instead of the actual payload.
    logger.debug("Got information: %s" % lb_dict)
    return lb_dict
def lb_delete_sticky(conf, lb_id, sticky_id):
    """Remove a session-persistence (sticky) rule from a balancer.

    Deconfigures the rule on the device first, then deletes the DB
    record. Returns the id of the removed sticky.
    """
    balancer = db_api.loadbalancer_get(conf, lb_id)
    sticky = db_api.sticky_get(conf, sticky_id)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.remove_sticky_from_loadbalancer(ctx, balancer, sticky)
    db_api.sticky_destroy(conf, sticky_id)
    return sticky_id
def test_loadbalancer_get_with_tenant(self):
    """A lookup scoped to the owning tenant returns the balancer."""
    # A balancer for another tenant must not interfere with the lookup.
    db_api.loadbalancer_create(self.conf, get_fake_lb('1', 'tenant1'))
    created = db_api.loadbalancer_create(self.conf,
                                         get_fake_lb('2', 'tenant2'))
    fetched = db_api.loadbalancer_get(self.conf, created['id'],
                                      tenant_id='tenant2')
    # iteritems() kept: this is a Python 2 codebase.
    self.assertEqual(dict(created.iteritems()), dict(fetched.iteritems()))
def lb_delete_node(conf, lb_id, lb_node_id):
    """Delete one node (real server) from a load balancer.

    The DB record is destroyed first, then the node is removed from
    the device. Returns the id of the deleted node.
    """
    balancer = db_api.loadbalancer_get(conf, lb_id)
    farm = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    node = db_api.server_get(conf, lb_node_id)
    db_api.server_destroy(conf, lb_node_id)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.remove_node_from_loadbalancer(ctx, farm, node)
    return lb_node_id
def lb_delete_probe(conf, lb_id, probe_id):
    """Delete a health-monitor probe from a load balancer.

    The DB record is destroyed first, then the probe is removed from
    the device's server farm. Returns the id of the deleted probe.
    """
    balancer = db_api.loadbalancer_get(conf, lb_id)
    farm = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    probe = db_api.probe_get(conf, probe_id)
    db_api.probe_destroy(conf, probe_id)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.remove_probe_from_server_farm(ctx, farm, probe)
    return probe_id
def update_lb(conf, tenant_id, lb_id, lb_body):
    """Update a tenant's load balancer, rebuilding it on a device if needed.

    Persists *lb_body* onto the balancer row. If neither algorithm nor
    protocol changed, nothing touches the device. Otherwise the farm's
    predictor and/or VIP protocols are updated in the DB, the balancer
    is (possibly re-)scheduled to a device, deleted from the old device
    and recreated on the new one. On any device failure the balancer
    status is set to ERROR and the exception is re-raised; on success
    it is set to ACTIVE.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    old_lb_ref = copy.deepcopy(lb_ref)
    db_api.pack_update(lb_ref, lb_body)
    lb_ref = db_api.loadbalancer_update(conf, lb_id, lb_ref)
    # Fast path: only algorithm/protocol changes require device work.
    if (lb_ref['algorithm'] == old_lb_ref['algorithm'] and
            lb_ref['protocol'] == old_lb_ref['protocol']):
        LOG.debug("In LB %r algorithm and protocol have not changed, "
                  "nothing to do on the device %r.",
                  lb_ref['id'], lb_ref['device_id'])
        return
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])[0]
    # A changed algorithm is stored as the farm predictor's type.
    if lb_ref['algorithm'] != old_lb_ref['algorithm']:
        predictor_ref = db_api.predictor_get_by_sf_id(conf, sf_ref['id'])
        db_api.predictor_update(conf, predictor_ref['id'],
                                {'type': lb_ref['algorithm']})
    vips = db_api.virtualserver_get_all_by_sf_id(conf, sf_ref['id'])
    # A changed protocol is propagated to every VIP of the farm.
    if lb_ref['protocol'] != old_lb_ref['protocol']:
        vip_update_values = {'protocol': lb_ref['protocol']}
        for vip in vips:
            db_api.pack_update(vip, vip_update_values)
            db_api.virtualserver_update(conf, vip['id'], vip)
    servers = db_api.server_get_all_by_sf_id(conf, sf_ref['id'])
    probes = db_api.probe_get_all_by_sf_id(conf, sf_ref['id'])
    stickies = db_api.sticky_get_all_by_sf_id(conf, sf_ref['id'])
    # Rescheduling may move the balancer to a different device; in that
    # case tear down with the old driver and build with the new one.
    device_ref = scheduler.reschedule(conf, lb_ref)
    if device_ref['id'] != lb_ref['device_id']:
        from_driver = drivers.get_device_driver(conf, lb_ref['device_id'])
        to_driver = drivers.get_device_driver(conf, device_ref['id'])
        lb_ref = db_api.loadbalancer_update(conf, lb_ref['id'],
                                            {'device_id': device_ref['id']})
    else:
        from_driver = drivers.get_device_driver(conf, device_ref['id'])
        to_driver = from_driver
    with from_driver.request_context() as ctx:
        try:
            commands.delete_loadbalancer(ctx, sf_ref, vips, servers,
                                         probes, stickies)
        except Exception:
            # Flag the balancer broken, then propagate the failure.
            with utils.save_and_reraise_exception():
                db_api.loadbalancer_update(conf, lb_ref['id'],
                                           {'status': lb_status.ERROR})
    with to_driver.request_context() as ctx:
        try:
            commands.create_loadbalancer(ctx, sf_ref, vips, servers,
                                         probes, stickies)
        except Exception:
            with utils.save_and_reraise_exception():
                db_api.loadbalancer_update(conf, lb_ref['id'],
                                           {'status': lb_status.ERROR})
    db_api.loadbalancer_update(conf, lb_ref['id'],
                               {'status': lb_status.ACTIVE})
def lb_delete_vip(conf, lb_id, vip_id):
    """Delete a virtual IP from a load balancer.

    Removes the VIP from the device first, then destroys its DB record.
    """
    logger.debug("Called lb_delete_vip(), conf: %r, lb_id: %s, vip_id: %s",
                 conf, lb_id, vip_id)
    balancer = db_api.loadbalancer_get(conf, lb_id)
    vip = db_api.virtualserver_get(conf, vip_id)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.delete_vip(ctx, vip)
    db_api.virtualserver_destroy(conf, vip_id)
def lb_add_sticky(conf, lb_id, st):
    """Add a session-persistence (sticky) rule to a load balancer.

    Returns the unpacked sticky dict, or None when *st* carries no
    persistence type (historic best-effort validation).
    """
    logger.debug("Got new sticky description %s" % st)
    if st['persistenceType'] is None:
        return
    balancer = db_api.loadbalancer_get(conf, lb_id)
    farm = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    values = db_api.sticky_pack_extra(st)
    values['sf_id'] = farm['id']
    sticky = db_api.sticky_create(conf, values)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.add_sticky_to_loadbalancer(ctx, balancer, sticky)
    return db_api.unpack_extra(sticky)
def lb_update_node(conf, lb_id, lb_node_id, lb_node):
    """Update one node (real server) of a load balancer.

    The device cannot patch a server in place: the node is removed
    from the farm, the DB record updated, and the refreshed node
    re-added. Returns the updated node as a dict.
    """
    node = db_api.server_get(conf, lb_node_id)
    balancer = db_api.loadbalancer_get(conf, lb_id)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    farm = db_api.serverfarm_get(conf, node['sf_id'])
    with driver.request_context() as ctx:
        commands.delete_rserver_from_server_farm(ctx, farm, node)
        db_api.pack_update(node, lb_node)
        updated = db_api.server_update(conf, node['id'], node)
        commands.add_rserver_to_server_farm(ctx, farm, updated)
    return db_api.unpack_extra(updated)
def lb_add_nodes(conf, lb_id, nodes):
    """Add real servers (*nodes*) to a load balancer's server farm.

    :param conf: service configuration passed through to the DB layer.
    :param lb_id: id of the target load balancer.
    :param nodes: iterable of node description dicts.
    :returns: list of dicts for the created nodes.
    """
    nodes_list = []
    lb = db_api.loadbalancer_get(conf, lb_id)
    sf = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    # PERF FIX: the driver depends only on the balancer's device, so
    # resolve it once instead of once per node inside the loop.
    device_driver = drivers.get_device_driver(conf, lb['device_id'])
    for node in nodes:
        values = db_api.server_pack_extra(node)
        values['sf_id'] = sf['id']
        rs_ref = db_api.server_create(conf, values)
        with device_driver.request_context() as ctx:
            commands.add_node_to_loadbalancer(ctx, sf, rs_ref)
        nodes_list.append(db_api.unpack_extra(rs_ref))
    return nodes_list
def update_lb(conf, lb_id, lb_body):
    """Apply *lb_body* to a load balancer and push the change to its device.

    On device failure the balancer status is set to ERROR and the
    exception re-raised; on success the status becomes ACTIVE.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    old_lb_ref = copy.deepcopy(lb_ref)
    db_api.pack_update(lb_ref, lb_body)
    new_lb_ref = db_api.loadbalancer_update(conf, lb_id, lb_ref)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        try:
            commands.update_loadbalancer(ctx, old_lb_ref, new_lb_ref)
        except Exception:
            # Mark the balancer broken before propagating the failure.
            db_api.loadbalancer_update(conf, lb_id,
                                       {'status': lb_status.ERROR})
            raise
    db_api.loadbalancer_update(conf, lb_id, {'status': lb_status.ACTIVE})
def lb_show_details(conf, lb_id):
    """Return a full dict view of a balancer: nodes, VIPs, probes, stickies."""
    lb = db_api.loadbalancer_get(conf, lb_id)
    farm = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    sf_id = farm['id']
    vips = db_api.virtualserver_get_all_by_sf_id(conf, sf_id)
    servers = db_api.server_get_all_by_sf_id(conf, sf_id)
    probes = db_api.probe_get_all_by_sf_id(conf, sf_id)
    stickies = db_api.sticky_get_all_by_sf_id(conf, sf_id)
    details = db_api.unpack_extra(lb)
    details['nodes'] = [db_api.unpack_extra(srv) for srv in servers]
    details['virtualIps'] = [db_api.unpack_extra(vip) for vip in vips]
    details['healthMonitor'] = [db_api.unpack_extra(probe)
                                for probe in probes]
    details['sessionPersistence'] = [db_api.unpack_extra(sticky)
                                     for sticky in stickies]
    return details
def delete_lb(conf, tenant_id, lb_id):
    """Tear down a tenant's load balancer entirely.

    Removes the configuration from the device first, then purges every
    related DB record (probes, stickies, servers, VIPs, predictor,
    server farm) and finally the balancer itself.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])[0]
    sf_id = sf_ref['id']
    vips = db_api.virtualserver_get_all_by_sf_id(conf, sf_id)
    servers = db_api.server_get_all_by_sf_id(conf, sf_id)
    probes = db_api.probe_get_all_by_sf_id(conf, sf_id)
    stickies = db_api.sticky_get_all_by_sf_id(conf, sf_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.delete_loadbalancer(ctx, sf_ref, vips, servers,
                                     probes, stickies)
    db_api.probe_destroy_by_sf_id(conf, sf_id)
    db_api.sticky_destroy_by_sf_id(conf, sf_id)
    db_api.server_destroy_by_sf_id(conf, sf_id)
    db_api.virtualserver_destroy_by_sf_id(conf, sf_id)
    db_api.predictor_destroy_by_sf_id(conf, sf_id)
    db_api.serverfarm_destroy(conf, sf_id)
    db_api.loadbalancer_destroy(conf, lb_ref['id'])
def lb_add_vip(conf, lb_id, vip_dict):
    """Create a virtual IP on a load balancer.

    Raises exc.ServerFarmNotFound when the balancer has no server farm.
    Returns the created VIP as a dict.
    """
    logger.debug("Called lb_add_vip(), conf: %r, lb_id: %s, vip_dict: %r",
                 conf, lb_id, vip_dict)
    balancer = db_api.loadbalancer_get(conf, lb_id)
    # NOTE(akscram): server farms are really only create problems than
    # they solve multiply use of the virtual IPs.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, balancer['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    farm = farms[0]
    values = db_api.virtualserver_pack_extra(vip_dict)
    values['lb_id'] = balancer['id']
    values['sf_id'] = farm['id']
    vip = db_api.virtualserver_create(conf, values)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.create_vip(ctx, vip, farm)
    return db_api.unpack_extra(vip)
def lb_add_probe(conf, lb_id, probe_dict):
    """Add a health-monitor probe to a load balancer's server farm.

    Returns the created probe as a dict, or None when *probe_dict*
    carries no type (historic best-effort validation).
    Raises exc.ServerFarmNotFound when the balancer has no server farm.
    """
    logger.debug("Got new probe description %s" % probe_dict)
    # NOTE(akscram): historically strange validation, wrong place for it.
    if probe_dict['type'] is None:
        return
    balancer = db_api.loadbalancer_get(conf, lb_id)
    # NOTE(akscram): server farms are really only create problems than
    # they solve multiply use of the virtual IPs.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, balancer['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    farm = farms[0]
    values = db_api.probe_pack_extra(probe_dict)
    values['lb_id'] = balancer['id']
    values['sf_id'] = farm['id']
    probe = db_api.probe_create(conf, values)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.add_probe_to_loadbalancer(ctx, farm, probe)
    return db_api.unpack_extra(probe)
def lb_change_node_status(conf, lb_id, lb_node_id, lb_node_status):
    """Activate or suspend one node (rserver) of a load balancer.

    "inservice" activates the node on the device; any other status
    suspends it. Returns "OK" when the node is already in the requested
    state, otherwise the updated node as a dict.
    """
    lb = db_api.loadbalancer_get(conf, lb_id)
    rs = db_api.server_get(conf, lb_node_id)
    sf = db_api.serverfarm_get(conf, rs['sf_id'])
    # No-op when the node already has the requested state.
    if rs['state'] == lb_node_status:
        return "OK"
    rs['state'] = lb_node_status
    rsname = rs['name']
    # Child rservers appear to be addressed on the device by their
    # parent's name, so it is swapped in just for the device call.
    # NOTE(review): inferred from the swap/restore pattern — confirm.
    if rs['parent_id'] != "":
        rs['name'] = rs['parent_id']
    logger.debug("Changing RServer status to: %s" % lb_node_status)
    device_driver = drivers.get_device_driver(conf, lb['device_id'])
    with device_driver.request_context() as ctx:
        if lb_node_status == "inservice":
            commands.activate_rserver(ctx, sf, rs)
        else:
            commands.suspend_rserver(ctx, sf, rs)
    # Restore the real name before persisting the new state.
    rs['name'] = rsname
    db_api.server_update(conf, rs['id'], rs)
    return db_api.unpack_extra(rs)
def loadFromDB(self, lb_id):
    """Populate this object from the DB rows of load balancer *lb_id*.

    Loads the balancer, its first server farm, and the farm's VIPs,
    predictor, rservers, probes and stickies, mirroring the farm
    collections onto private attributes of the farm object.
    """
    self.lb = db_api.loadbalancer_get(self.conf, lb_id)
    self.sf = db_api.serverfarm_get_all_by_lb_id(self.conf, lb_id)[0]
    sf_id = self.sf['id']
    self.vips = db_api.virtualserver_get_all_by_sf_id(self.conf, sf_id)
    self.sf._predictor = db_api.predictor_get_all_by_sf_id(self.conf,
                                                           sf_id)[0]
    self.rs = db_api.server_get_all_by_sf_id(self.conf, sf_id)
    self.sf._rservers = list(self.rs)
    self.probes = db_api.probe_get_all_by_sf_id(self.conf, sf_id)
    self.sf._probes = list(self.probes)
    self.sf._sticky = list(db_api.sticky_get_all_by_sf_id(self.conf, sf_id))
def test_loadbalancer_get_with_tenant_fails(self):
    """A lookup scoped to a foreign tenant raises LoadBalancerNotFound."""
    created = db_api.loadbalancer_create(self.conf,
                                         get_fake_lb('1', 'tenant1'))
    with self.assertRaises(exception.LoadBalancerNotFound):
        db_api.loadbalancer_get(self.conf, created['id'],
                                tenant_id='tenant2')
def lb_get_data(conf, lb_id):
    """Return the serialized load balancer as a dict.

    :param conf: service configuration passed through to the DB layer.
    :param lb_id: id of the load balancer to fetch.
    :returns: dict representation of the balancer row.
    """
    logger.debug("Getting information about loadbalancer with id: %s" % lb_id)
    lb = db_api.loadbalancer_get(conf, lb_id)
    lb_dict = db_api.unpack_extra(lb)
    # BUG FIX: the old code logged the builtin `list` ("Got information:
    # <type 'list'>") instead of the actual payload.
    logger.debug("Got information: %s" % lb_dict)
    return lb_dict
def delete_lb(conf, lb_id):
    """Remove a load balancer's configuration from its device.

    Only the device is touched here; DB records are not destroyed.
    """
    balancer = db_api.loadbalancer_get(conf, lb_id)
    driver = drivers.get_device_driver(conf, balancer['device_id'])
    with driver.request_context() as ctx:
        commands.delete_loadbalancer(ctx, balancer)
def test_loadbalancer_get(self):
    """Fetching by id returns the record that was created."""
    created = db_api.loadbalancer_create(self.conf,
                                         get_fake_lb("1", "tenant1"))
    fetched = db_api.loadbalancer_get(self.conf, created["id"])
    # iteritems() kept: this is a Python 2 codebase.
    self.assertEqual(dict(created.iteritems()), dict(fetched.iteritems()))