def lb_show_details(conf, lb_id):
    """Assemble the full description of a load balancer.

    Unpacks the balancer row itself plus the nodes, virtual IPs,
    health probes and session stickies attached to its server farm.
    """
    balancer_row = db_api.loadbalancer_get(conf, lb_id)
    farm = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    farm_id = farm['id']
    unpack = db_api.unpack_extra
    result = unpack(balancer_row)
    result['nodes'] = [
        unpack(server)
        for server in db_api.server_get_all_by_sf_id(conf, farm_id)
    ]
    result['virtualIps'] = [
        unpack(vip)
        for vip in db_api.virtualserver_get_all_by_sf_id(conf, farm_id)
    ]
    result['healthMonitor'] = [
        unpack(probe)
        for probe in db_api.probe_get_all_by_sf_id(conf, farm_id)
    ]
    result['sessionPersistence'] = [
        unpack(sticky)
        for sticky in db_api.sticky_get_all_by_sf_id(conf, farm_id)
    ]
    return result
def lb_add_nodes(conf, lb_id, lb_nodes):
    """Append real servers to an existing balancer and deploy them.

    For each node description the balancer state is reloaded from the
    DB, the node is attached to the server farm, the state is written
    back, and the device driver is asked to wire the node in.
    """
    added = []
    balancer_instance = vserver.Balancer(conf)
    for node_desc in lb_nodes:
        logger.debug("Got new node description %s" % node_desc)
        # Reload-and-remove before re-saving the mutated state below.
        balancer_instance.loadFromDB(lb_id)
        balancer_instance.removeFromDB()
        server = db_api.server_pack_extra(node_desc)
        server['sf_id'] = balancer_instance.sf['id']
        balancer_instance.rs.append(server)
        balancer_instance.sf._rservers.append(server)
        balancer_instance.savetoDB()
        server = balancer_instance.rs[-1]
        device_driver = drivers.get_device_driver(
            conf, balancer_instance.lb['device_id'])
        with device_driver.request_context() as ctx:
            commands.add_node_to_loadbalancer(ctx, balancer_instance, server)
        added.append(db_api.unpack_extra(server))
    return {'nodes': added}
def lb_add_vip(conf, tenant_id, lb_id, vip_dict):
    """Create a virtual IP on a balancer and push it to the device."""
    LOG.debug("Called lb_add_vip(), conf: %r, lb_id: %s, vip_dict: %r",
              conf, lb_id, vip_dict)
    lb_ref = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    # A balancer is expected to own exactly one server farm; an empty
    # result means the balancer was never fully deployed.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    sf_ref = farms[0]
    values = db_api.virtualserver_pack_extra(vip_dict)
    values['lb_id'] = lb_ref['id']
    values['sf_id'] = sf_ref['id']
    # Inherit the balancer's protocol when the VIP did not specify one.
    extra = values.get('extra')
    if not extra:
        values['extra'] = {'protocol': lb_ref['protocol']}
    elif 'protocol' not in extra:
        extra['protocol'] = lb_ref['protocol']
    vip_ref = db_api.virtualserver_create(conf, values)
    device_driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with device_driver.request_context() as ctx:
        commands.create_vip(ctx, vip_ref, sf_ref)
    return db_api.unpack_extra(vip_ref)
def lb_change_node_status(conf, lb_id, lb_node_id, lb_node_status):
    """Activate or suspend a real server on its device.

    Returns "OK" unchanged when the node is already in the requested
    state, otherwise the updated node dict.
    """
    balancer_instance = vserver.Balancer(conf)
    balancer_instance.loadFromDB(lb_id)
    rs = db_api.server_get(conf, lb_node_id)
    sf = balancer_instance.sf
    if rs['state'] == lb_node_status:
        return "OK"
    rs['state'] = lb_node_status
    # A child node is addressed on the device by its parent's name;
    # the DB name is restored before saving.
    rsname = rs['name']
    if rs['parent_id'] != "":
        rs['name'] = rs['parent_id']
    logger.debug("Changing RServer status to: %s" % lb_node_status)
    # BUG FIX: get_device_driver() takes conf as its first argument
    # (matches every other call site in this module); it was missing here.
    device_driver = drivers.get_device_driver(
        conf, balancer_instance.lb['device_id'])
    with device_driver.request_context() as ctx:
        if lb_node_status == "inservice":
            commands.activate_rserver(ctx, sf, rs)
        else:
            commands.suspend_rserver(ctx, sf, rs)
    rs['name'] = rsname
    db_api.server_update(conf, rs['id'], rs)
    return db_api.unpack_extra(rs)
def lb_show_details(conf, lb_id):
    """Return a dict describing a load balancer and its members.

    Loads the balancer through the Balancer wrapper and unpacks the
    balancer row, its real servers, virtual IPs and probes.
    """
    # Removed commented-out Storage/reader code left over from an
    # earlier implementation.
    lb = vserver.Balancer(conf)
    lb.loadFromDB(lb_id)
    obj = {'loadbalancer': db_api.unpack_extra(lb.lb)}
    lbobj = obj['loadbalancer']
    lbobj['nodes'] = [db_api.unpack_extra(v) for v in lb.rs]
    lbobj['virtualIps'] = [db_api.unpack_extra(v) for v in lb.vips]
    lbobj['healthMonitor'] = [db_api.unpack_extra(v) for v in lb.probes]
    logger.debug("Getting information about loadbalancer with id: %s" % lb_id)
    logger.debug("Got information: %s" % lbobj)
    return lbobj
def create_loadbalancer(ctx, balancer, nodes, probes, vips):
    """Deploy a balancer and all of its members within a device context.

    Creates the server farm and predictor records, then creates and
    attaches every node, probe and virtual IP.
    """
    lb = db_api.unpack_extra(balancer)
    sf = db_api.serverfarm_create(ctx.conf, {'lb_id': lb['id']})
    predictor_params = {'sf_id': sf['id']}
    # Only pass a predictor type when the balancer specifies one.
    if 'algorithm' in lb:
        predictor_params['type'] = lb['algorithm']
    db_api.predictor_create(ctx.conf, predictor_params)
    create_server_farm(ctx, sf)
    for node in nodes:
        values = db_api.server_pack_extra(node)
        values['sf_id'] = sf['id']
        rs_ref = db_api.server_create(ctx.conf, values)
        create_rserver(ctx, rs_ref)
        add_rserver_to_server_farm(ctx, sf, rs_ref)
    for probe in probes:
        values = db_api.probe_pack_extra(probe)
        values['lb_id'] = lb['id']
        values['sf_id'] = sf['id']
        probe_ref = db_api.probe_create(ctx.conf, values)
        create_probe(ctx, probe_ref)
        add_probe_to_server_farm(ctx, sf, probe_ref)
    for vip in vips:
        values = db_api.virtualserver_pack_extra(vip)
        values['lb_id'] = lb['id']
        values['sf_id'] = sf['id']
        vip_ref = db_api.virtualserver_create(ctx.conf, values)
        create_vip(ctx, vip_ref, sf)
def create(self, req, body):
    """Validate the request body and create a new device record."""
    logger.debug("Got create request. Request: %s", req)
    params = body
    logger.debug("Request params: %s" % params)
    self._validate_params(params)
    created = core_api.device_create(self.conf, **params)
    return {"device": db_api.unpack_extra(created)}
def lb_get_data(conf, lb_id):
    """Return the unpacked balancer dict without its 'virtualIps' key."""
    logger.debug("Getting information about loadbalancer with id: %s" % lb_id)
    lb = db_api.loadbalancer_get(conf, lb_id)
    lb_dict = db_api.unpack_extra(lb)
    # pop with a default replaces the membership-test-then-pop dance.
    lb_dict.pop("virtualIps", None)
    # BUG FIX: previously logged the builtin `list` instead of the data.
    logger.debug("Got information: %s" % lb_dict)
    return lb_dict
def lb_show_nodes(conf, lb_id):
    """List the real servers attached to a balancer."""
    balancer = vserver.Balancer(conf)
    balancer.loadFromDB(lb_id)
    return {'nodes': [db_api.unpack_extra(server) for server in balancer.rs]}
def lb_get_index(conf, tenant_id=''):
    """List a tenant's balancers, stripping the 'virtualIps' key."""
    rows = db_api.loadbalancer_get_all_by_project(conf, tenant_id)
    balancers = []
    for row in rows:
        unpacked = db_api.unpack_extra(row)
        # VIPs are served by a dedicated endpoint, not the index.
        unpacked.pop('virtualIps', None)
        balancers.append(unpacked)
    return balancers
def show(self, req, **args):
    """Return a single device by id, or raise DeviceNotFound."""
    logger.debug("Got device data request. Request: %s Id: %s" %
                 (req, args['id']))
    try:
        device = db_api.device_get(self.conf, args['id'])
        result = db_api.unpack_extra(device)
    except exception.NotFound:
        # Translate the DB-layer error into the API-layer exception.
        raise exc.DeviceNotFound
    return {'device': result}
def test_unpack_extra(self):
    """unpack_extra() merges the 'extra' dict into the top level."""
    packed = {'name': 'fakename',
              'type': 'faketype',
              'extra': {'other': 'fakeother'}}
    unpacked = db_api.unpack_extra(packed)
    self.assertEqual(unpacked, {'name': 'fakename',
                                'type': 'faketype',
                                'other': 'fakeother'})
def lb_add_sticky(conf, lb_id, st):
    """Attach a session-persistence (sticky) rule to a balancer."""
    logger.debug("Got new sticky description %s" % st)
    # A missing persistence type is treated as "nothing to do".
    if st['persistenceType'] is None:
        return
    lb = db_api.loadbalancer_get(conf, lb_id)
    sf = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    values = db_api.sticky_pack_extra(st)
    values['sf_id'] = sf['id']
    sticky_ref = db_api.sticky_create(conf, values)
    driver = drivers.get_device_driver(conf, lb['device_id'])
    with driver.request_context() as ctx:
        commands.add_sticky_to_loadbalancer(ctx, lb, sticky_ref)
    return db_api.unpack_extra(sticky_ref)
def lb_update_node(conf, lb_id, lb_node_id, lb_node):
    """Update a node: detach it, apply the new values, reattach it."""
    rs = db_api.server_get(conf, lb_node_id)
    lb = db_api.loadbalancer_get(conf, lb_id)
    driver = drivers.get_device_driver(conf, lb['device_id'])
    sf = db_api.serverfarm_get(conf, rs['sf_id'])
    with driver.request_context() as ctx:
        # Remove-update-re-add so the device picks up the new values.
        commands.delete_rserver_from_server_farm(ctx, sf, rs)
        db_api.pack_update(rs, lb_node)
        new_rs = db_api.server_update(conf, rs['id'], rs)
        commands.add_rserver_to_server_farm(ctx, sf, new_rs)
    return db_api.unpack_extra(new_rs)
def lb_add_nodes(conf, lb_id, nodes):
    """Add real servers to a balancer and deploy each on its device.

    Returns a list of the created nodes as plain dicts.
    """
    nodes_list = []
    lb = db_api.loadbalancer_get(conf, lb_id)
    sf = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    # The device belongs to the balancer, not to a node, so the driver
    # lookup is loop-invariant and hoisted out of the loop.
    device_driver = drivers.get_device_driver(conf, lb['device_id'])
    for node in nodes:
        values = db_api.server_pack_extra(node)
        values['sf_id'] = sf['id']
        rs_ref = db_api.server_create(conf, values)
        with device_driver.request_context() as ctx:
            commands.add_node_to_loadbalancer(ctx, sf, rs_ref)
        nodes_list.append(db_api.unpack_extra(rs_ref))
    return nodes_list
def device_delete(conf, device_id):
    """Delete a device unless load balancers still reference it.

    Raises DeviceConflict (listing the balancer ids) when the device is
    in use; otherwise destroys the device record and its driver.
    """
    try:
        lb_refs = db_api.loadbalancer_get_all_by_device_id(conf, device_id)
    except exc.LoadBalancerNotFound:
        # No balancers on this device: safe to remove it.
        db_api.device_destroy(conf, device_id)
        drivers.delete_device_driver(conf, device_id)
        return
    # ROBUSTNESS: str() guards the join against non-string ids.
    lb_ids = [str(db_api.unpack_extra(lb_ref)['id']) for lb_ref in lb_refs]
    raise exc.DeviceConflict('Device %s is in use now by loadbalancers %s' %
                             (device_id, ', '.join(lb_ids)))
def lb_show_sticky(conf, lb_id):
    """Return the session-persistence entries for a balancer's farm.

    Raises ServerFarmNotFound when the balancer owns no server farm.
    """
    try:
        sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    except IndexError:
        raise exc.ServerFarmNotFound
    stickies = db_api.sticky_get_all_by_sf_id(conf, sf_ref['id'])
    # Renamed locals: the originals shadowed the builtins `list`/`dict`.
    return {'sessionPersistence':
            [db_api.unpack_extra(sticky) for sticky in stickies]}
def lb_show_probes(conf, lb_id):
    """Return the health-monitoring probes for a balancer's farm.

    Raises ServerFarmNotFound when the balancer owns no server farm.
    """
    try:
        sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    except IndexError:
        raise exc.ServerFarmNotFound
    probes = db_api.probe_get_all_by_sf_id(conf, sf_ref['id'])
    # Renamed locals: the originals shadowed the builtins `list`/`dict`.
    return {'healthMonitoring':
            [db_api.unpack_extra(probe) for probe in probes]}
def lb_add_vip(conf, lb_id, vip_dict):
    """Create a virtual IP for a balancer and deploy it on the device."""
    logger.debug("Called lb_add_vip(), conf: %r, lb_id: %s, vip_dict: %r",
                 conf, lb_id, vip_dict)
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    # Every balancer is expected to own exactly one server farm.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    sf_ref = farms[0]
    values = db_api.virtualserver_pack_extra(vip_dict)
    values['lb_id'] = lb_ref['id']
    values['sf_id'] = sf_ref['id']
    vip_ref = db_api.virtualserver_create(conf, values)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.create_vip(ctx, vip_ref, sf_ref)
    return db_api.unpack_extra(vip_ref)
def lb_change_node_status(conf, tenant_id, lb_id, lb_node_id, lb_node_status):
    """Activate or suspend a real server on its device.

    A node whose parent_id is set is addressed on the device by the
    parent's name; the DB name is restored before saving. Returns "OK"
    unchanged when the node is already in the requested state.
    """
    lb = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    rs = db_api.server_get(conf, lb_node_id)
    sf = db_api.serverfarm_get(conf, rs['sf_id'])
    if rs['state'] == lb_node_status:
        return "OK"
    rs['state'] = lb_node_status
    saved_name = rs['name']
    if rs['parent_id'] != "":
        rs['name'] = rs['parent_id']
    LOG.debug("Changing RServer status to: %s" % lb_node_status)
    driver = drivers.get_device_driver(conf, lb['device_id'])
    with driver.request_context() as ctx:
        if lb_node_status == "inservice":
            commands.activate_rserver(ctx, sf, rs)
        else:
            commands.suspend_rserver(ctx, sf, rs)
    rs['name'] = saved_name
    db_api.server_update(conf, rs['id'], rs)
    return db_api.unpack_extra(rs)
def lb_add_probe(conf, lb_id, probe_dict):
    """Create a health probe on a balancer and deploy it on the device."""
    logger.debug("Got new probe description %s" % probe_dict)
    # Historical quirk: a probe without a type is silently ignored.
    if probe_dict['type'] is None:
        return
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    # A balancer is expected to own exactly one server farm.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    sf_ref = farms[0]
    values = db_api.probe_pack_extra(probe_dict)
    values['lb_id'] = lb_ref['id']
    values['sf_id'] = sf_ref['id']
    probe_ref = db_api.probe_create(conf, values)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.add_probe_to_loadbalancer(ctx, sf_ref, probe_ref)
    return db_api.unpack_extra(probe_ref)
def create_lb(conf, params):
    """Create a load balancer, schedule it onto a device and deploy it.

    Returns the new balancer's id. On a deploy failure the balancer
    record is kept with status ERROR / deployed='False' rather than
    being deleted, so the failure stays visible.
    """
    # Members are carried alongside the balancer params and deployed
    # separately by commands.create_loadbalancer().
    nodes = params.pop('nodes', [])
    probes = params.pop('healthMonitor', [])
    vips = params.pop('virtualIps', [])
    values = db_api.loadbalancer_pack_extra(params)
    lb_ref = db_api.loadbalancer_create(conf, values)
    device = scheduler.schedule_loadbalancer(conf, lb_ref)
    device_driver = drivers.get_device_driver(conf, device['id'])
    # Record the scheduled device on the balancer before deploying:
    # unpack, set device_id, re-pack into a fresh reference.
    lb = db_api.unpack_extra(lb_ref)
    lb['device_id'] = device['id']
    lb_ref = db_api.loadbalancer_pack_extra(lb)
    try:
        with device_driver.request_context() as ctx:
            commands.create_loadbalancer(ctx, lb_ref, nodes, probes, vips)
    except (exception.Error, exception.Invalid):
        # Deploy failed: persist the error state instead of raising.
        lb_ref.status = lb_status.ERROR
        lb_ref.deployed = 'False'
    else:
        lb_ref.status = lb_status.ACTIVE
        lb_ref.deployed = 'True'
    db_api.loadbalancer_update(conf, lb['id'], lb_ref)
    return lb_ref['id']
def show(self, req, id):
    """Fetch one device record and return it unpacked."""
    logger.debug("Got device data request. Request: %s" % req)
    device = db_api.device_get(self.conf, id)
    result = db_api.unpack_extra(device)
    return {'device': result}
def lb_get_index(conf, tenant_id=''):
    """List all load balancers belonging to a tenant as plain dicts."""
    rows = db_api.loadbalancer_get_all_by_project(conf, tenant_id)
    return [db_api.unpack_extra(row) for row in rows]
def show(self, req, tenant_id, lb_id, probe_id):
    """Return one health probe, scoped to the tenant."""
    LOG.debug("Got showProbe request. Request: %s", req)
    probe_ref = db_api.probe_get(self.conf, probe_id, tenant_id=tenant_id)
    return {"healthMonitoring": db_api.unpack_extra(probe_ref)}
def lb_get_data(conf, lb_id):
    """Return the unpacked dict for a single load balancer."""
    logger.debug("Getting information about loadbalancer with id: %s" % lb_id)
    lb = db_api.loadbalancer_get(conf, lb_id)
    lb_dict = db_api.unpack_extra(lb)
    # BUG FIX: previously logged the builtin `list` instead of the data.
    logger.debug("Got information: %s" % lb_dict)
    return lb_dict
def show(self, req, lb_id, id):
    """Return one virtual IP of a balancer."""
    LOG.debug("Called show(), req: %r, lb_id: %s, id: %s", req, lb_id, id)
    vip = db_api.virtualserver_get(self.conf, id)
    return {'virtualIp': db_api.unpack_extra(vip)}
def showNode(self, req, lb_id, lb_node_id):
    """Return one node of a balancer."""
    logger.debug("Got showNode request. Request: %s", req)
    node = db_api.server_get(self.conf, lb_node_id, lb_id)
    return {'node': db_api.unpack_extra(node)}
def device_get_index(conf):
    """List every known device as an unpacked dict."""
    return [db_api.unpack_extra(dev)
            for dev in db_api.device_get_all(conf)]
def lb_find_for_vm(conf, vm_id, tenant_id=''):
    """Find the balancers that reference the given VM."""
    rows = db_api.loadbalancer_get_all_by_vm_id(conf, vm_id, tenant_id)
    return [db_api.unpack_extra(row) for row in rows]