def update_lb(conf, tenant_id, lb_id, lb_body):
    """Update a tenant's load balancer and redeploy it if needed.

    Persists the requested changes, and when either the algorithm or the
    protocol changed, tears the LB down on its current device and
    recreates it on the (possibly different) device chosen by the
    scheduler.  On any deployment failure the LB status is set to ERROR
    and the exception is re-raised.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    # Keep a pristine copy so we can detect which fields actually changed.
    old_lb_ref = copy.deepcopy(lb_ref)
    db_api.pack_update(lb_ref, lb_body)
    lb_ref = db_api.loadbalancer_update(conf, lb_id, lb_ref)
    # Only algorithm/protocol changes require touching the device.
    if (lb_ref['algorithm'] == old_lb_ref['algorithm'] and
        lb_ref['protocol'] == old_lb_ref['protocol']):
        LOG.debug("In LB %r algorithm and protocol have not changed, "
                  "nothing to do on the device %r.",
                  lb_ref['id'], lb_ref['device_id'])
        return
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])[0]
    if lb_ref['algorithm'] != old_lb_ref['algorithm']:
        # The predictor row mirrors the LB's balancing algorithm.
        predictor_ref = db_api.predictor_get_by_sf_id(conf, sf_ref['id'])
        db_api.predictor_update(conf, predictor_ref['id'],
                                {'type': lb_ref['algorithm']})
    vips = db_api.virtualserver_get_all_by_sf_id(conf, sf_ref['id'])
    if lb_ref['protocol'] != old_lb_ref['protocol']:
        # Propagate the new protocol to every virtual server of the farm.
        vip_update_values = {'protocol': lb_ref['protocol']}
        for vip in vips:
            db_api.pack_update(vip, vip_update_values)
            db_api.virtualserver_update(conf, vip['id'], vip)
    servers = db_api.server_get_all_by_sf_id(conf, sf_ref['id'])
    probes = db_api.probe_get_all_by_sf_id(conf, sf_ref['id'])
    stickies = db_api.sticky_get_all_by_sf_id(conf, sf_ref['id'])
    # The updated LB may no longer fit on its current device (capabilities).
    device_ref = scheduler.reschedule(conf, lb_ref)
    if device_ref['id'] != lb_ref['device_id']:
        # Migration: delete on the old device, create on the new one.
        from_driver = drivers.get_device_driver(conf, lb_ref['device_id'])
        to_driver = drivers.get_device_driver(conf, device_ref['id'])
        lb_ref = db_api.loadbalancer_update(conf, lb_ref['id'],
                                            {'device_id': device_ref['id']})
    else:
        from_driver = drivers.get_device_driver(conf, device_ref['id'])
        to_driver = from_driver
    with from_driver.request_context() as ctx:
        try:
            commands.delete_loadbalancer(ctx, sf_ref, vips, servers,
                                         probes, stickies)
        except Exception:
            # Mark ERROR but let the original exception propagate.
            with utils.save_and_reraise_exception():
                db_api.loadbalancer_update(conf, lb_ref['id'],
                                           {'status': lb_status.ERROR})
    with to_driver.request_context() as ctx:
        try:
            commands.create_loadbalancer(ctx, sf_ref, vips, servers,
                                         probes, stickies)
        except Exception:
            with utils.save_and_reraise_exception():
                db_api.loadbalancer_update(conf, lb_ref['id'],
                                           {'status': lb_status.ERROR})
    db_api.loadbalancer_update(conf, lb_ref['id'],
                               {'status': lb_status.ACTIVE})
def create_lb(conf, **params): balancer_instance = vserver.Balancer(conf) #Step 1. Parse parameters came from request balancer_instance.parseParams(params) bal_instance = scheduller.Scheduller.Instance(conf) # device = sched.getDevice() device = bal_instance.getDeviceByID(balancer_instance.lb['device_id']) lb = balancer_instance.getLB() lb['device_id'] = device['id'] #Step 2. Save config in DB balancer_instance.savetoDB() #Step 3. Deploy config to device device_driver = drivers.get_device_driver(conf, device['id']) try: with device_driver.request_context() as ctx: commands.create_loadbalancer(ctx, balancer_instance) except (exception.Error, exception.Invalid): balancer_instance.lb.status = lb_status.ERROR else: balancer_instance.lb.status = lb_status.ACTIVE balancer_instance.update() #balancer_instance.lb.status = \ # balancer.loadbalancers.loadbalancer.LB_ACTIVE_STATUS #balancer_instance.update() #self._task.status = STATUS_DONE return lb['id']
def lb_add_probe(conf, lb_id, lb_probe):
    """Attach a health-monitoring probe to a load balancer.

    Returns the new probe id, or None when no probe type is given.
    """
    logger.debug("Got new probe description %s" % lb_probe)
    # NOTE(review): silently doing nothing on a missing type looks like
    # validation that belongs to the API layer — confirm callers rely on it.
    if lb_probe['type'] is None:
        return
    balancer_instance = vserver.Balancer(conf)
    balancer_instance.loadFromDB(lb_id)
    # The whole balancer is removed and re-saved so the new probe is
    # persisted together with the rest of the configuration.
    balancer_instance.removeFromDB()
    prb = db_api.probe_pack_extra(lb_probe)
    prb['sf_id'] = balancer_instance.sf['id']
    balancer_instance.probes.append(prb)
    balancer_instance.sf._probes.append(prb)
    balancer_instance.savetoDB()
    # Re-read the probe so it carries the DB-assigned id.
    prb = balancer_instance.probes[-1]
    device_driver = drivers.get_device_driver(conf,
                                              balancer_instance.lb['device_id'])
    with device_driver.request_context() as ctx:
        commands.add_probe_to_loadbalancer(ctx, balancer_instance, prb)
    return prb['id']
def lb_change_node_status(conf, lb_id, lb_node_id, lb_node_status):
    """Activate or suspend a real server (node) on the device.

    ``lb_node_status`` of "inservice" activates the node; any other
    value suspends it.  Returns a human-readable status string.
    """
    balancer_instance = vserver.Balancer(conf)
    balancer_instance.loadFromDB(lb_id)
    rs = db_api.server_get(conf, lb_node_id)
    sf = balancer_instance.sf
    if rs['state'] == lb_node_status:
        return "OK"
    rs['state'] = lb_node_status
    # For child servers the device knows them under the parent's name,
    # so temporarily swap the name for the device call.
    rsname = rs['name']
    if rs['parent_id'] != "":
        rs['name'] = rs['parent_id']
    logger.debug("Changing RServer status to: %s" % lb_node_status)
    # BUGFIX: get_device_driver() takes (conf, device_id); the original
    # call dropped `conf` and passed the device id as the config.
    device_driver = drivers.get_device_driver(
        conf, balancer_instance.lb['device_id'])
    with device_driver.request_context() as ctx:
        if lb_node_status == "inservice":
            commands.activate_rserver(ctx, sf, rs)
        else:
            commands.suspend_rserver(ctx, sf, rs)
    # Restore the real name before persisting.
    rs['name'] = rsname
    db_api.server_update(conf, rs['id'], rs)
    return "Node %s has status %s" % (lb_node_id, rs['state'])
def lb_add_nodes(conf, lb_id, lb_nodes):
    """Add several real servers (nodes) to a load balancer.

    Returns ``{'nodes': [{'id': ...}, ...]}`` with the new node ids.
    """
    id_list = []
    balancer_instance = vserver.Balancer(conf)
    for lb_node in lb_nodes:
        logger.debug("Got new node description %s" % lb_node)
        # Reload + remove + re-save per node so each node is persisted
        # together with the full balancer configuration.
        balancer_instance.loadFromDB(lb_id)
        balancer_instance.removeFromDB()
        rs = db_api.server_pack_extra(lb_node)
        rs['sf_id'] = balancer_instance.sf['id']
        balancer_instance.rs.append(rs)
        balancer_instance.sf._rservers.append(rs)
        balancer_instance.savetoDB()
        # Re-read the node so it carries the DB-assigned id.
        rs = balancer_instance.rs[-1]
        device_driver = drivers.get_device_driver(conf, balancer_instance.\
                                                  lb['device_id'])
        with device_driver.request_context() as ctx:
            commands.add_node_to_loadbalancer(ctx, balancer_instance, rs)
        id_list.append({'id': rs['id']})
    return {'nodes': id_list}
def lb_add_vip(conf, tenant_id, lb_id, vip_dict):
    """Create a virtual IP for a tenant's load balancer and deploy it.

    The VIP inherits the LB's protocol when the request does not
    specify one.  Returns the unpacked virtual server record.
    """
    LOG.debug("Called lb_add_vip(), conf: %r, lb_id: %s, vip_dict: %r",
              conf, lb_id, vip_dict)
    lb_ref = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    # One LB maps to exactly one server farm; an empty result means the
    # farm was never created.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    sf_ref = farms[0]
    values = db_api.virtualserver_pack_extra(vip_dict)
    values['lb_id'] = lb_ref['id']
    values['sf_id'] = sf_ref['id']
    # Default the VIP protocol from the load balancer when absent.
    extra = values.get('extra')
    if not extra:
        values['extra'] = {'protocol': lb_ref['protocol']}
    else:
        extra.setdefault('protocol', lb_ref['protocol'])
    vip_ref = db_api.virtualserver_create(conf, values)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.create_vip(ctx, vip_ref, sf_ref)
    return db_api.unpack_extra(vip_ref)
def lb_delete_sticky(conf, lb_id, sticky_id):
    """Remove a sticky-session rule from the device, then from the DB.

    Returns the deleted sticky id.
    """
    sticky_ref = db_api.sticky_get(conf, sticky_id)
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.remove_sticky_from_loadbalancer(ctx, lb_ref, sticky_ref)
    # Only drop the DB row once the device no longer references it.
    db_api.sticky_destroy(conf, sticky_id)
    return sticky_id
def lb_delete_vip(conf, lb_id, vip_id):
    """Delete a virtual IP from the device and from the DB."""
    logger.debug("Called lb_delete_vip(), conf: %r, lb_id: %s, vip_id: %s",
                 conf, lb_id, vip_id)
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    vip_ref = db_api.virtualserver_get(conf, vip_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.delete_vip(ctx, vip_ref)
    # Remove the DB row only after the device delete succeeded.
    db_api.virtualserver_destroy(conf, vip_id)
def lb_delete_probe(conf, lb_id, probe_id):
    """Delete a health probe: drop the DB row, then clean the device.

    Returns the deleted probe id.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    probe_ref = db_api.probe_get(conf, probe_id)
    # DB row goes first; the device cleanup follows.
    db_api.probe_destroy(conf, probe_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.remove_probe_from_server_farm(ctx, sf_ref, probe_ref)
    return probe_id
def lb_delete_node(conf, lb_id, lb_node_id):
    """Delete a real server (node): drop the DB row, then clean the device.

    Returns the deleted node id.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    node_ref = db_api.server_get(conf, lb_node_id)
    # DB row goes first; the device cleanup follows.
    db_api.server_destroy(conf, lb_node_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.remove_node_from_loadbalancer(ctx, sf_ref, node_ref)
    return lb_node_id
def device_show_algorithms(conf):
    """Return the union of balancing algorithms supported by all devices.

    A device whose driver cannot be queried is skipped with a warning
    (mirrors device_show_protocols) instead of aborting the whole
    listing, so one broken device no longer hides every other device's
    capabilities.
    """
    devices = db_api.device_get_all(conf)
    algorithms = []
    for device in devices:
        try:
            device_driver = drivers.get_device_driver(conf, device['id'])
            capabilities = device_driver.get_capabilities()
            if capabilities is not None:
                algorithms += [a for a in capabilities.get('algorithms', [])
                               if a not in algorithms]
        except Exception:
            LOG.warn('Failed to get supported algorithms of device %s',
                     device['name'], exc_info=True)
    return algorithms
def device_show_protocols(conf):
    """Return the union of protocols supported by all known devices."""
    supported = []
    for device in db_api.device_get_all(conf):
        driver = drivers.get_device_driver(conf, device['id'])
        caps = driver.get_capabilities()
        if caps is None:
            continue
        # Preserve first-seen order while de-duplicating.
        for proto in caps.get('protocols', []):
            if proto not in supported:
                supported.append(proto)
    return supported
def lb_update_node(conf, lb_id, lb_node_id, lb_node):
    """Update a real server (node) by re-creating it on the device.

    The node is deleted from the server farm, updated in the DB, and
    added back within the same device request context.  Returns the
    unpacked updated node record.
    """
    node_ref = db_api.server_get(conf, lb_node_id)
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    farm_ref = db_api.serverfarm_get(conf, node_ref['sf_id'])
    with driver.request_context() as ctx:
        commands.delete_rserver_from_server_farm(ctx, farm_ref, node_ref)
        db_api.pack_update(node_ref, lb_node)
        updated_ref = db_api.server_update(conf, node_ref['id'], node_ref)
        commands.add_rserver_to_server_farm(ctx, farm_ref, updated_ref)
    return db_api.unpack_extra(updated_ref)
def lb_add_sticky(conf, lb_id, st):
    """Create a sticky-session rule for a load balancer and deploy it.

    Returns the unpacked sticky record, or None when no persistence
    type is given.
    """
    logger.debug("Got new sticky description %s" % st)
    if st['persistenceType'] is None:
        return
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    values = db_api.sticky_pack_extra(st)
    values['sf_id'] = sf_ref['id']
    sticky_ref = db_api.sticky_create(conf, values)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.add_sticky_to_loadbalancer(ctx, lb_ref, sticky_ref)
    return db_api.unpack_extra(sticky_ref)
def lb_add_nodes(conf, lb_id, nodes):
    """Add several real servers (nodes) to a load balancer.

    Each node is created in the DB and then pushed to the device.
    Returns the list of unpacked node records.

    The device-driver lookup is loop-invariant (the LB lives on one
    device), so it is hoisted out of the per-node loop instead of being
    re-resolved for every node.
    """
    lb = db_api.loadbalancer_get(conf, lb_id)
    sf = db_api.serverfarm_get_all_by_lb_id(conf, lb_id)[0]
    device_driver = drivers.get_device_driver(conf, lb['device_id'])
    nodes_list = []
    for node in nodes:
        values = db_api.server_pack_extra(node)
        values['sf_id'] = sf['id']
        rs_ref = db_api.server_create(conf, values)
        with device_driver.request_context() as ctx:
            commands.add_node_to_loadbalancer(ctx, sf, rs_ref)
        nodes_list.append(db_api.unpack_extra(rs_ref))
    return nodes_list
def create_lb(conf, params):
    """Create a load balancer with its farm, VIPs, nodes and probes.

    Builds the whole object graph in the DB, schedules a device,
    deploys the configuration, and marks the LB ACTIVE/deployed on
    success or ERROR/not-deployed on failure (re-raising the error).
    Returns the new LB id.
    """
    # Child collections are pulled out before packing the LB itself.
    node_values = params.pop('nodes', [])
    probe_values = params.pop('healthMonitor', [])
    vip_values = params.pop('virtualIps', [])
    lb_values = db_api.loadbalancer_pack_extra(params)
    lb_ref = db_api.loadbalancer_create(conf, lb_values)
    sf_ref = db_api.serverfarm_create(conf, {'lb_id': lb_ref['id']})
    # The predictor row mirrors the LB's balancing algorithm.
    db_api.predictor_create(conf, {'sf_id': sf_ref['id'],
                                   'type': lb_ref['algorithm']})
    # Every VIP inherits the LB protocol.
    vip_update_values = {'protocol': lb_ref['protocol']}
    vips = []
    for vip in vip_values:
        vip = db_api.virtualserver_pack_extra(vip)
        db_api.pack_update(vip, vip_update_values)
        vip['lb_id'] = lb_ref['id']
        vip['sf_id'] = sf_ref['id']
        vips.append(db_api.virtualserver_create(conf, vip))
    servers = []
    for server in node_values:
        server = db_api.server_pack_extra(server)
        server['sf_id'] = sf_ref['id']
        servers.append(db_api.server_create(conf, server))
    probes = []
    for probe in probe_values:
        probe = db_api.probe_pack_extra(probe)
        probe['lb_id'] = lb_ref['id']
        probe['sf_id'] = sf_ref['id']
        probes.append(db_api.probe_create(conf, probe))
    # Pick a device only after the full configuration is known.
    device_ref = scheduler.schedule(conf, lb_ref)
    db_api.loadbalancer_update(conf, lb_ref['id'],
                               {'device_id': device_ref['id']})
    device_driver = drivers.get_device_driver(conf, device_ref['id'])
    with device_driver.request_context() as ctx:
        try:
            # No stickies at creation time, hence the empty list.
            commands.create_loadbalancer(ctx, sf_ref, vips, servers,
                                         probes, [])
        except Exception:
            # Mark ERROR but let the original exception propagate.
            with utils.save_and_reraise_exception():
                db_api.loadbalancer_update(conf, lb_ref['id'],
                                           {'status': lb_status.ERROR,
                                            'deployed': False})
    db_api.loadbalancer_update(conf, lb_ref['id'],
                               {'status': lb_status.ACTIVE,
                                'deployed': True})
    return lb_ref['id']
def device_show_protocols(conf):
    """Return the union of protocols supported by all known devices.

    Devices whose driver cannot be queried are skipped with a warning.
    """
    protocols = []
    for device in db_api.device_get_all(conf):
        try:
            driver = drivers.get_device_driver(conf, device['id'])
            caps = driver.get_capabilities()
            if caps is not None:
                # Preserve first-seen order while de-duplicating.
                for proto in caps.get('protocols', []):
                    if proto not in protocols:
                        protocols.append(proto)
        except Exception:
            LOG.warn('Failed to get supported protocols of device %s',
                     device['name'], exc_info=True)
    return protocols
def update_lb(conf, lb_id, lb_body):
    """Apply an update to a load balancer and push it to its device.

    Sets the LB status to ACTIVE on success; on failure sets it to
    ERROR and re-raises the driver exception.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    # Snapshot the pre-update state for the driver's diffing logic.
    previous = copy.deepcopy(lb_ref)
    db_api.pack_update(lb_ref, lb_body)
    updated = db_api.loadbalancer_update(conf, lb_id, lb_ref)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        try:
            commands.update_loadbalancer(ctx, previous, updated)
        except Exception:
            db_api.loadbalancer_update(conf, lb_id,
                                       {'status': lb_status.ERROR})
            raise
    db_api.loadbalancer_update(conf, lb_id, {'status': lb_status.ACTIVE})
def lb_delete_node(conf, lb_id, lb_node_id):
    """Delete a real server (node): drop the DB row, then clean the device.

    Returns the deleted node id.
    """
    balancer = vserver.Balancer(conf)
    balancer.loadFromDB(lb_id)
    node_ref = db_api.server_get(conf, lb_node_id)
    # DB row goes first; the device cleanup follows.
    db_api.server_destroy(conf, lb_node_id)
    driver = drivers.get_device_driver(conf, balancer.lb['device_id'])
    with driver.request_context() as ctx:
        commands.remove_node_from_loadbalancer(ctx, balancer, node_ref)
    return lb_node_id
def update_lb(conf, lb_id, lb_body):
    """Update a load balancer's attributes and redeploy it.

    Returns the LB id on success, or None when deployment failed (the
    LB is then marked ERROR).
    """
    # Step 1. Load the LB from the DB — twice, because the driver needs
    # both the old and the new configuration to compute the diff.
    # TODO: add a clone function to vserver.Balancer to avoid the double read.
    old_balancer_instance = vserver.Balancer(conf)
    balancer_instance = vserver.Balancer(conf)
    logger.debug("Loading LB data from DB for Lb id: %s" % lb_id)
    balancer_instance.loadFromDB(lb_id)
    old_balancer_instance.loadFromDB(lb_id)
    # Step 2. Apply the attributes that came with the request.
    lb = balancer_instance.lb
    for key in lb_body.keys():
        if hasattr(lb, key):
            logger.debug("updating attribute %s of LB. Value is %s"
                         % (key, lb_body[key]))
            setattr(lb, key, lb_body[key])
            if key.lower() == "algorithm":
                # The predictor mirrors the balancing algorithm.
                balancer_instance.sf._predictor = \
                    db_api.predictor_pack_extra({'type': lb_body[key]})
        else:
            logger.debug("Got unknown attribute %s of LB. Value is %s"
                         % (key, lb_body[key]))
    # Step 3. Save the updated data in the DB.
    lb.status = lb_status.PENDING_UPDATE
    balancer_instance.update()
    # Step 4. Deploy the new configuration to the device.
    device_driver = drivers.get_device_driver(conf, lb['device_id'])
    try:
        with device_driver.request_context() as ctx:
            commands.update_loadbalancer(ctx, old_balancer_instance,
                                         balancer_instance)
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow it to Exception.
    except Exception:
        old_balancer_instance.lb.status = lb_status.ERROR
        old_balancer_instance.update()
        return
    return lb['id']
def delete_lb(conf, lb_id):
    """Tear down a load balancer on its device, then purge it from the DB."""
    balancer = vserver.Balancer(conf)
    balancer.loadFromDB(lb_id)
    driver = drivers.get_device_driver(conf, balancer.lb['device_id'])
    with driver.request_context() as ctx:
        commands.delete_loadbalancer(ctx, balancer)
    # Only remove the DB records once the device config is gone.
    balancer.removeFromDB()
def delete_lb(conf, tenant_id, lb_id):
    """Delete a tenant's load balancer from the device and the DB.

    The device configuration is removed first; the DB rows are then
    destroyed children-first so no orphaned references remain.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id, tenant_id=tenant_id)
    sf_ref = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])[0]
    sf_id = sf_ref['id']
    vips = db_api.virtualserver_get_all_by_sf_id(conf, sf_id)
    servers = db_api.server_get_all_by_sf_id(conf, sf_id)
    probes = db_api.probe_get_all_by_sf_id(conf, sf_id)
    stickies = db_api.sticky_get_all_by_sf_id(conf, sf_id)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.delete_loadbalancer(ctx, sf_ref, vips, servers,
                                     probes, stickies)
    # Children first, then the farm, then the LB itself.
    for destroy in (db_api.probe_destroy_by_sf_id,
                    db_api.sticky_destroy_by_sf_id,
                    db_api.server_destroy_by_sf_id,
                    db_api.virtualserver_destroy_by_sf_id,
                    db_api.predictor_destroy_by_sf_id):
        destroy(conf, sf_id)
    db_api.serverfarm_destroy(conf, sf_id)
    db_api.loadbalancer_destroy(conf, lb_ref['id'])
def lb_delete_sticky(conf, lb_id, sticky_id):
    """Remove a sticky-session rule from the device, then from the DB.

    Returns the deleted sticky id.
    """
    balancer = vserver.Balancer(conf)
    balancer.loadFromDB(lb_id)
    sticky_ref = db_api.sticky_get(conf, sticky_id)
    driver = drivers.get_device_driver(conf, balancer.lb['device_id'])
    with driver.request_context() as ctx:
        commands.remove_sticky_from_loadbalancer(ctx, balancer, sticky_ref)
    # Only drop the DB row once the device no longer references it.
    db_api.sticky_destroy(conf, sticky_id)
    return sticky_id
def lb_delete_probe(conf, lb_id, probe_id):
    """Delete a health probe: drop the DB row, then clean the device.

    Returns the deleted probe id.
    """
    balancer = vserver.Balancer(conf)
    balancer.loadFromDB(lb_id)
    probe_ref = db_api.probe_get(conf, probe_id)
    # DB row goes first; the device cleanup follows.
    db_api.probe_destroy(conf, probe_id)
    driver = drivers.get_device_driver(conf, balancer.lb['device_id'])
    with driver.request_context() as ctx:
        commands.remove_probe_from_server_farm(ctx, balancer, probe_ref)
    return probe_id
def lb_add_vip(conf, lb_id, vip_dict):
    """Create a virtual IP for a load balancer and deploy it.

    Returns the unpacked virtual server record.  Raises
    ServerFarmNotFound when the LB has no server farm.
    """
    logger.debug("Called lb_add_vip(), conf: %r, lb_id: %s, vip_dict: %r",
                 conf, lb_id, vip_dict)
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    # One LB maps to exactly one server farm; an empty result means the
    # farm was never created.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    sf_ref = farms[0]
    values = db_api.virtualserver_pack_extra(vip_dict)
    values['lb_id'] = lb_ref['id']
    values['sf_id'] = sf_ref['id']
    vip_ref = db_api.virtualserver_create(conf, values)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.create_vip(ctx, vip_ref, sf_ref)
    return db_api.unpack_extra(vip_ref)
def filter_capabilities(conf, lb_ref, dev_ref):
    """Return True when the device supports every required LB capability.

    The options to check come from the ``device_filter_capabilities``
    config list (registered on first use, default ``['algorithm']``).
    For each option present on the LB, the device's pluralized
    capability list (e.g. 'algorithms') must contain the LB's value.
    """
    try:
        device_filter_capabilities = conf.device_filter_capabilities
    except cfg.NoSuchOptError:
        # Lazily register the option so a bare config still works.
        conf.register_opt(cfg.ListOpt('device_filter_capabilities',
                                      default=['algorithm']))
        device_filter_capabilities = conf.device_filter_capabilities
    device_driver = drivers.get_device_driver(conf, dev_ref['id'])
    capabilities = device_driver.get_capabilities()
    if capabilities is None:
        capabilities = {}
    for opt in device_filter_capabilities:
        lb_req = lb_ref.get(opt)
        # An unset/empty requirement on the LB constrains nothing.
        if not lb_req:
            continue
        dev_caps = capabilities.get(opt + 's', [])
        # Idiom fix: `x not in y` instead of `not (x in y)`.
        if lb_req not in dev_caps:
            LOG.debug('Device %s does not support %s "%s"',
                      dev_ref['id'], opt, lb_req)
            return False
    return True
def lb_change_node_status(conf, lb_id, lb_node_id, lb_node_status):
    """Activate or suspend a real server (node) on the device.

    "inservice" activates the node; any other status suspends it.
    Returns the unpacked node record, or "OK" when the status is
    already the requested one.
    """
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    node_ref = db_api.server_get(conf, lb_node_id)
    farm_ref = db_api.serverfarm_get(conf, node_ref['sf_id'])
    if node_ref['state'] == lb_node_status:
        return "OK"
    node_ref['state'] = lb_node_status
    # The device knows child servers under the parent's name, so swap
    # the name for the device call and restore it afterwards.
    saved_name = node_ref['name']
    if node_ref['parent_id'] != "":
        node_ref['name'] = node_ref['parent_id']
    logger.debug("Changing RServer status to: %s" % lb_node_status)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        if lb_node_status == "inservice":
            commands.activate_rserver(ctx, farm_ref, node_ref)
        else:
            commands.suspend_rserver(ctx, farm_ref, node_ref)
    node_ref['name'] = saved_name
    db_api.server_update(conf, node_ref['id'], node_ref)
    return db_api.unpack_extra(node_ref)
def lb_add_probe(conf, lb_id, probe_dict):
    """Attach a health-monitoring probe to a load balancer.

    Returns the unpacked probe record, or None when no probe type is
    given.  Raises ServerFarmNotFound when the LB has no server farm.
    """
    logger.debug("Got new probe description %s" % probe_dict)
    # Historically odd validation kept for compatibility: a missing
    # type means "do nothing".
    if probe_dict['type'] is None:
        return
    lb_ref = db_api.loadbalancer_get(conf, lb_id)
    # One LB maps to exactly one server farm; an empty result means the
    # farm was never created.
    farms = db_api.serverfarm_get_all_by_lb_id(conf, lb_ref['id'])
    if not farms:
        raise exc.ServerFarmNotFound
    sf_ref = farms[0]
    values = db_api.probe_pack_extra(probe_dict)
    values['lb_id'] = lb_ref['id']
    values['sf_id'] = sf_ref['id']
    probe_ref = db_api.probe_create(conf, values)
    driver = drivers.get_device_driver(conf, lb_ref['device_id'])
    with driver.request_context() as ctx:
        commands.add_probe_to_loadbalancer(ctx, sf_ref, probe_ref)
    return db_api.unpack_extra(probe_ref)
def create_lb(conf, params):
    """Create a load balancer, schedule a device, and deploy it.

    Returns the new LB id; the LB status ends up ACTIVE or ERROR
    depending on the deployment outcome.
    """
    nodes = params.pop('nodes', [])
    probes = params.pop('healthMonitor', [])
    vips = params.pop('virtualIps', [])
    values = db_api.loadbalancer_pack_extra(params)
    lb_ref = db_api.loadbalancer_create(conf, values)
    device = scheduler.schedule_loadbalancer(conf, lb_ref)
    device_driver = drivers.get_device_driver(conf, device['id'])
    # Round-trip through unpack/pack to record the scheduled device id.
    lb = db_api.unpack_extra(lb_ref)
    lb['device_id'] = device['id']
    lb_ref = db_api.loadbalancer_pack_extra(lb)
    try:
        with device_driver.request_context() as ctx:
            commands.create_loadbalancer(ctx, lb_ref, nodes, probes, vips)
    except (exception.Error, exception.Invalid):
        # NOTE(review): attribute-style access (lb_ref.status) on the
        # same object that is indexed with lb_ref['id'] below — this
        # assumes the model supports both; confirm against db_api.
        lb_ref.status = lb_status.ERROR
        lb_ref.deployed = 'False'
    else:
        lb_ref.status = lb_status.ACTIVE
        lb_ref.deployed = 'True'
    db_api.loadbalancer_update(conf, lb['id'], lb_ref)
    return lb_ref['id']
def lb_add_sticky(conf, lb_id, sticky):
    """Create a sticky-session rule for a load balancer and deploy it.

    Returns the new sticky id, or None when no persistence type is
    given.
    """
    logger.debug("Got new sticky description %s" % sticky)
    if sticky['persistenceType'] is None:
        return
    balancer_instance = vserver.Balancer(conf)
    balancer_instance.loadFromDB(lb_id)
    # The whole balancer is removed and re-saved so the new sticky is
    # persisted together with the rest of the configuration.
    balancer_instance.removeFromDB()
    st = db_api.sticky_pack_extra(sticky)
    st['sf_id'] = balancer_instance.sf['id']
    balancer_instance.sf._sticky.append(st)
    balancer_instance.savetoDB()
    # Re-read the sticky so it carries the DB-assigned id.
    st = balancer_instance.sf._sticky[-1]
    device_driver = drivers.get_device_driver(conf,
                                              balancer_instance.lb['device_id'])
    with device_driver.request_context() as ctx:
        commands.add_sticky_to_loadbalancer(ctx, balancer_instance, st)
    return st['id']