class FwdOperator(ObjectOperator):
    """Singleton operator managing Fwd (forwarder) custom objects."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once. Note __init__ is called
        # explicitly with the class as `self`, so the attributes set there
        # are effectively class-level.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls.__init__(cls, **kwargs)
        return cls._instance

    def __init__(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of fwd objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_fwds(self):
        """Repopulate the store from fwd objects already present in k8s."""
        def list_fwd_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped fwd {}".format(name))
            fwd = Fwd(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if fwd.status == OBJ_STATUS.obj_status_provisioned:
                self.store.update_obj(fwd)
        kube_list_obj(self.obj_api, RESOURCES.fwds, list_fwd_obj_fn)
        self.bootstrapped = True

    def get_stored_obj(self, name, spec):
        """Wrap (name, spec) in a store-backed Fwd object."""
        return Fwd(name, self.obj_api, self.store, spec)

    def create_default_fwds(self, n, default_dft):
        """Create `n` forwarders named '<default_dft>-fwd-<i>' for the DFT."""
        for i in range(n):
            fwd_name = default_dft + '-fwd-' + str(i)
            fwd = Fwd(fwd_name, self.obj_api, self.store)
            fwd.dft = default_dft
            fwd.create_obj()
class DropletOperator(ObjectOperator):
    """Singleton operator managing Droplet custom objects and tracking
    which droplets have been allocated to consumers."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; __init__ is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls.__init__(cls, **kwargs)
        return cls._instance

    def __init__(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of droplet objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()
        # Droplets already handed out via assign_droplet().
        self.allocated_droplets = set()

    def query_existing_droplets(self):
        """Repopulate the store from droplet objects already present in k8s."""
        def list_droplet_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped droplet {}".format(name))
            droplet = Droplet(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if droplet.status == OBJ_STATUS.obj_status_provisioned:
                self.store.update_obj(droplet)
        kube_list_obj(self.obj_api, RESOURCES.droplets, list_droplet_obj_fn)
        self.bootstrapped = True

    def get_stored_obj(self, name, spec):
        """Wrap (name, spec) in a store-backed Droplet object."""
        return Droplet(name, self.obj_api, self.store, spec)

    def get_droplet_by_ip(self, ip):
        """Return the stored droplet with the given IP, or None."""
        for d in self.store.store["Droplet"]:
            if self.store.store["Droplet"][d].ip == ip:
                return self.store.store["Droplet"][d]
        return None

    def assign_droplet(self, obj):
        """Assign a random unallocated droplet to `obj`.

        Returns True on success, False when no droplet is free.
        """
        droplets = set(self.store.get_all_obj_type(
            KIND.droplet)) - self.allocated_droplets
        if not droplets:
            return False
        # random.sample() on a set is deprecated and removed in Python 3.11;
        # sample from a list instead.
        d = random.sample(list(droplets), 1)[0]
        obj.droplet = d
        self.allocated_droplets.add(d)
        logger.info("Assigned droplet {} to {}".format(d, obj.name))
        return True

    def unassign_droplet(self, obj):
        """Release `obj`'s droplet back to the free pool.

        Returns True on success, False when `obj` has no droplet assigned.
        """
        if obj.droplet == "":
            return False
        # Bug fix: capture the droplet before clearing it — the original
        # logged an undefined name `d` (NameError) after clearing obj.droplet.
        d = obj.droplet
        self.allocated_droplets.remove(d)
        obj.droplet = ""
        logger.info("Unassigned droplet {} from {}".format(d, obj.name))
        return True

    def get_unallocated_droplets(self):
        """Return the set of droplets not yet handed out."""
        return set(self.store.get_all_obj_type(
            KIND.droplet)) - self.allocated_droplets
class ChainOperator(ObjectOperator):
    """Singleton operator managing Chain custom objects of a DFT."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; __init__ is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls.__init__(cls, **kwargs)
        return cls._instance

    def __init__(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of chain objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_chains(self):
        """Repopulate the store from chain objects already present in k8s."""
        def list_chain_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped chain {}".format(name))
            chain = Chain(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if chain.status == OBJ_STATUS.obj_status_provisioned:
                self.store.update_obj(chain)
        kube_list_obj(self.obj_api, RESOURCES.chains, list_chain_obj_fn)
        self.bootstrapped = True

    def get_stored_obj(self, name, spec):
        """Wrap (name, spec) in a store-backed Chain object."""
        return Chain(name, self.obj_api, self.store, spec)

    def create_n_chains(self, dft):
        """Create dft.numchains chains for `dft` and register each in the
        DFT's maglev table; refresh dft.table from the maglev table."""
        for i in range(dft.numchains):
            chain_name = dft.name + '-chain-' + str(i)
            chain = Chain(chain_name, self.obj_api, self.store)
            chain.dft = dft.name
            chain.size = dft.numchainreplicas
            chain.create_obj()
            dft.maglev_table.add(chain.name)
        dft.table = dft.maglev_table.table
class DftOperator(ObjectOperator):
    """Singleton operator managing Dft (distributed flow table) objects."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; __init__ is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls.__init__(cls, **kwargs)
        return cls._instance

    def __init__(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of dft objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_dfts(self):
        """Repopulate the store from dft objects already present in k8s."""
        def list_dft_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped dft {}".format(name))
            dft = Dft(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if dft.status == OBJ_STATUS.obj_status_provisioned:
                self.store.update_obj(dft)
        kube_list_obj(self.obj_api, RESOURCES.dfts, list_dft_obj_fn)
        self.bootstrapped = True

    def get_stored_obj(self, name, spec):
        """Wrap (name, spec) in a store-backed Dft object."""
        return Dft(name, self.obj_api, self.store, spec)

    def create_default_dft(self):
        """Create the default DFT (idempotent: no-op if already stored)."""
        if self.store.get_obj(OBJ_DEFAULTS.default_dft, KIND.dft):
            return
        dft = Dft(OBJ_DEFAULTS.default_dft, self.obj_api, self.store)
        dft.numchains = OBJ_DEFAULTS.default_n_chains
        dft.numchainreplicas = OBJ_DEFAULTS.default_n_replicas
        dft.maglev_table = MaglevTable(OBJ_DEFAULTS.default_maglev_table_size)
        dft.create_obj()
class ObjectOperator(object):
    """Base class for object operators: pairs an in-memory store with the
    backing k8s custom objects, keeping both in sync."""

    def __init__(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()

    def store_get_obj(self, name, kind, spec):
        """Fetch a cached object, refresh its spec, and return it.

        Returns None (after logging) when no such object is cached.
        """
        cached = self.store.get_obj(name, kind)
        if cached:
            cached.set_obj_spec(spec)
            return cached
        logger.info("{} of type {} not found in store".format(name, kind))
        return None

    def store_update_obj(self, obj):
        """Write `obj` to the cache, then push it to k8s."""
        self.store.update_obj(obj)
        obj.update_obj()

    def store_delete_obj(self, obj):
        """Drop `obj` from the cache, then delete it from k8s."""
        self.store.delete_obj(obj.name, obj.kind)
        obj.delete_obj()

    def set_object_provisioned(self, obj):
        """Mark `obj` provisioned and persist the change."""
        obj.set_status(OBJ_STATUS.obj_status_provisioned)
        self.store_update_obj(obj)
class FtnOperator(ObjectOperator):
    """Singleton operator managing Ftn (flow table node) objects."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; __init__ is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls.__init__(cls, **kwargs)
        return cls._instance

    def __init__(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of ftn objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_ftns(self):
        """Repopulate the store from ftn objects already present in k8s."""
        def list_ftn_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped ftn {}".format(name))
            ftn = Ftn(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if ftn.status == OBJ_STATUS.obj_status_provisioned:
                self.store.update_obj(ftn)
        kube_list_obj(self.obj_api, RESOURCES.ftns, list_ftn_obj_fn)
        self.bootstrapped = True

    def get_stored_obj(self, name, spec):
        """Wrap (name, spec) in a store-backed Ftn object."""
        return Ftn(name, self.obj_api, self.store, spec)

    def create_n_ftns(self, chain):
        """Create chain.size FTNs for `chain` and append their names to
        chain.ftns (replica list of the chain)."""
        for i in range(chain.size):
            ftn_name = chain.name + '-ftn-' + str(i)
            ftn = Ftn(ftn_name, self.obj_api, self.store)
            ftn.parent_chain = chain.name
            ftn.dft = chain.dft
            ftn.create_obj()
            chain.ftns.append(ftn.name)
def _init(self, **kwargs):
    # Operator bootstrap: log construction kwargs, create the in-memory
    # object store, and build the k8s CRD client (in-cluster credentials).
    logger.info(kwargs)
    self.store = OprStore()
    config.load_incluster_config()
    self.obj_api = client.CustomObjectsApi()
class DividerOperator(object):
    """Singleton operator managing Divider objects (per-VPC routers)."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; _init is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super(DividerOperator, cls).__new__(cls)
            cls._init(cls, **kwargs)
        return cls._instance

    def _init(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of divider objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_dividers(self):
        """Repopulate the store from divider objects already present in k8s."""
        logger.info("divider on_startup")

        def list_dividers_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped Divider {}".format(name))
            d = Divider(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if d.status == OBJ_STATUS.divider_status_provisioned:
                self.store_update(d)
        # Bug fix: list divider objects, not droplets, when bootstrapping
        # the divider store (sibling operators each list their own kind).
        kube_list_obj(self.obj_api, RESOURCES.dividers, list_dividers_obj_fn)

    def get_divider_tmp_obj(self, name, spec):
        """Transient Divider object (not backed by the store)."""
        return Divider(name, self.obj_api, None, spec)

    def get_divider_stored_obj(self, name, spec):
        """Store-backed Divider object."""
        return Divider(name, self.obj_api, self.store, spec)

    def store_update(self, divider):
        self.store.update_divider(divider)

    def set_divider_provisioned(self, div):
        """Mark `div` provisioned and push the status to k8s."""
        div.set_status(OBJ_STATUS.divider_status_provisioned)
        div.update_obj()

    def update_divider_with_bouncers(self, bouncer, net):
        """Propagate `net`'s state to all dividers of the bouncer's VPC."""
        dividers = self.store.get_dividers_of_vpc(bouncer.vpc).values()
        for d in dividers:
            d.update_net(net)

    def delete_bouncer_from_dividers(self, bouncer, net):
        """Remove `net`'s bouncer info from all dividers of its VPC."""
        dividers = self.store.get_dividers_of_vpc(bouncer.vpc).values()
        for d in dividers:
            d.update_net(net, False)

    def update_net(self, net, dividers=None):
        """Update `net` on the given dividers (default: all in net's VPC)."""
        if not dividers:
            dividers = self.store.get_dividers_of_vpc(net.vpc).values()
        for d in dividers:
            d.update_net(net)

    def delete_net(self, net):
        """Delete `net` from every divider in its VPC."""
        dividers = self.store.get_dividers_of_vpc(net.vpc).values()
        for d in dividers:
            d.delete_net(net)

    def delete_nets_from_divider(self, nets, divider):
        """Delete each net in `nets` from a single divider."""
        for net in nets:
            divider.delete_net(net)

    def update_vpc(self, bouncer):
        """Refresh `bouncer`'s VPC view with all dividers of that VPC."""
        dividers = self.store.get_dividers_of_vpc(bouncer.vpc).values()
        bouncer.update_vpc(dividers)
def __init__(self, **kwargs):
    # Operator bootstrap: log construction kwargs, create the in-memory
    # object store, and build the k8s CRD client (in-cluster credentials).
    logger.info(kwargs)
    self.store = OprStore()
    config.load_incluster_config()
    self.obj_api = client.CustomObjectsApi()
    # Droplets already handed out to consumers.
    self.allocated_droplets = set()
class BouncerOperator(object):
    """Singleton operator managing Bouncer objects (per-net switches)."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; _init is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super(BouncerOperator, cls).__new__(cls)
            cls._init(cls, **kwargs)
        return cls._instance

    def _init(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of bouncer objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_bouncers(self):
        """Repopulate the store from bouncer objects already present in k8s."""
        logger.info("bouncer on_startup")

        def list_bouncers_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped Bouncer {}".format(name))
            b = Bouncer(name, self.obj_api, self.store, spec)
            self.store_update(b)
        # Bug fix: list bouncer objects, not droplets, when bootstrapping
        # the bouncer store (sibling operators each list their own kind).
        kube_list_obj(self.obj_api, RESOURCES.bouncers, list_bouncers_obj_fn)

    def get_bouncer_tmp_obj(self, name, spec):
        """Transient Bouncer object (not backed by the store)."""
        return Bouncer(name, self.obj_api, None, spec)

    def get_bouncer_stored_obj(self, name, spec):
        """Store-backed Bouncer object."""
        return Bouncer(name, self.obj_api, self.store, spec)

    def store_update(self, b):
        self.store.update_bouncer(b)

    def set_bouncer_provisioned(self, bouncer):
        """Mark `bouncer` provisioned and push the status to k8s."""
        bouncer.set_status(OBJ_STATUS.bouncer_status_provisioned)
        bouncer.update_obj()

    def update_bouncers_with_divider(self, div):
        """Add `div` to the VPC view of every bouncer in its VPC."""
        bouncers = self.store.get_bouncers_of_vpc(div.vpc)
        for b in bouncers.values():
            logger.info("BB {}".format(b.name))
            b.update_vpc(set([div]))

    def delete_divider_from_bouncers(self, div):
        """Remove `div` from the VPC view of every bouncer in its VPC."""
        bouncers = self.store.get_bouncers_of_vpc(div.vpc)
        for b in bouncers.values():
            b.update_vpc(set([div]), False)

    def update_endpoint_with_bouncers(self, ep):
        """Announce `ep` to all bouncers of its net; simple endpoints are
        also told about their bouncers."""
        bouncers = self.store.get_bouncers_of_net(ep.net)
        eps = set([ep])
        for key in bouncers:
            bouncers[key].update_eps(eps)
        if ep.type == OBJ_DEFAULTS.ep_type_simple:
            ep.update_bouncers(bouncers)

    def delete_endpoint_from_bouncers(self, ep):
        """Withdraw `ep` from all bouncers of its net; a simple endpoint's
        transit agent is unloaded afterwards."""
        bouncers = self.store.get_bouncers_of_net(ep.net)
        eps = set([ep])
        for key in bouncers:
            bouncers[key].delete_eps(eps)
        self.store.update_bouncers_of_net(ep.net, bouncers)
        if ep.type == OBJ_DEFAULTS.ep_type_simple:
            ep.unload_transit_agent_xdp()

    def delete_vpc(self, bouncer):
        """Delete the VPC state from `bouncer`."""
        bouncer.delete_vpc()
class NetOperator(object):
    """Singleton operator managing Net objects, their bouncers, and the
    IP allocation of endpoints on each net."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; _init is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super(NetOperator, cls).__new__(cls)
            cls._init(cls, **kwargs)
        return cls._instance

    def _init(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of net objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_nets(self):
        """Repopulate the store from net objects in k8s, then ensure the
        default net exists."""
        def list_net_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped {}".format(name))
            n = Net(name, self.obj_api, self.store, spec)
            self.store_update(n)
        kube_list_obj(self.obj_api, RESOURCES.nets, list_net_obj_fn)
        self.create_default_net()
        logger.debug("Bootstrap Net store: {}".format(self.store._dump_nets()))

    def get_net_tmp_obj(self, name, spec):
        """Transient Net object (not backed by the store)."""
        return Net(name, self.obj_api, None, spec)

    def get_net_stored_obj(self, name, spec):
        """Store-backed Net object."""
        return Net(name, self.obj_api, self.store, spec)

    def store_update(self, net):
        self.store.update_net(net)

    def create_default_net(self):
        """Create the default endpoint net (idempotent)."""
        if self.store.get_net(OBJ_DEFAULTS.default_ep_net):
            return
        n = Net(OBJ_DEFAULTS.default_ep_net, self.obj_api, self.store)
        n.create_obj()

    def set_net_provisioned(self, net):
        """Mark `net` provisioned and push the status to k8s."""
        net.set_status(OBJ_STATUS.net_status_provisioned)
        net.update_obj()

    def on_net_init(self, body, spec, **kwargs):
        """Handler for a new net: create its bouncers and mark it allocated."""
        name = kwargs['name']
        logger.info("Net on_net_init {}".format(spec))
        n = Net(name, self.obj_api, self.store, spec)
        for i in range(n.n_bouncers):
            n.create_bouncer()
        n.set_status(OBJ_STATUS.net_status_allocated)
        n.update_obj()

    def create_net_bouncers(self, net, n):
        """Create `n` additional bouncers for `net`; returns `net`."""
        logger.info("Create {} Bouncers for net: {}".format(n, net.name))
        for i in range(n):
            net.create_bouncer()
        return net

    def delete_net_bouncers(self, net, n):
        """Delete `n` bouncers from `net`."""
        logger.info("Deleting all Bouncers for net: {}".format(net.name))
        for i in range(n):
            net.delete_bouncer()

    def process_bouncer_change(self, net, old, new):
        """React to a change in the desired bouncer count (old -> new).

        Scale-out is implemented; scale-in is currently a no-op (logged only).
        """
        diff = new - old
        if diff > 0:
            logger.info("Scaling out Nets bouncers: {}".format(diff))
            return self.create_net_bouncers(net, abs(diff))
        if diff < 0:
            # TODO: scale-in not implemented yet.
            logger.info("Scaling in Nets bouncers: {}".format(diff))
            pass
        return

    def on_net_delete(self, body, spec, **kwargs):
        """Handler for net deletion (currently log-only)."""
        logger.info("on_net_delete {}".format(spec))

    def on_bouncer_provisioned(self, body, spec, **kwargs):
        """When any bouncer of a net is provisioned, the net itself is
        considered provisioned."""
        name = kwargs['name']
        logger.info("Net on_bouncer_provisioned {} with spec: {}".format(
            name, spec))
        b = Bouncer(name, self.obj_api, None, spec)
        n = self.store.get_net(b.net)
        if n.status != OBJ_STATUS.net_status_provisioned:
            n.set_status(OBJ_STATUS.net_status_provisioned)
            n.update_obj()

    def on_endpoint_init(self, body, spec, **kwargs):
        """Handler for a new endpoint: allocate ip/gw/prefix from its net,
        load the transit agent, and mark the endpoint allocated."""
        name = kwargs['name']
        logger.info("Net on_endpoint_init {} with spec: {}".format(
            name, spec))
        ep = Endpoint(name, self.obj_api, None, spec)
        n = self.store.get_net(ep.net)
        ip = n.allocate_ip()
        gw = n.get_gw_ip()
        prefix = n.get_prefixlen()
        ep.set_ip(ip)
        ep.set_gw(gw)
        ep.set_prefix(prefix)
        ep.load_transit_agent()
        n.mark_ip_as_allocated(ip)
        ep.set_status(OBJ_STATUS.ep_status_allocated)
        ep.update_obj()

    def allocate_endpoint(self, ep):
        """Allocate network parameters (ip if missing, gw, prefix, vni)
        for `ep` from its net."""
        n = self.store.get_net(ep.net)
        if ep.ip == "":
            ip = n.allocate_ip()
            ep.set_ip(ip)
        gw = n.get_gw_ip()
        prefix = n.get_prefixlen()
        ep.set_gw(gw)
        ep.set_prefix(prefix)
        #TODO: Most of the time is spent in loading the transit agent
        #if ep.type == OBJ_DEFAULTS.ep_type_simple:
        #    ep.load_transit_agent()
        n.mark_ip_as_allocated(ep.ip)
        ep.set_vni(n.vni)

    def deallocate_endpoint(self, ep):
        """Return `ep`'s IP to its net's pool."""
        n = self.store.get_net(ep.net)
        n.deallocate_ip(ep.ip)
class CniService(Service):
    """RPC service backing the CNI plugin: creates/deletes endpoints,
    their network namespaces, and veth pairs on this host."""

    # Class-level (host-wide) state shared by all connections.
    config = None
    cert = ""
    ssl_ca_cert = None
    droplet = None
    store = OprStore()
    droplet_configured = False

    def configure_droplet(obj_api):
        # Register this host as a Droplet object once per process.
        # NOTE(review): defined without `self` but invoked as
        # self.configure_droplet(), so `obj_api` is bound to the service
        # instance, not a CustomObjectsApi — confirm intent (self.obj_api
        # is set just before the call in on_connect).
        if CniService.droplet_configured:
            return
        name, spec = get_host_info()
        CniService.droplet = Droplet(name, obj_api, CniService.store, spec)
        CniService.droplet.create_obj()
        CniService.droplet_configured = True
        return name

    def on_connect(self, conn):
        # Per-connection setup: netlink handle, k8s client config (the CA
        # cert is materialized to /tmp/ca), and host droplet registration.
        self.iproute = IPRoute()
        CniService.ssl_ca_cert = '/tmp/ca'
        f = open(CniService.ssl_ca_cert, 'w')
        f.write(CniService.cert)
        self._set_config()
        self.obj_api = client.CustomObjectsApi()
        self.configure_droplet()

    def _set_config(self):
        # Build a kubernetes client Configuration from the class-level
        # config/cert and install it as the process default.
        configuration = Configuration()
        configuration.host = CniService.config.host
        configuration.ssl_ca_cert = CniService.ssl_ca_cert
        configuration.api_key['authorization'] = CniService.config.api_key[
            'authorization']
        Configuration.set_default(configuration)

    def __del__(self):
        # Release the netlink socket.
        self.iproute.close()

    def exposed_add(self, params):
        """CNI ADD: create/fetch the endpoint and return the CNI result
        JSON with (val, status); status 1 means failure."""
        val = "Add service failed!"
        status = 1
        ep = self.get_or_create_ep(params)
        if ep.status != OBJ_STATUS.ep_status_provisioned:
            return val, status
        result = {
            "cniVersion": params.cni_version,
            "interfaces": [{
                "name": ep.veth_name,
                "mac": ep.mac,
                "sandbox": ep.netns
            }],
            "ips": [{
                "version": "4",
                "address": "{}/{}".format(ep.ip, ep.prefix),
                "gateway": ep.gw,
                "interface": 0
            }]
        }
        val = json.dumps(result)
        status = 0
        logger.info("cni service added {}".format(ep.name))
        return val, status

    def exposed_delete(self, params):
        """CNI DEL: tear down the endpoint; always reports success."""
        val = ""
        status = 0
        self.delete_ep(params)
        return val, status

    def exposed_get(self, params):
        """CNI GET: not implemented (status 1)."""
        val = "!!get service!!"
        status = 1
        logger.info("cni service get {}".format(params))
        return val, status

    def exposed_version(self, params):
        """CNI VERSION: not implemented (status 1)."""
        val = "!!version service!!"
        status = 1
        logger.info("cni service version {}".format(params))
        return val, status

    def get_or_create_ep(self, params):
        """Return the cached endpoint for the pod, or create, wire up,
        and provision a new one (netns, veth pair, transit agent)."""
        logger.debug("Allocate endpoint name")
        name = ""
        if 'K8S_POD_NAME' in params.cni_args_dict:
            name = params.cni_args_dict['K8S_POD_NAME']
            name = 'simple-ep-' + name
        if CniService.store.contains_ep(name):
            return CniService.store.get_ep(name)
        start_time = time.time()
        ep = Endpoint(name, self.obj_api, CniService.store)
        # If not provided in Pod, use defaults
        # TODO: have it pod :)
        ep.set_vni(params.default_vni)
        ep.set_vpc(params.default_vpc)
        ep.set_net(params.default_net)
        ep.set_type(OBJ_DEFAULTS.ep_type_simple)
        ep.set_status(OBJ_STATUS.ep_status_init)
        ep.set_veth_name(params.interface)
        ep.set_droplet(CniService.droplet.name)
        ep.set_container_id(params.container_id)
        # Derive interface/netns names from the local id.
        self.allocate_local_id(ep)
        ep.set_veth_name("eth-" + ep.local_id)
        ep.set_veth_peer("veth-" + ep.local_id)
        ep.set_netns("mizar-" + ep.local_id)
        ep.set_droplet_ip(CniService.droplet.ip)
        ep.set_droplet_mac(CniService.droplet.mac)
        ep.set_droplet_obj(CniService.droplet)
        iproute_ns = self.create_mizarnetns(params, ep)
        self.prepare_veth_pair(ep, iproute_ns, params)
        ep.load_transit_agent()
        ep.set_cnidelay(time.time() - start_time)
        ep.create_obj()
        # Block until the operator reports the endpoint provisioned.
        ep.watch_obj(self.ep_ready_fn)
        self.provision_endpoint(ep, iproute_ns)
        ep.set_status(OBJ_STATUS.ep_status_provisioned)
        ep.set_provisiondelay(time.time() - start_time)
        ep.update_obj()
        CniService.store.update_ep(ep)
        return ep

    def provision_endpoint(self, ep, iproute_ns):
        """Assign the endpoint's address and default route inside its netns."""
        #ip netns exec {ep.ns} sysctl -w net.ipv4.tcp_mtu_probing=2 && \
        #ip netns exec {ep.ns} ethtool -K veth0 tso off gso off ufo off && \
        #ip netns exec {ep.ns} ethtool --offload veth0 rx off tx off && \
        logging.debug("Add address to ep {}".format(ep.name))
        iproute_ns.addr('add', index=ep.veth_index,
                        address=ep.ip, prefixlen=int(ep.prefix))
        logging.debug("Add route to default GW to ep {}".format(ep.name))
        iproute_ns.route('add', gateway=ep.gw)

    def ep_ready_fn(self, event, ep):
        """watch_obj callback: True when the watched event shows this
        endpoint provisioned; copies gw/ip/prefix from the event spec."""
        name = event['object']['metadata']['name']
        status = event['object']['spec']['status']
        if name != ep.name:
            return False
        if status != OBJ_STATUS.ep_status_provisioned:
            return False
        spec = event['object']['spec']
        # Now get the gw, ip, and prefix
        ep.set_gw(spec['gw'])
        ep.set_ip(spec['ip'])
        ep.set_prefix(spec['prefix'])
        return True

    def allocate_local_id(self, ep):
        """Derive a host-local id from the container id and verify the
        matching eth-/veth- interfaces do not already exist."""
        # NOTE(review): localid is recomputed identically each iteration,
        # so if either interface exists this loop never terminates —
        # confirm whether a retry with a different id was intended.
        e = [1]
        v = [1]
        while len(e) or len(v):
            localid = ep.container_id[-8:]
            eth = "eth-" + localid
            veth = 'veth-' + localid
            e = self.iproute.link_lookup(ifname=eth)
            v = self.iproute.link_lookup(ifname=veth)
        ep.set_local_id(localid)
        logging.debug("Allocated ID for {} as {}".format(ep.name, localid))

    def create_mizarnetns(self, params, ep):
        """Expose the container's netns under /var/run/netns/<ep.netns>
        via symlink and return a NetNS handle to it."""
        os.makedirs('/var/run/netns/', exist_ok=True)
        f = os.listdir('/var/run/netns/')
        logging.debug("files ns {}".format(f))
        src = params.netns
        dst = '/var/run/netns/{}'.format(ep.netns)
        os.symlink(src, dst)
        logging.debug("Created namespace {} from {}".format(
            ep.netns, params.netns))
        return NetNS(ep.netns)

    def prepare_veth_pair(self, ep, iproute_ns, params):
        """Create the veth pair, move/rename the container end into the
        endpoint's netns, and bring both ends (and loopback) up."""
        self.iproute.link('add', ifname=ep.veth_name,
                          peer=ep.veth_peer, kind='veth')
        ep.set_veth_index(get_iface_index(ep.veth_name, self.iproute))
        ep.set_veth_peer_index(get_iface_index(ep.veth_peer, self.iproute))
        ep.set_mac(get_iface_mac(ep.veth_index, self.iproute))
        ep.set_veth_peer_mac(get_iface_mac(ep.veth_peer_index, self.iproute))
        logger.debug("Move interface {} to netns {}".format(
            ep.veth_name, ep.netns))
        self.iproute.link('set', index=ep.veth_index, net_ns_fd=ep.netns)
        logger.debug("Rename interface to {}".format(params.interface))
        ep.set_veth_name(params.interface)
        iproute_ns.link('set', index=ep.veth_index, ifname=params.interface)
        logging.info("Bring loopback device in netns {} up".format(ep.netns))
        lo_idx = self.iproute.link_lookup(ifname="lo")[0]
        iproute_ns.link('set', index=lo_idx, state='up')
        logging.info("Bring endpoint's interface {} up in netns {}".format(
            ep.veth_name, ep.netns))
        iproute_ns.link('set', index=ep.veth_index, state='up')
        logging.info("Bring veth interface {} up and set mtu to 9000".format(
            ep.veth_peer))
        self.iproute.link('set', index=ep.veth_peer_index,
                          state='up', mtu=9000)

    def delete_ep(self, params):
        """Delete the pod's endpoint object, its netns link, and veth pair.
        Silently returns when the pod name or endpoint is unknown."""
        if 'K8S_POD_NAME' in params.cni_args_dict:
            pod_name = params.cni_args_dict['K8S_POD_NAME']
            name = 'simple-ep-' + pod_name
            if CniService.store.contains_ep(name):
                ep = CniService.store.get_ep(name)
            else:
                return
        else:
            logger.debug("Pod name not found!!")
            return
        ep.delete_obj()
        logger.info("cni service delete {}".format(ep.name))
        CniService.store.delete_ep(ep.name)
        self.delete_mizarnetns(ep)
        self.delete_veth_pair(ep)

    def delete_mizarnetns(self, ep):
        """Remove the /var/run/netns symlink for the endpoint."""
        os.remove("/var/run/netns/{}".format(ep.netns))
        logging.debug("Deleted namespace {}".format(ep.netns))

    def delete_veth_pair(self, ep):
        """Delete the host side of the veth pair (removes both ends)."""
        self.iproute.link('del', index=ep.veth_peer_index)
        logging.debug("Deleted veth-pair {}, from {}".format(
            ep.veth_peer, ep.netns))
class EndpointOperator(object):
    """Singleton operator managing Endpoint objects (simple and scaled)."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; _init is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super(EndpointOperator, cls).__new__(cls)
            cls._init(cls, **kwargs)
        return cls._instance

    def _init(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of endpoint objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()
        self.core_api = client.CoreV1Api()

    def query_existing_endpoints(self):
        """Repopulate the store from endpoint objects already in k8s."""
        def list_endpoint_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped {}".format(name))
            ep = Endpoint(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if ep.status == OBJ_STATUS.ep_status_provisioned:
                self.store_update(ep)
        kube_list_obj(self.obj_api, RESOURCES.endpoints, list_endpoint_obj_fn)

    def get_endpoint_tmp_obj(self, name, spec):
        """Transient Endpoint object (not backed by the store)."""
        return Endpoint(name, self.obj_api, None, spec)

    def get_endpoint_stored_obj(self, name, spec):
        """Store-backed Endpoint object."""
        return Endpoint(name, self.obj_api, self.store, spec)

    def set_endpoint_provisioned(self, ep):
        """Mark `ep` provisioned and push the status to k8s."""
        ep.set_status(OBJ_STATUS.ep_status_provisioned)
        ep.update_obj()

    def store_update(self, ep):
        self.store.update_ep(ep)

    def store_delete(self, ep):
        self.store.delete_ep(ep.name)

    def on_endpoint_delete(self, body, spec, **kwargs):
        """Handler for endpoint deletion (currently log-only)."""
        logger.info("on_endpoint_delete {}".format(spec))

    def on_endpoint_provisioned(self, body, spec, **kwargs):
        """Cache a freshly provisioned endpoint."""
        name = kwargs['name']
        logger.info("on_endpoint_provisioned {}".format(spec))
        ep = Endpoint(name, self.obj_api, self.store, spec)
        self.store.update_ep(ep)

    def update_bouncer_with_endpoints(self, bouncer):
        """Push all endpoints of the bouncer's net to the bouncer."""
        eps = self.store.get_eps_in_net(bouncer.net).values()
        bouncer.update_eps(eps)

    def update_endpoints_with_bouncers(self, bouncer):
        """Tell every endpoint in the bouncer's net about the bouncer."""
        eps = self.store.get_eps_in_net(bouncer.net).values()
        for ep in eps:
            ep.update_bouncers(set([bouncer]))

    def create_scaled_endpoint(self, name, spec):
        """Create a scaled endpoint for a k8s Service (uses its clusterIP)
        and annotate the matching builtin endpoints object."""
        logger.info("Create scaled endpoint {} spec {}".format(name, spec))
        ep = Endpoint(name, self.obj_api, self.store)
        ip = spec['clusterIP']
        # If not provided in Pod, use defaults
        # TODO: have it pod :)
        ep.set_vni(OBJ_DEFAULTS.default_vpc_vni)
        ep.set_vpc(OBJ_DEFAULTS.default_ep_vpc)
        ep.set_net(OBJ_DEFAULTS.default_ep_net)
        ep.set_ip(ip)
        ep.set_mac(self.rand_mac())
        ep.set_type(OBJ_DEFAULTS.ep_type_scaled)
        ep.set_status(OBJ_STATUS.ep_status_init)
        ep.create_obj()
        self.annotate_builtin_endpoints(name)

    def annotate_builtin_endpoints(self, name, namespace='default'):
        """Patch the builtin k8s Endpoints object with the mizar service
        annotation, retrying (with a fresh read) on patch conflicts."""
        get_body = True
        while get_body:
            endpoint = self.core_api.read_namespaced_endpoints(
                name=name, namespace=namespace)
            endpoint.metadata.annotations[
                OBJ_DEFAULTS.
                mizar_service_annotation_key] = OBJ_DEFAULTS.mizar_service_annotation_val
            try:
                self.core_api.patch_namespaced_endpoints(name=name,
                                                         namespace=namespace,
                                                         body=endpoint)
                get_body = False
            # Bug fix: the original bare `except:` also swallowed
            # SystemExit/KeyboardInterrupt, making the retry loop
            # uninterruptible. Catch Exception only.
            except Exception:
                logger.debug(
                    "Retry updating annotating endpoints {}".format(name))
                get_body = True

    def delete_scaled_endpoint(self, ep):
        """Delete a scaled endpoint object from k8s."""
        logger.info("Delete scaled endpoint {}".format(ep.name))
        ep.delete_obj()

    def rand_mac(self):
        """Random locally-administered MAC in the a5:5b:00 prefix."""
        return "a5:5b:00:%02x:%02x:%02x" % (
            random.randint(0, 255),
            random.randint(0, 255),
            random.randint(0, 255),
        )

    def update_scaled_endpoint_backend(self, name, spec):
        """Replace the scaled endpoint's backend set from the builtin
        Endpoints subsets; returns the updated object or None if unknown."""
        ep = self.store.get_ep(name)
        if ep is None:
            return None
        backends = set()
        for s in spec:
            for a in s['addresses']:
                backends.add(a['ip'])
        ep.set_backends(backends)
        self.store_update(ep)
        logger.info("Update scaled endpoint {} with backends: {}".format(
            name, backends))
        return self.store.get_ep(name)

    def delete_endpoints_from_bouncers(self, bouncer):
        """Withdraw all endpoints of the bouncer's net from the bouncer."""
        eps = self.store.get_eps_in_net(bouncer.net).values()
        bouncer.delete_eps(eps)

    def delete_bouncer_from_endpoints(self, bouncer):
        """Remove the bouncer from every endpoint in its net."""
        eps = self.store.get_eps_in_net(bouncer.net).values()
        for ep in eps:
            ep.update_bouncers(set([bouncer]), False)
class VpcOperator(object):
    """Singleton operator managing Vpc objects and their dividers."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; _init is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super(VpcOperator, cls).__new__(cls)
            cls._init(cls, **kwargs)
        return cls._instance

    def _init(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of vpc objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()

    def query_existing_vpcs(self):
        """Repopulate the store from vpc objects already present in k8s."""
        def list_vpc_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped {}".format(name))
            v = self.get_vpc_stored_obj(name, spec)
            # Only cache objects that completed provisioning.
            if v.status == OBJ_STATUS.vpc_status_provisioned:
                self.store_update(v)
        kube_list_obj(self.obj_api, RESOURCES.vpcs, list_vpc_obj_fn)
        logger.debug("Bootstrap VPC store: {}".format(self.store._dump_vpcs()))

    def get_vpc_tmp_obj(self, name, spec):
        """Transient Vpc object (not backed by the store)."""
        return Vpc(name, self.obj_api, None, spec)

    def get_vpc_stored_obj(self, name, spec):
        """Store-backed Vpc object."""
        return Vpc(name, self.obj_api, self.store, spec)

    def create_default_vpc(self):
        """Create the default endpoint VPC (idempotent)."""
        if self.store.get_vpc(OBJ_DEFAULTS.default_ep_vpc):
            return
        v = Vpc(OBJ_DEFAULTS.default_ep_vpc, self.obj_api, self.store)
        v.create_obj()

    def store_update(self, vpc):
        self.store.update_vpc(vpc)

    def create_vpc_dividers(self, vpc, n):
        """Create `n` dividers for `vpc`."""
        logger.info("Create {} dividers for vpc: {}".format(n, vpc.name))
        for i in range(n):
            vpc.create_divider()

    def delete_vpc_dividers(self, vpc, n):
        """Delete `n` dividers from `vpc`."""
        logger.info("Delete {} dividers for vpc: {}".format(n, vpc.name))
        for i in range(n):
            vpc.delete_divider()

    def process_divider_change(self, vpc, old, new):
        """Scale the VPC's dividers out or in by the delta (new - old)."""
        diff = new - old
        if diff > 0:
            logger.info("Scaling out VPCs dividers: {}".format(diff))
            return self.create_vpc_dividers(vpc, abs(diff))
        if diff < 0:
            logger.info("Scaling in VPCs dividers: {}".format(diff))
            return self.delete_vpc_dividers(vpc, abs(diff))

    def set_vpc_provisioned(self, vpc):
        """Mark `vpc` provisioned and push the status to k8s."""
        vpc.set_status(OBJ_STATUS.vpc_status_provisioned)
        vpc.update_obj()

    def on_divider_provisioned(self, body, spec, **kwargs):
        """Handler wrapper: extract the object name and delegate."""
        name = kwargs['name']
        self._on_divider_provisioned(name, spec)

    def _on_divider_provisioned(self, name, spec):
        """When any divider of a VPC is provisioned, the VPC itself is
        considered provisioned."""
        logger.info("on_divider_provisioned {} with spec: {}".format(
            name, spec))
        div = Divider(name, self.obj_api, None, spec)
        v = self.store.get_vpc(div.vpc)
        if v.status != OBJ_STATUS.vpc_status_provisioned:
            v.set_status(OBJ_STATUS.vpc_status_provisioned)
            v.update_obj()

    def on_vpc_delete(self, body, spec, **kwargs):
        """Handler for VPC deletion (currently log-only)."""
        logger.info("on_vpc_delete {}".format(spec))

    def allocate_vni(self, vpc):
        """Assign a VNI to `vpc`: fixed for the default VPC, otherwise a
        random 24-bit value."""
        # Scrappy allocator for now!!
        # TODO: There is a tiny chance of collision here, not to worry about now
        if vpc.name == OBJ_DEFAULTS.default_ep_vpc:
            return OBJ_DEFAULTS.default_vpc_vni
        vpc.set_vni(uuid.uuid4().int & (1 << 24) - 1)

    def deallocate_vni(self, vpc):
        """Release `vpc`'s VNI (not tracked yet)."""
        # TODO: Keep track of VNI allocation
        pass
def __init__(self, **kwargs):
    # Minimal operator setup: log construction kwargs and create the
    # in-memory object store.
    logger.info(kwargs)
    self.store = OprStore()
class DropletOperator(object):
    """Singleton operator managing Droplet objects and placing bouncers
    and dividers on them."""

    # Shared singleton instance.
    _instance = None

    def __new__(cls, **kwargs):
        # Construct/initialize exactly once; _init is called explicitly
        # with the class as `self`, so attributes are effectively class-level.
        if cls._instance is None:
            cls._instance = super(DropletOperator, cls).__new__(cls)
            cls._init(cls, **kwargs)
        return cls._instance

    def _init(self, **kwargs):
        logger.info(kwargs)
        self.store = OprStore()  # in-memory cache of droplet objects
        config.load_incluster_config()  # operator runs inside the cluster
        self.obj_api = client.CustomObjectsApi()
        self.bootstrapped = False

    def query_existing_droplets(self):
        """Repopulate the store from droplet objects already present in k8s."""
        def list_droplet_obj_fn(name, spec, plurals):
            logger.info("Bootstrapped droplet {}".format(name))
            d = Droplet(name, self.obj_api, self.store, spec)
            # Only cache objects that completed provisioning.
            if d.status == OBJ_STATUS.droplet_status_provisioned:
                self.store_update(d)
        kube_list_obj(self.obj_api, RESOURCES.droplets, list_droplet_obj_fn)
        self.bootstrapped = True

    def is_bootstrapped(self):
        """True once query_existing_droplets has run."""
        return self.bootstrapped

    def get_droplet_tmp_obj(self, name, spec):
        """Transient Droplet object (not backed by the store)."""
        return Droplet(name, self.obj_api, None, spec)

    def get_droplet_stored_obj(self, name, spec):
        """Store-backed Droplet object."""
        return Droplet(name, self.obj_api, self.store, spec)

    def set_droplet_provisioned(self, droplet):
        """Mark `droplet` provisioned and push the status to k8s."""
        droplet.set_status(OBJ_STATUS.droplet_status_provisioned)
        droplet.update_obj()

    def store_update(self, droplet):
        self.store.update_droplet(droplet)

    def on_droplet_provisioned(self, body, spec, **kwargs):
        """Cache a freshly provisioned droplet."""
        name = kwargs['name']
        logger.info("Droplet on_droplet_provisioned {} with spec: {}".format(
            name, spec))
        d = Droplet(name, self.obj_api, self.store, spec)
        self.store.update_droplet(d)

    def on_bouncer_init(self, body, spec, **kwargs):
        """Place a new bouncer on a droplet and mark it placed."""
        name = kwargs['name']
        logger.info("Droplet place bouncer {} with spec: {}".format(
            name, spec))
        b = Bouncer(name, self.obj_api, None, spec)
        self.assign_bouncer_droplet(b)
        b.set_status(OBJ_STATUS.bouncer_status_placed)
        b.update_obj()

    def assign_bouncer_droplet(self, bouncer):
        """Assign a random known droplet to `bouncer`."""
        # random.sample() on a set is deprecated and removed in Python 3.11;
        # sample from a list instead.
        droplets = list(self.store.get_all_droplets())
        d = random.sample(droplets, 1)[0]
        bouncer.set_droplet(d)

    def assign_divider_droplet(self, divider):
        """Assign a random known droplet to `divider`."""
        # See assign_bouncer_droplet for why a list is sampled.
        droplets = list(self.store.get_all_droplets())
        d = random.sample(droplets, 1)[0]
        divider.set_droplet(d)

    def on_delete(self, body, spec, **kwargs):
        """Handler for droplet deletion (currently log-only)."""
        name = kwargs['name']
        logger.info("*delete_droplet {}".format(name))