def call(self, pk):
    """Tear down the OpenStack state behind a SliceDeployments record, then delete it.

    Resources are removed in dependency order: router interface first, then
    subnet, router, network, and finally the tenant itself.
    """
    sd = SliceDeployments.objects.get(pk=pk)
    creator = User.objects.get(id=sd.slice.creator.id)

    # Admin driver for tenant-level operations; client driver (scoped to the
    # slice's tenant) for the network-level ones.
    admin_driver = OpenStackDriver().admin_driver(deployment=sd.deployment.name)
    client_driver = admin_driver.client_driver(caller=creator,
                                               tenant=sd.slice.name,
                                               deployment=sd.deployment.name)

    if sd.router_id and sd.subnet_id:
        client_driver.delete_router_interface(sd.router_id, sd.subnet_id)
    if sd.subnet_id:
        client_driver.delete_subnet(sd.subnet_id)
    if sd.router_id:
        client_driver.delete_router(sd.router_id)
    if sd.network_id:
        client_driver.delete_network(sd.network_id)
    if sd.tenant_id:
        admin_driver.delete_tenant(sd.tenant_id)

    # external-route cleanup was disabled in the original:
    #subnet = None
    #subnets = client_driver.shell.quantum.list_subnets()['subnets']
    #for snet in subnets:
    #    if snet['id'] == sd.subnet_id:
    #        subnet = snet
    #if subnet:
    #    admin_driver.delete_external_route(subnet)

    sd.delete()
def add_subnet(auth, fields):
    """Create a Subnet model object and its backing quantum subnet.

    Args:
        auth: credentials dict passed through auth_check().
        fields: Subnet constructor kwargs; 'slice' may be a slice spec that
            is resolved via _get_slices().

    Returns:
        The saved Subnet, with subnet_id set from quantum.
    """
    driver = OpenStackDriver(client=auth_check(auth))
    slices = _get_slices(fields.get('slice'))
    if slices:
        fields['slice'] = slices[0]
    subnet = Subnet(**fields)

    # create quantum subnet
    quantum_subnet = driver.create_subnet(name=subnet.slice.name,
                                          network_id=subnet.slice.network_id,
                                          cidr_ip=subnet.cidr,
                                          ip_version=subnet.ip_version,
                                          start=subnet.start,
                                          end=subnet.end)
    subnet.subnet_id = quantum_subnet['id']

    ## set dns servers
    #driver.update_subnet(subnet.id, {'dns_nameservers': ['8.8.8.8', '8.8.4.4']})

    # add subnet as interface to slice's router (best-effort: the router may
    # not exist yet, and failure here should not abort subnet creation)
    try:
        driver.add_router_interface(subnet.slice.router_id, subnet.subnet_id)
    except:
        pass

    # BUG FIX: the add_route assignment was commented out but the
    # commands.getstatusoutput(add_route) call was left behind, raising
    # NameError on every invocation. The whole host-route step is disabled.
    #add_route = 'route add -net %s dev br-ex gw 10.100.0.5' % subnet.cidr
    #commands.getstatusoutput(add_route)

    subnet.save()
    return subnet
def init_caller(self, caller, tenant):
    """Initialize client/driver authenticated as *caller* against *tenant*.

    The OpenStack password is derived from the stored user password: the
    first six hex characters of its md5 digest.
    """
    derived_password = hashlib.md5(caller.password).hexdigest()[:6]
    credentials = {
        'username': caller.email,
        'password': derived_password,
        'tenant': tenant,
    }
    self.client = OpenStackClient(**credentials)
    self.driver = OpenStackDriver(client=self.client)
    self.caller = caller
def delete_subnet(auth, filter=None):
    """Delete Subnet records matching *filter*, plus their quantum state.

    Args:
        auth: credentials dict passed through auth_check().
        filter: optional dict of Django filter kwargs (default: match all).
            Was a mutable default `{}`; now None to follow Python convention.

    Returns:
        1 on completion (legacy API convention).
    """
    driver = OpenStackDriver(client=auth_check(auth))
    subnets = Subnet.objects.filter(**(filter or {}))
    for subnet in subnets:
        # detach from the router before removing the quantum subnet
        driver.delete_router_interface(subnet.slice.router_id, subnet.subnet_id)
        driver.delete_subnet(subnet.subnet_id)
        subnet.delete()
        # BUG FIX: the del_route assignment was commented out but the
        # commands.getstatusoutput(del_route) call was left behind, raising
        # NameError on every iteration. The host-route step is disabled.
        #del_route = 'route del -net %s' % subnet.cidr
        #commands.getstatusoutput(del_route)
    return 1
def delete_record(self, controller_network):
    """Remove the quantum resources backing *controller_network*.

    Deletion order matters: the router interface is detached first, then
    subnet, router, and network are removed.
    """
    cn = controller_network
    driver = OpenStackDriver().client_driver(
        caller=cn.network.owner.creator,
        tenant=cn.network.owner.name,
        controller=cn.controller.name)

    if cn.router_id and cn.subnet_id:
        driver.delete_router_interface(cn.router_id, cn.subnet_id)
    if cn.subnet_id:
        driver.delete_subnet(cn.subnet_id)
    if cn.router_id:
        driver.delete_router(cn.router_id)
    if cn.net_id:
        driver.delete_network(cn.net_id)
def call(self, pk):
    """Tear down the quantum state behind a NetworkDeployments record, then delete it."""
    nd = NetworkDeployments.objects.get(pk=pk)
    driver = OpenStackDriver().client_driver(
        caller=nd.network.owner.creator,
        tenant=nd.network.owner.name,
        deployment=nd.deployment.name)

    # dependency order: interface, subnet, router, network
    if nd.router_id and nd.subnet_id:
        driver.delete_router_interface(nd.router_id, nd.subnet_id)
    if nd.subnet_id:
        driver.delete_subnet(nd.subnet_id)
    if nd.router_id:
        driver.delete_router(nd.router_id)
    if nd.net_id:
        driver.delete_network(nd.net_id)

    nd.delete()
def delete_record(self, controller_slice):
    """Remove the OpenStack resources (network state + tenant) behind *controller_slice*."""
    cs = controller_slice
    creator = User.objects.get(id=cs.slice.creator.id)

    # admin driver deletes the tenant; the tenant-scoped client driver
    # deletes the network-level resources.
    admin_driver = OpenStackDriver().admin_driver(controller=cs.controller)
    client_driver = admin_driver.client_driver(caller=creator,
                                               tenant=cs.slice.name,
                                               controller=cs.controller)

    if cs.router_id and cs.subnet_id:
        client_driver.delete_router_interface(cs.router_id, cs.subnet_id)
    if cs.subnet_id:
        client_driver.delete_subnet(cs.subnet_id)
    if cs.router_id:
        client_driver.delete_router(cs.router_id)
    if cs.network_id:
        client_driver.delete_network(cs.network_id)
    if cs.tenant_id:
        admin_driver.delete_tenant(cs.tenant_id)
def sync_record(self, controller_slice):
    """Create/refresh the OpenStack tenant backing *controller_slice*.

    Runs the sync_controller_slices ansible template to create the tenant
    and role assignments, then (for a newly-created tenant) sets the nova
    instance quota and records the tenant id on the model.

    Raises:
        InnocuousException: if the controller is administratively disabled.
        Exception: if the slice creator has no account at the controller,
            or the quota update fails.
    """
    logger.info("sync'ing slice controller %s" % controller_slice)

    controller_register = json.loads(controller_slice.controller.backend_register)
    if (controller_register.get('disabled', False)):
        raise InnocuousException('Controller %s is disabled' % controller_slice.controller.name)

    if not controller_slice.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
        return

    controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
                                                     controller=controller_slice.controller)
    if not controller_users:
        # message typos fixed ("createor" / "has not accout at")
        raise Exception("slice creator %s has no account at controller %s"
                        % (controller_slice.slice.creator, controller_slice.controller.name))
    else:
        controller_user = controller_users[0]
        roles = ['Admin']

    max_instances = int(controller_slice.slice.max_slivers)
    tenant_fields = {'endpoint': controller_slice.controller.auth_url,
                     'admin_user': controller_slice.controller.admin_user,
                     'admin_password': controller_slice.controller.admin_password,
                     'admin_tenant': 'admin',
                     'tenant': controller_slice.slice.name,
                     'tenant_description': controller_slice.slice.description,
                     'roles': roles,
                     'name': controller_user.user.email,
                     'ansible_tag': '%s@%s' % (controller_slice.slice.name,
                                               controller_slice.controller.name),
                     'max_instances': max_instances}

    # one play for the tenant plus one per role assignment
    expected_num = len(roles) + 1
    res = run_template('sync_controller_slices.yaml', tenant_fields,
                       path='controller_slices', expected_num=expected_num)
    tenant_id = res[0]['id']

    if (not controller_slice.tenant_id):
        try:
            driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
            # BUG FIX: quota must be set on the freshly-created tenant_id;
            # controller_slice.tenant_id is still empty inside this branch
            # (the sibling map_sync_outputs() already does it this way).
            driver.shell.nova.quotas.update(tenant_id=tenant_id,
                                            instances=int(controller_slice.slice.max_slivers))
        except:
            logger.log_exc('Could not update quota for %s' % controller_slice.slice.name)
            raise Exception('Could not update quota for %s' % controller_slice.slice.name)
        controller_slice.tenant_id = tenant_id
        controller_slice.backend_status = '1 - OK'

    controller_slice.save()
def map_sync_outputs(self, controller_slice, res):
    """Record the tenant id produced by the sync playbook and set its quota.

    `res[0]['id']` is the id of the tenant the playbook created/ensured.
    The quota update only runs the first time (when the model has no
    tenant_id yet); any failure is logged and re-raised.
    """
    tenant_id = res[0]['id']
    if controller_slice.tenant_id:
        controller_slice.save()
        return

    try:
        admin_driver = OpenStackDriver().admin_driver(
            controller=controller_slice.controller)
        admin_driver.shell.nova.quotas.update(
            tenant_id=tenant_id,
            instances=int(controller_slice.slice.max_instances))
    except:
        logger.log_exc('Could not update quota for %s' % controller_slice.slice.name)
        raise Exception('Could not update quota for %s' % controller_slice.slice.name)

    controller_slice.tenant_id = tenant_id
    controller_slice.backend_status = '1 - OK'
    controller_slice.save()
def get_driver(self, port):
    """Return an OpenStackDriver authenticated for *port*'s instance tenant.

    Nova-compute will not connect a port to an instance unless the port's
    tenant matches the instance's tenant, so the client is scoped to the
    instance's slice. This is a workaround for
    OpenStackDriver.client_driver() not being in working condition.
    """
    from openstack.client import OpenStackClient
    from openstack.driver import OpenStackDriver

    controller = port.instance.node.site_deployment.controller
    instance_slice = port.instance.slice  # renamed: `slice` shadows the builtin
    creator = port.network.owner.creator

    credentials = {
        'username': creator.email,
        'password': creator.remote_password,
        'tenant': instance_slice.name,
    }
    # cacert=self.config.nova_ca_ssl_cert,
    client = OpenStackClient(controller=controller, **credentials)
    return OpenStackDriver(client=client)
def init_admin(self, tenant=None):
    """Initialize client/driver with the admin credentials, optionally scoped to *tenant*."""
    self.client = OpenStackClient(tenant=tenant)
    self.driver = OpenStackDriver(client=self.client)
    # caller is the driver's admin user; mirror its id under kuser_id,
    # which other code expects to find on the caller object
    self.caller = self.driver.admin_user
    self.caller.kuser_id = self.caller.id
def call(self, **args):
    """Reconcile XOS Port objects with neutron ports across all controllers.

    Two passes:
      1. For every compute neutron port found on a controller that is not
         already tracked by a Port model, create a matching Port record.
      2. For user-created Port records that have no neutron port yet,
         create the neutron port and store its id/ip/mac.
    """
    logger.info("sync'ing network instances")

    ports = Port.objects.all()
    ports_by_id = {}
    ports_by_neutron_port = {}
    for port in ports:
        ports_by_id[port.id] = port
        ports_by_neutron_port[port.port_id] = port

    # map neutron net_id -> Network via each network's controllernetworks
    networks = Network.objects.all()
    networks_by_id = {}
    for network in networks:
        for nd in network.controllernetworks.all():
            networks_by_id[nd.net_id] = network

    #logger.info("networks_by_id = ")
    #for (network_id, network) in networks_by_id.items():
    #    logger.info("  %s: %s" % (network_id, network.name))

    instances = Instance.objects.all()
    instances_by_instance_uuid = {}
    for instance in instances:
        instances_by_instance_uuid[instance.instance_uuid] = instance

    # Get all ports in all controllers
    # NOTE: ports_by_id is rebound here — from this point on it maps
    # neutron port id -> neutron port dict (the model-id map above is unused).
    ports_by_id = {}
    templates_by_id = {}
    for controller in Controller.objects.all():
        if not controller.admin_tenant:
            logger.info("controller %s has no admin_tenant" % controller)
            continue
        try:
            driver = self.driver.admin_driver(controller=controller)
            ports = driver.shell.quantum.list_ports()["ports"]
        except:
            # best-effort: an unreachable controller should not abort the sweep
            logger.log_exc("failed to get ports from controller %s" % controller)
            continue

        for port in ports:
            ports_by_id[port["id"]] = port

        # public-nat and public-dedicated networks don't have a net-id anywhere
        # in the data model, so build up a list of which ids map to which network
        # templates.
        try:
            neutron_networks = driver.shell.quantum.list_networks()["networks"]
        except:
            print "failed to get networks from controller %s" % controller
            continue
        for network in neutron_networks:
            for template in NetworkTemplate.objects.all():
                if template.shared_network_name == network["name"]:
                    templates_by_id[network["id"]] = template

    # Pass 1: create Port records for untracked compute neutron ports.
    for port in ports_by_id.values():
        #logger.info("port %s" % str(port))
        if port["id"] in ports_by_neutron_port:
            # we already have it
            #logger.info("already accounted for port %s" % port["id"])
            continue

        if port["device_owner"] != "compute:nova":
            # we only want the ports that connect to instances
            #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
            continue

        instance = instances_by_instance_uuid.get(port['device_id'], None)
        if not instance:
            logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
            continue

        network = networks_by_id.get(port['network_id'], None)
        if not network:
            # maybe it's public-nat or public-dedicated. Search the templates for
            # the id, then see if the instance's slice has some network that uses
            # that template
            template = templates_by_id.get(port['network_id'], None)
            if template and instance.slice:
                for candidate_network in instance.slice.networks.all():
                    if candidate_network.template == template:
                        network = candidate_network

        if not network:
            logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
            # we know it's associated with a instance, but we don't know
            # which network it is part of.
            continue

        if network.template.shared_network_name:
            # If it's a shared network template, then more than one network
            # object maps to the quantum network. We have to do a whole bunch
            # of extra work to find the right one.
            networks = network.template.network_set.all()
            network = None
            for candidate_network in networks:
                if (candidate_network.owner == instance.slice):
                    logger.info("found network %s" % candidate_network)
                    network = candidate_network

            if not network:
                logger.info(
                    "failed to find the correct network for a shared template for port %s network %s"
                    % (port["id"], port["network_id"]))
                continue

        if not port["fixed_ips"]:
            logger.info("port %s has no fixed_ips" % port["id"])
            continue

        ip = port["fixed_ips"][0]["ip_address"]
        mac = port["mac_address"]
        logger.info("creating Port (%s, %s, %s, %s)"
                    % (str(network), str(instance), ip, str(port["id"])))

        ns = Port(network=network, instance=instance, ip=ip, mac=mac,
                  port_id=port["id"])

        try:
            ns.save()
        except:
            logger.log_exc("failed to save port %s" % str(ns))
            continue

    # Pass 2:
    # For ports that were created by the user, find that ones
    # that don't have neutron ports, and create them.
    for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False)):
        logger.info("XXX working on port %s" % port)
        controller = port.instance.node.site_deployment.controller
        slice = port.instance.slice

        if controller:
            cn = port.network.controllernetworks.filter(controller=controller)
            if not cn:
                logger.log_exc("no controllernetwork for %s" % port)
                continue
            cn = cn[0]
            if cn.lazy_blocked:
                # unblock now, but defer creating this port until next run
                cn.lazy_blocked = False
                cn.save()
                logger.info(
                    "deferring port %s because controllerNetwork was lazy-blocked" % port)
                continue
            if not cn.net_id:
                logger.info(
                    "deferring port %s because controllerNetwork does not have a port-id yet" % port)
                continue

            try:
                # We need to use a client driver that specifies the tenant
                # of the destination instance. Nova-compute will not connect
                # ports to instances if the port's tenant does not match
                # the instance's tenant.

                # A bunch of stuff to compensate for OpenStackDriver.client_driveR()
                # not being in working condition.
                from openstack.client import OpenStackClient
                from openstack.driver import OpenStackDriver
                caller = port.network.owner.creator
                auth = {
                    'username': caller.email,
                    'password': caller.remote_password,
                    'tenant': slice.name
                }
                client = OpenStackClient(controller=controller, **auth)
                # cacert=self.config.nova_ca_ssl_cert,
                driver = OpenStackDriver(client=client)

                neutron_port = driver.shell.quantum.create_port(
                    {"port": {"network_id": cn.net_id}})["port"]
                port.port_id = neutron_port["id"]
                if neutron_port["fixed_ips"]:
                    port.ip = neutron_port["fixed_ips"][0]["ip_address"]
                port.mac = neutron_port["mac_address"]
            except:
                logger.log_exc("failed to create neutron port for %s" % port)
                continue
            port.save()
def call(self, **args):
    """Reconcile XOS NetworkSliver objects with neutron ports, then sync NAT forwards.

    Three phases:
      1. For every compute neutron port found on a controller that is not
         already tracked, create a matching NetworkSliver record.
      2. For user-created NetworkSlivers with no neutron port yet, create
         the neutron port and store its id/ip.
      3. Push each network's nat_list into the neutron port's
         nat:forward_ports attribute when they differ.
    """
    logger.info("sync'ing network slivers")

    networkSlivers = NetworkSliver.objects.all()
    networkSlivers_by_id = {}
    networkSlivers_by_port = {}
    for networkSliver in networkSlivers:
        networkSlivers_by_id[networkSliver.id] = networkSliver
        networkSlivers_by_port[networkSliver.port_id] = networkSliver

    # map neutron net_id -> Network via each network's controllernetworks
    networks = Network.objects.all()
    networks_by_id = {}
    for network in networks:
        for nd in network.controllernetworks.all():
            networks_by_id[nd.net_id] = network

    #logger.info("networks_by_id = ")
    #for (network_id, network) in networks_by_id.items():
    #    logger.info("  %s: %s" % (network_id, network.name))

    slivers = Sliver.objects.all()
    slivers_by_instance_uuid = {}
    for sliver in slivers:
        slivers_by_instance_uuid[sliver.instance_uuid] = sliver

    # Get all ports in all controllers
    ports_by_id = {}
    templates_by_id = {}
    for controller in Controller.objects.all():
        if not controller.admin_tenant:
            logger.info("controller %s has no admin_tenant" % controller)
            continue
        try:
            driver = self.driver.admin_driver(controller=controller, tenant='admin')
            ports = driver.shell.quantum.list_ports()["ports"]
        except:
            # best-effort: an unreachable controller should not abort the sweep
            logger.log_exc("failed to get ports from controller %s" % controller)
            continue

        for port in ports:
            ports_by_id[port["id"]] = port

        # public-nat and public-dedicated networks don't have a net-id anywhere
        # in the data model, so build up a list of which ids map to which network
        # templates.
        try:
            neutron_networks = driver.shell.quantum.list_networks()["networks"]
        except:
            print "failed to get networks from controller %s" % controller
            continue
        for network in neutron_networks:
            for template in NetworkTemplate.objects.all():
                if template.shared_network_name == network["name"]:
                    templates_by_id[network["id"]] = template

    # Phase 1: create NetworkSliver records for untracked compute ports.
    for port in ports_by_id.values():
        #logger.info("port %s" % str(port))
        if port["id"] in networkSlivers_by_port:
            # we already have it
            #logger.info("already accounted for port %s" % port["id"])
            continue

        if port["device_owner"] != "compute:nova":
            # we only want the ports that connect to instances
            #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
            continue

        sliver = slivers_by_instance_uuid.get(port['device_id'], None)
        if not sliver:
            logger.info("no sliver for port %s device_id %s" % (port["id"], port['device_id']))
            continue

        network = networks_by_id.get(port['network_id'], None)
        if not network:
            # maybe it's public-nat or public-dedicated. Search the templates for
            # the id, then see if the sliver's slice has some network that uses
            # that template
            template = templates_by_id.get(port['network_id'], None)
            if template and sliver.slice:
                for candidate_network in sliver.slice.networks.all():
                    if candidate_network.template == template:
                        network = candidate_network

        if not network:
            logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
            # we know it's associated with a sliver, but we don't know
            # which network it is part of.
            continue

        if network.template.shared_network_name:
            # If it's a shared network template, then more than one network
            # object maps to the quantum network. We have to do a whole bunch
            # of extra work to find the right one.
            networks = network.template.network_set.all()
            network = None
            for candidate_network in networks:
                if (candidate_network.owner == sliver.slice):
                    print "found network", candidate_network
                    network = candidate_network

            if not network:
                logger.info(
                    "failed to find the correct network for a shared template for port %s network %s"
                    % (port["id"], port["network_id"]))
                continue

        if not port["fixed_ips"]:
            logger.info("port %s has no fixed_ips" % port["id"])
            continue

        ip = port["fixed_ips"][0]["ip_address"]
        logger.info("creating NetworkSliver (%s, %s, %s, %s)"
                    % (str(network), str(sliver), ip, str(port["id"])))

        ns = NetworkSliver(network=network, sliver=sliver, ip=ip,
                           port_id=port["id"])

        try:
            ns.save()
        except:
            logger.log_exc("failed to save networksliver %s" % str(ns))
            continue

    # Phase 2:
    # For networkSlivers that were created by the user, find that ones
    # that don't have neutron ports, and create them.
    for networkSliver in NetworkSliver.objects.filter(
            port_id__isnull=True, sliver__isnull=False):
        #logger.info("XXX working on networksliver %s" % networkSliver)
        controller = networkSliver.sliver.node.site_deployment.controller
        if controller:
            cn = networkSliver.network.controllernetworks.filter(controller=controller)
            if not cn:
                logger.log_exc("no controllernetwork for %s" % networkSliver)
                continue
            cn = cn[0]
            if cn.lazy_blocked:
                # unblock now, but defer creating this port until next run
                cn.lazy_blocked = False
                cn.save()
                logger.info(
                    "deferring networkSliver %s because controllerNetwork was lazy-blocked" % networkSliver)
                continue
            if not cn.net_id:
                logger.info(
                    "deferring networkSliver %s because controllerNetwork does not have a port-id yet" % networkSliver)
                continue

            try:
                # We need to use a client driver that specifies the tenant
                # of the destination sliver. Nova-compute will not connect
                # ports to slivers if the port's tenant does not match
                # the sliver's tenant.

                # A bunch of stuff to compensate for OpenStackDriver.client_driveR()
                # not being in working condition.
                from openstack.client import OpenStackClient
                from openstack.driver import OpenStackDriver
                caller = networkSliver.network.owner.creator
                auth = {
                    'username': caller.email,
                    'password': caller.remote_password,
                    'tenant': networkSliver.sliver.slice.name
                }  # networkSliver.network.owner.name}
                client = OpenStackClient(controller=controller, **auth)
                # cacert=self.config.nova_ca_ssl_cert,
                driver = OpenStackDriver(client=client)

                port = driver.shell.quantum.create_port(
                    {"port": {"network_id": cn.net_id}})["port"]
                networkSliver.port_id = port["id"]
                if port["fixed_ips"]:
                    networkSliver.ip = port["fixed_ips"][0]["ip_address"]
            except:
                logger.log_exc("failed to create neutron port for %s" % networkSliver)
                continue
            networkSliver.save()

    # Phase 3: Now, handle port forwarding
    # We get the list of NetworkSlivers again, since we might have just
    # added a few. Then, for each one of them we find it's quantum port and
    # make sure quantum's nat:forward_ports argument is the same.
    for networkSliver in NetworkSliver.objects.all():
        try:
            nat_list = networkSliver.network.nat_list
        except (TypeError, ValueError), e:
            logger.info("Failed to decode nat_list: %s" % str(e))
            continue

        if not networkSliver.port_id:
            continue

        neutron_port = ports_by_id.get(networkSliver.port_id, None)
        if not neutron_port:
            continue

        neutron_nat_list = neutron_port.get("nat:forward_ports", None)
        if not neutron_nat_list:
            # make sure that None and the empty set are treated identically
            neutron_nat_list = []

        if (neutron_nat_list != nat_list):
            logger.info(
                "Setting nat:forward_ports for port %s network %s sliver %s to %s"
                % (str(networkSliver.port_id), str(networkSliver.network.id),
                   str(networkSliver.sliver), str(nat_list)))

            try:
                driver = self.driver.admin_driver(
                    controller=networkSliver.sliver.node.site_deployment.controller,
                    tenant='admin')
                driver.shell.quantum.update_port(
                    networkSliver.port_id,
                    {"port": {"nat:forward_ports": nat_list}})
            except:
                logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
                continue
        else:
            #logger.info("port %s network %s sliver %s nat %s is already set" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
            pass