def parse_output(msg):
    """Extract the JSON payloads from ansible output.

    Scans each line of `msg` for the ansible result markers
    "ok: [127.0.0.1] => " / "changed: [127.0.0.1] => ", strips the marker
    (and any "(item=...) => " prefix emitted by with_items loops), parses
    the remaining JSON object, and returns the decoded dicts in order.
    """
    observer_logger.info(msg)
    results = []
    # Both markers are handled identically; only the prefix text differs.
    # (The original duplicated this logic in two branches and shadowed the
    # builtin `str` with a local variable.)
    markers = ('ok: [127.0.0.1] => ', 'changed: [127.0.0.1] => ')
    for line in msg.splitlines():
        for marker in markers:
            if line.startswith(marker):
                payload = line[len(marker):]
                # handle ok: [127.0.0.1] => (item=org.onosproject.driver) => {...
                if payload.startswith("(") and (" => {" in payload):
                    payload = payload.split("=> ", 1)[1]
                results.append(json.loads(payload))
                break
    return results
def map_sync_inputs(self, controller_slice):
    """Build the ansible input dict for syncing a slice (tenant) to a controller.

    Returns None (skip) when the controller has no admin_user configured;
    raises Exception when the slice creator has no account at the controller.
    """
    logger.info("sync'ing slice controller %s" % controller_slice)

    if not controller_slice.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" %
                    controller_slice.controller)
        return

    controller_users = ControllerUser.objects.filter(
        user=controller_slice.slice.creator,
        controller=controller_slice.controller)
    if not controller_users:
        # typo fix: message previously read "createor ... has not accout"
        raise Exception(
            "slice creator %s has no account at controller %s" %
            (controller_slice.slice.creator,
             controller_slice.controller.name))
    controller_user = controller_users[0]

    roles = ['admin']
    max_instances = int(controller_slice.slice.max_instances)

    tenant_fields = {
        'endpoint': controller_slice.controller.auth_url,
        'endpoint_v3': controller_slice.controller.auth_url_v3,
        'domain': controller_slice.controller.domain,
        'admin_user': controller_slice.controller.admin_user,
        'admin_password': controller_slice.controller.admin_password,
        'admin_tenant': 'admin',
        'tenant': controller_slice.slice.name,
        'tenant_description': controller_slice.slice.description,
        'roles': roles,
        'name': controller_user.user.email,
        'ansible_tag': '%s@%s' % (controller_slice.slice.name,
                                  controller_slice.controller.name),
        'max_instances': max_instances
    }
    return tenant_fields
def parse_output(msg):
    """Extract the JSON payloads from ansible output.

    Scans each line of `msg` for the ansible result markers
    "ok: [127.0.0.1] => " / "changed: [127.0.0.1] => ", strips the marker
    (and any "(item=...) => " prefix emitted by with_items loops), parses
    the remaining JSON object, and returns the decoded dicts in order.
    """
    observer_logger.info(msg)
    results = []
    # Both markers are handled identically; only the prefix text differs.
    # (The original duplicated this logic in two branches and shadowed the
    # builtin `str` with a local variable.)
    markers = ('ok: [127.0.0.1] => ', 'changed: [127.0.0.1] => ')
    for line in msg.splitlines():
        for marker in markers:
            if line.startswith(marker):
                payload = line[len(marker):]
                # handle ok: [127.0.0.1] => (item=org.onosproject.driver) => {...
                if payload.startswith("(") and (" => {" in payload):
                    payload = payload.split("=> ", 1)[1]
                results.append(json.loads(payload))
                break
    return results
def map_sync_inputs(self, controller_site_privilege):
    """Assemble the ansible user fields for syncing a site privilege to a controller."""
    ctrl = controller_site_privilege.controller

    # Parse (and thereby validate) the backend register blob; the decoded
    # value is not otherwise consumed here.
    json.loads(ctrl.backend_register)

    if not ctrl.admin_user:
        logger.info("controller %r has no admin_user, skipping" % ctrl)
        return

    priv = controller_site_privilege.site_privilege
    roles = [priv.role.role]

    # A user must have a home site before roles can be set up at the controller.
    if not priv.user.site:
        raise Exception('Siteless user %s' % priv.user.email)

    user_fields = {
        'endpoint': ctrl.auth_url,
        'endpoint_v3': ctrl.auth_url_v3,
        'domain': ctrl.domain,
        'name': priv.user.email,
        'email': priv.user.email,
        'password': priv.user.remote_password,
        'admin_user': ctrl.admin_user,
        'admin_password': ctrl.admin_password,
        'ansible_tag': '%s@%s' % (priv.user.email.replace('@', '-at-'), ctrl.name),
        'admin_tenant': ctrl.admin_tenant,
        'roles': roles,
        'tenant': priv.site.login_base,
    }
    return user_fields
def map_sync_inputs(self, controller_site_privilege):
    """Produce the ansible inputs needed to sync a user's site privilege."""
    ctrl = controller_site_privilege.controller

    # Decode (and hence validate) the backend register; the result itself
    # is unused in this step.
    json.loads(ctrl.backend_register)

    if not ctrl.admin_user:
        logger.info("controller %r has no admin_user, skipping" % ctrl)
        return

    priv = controller_site_privilege.site_privilege
    member = priv.user
    roles = [priv.role.role]

    # Roles can only be established for users that have a home site.
    if not member.site:
        raise Exception('Siteless user %s' % member.email)

    tag = '%s@%s' % (member.email.replace('@', '-at-'), ctrl.name)
    return {
        'endpoint': ctrl.auth_url,
        'endpoint_v3': ctrl.auth_url_v3,
        'domain': ctrl.domain,
        'name': member.email,
        'email': member.email,
        'password': member.remote_password,
        'admin_user': ctrl.admin_user,
        'admin_password': ctrl.admin_password,
        'ansible_tag': tag,
        'admin_tenant': ctrl.admin_tenant,
        'roles': roles,
        'tenant': priv.site.login_base,
    }
def map_sync_inputs(self, controller_user):
    """Build ansible fields for syncing a user account to a controller."""
    ctrl = controller_user.controller
    if not ctrl.admin_user:
        logger.info("controller %r has no admin_user, skipping" % ctrl)
        return

    user = controller_user.user

    # Every user gets the 'user' role at their home site/tenant; admins
    # additionally receive the backend's admin role.
    roles = ['user']
    if user.is_admin:
        driver = self.driver.admin_driver(controller=ctrl)
        roles.append(driver.get_admin_role().name)

    # Roles can only be set up for users with a home site.
    if not user.site:
        raise Exception('Siteless user %s' % user.email)

    user_fields = {
        'endpoint': ctrl.auth_url,
        'endpoint_v3': ctrl.auth_url_v3,
        'domain': ctrl.domain,
        'name': user.email,
        'email': user.email,
        'password': user.remote_password,
        'admin_user': ctrl.admin_user,
        'admin_password': ctrl.admin_password,
        'ansible_tag': '%s@%s' % (user.email.replace('@', '-at-'), ctrl.name),
        'admin_project': ctrl.admin_tenant,
        'roles': roles,
        'project': user.site.login_base,
    }
    return user_fields
def delete_record(self, port):
    """Delete a Port: tear down its backend port (if we created it), then purge.

    If the backend deletion fails, the exception is logged and the model is
    left in place so the deletion can be retried on a later pass.
    """
    if port.xos_created and port.port_id:
        logger.info("calling openstack to destroy port %s" % port.port_id)
        try:
            driver = self.get_driver(port)
            driver.shell.quantum.delete_port(port.port_id)
        except Exception:
            # was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            logger.log_exc("failed to delete port %s from neutron" % port.port_id)
            return

    logger.info("Purging port %s" % port)
    port.delete(purge=True)
def delete_record(self, port):
    """Delete a Port: tear down its neutron port (if we created it), then purge.

    If the neutron deletion fails, the exception is logged and the model is
    left in place so the deletion can be retried on a later pass.
    """
    if port.xos_created and port.port_id:
        logger.info("calling openstack to destroy port %s" % port.port_id)
        try:
            driver = self.get_driver(port)
            driver.shell.neutron.delete_port(port.port_id)
        except Exception:
            # was a bare `except:`, which would also swallow SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            logger.log_exc("failed to delete port %s from neutron" % port.port_id)
            return

    logger.info("Purging port %s" % port)
    port.delete(purge=True)
def map_sync_inputs(self, controller_slice):
    """Build the ansible input dict for syncing a slice (project) to a controller.

    Returns None (skip) when the controller has no admin_user configured;
    raises Exception when the slice creator has no account at the controller.
    """
    logger.info("sync'ing slice controller %s" % controller_slice)

    if not controller_slice.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" %
                    controller_slice.controller)
        return

    controller_users = ControllerUser.objects.filter(
        user_id=controller_slice.slice.creator.id,
        controller_id=controller_slice.controller.id)
    if not controller_users:
        # typo fix: message previously read "createor ... has not accout"
        raise Exception(
            "slice creator %s has no account at controller %s" %
            (controller_slice.slice.creator,
             controller_slice.controller.name))
    controller_user = controller_users[0]

    # The creator is granted the backend's admin role on the project.
    driver = self.driver.admin_driver(controller=controller_slice.controller)
    roles = [driver.get_admin_role().name]

    max_instances = int(controller_slice.slice.max_instances)

    tenant_fields = {
        'endpoint': controller_slice.controller.auth_url,
        'endpoint_v3': controller_slice.controller.auth_url_v3,
        'domain': controller_slice.controller.domain,
        'admin_user': controller_slice.controller.admin_user,
        'admin_password': controller_slice.controller.admin_password,
        'admin_project': 'admin',
        'project': controller_slice.slice.name,
        'project_description': controller_slice.slice.description,
        'roles': roles,
        'username': controller_user.user.email,
        'ansible_tag': '%s@%s' % (controller_slice.slice.name,
                                  controller_slice.controller.name),
        'max_instances': max_instances
    }
    return tenant_fields
def map_sync_inputs(self, controller_user):
    """Build ansible fields for syncing a user account to a controller.

    Skips (returns None) when the controller has no admin_user, or when the
    user *is* the controller's admin account; raises for site-less users.
    """
    ctrl = controller_user.controller
    if not ctrl.admin_user:
        logger.info("controller %r has no admin_user, skipping" % ctrl)
        return

    user = controller_user.user
    if not user.site:
        raise Exception('Siteless user %s' % user.email)

    # Never try to manage the controller's own admin account.
    if user.email == ctrl.admin_user:
        logger.info(
            "user %s is the admin_user at controller %r, skipping" %
            (user.email, ctrl))
        return

    return {
        'endpoint': ctrl.auth_url,
        'name': user.email,
        'firstname': user.firstname,
        'lastname': user.lastname,
        'phone': user.phone,
        'user_url': user.user_url,
        'public_key': user.public_key,
        'is_active': user.is_active,
        'is_admin': user.is_admin,
        'is_readonly': user.is_readonly,
        'is_appuser': user.is_appuser,
        'password': user.remote_password,
        'admin_user': ctrl.admin_user,
        'admin_password': ctrl.admin_password,
        'ansible_tag': '%s@%s' % (user.email.replace('@', '-at-'), ctrl.name),
    }
def map_sync_inputs(self, controller_slice):
    """Build ansible fields for syncing a slice to a (container/VTN-style) controller.

    Returns None (skip) when the controller has no admin_user configured;
    raises Exception when the slice creator has no account at the controller.
    """
    logger.info("sync'ing slice controller %s" % controller_slice)

    if not controller_slice.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" %
                    controller_slice.controller)
        return

    controller_users = ControllerUser.objects.filter(
        user=controller_slice.slice.creator,
        controller=controller_slice.controller)
    if not controller_users:
        # grammar fix: message previously read "has not account"
        raise Exception(
            "slice creator %s has no account at controller %s" %
            (controller_slice.slice.creator,
             controller_slice.controller.name))
    controller_user = controller_users[0]

    max_instances = int(controller_slice.slice.max_instances)

    slice_fields = {
        'endpoint': controller_slice.controller.auth_url,
        'admin_user': controller_slice.controller.admin_user,
        'admin_password': controller_slice.controller.admin_password,
        'slice_name': controller_slice.slice.name,
        'slice_description': controller_slice.slice.description,
        'name': controller_user.user.email,
        'ansible_tag': '%s@%s' % (controller_slice.slice.name,
                                  controller_slice.controller.name),
        'image': controller_slice.slice.default_image.name,
        'addresses': '10.168.2.0/24',  # FIXME hardcoded subnet
        'gateway_ip': '10.168.2.1',  # FIXME hardcoded gateway
        'gateway_mac': '02:42:0a:a8:02:01',  # FIXME hardcoded MAC
        'max_instances': max_instances
    }
    return slice_fields
def map_sync_inputs(self, controller_network):
    """Decide whether/how to sync this network-controller binding.

    Only private networks are synced; other templates are marked synced
    without running ansible.
    """
    network = controller_network.network

    # XXX This check should really be made from booleans, rather than using
    # hardcoded network template names.
    private_templates = ('Private', 'Private-Indirect', 'Private-Direct')
    if network.template.name not in private_templates:
        # We only sync private networks
        logger.info("skipping network controller %s because it is not private" % controller_network)
        return SyncStep.SYNC_WITHOUT_RUNNING

    if not controller_network.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
        return

    if network.owner and network.owner.creator:
        return self.save_controller_network(controller_network)
    raise Exception('Could not save network controller %s'%controller_network)
def map_sync_inputs(self, controller_network):
    """Decide whether/how to sync this network-controller binding.

    Shared backend networks are never synced from here; they are marked
    synced without running ansible.
    """
    net = controller_network.network
    tmpl = net.template

    # make sure to not sync a shared network
    if tmpl.shared_network_name or tmpl.shared_network_id:
        return SyncStep.SYNC_WITHOUT_RUNNING

    if not controller_network.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
        return

    if net.owner and net.owner.creator:
        return self.save_controller_network(controller_network)
    raise Exception('Could not save network controller %s' % controller_network)
def run_playbook(ansible_hosts, ansible_config, fqp, opts):
    """Run an ansible playbook in a subprocess and return (stats, aresults).

    The arguments are pickled to a temp file, ansible_main.py is invoked on
    it, and the pickled result dict is read back. Returns (None, None) when
    anything goes wrong. Temp files are removed unless the "keep_temp_files"
    config option is set.
    """
    args = {"ansible_hosts": ansible_hosts,
            "ansible_config": ansible_config,
            "fqp": fqp,
            "opts": opts,
            "config_file": Config.get_config_file()}

    keep_temp_files = Config.get("keep_temp_files")

    tmpdir = tempfile.mkdtemp()  # renamed from `dir` (shadowed a builtin)
    args_fn = None
    result_fn = None
    stats = None
    aresults = None
    try:
        logger.info("creating args file in %s" % tmpdir)

        args_fn = os.path.join(tmpdir, "args")
        result_fn = os.path.join(tmpdir, "result")

        # use context managers so the files are closed promptly
        with open(args_fn, "w") as f:
            f.write(pickle.dumps(args))

        ansible_main_fn = os.path.join(os.path.dirname(__file__), "ansible_main.py")

        os.system("python %s %s %s" % (ansible_main_fn, args_fn, result_fn))

        with open(result_fn) as f:
            result = pickle.loads(f.read())

        # bug fix: `result` is a dict, so the original hasattr() check could
        # never be true; use a membership test instead.
        if "exception" in result:
            logger.log_error("Exception in playbook: %s" % result["exception"])

        stats = result.get("stats", None)
        aresults = result.get("aresults", None)
    except Exception:
        # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt pass through
        logger.log_exc("Exception running ansible_main")
        stats = None
        aresults = None
    finally:
        if not keep_temp_files:
            if args_fn and os.path.exists(args_fn):
                os.remove(args_fn)
            if result_fn and os.path.exists(result_fn):
                os.remove(result_fn)
            os.rmdir(tmpdir)

    return (stats, aresults)
def map_sync_inputs(self, controller_slice_privilege):
    """Build ansible fields for syncing a user's slice privilege to a controller.

    Returns None (skip) when the controller has no admin_user configured;
    raises Exception when the privileged user has no home site.
    """
    if not controller_slice_privilege.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" %
                    controller_slice_privilege.controller)
        return

    # removed: an unused `os_template_env.get_template(...)` call whose
    # result was never consumed (needless template-file I/O).

    role = controller_slice_privilege.slice_privilege.role.role

    # setup user home slice roles at controller
    if not controller_slice_privilege.slice_privilege.user.site:
        # NOTE(review): the message says "Sliceless" but the check is on the
        # user's *site* — looks like a copy/paste of "Siteless"; confirm.
        raise Exception('Sliceless user %s' %
                        controller_slice_privilege.slice_privilege.user.email)

    user_fields = {
        'endpoint': controller_slice_privilege.controller.auth_url,
        'user_name': controller_slice_privilege.slice_privilege.user.email,
        'admin_user': controller_slice_privilege.controller.admin_user,
        'admin_password': controller_slice_privilege.controller.admin_password,
        'ansible_tag': '%s@%s@%s' % (
            controller_slice_privilege.slice_privilege.user.email.replace('@', '-at-'),
            controller_slice_privilege.slice_privilege.slice.name,
            controller_slice_privilege.controller.name),
        'role': role,
        'slice_name': controller_slice_privilege.slice_privilege.slice.name,
    }
    return user_fields
def map_sync_inputs(self, controller_network):
    """Decide whether/how to sync this network-controller binding.

    Instead of matching hardcoded private-template names (the old approach),
    shared backend networks are detected via shared_network_name/_id and
    marked synced without running ansible.
    """
    net = controller_network.network

    if net.template.shared_network_name or net.template.shared_network_id:
        # Shared networks are managed elsewhere; never sync them from here.
        return SyncStep.SYNC_WITHOUT_RUNNING

    if not controller_network.controller.admin_user:
        logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
        return

    if net.owner and net.owner.creator:
        return self.save_controller_network(controller_network)
    raise Exception('Could not save network controller %s'%controller_network)
def map_sync_inputs(self, controller_user):
    """Build ansible fields for syncing a user account to a controller."""
    ctrl = controller_user.controller
    if not ctrl.admin_user:
        logger.info("controller %r has no admin_user, skipping" % ctrl)
        return

    user = controller_user.user

    # Everyone gets the 'user' role at their home site/tenant; admins get
    # the 'admin' role on top of it.
    roles = ['user'] + (['admin'] if user.is_admin else [])

    # Roles can only be set up for users with a home site.
    if not user.site:
        raise Exception('Siteless user %s' % user.email)

    user_fields = {
        'endpoint': ctrl.auth_url,
        'endpoint_v3': ctrl.auth_url_v3,
        'domain': ctrl.domain,
        'name': user.email,
        'email': user.email,
        'password': user.remote_password,
        'admin_user': ctrl.admin_user,
        'admin_password': ctrl.admin_password,
        'ansible_tag': '%s@%s' % (user.email.replace('@', '-at-'), ctrl.name),
        'admin_tenant': ctrl.admin_tenant,
        'roles': roles,
        'tenant': user.site.login_base,
    }
    return user_fields
def fetch_pending(self, deleted):
    """Return Image objects pending sync; discover new .img files on disk.

    Images originate from the back end and cannot be deleted, so the
    deletion pass always returns an empty list.
    """
    if deleted:
        logger.info("SyncImages: returning because deleted=True")
        return []

    images_path = Config().observer_images_directory
    logger.info("SyncImages: deleted=False, images_path=%s" % images_path)

    # file name -> full path for every *.img file in the images directory
    available_images = {}
    if os.path.exists(images_path):
        for entry in os.listdir(images_path):
            full_path = os.path.join(images_path, entry)
            if os.path.isfile(full_path) and full_path.endswith(".img"):
                available_images[entry] = full_path

    logger.info("SyncImages: available_images = %s" % str(available_images))

    known_names = [img.name for img in Image.objects.all()]

    # Register any on-disk image that the data model doesn't know yet.
    for file_name, full_path in available_images.items():
        base_name = ".".join(file_name.split('.')[:-1])  # drop the extension
        if base_name not in known_names:
            logger.info("SyncImages: adding %s" % base_name)
            Image(name=base_name,
                  disk_format='raw',
                  container_format='bare',
                  path=full_path).save()

    return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
def fetch_pending(self, deleted):
    """Return Image objects pending sync; register new image files found on disk.

    Images originate from the back end and cannot be deleted, so the
    deletion pass always yields an empty list.
    """
    if deleted:
        logger.info("SyncImages: returning because deleted=True")
        return []

    images_path = Config().observer_images_directory
    logger.info("SyncImages: deleted=False, images_path=%s" % images_path)

    # file name -> full path for every regular file in the images directory
    available_images = {}
    if os.path.exists(images_path):
        for entry in os.listdir(images_path):
            full_path = os.path.join(images_path, entry)
            if os.path.isfile(full_path):
                available_images[entry] = full_path

    logger.info("SyncImages: available_images = %s" % str(available_images))

    known_names = [img.name for img in Image.objects.all()]

    # Register any on-disk image that the data model doesn't know yet.
    for file_name, full_path in available_images.items():
        base_name = ".".join(file_name.split('.')[:-1])  # drop the extension
        if base_name not in known_names:
            logger.info("SyncImages: adding %s" % base_name)
            Image(name=base_name,
                  disk_format='raw',
                  container_format='bare',
                  path=full_path).save()

    return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None))
def get_tacker_client(site, service_type='nfv-orchestration', timeout=None, **kwargs):
    """
    Get a client connection to Tacker authenticated with our Keystone credentials.

    ie) client = construct_http_client(username='******', password='******',
                                       tenant_name='admin',
                                       auth_url='http://192.168.1.121:5000/v2.0')

    :param site: (ControllerSite) Site to get client for
    :param service_type: (string) Service type defined for the Tacker service.
        For the Liberty release this may be 'servicevm'; for Mitaka+ it will
        most likely be 'nfv-orchestration'. Run 'openstack service list' on
        your controller and check the 'tacker' entry's 'Type' to verify.
    :param timeout: (integer) Connection timeout, see keystoneclient.v2_0.client module
    :return: (HttpClient) Tacker HTTP API client
    :raises Unauthorized: when the admin credentials are rejected by Keystone
    :raises ConnectionFailed: when the endpoint cannot be reached
    """
    observer_logger.info('TACKER: get client request: user: %s, tenant: %s, auth: %s' % (site.controller.admin_user, site.controller.admin_tenant, site.controller.auth_url))
    client = construct_http_client(username=site.controller.admin_user,
                                   tenant_name=site.controller.admin_tenant,
                                   password=site.controller.admin_password,
                                   auth_url=site.controller.auth_url,
                                   service_type=service_type,
                                   timeout=timeout,
                                   **kwargs)
    # NOTE(review): when construct_http_client returns a falsy client we log
    # the failure but still fall through to client.authenticate() below, which
    # would raise AttributeError on None — confirm whether an early
    # return/raise is intended here.
    if not client:
        observer_logger.info('TACKER: get client failed')
    else:
        observer_logger.info('TACKER: get client results: %s' % client)
    try:
        # Authenticate eagerly so credential problems surface here rather
        # than on the first API call.
        client.authenticate()
    except Unauthorized as e:
        observer_logger.error('get_tacker_client: (%s of %s) authentication error: %s' % (site.controller.admin_user, site.controller.admin_tenant, e.message))
        raise
    except ConnectionFailed:
        # This can happen during unittest if you retry too often
        raise
    return client
def map_sync_inputs(self, instance):
    """Build the ansible input fields needed to create `instance` on its controller.

    Gathers ssh public keys, NICs (user-created ports, private networks,
    shared-template networks, or 'public' as a fallback), the image name,
    and availability-zone/user-data details, then returns the field dict.

    Raises DeferredException (retry later) whenever a prerequisite is not
    yet in place: un-policed slice/networks, ports without backend ids, or
    controller networks that have not been assigned a net_id yet.
    """
    # sanity check - make sure model_policy for slice has run
    if ((not instance.slice.policed) or (instance.slice.policed < instance.slice.updated)):
        raise DeferredException(
            "Instance %s waiting on Slice %s to execute model policies" %
            (instance, instance.slice.name))

    # sanity check - make sure model_policy for all slice networks have run
    for network in instance.slice.ownedNetworks.all():
        if ((not network.policed) or (network.policed < network.updated)):
            raise DeferredException(
                "Instance %s waiting on Network %s to execute model policies" %
                (instance, network.name))

    inputs = {}  # NOTE(review): never read below; appears to be dead
    metadata_update = {}
    if (instance.numberCores):
        metadata_update["cpu_cores"] = str(instance.numberCores)

    # not supported by API... assuming it's not used ... look into enabling later
    # for tag in instance.slice.tags.all():
    #     if tag.name.startswith("sysctl-"):
    #         metadata_update[tag.name] = tag.value

    # Collect the ssh public keys to inject: every slice member with a key,
    # plus the instance creator, slice creator, and owning service (if any).
    slice_memberships = SlicePrivilege.objects.filter(
        slice_id=instance.slice.id)
    pubkeys = set([
        sm.user.public_key for sm in slice_memberships if sm.user.public_key
    ])
    if instance.creator.public_key:
        pubkeys.add(instance.creator.public_key)
    if instance.slice.creator.public_key:
        pubkeys.add(instance.slice.creator.public_key)
    if instance.slice.service and instance.slice.service.public_key:
        pubkeys.add(instance.slice.service.public_key)

    nics = []

    # handle ports the were created by the user
    port_ids = []  # NOTE(review): never read below; appears to be dead
    for port in Port.objects.filter(instance_id=instance.id):
        if not port.port_id:
            raise DeferredException("Instance %s waiting on port %s" %
                                    (instance, port))
        nics.append({
            "kind": "port",
            "value": port.port_id,
            "network": port.network
        })

    # we want to exclude from 'nics' any network that already has a Port
    existing_port_networks = [
        port.network for port in Port.objects.filter(instance_id=instance.id)
    ]
    existing_port_network_ids = [x.id for x in existing_port_networks]
    networks = [
        ns.network
        for ns in NetworkSlice.objects.filter(slice_id=instance.slice.id)
        if ns.network.id not in existing_port_network_ids
    ]
    networks_ids = [x.id for x in networks]
    controller_networks = ControllerNetwork.objects.filter(
        controller_id=instance.node.site_deployment.controller.id)
    controller_networks = [
        x for x in controller_networks if x.network_id in networks_ids
    ]

    # Defer if any remaining private network lacks a ControllerNetwork binding.
    for network in networks:
        if not ControllerNetwork.objects.filter(
                network_id=network.id,
                controller_id=instance.node.site_deployment.controller.id
        ).exists():
            raise DeferredException(
                "Instance %s Private Network %s lacks ControllerNetwork object"
                % (instance, network.name))

    for controller_network in controller_networks:
        # Lenient exception - causes slow backoff
        if controller_network.network.template.translation == 'none':
            if not controller_network.net_id:
                raise DeferredException(
                    "Instance %s Private Network %s has no id; Try again later"
                    % (instance, controller_network.network.name))
            nics.append({
                "kind": "net",
                "value": controller_network.net_id,
                "network": controller_network.network
            })

    # now include network template
    network_templates = [network.template.shared_network_name for network in networks \
                         if network.template.shared_network_name]
    driver = self.driver.admin_driver(
        tenant='admin', controller=instance.node.site_deployment.controller)
    nets = driver.shell.neutron.list_networks()['networks']
    for net in nets:
        if net['name'] in network_templates:
            nics.append({
                "kind": "net",
                "value": net['id'],
                "network": None
            })

    # Fall back to the 'public' network when no NIC was selected at all.
    if (not nics):
        for net in nets:
            if net['name'] == 'public':
                nics.append({
                    "kind": "net",
                    "value": net['id'],
                    "network": None
                })

    nics = self.sort_nics(nics)

    # Resolve the image name: prefer a ControllerImage binding for this
    # controller; otherwise scan glance for a matching (or any) image.
    image_name = None
    controller_images = instance.image.controllerimages.all()
    controller_images = [
        x for x in controller_images
        if x.controller_id == instance.node.site_deployment.controller.id
    ]
    if controller_images:
        image_name = controller_images[0].image.name
        logger.info("using image from ControllerImage object: " +
                    str(image_name))

    if image_name is None:
        controller_driver = self.driver.admin_driver(
            controller=instance.node.site_deployment.controller)
        images = controller_driver.shell.glanceclient.images.list()
        for image in images:
            if image.name == instance.image.name or not image_name:
                image_name = image.name
        logger.info("using image from glance: " + str(image_name))

    host_filter = instance.node.name.strip()
    availability_zone_filter = 'nova:%s' % host_filter
    instance_name = '%s-%d' % (instance.slice.name, instance.id)
    self.instance_name = instance_name

    userData = self.get_userdata(instance, pubkeys)
    if instance.userData:
        userData += instance.userData

    # make sure nics is pickle-able
    sanitized_nics = [{
        "kind": nic["kind"],
        "value": nic["value"]
    } for nic in nics]

    controller = instance.node.site_deployment.controller
    fields = {
        'endpoint': controller.auth_url,
        'endpoint_v3': controller.auth_url_v3,
        'domain': controller.domain,
        'admin_user': instance.creator.email,
        'admin_password': instance.creator.remote_password,
        'project_name': instance.slice.name,
        'tenant': instance.slice.name,
        'tenant_description': instance.slice.description,
        'name': instance_name,
        'ansible_tag': instance_name,
        'availability_zone': availability_zone_filter,
        'image_name': image_name,
        'flavor_name': instance.flavor.name,
        'nics': sanitized_nics,
        'meta': metadata_update,
        'user_data': r'%s' % escape(userData)
    }
    return fields
def sync_ports(self):
    """Reconcile XOS Port objects with neutron ports (creation pass).

    Two directions:
      1. For neutron compute ports that exist on a controller but have no
         XOS Port record, create the XOS Port.
      2. For XOS Ports created by the user (instance set, no port_id yet),
         create the corresponding neutron port.
    """
    logger.info("sync'ing Ports [delete=False]")

    ports = Port.objects.all()
    ports_by_id = {}
    ports_by_neutron_port = {}
    for port in ports:
        ports_by_id[port.id] = port
        ports_by_neutron_port[port.port_id] = port

    # Map neutron net_id -> XOS Network via ControllerNetwork bindings.
    networks = Network.objects.all()
    networks_by_id = {}
    for network in networks:
        for nd in network.controllernetworks.all():
            networks_by_id[nd.net_id] = network

    #logger.info("networks_by_id = ")
    #for (network_id, network) in networks_by_id.items():
    #    logger.info("   %s: %s" % (network_id, network.name))

    instances = Instance.objects.all()
    instances_by_instance_uuid = {}
    for instance in instances:
        instances_by_instance_uuid[instance.instance_uuid] = instance

    # Get all ports in all controllers
    # NOTE(review): this rebinding discards the XOS-Port map built above;
    # from here on ports_by_id maps neutron port id -> neutron port dict.
    ports_by_id = {}
    templates_by_id = {}
    for controller in Controller.objects.all():
        if not controller.admin_tenant:
            logger.info("controller %s has no admin_tenant" % controller)
            continue
        try:
            driver = self.driver.admin_driver(controller = controller)
            ports = driver.shell.neutron.list_ports()["ports"]
        except:
            logger.log_exc("failed to get ports from controller %s" % controller)
            continue
        for port in ports:
            ports_by_id[port["id"]] = port

        # public-nat and public-dedicated networks don't have a net-id anywhere
        # in the data model, so build up a list of which ids map to which network
        # templates.
        try:
            neutron_networks = driver.shell.neutron.list_networks()["networks"]
        except:
            print "failed to get networks from controller %s" % controller
            continue
        for network in neutron_networks:
            for template in NetworkTemplate.objects.all():
                if template.shared_network_name == network["name"]:
                    templates_by_id[network["id"]] = template

    # Direction 1: create XOS Ports for neutron compute ports we don't know.
    for port in ports_by_id.values():
        #logger.info("port %s" % str(port))
        if port["id"] in ports_by_neutron_port:
            # we already have it
            #logger.info("already accounted for port %s" % port["id"])
            continue

        if port["device_owner"] != "compute:nova":
            # we only want the ports that connect to instances
            #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
            continue

        instance = instances_by_instance_uuid.get(port['device_id'], None)
        if not instance:
            logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
            continue

        network = networks_by_id.get(port['network_id'], None)
        if not network:
            # maybe it's public-nat or public-dedicated. Search the templates for
            # the id, then see if the instance's slice has some network that uses
            # that template
            template = templates_by_id.get(port['network_id'], None)
            if template and instance.slice:
                for candidate_network in instance.slice.networks.all():
                    if candidate_network.template == template:
                        network=candidate_network
        if not network:
            logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
            # we know it's associated with a instance, but we don't know
            # which network it is part of.
            continue

        if network.template.shared_network_name:
            # If it's a shared network template, then more than one network
            # object maps to the neutron network. We have to do a whole bunch
            # of extra work to find the right one.
            networks = network.template.network_set.all()
            network = None
            for candidate_network in networks:
                if (candidate_network.owner == instance.slice):
                    logger.info("found network %s" % candidate_network)
                    network = candidate_network
            if not network:
                logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
                continue

        if not port["fixed_ips"]:
            logger.info("port %s has no fixed_ips" % port["id"])
            continue

        ip=port["fixed_ips"][0]["ip_address"]
        mac=port["mac_address"]
        logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))

        ns = Port(network=network, instance=instance, ip=ip, mac=mac, port_id=port["id"])

        try:
            ns.save()
        except:
            logger.log_exc("failed to save port %s" % str(ns))
            continue

    # Direction 2:
    # For ports that were created by the user, find that ones
    # that don't have neutron ports, and create them.  These are ports
    # with a null port_id and a non-null instance_id.
    ports = Port.objects.all()
    ports = [x for x in ports if ((not x.port_id) and (x.instance_id))]
    for port in ports:
        logger.info("XXX working on port %s" % port)
        controller = port.instance.node.site_deployment.controller
        slice = port.instance.slice  # NOTE(review): unused; also shadows the builtin

        if controller:
            cn=[x for x in port.network.controllernetworks.all() if x.controller_id==controller.id]
            if not cn:
                logger.log_exc("no controllernetwork for %s" % port)
                continue
            cn=cn[0]
            if cn.lazy_blocked:
                # Un-block the lazy-blocked ControllerNetwork and retry this
                # port on a later pass.
                cn.lazy_blocked=False
                cn.save()
                logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
                continue
            if not cn.net_id:
                logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
                continue
            try:
                driver = self.get_driver(port)

                args = {"network_id": cn.net_id}
                # optional user-supplied parameters for the neutron port
                neutron_port_name = port.get_parameters().get("neutron_port_name", None)
                neutron_port_ip = port.get_parameters().get("neutron_port_ip", None)
                if neutron_port_name:
                    args["name"] = neutron_port_name
                if neutron_port_ip:
                    args["fixed_ips"] = [{"ip_address": neutron_port_ip, "subnet_id": cn.subnet_id}]

                neutron_port = driver.shell.neutron.create_port({"port": args})["port"]
                port.port_id = neutron_port["id"]
                if neutron_port["fixed_ips"]:
                    port.ip = neutron_port["fixed_ips"][0]["ip_address"]
                port.mac = neutron_port["mac_address"]
                port.xos_created = True
                logger.info("created neutron port %s for %s" % (port.port_id, port))
            except:
                logger.log_exc("failed to create neutron port for %s" % port)
                continue
            port.save()
def delete_ports(self):
    """Remove backend (neutron) state for every Port that has been soft-deleted.

    Iterates the deleted-objects manager and delegates each record to
    self.delete_record().
    """
    logger.info("sync'ing Ports [delete=True]")
    doomed = Port.deleted_objects.all()
    for record in doomed:
        self.delete_record(record)
def delete_ports(self):
    """Delete backend state for all Ports pending deletion.

    Uses the synchronizer's fetch_pending(deletion=True) to enumerate the
    records, then hands each one to self.delete_record().
    """
    logger.info("sync'ing Ports [delete=True]")
    for pending_port in self.fetch_pending(deletion=True):
        self.delete_record(pending_port)
def sync_ports(self):
    """Reconcile Port model objects with neutron ports on all controllers.

    Two passes:
      1. For neutron ports that exist on a controller but have no matching
         Port object (e.g. ports nova created for an instance), create the
         Port object, resolving which Network it belongs to (including
         shared-template networks, which need extra lookup work).
      2. For Port objects created by the user that have no neutron port yet
         (null port_id, non-null instance), create the neutron port.

    Errors on individual controllers/ports are logged and skipped so one
    failure does not stall the whole sync.
    """
    logger.info("sync'ing Ports [delete=False]")

    # Index model-side state for O(1) lookups below.
    ports = Port.objects.all()
    ports_by_id = {}
    ports_by_neutron_port = {}
    for port in ports:
        ports_by_id[port.id] = port
        ports_by_neutron_port[port.port_id] = port

    networks = Network.objects.all()
    networks_by_id = {}
    for network in networks:
        for nd in network.controllernetworks.all():
            networks_by_id[nd.net_id] = network

    instances = Instance.objects.all()
    instances_by_instance_uuid = {}
    for instance in instances:
        instances_by_instance_uuid[instance.instance_uuid] = instance

    # Get all ports in all controllers
    ports_by_id = {}
    templates_by_id = {}
    for controller in Controller.objects.all():
        if not controller.admin_tenant:
            logger.info("controller %s has no admin_tenant" % controller)
            continue
        try:
            driver = self.driver.admin_driver(controller=controller)
            ports = driver.shell.quantum.list_ports()["ports"]
        except:
            logger.log_exc("failed to get ports from controller %s" % controller)
            continue
        for port in ports:
            ports_by_id[port["id"]] = port

        # public-nat and public-dedicated networks don't have a net-id anywhere
        # in the data model, so build up a list of which ids map to which
        # network templates.
        try:
            neutron_networks = driver.shell.quantum.list_networks()["networks"]
        except:
            # BUGFIX: was a Python-2-only bare `print`; log with traceback
            # like the ports fetch above.
            logger.log_exc("failed to get networks from controller %s" % controller)
            continue
        for network in neutron_networks:
            for template in NetworkTemplate.objects.all():
                if template.shared_network_name == network["name"]:
                    templates_by_id[network["id"]] = template

    # Pass 1: create Port objects for neutron ports we don't know about.
    for port in ports_by_id.values():
        if port["id"] in ports_by_neutron_port:
            # we already have it
            continue
        if port["device_owner"] != "compute:nova":
            # we only want the ports that connect to instances
            continue
        instance = instances_by_instance_uuid.get(port["device_id"], None)
        if not instance:
            logger.info("no instance for port %s device_id %s" % (port["id"], port["device_id"]))
            continue
        network = networks_by_id.get(port["network_id"], None)
        if not network:
            # maybe it's public-nat or public-dedicated. Search the templates
            # for the id, then see if the instance's slice has some network
            # that uses that template
            template = templates_by_id.get(port["network_id"], None)
            if template and instance.slice:
                for candidate_network in instance.slice.networks.all():
                    if candidate_network.template == template:
                        network = candidate_network
        if not network:
            logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
            # we know it's associated with a instance, but we don't know
            # which network it is part of.
            continue

        if network.template.shared_network_name:
            # If it's a shared network template, then more than one network
            # object maps to the quantum network. We have to do a whole bunch
            # of extra work to find the right one.
            networks = network.template.network_set.all()
            network = None
            for candidate_network in networks:
                if candidate_network.owner == instance.slice:
                    logger.info("found network %s" % candidate_network)
                    network = candidate_network
            if not network:
                logger.info(
                    "failed to find the correct network for a shared template for port %s network %s"
                    % (port["id"], port["network_id"]))
                continue

        if not port["fixed_ips"]:
            logger.info("port %s has no fixed_ips" % port["id"])
            continue

        ip = port["fixed_ips"][0]["ip_address"]
        mac = port["mac_address"]
        logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
        ns = Port(network=network, instance=instance, ip=ip, mac=mac, port_id=port["id"])
        try:
            ns.save()
        except:
            logger.log_exc("failed to save port %s" % str(ns))
            continue

    # Pass 2: for ports that were created by the user, find the ones that
    # don't have neutron ports, and create them.
    for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False)):
        logger.info("XXX working on port %s" % port)
        controller = port.instance.node.site_deployment.controller
        slice = port.instance.slice
        if controller:
            cn = port.network.controllernetworks.filter(controller=controller)
            if not cn:
                logger.log_exc("no controllernetwork for %s" % port)
                continue
            cn = cn[0]
            if cn.lazy_blocked:
                cn.lazy_blocked = False
                cn.save()
                logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
                continue
            if not cn.net_id:
                logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
                continue
            try:
                driver = self.get_driver(port)
                args = {"network_id": cn.net_id}
                neutron_port_name = port.get_parameters().get("neutron_port_name", None)
                if neutron_port_name:
                    args["name"] = neutron_port_name
                neutron_port = driver.shell.quantum.create_port({"port": args})["port"]
                port.port_id = neutron_port["id"]
                if neutron_port["fixed_ips"]:
                    port.ip = neutron_port["fixed_ips"][0]["ip_address"]
                port.mac = neutron_port["mac_address"]
                port.xos_created = True
                logger.info("created neutron port %s for %s" % (port.port_id, port))
            except:
                logger.log_exc("failed to create neutron port for %s" % port)
                continue
            port.save()
def call(self, **args):
    """Legacy entry point: sync network instances (Ports) across controllers.

    Same two-pass structure as sync_ports: (1) create Port objects for
    neutron ports discovered on the controllers; (2) create neutron ports
    for user-created Port objects that lack one, using a tenant-scoped
    client driver so nova will attach the port to the instance.
    """
    logger.info("sync'ing network instances")

    # Index model-side state for O(1) lookups below.
    ports = Port.objects.all()
    ports_by_id = {}
    ports_by_neutron_port = {}
    for port in ports:
        ports_by_id[port.id] = port
        ports_by_neutron_port[port.port_id] = port

    networks = Network.objects.all()
    networks_by_id = {}
    for network in networks:
        for nd in network.controllernetworks.all():
            networks_by_id[nd.net_id] = network

    instances = Instance.objects.all()
    instances_by_instance_uuid = {}
    for instance in instances:
        instances_by_instance_uuid[instance.instance_uuid] = instance

    # Get all ports in all controllers
    ports_by_id = {}
    templates_by_id = {}
    for controller in Controller.objects.all():
        if not controller.admin_tenant:
            logger.info("controller %s has no admin_tenant" % controller)
            continue
        try:
            driver = self.driver.admin_driver(controller=controller)
            ports = driver.shell.quantum.list_ports()["ports"]
        except:
            logger.log_exc("failed to get ports from controller %s" % controller)
            continue
        for port in ports:
            ports_by_id[port["id"]] = port

        # public-nat and public-dedicated networks don't have a net-id anywhere
        # in the data model, so build up a list of which ids map to which
        # network templates.
        try:
            neutron_networks = driver.shell.quantum.list_networks()["networks"]
        except:
            # BUGFIX: was a Python-2-only bare `print`; log with traceback
            # like the ports fetch above.
            logger.log_exc("failed to get networks from controller %s" % controller)
            continue
        for network in neutron_networks:
            for template in NetworkTemplate.objects.all():
                if template.shared_network_name == network["name"]:
                    templates_by_id[network["id"]] = template

    # Pass 1: create Port objects for neutron ports we don't know about.
    for port in ports_by_id.values():
        if port["id"] in ports_by_neutron_port:
            # we already have it
            continue
        if port["device_owner"] != "compute:nova":
            # we only want the ports that connect to instances
            continue
        instance = instances_by_instance_uuid.get(port['device_id'], None)
        if not instance:
            logger.info("no instance for port %s device_id %s" % (port["id"], port['device_id']))
            continue
        network = networks_by_id.get(port['network_id'], None)
        if not network:
            # maybe it's public-nat or public-dedicated. Search the templates
            # for the id, then see if the instance's slice has some network
            # that uses that template
            template = templates_by_id.get(port['network_id'], None)
            if template and instance.slice:
                for candidate_network in instance.slice.networks.all():
                    if candidate_network.template == template:
                        network = candidate_network
        if not network:
            logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))
            # we know it's associated with a instance, but we don't know
            # which network it is part of.
            continue

        if network.template.shared_network_name:
            # If it's a shared network template, then more than one network
            # object maps to the quantum network. We have to do a whole bunch
            # of extra work to find the right one.
            networks = network.template.network_set.all()
            network = None
            for candidate_network in networks:
                if candidate_network.owner == instance.slice:
                    logger.info("found network %s" % candidate_network)
                    network = candidate_network
            if not network:
                logger.info(
                    "failed to find the correct network for a shared template for port %s network %s"
                    % (port["id"], port["network_id"]))
                continue

        if not port["fixed_ips"]:
            logger.info("port %s has no fixed_ips" % port["id"])
            continue

        ip = port["fixed_ips"][0]["ip_address"]
        mac = port["mac_address"]
        logger.info("creating Port (%s, %s, %s, %s)" % (str(network), str(instance), ip, str(port["id"])))
        ns = Port(network=network, instance=instance, ip=ip, mac=mac, port_id=port["id"])
        try:
            ns.save()
        except:
            logger.log_exc("failed to save port %s" % str(ns))
            continue

    # Pass 2: for ports that were created by the user, find the ones that
    # don't have neutron ports, and create them.
    for port in Port.objects.filter(Q(port_id__isnull=True), Q(instance__isnull=False)):
        logger.info("XXX working on port %s" % port)
        controller = port.instance.node.site_deployment.controller
        slice = port.instance.slice
        if controller:
            cn = port.network.controllernetworks.filter(controller=controller)
            if not cn:
                logger.log_exc("no controllernetwork for %s" % port)
                continue
            cn = cn[0]
            if cn.lazy_blocked:
                cn.lazy_blocked = False
                cn.save()
                logger.info("deferring port %s because controllerNetwork was lazy-blocked" % port)
                continue
            if not cn.net_id:
                logger.info("deferring port %s because controllerNetwork does not have a port-id yet" % port)
                continue
            try:
                # We need to use a client driver that specifies the tenant
                # of the destination instance. Nova-compute will not connect
                # ports to instances if the port's tenant does not match
                # the instance's tenant.
                # A bunch of stuff to compensate for
                # OpenStackDriver.client_driver() not being in working
                # condition.
                from openstack.client import OpenStackClient
                from openstack.driver import OpenStackDriver
                caller = port.network.owner.creator
                auth = {'username': caller.email,
                        'password': caller.remote_password,
                        'tenant': slice.name}
                # cacert=self.config.nova_ca_ssl_cert,
                client = OpenStackClient(controller=controller, **auth)
                driver = OpenStackDriver(client=client)
                neutron_port = driver.shell.quantum.create_port(
                    {"port": {"network_id": cn.net_id}})["port"]
                port.port_id = neutron_port["id"]
                if neutron_port["fixed_ips"]:
                    port.ip = neutron_port["fixed_ips"][0]["ip_address"]
                port.mac = neutron_port["mac_address"]
            except:
                logger.log_exc("failed to create neutron port for %s" % port)
                continue
            port.save()
def map_sync_inputs(self, instance):
    """Build the ansible field dict used to create/update an Instance.

    Gathers metadata (cpu cores, sysctl tags), collects ssh public keys
    from slice members/creators/service, assembles the list of nics to
    attach (user-created Ports first, then private controller networks,
    then shared-template networks, falling back to 'public'), resolves the
    image name, and returns the field dict consumed by the playbook.

    Raises DeferredException when a required port or private network does
    not have a backend id yet (causes the synchronizer to retry later).
    """
    metadata_update = {}
    if instance.numberCores:
        metadata_update["cpu_cores"] = str(instance.numberCores)
    for tag in instance.slice.tags.all():
        if tag.name.startswith("sysctl-"):
            metadata_update[tag.name] = tag.value

    slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
    pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
    if instance.creator.public_key:
        pubkeys.add(instance.creator.public_key)
    if instance.slice.creator.public_key:
        pubkeys.add(instance.slice.creator.public_key)
    if instance.slice.service and instance.slice.service.public_key:
        pubkeys.add(instance.slice.service.public_key)

    nics = []

    # handle ports that were created by the user
    for port in Port.objects.filter(instance=instance):
        if not port.port_id:
            raise DeferredException("Instance %s waiting on port %s" % (instance, port))
        nics.append({"kind": "port", "value": port.port_id, "network": port.network})

    # we want to exclude from 'nics' any network that already has a Port.
    # BUGFIX: the comprehension iterated `network` but read the stale outer
    # loop variable `port`, so it yielded N copies of the last port's
    # network instead of each port's network.
    existing_port_networks = [p.network for p in Port.objects.filter(instance=instance)]

    networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)
                if ns.network not in existing_port_networks]
    controller_networks = ControllerNetwork.objects.filter(
        network__in=networks,
        controller=instance.node.site_deployment.controller)

    for controller_network in controller_networks:
        # Lenient exception - causes slow backoff
        if controller_network.network.template.visibility == 'private' and \
           controller_network.network.template.translation == 'none':
            if not controller_network.net_id:
                raise DeferredException(
                    "Instance %s Private Network %s has no id; Try again later"
                    % (instance, controller_network.network.name))
            nics.append({"kind": "net",
                         "value": controller_network.net_id,
                         "network": controller_network.network})

    # now include network template
    network_templates = [network.template.shared_network_name for network in networks
                         if network.template.shared_network_name]

    driver = self.driver.admin_driver(tenant='admin',
                                      controller=instance.node.site_deployment.controller)
    nets = driver.shell.quantum.list_networks()['networks']
    for net in nets:
        if net['name'] in network_templates:
            nics.append({"kind": "net", "value": net['id'], "network": None})

    if not nics:
        # no nics at all: fall back to the 'public' network
        for net in nets:
            if net['name'] == 'public':
                nics.append({"kind": "net", "value": net['id'], "network": None})

    nics = self.sort_nics(nics)

    # Prefer an image recorded in a ControllerImage object; otherwise query
    # glance directly and pick the instance's image (or the first seen).
    image_name = None
    controller_images = instance.image.controllerimages.filter(
        controller=instance.node.site_deployment.controller)
    if controller_images:
        image_name = controller_images[0].image.name
        logger.info("using image from ControllerImage object: " + str(image_name))

    if image_name is None:
        controller_driver = self.driver.admin_driver(
            controller=instance.node.site_deployment.controller)
        images = controller_driver.shell.glanceclient.images.list()
        for image in images:
            if image.name == instance.image.name or not image_name:
                image_name = image.name
        logger.info("using image from glance: " + str(image_name))

    try:
        legacy = Config().observer_legacy
    except:
        legacy = False

    if legacy:
        host_filter = instance.node.name.split('.', 1)[0]
    else:
        host_filter = instance.node.name.strip()

    availability_zone_filter = 'nova:%s' % host_filter
    instance_name = '%s-%d' % (instance.slice.name, instance.id)
    self.instance_name = instance_name

    userData = self.get_userdata(instance, pubkeys)
    if instance.userData:
        userData += instance.userData

    controller = instance.node.site_deployment.controller
    fields = {'endpoint': controller.auth_url,
              'endpoint_v3': controller.auth_url_v3,
              'domain': controller.domain,
              'admin_user': instance.creator.email,
              'admin_password': instance.creator.remote_password,
              'admin_tenant': instance.slice.name,
              'tenant': instance.slice.name,
              'tenant_description': instance.slice.description,
              'name': instance_name,
              'ansible_tag': instance_name,
              'availability_zone': availability_zone_filter,
              'image_name': image_name,
              'flavor_name': instance.flavor.name,
              'nics': nics,
              'meta': metadata_update,
              'user_data': r'%s' % escape(userData)}
    return fields
def delete_ports(self):
    """Tear down controller-side state for each Port marked as deleted.

    Walks the deleted-objects manager and forwards every record to
    self.delete_record().
    """
    logger.info("sync'ing Ports [delete=True]")
    for dead_port in Port.deleted_objects.all():
        self.delete_record(dead_port)
def map_sync_inputs(self, instance):
    """Assemble the ansible playbook inputs for provisioning an Instance.

    Collects instance metadata and ssh public keys, picks the neutron
    network ids to attach (private controller networks, then networks named
    after shared templates, falling back to 'public'), resolves the image
    name, and returns the field dict consumed by the playbook.

    Raises DeferredException when a required private network has no
    backend id yet, causing the synchronizer to retry later.
    """
    inputs = {}
    metadata_update = {}
    if instance.numberCores:
        metadata_update["cpu_cores"] = str(instance.numberCores)
    sysctl_tags = [t for t in instance.slice.tags.all() if t.name.startswith("sysctl-")]
    for t in sysctl_tags:
        metadata_update[t.name] = t.value

    # ssh keys: slice members, the creators, and the owning service (if any)
    memberships = SlicePrivilege.objects.filter(slice=instance.slice)
    pubkeys = {m.user.public_key for m in memberships if m.user.public_key}
    for extra_key in (instance.creator.public_key,
                      instance.slice.creator.public_key):
        if extra_key:
            pubkeys.add(extra_key)
    if instance.slice.service and instance.slice.service.public_key:
        pubkeys.add(instance.slice.service.public_key)

    controller = instance.node.site_deployment.controller

    nics = []
    slice_networks = [ns.network for ns in NetworkSlice.objects.filter(slice=instance.slice)]
    cnets = ControllerNetwork.objects.filter(network__in=slice_networks,
                                             controller=controller)
    for cnet in cnets:
        # Lenient exception - causes slow backoff
        tmpl = cnet.network.template
        if tmpl.visibility == 'private' and tmpl.translation == 'none':
            if not cnet.net_id:
                raise DeferredException(
                    "Instance %s Private Network %s has no id; Try again later"
                    % (instance, cnet.network.name))
            nics.append(cnet.net_id)

    # now include networks whose neutron name matches a shared template
    shared_names = [n.template.shared_network_name for n in slice_networks
                    if n.template.shared_network_name]
    driver = self.driver.admin_driver(tenant='admin', controller=controller)
    nets = driver.shell.quantum.list_networks()['networks']
    nics.extend(net['id'] for net in nets if net['name'] in shared_names)

    if not nics:
        # nothing attached so far: fall back to the 'public' network
        nics.extend(net['id'] for net in nets if net['name'] == 'public')

    # Prefer an image recorded in a ControllerImage object; otherwise ask
    # glance and take the instance's image (or the first one listed).
    image_name = None
    controller_images = instance.image.controllerimages.filter(controller=controller)
    if controller_images:
        image_name = controller_images[0].image.name
        logger.info("using image from ControllerImage object: " + str(image_name))
    if image_name is None:
        glance = self.driver.admin_driver(controller=controller).shell.glanceclient
        for image in glance.images.list():
            if image.name == instance.image.name or not image_name:
                image_name = image.name
        logger.info("using image from glance: " + str(image_name))

    try:
        legacy = Config().observer_legacy
    except:
        legacy = False

    if legacy:
        host_filter = instance.node.name.split('.', 1)[0]
    else:
        host_filter = instance.node.name.strip()
    availability_zone_filter = 'nova:%s' % host_filter

    instance_name = '%s-%d' % (instance.slice.name, instance.id)
    self.instance_name = instance_name

    userData = self.get_userdata(instance, pubkeys)
    if instance.userData:
        userData += instance.userData

    fields = {
        'endpoint': controller.auth_url,
        'endpoint_v3': controller.auth_url_v3,
        'domain': controller.domain,
        'admin_user': instance.creator.email,
        'admin_password': instance.creator.remote_password,
        'admin_tenant': instance.slice.name,
        'tenant': instance.slice.name,
        'tenant_description': instance.slice.description,
        'name': instance_name,
        'ansible_tag': instance_name,
        'availability_zone': availability_zone_filter,
        'image_name': image_name,
        'flavor_name': instance.flavor.name,
        'nics': nics,
        'meta': metadata_update,
        'user_data': r'%s' % escape(userData),
    }
    return fields
def run_template(name, opts, path='', expected_num=None, ansible_config=None,
                 ansible_hosts=None, run_ansible_script=None, object=None):
    """Render the named playbook template, run it, and raise on failure.

    Renders `name` with `opts`, writes the playbook to the path returned by
    get_playbook_fn, executes it via run_playbook, and writes a per-task
    log to '<playbook>.out'. When `object` is given, each task result is
    also logged (via logger.info extras) against that object.

    Raises:
        ValueError: playbook produced no results, an unexpected number of
            ok results (vs expected_num), or any task failed/unreachable.
        Exception: when task-level error messages were collected, they are
            joined and re-raised as a single Exception instead.
    """
    template = os_template_env.get_template(name)
    buffer = template.render(opts)
    (opts, fqp) = get_playbook_fn(opts, path)

    # BUGFIX: the playbook file handle was never closed (only flushed);
    # the context manager guarantees it is closed before ansible reads it.
    with open(fqp, 'w') as f:
        f.write(buffer)
        f.flush()

    stats, aresults = run_playbook(ansible_hosts, ansible_config, fqp, opts)

    error_msg = []
    output_file = fqp + '.out'
    try:
        if aresults is None:
            raise ValueError("Error executing playbook %s" % fqp)

        ok_results = []
        total_unreachable = 0
        failed = 0

        # BUGFIX: ofile leaked if an exception escaped this loop; 'with'
        # guarantees it is closed on every path.
        with open(output_file, 'w') as ofile:
            for x in aresults:
                if not x.is_failed() and not x.is_unreachable() and not x.is_skipped():
                    ok_results.append(x)
                elif x.is_unreachable():
                    failed += 1
                    total_unreachable += 1
                    try:
                        error_msg.append(x._result['msg'])
                    except:
                        pass
                elif x.is_failed():
                    failed += 1
                    try:
                        error_msg.append(x._result['msg'])
                    except:
                        pass

                # FIXME (zdw, 2017-02-19) - may not be needed with new callback logging
                ofile.write('%s: %s\n' % (x._task, str(x._result)))

                if object:
                    oprops = object.tologdict()
                    oprops['xos_type'] = 'ansible'
                    oprops['ansible_result'] = json.dumps(x._result)
                    if failed == 0:
                        oprops['ansible_status'] = 'OK'
                    else:
                        oprops['ansible_status'] = 'FAILED'
                    logger.info(x._task, extra=oprops)

        if (expected_num is not None) and (len(ok_results) != expected_num):
            raise ValueError('Unexpected num %s!=%d' % (str(expected_num), len(ok_results)))
        if failed:
            raise ValueError('Ansible playbook failed.')
    # BUGFIX: was py2-only "except ValueError, e" (and `e` was unused);
    # this form is valid on both Python 2 and 3.
    except ValueError:
        if error_msg:
            try:
                error = ' // '.join(error_msg)
            except:
                error = "failed to join error_msg"
            raise Exception(error)
        else:
            raise