Example #1
    def sync_record(self, controller_image):
        logger.info("Working on image %s on controller %s" %
                    (controller_image.image.name, controller_image.controller))
        image_fields = {
            'endpoint':
            controller_image.controller.auth_url,
            'admin_user':
            controller_image.controller.admin_user,
            'admin_password':
            controller_image.controller.admin_password,
            'name':
            controller_image.image.name,
            'filepath':
            controller_image.image.path,
            'ansible_tag':
            '%s@%s' %
            (controller_image.image.name,
             controller_image.controller.name),  # name of ansible playbook
        }

        res = run_template('sync_controller_images.yaml',
                           image_fields,
                           path='controller_images',
                           expected_num=1)

        image_id = res[0]['id']
        controller_image.glance_image_id = image_id
        controller_image.backend_status = '1 - OK'
        controller_image.save()
    def map_inputs(self, controller_slice_privilege):
        if not controller_slice_privilege.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" %
                        controller_slice_privilege.controller)
            return

        template = os_template_env.get_template('sync_controller_users.yaml')
        roles = [controller_slice_privilege.slice_privilege.role.role]
        # setup user home slice roles at controller
        if not controller_slice_privilege.slice_privilege.user.site:
            raise Exception(
                'Sliceless user %s' %
                controller_slice_privilege.slice_privilege.user.email)
        else:
            # look up tenant id for the user's slice at the controller
            #ctrl_slice_deployments = SliceDeployment.objects.filter(
            #  slice_deployment__slice=controller_slice_privilege.user.slice,
            #  controller=controller_slice_privilege.controller)

            #if ctrl_slice_deployments:
            #    # need the correct tenant id for slice at the controller
            #    tenant_id = ctrl_slice_deployments[0].tenant_id
            #    tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
            user_fields = {
                'endpoint':
                controller_slice_privilege.controller.auth_url,
                'endpoint_v3':
                controller_slice_privilege.controller.auth_url_v3,
                'domain':
                controller_slice_privilege.controller.domain,
                'name':
                controller_slice_privilege.slice_privilege.user.email,
                'email':
                controller_slice_privilege.slice_privilege.user.email,
                'password':
                controller_slice_privilege.slice_privilege.user.
                remote_password,
                'admin_user':
                controller_slice_privilege.controller.admin_user,
                'admin_password':
                controller_slice_privilege.controller.admin_password,
                'ansible_tag':
                '%s@%s@%s' %
                (controller_slice_privilege.slice_privilege.user.email.replace(
                    '@', '-at-'),
                 controller_slice_privilege.slice_privilege.slice.name,
                 controller_slice_privilege.controller.name),
                'admin_tenant':
                controller_slice_privilege.controller.admin_tenant,
                'roles':
                roles,
                'tenant':
                controller_slice_privilege.slice_privilege.slice.name
            }
            return user_fields
Example #3
    def map_sync_inputs(self, controller_user):
        if not controller_user.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" %
                        controller_user.controller)
            return

        # All users will have at least the 'user' role at their home site/tenant.
        # We must also check if the user should have the admin role
        roles = ['user']
        if controller_user.user.is_admin:
            roles.append('admin')

        # setup user home site roles at controller
        if not controller_user.user.site:
            raise Exception('Siteless user %s' % controller_user.user.email)
        else:
            # look up tenant id for the user's site at the controller
            #ctrl_site_deployments = SiteDeployment.objects.filter(
            #  site_deployment__site=controller_user.user.site,
            #  controller=controller_user.controller)

            #if ctrl_site_deployments:
            #    # need the correct tenant id for site at the controller
            #    tenant_id = ctrl_site_deployments[0].tenant_id
            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
            user_fields = {
                'endpoint':
                controller_user.controller.auth_url,
                'endpoint_v3':
                controller_user.controller.auth_url_v3,
                'domain':
                controller_user.controller.domain,
                'name':
                controller_user.user.email,
                'email':
                controller_user.user.email,
                'password':
                controller_user.user.remote_password,
                'admin_user':
                controller_user.controller.admin_user,
                'admin_password':
                controller_user.controller.admin_password,
                'ansible_tag':
                '%s@%s' % (controller_user.user.email.replace(
                    '@', '-at-'), controller_user.controller.name),
                'admin_tenant':
                controller_user.controller.admin_tenant,
                'roles':
                roles,
                'tenant':
                controller_user.user.site.login_base
            }
            return user_fields
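As a quick aside, the ansible_tag built in map_inputs/map_sync_inputs above combines the user's email (with '@' replaced by '-at-') and the slice and controller names. A minimal sketch with made-up values, only to show the shape of the resulting string:

# Hypothetical values; this just reproduces the tag expression used above.
email = 'alice@example.org'
slice_name = 'mysite_slice1'
controller_name = 'cloudlab'
ansible_tag = '%s@%s@%s' % (email.replace('@', '-at-'), slice_name, controller_name)
print(ansible_tag)  # alice-at-example.org@mysite_slice1@cloudlab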
Example #4
    def sync_record(self, controller_user):
        logger.info("sync'ing user %s at controller %s" % (controller_user.user, controller_user.controller))

        controller_register = json.loads(controller_user.controller.backend_register)
        if (controller_register.get('disabled',False)):
            raise InnocuousException('Controller %s is disabled'%controller_user.controller.name)

        if not controller_user.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_user.controller)
            return

        template = os_template_env.get_template('sync_controller_users.yaml')

        # All users will have at least the 'user' role at their home site/tenant.
        # We must also check if the user should have the admin role
        roles = ['user']
        if controller_user.user.is_admin:
            roles.append('admin')

        # setup user home site roles at controller
        if not controller_user.user.site:
            raise Exception('Siteless user %s'%controller_user.user.email)
        else:
            # look up tenant id for the user's site at the controller
            #ctrl_site_deployments = SiteDeployment.objects.filter(
            #  site_deployment__site=controller_user.user.site,
            #  controller=controller_user.controller)

            #if ctrl_site_deployments:
            #    # need the correct tenant id for site at the controller
            #    tenant_id = ctrl_site_deployments[0].tenant_id
            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
            user_fields = {
                'endpoint':controller_user.controller.auth_url,
                'name': controller_user.user.email,
                'email': controller_user.user.email,
                'password': controller_user.user.remote_password,
                'admin_user': controller_user.controller.admin_user,
                'admin_password': controller_user.controller.admin_password,
                'ansible_tag':'%s@%s'%(controller_user.user.email.replace('@','-at-'),controller_user.controller.name),
                'admin_tenant': controller_user.controller.admin_tenant,
                'roles':roles,
                'tenant':controller_user.user.site.login_base
                }

            rendered = template.render(user_fields)
            expected_length = len(roles) + 1

            res = run_template('sync_controller_users.yaml', user_fields,path='controller_users', expected_num=expected_length)

            controller_user.kuser_id = res[0]['id']
            controller_user.backend_status = '1 - OK'
            controller_user.save()
Example #5
    def sync_record(self, controller_network):
        if (controller_network.network.template.name != 'Private'):
            logger.info(
                "skipping network controller %s because it is not private" %
                controller_network)
            # We only sync private networks
            return

        logger.info(
            "sync'ing network controller %s for network %s slice %s controller %s"
            % (controller_network, controller_network.network,
               str(controller_network.network.owner),
               controller_network.controller))

        controller_register = json.loads(
            controller_network.controller.backend_register)
        if (controller_register.get('disabled', False)):
            raise InnocuousException('Controller %s is disabled' %
                                     controller_network.controller.name)

        if not controller_network.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" %
                        controller_network.controller)
            return

        if controller_network.network.owner and controller_network.network.owner.creator:
            self.save_controller_network(controller_network)
            logger.info("saved network controller: %s" % (controller_network))
Example #6
    def map_sync_inputs(self, controller_slice):
        logger.info("sync'ing slice controller %s" % controller_slice)

        if not controller_slice.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" %
                        controller_slice.controller)
            return

        controller_users = ControllerUser.objects.filter(
            user=controller_slice.slice.creator,
            controller=controller_slice.controller)
        if not controller_users:
            raise Exception(
                "slice createor %s has not accout at controller %s" %
                (controller_slice.slice.creator,
                 controller_slice.controller.name))
        else:
            controller_user = controller_users[0]
            roles = ['admin']

        max_instances = int(controller_slice.slice.max_instances)
        tenant_fields = {
            'endpoint':
            controller_slice.controller.auth_url,
            'endpoint_v3':
            controller_slice.controller.auth_url_v3,
            'domain':
            controller_slice.controller.domain,
            'admin_user':
            controller_slice.controller.admin_user,
            'admin_password':
            controller_slice.controller.admin_password,
            'admin_tenant':
            'admin',
            'tenant':
            controller_slice.slice.name,
            'tenant_description':
            controller_slice.slice.description,
            'roles':
            roles,
            'name':
            controller_user.user.email,
            'ansible_tag':
            '%s@%s' %
            (controller_slice.slice.name, controller_slice.controller.name),
            'max_instances':
            max_instances
        }

        return tenant_fields
    def sync_record(self, controller_site_privilege):
        logger.info("sync'ing controller_site_privilege %s at controller %s" % (controller_site_privilege, controller_site_privilege.controller))

        controller_register = json.loads(controller_site_privilege.controller.backend_register)
        if (controller_register.get('disabled', False)):
            raise Exception('Controller %s is disabled' % controller_site_privilege.controller.name)

        if not controller_site_privilege.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
            return

        template = os_template_env.get_template('sync_controller_users.yaml')
        roles = [controller_site_privilege.site_privilege.role.role]
        # setup user home site roles at controller
        if not controller_site_privilege.site_privilege.user.site:
            raise Exception('Siteless user %s' % controller_site_privilege.site_privilege.user.email)
        else:
            # look up tenant id for the user's site at the controller
            #ctrl_site_deployments = SiteDeployment.objects.filter(
            #  site_deployment__site=controller_site_privilege.user.site,
            #  controller=controller_site_privilege.controller)

            #if ctrl_site_deployments:
            #    # need the correct tenant id for site at the controller
            #    tenant_id = ctrl_site_deployments[0].tenant_id
            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
            user_fields = {
                'endpoint': controller_site_privilege.controller.auth_url,
                'name': controller_site_privilege.site_privilege.user.email,
                'email': controller_site_privilege.site_privilege.user.email,
                'password': controller_site_privilege.site_privilege.user.remote_password,
                'admin_user': controller_site_privilege.controller.admin_user,
                'admin_password': controller_site_privilege.controller.admin_password,
                'ansible_tag': '%s@%s' % (controller_site_privilege.site_privilege.user.email.replace('@', '-at-'), controller_site_privilege.controller.name),
                'admin_tenant': controller_site_privilege.controller.admin_tenant,
                'roles': roles,
                'tenant': controller_site_privilege.site_privilege.site.login_base}

            rendered = template.render(user_fields)
            expected_length = len(roles) + 1
            res = run_template('sync_controller_users.yaml', user_fields, path='controller_site_privileges', expected_num=expected_length)

            # results is an array in which each element corresponds to an
            # "ok" string received per operation. If we get as many oks as
            # the number of operations we issued, that means a grand success.
            # Otherwise, the number of oks tells us which operation failed.
            controller_site_privilege.role_id = res[0]['id']
            controller_site_privilege.save()
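The closing comment above describes how the result array relates to expected_num. A hedged sketch of the implied success check, assuming run_template simply compares the number of parsed "ok"/"changed" payloads against expected_num (the helper name check_expected is illustrative, not part of the project):

# Illustrative only: assumes run_template counts the JSON payloads parsed from
# the Ansible output (see parse_output in Example #13) and compares them to
# expected_num, failing when some operation did not report "ok".
def check_expected(results, expected_num):
    if len(results) != expected_num:
        raise Exception("Expected %d results, got %d" % (expected_num, len(results)))
    return results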
Example #8
    def sync_record(self, controller_slice_privilege):
        logger.info("sync'ing controller_slice_privilege %s at controller %s" % (controller_slice_privilege, controller_slice_privilege.controller))

        controller_register = json.loads(controller_slice_privilege.controller.backend_register)
        if (controller_register.get('disabled', False)):
            raise InnocuousException('Controller %s is disabled' % controller_slice_privilege.controller.name)

        if not controller_slice_privilege.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_slice_privilege.controller)
            return

        template = os_template_env.get_template('sync_controller_users.yaml')
        roles = [controller_slice_privilege.slice_privilege.role.role]
        # setup user home slice roles at controller
        if not controller_slice_privilege.slice_privilege.user.site:
            raise Exception('Sliceless user %s' % controller_slice_privilege.slice_privilege.user.email)
        else:
            # look up tenant id for the user's slice at the controller
            #ctrl_slice_deployments = SliceDeployment.objects.filter(
            #  slice_deployment__slice=controller_slice_privilege.user.slice,
            #  controller=controller_slice_privilege.controller)

            #if ctrl_slice_deployments:
            #    # need the correct tenant id for slice at the controller
            #    tenant_id = ctrl_slice_deployments[0].tenant_id
            #    tenant_name = ctrl_slice_deployments[0].slice_deployment.slice.login_base
            user_fields = {
                'endpoint': controller_slice_privilege.controller.auth_url,
                'name': controller_slice_privilege.slice_privilege.user.email,
                'email': controller_slice_privilege.slice_privilege.user.email,
                'password': controller_slice_privilege.slice_privilege.user.remote_password,
                'admin_user': controller_slice_privilege.controller.admin_user,
                'admin_password': controller_slice_privilege.controller.admin_password,
                'ansible_tag': '%s@%s@%s' % (controller_slice_privilege.slice_privilege.user.email.replace('@', '-at-'), controller_slice_privilege.slice_privilege.slice.name, controller_slice_privilege.controller.name),
                'admin_tenant': controller_slice_privilege.controller.admin_tenant,
                'roles': roles,
                'tenant': controller_slice_privilege.slice_privilege.slice.name}

            rendered = template.render(user_fields)
            expected_length = len(roles) + 1
            res = run_template('sync_controller_users.yaml', user_fields, path='controller_slice_privileges', expected_num=expected_length)

            # results is an array in which each element corresponds to an
            # "ok" string received per operation. If we get as many oks as
            # the number of operations we issued, that means a grand success.
            # Otherwise, the number of oks tells us which operation failed.
            controller_slice_privilege.role_id = res[0]['id']
            controller_slice_privilege.save()
Example #9
    def sync_record(self, controller_image):
        logger.info("Working on image %s on controller %s" % (controller_image.image.name, controller_image.controller))
        image_fields = {'endpoint':controller_image.controller.auth_url,
                        'admin_user':controller_image.controller.admin_user,
                        'admin_password':controller_image.controller.admin_password,
                        'name':controller_image.image.name,
                        'filepath':controller_image.image.path,
                        'ansible_tag': '%s@%s'%(controller_image.image.name,controller_image.controller.name), # name of ansible playbook
                        }


        res = run_template('sync_controller_images.yaml', image_fields, path='controller_images', expected_num=1)

        image_id = res[0]['id']
        controller_image.glance_image_id = image_id
        controller_image.backend_status = '1 - OK'
        controller_image.save()
Example #10
    def sync_record(self, controller_slice):
        logger.info("sync'ing slice controller %s" % controller_slice)

        controller_register = json.loads(controller_slice.controller.backend_register)
        if (controller_register.get('disabled',False)):
            raise Exception('Controller %s is disabled'%controller_slice.controller.name)

        if not controller_slice.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
            return

        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
                                                             controller=controller_slice.controller)
        if not controller_users:
            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
        else:
            controller_user = controller_users[0]
            roles = ['Admin']

        max_instances=int(controller_slice.slice.max_slivers)
        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
                         'admin_user': controller_slice.controller.admin_user,
                         'admin_password': controller_slice.controller.admin_password,
                         'admin_tenant': 'admin',
                         'tenant': controller_slice.slice.name,
                         'tenant_description': controller_slice.slice.description,
                         'roles':roles,
                         'name':controller_user.user.email,
                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
                         'max_instances':max_instances}

        expected_num = len(roles)+1
        res = run_template('sync_controller_slices.yaml', tenant_fields, path='controller_slices', expected_num=expected_num)
        tenant_id = res[0]['id']
        if (not controller_slice.tenant_id):
            try:
                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
                # controller_slice.tenant_id is still unset in this branch, so use the id just returned by the playbook
                driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_slivers))
            except:
                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
                raise Exception('Could not update quota for %s'%controller_slice.slice.name)

            controller_slice.tenant_id = tenant_id
            controller_slice.backend_status = '1 - OK'
            controller_slice.save()
Example #11
    def sync_record(self, controller_slice):
        logger.info("sync'ing slice controller %s" % controller_slice)

        controller_register = json.loads(controller_slice.controller.backend_register)
        if (controller_register.get('disabled',False)):
            raise InnocuousException('Controller %s is disabled'%controller_slice.controller.name)

        if not controller_slice.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_slice.controller)
            return

        controller_users = ControllerUser.objects.filter(user=controller_slice.slice.creator,
                                                             controller=controller_slice.controller)
        if not controller_users:
            raise Exception("slice createor %s has not accout at controller %s" % (controller_slice.slice.creator, controller_slice.controller.name))
        else:
            controller_user = controller_users[0]
            roles = ['Admin']

        max_instances=int(controller_slice.slice.max_slivers)
        tenant_fields = {'endpoint':controller_slice.controller.auth_url,
                         'admin_user': controller_slice.controller.admin_user,
                         'admin_password': controller_slice.controller.admin_password,
                         'admin_tenant': 'admin',
                         'tenant': controller_slice.slice.name,
                         'tenant_description': controller_slice.slice.description,
                         'roles':roles,
                         'name':controller_user.user.email,
                         'ansible_tag':'%s@%s'%(controller_slice.slice.name,controller_slice.controller.name),
                         'max_instances':max_instances}

        expected_num = len(roles)+1
        res = run_template('sync_controller_slices.yaml', tenant_fields, path='controller_slices', expected_num=expected_num)
        tenant_id = res[0]['id']
        if (not controller_slice.tenant_id):
            try:
                driver = OpenStackDriver().admin_driver(controller=controller_slice.controller)
                # controller_slice.tenant_id is still unset in this branch, so use the id just returned by the playbook
                driver.shell.nova.quotas.update(tenant_id=tenant_id, instances=int(controller_slice.slice.max_slivers))
            except:
                logger.log_exc('Could not update quota for %s'%controller_slice.slice.name)
                raise Exception('Could not update quota for %s'%controller_slice.slice.name)

            controller_slice.tenant_id = tenant_id
            controller_slice.backend_status = '1 - OK'
            controller_slice.save()
Example #12
    def map_sync_inputs(self, controller_network):
        if (controller_network.network.template.name != 'Private'):
            logger.info(
                "skipping network controller %s because it is not private" %
                controller_network)
            # We only sync private networks
            return SyncStep.SYNC_WITHOUT_RUNNING

        if not controller_network.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" %
                        controller_network.controller)
            return

        if controller_network.network.owner and controller_network.network.owner.creator:
            return self.save_controller_network(controller_network)
        else:
            raise Exception('Could not save network controller %s' %
                            controller_network)
Example #13
File: ansible.py  Project: xuys50/xos
def parse_output(msg):
    lines = msg.splitlines()
    results = []

    observer_logger.info(msg)

    for l in lines:
        magic_str = 'ok: [127.0.0.1] => '
        magic_str2 = 'changed: [127.0.0.1] => '
        if (l.startswith(magic_str)):
            w = len(magic_str)
            str = l[w:]
            d = json.loads(str)
            results.append(d)
        elif (l.startswith(magic_str2)):
            w = len(magic_str2)
            str = l[w:]
            d = json.loads(str)
            results.append(d)

    return results
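For reference, a minimal, self-contained sketch of what parse_output consumes and returns; the sample Ansible output below is made up and the observer_logger call is omitted:

import json

sample = '\n'.join([
    'PLAY [all] *************',
    'ok: [127.0.0.1] => {"id": "abc-123", "changed": false}',
    'changed: [127.0.0.1] => {"id": "def-456", "changed": true}',
])

results = []
for line in sample.splitlines():
    for marker in ('ok: [127.0.0.1] => ', 'changed: [127.0.0.1] => '):
        if line.startswith(marker):
            # keep only the JSON payload that follows the marker
            results.append(json.loads(line[len(marker):]))

print(results)  # two dicts, one per matched line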
Example #14
    def sync_record(self, controller_image):
        logger.info("Working on image %s on controller %s" % (controller_image.image.name, controller_image.controller))

        controller_register = json.loads(controller_image.controller.backend_register)
        if controller_register.get("disabled", False):
            raise InnocuousException("Controller %s is disabled" % controller_image.controller.name)

        image_fields = {
            "endpoint": controller_image.controller.auth_url,
            "admin_user": controller_image.controller.admin_user,
            "admin_password": controller_image.controller.admin_password,
            "name": controller_image.image.name,
            "filepath": controller_image.image.path,
            "ansible_tag": "%s@%s"
            % (controller_image.image.name, controller_image.controller.name),  # name of ansible playbook
        }

        res = run_template("sync_controller_images.yaml", image_fields, path="controller_images", expected_num=1)

        image_id = res[0]["id"]
        controller_image.glance_image_id = image_id
        controller_image.backend_status = "1 - OK"
        controller_image.save()
Example #15
    def sync_record(self, controller_image):
        logger.info("Working on image %s on controller %s" %
                    (controller_image.image.name, controller_image.controller))

        controller_register = json.loads(
            controller_image.controller.backend_register)
        if (controller_register.get('disabled', False)):
            raise InnocuousException('Controller %s is disabled' %
                                     controller_image.controller.name)

        image_fields = {
            'endpoint':
            controller_image.controller.auth_url,
            'admin_user':
            controller_image.controller.admin_user,
            'admin_password':
            controller_image.controller.admin_password,
            'name':
            controller_image.image.name,
            'filepath':
            controller_image.image.path,
            'ansible_tag':
            '%s@%s' %
            (controller_image.image.name,
             controller_image.controller.name),  # name of ansible playbook
        }

        res = run_template('sync_controller_images.yaml',
                           image_fields,
                           path='controller_images',
                           expected_num=1)

        image_id = res[0]['id']
        controller_image.glance_image_id = image_id
        controller_image.backend_status = '1 - OK'
        controller_image.save()
Example #16
    def sync_record(self, controller_network):
        logger.info("sync'ing network controller %s for network %s slice %s controller %s" % (controller_network, controller_network.network, str(controller_network.network.owner), controller_network.controller))

        if not controller_network.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
            return

        if controller_network.network.owner and controller_network.network.owner.creator:
            self.save_controller_network(controller_network)
            logger.info("saved network controller: %s" % (controller_network))
Example #17
    def sync_record(self, controller_network):
        logger.info("sync'ing network controller %s for network %s slice %s controller %s" % (controller_network, controller_network.network, str(controller_network.network.owner), controller_network.controller))

        controller_register = json.loads(controller_network.controller.backend_register)
        if (controller_register.get('disabled', False)):
            raise Exception('Controller %s is disabled' % controller_network.controller.name)

        if not controller_network.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_network.controller)
            return

        if controller_network.network.owner and controller_network.network.owner.creator:
            self.save_controller_network(controller_network)
            logger.info("saved network controller: %s" % (controller_network))
Example #18
    def sync_record(self, controller_network):
        logger.info(
            "sync'ing network controller %s for network %s slice %s controller %s"
            % (controller_network, controller_network.network,
               str(controller_network.network.owner),
               controller_network.controller))

        if not controller_network.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" %
                        controller_network.controller)
            return

        if controller_network.network.owner and controller_network.network.owner.creator:
            self.save_controller_network(controller_network)
            logger.info("saved network controller: %s" % (controller_network))
Example #19
File: sync_images.py  Project: xuys50/xos
    def fetch_pending(self, deleted):
        # Images come from the back end
        # You can't delete them
        if (deleted):
            logger.info("SyncImages: returning because deleted=True")
            return []

        # get list of images on disk
        images_path = Config().observer_images_directory

        logger.info("SyncImages: deleted=False, images_path=%s" % images_path)

        available_images = {}
        if os.path.exists(images_path):
            for f in os.listdir(images_path):
                filename = os.path.join(images_path, f)
                if os.path.isfile(filename):
                    available_images[f] = filename

        logger.info("SyncImages: available_images = %s" %
                    str(available_images))

        images = Image.objects.all()
        image_names = [image.name for image in images]

        for image_name in available_images:
            #remove file extension
            clean_name = ".".join(image_name.split('.')[:-1])
            if clean_name not in image_names:
                logger.info("SyncImages: adding %s" % clean_name)
                image = Image(name=clean_name,
                              disk_format='raw',
                              container_format='bare',
                              path=available_images[image_name])
                image.save()

        return Image.objects.filter(
            Q(enacted__lt=F('updated')) | Q(enacted=None))
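Note that clean_name above drops only the final dot-separated component of the filename. A small illustration with a made-up image filename:

# Made-up filename, just to show how the extension is stripped in fetch_pending above.
image_name = 'trusty-server-multi-nic.img'
clean_name = ".".join(image_name.split('.')[:-1])
print(clean_name)  # trusty-server-multi-nic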
Example #20
    def fetch_pending(self, deleted):
        # Images come from the back end
        # You can't delete them
        if (deleted):
            logger.info("SyncImages: returning because deleted=True")
            return []

        # get list of images on disk
        images_path = Config().observer_images_directory

        logger.info("SyncImages: deleted=False, images_path=%s" % images_path)

        available_images = {}
        if os.path.exists(images_path):
            for f in os.listdir(images_path):
                filename = os.path.join(images_path, f)
                if os.path.isfile(filename):
                    available_images[f] = filename

        logger.info("SyncImages: available_images = %s" % str(available_images))

        images = Image.objects.all()
        image_names = [image.name for image in images]

        for image_name in available_images:
            #remove file extension
            clean_name = ".".join(image_name.split('.')[:-1])
            if clean_name not in image_names:
                logger.info("SyncImages: adding %s" % clean_name)
                image = Image(name=clean_name,
                              disk_format='raw',
                              container_format='bare', 
                              path = available_images[image_name])
                image.save()

        return Image.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None)) 
Example #21
    def sync_record(self, sliver):
        logger.info("sync'ing sliver:%s slice:%s controller:%s " % (sliver, sliver.slice.name, sliver.node.site_deployment.controller))
        controller_register = json.loads(sliver.node.site_deployment.controller.backend_register)

        if (controller_register.get('disabled',False)):
            raise Exception('Controller %s is disabled'%sliver.node.site_deployment.controller.name)

        metadata_update = {}
        if (sliver.numberCores):
            metadata_update["cpu_cores"] = str(sliver.numberCores)

        for tag in sliver.slice.tags.all():
            if tag.name.startswith("sysctl-"):
                metadata_update[tag.name] = tag.value

        # public keys
        slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
        pubkeys = set([sm.user.public_key for sm in slice_memberships if sm.user.public_key])
        if sliver.creator.public_key:
            pubkeys.add(sliver.creator.public_key)

        if sliver.slice.creator.public_key:
            pubkeys.add(sliver.slice.creator.public_key)

        if sliver.slice.service and sliver.slice.service.public_key:
            pubkeys.add(sliver.slice.service.public_key)

        nics = []
        networks = [ns.network for ns in NetworkSlice.objects.filter(slice=sliver.slice)]
        controller_networks = ControllerNetwork.objects.filter(network__in=networks,
                                                                controller=sliver.node.site_deployment.controller)

        for controller_network in controller_networks:
            if controller_network.network.template.visibility == 'private' and \
               controller_network.network.template.translation == 'none':
                if not controller_network.net_id:
                    raise Exception("Private Network %s has no id; Try again later" % controller_network.network.name)
                nics.append(controller_network.net_id)

        # now include network template
        network_templates = [network.template.shared_network_name for network in networks \
                             if network.template.shared_network_name]

        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
        driver = self.driver.admin_driver(tenant='admin', controller=sliver.node.site_deployment.controller)
        nets = driver.shell.quantum.list_networks()['networks']
        for net in nets:
            if net['name'] in network_templates:
                nics.append(net['id'])

        if (not nics):
            for net in nets:
                if net['name']=='public':
                    nics.append(net['id'])

        image_id = None
        controller_images = sliver.image.controllerimages.filter(controller=sliver.node.site_deployment.controller)
        if controller_images:
            image_id = controller_images[0].glance_image_id
            logger.info("using image_id from ControllerImage object: " + str(image_id))

        if image_id is None:
            controller_driver = self.driver.admin_driver(controller=sliver.node.site_deployment.controller)
            image_id = None
            images = controller_driver.shell.glanceclient.images.list()
            for image in images:
                if image.name == sliver.image.name or not image_id:
                    image_id = image.id
                    logger.info("using image_id from glance: " + str(image_id))

        try:
            legacy = Config().observer_legacy
        except:
            legacy = False

        if (legacy):
            host_filter = sliver.node.name.split('.',1)[0]
        else:
            host_filter = sliver.node.name.strip()

        availability_zone_filter = 'nova:%s'%host_filter
        sliver_name = '%s-%d'%(sliver.slice.name,sliver.id)

        userData = self.get_userdata(sliver, pubkeys)
        if sliver.userData:
            userData = sliver.userData

        controller = sliver.node.site_deployment.controller
        tenant_fields = {'endpoint':controller.auth_url,
                     'admin_user': sliver.creator.email,
                     'admin_password': sliver.creator.remote_password,
                     'admin_tenant': sliver.slice.name,
                     'tenant': sliver.slice.name,
                     'tenant_description': sliver.slice.description,
                     'name':sliver_name,
                     'ansible_tag':sliver_name,
                     'availability_zone': availability_zone_filter,
                     'image_id':image_id,
                     'flavor_id':sliver.flavor.id,
                     'nics':nics,
                     'meta':metadata_update,
                     'user_data':r'%s'%escape(userData)}

        res = run_template('sync_slivers.yaml', tenant_fields,path='slivers', expected_num=1)
        sliver_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
        sliver_uuid = res[0]['id']

        try:
            hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
            ip = socket.gethostbyname(hostname)
            sliver.ip = ip
        except:
            pass

        sliver.instance_id = sliver_id
        sliver.instance_uuid = sliver_uuid
        sliver.instance_name = sliver_name
        sliver.save()
Example #22
    def sync_record(self, sliver):
        logger.info("sync'ing sliver:%s slice:%s controller:%s " %
                    (sliver, sliver.slice.name,
                     sliver.node.site_deployment.controller))

        metadata_update = {}
        if (sliver.numberCores):
            metadata_update["cpu_cores"] = str(sliver.numberCores)

        for tag in sliver.slice.tags.all():
            if tag.name.startswith("sysctl-"):
                metadata_update[tag.name] = tag.value

        # public keys
        slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
        pubkeys = set([
            sm.user.public_key for sm in slice_memberships
            if sm.user.public_key
        ])
        if sliver.creator.public_key:
            pubkeys.add(sliver.creator.public_key)

        if sliver.slice.creator.public_key:
            pubkeys.add(sliver.slice.creator.public_key)

        nics = []
        networks = [
            ns.network
            for ns in NetworkSlice.objects.filter(slice=sliver.slice)
        ]
        controller_networks = ControllerNetwork.objects.filter(
            network__in=networks,
            controller=sliver.node.site_deployment.controller)

        for controller_network in controller_networks:
            if controller_network.network.template.visibility == 'private' and \
               controller_network.network.template.translation == 'none' and controller_network.net_id:
                nics.append(controller_network.net_id)

        # now include network template
        network_templates = [network.template.shared_network_name for network in networks \
                             if network.template.shared_network_name]

        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
        driver = self.driver.admin_driver(
            tenant='admin', controller=sliver.node.site_deployment.controller)
        nets = driver.shell.quantum.list_networks()['networks']
        for net in nets:
            if net['name'] in network_templates:
                nics.append(net['id'])

        if (not nics):
            for net in nets:
                if net['name'] == 'public':
                    nics.append(net['id'])

        # look up image id
        if (not sliver.image.id):
            controller_driver = self.driver.admin_driver(
                controller=sliver.node.site_deployment.controller)
            image_id = None
            images = controller_driver.shell.glanceclient.images.list()
            for image in images:
                if image.name == sliver.image.name or not image_id:
                    image_id = image.id
        else:
            image_id = sliver.image.id

        try:
            legacy = Config().observer_legacy
        except:
            legacy = False

        if (legacy):
            host_filter = sliver.node.name.split('.', 1)[0]
        else:
            host_filter = sliver.node.name.strip()

        availability_zone_filter = 'nova:%s' % host_filter
        sliver_name = '%s-%d' % (sliver.slice.name, sliver.id)

        userData = self.get_userdata(sliver, pubkeys)
        if sliver.userData:
            userData = sliver.userData

        controller = sliver.node.site_deployment.controller
        tenant_fields = {
            'endpoint': controller.auth_url,
            'admin_user': sliver.creator.email,
            'admin_password': sliver.creator.remote_password,
            'admin_tenant': sliver.slice.name,
            'tenant': sliver.slice.name,
            'tenant_description': sliver.slice.description,
            'name': sliver_name,
            'ansible_tag': sliver_name,
            'availability_zone': availability_zone_filter,
            'image_id': image_id,
            'flavor_id': sliver.flavor.id,
            'nics': nics,
            'meta': metadata_update,
            'user_data': r'%s' % escape(userData)
        }

        res = run_template('sync_slivers.yaml',
                           tenant_fields,
                           path='slivers',
                           expected_num=2)
        sliver_id = res[1]['info'][
            'OS-EXT-SRV-ATTR:instance_name']  # 0 is for the key
        sliver_uuid = res[1]['id']  # 0 is for the key

        try:
            hostname = res[1]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
            ip = socket.gethostbyname(hostname)
            sliver.ip = ip
        except:
            pass

        sliver.instance_id = sliver_id
        sliver.instance_uuid = sliver_uuid
        sliver.instance_name = sliver_name
        sliver.save()
Example #23
    def call(self, **args):
        logger.info("sync'ing network slivers")

        networkSlivers = NetworkSliver.objects.all()
        networkSlivers_by_id = {}
        networkSlivers_by_port = {}
        for networkSliver in networkSlivers:
            networkSlivers_by_id[networkSliver.id] = networkSliver
            networkSlivers_by_port[networkSliver.port_id] = networkSliver

        networks = Network.objects.all()
        networks_by_id = {}
        for network in networks:
            for nd in network.controllernetworks.all():
                networks_by_id[nd.net_id] = network

        #logger.info("networks_by_id = ")
        #for (network_id, network) in networks_by_id.items():
        #    logger.info("   %s: %s" % (network_id, network.name))

        slivers = Sliver.objects.all()
        slivers_by_instance_uuid = {}
        for sliver in slivers:
            slivers_by_instance_uuid[sliver.instance_uuid] = sliver

        # Get all ports in all controllers

        ports_by_id = {}
        templates_by_id = {}
        for controller in Controller.objects.all():
            if not controller.admin_tenant:
                logger.info("controller %s has no admin_tenant" % controller)
                continue
            try:
                driver = self.driver.admin_driver(controller=controller,
                                                  tenant='admin')
                ports = driver.shell.quantum.list_ports()["ports"]
            except:
                logger.log_exc("failed to get ports from controller %s" %
                               controller)
                continue

            for port in ports:
                ports_by_id[port["id"]] = port

            # public-nat and public-dedicated networks don't have a net-id anywhere
            # in the data model, so build up a list of which ids map to which network
            # templates.
            try:
                neutron_networks = driver.shell.quantum.list_networks(
                )["networks"]
            except:
                print "failed to get networks from controller %s" % controller
                continue
            for network in neutron_networks:
                for template in NetworkTemplate.objects.all():
                    if template.shared_network_name == network["name"]:
                        templates_by_id[network["id"]] = template

        for port in ports_by_id.values():
            #logger.info("port %s" % str(port))
            if port["id"] in networkSlivers_by_port:
                # we already have it
                #logger.info("already accounted for port %s" % port["id"])
                continue

            if port["device_owner"] != "compute:nova":
                # we only want the ports that connect to instances
                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
                continue

            sliver = slivers_by_instance_uuid.get(port['device_id'], None)
            if not sliver:
                logger.info("no sliver for port %s device_id %s" %
                            (port["id"], port['device_id']))
                continue

            network = networks_by_id.get(port['network_id'], None)
            if not network:
                # maybe it's public-nat or public-dedicated. Search the templates for
                # the id, then see if the sliver's slice has some network that uses
                # that template
                template = templates_by_id.get(port['network_id'], None)
                if template and sliver.slice:
                    for candidate_network in sliver.slice.networks.all():
                        if candidate_network.template == template:
                            network = candidate_network
            if not network:
                logger.info("no network for port %s network %s" %
                            (port["id"], port["network_id"]))

                # we know it's associated with a sliver, but we don't know
                # which network it is part of.

                continue

            if network.template.shared_network_name:
                # If it's a shared network template, then more than one network
                # object maps to the quantum network. We have to do a whole bunch
                # of extra work to find the right one.
                networks = network.template.network_set.all()
                network = None
                for candidate_network in networks:
                    if (candidate_network.owner == sliver.slice):
                        print "found network", candidate_network
                        network = candidate_network

                if not network:
                    logger.info(
                        "failed to find the correct network for a shared template for port %s network %s"
                        % (port["id"], port["network_id"]))
                    continue

            if not port["fixed_ips"]:
                logger.info("port %s has no fixed_ips" % port["id"])
                continue

            ip = port["fixed_ips"][0]["ip_address"]
            logger.info("creating NetworkSliver (%s, %s, %s, %s)" %
                        (str(network), str(sliver), ip, str(port["id"])))

            ns = NetworkSliver(network=network,
                               sliver=sliver,
                               ip=ip,
                               port_id=port["id"])

            try:
                ns.save()
            except:
                logger.log_exc("failed to save networksliver %s" % str(ns))
                continue

        # Now, handle port forwarding
        # We get the list of NetworkSlivers again, since we might have just
        # added a few. Then, for each one of them we find it's quantum port and
        # make sure quantum's nat:forward_ports argument is the same.

        for networkSliver in NetworkSliver.objects.all():
            try:
                nat_list = networkSliver.network.nat_list
            except (TypeError, ValueError), e:
                logger.info("Failed to decode nat_list: %s" % str(e))
                continue

            if not networkSliver.port_id:
                continue

            neutron_port = ports_by_id.get(networkSliver.port_id, None)
            if not neutron_port:
                continue

            neutron_nat_list = neutron_port.get("nat:forward_ports", None)
            if not neutron_nat_list:
                # make sure that None and the empty set are treated identically
                neutron_nat_list = []

            if (neutron_nat_list != nat_list):
                logger.info(
                    "Setting nat:forward_ports for port %s network %s sliver %s to %s"
                    %
                    (str(networkSliver.port_id), str(networkSliver.network.id),
                     str(networkSliver.sliver), str(nat_list)))
                try:
                    driver = self.driver.admin_driver(
                        controller=networkSliver.sliver.node.site_deployment.
                        controller,
                        tenant='admin')
                    driver.shell.quantum.update_port(
                        networkSliver.port_id,
                        {"port": {
                            "nat:forward_ports": nat_list
                        }})
                except:
                    logger.log_exc("failed to update port with nat_list %s" %
                                   str(nat_list))
                    continue
            else:
                #logger.info("port %s network %s sliver %s nat %s is already set" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
                pass
    def sync_record(self, controller_site_privilege):
        logger.info(
            "sync'ing controler_site_privilege %s at controller %s"
            % (controller_site_privilege, controller_site_privilege.controller)
        )

        controller_register = json.loads(controller_site_privilege.controller.backend_register)
        if controller_register.get("disabled", False):
            raise InnocuousException("Controller %s is disabled" % controller_site_privilege.controller.name)

        if not controller_site_privilege.controller.admin_user:
            logger.info("controller %r has no admin_user, skipping" % controller_site_privilege.controller)
            return

        template = os_template_env.get_template("sync_controller_users.yaml")
        roles = [controller_site_privilege.site_privilege.role.role]
        # setup user home site roles at controller
        if not controller_site_privilege.site_privilege.user.site:
            raise Exception("Siteless user %s" % controller_site_privilege.site_privilege.user.email)
        else:
            # look up tenant id for the user's site at the controller
            # ctrl_site_deployments = SiteDeployment.objects.filter(
            #  site_deployment__site=controller_site_privilege.user.site,
            #  controller=controller_site_privilege.controller)

            # if ctrl_site_deployments:
            #    # need the correct tenant id for site at the controller
            #    tenant_id = ctrl_site_deployments[0].tenant_id
            #    tenant_name = ctrl_site_deployments[0].site_deployment.site.login_base
            user_fields = {
                "endpoint": controller_site_privilege.controller.auth_url,
                "name": controller_site_privilege.site_privilege.user.email,
                "email": controller_site_privilege.site_privilege.user.email,
                "password": controller_site_privilege.site_privilege.user.remote_password,
                "admin_user": controller_site_privilege.controller.admin_user,
                "admin_password": controller_site_privilege.controller.admin_password,
                "ansible_tag": "%s@%s"
                % (
                    controller_site_privilege.site_privilege.user.email.replace("@", "-at-"),
                    controller_site_privilege.controller.name,
                ),
                "admin_tenant": controller_site_privilege.controller.admin_tenant,
                "roles": roles,
                "tenant": controller_site_privilege.site_privilege.site.login_base,
            }

            rendered = template.render(user_fields)
            expected_length = len(roles) + 1
            res = run_template(
                "sync_controller_users.yaml",
                user_fields,
                path="controller_site_privileges",
                expected_num=expected_length,
            )

            # results is an array in which each element corresponds to an
            # "ok" string received per operation. If we get as many oks as
            # the number of operations we issued, that means a grand success.
            # Otherwise, the number of oks tell us which operation failed.
            controller_site_privilege.role_id = res[0]["id"]
            controller_site_privilege.save()
Example #25
    def sync_record(self, sliver):
        logger.info("sync'ing sliver:%s slice:%s controller:%s " %
                    (sliver, sliver.slice.name,
                     sliver.node.site_deployment.controller))
        controller_register = json.loads(
            sliver.node.site_deployment.controller.backend_register)

        if (controller_register.get('disabled', False)):
            raise InnocuousException(
                'Controller %s is disabled' %
                sliver.node.site_deployment.controller.name)

        metadata_update = {}
        if (sliver.numberCores):
            metadata_update["cpu_cores"] = str(sliver.numberCores)

        for tag in sliver.slice.tags.all():
            if tag.name.startswith("sysctl-"):
                metadata_update[tag.name] = tag.value

        # public keys
        slice_memberships = SlicePrivilege.objects.filter(slice=sliver.slice)
        pubkeys = set([
            sm.user.public_key for sm in slice_memberships
            if sm.user.public_key
        ])
        if sliver.creator.public_key:
            pubkeys.add(sliver.creator.public_key)

        if sliver.slice.creator.public_key:
            pubkeys.add(sliver.slice.creator.public_key)

        if sliver.slice.service and sliver.slice.service.public_key:
            pubkeys.add(sliver.slice.service.public_key)

        # Handle any ports that are already created and attached to the sliver.
        # If we do have a port for a network, then add that network to an
        # exclude list so we won't try to auto-create ports on that network
        # when instantiating.
        ports = []
        exclude_networks = set()
        exclude_templates = set()
        for ns in sliver.networkslivers.all():
            if not ns.port_id:
                raise Exception(
                    "Port %s on sliver %s has no id; Try again later" %
                    (str(ns), str(sliver)))
            ports.append(ns.port_id)
            exclude_networks.add(ns.network)
            exclude_templates.add(ns.network.template)

        nics = []
        networks = [
            ns.network
            for ns in NetworkSlice.objects.filter(slice=sliver.slice)
        ]
        networks = [n for n in networks if (n not in exclude_networks)]
        controller_networks = ControllerNetwork.objects.filter(
            network__in=networks,
            controller=sliver.node.site_deployment.controller)

        for controller_network in controller_networks:
            if controller_network.network.template.visibility == 'private' and \
               controller_network.network.template.translation == 'none':
                if not controller_network.net_id:
                    raise Exception(
                        "Private Network %s has no id; Try again later" %
                        controller_network.network.name)
                nics.append(controller_network.net_id)

        # Now include network templates, for those networks that use a
        # shared_network_name.
        network_templates = [network.template.shared_network_name for network in networks \
                             if network.template.shared_network_name]
        network_templates = [
            nt for nt in network_templates if (nt not in exclude_templates)
        ]

        #driver = self.driver.client_driver(caller=sliver.creator, tenant=sliver.slice.name, controller=sliver.controllerNetwork)
        driver = self.driver.admin_driver(
            tenant='admin', controller=sliver.node.site_deployment.controller)
        nets = driver.shell.quantum.list_networks()['networks']
        for net in nets:
            if net['name'] in network_templates:
                nics.append(net['id'])

        # If the slice isn't connected to anything, then at least put it on
        # the public network.
        if (not nics) and (not ports):
            for net in nets:
                if net['name'] == 'public':
                    nics.append(net['id'])

        image_name = None
        controller_images = sliver.image.controllerimages.filter(
            controller=sliver.node.site_deployment.controller)
        if controller_images:
            image_name = controller_images[0].image.name
            logger.info("using image from ControllerImage object: " +
                        str(image_name))

        if image_name is None:
            controller_driver = self.driver.admin_driver(
                controller=sliver.node.site_deployment.controller)
            images = controller_driver.shell.glanceclient.images.list()
            for image in images:
                if image.name == sliver.image.name or not image_name:
                    image_name = image.name
                    logger.info("using image from glance: " + str(image_name))

        try:
            legacy = Config().observer_legacy
        except:
            legacy = False

        if (legacy):
            host_filter = sliver.node.name.split('.', 1)[0]
        else:
            host_filter = sliver.node.name.strip()

        availability_zone_filter = 'nova:%s' % host_filter
        sliver_name = '%s-%d' % (sliver.slice.name, sliver.id)

        userData = self.get_userdata(sliver, pubkeys)
        if sliver.userData:
            userData = sliver.userData

        controller = sliver.node.site_deployment.controller
        tenant_fields = {
            'endpoint': controller.auth_url,
            'admin_user': sliver.creator.email,
            'admin_password': sliver.creator.remote_password,
            'admin_tenant': sliver.slice.name,
            'tenant': sliver.slice.name,
            'tenant_description': sliver.slice.description,
            'name': sliver_name,
            'ansible_tag': sliver_name,
            'availability_zone': availability_zone_filter,
            'image_name': image_name,
            'flavor_name': sliver.flavor.name,
            'nics': nics,
            'ports': ports,
            'meta': metadata_update,
            'user_data': r'%s' % escape(userData)
        }

        res = run_template('sync_slivers.yaml',
                           tenant_fields,
                           path='slivers',
                           expected_num=1)
        sliver_id = res[0]['info']['OS-EXT-SRV-ATTR:instance_name']
        sliver_uuid = res[0]['id']

        try:
            hostname = res[0]['info']['OS-EXT-SRV-ATTR:hypervisor_hostname']
            ip = socket.gethostbyname(hostname)
            sliver.ip = ip
        except:
            pass

        sliver.instance_id = sliver_id
        sliver.instance_uuid = sliver_uuid
        sliver.instance_name = sliver_name
        sliver.save()
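
The NIC assembly above follows a fixed precedence: networks that already carry an explicit port are excluded, private untranslated networks are attached by their controller-side net_id, networks whose template names a shared quantum network are attached by looking that name up in the controller's network list, and 'public' is used only as a last resort. A simplified sketch of that precedence, assuming the controller_networks passed in are already filtered to the slice's non-excluded networks (plain objects stand in for the XOS models):

    def build_nics(controller_networks, shared_names, neutron_networks, have_ports):
        """Return the quantum network ids to attach when booting the instance.

        controller_networks -- per-controller records (carry .network and .net_id)
        shared_names        -- shared_network_name values of the slice's networks
        neutron_networks    -- result of list_networks(), [{'name': ..., 'id': ...}]
        have_ports          -- True if pre-created ports will be passed separately
        """
        nics = []

        # Private, untranslated networks are attached directly by net_id.
        for cn in controller_networks:
            template = cn.network.template
            if template.visibility == 'private' and template.translation == 'none':
                if not cn.net_id:
                    raise Exception("Private network %s has no id yet" % cn.network.name)
                nics.append(cn.net_id)

        # Shared networks are attached by resolving their name on the controller.
        for net in neutron_networks:
            if net['name'] in shared_names:
                nics.append(net['id'])

        # Fall back to the public network only if nothing else is attached.
        if not nics and not have_ports:
            nics.extend(net['id'] for net in neutron_networks
                        if net['name'] == 'public')
        return nics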
Example #26
File: sync_ports.py  Project: xuys50/xos
    def call(self, **args):
        logger.info("sync'ing network instances")

        ports = Port.objects.all()
        ports_by_id = {}
        ports_by_neutron_port = {}
        for port in ports:
            ports_by_id[port.id] = port
            ports_by_neutron_port[port.port_id] = port

        networks = Network.objects.all()
        networks_by_id = {}
        for network in networks:
            for nd in network.controllernetworks.all():
                networks_by_id[nd.net_id] = network

        #logger.info("networks_by_id = ")
        #for (network_id, network) in networks_by_id.items():
        #    logger.info("   %s: %s" % (network_id, network.name))

        instances = Instance.objects.all()
        instances_by_instance_uuid = {}
        for instance in instances:
            instances_by_instance_uuid[instance.instance_uuid] = instance

        # Get all ports in all controllers

        ports_by_id = {}
        templates_by_id = {}
        for controller in Controller.objects.all():
            if not controller.admin_tenant:
                logger.info("controller %s has no admin_tenant" % controller)
                continue
            try:
                driver = self.driver.admin_driver(controller=controller)
                ports = driver.shell.quantum.list_ports()["ports"]
            except:
                logger.log_exc("failed to get ports from controller %s" %
                               controller)
                continue

            for port in ports:
                ports_by_id[port["id"]] = port

            # public-nat and public-dedicated networks don't have a net-id anywhere
            # in the data model, so build up a list of which ids map to which network
            # templates.
            try:
                neutron_networks = driver.shell.quantum.list_networks(
                )["networks"]
            except:
                print "failed to get networks from controller %s" % controller
                continue
            for network in neutron_networks:
                for template in NetworkTemplate.objects.all():
                    if template.shared_network_name == network["name"]:
                        templates_by_id[network["id"]] = template

        for port in ports_by_id.values():
            #logger.info("port %s" % str(port))
            if port["id"] in ports_by_neutron_port:
                # we already have it
                #logger.info("already accounted for port %s" % port["id"])
                continue

            if port["device_owner"] != "compute:nova":
                # we only want the ports that connect to instances
                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
                continue

            instance = instances_by_instance_uuid.get(port['device_id'], None)
            if not instance:
                logger.info("no instance for port %s device_id %s" %
                            (port["id"], port['device_id']))
                continue

            network = networks_by_id.get(port['network_id'], None)
            if not network:
                # maybe it's public-nat or public-dedicated. Search the templates for
                # the id, then see if the instance's slice has some network that uses
                # that template
                template = templates_by_id.get(port['network_id'], None)
                if template and instance.slice:
                    for candidate_network in instance.slice.networks.all():
                        if candidate_network.template == template:
                            network = candidate_network
            if not network:
                logger.info("no network for port %s network %s" %
                            (port["id"], port["network_id"]))

                # we know it's associated with an instance, but we don't know
                # which network it is part of.

                continue

            if network.template.shared_network_name:
                # If it's a shared network template, then more than one network
                # object maps to the quantum network. We have to do a whole bunch
                # of extra work to find the right one.
                networks = network.template.network_set.all()
                network = None
                for candidate_network in networks:
                    if (candidate_network.owner == instance.slice):
                        logger.info("found network %s" % candidate_network)
                        network = candidate_network

                if not network:
                    logger.info(
                        "failed to find the correct network for a shared template for port %s network %s"
                        % (port["id"], port["network_id"]))
                    continue

            if not port["fixed_ips"]:
                logger.info("port %s has no fixed_ips" % port["id"])
                continue

            ip = port["fixed_ips"][0]["ip_address"]
            mac = port["mac_address"]
            logger.info("creating Port (%s, %s, %s, %s)" %
                        (str(network), str(instance), ip, str(port["id"])))

            ns = Port(network=network,
                      instance=instance,
                      ip=ip,
                      mac=mac,
                      port_id=port["id"])

            try:
                ns.save()
            except:
                logger.log_exc("failed to save port %s" % str(ns))
                continue

        # For ports that were created by the user, find the ones
        # that don't have neutron ports, and create them.
        for port in Port.objects.filter(Q(port_id__isnull=True),
                                        Q(instance__isnull=False)):
            logger.info("XXX working on port %s" % port)
            controller = port.instance.node.site_deployment.controller
            slice = port.instance.slice

            if controller:
                cn = port.network.controllernetworks.filter(
                    controller=controller)
                if not cn:
                    logger.log_exc("no controllernetwork for %s" % port)
                    continue
                cn = cn[0]
                if cn.lazy_blocked:
                    cn.lazy_blocked = False
                    cn.save()
                    logger.info(
                        "deferring port %s because controllerNetwork was lazy-blocked"
                        % port)
                    continue
                if not cn.net_id:
                    logger.info(
                        "deferring port %s because controllerNetwork does not have a port-id yet"
                        % port)
                    continue
                try:
                    # We need to use a client driver that specifies the tenant
                    # of the destination instance. Nova-compute will not connect
                    # ports to instances if the port's tenant does not match
                    # the instance's tenant.

                    # A bunch of stuff to compensate for OpenStackDriver.client_driver()
                    # not being in working condition.
                    from openstack.client import OpenStackClient
                    from openstack.driver import OpenStackDriver
                    caller = port.network.owner.creator
                    auth = {
                        'username': caller.email,
                        'password': caller.remote_password,
                        'tenant': slice.name
                    }
                    client = OpenStackClient(
                        controller=controller,
                        **auth)  # cacert=self.config.nova_ca_ssl_cert,
                    driver = OpenStackDriver(client=client)

                    neutron_port = driver.shell.quantum.create_port(
                        {"port": {
                            "network_id": cn.net_id
                        }})["port"]
                    port.port_id = neutron_port["id"]
                    if neutron_port["fixed_ips"]:
                        port.ip = neutron_port["fixed_ips"][0]["ip_address"]
                    port.mac = neutron_port["mac_address"]
                except:
                    logger.log_exc("failed to create neutron port for %s" %
                                   port)
                    continue
                port.save()
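
The discovery loop above is essentially a filter-and-lookup pipeline: keep only compute-owned ports, resolve device_id to a known instance, then resolve network_id either directly or through a shared-network template, and skip anything without a fixed IP. A compact sketch of that pipeline, with plain dicts and lookup maps standing in for the ORM queries:

    def ports_to_import(neutron_ports, instances_by_uuid,
                        networks_by_net_id, templates_by_net_id, known_port_ids):
        """Yield (port, instance, network_or_template) for ports worth modelling."""
        for port in neutron_ports:
            if port["id"] in known_port_ids:
                continue  # already represented in the data model
            if port.get("device_owner") != "compute:nova":
                continue  # only ports attached to Nova instances are of interest
            instance = instances_by_uuid.get(port.get("device_id"))
            if instance is None:
                continue  # not one of our instances
            network = networks_by_net_id.get(port.get("network_id"))
            if network is None:
                # public-nat / public-dedicated networks are only known by template
                network = templates_by_net_id.get(port.get("network_id"))
            if network is None:
                continue  # attached to our instance, but to an unknown network
            if not port.get("fixed_ips"):
                continue  # no address assigned yet
            yield port, instance, network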
Example #27
    def call(self, **args):
        logger.info("sync'ing network slivers")

        networkSlivers = NetworkSliver.objects.all()
        networkSlivers_by_id = {}
        networkSlivers_by_port = {}
        for networkSliver in networkSlivers:
            networkSlivers_by_id[networkSliver.id] = networkSliver
            networkSlivers_by_port[networkSliver.port_id] = networkSliver

        networks = Network.objects.all()
        networks_by_id = {}
        for network in networks:
            for nd in network.controllernetworks.all():
                networks_by_id[nd.net_id] = network

        #logger.info("networks_by_id = ")
        #for (network_id, network) in networks_by_id.items():
        #    logger.info("   %s: %s" % (network_id, network.name))

        slivers = Sliver.objects.all()
        slivers_by_instance_uuid = {}
        for sliver in slivers:
            slivers_by_instance_uuid[sliver.instance_uuid] = sliver

        # Get all ports in all controllers

        ports_by_id = {}
        templates_by_id = {}
        for controller in Controller.objects.all():
            if not controller.admin_tenant:
                logger.info("controller %s has no admin_tenant" % controller)
                continue
            try:
                driver = self.driver.admin_driver(controller=controller, tenant='admin')
                ports = driver.shell.quantum.list_ports()["ports"]
            except:
                logger.log_exc("failed to get ports from controller %s" % controller)
                continue

            for port in ports:
                ports_by_id[port["id"]] = port

            # public-nat and public-dedicated networks don't have a net-id anywhere
            # in the data model, so build up a list of which ids map to which network
            # templates.
            try:
                neutron_networks = driver.shell.quantum.list_networks()["networks"]
            except:
                print "failed to get networks from controller %s" % controller
                continue
            for network in neutron_networks:
                for template in NetworkTemplate.objects.all():
                    if template.shared_network_name == network["name"]:
                        templates_by_id[network["id"]] = template

        for port in ports_by_id.values():
            #logger.info("port %s" % str(port))
            if port["id"] in networkSlivers_by_port:
                # we already have it
                #logger.info("already accounted for port %s" % port["id"])
                continue

            if port["device_owner"] != "compute:nova":
                # we only want the ports that connect to instances
                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
                continue

            sliver = slivers_by_instance_uuid.get(port['device_id'], None)
            if not sliver:
                logger.info("no sliver for port %s device_id %s" % (port["id"], port['device_id']))
                continue

            network = networks_by_id.get(port['network_id'], None)
            if not network:
                # maybe it's public-nat or public-dedicated. Search the templates for
                # the id, then see if the sliver's slice has some network that uses
                # that template
                template = templates_by_id.get(port['network_id'], None)
                if template and sliver.slice:
                    for candidate_network in sliver.slice.networks.all():
                        if candidate_network.template == template:
                            network = candidate_network
            if not network:
                logger.info("no network for port %s network %s" % (port["id"], port["network_id"]))

                # we know it's associated with a sliver, but we don't know
                # which network it is part of.

                continue

            if network.template.shared_network_name:
                # If it's a shared network template, then more than one network
                # object maps to the quantum network. We have to do a whole bunch
                # of extra work to find the right one.
                networks = network.template.network_set.all()
                network = None
                for candidate_network in networks:
                    if (candidate_network.owner == sliver.slice):
                        print "found network", candidate_network
                        network = candidate_network

                if not network:
                    logger.info("failed to find the correct network for a shared template for port %s network %s" % (port["id"], port["network_id"]))
                    continue

            if not port["fixed_ips"]:
                logger.info("port %s has no fixed_ips" % port["id"])
                continue

            ip = port["fixed_ips"][0]["ip_address"]
            logger.info("creating NetworkSliver (%s, %s, %s, %s)" % (str(network), str(sliver), ip, str(port["id"])))

            ns = NetworkSliver(network=network,
                               sliver=sliver,
                               ip=ip,
                               port_id=port["id"])

            try:
                ns.save()
            except:
                logger.log_exc("failed to save networksliver %s" % str(ns))
                continue

        # Now, handle port forwarding
        # We get the list of NetworkSlivers again, since we might have just
        # added a few. Then, for each one of them we find its quantum port and
        # make sure quantum's nat:forward_ports argument is the same.

        for networkSliver in NetworkSliver.objects.all():
            try:
                nat_list = networkSliver.network.nat_list
            except (TypeError, ValueError), e:
                logger.info("Failed to decode nat_list: %s" % str(e))
                continue

            if not networkSliver.port_id:
                continue

            neutron_port = ports_by_id.get(networkSliver.port_id, None)
            if not neutron_port:
                continue

            neutron_nat_list = neutron_port.get("nat:forward_ports", None)
            if not neutron_nat_list:
                # make sure that None and the empty set are treated identically
                neutron_nat_list = []

            if (neutron_nat_list != nat_list):
                logger.info("Setting nat:forward_ports for port %s network %s sliver %s to %s" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
                try:
                    driver = self.driver.admin_driver(controller=networkSliver.sliver.node.site_deployment.controller, tenant='admin')
                    driver.shell.quantum.update_port(networkSliver.port_id, {"port": {"nat:forward_ports": nat_list}})
                except:
                    logger.log_exc("failed to update port with nat_list %s" % str(nat_list))
                    continue
            else:
                #logger.info("port %s network %s sliver %s nat %s is already set" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
                pass
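
The forwarding pass is an idempotent reconcile: neutron's nat:forward_ports is normalized (None is treated as the empty list), compared against the desired nat_list, and update_port is issued only when the two differ. A small sketch of that comparison, assuming an update_port callable with neutron's usual (port_id, body) calling convention:

    def reconcile_forward_ports(port_id, desired, neutron_port, update_port):
        """Push nat:forward_ports only when the controller has drifted."""
        current = neutron_port.get("nat:forward_ports") or []   # None == empty list
        desired = desired or []
        if current == desired:
            return False  # already in sync, nothing to send
        update_port(port_id, {"port": {"nat:forward_ports": desired}})
        return True

    # Hypothetical usage with the driver shown above:
    # reconcile_forward_ports(ns.port_id, ns.network.nat_list, neutron_port,
    #                         driver.shell.quantum.update_port)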
Example #28
    def map_sync_inputs(self, instance):
        inputs = {}
        metadata_update = {}
        if (instance.numberCores):
            metadata_update["cpu_cores"] = str(instance.numberCores)

        for tag in instance.slice.tags.all():
            if tag.name.startswith("sysctl-"):
                metadata_update[tag.name] = tag.value

        slice_memberships = SlicePrivilege.objects.filter(slice=instance.slice)
        pubkeys = set([
            sm.user.public_key for sm in slice_memberships
            if sm.user.public_key
        ])
        if instance.creator.public_key:
            pubkeys.add(instance.creator.public_key)

        if instance.slice.creator.public_key:
            pubkeys.add(instance.slice.creator.public_key)

        if instance.slice.service and instance.slice.service.public_key:
            pubkeys.add(instance.slice.service.public_key)

        nics = []
        networks = [
            ns.network
            for ns in NetworkSlice.objects.filter(slice=instance.slice)
        ]
        controller_networks = ControllerNetwork.objects.filter(
            network__in=networks,
            controller=instance.node.site_deployment.controller)

        for controller_network in controller_networks:

            # Lenient exception - causes slow backoff
            if controller_network.network.template.visibility == 'private' and \
               controller_network.network.template.translation == 'none':
                if not controller_network.net_id:
                    raise DeferredException(
                        "Instance %s Private Network %s has no id; Try again later"
                        % (instance, controller_network.network.name))
                nics.append(controller_network.net_id)

        # now include network template
        network_templates = [network.template.shared_network_name for network in networks \
                             if network.template.shared_network_name]

        #driver = self.driver.client_driver(caller=instance.creator, tenant=instance.slice.name, controller=instance.controllerNetwork)
        driver = self.driver.admin_driver(
            tenant='admin',
            controller=instance.node.site_deployment.controller)
        nets = driver.shell.quantum.list_networks()['networks']
        for net in nets:
            if net['name'] in network_templates:
                nics.append(net['id'])

        if (not nics):
            for net in nets:
                if net['name'] == 'public':
                    nics.append(net['id'])

        image_name = None
        controller_images = instance.image.controllerimages.filter(
            controller=instance.node.site_deployment.controller)
        if controller_images:
            image_name = controller_images[0].image.name
            logger.info("using image from ControllerImage object: " +
                        str(image_name))

        if image_name is None:
            controller_driver = self.driver.admin_driver(
                controller=instance.node.site_deployment.controller)
            images = controller_driver.shell.glanceclient.images.list()
            for image in images:
                if image.name == instance.image.name or not image_name:
                    image_name = image.name
                    logger.info("using image from glance: " + str(image_name))

        try:
            legacy = Config().observer_legacy
        except:
            legacy = False

        if (legacy):
            host_filter = instance.node.name.split('.', 1)[0]
        else:
            host_filter = instance.node.name.strip()

        availability_zone_filter = 'nova:%s' % host_filter
        instance_name = '%s-%d' % (instance.slice.name, instance.id)
        self.instance_name = instance_name

        userData = self.get_userdata(instance, pubkeys)
        if instance.userData:
            userData += instance.userData

        controller = instance.node.site_deployment.controller
        fields = {
            'endpoint': controller.auth_url,
            'endpoint_v3': controller.auth_url_v3,
            'domain': controller.domain,
            'admin_user': instance.creator.email,
            'admin_password': instance.creator.remote_password,
            'admin_tenant': instance.slice.name,
            'tenant': instance.slice.name,
            'tenant_description': instance.slice.description,
            'name': instance_name,
            'ansible_tag': instance_name,
            'availability_zone': availability_zone_filter,
            'image_name': image_name,
            'flavor_name': instance.flavor.name,
            'nics': nics,
            'meta': metadata_update,
            'user_data': r'%s' % escape(userData)
        }
        return fields
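
Two small derivations in map_sync_inputs are easy to gloss over: the scheduler hint is 'nova:<host>', where <host> is the short hostname (everything before the first dot) when observer_legacy is set and the full node name otherwise, and the generated user data is concatenated with any per-instance userData rather than replaced by it. A sketch of the hostname part, assuming the same legacy-flag semantics:

    def availability_zone_hint(node_name, legacy=False):
        """Build the 'nova:<host>' availability-zone hint that pins the instance."""
        host = node_name.split('.', 1)[0] if legacy else node_name.strip()
        return 'nova:%s' % host

    # availability_zone_hint("node1.example.org", legacy=True)   -> 'nova:node1'
    # availability_zone_hint("node1.example.org", legacy=False)  -> 'nova:node1.example.org'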
Example #29
    def call(self, **args):
        logger.info("sync'ing network slivers")

        networkSlivers = NetworkSliver.objects.all()
        networkSlivers_by_id = {}
        networkSlivers_by_port = {}
        for networkSliver in networkSlivers:
            networkSlivers_by_id[networkSliver.id] = networkSliver
            networkSlivers_by_port[networkSliver.port_id] = networkSliver

        networks = Network.objects.all()
        networks_by_id = {}
        for network in networks:
            for nd in network.controllernetworks.all():
                networks_by_id[nd.net_id] = network

        #logger.info("networks_by_id = ")
        #for (network_id, network) in networks_by_id.items():
        #    logger.info("   %s: %s" % (network_id, network.name))

        slivers = Sliver.objects.all()
        slivers_by_instance_uuid = {}
        for sliver in slivers:
            slivers_by_instance_uuid[sliver.instance_uuid] = sliver

        # Get all ports in all controllers

        ports_by_id = {}
        templates_by_id = {}
        for controller in Controller.objects.all():
            if not controller.admin_tenant:
                logger.info("controller %s has no admin_tenant" % controller)
                continue
            try:
                driver = self.driver.admin_driver(controller=controller,
                                                  tenant='admin')
                ports = driver.shell.quantum.list_ports()["ports"]
            except:
                logger.log_exc("failed to get ports from controller %s" %
                               controller)
                continue

            for port in ports:
                ports_by_id[port["id"]] = port

            # public-nat and public-dedicated networks don't have a net-id anywhere
            # in the data model, so build up a list of which ids map to which network
            # templates.
            try:
                neutron_networks = driver.shell.quantum.list_networks(
                )["networks"]
            except:
                print "failed to get networks from controller %s" % controller
                continue
            for network in neutron_networks:
                for template in NetworkTemplate.objects.all():
                    if template.shared_network_name == network["name"]:
                        templates_by_id[network["id"]] = template

        for port in ports_by_id.values():
            #logger.info("port %s" % str(port))
            if port["id"] in networkSlivers_by_port:
                # we already have it
                #logger.info("already accounted for port %s" % port["id"])
                continue

            if port["device_owner"] != "compute:nova":
                # we only want the ports that connect to instances
                #logger.info("port %s is not a compute port, it is a %s" % (port["id"], port["device_owner"]))
                continue

            sliver = slivers_by_instance_uuid.get(port['device_id'], None)
            if not sliver:
                logger.info("no sliver for port %s device_id %s" %
                            (port["id"], port['device_id']))
                continue

            network = networks_by_id.get(port['network_id'], None)
            if not network:
                # maybe it's public-nat or public-dedicated. Search the templates for
                # the id, then see if the sliver's slice has some network that uses
                # that template
                template = templates_by_id.get(port['network_id'], None)
                if template and sliver.slice:
                    for candidate_network in sliver.slice.networks.all():
                        if candidate_network.template == template:
                            network = candidate_network
            if not network:
                logger.info("no network for port %s network %s" %
                            (port["id"], port["network_id"]))

                # we know it's associated with a sliver, but we don't know
                # which network it is part of.

                continue

            if network.template.shared_network_name:
                # If it's a shared network template, then more than one network
                # object maps to the quantum network. We have to do a whole bunch
                # of extra work to find the right one.
                networks = network.template.network_set.all()
                network = None
                for candidate_network in networks:
                    if (candidate_network.owner == sliver.slice):
                        print "found network", candidate_network
                        network = candidate_network

                if not network:
                    logger.info(
                        "failed to find the correct network for a shared template for port %s network %s"
                        % (port["id"], port["network_id"]))
                    continue

            if not port["fixed_ips"]:
                logger.info("port %s has no fixed_ips" % port["id"])
                continue

            ip = port["fixed_ips"][0]["ip_address"]
            logger.info("creating NetworkSliver (%s, %s, %s, %s)" %
                        (str(network), str(sliver), ip, str(port["id"])))

            ns = NetworkSliver(network=network,
                               sliver=sliver,
                               ip=ip,
                               port_id=port["id"])

            try:
                ns.save()
            except:
                logger.log_exc("failed to save networksliver %s" % str(ns))
                continue

        # For networkSlivers that were created by the user, find the ones
        # that don't have neutron ports, and create them.
        for networkSliver in NetworkSliver.objects.filter(
                port_id__isnull=True, sliver__isnull=False):
            #logger.info("XXX working on networksliver %s" % networkSliver)
            controller = networkSliver.sliver.node.site_deployment.controller
            if controller:
                cn = networkSliver.network.controllernetworks.filter(
                    controller=controller)
                if not cn:
                    logger.log_exc("no controllernetwork for %s" %
                                   networkSliver)
                    continue
                cn = cn[0]
                if cn.lazy_blocked:
                    cn.lazy_blocked = False
                    cn.save()
                    logger.info(
                        "deferring networkSliver %s because controllerNetwork was lazy-blocked"
                        % networkSliver)
                    continue
                if not cn.net_id:
                    logger.info(
                        "deferring networkSliver %s because controllerNetwork does not have a port-id yet"
                        % networkSliver)
                    continue
                try:
                    # We need to use a client driver that specifies the tenant
                    # of the destination sliver. Nova-compute will not connect
                    # ports to slivers if the port's tenant does not match
                    # the sliver's tenant.

                    # A bunch of stuff to compensate for OpenStackDriver.client_driver()
                    # not being in working condition.
                    from openstack.client import OpenStackClient
                    from openstack.driver import OpenStackDriver
                    caller = networkSliver.network.owner.creator
                    auth = {
                        'username': caller.email,
                        'password': caller.remote_password,
                        'tenant': networkSliver.sliver.slice.name
                    }  # networkSliver.network.owner.name}
                    client = OpenStackClient(
                        controller=controller,
                        **auth)  # cacert=self.config.nova_ca_ssl_cert,
                    driver = OpenStackDriver(client=client)

                    port = driver.shell.quantum.create_port(
                        {"port": {
                            "network_id": cn.net_id
                        }})["port"]
                    networkSliver.port_id = port["id"]
                    if port["fixed_ips"]:
                        networkSliver.ip = port["fixed_ips"][0]["ip_address"]
                except:
                    logger.log_exc("failed to create neutron port for %s" %
                                   networkSliver)
                    continue
                networkSliver.save()

        # Now, handle port forwarding
        # We get the list of NetworkSlivers again, since we might have just
        # added a few. Then, for each one of them we find its quantum port and
        # make sure quantum's nat:forward_ports argument is the same.

        for networkSliver in NetworkSliver.objects.all():
            try:
                nat_list = networkSliver.network.nat_list
            except (TypeError, ValueError), e:
                logger.info("Failed to decode nat_list: %s" % str(e))
                continue

            if not networkSliver.port_id:
                continue

            neutron_port = ports_by_id.get(networkSliver.port_id, None)
            if not neutron_port:
                continue

            neutron_nat_list = neutron_port.get("nat:forward_ports", None)
            if not neutron_nat_list:
                # make sure that None and the empty set are treated identically
                neutron_nat_list = []

            if (neutron_nat_list != nat_list):
                logger.info(
                    "Setting nat:forward_ports for port %s network %s sliver %s to %s"
                    %
                    (str(networkSliver.port_id), str(networkSliver.network.id),
                     str(networkSliver.sliver), str(nat_list)))
                try:
                    driver = self.driver.admin_driver(
                        controller=networkSliver.sliver.node.site_deployment.
                        controller,
                        tenant='admin')
                    driver.shell.quantum.update_port(
                        networkSliver.port_id,
                        {"port": {
                            "nat:forward_ports": nat_list
                        }})
                except:
                    logger.log_exc("failed to update port with nat_list %s" %
                                   str(nat_list))
                    continue
            else:
                #logger.info("port %s network %s sliver %s nat %s is already set" % (str(networkSliver.port_id), str(networkSliver.network.id), str(networkSliver.sliver), str(nat_list)))
                pass
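
When a template carries a shared_network_name, several Network objects map onto the same quantum network, so the loop above settles the ambiguity by choosing the candidate owned by the sliver's slice. A compact sketch of that selection, with the ORM query replaced by a plain iterable:

    def network_for_shared_template(candidate_networks, slice_obj):
        """Among Networks built from one shared template, pick the one the slice owns."""
        chosen = None
        for candidate in candidate_networks:
            if candidate.owner == slice_obj:
                chosen = candidate  # the last owner match wins, as in the loop above
        return chosen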