Example #1
0
 def update_vnf_with_alarm(self, vnf, policy_name, policy_dict):
     """Register an alarm URL for the given VNF monitoring policy.

     Returns the alarm URL on success, or None when the policy lacks
     the action / action name needed to build it (an event is logged
     in either failure case).
     """
     resize_trigger = policy_dict['triggers']['resize_compute']
     driver = resize_trigger['event_type']['implementation']
     policy_action = resize_trigger.get('action')
     if not policy_action:
         _log_monitor_events(t_context.get_admin_context(),
                             vnf,
                             "Alarm not set: policy action missing")
         return
     alarm_action_name = policy_action['resize_compute'].get('action_name')
     if not alarm_action_name:
         _log_monitor_events(t_context.get_admin_context(),
                             vnf,
                             "Alarm not set: alarm action name missing")
         return
     params = {
         'vnf_id': vnf['id'],
         'mon_policy_name': policy_name,
         'mon_policy_action': alarm_action_name,
     }
     alarm_url = self.call_alarm_url(driver, vnf, params)
     _log_monitor_events(t_context.get_admin_context(),
                         vnf,
                         "Alarm URL set successfully: %s" % alarm_url)
     return alarm_url
Example #2
0
 def execute_action(cls, plugin, vnf_dict):
     """Log the kill action, then delete the dead VNF."""
     _log_monitor_events(t_context.get_admin_context(), vnf_dict,
                         "ActionLogAndKill invoked")
     vnf_id = vnf_dict['id']
     if plugin._mark_vnf_dead(vnf_id):
         # Stop monitoring first (if it was monitored), then delete.
         if vnf_dict['attributes'].get('monitoring_policy'):
             plugin._vnf_monitor.mark_dead(vnf_id)
         plugin.delete_vnf(t_context.get_admin_context(), vnf_id)
     LOG.error(_('vnf %s dead'), vnf_id)
Example #3
0
        def create_alarm_action(action, action_list, scaling_type):
            """Build and register the alarm URL for one trigger action.

            Scaling actions in ``action_list`` are first rewritten to
            carry the scaling direction; afterwards all actions are
            joined into a single '%'-separated monitor policy action
            and the alarm URL is registered once.
            """
            params = dict()
            params['vnf_id'] = vnf['id']
            params['mon_policy_name'] = action
            driver = 'ceilometer'

            def _refactor_backend_policy(bk_policy_name, bk_action_name):
                # Combine policy and action names, e.g. 'SP1' + 'out'.
                policy = '%(policy_name)s%(action_name)s' % {
                    'policy_name': bk_policy_name,
                    'action_name': bk_action_name}
                return policy

            for index, policy_action_name in enumerate(action_list):
                filters = {'name': policy_action_name}
                bkend_policies = \
                    plugin.get_vnf_policies(context, vnf['id'], filters)
                if bkend_policies:
                    if constants.POLICY_SCALING in str(bkend_policies[0]):
                        action_list[index] = _refactor_backend_policy(
                            policy_action_name, scaling_type)

            # Support multiple action. Ex: respawn % notify
            # NOTE: done once, after the whole list has been refactored,
            # so call_alarm_url() and the event log fire a single time
            # per trigger instead of once per action.
            action_name = '%'.join(action_list)
            params['mon_policy_action'] = action_name
            alarm_url[action] = \
                self.call_alarm_url(driver, vnf, params)
            details = "Alarm URL set successfully: %s" % alarm_url
            vnfm_utils.log_events(t_context.get_admin_context(), vnf,
                                  constants.RES_EVT_MONITOR,
                                  details)
Example #4
0
    def execute_action(cls, plugin, device_dict):
        """Respawn a dead device: copy its metadata and create a new one."""
        device_id = device_dict['id']
        LOG.error(_('device %s dead'), device_id)
        if not plugin._mark_device_dead(device_id):
            return
        plugin._vnf_monitor.mark_dead(device_id)

        # Carry the old attributes over, remembering which device died.
        attributes = device_dict['attributes'].copy()
        attributes['dead_device_id'] = device_id
        new_device = {k: device_dict[k]
                      for k in ('tenant_id', 'template_id', 'name')}
        new_device['attributes'] = attributes
        LOG.debug(_('new_device %s'), new_device)

        # keystone v2.0 specific
        authtoken = CONF.keystone_authtoken
        token = clients.OpenstackClients().auth_token

        # Build an admin context impersonating the configured service user.
        context = t_context.get_admin_context()
        context.tenant_name = authtoken.project_name
        context.user_name = authtoken.username
        context.auth_token = token['id']
        context.tenant_id = token['tenant_id']
        context.user_id = token['user_id']
        new_device_dict = plugin.create_device(context,
                                               {'device': new_device})
        LOG.info(_('respawned new device %s'), new_device_dict['id'])
Example #5
0
    def on_failure(cls, plugin, device_dict):
        """Respawn a replacement for a failed device (keystone v2.0 auth)."""
        device_id = device_dict['id']
        LOG.error(_('device %s dead'), device_id)

        # Carry the old attributes over, remembering which device died.
        attributes = device_dict['attributes'].copy()
        attributes['dead_device_id'] = device_id
        new_device = {k: device_dict[k]
                      for k in ('tenant_id', 'template_id', 'name')}
        new_device['attributes'] = attributes
        LOG.debug(_('new_device %s'), new_device)

        # keystone v2.0 specific
        authtoken = CONF.keystone_authtoken
        kc = ks_client.Client(
            tenant_name=authtoken.project_name,
            username=authtoken.username,
            password=authtoken.password,
            auth_url=CONF.keystone_authtoken.auth_uri + '/v2.0')
        token = kc.service_catalog.get_token()

        # Admin context impersonating the configured service user.
        context = t_context.get_admin_context()
        context.tenant_name = authtoken.project_name
        context.user_name = authtoken.username
        context.auth_token = token['id']
        context.tenant_id = token['tenant_id']
        context.user_id = token['user_id']
        new_device_dict = plugin.create_device(context, {'device': new_device})
        LOG.info(_('respawned new device %s'), new_device_dict['id'])
 def setUp(self):
     """Prepare an admin context and the common-services plugins."""
     super(TestCommonServicesPlugin, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self.event_db_plugin = (
         common_services_db_plugin.CommonServicesPluginDb())
     self.coreutil_plugin = common_services_plugin.CommonServicesPlugin()
 def test_enforce_admin_only_subattribute(self):
     """An admin context may create resources with admin-only sub-attrs."""
     action = "create_something"
     sub_attrs = {'sub_attr_1': 'x', 'sub_attr_2': 'y'}
     target = {'tenant_id': 'fake', 'attr': sub_attrs}
     result = policy.enforce(context.get_admin_context(),
                             action, target, None)
     self.assertEqual(True, result)
Example #8
0
    def execute_action(cls, plugin, vnf_dict):
        """Respawn a dead VNF using the configured keystone credentials."""
        vnf_id = vnf_dict['id']
        LOG.error(_('vnf %s dead'), vnf_id)
        if not plugin._mark_vnf_dead(vnf_id):
            return
        plugin._vnf_monitor.mark_dead(vnf_id)

        # Remember which VNF died on the respawned instance.
        attributes = vnf_dict['attributes'].copy()
        attributes['dead_vnf_id'] = vnf_id
        new_vnf = {k: vnf_dict[k] for k in ('tenant_id', 'vnfd_id', 'name')}
        new_vnf['attributes'] = attributes
        LOG.debug(_('new_vnf %s'), new_vnf)

        # keystone v2.0 specific
        authtoken = CONF.keystone_authtoken
        token = clients.OpenstackClients().auth_token

        # Impersonate the configured service user on an admin context.
        context = t_context.get_admin_context()
        context.tenant_name = authtoken.project_name
        context.user_name = authtoken.username
        context.auth_token = token['id']
        context.tenant_id = token['tenant_id']
        context.user_id = token['user_id']
        _log_monitor_events(context, vnf_dict,
                            "ActionRespawnPolicy invoked")
        new_vnf_dict = plugin.create_vnf(context, {'vnf': new_vnf})
        _log_monitor_events(context, new_vnf_dict,
                            "ActionRespawnPolicy complete")
        LOG.info(_('respawned new vnf %s'), new_vnf_dict['id'])
 def setUp(self):
     """Mock the driver manager and NfvoPlugin's monitor thread."""
     super(TestNfvoPlugin, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self._mock_driver_manager()
     # Keep the plugin's background monitor loop from starting.
     mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin.__run__').start()
     self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
Example #10
0
    def update_vnf_with_alarm(self, plugin, context, vnf, policy_dict):
        """Register an alarm URL for every trigger in *policy_dict*.

        Returns a dict mapping trigger name -> alarm URL, or None when
        a trigger has no policy action (an event is logged in that case).
        """
        triggers = policy_dict['triggers']
        alarm_url = dict()
        for trigger_name, trigger_dict in triggers.items():
            params = dict()
            params['vnf_id'] = vnf['id']
            params['mon_policy_name'] = trigger_name
            driver = trigger_dict['event_type']['implementation']
            # TODO(Tung Doan) trigger_dict.get('actions') needs to be used
            policy_action = trigger_dict.get('action')
            # NOTE: .get() returns None when 'action' is absent, so test
            # falsiness instead of len() to avoid a TypeError.
            if not policy_action:
                vnfm_utils.log_events(t_context.get_admin_context(), vnf,
                                      constants.RES_EVT_MONITOR,
                                      "Alarm not set: policy action missing")
                return
            # Other backend policies with the construct (policy, action)
            # ex: (SP1, in), (SP1, out)

            def _refactor_backend_policy(bk_policy_name, bk_action_name):
                # Combine policy and scaling direction, e.g. 'SP1-out'.
                policy = '%(policy_name)s-%(action_name)s' % {
                    'policy_name': bk_policy_name,
                    'action_name': bk_action_name}
                return policy

            for index, policy_action_name in enumerate(policy_action):
                filters = {'name': policy_action_name}
                bkend_policies =\
                    plugin.get_vnf_policies(context, vnf['id'], filters)
                if bkend_policies:
                    bkend_policy = bkend_policies[0]
                    if bkend_policy['type'] == constants.POLICY_SCALING:
                        # Scaling direction follows the comparison
                        # operator: 'gt' scales out, otherwise in.
                        cp = trigger_dict['condition'].\
                            get('comparison_operator')
                        scaling_type = 'out' if cp == 'gt' else 'in'
                        policy_action[index] = _refactor_backend_policy(
                            policy_action_name, scaling_type)

            # Support multiple action. Ex: respawn % notify
            action_name = '%'.join(policy_action)

            params['mon_policy_action'] = action_name
            alarm_url[trigger_name] =\
                self.call_alarm_url(driver, vnf, params)
            details = "Alarm URL set successfully: %s" % alarm_url
            vnfm_utils.log_events(t_context.get_admin_context(), vnf,
                                  constants.RES_EVT_MONITOR, details)
        return alarm_url
Example #11
0
 def _init_monitoring(self):
     """Re-register every existing VNF with the monitor on startup."""
     context = t_context.get_admin_context()
     for vnf in self.get_vnfs(context):
         # The tenant_id must be present on the context so that
         # monitoring can resolve the VNF's VIM.
         context.tenant_id = vnf['tenant_id']
         self.add_vnf_to_monitor(context, vnf)
Example #12
0
 def test_tacker_context_admin_to_dict(self):
     """to_dict() hides the DB session and carries no user/tenant ids."""
     self.db_api_session.return_value = 'fakesession'
     admin_ctx = context.get_admin_context()
     serialized = admin_ctx.to_dict()
     self.assertIsNone(serialized['user_id'])
     self.assertIsNone(serialized['tenant_id'])
     self.assertIsNotNone(admin_ctx.session)
     self.assertNotIn('session', serialized)
Example #13
0
 def setUp(self):
     """Stub out event creation and build an admin context."""
     super(VNFActionRespawn, self).setUp()
     self.context = context.get_admin_context()
     mock.patch('tacker.db.common_services.common_services_db_plugin.'
                'CommonServicesPluginDb.create_event').start()
     self._cos_db_plugin = (
         common_services_db_plugin.CommonServicesPluginDb())
    def test_tacker_context_get_admin_context_not_update_local_store(self):
        """get_admin_context() must not replace the thread-local context."""
        original = context.Context('user_id', 'tenant_id')
        stored_req_id = local.store.context.request_id
        self.assertEqual(original.request_id, stored_req_id)

        admin_ctx = context.get_admin_context()
        # The stored context is untouched; the admin one is brand new.
        self.assertEqual(stored_req_id, local.store.context.request_id)
        self.assertNotEqual(stored_req_id, admin_ctx.request_id)
Example #15
0
 def setUp(self):
     """Build a VNFMPlugin with device manager, monitor and pool mocked."""
     super(TestVNFMPlugin, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self._mock_device_manager()
     self._mock_vnf_monitor()
     self._mock_green_pool()
     self.vnfm_plugin = plugin.VNFMPlugin()
Example #16
0
    def test_tacker_context_get_admin_context_not_update_local_store(self):
        """get_admin_context() must not replace the current oslo context."""
        original = context.Context('user_id', 'tenant_id')
        current_req_id = oslo_context.get_current().request_id
        self.assertEqual(current_req_id, original.request_id)

        admin_ctx = context.get_admin_context()
        # The current context is untouched; the admin one is brand new.
        self.assertEqual(current_req_id,
                         oslo_context.get_current().request_id)
        self.assertNotEqual(current_req_id, admin_ctx.request_id)
Example #17
0
 def setUp(self):
     """Create the plugin, a dummy VIM and the vdu_autoheal action."""
     super(TestVNFActionVduAutoheal, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self._mock_device_manager()
     self._mock_vnf_monitor()
     self._insert_dummy_vim()
     self.vnfm_plugin = plugin.VNFMPlugin()
     self.vdu_autoheal = vdu_autoheal.VNFActionVduAutoheal()
Exemple #18
0
 def down_cb(hosting_device_):
     """Callback fired when a hosting device is detected as down."""
     if not self._mark_device_dead(device_id):
         return
     self._device_status.mark_dead(device_id)
     dead_device = self.get_device(
         t_context.get_admin_context(), device_id)
     # Dispatch to the device's configured failure policy, if any.
     failure_cls = monitor.FailurePolicy.get_policy(
         dead_device['attributes'].get('failure_policy'),
         dead_device)
     if failure_cls:
         failure_cls.on_failure(self, dead_device)
Example #19
0
 def setUp(self):
     """Wire up a mocked heat client and an event-free DB plugin."""
     super(TestOpenStack, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self.infra_driver = openstack.OpenStack()
     self._mock_heat_client()
     mock.patch('tacker.db.common_services.common_services_db_plugin.'
                'CommonServicesPluginDb.create_event').start()
     self._cos_db_plugin = (
         common_services_db_plugin.CommonServicesPluginDb())
 def __init__(self):
     """Load VIM drivers, cache existing VIMs and start monitoring."""
     super(NfvoPlugin, self).__init__()
     self._vim_drivers = driver_manager.DriverManager(
         'tacker.nfvo.vim.drivers',
         cfg.CONF.nfvo_vim.vim_drivers)
     self._created_vims = dict()
     context = t_context.get_admin_context()
     for vim in self.get_vims(context):
         self._created_vims[vim["id"]] = vim
     self._monitor_interval = cfg.CONF.nfvo_vim.monitor_interval
     # Run the VIM monitor loop in the background.
     threading.Thread(target=self.__run__).start()
 def setUp(self):
     """Patch out every heavy purge-table dependency."""
     super(TestDbPurgeDelete, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self._mock_config()
     mock.patch('sqlalchemy.Table').start()
     for target in ('tacker.db.migration.purge_tables.'
                    '_purge_resource_tables',
                    'tacker.db.migration.purge_tables.'
                    '_purge_events_table',
                    'tacker.db.migration.purge_tables.'
                    '_generate_associated_tables_map',
                    'tacker.db.migration.purge_tables.get_engine'):
         mock.patch(target).start()
Example #22
0
 def setUp(self):
     """Build an NfvoPlugin with driver manager and VIM lookup mocked."""
     super(TestNfvoPlugin, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self._mock_driver_manager()
     # Keep the background monitor from running and stub VIM lookup.
     mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin.__run__').start()
     mock.patch('tacker.nfvo.nfvo_plugin.NfvoPlugin._get_vim_from_vnf',
                side_effect=dummy_get_vim).start()
     self.nfvo_plugin = nfvo_plugin.NfvoPlugin()
     mock.patch('tacker.db.common_services.common_services_db.'
                'CommonServicesPluginDb.create_event').start()
     self._cos_db_plugin = common_services_db.CommonServicesPluginDb()
Example #23
0
    def execute_action(cls, plugin, device_dict, auth_attr):
        """Respawn a dead device under the same logical device id.

        Deletes the dead heat stack, creates a replacement device, then
        renames ids so callers keep seeing the original ``device_id``.
        The order of the rename/delete steps below is load-bearing.
        """
        device_id = device_dict['id']
        LOG.error(_('device %s dead'), device_id)
        if plugin._mark_device_dead(device_dict['id']):
            plugin._vnf_monitor.mark_dead(device_dict['id'])
            attributes = device_dict['attributes']
            config = attributes.get('config')
            LOG.debug(_('device config %s dead'), config)
            # Track how many times this device has failed; the dead
            # instance id is kept under a numbered attribute key.
            failure_count = int(attributes.get('failure_count', '0')) + 1
            failure_count_str = str(failure_count)
            attributes['failure_count'] = failure_count_str
            attributes['dead_instance_id_' + failure_count_str] = device_dict[
                'instance_id']

            # The replacement is created under a temporary -RESPAWN- id.
            new_device_id = device_id + '-RESPAWN-' + failure_count_str
            attributes = device_dict['attributes'].copy()
            attributes['dead_device_id'] = device_id
            new_device = {'id': new_device_id, 'attributes': attributes}
            for key in ('tenant_id', 'template_id', 'name', 'vim_id',
                        'placement_attr'):
                new_device[key] = device_dict[key]
            LOG.debug(_('new_device %s'), new_device)
            placement_attr = device_dict.get('placement_attr', {})
            region_name = placement_attr.get('region_name', None)
            # kill heat stack
            heatclient = heat.HeatClient(auth_attr=auth_attr,
                                         region_name=region_name)
            heatclient.delete(device_dict['instance_id'])

            # TODO(anyone) set the current request ctxt instead of admin ctxt
            context = t_context.get_admin_context()
            new_device_dict = plugin.create_device_sync(
                context, {'device': new_device})
            LOG.info(_('respawned new device %s'), new_device_dict['id'])

            # ugly hack to keep id unchanged: move the dead row aside,
            # give the respawned row the original id, then drop the dead
            # one.
            dead_device_id = device_id + '-DEAD-' + failure_count_str
            LOG.debug(_('%(dead)s %(new)s %(cur)s'),
                      {'dead': dead_device_id,
                       'new': new_device_id,
                       'cur': device_id})
            plugin.rename_device_id(context, device_id, dead_device_id)
            plugin.rename_device_id(context, new_device_id, device_id)
            LOG.debug('Delete dead device')
            plugin.delete_device(context, dead_device_id)
            new_device_dict['id'] = device_id
            if config:
                new_device_dict.setdefault('attributes', {})['config'] = config

            # Push the old config to the new device and resume monitoring.
            plugin.config_device(context, new_device_dict)
            plugin.add_device_to_monitor(new_device_dict, auth_attr)
Example #24
0
    def add_hosting_vnf(self, new_vnf):
        """Start tracking *new_vnf* and record a monitoring event."""
        LOG.debug('Adding host %(id)s, Mgmt IP %(ips)s',
                  {'id': new_vnf['id'],
                   'ips': new_vnf['management_ip_addresses']})
        new_vnf['boot_at'] = timeutils.utcnow()
        with self._lock:
            self._hosting_vnfs[new_vnf['id']] = new_vnf

        mon_policy_dict = new_vnf['vnf']['attributes']['monitoring_policy']
        evt_details = (("VNF added for monitoring. "
                        "mon_policy_dict = %s,") % (mon_policy_dict))
        _log_monitor_events(t_context.get_admin_context(), new_vnf['vnf'],
                            evt_details)
 def monitor_vim(self, vim_obj):
     """Probe a VIM's reachability and persist status transitions."""
     vim_id = vim_obj["id"]
     reachable = self._vim_drivers.invoke(vim_obj['type'],
                                          'vim_status',
                                          auth_url=vim_obj["auth_url"])
     current_status = "REACHABLE" if reachable else "UNREACHABLE"
     if current_status == vim_obj["status"]:
         return
     # Status flipped: update the DB and the in-memory cache together.
     with self._lock:
         super(NfvoPlugin, self).update_vim_status(
             t_context.get_admin_context(),
             vim_id, current_status)
         self._created_vims[vim_id]["status"] = current_status
Example #26
0
    def execute_action(cls, plugin, vnf_dict):
        """Respawn a dead VNF: delete its heat stack and re-create it.

        Handles two attribute-driven variants: VNFs under a monitoring
        policy (respawn + re-add to the monitor) and VNFs with an alarm
        URL (respawn only, dropping the stale alarm_url attribute).
        """
        vnf_id = vnf_dict['id']
        LOG.info(_('vnf %s is dead and needs to be respawned'), vnf_id)
        attributes = vnf_dict['attributes']
        vim_id = vnf_dict['vim_id']
        # TODO(anyone) set the current request ctxt
        context = t_context.get_admin_context()

        def _update_failure_count():
            # Bump the failure counter and remember the dead instance id
            # under a numbered attribute key.
            failure_count = int(attributes.get('failure_count', '0')) + 1
            failure_count_str = str(failure_count)
            LOG.debug(_("vnf %(vnf_id)s failure count %(failure_count)s") %
                      {'vnf_id': vnf_id, 'failure_count': failure_count_str})
            attributes['failure_count'] = failure_count_str
            attributes['dead_instance_id_' + failure_count_str] = vnf_dict[
                'instance_id']

        def _fetch_vim(vim_uuid):
            # Resolve the VIM record (auth + type) for this VNF.
            return vim_client.VimClient().get_vim(context, vim_uuid)

        def _delete_heat_stack(vim_auth):
            # Tear down the dead VNF's heat stack before respawning.
            placement_attr = vnf_dict.get('placement_attr', {})
            region_name = placement_attr.get('region_name')
            heatclient = openstack.HeatClient(auth_attr=vim_auth,
                                              region_name=region_name)
            heatclient.delete(vnf_dict['instance_id'])
            LOG.debug(_("Heat stack %s delete initiated"), vnf_dict[
                'instance_id'])
            _log_monitor_events(context, vnf_dict, "ActionRespawnHeat invoked")

        def _respin_vnf():
            # Recreate the VNF synchronously and push its config.
            update_vnf_dict = plugin.create_vnf_sync(context, vnf_dict)
            LOG.info(_('respawned new vnf %s'), update_vnf_dict['id'])
            plugin.config_vnf(context, update_vnf_dict)
            return update_vnf_dict

        if plugin._mark_vnf_dead(vnf_dict['id']):
            _update_failure_count()
            vim_res = _fetch_vim(vim_id)
            if vnf_dict['attributes'].get('monitoring_policy'):
                plugin._vnf_monitor.mark_dead(vnf_dict['id'])
                _delete_heat_stack(vim_res['vim_auth'])
                updated_vnf = _respin_vnf()
                plugin.add_vnf_to_monitor(updated_vnf, vim_res['vim_type'])
                LOG.debug(_("VNF %s added to monitor thread"), updated_vnf[
                    'id'])
            if vnf_dict['attributes'].get('alarm_url'):
                _delete_heat_stack(vim_res['vim_auth'])
                vnf_dict['attributes'].pop('alarm_url')
                _respin_vnf()
Example #27
0
    def _mark_device_status(self, device_id, exclude_status, new_status):
        """Atomically set a device's status unless it is in *exclude_status*.

        Returns True when the row was updated, False when no matching
        device exists (or its current status is in the excluded set).
        """
        context = t_context.get_admin_context()
        with context.session.begin(subtransactions=True):
            try:
                # Row-level lock so concurrent status transitions
                # serialize on this device.
                # NOTE(review): with_lockmode() is deprecated in newer
                # SQLAlchemy in favour of with_for_update() — confirm the
                # pinned SQLAlchemy version before changing it.
                device_db = (
                    self._model_query(context, Device).
                    filter(Device.id == device_id).
                    filter(~Device.status.in_(exclude_status)).
                    with_lockmode('update').one())
            except orm_exc.NoResultFound:
                LOG.warning(_('no device found %s'), device_id)
                return False

            device_db.update({'status': new_status})
        return True
Example #28
0
    def update_hosting_vnf(self, updated_vnf_dict, evt_details=None):
        """Refresh the cached copy of a monitored VNF in place.

        No-op when the VNF is not currently monitored.  When
        *evt_details* is given, a heal event is logged as well.
        """
        with self._lock:
            cached = VNFMonitor._hosting_vnfs.get(
                updated_vnf_dict.get('id'))
            if not cached:
                return
            cached['vnf'] = copy.deepcopy(updated_vnf_dict)
            cached['mgmt_ip_addresses'] = jsonutils.loads(
                updated_vnf_dict['mgmt_ip_address'])

            if evt_details is not None:
                vnfm_utils.log_events(t_context.get_admin_context(),
                                      cached['vnf'],
                                      constants.RES_EVT_HEAL,
                                      evt_details=evt_details)
 def setUp(self):
     """Build a fully-mocked VNFMPlugin backed by a dummy VIM."""
     super(TestVNFMPlugin, self).setUp()
     self.addCleanup(mock.patch.stopall)
     self.context = context.get_admin_context()
     self._mock_vim_client()
     self._stub_get_vim()
     self._mock_device_manager()
     self._mock_vnf_monitor()
     self._mock_vnf_alarm_monitor()
     self._mock_green_pool()
     self._insert_dummy_vim()
     self.vnfm_plugin = plugin.VNFMPlugin()
     mock.patch('tacker.db.common_services.common_services_db.'
                'CommonServicesPluginDb.create_event').start()
     self._cos_db_plugin = common_services_db.CommonServicesPluginDb()
Example #30
0
    def setUp(self):
        """Build a VNFMPlugin with every infra-driver call patched out."""
        super(TestVNFMPlugin, self).setUp()
        self.addCleanup(mock.patch.stopall)
        self.context = context.get_admin_context()
        self._mock_vim_client()
        self._stub_get_vim()
        self._mock_vnf_monitor()
        self._mock_vnf_alarm_monitor()
        self._mock_vnf_reservation_monitor()
        self._insert_dummy_vim()
        self.vnfm_plugin = plugin.VNFMPlugin()
        mock.patch('tacker.db.common_services.common_services_db_plugin.'
                   'CommonServicesPluginDb.create_event').start()
        mock.patch('tacker.db.vnfm.vnfm_db.VNFMPluginDb._mgmt_driver_name',
                   return_value='noop').start()

        def _patch_infra(method, **kwargs):
            # All infra-driver entry points live on the same class.
            target = ('tacker.vnfm.infra_drivers.openstack.'
                      'openstack.OpenStack.' + method)
            return mock.patch(target, **kwargs).start()

        self.create = _patch_infra(
            'create', return_value=uuidutils.generate_uuid())
        self.create_wait = _patch_infra('create_wait')
        self.update = _patch_infra('update')
        self.update_wait = _patch_infra('update_wait')
        self.delete = _patch_infra('delete')
        self.delete_wait = _patch_infra('delete_wait')
        self.scale = _patch_infra(
            'scale', return_value=uuidutils.generate_uuid())
        self.scale_wait = _patch_infra(
            'scale_wait', return_value=uuidutils.generate_uuid())

        def _fake_spawn(func, *args, **kwargs):
            # Run "spawned" work synchronously so tests stay deterministic.
            func(*args, **kwargs)

        mock.patch.object(self.vnfm_plugin, 'spawn_n',
                          _fake_spawn).start()
        self._cos_db_plugin = (
            common_services_db_plugin.CommonServicesPluginDb())
Example #31
0
 def setUp(self):
     """Provide an admin context for the VnfResource tests."""
     super(TestVnfResource, self).setUp()
     self.context = context.get_admin_context()
Example #32
0
 def execute_action(cls, plugin, vnf_dict):
     """Mark the VNF dead, stop monitoring it, and delete it."""
     vnf_id = vnf_dict['id']
     if plugin._mark_vnf_dead(vnf_id):
         plugin._vnf_monitor.mark_dead(vnf_id)
         plugin.delete_vnf(t_context.get_admin_context(), vnf_id)
     LOG.error(_('vnf %s dead'), vnf_id)
Example #33
0
 def context(self):
     """Return the request's tacker context, creating an admin one lazily."""
     try:
         return self.environ['tacker.context']
     except KeyError:
         ctx = context.get_admin_context()
         self.environ['tacker.context'] = ctx
         return ctx
    def test_generate_hot_from_tosca(self):
        """HOT generated from the ETSI NFV TOSCA template matches the
        expected fixture, including external CP / link-port wiring."""
        tosca_file = './data/etsi_nfv/' \
            'tosca_generate_hot_from_tosca.yaml'
        hot_file = './data/etsi_nfv/hot/' \
            'hot_generate_hot_from_tosca.yaml'
        vnfd_dict = self._load_yaml(tosca_file, update_import=True)

        # Input params
        dev_attrs = {}

        # Two external virtual links: VL1 with a fixed IP/MAC on CP1,
        # VL2 attached through a pre-created neutron port (link port).
        data = [{
            "id": 'VL1',
            "resource_id": 'neutron-network-uuid_VL1',
                "ext_cps": [{
                    "cpd_id": "CP1",
                    "cp_config": [{
                        "cp_protocol_data": [{
                            "layer_protocol": "IP_OVER_ETHERNET",
                            "ip_over_ethernet": {
                                "mac_address": 'fa:16:3e:11:11:11',
                                "ip_addresses": [{
                                    'type': 'IPV4',
                                    'fixed_addresses': ['1.1.1.1'],
                                    'subnet_id': 'neutron-subnet-uuid_CP1'}]}
                        }]
                    }]}]},
               {
                "id": 'VL2',
                "resource_id": 'neutron-network-uuid_VL2',
                "ext_cps": [{
                    "cpd_id": 'CP2',
                    "cp_config": [{
                        "link_port_id": uuidsentinel.link_port_id,
                        "cp_protocol_data": [{
                            "layer_protocol": "IP_OVER_ETHERNET"}]}]
                }],
                "ext_link_ports": [{
                    "id": uuidsentinel.link_port_id,
                    "resource_handle": {
                        "resource_id": 'neutron-port-uuid_CP2'}
                }]}]

        # VL3 is externally managed (pre-existing network).
        ext_mg_vl = [{'id': 'VL3', 'vnf_virtual_link_desc_id': 'VL3',
                      'resource_id': 'neutron-network-uuid_VL3'}]
        request = {'ext_managed_virtual_links': ext_mg_vl,
                   'ext_virtual_links': data, 'flavour_id': 'simple'}
        ctxt = context.get_admin_context()
        inst_req_info = objects.InstantiateVnfRequest.obj_from_primitive(
            request, ctxt)

        # image and info
        grant_info = {
            'VDU1': [objects.VnfResource(id=uuidsentinel.id,
                    vnf_instance_id=uuidsentinel.vnf_instance_id,
                    resource_type='image',
                    resource_identifier='glance-image-uuid_VDU1')]}

        self.tth._generate_hot_from_tosca(vnfd_dict, dev_attrs,
                                     inst_req_info, grant_info)

        expected_hot_tpl = self._load_yaml(hot_file)
        actual_hot_tpl = yaml.safe_load(self.tth.heat_template_yaml)
        self.assertEqual(expected_hot_tpl, actual_hot_tpl)
Example #35
0
 def _test_enforce_adminonly_attribute(self, action):
     """Shared check: an admin context passes admin-only-attr policy."""
     target = {'shared': True}
     result = policy.enforce(context.get_admin_context(), action, target)
     self.assertEqual(result, True)