Example #1
    def execute_action(cls, plugin, vnf_dict):
        LOG.error(_('vnf %s dead'), vnf_dict['id'])
        if plugin._mark_vnf_dead(vnf_dict['id']):
            plugin._vnf_monitor.mark_dead(vnf_dict['id'])

            # build the request body for a replacement VNF, carrying over
            # identifying fields from the dead one
            attributes = vnf_dict['attributes'].copy()
            attributes['dead_vnf_id'] = vnf_dict['id']
            new_vnf = {'attributes': attributes}
            for key in ('tenant_id', 'vnfd_id', 'name'):
                new_vnf[key] = vnf_dict[key]
            LOG.debug(_('new_vnf %s'), new_vnf)

            # keystone v2.0 specific
            authtoken = CONF.keystone_authtoken
            token = clients.OpenstackClients().auth_token

            # build an admin context from the service credentials so the
            # respawn is performed with admin rights
            context = t_context.get_admin_context()
            context.tenant_name = authtoken.project_name
            context.user_name = authtoken.username
            context.auth_token = token['id']
            context.tenant_id = token['tenant_id']
            context.user_id = token['user_id']
            _log_monitor_events(context, vnf_dict,
                                "ActionRespawnPolicy invoked")
            new_vnf_dict = plugin.create_vnf(context, {'vnf': new_vnf})
            _log_monitor_events(context, new_vnf_dict,
                                "ActionRespawnPolicy complete")
            LOG.info(_('respawned new vnf %s'), new_vnf_dict['id'])
Example #2
    def _get_openstack_clients(self, context, vnf_dict):
        # resolve the VIM that hosts this VNF and build clients scoped to
        # its credentials and region
        vim_res = self.vim_client.get_vim(context, vnf_dict['vim_id'])
        region_name = vnf_dict.setdefault('placement_attr',
                                          {}).get('region_name', None)
        client = clients.OpenstackClients(auth_attr=vim_res['vim_auth'],
                                          region_name=region_name)
        return client
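
The helper above returns a per-VIM client bundle. A minimal usage sketch follows; the calling context, the vnf_dict argument and the logging line are assumptions, while the heat attribute is the one shown in Example #4:

        # hypothetical caller: fetch the clients for this VNF's VIM and
        # list its heat stacks via heatclient's standard stacks manager
        client = self._get_openstack_clients(context, vnf_dict)
        for stack in client.heat.stacks.list():
            LOG.debug('stack %s status %s',
                      stack.stack_name, stack.stack_status)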
Example #3
    def execute_action(cls, plugin, device_dict):
        LOG.error(_('device %s dead'), device_dict['id'])
        if plugin._mark_device_dead(device_dict['id']):
            plugin._vnf_monitor.mark_dead(device_dict['id'])

            attributes = device_dict['attributes'].copy()
            attributes['dead_device_id'] = device_dict['id']
            new_device = {'attributes': attributes}
            for key in ('tenant_id', 'template_id', 'name'):
                new_device[key] = device_dict[key]
            LOG.debug(_('new_device %s'), new_device)

            # keystone v2.0 specific
            authtoken = CONF.keystone_authtoken
            token = clients.OpenstackClients().auth_token

            context = t_context.get_admin_context()
            context.tenant_name = authtoken.project_name
            context.user_name = authtoken.username
            context.auth_token = token['id']
            context.tenant_id = token['tenant_id']
            context.user_id = token['user_id']
            new_device_dict = plugin.create_device(context,
                                                   {'device': new_device})
            LOG.info(_('respawned new device %s'), new_device_dict['id'])
Example #4
    def __init__(self, auth_attr, region_name=None):
        # build the heat client for the given credentials/region via the
        # shared OpenstackClients helper and expose its managers
        self.heat = clients.OpenstackClients(auth_attr, region_name).heat
        self.stacks = self.heat.stacks
        self.resource_types = self.heat.resource_types
        self.resources = self.heat.resources
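
A short usage sketch for this wrapper; the vim_auth dict, the region name and the stack id are placeholder assumptions, while stacks.get() is heatclient's standard manager call:

    # hypothetical caller: build the wrapper for one VIM and inspect a stack
    heatclient = HeatClient(auth_attr=vim_auth, region_name='RegionOne')
    stack = heatclient.stacks.get(stack_id)
    LOG.debug('stack %s is %s', stack.id, stack.stack_status)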
Example #5
File: heat.py Project: YujiAzama/tacker
    def __init__(self, context, password=None):
        # context, password are unused
        self.stacks = clients.OpenstackClients().heat.stacks
Example #6
    def execute_action(cls, plugin, device_dict):
        device_id = device_dict['id']
        LOG.error(_('device %s dead'), device_id)
        if plugin._mark_device_dead(device_dict['id']):
            plugin._vnf_monitor.mark_dead(device_dict['id'])
            attributes = device_dict['attributes']
            config = attributes.get('config')
            LOG.debug(_('device config %s dead'), config)
            failure_count = int(attributes.get('failure_count', '0')) + 1
            failure_count_str = str(failure_count)
            attributes['failure_count'] = failure_count_str
            attributes['dead_instance_id_' +
                       failure_count_str] = device_dict['instance_id']

            # temporary id for the respawned device; it is swapped back to
            # the original id further down
            new_device_id = device_id + '-RESPAWN-' + failure_count_str
            attributes = device_dict['attributes'].copy()
            attributes['dead_device_id'] = device_id
            new_device = {'id': new_device_id, 'attributes': attributes}
            for key in ('tenant_id', 'template_id', 'name'):
                new_device[key] = device_dict[key]
            LOG.debug(_('new_device %s'), new_device)

            # kill heat stack
            heatclient = heat.HeatClient(None)
            heatclient.delete(device_dict['instance_id'])

            # TODO(sripriya): sleep timer has been provided as a temporary
            # workaround for the nova neutron port still in use issue. Need
            # to come up with a better fix for the issue
            LOG.debug('Sleeping for 10 seconds before initiating respawn')
            time.sleep(10)

            # keystone v2.0 specific
            authtoken = CONF.keystone_authtoken
            token = clients.OpenstackClients().auth_token

            context = t_context.get_admin_context()
            context.tenant_name = authtoken.project_name
            context.user_name = authtoken.username
            context.auth_token = token['id']
            context.tenant_id = token['tenant_id']
            context.user_id = token['user_id']

            new_device_dict = plugin.create_device_sync(
                context, {'device': new_device})
            LOG.info(_('respawned new device %s'), new_device_dict['id'])

            # ugly hack to keep the id unchanged
            dead_device_id = device_id + '-DEAD-' + failure_count_str
            LOG.debug(_('%(dead)s %(new)s %(cur)s'), {
                'dead': dead_device_id,
                'new': new_device_id,
                'cur': device_id
            })
            plugin.rename_device_id(context, device_id, dead_device_id)
            plugin.rename_device_id(context, new_device_id, device_id)
            LOG.debug('Delete dead device')
            plugin.delete_device(context, dead_device_id)
            new_device_dict['id'] = device_id
            if config:
                new_device_dict.setdefault('attributes', {})['config'] = config

            plugin.config_device(context, new_device_dict)
            plugin.add_device_to_monitor(new_device_dict)
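
The keystone v2.0 admin-context setup is repeated verbatim in Examples #1, #3 and #6; a hedged refactoring sketch that factors it into a module-level helper (the name _build_admin_context is hypothetical) could look like:

def _build_admin_context():
    # hypothetical helper: collect the service credentials once and return
    # an admin context, exactly as the respawn actions above do inline
    authtoken = CONF.keystone_authtoken
    token = clients.OpenstackClients().auth_token

    context = t_context.get_admin_context()
    context.tenant_name = authtoken.project_name
    context.user_name = authtoken.username
    context.auth_token = token['id']
    context.tenant_id = token['tenant_id']
    context.user_id = token['user_id']
    return context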