    def test_get_os_admin_context(self, password_plugin):
        imp.reload(ec2_context)
        # NOTE(ft): initialize a regular context to populate oslo_context's
        # local storage and prevent the admin context from populating it.
        # This implicitly validates the overwrite=False argument of the
        # RequestContext constructor call inside get_os_admin_context.
        if not context.get_current():
            ec2_context.RequestContext(None, None)

        ctx = ec2_context.get_os_admin_context()
        conf = cfg.CONF
        password_plugin.assert_called_once_with(
            username=conf.admin_user,
            password=conf.admin_password,
            tenant_name=conf.admin_tenant_name,
            project_name=conf.admin_tenant_name,
            auth_url=conf.keystone_url)
        self.assertIsNone(ctx.user_id)
        self.assertIsNone(ctx.project_id)
        self.assertIsNone(ctx.auth_token)
        self.assertEqual([], ctx.service_catalog)
        self.assertTrue(ctx.is_os_admin)
        self.assertIsNotNone(ctx.session)
        self.assertIsNotNone(ctx.session.auth)
        self.assertNotEqual(context.get_current(), ctx)

        password_plugin.reset_mock()
        ec2_context.get_os_admin_context()
        self.assertFalse(password_plugin.called)
Example #2
    def callback(future):
        if not future.exception():
            self._table_info_repo.delete(
                tenant, table_info.name
            )
            self._notifier.info(
                req_context.get_current(),
                notifier.EVENT_TYPE_TABLE_DELETE,
                dict(
                    tenant=tenant,
                    table_name=table_info.name,
                    table_uuid=str(table_info.id),
                    value=start_time
                ))
        else:
            table_info.status = models.TableMeta.TABLE_STATUS_DELETE_FAILED
            self._table_info_repo.update(
                tenant, table_info, ["status"]
            )
            self._notifier.error(
                req_context.get_current(),
                notifier.EVENT_TYPE_TABLE_DELETE_ERROR,
                dict(
                    message=future.exception(),
                    tenant=tenant,
                    table_name=table_info.name,
                    table_uuid=str(table_info.id),
                    value=start_time
                ))
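For orientation, the self._notifier used above is an oslo.messaging notifier. A hedged sketch of how such a notifier is typically built and called; the publisher id, event type, and payload below are illustrative, not taken from the source:

import oslo_messaging
from oslo_config import cfg
from oslo_context import context as req_context

transport = oslo_messaging.get_notification_transport(cfg.CONF)
notifier = oslo_messaging.Notifier(transport, publisher_id='magnetodb.api')

# Assumes a request context was stored in this thread beforehand.
ctxt = req_context.get_current()
notifier.info(ctxt.to_dict() if ctxt else {},
              'magnetodb.table.delete', {'tenant': 'demo'})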
Example #3
    def detach(self, cluster):
        """Routine to be called when the policy is detached from a cluster.

        :param cluster: The cluster from which the policy is to be detached.
        :returns: When the operation was successful, returns a tuple of
            (True, data) where the data contains references to the resources
            created; otherwise returns a tuple of (False, err) where err
            contains an error message.
        """
        reason = _('LB resources deletion succeeded.')
        params = self._build_conn_params(cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)

        cp = cluster_policy.ClusterPolicy.load(oslo_context.get_current(),
                                               cluster.id, self.id)

        policy_data = self._extract_policy_data(cp.data)
        if policy_data is None:
            return True, reason

        res, reason = lb_driver.lb_delete(**policy_data)
        if res is False:
            return False, reason

        nodes = node_mod.Node.load_all(oslo_context.get_current(),
                                       cluster_id=cluster.id)
        for node in nodes:
            if 'lb_member' in node.data:
                node.data.pop('lb_member')
                node.store(oslo_context.get_current())

        return True, reason
Example #4
    def callback(future):
        if not future.exception():
            table_info.status = models.TableMeta.TABLE_STATUS_ACTIVE
            table_info.internal_name = future.result()
            self._table_info_repo.update(
                tenant, table_info, ["status", "internal_name"]
            )
            self._notifier.info(
                req_context.get_current(),
                notifier.EVENT_TYPE_TABLE_CREATE,
                dict(
                    tenant=tenant,
                    table_name=table_info.name,
                    table_uuid=str(table_info.id),
                    schema=table_info.schema,
                    value=start_time
                ))
        else:
            table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
            self._table_info_repo.update(tenant, table_info, ["status"])
            self._notifier.error(
                req_context.get_current(),
                notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                dict(
                    tenant=tenant,
                    table_name=table_info.name,
                    table_uuid=str(table_info.id),
                    message=future.exception(),
                    value=start_time
                ))
Example #5
    def test_get_os_admin_context(self, session, auth):
        conf = config_fixture.Config()
        clients._admin_session = None
        conf.config(auth_type='fake', group=GROUP_AUTHTOKEN)

        imp.reload(ec2_context)
        # NOTE(ft): initialize a regular context to populate oslo_context's
        # local storage and prevent the admin context from populating it.
        # This implicitly validates the overwrite=False argument of the
        # RequestContext constructor call inside get_os_admin_context.
        if not context.get_current():
            ec2_context.RequestContext(None, None)

        ctx = ec2_context.get_os_admin_context()
        conf = cfg.CONF
        auth.assert_called_once_with(conf, GROUP_AUTHTOKEN)
        auth_plugin = auth.return_value
        session.assert_called_once_with(conf, GROUP_AUTHTOKEN,
                                        auth=auth_plugin)
        self.assertIsNone(ctx.user_id)
        self.assertIsNone(ctx.project_id)
        self.assertIsNone(ctx.auth_token)
        self.assertEqual([], ctx.service_catalog)
        self.assertTrue(ctx.is_os_admin)
        self.assertIsNotNone(ctx.session)
        self.assertIsNotNone(ctx.session.auth)
        self.assertNotEqual(context.get_current(), ctx)

        session.reset_mock()
        ec2_context.get_os_admin_context()
        self.assertFalse(session.called)
Example #6
    def _do_create_table(self, tenant, table_info):
        start_time = time.time()
        try:
            table_info.internal_name = self._storage_driver.create_table(
                tenant, table_info
            )
        except exception.BackendInteractionError as ex:
            table_info.status = models.TableMeta.TABLE_STATUS_CREATE_FAILED
            self._table_info_repo.update(tenant, table_info, ["status"])

            self._notifier.error(
                req_context.get_current(),
                notifier.EVENT_TYPE_TABLE_CREATE_ERROR,
                dict(
                    tenant=tenant,
                    table_name=table_info.name,
                    message=ex.message,
                    value=start_time
                ))
            raise

        table_info.status = models.TableMeta.TABLE_STATUS_ACTIVE
        self._table_info_repo.update(
            tenant, table_info, ["status", "internal_name"]
        )

        self._notifier.audit(
            req_context.get_current(),
            notifier.EVENT_TYPE_TABLE_CREATE,
            dict(
                tenant=tenant,
                table_name=table_info.name,
                schema=table_info.schema,
                value=start_time
            ))
Example #7
    def test_neutron_context_get_admin_context_not_update_local_store(self):
        ctx = context.Context("user_id", "tenant_id")
        req_id_before = oslo_context.get_current().request_id
        self.assertEqual(ctx.request_id, req_id_before)

        ctx_admin = context.get_admin_context()
        self.assertEqual(req_id_before, oslo_context.get_current().request_id)
        self.assertNotEqual(req_id_before, ctx_admin.request_id)
Example #8
    def test_tacker_context_get_admin_context_not_update_local_store(self):
        ctx = context.Context('user_id', 'tenant_id')
        req_id_before = oslo_context.get_current().request_id
        self.assertEqual(req_id_before, ctx.request_id)

        ctx_admin = context.get_admin_context()
        self.assertEqual(req_id_before,
                         oslo_context.get_current().request_id)
        self.assertNotEqual(req_id_before, ctx_admin.request_id)
Example #9
    def test_x_project_id_header(self):
        tenant = '012345'
        self.state.request.headers['X-Project-ID'] = tenant

        self.hook.before(self.state)
        self.assertEqual(tenant, context.get_current().tenant)
        self.assertEqual(
            self.state.request.host_url + '/v1.0',
            context.get_current().base_url
        )
Example #10
    def member_add(self, node, lb_id, pool_id, port, subnet):
        """Add a member to Neutron lbaas pool.

        :param node: A node object to be added to the specified pool.
        :param lb_id: The ID of the loadbalancer.
        :param pool_id: The ID of the pool for receiving the node.
        :param port: The port for the new LB member to be created.
        :param subnet: The subnet to be used by the new LB member.
        :returns: The ID of the new LB member or None if errors occurred.
        """
        addresses = self._get_node_address(node, version=4)
        if not addresses:
            LOG.error(_LE('Node (%(n)s) does not have a valid IPv4 address.'),
                      {'n': node.id})
            return None

        try:
            subnet_obj = self.nc().subnet_get(subnet)
            net_id = subnet_obj.network_id
            net = self.nc().network_get(net_id)
        except exception.InternalError as ex:
            resource = 'subnet' if subnet in ex.message else 'network'
            msg = _LE('Failed in getting %(resource)s: %(msg)s.'
                      ) % {'resource': resource, 'msg': six.text_type(ex)}
            LOG.exception(msg)
            event.warning(oslo_context.get_current(), self,
                          resource.upper() + '_GET', 'ERROR', msg)
            return None
        net_name = net.name

        if net_name not in addresses:
            LOG.error(_LE('Node is not in subnet %(subnet)s'),
                      {'subnet': subnet})
            return None

        address = addresses[net_name]
        try:
            member = self.nc().pool_member_create(pool_id, address, port,
                                                  subnet_obj.id)
        except exception.InternalError as ex:
            msg = _LE('Failed in creating lb pool member: %s.'
                      ) % six.text_type(ex)
            LOG.exception(msg)
            event.warning(oslo_context.get_current(), self,
                          'POOL_MEMBER_CREATE', 'ERROR', msg)
            return None
        res = self._wait_for_lb_ready(lb_id)
        if res is False:
            LOG.error(_LE('Failed in creating pool member (%s).') % member.id)
            return None

        return member.id
Example #11
    def test_neutron_context_overwrite(self):
        ctx1 = context.Context("user_id", "tenant_id")
        self.assertEqual(ctx1.request_id, oslo_context.get_current().request_id)

        # If overwrite is not specified, request_id should be updated.
        ctx2 = context.Context("user_id", "tenant_id")
        self.assertNotEqual(ctx2.request_id, ctx1.request_id)
        self.assertEqual(ctx2.request_id, oslo_context.get_current().request_id)

        # If overwrite is specified, request_id should be kept.
        ctx3 = context.Context("user_id", "tenant_id", overwrite=False)
        self.assertNotEqual(ctx3.request_id, ctx2.request_id)
        self.assertEqual(ctx2.request_id, oslo_context.get_current().request_id)
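These overwrite tests all exercise the same thread-local store; a minimal standalone sketch of the behavior, using oslo.context directly:

from oslo_context import context

ctx1 = context.RequestContext()                  # stored as the current context
assert context.get_current() is ctx1

ctx2 = context.RequestContext(overwrite=False)   # created, but not stored
assert context.get_current() is ctx1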
Example #12
    def test_context_in_local_store(self):
        """Mocks Oslo local store to ensure the context is stored.

        This test exists to ensure we continue to populate a context in the
        Oslo local store, thus allowing Oslo log to pick it up.
        """
        tenant = '012345'
        self.state.request.headers['X-Project-ID'] = tenant

        self.hook.before(self.state)
        self.assertIsNotNone(context.get_current())
        self.assertIsInstance(
            context.get_current(), hooks.context.PoppyRequestContext
        )
Example #13
    def test_spawn_n_no_context(self):
        self.assertIsNone(common_context.get_current())

        def _fake_spawn(func, *args, **kwargs):
            # call the method to ensure no error is raised
            func(*args, **kwargs)
            self.assertEqual('test', args[0])

        def fake(arg):
            pass

        with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
            getattr(utils, self.spawn_name)(fake, 'test')
        self.assertIsNone(common_context.get_current())
Example #14
    def test_spawn_n_no_context(self):
        self.assertIsNone(common_context.get_current())

        def _fake_spawn(func, *args, **kwargs):
            # call the method to ensure no error is raised
            func(*args, **kwargs)
            self.assertEqual("test", args[0])

        def fake(arg):
            pass

        with mock.patch.object(eventlet, "spawn_n", _fake_spawn):
            utils.spawn_n(fake, "test")
        self.assertIsNone(common_context.get_current())
Example #15
    def test_tenant_id_url_with_header_and_injection(self):
        self.mock_cfg.project_id_in_url = True

        header_tenant = '012345'
        url_tenant = '567890'
        self.state.request.headers['X-Project-ID'] = header_tenant
        self.state.request.path = '/v1.0/' + url_tenant + '/services'

        self.hook.before(self.state)

        self.assertEqual(header_tenant, context.get_current().tenant)
        self.assertEqual(
            self.state.request.host_url + '/v1.0/' + header_tenant,
            context.get_current().base_url
        )
Example #16
    def test_store_current_resets_correctly(self):
        # By default a new context is stored.
        ctx = context.RequestContext()

        # The use of the fixture should put us in a reset state; not
        # doing so is a bug, because when this fixture is consumed by
        # other test suites there is no guarantee that all tests use
        # this fixture.
        self.useFixture(fixture.ClearRequestContext())
        self.assertIsNone(context.get_current())

        ctx = context.RequestContext()
        self.assertIs(context.get_current(), ctx)
        fixture.ClearRequestContext()._remove_cached_context()
        self.assertIsNone(context.get_current())
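A minimal sketch of using that fixture in an ordinary test case, assuming the same testtools/fixtures stack as above:

import testtools
from oslo_context import context, fixture

class CleanContextTest(testtools.TestCase):
    def setUp(self):
        super(CleanContextTest, self).setUp()
        # Guarantees an empty thread-local store for each test.
        self.useFixture(fixture.ClearRequestContext())

    def test_starts_empty(self):
        self.assertIsNone(context.get_current())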
Example #17
def delete_item(tenant, table_name, key_attribute_map,
                expected_condition_map=None):
    """
    :param tenant: tenant for table for deleting item from
    :param table_name: name of the table
    :param key_attribute_map: attribute name to AttributeValue instance map,
                which represents key to identify item
                to delete
    :param expected_condition_map: expected attribute name to
                ExpectedCondition instance mapping. It provides
                preconditions to make decision about should item be deleted
                or not

    :returns: True if operation performed, otherwise False (if operation was
                skipped by out of date timestamp, it is considered as
                successfully performed)

    :raises: BackendInteractionException
    """
    req_context.get_current().request_args = dict(
        tenant=tenant, table_name=table_name,
        key_attribute_map=key_attribute_map,
        expected_condition_map=expected_condition_map
    )
    return __STORAGE_MANAGER_IMPL.delete_item(
        tenant, table_name, key_attribute_map, expected_condition_map
    )
Example #18
def update_item(tenant, table_name, key_attribute_map,
                attribute_action_map, expected_condition_map=None):
    """
    :param tenant: tenant for table where item will be updated
    :param table_name: String, name of table to delete item from
    :param key_attribute_map: key attribute name to
                AttributeValue mapping. It defines row it to update item
    :param attribute_action_map: attribute name to UpdateItemAction
                instance mapping. It defines actions to perform for each
                given attribute
    :param expected_condition_map: expected attribute name to
                ExpectedCondition instance mapping. It provides
                preconditions
                to make decision about should item be updated or not
    :returns: True if operation performed, otherwise False

    :raises: BackendInteractionException
    """
    req_context.get_current().request_args = dict(
        tenant=tenant, table_name=table_name,
        key_attribute_map=key_attribute_map,
        attribute_action_map=attribute_action_map,
        expected_condition_map=expected_condition_map
    )
    return __STORAGE_MANAGER_IMPL.update_item(
        tenant, table_name, key_attribute_map, attribute_action_map,
        expected_condition_map
    )
Example #19
def scan(tenant, table_name, condition_map, attributes_to_get=None,
         limit=None, exclusive_start_key=None,
         consistent=False):
    """
    :param tenant: tenant for table for scanning items from
    :param table_name: String, name of table to get item from
    :param condition_map: attribute name to
                IndexedCondition instance mapping. It defines rows
                set to be selected
    :param attributes_to_get: list of attribute names to be included in
                result. If None, all attributes will be included
    :param limit: maximum count of returned values
    :param exclusive_start_key: key attribute names to AttributeValue
                instance
    :param consistent: define is operation consistent or not (by default it
                is not consistent)

    :returns: list of attribute name to AttributeValue mappings

    :raises: BackendInteractionException
    """
    req_context.get_current().request_args = dict(
        tenant=tenant, table_name=table_name, condition_map=condition_map,
        attributes_to_get=attributes_to_get, limit=limit,
        exclusive_start_key=exclusive_start_key, consistent=consistent,
    )
    return __STORAGE_MANAGER_IMPL.scan(
        tenant, table_name, condition_map, attributes_to_get, limit,
        exclusive_start_key, consistent=consistent
    )
Example #20
    def test_store_when_no_overwrite(self):
        # If no context exists we store one even if overwrite is false
        # (since we are not overwriting anything).
        ctx = context.RequestContext('111',
                                     '222',
                                     overwrite=False)
        self.assertIs(o_context.get_current(), ctx)
Example #21
    def test_spawn_n_context(self):
        self.assertIsNone(common_context.get_current())
        ctxt = context.RequestContext('user', 'project')

        def _fake_spawn(func, *args, **kwargs):
            # call the method to ensure no error is raised
            func(*args, **kwargs)
            self.assertEqual(ctxt, args[0])
            self.assertEqual('test', kwargs['kwarg1'])

        def fake(context, kwarg1=None):
            pass

        with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
            getattr(utils, self.spawn_name)(fake, ctxt, kwarg1='test')
        self.assertEqual(ctxt, common_context.get_current())
Example #22
    def setUp(self):
        super(BaseTestCase, self).setUp()

        self.useFixture(fixtures.NestedTempfile())
        self.useFixture(fixtures.TempHomeDir())

        self.useFixture(fixtures.MockPatchObject(sys, 'exit',
                                                 side_effect=UnexpectedExit))
        self.useFixture(log_fixture.get_logging_handle_error_fixture())

        warnings.filterwarnings('error', category=DeprecationWarning,
                                module='^keystone\\.')
        warnings.simplefilter('error', exc.SAWarning)
        self.addCleanup(warnings.resetwarnings)
        # Ensure we have an empty threadlocal context at the start of each
        # test.
        self.assertIsNone(oslo_context.get_current())
        self.useFixture(oslo_ctx_fixture.ClearRequestContext())

        orig_debug_level = ldap.get_option(ldap.OPT_DEBUG_LEVEL)
        self.addCleanup(ldap.set_option, ldap.OPT_DEBUG_LEVEL,
                        orig_debug_level)
        orig_tls_cacertfile = ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)
        self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTFILE,
                        orig_tls_cacertfile)
        orig_tls_cacertdir = ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)
        # Setting orig_tls_cacertdir to None is not allowed.
        if orig_tls_cacertdir is None:
            orig_tls_cacertdir = ''
        self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTDIR,
                        orig_tls_cacertdir)
        orig_tls_require_cert = ldap.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)
        self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_REQUIRE_CERT,
                        orig_tls_require_cert)
        self.addCleanup(ks_ldap.PooledLDAPHandler.connection_pools.clear)
Example #23
    def test_admin_no_overwrite(self):
        # If there is already a context in the cache creating an admin
        # context will not overwrite it.
        ctx1 = context.RequestContext(overwrite=True)
        context.get_admin_context()
        self.assertIs(context.get_current(), ctx1)
        self.assertFalse(ctx1.is_admin)
Example #24
    def process_action(self, service_name, api_version, action_name,
                       action_params):
        service_capabilities = self.capabilities.get(service_name, None)

        if service_capabilities is None:
            raise amz_exception.AWSBadRequestException(
                "Service '%s' isn't supported" % service_name)

        target_capabilities = service_capabilities.get(api_version, None)

        if target_capabilities is None:
            raise (
                amz_exception.AWSBadRequestException(
                    "Service '%s' doesn't support API version '%s'" %
                    (service_name, api_version))
            )

        action = target_capabilities.get(action_name, None)

        if action is None:
            raise (
                amz_exception.AWSValidationException(
                    "Service '%s', API version '%s' "
                    "doesn't support action '%s'" %
                    (service_name, api_version, action_name))
            )

        context = req_context.get_current()

        context.request_type = action_name

        return action.perform(context.tenant, action_params)
Example #25
    def test_spawn_n_context(self):
        self.assertIsNone(common_context.get_current())
        ctxt = context.RequestContext("user", "project")

        def _fake_spawn(func, *args, **kwargs):
            # call the method to ensure no error is raised
            func(*args, **kwargs)
            self.assertEqual(ctxt, args[0])
            self.assertEqual("test", kwargs["kwarg1"])

        def fake(context, kwarg1=None):
            pass

        with mock.patch.object(eventlet, "spawn_n", _fake_spawn):
            utils.spawn_n(fake, ctxt, kwarg1="test")
        self.assertEqual(ctxt, common_context.get_current())
Example #26
def notify(method, engine_id=None, **kwargs):
    '''Send notification to dispatcher

    :param method: remote method to call
    :param engine_id: dispatcher to notify; None implies broadcast
    '''

    client = rpc_messaging.get_rpc_client(version=consts.RPC_API_VERSION)

    if engine_id:
        # Notify specific dispatcher identified by engine_id
        call_context = client.prepare(
            version=consts.RPC_API_VERSION,
            topic=consts.ENGINE_DISPATCHER_TOPIC,
            server=engine_id)
    else:
        # Broadcast to all dispatchers
        call_context = client.prepare(
            version=consts.RPC_API_VERSION,
            topic=consts.ENGINE_DISPATCHER_TOPIC)

    try:
        # We don't actually use the context parameter for action
        # progress. But since RPCClient.call needs this param,
        # we use the oslo current context here.
        call_context.call(oslo_context.get_current(), method, **kwargs)
        return True
    except oslo_messaging.MessagingTimeout:
        return False
Example #27
def inject_headers():
    ctx = context_utils.get_current()
    if ctx:
        ctx_dict = ctx.to_dict()
        return {'X-NSX-EUSER': ctx_dict.get('user_identity'),
                'X-NSX-EREQID': ctx_dict.get('request_id')}
    return {}
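A hedged usage sketch for inject_headers(); the user and project values are illustrative, this assumes a recent oslo.context, and the exact header values depend on the context's to_dict() output:

from oslo_context import context as context_utils

# With a context stored in the thread-local store, headers are derived
# from it; with no context, inject_headers() returns {}.
context_utils.RequestContext(user_id='alice', project_id='proj-1')
headers = inject_headers()
# e.g. {'X-NSX-EUSER': <user identity string>, 'X-NSX-EREQID': 'req-...'}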
Example #28
    def detach(self, cluster):
        """Routine to be called when the policy is detached from a cluster.

        :param cluster: The cluster from which the policy is to be detached.
        :returns: When the operation was successful, returns a tuple of
                  (True, data) where the data contains references to the
                  resources created; otherwise returns a tuple of (False,
                  err) where err contains an error message.
        """

        reason = _('Server group resources deletion succeeded')

        cp = cluster_policy.ClusterPolicy.load(oslo_context.get_current(),
                                               cluster.id, self.id)
        if cp is None or cp.data is None:
            return True, reason

        policy_data = self._extract_policy_data(cp.data)
        if policy_data is None:
            return True, reason

        group_id = policy_data.get('group_id', None)
        inherited_group = policy_data.get('inherited_group', False)

        if group_id and not inherited_group:
            try:
                # to add into nova driver
                self.nova(cluster).delete_server_group(group_id)
            except exception.InternalError as ex:
                msg = 'Failed in deleting server_group'
                LOG.exception(_LE('%(msg)s: %(ex)s') % {
                    'msg': msg, 'ex': six.text_type(ex)})
                return False, msg

        return True, reason
Example #29
    def _action_per_service_obj(self, project_id, action, service_obj):

        kwargs = {
            'project_id': project_id,
            'service_obj': json.dumps(service_obj.to_dict()),
            'time_seconds': self.determine_sleep_times(),
            'context_dict': context_utils.get_current().to_dict()
        }

        try:
            if action == 'delete':
                LOG.info('Deleting service: %s, project_id: %s' % (
                    service_obj.service_id, project_id))
                self.delete(project_id, service_obj.service_id)
            elif action == 'enable':
                LOG.info('Enabling service: %s, project_id: %s' % (
                    service_obj.service_id, project_id))
                kwargs['state'] = 'enabled'
                self.distributed_task_controller.submit_task(
                    update_service_state.enable_service, **kwargs)
            elif action == 'disable':
                LOG.info('Disabling service: %s, project_id: %s' % (
                    service_obj.service_id, project_id))
                kwargs['state'] = 'disabled'
                self.distributed_task_controller.submit_task(
                    update_service_state.disable_service, **kwargs)
        except Exception as e:
            # If one service's action fails, we log it and do not
            # impact other services' actions
            LOG.warning('Perform action %s on service: %s,'
                        ' project_id: %s failed, reason: %s' % (
                            action,
                            service_obj.service_id,
                            project_id,
                            str(e)))
Example #30
    def delete(self, project_id, service_id):
        """delete.

        :param project_id:
        :param service_id:
        :raises: LookupError
        """
        service_obj = self.storage_controller.get(project_id, service_id)

        # get provider details for this service
        provider_details = self._get_provider_details(project_id, service_id)

        # change each provider detail's status to delete_in_progress
        for provider in service_obj.provider_details:
            service_obj.provider_details[provider].status = (
                u'delete_in_progress')

        self.storage_controller.update(project_id, service_id, service_obj)

        kwargs = {
            "provider_details": json.dumps(
                dict([(k, v.to_dict()) for k, v in provider_details.items()])),
            "project_id": project_id,
            "service_id": service_id,
            'time_seconds': self.determine_sleep_times(),
            'context_dict': context_utils.get_current().to_dict()
        }

        self.distributed_task_controller.submit_task(
            delete_service.delete_service, **kwargs)

        return
Example #31
    def test_get_context_no_overwrite(self):
        # If there is already a context in the cache creating another context
        # should not overwrite it.
        ctx1 = context.RequestContext('111',
                                      '222',
                                      overwrite=True)
        context.get_context()
        self.assertIs(ctx1, o_context.get_current())
Example #32
    def test_admin_no_overwrite(self):
        # If there is already a context in the cache creating an admin
        # context will not overwrite it.
        ctx1 = context.RequestContext('111',
                                      '222',
                                      overwrite=True)
        context.get_admin_context()
        self.assertIs(o_context.get_current(), ctx1)
Example #33
    def attach(self, cluster, enabled=True):
        """Routine to be invoked when policy is to be attached to a cluster.

        :param cluster: The cluster to which the policy is being attached.
        :param enabled: Whether the attached cluster policy is enabled or
                        disabled.
        :returns: When the operation was successful, returns a tuple (True,
                  message); otherwise, return a tuple (False, error).
        """
        res, data = super(LoadBalancingPolicy, self).attach(cluster)
        if res is False:
            return False, data

        lb_driver = self.lbaas(cluster.user, cluster.project)
        lb_driver.lb_status_timeout = self.lb_status_timeout

        # TODO(Anyone): Check if existing nodes have conflicts regarding the
        # subnets. Each VM's addresses detail has a key named after the
        # network, which can be used for validation.
        if self.lb:
            data = {}
            data['preexisting'] = True
            data['loadbalancer'] = self.lb
            data['pool'] = self.pool_spec.get(self.POOL_ID, None)
            data['vip_address'] = self.vip_spec.get(self.VIP_ADDRESS, None)
            if self.hm_spec and self.hm_spec.get(self.HM_ID, None):
                data['healthmonitor'] = self.hm_spec.get(self.HM_ID)
        else:
            res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec,
                                            self.hm_spec, self.az_spec)
            if res is False:
                return False, data

        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        for node in cluster.nodes:
            member_id = lb_driver.member_add(node, data['loadbalancer'],
                                             data['pool'], port, subnet)
            if member_id is None:
                # If adding a member fails, remove all lb resources that
                # were created and return the failure reason.
                # TODO(anyone): May need to "roll-back" changes caused by any
                # successful member_add() calls.
                if not self.lb:
                    lb_driver.lb_delete(**data)
                return False, 'Failed in adding node into lb pool'

            node.data.update({'lb_member': member_id})
            values = {'data': node.data}
            no.Node.update(oslo_context.get_current(), node.id, values)

        cluster_data_lb = cluster.data.get('loadbalancers', {})
        cluster_data_lb[self.id] = {'vip_address': data.pop('vip_address')}
        cluster.data['loadbalancers'] = cluster_data_lb

        policy_data = self._build_policy_data(data)

        return True, policy_data
Example #34
    def member_add(self, node, lb_id, pool_id, port, subnet):
        """Add a member to Neutron lbaas pool.

        :param node: A node object to be added to the specified pool.
        :param lb_id: The ID of the loadbalancer.
        :param pool_id: The ID of the pool for receiving the node.
        :param port: The port for the new LB member to be created.
        :param subnet: The subnet to be used by the new LB member.
        :returns: The ID of the new LB member or None if errors occurred.
        """
        try:
            subnet_obj = self.nc().subnet_get(subnet)
            net_id = subnet_obj.network_id
            net = self.nc().network_get(net_id)
        except exception.InternalError as ex:
            resource = 'subnet' if subnet in ex.message else 'network'
            msg = ('Failed in getting %(resource)s: %(msg)s.' % {
                'resource': resource,
                'msg': six.text_type(ex)
            })
            LOG.exception(msg)
            return None
        net_name = net.name

        node_detail = node.get_details(oslo_context.get_current())
        addresses = node_detail.get('addresses')
        if net_name not in addresses:
            msg = 'Node is not in subnet %(subnet)s'
            LOG.error(msg, {'subnet': subnet})
            return None

        # Use the first IP address if more than one is found in target network
        address = addresses[net_name][0]['addr']
        try:
            # FIXME(Yanyan Hu): Currently, the Neutron lbaasv2 service cannot
            # handle concurrent lb member operations well: a new member
            # creation/deletion request will fail directly rather than being
            # queued up while another operation is still in progress. In this
            # workaround, the loadbalancer status is checked before the lb
            # member creation request is sent out. If the loadbalancer stays
            # unready until the waiting timeout, an exception is raised to
            # fail member_add.
            res = self._wait_for_lb_ready(lb_id)
            if not res:
                msg = 'Loadbalancer %s is not ready.' % lb_id
                raise exception.Error(msg)
            member = self.nc().pool_member_create(pool_id, address, port,
                                                  subnet_obj.id)
        except (exception.InternalError, exception.Error) as ex:
            msg = ('Failed in creating lb pool member: %s.' %
                   six.text_type(ex))
            LOG.exception(msg)
            return None
        res = self._wait_for_lb_ready(lb_id)
        if res is False:
            LOG.error('Failed in creating pool member (%s).', member.id)
            return None

        return member.id
Example #35
def inject_headers():
    ctx = context_utils.get_current()
    if ctx:
        ctx_dict = ctx.to_dict()
        return {
            'X-NSX-EUSER': ctx_dict.get('user_identity'),
            'X-NSX-EREQID': ctx_dict.get('request_id')
        }
    return {}
Example #36
    def verify_ctx(wrapper):
        _context = ctx.get_current()
        self.assertIsNotNone(_context)
        self.assertEqual('123', _context.request_id)
        # Copy the base set of locks to expect
        our_locks = list(locks)
        # Add our wrapper's uuid since that will be set also.
        our_locks.append(wrapper.uuid)
        self.assertEqual(set(our_locks), set(tx._get_locks()))
Example #37
    def test_spawn_n_context_different_from_passed(self):
        self.assertIsNone(common_context.get_current())
        ctxt = context.RequestContext('user', 'project')
        ctxt_passed = context.RequestContext('user', 'project',
                overwrite=False)
        self.assertEqual(ctxt, common_context.get_current())

        def _fake_spawn(func, *args, **kwargs):
            # call the method to ensure no error is raised
            func(*args, **kwargs)
            self.assertEqual(ctxt_passed, args[0])
            self.assertEqual('test', kwargs['kwarg1'])

        def fake(context, kwarg1=None):
            pass

        with mock.patch.object(eventlet, self.spawn_name, _fake_spawn):
            getattr(utils, self.spawn_name)(fake, ctxt_passed, kwarg1='test')
        self.assertEqual(ctxt, common_context.get_current())
Example #38
    def delete_ssl_certificate(self, project_id, domain_name, cert_type):
        kwargs = {
            'project_id': project_id,
            'domain_name': domain_name,
            'cert_type': cert_type,
            'context_dict': context_utils.get_current().to_dict()
        }
        self.distributed_task_controller.submit_task(
            delete_ssl_certificate.delete_ssl_certificate, **kwargs)
        return kwargs
Example #39
    def test_no_overwrite(self):
        # If there is already a context in the cache a new one will
        # not overwrite it if overwrite=False.
        ctx1 = context.RequestContext('111',
                                      '222',
                                      overwrite=True)
        context.RequestContext('333',
                               '444',
                               overwrite=False)
        self.assertIs(o_context.get_current(), ctx1)
Example #40
    def attach(self, cluster):
        """Routine to be invoked when policy is to be attached to a cluster.

        :param cluster: The target cluster to be attached to;
        :returns: When the operation was successful, returns a tuple (True,
                  message); otherwise, return a tuple (False, error).
        """
        res, data = super(LoadBalancingPolicy, self).attach(cluster)
        if res is False:
            return False, data

        nodes = node_mod.Node.load_all(oslo_context.get_current(),
                                       cluster_id=cluster.id)

        params = self._build_conn_params(cluster)
        lb_driver = driver_base.SenlinDriver().loadbalancing(params)

        res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec)
        if res is False:
            return False, data

        port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
        subnet = self.pool_spec.get(self.POOL_SUBNET)

        for node in nodes:
            member_id = lb_driver.member_add(node, data['loadbalancer'],
                                             data['pool'], port, subnet)
            if member_id is None:
                # If adding a member fails, remove all lb resources that
                # were created and return the failure reason.
                # TODO(Yanyan Hu): Maybe we should tolerate member adding
                # failure and allow policy attaching to succeed without
                # all nodes being added into lb pool?
                lb_driver.lb_delete(**data)
                return False, 'Failed in adding node into lb pool'

            node.data.update({'lb_member': member_id})
            node.store(oslo_context.get_current())

        policy_data = self._build_policy_data(data)

        return True, policy_data
Example #41
    def ensure_thread_contain_context(self):
        """Ensure threading contains context

        For async/periodic tasks, the context of local thread is missing.
        Set it with request context and this is useful to log the request_id
        in log messages.

        """
        if context.get_current():
            return
        self.update_store()
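A minimal sketch of what update_store() does for a fresh thread: the worker starts with an empty thread-local store, and update_store() re-populates it so get_current() (and oslo.log) can find the context:

import threading
from oslo_context import context

ctx = context.RequestContext()

def worker():
    assert context.get_current() is None    # new thread, empty store
    ctx.update_store()                      # re-populate for this thread
    assert context.get_current() is ctx

t = threading.Thread(target=worker)
t.start()
t.join()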
Example #42
    def test_tgm_start(self):
        stack_id = 'test'

        thm = service.ThreadGroupManager()
        ret = thm.start(stack_id, self.f, *self.fargs, **self.fkwargs)

        self.assertEqual(self.tg_mock, thm.groups['test'])
        self.tg_mock.add_thread.assert_called_with(
            thm._start_with_trace, context.get_current(), None,
            self.f, *self.fargs, **self.fkwargs)
        self.assertEqual(ret, self.tg_mock.add_thread())
Example #43
    def detach(self, cluster):
        """Routine to be called when the policy is detached from a cluster.

        :param cluster: The cluster from which the policy is to be detached.
        :returns: When the operation was successful, returns a tuple of
            (True, data) where the data contains references to the resources
            created; otherwise returns a tuple of (False, err) where err
            contains an error message.
        """
        reason = _('LB resources deletion succeeded.')
        lb_driver = self.lbaas(cluster.user, cluster.project)
        lb_driver.lb_status_timeout = self.lb_status_timeout

        cp = cluster_policy.ClusterPolicy.load(oslo_context.get_current(),
                                               cluster.id, self.id)

        policy_data = self._extract_policy_data(cp.data)
        if policy_data is None:
            return True, reason

        res, reason = lb_driver.lb_delete(**policy_data)
        if res is False:
            return False, reason

        nodes = nm.Node.load_all(oslo_context.get_current(),
                                 cluster_id=cluster.id,
                                 project_safe=False)
        for node in nodes:
            if 'lb_member' in node.data:
                node.data.pop('lb_member')
                node.store(oslo_context.get_current())

        lb_data = cluster.data.get('loadbalancers', {})
        if lb_data and isinstance(lb_data, dict):
            lb_data.pop(self.id, None)
            if lb_data:
                cluster.data['loadbalancers'] = lb_data
            else:
                cluster.data.pop('loadbalancers')

        return True, reason
Example #44
    def test_start(self):
        def f():
            pass

        mock_group = mock.Mock()
        self.mock_tg.return_value = mock_group

        tgm = scheduler.ThreadGroupManager()
        tgm.start(f)

        mock_group.add_thread.assert_called_once_with(
            tgm._start_with_trace, oslo_context.get_current(), None, f)
Example #45
    def read(self, metric_names, from_timestamp, to_timestamp, resolution):
        """read metrics from metrics driver.

        """
        curr_resolution = \
            helper.resolution_converter_seconds_to_enum(resolution)
        context_dict = context_utils.get_current().to_dict()

        project_id = context_dict['tenant']
        auth_token = None
        if self.driver.metrics_conf.use_keystone_auth:
            auth_token = context_dict['auth_token']

        tenanted_blueflood_url = \
            self.driver.metrics_conf.blueflood_url.format(
                project_id=project_id
            )
        from_timestamp = int(helper.datetime_to_epoch(from_timestamp))
        to_timestamp = int(helper.datetime_to_epoch(to_timestamp))
        urls = []
        params = {
            'to': to_timestamp,
            'from': from_timestamp,
            'resolution': curr_resolution
        }
        for metric_name in metric_names:
            tenanted_blueflood_url_with_metric = helper.join_url(
                tenanted_blueflood_url,
                metric_name.strip().replace(" ", ""))
            LOG.info("Querying BlueFlood Metric: {0}".format(
                tenanted_blueflood_url_with_metric))
            urls.append(
                helper.set_qs_on_url(tenanted_blueflood_url_with_metric,
                                     **params))
        executors = self.driver.metrics_conf.no_of_executors
        blueflood_client = client.BlueFloodMetricsClient(token=auth_token,
                                                         project_id=project_id,
                                                         executors=executors)
        results = blueflood_client.async_requests(urls)
        reordered_metric_names = []
        for result in results:
            metric_name = helper.retrieve_last_relative_url(result.url)
            reordered_metric_names.append(metric_name)

        formatted_results = []
        for metric_name, result in zip(reordered_metric_names, results):
            formatted_result = self._result_formatter(result)
            # NOTE(TheSriram): Tuple to pass the associated metric name, along
            # with the formatted result
            formatted_results.append((metric_name, formatted_result))

        return formatted_results
Example #46
    def execute(self, service_old, service_obj, project_id):
        """Delete certificates of deleted domains.

        :param unicode service_old: json object of the old service
        :param unicode service_obj: json object of the service
        :param unicode project_id: project id of user
        """
        service_controller, dns = \
            memoized_controllers.task_controllers('poppy', 'dns')

        # get old domains
        service_old_json = json.loads(service_old)
        service_old = service.load_from_json(service_old_json)
        old_domains = set([
            domain.domain for domain in service_old.domains
            if domain.protocol == 'https'
            and
            domain.certificate in ['san', 'sni']
        ])

        # get new domains
        service_new_json = json.loads(service_obj)
        service_new = service.load_from_json(service_new_json)
        new_domains = set([
            domain.domain for domain in service_new.domains
            if domain.protocol == 'https'
            and
            domain.certificate in ['san', 'sni']
        ])

        removed_domains = old_domains.difference(new_domains)

        LOG.info("update_service Old domains: {0}".format(old_domains))
        LOG.info("update_service New domains: {0}".format(new_domains))
        LOG.info("update_service Deleted domains: {0}".format(removed_domains))

        kwargs = {
            'project_id': project_id,
            'cert_type': 'san',
            'context_dict': context_utils.get_current().to_dict()
        }

        for domain in removed_domains:
            kwargs['domain_name'] = domain
            LOG.info(
                "update_service removing certificate "
                "for deleted domain {0}".format(domain)
            )
            service_controller.distributed_task_controller.submit_task(
                delete_ssl_certificate.delete_ssl_certificate,
                **kwargs
            )
Example #47
    def test_start_action(self, mock_action_acquire, mock_action_acquire_1st):
        action = mock.Mock()
        action.id = '0123'
        mock_action_acquire.return_value = action
        mock_action_acquire_1st.return_value = None

        svc = service.EngineService('HOST', 'TOPIC')
        svc.tg = self.mock_tg
        svc.start_action('4567', '0123')

        self.mock_tg.add_thread.assert_called_once_with(
            svc._start_with_trace, oslo_context.get_current(), None,
            actionm.ActionProc, svc.db_session, '0123')
Example #48
def send_api_fault(url, status, exception):
    """Send an api.fault notification."""

    if not CONF.notify_api_faults:
        return

    payload = {'url': url, 'exception': six.text_type(exception),
               'status': status}

    rpc.get_notifier('api').error(common_context.get_current() or
                                  nova.context.get_admin_context(),
                                  'api.fault',
                                  payload)
Example #49
    def test_start_action_no_action_id(self, mock_acquire_action):
        mock_action = mock.Mock()
        mock_action.id = '0123'
        mock_action.action = 'CLUSTER_CREATE'
        mock_acquire_action.side_effect = [mock_action, None]

        svc = service.EngineService('HOST', 'TOPIC')
        svc.tg = self.mock_tg
        svc.start_action('4567')

        self.mock_tg.add_thread.assert_called_once_with(
            svc._start_with_trace, oslo_context.get_current(), None,
            actionm.ActionProc, svc.db_session, '0123')
Example #50
    def __call__(self, env, start_response):
        """Handle incoming request.

        Authenticate and send downstream on success. Reject the request if
        we can't authenticate.
        """
        LOG.debug('Authenticating user token')
        ctx = context.get_current()
        authenticated = self.rpc_client.authenticated_to_backend(ctx)
        if authenticated:
            return self.app(env, start_response)
        else:
            return self._reject_request(env, start_response)
Example #51
    def test_start_action_no_action_id(self, mock_acquire_action):
        mock_action = mock.Mock()
        mock_action.id = '0123'
        mock_action.action = 'CLUSTER_CREATE'
        mock_acquire_action.side_effect = [mock_action, None]
        mock_group = mock.Mock()
        self.mock_tg.return_value = mock_group

        tgm = scheduler.ThreadGroupManager()
        tgm.start_action('4567')

        mock_group.add_thread.assert_called_once_with(
            tgm._start_with_trace, oslo_context.get_current(), None,
            actionm.ActionProc, tgm.db_session, '0123')
Example #52
def _set_logging_config():
    context_utils.generate_request_id = lambda: '-'
    context_utils.get_current().request_id = '-'

    EMBER_CONFIG.setdefault(
        'logging_context_format_string',
        '%(asctime)s %(levelname)s %(name)s [%(request_id)s] %(message)s')
    EMBER_CONFIG.setdefault('disable_logs', False)

    if EMBER_CONFIG.get('debug'):
        log_levels = defaults.DEBUG_LOG_LEVELS
    else:
        log_levels = defaults.LOG_LEVELS
    EMBER_CONFIG.setdefault('default_log_levels', log_levels)
Example #53
    def test_start_action(self, mock_action_acquire, mock_action_acquire_1st):
        mock_group = mock.Mock()
        self.mock_tg.return_value = mock_group
        action = mock.Mock()
        action.id = '0123'
        mock_action_acquire.return_value = action
        mock_action_acquire_1st.return_value = None

        tgm = scheduler.ThreadGroupManager()
        tgm.start_action('4567', '0123')

        mock_group.add_thread.assert_called_once_with(
            tgm._start_with_trace, oslo_context.get_current(), None,
            actionm.ActionProc, tgm.db_session, '0123')
Example #54
def _update_record_with_context(record):
    """Given a log record, update it with context information.

    The request context, if there is one, will either be in the
    extra values for the incoming record or in the global
    thread-local store.
    """
    context = record.__dict__.get('context', context_utils.get_current())
    d = _dictify_context(context)
    # Copy the context values directly onto the record so they can be
    # used by the formatting strings.
    for k, v in d.items():
        setattr(record, k, v)
    return context
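A small runnable sketch of the lookup order implemented above: an explicit 'context' attribute on the record wins, otherwise the thread-local current context is used:

import logging
from oslo_context import context as context_utils

ctx = context_utils.RequestContext()
record = logging.LogRecord('demo', logging.INFO, __file__, 1, 'msg', (), None)

# No 'context' key on the record -> falls back to get_current().
assert record.__dict__.get('context', context_utils.get_current()) is ctx

record.context = 'explicit'                 # an explicit extra wins
assert record.__dict__.get('context', context_utils.get_current()) == 'explicit'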
Example #55
    def purge(self, project_id, service_id, hard=False, purge_url=None):
        """If purge_url is none, all content of this service will be purge."""
        try:
            service_obj = self.storage_controller.get_service(
                project_id, service_id)
        except ValueError as e:
            # This except is hit when service object does not exist
            raise LookupError(str(e))

        if service_obj.status not in [u'deployed']:
            raise errors.ServiceStatusNotDeployed(
                u'Service {0} is not deployed.'.format(service_id))

        provider_details = self._get_provider_details(project_id, service_id)

        # change each provider detail's status to
        # cache_invalidation_in_progress if it's a soft invalidation,
        # i.e. hard is set to False
        if not hard:
            for provider in service_obj.provider_details:
                service_obj.provider_details[provider].status = (
                    u'update_in_progress')

        self.storage_controller.update_service(project_id, service_id,
                                               service_obj)

        # possible validation of purge url here...
        kwargs = {
            'service_obj': json.dumps(service_obj.to_dict()),
            'provider_details': json.dumps(
                dict([(k, v.to_dict()) for k, v in provider_details.items()])),
            'project_id': project_id,
            'hard': json.dumps(hard),
            'service_id': service_id,
            'purge_url': str(purge_url),
            'context_dict': context_utils.get_current().to_dict()
        }

        self.distributed_task_controller.submit_task(
            purge_service.purge_service, **kwargs)

        return
Example #56
    def test_get_os_admin_context_deprecated(self, password_plugin):
        conf = config_fixture.Config()
        clients._admin_session = None
        conf.config(auth_type=None, group=GROUP_AUTHTOKEN)
        conf.config(admin_user='******',
                    admin_password='******',
                    admin_tenant_name='service')

        imp.reload(ec2_context)
        # NOTE(ft): initialize a regular context to populate oslo_context's
        # local storage and prevent the admin context from populating it.
        # This implicitly validates the overwrite=False argument of the
        # RequestContext constructor call inside get_os_admin_context.
        if not context.get_current():
            ec2_context.RequestContext(None, None)

        ctx = ec2_context.get_os_admin_context()
        conf = cfg.CONF
        password_plugin.assert_called_once_with(
            username=conf.admin_user,
            password=conf.admin_password,
            tenant_name=conf.admin_tenant_name,
            project_name=conf.admin_tenant_name,
            auth_url=conf.keystone_url)
        self.assertIsNone(ctx.user_id)
        self.assertIsNone(ctx.project_id)
        self.assertIsNone(ctx.auth_token)
        self.assertEqual([], ctx.service_catalog)
        self.assertTrue(ctx.is_os_admin)
        self.assertIsNotNone(ctx.session)
        self.assertIsNotNone(ctx.session.auth)
        self.assertNotEqual(context.get_current(), ctx)

        password_plugin.reset_mock()
        ec2_context.get_os_admin_context()
        self.assertFalse(password_plugin.called)
Example #57
    def submit(self, fn, *args, **kwargs):
        context = ctx.get_current()
        # Get the list of locks held by this thread; we don't want sub
        # tasks locking the same thing!
        held_locks = list(_get_locks())

        def wrapped():
            # This is executed in the new thread.
            if context is not None:
                context.update_store()
            # Ensure the sub task knows about the parent's locks and doesn't
            # block on them.
            _set_locks(held_locks)
            return fn(*args, **kwargs)
        return super(ContextThreadPoolExecutor, self).submit(wrapped)
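A hedged sketch of the same propagation idea with a plain concurrent.futures executor (the lock-tracking part is omitted):

from concurrent import futures
from oslo_context import context as ctx

def submit_with_context(executor, fn, *args, **kwargs):
    current = ctx.get_current()             # captured in the calling thread

    def wrapped():
        if current is not None:
            current.update_store()          # restored in the worker thread
        return fn(*args, **kwargs)

    return executor.submit(wrapped)

# Usage: with futures.ThreadPoolExecutor(max_workers=4) as pool:
#            submit_with_context(pool, some_callable)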
Example #58
    def validate(self):
        self._untar_file(self.SYSTEM_FILES)

        if self.MODE not in ('controller', 'node', 'all'):
            LOG.error('Invalid mode value (%s)' % self.MODE)
            exit(constants.ERROR_MODE)

        if self.MODE != 'node' and not self.BACKEND_CONFIG:
            LOG.error('Missing required backend configuration')
            exit(constants.ERROR_MISSING_BACKEND)

        if self.DEFAULT_MOUNT_FS not in self.SUPPORTED_FS_TYPES:
            LOG.error('Invalid default mount filesystem %s' %
                      self.DEFAULT_MOUNT_FS)
            exit(constants.ERROR_FS_TYPE)

        if not isinstance(self.WORKERS, int) or self.WORKERS < 1:
            LOG.error('grpc_workers must be a positive integer number')
            exit(constants.ERROR_WORKERS)

        # Accept spaces and a v prefix on CSI spec version
        spec_version = self.CSI_SPEC.strip()
        if spec_version.startswith('v'):
            spec_version = spec_version[1:]

        # Support x, x.y, and x.y.z versioning, but convert it to x.y.z
        if '.' not in spec_version:
            spec_version += '.0'
        spec_version = version.StrictVersion(spec_version)
        spec_version = '%s.%s.%s' % spec_version.version

        if spec_version not in constants.SUPPORTED_SPEC_VERSIONS:
            LOG.error(
                'CSI spec %s not in supported versions: %s' %
                (self.CSI_SPEC, ', '.join(constants.SUPPORTED_SPEC_VERSIONS)))
            exit(constants.ERROR_CSI_SPEC)

        # Store version in x.y.z formatted string
        self.CSI_SPEC = spec_version
        self.NAME, self.PROJECT_NAME = self._get_names(spec_version,
                                                       self.PLUGIN_NAME)
        context_utils.get_current().project_name = self.PROJECT_NAME

        self._map_backend_config(self.BACKEND_CONFIG)
        self._set_topology_config()
        self._create_default_dirs_files()
Example #59
    def wait_for_task(self, task):
        """Waits for the given task to complete and returns the result.

        The task is polled until it is done. The method returns the task
        information upon successful completion. In case of any error, an
        appropriate exception is raised.

        :param task: managed object reference of the task
        :returns: task info upon successful completion of the task
        :raises: VimException, VimFaultException, VimAttributeException,
                 VimSessionOverLoadException, VimConnectionException
        """
        ctx = context.get_current()
        loop = loopingcall.FixedIntervalLoopingCall(self._poll_task, task, ctx)
        evt = loop.start(self._task_poll_interval)
        LOG.debug("Waiting for the task: %s to complete.", task)
        return evt.wait()
Example #60
    def member_add(self, node, lb_id, pool_id, port, subnet):
        """Add a member to Neutron lbaas pool.

        :param node: A node object to be added to the specified pool.
        :param lb_id: The ID of the loadbalancer.
        :param pool_id: The ID of the pool for receiving the node.
        :param port: The port for the new LB member to be created.
        :param subnet: The subnet to be used by the new LB member.
        :returns: The ID of the new LB member or None if errors occurred.
        """
        try:
            subnet_obj = self.nc().subnet_get(subnet)
            net_id = subnet_obj.network_id
            net = self.nc().network_get(net_id)
        except exception.InternalError as ex:
            resource = 'subnet' if subnet in ex.message else 'network'
            msg = _LE('Failed in getting %(resource)s: %(msg)s.'
                      ) % {'resource': resource, 'msg': six.text_type(ex)}
            LOG.exception(msg)
            return None
        net_name = net.name

        node_detail = node.get_details(oslo_context.get_current())
        addresses = node_detail.get('addresses')
        if net_name not in addresses:
            LOG.error(_LE('Node is not in subnet %(subnet)s'),
                      {'subnet': subnet})
            return None

        # Use the first IP address if more than one is found in target network
        address = addresses[net_name][0]
        try:
            member = self.nc().pool_member_create(pool_id, address, port,
                                                  subnet_obj.id)
        except exception.InternalError as ex:
            msg = _LE('Failed in creating lb pool member: %s.'
                      ) % six.text_type(ex)
            LOG.exception(msg)
            return None
        res = self._wait_for_lb_ready(lb_id)
        if res is False:
            LOG.error(_LE('Failed in creating pool member (%s).') % member.id)
            return None

        return member.id