Example #1
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        ctx = context.make_admin_context()

        bay1 = utils.get_test_bay(id=1, stack_id='11',
                                  status=bay_status.CREATE_IN_PROGRESS)
        bay2 = utils.get_test_bay(id=2, stack_id='22',
                                  status=bay_status.DELETE_IN_PROGRESS)
        bay3 = utils.get_test_bay(id=3, stack_id='33',
                                  status=bay_status.UPDATE_IN_PROGRESS)
        bay4 = utils.get_test_bay(id=4, stack_id='44',
                                  status=bay_status.CREATE_COMPLETE)

        self.bay1 = objects.Bay(ctx, **bay1)
        self.bay2 = objects.Bay(ctx, **bay2)
        self.bay3 = objects.Bay(ctx, **bay3)
        self.bay4 = objects.Bay(ctx, **bay4)

        mock_magnum_service_refresh = mock.Mock()

        class FakeMS(object):
            report_state_up = mock_magnum_service_refresh

        self.fake_ms = FakeMS()
        self.fake_ms_refresh = mock_magnum_service_refresh
Example #2
def is_octavia_enabled():
    """Check if Octavia service is deployed in the cloud.

    Octavia is already an official LBaaS solution for Openstack
    (https://governance.openstack.org/tc/reference/projects/octavia.html) and
    will deprecate the neutron-lbaas extension starting from Queens release.

    We use Octavia instead of Neutron LBaaS API for load balancing
    functionality for k8s cluster if Octavia service is deployed and enabled
    in the cloud.
    """
    # Put the import here to avoid circular importing.
    from magnum.common import context
    admin_context = context.make_admin_context()
    keystone = KeystoneClientV3(admin_context)

    try:
        octavia_svc = keystone.client.services.list(type='load-balancer')
    except Exception:
        LOG.exception('Failed to list services')
        raise exception.ServicesListFailed()

    # Always assume there is only one load balancing service configured.
    if octavia_svc and octavia_svc[0].enabled:
        return True

    return False
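
A minimal usage sketch: a caller branches on the result to pick the load balancing backend. The helper name get_lb_template_params and the 'octavia_enabled' key are hypothetical, not names taken from Magnum.

def get_lb_template_params():
    # Hypothetical helper: choose the LB backend for template parameters.
    params = {}
    if is_octavia_enabled():
        params['octavia_enabled'] = True   # use the Octavia v2 LB API
    else:
        params['octavia_enabled'] = False  # fall back to neutron-lbaas
    return params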
Example #3
def setUp(self):
    self.certificate = "My Certificate"
    self.intermediates = "My Intermediates"
    self.private_key = "My Private Key"
    self.private_key_passphrase = "My Private Key Passphrase"
    self.context = context.make_admin_context()
    super(TestX509keypairManager, self).setUp()
Example #4
def add_policy_attributes(target):
    """Adds extra information for policy enforcement to raw target object"""
    admin_context = context.make_admin_context()
    admin_osc = clients.OpenStackClients(admin_context)
    trustee_domain_id = admin_osc.keystone().trustee_domain_id
    target['trustee_domain_id'] = trustee_domain_id
    return target
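
A short usage sketch of add_policy_attributes; the target keys shown are assumptions for illustration, not a fixed schema.

# Hypothetical usage: enrich a raw policy target before rule evaluation.
target = {'project_id': 'proj1', 'user_id': 'user1'}
target = add_policy_attributes(target)
# target now also carries 'trustee_domain_id', so policy rules can
# distinguish per-cluster trustee users from regular project users.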
Example #5
def setUp(self):
    self.certificate = "My Certificate"
    self.intermediates = "My Intermediates"
    self.private_key = "My Private Key"
    self.private_key_passphrase = "My Private Key Passphrase"
    self.context = context.make_admin_context()
    super(TestX509keypairManager, self).setUp()
Example #6
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        ctx = context.make_admin_context()

        bay1 = utils.get_test_bay(id=1,
                                  stack_id='11',
                                  status=bay_status.CREATE_IN_PROGRESS)
        bay2 = utils.get_test_bay(id=2,
                                  stack_id='22',
                                  status=bay_status.DELETE_IN_PROGRESS)
        bay3 = utils.get_test_bay(id=3,
                                  stack_id='33',
                                  status=bay_status.UPDATE_IN_PROGRESS)
        bay4 = utils.get_test_bay(id=4,
                                  stack_id='44',
                                  status=bay_status.CREATE_COMPLETE)

        self.bay1 = objects.Bay(ctx, **bay1)
        self.bay2 = objects.Bay(ctx, **bay2)
        self.bay3 = objects.Bay(ctx, **bay3)
        self.bay4 = objects.Bay(ctx, **bay4)

        mock_magnum_service_refresh = mock.Mock()

        class FakeMS(object):
            report_state_up = mock_magnum_service_refresh

        self.fake_ms = FakeMS()
        self.fake_ms_refresh = mock_magnum_service_refresh
Example #7
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        ctx = context.make_admin_context()

        # Can be identical for all bays.
        trust_attrs = {
            'trustee_username': '******',
            'trustee_password': '******',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
            }

        trust_attrs.update({'id': 1, 'stack_id': '11',
                           'status': bay_status.CREATE_IN_PROGRESS})
        bay1 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({'id': 2, 'stack_id': '22',
                           'status': bay_status.DELETE_IN_PROGRESS})
        bay2 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({'id': 3, 'stack_id': '33',
                           'status': bay_status.UPDATE_IN_PROGRESS})
        bay3 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({'id': 4, 'stack_id': '44',
                           'status': bay_status.CREATE_COMPLETE})
        bay4 = utils.get_test_bay(**trust_attrs)

        self.bay1 = objects.Bay(ctx, **bay1)
        self.bay2 = objects.Bay(ctx, **bay2)
        self.bay3 = objects.Bay(ctx, **bay3)
        self.bay4 = objects.Bay(ctx, **bay4)
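
Reusing one trust_attrs dict works because **trust_attrs unpacks a snapshot of the dict at call time, so the later update calls cannot reach back into bays already built. A standalone sketch of the pattern, where make_record is a hypothetical stand-in for get_test_bay:

def make_record(**kwargs):
    # **kwargs already holds a copy of the caller's dict contents.
    return dict(kwargs)

template = {'trust_id': 'abc'}
template.update({'id': 1, 'status': 'CREATE_IN_PROGRESS'})
rec1 = make_record(**template)
template.update({'id': 2, 'status': 'DELETE_IN_PROGRESS'})
rec2 = make_record(**template)
assert rec1['id'] == 1 and rec2['id'] == 2  # rec1 keeps its own values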
Example #8
def test_get_bay_list_by_admin_all_tenants(self):
    uuids = []
    for i in range(1, 6):
        bay = utils.create_test_bay(uuid=uuidutils.generate_uuid(),
                                    project_id=uuidutils.generate_uuid(),
                                    user_id=uuidutils.generate_uuid())
        uuids.append(six.text_type(bay['uuid']))
    ctx = context.make_admin_context(all_tenants=True)
    res = self.dbapi.get_bay_list(ctx)
    res_uuids = [r.uuid for r in res]
    self.assertEqual(sorted(uuids), sorted(res_uuids))
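
For context, make_admin_context plausibly builds an admin request context with no user or project scoping. The following is a sketch consistent with how these tests call it, built on oslo.context; it is an assumption, not Magnum's actual module.

from oslo_context import context as oslo_context

def make_admin_context(show_deleted=False, all_tenants=False):
    # Sketch: an admin context with no user/project scoping. 'all_tenants'
    # is the flag _add_tenant_filters (Examples #16/#17) checks before
    # skipping the per-project filter.
    ctx = oslo_context.RequestContext(is_admin=True,
                                      show_deleted=show_deleted,
                                      overwrite=False)
    ctx.all_tenants = all_tenants
    return ctx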
Example #9
def test_get_bay_list_by_admin_all_tenants(self):
    uuids = []
    for i in range(1, 6):
        bay = utils.create_test_bay(
            uuid=magnum_utils.generate_uuid(),
            project_id=magnum_utils.generate_uuid(),
            user_id=magnum_utils.generate_uuid())
        uuids.append(six.text_type(bay['uuid']))
    ctx = context.make_admin_context()
    res = self.dbapi.get_bay_list(ctx, opts={'get_all_tenants': True})
    res_uuids = [r.uuid for r in res]
    self.assertEqual(sorted(uuids), sorted(res_uuids))
Example #10
def test_get_cluster_list_by_admin_all_tenants(self):
    uuids = []
    for i in range(1, 6):
        cluster = utils.create_test_cluster(
            uuid=uuidutils.generate_uuid(),
            project_id=uuidutils.generate_uuid(),
            user_id=uuidutils.generate_uuid())
        uuids.append(six.text_type(cluster['uuid']))
    ctx = context.make_admin_context(all_tenants=True)
    res = self.dbapi.get_cluster_list(ctx)
    res_uuids = [r.uuid for r in res]
    self.assertEqual(sorted(uuids), sorted(res_uuids))
Example #11
def test_get_x509keypair_list_by_admin_all_tenants(self):
    uuids = []
    for i in range(1, 6):
        x509keypair = utils.create_test_x509keypair(
            uuid=magnum_utils.generate_uuid(),
            project_id=magnum_utils.generate_uuid(),
            user_id=magnum_utils.generate_uuid())
        uuids.append(six.text_type(x509keypair['uuid']))
    ctx = context.make_admin_context()
    res = self.dbapi.get_x509keypair_list(ctx,
                                          opts={'get_all_tenants': True})
    res_uuids = [r.uuid for r in res]
    self.assertEqual(sorted(uuids), sorted(res_uuids))
Example #12
    def test_get_federation_list_by_admin_all_tenants(self):
        uuids = []
        for _ in range(5):
            federation = utils.create_test_federation(
                uuid=uuidutils.generate_uuid(),
                project_id=uuidutils.generate_uuid())
            uuids.append(six.text_type(federation['uuid']))

        ctx = context.make_admin_context(all_tenants=True)
        res = self.dbapi.get_federation_list(ctx)
        res_uuids = [r.uuid for r in res]
        self.assertEqual(len(res), 5)
        self.assertEqual(sorted(uuids), sorted(res_uuids))
Example #13
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        ctx = context.make_admin_context()

        bay1 = utils.get_test_bay(id=1, stack_id='11',
                                  status=bay_status.CREATE_IN_PROGRESS)
        bay2 = utils.get_test_bay(id=2, stack_id='22',
                                  status=bay_status.DELETE_IN_PROGRESS)
        bay3 = utils.get_test_bay(id=3, stack_id='33',
                                  status=bay_status.UPDATE_IN_PROGRESS)

        self.bay1 = objects.Bay(ctx, **bay1)
        self.bay2 = objects.Bay(ctx, **bay2)
        self.bay3 = objects.Bay(ctx, **bay3)
Example #14
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        ctx = context.make_admin_context()

        # Can be identical for all bays.
        trust_attrs = {
            'trustee_username': '******',
            'trustee_password': '******',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
        }

        trust_attrs.update({
            'id': 1,
            'stack_id': '11',
            'status': bay_status.CREATE_IN_PROGRESS
        })
        bay1 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({
            'id': 2,
            'stack_id': '22',
            'status': bay_status.DELETE_IN_PROGRESS
        })
        bay2 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({
            'id': 3,
            'stack_id': '33',
            'status': bay_status.UPDATE_IN_PROGRESS
        })
        bay3 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({
            'id': 4,
            'stack_id': '44',
            'status': bay_status.CREATE_COMPLETE
        })
        bay4 = utils.get_test_bay(**trust_attrs)
        trust_attrs.update({
            'id': 5,
            'stack_id': '55',
            'status': bay_status.ROLLBACK_IN_PROGRESS
        })
        bay5 = utils.get_test_bay(**trust_attrs)

        self.bay1 = objects.Bay(ctx, **bay1)
        self.bay2 = objects.Bay(ctx, **bay2)
        self.bay3 = objects.Bay(ctx, **bay3)
        self.bay4 = objects.Bay(ctx, **bay4)
        self.bay5 = objects.Bay(ctx, **bay5)
Example #15
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        ctx = context.make_admin_context()

        bay1 = utils.get_test_bay(id=1,
                                  stack_id='11',
                                  status=bay_status.CREATE_IN_PROGRESS)
        bay2 = utils.get_test_bay(id=2,
                                  stack_id='22',
                                  status=bay_status.DELETE_IN_PROGRESS)
        bay3 = utils.get_test_bay(id=3,
                                  stack_id='33',
                                  status=bay_status.UPDATE_IN_PROGRESS)

        self.bay1 = objects.Bay(ctx, **bay1)
        self.bay2 = objects.Bay(ctx, **bay2)
        self.bay3 = objects.Bay(ctx, **bay3)
Example #16
    def _add_tenant_filters(self, context, query):
        if context.is_admin and context.all_tenants:
            return query

        admin_context = request_context.make_admin_context(all_tenants=True)
        osc = clients.OpenStackClients(admin_context)
        kst = osc.keystone()

        # User in a regular project (not in the trustee domain)
        if context.project_id and context.domain_id != kst.trustee_domain_id:
            query = query.filter_by(project_id=context.project_id)
        # Match project ID component in trustee user's user name against
        # cluster's project_id to associate per-cluster trustee users who have
        # no project information with the project their clusters/cluster models
        # reside in. This is equivalent to the project filtering above.
        elif context.domain_id == kst.trustee_domain_id:
            user_name = kst.client.users.get(context.user_id).name
            user_project = user_name.split('_', 2)[1]
            query = query.filter_by(project_id=user_project)
        else:
            query = query.filter_by(user_id=context.user_id)

        return query
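
To make the user_name.split('_', 2)[1] step concrete: assuming trustee user names of the form '<cluster_uuid>_<project_id>' (an assumption about the naming convention; only the position of the project ID matters here), the second underscore-separated component is the project to filter by.

# Assumed trustee user name layout: '<cluster_uuid>_<project_id>'.
# UUIDs contain hyphens, never underscores, so the first '_' is the split.
user_name = '39d920ca-67c6-4047-b57a-01e9e16bb96f_proj1'
user_project = user_name.split('_', 2)[1]
assert user_project == 'proj1'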
Example #17
    def _add_tenant_filters(self, context, query):
        if context.is_admin and context.all_tenants:
            return query

        admin_context = request_context.make_admin_context(all_tenants=True)
        osc = clients.OpenStackClients(admin_context)
        kst = osc.keystone()

        # User in a regular project (not in the trustee domain)
        if context.project_id and context.domain_id != kst.trustee_domain_id:
            query = query.filter_by(project_id=context.project_id)
        # Match project ID component in trustee user's user name against
        # cluster's project_id to associate per-cluster trustee users who have
        # no project information with the project their clusters/cluster models
        # reside in. This is equivalent to the project filtering above.
        elif context.domain_id == kst.trustee_domain_id:
            user_name = kst.client.users.get(context.user_id).name
            user_project = user_name.split('_', 2)[1]
            query = query.filter_by(project_id=user_project)
        else:
            query = query.filter_by(user_id=context.user_id)

        return query
Example #18
def handler(self, ctx):
    ctx = context.make_admin_context()
    context.set_ctx(ctx)
    func(self, ctx)
    context.set_ctx(None)
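
This handler reads like the inner function of a decorator that runs a periodic task under a fresh admin context. A sketch of the likely wrapper follows; the name set_context and the exact shape are assumptions, and it presumes `from magnum.common import context` as in Example #2.

import functools

def set_context(func):
    # Assumed decorator shape around the handler shown above.
    @functools.wraps(func)
    def handler(self, ctx):
        ctx = context.make_admin_context()
        context.set_ctx(ctx)    # install the context for this run
        func(self, ctx)
        context.set_ctx(None)   # clear it so nothing leaks between runs
    return handler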
Example #19
def handler(self, ctx):
    ctx = context.make_admin_context(all_tenants=True)
    context.set_ctx(ctx)
    func(self, ctx)
    context.set_ctx(None)
Example #20
def handler(self, ctx):
    ctx = context.make_admin_context(all_tenants=True)
    context.set_ctx(ctx)
    func(self, ctx)
    context.set_ctx(None)
Example #21
def test_request_context_sets_is_admin(self):
    ctxt = magnum_context.make_admin_context()
    self.assertTrue(ctxt.is_admin)
Example #22
def test_request_context_sets_is_admin(self):
    ctxt = magnum_context.make_admin_context()
    self.assertEqual(ctxt.is_admin, True)
Example #23
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        self.context = context.make_admin_context()

        # Can be identical for all clusters.
        trust_attrs = {
            'trustee_username': '******',
            'trustee_password': '******',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
        }

        trust_attrs.update({
            'id': 1,
            'stack_id': '11',
            'status': cluster_status.CREATE_IN_PROGRESS,
            'status_reason': 'no change'
        })
        cluster1 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({
            'id': 2,
            'stack_id': '22',
            'status': cluster_status.DELETE_IN_PROGRESS,
            'status_reason': 'no change'
        })
        cluster2 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({
            'id': 3,
            'stack_id': '33',
            'status': cluster_status.UPDATE_IN_PROGRESS,
            'status_reason': 'no change'
        })
        cluster3 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({
            'id': 4,
            'stack_id': '44',
            'status': cluster_status.DELETE_IN_PROGRESS,
            'status_reason': 'no change'
        })
        cluster4 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({
            'id': 5,
            'stack_id': '55',
            'status': cluster_status.ROLLBACK_IN_PROGRESS,
            'status_reason': 'no change'
        })
        cluster5 = utils.get_test_cluster(**trust_attrs)

        self.cluster1 = objects.Cluster(self.context, **cluster1)
        self.cluster2 = objects.Cluster(self.context, **cluster2)
        self.cluster3 = objects.Cluster(self.context, **cluster3)
        self.cluster4 = objects.Cluster(self.context, **cluster4)
        self.cluster5 = objects.Cluster(self.context, **cluster5)

        # These tests are based on the basic behavior of our standard
        # Heat-based drivers, but drivers based on other orchestration
        # methods should generally behave in a similar fashion as far
        # as the actual calls go. It is up to the driver implementor
        # to ensure their implementation of update_cluster_status behaves
        # as expected regardless of how the periodic updater task works.
        self.mock_heat_client = mock.MagicMock()
        self.stack1 = fake_stack(id='11',
                                 stack_status=cluster_status.CREATE_COMPLETE,
                                 stack_status_reason='fake_reason_11')
        self.stack2 = fake_stack(
            id='22',
            stack_status=cluster_status.DELETE_IN_PROGRESS,
            stack_status_reason='fake_reason_11')
        self.stack3 = fake_stack(id='33',
                                 stack_status=cluster_status.UPDATE_COMPLETE,
                                 stack_status_reason='fake_reason_33')
        self.stack5 = fake_stack(id='55',
                                 stack_status=cluster_status.ROLLBACK_COMPLETE,
                                 stack_status_reason='fake_reason_55')
        self.mock_heat_client.stacks.list.return_value = [
            self.stack1, self.stack2, self.stack3, self.stack5
        ]

        self.get_stacks = {
            '11': self.stack1,
            '22': self.stack2,
            '33': self.stack3,
            '55': self.stack5
        }

        self.mock_driver = mock.MagicMock(spec=driver.Driver)

        def _mock_update_status(context, cluster):
            try:
                stack = self.get_stacks[cluster.stack_id]
            except KeyError:
                cluster.status_reason = "Stack %s not found" % cluster.stack_id
                if cluster.status == "DELETE_IN_PROGRESS":
                    cluster.status = cluster_status.DELETE_COMPLETE
                else:
                    cluster.status = cluster.status.replace(
                        "IN_PROGRESS", "FAILED")
                    cluster.status = cluster.status.replace(
                        "COMPLETE", "FAILED")
            else:
                if cluster.status != stack.stack_status:
                    cluster.status = stack.stack_status
                    cluster.status_reason = stack.stack_status_reason

        self.mock_driver.update_cluster_status.side_effect = (
            _mock_update_status)
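
As a concrete trace of the KeyError branch in _mock_update_status: cluster4 has stack_id '44', which is absent from get_stacks, and its status is DELETE_IN_PROGRESS, so the fake driver marks it DELETE_COMPLETE. A standalone sketch of that rule:

# Standalone illustration of the fixture's status-transition rules.
get_stacks = {'11': 'stack1', '22': 'stack2'}   # '44' missing, as above
status = 'DELETE_IN_PROGRESS'
try:
    stack = get_stacks['44']
except KeyError:
    if status == 'DELETE_IN_PROGRESS':
        status = 'DELETE_COMPLETE'      # a vanished stack: delete finished
    else:
        status = status.replace('IN_PROGRESS', 'FAILED')
assert status == 'DELETE_COMPLETE'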
Example #24
def handler(self, ctx):
    ctx = context.make_admin_context()
    context.set_ctx(ctx)
    func(self, ctx)
    context.set_ctx(None)
Example #25
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        self.context = context.make_admin_context()

        # Can be identical for all clusters.
        trust_attrs = {
            'trustee_username': '******',
            'trustee_password': '******',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
            }

        trust_attrs.update({'id': 1, 'stack_id': '11',
                            'status': cluster_status.CREATE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster1 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({'id': 2, 'stack_id': '22',
                            'status': cluster_status.DELETE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster2 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({'id': 3, 'stack_id': '33',
                            'status': cluster_status.UPDATE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster3 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({'id': 4, 'stack_id': '44',
                            'status': cluster_status.DELETE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster4 = utils.get_test_cluster(**trust_attrs)
        trust_attrs.update({'id': 5, 'stack_id': '55',
                            'status': cluster_status.ROLLBACK_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster5 = utils.get_test_cluster(**trust_attrs)

        self.cluster1 = objects.Cluster(self.context, **cluster1)
        self.cluster2 = objects.Cluster(self.context, **cluster2)
        self.cluster3 = objects.Cluster(self.context, **cluster3)
        self.cluster4 = objects.Cluster(self.context, **cluster4)
        self.cluster5 = objects.Cluster(self.context, **cluster5)

        # These tests are based on the basic behavior of our standard
        # Heat-based drivers, but drivers based on other orchestration
        # methods should generally behave in a similar fashion as far
        # as the actual calls go. It is up to the driver implementor
        # to ensure their implementation of update_cluster_status behaves
        # as expected regardless of how the periodic updater task works.
        self.mock_heat_client = mock.MagicMock()
        self.stack1 = fake_stack(
            id='11', stack_status=cluster_status.CREATE_COMPLETE,
            stack_status_reason='fake_reason_11')
        self.stack2 = fake_stack(
            id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
            stack_status_reason='fake_reason_11')
        self.stack3 = fake_stack(
            id='33', stack_status=cluster_status.UPDATE_COMPLETE,
            stack_status_reason='fake_reason_33')
        self.stack5 = fake_stack(
            id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
            stack_status_reason='fake_reason_55')
        self.mock_heat_client.stacks.list.return_value = [
            self.stack1, self.stack2, self.stack3, self.stack5]

        self.get_stacks = {
            '11': self.stack1,
            '22': self.stack2,
            '33': self.stack3,
            '55': self.stack5
        }

        self.mock_driver = mock.MagicMock(spec=driver.Driver)

        def _mock_update_status(context, cluster):
            try:
                stack = self.get_stacks[cluster.stack_id]
            except KeyError:
                cluster.status_reason = "Stack %s not found" % cluster.stack_id
                if cluster.status == "DELETE_IN_PROGRESS":
                    cluster.status = cluster_status.DELETE_COMPLETE
                else:
                    cluster.status = cluster.status.replace("IN_PROGRESS",
                                                            "FAILED")
                    cluster.status = cluster.status.replace("COMPLETE",
                                                            "FAILED")
            else:
                if cluster.status != stack.stack_status:
                    cluster.status = stack.stack_status
                    cluster.status_reason = stack.stack_status_reason

        self.mock_driver.update_cluster_status.side_effect = (
            _mock_update_status)
Example #26
    def test_get_federation_list_with_filters(self):
        fed1 = utils.create_test_federation(
            id=1,
            uuid=uuidutils.generate_uuid(),
            name='fed1',
            project_id='proj1',
            hostcluster_id='master1',
            member_ids=['member1', 'member2'],
            properties={'dns-zone': 'fed1.com.'})

        fed2 = utils.create_test_federation(
            id=2,
            uuid=uuidutils.generate_uuid(),
            name='fed',
            project_id='proj2',
            hostcluster_id='master2',
            member_ids=['member3', 'member4'],
            properties={"dns-zone": "fed2.com."})

        # NOTE(clenimar): we are specifying a project_id for the test
        # resources above, which means that our current context
        # (self.context) will not be able to see these resources.
        # Create an admin context in order to test the queries:
        ctx = context.make_admin_context(all_tenants=True)

        # Filter by name:
        res = self.dbapi.get_federation_list(ctx, filters={'name': 'fed1'})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx, filters={'name': 'foo'})
        self.assertEqual([], [r.id for r in res])

        # Filter by project_id
        res = self.dbapi.get_federation_list(ctx,
                                             filters={'project_id': 'proj1'})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx,
                                             filters={'project_id': 'foo'})
        self.assertEqual([], [r.id for r in res])

        # Filter by hostcluster_id
        res = self.dbapi.get_federation_list(
            ctx, filters={'hostcluster_id': 'master1'})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(
            ctx, filters={'hostcluster_id': 'master2'})
        self.assertEqual([fed2.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx,
                                             filters={'hostcluster_id': 'foo'})
        self.assertEqual([], [r.id for r in res])

        # Filter by member_ids (note that this is currently implemented as
        # an exact match, so it only returns federations whose member
        # clusters are exactly those passed in the filter).
        res = self.dbapi.get_federation_list(
            ctx, filters={'member_ids': ['member1', 'member2']})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx,
                                             filters={'member_ids': ['foo']})
        self.assertEqual([], [r.id for r in res])

        # Filter by properties
        res = self.dbapi.get_federation_list(
            ctx, filters={'properties': {
                'dns-zone': 'fed2.com.'
            }})
        self.assertEqual([fed2.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(
            ctx, filters={'properties': {
                'dns-zone': 'foo.bar.'
            }})
        self.assertEqual([], [r.id for r in res])
Example #27
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        self.context = context.make_admin_context()

        # Can be identical for all clusters.
        trust_attrs = {
            'trustee_username': '******',
            'trustee_password': '******',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
        }

        uuid = uuidutils.generate_uuid()
        trust_attrs.update({
            'id': 1,
            'stack_id': '11',
            'uuid': uuid,
            'status': cluster_status.CREATE_IN_PROGRESS,
            'status_reason': 'no change',
            'keypair': 'keipair1',
            'health_status': None
        })
        cluster1 = utils.get_test_cluster(**trust_attrs)
        ngs1 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({
            'id': 2,
            'stack_id': '22',
            'uuid': uuid,
            'status': cluster_status.DELETE_IN_PROGRESS,
            'status_reason': 'no change',
            'keypair': 'keipair1',
            'health_status': None
        })
        cluster2 = utils.get_test_cluster(**trust_attrs)
        ngs2 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({
            'id': 3,
            'stack_id': '33',
            'uuid': uuid,
            'status': cluster_status.UPDATE_IN_PROGRESS,
            'status_reason': 'no change',
            'keypair': 'keipair1',
            'health_status': None
        })
        cluster3 = utils.get_test_cluster(**trust_attrs)
        ngs3 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({
            'id': 4,
            'stack_id': '44',
            'uuid': uuid,
            'status': cluster_status.DELETE_IN_PROGRESS,
            'status_reason': 'no change',
            'keypair': 'keipair1',
            'health_status': None
        })
        cluster4 = utils.get_test_cluster(**trust_attrs)
        ngs4 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({
            'id': 5,
            'stack_id': '55',
            'uuid': uuid,
            'status': cluster_status.ROLLBACK_IN_PROGRESS,
            'status_reason': 'no change',
            'keypair': 'keipair1',
            'health_status': None
        })
        cluster5 = utils.get_test_cluster(**trust_attrs)
        ngs5 = utils.get_nodegroups_for_cluster()

        self.nodegroups1 = [
            objects.NodeGroup(self.context, **ngs1['master']),
            objects.NodeGroup(self.context, **ngs1['worker'])
        ]
        self.nodegroups2 = [
            objects.NodeGroup(self.context, **ngs2['master']),
            objects.NodeGroup(self.context, **ngs2['worker'])
        ]
        self.nodegroups3 = [
            objects.NodeGroup(self.context, **ngs3['master']),
            objects.NodeGroup(self.context, **ngs3['worker'])
        ]
        self.nodegroups4 = [
            objects.NodeGroup(self.context, **ngs4['master']),
            objects.NodeGroup(self.context, **ngs4['worker'])
        ]
        self.nodegroups5 = [
            objects.NodeGroup(self.context, **ngs5['master']),
            objects.NodeGroup(self.context, **ngs5['worker'])
        ]

        self.cluster1 = objects.Cluster(self.context, **cluster1)
        self.cluster2 = objects.Cluster(self.context, **cluster2)
        self.cluster3 = objects.Cluster(self.context, **cluster3)
        self.cluster4 = objects.Cluster(self.context, **cluster4)
        self.cluster5 = objects.Cluster(self.context, **cluster5)

        # This is used to mock get_cluster_nodegroups from magnum.db.api.
        # It's not the greatest way to do it, but we have to populate the
        # dictionary at runtime (or have statically defined uuids per NG).
        global cluster_ngs
        cluster_ngs = {
            self.cluster1.uuid: self.nodegroups1,
            self.cluster2.uuid: self.nodegroups2,
            self.cluster3.uuid: self.nodegroups3,
            self.cluster4.uuid: self.nodegroups4,
            self.cluster5.uuid: self.nodegroups5
        }

        # These tests are based on the basic behavior of our standard
        # Heat-based drivers, but drivers based on other orchestration
        # methods should generally behave in a similar fashion as far
        # as the actual calls go. It is up to the driver implementor
        # to ensure their implementation of update_cluster_status behaves
        # as expected regardless of how the periodic updater task works.
        self.mock_heat_client = mock.MagicMock()
        self.stack1 = fake_stack(id='11',
                                 stack_status=cluster_status.CREATE_COMPLETE,
                                 stack_status_reason='fake_reason_11')
        self.stack2 = fake_stack(
            id='22',
            stack_status=cluster_status.DELETE_IN_PROGRESS,
            stack_status_reason='fake_reason_11')
        self.stack3 = fake_stack(id='33',
                                 stack_status=cluster_status.UPDATE_COMPLETE,
                                 stack_status_reason='fake_reason_33')
        self.stack5 = fake_stack(id='55',
                                 stack_status=cluster_status.ROLLBACK_COMPLETE,
                                 stack_status_reason='fake_reason_55')
        self.mock_heat_client.stacks.list.return_value = [
            self.stack1, self.stack2, self.stack3, self.stack5
        ]

        self.get_stacks = {
            '11': self.stack1,
            '22': self.stack2,
            '33': self.stack3,
            '55': self.stack5
        }

        self.mock_driver = mock.MagicMock(spec=driver.Driver)

        def _mock_update_status(context, cluster):
            try:
                stack = self.get_stacks[cluster.stack_id]
            except KeyError:
                cluster.status_reason = "Stack %s not found" % cluster.stack_id
                if cluster.status == "DELETE_IN_PROGRESS":
                    cluster.status = cluster_status.DELETE_COMPLETE
                else:
                    cluster.status = cluster.status.replace(
                        "IN_PROGRESS", "FAILED")
                    cluster.status = cluster.status.replace(
                        "COMPLETE", "FAILED")
            else:
                if cluster.status != stack.stack_status:
                    cluster.status = stack.stack_status
                    cluster.status_reason = stack.stack_status_reason

        self.mock_driver.update_cluster_status.side_effect = (
            _mock_update_status)
Example #28
from oslo_config import cfg

from magnum.common import context
from magnum import objects
from magnum.objects.fields import BayStatus as bay_status  # assumed import path
from magnum.service import periodic
from magnum.tests import base
from magnum.tests.unit.db import utils

periodic_opts = [
    cfg.BoolOpt('periodic_enable',
                default=True,
                help='Enable periodic tasks.'),
    cfg.IntOpt('periodic_interval_max',
               default=60,
               help='Max interval size between periodic tasks execution in '
                    'seconds.'),
]


ctx = context.make_admin_context()

bay1 = utils.get_test_bay(id=1, stack_id='11',
                          status=bay_status.CREATE_IN_PROGRESS)
bay2 = utils.get_test_bay(id=2, stack_id='22',
                          status=bay_status.DELETE_IN_PROGRESS)
bay3 = utils.get_test_bay(id=3, stack_id='33',
                          status=bay_status.UPDATE_IN_PROGRESS)

bay1 = objects.Bay(ctx, **bay1)
bay2 = objects.Bay(ctx, **bay2)
bay3 = objects.Bay(ctx, **bay3)


class fake_stack(object):
    def __init__(self, **kw):
Example #29
    def setUp(self):
        super(PeriodicTestCase, self).setUp()

        self.context = context.make_admin_context()

        # Can be identical for all clusters.
        trust_attrs = {
            'trustee_username': '******',
            'trustee_password': '******',
            'trust_id': '39d920ca-67c6-4047-b57a-01e9e16bb96f',
            }

        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 1, 'stack_id': '11', 'uuid': uuid,
                            'status': cluster_status.CREATE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster1 = utils.get_test_cluster(**trust_attrs)
        ngs1 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 2, 'stack_id': '22', 'uuid': uuid,
                            'status': cluster_status.DELETE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster2 = utils.get_test_cluster(**trust_attrs)
        ngs2 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 3, 'stack_id': '33', 'uuid': uuid,
                            'status': cluster_status.UPDATE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster3 = utils.get_test_cluster(**trust_attrs)
        ngs3 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 4, 'stack_id': '44', 'uuid': uuid,
                            'status': cluster_status.DELETE_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster4 = utils.get_test_cluster(**trust_attrs)
        ngs4 = utils.get_nodegroups_for_cluster()
        uuid = uuidutils.generate_uuid()
        trust_attrs.update({'id': 5, 'stack_id': '55', 'uuid': uuid,
                            'status': cluster_status.ROLLBACK_IN_PROGRESS,
                            'status_reason': 'no change'})
        cluster5 = utils.get_test_cluster(**trust_attrs)
        ngs5 = utils.get_nodegroups_for_cluster()

        self.nodegroups1 = [
            objects.NodeGroup(self.context, **ngs1['master']),
            objects.NodeGroup(self.context, **ngs1['worker'])
        ]
        self.nodegroups2 = [
            objects.NodeGroup(self.context, **ngs2['master']),
            objects.NodeGroup(self.context, **ngs2['worker'])
        ]
        self.nodegroups3 = [
            objects.NodeGroup(self.context, **ngs3['master']),
            objects.NodeGroup(self.context, **ngs3['worker'])
        ]
        self.nodegroups4 = [
            objects.NodeGroup(self.context, **ngs4['master']),
            objects.NodeGroup(self.context, **ngs4['worker'])
        ]
        self.nodegroups5 = [
            objects.NodeGroup(self.context, **ngs5['master']),
            objects.NodeGroup(self.context, **ngs5['worker'])
        ]

        self.cluster1 = objects.Cluster(self.context, **cluster1)
        self.cluster2 = objects.Cluster(self.context, **cluster2)
        self.cluster3 = objects.Cluster(self.context, **cluster3)
        self.cluster4 = objects.Cluster(self.context, **cluster4)
        self.cluster5 = objects.Cluster(self.context, **cluster5)

        # This is used to mock get_cluster_nodegroups from magnum.db.api.
        # It's not the greatest way to do it, but we have to populate the
        # dictionary at runtime (or have statically defined uuids per NG).
        global cluster_ngs
        cluster_ngs = {
            self.cluster1.uuid: self.nodegroups1,
            self.cluster2.uuid: self.nodegroups2,
            self.cluster3.uuid: self.nodegroups3,
            self.cluster4.uuid: self.nodegroups4,
            self.cluster5.uuid: self.nodegroups5
        }

        # These tests are based on the basic behavior of our standard
        # Heat-based drivers, but drivers based on other orchestration
        # methods should generally behave in a similar fashion as far
        # as the actual calls go. It is up to the driver implementor
        # to ensure their implementation of update_cluster_status behaves
        # as expected regardless of how the periodic updater task works.
        self.mock_heat_client = mock.MagicMock()
        self.stack1 = fake_stack(
            id='11', stack_status=cluster_status.CREATE_COMPLETE,
            stack_status_reason='fake_reason_11')
        self.stack2 = fake_stack(
            id='22', stack_status=cluster_status.DELETE_IN_PROGRESS,
            stack_status_reason='fake_reason_11')
        self.stack3 = fake_stack(
            id='33', stack_status=cluster_status.UPDATE_COMPLETE,
            stack_status_reason='fake_reason_33')
        self.stack5 = fake_stack(
            id='55', stack_status=cluster_status.ROLLBACK_COMPLETE,
            stack_status_reason='fake_reason_55')
        self.mock_heat_client.stacks.list.return_value = [
            self.stack1, self.stack2, self.stack3, self.stack5]

        self.get_stacks = {
            '11': self.stack1,
            '22': self.stack2,
            '33': self.stack3,
            '55': self.stack5
        }

        self.mock_driver = mock.MagicMock(spec=driver.Driver)

        def _mock_update_status(context, cluster):
            try:
                stack = self.get_stacks[cluster.stack_id]
            except KeyError:
                cluster.status_reason = "Stack %s not found" % cluster.stack_id
                if cluster.status == "DELETE_IN_PROGRESS":
                    cluster.status = cluster_status.DELETE_COMPLETE
                else:
                    cluster.status = cluster.status.replace("IN_PROGRESS",
                                                            "FAILED")
                    cluster.status = cluster.status.replace("COMPLETE",
                                                            "FAILED")
            else:
                if cluster.status != stack.stack_status:
                    cluster.status = stack.stack_status
                    cluster.status_reason = stack.stack_status_reason

        self.mock_driver.update_cluster_status.side_effect = (
            _mock_update_status)