Beispiel #1
0
    def test_get_all_quotas_with_quotas(self):
        """Listing quotas returns both projects' quotas with alias fields."""
        projects = [uuidutils.generate_uuid(), uuidutils.generate_uuid()]
        quotas = [
            {'load_balancer': constants.QUOTA_UNLIMITED, 'listener': 30,
             'pool': 30, 'health_monitor': 30, 'member': 30},
            {'load_balancer': 50, 'listener': 50, 'pool': 50,
             'health_monitor': 50, 'member': 50},
        ]
        # Store one quota set per project via the API.
        for project_id, quota in zip(projects, quotas):
            path = self.QUOTA_PATH.format(project_id=project_id)
            self.put(path, {'quota': quota}, status=202)

        response = self.get(self.QUOTAS_PATH)
        quota_list = response.json

        # Build the expected payload: each quota echoes its project id and
        # the deprecated alias field names.
        for project_id, quota in zip(projects, quotas):
            quota['project_id'] = quota['tenant_id'] = project_id
            # Expected deprecated names until T
            quota['healthmonitor'] = quota['health_monitor']
            quota['loadbalancer'] = quota['load_balancer']

        expected = {'quotas': quotas, 'quotas_links': []}
        self.assertEqual(expected, quota_list)
Beispiel #2
0
 def test_create(self, **optionals):
     """Create a listener via the API and verify the returned body.

     Any ``optionals`` override fields of the default request body, so
     other tests can reuse this method to exercise individual fields.
     """
     sni1 = uuidutils.generate_uuid()
     sni2 = uuidutils.generate_uuid()
     lb_listener = {'name': 'listener1', 'default_pool_id': None,
                    'description': 'desc1',
                    'enabled': False, 'protocol': constants.PROTOCOL_HTTP,
                    'protocol_port': 80, 'connection_limit': 10,
                    'tls_certificate_id': uuidutils.generate_uuid(),
                    'sni_containers': [sni1, sni2],
                    'project_id': uuidutils.generate_uuid()}
     lb_listener.update(optionals)
     response = self.post(self.listeners_path, lb_listener)
     listener_api = response.json
     # A freshly created listener is expected in PENDING_CREATE/OFFLINE.
     extra_expects = {'provisioning_status': constants.PENDING_CREATE,
                      'operating_status': constants.OFFLINE}
     lb_listener.update(extra_expects)
     self.assertTrue(uuidutils.is_uuid_like(listener_api.get('id')))
     for key, value in optionals.items():
         self.assertEqual(value, lb_listener.get(key))
     lb_listener['id'] = listener_api.get('id')
     # SNI containers may come back in any order, so they are compared
     # separately from the rest of the body below.
     lb_listener.pop('sni_containers')
     sni_ex = [sni1, sni2]
     sni_resp = listener_api.pop('sni_containers')
     self.assertEqual(2, len(sni_resp))
     for sni in sni_resp:
         self.assertIn(sni, sni_ex)
     self.assertEqual(lb_listener, listener_api)
     self.assert_correct_lb_status(self.lb.get('id'),
                                   constants.PENDING_UPDATE,
                                   constants.ONLINE)
     self.assert_final_lb_statuses(self.lb.get('id'))
     self.assert_final_listener_statuses(self.lb.get('id'),
                                         listener_api.get('id'))
Beispiel #3
0
    def test_list_containers_with_filters(self):
        """Filtering list_containers by name returns only matching rows."""
        container1 = utils.create_test_container(
            name='container-one',
            uuid=uuidutils.generate_uuid(),
            context=self.context)
        container2 = utils.create_test_container(
            name='container-two',
            uuid=uuidutils.generate_uuid(),
            context=self.context)

        # (name filter, expected row ids): includes an exact miss and a
        # lookup via the stored attribute rather than a literal.
        cases = [
            ('container-one', [container1.id]),
            ('container-two', [container2.id]),
            ('bad-container', []),
            (container1.name, [container1.id]),
        ]
        for name, expected_ids in cases:
            res = dbapi.list_containers(
                self.context, consts.TYPE_CONTAINER,
                filters={'name': name})
            self.assertEqual(expected_ids, [r.id for r in res])
Beispiel #4
0
 def test_get_provider_names_by_resource_ids(self):
     """Associated resource ids map back to their provider names."""
     self._set_override([dp.DUMMY_SERVICE_TYPE + ':dummy1:driver_path',
                         dp.DUMMY_SERVICE_TYPE + ':dummy2:driver_path2'])
     ctx = context.get_admin_context()
     # Two resources on dummy1, one on dummy2.
     test_data = [
         {'provider_name': name, 'resource_id': uuidutils.generate_uuid()}
         for name in ('dummy1', 'dummy1', 'dummy2')]
     for assoc in test_data:
         self.manager.add_resource_association(ctx,
                                               dp.DUMMY_SERVICE_TYPE,
                                               **assoc)
     names_by_id = self.manager.get_provider_names_by_resource_ids(
         ctx, [td['resource_id'] for td in test_data])
     # unmatched IDs will be excluded from the result
     expected = {td['resource_id']: td['provider_name']
                 for td in test_data}
     self.assertEqual(expected, names_by_id)
 def create_l2_gateway(self, context, l2_gateway):
     """Create a logical gateway.

     Persists the gateway, one row per device and one row per interface
     (or one per segmentation id when a list of them is supplied), all
     inside a single nested transaction.
     """
     self._admin_check(context, "CREATE")
     gw = l2_gateway[self.gateway_resource]
     tenant_id = self._get_tenant_id_for_create(context, gw)
     devices = gw["devices"]
     self._validate_any_seg_id_empty_in_interface_dict(devices)
     with context.session.begin(subtransactions=True):
         gw_db = models.L2Gateway(
             id=gw.get("id", uuidutils.generate_uuid()), tenant_id=tenant_id, name=gw.get("name")
         )
         context.session.add(gw_db)
         l2gw_device_dict = {}
         for device in devices:
             # Bug fix: the original assigned the *builtin* ``id`` function
             # here instead of the newly created gateway's id.
             l2gw_device_dict["l2_gateway_id"] = gw_db.id
             device_name = device["device_name"]
             l2gw_device_dict["device_name"] = device_name
             l2gw_device_dict["id"] = uuidutils.generate_uuid()
             uuid = self._generate_uuid()
             dev_db = models.L2GatewayDevice(id=uuid, l2_gateway_id=gw_db.id, device_name=device_name)
             context.session.add(dev_db)
             for interface_list in device["interfaces"]:
                 int_name = interface_list.get("name")
                 if constants.SEG_ID in interface_list:
                     # One interface row per segmentation id.
                     seg_id_list = interface_list.get(constants.SEG_ID)
                     for seg_ids in seg_id_list:
                         uuid = self._generate_uuid()
                         interface_db = self._get_int_model(uuid, int_name, dev_db.id, seg_ids)
                         context.session.add(interface_db)
                 else:
                     # No segmentation id supplied: 0 acts as a sentinel.
                     uuid = self._generate_uuid()
                     interface_db = self._get_int_model(uuid, int_name, dev_db.id, 0)
                     context.session.add(interface_db)
                 # NOTE(review): this query's result is discarded; it seems
                 # to exist only to force a session flush -- confirm it is
                 # still needed.
                 context.session.query(models.L2GatewayDevice).all()
     return self._make_l2_gateway_dict(gw_db)
Beispiel #6
0
    def test_connection_switch(self):
        """An instance created under target_cell exists only in that cell.

        NOTE(review): assumes ``self.fake_conn`` points at a database
        distinct from the default one -- confirm in the test fixture.
        """
        ctxt = context.RequestContext('fake-user', 'fake-project')
        # Make a request context with a cell mapping
        mapping = objects.CellMapping(context=ctxt,
                                      uuid=uuidutils.generate_uuid(),
                                      database_connection=self.fake_conn,
                                      transport_url='none:///')
        mapping.create()
        # Create an instance in the cell database
        uuid = uuidutils.generate_uuid()
        with context.target_cell(ctxt, mapping):
            # Must set project_id because instance get specifies
            # project_only=True to model_query, which means non-admin
            # users can only read instances for their project
            instance = objects.Instance(context=ctxt, uuid=uuid,
                                        project_id='fake-project')
            instance.create()

            # Verify the instance is found in the cell database
            inst = objects.Instance.get_by_uuid(ctxt, uuid)
            self.assertEqual(uuid, inst.uuid)

        # Verify the instance isn't found in the main database
        self.assertRaises(exception.InstanceNotFound,
                          objects.Instance.get_by_uuid, ctxt, uuid)
Beispiel #7
0
 def test_update_default_status(self):
     """Clearing is_default on a default policy is reflected by the API."""
     for project_id in (uuidutils.generate_uuid(),
                        uuidutils.generate_uuid()):
         policy = self._create_qos_policy(project_id, True)
         self.assertTrue(policy['is_default'])
         updated = self._update_qos_policy(policy['id'], False)
         self.assertFalse(updated['policy']['is_default'])
    def test_map_instances_duplicates(self):
        """map_instances must not duplicate a pre-existing mapping."""
        ctxt = context.RequestContext('fake-user', 'fake_project')
        cell_uuid = uuidutils.generate_uuid()
        cell_mapping = objects.CellMapping(
                ctxt, uuid=cell_uuid, name='fake',
                transport_url='fake://', database_connection='fake://')
        cell_mapping.create()
        instance_uuids = [uuidutils.generate_uuid() for _ in range(3)]
        for inst_uuid in instance_uuids:
            objects.Instance(ctxt, project_id=ctxt.project_id,
                             uuid=inst_uuid).create()

        # Map the first instance up front; the command below must not
        # create a second mapping for it.
        objects.InstanceMapping(ctxt, project_id=ctxt.project_id,
                                instance_uuid=instance_uuids[0],
                                cell_mapping=cell_mapping).create()

        self.commands.map_instances(cell_uuid)

        for inst_uuid in instance_uuids:
            mapping = objects.InstanceMapping.get_by_instance_uuid(
                ctxt, inst_uuid)
            self.assertEqual(ctxt.project_id, mapping.project_id)

        # Exactly one mapping per instance -- no duplicates.
        mappings = objects.InstanceMappingList.get_by_project_id(
            ctxt, ctxt.project_id)
        self.assertEqual(3, len(mappings))
    def test_map_instances_max_count(self):
        """map_instances with max_count maps only that many instances."""
        ctxt = context.RequestContext('fake-user', 'fake_project')
        cell_uuid = uuidutils.generate_uuid()
        cell_mapping = objects.CellMapping(
                ctxt, uuid=cell_uuid, name='fake',
                transport_url='fake://', database_connection='fake://')
        cell_mapping.create()
        instance_uuids = [uuidutils.generate_uuid() for _ in range(6)]
        for inst_uuid in instance_uuids:
            objects.Instance(ctxt, project_id=ctxt.project_id,
                             uuid=inst_uuid).create()

        # A return value of 1 signals that more instances remain to map.
        ret = self.commands.map_instances(cell_uuid, max_count=3)
        self.assertEqual(1, ret)

        # First three are mapped.
        for inst_uuid in instance_uuids[:3]:
            mapping = objects.InstanceMapping.get_by_instance_uuid(
                ctxt, inst_uuid)
            self.assertEqual(ctxt.project_id, mapping.project_id)
        # Last three are not.
        for inst_uuid in instance_uuids[3:]:
            self.assertRaises(exception.InstanceMappingNotFound,
                              objects.InstanceMapping.get_by_instance_uuid,
                              ctxt, inst_uuid)
    def test_update_ips_for_port_passes_port_dict_to_factory(self, pool_mock):
        """_update_ips_for_port must hand the full port dict to the address
        request factory, so drivers can use port attributes when building
        address requests.
        """
        address_factory = mock.Mock()
        mocks = self._prepare_mocks_with_pool_mock(
            pool_mock, address_factory=address_factory)
        context = mock.Mock()
        new_ips = mock.Mock()
        original_ips = mock.Mock()
        mac = mock.Mock()

        # One added IP, nothing kept or removed.
        ip_dict = {'ip_address': '192.1.1.10',
                   'subnet_id': uuidutils.generate_uuid()}
        changes = ipam_pluggable_backend.IpamPluggableBackend.Changes(
            add=[ip_dict], original=[], remove=[])
        changes_mock = mock.Mock(return_value=changes)
        fixed_ips_mock = mock.Mock(return_value=changes.add)
        # Stub out every collaborator except the method under test.
        mocks['ipam'] = ipam_pluggable_backend.IpamPluggableBackend()
        mocks['ipam']._get_changed_ips_for_port = changes_mock
        mocks['ipam']._ipam_get_subnets = mock.Mock()
        mocks['ipam']._test_fixed_ips_for_port = fixed_ips_mock
        mocks['ipam']._update_ips_for_pd_subnet = mock.Mock(return_value=[])

        port_dict = {'device_owner': uuidutils.generate_uuid(),
                     'network_id': uuidutils.generate_uuid()}

        mocks['ipam']._update_ips_for_port(context, port_dict, None,
                                           original_ips, new_ips, mac)
        mocks['driver'].get_address_request_factory.assert_called_once_with()
        mocks['ipam']._ipam_get_subnets.assert_called_once_with(
            context, network_id=port_dict['network_id'], host=None)
        # Validate port_dict is passed into address_factory
        address_factory.get_request.assert_called_once_with(context,
                                                            port_dict,
                                                            ip_dict)
Beispiel #11
0
    def test_map_instances_marker_deleted(self):
        """map_instances restarts cleanly when its progress marker is gone.

        NOTE(review): the marker appears to be stored as an InstanceMapping
        whose instance_uuid has dashes replaced by spaces -- confirm against
        the map_instances implementation.
        """
        ctxt = context.RequestContext('fake-user', 'fake_project')
        cell_uuid = uuidutils.generate_uuid()
        cell_mapping = objects.CellMapping(
                ctxt, uuid=cell_uuid, name='fake',
                transport_url='fake://', database_connection='fake://')
        cell_mapping.create()
        instance_uuids = []
        for i in range(6):
            uuid = uuidutils.generate_uuid()
            instance_uuids.append(uuid)
            objects.Instance(ctxt, project_id=ctxt.project_id,
                             uuid=uuid).create()

        # Return value 1 means more instances remain to be mapped.
        ret = self.commands.map_instances(cell_uuid, max_count=3)
        self.assertEqual(1, ret)

        # Instances are mapped in the order created so we know the marker is
        # based off the third instance.
        marker = instance_uuids[2].replace('-', ' ')
        marker_mapping = objects.InstanceMapping.get_by_instance_uuid(ctxt,
                marker)
        marker_mapping.destroy()

        # With the marker gone, a second run starts over and maps the rest.
        ret = self.commands.map_instances(cell_uuid)
        self.assertEqual(0, ret)

        for uuid in instance_uuids:
            inst_mapping = objects.InstanceMapping.get_by_instance_uuid(ctxt,
                    uuid)
            self.assertEqual(ctxt.project_id, inst_mapping.project_id)
Beispiel #12
0
    def create_security_group(self, context, security_group, default_sg=False):
        """Create security group.

        If default_sg is true that means we are a default security group for
        a given tenant if it does not exist.
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }

        # Give registered callbacks a chance to veto creation before any
        # database work happens.
        self._registry_notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict, **kwargs)

        tenant_id = s['tenant_id']

        if not default_sg:
            self._ensure_default_security_group(context, tenant_id)

        with db_api.autonested_transaction(context.session):
            security_group_db = SecurityGroup(id=s.get('id') or (
                                              uuidutils.generate_uuid()),
                                              description=s['description'],
                                              tenant_id=tenant_id,
                                              name=s['name'])
            context.session.add(security_group_db)
            if default_sg:
                context.session.add(DefaultSecurityGroup(
                    security_group=security_group_db,
                    tenant_id=security_group_db['tenant_id']))
            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = SecurityGroupRule(
                        id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                        security_group=security_group_db,
                        direction='ingress',
                        ethertype=ethertype,
                        source_group=security_group_db)
                    context.session.add(ingress_rule)

                # Every group gets a default egress rule per ethertype.
                egress_rule = SecurityGroupRule(
                    id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction='egress',
                    ethertype=ethertype)
                context.session.add(egress_rule)
                # NOTE(review): this PRECOMMIT_CREATE notification fires once
                # per ethertype because it sits inside the loop -- confirm
                # whether it was meant to run only once per group.
                self._registry_notify(resources.SECURITY_GROUP,
                                      events.PRECOMMIT_CREATE,
                                      exc_cls=ext_sg.SecurityGroupConflict,
                                      **kwargs)

        secgroup_dict = self._make_security_group_dict(security_group_db)

        # Post-commit notification carries the final (serialized) group.
        kwargs['security_group'] = secgroup_dict
        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict
    def test_list_agents_including_midonet_agents(self):
        """Midonet agents from the client appear in the agent listing."""
        agents = self._register_agent_states()

        def _midonet_agent(host, admin_state_up):
            # Minimal agent record as returned by the midonet client.
            return {
                "id": uuidutils.generate_uuid(),
                "binary": "midolman",
                "admin_state_up": admin_state_up,
                "host": host,
                "agent_type": "Midonet Agent",
            }

        agent1 = _midonet_agent("midohostA", True)
        agent2 = _midonet_agent("midohostB", False)
        self.client_mock.get_agents.return_value = [agent1, agent2]

        res = self._list("agents")
        agent_ids = [ag["id"] for ag in res["agents"]]

        self.assertEqual(len(agents) + 2, len(res["agents"]))
        self.assertIn(agent1["id"], agent_ids)
        self.assertIn(agent2["id"], agent_ids)
Beispiel #14
0
 def create_resources(self, _type, ctx, body):
     """Fake resource creation: store the body and return the stored dict."""
     spec = body[_type]
     if 'id' not in spec:
         spec['id'] = uuidutils.generate_uuid()
     if _type == 'port' and 'fixed_ips' in spec:
         ip_dict = spec['fixed_ips'][0]
         self._check_port_ip_conflict(ip_dict['subnet_id'],
                                      ip_dict['ip_address'])
     if _type == 'security_group':
         # Mirror the default egress rules, one per ethertype.
         spec['security_group_rules'] = [
             {'remote_group_id': None,
              'direction': 'egress',
              'remote_ip_prefix': None,
              'protocol': None,
              'port_range_max': None,
              'port_range_min': None,
              'ethertype': ethertype,
              'id': uuidutils.generate_uuid()}
             for ethertype in ('IPv4', 'IPv6')]
     res_list = self._get_res_list(_type)
     res = dict(spec)
     res_list.append(res)
     return res
Beispiel #15
0
 def test_get_stats_no_listeners(self):
     """Amphora stats for a load balancer without listeners return 404."""
     self.lb2 = self.create_load_balancer(
         uuidutils.generate_uuid()).get('loadbalancer')
     self.lb2_id = self.lb2.get('id')
     self.set_lb_status(self.lb2_id)
     # Full attribute set required by the amphora repository create().
     self.amp2_args = {
         'load_balancer_id': self.lb2_id,
         'compute_id': uuidutils.generate_uuid(),
         'lb_network_ip': '192.168.1.20',
         'vrrp_ip': '192.168.1.5',
         'ha_ip': '192.168.1.100',
         'vrrp_port_id': uuidutils.generate_uuid(),
         'ha_port_id': uuidutils.generate_uuid(),
         'cert_expiration': datetime.datetime.now(),
         'cert_busy': False,
         'role': constants.ROLE_STANDALONE,
         'status': constants.AMPHORA_ALLOCATED,
         'vrrp_interface': 'eth1',
         'vrrp_id': 1,
         'vrrp_priority': 100,
         'cached_zone': None,
         'created_at': datetime.datetime.now(),
         'updated_at': datetime.datetime.now(),
         'image_id': uuidutils.generate_uuid(),
     }
     self.amp2 = self.amphora_repo.create(self.session, **self.amp2_args)
     self.amp2_id = self.amp2.id
     # With no listeners there are no stats rows, so the API reports 404.
     self.get(self.AMPHORA_STATS_PATH.format(
         amphora_id=self.amp2_id), status=404)
Beispiel #16
0
 def start_fixture(self):
     """Set up a provider with DISK_GB inventory and two 512-unit allocations."""
     super(AllocationFixture, self).start_fixture()
     self.context = context.get_admin_context()
     # Stealing from the super
     rp_name = os.environ['RP_NAME']
     rp_uuid = os.environ['RP_UUID']
     rp = objects.ResourceProvider(
         self.context, name=rp_name, uuid=rp_uuid)
     rp.create()
     inventory = objects.Inventory(
         self.context, resource_provider=rp,
         resource_class='DISK_GB', total=2048)
     inventory.obj_set_defaults()
     rp.add_inventory(inventory)
     # Two independent consumers, each holding 512 of DISK_GB.
     for _ in range(2):
         allocation = objects.Allocation(
             self.context, resource_provider=rp,
             resource_class='DISK_GB',
             consumer_id=uuidutils.generate_uuid(),
             used=512)
         allocation.create()
Beispiel #17
0
    def test_populate_tenant_id(self):
        """populate_tenant_id enforces tenant ownership on create requests."""
        tenant_id_1 = uuidutils.generate_uuid()
        tenant_id_2 = uuidutils.generate_uuid()
        # apart from the admin, nobody can create a res on behalf of another
        # tenant
        ctx = context.Context(user_id=None, tenant_id=tenant_id_1)
        res_dict = {'tenant_id': tenant_id_2}
        self.assertRaises(webob.exc.HTTPBadRequest,
                          attributes.populate_tenant_id,
                          ctx, res_dict, None, None)
        # An admin may act on behalf of another tenant.
        ctx.is_admin = True
        self.assertIsNone(attributes.populate_tenant_id(ctx, res_dict,
                                                        None, None))

        # for each create request, the tenant_id should be added to the
        # req body
        res_dict2 = {}
        attributes.populate_tenant_id(ctx, res_dict2, None, True)
        self.assertEqual(
            {'tenant_id': ctx.tenant_id, 'project_id': ctx.tenant_id},
            res_dict2)

        # if the tenant_id is mandatory for the resource and not specified
        # in the request nor in the context, an exception should be raised
        res_dict3 = {}
        attr_info = {'tenant_id': {'allow_post': True}, }
        ctx.tenant_id = None
        self.assertRaises(webob.exc.HTTPBadRequest,
                          attributes.populate_tenant_id,
                          ctx, res_dict3, attr_info, True)
Beispiel #18
0
 def _test_validate_create_port_callback(self, policy_id=None,
                                         network_policy_id=None):
     """Drive the port precommit-create callback for a QoS policy combo.

     The effective policy is the port's own ``policy_id`` if set, otherwise
     the network's; validation must run exactly once for that policy, or
     not at all when neither is set.
     """
     port_id = uuidutils.generate_uuid()
     kwargs = {
         "context": self.ctxt,
         "port": {"id": port_id}
     }
     port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id)
     network_mock = mock.MagicMock(
         id=uuidutils.generate_uuid(), qos_policy_id=network_policy_id)
     policy_mock = mock.MagicMock(id=policy_id)
     # Port-level policy wins over the network-level one.
     expected_policy_id = policy_id or network_policy_id
     with mock.patch(
         'neutron.objects.ports.Port.get_object',
         return_value=port_mock
     ), mock.patch(
         'neutron.objects.network.Network.get_object',
         return_value=network_mock
     ), mock.patch(
         'neutron.objects.qos.policy.QosPolicy.get_object',
         return_value=policy_mock
     ) as get_policy, mock.patch.object(
         self.qos_plugin, "validate_policy_for_port"
     ) as validate_policy_for_port:
         self.qos_plugin._validate_create_port_callback(
             "PORT", "precommit_create", "test_plugin", **kwargs)
         if policy_id or network_policy_id:
             get_policy.assert_called_once_with(self.ctxt,
                                                id=expected_policy_id)
             validate_policy_for_port.assert_called_once_with(policy_mock,
                                                              port_mock)
         else:
             get_policy.assert_not_called()
             validate_policy_for_port.assert_not_called()
Beispiel #19
0
    def start_fixture(self):
        """Prepare logging, config, and databases for a gabbi fixture run."""
        # Set up stderr and stdout captures by directly driving the
        # existing nova fixtures that do that. This captures the
        # output that happens outside individual tests (for
        # example database migrations).
        self.standard_logging_fixture = fixtures.StandardLogging()
        self.standard_logging_fixture.setUp()
        self.output_stream_fixture = fixtures.OutputStreamCapture()
        self.output_stream_fixture.setUp()

        self.conf = CONF
        self.conf.set_override('auth_strategy', 'noauth2')
        # Be explicit about all three database connections to avoid
        # potential conflicts with config on disk.
        self.conf.set_override('connection', "sqlite://", group='database')
        self.conf.set_override('connection', "sqlite://",
                               group='api_database')
        self.conf.set_override('connection', "sqlite://",
                               group='placement_database')
        config.parse_args([], default_config_files=None, configure_db=False,
                          init_rpc=False)

        # NOTE(cdent): api and main database are not used but we still need
        # to manage them to make the fixtures work correctly and not cause
        # conflicts with other tests in the same process.
        self.api_db_fixture = fixtures.Database('api')
        self.main_db_fixture = fixtures.Database('main')
        self.api_db_fixture.reset()
        self.main_db_fixture.reset()

        # Random provider identity for each run; read back by tests and
        # by sibling fixtures via the environment.
        os.environ['RP_UUID'] = uuidutils.generate_uuid()
        os.environ['RP_NAME'] = uuidutils.generate_uuid()
 def test_port_notification(self):
     """Port API notifications carry the port's fields in their payload."""
     node_uuid = uuidutils.generate_uuid()
     portgroup_uuid = uuidutils.generate_uuid()
     address = '11:22:33:77:88:99'
     llc = {'a': 25}
     extra = {'as': 34}
     port = obj_utils.get_test_port(self.context,
                                    address=address,
                                    local_link_connection=llc,
                                    extra=extra,
                                    pxe_enabled=False)
     notif_utils._emit_api_notification(self.context, port, 'create',
                                        fields.NotificationLevel.INFO,
                                        fields.NotificationStatus.SUCCESS,
                                        node_uuid=node_uuid,
                                        portgroup_uuid=portgroup_uuid)
     # Inspect the kwargs the notification class was constructed with.
     init_kwargs = self.port_notify_mock.call_args[1]
     payload = init_kwargs['payload']
     event_type = init_kwargs['event_type']
     self.assertEqual('port', event_type.object)
     self.assertEqual(port.uuid, payload.uuid)
     self.assertEqual(node_uuid, payload.node_uuid)
     self.assertEqual(portgroup_uuid, payload.portgroup_uuid)
     self.assertEqual(address, payload.address)
     self.assertEqual(llc, payload.local_link_connection)
     self.assertEqual(extra, payload.extra)
     self.assertIs(False, payload.pxe_enabled)
Beispiel #21
0
 def test__generate_connector_multiple_fc_wwns(self):
     """Validate handling of WWPNs and WWNNs."""
     expected = {
         'wwpns': ['wwpn1', 'wwpn2'],
         'wwnns': ['wwnn3', 'wwnn4'],
         'host': self.node.uuid,
         'multipath': True}
     # Register two connectors of each fibre channel type on the node.
     for conn_type, conn_id in (('wwpn', 'wwpn1'),
                                ('wwpn', 'wwpn2'),
                                ('wwnn', 'wwnn3'),
                                ('wwnn', 'wwnn4')):
         object_utils.create_test_volume_connector(
             self.context,
             node_id=self.node.id,
             type=conn_type,
             connector_id=conn_id,
             uuid=uuidutils.generate_uuid())
     with task_manager.acquire(self.context, self.node.id) as task:
         return_value = self.interface._generate_connector(task)
     self.assertDictEqual(expected, return_value)
Beispiel #22
0
    def test_port_bind(self, mock_getitem, mock_getattribute,
                       mock_execute, mock_path_exists):
        """port_bind applies the network MTU and brings the interface up.

        NOTE(review): ``mock_interface`` is referenced below but is not a
        parameter of this test -- presumably patched at module/class level;
        confirm where it comes from.
        """
        fake_mtu = 1450
        fake_docker_network_id = utils.get_hash()
        fake_docker_endpoint_id = utils.get_hash()
        fake_port_id = uuidutils.generate_uuid()
        fake_neutron_v4_subnet_id = uuidutils.generate_uuid()
        fake_neutron_v6_subnet_id = uuidutils.generate_uuid()
        fake_port = self._get_fake_port(
            fake_docker_endpoint_id, fake_docker_network_id,
            fake_port_id, constants.PORT_STATUS_ACTIVE,
            fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
        fake_subnets = self._get_fake_subnets(
            fake_docker_endpoint_id, fake_docker_network_id,
            fake_neutron_v4_subnet_id, fake_neutron_v6_subnet_id)
        fake_network = self._get_fake_networks(fake_docker_network_id)
        fake_network['networks'][0]['mtu'] = fake_mtu

        binding.port_bind(fake_docker_endpoint_id, fake_port['port'],
                          fake_subnets['subnets'],
                          fake_network['networks'][0])

        # The interface must have been configured with the network MTU and
        # brought up, in any order relative to other calls.
        expect_calls = [call.__enter__().set_mtu(fake_mtu),
                        call.__enter__().up()]
        mock_interface.assert_has_calls(expect_calls, any_order=True)
        mock_path_exists.assert_called_once()
        mock_execute.assert_called_once()
 def test_remove_expired_items_from_cache(self):
     """Eviction drops entries whose URL will not outlive agent startup."""
     def _element(url, offset):
         # Cache element whose expiry is *offset* seconds from now.
         return glance_v2.TempUrlCacheElement(url, int(time.time()) + offset)

     expired_items = {
         uuidutils.generate_uuid(): _element('fake-url-1', -10),
         # Agent won't be able to start in time
         uuidutils.generate_uuid(): _element('fake-url-2', 90),
     }
     valid_items = {
         uuidutils.generate_uuid(): _element('fake-url-3', 1000),
         uuidutils.generate_uuid(): _element('fake-url-4', 2000),
     }
     self.glance_service._cache.update(expired_items)
     self.glance_service._cache.update(valid_items)
     self.glance_service._remove_expired_items_from_cache()
     for uuid in valid_items:
         self.assertEqual(valid_items[uuid],
                          self.glance_service._cache[uuid])
     for uuid in expired_items:
         self.assertNotIn(uuid, self.glance_service._cache)
    def test_add_two_rules_and_get(self):
        """Two rules added to a policy are returned in insertion order."""
        policy_name = "classification"
        comment = "None"
        rules_spec = [(uuidutils.generate_uuid(), "p(x) :- q(x)"),
                      (uuidutils.generate_uuid(), "z(x) :- q(x)")]
        for rule_id, rule_str in rules_spec:
            db_policy_rules.add_policy_rule(id=rule_id,
                                            policy_name=policy_name,
                                            rule=rule_str,
                                            comment=comment)

        rules = db_policy_rules.get_policy_rules(policy_name)
        self.assertEqual(len(rules), 2)
        for (rule_id, rule_str), rule in zip(rules_spec, rules):
            self.assertEqual(rule_id, rule.id)
            self.assertEqual(policy_name, rule.policy_name)
            self.assertEqual(rule_str, rule.rule)
            self.assertEqual(comment, rule.comment)
        # An unfiltered query sees the same two rules.
        self.assertEqual(len(db_policy_rules.get_policy_rules()), 2)
 def setup_keystone_mock(session_mock):
     """Configure *session_mock* to look like a keystone client session.

     Populates auth_token/project_id/user_id with fresh UUIDs and an empty
     JSON auth_ref on the mock's return_value.

     :param session_mock: mock whose ``return_value`` plays the client.
     :returns: the configured client instance mock.
     """
     keystone_client_instance = session_mock.return_value
     keystone_client_instance.auth_token = uuidutils.generate_uuid()
     keystone_client_instance.project_id = uuidutils.generate_uuid()
     keystone_client_instance.user_id = uuidutils.generate_uuid()
     # jsonutils.dumps() already returns a str, so the former str() wrapper
     # was redundant and has been dropped.
     keystone_client_instance.auth_ref = jsonutils.dumps({})
     return keystone_client_instance
    def test_with_candidates(self):
        """An allocation restricted to candidate_nodes only picks from them."""
        common = dict(power_state='power on',
                      resource_class='x-large',
                      provision_state='available')
        # A matching node that is deliberately NOT in the candidate list.
        obj_utils.create_test_node(self.context,
                                   uuid=uuidutils.generate_uuid(),
                                   **common)
        candidate = obj_utils.create_test_node(self.context,
                                               uuid=uuidutils.generate_uuid(),
                                               **common)

        allocation = obj_utils.create_test_allocation(
            self.context, resource_class='x-large',
            candidate_nodes=[candidate['uuid']])

        allocations.do_allocate(self.context, allocation)

        allocation = objects.Allocation.get_by_uuid(self.context,
                                                    allocation['uuid'])
        self.assertIsNone(allocation['last_error'])
        self.assertEqual('active', allocation['state'])

        # The candidate node (and only it) must be bound to the allocation.
        candidate = objects.Node.get_by_uuid(self.context, candidate['uuid'])
        self.assertEqual(allocation['uuid'], candidate['instance_uuid'])
        self.assertEqual(allocation['id'], candidate['allocation_id'])
        self.assertEqual([candidate['uuid']], allocation['candidate_nodes'])
    def test_nodes_locked(self, mock_acquire):
        """If every candidate stays reserved, allocation errors after retries."""
        self.config(node_locked_retry_attempts=2, group='conductor')
        common = dict(resource_class='x-large',
                      power_state='power off',
                      provision_state='available',
                      reservation='example.com')
        node1 = obj_utils.create_test_node(self.context,
                                           uuid=uuidutils.generate_uuid(),
                                           maintenance=False,
                                           **common)
        node2 = obj_utils.create_test_node(self.context,
                                           uuid=uuidutils.generate_uuid(),
                                           **common)

        allocation = obj_utils.create_test_allocation(self.context,
                                                      resource_class='x-large')
        allocations.do_allocate(self.context, allocation)
        self.assertIn('could not reserve any of 2', allocation['last_error'])
        self.assertEqual('error', allocation['state'])

        # 2 nodes x (1 initial pass + 2 retries) == 6 acquire attempts.
        self.assertEqual(6, mock_acquire.call_count)
        # NOTE(dtantsur): node are tried in random order by design, so we
        # cannot directly use assert_has_calls. Check that all nodes are tried
        # before going into retries (rather than each tried 3 times in a row).
        attempted = [c[0][1] for c in mock_acquire.call_args_list]
        expected = {node1.uuid, node2.uuid}
        for start in range(0, 6, 2):
            self.assertEqual(expected, set(attempted[start:start + 2]))
Beispiel #28
0
    def test_plug_with_namespace_sets_mtu_higher_than_bridge(self):
        """A plugged device keeps its requested MTU even when the bridge
        already carries a port with a lower one."""
        requested_mtu = 1450

        # Fresh OVS bridge with no ports attached yet.
        bridge = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        self.assertFalse(bridge.get_port_name_list())

        # Attach a linuxbridge port whose MTU is below the one we will
        # request for the new device.
        linux_bridge = self.useFixture(
            net_helpers.LinuxBridgeFixture()).bridge
        low_mtu_port = self.useFixture(
            net_helpers.LinuxBridgePortFixture(linux_bridge))
        low_mtu_port.port.link.set_mtu(requested_mtu - 1)
        bridge.add_port(low_mtu_port.port.name)

        # Plugging a device with a higher intended MTU must not clamp it
        # down to the lowest MTU present on the bridge.
        dev_name = utils.get_rand_name()
        dev_mac = utils.get_random_mac('fa:16:3e:00:00:00'.split(':'))
        ns_name = self.useFixture(net_helpers.NamespaceFixture()).name
        self.interface.plug(network_id=uuidutils.generate_uuid(),
                            port_id=uuidutils.generate_uuid(),
                            device_name=dev_name,
                            mac_address=dev_mac,
                            bridge=bridge.br_name,
                            namespace=ns_name,
                            mtu=requested_mtu)

        self.assertIn(dev_name, bridge.get_port_name_list())
        self.assertTrue(ip_lib.device_exists(dev_name, ns_name))
        self.assertEqual(
            requested_mtu,
            ip_lib.IPDevice(dev_name, namespace=ns_name).link.mtu
        )
Beispiel #29
0
    def test_floatingip_update_qos_policy_id(self):
        """A floating IP's QoS policy can be replaced via update."""
        admin_ctx = context.get_admin_context()
        initial_policy, replacement_policy = [
            policy.QosPolicy(admin_ctx,
                             id=uuidutils.generate_uuid(),
                             project_id='tenant', name=name,
                             rules=[])
            for name in ('pol2', 'pol3')
        ]
        initial_policy.create()
        replacement_policy.create()
        with self.subnet(cidr='11.0.0.0/24') as subnet:
            net_id = subnet['subnet']['network_id']
            self._set_net_external(net_id)
            fip = self._make_floatingip(
                self.fmt,
                net_id,
                qos_policy_id=initial_policy.id)
            fip_id = fip['floatingip']['id']
            # Both the create response and a fresh show carry the policy.
            self.assertEqual(initial_policy.id,
                             fip['floatingip'][qos_consts.QOS_POLICY_ID])
            shown = self._show('floatingips', fip_id)
            self.assertEqual(initial_policy.id,
                             shown['floatingip'][qos_consts.QOS_POLICY_ID])

            updated = self._update(
                'floatingips', fip_id,
                {'floatingip':
                 {qos_consts.QOS_POLICY_ID: replacement_policy.id}})
            self.assertEqual(replacement_policy.id,
                             updated['floatingip'][qos_consts.QOS_POLICY_ID])
    def setUp(self):
        """Build a TaasPlugin with its RPC machinery mocked out, plus the
        port/tap-service/tap-flow fixtures shared by the tests."""
        super(TestTaasPlugin, self).setUp()
        # NOTE: the original code passed auto_spec=True, which mock.patch
        # silently accepts as an arbitrary Mock attribute and ignores; the
        # real option that enforces the patched object's signature is
        # autospec=True.
        mock.patch.object(n_rpc, 'create_connection', autospec=True).start()
        mock.patch.object(taas_plugin, 'TaasCallbacks', autospec=True).start()
        mock.patch.object(taas_plugin, 'TaasAgentApi', autospec=True).start()
        self._plugin = taas_plugin.TaasPlugin()
        self._context = context.get_admin_context()

        self._tenant_id = 'tenant-X'
        self._network_id = uuidutils.generate_uuid()
        self._host_id = 'host-A'
        self._port_id = uuidutils.generate_uuid()
        self._port_details = {
            'tenant_id': self._tenant_id,
            'binding:host_id': self._host_id,
            'mac_address': n_utils.get_random_mac(
                'fa:16:3e:00:00:00'.split(':')),
        }
        self._tap_service = {
            'tenant_id': self._tenant_id,
            'name': 'MyTap',
            'description': 'This is my tap service',
            'port_id': self._port_id,
            'network_id': self._network_id,
        }
        self._tap_flow = {
            'description': 'This is my tap flow',
            'direction': 'BOTH',
            'name': 'MyTapFlow',
            'source_port': self._port_id,
            'tenant_id': self._tenant_id,
        }
Beispiel #31
0
 def test_get_not_versions_no_model(self):
     """get_not_versions raises for a model name that does not exist."""
     utils.create_test_node(uuid=uuidutils.generate_uuid(), version='1.4')
     with self.assertRaises(exception.IronicException):
         self.dbapi.get_not_versions('NotExist', ['1.6'])
Beispiel #32
0
    def post(self, port):
        """Create a new port.

        :param port: a port within the request body.
        :returns: the created port as an API representation, with links.
        :raises: NotAcceptable, HTTPNotFound, Conflict
        """
        context = pecan.request.context
        cdict = context.to_policy_values()
        policy.authorize('baremetal:port:create', cdict, cdict)

        # Ports can only be created at the top-level resource, not nested
        # under a node or portgroup URL.
        if self.parent_node_ident or self.parent_portgroup_ident:
            raise exception.OperationNotPermitted()

        pdict = port.as_dict()
        self._check_allowed_port_fields(pdict)

        create_remotely = pecan.request.rpcapi.can_send_create_port()
        if (not create_remotely and pdict.get('portgroup_uuid')):
            # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the
            # conductor service to facilitate validation of the physical
            # network field of ports in portgroups. During a rolling upgrade,
            # the RPCAPI will reject the create_port method, so we need to
            # create the port locally. If the port is a member of a portgroup,
            # we are unable to perform the validation and must reject the
            # request.
            raise exception.NotAcceptable()

        vif = api_utils.handle_post_port_like_extra_vif(pdict)

        # A portgroup member with a VIF or pxe_enabled acts as a standalone
        # port, which the portgroup must explicitly allow.
        if (pdict.get('portgroup_uuid')
                and (pdict.get('pxe_enabled') or vif)):
            rpc_pg = objects.Portgroup.get_by_uuid(context,
                                                   pdict['portgroup_uuid'])
            if not rpc_pg.standalone_ports_supported:
                msg = _("Port group %s doesn't support standalone ports. "
                        "This port cannot be created as a member of that "
                        "port group because either 'extra/vif_port_id' "
                        "was specified or 'pxe_enabled' was set to True.")
                raise exception.Conflict(
                    msg % pdict['portgroup_uuid'])

        # NOTE(yuriyz): UUID is mandatory for notifications payload
        if not pdict.get('uuid'):
            pdict['uuid'] = uuidutils.generate_uuid()

        rpc_port = objects.Port(context, **pdict)
        rpc_node = objects.Node.get_by_id(context, rpc_port.node_id)

        notify_extra = {'node_uuid': port.node_uuid,
                        'portgroup_uuid': port.portgroup_uuid}
        notify.emit_start_notification(context, rpc_port, 'create',
                                       **notify_extra)
        # Any failure inside this block emits an error notification.
        with notify.handle_error_notification(context, rpc_port, 'create',
                                              **notify_extra):
            # NOTE(mgoddard): In RPC API v1.41, port creation was moved to the
            # conductor service to facilitate validation of the physical
            # network field of ports in portgroups. During a rolling upgrade,
            # the RPCAPI will reject the create_port method, so we need to
            # create the port locally.
            if create_remotely:
                topic = pecan.request.rpcapi.get_topic_for(rpc_node)
                new_port = pecan.request.rpcapi.create_port(context, rpc_port,
                                                            topic)
            else:
                rpc_port.create()
                new_port = rpc_port
        notify.emit_end_notification(context, new_port, 'create',
                                     **notify_extra)
        # Set the HTTP Location Header
        pecan.response.location = link.build_url('ports', new_port.uuid)
        return Port.convert_with_links(new_port)
Beispiel #33
0
    def _create_portchain_hop_details(self, context, port_chain,
                                      reverse=False):
        """Build and persist BaGPipeChainHop objects for *port_chain*.

        Creates one hop per boundary: first hop (source network -> first
        service function), intermediate hops (between consecutive port pair
        groups), and last hop (last service function -> destination network).

        :param context: SFC driver context wrapping the plugin context.
        :param port_chain: port chain dict with 'tenant_id',
            'port_pair_groups' and 'flow_classifiers'.
        :param reverse: when True, build the hops for the symmetric
            reverse-traffic direction (port pair groups iterated backwards,
            source/destination roles swapped).
        :returns: list of created BaGPipeChainHop objects.

        NOTE(review): this assumes at least one flow classifier is present;
        with an empty 'flow_classifiers' list, src_subnet/dest_subnet and
        ingress/egress_bgpvpns would be unbound when referenced below --
        confirm callers guarantee this.
        """
        project_id = port_chain['tenant_id']
        hop_details = []
        port_pair_groups = port_chain['port_pair_groups']

        fcs = self._get_fcs_by_ids(port_chain['flow_classifiers'])

        classifiers = []
        src_rts = []
        src_ports = []
        dest_rts = []
        dest_ports = []
        for fc in fcs:
            if fc.get('logical_source_port'):
                src_subnet = self._get_subnet_by_port(
                    fc['logical_source_port'])
                src_ports.append(fc['logical_source_port'])
            else:
                src_subnet = self._get_subnet_by_prefix(
                    fc['source_ip_prefix'])
                src_ports.extend(
                    self._get_ports_by_network(src_subnet['network_id'])
                )

            # Check if network is associated to BGPVPNs
            src_bgpvpns = (
                self._get_bgpvpns_by_network(context,
                                             src_subnet['network_id'])
            )
            if src_bgpvpns:
                src_rts.extend(self._get_bgpvpn_rts(src_bgpvpns)[0])
            else:
                src_rts.append(self._get_network_rt(src_subnet['network_id']))

            if fc.get('logical_destination_port'):
                dest_subnet = self._get_subnet_by_port(
                    fc['logical_destination_port'])
                dest_ports.append(fc['logical_destination_port'])
            else:
                dest_subnet = self._get_subnet_by_prefix(
                    fc['destination_ip_prefix'])
                dest_ports.extend(
                    self._get_ports_by_network(dest_subnet['network_id'])
                )

            # Check if network is associated to BGPVPNs
            dest_bgpvpns = (
                self._get_bgpvpns_by_network(context,
                                             dest_subnet['network_id'])
                )
            if dest_bgpvpns:
                dest_rts.extend(self._get_bgpvpn_rts(dest_bgpvpns)[0])
            else:
                dest_rts.append(
                    self._get_network_rt(dest_subnet['network_id'])
                )

            # Swap ingress/egress roles for the reverse direction.
            (ingress_bgpvpns, egress_bgpvpns) = (
                (dest_bgpvpns, src_bgpvpns) if reverse
                else (src_bgpvpns, dest_bgpvpns)
            )

            classifiers.append(
                self._build_bagpipe_classifier_from_fc(fc, reverse,
                                                       ingress_bgpvpns)
            )

            # bagpipe-bgp only support one flow classifier for the moment
            break

        reversed_ppg = port_pair_groups[::-1] if reverse else port_pair_groups
        reversed_ingress = (constants.REVERSE_PORT_SIDE[constants.INGRESS]
                            if reverse else constants.INGRESS)
        reversed_egress = (constants.REVERSE_PORT_SIDE[constants.EGRESS]
                           if reverse else constants.EGRESS)
        # Iterate in reversed order to propagate default route from last
        # ingress VRF
        for position, ppg_id in reversed(list(enumerate(reversed_ppg))):
            # Last Hop:
            # - Between last SF egress and Destination ports
            # - Between first SF ingress and Source ports if symmetric reverse
            #   traffic
            if position == len(reversed_ppg)-1:
                last_ppg = context._plugin._get_port_pair_group(
                    context._plugin_context, ppg_id)

                last_eports = self._get_ports_by_portpairs_side(
                    last_ppg['port_pairs'], reversed_egress)

                last_subnet = self._get_subnet_by_port(last_eports[0])

                hop_detail_obj = sfc_obj.BaGPipeChainHop(
                    context._plugin_context,
                    id=uuidutils.generate_uuid(),
                    project_id=project_id,
                    portchain_id=port_chain['id'],
                    rts=(src_rts if reverse else dest_rts),
                    ingress_gw=last_subnet['gateway_ip'],
                    egress_gw=(src_subnet['gateway_ip'] if reverse
                               else dest_subnet['gateway_ip']),
                    reverse_hop=reverse,
                    ingress_ppg=last_ppg['id'],
                    egress_network=(src_subnet['network_id'] if reverse
                                    else dest_subnet['network_id'])
                )
                hop_detail_obj.create()
                hop_details.append(hop_detail_obj)

            # Intermediate Hop: Between one SF ingress and previous (reversed
            # order) SF egress ports
            if (position < len(reversed_ppg)-1 and
                    len(reversed_ppg) > 1):
                prev_ppg_id = reversed_ppg[position+1]

                current_ppg = context._plugin._get_port_pair_group(
                    context._plugin_context, ppg_id)

                current_eports = self._get_ports_by_portpairs_side(
                    current_ppg['port_pairs'], reversed_egress)

                current_subnet = self._get_subnet_by_port(current_eports[0])

                prev_ppg = context._plugin._get_port_pair_group(
                    context._plugin_context,
                    prev_ppg_id)

                prev_iports = self._get_ports_by_portpairs_side(
                    prev_ppg['port_pairs'], reversed_ingress)

                prev_subnet = self._get_subnet_by_port(prev_iports[0])

                prev_ppg_rt = self.rt_allocator.allocate_rt(
                    prev_ppg_id,
                    reverse=reverse)

                prev_redirect_rt = self.rt_allocator.allocate_rt(
                    prev_ppg_id,
                    is_redirect=True,
                    reverse=reverse)

                if position+1 == len(reversed_ppg)-1:
                    # Advertise FlowSpec routes from last intermediate hop
                    prev_readv_from_rts = ((src_rts if reverse else dest_rts)
                                           if egress_bgpvpns else None)
                    prev_readv_to_rt = (prev_redirect_rt
                                        if egress_bgpvpns else None)
                    prev_attract_to_rt = (prev_redirect_rt
                                          if not egress_bgpvpns else None)

                    hop_detail_obj = sfc_obj.BaGPipeChainHop(
                        context._plugin_context,
                        id=uuidutils.generate_uuid(),
                        project_id=project_id,
                        portchain_id=port_chain['id'],
                        rts=[prev_ppg_rt],
                        ingress_gw=current_subnet['gateway_ip'],
                        egress_gw=prev_subnet['gateway_ip'],
                        reverse_hop=reverse,
                        ingress_ppg=current_ppg['id'],
                        egress_ppg=prev_ppg['id'],
                        readv_from_rts=prev_readv_from_rts,
                        readv_to_rt=prev_readv_to_rt,
                        attract_to_rt=prev_attract_to_rt,
                        redirect_rts=[prev_ppg_rt],
                        classifiers=jsonutils.dumps(classifiers)
                    )
                else:
                    # Readvertise FlowSpec routes between intermediate hops
                    from_redirect_rt = (
                        self.rt_allocator.get_redirect_rt_by_ppg(
                            reversed_ppg[position+2]))

                    hop_detail_obj = sfc_obj.BaGPipeChainHop(
                        context._plugin_context,
                        id=uuidutils.generate_uuid(),
                        project_id=project_id,
                        portchain_id=port_chain['id'],
                        rts=[prev_ppg_rt],
                        ingress_gw=current_subnet['gateway_ip'],
                        egress_gw=prev_subnet['gateway_ip'],
                        reverse_hop=reverse,
                        ingress_ppg=current_ppg['id'],
                        egress_ppg=prev_ppg['id'],
                        readv_from_rts=[from_redirect_rt],
                        readv_to_rt=prev_redirect_rt,
                        redirect_rts=[prev_ppg_rt],
                        classifiers=jsonutils.dumps(classifiers)
                    )
                hop_detail_obj.create()
                hop_details.append(hop_detail_obj)

            # First Hop:
            # - Between Source and first SF ingress ports
            # - Between Destination and last SF egress ports if symmetric
            #   reverse traffic
            if position == 0:
                first_ppg = context._plugin._get_port_pair_group(
                    context._plugin_context, ppg_id)

                first_iports = self._get_ports_by_portpairs_side(
                    first_ppg['port_pairs'], reversed_ingress)

                first_subnet = self._get_subnet_by_port(first_iports[0])

                first_ppg_rt = self.rt_allocator.allocate_rt(
                    ppg_id,
                    reverse=reverse)

                first_redirect_rt = self.rt_allocator.allocate_rt(
                    ppg_id,
                    is_redirect=True,
                    reverse=reverse)

                first_rts = ((dest_rts if reverse else src_rts)
                             if ingress_bgpvpns else [first_ppg_rt])

                if len(reversed_ppg) == 1:
                    # Single-PPG chain: the first hop is also the last one.
                    first_readv_from_rts = ((src_rts if reverse else dest_rts)
                                            if egress_bgpvpns else None)
                    first_readv_to_rt = (first_redirect_rt
                                         if egress_bgpvpns else None)
                    first_attract_to_rt = (first_redirect_rt
                                           if not egress_bgpvpns else None)
                    first_rts = ((dest_rts if reverse else src_rts)
                                 if ingress_bgpvpns else [first_ppg_rt])

                    hop_detail_obj = sfc_obj.BaGPipeChainHop(
                        context._plugin_context,
                        id=uuidutils.generate_uuid(),
                        project_id=project_id,
                        portchain_id=port_chain['id'],
                        rts=first_rts,
                        ingress_gw=(dest_subnet['gateway_ip'] if reverse
                                    else src_subnet['gateway_ip']),
                        egress_gw=first_subnet['gateway_ip'],
                        reverse_hop=reverse,
                        ingress_network=(dest_subnet['network_id'] if reverse
                                         else src_subnet['network_id']),
                        egress_ppg=first_ppg['id'],
                        readv_from_rts=first_readv_from_rts,
                        readv_to_rt=first_readv_to_rt,
                        attract_to_rt=first_attract_to_rt,
                        redirect_rts=first_rts,
                        classifiers=jsonutils.dumps(classifiers)
                    )
                else:
                    from_redirect_rt = (
                        self.rt_allocator.get_redirect_rt_by_ppg(
                            reversed_ppg[position+1]))

                    hop_detail_obj = sfc_obj.BaGPipeChainHop(
                        context._plugin_context,
                        id=uuidutils.generate_uuid(),
                        project_id=project_id,
                        portchain_id=port_chain['id'],
                        rts=first_rts,
                        ingress_gw=(dest_subnet['gateway_ip'] if reverse
                                    else src_subnet['gateway_ip']),
                        egress_gw=first_subnet['gateway_ip'],
                        reverse_hop=reverse,
                        ingress_network=(dest_subnet['network_id'] if reverse
                                         else src_subnet['network_id']),
                        egress_ppg=first_ppg['id'],
                        readv_from_rts=[from_redirect_rt],
                        readv_to_rt=first_redirect_rt,
                        redirect_rts=first_rts,
                        classifiers=jsonutils.dumps(classifiers)
                    )
                hop_detail_obj.create()
                hop_details.append(hop_detail_obj)

        LOG.debug("BaGPipe SFC driver Chain Hop details: %s", hop_details)

        return hop_details
Beispiel #34
0
    def start_fixture(self):
        """Stand up logging capture, config, database and environment
        variables for the gabbi test run."""
        # Drive the existing nova fixtures directly so that output produced
        # outside individual tests (for example database migrations) is
        # captured too.
        self.standard_logging_fixture = capture.Logging()
        self.standard_logging_fixture.setUp()
        self.output_stream_fixture = output.CaptureOutput()
        self.output_stream_fixture.setUp()
        # Filter ignorable warnings during test runs.
        self.warnings_fixture = capture.WarningsFixture()
        self.warnings_fixture.setUp()

        self.conf_fixture = config_fixture.Config(CONF)
        self.conf_fixture.setUp()
        # Configure every database group: the Database fixture will get
        # confused if only one of the databases is configured.
        for db_group in ('placement_database', 'api_database', 'database'):
            self.conf_fixture.config(group=db_group,
                                     connection='sqlite://',
                                     sqlite_synchronous=False)
        self.conf_fixture.config(group='api', auth_strategy='noauth2')

        self.context = context.RequestContext()

        # Register CORS opts, but do not set config. This has the effect of
        # exercising the "don't use cors" path in deploy.py. Without setting
        # some config the group will not be present.
        CONF.register_opts(cors.CORS_OPTS, 'cors')

        # An empty default_config_files list (not None) keeps
        # /etc/nova/nova.conf from being read and confusing results.
        CONF([], default_config_files=[])

        self._reset_db_flags()
        self.placement_db_fixture = fixtures.Database('placement')
        self.placement_db_fixture.setUp()
        # Do this now instead of waiting for the WSGI app to start so that
        # fixtures can have traits.
        deploy.update_database()

        # Export the identifiers the gabbi YAML files interpolate.
        uuid_env_keys = ('RP_UUID', 'RP_NAME', 'PROJECT_ID', 'USER_ID',
                         'PROJECT_ID_ALT', 'USER_ID_ALT', 'INSTANCE_UUID',
                         'MIGRATION_UUID', 'CONSUMER_UUID',
                         'PARENT_PROVIDER_UUID', 'ALT_PARENT_PROVIDER_UUID')
        for env_key in uuid_env_keys:
            os.environ[env_key] = uuidutils.generate_uuid()
        os.environ['CUSTOM_RES_CLASS'] = 'CUSTOM_IRON_NFV'
Beispiel #35
0
 def setUp(self):
     """Create a test node with an instance uuid for the cinder action tests."""
     super(TestCinderActions, self).setUp()
     self.node = object_utils.create_test_node(
         self.context, instance_uuid=uuidutils.generate_uuid())
     # Mount point name used when attaching volumes in the tests.
     self.mount_point = 'ironic_mountpoint'
Beispiel #36
0
 def setUp(self):
     """Create a test node with an instance uuid for the cinder util tests."""
     super(TestCinderUtils, self).setUp()
     self.node = object_utils.create_test_node(
         self.context, instance_uuid=uuidutils.generate_uuid())
Beispiel #37
0
from neutron.db.models import l3 as l3_models
from neutron.objects import l3_hamode
from neutron.objects import network as network_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_objs
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2 import models
from neutron.tests.common import helpers
from neutron.tests.unit import testlib_api

# Host aliases shared by the l2pop DB test cases.
HOST = helpers.HOST
HOST_2 = 'HOST_2'
HOST_3 = 'HOST_3'
# Tunnel endpoint IPs for the additional hosts.
HOST_2_TUNNELING_IP = '20.0.0.2'
HOST_3_TUNNELING_IP = '20.0.0.3'
# Stable identifiers reused across tests; generated once at import time.
TEST_ROUTER_ID = uuidutils.generate_uuid()
TEST_NETWORK_ID = uuidutils.generate_uuid()
TEST_HA_NETWORK_ID = uuidutils.generate_uuid()
PLUGIN_NAME = 'ml2'


class TestL2PopulationDBTestCase(testlib_api.SqlTestCase):
    """DB-backed tests built on a core ml2 plugin and an admin context."""
    def setUp(self):
        super(TestL2PopulationDBTestCase, self).setUp()
        # Core plugin must be in place before objects touch the DB.
        self.setup_coreplugin(PLUGIN_NAME)
        self.ctx = context.get_admin_context()
        self._create_network()

    def _create_network(self, network_id=TEST_NETWORK_ID):
        # Persist a bare Network row; tests only need the id to exist.
        network_obj.Network(self.ctx, id=network_id).create()
Beispiel #38
0
 def setUp(self):
     """Build a TestObject with a random uuid and clear pending changes."""
     super(TestKarborObject, self).setUp()
     self.obj = TestObject(scheduled_at=None,
                           uuid=uuidutils.generate_uuid(),
                           text='text')
     # Start each test with no fields marked as changed.
     self.obj.obj_reset_changes()
Beispiel #39
0
 def test_show_raises_when_no_authtoken_in_the_context(self):
     """show() must fail with ImageNotFound when the context lacks a token."""
     self.context.auth_token = False
     image_id = uuidutils.generate_uuid()
     self.assertRaises(exception.ImageNotFound,
                       self.service.show, image_id)
Beispiel #40
0
 def _create_dscp_marking_rule_obj(self):
     """Return a fresh QosDscpMarkingRule with no pending field changes."""
     dscp_rule = rule.QosDscpMarkingRule()
     dscp_rule.id = uuidutils.generate_uuid()
     dscp_rule.dscp_mark = 32
     dscp_rule.obj_reset_changes()
     return dscp_rule
Beispiel #41
0
    def create_security_group(self, context, security_group, default_sg=False):
        """Create a security group, optionally as the tenant's default.

        If default_sg is True this creates (or returns the already existing)
        default security group for the tenant; the default group also gets
        ingress rules allowing intercommunication between its members.

        :param context: request context used for DB access and notifications
        :param security_group: dict wrapping the 'security_group' attributes
        :param default_sg: when True, treat this as the tenant default group
        :returns: the created (or pre-existing default) security group dict
        :raises SecurityGroupConflict: raised by _registry_notify when a
            BEFORE_CREATE/PRECOMMIT_CREATE callback vetoes the creation
        """
        s = security_group['security_group']
        kwargs = {
            'context': context,
            'security_group': s,
            'is_default': default_sg,
        }
        # Let subscribed callbacks veto the operation before any DB work.
        self._registry_notify(resources.SECURITY_GROUP,
                              events.BEFORE_CREATE,
                              exc_cls=ext_sg.SecurityGroupConflict,
                              payload=events.DBEventPayload(
                                  context,
                                  metadata={'is_default': default_sg},
                                  request_body=security_group,
                                  desired_state=s))

        tenant_id = s['tenant_id']

        if not default_sg:
            # A non-default group still requires the tenant default to exist.
            self._ensure_default_security_group(context, tenant_id)
        else:
            existing_def_sg_id = self._get_default_sg_id(context, tenant_id)
            if existing_def_sg_id is not None:
                # default already exists, return it
                return self.get_security_group(context, existing_def_sg_id)

        with db_api.CONTEXT_WRITER.using(context):
            # Honor a caller-supplied id; otherwise generate one.
            sg = sg_obj.SecurityGroup(context,
                                      id=s.get('id')
                                      or uuidutils.generate_uuid(),
                                      description=s['description'],
                                      project_id=tenant_id,
                                      name=s['name'],
                                      is_default=default_sg)
            sg.create()

            # One egress rule per supported ethertype; the default group
            # additionally gets a self-referencing ingress rule.
            for ethertype in ext_sg.sg_supported_ethertypes:
                if default_sg:
                    # Allow intercommunication
                    ingress_rule = sg_obj.SecurityGroupRule(
                        context,
                        id=uuidutils.generate_uuid(),
                        project_id=tenant_id,
                        security_group_id=sg.id,
                        direction='ingress',
                        ethertype=ethertype,
                        remote_group_id=sg.id)
                    ingress_rule.create()
                    sg.rules.append(ingress_rule)

                egress_rule = sg_obj.SecurityGroupRule(
                    context,
                    id=uuidutils.generate_uuid(),
                    project_id=tenant_id,
                    security_group_id=sg.id,
                    direction='egress',
                    ethertype=ethertype)
                egress_rule.create()
                sg.rules.append(egress_rule)
            sg.obj_reset_changes(['rules'])

            # fetch sg from db to load the sg rules with sg model.
            sg = sg_obj.SecurityGroup.get_object(context, id=sg.id)
            secgroup_dict = self._make_security_group_dict(sg)
            kwargs['security_group'] = secgroup_dict
            # Still inside the transaction: a veto here rolls everything back.
            self._registry_notify(resources.SECURITY_GROUP,
                                  events.PRECOMMIT_CREATE,
                                  exc_cls=ext_sg.SecurityGroupConflict,
                                  **kwargs)

        registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                        **kwargs)
        return secgroup_dict
Beispiel #42
0
 def test_parse_image_id_from_uuid(self):
     """A bare UUID passed as an image href parses back to itself."""
     image_href = uuidutils.generate_uuid()
     self.assertEqual(image_href,
                      service_utils.parse_image_id(image_href))
Beispiel #43
0
    def test_get_federation_list_with_filters(self):
        """Exercise each supported federation list filter, one at a time.

        Two federations with distinct attributes are created, then every
        filter key is checked against both a matching and a non-matching
        value.
        """
        fed1 = utils.create_test_federation(
            id=1,
            uuid=uuidutils.generate_uuid(),
            name='fed1',
            project_id='proj1',
            hostcluster_id='master1',
            member_ids=['member1', 'member2'],
            properties={'dns-zone': 'fed1.com.'})

        fed2 = utils.create_test_federation(
            id=2,
            uuid=uuidutils.generate_uuid(),
            name='fed',
            project_id='proj2',
            hostcluster_id='master2',
            member_ids=['member3', 'member4'],
            properties={"dns-zone": "fed2.com."})

        # NOTE(clenimar): we are specifying a project_id to the test
        # resources above, which means that our current context
        # (self.context) will not be able to see these resources.
        # Create an admin context in order to test the queries:
        ctx = context.make_admin_context(all_tenants=True)

        # Filter by name:
        res = self.dbapi.get_federation_list(ctx, filters={'name': 'fed1'})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx, filters={'name': 'foo'})
        self.assertEqual([], [r.id for r in res])

        # Filter by project_id
        res = self.dbapi.get_federation_list(ctx,
                                             filters={'project_id': 'proj1'})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx,
                                             filters={'project_id': 'foo'})
        self.assertEqual([], [r.id for r in res])

        # Filter by hostcluster_id
        res = self.dbapi.get_federation_list(
            ctx, filters={'hostcluster_id': 'master1'})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(
            ctx, filters={'hostcluster_id': 'master2'})
        self.assertEqual([fed2.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx,
                                             filters={'hostcluster_id': 'foo'})
        self.assertEqual([], [r.id for r in res])

        # Filter by member_ids (please note that it is currently implemented
        # as an exact match. So it will only return federations whose member
        # clusters are exactly those passed as a filter)
        res = self.dbapi.get_federation_list(
            ctx, filters={'member_ids': ['member1', 'member2']})
        self.assertEqual([fed1.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(ctx,
                                             filters={'member_ids': ['foo']})
        self.assertEqual([], [r.id for r in res])

        # Filter by properties
        res = self.dbapi.get_federation_list(
            ctx, filters={'properties': {
                'dns-zone': 'fed2.com.'
            }})
        self.assertEqual([fed2.id], [r.id for r in res])

        res = self.dbapi.get_federation_list(
            ctx, filters={'properties': {
                'dns-zone': 'foo.bar.'
            }})
        self.assertEqual([], [r.id for r in res])
Beispiel #44
0
 def test_parse_image_id_from_glance(self):
     """A glance:// href should parse down to its trailing image UUID."""
     expected_uuid = uuidutils.generate_uuid()
     image_href = u'glance://some-stuff/%s' % expected_uuid
     self.assertEqual(expected_uuid,
                      service_utils.parse_image_id(image_href))
Beispiel #45
0
 def _create_one_router(self):
     """Create a router object backed by a freshly created gateway port."""
     # The gateway port must exist first; the router references its id.
     self.router_gw_port = self._create_one_port(2000, self.fips_network.id)
     self.router = router_obj.Router(self.ctx,
                                     id=uuidutils.generate_uuid(),
                                     gw_port_id=self.router_gw_port.id)
     self.router.create()
 def setUp(self):
     """Pick two unique aggregate names and register their cleanup."""
     super(TestAggregatesNovaClient, self).setUp()
     self.agg1, self.agg2 = ('agg-%s' % uuidutils.generate_uuid()
                             for _ in range(2))
     self.addCleanup(self._clean_aggregates)
Beispiel #47
0
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects.qos import policy
from neutron.objects.qos import rule
from neutron.plugins.ml2.drivers.openvswitch.agent import (
        ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
    ovs_bridge)
from neutron.services.qos import qos_consts
from neutron.tests import base

# Common attributes shared by the policy fixtures below.
BASE_TEST_POLICY = {'context': None,
                    'name': 'test1',
                    'id': uuidutils.generate_uuid()}

TEST_POLICY = policy.QosPolicy(**BASE_TEST_POLICY)

# Same policy data plus a description, to exercise attribute updates.
TEST_POLICY_DESCR = policy.QosPolicy(description='fake_descr',
                                     **BASE_TEST_POLICY)

TEST_POLICY2 = policy.QosPolicy(context=None,
                                name='test2', id=uuidutils.generate_uuid())

TEST_PORT = {'port_id': 'test_port_id',
             'qos_policy_id': TEST_POLICY.id}

TEST_PORT2 = {'port_id': 'test_port_id_2',
              'qos_policy_id': TEST_POLICY2.id}
Beispiel #48
0
 def test_update_federation_not_found(self):
     """Updating a nonexistent federation raises FederationNotFound."""
     missing_uuid = uuidutils.generate_uuid()
     new_values = {'member_ids': ['foo']}
     self.assertRaises(exception.FederationNotFound,
                       self.dbapi.update_federation,
                       missing_uuid, new_values)
Beispiel #49
0
    def test_handle_network_delta(self, mock_get_net_driver):
        """HandleNetworkDelta plugs added NICs and unplugs removed ones;
        its revert must not unplug anything in the cases exercised here."""
        mock_net_driver = mock.MagicMock()
        mock_get_net_driver.return_value = mock_net_driver

        # One NIC to add (nic1) and one to delete, listed three times so the
        # three unplug side effects below are each consumed.
        nic1 = mock.MagicMock()
        nic1.network_id = uuidutils.generate_uuid()
        nic2 = mock.MagicMock()
        nic2.network_id = uuidutils.generate_uuid()
        interface1 = mock.MagicMock()
        interface1.port_id = uuidutils.generate_uuid()
        port1 = mock.MagicMock()
        port1.network_id = uuidutils.generate_uuid()
        fixed_ip = mock.MagicMock()
        fixed_ip.subnet_id = uuidutils.generate_uuid()
        port1.fixed_ips = [fixed_ip]
        subnet = mock.MagicMock()
        network = mock.MagicMock()

        delta = data_models.Delta(amphora_id=self.amphora_mock.id,
                                  compute_id=self.amphora_mock.compute_id,
                                  add_nics=[nic1],
                                  delete_nics=[nic2, nic2, nic2])

        mock_net_driver.plug_network.return_value = interface1
        mock_net_driver.get_port.return_value = port1
        mock_net_driver.get_network.return_value = network
        mock_net_driver.get_subnet.return_value = subnet

        # Unplug outcomes: success, not-found (tolerated), generic error.
        mock_net_driver.unplug_network.side_effect = [
            None, net_base.NetworkNotFound, Exception]

        handle_net_delta_obj = network_tasks.HandleNetworkDelta()
        result = handle_net_delta_obj.execute(self.amphora_mock, delta)

        mock_net_driver.plug_network.assert_called_once_with(
            self.amphora_mock.compute_id, nic1.network_id)
        mock_net_driver.get_port.assert_called_once_with(interface1.port_id)
        mock_net_driver.get_network.assert_called_once_with(port1.network_id)
        mock_net_driver.get_subnet.assert_called_once_with(fixed_ip.subnet_id)

        self.assertEqual({self.amphora_mock.id: [port1]}, result)

        mock_net_driver.unplug_network.assert_called_with(
            self.amphora_mock.compute_id, nic2.network_id)

        # Revert
        delta2 = data_models.Delta(amphora_id=self.amphora_mock.id,
                                   compute_id=self.amphora_mock.compute_id,
                                   add_nics=[nic1, nic1],
                                   delete_nics=[nic2, nic2, nic2])

        # Revert after a failure must not unplug.
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(
            failure.Failure.from_exception(Exception('boom')), None, None)
        mock_net_driver.unplug_network.assert_not_called()

        # Revert with no delta must not unplug either.
        mock_net_driver.unplug_network.reset_mock()
        handle_net_delta_obj.revert(None, None, None)
        mock_net_driver.unplug_network.assert_not_called()

        mock_net_driver.unplug_network.reset_mock()
        # NOTE(review): no assertion follows this call — it only verifies
        # that revert with a real delta does not raise. Confirm intended.
        handle_net_delta_obj.revert(None, None, delta2)
Beispiel #50
0
 def _create_test_port_dict(self, qos_policy_id=None):
     """Build a port dict with a random port id, defaulting the policy id."""
     effective_policy_id = qos_policy_id or TEST_POLICY.id
     return {'port_id': uuidutils.generate_uuid(),
             'qos_policy_id': effective_policy_id}
Beispiel #51
0
 def create_obj_side_effect(obj_cls, context, values, populate_id=True):
     """Side effect for a mocked create: fail once, then succeed.

     Raises DBDuplicateEntry on the first invocation (tracked via the
     enclosing ``counter`` dict) so retry logic can be exercised; later
     calls assign a fresh uuid to the passed class.
     """
     # NOTE(review): populate_id is accepted but never used — presumably
     # it only mirrors the mocked function's signature; confirm.
     if counter['value'] < 1:
         counter['value'] += 1
         raise exc.DBDuplicateEntry()
     obj_cls.id = uuidutils.generate_uuid()
Beispiel #52
0
#    License for the specific language governing permissions and limitations
#    under the License.

import mock
from neutron.tests import base
from neutron_lib.plugins import constants
from oslo_utils import uuidutils

from vmware_nsx.plugins.common.housekeeper import base_job
from vmware_nsx.plugins.nsx_v3.housekeeper import orphaned_dhcp_server

DUMMY_DHCP_SERVER = {
    "resource_type":
    "LogicalDhcpServer",
    "id":
    uuidutils.generate_uuid(),
    "display_name":
    "test",
    "tags": [{
        "scope": "os-neutron-net-id",
        "tag": uuidutils.generate_uuid()
    }, {
        "scope": "os-project-id",
        "tag": uuidutils.generate_uuid()
    }, {
        "scope": "os-project-name",
        "tag": "admin"
    }, {
        "scope": "os-api-version",
        "tag": "13.0.0.0b3.dev90"
    }],
Beispiel #53
0
    def setUp(self):
        """Wire up the QoS plugin with fully mocked object/DB layers and
        prepare reusable policy and rule fixtures."""
        super(TestQosPlugin, self).setUp()
        self.setup_coreplugin(load_plugins=False)

        # All object-level DB access is mocked; no real models are used.
        mock.patch('neutron.objects.db.api.create_object').start()
        mock.patch('neutron.objects.db.api.update_object').start()
        mock.patch('neutron.objects.db.api.delete_object').start()
        mock.patch('neutron.objects.db.api.get_object').start()
        mock.patch(
            'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start()
        # We don't use real models as per mocks above. We also need to mock-out
        # methods that work with real data types
        mock.patch('neutron.objects.base.NeutronDbObject.modify_fields_from_db'
                   ).start()

        cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
        cfg.CONF.set_override("service_plugins", ["qos"])

        manager.init()
        self.qos_plugin = directory.get_plugin(constants.QOS)

        #TODO(mangelajo): Remove notification_driver_manager mock in Pike
        self.qos_plugin.notification_driver_manager = mock.Mock()
        self.qos_plugin.driver_manager = mock.Mock()

        # Capture RPC pushes so tests can assert on notifications.
        self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
                                   '.ResourcesPushRpcApi.push').start()

        self.ctxt = context.Context('fake_user', 'fake_tenant')
        mock.patch.object(self.ctxt.session, 'refresh').start()
        mock.patch.object(self.ctxt.session, 'expunge').start()

        # Fixture data for a shared policy and two rule types.
        self.policy_data = {
            'policy': {
                'id': uuidutils.generate_uuid(),
                'project_id': uuidutils.generate_uuid(),
                'name': 'test-policy',
                'description': 'Test policy description',
                'shared': True
            }
        }

        self.rule_data = {
            'bandwidth_limit_rule': {
                'id': uuidutils.generate_uuid(),
                'max_kbps': 100,
                'max_burst_kbps': 150
            },
            'dscp_marking_rule': {
                'id': uuidutils.generate_uuid(),
                'dscp_mark': 16
            }
        }

        self.policy = policy_object.QosPolicy(self.ctxt,
                                              **self.policy_data['policy'])

        self.rule = rule_object.QosBandwidthLimitRule(
            self.ctxt, **self.rule_data['bandwidth_limit_rule'])

        self.dscp_rule = rule_object.QosDscpMarkingRule(
            self.ctxt, **self.rule_data['dscp_marking_rule'])
Beispiel #54
0
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure

from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.controller.worker.tasks import network_tasks
from octavia.network import base as net_base
from octavia.network import data_models
from octavia.tests.common import constants as t_constants
import octavia.tests.unit.base as base


# Identifiers and addresses shared by the network task tests below.
AMPHORA_ID = 7
COMPUTE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
NETWORK_ID = uuidutils.generate_uuid()
IP_ADDRESS = "172.24.41.1"
# Two VIP/load-balancer fixture pairs built from the shared mock constants.
VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID,
                        subnet_id=t_constants.MOCK_SUBNET_ID,
                        qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2,
                         subnet_id=t_constants.MOCK_SUBNET_ID2,
                         qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2)
LB = o_data_models.LoadBalancer(vip=VIP)
LB2 = o_data_models.LoadBalancer(vip=VIP2)
FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID}
FIXED_IPS = [FIRST_IP]
INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(),
Beispiel #55
0
    def test_add_4_mix_bbbb(self):
        """Add four storage hosts one by one and verify peer-group layout.

        Hosts are expected to pair up: storage-0/1 into group-0 and
        storage-2/3 into group-1; peer membership is re-checked after
        every addition.
        """
        # Mock fsid with a faux cluster_uuid
        cluster_uuid = uuidutils.generate_uuid()
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        # First host lands alone in group-0.
        storage_0 = self._create_storage_ihost('storage-0')
        self.service._ceph.update_ceph_cluster(storage_0)
        ihost = self.dbapi.ihost_get(storage_0.id)
        self.assertEqual(storage_0.id, ihost.id)
        peer = self.dbapi.peer_get(ihost.peer_id)
        self.assertEqual(peer.name, 'group-0')
        self.assertIn(ihost.hostname, peer.hosts)

        peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
        self.assertEqual(
            set([(p.name, tuple(sorted(p.hosts))) for p in peers]), {
                ('group-0', ('storage-0', )),
            })

        # Second host joins group-0, completing the pair.
        storage_1 = self._create_storage_ihost('storage-1')
        self.service._ceph.update_ceph_cluster(storage_1)
        ihost = self.dbapi.ihost_get(storage_1.id)
        self.assertEqual(storage_1.id, ihost.id)
        peer = self.dbapi.peer_get(ihost.peer_id)
        self.assertEqual(peer.name, 'group-0')
        self.assertIn(ihost.hostname, peer.hosts)

        peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
        self.assertEqual(
            set([(p.name, tuple(sorted(p.hosts))) for p in peers]), {
                ('group-0', ('storage-0', 'storage-1')),
            })

        # Third host opens a new peer group (group-1).
        storage_2 = self._create_storage_ihost('storage-2')
        self.service._ceph.update_ceph_cluster(storage_2)
        ihost = self.dbapi.ihost_get(storage_2.id)
        self.assertEqual(storage_2.id, ihost.id)
        peer = self.dbapi.peer_get(ihost.peer_id)
        self.assertEqual(peer.name, 'group-1')
        self.assertIn(ihost.hostname, peer.hosts)

        peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
        self.assertEqual(
            set([(p.name, tuple(sorted(p.hosts))) for p in peers]),
            {('group-0', ('storage-0', 'storage-1')),
             ('group-1', ('storage-2', ))})

        # Fourth host completes group-1.
        storage_3 = self._create_storage_ihost('storage-3')
        self.service._ceph.update_ceph_cluster(storage_3)
        ihost = self.dbapi.ihost_get(storage_3.id)
        self.assertEqual(storage_3.id, ihost.id)
        peer = self.dbapi.peer_get(ihost.peer_id)
        self.assertEqual(peer.name, 'group-1')
        self.assertIn(ihost.hostname, peer.hosts)

        peers = self.dbapi.peers_get_all_by_cluster(cluster_uuid)
        self.assertEqual(
            set([(p.name, tuple(sorted(p.hosts))) for p in peers]),
            {('group-0', ('storage-0', 'storage-1')),
             ('group-1', ('storage-2', 'storage-3'))})
Beispiel #56
0
 def setUp(self):
     """Create a TypeManager, a mocked context and a random network dict."""
     super(TypeManagerTestCase, self).setUp()
     self.type_manager = managers.TypeManager()
     self.ctx = mock.Mock()
     network_id, project_id = (uuidutils.generate_uuid()
                               for _ in range(2))
     self.network = {'id': network_id,
                     'project_id': project_id}
Beispiel #57
0
    def __init__(self):
        self.project_id = uuidutils.generate_uuid()
        self.lb_id = uuidutils.generate_uuid()
        self.ip_address = '192.0.2.30'
        self.port_id = uuidutils.generate_uuid()
        self.network_id = uuidutils.generate_uuid()
        self.subnet_id = uuidutils.generate_uuid()
        self.qos_policy_id = uuidutils.generate_uuid()

        self.listener1_id = uuidutils.generate_uuid()
        self.listener2_id = uuidutils.generate_uuid()
        self.default_tls_container_ref = uuidutils.generate_uuid()
        self.sni_container_ref_1 = uuidutils.generate_uuid()
        self.sni_container_ref_2 = uuidutils.generate_uuid()

        self.pool1_id = uuidutils.generate_uuid()
        self.pool2_id = uuidutils.generate_uuid()

        self.hm1_id = uuidutils.generate_uuid()
        self.hm2_id = uuidutils.generate_uuid()

        self.member1_id = uuidutils.generate_uuid()
        self.member2_id = uuidutils.generate_uuid()
        self.member3_id = uuidutils.generate_uuid()
        self.member4_id = uuidutils.generate_uuid()

        self.l7policy1_id = uuidutils.generate_uuid()
        self.l7policy2_id = uuidutils.generate_uuid()

        self.l7rule1_id = uuidutils.generate_uuid()
        self.l7rule2_id = uuidutils.generate_uuid()

        self._common_test_dict = {'provisioning_status': constants.ACTIVE,
                                  'operating_status': constants.ONLINE,
                                  'project_id': self.project_id,
                                  'created_at': 'then',
                                  'updated_at': 'now',
                                  'enabled': True}

        # Setup Health Monitors
        self.test_hm1_dict = {'id': self.hm1_id,
                              'type': constants.HEALTH_MONITOR_PING,
                              'delay': 1, 'timeout': 3, 'fall_threshold': 1,
                              'rise_threshold': 2, 'http_method': 'GET',
                              'url_path': '/', 'expected_codes': '200',
                              'name': 'hm1', 'pool_id': self.pool1_id}

        self.test_hm1_dict.update(self._common_test_dict)

        self.test_hm2_dict = copy.deepcopy(self.test_hm1_dict)
        self.test_hm2_dict['id'] = self.hm2_id
        self.test_hm2_dict['name'] = 'hm2'

        self.db_hm1 = data_models.HealthMonitor(**self.test_hm1_dict)
        self.db_hm2 = data_models.HealthMonitor(**self.test_hm2_dict)

        self.provider_hm1_dict = {'admin_state_up': True,
                                  'delay': 1, 'expected_codes': '200',
                                  'healthmonitor_id': self.hm1_id,
                                  'http_method': 'GET',
                                  'max_retries': 2,
                                  'max_retries_down': 1,
                                  'name': 'hm1',
                                  'pool_id': self.pool1_id,
                                  'timeout': 3,
                                  'type': constants.HEALTH_MONITOR_PING,
                                  'url_path': '/'}

        self.provider_hm2_dict = copy.deepcopy(self.provider_hm1_dict)
        self.provider_hm2_dict['healthmonitor_id'] = self.hm2_id
        self.provider_hm2_dict['name'] = 'hm2'

        self.provider_hm1 = driver_dm.HealthMonitor(**self.provider_hm1_dict)
        self.provider_hm2 = driver_dm.HealthMonitor(**self.provider_hm2_dict)

        # Setup Members
        self.test_member1_dict = {'id': self.member1_id,
                                  'pool_id': self.pool1_id,
                                  'ip_address': '192.0.2.16',
                                  'protocol_port': 80, 'weight': 0,
                                  'backup': False,
                                  'subnet_id': self.subnet_id,
                                  'pool': None,
                                  'name': 'member1',
                                  'monitor_address': '192.0.2.26',
                                  'monitor_port': 81}

        self.test_member1_dict.update(self._common_test_dict)

        self.test_member2_dict = copy.deepcopy(self.test_member1_dict)
        self.test_member2_dict['id'] = self.member2_id
        self.test_member2_dict['ip_address'] = '192.0.2.17'
        self.test_member2_dict['monitor_address'] = '192.0.2.27'
        self.test_member2_dict['name'] = 'member2'

        self.test_member3_dict = copy.deepcopy(self.test_member1_dict)
        self.test_member3_dict['id'] = self.member3_id
        self.test_member3_dict['ip_address'] = '192.0.2.18'
        self.test_member3_dict['monitor_address'] = '192.0.2.28'
        self.test_member3_dict['name'] = 'member3'
        self.test_member3_dict['pool_id'] = self.pool2_id

        self.test_member4_dict = copy.deepcopy(self.test_member1_dict)
        self.test_member4_dict['id'] = self.member4_id
        self.test_member4_dict['ip_address'] = '192.0.2.19'
        self.test_member4_dict['monitor_address'] = '192.0.2.29'
        self.test_member4_dict['name'] = 'member4'
        self.test_member4_dict['pool_id'] = self.pool2_id

        self.test_pool1_members_dict = [self.test_member1_dict,
                                        self.test_member2_dict]
        self.test_pool2_members_dict = [self.test_member3_dict,
                                        self.test_member4_dict]

        self.db_member1 = data_models.Member(**self.test_member1_dict)
        self.db_member2 = data_models.Member(**self.test_member2_dict)
        self.db_member3 = data_models.Member(**self.test_member3_dict)
        self.db_member4 = data_models.Member(**self.test_member4_dict)

        self.db_pool1_members = [self.db_member1, self.db_member2]
        self.db_pool2_members = [self.db_member3, self.db_member4]

        self.provider_member1_dict = {'address': '192.0.2.16',
                                      'admin_state_up': True,
                                      'member_id': self.member1_id,
                                      'monitor_address': '192.0.2.26',
                                      'monitor_port': 81,
                                      'name': 'member1',
                                      'pool_id': self.pool1_id,
                                      'protocol_port': 80,
                                      'subnet_id': self.subnet_id,
                                      'weight': 0,
                                      'backup': False}

        self.provider_member2_dict = copy.deepcopy(self.provider_member1_dict)
        self.provider_member2_dict['member_id'] = self.member2_id
        self.provider_member2_dict['address'] = '192.0.2.17'
        self.provider_member2_dict['monitor_address'] = '192.0.2.27'
        self.provider_member2_dict['name'] = 'member2'

        self.provider_member3_dict = copy.deepcopy(self.provider_member1_dict)
        self.provider_member3_dict['member_id'] = self.member3_id
        self.provider_member3_dict['address'] = '192.0.2.18'
        self.provider_member3_dict['monitor_address'] = '192.0.2.28'
        self.provider_member3_dict['name'] = 'member3'
        self.provider_member3_dict['pool_id'] = self.pool2_id

        self.provider_member4_dict = copy.deepcopy(self.provider_member1_dict)
        self.provider_member4_dict['member_id'] = self.member4_id
        self.provider_member4_dict['address'] = '192.0.2.19'
        self.provider_member4_dict['monitor_address'] = '192.0.2.29'
        self.provider_member4_dict['name'] = 'member4'
        self.provider_member4_dict['pool_id'] = self.pool2_id

        self.provider_pool1_members_dict = [self.provider_member1_dict,
                                            self.provider_member2_dict]

        self.provider_pool2_members_dict = [self.provider_member3_dict,
                                            self.provider_member4_dict]

        self.provider_member1 = driver_dm.Member(**self.provider_member1_dict)
        self.provider_member2 = driver_dm.Member(**self.provider_member2_dict)
        self.provider_member3 = driver_dm.Member(**self.provider_member3_dict)
        self.provider_member4 = driver_dm.Member(**self.provider_member4_dict)

        self.provider_pool1_members = [self.provider_member1,
                                       self.provider_member2]
        self.provider_pool2_members = [self.provider_member3,
                                       self.provider_member4]

        # Setup test pools
        self.test_pool1_dict = {'id': self.pool1_id,
                                'name': 'pool1', 'description': 'Pool 1',
                                'load_balancer_id': self.lb_id,
                                'protocol': 'avian',
                                'lb_algorithm': 'round_robin',
                                'members': self.test_pool1_members_dict,
                                'health_monitor': self.test_hm1_dict,
                                'session_persistence': {'type': 'SOURCE'},
                                'listeners': [],
                                'l7policies': []}

        self.test_pool1_dict.update(self._common_test_dict)

        self.test_pool2_dict = copy.deepcopy(self.test_pool1_dict)
        self.test_pool2_dict['id'] = self.pool2_id
        self.test_pool2_dict['name'] = 'pool2'
        self.test_pool2_dict['description'] = 'Pool 2'
        self.test_pool2_dict['members'] = self.test_pool2_members_dict

        self.test_pools = [self.test_pool1_dict, self.test_pool2_dict]

        self.db_pool1 = data_models.Pool(**self.test_pool1_dict)
        self.db_pool1.health_monitor = self.db_hm1
        self.db_pool1.members = self.db_pool1_members
        self.db_pool2 = data_models.Pool(**self.test_pool2_dict)
        self.db_pool2.health_monitor = self.db_hm2
        self.db_pool2.members = self.db_pool2_members

        self.test_db_pools = [self.db_pool1, self.db_pool2]

        self.provider_pool1_dict = {
            'admin_state_up': True,
            'description': 'Pool 1',
            'healthmonitor': self.provider_hm1_dict,
            'lb_algorithm': 'round_robin',
            'loadbalancer_id': self.lb_id,
            'members': self.provider_pool1_members_dict,
            'name': 'pool1',
            'pool_id': self.pool1_id,
            'protocol': 'avian',
            'session_persistence': {'type': 'SOURCE'}}

        self.provider_pool2_dict = copy.deepcopy(self.provider_pool1_dict)
        self.provider_pool2_dict['pool_id'] = self.pool2_id
        self.provider_pool2_dict['name'] = 'pool2'
        self.provider_pool2_dict['description'] = 'Pool 2'
        self.provider_pool2_dict['members'] = self.provider_pool2_members_dict
        self.provider_pool2_dict['healthmonitor'] = self.provider_hm2_dict

        self.provider_pool1 = driver_dm.Pool(**self.provider_pool1_dict)
        self.provider_pool1.members = self.provider_pool1_members
        self.provider_pool1.healthmonitor = self.provider_hm1
        self.provider_pool2 = driver_dm.Pool(**self.provider_pool2_dict)
        self.provider_pool2.members = self.provider_pool2_members
        self.provider_pool2.healthmonitor = self.provider_hm2

        self.provider_pools = [self.provider_pool1, self.provider_pool2]

        # Setup L7Rules
        self.test_l7rule1_dict = {'id': self.l7rule1_id,
                                  'l7policy_id': self.l7policy1_id,
                                  'type': 'o',
                                  'compare_type': 'fake_type',
                                  'key': 'fake_key',
                                  'value': 'fake_value',
                                  'l7policy': None,
                                  'invert': False}

        self.test_l7rule1_dict.update(self._common_test_dict)

        self.test_l7rule2_dict = copy.deepcopy(self.test_l7rule1_dict)
        self.test_l7rule2_dict['id'] = self.l7rule2_id

        self.test_l7rules = [self.test_l7rule1_dict, self.test_l7rule2_dict]

        self.db_l7Rule1 = data_models.L7Rule(**self.test_l7rule1_dict)
        self.db_l7Rule2 = data_models.L7Rule(**self.test_l7rule2_dict)

        self.db_l7Rules = [self.db_l7Rule1, self.db_l7Rule2]

        self.provider_l7rule1_dict = {'admin_state_up': True,
                                      'compare_type': 'fake_type',
                                      'invert': False,
                                      'key': 'fake_key',
                                      'l7policy_id': self.l7policy1_id,
                                      'l7rule_id': self.l7rule1_id,
                                      'type': 'o',
                                      'value': 'fake_value'}

        self.provider_l7rule2_dict = copy.deepcopy(self.provider_l7rule1_dict)
        self.provider_l7rule2_dict['l7rule_id'] = self.l7rule2_id
        self.provider_l7rules_dicts = [self.provider_l7rule1_dict,
                                       self.provider_l7rule2_dict]

        self.provider_l7rule1 = driver_dm.L7Rule(**self.provider_l7rule1_dict)
        self.provider_l7rule2 = driver_dm.L7Rule(**self.provider_l7rule2_dict)

        self.provider_rules = [self.provider_l7rule1, self.provider_l7rule2]

        # Setup L7Policies
        self.test_l7policy1_dict = {'id': self.l7policy1_id,
                                    'name': 'l7policy_1',
                                    'description': 'L7policy 1',
                                    'listener_id': self.listener1_id,
                                    'action': 'go',
                                    'redirect_pool_id': self.pool1_id,
                                    'redirect_url': '/index.html',
                                    'position': 1,
                                    'listener': None,
                                    'redirect_pool': None,
                                    'l7rules': self.test_l7rules}

        self.test_l7policy1_dict.update(self._common_test_dict)

        self.test_l7policy2_dict = copy.deepcopy(self.test_l7policy1_dict)
        self.test_l7policy2_dict['id'] = self.l7policy2_id
        self.test_l7policy2_dict['name'] = 'l7policy_2'
        self.test_l7policy2_dict['description'] = 'L7policy 2'

        self.test_l7policies = [self.test_l7policy1_dict,
                                self.test_l7policy2_dict]

        self.db_l7policy1 = data_models.L7Policy(**self.test_l7policy1_dict)
        self.db_l7policy2 = data_models.L7Policy(**self.test_l7policy2_dict)
        self.db_l7policy1.l7rules = self.db_l7Rules
        self.db_l7policy2.l7rules = self.db_l7Rules

        self.db_l7policies = [self.db_l7policy1, self.db_l7policy2]

        self.provider_l7policy1_dict = {'action': 'go',
                                        'admin_state_up': True,
                                        'description': 'L7policy 1',
                                        'l7policy_id': self.l7policy1_id,
                                        'listener_id': self.listener1_id,
                                        'name': 'l7policy_1',
                                        'position': 1,
                                        'redirect_pool_id': self.pool1_id,
                                        'redirect_url': '/index.html',
                                        'rules': self.provider_l7rules_dicts}

        self.provider_l7policy2_dict = copy.deepcopy(
            self.provider_l7policy1_dict)
        self.provider_l7policy2_dict['l7policy_id'] = self.l7policy2_id
        self.provider_l7policy2_dict['name'] = 'l7policy_2'
        self.provider_l7policy2_dict['description'] = 'L7policy 2'

        self.provider_l7policies_dict = [self.provider_l7policy1_dict,
                                         self.provider_l7policy2_dict]

        self.provider_l7policy1 = driver_dm.L7Policy(
            **self.provider_l7policy1_dict)
        self.provider_l7policy1.rules = self.provider_rules
        self.provider_l7policy2 = driver_dm.L7Policy(
            **self.provider_l7policy2_dict)
        self.provider_l7policy2.rules = self.provider_rules

        self.provider_l7policies = [self.provider_l7policy1,
                                    self.provider_l7policy2]

        # Setup Listeners
        self.test_listener1_dict = {
            'id': self.listener1_id,
            'name': 'listener_1',
            'description': 'Listener 1',
            'default_pool_id': self.pool1_id,
            'load_balancer_id': self.lb_id,
            'protocol': 'avian',
            'protocol_port': 90,
            'connection_limit': 10000,
            'tls_certificate_id': self.default_tls_container_ref,
            'stats': None,
            'default_pool': self.test_pool1_dict,
            'load_balancer': None,
            'sni_containers': [self.sni_container_ref_1,
                               self.sni_container_ref_2],
            'peer_port': 55,
            'l7policies': self.test_l7policies,
            'insert_headers': {},
            'pools': None,
            'timeout_client_data': 1000,
            'timeout_member_connect': 2000,
            'timeout_member_data': 3000,
            'timeout_tcp_inspect': 4000}

        self.test_listener1_dict.update(self._common_test_dict)

        self.test_listener2_dict = copy.deepcopy(self.test_listener1_dict)
        self.test_listener2_dict['id'] = self.listener2_id
        self.test_listener2_dict['name'] = 'listener_2'
        self.test_listener2_dict['description'] = 'Listener 1'
        self.test_listener2_dict['default_pool_id'] = self.pool2_id
        self.test_listener2_dict['default_pool'] = self.test_pool2_dict
        del self.test_listener2_dict['l7policies']
        del self.test_listener2_dict['sni_containers']

        self.test_listeners = [self.test_listener1_dict,
                               self.test_listener2_dict]

        self.db_listener1 = data_models.Listener(**self.test_listener1_dict)
        self.db_listener2 = data_models.Listener(**self.test_listener2_dict)
        self.db_listener1.default_pool = self.db_pool1
        self.db_listener2.default_pool = self.db_pool2
        self.db_listener1.l7policies = self.db_l7policies
        self.db_listener1.sni_containers = [
            data_models.SNI(tls_container_id='2'),
            data_models.SNI(tls_container_id='3')]

        self.test_db_listeners = [self.db_listener1, self.db_listener2]

        cert1 = data_models.TLSContainer(certificate='cert 1')
        cert2 = data_models.TLSContainer(certificate='cert 2')
        cert3 = data_models.TLSContainer(certificate='cert 3')

        self.provider_listener1_dict = {
            'admin_state_up': True,
            'connection_limit': 10000,
            'default_pool': self.provider_pool1_dict,
            'default_pool_id': self.pool1_id,
            'default_tls_container_data': cert1.to_dict(),
            'default_tls_container_ref': self.default_tls_container_ref,
            'description': 'Listener 1',
            'insert_headers': {},
            'l7policies': self.provider_l7policies_dict,
            'listener_id': self.listener1_id,
            'loadbalancer_id': self.lb_id,
            'name': 'listener_1',
            'protocol': 'avian',
            'protocol_port': 90,
            'sni_container_data': [cert2.to_dict(), cert3.to_dict()],
            'sni_container_refs': [self.sni_container_ref_1,
                                   self.sni_container_ref_2],
            'timeout_client_data': 1000,
            'timeout_member_connect': 2000,
            'timeout_member_data': 3000,
            'timeout_tcp_inspect': 4000}

        self.provider_listener2_dict = copy.deepcopy(
            self.provider_listener1_dict)
        self.provider_listener2_dict['listener_id'] = self.listener2_id
        self.provider_listener2_dict['name'] = 'listener_2'
        self.provider_listener2_dict['description'] = 'Listener 1'
        self.provider_listener2_dict['default_pool_id'] = self.pool2_id
        self.provider_listener2_dict['default_pool'] = self.provider_pool2_dict
        del self.provider_listener2_dict['l7policies']

        self.provider_listener1 = driver_dm.Listener(
            **self.provider_listener1_dict)
        self.provider_listener2 = driver_dm.Listener(
            **self.provider_listener2_dict)
        self.provider_listener1.default_pool = self.provider_pool1
        self.provider_listener2.default_pool = self.provider_pool2
        self.provider_listener1.l7policies = self.provider_l7policies

        self.provider_listeners = [self.provider_listener1,
                                   self.provider_listener2]

        self.test_vip_dict = {'ip_address': self.ip_address,
                              'network_id': self.network_id,
                              'port_id': self.port_id,
                              'subnet_id': self.subnet_id,
                              'qos_policy_id': self.qos_policy_id}

        self.provider_vip_dict = {
            'vip_address': self.ip_address,
            'vip_network_id': self.network_id,
            'vip_port_id': self.port_id,
            'vip_subnet_id': self.subnet_id,
            'vip_qos_policy_id': self.qos_policy_id}

        self.db_vip = data_models.Vip(
            ip_address=self.ip_address,
            network_id=self.network_id,
            port_id=self.port_id,
            subnet_id=self.subnet_id,
            qos_policy_id=self.qos_policy_id)
 def _create_test_port_dict(self, device_owner):
     """Build a minimal fake port dict with a random port_id.

     :param device_owner: value stored under the 'device_owner' key.
     :returns: dict with a freshly generated 'port_id' and the given
               'device_owner'.
     """
     port = {
         'port_id': uuidutils.generate_uuid(),
         'device_owner': device_owner,
     }
     return port
Beispiel #59
0
 def test_policy_disallow_detail(self):
     """Check that the 'cluster:detail' policy denies the detail GET."""
     url = '/clusters/%s/detail' % uuidutils.generate_uuid()
     self._common_policy_check(
         "cluster:detail", self.get_json, url, expect_errors=True)
Beispiel #60
0
    def test_cgts_7208(self):
        """Regression test (bug CGTS-7208) for Ceph peer-group recovery.

        Creates four storage hosts, drives them through the unlock path so
        they are assigned to replication peer groups, then rebuilds the
        CephOperator from the database (simulating a conductor swact) and
        verifies the cluster identity and peer-group membership survive.
        """
        # Four storage hosts created in order; presumably they pair up two
        # per peer group (replication factor 2) — see expected_groups below.
        hosts = [
            self._create_storage_ihost('storage-0'),
            self._create_storage_ihost('storage-1'),
            self._create_storage_ihost('storage-2'),
            self._create_storage_ihost('storage-3')
        ]

        # Expected peer group name per host after the first pass.
        expected_groups = {
            'storage-0': 'group-0',
            'storage-1': 'group-0',
            'storage-2': 'group-1',
            'storage-3': 'group-1'
        }

        # Peer host sets as they should look *incrementally*: each host only
        # sees peers that have been processed before (or with) it.
        expected_peer_hosts = {
            'storage-0': {'storage-0'},
            'storage-1': {'storage-0', 'storage-1'},
            'storage-2': {'storage-2'},
            'storage-3': {'storage-2', 'storage-3'}
        }

        saved_ihosts = []
        # After the second pass (post-swact) every group is complete, so each
        # host should see its full peer pair.
        expected_peer_hosts2 = {
            'storage-0': {'storage-0', 'storage-1'},
            'storage-1': {'storage-0', 'storage-1'},
            'storage-2': {'storage-2', 'storage-3'},
            'storage-3': {'storage-2', 'storage-3'}
        }

        # Mock fsid with a faux cluster_uuid so _init_ceph_cluster_info()
        # can record a cluster identity without a real Ceph backend.
        cluster_uuid = uuidutils.generate_uuid()
        with mock.patch.object(ceph.CephWrapper, 'fsid') as mock_fsid:
            mock_fsid.return_value = (mock.MagicMock(ok=True), cluster_uuid)
            self.service.start()
            self.service._init_ceph_cluster_info()
            mock_fsid.assert_called()

        for h in hosts:
            # unlock host: update_ceph_cluster() assigns the host to a peer
            # group; verify group name and (incremental) peer membership.
            self.service._ceph.update_ceph_cluster(h)
            ihost = self.dbapi.ihost_get(h.id)
            self.assertEqual(h.id, ihost.id)
            peer = self.dbapi.peer_get(ihost.peer_id)
            self.assertEqual(peer.name, expected_groups[h.hostname])
            self.assertEqual(set(peer.hosts), expected_peer_hosts[h.hostname])
            saved_ihosts.append(ihost)

        # On a swact we get a new conductor and an fresh CephOperator.
        # Save the identity the first operator derived so we can verify the
        # replacement recovers the same values from the database.
        saved_ceph_uuid = self.service._ceph.cluster_ceph_uuid
        saved_db_uuid = self.service._ceph.cluster_db_uuid
        saved_cluster_id = self.service._ceph.cluster_id

        del self.service._ceph
        self.service._ceph = iceph.CephOperator(self.service.dbapi)
        self.assertEqual(self.service._ceph.cluster_ceph_uuid, saved_ceph_uuid)
        self.assertEqual(self.service._ceph.cluster_db_uuid, saved_db_uuid)
        self.assertEqual(self.service._ceph.cluster_id, saved_cluster_id)

        for h in saved_ihosts:
            # unlock host again with the rebuilt operator: peer groups must be
            # unchanged and every host now sees its complete peer set.
            self.service._ceph.update_ceph_cluster(h)
            peer = self.dbapi.peer_get(h.peer_id)
            self.assertEqual(peer.name, expected_groups[h.hostname])
            self.assertEqual(set(peer.hosts), expected_peer_hosts2[h.hostname])