def test_update_router_interface(self):
    """Update a router interface with both, then each single, address field."""
    t = uuidutils.generate_uuid()
    r = uuidutils.generate_uuid()
    p = uuidutils.generate_uuid()
    router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
    inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
    ip_address = '10.1.1.1/24'
    mac_address = '11:22:33:44:55:66'
    # Record one expected PUT per field combination (mox record phase).
    body = {'ip_address': ip_address, 'mac_address': mac_address}
    ofc.OFCClient.do_request("PUT", inf_path, body=body)
    body = {'ip_address': ip_address}
    ofc.OFCClient.do_request("PUT", inf_path, body=body)
    body = {'mac_address': mac_address}
    ofc.OFCClient.do_request("PUT", inf_path, body=body)
    self.mox.ReplayAll()
    # Replay: exercise the driver with the same three combinations.
    self.driver.update_router_interface(inf_path, ip_address, mac_address)
    self.driver.update_router_interface(inf_path, ip_address=ip_address)
    self.driver.update_router_interface(inf_path, mac_address=mac_address)
    self.mox.VerifyAll()
def test_create_for_sg_rule(self):
    """Creating an SG rule adds the expected rule to the egress chain."""
    sg_id = uuidutils.generate_uuid()
    sg_name = 'test-sg'
    in_chain_id = uuidutils.generate_uuid()
    out_chain_id = uuidutils.generate_uuid()
    self.mock_api_cfg.chains_in = [
        _create_test_sg_in_chain(sg_id, sg_name, in_chain_id,
                                 self._tenant_id),
        _create_test_sg_out_chain(sg_id, sg_name, out_chain_id,
                                  self._tenant_id)]
    sg_rule_id = uuidutils.generate_uuid()
    sg_rule = _create_test_sg_rule(self._tenant_id, sg_id, sg_rule_id)
    props = {"os_sg_rule_id": sg_rule_id}
    calls = [mock.call.add_rule().port_group(None).type(
        'accept').nw_proto(6).nw_src_address(
        '192.168.1.0').nw_src_length(24).tp_src_start(
        None).tp_src_end(None).tp_dst_start(1).tp_dst_end(
        65535).properties(props).create()]
    self.client.create_for_sg_rule(sg_rule)
    # Egress chain rule added
    self.mock_api_cfg.chains_out[0].assert_has_calls(calls)
def test_create_and_show_servicechain_instance(self):
    """Create a servicechain instance, show it, then delete it."""
    scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
    policy_target_group_id = uuidutils.generate_uuid()
    classifier_id = uuidutils.generate_uuid()
    config_param_values = "{}"
    attrs = self._get_test_servicechain_instance_attrs(
        servicechain_spec=scs_id,
        provider_ptg_id=policy_target_group_id,
        consumer_ptg_id=policy_target_group_id,
        classifier_id=classifier_id,
        config_param_values=config_param_values)
    sci = self.create_servicechain_instance(
        servicechain_spec=scs_id,
        provider_ptg_id=policy_target_group_id,
        consumer_ptg_id=policy_target_group_id,
        classifier_id=classifier_id,
        config_param_values=config_param_values)
    # Every expected attribute must round-trip through the API.
    for k, v in attrs.iteritems():
        self.assertEqual(sci['servicechain_instance'][k], v)
    self._test_show_resource('servicechain_instance',
                             sci['servicechain_instance']['id'], attrs)
    req = self.new_delete_request('servicechain_instances',
                                  sci['servicechain_instance']['id'])
    req.get_response(self.ext_api)
def create_security_group(self, context, security_group, default_sg=False):
    """Create security group.

    If default_sg is true that means we are a default security group for
    a given tenant if it does not exist.
    """
    s = security_group["security_group"]
    kwargs = {"context": context, "security_group": s,
              "is_default": default_sg}
    # NOTE(armax): a callback exception here will prevent the request
    # from being processed. This is a hook point for backend's validation;
    # we raise to propagate the reason for the failure.
    try:
        registry.notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                        self, **kwargs)
    except exceptions.CallbackFailure as e:
        raise ext_sg.SecurityGroupConflict(reason=e)
    tenant_id = self._get_tenant_id_for_create(context, s)
    if not default_sg:
        self._ensure_default_security_group(context, tenant_id)
    with context.session.begin(subtransactions=True):
        security_group_db = SecurityGroup(
            id=s.get("id") or (uuidutils.generate_uuid()),
            description=s["description"],
            tenant_id=tenant_id,
            name=s["name"])
        context.session.add(security_group_db)
        if default_sg:
            context.session.add(DefaultSecurityGroup(
                security_group=security_group_db,
                tenant_id=security_group_db["tenant_id"]))
        for ethertype in ext_sg.sg_supported_ethertypes:
            if default_sg:
                # Allow intercommunication
                ingress_rule = SecurityGroupRule(
                    id=uuidutils.generate_uuid(),
                    tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction="ingress",
                    ethertype=ethertype,
                    source_group=security_group_db)
                context.session.add(ingress_rule)
            egress_rule = SecurityGroupRule(
                id=uuidutils.generate_uuid(),
                tenant_id=tenant_id,
                security_group=security_group_db,
                direction="egress",
                ethertype=ethertype)
            context.session.add(egress_rule)
        secgroup_dict = self._make_security_group_dict(security_group_db)
    kwargs["security_group"] = secgroup_dict
    registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                    **kwargs)
    return secgroup_dict
def test_update_servicechain_instance(self):
    """Update name/description/spec of an instance and verify the result."""
    name = "new_servicechain_instance"
    description = 'new desc'
    config_param_values = "{}"
    scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
    provider_ptg_id = uuidutils.generate_uuid()
    consumer_ptg_id = uuidutils.generate_uuid()
    classifier_id = uuidutils.generate_uuid()
    attrs = self._get_test_servicechain_instance_attrs(
        name=name, description=description, servicechain_spec=scs_id,
        provider_ptg_id=provider_ptg_id, consumer_ptg_id=consumer_ptg_id,
        classifier_id=classifier_id,
        config_param_values=config_param_values)
    sci = self.create_servicechain_instance(
        servicechain_spec=scs_id, provider_ptg_id=provider_ptg_id,
        consumer_ptg_id=consumer_ptg_id, classifier_id=classifier_id,
        config_param_values=config_param_values)
    data = {'servicechain_instance': {'name': name,
                                      'description': description,
                                      'servicechain_spec': scs_id}}
    req = self.new_update_request('servicechain_instances', data,
                                  sci['servicechain_instance']['id'])
    res = self.deserialize(self.fmt, req.get_response(self.ext_api))
    for k, v in attrs.iteritems():
        self.assertEqual(res['servicechain_instance'][k], v)
    self._test_show_resource('servicechain_instance',
                             sci['servicechain_instance']['id'], attrs)
    req = self.new_delete_request('servicechain_instances',
                                  sci['servicechain_instance']['id'])
    req.get_response(self.ext_api)
def create_qos(self, context, qos):
    """Create a qos and its default queue."""
    qos = qos['qos']
    default_queue = self._extract_default_queue_from_qos_param(qos)
    # The qos rate must be able to accommodate its default queue.
    if qos['rate'] < default_queue['rate']:
        raise ext_qos.QosRateTooSmall(id=None, rate=qos['rate'])
    qos_target = self._check_qos_target(
        context, qos['target_type'], qos['target_id'], qos['direction'])
    tenant_id = self._get_tenant_id_for_create(context, qos)
    qos_id = qos.get('id', uuidutils.generate_uuid())
    default_queue_id = uuidutils.generate_uuid()
    with context.session.begin(subtransactions=True):
        qos_db = Qos(id=qos_id,
                     tenant_id=tenant_id,
                     name=qos['name'],
                     description=qos['description'],
                     direction=qos['direction'],
                     port_id=qos_target['port_id'],
                     router_id=qos_target['router_id'],
                     rate=qos['rate'],
                     burst=qos['burst'],
                     cburst=qos['cburst'],
                     default_queue_id=default_queue_id)
        # prio 7 marks the default (lowest priority) queue.
        qos_queue_db = QosQueue(id=default_queue_id,
                                tenant_id=tenant_id,
                                qos_id=qos_id,
                                parent_id=None,
                                prio=7,
                                rate=default_queue['rate'],
                                ceil=default_queue['ceil'],
                                burst=default_queue['burst'],
                                cburst=default_queue['cburst'])
        context.session.add(qos_db)
        context.session.add(qos_queue_db)
    return self._make_qos_dict(qos_db)
def create_net_partition(self, params):
    """Return a fake net partition with freshly generated ids."""
    fake_net_partition = {
        'nuage_entid': uuidutils.generate_uuid(),
        'l3dom_id': uuidutils.generate_uuid(),
        'l2dom_id': uuidutils.generate_uuid(),
    }
    return fake_net_partition
def test_list_router_routes(self):
    """list_router_routes returns routes with ids expanded to full paths.

    Bug fix: the final assertion compared the raw backend payload
    (``data["routes"]``) with ``expected`` instead of comparing the
    driver's return value ``ret`` — ``expected`` ids are prefixed with
    ``routes_path`` while the payload ids are not, so the original
    comparison could never hold and ``ret`` was never checked.
    """
    t = uuidutils.generate_uuid()
    r = uuidutils.generate_uuid()
    router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
    routes_path = router_path + "/routes"
    routes = [("10.1.1.0/24", "192.168.100.10"),
              ("10.2.2.0/20", "192.168.100.20")]
    # Backend response: ids are bare route ids.
    data = {"routes": [{"id": self._get_route_id(route[0], route[1]),
                        "destination": route[0],
                        "nexthop": route[1]}
                       for route in routes]}
    self.do_request.return_value = data
    ret = self.driver.list_router_routes(router_path)
    self.do_request.assert_called_once_with("GET", routes_path)
    # Driver output: ids are expanded to full resource paths.
    expected = [{"id": (routes_path + "/" +
                        self._get_route_id(route[0], route[1])),
                 "destination": route[0],
                 "nexthop": route[1]}
                for route in routes]
    self.assertEqual(len(routes), len(ret))
    self.assertEqual(expected, ret)
def test_create_and_show_servicechain_instance(self):
    """Create an instance with specs list, show it, then delete it."""
    scs_id = self.create_servicechain_spec()["servicechain_spec"]["id"]
    policy_target_group_id = uuidutils.generate_uuid()
    classifier_id = uuidutils.generate_uuid()
    config_param_values = "{}"
    attrs = cm.get_create_servicechain_instance_default_attrs(
        servicechain_specs=[scs_id],
        provider_ptg_id=policy_target_group_id,
        consumer_ptg_id=policy_target_group_id,
        management_ptg_id=policy_target_group_id,
        classifier_id=classifier_id,
        config_param_values=config_param_values)
    sci = self.create_servicechain_instance(
        servicechain_specs=[scs_id],
        provider_ptg_id=policy_target_group_id,
        consumer_ptg_id=policy_target_group_id,
        management_ptg_id=policy_target_group_id,
        classifier_id=classifier_id,
        config_param_values=config_param_values)
    for k, v in attrs.iteritems():
        self.assertEqual(v, sci["servicechain_instance"][k])
    self._test_show_resource("servicechain_instance",
                             sci["servicechain_instance"]["id"], attrs)
    req = self.new_delete_request("servicechain_instances",
                                  sci["servicechain_instance"]["id"])
    req.get_response(self.ext_api)
def test_get_ready_devices_multiple_vips_and_pools(self):
    """get_ready_devices reports every pool returned by the agent scheduler."""
    ctx = context.get_admin_context()
    # add 3 pools and 2 vips directly to DB
    # to create 2 "ready" devices and one pool without vip
    pools = []
    for i in xrange(3):
        pools.append(ldb.Pool(id=uuidutils.generate_uuid(),
                              subnet_id=self._subnet_id,
                              protocol="HTTP",
                              lb_method="ROUND_ROBIN",
                              status=constants.ACTIVE,
                              admin_state_up=True))
        ctx.session.add(pools[i])
    vip0 = ldb.Vip(id=uuidutils.generate_uuid(),
                   protocol_port=80,
                   protocol="HTTP",
                   pool_id=pools[0].id,
                   status=constants.ACTIVE,
                   admin_state_up=True,
                   connection_limit=3)
    ctx.session.add(vip0)
    pools[0].vip_id = vip0.id
    vip1 = ldb.Vip(id=uuidutils.generate_uuid(),
                   protocol_port=80,
                   protocol="HTTP",
                   pool_id=pools[1].id,
                   status=constants.ACTIVE,
                   admin_state_up=True,
                   connection_limit=3)
    ctx.session.add(vip1)
    pools[1].vip_id = vip1.id
    ctx.session.flush()
    self.assertEqual(ctx.session.query(ldb.Pool).count(), 3)
    self.assertEqual(ctx.session.query(ldb.Vip).count(), 2)
    with mock.patch("neutron.services.loadbalancer.agent_scheduler"
                    ".LbaasAgentSchedulerDbMixin"
                    ".list_pools_on_lbaas_agent") as mock_agent_pools:
        mock_agent_pools.return_value = {
            "pools": [{"id": pools[0].id},
                      {"id": pools[1].id},
                      {"id": pools[2].id}]}
        ready = self.callbacks.get_ready_devices(ctx)
        self.assertEqual(len(ready), 3)
        self.assertIn(pools[0].id, ready)
        self.assertIn(pools[1].id, ready)
        self.assertIn(pools[2].id, ready)
    # cleanup
    ctx.session.query(ldb.Pool).delete()
    ctx.session.query(ldb.Vip).delete()
def test_delete_neutron_ports(self):
    """delete_ports(all_ports=False) removes each discovered VIF port."""
    port1 = ovs_lib.VifPort("tap1234", 1, uuidutils.generate_uuid(),
                            "ca:fe:de:ad:be:ef", "br")
    port2 = ovs_lib.VifPort("tap5678", 2, uuidutils.generate_uuid(),
                            "ca:ee:de:ad:be:ef", "br")
    with mock.patch.object(self.br, "get_vif_ports",
                           return_value=[port1, port2]) as get_ports:
        with mock.patch.object(self.br, "delete_port") as delete_port:
            self.br.delete_ports(all_ports=False)
    get_ports.assert_called_once_with()
    delete_port.assert_has_calls([mock.call("tap1234"),
                                  mock.call("tap5678")])
def test_get_nsx_switch_and_port_id_from_db_mappings(self):
    # This test is representative of the 'standard' case in which both the
    # switch and the port mappings were stored in the neutron db
    exp_ls_uuid = uuidutils.generate_uuid()
    exp_lp_uuid = uuidutils.generate_uuid()
    self._mock_port_mapping_db_calls((exp_ls_uuid, exp_lp_uuid))
    self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)
def get_random_params(self):
    """create random parameters for portinfo test."""
    tenant = uuidutils.generate_uuid()
    network = uuidutils.generate_uuid()
    port = uuidutils.generate_uuid()
    _filter = uuidutils.generate_uuid()
    none = uuidutils.generate_uuid()
    return tenant, network, port, _filter, none
def create_subnet(self, neutron_subnet, params):
    """Return a fake nuage subnet mapping with generated identifiers."""
    nuage_subnet = {
        'nuage_l2template_id': uuidutils.generate_uuid(),
        'nuage_userid': uuidutils.generate_uuid(),
        'nuage_groupid': uuidutils.generate_uuid(),
        'nuage_l2domain_id': uuidutils.generate_uuid(),
    }
    return nuage_subnet
def get_ofc_item_random_params(self):
    """create random parameters for ofc_item test."""
    tenant_id = uuidutils.generate_uuid()
    network_id = uuidutils.generate_uuid()
    port_id = uuidutils.generate_uuid()
    # Random MAC: six hex octets joined with ':'.
    mac = ":".join(["%x" % random.randint(0, 255)
                    for i in moves.xrange(6)])
    portinfo = nmodels.PortInfo(id=port_id,
                                datapath_id="0x123456789",
                                port_no=1234,
                                vlan_id=321,
                                mac=mac)
    return tenant_id, network_id, portinfo
def create_router(self, neutron_router, router, params):
    """Return a fake nuage router mapping with generated identifiers."""
    nuage_router = {
        'nuage_userid': uuidutils.generate_uuid(),
        'nuage_groupid': uuidutils.generate_uuid(),
        'nuage_domain_id': uuidutils.generate_uuid(),
        'nuage_def_zone_id': uuidutils.generate_uuid(),
    }
    return nuage_router
def get_def_netpartition_data(self, default_net_part):
    """Return fake default-netpartition data for the test partition.

    Returns None (implicitly) for any other partition name.
    """
    if default_net_part == 'default_test_np':
        fake_defnetpart_data = {
            'np_id': uuidutils.generate_uuid(),
            'l3dom_tid': uuidutils.generate_uuid(),
            'l2dom_tid': uuidutils.generate_uuid(),
        }
        return fake_defnetpart_data
def test_delete_router(self):
    """delete_router issues a single DELETE on the router path."""
    t = uuidutils.generate_uuid()
    r = uuidutils.generate_uuid()
    router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
    self.driver.delete_router(router_path)
    self.do_request.assert_called_once_with("DELETE", router_path)
def test_redirect_to_chain(self):
    """A redirect action creates an SCI; deleting the consumer removes it."""
    classifier = self.create_policy_classifier(
        name="class1", protocol="tcp", direction="in", port_range="20:90")
    classifier_id = classifier['policy_classifier']['id']
    action = self.create_policy_action(
        name="action1", action_type=gconst.GP_ACTION_REDIRECT,
        action_value=uuidutils.generate_uuid())
    action_id = action['policy_action']['id']
    action_id_list = [action_id]
    policy_rule = self.create_policy_rule(
        name='pr1', policy_classifier_id=classifier_id,
        policy_actions=action_id_list)
    policy_rule_id = policy_rule['policy_rule']['id']
    policy_rule_list = [policy_rule_id]
    policy_rule_set = self.create_policy_rule_set(
        name="c1", policy_rules=policy_rule_list)
    policy_rule_set_id = policy_rule_set['policy_rule_set']['id']
    self.create_policy_target_group(
        name="ptg1",
        provided_policy_rule_sets={policy_rule_set_id: None})
    create_chain_instance = mock.patch.object(
        servicechain_plugin.ServiceChainPlugin,
        'create_servicechain_instance')
    create_chain_instance = create_chain_instance.start()
    chain_instance_id = uuidutils.generate_uuid()
    create_chain_instance.return_value = {'id': chain_instance_id}
    # TODO(Magesh):Add tests which verifies that provide/consumer PTGs
    # are set correctly for the SCI
    with mock.patch.object(
            resource_mapping.ResourceMappingDriver,
            '_set_rule_servicechain_instance_mapping') as set_rule:
        with mock.patch.object(servicechain_db.ServiceChainDbPlugin,
                               'get_servicechain_spec') as sc_spec_get:
            sc_spec_get.return_value = {'servicechain_spec': {}}
            consumer_ptg = self.create_policy_target_group(
                name="ptg2",
                consumed_policy_rule_sets={policy_rule_set_id: None})
            consumer_ptg_id = consumer_ptg['policy_target_group']['id']
            set_rule.assert_called_once_with(mock.ANY, policy_rule_id,
                                            chain_instance_id)
            with mock.patch.object(servicechain_plugin.ServiceChainPlugin,
                                   'delete_servicechain_instance'):
                with mock.patch.object(
                        resource_mapping.ResourceMappingDriver,
                        '_get_rule_servicechain_mapping') as get_rule:
                    r_sc_map = (
                        resource_mapping.RuleServiceChainInstanceMapping())
                    r_sc_map.rule_id = policy_rule_id
                    r_sc_map.servicechain_instance_id = chain_instance_id
                    get_rule.return_value = r_sc_map
                    get_chain_inst = mock.patch.object(
                        servicechain_db.ServiceChainDbPlugin,
                        'get_servicechain_instance')
                    get_chain_inst.start()
                    get_chain_inst.return_value = {
                        "servicechain_instance": {'id': chain_instance_id}}
                    req = self.new_delete_request(
                        'policy_target_groups', consumer_ptg_id)
                    res = req.get_response(self.ext_api)
                    self.assertEqual(res.status_int,
                                     webob.exc.HTTPNoContent.code)
def test_router_create_with_nuage_rtr_template(self):
    """Router creation with a nuage router template succeeds (201)."""
    nuage_rtr_template = uuidutils.generate_uuid()
    data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
    data['router']['name'] = 'router1'
    data['router']['admin_state_up'] = True
    data['router']['nuage_router_template'] = nuage_rtr_template
    router_req = self.new_create_request('routers', data, 'json')
    router_res = router_req.get_response(self.ext_api)
    self.assertEqual(exc.HTTPCreated.code, router_res.status_int)
def get_portinfo_random_params(self):
    """create random parameters for portinfo test."""
    port_id = uuidutils.generate_uuid()
    datapath_id = hex(random.randint(0, 0xffffffff))
    port_no = random.randint(1, 100)
    vlan_id = random.randint(q_const.MIN_VLAN_TAG, q_const.MAX_VLAN_TAG)
    # Random MAC: six zero-padded hex octets joined with ':'.
    mac = ':'.join(["%02x" % random.randint(0, 0xff) for x in range(6)])
    none = uuidutils.generate_uuid()
    return port_id, datapath_id, port_no, vlan_id, mac, none
def test_router_create_with_nuage_rtr_template(self):
    """Router creation with a nuage router template succeeds (201)."""
    nuage_rtr_template = uuidutils.generate_uuid()
    data = {"router": {"tenant_id": uuidutils.generate_uuid()}}
    data["router"]["name"] = "router1"
    data["router"]["admin_state_up"] = True
    data["router"]["nuage_router_template"] = nuage_rtr_template
    router_req = self.new_create_request("routers", data, "json")
    router_res = router_req.get_response(self.ext_api)
    self.assertEqual(exc.HTTPCreated.code, router_res.status_int)
def get_ofc_item_random_params(self):
    """create random parameters for ofc_item test."""
    tenant_id = uuidutils.generate_uuid()
    network_id = uuidutils.generate_uuid()
    port_id = uuidutils.generate_uuid()
    portinfo = nmodels.PortInfo(id=port_id,
                                datapath_id="0x123456789",
                                port_no=1234,
                                vlan_id=321,
                                mac="11:22:33:44:55:66")
    return tenant_id, network_id, portinfo
def test_delete_router_interface(self):
    """delete_router_interface issues a single DELETE on the interface path."""
    t = uuidutils.generate_uuid()
    r = uuidutils.generate_uuid()
    p = uuidutils.generate_uuid()
    router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
    inf_path = "%s/interfaces/%s" % (router_path, _ofc(p))
    self.driver.delete_router_interface(inf_path)
    self.do_request.assert_called_once_with("DELETE", inf_path)
def test_delete_router(self):
    """delete_router issues a DELETE on the router path (mox variant)."""
    t = uuidutils.generate_uuid()
    r = uuidutils.generate_uuid()
    router_path = "/tenants/%s/routers/%s" % (_ofc(t), _ofc(r))
    ofc.OFCClient.do_request("DELETE", router_path)
    self.mox.ReplayAll()
    self.driver.delete_router(router_path)
    self.mox.VerifyAll()
def test_get_port_groups_for_sg(self):
    """Looking up a SG's port group returns the configured group."""
    sg_id = uuidutils.generate_uuid()
    pg_id = uuidutils.generate_uuid()
    self.mock_api_cfg.port_groups_in = [
        _create_test_port_group(sg_id, 'test-sg', pg_id, self._tenant_id)]
    pg = self.client.get_port_groups_for_sg(self._tenant_id, sg_id)
    self.assertIsNotNone(pg)
    self.assertEqual(pg.get_id(), pg_id)
def upgrade():
    """Backfill IP policies (and default policy CIDRs) for subnets lacking one.

    Fixes local-name shadowing from the original: the per-row loop variable
    was named ``ip_policy``, clobbering the ``ip_policy`` table object, the
    inner CIDR loop variable shadowed the unpacked ``cidr``, and ``id``
    shadowed the builtin. Behavior is unchanged.
    """
    ip_policy = table('quark_ip_policy',
                      column('id', sa.String(length=36)),
                      column('tenant_id', sa.String(length=255)),
                      column('created_at', sa.DateTime()))
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('id', sa.String(length=36)),
                            column('created_at', sa.DateTime()),
                            column('ip_policy_id', sa.String(length=36)),
                            column('cidr', sa.String(length=64)))
    subnets = table('quark_subnets',
                    column('id', sa.String(length=36)),
                    column('_cidr', sa.String(length=64)),
                    column('tenant_id', sa.String(length=255)),
                    column('ip_policy_id', sa.String(length=36)))
    connection = op.get_bind()
    # 1. Find all subnets without ip_policy.
    data = connection.execute(select([
        subnets.c.id, subnets.c._cidr, subnets.c.tenant_id]).where(
            subnets.c.ip_policy_id == None)).fetchall()  # noqa
    if not data:
        return
    LOG.info("Subnet IDs without IP policies: %s", [d[0] for d in data])
    # 2. Insert ip_policy rows with id.
    vals = [dict(id=uuidutils.generate_uuid(),
                 created_at=timeutils.utcnow(),
                 tenant_id=tenant_id)
            for subnet_id, subnet_cidr, tenant_id in data]
    LOG.info("IP Policy IDs to insert: %s", [v["id"] for v in vals])
    connection.execute(ip_policy.insert(), *vals)
    # 3. Insert default ip_policy_cidrs for those ip_policy's.
    vals2 = []
    for ((subnet_id, subnet_cidr, tenant_id), policy) in zip(data, vals):
        cidrs = []
        ip_policies.ensure_default_policy(cidrs, [dict(cidr=subnet_cidr)])
        for policy_cidr in cidrs:
            vals2.append(dict(id=uuidutils.generate_uuid(),
                              created_at=timeutils.utcnow(),
                              ip_policy_id=policy["id"],
                              cidr=str(policy_cidr)))
    LOG.info("IP Policy CIDR IDs to insert: %s", [v["id"] for v in vals2])
    connection.execute(ip_policy_cidrs.insert(), *vals2)
    # 4. Set ip_policy_id rows in quark_subnets.
    for ((subnet_id, subnet_cidr, tenant_id), policy) in zip(data, vals):
        connection.execute(subnets.update().values(
            ip_policy_id=policy["id"]).where(
                subnets.c.id == subnet_id))
def create_l2domain(self, netpart_id, subnet):
    """Return a fake nuage l2 domain mapping for the given subnet."""
    subl2dom = {
        'subnet_id': subnet['id'],
        # Fixed fake backend id used by the tests.
        'nuage_subnet_id': '52daa465-cf33-4efd-91d3-f5bc2aebd',
        'net_partition_id': netpart_id,
        'nuage_l2dom_tmplt_id': uuidutils.generate_uuid(),
        'nuage_user_id': uuidutils.generate_uuid(),
        'nuage_group_id': uuidutils.generate_uuid(),
    }
    return subl2dom
def test_delete_port_group_by_name(self):
    """Deleting by name removes only the matching port group."""
    tenant_id = uuidutils.generate_uuid()
    pg1_id = uuidutils.generate_uuid()
    pg1 = _create_test_port_group(pg1_id, "pg1", tenant_id)
    pg2_id = uuidutils.generate_uuid()
    pg2 = _create_test_port_group(pg2_id, "pg2", tenant_id)
    self.mock_api_cfg.port_groups_in = [pg1, pg2]
    self.client.delete_port_group_by_name(tenant_id, "pg1")
    self.mock_api.delete_port_group.assert_called_once_with(pg1_id)
def setUp(self):
    """Build a MidonetInterfaceDriver plus fixed test identifiers."""
    self.conf = config.setup_conf()
    self.conf.register_opts(interface.OPTS)
    self.driver = interface.MidonetInterfaceDriver(self.conf)
    self.network_id = uuidutils.generate_uuid()
    self.port_id = uuidutils.generate_uuid()
    self.device_name = "tap0"
    self.mac_address = "aa:bb:cc:dd:ee:ff"
    self.bridge = "br-test"
    self.namespace = "ns-test"
    super(TestMidonetInterfaceDriver, self).setUp()
def test_get_bridge_not_found(self):
    """A backend 404 surfaces as MidonetResourceNotFound."""
    self.mock_api.get_bridge.side_effect = w_exc.HTTPNotFound()
    self.assertRaises(midonet_lib.MidonetResourceNotFound,
                      self.client.get_bridge,
                      uuidutils.generate_uuid())
def test_get_port_binding_host_result_not_found(self):
    """Unknown port id yields no binding host (None)."""
    port_id = uuidutils.generate_uuid()
    port_host = ml2_db.get_port_binding_host(self.ctx.session, port_id)
    self.assertIsNone(port_host)
def test_generate_pfc_id_uuid(self):
    """_generate_pfc_id strips one char plus dashes, capped at 31 chars."""
    id_str = uuidutils.generate_uuid()
    exp_str = (id_str[:14] + id_str[15:]).replace('-', '')[:31]
    ret_str = self.driver._generate_pfc_id(id_str)
    self.assertEqual(exp_str, ret_str)
def test_create_lrouter_on_failure(self):
    """Backend failure during lrouter creation raises NsxApiException."""
    self.assertRaises(api_exc.NsxApiException,
                      routerlib.create_lrouter,
                      self.fake_cluster,
                      uuidutils.generate_uuid(),
                      'pluto',
                      'fake_router',
                      'my_hop')
def _add_lrouter(self, body):
    """Build a fake lrouter from body, register it, and return it."""
    fake_lrouter = self._build_lrouter(body, uuidutils.generate_uuid())
    self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
    # New routers start with no logical ports attached.
    fake_lrouter['lport_count'] = 0
    return fake_lrouter
def generate_random_ids(self, count=1):
    """Return one random uuid, or a list of them when count > 1."""
    if count == 1:
        return uuidutils.generate_uuid()
    return [uuidutils.generate_uuid() for _ in xrange(count)]
def test_get_bridge_error(self):
    """A backend 500 surfaces as MidonetApiException."""
    self.mock_api.get_bridge.side_effect = w_exc.HTTPInternalServerError()
    self.assertRaises(midonet_lib.MidonetApiException,
                      self.client.get_bridge,
                      uuidutils.generate_uuid())
def setUp(self):
    """Generate fresh tenant and subnet ids for each test."""
    super(IpamSubnetRequestTestCase, self).setUp()
    self.tenant_id = uuidutils.generate_uuid()
    self.subnet_id = uuidutils.generate_uuid()
def _perhaps_generate_id(target, args, kwargs):
    """Assign a generated uuid to target.id when it exists and is unset."""
    if hasattr(target, 'id') and target.id is None:
        target.id = uuidutils.generate_uuid()
def create_vip(self, context, vip):
    """Create a VIP, its optional session persistence, and its port."""
    v = vip['vip']
    tenant_id = self._get_tenant_id_for_create(context, v)
    with context.session.begin(subtransactions=True):
        if v['pool_id']:
            pool = self._get_resource(context, Pool, v['pool_id'])
            # validate that the pool has same tenant
            if pool['tenant_id'] != tenant_id:
                raise n_exc.NotAuthorized()
            # validate that the pool has same protocol
            if pool['protocol'] != v['protocol']:
                raise loadbalancer.ProtocolMismatch(
                    vip_proto=v['protocol'],
                    pool_proto=pool['protocol'])
            if pool['status'] == constants.PENDING_DELETE:
                raise loadbalancer.StateInvalid(state=pool['status'],
                                                id=pool['id'])
        vip_db = Vip(id=uuidutils.generate_uuid(),
                     tenant_id=tenant_id,
                     name=v['name'],
                     description=v['description'],
                     port_id=None,
                     protocol_port=v['protocol_port'],
                     protocol=v['protocol'],
                     pool_id=v['pool_id'],
                     connection_limit=v['connection_limit'],
                     admin_state_up=v['admin_state_up'],
                     status=constants.PENDING_CREATE)
        session_info = v['session_persistence']
        if session_info:
            s_p = self._create_session_persistence_db(session_info,
                                                      vip_db['id'])
            vip_db.session_persistence = s_p
        try:
            context.session.add(vip_db)
            context.session.flush()
        except exception.DBDuplicateEntry:
            raise loadbalancer.VipExists(pool_id=v['pool_id'])
    try:
        # create a port to reserve address for IPAM
        # do it outside the transaction to avoid rpc calls
        self._create_port_for_vip(context, vip_db, v['subnet_id'],
                                  v.get('address'))
    except Exception:
        # catch any kind of exceptions
        with excutils.save_and_reraise_exception():
            context.session.delete(vip_db)
            context.session.flush()
    if v['pool_id']:
        # fetching pool again
        pool = self._get_resource(context, Pool, v['pool_id'])
        # (NOTE): we rely on the fact that pool didn't change between
        # above block and here
        vip_db['pool_id'] = v['pool_id']
        pool['vip_id'] = vip_db['id']
        # explicitly flush changes as we're outside any transaction
        context.session.flush()
    return self._make_vip_dict(vip_db)
def test_get_nsx_sec_profile_id_from_db_mappings(self):
    # This test is representative of the 'standard' case in which the
    # security group mapping was stored in the neutron db
    exp_sec_prof_uuid = uuidutils.generate_uuid()
    self._mock_sec_group_mapping_db_calls(exp_sec_prof_uuid)
    self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid)
def test_get_nsx_router_id_from_db_mappings(self):
    # This test is representative of the 'standard' case in which the
    # router mapping was stored in the neutron db
    exp_lr_uuid = uuidutils.generate_uuid()
    self._mock_router_mapping_db_calls(exp_lr_uuid)
    self._verify_get_nsx_router_id(exp_lr_uuid)
def upgrade():
    """Clamp every IP policy's CIDR exclusions to its subnet's CIDR.

    Fix: the early-exit guard was ``if data is None`` — ``fetchall()``
    returns a (possibly empty) list and never ``None``, so the guard was
    dead code. Use ``if not data`` to short-circuit on an empty result.
    """
    ip_policy_cidrs = table('quark_ip_policy_cidrs',
                            column('id', sa.String(length=36)),
                            column('created_at', sa.DateTime()),
                            column('ip_policy_id', sa.String(length=36)),
                            column('cidr', sa.String(length=64)))
    subnets = table('quark_subnets',
                    column('_cidr', sa.String(length=64)),
                    column('ip_policy_id', sa.String(length=36)))
    connection = op.get_bind()
    # 1. Find `quark_ip_policy_cidrs` rows.
    data = connection.execute(
        select([subnets.c.ip_policy_id,
                subnets.c._cidr,
                ip_policy_cidrs.c.id,
                ip_policy_cidrs.c.cidr]).where(
            subnets.c.ip_policy_id ==
            ip_policy_cidrs.c.ip_policy_id).order_by(
            subnets.c.ip_policy_id)).fetchall()
    if not data:
        return
    # 2. Accumulate which `quark_ip_policy_cidrs` rows are outside of the
    # subnet's cidr.
    ipp_to_update = dict()

    def _test_change_needed(ipp_id, s, ipp):
        # Record the intersection when the policy set leaks outside the
        # subnet set.
        if s is None or ipp is None:
            return
        diff = ipp - s
        if diff.size > 0:
            ipp_to_update[ipp_id] = ipp & s

    prev_ip_policy_id = ''
    subnet, ip_policy = None, None
    # Rows are ordered by ip_policy_id; fold each group into one IPSet.
    for ip_policy_id, cidr, ippc_id, ippc_cidr in data:
        if ip_policy_id != prev_ip_policy_id:
            _test_change_needed(prev_ip_policy_id, subnet, ip_policy)
            subnet, ip_policy = netaddr.IPSet([cidr]), netaddr.IPSet()
        ip_policy |= netaddr.IPSet([ippc_cidr])
        prev_ip_policy_id = ip_policy_id
    _test_change_needed(prev_ip_policy_id, subnet, ip_policy)
    if not ipp_to_update.keys():
        return
    LOG.info("IP Policy IDs to update: %s", ipp_to_update.keys())
    # 3. Delete `quark_ip_policy_cidrs` rows that need to be replaced with
    # rows that are inside of the subnet's cidr.
    connection.execute(ip_policy_cidrs.delete().where(
        ip_policy_cidrs.c.ip_policy_id.in_(ipp_to_update.keys())))
    # 4. Insert `quark_ip_policy_cidrs` rows with cidrs that are inside
    # the subnet's cidr.
    vals = [dict(id=uuidutils.generate_uuid(),
                 created_at=timeutils.utcnow(),
                 ip_policy_id=key,
                 cidr=str(x.cidr))
            for key in ipp_to_update.keys()
            for x in ipp_to_update[key].iter_cidrs()]
    if not vals:
        return
    LOG.info("IP Policy CIDR IDs to insert: %s", [v["id"] for v in vals])
    connection.execute(ip_policy_cidrs.insert(), *vals)
def create_port(context, port):
    """Create a port

    Create a port which is a connection point of a device
    (e.g., a VM NIC) to attach to a L2 Neutron network.
    : param context: neutron api request context
    : param port: dictionary describing the port, with keys as listed in the
        RESOURCE_ATTRIBUTE_MAP object in neutron/api/v2/attributes.py.
        All keys will be populated.
    """
    LOG.info("create_port for tenant %s" % context.tenant_id)
    port_attrs = port["port"]
    admin_only = ["mac_address", "device_owner", "bridge",
                  "admin_state_up", "use_forbidden_mac_range"]
    utils.filter_body(context, port_attrs, admin_only=admin_only)
    port_attrs = port["port"]
    mac_address = utils.pop_param(port_attrs, "mac_address", None)
    use_forbidden_mac_range = utils.pop_param(
        port_attrs, "use_forbidden_mac_range", False)
    segment_id = utils.pop_param(port_attrs, "segment_id")
    fixed_ips = utils.pop_param(port_attrs, "fixed_ips")
    if "device_id" not in port_attrs:
        port_attrs['device_id'] = ""
    device_id = port_attrs['device_id']
    net_id = port_attrs["network_id"]
    port_id = uuidutils.generate_uuid()
    net = db_api.network_find(context, None, None, None, False,
                              id=net_id, scope=db_api.ONE)
    if not net:
        raise exceptions.NetworkNotFound(net_id=net_id)
    _raise_if_unauthorized(context.tenant_id, net)
    # NOTE (Perkins): If a device_id is given, try to prevent multiple ports
    # from being created for a device already attached to the network
    if device_id:
        existing_ports = db_api.port_find(context,
                                          network_id=net_id,
                                          device_id=device_id,
                                          scope=db_api.ONE)
        if existing_ports:
            raise exceptions.BadRequest(
                resource="port",
                msg="This device is already connected to the "
                    "requested network via another port")
    # Try to fail early on quotas and save ourselves some db overhead
    if fixed_ips:
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 fixed_ips_per_port=len(fixed_ips))
    if not STRATEGY.is_parent_network(net_id):
        # We don't honor segmented networks when they aren't "shared"
        segment_id = None
        port_count = db_api.port_count_all(
            context, network_id=[net_id],
            tenant_id=[context.tenant_id])
        quota.QUOTAS.limit_check(context, context.tenant_id,
                                 ports_per_network=port_count + 1)
    else:
        if not segment_id:
            raise q_exc.AmbiguousNetworkId(net_id=net_id)
    ipam_driver = ipam.IPAM_REGISTRY.get_strategy(net["ipam_strategy"])
    net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
    # TODO(anyone): security groups are not currently supported on port
    # create, nor on isolated networks today. Please see RM8615
    security_groups = utils.pop_param(port_attrs, "security_groups")
    if security_groups is not None:
        raise q_exc.SecurityGroupsNotImplemented()
    group_ids, security_groups = _make_security_group_list(
        context, security_groups)
    quota.QUOTAS.limit_check(context, context.tenant_id,
                             security_groups_per_port=len(group_ids))
    addresses = []
    backend_port = None
    with utils.CommandManager().execute() as cmd_mgr:
        @cmd_mgr.do
        def _allocate_ips(fixed_ips, net, port_id, segment_id, mac):
            # Allocate IP addresses (honoring explicitly requested subnets).
            fixed_ip_kwargs = {}
            if fixed_ips:
                if (STRATEGY.is_parent_network(net_id) and
                        not context.is_admin):
                    raise exceptions.NotAuthorized()
                ips, subnets = split_and_validate_requested_subnets(
                    context, net_id, segment_id, fixed_ips)
                fixed_ip_kwargs["ip_addresses"] = ips
                fixed_ip_kwargs["subnets"] = subnets
            ipam_driver.allocate_ip_address(
                context, addresses, net["id"], port_id,
                CONF.QUARK.ipam_reuse_after, segment_id=segment_id,
                mac_address=mac, **fixed_ip_kwargs)

        @cmd_mgr.undo
        def _allocate_ips_undo(addr):
            LOG.info("Rolling back IP addresses...")
            if addresses:
                for address in addresses:
                    try:
                        with context.session.begin():
                            ipam_driver.deallocate_ip_address(context,
                                                              address)
                    except Exception:
                        LOG.exception("Couldn't release IP %s" % address)

        @cmd_mgr.do
        def _allocate_mac(net, port_id, mac_address,
                          use_forbidden_mac_range=False):
            # Allocate a MAC address for the new port.
            mac = ipam_driver.allocate_mac_address(
                context, net["id"], port_id, CONF.QUARK.ipam_reuse_after,
                mac_address=mac_address,
                use_forbidden_mac_range=use_forbidden_mac_range)
            return mac

        @cmd_mgr.undo
        def _allocate_mac_undo(mac):
            LOG.info("Rolling back MAC address...")
            if mac:
                try:
                    with context.session.begin():
                        ipam_driver.deallocate_mac_address(context,
                                                           mac["address"])
                except Exception:
                    LOG.exception("Couldn't release MAC %s" % mac)

        @cmd_mgr.do
        def _allocate_backend_port(mac, addresses, net, port_id):
            # Create the port on the backend network driver.
            backend_port = net_driver.create_port(
                context, net["id"], port_id=port_id,
                security_groups=group_ids, device_id=device_id)
            return backend_port

        @cmd_mgr.undo
        def _allocate_back_port_undo(backend_port):
            LOG.info("Rolling back backend port...")
            try:
                net_driver.delete_port(context, backend_port["uuid"])
            except Exception:
                LOG.exception("Couldn't rollback backend port %s" %
                              backend_port)

        @cmd_mgr.do
        def _allocate_db_port(port_attrs, backend_port, addresses, mac):
            # Persist the port row, folding in backend-provided attributes.
            port_attrs["network_id"] = net["id"]
            port_attrs["id"] = port_id
            port_attrs["security_groups"] = security_groups
            LOG.info("Including extra plugin attrs: %s" % backend_port)
            port_attrs.update(backend_port)
            with context.session.begin():
                new_port = db_api.port_create(
                    context, addresses=addresses,
                    mac_address=mac["address"],
                    backend_key=backend_port["uuid"], **port_attrs)
            return new_port

        @cmd_mgr.undo
        def _allocate_db_port_undo(new_port):
            LOG.info("Rolling back database port...")
            if not new_port:
                return
            try:
                with context.session.begin():
                    db_api.port_delete(context, new_port)
            except Exception:
                LOG.exception("Couldn't rollback db port %s" % backend_port)

        # addresses, mac, backend_port, new_port
        mac = _allocate_mac(net, port_id, mac_address,
                            use_forbidden_mac_range=use_forbidden_mac_range)
        _allocate_ips(fixed_ips, net, port_id, segment_id, mac)
        backend_port = _allocate_backend_port(mac, addresses, net, port_id)
        new_port = _allocate_db_port(port_attrs, backend_port, addresses,
                                     mac)
    return v._make_port_dict(new_port)
def setUp(self):
    """Create a uniquely named network namespace and register teardown."""
    super(NamespaceFixture, self).setUp()
    self.name = self.prefix + uuidutils.generate_uuid()
    wrapper = ip_lib.IPWrapper()
    self.ip_wrapper = wrapper.ensure_namespace(self.name)
    self.addCleanup(self.destroy)
def testb_delete_filter(self):
    """delete_filter issues a single DELETE against the filter path."""
    tenant, network, port = self.get_ofc_item_random_params()
    path = "/filters/%s" % uuidutils.generate_uuid()
    self.driver.delete_filter(path)
    self.do_request.assert_called_once_with("DELETE", path)
def create_network(context, network):
    """Create a network.

    Create a network which represents an L2 network segment which
    can have a set of subnets and ports associated with it.

    : param context: neutron api request context
    : param network: dictionary describing the network, with keys
        as listed in the RESOURCE_ATTRIBUTE_MAP object in
        neutron/api/v2/attributes.py. All keys will be populated.
    """
    LOG.info("create_network for tenant %s" % context.tenant_id)
    with context.session.begin():
        # Generate a uuid that we're going to hand to the backend and db
        net_attrs = network["network"]
        net_uuid = utils.pop_param(net_attrs, "id", None)
        net_type = None
        # Only admins may supply their own network id (and plugin); the
        # request is rejected if a network with that id already exists.
        if net_uuid and context.is_admin:
            net = db_api.network_find(context, id=net_uuid,
                                      scope=db_api.ONE)
            net_type = utils.pop_param(net_attrs, "network_plugin", None)
            if net:
                raise q_exc.NetworkAlreadyExists(id=net_uuid)
        else:
            net_uuid = uuidutils.generate_uuid()

        # TODO(mdietz) this will be the first component registry hook, but
        #              lets make it work first
        pnet_type, phys_net, seg_id = _adapt_provider_nets(context, network)

        # Non-admins always get the configured default IPAM strategy.
        ipam_strategy = utils.pop_param(net_attrs, "ipam_strategy", None)
        if not ipam_strategy or not context.is_admin:
            ipam_strategy = CONF.QUARK.default_ipam_strategy

        if not ipam.IPAM_REGISTRY.is_valid_strategy(ipam_strategy):
            raise q_exc.InvalidIpamStrategy(strat=ipam_strategy)
        net_attrs["ipam_strategy"] = ipam_strategy

        # NOTE(mdietz) I think ideally we would create the providernet
        # elsewhere as a separate driver step that could be kept in a
        # plugin and completely removed if desired. We could have a
        # pre-callback/observer on the netdriver create_network that
        # gathers any additional parameters from the network dict
        default_net_type = net_type or CONF.QUARK.default_network_type
        net_driver = registry.DRIVER_REGISTRY.get_driver(default_net_type)
        # Create the network on the backend before persisting it in the db.
        net_driver.create_network(context, net_attrs["name"],
                                  network_id=net_uuid,
                                  phys_type=pnet_type,
                                  phys_net=phys_net, segment_id=seg_id)

        subs = net_attrs.pop("subnets", [])
        net_attrs["id"] = net_uuid
        net_attrs["tenant_id"] = context.tenant_id
        net_attrs["network_plugin"] = default_net_type
        new_net = db_api.network_create(context, **net_attrs)

        # Subnets nested in the request are created inside the same
        # transaction as the network itself.
        new_subnets = []
        for sub in subs:
            sub["subnet"]["network_id"] = new_net["id"]
            sub["subnet"]["tenant_id"] = context.tenant_id
            s = db_api.subnet_create(context, **sub["subnet"])
            new_subnets.append(s)
        new_net["subnets"] = new_subnets

        # if not security_groups.get_security_groups(
        #         context,
        #         filters={"id": security_groups.DEFAULT_SG_UUID}):
        #     security_groups._create_default_security_group(context)
    return v._make_network_dict(new_net)
def create_filter(self, ofc_network_id, filter_dict,
                  portinfo=None, filter_id=None, apply_ports=None):
    """Create a packet filter on the OFC and return its resource path.

    :param ofc_network_id: OFC network (slice) id the filter belongs to.
    :param filter_dict: filter attributes (action, priority, src/dst mac,
        src/dst cidr, src/dst port, protocol, eth_type).
    :param portinfo: optional object carrying datapath_id/port_no; when
        omitted the datapath and in_port fields are wildcarded.
    :param filter_id: optional filter id; a UUID is generated when absent.
    :param apply_ports: unused here; kept for interface compatibility.
    :returns: the OFC path of the created filter.
    :raises ValueError: if filter_dict['action'] is not one of
        ACCEPT/ALLOW/DROP/DENY.
    """
    action = filter_dict['action'].upper()
    if action in ("ACCEPT", "ALLOW"):
        ofc_action = "ALLOW"
    elif action in ("DROP", "DENY"):
        ofc_action = "DENY"
    else:
        # Bug fix: an unrecognized action previously fell through and the
        # unbound 'ofc_action' raised UnboundLocalError below; fail fast
        # with a clear error instead.
        raise ValueError("Unsupported filter action: %s" %
                         filter_dict['action'])

    body = {
        'priority': filter_dict['priority'],
        'slice': self._get_network_id(ofc_network_id),
        'action': ofc_action
    }
    # Match fields the OFC should ignore (OpenFlow wildcards); these three
    # are never matched by this driver.
    ofp_wildcards = ["dl_vlan", "dl_vlan_pcp", "nw_tos"]

    if portinfo:
        body['in_datapath_id'] = portinfo.datapath_id
        body['in_port'] = portinfo.port_no
    else:
        body['wildcards'] = "in_datapath_id"
        ofp_wildcards.append("in_port")

    if filter_dict['src_mac']:
        body['dl_src'] = filter_dict['src_mac']
    else:
        ofp_wildcards.append("dl_src")

    if filter_dict['dst_mac']:
        body['dl_dst'] = filter_dict['dst_mac']
    else:
        ofp_wildcards.append("dl_dst")

    if filter_dict['src_cidr']:
        body['nw_src'] = filter_dict['src_cidr']
    else:
        # ":32" wildcards all 32 bits of the IPv4 address.
        ofp_wildcards.append("nw_src:32")

    if filter_dict['dst_cidr']:
        body['nw_dst'] = filter_dict['dst_cidr']
    else:
        ofp_wildcards.append("nw_dst:32")

    protocol = filter_dict['protocol']
    if protocol:
        proto = protocol.upper()
        if proto == "ICMP":
            body['dl_type'] = "0x800"
            body['nw_proto'] = hex(1)
        elif proto == "TCP":
            body['dl_type'] = "0x800"
            body['nw_proto'] = hex(6)
        elif proto == "UDP":
            body['dl_type'] = "0x800"
            body['nw_proto'] = hex(17)
        elif proto == "ARP":
            # ARP has no IP protocol number; match on ethertype only.
            body['dl_type'] = "0x806"
            ofp_wildcards.append("nw_proto")
        else:
            # Pass unknown protocol values through verbatim.
            body['nw_proto'] = protocol
    else:
        ofp_wildcards.append("nw_proto")

    if 'dl_type' in body:
        # dl_type was already fixed by the protocol handling above.
        pass
    elif filter_dict['eth_type']:
        body['dl_type'] = filter_dict['eth_type']
    else:
        ofp_wildcards.append("dl_type")

    if filter_dict['src_port']:
        body['tp_src'] = hex(filter_dict['src_port'])
    else:
        ofp_wildcards.append("tp_src")

    if filter_dict['dst_port']:
        body['tp_dst'] = hex(filter_dict['dst_port'])
    else:
        ofp_wildcards.append("tp_dst")

    ofc_filter_id = filter_id or uuidutils.generate_uuid()
    body['id'] = ofc_filter_id
    body['ofp_wildcards'] = ','.join(ofp_wildcards)

    self.client.post(self.filters_path, body=body)
    return self.filter_path % ofc_filter_id
def test_generate_pfc_id_string(self):
    """IDs are truncated to 31 chars with '-' replaced by '_'."""
    raw_id = uuidutils.generate_uuid() + 'x'
    expected = raw_id[:31].replace('-', '_')
    self.assertEqual(expected, self.driver._generate_pfc_id(raw_id))
def create_tenant(self, description, tenant_id=None):
    """Resolve the OFC tenant id, generating a UUID when none is given."""
    if not tenant_id:
        tenant_id = uuidutils.generate_uuid()
    return self._get_tenant_id(tenant_id)
def get_random_params(self):
    """Return a (tenant, router, network) triple of fresh UUIDs."""
    return tuple(uuidutils.generate_uuid() for _ in range(3))
def migrate_segment_dict(self, binding):
    """Stamp the segment binding with a freshly generated 'id'."""
    fresh_id = uuidutils.generate_uuid()
    binding['id'] = fresh_id
def _test_create_filter(self, filter_dict=None, filter_post=None,
                        filter_wildcards=None, no_portinfo=False):
    """Exercise driver.create_filter and verify the body POSTed to OFC.

    :param filter_dict: overrides merged into the default input filter.
    :param filter_post: overrides merged into the expected request body.
    :param filter_wildcards: fields expected to be wildcarded, i.e.
        removed from the expected body.
    :param no_portinfo: when True, simulate a port with no portinfo so
        in_datapath_id/in_port are wildcarded.
    """
    t, n, p = self.get_ofc_item_random_params()
    # Random MAC used as the filter's source mac.
    src_mac = ':'.join(['%x' % random.randint(0, 255)
                        for i in moves.xrange(6)])
    if filter_wildcards is None:
        filter_wildcards = []

    # Default packet filter handed to the driver under test.
    f = {'tenant_id': t,
         'id': uuidutils.generate_uuid(),
         'network_id': n,
         'priority': 123,
         'action': "ACCEPT",
         'in_port': p.id,
         'src_mac': src_mac,
         'dst_mac': "",
         'eth_type': 0,
         'src_cidr': "",
         'dst_cidr': "",
         'src_port': 0,
         'dst_port': 0,
         'protocol': "TCP",
         'admin_state_up': True,
         'status': "ACTIVE"}
    if filter_dict:
        f.update(filter_dict)

    net_path = "/networks/%s" % n

    all_wildcards_ofp = ['dl_vlan', 'dl_vlan_pcp', 'nw_tos',
                         'in_port', 'dl_src', 'dl_dst',
                         'nw_src', 'nw_dst',
                         'dl_type', 'nw_proto',
                         'tp_src', 'tp_dst']
    all_wildcards_non_ofp = ['in_datapath_id', 'slice']

    # Expected request body for POST /filters.
    body = {'id': f['id'],
            'action': 'ALLOW',
            'priority': 123,
            'slice': n,
            'in_datapath_id': '0x123456789',
            'in_port': 1234,
            'nw_proto': '0x6',
            'dl_type': '0x800',
            'dl_src': src_mac}
    if filter_post:
        body.update(filter_post)

    if no_portinfo:
        filter_wildcards += ['in_datapath_id', 'in_port']
        p = None

    # Wildcarded fields must not appear in the expected body.
    for field in filter_wildcards:
        if field in body:
            del body[field]

    # Every matchable field absent from the body must be wildcarded;
    # nw_src/nw_dst carry a /32 prefix length suffix.
    ofp_wildcards = ["%s:32" % _f if _f in ['nw_src', 'nw_dst'] else _f
                     for _f in all_wildcards_ofp if _f not in body]
    body['ofp_wildcards'] = set(ofp_wildcards)

    non_ofp_wildcards = [_f for _f in all_wildcards_non_ofp
                         if _f not in body]
    if non_ofp_wildcards:
        body['wildcards'] = set(non_ofp_wildcards)

    ctx = mock.Mock()
    ctx.session = mock.sentinel.session
    with mock.patch('neutron.plugins.nec.db.api.get_portinfo',
                    return_value=p) as get_portinfo:
        with mock.patch('neutron.plugins.nec.db.api.get_ofc_id',
                        return_value=net_path) as get_ofc_id:
            ret = self.driver.create_filter(ctx, f, f['id'])
    # The content of 'body' is checked below.
    self.do_request.assert_called_once_with("POST", "/filters",
                                            body=mock.ANY)
    self.assertEqual(ret, '/filters/%s' % f['id'])

    # ofp_wildcards and wildcards in body are comma-separated
    # string but the order of elements are not considered,
    # so we check these fields as set.
    actual_body = self.do_request.call_args[1]['body']
    if 'ofp_wildcards' in actual_body:
        ofp_wildcards = actual_body['ofp_wildcards'].split(',')
        actual_body['ofp_wildcards'] = set(ofp_wildcards)
    if 'wildcards' in actual_body:
        actual_body['wildcards'] = set(actual_body['wildcards'].split(','))
    self.assertEqual(body, actual_body)

    get_ofc_id.assert_called_once_with(mock.sentinel.session,
                                       'ofc_network', n)
    # NOTE(review): when no_portinfo=True, p is None and p.id below would
    # raise AttributeError -- confirm callers never hit this combination.
    get_portinfo.assert_called_once_with(mock.sentinel.session, p.id)
def create_security_group(self, context, security_group, default_sg=False):
    """Create security group.

    If default_sg is true that means we are a default security group for
    a given tenant if it does not exist.

    :param context: neutron api request context.
    :param security_group: dict with a 'security_group' key holding the
        group's attributes (name, description, optional id).
    :param default_sg: True when creating the tenant's default group.
    :returns: the created security group as a dict.
    :raises ext_sg.SecurityGroupConflict: when a BEFORE_CREATE callback
        vetoes the creation.
    """
    s = security_group['security_group']
    kwargs = {
        'context': context,
        'security_group': s,
        'is_default': default_sg,
    }
    # NOTE(armax): a callback exception here will prevent the request
    # from being processed. This is a hook point for backend's validation;
    # we raise to propagate the reason for the failure.
    try:
        registry.notify(resources.SECURITY_GROUP, events.BEFORE_CREATE,
                        self, **kwargs)
    except exceptions.CallbackFailure as e:
        raise ext_sg.SecurityGroupConflict(reason=e)

    tenant_id = self._get_tenant_id_for_create(context, s)

    # Ensure the tenant's default group exists first, unless we are
    # creating that default group right now.
    if not default_sg:
        self._ensure_default_security_group(context, tenant_id)

    with context.session.begin(subtransactions=True):
        # Honor a caller-supplied id; otherwise generate one.
        security_group_db = SecurityGroup(id=s.get('id') or (
                                          uuidutils.generate_uuid()),
                                          description=s['description'],
                                          tenant_id=tenant_id,
                                          name=s['name'])
        context.session.add(security_group_db)
        if default_sg:
            context.session.add(
                DefaultSecurityGroup(
                    security_group=security_group_db,
                    tenant_id=security_group_db['tenant_id']))
        for ethertype in ext_sg.sg_supported_ethertypes:
            if default_sg:
                # Allow intercommunication
                ingress_rule = SecurityGroupRule(
                    id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                    security_group=security_group_db,
                    direction='ingress',
                    ethertype=ethertype,
                    source_group=security_group_db)
                context.session.add(ingress_rule)

            # Every group gets allow-all egress rules per ethertype.
            egress_rule = SecurityGroupRule(
                id=uuidutils.generate_uuid(), tenant_id=tenant_id,
                security_group=security_group_db,
                direction='egress',
                ethertype=ethertype)
            context.session.add(egress_rule)

    secgroup_dict = self._make_security_group_dict(security_group_db)
    kwargs['security_group'] = secgroup_dict
    registry.notify(resources.SECURITY_GROUP, events.AFTER_CREATE, self,
                    **kwargs)
    return secgroup_dict
def create_network(self, ofc_tenant_id, description, network_id=None):
    """Create an OFC network and return its resource path.

    A network id is generated when the caller does not supply one.
    """
    ofc_network_id = network_id if network_id else uuidutils.generate_uuid()
    request_body = {'id': ofc_network_id, 'description': description}
    self.client.post(self.networks_path, body=request_body)
    return self.network_path % ofc_network_id
def _add_lqueue(self, body):
    """Parse and register a fake logical queue, assigning it a uuid."""
    lqueue = jsonutils.loads(body)
    new_uuid = uuidutils.generate_uuid()
    lqueue['uuid'] = new_uuid
    self._fake_lqueue_dict[new_uuid] = lqueue
    return lqueue
def test_get_segment_by_id_result_not_found(self):
    """Looking up a random, unknown segment id yields None."""
    missing = uuidutils.generate_uuid()
    result = ml2_db.get_segment_by_id(self.ctx.session, missing)
    self.assertIsNone(result)
def _read_id(self, subnetpool):
    """Record the subnetpool id, minting a UUID when none was given."""
    pool_id = subnetpool.get('id', attributes.ATTR_NOT_SPECIFIED)
    if pool_id is attributes.ATTR_NOT_SPECIFIED:
        pool_id = uuidutils.generate_uuid()
    self.id = pool_id
def test_get_nsx_switch_ids_from_db_mappings(self):
    """Standard case: lswitch mappings already stored in the neutron db."""
    expected_uuids = [uuidutils.generate_uuid()]
    self._mock_network_mapping_db_calls(expected_uuids)
    self._verify_get_nsx_switch_ids(expected_uuids)
def test_delete_lrouter_port_nonexistent_port_raises(self):
    """Deleting a port absent from an existing lrouter raises NotFound."""
    lrouter = routerlib.create_lrouter(
        self.fake_cluster, uuidutils.generate_uuid(),
        'pippo', 'fake-lrouter', '10.0.0.1')
    self.assertRaises(exceptions.NotFound,
                      routerlib.delete_router_lport,
                      self.fake_cluster, lrouter['uuid'], 'abc')