def create_vn(self):
    # The script has run till now with the same name and subnets. Let's change it!
    self.vn1_name = "test_DM_v4_only"
    self.vn1_net = ['12.6.2.0/24']
    # The router_external tag causes a DM IP allocation problem. Changing it to False.
    self.vn1_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=self.vn1_name,
                  inputs=self.inputs,
                  subnets=self.vn1_net,
                  router_external=False,
                  shared=False))
    #assert self.vn1_fixture.verify_on_setup()
    self.add_RT_basic_traffic()
    self.vn2_name = "test_DM_dual_stack"
    self.vn2_net = ['2001::101:0/120']
    self.vn2_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=self.vn2_name,
                  inputs=self.inputs,
                  subnets=self.vn2_net))
    #assert self.vn2_fixture.verify_on_setup()
    self.vm1_fixture = self.useFixture(
        VMFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_obj=self.vn1_fixture.obj,
                  vm_name='sender',
                  node_name=None,
                  image_name='cirros',
                  flavor='m1.tiny'))
def setup_ipam_vn(self):
    # Create new IPAMs
    self.ipam1_obj = self.useFixture(
        IPAMFixture(project_obj=self.project, name='ipam1'))
    self.ipam2_obj = self.useFixture(
        IPAMFixture(project_obj=self.project, name='ipam2'))
    self.ipam3_obj = self.useFixture(
        IPAMFixture(project_obj=self.project, name='ipam3'))
    # Create new VNs, one per IPAM
    self.VN1_fixture = self.useFixture(
        VNFixture(project_name=self.project.project_name,
                  connections=self.connections,
                  vn_name='VN1',
                  inputs=self.inputs,
                  subnets=['10.1.1.0/24'],
                  ipam_fq_name=self.ipam1_obj.fq_name))
    self.VN2_fixture = self.useFixture(
        VNFixture(project_name=self.project.project_name,
                  connections=self.connections,
                  vn_name='VN2',
                  inputs=self.inputs,
                  subnets=['10.2.1.0/24'],
                  ipam_fq_name=self.ipam2_obj.fq_name))
    self.VN3_fixture = self.useFixture(
        VNFixture(project_name=self.project.project_name,
                  connections=self.connections,
                  vn_name='VN3',
                  inputs=self.inputs,
                  subnets=['10.3.1.0/24'],
                  ipam_fq_name=self.ipam3_obj.fq_name))
def create_vn(self):
    self.vn1_name = "test_vn"
    self.vn1_net = ['1.1.1.0/24']
    self.vn1_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=self.vn1_name,
                  inputs=self.inputs,
                  subnets=self.vn1_net,
                  router_external=True))
    #assert self.vn1_fixture.verify_on_setup()
    self.add_RT_basic_traffic()
    self.vn2_name = "test_v6"
    self.vn2_net = ['2001::101:0/120']
    self.vn2_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=self.vn2_name,
                  inputs=self.inputs,
                  subnets=self.vn2_net))
    #assert self.vn2_fixture.verify_on_setup()
    self.vm1_fixture = self.useFixture(
        VMFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_obj=self.vn1_fixture.obj,
                  vm_name='sender',
                  node_name=None,
                  image_name='cirros',
                  flavor='m1.tiny'))
def test_lbaas_with_different_fip(self):
    '''Create LB, LISTENER, POOL and MEMBER.
       Create a FIP and associate it to the VIP; create a VM in the FIP network.
       Verify: pool, member and vip get created after vip creation,
       netns is created in the compute node and the haproxy process starts; fail otherwise.
       Verify the LB method after re-associating a different FIP.
    '''
    result = True
    pool_members = {}
    members = []

    fip_fix = self.useFixture(VNFixture(connections=self.connections,
                                        router_external=True))
    client_vm1_fixture = self.create_vm(fip_fix,
                                        flavor='contrail_flavor_small',
                                        image_name='ubuntu')
    vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)
    vn_vip_fixture = vn_vm_fix[0]
    lb_pool_servers = vn_vm_fix[1]
    assert client_vm1_fixture.wait_till_vm_is_up()
    for VMs in lb_pool_servers:
        members.append(VMs.vm_ip)

    pool_members.update({'address': members})
    pool_name = get_random_name('mypool')
    lb_method = 'ROUND_ROBIN'
    protocol = 'HTTP'
    protocol_port = 80
    vip_name = get_random_name('myvip')
    listener_name = get_random_name('RR')

    self.logger.info("Verify Round Robin Method")
    rr_listener = self.create_lbaas(vip_name, vn_vip_fixture.get_uuid(),
                                    pool_name=pool_name,
                                    pool_algorithm=lb_method,
                                    pool_protocol=protocol,
                                    pool_port=HTTP_PORT,
                                    members=pool_members,
                                    listener_name=listener_name,
                                    fip_net_id=fip_fix.uuid,
                                    vip_port=HTTP_PORT,
                                    vip_protocol='HTTP',
                                    hm_delay=5, hm_timeout=5,
                                    hm_max_retries=5,
                                    hm_probe_type=HTTP_PROBE)
    assert rr_listener.verify_on_setup()
    assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers,
                                 rr_listener.fip_ip),\
        "Verify LB Method failed for ROUND ROBIN"

    fip_fix1 = self.useFixture(VNFixture(connections=self.connections,
                                         router_external=True))
    client_vm2_fixture = self.create_vm(fip_fix1,
                                        flavor='contrail_flavor_small',
                                        image_name='ubuntu')
    assert client_vm2_fixture.wait_till_vm_is_up()

    # Disassociate the FIP and associate a new FIP
    rr_listener.delete_fip_on_vip()
    rr_listener.fip_id = None
    rr_listener.fip_net_id = fip_fix1.uuid
    rr_listener.create_fip_on_vip()
    assert rr_listener.verify_on_setup(), \
        "Verify on setup failed after new FIP associated"
    assert self.verify_lb_method(client_vm2_fixture, lb_pool_servers,
                                 rr_listener.fip_ip),\
        "Verify LB Method failed for ROUND ROBIN"
def vn_add_delete(self):
    self.newvn_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name='newvn',
                  inputs=self.inputs,
                  subnets=['22.1.1.0/24']))
    self.newvn_fixture.verify_on_setup()
    self.newvn11_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name='newvn11',
                  inputs=self.inputs,
                  subnets=['11.1.1.0/24']))
    self.newvn11_fixture.verify_on_setup()
    return True
def create_vn(self, *args, **kwargs):
    return self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  inputs=self.inputs,
                  *args, **kwargs))
def test_lbaas_health_monitor(self):
    '''Create LB, LISTENER, POOL and MEMBER.
       Create a FIP and associate it to the VIP; create a VM in the FIP network.
       Verify: pool, member and vip get created after vip creation,
       netns is created in the compute node and the haproxy process starts; fail otherwise.
       Verify load balancing when members go down or are added without a webserver.
    '''
    result = True
    pool_members = {}
    members = []

    fip_fix = self.useFixture(VNFixture(connections=self.connections,
                                        router_external=True))
    client_vm1_fixture = self.create_vm(fip_fix,
                                        flavor='contrail_flavor_small',
                                        image_name='ubuntu')
    vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)
    vn_vip_fixture = vn_vm_fix[0]
    lb_pool_servers = vn_vm_fix[1]
    assert client_vm1_fixture.wait_till_vm_is_up()
    for VMs in lb_pool_servers:
        members.append(VMs.vm_ip)

    pool_members.update({'address': members})
    pool_name = get_random_name('mypool')
    lb_method = 'ROUND_ROBIN'
    protocol = 'HTTP'
    protocol_port = 80
    vip_name = get_random_name('myvip')
    listener_name = get_random_name('RR')

    self.logger.info("Verify Round Robin Method")
    rr_listener = self.create_lbaas(vip_name, vn_vip_fixture.get_uuid(),
                                    pool_name=pool_name,
                                    pool_algorithm=lb_method,
                                    pool_protocol=protocol,
                                    pool_port=HTTP_PORT,
                                    members=pool_members,
                                    listener_name=listener_name,
                                    fip_net_id=fip_fix.uuid,
                                    vip_port=HTTP_PORT,
                                    vip_protocol='HTTP',
                                    hm_delay=5, hm_timeout=5,
                                    hm_max_retries=5,
                                    hm_probe_type=HTTP_PROBE)
    assert rr_listener.verify_on_setup()
    assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers,
                                 rr_listener.fip_ip),\
        "Verify LB Method failed for ROUND ROBIN"

    self.logger.info("Verify after stopping the webserver on one of the servers")
    lb_pool_servers[0].stop_webserver()
    assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers[1:],
                                 rr_listener.fip_ip),\
        "Verify LB Method failed for ROUND ROBIN"

    self.logger.info("Verify after adding a few more members, without starting "
                     "the webserver on them")
    for no_of_vm in range(3):
        lb_pool_servers.append(self.create_vm(vn_vip_fixture,
                                              flavor='contrail_flavor_small',
                                              image_name='ubuntu'))
        lb_pool_servers[-1].wait_till_vm_is_up()
        ##lb_pool_servers[-1].start_webserver(listen_port=80)
        rr_listener.create_member(address=lb_pool_servers[-1].vm_ip)
    assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers[1:-3],
                                 rr_listener.fip_ip),\
        "Verify LB Method failed for ROUND ROBIN"
def create_vn(self, vn_name, subnets):
    return self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  inputs=self.inputs,
                  vn_name=vn_name,
                  subnets=subnets))
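# A minimal usage sketch for the create_vn() helper above, inside a test case of the
# same base class. The test name, VN name and subnet are hypothetical examples, and
# get_random_name() is assumed to be imported as in the other helpers here;
# verify_on_setup() is the standard VNFixture verification hook.
def test_create_vn_sanity(self):
    vn_fixture = self.create_vn(vn_name=get_random_name('vn_demo'),
                                subnets=['10.10.10.0/24'])
    assert vn_fixture.verify_on_setup(), 'VN verification failed'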
def verify_vn(self, stack, env, stack_name):
    op = stack.stacks.get(stack_name).outputs
    time.sleep(5)
    for output in op:
        if output['output_key'] == 'right_net_id':
            vn_id = output['output_value']
            vn_obj = self.vnc_lib.virtual_network_read(id=vn_id)
            vn_name = str(env['parameters']['right_net_name'])
            subnet = str(env['parameters']['right_net_cidr'])
        elif output['output_key'] == 'left_net_id':
            vn_id = output['output_value']
            vn_obj = self.vnc_lib.virtual_network_read(id=vn_id)
            vn_name = str(env['parameters']['left_net_name'])
            subnet = str(env['parameters']['left_net_cidr'])
        elif output['output_key'] == 'transit_net_id':
            vn_id = output['output_value']
            vn_obj = self.vnc_lib.virtual_network_read(id=vn_id)
            vn_name = str(env['parameters']['transit_net_name'])
            subnet = str(env['parameters']['transit_net_cidr'])
    vn_fix = self.useFixture(VNFixture(project_name=self.inputs.project_name,
                                       vn_name=vn_name,
                                       inputs=self.inputs,
                                       subnets=[subnet],
                                       connections=self.connections))
    if vn_fix.vn_id == vn_id:
        self.logger.info('VN %s launched successfully via heat' % vn_name)
    assert vn_fix.verify_on_setup()
    return vn_fix
def test_check_vxlan_id_reuse(self):
    ''' Create a VN X.
        Create another VN Y and check that its vxlan id is the next number.
        Delete the two VNs.
        On creating a VN again, verify that the vxlan id of X is used
        (i.e. the vxlan id gets reused).
    '''
    vn1_name = get_random_name('vn')
    vn1_subnets = [get_random_cidr()]
    vn2_name = get_random_name('vn')
    vn2_subnets = [get_random_cidr()]

    # First VN
    vn1_obj = VNFixture(project_name=self.inputs.project_name,
                        connections=self.connections,
                        inputs=self.inputs,
                        vn_name=vn1_name,
                        subnets=vn1_subnets)
    vn1_obj.setUp()
    vxlan_id1 = vn1_obj.get_vxlan_id()

    # Second VN
    vn2_obj = VNFixture(project_name=self.inputs.project_name,
                        connections=self.connections,
                        inputs=self.inputs,
                        vn_name=vn2_name,
                        subnets=vn2_subnets)
    vn2_obj.setUp()
    vxlan_id2 = vn2_obj.get_vxlan_id()

    assert vxlan_id2 == (vxlan_id1 + 1), (
        "Vxlan ID allocation is not incremental, "
        "Two VNs were seen to have vxlan ids %s, %s" % (vxlan_id1, vxlan_id2))

    # Delete the VNs
    vn1_obj.cleanUp()
    vn2_obj.cleanUp()

    vn3_fixture = self.create_vn()
    assert vn3_fixture.verify_on_setup(), "VNFixture verify failed!"
    new_vxlan_id = vn3_fixture.get_vxlan_id()
    assert new_vxlan_id == vxlan_id1, (
        "Vxlan ID reuse does not seem to happen",
        "Expected : %s, Got : %s" % (vxlan_id1, new_vxlan_id))
    self.logger.info('Vxlan ids are reused..ok')
def create_vn(self, vn_name, vn_subnets, option='orch'):
    return self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  inputs=self.inputs,
                  vn_name=vn_name,
                  subnets=vn_subnets,
                  option=option))
def test_remove_policy_with_ref(self):
    ''' This tests the following scenarios.
        1. Validate that policy removal fails when the policy is referenced by a VN.
        2. Validate vn_policy data in the API server against quantum VN data, when the
           policy is created and then unbound from the VN through quantum APIs.
        3. Validate policy data in the API server against quantum policy data, when the
           policy is created and deleted through quantum APIs.
    '''
    vn1_name = 'vn4'
    vn1_subnets = ['10.1.1.0/24']
    policy_name = 'policy1'
    rules = [
        {
            'direction': '<>', 'simple_action': 'pass',
            'protocol': 'icmp',
            'source_network': vn1_name,
            'dest_network': vn1_name,
        },
    ]
    policy_fixture = self.useFixture(
        PolicyFixture(policy_name=policy_name,
                      rules_list=rules,
                      inputs=self.inputs,
                      connections=self.connections))
    vn1_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=vn1_name,
                  inputs=self.inputs,
                  subnets=vn1_subnets,
                  policy_objs=[policy_fixture.policy_obj]))
    assert vn1_fixture.verify_on_setup()
    ret = policy_fixture.verify_on_setup()
    if ret['result'] == False:
        self.logger.error("Policy %s verification failed after setup" % policy_name)
        assert ret['result'], ret['msg']
    self.logger.info("Done with setup and verification, moving on to the test ..")

    # Try to remove the policy which is referenced by the VN.
    policy_removal = True
    pol_id = None
    if self.quantum_h:
        policy_removal = self.quantum_h.delete_policy(policy_fixture.get_id())
    else:
        try:
            self.vnc_lib.network_policy_delete(id=policy_fixture.get_id())
        except Exception as e:
            policy_removal = False
    self.assertFalse(
        policy_removal,
        'Policy removal succeeded, which is not expected since the policy is '
        'referenced by a VN')
    #assert vn1_fixture.verify_on_setup()
    # policy_fixture.verify_policy_in_api_server()
    return True
def config_vn(self, vn_name, vn_net):
    vn_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=vn_name,
                  inputs=self.inputs,
                  subnets=vn_net))
    assert vn_fixture.verify_on_setup()
    return vn_fixture
def test_service_custom_isolation(self):
    """
    Verify reachability of a Service in and out of a custom isolated namespace.
    Verify the following reachability:
    1. A pod inside the custom isolated namespace should not be able to reach a
       service within the same namespace or any service outside the namespace.
    2. After creating a contrail network policy between the custom VN and the
       service VN, a pod inside the custom isolated namespace should be able to
       reach services within and outside this namespace.
    3. Pods inside a non-isolated namespace should be able to reach a service
       inside the custom isolated namespace.
    """
    client1, client2 = self.setup_common_namespaces_pods(prov_service=True)

    # check 1
    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client2[2],
                                  expectation=False)
    assert self.validate_nginx_lb([client1[0], client1[1]],
                                  client1[3].cluster_ip,
                                  test_pod=client2[2],
                                  expectation=False)

    # check 2
    # Creating the policy between the custom VN and the default service VN
    policy_name = 'allow-btw-custom-ns-and-service'
    if self.inputs.slave_orchestrator == 'kubernetes':
        k8s_default_service_vn_name = self.connections.project_name + \
            '-default-service-network'
    else:
        k8s_default_service_vn_name = "k8s-default-service-network"
    k8s_default_service_vn_fq_name = [
        self.connections.domain_name,
        self.connections.project_name,
        k8s_default_service_vn_name
    ]
    k8s_default_service_vn_obj = self.vnc_lib.virtual_network_read(
        fq_name=k8s_default_service_vn_fq_name)
    k8s_service_vn_fixt = VNFixture(connections=self.connections,
                                    vn_name=k8s_default_service_vn_name,
                                    option="contrail",
                                    uuid=k8s_default_service_vn_obj.uuid)
    k8s_service_vn_fixt.setUp()
    vn_service_policy = self.setup_policy_between_vns(
        client2[7],
        k8s_service_vn_fixt,
        api="contrail",
        connections=self.connections)
    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client2[2])
    assert self.validate_nginx_lb([client1[0], client1[1]],
                                  client1[3].cluster_ip,
                                  test_pod=client2[2])

    # check 3
    # Is disabling service isolation required? For now, this works without
    # disabling service isolation.
    #client2[4].disable_service_isolation()
    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client1[2])
def add_multiple_vns(self):
    self.vn3_name = "test_vn3"
    self.vn3_net = ['2.1.1.0/24']
    self.vn3_fixture = self.useFixture(VNFixture(
        project_name=self.inputs.project_name,
        connections=self.connections,
        vn_name=self.vn3_name,
        inputs=self.inputs,
        subnets=self.vn3_net,
        router_external=True))
    assert self.vn3_fixture.verify_on_setup()
    self.vm3_fixture = self.useFixture(VMFixture(
        project_name=self.inputs.project_name,
        connections=self.connections,
        vn_obj=self.vn3_fixture.obj,
        vm_name='receiver',
        node_name=None,
        image_name='cirros',
        flavor='m1.tiny'))
def test_lbaas_with_https(self):
    result = True
    pool_members = {}
    members = []

    fip_fix = self.useFixture(
        VNFixture(connections=self.connections, router_external=True))
    client_vm1_fixture = self.create_vm(fip_fix,
                                        flavor='contrail_flavor_small',
                                        image_name='ubuntu')
    vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)
    vn_vip_fixture = vn_vm_fix[0]
    lb_pool_servers = vn_vm_fix[1]
    assert client_vm1_fixture.wait_till_vm_is_up()
    for VMs in lb_pool_servers:
        members.append(VMs.vm_ip)

    pool_members.update({'address': members})
    pool_name = get_random_name('mypool')
    lb_method = 'ROUND_ROBIN'
    protocol = 'HTTP'
    protocol_port = 80
    listener_protocol = 'TERMINATED_HTTPS'
    listener_port = 443
    vip_name = get_random_name('myvip')
    listener_name = get_random_name('RR')

    self.logger.info("Verify Round Robin Method")
    rr_listener = self.create_lbaas(vip_name, vn_vip_fixture.get_uuid(),
                                    pool_name=pool_name,
                                    pool_algorithm=lb_method,
                                    pool_protocol=protocol,
                                    pool_port=HTTP_PORT,
                                    members=pool_members,
                                    listener_name=listener_name,
                                    fip_net_id=fip_fix.uuid,
                                    vip_port=listener_port,
                                    vip_protocol=listener_protocol,
                                    default_tls_container='tls_container',
                                    hm_delay=5, hm_timeout=5,
                                    hm_max_retries=5,
                                    hm_probe_type=HTTP_PROBE)
    assert rr_listener.verify_on_setup(), \
        "Verify on setup failed after new FIP associated"
    assert client_vm1_fixture.ping_with_certainty(rr_listener.fip_ip)
    assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers,
                                 rr_listener.fip_ip, port=listener_port,
                                 https=True),\
        "Verify LB Method failed for ROUND ROBIN"
def create_vn(self, connections=None, verify=True, option='contrail', **kwargs):
    connections = connections or self.connections
    vn_fixture = self.create_fixture(VNFixture,
                                     connections=connections,
                                     option=option,
                                     **kwargs)
    if vn_fixture and verify:
        # WA: verify using admin creds since RI and other system objects
        # won't be visible to the regular user
        vn_admin_fixture = VNFixture(connections=self.connections,
                                     option=option,
                                     uuid=vn_fixture.uuid)
        vn_admin_fixture.read()
        assert vn_admin_fixture.verify_on_setup(), 'VN verification failed'
    return vn_fixture
def scale_vns(self):
    generator = iter_iprange('111.1.1.0', '111.255.255.0', step=256)
    for i in range(1, 2000):
        vn_scale_name = "test_%s_vn" % i
        vn_scale_net = (str(generator.next()) + str('/24')).split()
        vn_scale_fixture = self.useFixture(VNFixture(
            project_name=self.inputs.project_name,
            connections=self.connections,
            option='contrail',
            vn_name=vn_scale_name,
            inputs=self.inputs,
            subnets=vn_scale_net,
            router_external=True))
        #assert vn_scale_fixture.verify_on_setup()
        for dev_fixture in self.phy_router_fixture.values():
            dev_fixture.add_virtual_network(str(vn_scale_fixture.uuid))
def test_ingress_ip_assignment(self):
    ''' Verify that an Ingress gets a Cluster IP which is reachable from Pods in the
        same namespace.
        Also verify that a Floating IP is assigned to the Ingress from the public
        FIP pool.
    Steps:
    1. Create a service with 2 pods running nginx
    2. Create an ingress out of this service
    3. From another Pod, do a wget on the ingress Cluster IP
    Validate that the Ingress gets an IP from the public FIP pool, which might or
    might not be accessible.
    Validate that the service and its load balancing work.
    '''
    app = 'http_test'
    labels = {'app': app}
    namespace = self.setup_namespace(name='default')
    assert namespace.verify_on_setup()
    service = self.setup_http_service(namespace=namespace.name, labels=labels)
    pod1 = self.setup_nginx_pod(namespace=namespace.name, labels=labels)
    pod2 = self.setup_nginx_pod(namespace=namespace.name, labels=labels)

    if not getattr(self.public_vn, 'public_vn_fixture', None):
        vn_fixture = self.useFixture(VNFixture(
            project_name=self.inputs.project_name,
            vn_name='__public__',
            connections=self.connections,
            inputs=self.inputs,
            option="contrail"))
        assert vn_fixture.verify_on_setup()
        fip_pool_fixture = self.useFixture(FloatingIPFixture(
            project_name=self.inputs.project_name,
            inputs=self.inputs,
            connections=self.connections,
            pool_name='__fip_pool_public__',
            vn_id=vn_fixture.vn_id))
        assert fip_pool_fixture.verify_on_setup()

    ingress = self.setup_simple_nginx_ingress(service.name,
                                              namespace=namespace.name)
    assert ingress.verify_on_setup()

    pod3 = self.setup_busybox_pod(namespace=namespace.name)
    self.verify_nginx_pod(pod1)
    self.verify_nginx_pod(pod2)
    assert pod3.verify_on_setup()

    # Now validate ingress from within the cluster network
    assert self.validate_nginx_lb([pod1, pod2], ingress.cluster_ip,
                                  test_pod=pod3)
def test_lbaas_after_stop_start_vrouter_agent(self):
    '''Create LB, LISTENER, POOL and MEMBER.
       Create a FIP and associate it to the VIP; create a VM in the FIP network.
       Verify: pool, member and vip get created after vip creation,
       netns is created in the compute node and the haproxy process starts; fail otherwise.
       Verify HTTP traffic passes through the standby netns when the vrouter
       hosting the active netns fails.
    '''
    result = True
    pool_members = {}
    members = []

    fip_fix = self.useFixture(VNFixture(connections=self.connections,
                                        router_external=True))
    client_vm1_fixture = self.create_vm(fip_fix,
                                        flavor='contrail_flavor_small',
                                        image_name='ubuntu')
    vn_vm_fix = self.create_vn_and_its_vms(no_of_vm=3)
    vn_vip_fixture = vn_vm_fix[0]
    lb_pool_servers = vn_vm_fix[1]
    assert client_vm1_fixture.wait_till_vm_is_up()
    for VMs in lb_pool_servers:
        members.append(VMs.vm_ip)

    pool_members.update({'address': members})
    pool_name = get_random_name('mypool')
    lb_method = 'ROUND_ROBIN'
    protocol = 'HTTP'
    protocol_port = 80
    vip_name = get_random_name('myvip')
    listener_name = get_random_name('HTTP')

    # Call the LB fixture to create the LBaaS VIP, Listener, Pool and Member,
    # and associate a health monitor to the pool
    lb = self.create_lbaas(vip_name, vn_vip_fixture.get_uuid(),
                           pool_name=pool_name,
                           pool_algorithm=lb_method,
                           pool_protocol=protocol,
                           pool_port=HTTP_PORT,
                           members=pool_members,
                           listener_name=listener_name,
                           fip_net_id=fip_fix.uuid,
                           vip_port=HTTP_PORT,
                           vip_protocol='HTTP',
                           hm_delay=5, hm_timeout=5,
                           hm_max_retries=5,
                           hm_probe_type=HTTP_PROBE)

    # Verify all the creations succeeded
    lb.verify_on_setup()

    # Now stop (and later restart) the vrouter hosting the active netns
    self.addCleanup(lb.start_active_vrouter)
    lb.stop_active_vrouter()
    lb.start_active_vrouter()
    assert self.verify_lb_method(client_vm1_fixture, lb_pool_servers,
                                 lb.fip_ip),\
        "Verify lb method failed over standby netns on %s" % lb.standby_vr
def setup_vn(self, project_name=None, connections=None, inputs=None,
             vn_name=None, option="contrail"):
    connections = connections or self.connections
    inputs = inputs or self.inputs
    vn_name = vn_name or get_random_name('vn_test')
    return self.useFixture(
        VNFixture(connections=connections,
                  inputs=inputs,
                  vn_name=vn_name,
                  option=option))
def create_vm_start_ping(self, ping_count=10):
    self.vn_fix = self.useFixture(
        VNFixture(connections=self.connections,
                  router_external=True,
                  rt_number=2500,
                  af='dual'))
    self.vm1_fixture = self.create_vm(self.vn_fix)
    self.vm2_fixture = self.create_vm(self.vn_fix)
    assert self.vm1_fixture.wait_till_vm_is_up()
    assert self.vm2_fixture.wait_till_vm_is_up()
    assert self.vm1_fixture.ping_with_certainty(self.vm2_fixture.vm_ip)

    cmd = 'ping %s -c %s -i 0.01 > %s' % (self.vm2_fixture.vm_ip,
                                          ping_count, self.result_file)
    self.logger.info('Starting ping on %s to %s' % (self.vm1_fixture.vm_name,
                                                    self.vm2_fixture.vm_ip))
    self.logger.debug('ping cmd : %s' % (cmd))
    self.vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, as_daemon=True,
                                   pidfile=self.pid_file)

    self.ipv6_addr = ''.join(
        self.vm2_fixture.get_vm_ips(vn_fq_name=self.vn_fix.vn_fq_name,
                                    af='v6'))
    cmd = 'ping6 %s -c %s -i 0.01 > %s' % (self.ipv6_addr, ping_count,
                                           self.result6_file)
    self.logger.info('Starting ping on %s to %s' % (self.vm1_fixture.vm_name,
                                                    self.ipv6_addr))
    self.logger.debug('ping6 cmd : %s' % (cmd))
    self.vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, as_daemon=True,
                                   pidfile=self.pid6_file)
    return True
def test_services_custom_isolation_post_kube_manager_restart(self):
    """
    Verify that after restart of contrail-kube-manager, service reachability
    to and from a custom isolated namespace/pod is not affected.
    Verify the following reachability:
    1. Verify reachability between pods and services
    2. Restart contrail-kube-manager
    3. Verify reachability between pods and services
    """
    self.addCleanup(self.invalidate_kube_manager_inspect)
    client1, client2 = self.setup_common_namespaces_pods(prov_service=True)
    policy_name = 'allow-btw-custom-ns-and-service'
    if self.inputs.slave_orchestrator == 'kubernetes':
        k8s_default_service_vn_name = self.connections.project_name + \
            '-default-service-network'
    else:
        k8s_default_service_vn_name = "k8s-default-service-network"
    k8s_default_service_vn_fq_name = self.connections.inputs.project_fq_name + \
        [k8s_default_service_vn_name]
    k8s_default_service_vn_obj = self.vnc_lib.virtual_network_read(
        fq_name=k8s_default_service_vn_fq_name)
    k8s_service_vn_fixt = VNFixture(connections=self.connections,
                                    vn_name=k8s_default_service_vn_name,
                                    option="contrail",
                                    uuid=k8s_default_service_vn_obj.uuid)
    k8s_service_vn_fixt.setUp()
    vn_service_policy = self.setup_policy_between_vns(
        client2[6],
        k8s_service_vn_fixt,
        api="contrail",
        connections=self.connections)

    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client2[2])
    assert self.validate_nginx_lb([client1[0], client1[1]],
                                  client1[3].cluster_ip,
                                  test_pod=client2[2])
    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client1[2])

    self.restart_kube_manager()

    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client2[2])
    assert self.validate_nginx_lb([client1[0], client1[1]],
                                  client1[3].cluster_ip,
                                  test_pod=client2[2])
    assert self.validate_nginx_lb([client2[0], client2[1]],
                                  client2[3].cluster_ip,
                                  test_pod=client1[2])
def create_only_vn(cls, vn_name=None, vn_subnets=None, **kwargs):
    '''Classmethod to do only VN creation'''
    if not vn_name:
        vn_name = get_random_name('vn')
    connections = kwargs.pop('connections', None) or cls.connections
    project_name = kwargs.pop('project_name', None) or connections.project_name
    vn_fixture = VNFixture(project_name=project_name,
                           connections=connections,
                           inputs=connections.inputs,
                           vn_name=vn_name,
                           subnets=vn_subnets,
                           **kwargs)
    vn_fixture.setUp()
    return vn_fixture
def verify_vn(self, stack, env, stack_name):
    op = stack.stacks.get(stack_name).outputs
    time.sleep(5)
    for output in op:
        if output['output_key'] == 'net_id':
            vn_id = output['output_value']
            vn_obj = self.vnc_lib.virtual_network_read(fq_name=vn_id)
            vn_id = vn_obj.uuid
            vn_name = str(env['parameters']['name'])
            subnet = str(env['parameters']['subnet']) + '/' + \
                str(env['parameters']['prefix'])
            vn_fix = self.useFixture(VNFixture(
                project_name=self.inputs.project_name,
                option='contrail',
                vn_name=vn_name,
                inputs=self.inputs,
                uuid=vn_id,
                empty_vn=True,
                connections=self.connections))
            self.logger.info('VN %s launched successfully via heat' % vn_name)
            #assert vn_fix.verify_on_setup()
    return vn_fix
def create_external_network(self, connections, inputs):
    ext_vn_name = get_random_name('ext_vn')
    ext_subnets = [self.inputs.fip_pool]
    mx_rt = self.inputs.mx_rt
    ext_vn_fixture = self.useFixture(
        VNFixture(project_name=inputs.project_name,
                  connections=connections,
                  vn_name=ext_vn_name,
                  inputs=inputs,
                  subnets=ext_subnets,
                  router_asn=self.inputs.router_asn,
                  rt_number=mx_rt,
                  router_external=True))
    assert ext_vn_fixture.verify_on_setup()
    return ext_vn_fixture
def test_create_v6(self):
    """
    Description: Verify v6 config is pushed to the MX
    """
    router_params = list(self.inputs.dm_mx.values())[0]
    self.phy_router_fixture = self.useFixture(
        PhysicalRouterFixture(router_params['name'],
                              router_params['control_ip'],
                              model=router_params['model'],
                              vendor=router_params['vendor'],
                              asn=router_params['asn'],
                              ssh_username=router_params['ssh_username'],
                              ssh_password=router_params['ssh_password'],
                              mgmt_ip=router_params['control_ip'],
                              connections=self.connections,
                              dm_managed=True))
    physical_dev = self.vnc_lib.physical_router_read(
        id=self.phy_router_fixture.phy_device.uuid)
    physical_dev.set_physical_router_management_ip(router_params['mgmt_ip'])
    physical_dev._pending_field_updates
    self.vnc_lib.physical_router_update(physical_dev)

    vn1_name = "test_vnv6sr"
    vn1_net = ['2001::101:0/120']
    #vn1_fixture = self.config_vn(vn1_name, vn1_net)
    vn1_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=vn1_name,
                  inputs=self.inputs,
                  subnets=vn1_net))
    assert vn1_fixture.verify_on_setup()
    self.extend_vn_to_physical_router(vn1_fixture, self.phy_router_fixture)
    sleep(20)
    mx_handle = self.phy_router_fixture.get_connection_obj(
        'juniper',
        host=router_params['mgmt_ip'],
        username=router_params['ssh_username'],
        password=router_params['ssh_password'],
        logger=[self.logger])
    cmd = 'show configuration groups __contrail__ routing-instances ' \
          '_contrail_%s-l3-%s' % (vn1_name, vn1_fixture.vn_network_id)
    cli_output = self.get_output_from_node(mx_handle, cmd)
    assert (not ('invalid command' in cli_output)), \
        "Bug 1553316 present. v6 CIDR config not pushed to mx"
    return True
def create_only_vn(cls, vn_name=None, vn_subnets=None, vxlan_id=None,
                   enable_dhcp=True, **kwargs):
    '''Classmethod to do only VN creation'''
    if not vn_name:
        vn_name = get_random_name('vn')
    if not vn_subnets:
        vn_subnets = [get_random_cidr()]
    vn_fixture = VNFixture(project_name=cls.inputs.project_name,
                           connections=cls.connections,
                           inputs=cls.inputs,
                           vn_name=vn_name,
                           subnets=vn_subnets,
                           vxlan_id=vxlan_id,
                           enable_dhcp=enable_dhcp,
                           **kwargs)
    vn_fixture.setUp()
    return vn_fixture
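# A minimal usage sketch for create_only_vn() above: because it calls setUp()
# directly instead of going through useFixture(), the caller owns the cleanup.
# The test name and subnet below are hypothetical examples.
def test_vn_with_explicit_cleanup(self):
    vn_fixture = self.create_only_vn(vn_name=get_random_name('vn_demo'),
                                     vn_subnets=['192.168.100.0/24'])
    self.addCleanup(vn_fixture.cleanUp)
    assert vn_fixture.verify_on_setup(), 'VN verification failed'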
def create_vm_start_ping(self, ping_count=10):
    self.vn_fix = self.useFixture(
        VNFixture(connections=self.connections,
                  router_external=True,
                  rt_number=2500,
                  af='dual'))
    self.vm1_fixture = self.create_vm(
        self.vn_fix,
        node_name=self.inputs.get_node_name(self.host_list[0]))
    assert self.vm1_fixture.wait_till_vm_is_up()
    assert self.vm1_fixture.ping_with_certainty(self.mx_loopback_ip)

    cmd = 'ping %s -c %s > %s' % (self.mx_loopback_ip, ping_count,
                                  self.result_file)
    self.logger.info('Starting ping on %s to %s' % (self.vm1_fixture.vm_name,
                                                    self.mx_loopback_ip))
    self.logger.debug('ping cmd : %s' % (cmd))
    self.vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, as_daemon=True,
                                   pidfile=self.pid_file)

    cmd = 'ping6 %s -c %s > %s' % (self.mx_loopback_ip6, ping_count,
                                   self.result6_file)
    self.logger.info('Starting ping on %s to %s' % (self.vm1_fixture.vm_name,
                                                    self.mx_loopback_ip6))
    self.logger.debug('ping6 cmd : %s' % (cmd))
    self.vm1_fixture.run_cmd_on_vm(cmds=[cmd], as_sudo=True, as_daemon=True,
                                   pidfile=self.pid6_file)
    return True
def test_create_v6(self):
    """
    Description: Verify v6 config is pushed to the MX
    """
    router_params = list(self.inputs.physical_routers_data.values())[0]
    self.phy_router_fixture = self.useFixture(
        PhysicalRouterFixture(router_params['name'],
                              router_params['mgmt_ip'],
                              model=router_params['model'],
                              vendor=router_params['vendor'],
                              asn=router_params['asn'],
                              ssh_username=router_params['ssh_username'],
                              ssh_password=router_params['ssh_password'],
                              mgmt_ip=router_params['mgmt_ip'],
                              connections=self.connections))
    vn1_name = "test_vnv6sr"
    vn1_net = ['2001::101:0/120']
    #vn1_fixture = self.config_vn(vn1_name, vn1_net)
    vn1_fixture = self.useFixture(
        VNFixture(project_name=self.inputs.project_name,
                  connections=self.connections,
                  vn_name=vn1_name,
                  inputs=self.inputs,
                  subnets=vn1_net))
    assert vn1_fixture.verify_on_setup()
    self.extend_vn_to_physical_router(vn1_fixture, self.phy_router_fixture)
    sleep(20)
    mx_handle = self.phy_router_fixture.get_connection_obj(
        'juniper',
        host=router_params['mgmt_ip'],
        username=router_params['ssh_username'],
        password=router_params['ssh_password'],
        logger=[self.logger])
    cmd = 'show configuration groups __contrail__ routing-instances ' \
          '_contrail_l3_5_%s' % vn1_name
    cli_output = self.get_output_from_node(mx_handle, cmd)
    assert (not ('invalid command' in cli_output)), \
        "Bug 1553316 present. v6 CIDR config not pushed to mx"
    return True