Example #1
    def test_allocate_floating_ip(self):
        """Allocate a floating IP"""
        result = True
        cidr = '10.2.3.0/24'
        floatingIpCidr = self.inputs.fip_pool
        pool_name = 'pool1'
        self.vpc1_cidr = '10.2.5.0/24'
        self.vpc1_vn1_cidr = '10.2.5.0/25'
        self.vpc1_fixture = self.useFixture(
            VPCFixture(self.vpc1_cidr, connections=self.connections))
        assert self.vpc1_fixture.verify_on_setup()
        vpc_fixture = self.vpc1_fixture
        assert vpc_fixture.verify_on_setup(), "VPC %s verification failed" % (
            self.vpc1_cidr)

        self.logger.info(
            'Adding rules to default SG of %s to reach public vm' %
            (vpc_fixture.vpc_id))
        default_sg_name = 'default'
        rule1 = {
            'protocol': 'icmp',
            'direction': 'ingress',
            'cidr': floatingIpCidr,
        }
        rule2 = {
            'protocol': 'icmp',
            'direction': 'egress',
            'cidr': floatingIpCidr,
        }
        default_sg_id = vpc_fixture.get_security_group_id(default_sg_name)
        if not (self.createSgRule(vpc_fixture, default_sg_id, rule1)
                and self.createSgRule(vpc_fixture, default_sg_id, rule2)):
            self.logger.error('Unable to create allow rules in SG %s' %
                              (default_sg_name))
            result = result and False

        # create public VN for floating ip pool

        ec2_base = EC2Base(logger=self.inputs.logger,
                           inputs=self.admin_inputs,
                           tenant=self.inputs.project_name)
        public_vn_fixture = self.public_vn_obj.public_vn_fixture
        assert public_vn_fixture.verify_on_setup(),\
            "Public VN Fixture verification failed, Check logs"

        # Assign floating IP. Internet GW is just dummy
        ec2_base = EC2Base(logger=self.inputs.logger,
                           inputs=self.inputs,
                           tenant=vpc_fixture.vpc_id)
        vpc_fip_fixture = self.useFixture(
            VPCFIPFixture(public_vn_obj=self.public_vn_obj,
                          connections=self.connections,
                          ec2_base=ec2_base))
        assert vpc_fip_fixture.verify_on_setup(
        ), "FIP pool verification failed, please check logs"

        # Add rules in public VM's SG to reach the private VM
        self.set_sec_group_for_allow_all(self.inputs.stack_tenant, 'default')

        fip_vm_fixture = self.useFixture(
            VMFixture(connections=self.admin_connections,
                      vn_obj=public_vn_fixture.obj,
                      vm_name='fip_vm1'))
        assert fip_vm_fixture.verify_on_setup(
        ), "VM verification in FIP VN failed"
        assert fip_vm_fixture.wait_till_vm_is_up(),\
            "VM verification in FIP VN failed"
        self.vpc1_vn1_fixture = self.useFixture(
            VPCVNFixture(self.vpc1_fixture,
                         subnet_cidr=self.vpc1_vn1_cidr,
                         connections=self.connections))
        assert self.vpc1_vn1_fixture.verify_on_setup()
        self.vpc1_vn1_vm1_fixture = self.useFixture(
            VPCVMFixture(self.vpc1_vn1_fixture,
                         image_name='ubuntu',
                         connections=self.connections))
        assert self.vpc1_vn1_vm1_fixture.verify_on_setup()
        self.vpc1_vn1_vm1_fixture.c_vm_fixture.wait_till_vm_is_up()
        vm1_fixture = self.vpc1_vn1_vm1_fixture
        assert vm1_fixture.verify_on_setup(), "VPCVMFixture verification failed " \
            "for VM %s" % (vm1_fixture.instance_id)
        assert vm1_fixture.wait_till_vm_is_up(),\
            "VM verification failed"

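        # Allocate a floating IP from the pool and associate it with the private VM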
        (fip, fip_alloc_id) = vpc_fip_fixture.create_and_assoc_fip(
            vm1_fixture.instance_id)
        if fip is None or fip_alloc_id is None:
            self.logger.error('FIP creation and/or association failed! ')
            result = result and False
        if result:
            self.addCleanup(vpc_fip_fixture.disassoc_and_delete_fip,
                            fip_alloc_id, fip)
            assert vpc_fip_fixture.verify_fip(
                fip), " FIP %s, %s verification failed" % (fip, fip_alloc_id)
            assert vm1_fixture.c_vm_fixture.ping_with_certainty(
                fip_vm_fixture.vm_ip), "Ping from FIP IP failed"
            assert fip_vm_fixture.ping_with_certainty(
                fip), "Ping to FIP IP  failed"

        return result
Example #2
    def config_basic(self, check_dm):
        #mx config using device manager
        #both dm_mx and use_device_manager knobs are required for DM
        #this check is present in is_test_applicable
        if check_dm:
            if self.inputs.use_devicemanager_for_md5:
                for router_params in self.inputs.dm_mx.values():
                    if router_params['model'] == 'mx':
                        self.phy_router_fixture = self.useFixture(
                            PhysicalRouterFixture(
                                router_params['name'],
                                router_params['control_ip'],
                                model=router_params['model'],
                                vendor=router_params['vendor'],
                                asn=router_params['asn'],
                                ssh_username=router_params['ssh_username'],
                                ssh_password=router_params['ssh_password'],
                                mgmt_ip=router_params['control_ip'],
                                connections=self.connections))
                        physical_dev = self.vnc_lib.physical_router_read(
                            id=self.phy_router_fixture.phy_device.uuid)
                        physical_dev.set_physical_router_management_ip(
                            router_params['mgmt_ip'])
                        physical_dev._pending_field_updates
                        self.vnc_lib.physical_router_update(physical_dev)
        else:
            if self.inputs.ext_routers:
                for router_params in self.inputs.physical_routers_data.values():
                    if router_params['model'] == 'mx':
                        cmd = []
                        cmd.append(
                            'set groups md5_tests routing-options router-id %s'
                            % router_params['mgmt_ip'])
                        cmd.append(
                            'set groups md5_tests routing-options route-distinguisher-id %s'
                            % router_params['mgmt_ip'])
                        cmd.append(
                            'set groups md5_tests routing-options autonomous-system %s'
                            % router_params['asn'])
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests type internal'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests multihop'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests local-address %s'
                            % router_params['mgmt_ip'])
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests hold-time 90'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests keep all'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family inet-vpn unicast'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family inet6-vpn unicast'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family evpn signaling'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests family route-target'
                        )
                        cmd.append(
                            'set groups md5_tests protocols bgp group md5_tests local-as %s'
                            % router_params['asn'])
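                        # Add every control node as an iBGP neighbor of the MX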
                        for node in self.inputs.bgp_control_ips:
                            cmd.append(
                                'set groups md5_tests protocols bgp group md5_tests neighbor %s peer-as %s'
                                % (node, router_params['asn']))
                        cmd.append('set apply-groups md5_tests')
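                        # Push the configuration to the MX over Netconf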
                        mx_handle = NetconfConnection(
                            host=router_params['mgmt_ip'])
                        mx_handle.connect()
                        cli_output = mx_handle.config(stmts=cmd, timeout=120)

        #ipv6 not supported for vcenter so skipping config
        if self.inputs.orchestrator != 'vcenter':
            vn61_name = "test_vnv6sr"
            vn61_net = ['2001::101:0/120']
            #vn1_fixture = self.config_vn(vn1_name, vn1_net)
            vn61_fixture = self.useFixture(
                VNFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_name=vn61_name,
                          inputs=self.inputs,
                          subnets=vn61_net))
            vn62_name = "test_vnv6dn"
            vn62_net = ['2001::201:0/120']
            #vn2_fixture = self.config_vn(vn2_name, vn2_net)
            vn62_fixture = self.useFixture(
                VNFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_name=vn62_name,
                          inputs=self.inputs,
                          subnets=vn62_net))
            vm61_name = 'source_vm'
            vm62_name = 'dest_vm'
            #vm1_fixture = self.config_vm(vn1_fixture, vm1_name)
            #vm2_fixture = self.config_vm(vn2_fixture, vm2_name)
            vm61_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn61_fixture.obj,
                          vm_name=vm61_name,
                          node_name=None,
                          image_name='cirros',
                          flavor='m1.tiny'))

            vm62_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn62_fixture.obj,
                          vm_name=vm62_name,
                          node_name=None,
                          image_name='cirros',
                          flavor='m1.tiny'))
            vm61_fixture.wait_till_vm_is_up()
            vm62_fixture.wait_till_vm_is_up()

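            # Policy that allows all traffic between the two IPv6 VNs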
            rule = [
                {
                    'direction': '<>',
                    'protocol': 'any',
                    'source_network': vn61_name,
                    'src_ports': [0, -1],
                    'dest_network': vn62_name,
                    'dst_ports': [0, -1],
                    'simple_action': 'pass',
                },
            ]
            policy_name = 'allow_all'
            policy_fixture = self.config_policy(policy_name, rule)

            vn61_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                       vn61_fixture)
            vn62_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                       vn62_fixture)

        vn1 = "vn1"
        vn2 = "vn2"
        vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']}
        rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': vn1,
                'src_ports': [0, -1],
                'dest_network': vn2,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
            },
        ]

        self.logger.info("Configure the policy with allow any")
        self.multi_vn_fixture = self.useFixture(
            MultipleVNFixture(connections=self.connections,
                              inputs=self.inputs,
                              subnet_count=2,
                              vn_name_net=vn_s,
                              project_name=self.inputs.project_name))
        vns = self.multi_vn_fixture.get_all_fixture_obj()
        (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0]
        (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1]
        self.config_policy_and_attach_to_vn(rules)

        self.multi_vm_fixture = self.useFixture(
            MultipleVMFixture(project_name=self.inputs.project_name,
                              connections=self.connections,
                              vm_count_per_vn=1,
                              vn_objs=vns,
                              image_name='cirros',
                              flavor='m1.tiny'))
        vms = self.multi_vm_fixture.get_all_fixture()
        (self.vm1_name, self.vm1_fix) = vms[0]
        (self.vm2_name, self.vm2_fix) = vms[1]
Example #3
    def test_sec_group_basic(self):
        """
        Description: Test basic SG features
            1. Security group create and delete
            2. Create security group with custom rules and then update it for tcp
            3. Launch VM with custom created security group and verify
            4. Remove security group association with VM
            5. Add back custom security group to VM and verify
            6. Try to delete security group with association to VM. It should fail.
            7. Test with ping, which should fail
            8. Test with TCP which should pass
            9. Update the rules to allow icmp, ping should pass now.
        """
        secgrp_name = get_random_name('test_sec_group')
        (prefix,
         prefix_len) = get_random_cidrs(self.inputs.get_af())[0].split('/')
        prefix_len = int(prefix_len)
        rule = [{
            'direction':
            '>',
            'protocol':
            'udp',
            'dst_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 8000,
                'end_port': 8000
            }],
            'src_ports': [{
                'start_port': 9000,
                'end_port': 9000
            }],
            'src_addresses': [{
                'security_group': 'local'
            }],
        }]
        #Create the SG
        sg_fixture = self.config_sec_group(name=secgrp_name, entries=rule)
        #Delete the SG
        self.delete_sec_group(sg_fixture)
        #Create SG again and update the rules
        sg_fixture = self.config_sec_group(name=secgrp_name, entries=rule)
        secgrp_id = sg_fixture.secgrp_id
        vn_net = get_random_cidrs(self.inputs.get_af())
        (prefix, prefix_len) = vn_net[0].split('/')
        prefix_len = int(prefix_len)
        rule = [{
            'protocol':
            'tcp',
            'dst_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_addresses': [{
                'security_group': 'local'
            }],
        }, {
            'protocol':
            'tcp',
            'src_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'dst_addresses': [{
                'security_group': 'local'
            }],
        }]
        #Update the rules
        sg_fixture.replace_rules(rule)
        #Create VN and VMs
        vn_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      subnets=vn_net))
        assert vn_fixture.verify_on_setup()
        img_name = self.inputs.get_ci_image() or 'ubuntu-traffic'

        #cannot use set_security_group in vro, so remove default sg before adding new sg
        if self.inputs.vro_based:
            vm1_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn_fixture.obj,
                          image_name=img_name,
                          flavor='contrail_flavor_small'))
            vm2_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn_fixture.obj,
                          image_name=img_name,
                          flavor='contrail_flavor_small'))
            assert vm1_fixture.verify_on_setup()
            assert vm1_fixture.wait_till_vm_is_up()
            assert vm2_fixture.verify_on_setup()
            assert vm2_fixture.wait_till_vm_is_up()
            vm1_fixture.remove_security_group(secgrp='default')
            vm2_fixture.remove_security_group(secgrp='default')
            vm1_fixture.add_security_group(secgrp_id)
            vm2_fixture.add_security_group(secgrp_id)
        else:
            vm1_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn_fixture.obj,
                          image_name=img_name,
                          flavor='contrail_flavor_small',
                          sg_ids=[secgrp_id]))
            vm2_fixture = self.useFixture(
                VMFixture(project_name=self.inputs.project_name,
                          connections=self.connections,
                          vn_obj=vn_fixture.obj,
                          image_name=img_name,
                          flavor='contrail_flavor_small',
                          sg_ids=[secgrp_id]))
            assert vm1_fixture.verify_on_setup()
            assert vm1_fixture.wait_till_vm_is_up()
            assert vm2_fixture.verify_on_setup()
            assert vm2_fixture.wait_till_vm_is_up()

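        # Verify the custom security group is associated with the VM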
        result, msg = vm1_fixture.verify_security_group(secgrp_name)
        assert result, msg

        #Remove security group association with VM and verify
        self.logger.info("Remove security group %s from VM %s", secgrp_name,
                         vm1_fixture.vm_name)
        vm1_fixture.remove_security_group(secgrp=secgrp_id)
        result, msg = vm1_fixture.verify_security_group(secgrp_name)
        if result:
            assert False, "Security group %s is not removed from VM %s" % (
                secgrp_name, vm1_fixture.vm_name)
        #Add back security group to VM and verify
        vm1_fixture.add_security_group(secgrp=secgrp_id)
        result, msg = vm1_fixture.verify_security_group(secgrp_name)
        assert result, msg
        #Try to delete security group with back ref
        self.logger.info("Try deleting the security group %s with back ref.",
                         secgrp_name)
        try:
            if sg_fixture.option == 'openstack':
                sg_fixture.quantum_h.delete_security_group(
                    sg_fixture.secgrp_id)
            else:
                sg_fixture.cleanUp()
        except Exception as msg:
            self.logger.info(msg)
            self.logger.info(
                "Not able to delete the security group with back ref as expected"
            )
        else:
            try:
                secgroup = self.vnc_lib.security_group_read(
                    fq_name=sg_fixture.secgrp_fq_name)
                self.logger.info(
                    "Not able to delete the security group with back ref as expected"
                )
            except NoIdError:
                errmsg = "Security group deleted, when it is attached to a VM."
                self.logger.error(errmsg)
                assert False, errmsg

        #Ping test, should fail
        assert vm1_fixture.ping_with_certainty(ip=vm2_fixture.vm_ip,
                                               expectation=False)
        self.logger.info("Ping FAILED as expected")

        #TCP test, should pass
        nc_options = '' if self.inputs.get_af() == 'v4' else '-6'
        assert vm1_fixture.nc_file_transfer(vm2_fixture, nc_options=nc_options)

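        # Update the SG to allow ICMP (protocol 1 for IPv4, 58 for IPv6) so ping can work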
        proto = '1' if self.inputs.get_af() == 'v4' else '58'
        rule = [{
            'protocol':
            proto,
            'dst_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_addresses': [{
                'security_group': 'local'
            }],
        }, {
            'protocol':
            proto,
            'src_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'dst_addresses': [{
                'security_group': 'local'
            }],
        }]
        #Update the rules
        sg_fixture.replace_rules(rule)

        #Ping should pass now
        assert vm1_fixture.ping_with_certainty(ip=vm2_fixture.vm_ip,
                                               expectation=True)
Example #4
    def config_v2_svc_chain(self, stack_name):
        template = self.get_template(template_name=stack_name)
        env = self.get_env(env_name=stack_name)
        stack_name = get_random_name(stack_name)
        self.nova_h.get_image(env['parameters']['image'])
        self.nova_h.get_flavor(env['parameters']['flavor'])
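        # Also resolve any service-template image parameters present in the environment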
        for k in [
                'service_template_properties_image_name',
                'service_template1_properties_image_name',
                'service_template2_properties_image_name', 'svm1_image'
        ]:
            if k in env['parameters']:
                self.nova_h.get_image(env['parameters'][k])
        svc_pt_hs = self.config_heat_obj(stack_name, template, env)
        stack = svc_pt_hs.heat_client_obj
        op = stack.stacks.get(stack_name).outputs
        time.sleep(5)
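        # Collect the stack outputs (VM IDs, VN FQ names and SI FQ names) for later use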
        for output in op:
            if output['output_key'] == 'left_VM_ID':
                left_vm_id = output['output_value']
            elif output['output_key'] == 'left_VM1_ID':
                left_vm1_id = output['output_value']
            elif output['output_key'] == 'left_VM2_ID':
                left_vm2_id = output['output_value']
            elif output['output_key'] == 'right_VM_ID':
                right_vm_id = output['output_value']
            elif output['output_key'] == 'right_VM1_ID':
                right_vm1_id = output['output_value']
            elif output['output_key'] == 'right_VM2_ID':
                right_vm2_id = output['output_value']
            elif output['output_key'] == 'left_vn_FQDN':
                left_vn_fqdn = output['output_value']
            elif output['output_key'] == 'right_vn_FQDN':
                right_vn_fqdn = output['output_value']
            elif output['output_key'] == 'si_fqdn':
                si_fqdn = output['output_value']
            elif output['output_key'] == 'si2_fqdn':
                si2_fqdn = output['output_value']
                si2_fqdn = ":".join(si2_fqdn)
            elif output['output_key'] == 'left_VM1_IP_ADDRESS':
                left_vm1_ip_address = output['output_value']
                network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix = left_vm1_ip_address
                network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix_len = "32"
            elif output['output_key'] == 'right_VM1_IP_ADDRESS':
                right_vm1_ip_address = output['output_value']
                network_policy_entries_policy_rule_dst_addresses_subnet_ip_prefix = right_vm1_ip_address
                network_policy_entries_policy_rule_dst_addresses_subnet_ip_prefix_len = "32"

        #Update the policy
        si_fqdn = ":".join(si_fqdn)
        left_vn_fqdn = ":".join(left_vn_fqdn)
        right_vn_fqdn = ":".join(right_vn_fqdn)
        if 'multi' in stack_name:
            self.update_stack(
                svc_pt_hs,
                change_sets=[['left_vn_fqdn', left_vn_fqdn],
                             ['right_vn_fqdn', right_vn_fqdn],
                             ['service_instance1_fq_name', si_fqdn],
                             ['service_instance2_fq_name', si2_fqdn]])
        else:
            if 'cidr' in stack_name:
                if 'src_cidr' in stack_name:
                    self.update_stack(
                        svc_pt_hs,
                        change_sets=
                        [['left_vn_fqdn', left_vn_fqdn],
                         ['right_vn_fqdn', right_vn_fqdn],
                         ['service_instance_fq_name', si_fqdn],
                         [
                             'network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix',
                             network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix
                         ],
                         [
                             'network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix_len',
                             network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix_len
                         ]])
            else:
                self.update_stack(
                    svc_pt_hs,
                    change_sets=[['left_vn_fqdn', left_vn_fqdn],
                                 ['right_vn_fqdn', right_vn_fqdn],
                                 ['service_instance_fq_name', si_fqdn]])
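        # Verify traffic through the service chain; for the CIDR variants only the matching left VM should reach the right VM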
        if 'cidr' in stack_name:
            if 'src_cidr' in stack_name:
                # 2 VMs in the left_vn
                left_vm1 = VMFixture(connections=self.connections,
                                     uuid=left_vm1_id,
                                     image_name='cirros')
                left_vm1.read()
                left_vm1.verify_on_setup()

                left_vm2 = VMFixture(connections=self.connections,
                                     uuid=left_vm2_id,
                                     image_name='cirros')
                left_vm2.read()
                left_vm2.verify_on_setup()

                # One VM in the right_vn
                right_vm = VMFixture(connections=self.connections,
                                     uuid=right_vm_id,
                                     image_name='cirros')
                right_vm.read()
                right_vm.verify_on_setup()

                # Ping from left_vm1 to right_vm should pass
                assert left_vm1.ping_with_certainty(right_vm.vm_ip,
                                                    expectation=True)

                # Ping from left_vm2 to right_vm should fail
                assert left_vm2.ping_with_certainty(right_vm.vm_ip,
                                                    expectation=False)
        else:
            left_vm = VMFixture(connections=self.connections,
                                uuid=left_vm_id,
                                image_name='cirros')
            left_vm.read()
            left_vm.verify_on_setup()
            right_vm = VMFixture(connections=self.connections,
                                 uuid=right_vm_id,
                                 image_name='cirros')
            right_vm.read()
            right_vm.verify_on_setup()
            assert left_vm.ping_with_certainty(right_vm.vm_ip,
                                               expectation=True)
Example #5
    def config_basic(self):
        vn61_name = "test_vnv6sr"
        vn61_net = ['2001::101:0/120']
        vn61_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn61_name,
                      inputs=self.inputs,
                      subnets=vn61_net))
        vn62_name = "test_vnv6dn"
        vn62_net = ['2001::201:0/120']
        vn62_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn62_name,
                      inputs=self.inputs,
                      subnets=vn62_net))
        vm61_name = 'source_vm'
        vm62_name = 'dest_vm'
        vm61_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn61_fixture.obj,
                      vm_name=vm61_name,
                      node_name=None,
                      image_name='cirros',
                      flavor='m1.tiny'))

        vm62_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn62_fixture.obj,
                      vm_name=vm62_name,
                      node_name=None,
                      image_name='cirros',
                      flavor='m1.tiny'))
        vm61_fixture.wait_till_vm_is_up()
        vm62_fixture.wait_till_vm_is_up()

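        # Policy that allows all traffic between the two IPv6 VNs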
        rule = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': vn61_name,
                'src_ports': [0, -1],
                'dest_network': vn62_name,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
            },
        ]
        policy_name = 'allow_all'
        policy_fixture = self.config_policy(policy_name, rule)

        vn61_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                   vn61_fixture)
        vn62_policy_fix = self.attach_policy_to_vn(policy_fixture,
                                                   vn62_fixture)

        vn1 = "vn1"
        vn2 = "vn2"
        vn_s = {'vn1': '10.1.1.0/24', 'vn2': ['20.1.1.0/24']}
        rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': vn1,
                'src_ports': [0, -1],
                'dest_network': vn2,
                'dst_ports': [0, -1],
                'simple_action': 'pass',
            },
        ]

        self.logger.info("Configure the policy with allow any")
        self.multi_vn_fixture = self.useFixture(
            MultipleVNFixture(connections=self.connections,
                              inputs=self.inputs,
                              subnet_count=2,
                              vn_name_net=vn_s,
                              project_name=self.inputs.project_name))
        vns = self.multi_vn_fixture.get_all_fixture_obj()
        (self.vn1_name, self.vn1_fix) = self.multi_vn_fixture._vn_fixtures[0]
        (self.vn2_name, self.vn2_fix) = self.multi_vn_fixture._vn_fixtures[1]
        self.config_policy_and_attach_to_vn(rules)

        self.multi_vm_fixture = self.useFixture(
            MultipleVMFixture(project_name=self.inputs.project_name,
                              connections=self.connections,
                              vm_count_per_vn=1,
                              vn_objs=vns,
                              image_name='cirros',
                              flavor='m1.tiny'))
        vms = self.multi_vm_fixture.get_all_fixture()
        (self.vm1_name, self.vm1_fix) = vms[0]
        (self.vm2_name, self.vm2_fix) = vms[1]
Example #6
    def setup_common_objects(self, inputs, connections):
        self.inputs = inputs
        self.connections = connections
        self.base_rel = get_release() 
        (self.vn11_name, self.vn11_subnets) = ("vn11", ["192.168.1.0/24"])
        (self.vn22_name, self.vn22_subnets) = ("vn22", ["192.168.2.0/24"])
        (self.fip_vn_name, self.fip_vn_subnets) = ("fip_vn", ['200.1.1.0/24'])
        (self.vn11_vm1_name, self.vn11_vm2_name, self.vn11_vm3_name,
         self.vn11_vm4_name) = ('vn11_vm1', 'vn11_vm2', 'vn11_vm3', 'vn11_vm4')
        self.vn22_vm1_name = 'vn22_vm1'
        self.vn22_vm2_name = 'vn22_vm2'
        self.fvn_vm1_name = 'fvn_vm1' 
        
        # Configure 3 VNs: vn11, vn22 and fip_vn
        self.vn11_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections, inputs=self.inputs, vn_name=self.vn11_name, subnets=self.vn11_subnets))
        assert self.vn11_fixture.verify_on_setup()
        self.vn22_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections, inputs=self.inputs, vn_name=self.vn22_name, subnets=self.vn22_subnets))
        self.fvn_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name, connections=self.connections,
                inputs=self.inputs, vn_name=self.fip_vn_name, subnets=self.fip_vn_subnets))

        # Configure 4 VMs in VN11, 2 VMs in VN22, and 1 VM in FVN
        self.vn11_vm1_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm1_name, image_name='ubuntu'))
        self.vn11_vm2_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm2_name, image_name='ubuntu'))
        self.vn11_vm3_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm3_name, image_name='ubuntu'))
        self.vn11_vm4_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn11_fixture.obj, vm_name=self.vn11_vm4_name, image_name='ubuntu'))
        self.vn22_vm1_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn22_fixture.obj, vm_name=self.vn22_vm1_name, image_name='ubuntu'))
        self.vn22_vm2_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.vn22_fixture.obj, vm_name=self.vn22_vm2_name, image_name='ubuntu'))
        self.fvn_vm1_fixture = self.useFixture(VMFixture(
            project_name=self.inputs.project_name, connections=self.connections, vn_obj=self.fvn_fixture.obj, vm_name=self.fvn_vm1_name, image_name='ubuntu'))

        # Adding policy between vn11 and vn22
        assert self.vn11_fixture.verify_on_setup()
        assert self.vn22_fixture.verify_on_setup()
        rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'any', 'src_ports': 'any',
                'dst_ports': 'any',
                'source_network': 'any',
                'dest_network': 'any',
            },
        ]
        policy_name = 'p1'
        self.policy_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name, rules_list=rules, inputs=self.inputs,
                connections=self.connections))

        policy_fq_name = [self.policy_fixture.policy_fq_name]
        self.vn11_fixture.bind_policies(
            policy_fq_name, self.vn11_fixture.vn_id)
        self.addCleanup(self.vn11_fixture.unbind_policies,
                        self.vn11_fixture.vn_id, [self.policy_fixture.policy_fq_name])
        self.vn22_fixture.bind_policies(
            policy_fq_name, self.vn22_fixture.vn_id)
        self.addCleanup(self.vn22_fixture.unbind_policies,
                        self.vn22_fixture.vn_id, [self.policy_fixture.policy_fq_name])

        # Adding floating IP

        assert self.fvn_fixture.verify_on_setup()

        fip_pool_name = 'some-pool1'
        self.fip_fixture = self.useFixture(
            FloatingIPFixture(
                project_name=self.inputs.project_name, inputs=self.inputs,
                connections=self.connections, pool_name=fip_pool_name, vn_id=self.fvn_fixture.vn_id))

        self.vn11_vm1_fixture.verify_on_setup()
        self.vn11_vm1_fixture.wait_till_vm_is_up()
        self.fip_id = self.fip_fixture.create_and_assoc_fip(
            self.fvn_fixture.vn_id, self.vn11_vm1_fixture.vm_id)
        self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, self.fip_id)
        assert self.fip_fixture.verify_fip(
            self.fip_id, self.vn11_vm1_fixture, self.fvn_fixture)

        self.vn22_vm1_fixture.verify_on_setup()
        self.vn22_vm1_fixture.wait_till_vm_is_up()
        self.fip_id1 = self.fip_fixture.create_and_assoc_fip(
            self.fvn_fixture.vn_id, self.vn22_vm1_fixture.vm_id)
        assert self.fip_fixture.verify_fip(
            self.fip_id1, self.vn22_vm1_fixture, self.fvn_fixture)
        self.addCleanup(self.fip_fixture.disassoc_and_delete_fip, self.fip_id1)

        # Adding the service chaining resources for firewall
        si_count = 1
        svc_scaling = False
        max_inst = 1
        svc_mode = 'in-network'
        flavor = 'm1.medium'
        self.vn1_fq_name = "default-domain:" + self.inputs.project_name + ":in_network_vn1"
        self.vn1_name = "in_network_vn1"
        self.vn1_subnets = ['10.1.1.0/24']
        self.vm1_name = 'in_network_vm1'
        self.vn2_fq_name = "default-domain:" + self.inputs.project_name + ":in_network_vn2"
        self.vn2_name = "in_network_vn2"
        self.vn2_subnets = ['20.2.2.0/24']
        self.vm2_name = 'in_network_vm2'

        self.action_list = []
        self.if_list = [['management', False], ['left', True], ['right', True]]
        self.st_name = 'in_net_svc_template_1'
        si_prefix = 'in_net_svc_instance_'
        self.policy_name = 'policy_in_network'

        self.vn1_fixture = self.config_vn(self.vn1_name, self.vn1_subnets)
        self.vn2_fixture = self.config_vn(self.vn2_name, self.vn2_subnets)
        self.st_fixture, self.si_fixtures = self.config_st_si(
            self.st_name, si_prefix, si_count, svc_scaling, max_inst,
            project=self.inputs.project_name, left_vn_fixture=self.vn1_fixture,
            right_vn_fixture=self.vn2_fixture, svc_mode=svc_mode, flavor=flavor)
        self.action_list = self.chain_si(si_count, si_prefix, self.inputs.project_name)
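        # Policy rule that steers vn1<->vn2 traffic through the service instance chain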
        self.rules = [
            {
                'direction': '<>',
                'protocol': 'any',
                'source_network': self.vn1_name,
                'src_ports': [0, -1],
                'dest_network': self.vn2_name,
                'dst_ports': [0, -1],
                'simple_action': None,
                'action_list': {'apply_service': self.action_list}
            },
        ]
        self.policy_fixtures = self.config_policy(self.policy_name, self.rules)

        self.vn1_policy_fix = self.attach_policy_to_vn(
            self.policy_fixtures, self.vn1_fixture)
        self.vn2_policy_fix = self.attach_policy_to_vn(
            self.policy_fixtures, self.vn2_fixture)
        self.vm1_fixture = self.config_vm(vn_fix=self.vn1_fixture, vm_name=self.vm1_name)
        self.vm2_fixture = self.config_vm(vn_fix=self.vn2_fixture, vm_name=self.vm2_name)
        #self.vm1_fixture.verify_on_setup()
        #self.vm2_fixture.verify_on_setup()
        self.vm1_fixture.wait_till_vm_is_up()
        self.vm2_fixture.wait_till_vm_is_up()

        result, msg = self.validate_vn(self.vn1_name, project_name=self.inputs.project_name)
        assert result, msg
        result, msg = self.validate_vn(self.vn2_name, project_name=self.inputs.project_name)
        assert result, msg

        # non-admin tenant config
        result = True
        msg = []
        self.topo_obj = sdn_topo_with_multi_project()
        self.setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, self.topo_obj))
        out = self.setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            self.topo_objs, self.config_topo, vm_fip_info = out['data']
Example #7
    def test_policy_protocol_summary(self):
        ''' Test to validate that when a policy is created with multiple rules that can be
            summarized by protocol, the ACL entries programmed in the agent remain the same.
        '''
        proj_name = self.inputs.project_name
        vn1_name = 'vn40'
        vn1_subnets = ['10.1.1.0/24']
        policy1_name = 'policy1'
        policy2_name = 'policy2'

        rules2 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
        ]
        rules1 = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'any',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
        ]
        policy1_fixture = self.useFixture(
            PolicyFixture(policy_name=policy1_name,
                          rules_list=rules1,
                          inputs=self.inputs,
                          connections=self.connections))
        policy2_fixture = self.useFixture(
            PolicyFixture(policy_name=policy2_name,
                          rules_list=rules2,
                          inputs=self.inputs,
                          connections=self.connections))

        vn1_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_name=vn1_name,
                      inputs=self.inputs,
                      subnets=vn1_subnets,
                      policy_objs=[policy1_fixture.policy_obj]))
        assert vn1_fixture.verify_on_setup()

        vn1_vm1_name = 'vm1'
        vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn1_fixture.obj,
                      vm_name=vn1_vm1_name))
        assert vm1_fixture.verify_on_setup()

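        # Snapshot the agent ACL for the VN before attaching the second policy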
        inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
        vn_fq_name = inspect_h.get_vna_vn(domain='default-domain',
                                          project=proj_name,
                                          vn_name=vn1_name)['name']

        vna_acl1 = inspect_h.get_vna_acl_by_vn(vn_fq_name)

        policy1_fixture.verify_policy_in_api_server()

        policy_fq_names = []
        if vn1_fixture.policy_objs:
            policy_fq_names = [
                self.quantum_h.get_policy_fq_name(x)
                for x in vn1_fixture.policy_objs
            ]

        policy_fq_name2 = self.quantum_h.get_policy_fq_name(
            policy2_fixture.policy_obj)
        policy_fq_names.append(policy_fq_name2)
        vn1_fixture.bind_policies(policy_fq_names, vn1_fixture.vn_id)

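        # The ACL should not change: the icmp rule in policy2 is subsumed by the allow-any rule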
        vna_acl2 = inspect_h.get_vna_acl_by_vn(vn_fq_name)
        out = policy_test_utils.compare_args('policy_rules',
                                             vna_acl1['entries'],
                                             vna_acl2['entries'])

        if out:
            self.logger.info(
                "Policy rules do not match: expected %s, actual %s" %
                (vna_acl1['entries'], vna_acl2['entries']))
            self.assertIsNone(out, "policy compare failed")

        return True
Example #8
 def test_policy_with_multi_vn_in_vm(self):
     ''' Test to validate policy action in a VM with vNICs in multiple VNs that carry different policies.
     Test flow: vm1 is in vn1 and vn2; vm2 is in vn3. Policy allows traffic from vn2 to vn3 and denies it from vn1 to vn3.
     With vm1's default route in vn1, which has no reachability to vn3, traffic should fail.
     After adding a specific route that directs vn3 traffic through vn2, traffic should pass.
     '''
     vm1_name = 'vm_mine1'
     vm2_name = 'vm_mine2'
     vn1_name = 'vn221'
     vn1_subnets = ['11.1.1.0/24']
     vn2_name = 'vn222'
     vn2_subnets = ['22.1.1.0/24']
     vn3_gateway = '22.1.1.254'
     vn3_name = 'vn223'
     vn3_subnets = ['33.1.1.0/24']
     rules1 = [
         {
             'direction': '>',
             'simple_action': 'deny',
             'protocol': 'icmp',
             'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     rules2 = [
         {
             'direction': '<>',
             'simple_action': 'pass',
             'protocol': 'any',
             'src_ports': 'any',
             'dst_ports': 'any',
             'source_network': 'any',
             'dest_network': 'any',
         },
     ]
     policy1_name = 'p1'
     policy2_name = 'p2'
     policy1_fixture = self.useFixture(
         PolicyFixture(policy_name=policy1_name,
                       rules_list=rules1,
                       inputs=self.inputs,
                       connections=self.connections))
     policy2_fixture = self.useFixture(
         PolicyFixture(policy_name=policy2_name,
                       rules_list=rules2,
                       inputs=self.inputs,
                       connections=self.connections))
     vn1_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn1_name,
                   inputs=self.inputs,
                   subnets=vn1_subnets,
                   policy_objs=[policy1_fixture.policy_obj]))
     vn2_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn2_name,
                   inputs=self.inputs,
                   subnets=vn2_subnets,
                   disable_gateway=True,
                   policy_objs=[policy2_fixture.policy_obj]))
     vn3_fixture = self.useFixture(
         VNFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_name=vn3_name,
                   inputs=self.inputs,
                   subnets=vn3_subnets,
                   policy_objs=[policy2_fixture.policy_obj]))
     assert vn1_fixture.verify_on_setup()
     assert vn2_fixture.verify_on_setup()
     assert vn3_fixture.verify_on_setup()
     assert vn1_fixture.verify_vn_policy_in_api_server()
     assert vn2_fixture.verify_vn_policy_in_api_server()
     assert vn3_fixture.verify_vn_policy_in_api_server()
     vm1_fixture = self.useFixture(
         VMFixture(connections=self.connections,
                   vn_objs=[vn1_fixture.obj, vn2_fixture.obj],
                   vm_name=vm1_name,
                   project_name=self.inputs.project_name))
     vm2_fixture = self.useFixture(
         VMFixture(connections=self.connections,
                   vn_objs=[vn3_fixture.obj],
                   vm_name=vm2_name,
                   project_name=self.inputs.project_name))
     assert vm1_fixture.verify_on_setup()
     assert vm2_fixture.verify_on_setup()
     self.nova_h.wait_till_vm_is_up(vm1_fixture.vm_obj)
     self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
     # For multi-vn vm, configure ip address for 2nd interface
     multivn_vm_ip_list = vm1_fixture.vm_ips
     interfaces = vm1_fixture.get_vm_interface_list()
     interface1 = vm1_fixture.get_vm_interface_list(
         ip=multivn_vm_ip_list[0])[0]
     interfaces.remove(interface1)
     interface2 = interfaces[0]
     if 'dual' == self.inputs.get_af():
         intf_conf_cmd = "ifconfig %s inet6 add %s" % (
             interface2, multivn_vm_ip_list[3])
     else:
         intf_conf_cmd = "ifconfig %s %s netmask 255.255.255.0" % (
             interface2, multivn_vm_ip_list[1])
     vm_cmds = (intf_conf_cmd, 'ifconfig -a')
     for cmd in vm_cmds:
         cmd_to_output = [cmd]
         vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
         output = vm1_fixture.return_output_cmd_dict[cmd]
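     # Confirm every expected IP now shows up in the interface listing of vm1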
     for ip in multivn_vm_ip_list:
         if ip not in output:
             self.logger.error("IP %s not assigned to any eth intf of %s" %
                               (ip, vm1_fixture.vm_name))
             assert False
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw of VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with no allow rule in the VN where default gw is part of, traffic should fail"
     )
     result = vm1_fixture.ping_with_certainty(expectation=False,
                                              dst_vm_fixture=vm2_fixture)
     self.assertEqual(result, True, "ping passed which is not expected")
     # Configure VM to reroute traffic to interface belonging to different
     # VN
     self.logger.info(
         "Direct traffic to gw which is part of VN with allow policy to destination VN, traffic should pass now"
     )
     cmd_to_output = []
     if 'dual' == self.inputs.get_af():
         cmd = ' route add -net %s netmask 255.255.255.0 gw %s dev %s' % (
             vn3_subnets[0].split('/')[0], multivn_vm_ip_list[2],
             interface2)
         cmd_to_output.append(' ip -6 route add %s dev %s' %
                              (vn3_subnets[1], interface2))
     else:
         cmd = ' route add -net %s netmask 255.255.255.0 gw %s dev %s' % (
             vn3_subnets[0].split('/')[0], multivn_vm_ip_list[1],
             interface2)
     cmd_to_output.append(cmd)
     vm1_fixture.run_cmd_on_vm(cmds=cmd_to_output, as_sudo=True)
     output = vm1_fixture.return_output_cmd_dict[cmd]
     # Ping test from multi-vn vm to peer vn, result will be based on action
     # defined in policy attached to VN which has the default gw for VM
     self.logger.info(
         "Ping from multi-vn vm to vm2, with allow rule in the VN where network gw is part of, traffic should pass"
     )
     result = vm1_fixture.ping_with_certainty(expectation=True,
                                              dst_vm_fixture=vm2_fixture)
     self.assertEqual(result, True, "ping failed which is not expected")
     return True
Example #9
File: config.py Project: nuthanc/tf-test
 def config_vm(self, vn_fix, vm_name, node_name=None, image_name='ubuntu-netperf', flavor='contrail_flavor_large'):
     vm_fixture = self.useFixture(VMFixture(
                  project_name=self.inputs.project_name, connections=self.connections,
                  vn_obj=vn_fix.obj, vm_name=vm_name, node_name=node_name, image_name=image_name, flavor=flavor))
     return vm_fixture
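
A minimal usage sketch (the config_vn helper, the names and the image below are assumptions for illustration, not taken from this file):

     vn_fix = self.config_vn('vn1', ['10.1.1.0/24'])   # config_vn assumed to be a similar helper
     vm_fix = self.config_vm(vn_fix, 'vm1', image_name='ubuntu-traffic')
     assert vm_fix.wait_till_vm_is_up()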
Example #10
if __name__ == "__main__":
    import sys
    from vn_test import VNFixture
    from vm_test import VMFixture
    #    sys.settrace(tracefunc)
    #    obj = LBaasFixture(api_type='neutron', name='LB', connections=setup_test_infra(), network_id='4b39a2bd-4528-40e8-b848-28084e59c944', members={'vms': ['a72ad607-f1ca-44f2-b31e-e825a3f2d408'], 'address': ['192.168.1.10']}, vip_net_id='4b39a2bd-4528-40e8-b848-28084e59c944', protocol='TCP', port='22', healthmonitors=[{'delay':5, 'timeout':5, 'max_retries':5, 'probe_type':'PING'}])
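    # Build the test connections and the three networks the LB needs: member VN, VIP VN and an external (FIP) VN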
    conn = setup_test_infra()
    vnfix = VNFixture(connections=conn)
    vnfix.setUp()
    vip_fix = VNFixture(connections=conn)
    vip_fix.setUp()
    fip_fix = VNFixture(connections=conn, router_external=True)
    fip_fix.setUp()
    subnet = vnfix.get_cidrs()[0]
    vm_fix = VMFixture(connections=conn, vn_obj=vnfix.obj)
    vm_fix.setUp()
    obj = LBaasFixture(api_type='neutron',
                       name='LB',
                       connections=conn,
                       network_id=vnfix.uuid,
                       members={
                           'address': [get_random_ip(subnet)],
                           'vms': [vm_fix.vm_id]
                       },
                       vip_net_id=vip_fix.uuid,
                       fip_net_id=fip_fix.uuid,
                       protocol='TCP',
                       port='22',
                       healthmonitors=[{
                           'delay': 5,
                           'timeout': 5,
                           'max_retries': 5,
                           'probe_type': 'PING'
                       }])
Example #11
    def test_config_add_change_while_control_nodes_go_down(self):
        """Tests related to configuration add, change, and delete while switching from normal mode
           to headless and back i.e. control nodes go down and come online."""

        if len(self.inputs.compute_ips) < 2:
            raise unittest.SkipTest("This test needs at least 2 compute nodes.")
        else:
            self.logger.info(
                "Required resources are in place to run the test.")

        result = True
        topology_class_name = None

        self.compute_fixture_dict = {}
        for each_compute in self.inputs.compute_ips:
            self.compute_fixture_dict[each_compute] = self.useFixture(
                ComputeNodeFixture(connections=self.connections,
                                   node_ip=each_compute,
                                   username=self.inputs.username,
                                   password=self.inputs.password))
            mode = self.compute_fixture_dict[
                each_compute].get_agent_headless_mode()
            if mode is False:
                self.compute_fixture_dict[
                    each_compute].set_agent_headless_mode()
        #
        # Get config for test from topology
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = test_headless_vrouter_topo.sdn_headless_vrouter_topo

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))
        #
        # Create a list of compute node IP's and pass it to topo if you want to pin
        # a vm to a particular node
        topo_obj = topology_class_name(
            compute_node_list=self.inputs.compute_ips)
        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        topo = {}
        topo_objs = {}
        config_topo = {}
        setup_obj = self.useFixture(
            sdnTopoSetupFixture(self.connections, topo_obj))
        out = setup_obj.sdn_topo_setup()
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_objs, config_topo, vm_fip_info = out['data']

        # Start Test
        proj = list(config_topo.keys())
        vms = list(config_topo[proj[0]]['vm'].keys())
        src_vm = config_topo[proj[0]]['vm'][vms[0]]
        dest_vm = config_topo[proj[0]]['vm'][vms[1]]
        flow_cache_timeout = 180

        # Setup Traffic.
        stream = Stream(protocol="ip",
                        proto="icmp",
                        src=src_vm.vm_ip,
                        dst=dest_vm.vm_ip)
        profile = ContinuousProfile(stream=stream, count=0, capfilter="icmp")

        tx_vm_node_ip = src_vm.vm_node_ip
        rx_vm_node_ip = dest_vm.vm_node_ip

        tx_local_host = Host(tx_vm_node_ip, self.inputs.username,
                             self.inputs.password)
        rx_local_host = Host(rx_vm_node_ip, self.inputs.username,
                             self.inputs.password)

        send_host = Host(src_vm.local_ip, src_vm.vm_username,
                         src_vm.vm_password)
        recv_host = Host(dest_vm.local_ip, dest_vm.vm_username,
                         dest_vm.vm_password)

        sender = Sender("icmp", profile, tx_local_host, send_host,
                        self.inputs.logger)
        receiver = Receiver("icmp", profile, rx_local_host, recv_host,
                            self.inputs.logger)

        receiver.start()
        sender.start()
        self.logger.info("Waiting for 5 sec for traffic to be setup ...")
        time.sleep(5)

        #self.start_ping(src_vm, dest_vm)

        flow_index_list = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        headless_vr_utils.stop_all_control_services(self)
        self.addCleanup(self.inputs.start_service,
                        'supervisor-control',
                        self.inputs.bgp_ips,
                        container='controller')
        time.sleep(10)
        headless_vr_utils.check_through_tcpdump(self, dest_vm, src_vm)

        flow_index_list2 = headless_vr_utils.get_flow_index_list(
            self, src_vm, dest_vm)

        if set(flow_index_list) == set(flow_index_list2):
            self.logger.info("Flow indexes have not changed.")
        else:
            self.logger.error(
                "Flow indexes have changed. Test Failed, Exiting")
            return False

        receiver.stop()
        sender.stop()
        project1_instance = config_topo['project1']['project']['project1']
        project1_instance.get_project_connections()
        vnet2_instance = config_topo['project1']['vn']['vnet2']
        # add VM to existing VN
        VM22_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections['juniper'],
                vn_obj=vnet2_instance.obj,
                vm_name='VM22',
                project_name=project1_instance.project_name))

        # create new IPAM
        ipam3_obj = self.useFixture(
            IPAMFixture(project_obj=project1_instance, name='ipam3'))
        ipam4_obj = self.useFixture(
            IPAMFixture(project_obj=project1_instance, name='ipam4'))

        # create new VN
        VN3_fixture = self.useFixture(
            VNFixture(
                project_name=project1_instance.project_name,
                connections=project1_instance.project_connections['juniper'],
                vn_name='VN3',
                inputs=project1_instance.inputs,
                subnets=['10.3.1.0/24'],
                ipam_fq_name=ipam3_obj.fq_name))

        VN4_fixture = self.useFixture(
            VNFixture(
                project_name=project1_instance.project_name,
                connections=project1_instance.project_connections['juniper'],
                vn_name='VN4',
                inputs=project1_instance.inputs,
                subnets=['10.4.1.0/24'],
                ipam_fq_name=ipam4_obj.fq_name))

        # create policy
        policy_name = 'policy34'
        rules = []
        rules = [{
            'direction': '<>',
            'protocol': 'icmp',
            'dest_network': VN4_fixture.vn_fq_name,
            'source_network': VN3_fixture.vn_fq_name,
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }, {
            'direction': '<>',
            'protocol': 'icmp',
            'dest_network': VN3_fixture.vn_fq_name,
            'source_network': VN4_fixture.vn_fq_name,
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }]

        policy34_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name,
                rules_list=rules,
                inputs=project1_instance.inputs,
                connections=project1_instance.project_connections['juniper'],
                project_fixture=project1_instance))

        # create VN to policy mapping in a dict of policy list.
        vn_policys = {
            VN3_fixture.vn_name: [policy_name],
            VN4_fixture.vn_name: [policy_name]
        }

        # create a policy object list of policies to be attached to a vm
        policy_obj_dict = {}
        policy_obj_dict[VN3_fixture.vn_name] = [policy34_fixture.policy_obj]
        policy_obj_dict[VN4_fixture.vn_name] = [policy34_fixture.policy_obj]

        # vn fixture dictionary.
        vn_obj_dict = {}
        vn_obj_dict[VN3_fixture.vn_name] = VN3_fixture
        vn_obj_dict[VN4_fixture.vn_name] = VN4_fixture

        # attach policy to VN
        VN3_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=project1_instance.project_connections['juniper'],
                vn_name=VN3_fixture.vn_name,
                policy_obj=policy_obj_dict,
                vn_obj=vn_obj_dict,
                vn_policys=vn_policys[VN3_fixture.vn_name],
                project_name=project1_instance.project_name))

        VN4_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=project1_instance.project_connections['juniper'],
                vn_name=VN4_fixture.vn_name,
                policy_obj=policy_obj_dict,
                vn_obj=vn_obj_dict,
                vn_policys=vn_policys[VN4_fixture.vn_name],
                project_name=project1_instance.project_name))

        # add VM to new VN
        VM31_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections['juniper'],
                vn_obj=VN3_fixture.obj,
                vm_name='VM31',
                project_name=project1_instance.project_name))

        VM41_fixture = self.useFixture(
            VMFixture(
                connections=project1_instance.project_connections['juniper'],
                vn_obj=VN4_fixture.obj,
                vm_name='VM41',
                project_name=project1_instance.project_name))

        # verification routines.
        test_flag = 0
        if ((VN3_fixture.verify_vn_in_api_server())
                and (VN3_fixture.verify_vn_not_in_agent())
                and (VN3_fixture.verify_vn_policy_in_api_server()['result'])):
            self.logger.info(
                "Verification of VN3 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VN3 FAILED while control nodes down.")
            test_flag = 1

        if ((VN4_fixture.verify_vn_in_api_server())
                and (VN4_fixture.verify_vn_not_in_agent())
                and (VN4_fixture.verify_vn_policy_in_api_server()['result'])):
            self.logger.info(
                "Verification of VN4 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VN4 FAILED while control nodes down.")
            test_flag = 1

        if ((VM22_fixture.verify_vm_launched())
                and (VM22_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM22 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM22 FAILED while control nodes down.")
            test_flag = 1

        if ((VM31_fixture.verify_vm_launched())
                and (VM31_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM31 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM31 FAILED while control nodes down.")
            test_flag = 1

        if ((VM41_fixture.verify_vm_launched())
                and (VM41_fixture.verify_vm_in_api_server())):
            self.logger.info(
                "Verification of VM41 PASSED while control nodes down.")
        else:
            self.logger.error(
                "Verification of VM41 FAILED while control nodes down.")
            test_flag = 1

        # start all control services.
        headless_vr_utils.start_all_control_services(self)

        # If something went wrong while the controllers were down, bail out here.
        if test_flag == 1:
            self.logger.error(
                "Verifications and test failed while the controllers were "
                "down in the headless state of the agent. Check earlier error logs."
            )
            return False

        # wait for 3 to 5 sec for configuration sync from control nodes to the
        # agents.
        time.sleep(5)

        # wait till VMs are up.
        VM22_fixture.wait_till_vm_is_up()
        VM31_fixture.wait_till_vm_is_up()
        VM41_fixture.wait_till_vm_is_up()

        # verify vm config gets downloaded to the agents.
        if ((VM22_fixture.verify_vm_in_agent())
                and (VM31_fixture.verify_vm_in_agent())
                and (VM41_fixture.verify_vm_in_agent())):
            self.logger.info("VM verification on the agent PASSED")
        else:
            self.logger.error("VM verification on the agent FAILED")
            return False

        # check ping success between the two VMs
        assert config_topo['project1']['vm']['VM11'].ping_with_certainty(
            VM22_fixture.vm_ip, expectation=True)
        assert VM31_fixture.ping_with_certainty(VM41_fixture.vm_ip,
                                                expectation=True)
        assert VM41_fixture.ping_with_certainty(VM31_fixture.vm_ip,
                                                expectation=True)

        # verification routines.
        if ((VN3_fixture.verify_on_setup()) and (VN4_fixture.verify_on_setup())
                and (VM22_fixture.verify_on_setup())
                and (VM31_fixture.verify_on_setup())
                and (VM41_fixture.verify_on_setup())):
            self.logger.info(
                "All verifications passed after controllers came up in headless agent mode"
            )
        else:
            self.logger.error(
                "Verifications FAILED after controllers came up in headless agent mode"
            )
            return False

        return True
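
The pass/fail decision in the test above hinges on the vrouter keeping its flow table intact while the control nodes are down: the flow index list captured before stopping the control services must match the one captured afterwards. As a minimal standalone sketch (the helper name is hypothetical; only headless_vr_utils.get_flow_index_list and stop_all_control_services come from the test above), the check reduces to a set comparison:

def flow_table_unchanged(indexes_before, indexes_after):
    """Return True if the vrouter kept the same flow entries across the outage.

    Both arguments are iterables of flow indexes, e.g. the lists returned by
    headless_vr_utils.get_flow_index_list() in the test above. Ordering is
    irrelevant; only membership matters, so compare as sets.
    """
    return set(indexes_before) == set(indexes_after)


# Usage in the style of the test above (sketch):
# before = headless_vr_utils.get_flow_index_list(self, src_vm, dest_vm)
# headless_vr_utils.stop_all_control_services(self)
# after = headless_vr_utils.get_flow_index_list(self, src_vm, dest_vm)
# assert flow_table_unchanged(before, after), "Flow indexes changed in headless mode"
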
    def test_sec_group_basic(self):
        """
	Description: Test basic SG features
            1. Security group create and delete
            2. Create security group with custom rules and then update it for tcp
            3. Launch VM with custom created security group and verify
            4. Remove secuity group association with VM
            5. Add back custom security group to VM and verify
            6. Try to delete security group with association to VM. It should fail.
            7. Test with ping, which should fail
            8. Test with TCP which should pass
            9. Update the rules to allow icmp, ping should pass now.
        """
        secgrp_name = get_random_name('test_sec_group')
        (prefix,
         prefix_len) = get_random_cidrs(self.inputs.get_af())[0].split('/')
        prefix_len = int(prefix_len)
        rule = [{
            'direction':
            '>',
            'protocol':
            'udp',
            'dst_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 8000,
                'end_port': 8000
            }],
            'src_ports': [{
                'start_port': 9000,
                'end_port': 9000
            }],
            'src_addresses': [{
                'security_group': 'local'
            }],
        }]
        #Create the SG
        sg_fixture = self.config_sec_group(name=secgrp_name, entries=rule)
        #Delete the SG
        self.delete_sec_group(sg_fixture)
        #Create SG again and update the rules
        sg_fixture = self.config_sec_group(name=secgrp_name, entries=rule)
        secgrp_id = sg_fixture.secgrp_id
        vn_net = get_random_cidrs(self.inputs.get_af())
        (prefix, prefix_len) = vn_net[0].split('/')
        rule = [{
            'protocol':
            'tcp',
            'dst_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_addresses': [{
                'security_group': 'local'
            }],
        }, {
            'protocol':
            'tcp',
            'src_addresses': [{
                'subnet': {
                    'ip_prefix': prefix,
                    'ip_prefix_len': prefix_len
                }
            }],
            'dst_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'src_ports': [{
                'start_port': 0,
                'end_port': -1
            }],
            'dst_addresses': [{
                'security_group': 'local'
            }],
        }]
        #Update the rules
        sg_fixture.replace_rules(rule)
        #Create VN and VMs
        vn_fixture = self.useFixture(
            VNFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      inputs=self.inputs,
                      subnets=vn_net))
        assert vn_fixture.verify_on_setup()
        img_name = os.environ.get('ci_image', 'ubuntu-traffic')
        vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn_fixture.obj,
                      image_name=img_name,
                      flavor='contrail_flavor_small',
                      sg_ids=[secgrp_id]))
        vm2_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=vn_fixture.obj,
                      image_name=img_name,
                      flavor='contrail_flavor_small',
                      sg_ids=[secgrp_id]))
        assert vm1_fixture.verify_on_setup()
        assert vm1_fixture.wait_till_vm_is_up()
        result, msg = vm1_fixture.verify_security_group(secgrp_name)
        assert result, msg

        # Remove security group association with VM and verify
        self.logger.info("Remove security group %s from VM %s", secgrp_name,
                         vm1_fixture.vm_name)
        vm1_fixture.remove_security_group(secgrp=secgrp_id)
        result, msg = vm1_fixture.verify_security_group(secgrp_name)
        if result:
            assert False, "Security group %s is not removed from VM %s" % (
                secgrp_name, vm1_fixture.vm_name)
        #Add back security group to VM and verify
        vm1_fixture.add_security_group(secgrp=secgrp_id)
        result, msg = vm1_fixture.verify_security_group(secgrp_name)
        assert result, msg

        #Try to delete security group with back ref
        self.logger.info("Try deleting the security group %s with back ref.",
                         secgrp_name)
        try:
            if sg_fixture.option == 'openstack':
                sg_fixture.quantum_h.delete_security_group(
                    sg_fixture.secgrp_id)
            else:
                sg_fixture.cleanUp()
        except Exception as msg:
            self.logger.info(msg)
            self.logger.info(
                "Not able to delete the security group with back ref as expected"
            )
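
The rule lists handed to config_sec_group() above are verbose because every entry repeats the same address and port scaffolding. A small builder keeps the intent readable; this is a sketch, and build_sg_rule() is a hypothetical helper that only assumes the dict schema shown above (src_addresses/dst_addresses, start_port/end_port, and the 'local' security-group keyword):

def build_sg_rule(protocol, prefix, prefix_len, egress=True,
                  dst_ports=(0, -1), src_ports=(0, -1)):
    """Build one security-group rule entry in the dict format used above.

    egress=True puts the subnet on the destination side and 'local' on the
    source side (traffic leaving the VM); egress=False reverses that. A
    'direction' key (e.g. '>') can be added by the caller if required.
    """
    subnet_side = [{'subnet': {'ip_prefix': prefix,
                               'ip_prefix_len': int(prefix_len)}}]
    local_side = [{'security_group': 'local'}]
    return {
        'protocol': protocol,
        'dst_addresses': subnet_side if egress else local_side,
        'src_addresses': local_side if egress else subnet_side,
        'dst_ports': [{'start_port': dst_ports[0], 'end_port': dst_ports[1]}],
        'src_ports': [{'start_port': src_ports[0], 'end_port': src_ports[1]}],
    }


# The TCP rule pair used above could then be written as (sketch):
# rule = [build_sg_rule('tcp', prefix, prefix_len, egress=True),
#         build_sg_rule('tcp', prefix, prefix_len, egress=False)]
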
    def test_policy_cidr_src_cidr_dst_cidr(self):
        """Test cases to test policy CIDR.

        Policy1 rule: source = CIDR-VM11, destination = CIDR-VM12.
        Policy2 rule: source = CIDR-VM11, destination = CIDR-VM21.
        """
        result = True

        # create Ipam and VN
        self.setup_ipam_vn()
        VN1_subnet = self.VN1_fixture.get_cidrs()[0]
        VN2_subnet = self.VN2_fixture.get_cidrs()[0]
        VN3_subnet = self.VN3_fixture.get_cidrs()[0]

        # create VM
        self.setup_vm()
        self.VM12_fixture = self.useFixture(
            VMFixture(connections=self.connections,
                      vn_obj=self.VN1_fixture.obj,
                      vm_name='VM12',
                      project_name=self.project.project_name))
        self.VM12_fixture.wait_till_vm_is_up()

        #Check initial connectivity without policies in place.
        ret = self.VM11_fixture.ping_with_certainty(self.VM12_fixture.vm_ip, \
                                                    expectation=True)
        if ret == True:
            self.logger.info("ICMP traffic is allowed between VMs in same VN")
        else:
            result = False
            self.logger.error(
                "ICMP traffic is not allowed between VMs in same VN, which is wrong"
            )

        ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \
                                                    expectation=False)
        if ret == True:
            self.logger.info(
                "ICMP traffic is not allowed between VMs accross VNs")
        else:
            result = False
            self.logger.error(
                "ICMP traffic is allowed between VMs accross VNs, which is wrong"
            )
        if result == False:
            return result

        # Get the VM IP addresses with a 32-bit mask in CIDR format.
        vm11_ip = self.VM11_fixture.vm_ip + '/32'
        vm12_ip = self.VM12_fixture.vm_ip + '/32'
        vm21_ip = self.VM21_fixture.vm_ip + '/32'

        # create policy
        policy_name = 'policy1112'
        rules = []
        rules = [{
            'direction': '<>',
            'protocol': 'icmp',
            'dest_subnet': vm12_ip,
            'source_subnet': vm11_ip,
            'dst_ports': 'any',
            'simple_action': 'deny',
            'src_ports': 'any'
        }, {
            'direction': '<>',
            'protocol': 'any',
            'dest_network': 'VN1',
            'source_network': 'VN1',
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }]

        policy1112_fixture = self.useFixture(
            PolicyFixture(policy_name=policy_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))

        policy_name = 'policy1211'
        rules = []
        rules = [{
            'direction': '<>',
            'protocol': 'icmp',
            'dest_subnet': vm11_ip,
            'source_subnet': vm12_ip,
            'dst_ports': 'any',
            'simple_action': 'deny',
            'src_ports': 'any'
        }, {
            'direction': '<>',
            'protocol': 'any',
            'dest_network': 'VN1',
            'source_network': 'VN1',
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }]

        policy1211_fixture = self.useFixture(
            PolicyFixture(policy_name=policy_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))

        policy_name = 'policy1121'
        rules = []
        rules = [{
            'direction': '<>',
            'protocol': 'icmp',
            'dest_subnet': vm21_ip,
            'source_subnet': vm11_ip,
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }, {
            'direction': '<>',
            'protocol': 'any',
            'dest_network': 'VN2',
            'source_network': 'VN1',
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }]

        policy1121_fixture = self.useFixture(
            PolicyFixture(policy_name=policy_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))

        policy_name = 'policy2111'
        rules = []
        rules = [{
            'direction': '<>',
            'protocol': 'icmp',
            'dest_subnet': vm11_ip,
            'source_subnet': vm21_ip,
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }, {
            'direction': '<>',
            'protocol': 'any',
            'dest_network': 'VN1',
            'source_network': 'VN2',
            'dst_ports': 'any',
            'simple_action': 'pass',
            'src_ports': 'any'
        }]

        policy2111_fixture = self.useFixture(
            PolicyFixture(policy_name=policy_name,
                          rules_list=rules,
                          inputs=self.inputs,
                          connections=self.connections))

        # attach policy to VN
        VN1_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=self.connections,
                vn_name=self.VN1_fixture.vn_name,
                policy_obj={self.VN1_fixture.vn_name : \
                           [policy1112_fixture.policy_obj, \
                            policy1211_fixture.policy_obj, \
                            policy1121_fixture.policy_obj]},
                vn_obj={self.VN1_fixture.vn_name : self.VN1_fixture},
                vn_policys=['policy1112','policy1211','policy1121'],
                project_name=self.project.project_name))

        VN2_policy_fixture = self.useFixture(
            VN_Policy_Fixture(
                connections=self.connections,
                vn_name=self.VN2_fixture.vn_name,
                policy_obj={self.VN2_fixture.vn_name : \
                           [policy2111_fixture.policy_obj]},
                vn_obj={self.VN2_fixture.vn_name : self.VN2_fixture},
                vn_policys=['policy2111'],
                project_name=self.project.project_name))

        #Test traffic with the policies having cidr as src and dst,
        #attached to the respective networks.
        ret = self.VM11_fixture.ping_with_certainty(self.VM12_fixture.vm_ip, \
                                                    expectation=False)
        if ret == True:
            cmd = "flow -l | grep %s -A1 | grep %s -A1 " % (
                self.VM11_fixture.vm_ip, self.VM12_fixture.vm_ip)
            cmd = cmd + "| grep 'Action:D(Policy)' | wc -l"
            flow_record = self.inputs.run_cmd_on_server(
                self.VM11_fixture.vm_node_ip, cmd, self.inputs.host_data[
                    self.VM11_fixture.vm_node_ip]['username'],
                self.inputs.host_data[
                    self.VM11_fixture.vm_node_ip]['password'])
            if int(flow_record) > 0:
                self.logger.info(
                    "ICMP traffic is not allowed between VM11 and VM12, blocked by policy1112 and policy1211."
                )
                self.logger.info("Above test Passed.")
            else:
                result = False
                self.logger.error(
                    "Deny flow for VM11-VM12 traffic not found, though policy1112 and policy1211 should have created one."
                )
                self.logger.error("Above test Failed.")
        else:
            result = False
            self.logger.error(
                "ICMP traffic is allowed between VM11 and VM12, though policy1112 and policy1211 should deny it."
            )
            self.logger.error("Above test Failed.")

        ret = False
        flow_record = 0
        ret = self.VM11_fixture.ping_with_certainty(self.VM21_fixture.vm_ip, \
                                                    expectation=True)
        if ret == True:
            cmd = "flow -l | grep %s -A1 | grep %s -A1 " % (
                self.VM11_fixture.vm_ip, self.VM21_fixture.vm_ip)
            cmd = cmd + "| grep 'Action:F' | wc -l"
            flow_record = self.inputs.run_cmd_on_server(
                self.VM11_fixture.vm_node_ip, cmd, self.inputs.host_data[
                    self.VM11_fixture.vm_node_ip]['username'],
                self.inputs.host_data[
                    self.VM11_fixture.vm_node_ip]['password'])
            if int(flow_record) > 0:
                self.logger.info(
                    "ICMP traffic is allowed between VM11 and VM21, as permitted by policy1121 and policy2111."
                )
                self.logger.info("Above test Passed.")
            else:
                result = False
                self.logger.error(
                    "Forwarding flow for VM11-VM21 traffic not found, though policy1121 and policy2111 should allow it."
                )
                self.logger.error("Above test Failed.")
        else:
            result = False
            self.logger.error(
                "ICMP traffic is not allowed between VM11 and VM21, though policy1121 and policy2111 should allow it."
            )
            self.logger.error("Above test Failed.")
        return result
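
The flow-table checks above shell out to 'flow -l' on the compute node and count matching entries with grep and wc. A hedged sketch of that pipeline as a reusable helper follows; the helper name is hypothetical, and it assumes the same inputs.run_cmd_on_server() call and inputs.host_data layout used in the test above:

def count_flow_entries(inputs, compute_ip, src_ip, dst_ip, action_pattern):
    """Count vrouter flow entries between src_ip and dst_ip whose action
    matches action_pattern, e.g. 'Action:D(Policy)' for a policy deny or
    'Action:F' for a forwarded flow, using the same shell pipeline as above.
    """
    cmd = ("flow -l | grep %s -A1 | grep %s -A1 | grep '%s' | wc -l"
           % (src_ip, dst_ip, action_pattern))
    creds = inputs.host_data[compute_ip]
    out = inputs.run_cmd_on_server(compute_ip, cmd,
                                   creds['username'], creds['password'])
    # wc -l prints a number; run_cmd_on_server returns it as a string.
    return int(out)


# e.g. the deny check above becomes (sketch):
# assert count_flow_entries(self.inputs, self.VM11_fixture.vm_node_ip,
#                           self.VM11_fixture.vm_ip, self.VM12_fixture.vm_ip,
#                           'Action:D(Policy)') > 0
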
Example #14
0
    def config_v2_svc_chain(self, stack_name):
        svc_pt_hs = self.config_heat_obj(stack_name)
        stack = svc_pt_hs.heat_client_obj
        op = stack.stacks.get(stack_name).outputs
        time.sleep(5)
        for output in op:
            if output['output_key'] == 'left_VM_ID':
                left_vm_id = output['output_value']
            elif output['output_key'] == 'left_VM1_ID':
                left_vm1_id = output['output_value']
            elif output['output_key'] == 'left_VM2_ID':
                left_vm2_id = output['output_value']
            elif output['output_key'] == 'right_VM_ID':
                right_vm_id = output['output_value']
            elif output['output_key'] == 'right_VM1_ID':
                right_vm1_id = output['output_value']
            elif output['output_key'] == 'right_VM2_ID':
                right_vm2_id = output['output_value']
            elif output['output_key'] == 'left_vn_FQDN':
                left_vn_fqdn = output['output_value']
            elif output['output_key'] == 'right_vn_FQDN':
                right_vn_fqdn = output['output_value']
            elif output['output_key'] == 'si_fqdn':
                si_fqdn = output['output_value']
            elif output['output_key'] == 'si2_fqdn':
                si2_fqdn = output['output_value']
                si2_fqdn=":".join(si2_fqdn)
            elif output['output_key'] == 'left_VM1_IP_ADDRESS':
                left_vm1_ip_address = output['output_value']
                network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix = "left_vn"+":"+left_vm1_ip_address
                network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix_len = "32"
            elif output['output_key'] == 'right_VM1_IP_ADDRESS':
                right_vm1_ip_address = output['output_value']
                network_policy_entries_policy_rule_dst_addresses_subnet_ip_prefix = "right_vn"+":"+right_vm1_ip_address
                network_policy_entries_policy_rule_dst_addresses_subnet_ip_prefix_len = "32"

        # Update the policy
        si_fqdn = ":".join(si_fqdn)
        left_vn_fqdn = ":".join(left_vn_fqdn)
        right_vn_fqdn = ":".join(right_vn_fqdn)
        if 'multi' in stack_name:
            self.update_stack(
                svc_pt_hs,
                change_sets=[['left_vn_fqdn', left_vn_fqdn],
                             ['right_vn_fqdn', right_vn_fqdn],
                             ['service_instance1_fq_name', si_fqdn],
                             ['service_instance2_fq_name', si2_fqdn]])
        else:
            if 'cidr' in stack_name:
                if 'src_cidr' in stack_name:
                    self.update_stack(
                        svc_pt_hs,
                        change_sets=[
                            ['left_vn_fqdn', left_vn_fqdn],
                            ['right_vn_fqdn', right_vn_fqdn],
                            ['service_instance_fq_name', si_fqdn],
                            ['network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix',
                             network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix],
                            ['network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix_len',
                             network_policy_entries_policy_rule_src_addresses_subnet_ip_prefix_len]])
            else:
                self.update_stack(
                    svc_pt_hs,
                    change_sets=[['left_vn_fqdn', left_vn_fqdn],
                                 ['right_vn_fqdn', right_vn_fqdn],
                                 ['service_instance_fq_name', si_fqdn]])
        if 'cidr' in stack_name:
            if 'src_cidr' in stack_name:
                # Two VMs in the left_vn
                left_vm1 = VMFixture(connections=self.connections,
                                     uuid=left_vm1_id,
                                     image_name='cirros-0.3.0-x86_64-uec')
                left_vm1.read()
                left_vm1.verify_on_setup()

                left_vm2 = VMFixture(connections=self.connections,
                                     uuid=left_vm2_id,
                                     image_name='cirros-0.3.0-x86_64-uec')
                left_vm2.read()
                left_vm2.verify_on_setup()

                # One VM in the right_vn
                right_vm = VMFixture(connections=self.connections,
                                     uuid=right_vm_id,
                                     image_name='cirros-0.3.0-x86_64-uec')
                right_vm.read()
                right_vm.verify_on_setup()

                # Ping from left_vm1 to right_vm should pass
                assert left_vm1.ping_with_certainty(right_vm.vm_ip,
                                                    expectation=True)

                # Ping from left_vm2 to right_vm should fail
                assert left_vm2.ping_with_certainty(right_vm.vm_ip,
                                                    expectation=False)
        else:
            left_vm = VMFixture(connections=self.connections,
                                uuid=left_vm_id,
                                image_name='cirros-0.3.0-x86_64-uec')
            left_vm.read()
            left_vm.verify_on_setup()
            right_vm = VMFixture(connections=self.connections,
                                 uuid=right_vm_id,
                                 image_name='cirros-0.3.0-x86_64-uec')
            right_vm.read()
            right_vm.verify_on_setup()
            assert left_vm.ping_with_certainty(right_vm.vm_ip,
                                               expectation=True)
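
The output-parsing loop at the top of config_v2_svc_chain() maps each Heat stack output key to a local variable through a long if/elif chain. When all that is needed is key-to-value lookup, the outputs can be collected into a dict in one pass. This is a sketch under the assumption that the same python-heatclient objects used above are available (stack.stacks.get(...).outputs); the helper name is illustrative:

def stack_outputs_as_dict(heat_client, stack_name):
    """Return a Heat stack's outputs as a {output_key: output_value} dict."""
    outputs = heat_client.stacks.get(stack_name).outputs
    return dict((o['output_key'], o['output_value']) for o in outputs)


# Usage in the style of the method above (sketch):
# op = stack_outputs_as_dict(svc_pt_hs.heat_client_obj, stack_name)
# left_vm_id = op.get('left_VM_ID')
# si_fqdn = ":".join(op['si_fqdn'])
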
Example #15
0
    def test_allocate_floating_ip(self):
        """Allocate a floating IP
        1.Create an FIP pool for VN -public under admin and launch an instance
        2.Launch  instance under VPC 
        3.Associate FIP to thie  instance
        4.Ping test to and from FIP """
        self.res.verify_common_objects()
        result = True
        cidr = '10.2.3.0/24'
        floatingIpCidr = '10.2.60.0/24'
        pool_name = 'pool1'

        vpc_fixture = self.res.vpc1_fixture
        assert vpc_fixture.verify_on_setup(), " VPC %s verification failed" % (
            cidr)

        self.logger.info(
            'Adding rules to default SG of %s to reach public vm' %
            (vpc_fixture.vpc_id))
        default_sg_name = 'default'
        rule1 = {
            'protocol': 'icmp',
            'direction': 'ingress',
            'cidr': floatingIpCidr,
        }
        rule2 = {
            'protocol': 'icmp',
            'direction': 'egress',
            'cidr': floatingIpCidr,
        }
        default_sg_id = vpc_fixture.get_security_group_id(default_sg_name)
        if not (self.createSgRule(vpc_fixture, default_sg_id, rule1)
                and self.createSgRule(vpc_fixture, default_sg_id, rule2)):
            self.logger.error('Unable to create allow in SG %s ' %
                              (default_sg_name))
            result = result and False

        # create public VN for floating ip pool

        ec2_base = EC2Base(logger=self.inputs.logger,
                           inputs=self.inputs,
                           tenant=self.inputs.project_name)
        fip_vn_fixture = self.useFixture(
            VNFixture(connections=self.connections,
                      inputs=self.inputs,
                      vn_name='public',
                      subnets=[floatingIpCidr]))
        # Add rules in public VM's SG to reach the private VM
        self.set_sec_group_for_allow_all('admin', 'default')
        assert fip_vn_fixture.verify_on_setup(
        ), "FIP VN Fixture verification failed, Check logs"

        fip_vm_fixture = self.useFixture(
            VMFixture(connections=self.connections,
                      vn_obj=fip_vn_fixture.obj,
                      vm_name='fip_vm1'))
        assert fip_vm_fixture.verify_on_setup(
        ), "VM verification in FIP VN failed"
        assert fip_vm_fixture.wait_till_vm_is_up(),\
            "VM verification in FIP VN failed"

        vm1_fixture = self.res.vpc1_vn1_vm1_fixture
        assert vm1_fixture.verify_on_setup(), "VPCVMFixture verification failed " \
            "for VM %s" % (vm1_fixture.instance_id)
        assert vm1_fixture.wait_till_vm_is_up(),\
            "VM verification failed"

        fip_fixture = self.useFixture(
            VPCFIPFixture(fip_vn_fixture=fip_vn_fixture,
                          connections=self.connections,
                          pool_name=pool_name,
                          ec2_base=ec2_base))
        assert fip_fixture.verify_on_setup(
        ), "FIP pool verification failed, Pls check logs"

        (fip, fip_alloc_id) = fip_fixture.create_and_assoc_fip(
            vm1_fixture.instance_id)
        if fip is None or fip_alloc_id is None:
            self.logger.error('FIP creation and/or association failed! ')
            result = result and False
        if result:
            self.addCleanup(fip_fixture.disassoc_and_delete_fip, fip_alloc_id,
                            fip)
            assert fip_fixture.verify_fip(
                fip), " FIP %s, %s verification failed" % (fip, fip_alloc_id)
            assert vm1_fixture.c_vm_fixture.ping_with_certainty(
                fip_vm_fixture.vm_ip), "Ping from FIP IP failed"
            assert fip_vm_fixture.ping_with_certainty(
                fip), "Ping to FIP IP  failed"

        return result
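
The end of the test above follows an associate-then-register-cleanup pattern: create_and_assoc_fip(), addCleanup() of disassoc_and_delete_fip(), then verify_fip(). A condensed sketch of that pattern as a helper follows, assuming the VPCFIPFixture methods keep the names and signatures shown above; the helper itself is hypothetical:

def assoc_fip_with_cleanup(test, fip_fixture, instance_id):
    """Associate a floating IP with an instance and register its teardown.

    Returns (fip, fip_alloc_id), or (None, None) when the association
    failed. 'test' is the test case (provides addCleanup); fip_fixture is
    a VPCFIPFixture as used above.
    """
    fip, fip_alloc_id = fip_fixture.create_and_assoc_fip(instance_id)
    if fip is None or fip_alloc_id is None:
        return None, None
    # Undo the association and release the FIP when the test finishes.
    test.addCleanup(fip_fixture.disassoc_and_delete_fip, fip_alloc_id, fip)
    assert fip_fixture.verify_fip(fip), \
        "FIP %s, %s verification failed" % (fip, fip_alloc_id)
    return fip, fip_alloc_id
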