Code example #1
 def __init__(self, **kwargs):
     super(LBaasV2Fixture, self).__init__(**kwargs)
     self.lb_name = kwargs.get('lb_name', None)
     self.lb_uuid = kwargs.get('lb_uuid', None)
     self.network_id = kwargs.get('network_id', None)
     self.vip_ip = kwargs.get('vip_ip', None)
     self.fip_id = kwargs.get('fip_id', None)
     self.fip_net_id = kwargs.get('fip_net_id', None)
     if not self.lb_uuid and not (self.lb_name and self.network_id):
         raise Exception('LB UUID or LB name and network_id is reqd')
     self.listener_name = kwargs.get('listener_name',
                                     get_random_name('Listener'))
     self.listener_uuid = kwargs.get('listener_uuid', None)
     self.vip_port = kwargs.get('vip_port', 80)
     self.vip_protocol = kwargs.get('vip_protocol', 'HTTP')
     self.pool_name = kwargs.get('pool_name', get_random_name('Pool'))
     self.pool_port = kwargs.get('pool_port', 80)
     self.pool_protocol = kwargs.get('pool_protocol', 'HTTP')
     self.pool_algorithm = kwargs.get('pool_algorithm', 'ROUND_ROBIN')
     self.members = kwargs.get('members', dict())
     self.hm_probe_type = kwargs.get('hm_probe_type', None)
     self.hm_max_retries = kwargs.get('hm_max_retries', '5')
     self.hm_delay = kwargs.get('hm_delay', '5')
     self.hm_timeout = kwargs.get('hm_timeout', '5')
     self.pool_uuid = None
     self.member_ips = list()
     self.member_ids = list()
     self.member_weight = list()
     self.deleted_member_ids = list()
     self.hmon_id = None
     self.already_present = False
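Every example on this page relies on a get_random_name() helper from the contrail-test utilities (some also use get_random_cidr()) to keep object names unique across test runs. The real helper is not shown here; the snippet below is only a minimal sketch of the idea, assuming a UUID-based random suffix, and is not the library's actual implementation.

import uuid


def get_random_name(prefix=None):
    # Minimal sketch (assumption): append a short random suffix to an
    # optional prefix so repeated test runs do not collide on object names.
    suffix = uuid.uuid4().hex[:8]
    return '%s-%s' % (prefix, suffix) if prefix else suffix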
Code example #2
 def setup_namespaces_pods_for_fabric_restart(self, isolation=False,ip_fabric_forwarding=False):
     """ common routine to create the namesapces and the pods  by enabling the fabric forwarding
         1.create 2 namespaces (ns1,ns2:enable fabric forwarding)
         2.create pods in each namespace and verify(ns1:pod1,pod2, ns2:pod1, ns3:pod1 ,default:pod1)
     """
     namespace1_name = get_random_name("ns1")
     namespace2_name = get_random_name("ns2")
     namespace1 = self.setup_namespace(name = namespace1_name, isolation = isolation,
                                       ip_fabric_forwarding = ip_fabric_forwarding)
     namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation,
                                       ip_fabric_forwarding = ip_fabric_forwarding)
     assert namespace1.verify_on_setup()
     assert namespace2.verify_on_setup()
     label = "fabric"
     #create a pod in the default namespace
     pod1_in_default_ns = self.setup_ubuntuapp_pod()
     #create two pods in the fabric forwarding enabled namespace
     pod1_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                            labels={'app': label})
     pod2_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                            labels={'app': label})
     #create a pod in the second fabric forwarding enabled namespace (ns2)
     pod1_in_ns2 = self.setup_ubuntuapp_pod(namespace=namespace2_name,
                                            labels={'app': label})
     assert pod1_in_default_ns.verify_on_setup()
     assert pod1_in_ns1.verify_on_setup()
     assert pod2_in_ns1.verify_on_setup()
     assert pod1_in_ns2.verify_on_setup()
     client1 = [pod1_in_ns1, pod2_in_ns1,  namespace1]
     client2 = [pod1_in_ns2, namespace2]
     client3 = [pod1_in_default_ns]
     return (client1, client2, client3)
Code example #3
File: verify.py Project: Juniper/contrail-test
 def start_tcpdump(self, session, tap_intf, vlan=None,  vm_fixtures=[], pcap_on_vm=False, no_header = False):
     filt_str = ''
     if not no_header:
         filt_str = 'udp port 8099'
     if not pcap_on_vm:
         pcap = '/tmp/mirror-%s_%s.pcap' % (tap_intf, get_random_name())
         cmd = 'rm -f %s' % pcap
         execute_cmd(session, cmd, self.logger)
         assert check_pcap_file_exists(session, pcap, expect=False),'pcap file still exists'
         if vlan:
             filt_str = 'greater 1200'
         cmd = "sudo tcpdump -ni %s -U %s -w %s" % (tap_intf, filt_str, pcap)
         self.logger.info("Starting tcpdump to capture the mirrored packets.")
         execute_cmd(session, cmd, self.logger)
         assert check_pcap_file_exists(session, pcap),'pcap file does not exist'
         return pcap
     else:
         pcap = '/tmp/%s.pcap' % (get_random_name())
         cmd_to_tcpdump = [ 'tcpdump -ni %s %s -w %s 1>/dev/null 2>/dev/null' % (tap_intf, filt_str, pcap) ]
         pidfile = pcap + '.pid'
         vm_fix_pcap_pid_files =[]
         for vm_fixture in vm_fixtures:
             vm_fixture.run_cmd_on_vm(cmds=cmd_to_tcpdump, as_daemon=True, pidfile=pidfile, as_sudo=True)
             vm_fix_pcap_pid_files.append((vm_fixture, pcap, pidfile))
         return vm_fix_pcap_pid_files
Code example #4
File: base.py Project: Ankitja/contrail-test
 def config_svc_chain(self, rules, vn_list, heat_objs, stack_name='svc_chain'):
     res_name = 'svc_chain'
     if self.heat_api_version == 2:
         res_name += '_v2'
     template = self.get_template(res_name)
     env = self.get_env(res_name)
     env['parameters']['policy_name'] = get_random_name('sc')
     if self.heat_api_version == 2:
          template['resources']['policy']['properties']['network_policy_entries']['network_policy_entries_policy_rule'].extend(rules)
     else:
          env['parameters']['policy_name'] = get_random_name('sc')
          env['parameters']['src_vn_id'] = vn_list[1].uuid
          env['parameters']['dst_vn_id'] = vn_list[2].uuid
          template['resources']['private_policy']['properties']['entries']['policy_rule'].extend(rules)
     stack_name = get_random_name(stack_name)
     svc_hs_obj = self.config_heat_obj(stack_name, template, env)
     if self.heat_api_version != 2:
         return
     op = svc_hs_obj.heat_client_obj.stacks.get(stack_name).outputs
     for output in op:
         if output['output_key'] == 'policy_id':
             policy_id = output['output_value']
         if output['output_key'] == 'policy_fqname':
             policy_fqname = output['output_value']
     policy_fqname = ':'.join(policy_fqname)
     # Hack, policy association doesn't work through heat, rewrite after bug fix
     heat_objs[0].policys = getattr(heat_objs[0], 'policys', [])
     heat_objs[1].policys = getattr(heat_objs[1], 'policys', [])
     heat_objs[0].policys.append(policy_fqname.split(':'))
     heat_objs[1].policys.append(policy_fqname.split(':'))
     vn_list[1].bind_policies(heat_objs[0].policys, vn_list[1].uuid)
     vn_list[2].bind_policies(heat_objs[1].policys, vn_list[2].uuid)
     svc_hs_obj.addCleanup(vn_list[1].unbind_policies, vn_list[1].uuid, [policy_fqname.split(':')])
     svc_hs_obj.addCleanup(vn_list[2].unbind_policies, vn_list[2].uuid, [policy_fqname.split(':')])
     return svc_hs_obj
Code example #5
    def __init__(self, inputs, project_name=None, input_file=None, logger=None,
                 username=None, password=None, domain_name=None):

        self.username = None
        self.password = None
        self.inputs = inputs
        if inputs.domain_isolation:
            self.domain_name = get_random_name(domain_name)
        else :
            self.domain_name = domain_name

        if inputs.tenant_isolation:
            self.project_name = get_random_name(project_name)
        else :
            self.project_name = project_name or inputs.stack_tenant
        if inputs.tenant_isolation and inputs.user_isolation:
            self.username = self.project_name
            self.password = self.project_name
        else:
            self.username = username or inputs.stack_user
            self.password = password or inputs.stack_password

        self.input_file = input_file
        self.logger = logger
        if self.inputs.orchestrator == 'vcenter':
            self.project_name = self.inputs.stack_tenant
            self.username = self.inputs.stack_user
            self.password = self.inputs.stack_password
        if self.inputs.vcenter_gw_setup:  # Fixing tenant as vCenter for vcenter gw setup
            self.project_name = 'vCenter'
            self.username = self.inputs.stack_user
            self.password = self.inputs.stack_password
Code example #6
File: base.py Project: Juniper/contrail-test
    def create_2_legs(self):

        vn1_name = get_random_name('bgpaas_vn')
        vn1_subnets = [get_random_cidr()]
        vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
        test_vm = self.create_vm(vn1_fixture, 'test_vm',
                                 image_name='ubuntu-traffic')
        assert test_vm.wait_till_vm_is_up()
        vn2_name = get_random_name('bgpaas_vn')
        vn2_subnets = [get_random_cidr()]
        vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
        bgpaas_vm1 = self.useFixture(
            VMFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_objs=[
                    vn1_fixture.obj,
                    vn2_fixture.obj],
                vm_name='bgpaas_vm1',
                node_name=None,
                image_name='vsrx'))

        assert bgpaas_vm1.wait_till_vm_is_up()
        ret_dict = {
            'vn1_fixture': vn1_fixture,
            'vn2_fixture': vn2_fixture,
            'test_vm': test_vm,
            'bgpaas_vm1': bgpaas_vm1,
        }
        return ret_dict
Code example #7
File: test_domain.py Project: Ankitja/contrail-test
 def test_perms_with_same_user_in_diff_projects(self):
     ''' Test user roles across projects in the same domain
         1) Create project1 and project2
         2) Create user1 and attach it to project1 with admin role and to project2 with _member_ role
         3) Create VN1 under project1
         4) VN1 should not be readable using project2 creds'''
     username1 = get_random_name('TestUser-1')
     password1 = get_random_name('TestUser-1')
     project_name1 = get_random_name('TestProject-1')
     project_name2 = get_random_name('TestProject-2')
     domain_name = self.connections.domain_name
     project_fix1 = self.create_project(
         domain_name,project_name1,username1,password1)
     project_fix2 = self.create_project(
         domain_name,project_name2,username1,password1)
     self.admin_connections.auth.create_user(user=username1, password=password1,
         tenant_name=project_name1, domain_name=domain_name)
     self.admin_connections.auth.add_user_to_domain(username1,'admin',domain_name)
     self.admin_connections.auth.add_user_to_project(username1,project_name1,'admin')
     self.admin_connections.auth.add_user_to_project(username1,project_name2,'_member_')
     proj_conn1 = project_fix1.get_project_connections()
     proj_conn2 = project_fix2.get_project_connections()
     vn1_fixture = self.useFixture(
         VNFixture(
             project_name=project_fix1.project_name,
             connections=proj_conn1,
             vn_name='p1-vn1',
             subnets=['10.2.2.0/24']))
     assert not self.read_vn(proj_conn2,vn1_fixture.uuid)
Code example #8
File: base.py Project: dattamiruke/contrail-test
 def config_svc_instance(self, stack_name, st_fix, vn_list, max_inst=1):
     res_name = 'svc_inst'
     if st_fix.if_list == ['left', 'right']:
         res_name += '_nomgmt'
     if self.pt_based_svc:
         res_name += '_pt'
     if self.heat_api_version == 2:
         if self.inputs.get_af() == 'v6':
             res_name += '_dual'
         res_name += '_v2'
     template = self.get_template(res_name)
     env = self.get_env(res_name)
     env['parameters']['service_template_fq_name'] = ':'.join(st_fix.st_fq_name)
     env['parameters']['service_instance_name'] = get_random_name(stack_name)
     if env['parameters'].get('svm_name', None):
         env['parameters']['svm_name'] = get_random_name(stack_name)
     if st_fix.svc_mode != 'transparent':
         env['parameters']['right_net_id'] = vn_list[1].vn_fq_name
         env['parameters']['left_net_id'] = vn_list[0].vn_fq_name
     else:
         env['parameters']['right_net_id'] = ''
         env['parameters']['left_net_id'] = ''
     env['parameters'][
         'service_instance_name'] = get_random_name('svc_inst')
     if not self.pt_based_svc:
         env['parameters']['max_instances'] = max_inst
     si_hs_obj = self.config_heat_obj(stack_name, template, env)
     si_name = env['parameters']['service_instance_name']
     si_fix = self.verify_si(si_hs_obj.heat_client_obj, stack_name, si_name, st_fix, max_inst, st_fix.svc_mode, st_fix.image_name)
     return si_fix, si_hs_obj
Code example #9
File: test_domain.py Project: Ankitja/contrail-test
    def test_domain_user_group(self):
        ''' Test user group within a domain
            1) Create project
            2) Create user
            3) Create user_group and attach user to it
            4) Attach user_group to domain and project with admin roles
            5) Get project connections with the user; creating objects under the project should be allowed
            6) Verify the user_group by creating VNs and VMs
        '''

        username = get_random_name('TestUser-1')
        password = get_random_name('TestUser-1')
        project_name = get_random_name('TestProject-1')
        domain_name = self.connections.domain_name
        user_group = get_random_name('TestGroup-1')
        project_fix = self.create_project(
            domain_name,project_name,username,password)
        self.admin_connections.auth.create_user(
            user=username, password=password,
            tenant_name=project_name, domain_name=domain_name)
        self.admin_connections.auth.create_user_group(
            group=user_group, domain_name=domain_name)
        self.admin_connections.auth.add_user_to_group(
            user=username, group=user_group)
        self.admin_connections.auth.add_group_to_domain(
            group=user_group,role='admin', domain=domain_name)
        self.admin_connections.auth.add_group_to_tenant(
            project=project_name, group=user_group,role='admin')
        proj_conn = project_fix.get_project_connections()
        ret = self.setup_common_objects(
            connections=proj_conn, project_fix=project_fix)
        assert ret,'Failed to setup and test common objects'
Code example #10
 def setup_common_namespaces_pods(self, prov_service = False):
     service_ns1 = None
     service_ns2 = None
     vn_for_namespace = self.setup_vn(vn_name = "TestVNNamespace")
     vn_dict_for_namespace = {"domain": vn_for_namespace.domain_name,
                "project" : vn_for_namespace.project_name[0],
                "name": vn_for_namespace.vn_name}
     vn_for_pod = self.setup_vn(vn_name = "TestVNPod")
     vn_dict_for_pod = {"domain": vn_for_pod.domain_name,
                "project" : vn_for_pod.project_name[0],
                "name": vn_for_pod.vn_name}
     namespace1_name = get_random_name("ns1")
     namespace2_name = get_random_name("ns2")
     namespace1 = self.setup_namespace(name = namespace1_name)
     namespace2 = self.setup_namespace(name = namespace2_name, custom_isolation = True,
                                        fq_network_name= vn_dict_for_namespace)
     assert namespace1.verify_on_setup()
     assert namespace2.verify_on_setup()
     ns_1_label = "namespace1"
     ns_2_label = "namespace2"
     client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                          labels={'app': ns_1_label})
     client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                          labels={'app': ns_1_label})
     client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
     client4_ns1 = self.setup_busybox_pod(namespace=namespace1_name,
                                          custom_isolation = True,
                                          fq_network_name= vn_dict_for_pod)
     client5_ns1 = self.setup_busybox_pod(namespace=namespace1_name,
                                          custom_isolation = True,
                                          fq_network_name= vn_dict_for_pod)
     client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                          labels={'app': ns_2_label})
     client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                          labels={'app': ns_2_label})
     client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
     client4_ns2 = self.setup_busybox_pod(namespace=namespace2_name,
                                          custom_isolation = True,
                                          fq_network_name= vn_dict_for_pod)
     assert self.verify_nginx_pod(client1_ns1)
     assert self.verify_nginx_pod(client2_ns1)
     assert client3_ns1.verify_on_setup()
     assert client4_ns1.verify_on_setup()
     assert client5_ns1.verify_on_setup()
     assert self.verify_nginx_pod(client1_ns2)
     assert self.verify_nginx_pod(client2_ns2)
     assert client3_ns2.verify_on_setup()
     assert client4_ns2.verify_on_setup()
     if prov_service == True:
         service_ns1 = self.setup_http_service(namespace=namespace1.name,
                                       labels={'app': ns_1_label})
         service_ns2 = self.setup_http_service(namespace=namespace2.name,
                                       labels={'app': ns_2_label})
     client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
                 namespace1, client4_ns1, client5_ns1]
     client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
                 namespace2, client4_ns2]
     return (client1, client2)
Code example #11
    def test_deployment_with_replica_update_for_fabric_fwd(self):
        '''
        Verifies fabric forwarding is enabled through the deployment object
            1. Create a deployment with n replicas with ip fabric forwarding enabled
            2. verify the replicas are able to reach the public network
            3. update the pod replicas
            4. each pod should be able to reach the public network
        '''
        labels = {'app': 'test'}
        replicas = len(self.inputs.compute_ips)*1
        new_replicas = len(self.inputs.compute_ips)*2

        namespace1_name = get_random_name("ns1")
        namespace1 = self.setup_namespace(name=namespace1_name, isolation=True,
                                         ip_fabric_forwarding=True)
        assert namespace1.verify_on_setup()
        metadata = {}
        spec = {}
        name =  get_random_name('ubuntu-dep')
        template_metadata = {}

        template_metadata['labels'] = labels
        template_spec = {
                'containers': [
                    {'image': 'ubuntu-upstart',
                      "name": "c1",
                      'command': ['sleep', '1000000'],
                      'image_pull_policy': 'IfNotPresent',
                    }
              ]
        }
        spec.update({
            'replicas': replicas,
            'template': {
                'metadata': template_metadata,
                'spec': template_spec
            }
        })
        dep_1 =  self.setup_deployment(name=name, namespace=namespace1_name,
                                     metadata=metadata, spec=spec)
        assert dep_1.verify_on_setup()
        s_pod_fixtures = []
        server_pods = dep_1.get_pods_list()
        for x in server_pods:
            s_pod_fixture = self.setup_ubuntuapp_pod(name=x.metadata.name,
                                                  namespace=namespace1_name)
            s_pod_fixture.verify_on_setup()
            assert s_pod_fixture.ping_to_ip(self.ip_to_ping)

        dep_1.set_replicas(new_replicas)
        assert dep_1.verify_on_setup()
        s_pod_fixtures = []
        server_pods = dep_1.get_pods_list()
        for x in server_pods:
            s_pod_fixture = self.setup_ubuntuapp_pod(name=x.metadata.name,
                                                  namespace=namespace1_name)
            assert s_pod_fixture.verify_on_setup()
            assert s_pod_fixture.ping_to_ip(self.ip_to_ping)
Code example #12
File: config.py Project: Ankitja/contrail-test
    def config_svc_mirroring(self, service_mode='transparent', *args, **kwargs):
        """Validate the service chaining datapath
           Test steps:
           1. Create the SI/ST in svc_mode specified.
           2. Create vn11/vm1, vn21/vm2
           3. Create the policy rule for ICMP/UDP and attach to vn's
           4. Send the traffic from vm1 to vm2 and verify that the packets get mirrored to the analyzer
           5. If it is a single analyzer, only ICMP (5 pkts) will be sent; else ICMP and UDP traffic will be sent.
           Pass criteria:
           count = sent
           single node: Pkts mirrored to the analyzer should be equal to 'count'
           multinode: Pkts mirrored to the analyzer should be equal to '2xcount'
        """
        ci = self.inputs.is_ci_setup()
        create_svms = kwargs.get('create_svms', True)
        vn1_subnets = [get_random_cidr(af=self.inputs.get_af())]
        vn2_subnets = [get_random_cidr(af=self.inputs.get_af())]
        vn1_name = get_random_name('left')
        vn2_name = get_random_name('right')
        st_name = get_random_name("st1")
        action_list = []
        service_type = 'analyzer'
        si_prefix = get_random_name("mirror_si")
        policy_name = get_random_name("mirror_policy")
        vn1_fixture = self.config_vn(vn1_name, vn1_subnets)
        vn2_fixture = self.config_vn(vn2_name, vn2_subnets)

        ret_dict = self.verify_svc_chain(service_mode=service_mode,
                                         service_type=service_type,
                                         left_vn_fixture=vn1_fixture,
                                         right_vn_fixture=vn2_fixture,
                                         create_svms=create_svms, **kwargs)
        si_fixture = ret_dict['si_fixture']
        policy_fixture = ret_dict['policy_fixture']
        si_fq_name = si_fixture.fq_name_str
        rules = [{'direction': '<>',
                       'protocol': 'icmp',
                       'source_network': vn1_fixture.vn_fq_name,
                       'src_ports': [0, 65535],
                       'dest_network': vn2_fixture.vn_fq_name,
                       'dst_ports': [0, 65535],
                       'action_list': {'simple_action': 'pass',
                                       'mirror_to': {'analyzer_name': si_fq_name}}
                       },
                       {'direction': '<>',
                       'protocol': 'icmp6',
                       'source_network': vn1_fixture.vn_fq_name,
                       'src_ports': [0, 65535],
                       'dest_network': vn2_fixture.vn_fq_name,
                       'dst_ports': [0, 65535],
                       'action_list': {'simple_action': 'pass',
                                       'mirror_to': {'analyzer_name': si_fq_name}}
                       }]
        policy_fixture.update_policy_api(rules)
        ret_dict['policy_fixture'] = policy_fixture

        return ret_dict
Code example #13
File: base.py Project: Ankitja/contrail-test
 def config_fip_pool(self, vn):
     stack_name = get_random_name('fip_pool')
     template = self.get_template('fip_pool')
     env = self.get_env('fip_pool')
     env['parameters']['floating_pool'] = get_random_name(
         env['parameters']['floating_pool'])
     env['parameters']['vn'] = vn.get_vn_fq_name()
     fip_pool_hs_obj = self.config_heat_obj(stack_name, template, env)
     return fip_pool_hs_obj
Code example #14
    def test_remove_policy_with_ref(self):
        ''' This tests the following scenarios.
           1. Validate that policy removal will fail when the policy is referenced by a VN.
           2. Validate vn_policy data in the API server against quantum VN data when a policy is created and unbound from the VN through quantum APIs.
           3. Validate policy data in the API server against quantum policy data when policies are created and deleted through quantum APIs.
        '''
        vn1_name = get_random_name('vn4')
        vn1_subnets = ['10.1.1.0/24']
        policy_name = get_random_name('policy1')
        rules = [
            {
                'direction': '<>', 'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_name,
                'dest_network': vn1_name,
            },
        ]
        policy_fixture = self.useFixture(
            PolicyFixture(
                policy_name=policy_name,
                rules_list=rules,
                inputs=self.inputs,
                connections=self.connections))
        vn1_fixture = self.useFixture(
            VNFixture(
                project_name=self.inputs.project_name,
                connections=self.connections,
                vn_name=vn1_name,
                inputs=self.inputs,
                subnets=vn1_subnets,
                policy_objs=[
                    policy_fixture.policy_obj]))
        assert vn1_fixture.verify_on_setup()
        ret = policy_fixture.verify_on_setup()
        if ret['result'] == False:
            self.logger.error(
                "Policy %s verification failed after setup" % policy_name)
            assert ret['result'], ret['msg']

        self.logger.info(
            "Done with setup and verification, moving onto test ..")
        # try to remove policy which  was referenced with VN.
        policy_removal = True
        pol_id = None
        if self.quantum_h:
            policy_removal = self.quantum_h.delete_policy(policy_fixture.get_id())
        else:
            try:
                self.vnc_lib.network_policy_delete(id=policy_fixture.get_id())
            except Exception as e:
                policy_removal = False
        self.assertFalse(
            policy_removal,
            'Policy removal succeeded unexpectedly since the policy is referenced by a VN')
        #assert vn1_fixture.verify_on_setup()
        # policy_fixture.verify_policy_in_api_server()
        return True
Code example #15
 def setup_common_namespaces_pods(self, prov_service = False,
                                 prov_ingress = False,
                                 isolation = False):
     operation = self.modify_cluster_project()
     service_ns1, ingress_ns1 = None, None
     service_ns2, ingress_ns2 = None, None
     namespace1_name = get_random_name("ns1")
     namespace2_name = get_random_name("ns2")
     namespace1 = self.setup_namespace(name = namespace1_name)
     namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation)
     assert namespace1.verify_on_setup()
     assert namespace2.verify_on_setup()
     if operation=="reset":
         assert namespace1.project_isolation
         assert namespace2.project_isolation
     else:
         assert (namespace1.project_isolation == False)
         assert (namespace2.project_isolation == False)
     ns_1_label = "namespace1"
     ns_2_label = "namespace2"
     client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                          labels={'app': ns_1_label})
     client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                          labels={'app': ns_1_label})
     client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
     client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                          labels={'app': ns_2_label})
     client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                          labels={'app': ns_2_label})
     client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
     assert self.verify_nginx_pod(client1_ns1)
     assert self.verify_nginx_pod(client2_ns1)
     assert client3_ns1.verify_on_setup()
     assert self.verify_nginx_pod(client1_ns2)
     assert self.verify_nginx_pod(client2_ns2)
     assert client3_ns2.verify_on_setup()
     if prov_service == True:
         service_ns1 = self.setup_http_service(namespace=namespace1.name,
                                       labels={'app': ns_1_label})
         type = "LoadBalancer" if prov_ingress == False else None 
         service_ns2 = self.setup_http_service(namespace=namespace2.name,
                                       labels={'app': ns_2_label},
                                       type=type)
     if prov_ingress == True:
         ingress_ns1 = self.setup_simple_nginx_ingress(service_ns1.name,
                                               namespace=namespace1.name)
         ingress_ns2 = self.setup_simple_nginx_ingress(service_ns2.name,
                                               namespace=namespace2.name)
         assert ingress_ns1.verify_on_setup()
         assert ingress_ns2.verify_on_setup()
     client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
                 namespace1, ingress_ns1]
     client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
                 namespace2, ingress_ns2]
     return (client1, client2)
Code example #16
File: base.py Project: Juniper/contrail-test-ci
    def config_sec_groups(self):
        self.sg1_name = "test_tcp_sec_group" + "_" + get_random_name()
        rule = [
            {
                "direction": "<>",
                "protocol": "tcp",
                "dst_addresses": [
                    {"subnet": {"ip_prefix": "10.1.1.0", "ip_prefix_len": 24}},
                    {"subnet": {"ip_prefix": "20.1.1.0", "ip_prefix_len": 24}},
                ],
                "dst_ports": [{"start_port": 0, "end_port": -1}],
                "src_ports": [{"start_port": 0, "end_port": -1}],
                "src_addresses": [{"security_group": "local"}],
            },
            {
                "direction": "<>",
                "protocol": "tcp",
                "src_addresses": [
                    {"subnet": {"ip_prefix": "10.1.1.0", "ip_prefix_len": 24}},
                    {"subnet": {"ip_prefix": "20.1.1.0", "ip_prefix_len": 24}},
                ],
                "src_ports": [{"start_port": 0, "end_port": -1}],
                "dst_ports": [{"start_port": 0, "end_port": -1}],
                "dst_addresses": [{"security_group": "local"}],
            },
        ]

        self.sg1_fix = self.config_sec_group(name=self.sg1_name, entries=rule)

        self.sg2_name = "test_udp_sec_group" + "_" + get_random_name()
        rule = [
            {
                "direction": "<>",
                "protocol": "udp",
                "dst_addresses": [
                    {"subnet": {"ip_prefix": "10.1.1.0", "ip_prefix_len": 24}},
                    {"subnet": {"ip_prefix": "20.1.1.0", "ip_prefix_len": 24}},
                ],
                "dst_ports": [{"start_port": 0, "end_port": -1}],
                "src_ports": [{"start_port": 0, "end_port": -1}],
                "src_addresses": [{"security_group": "local"}],
            },
            {
                "direction": "<>",
                "protocol": "udp",
                "src_addresses": [
                    {"subnet": {"ip_prefix": "10.1.1.0", "ip_prefix_len": 24}},
                    {"subnet": {"ip_prefix": "20.1.1.0", "ip_prefix_len": 24}},
                ],
                "src_ports": [{"start_port": 0, "end_port": -1}],
                "dst_ports": [{"start_port": 0, "end_port": -1}],
                "dst_addresses": [{"security_group": "local"}],
            },
        ]
        self.sg2_fix = self.config_sec_group(name=self.sg2_name, entries=rule)
Code example #17
 def setup_common_namespaces_pods(self, prov_service = False):
     service_ns1 = None
     service_ns2 = None
     service_ns3 = None
     namespace1_name = get_random_name("ns1")
     namespace2_name = get_random_name("ns2")
     namespace3_name = get_random_name("ns3")
     namespace1 = self.setup_namespace(name = namespace1_name, isolation = True)
     namespace2 = self.setup_namespace(name = namespace2_name, isolation = True)
     namespace3 = self.setup_namespace(name = namespace3_name)
     assert namespace1.verify_on_setup()
     assert namespace2.verify_on_setup()
     assert namespace3.verify_on_setup()
     ns_1_label = "namespace1"
     ns_2_label = "namespace2"
     ns_3_label = "namespace3"
     client1_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                          labels={'app': ns_1_label})
     client2_ns1 = self.setup_nginx_pod(namespace=namespace1_name,
                                          labels={'app': ns_1_label})
     client3_ns1 = self.setup_busybox_pod(namespace=namespace1_name)
     client1_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                          labels={'app': ns_2_label})
     client2_ns2 = self.setup_nginx_pod(namespace=namespace2_name,
                                          labels={'app': ns_2_label})
     client3_ns2 = self.setup_busybox_pod(namespace=namespace2_name)
     client1_ns3 = self.setup_nginx_pod(namespace=namespace3_name,
                                          labels={'app': ns_3_label})
     client2_ns3 = self.setup_nginx_pod(namespace=namespace3_name,
                                          labels={'app': ns_3_label})
     client3_ns3 = self.setup_busybox_pod(namespace=namespace3_name)
     assert self.verify_nginx_pod(client1_ns1)
     assert self.verify_nginx_pod(client2_ns1)
     assert client3_ns1.verify_on_setup()
     assert self.verify_nginx_pod(client1_ns2)
     assert self.verify_nginx_pod(client2_ns2)
     assert client3_ns2.verify_on_setup()
     assert self.verify_nginx_pod(client1_ns3)
     assert self.verify_nginx_pod(client2_ns3)
     assert client3_ns3.verify_on_setup()
     if prov_service == True:
         service_ns1 = self.setup_http_service(namespace=namespace1.name,
                                       labels={'app': ns_1_label})
         service_ns2 = self.setup_http_service(namespace=namespace2.name,
                                       labels={'app': ns_2_label})
         service_ns3 = self.setup_http_service(namespace=namespace3.name,
                                       labels={'app': ns_3_label})
     client1 = [client1_ns1, client2_ns1, client3_ns1, service_ns1,\
                 namespace1]
     client2 = [client1_ns2, client2_ns2, client3_ns2, service_ns2,\
                 namespace2]
     client3 = [client1_ns3, client2_ns3, client3_ns3, service_ns3,\
                 namespace3]
     return (client1, client2, client3)
Code example #18
File: base.py Project: Ankitja/contrail-test
 def config_intf_rt_table(self, prefix, si_fqdn, si_intf_type):
     stack_name = get_random_name('intf_rt_table')
     template = self.get_template('intf_rt_table')
     env = self.get_env('intf_rt_table')
     env['parameters']['intf_rt_table_name'] = get_random_name(
         env['parameters']['intf_rt_table_name'])
     env['parameters']['route_prefix'] = prefix
     env['parameters']['si_fqdn'] = si_fqdn
     env['parameters']['si_intf_type'] = si_intf_type
     intf_rt_table_hs_obj = self.config_heat_obj(stack_name, template, env)
     return intf_rt_table_hs_obj
Code example #19
File: base.py Project: dattamiruke/contrail-test
 def config_vms(self, vn_list):
     stack_name = 'vms'
     template = self.get_template('vms')
     env = self.get_env('vms')
     env['parameters']['right_vm_name'] = get_random_name(env['parameters']['right_vm_name'])
     env['parameters']['left_vm_name'] = get_random_name(env['parameters']['left_vm_name'])
     env['parameters']['right_net_id'] = vn_list[1].vn_id
     env['parameters']['left_net_id'] = vn_list[0].vn_id
     vms_hs_obj = self.config_heat_obj(stack_name, template, env)
     stack = vms_hs_obj.heat_client_obj
     vm_fix = self.verify_vms(stack, vn_list, env, stack_name)
     return vm_fix
Code example #20
 def setup_namespaces_pods_for_fabric_test(self, isolation=False,ip_fabric_forwarding=False):
     """ common routine to create the namesapces and the pods  by enabling the fabric forwarding
         1.create 2 namespaces (ns1,ns2 enable fabric forwarding)
         2.create pods in each namespace and verify(ns1:pod1,pod2(c1,c2), ns2:pod1,default:pod1)
     """
     namespace1_name = get_random_name("ns1")
     namespace2_name = get_random_name("ns2")
     namespace1 = self.setup_namespace(name = namespace1_name, isolation = isolation,
                                       ip_fabric_forwarding = ip_fabric_forwarding)
     namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation,
                                       ip_fabric_forwarding = ip_fabric_forwarding)
     #verifying namespaces have been created
     assert namespace1.verify_on_setup()
     assert namespace2.verify_on_setup()
     label = "fabric"
     #create a pod in the default namespace
     pod1_in_default_ns = self.setup_ubuntuapp_pod()
     #create two pods in the fabric forwarding enabled namespace ns1
     #pod1 with one container and pod2 with two containers
     pod1_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                            labels={'app': label})
     spec =  {
             'containers': [
                 {'image': 'ubuntu-upstart',
                   "name": "c1",
                   'command': ['sleep', '1000000'],
                   'image_pull_policy': 'IfNotPresent'
                 },
                 {'image': 'ubuntu-upstart',
                  "name": "c2",
                  'command': ['sleep', '1000000'],
                  'image_pull_policy': 'IfNotPresent'
                 }
           ]
     }
     pod2_in_ns1 = self.setup_pod(namespace=namespace1_name,
                                            spec=spec,
                                            labels={'app': label})
     #create pod 3 without associating any label
     pod3_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name)
     #create a pod in fabric forwarding enabled namespace
     pod1_in_ns2 = self.setup_ubuntuapp_pod(namespace=namespace2_name,
                                            labels={'app': label})
     assert pod1_in_default_ns.verify_on_setup()
     assert pod1_in_ns1.verify_on_setup()
     assert pod2_in_ns1.verify_on_setup()
     assert pod3_in_ns1.verify_on_setup()
     assert pod1_in_ns2.verify_on_setup()
     client1 = [pod1_in_ns1, pod2_in_ns1, pod3_in_ns1, namespace1]
     client2 = [pod1_in_ns2, namespace2]
     client3 = [pod1_in_default_ns]
     return (client1, client2, client3)
Code example #21
    def __init__(self, domain='default-domain', project='admin', username=None, password=None):
        #
        # Domain and project defaults: Do not change until support for
        # non-default is tested!
        self.domain = domain
        self.project = project
        self.username = username
        self.password = password
        #
        # Define VN's in the project:
        self.vnet_list = [get_random_name('vnet0')]
        #
        # Define network info for each VN:
        if self.project == 'vCenter':
            # For vcenter, only one subnet per VN is supported
            self.vn_nets = {self.vnet_list[0]: [get_random_cidr(af='v4')]}
        else:
            self.vn_nets = {self.vnet_list[0]: ['10.1.1.0/24', '11.1.1.0/24']}
        #
        # Define network policies
        self.policy_list = list()
        for i in range(10):
            self.policy_list.append(get_random_name('policy%d'%i))
        self.vn_policy = {self.vnet_list[0]: self.policy_list}
        #
        # Define VM's
        # VM distribution on available compute nodes is handled by nova
        # scheduler or contrail vm naming scheme
        self.vn_of_vm = {get_random_name('vmc0'): self.vnet_list[0]}
        #
        # Define network policy rules
        self.rules = {}

        self.rules[self.policy_list[0]] = [{'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[1]] = [{'direction': '>', 'protocol': 'icmp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'icmp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[2]] = [{'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[3]] = [{'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[4]] = [{'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[5]] = [{'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[6]] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[7]] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[8]] = [{'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'udp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]

        self.rules[self.policy_list[9]] = [{'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [0, 0]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [1, 1]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [2, 2]}, {'direction': '>', 'protocol': 'tcp', 'dest_network': self.vnet_list[0], 'source_network': self.vnet_list[0], 'dst_ports': 'any', 'simple_action': 'deny', 'src_ports': [3, 3]}]
Code example #22
File: base.py Project: Ankitja/contrail-test
 def config_vm(self, vn):
     stack_name = get_random_name('single_vm')
     template = self.get_template('single_vm')
     env = self.get_env('single_vm')
     env['parameters']['vm_name'] = get_random_name(
         env['parameters']['vm_name'])
     env['parameters']['net_id'] = vn.vn_id
     vm_hs_obj = self.config_heat_obj(stack_name, template, env)
     vm_fix = self.useFixture(VMFixture(project_name=self.inputs.project_name,
                                        vn_obj=vn.obj, vm_name=str(env['parameters']['vm_name']), connections=self.connections))
     # ToDo: Do we really need to wait till the VMs are up, may be move it down where we login to the VM
     assert vm_fix.wait_till_vm_is_up()
     return vm_hs_obj, vm_fix
Code example #23
File: base.py Project: Ankitja/contrail-test
 def config_svc_instance(self, stack_name, st_fix, vn_list, max_inst=1):
     res_name = 'svc_inst'
     if self.pt_based_svc:
         res_name += '_pt'
     if self.heat_api_version == 2:
         if self.inputs.get_af() == 'v6':
             res_name += '_dual'
         res_name += '_v2'
     template = self.get_template(res_name)
     env = self.get_env(res_name)
     env['parameters']['service_template_fq_name'] = ':'.join(st_fix.st_fq_name)
     env['parameters']['service_instance_name'] = get_random_name(stack_name)
     if env['parameters'].get('svm_name', None):
         env['parameters']['svm_name'] = get_random_name(stack_name)
     if self.pt_based_svc:
         env['parameters']['security_group_ref'] = (':').join(
             self.project_fq_name) + ':default'
         env['parameters']['mgmt_net_id'] = vn_list[0].vn_fq_name
         if self.inputs.availability_zone:
             env['parameters']['availability_zone'] = self.inputs.availability_zone
         if st_fix.service_mode == 'transparent':
             env['parameters']['image'] = 'tiny_trans_fw'
         elif st_fix.service_mode == 'in-network':
             env['parameters']['image'] = 'tiny_in_net'
         elif st_fix.service_mode == 'in-network-nat':
             env['parameters']['image'] = 'tiny_nat_fw'
         else:
             raise Exception('Unsupported ST mode %s'%(st_fix.service_mode))
         env['parameters']['flavor'] = self.nova_h.get_default_image_flavor(env['parameters']['image'])
         self.nova_h.get_image(env['parameters']['image'])
         self.nova_h.get_flavor(env['parameters']['flavor'])
     else:
         env['parameters']['max_instances'] = max_inst
     if self.pt_based_svc and st_fix.service_mode == 'transparent':
         #for transparent service, VM needs to be part of dummy virtual network
         dummy_vn1, d1_hs_obj = self.config_vn(stack_name='dummy_v1')
         dummy_vn2, d2_hs_obj = self.config_vn(stack_name='dummy_v2')
         env['parameters']['left_net_id'] = dummy_vn1.vn_fq_name
         env['parameters']['right_net_id'] = dummy_vn2.vn_fq_name
     elif not self.pt_based_svc and st_fix.service_mode == 'transparent':
          # In case of SVC v1 and transparent, need to set the right and left net as auto
         env['parameters']['right_net_id'] = 'auto'
         env['parameters']['left_net_id'] = 'auto'
     else:
         env['parameters']['right_net_id'] = vn_list[2].vn_fq_name
         env['parameters']['left_net_id'] = vn_list[1].vn_fq_name
     stack_name = get_random_name(stack_name)
     si_hs_obj = self.config_heat_obj(stack_name, template, env)
     si_name = env['parameters']['service_instance_name']
     si_fix = self.verify_si(si_hs_obj.heat_client_obj, stack_name, si_name, st_fix, max_inst, st_fix.service_mode, st_fix.image_name)
     return si_fix, si_hs_obj
Code example #24
File: test_policy.py Project: Ankitja/contrail-test
 def test_policy_source_dest_cidr(self):
     '''Test CIDR as match criteria for source and destination
     1) Create a VN and 3 VMs
     2) Create a policy that denies traffic, passing CIDRs as source and destination
     3) Ping between vm1 and vm2. Ping should fail
     4) Ping between vm1 and vm3. Ping should pass'''
     vn1_name = get_random_name('vn1')
     vn1_subnets = ['192.168.10.0/24']
     policy_name = get_random_name('policy1')
     vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
     assert vn1_fixture.verify_on_setup()
     vn1_vm1_name = get_random_name('vn1_vm1')
     vn1_vm2_name = get_random_name('vn1_vm2')
     vn1_vm3_name = get_random_name('vn1_vm3')
     vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
     vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
     vm3_fixture = self.create_vm(vn1_fixture, vn1_vm3_name)
     vm1_fixture.wait_till_vm_is_up()
     vm2_fixture.wait_till_vm_is_up()
     vm3_fixture.wait_till_vm_is_up()
     rules = [
         {
             'direction': '<>', 'simple_action': 'deny',
             'protocol': 'icmp',
             'source_subnet': vm1_fixture.vm_ip + '/32',
             'dest_subnet': vm2_fixture.vm_ip + '/32',
         },
     ]
     policy_fixture = self.useFixture(
         PolicyFixture(
             policy_name=policy_name, rules_list=rules, inputs=self.inputs,
             connections=self.connections))
     vn1_fixture.bind_policies(
         [policy_fixture.policy_fq_name], vn1_fixture.vn_id)
     self.addCleanup(vn1_fixture.unbind_policies,
                     vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
     err_msg_on_pass = 'Ping from %s to %s passed, expected it to fail' % (
                                     vm1_fixture.vm_name,vm2_fixture.vm_name)
     err_msg_on_fail = 'Ping from %s to %s failed,expected it to Pass' % (
                                     vm1_fixture.vm_name,vm3_fixture.vm_name)
     assert not vm1_fixture.ping_to_ip(vm2_fixture.vm_ip),err_msg_on_pass
     self.logger.debug('Verify packets are dropped by policy')
     vm_node_ip = vm1_fixture.vm_node_ip
     cmd= " flow -l | grep -A3 %s | grep -A3 %s | grep 'Action:D(Policy)' " %(
                                             vm2_fixture.vm_ip,vm1_fixture.vm_ip)
     output = self.inputs.run_cmd_on_server(vm_node_ip,cmd,username='******',password='******',
                                            container='agent')
     assert output,'Packets are not dropped by policy rule'
     assert vm1_fixture.ping_to_ip(vm3_fixture.vm_ip),err_msg_on_fail
     self.logger.info('Ping from %s to %s failed, expected to fail. Test passed' %
         (vm1_fixture.vm_name, vm2_fixture.vm_name))
Code example #25
File: base.py Project: dattamiruke/contrail-test
 def config_pt_si(self, stack_name, st_fix, vn_list, max_inst=1):
     template = self.get_template(stack_name)
     env = self.get_env(stack_name)
     env['parameters']['service_template_fq_name'] = ':'.join(
         st_fix.st_fq_name)
     if env['parameters'].get('svm_name', None):
         env['parameters']['svm_name'] = get_random_name(stack_name)
     env['parameters']['right_net_id'] = vn_list[2].vn_fq_name
     env['parameters']['left_net_id'] = vn_list[1].vn_fq_name
     env['parameters']['mgmt_net_id'] = vn_list[0].vn_fq_name
     env['parameters'][
         'service_instance_name'] = get_random_name('svc_inst')
     pt_si_hs_obj = self.config_heat_obj(stack_name, template, env)
     return pt_si_hs_obj
Code example #26
File: fabric_utils.py Project: Juniper/contrail-test
    def onboard_existing_fabric(self, fabric_dict, wait_for_finish=True,
                                name=None, cleanup=False):
        interfaces = {'physical': [], 'logical': []}
        devices = list()
        
        name = get_random_name(name) if name else get_random_name('fabric')

        fq_name = ['default-global-system-config',
                   'existing_fabric_onboard_template']
        payload = {'fabric_fq_name': ["default-global-system-config", name],
                   'node_profiles': [{"node_profile_name": profile}
                       for profile in fabric_dict.get('node_profiles')\
                                      or NODE_PROFILES],
                   'device_auth': [{"username": cred['username'],
                                    "password": cred['password']}
                       for cred in fabric_dict['credentials']],
                   'overlay_ibgp_asn': fabric_dict['namespaces']['asn'][0]['min'],
                   'management_subnets': [{"cidr": mgmt["cidr"]}
                       for mgmt in fabric_dict['namespaces']['management']]
                  }
        self.logger.info('Onboarding existing fabric %s %s'%(name, payload))
        execution_id = self.vnc_h.execute_job(fq_name, payload)
        status, fabric = self._get_fabric_fixture(name)
        assert fabric, 'Create fabric seems to have failed'
        if cleanup:
            self.addCleanup(self.cleanup_fabric, fabric, devices, interfaces)
        if wait_for_finish:
            try:
                status = self.wait_for_job_to_finish(':'.join(fq_name), execution_id)
            except AssertionError:
                self.cleanup_fabric(fabric, verify=False)
                raise
            assert status, 'job %s to create fabric failed'%execution_id
            for device in fabric.fetch_associated_devices() or []:
                device_fixture = PhysicalDeviceFixture(connections=self.connections,
                                                       name=device)
                device_fixture.read()
                device_fixture.add_csn()
                devices.append(device_fixture)
            for device in devices:
                for port in device.get_physical_ports():
                    pif = PhysicalInterfaceFixture(uuid=port, connections=self.connections)
                    pif.read()
                    interfaces['physical'].append(pif)
            for pif in interfaces['physical']:
                for port in pif.get_logical_ports():
                    lif = LogicalInterfaceFixture(uuid=port, connections=self.connections)
                    lif.read()
                    interfaces['logical'].append(lif)
        return (fabric, devices, interfaces)
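A minimal usage sketch for the helper above, assuming a test class that mixes in this fabric_utils method; the fabric_dict layout is inferred from the keys the payload builder reads, and the credentials and management subnet are placeholders.

    def test_onboard_existing_fabric_sketch(self):
        # Sketch only: fabric_dict mirrors the keys read by onboard_existing_fabric()
        fabric_dict = {
            'credentials': [{'username': 'root', 'password': '<device-password>'}],
            'namespaces': {'asn': [{'min': 64512}],
                           'management': [{'cidr': '10.1.1.0/24'}]},
            'node_profiles': None,  # fall back to the NODE_PROFILES default
        }
        fabric, devices, interfaces = self.onboard_existing_fabric(
            fabric_dict, wait_for_finish=True, cleanup=True)
        assert devices, 'No devices were discovered on the onboarded fabric'
        self.logger.info('Onboarded %d devices and %d physical interfaces',
                         len(devices), len(interfaces['physical']))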
Code Example #27
0
File: test_domain.py Project: Ankitja/contrail-test
 def setup_common_objects(self, connections, project_fix):
     vn1_name = get_random_name('TestVN-1')
     vn1_subnet = ['10.1.1.0/24']
     vn2_name = get_random_name('TestVN-2')
     vn2_subnet = ['10.2.2.0/24']
     vm1_vn1_name = get_random_name('TestVM-1')
     vm2_vn2_name = get_random_name('TestVM-2')
     policy_name = get_random_name('TestPolicy')
     rules = [{'direction': '<>',
               'protocol': 'icmp',
               'dest_network': 'any',
               'source_network': 'any',
               'dst_ports': 'any',
               'simple_action': 'pass',
               'src_ports': 'any'}]
     self.vn1_fixture = self.create_vn(project_fix,connections,vn1_name,vn1_subnet)
     self.vn2_fixture = self.create_vn(project_fix,connections,vn2_name,vn2_subnet)
     policy_fixture = self.useFixture(
         PolicyFixture(
             policy_name=policy_name,
             rules_list=rules,
             inputs=self.inputs,
             connections=connections))
     policy_fq_name = [policy_fixture.policy_fq_name]
     self.vn1_fixture.bind_policies(
         policy_fq_name, self.vn1_fixture.vn_id)
     self.addCleanup(self.vn1_fixture.unbind_policies,
                     self.vn1_fixture.vn_id, [policy_fixture.policy_fq_name])
     self.vn2_fixture.bind_policies(
         policy_fq_name, self.vn2_fixture.vn_id)
     self.addCleanup(self.vn2_fixture.unbind_policies,
                     self.vn2_fixture.vn_id, [policy_fixture.policy_fq_name])
     self.vm1_fixture = self.useFixture(
         VMFixture(
             connections=connections,
             vn_obj=self.vn1_fixture.obj,
             vm_name=vm1_vn1_name,
             project_name=project_fix.project_name))
     self.vm2_fixture = self.useFixture(
         VMFixture(
             connections=connections,
             vn_obj=self.vn2_fixture.obj,
             vm_name=vm2_vn2_name,
             project_name=project_fix.project_name))
     self.vm1_fixture.wait_till_vm_is_up()
     self.vm2_fixture.wait_till_vm_is_up()
     ret = self.vm1_fixture.ping_with_certainty(expectation=True,
                                                dst_vm_fixture=self.vm2_fixture)
     return ret
Code Example #28
0
    def setup_common_namespaces_pods(self, isolation=False, ip_fabric_snat=False,
                                     ip_fabric_forwarding=False):
        """ common routine to create the namesapces and the pods  by enabling the fabric snat
            and fabric forwarding
            1.create 3 namespaces (ns1:enable snat,ns2:enable fabric forwarding and snat,ns3:enable snat)
            2.create pods in each namespace and verify(ns1:pod1,pod2, ns2:pod1, ns3:pod1 ,default:pod1)
        """
        namespace1_name = get_random_name("ns1")
        namespace2_name = get_random_name("ns2")
        namespace3_name = get_random_name("ns3")
        namespace1 = self.setup_namespace(name = namespace1_name, isolation = isolation,
                                                 ip_fabric_snat = ip_fabric_snat,
                                                 ip_fabric_forwarding = False)
        namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation,
                                                 ip_fabric_snat = ip_fabric_snat,
                                                 ip_fabric_forwarding = ip_fabric_forwarding)
        namespace3 = self.setup_namespace(name = namespace3_name, isolation = isolation,
                                                 ip_fabric_snat = ip_fabric_snat,
                                                 ip_fabric_forwarding = False)
        assert namespace1.verify_on_setup()
        assert namespace2.verify_on_setup()
        assert namespace3.verify_on_setup()
        label1 = "snat"
        label2 = "snatfabric"
        #create a pod in default namespaces
        pod1_in_default_ns = self.setup_ubuntuapp_pod()
        #create a two pods in snat enabled namespace
        pod1_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                             labels={'app': label1})
        pod2_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                             labels={'app': label1})
        #create a pod in snat and ip fabric enabled namespace
        pod1_in_ns2 = self.setup_ubuntuapp_pod(namespace=namespace2_name,
                                             labels={'app': label2})
        #create a pod in snat enabled namespace
        pod1_in_ns3 = self.setup_ubuntuapp_pod(namespace=namespace3_name,
                                             labels={'app': label1})

        assert pod1_in_default_ns.verify_on_setup()
        assert pod1_in_ns1.verify_on_setup()
        assert pod2_in_ns1.verify_on_setup()
        assert pod1_in_ns2.verify_on_setup()
        assert pod1_in_ns3.verify_on_setup()

        client1 = [pod1_in_ns1, pod2_in_ns1,  namespace1]
        client2 = [pod1_in_ns2, namespace2]
        client3 = [pod1_in_ns3, namespace3]
        client4 = [pod1_in_default_ns]
        return (client1, client2, client3, client4)
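A hedged sketch of consuming the client tuples returned above, assuming the same test class provides this helper; only calls already shown in the snippet (verify_on_setup) are exercised.

    def test_fabric_snat_sketch(self):
        # Sketch only: namespaces isolated, SNAT everywhere, forwarding only in ns2
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(
            isolation=True, ip_fabric_snat=True, ip_fabric_forwarding=True)
        # Each clientN is [pod(s)..., namespace]; client4 is just the default-ns pod
        for client in (client1, client2, client3):
            namespace = client[-1]
            assert namespace.verify_on_setup()
            for pod in client[:-1]:
                assert pod.verify_on_setup()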
Code Example #29
0
File: test_domain.py Project: Ankitja/contrail-test
 def test_crud_domain(self):
     ''' Test create, read, update and delete of a domain.
     '''
     domain_name = get_random_name('TestDomain-1')
     username = '******'
     password = '******'
     domain_fix = self.useFixture(
         DomainFixture(connections=self.admin_connections,
                       domain_name=domain_name,
                       username=username, password=password))
     domain_name_new = get_random_name('TestDomain-New')
     domain_update = domain_fix.update_domain(domain_name_new,
                                              description='Changed the domain name as part of update.',
                                              enabled=True)
     domain_found = domain_fix.get_domain()
     assert (domain_update and domain_found)
Code Example #30
0
    def __init__(
        self, connections, domain_name=None, project_name=None, secgrp_name=None,
        uuid=None, secgrp_entries=None, option='orch'):
        # option: 'orch' or 'contrail'
        self.connections = connections
        self.inputs = connections.inputs
        self.logger = connections.logger
        self.vnc_lib_h = connections.get_vnc_lib_h()
        self.api_s_inspect = connections.api_server_inspect
        self.domain_name = self.connections.domain_name
        self.project_name = self.inputs.project_name
        self.secgrp_name = secgrp_name or get_random_name(self.project_name)
        self.secgrp_id = uuid
        self.secgrp_entries = secgrp_entries
        self.already_present = True
        self.domain_fq_name = [self.domain_name]
        self.project_fq_name = [self.domain_name, self.project_name]
        self.project_id = self.connections.get_project_id()
        self.secgrp_fq_name = [self.domain_name,
                               self.project_name, self.secgrp_name]
        self.cn_inspect = self.connections.cn_inspect
        self.orch = self.connections.orch
        self.option = option
        self.verify_is_run = False
        if self.inputs.verify_thru_gui():
            self.webui = WebuiTest(self.connections, self.inputs)
Code Example #31
0
File: base.py Project: soumilk91/contrail-test-ci
    def config_svc_template(self, stack_name=None, scaling=False, mode='in-network-nat'):
        nomgmt = False
        ver = 1
        res_name = 'svc_tmpl'
        if mode == 'in-network' and self.inputs.get_af() == 'v6':
            res_name += '_nomgmt'
            nomgmt = True
        if self.pt_based_svc:
            res_name += '_pt'
            nomgmt = True
        if self.heat_api_version == 2:
            ver = 2
            res_name += '_v2'
        template = self.get_template(res_name)
        env = self.get_env(res_name)
        env['parameters']['mode'] = mode
        env['parameters']['name'] = get_random_name(stack_name)

        if not self.pt_based_svc:
            if mode == 'transparent':
                env['parameters']['image'] = 'tiny_trans_fw'
            if mode == 'in-network':
                env['parameters']['image'] = 'tiny-in-net'
                if self.inputs.get_af() == 'v6':
                    env['parameters']['image'] = 'ubuntu-in-net'
            env['parameters']['service_scaling'] = scaling
            if scaling:
                if self.heat_api_version == 2:
                    if mode != 'in-network-nat':
                        env['parameters']['left_shared'] = True
                        env['parameters']['right_shared'] = True
                    else:
                        env['parameters']['left_shared'] = True
                        env['parameters']['right_shared'] = False
                else:
                    if mode != 'in-network-nat':
                        env['parameters']['shared_ip_list'] = 'False,True,True'
                    else:
                        env['parameters']['shared_ip_list'] = 'False,True,False'

        if 'image' in env['parameters']:
            self.nova_h.get_image(env['parameters']['image'])
        svc_temp_hs_obj = self.config_heat_obj(stack_name, template, env)
        st = self.verify_st(env, scaling, nomgmt, ver)
        return st
Code Example #32
0
File: verify.py Project: pltf/contrail-test
    def verify_vgw_with_native_vm(self, compute_type):

        result = True

        # Verification of VN
        assert self.vn_fixture_dict[0].verify_on_setup()

        # Selection of compute to launch VM and VGW to configure
        host_list = self.connections.nova_h.get_hosts()
        vgw_compute = None
        vm_compute = None
        if len(host_list) > 1:
            for key in self.vgw_vn_list:
                if key.split(":")[3] == self.vn_fixture_dict[0].vn_name:
                    vgw_compute = self.vgw_vn_list[key]['host'].split("@")[1]

            if compute_type == 'same':
                vm_compute = self.inputs.host_data[vgw_compute]['name']
            else:
                host_list.remove(self.inputs.host_data[vgw_compute]['name'])
                vm_compute = self.inputs.host_data[host_list[0]]['name']
        else:
            vm_compute = self.inputs.host_data[host_list[0]]['name']
            vgw_compute = host_list[0]

        vm1_name = get_random_name('VGW_VM1-Native-' + vm_compute)
        # Creation of VM and validation
        vm1_fixture = self.useFixture(
            VMFixture(project_name=self.inputs.project_name,
                      connections=self.connections,
                      vn_obj=self.vn_fixture_dict[0].obj,
                      vm_name=vm1_name,
                      node_name=vm_compute))
        assert vm1_fixture.verify_on_setup()

        self.logger.info("Now trying to ping www-int.juniper.net")
        if not vm1_fixture.ping_with_certainty('www-int.juniper.net'):
            result = result and False

        if not result:
            self.logger.error(
                'Test ping outside VN cluster from VM %s failed' % (vm1_name))
            assert result

        return True
Code Example #33
0
File: base.py Project: jianwel/contrail-test-ci
 def create_only_vn(cls, vn_name=None, vn_subnets=None, vxlan_id=None,
                enable_dhcp=True, **kwargs):
     '''Classmethod to do only VN creation
     '''
     if not vn_name:
         vn_name = get_random_name('vn')
     if not vn_subnets:
         vn_subnets = [get_random_cidr()]
     vn_fixture = VNFixture(project_name=cls.inputs.project_name,
                   connections=cls.connections,
                   inputs=cls.inputs,
                   vn_name=vn_name,
                   subnets=vn_subnets,
                   vxlan_id=vxlan_id,
                   enable_dhcp=enable_dhcp,
                   **kwargs)
     vn_fixture.setUp()
     return vn_fixture
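A short sketch of calling the classmethod above; 'MyTest' is purely illustrative for whichever base class defines it, and teardown is the caller's responsibility because only setUp() is invoked.

 # Illustrative only: 'MyTest' stands in for the class that defines create_only_vn
 vn_fixture = MyTest.create_only_vn(vn_name='demo-vn',
                                    vn_subnets=['10.10.10.0/24'],
                                    vxlan_id=5001,
                                    enable_dhcp=True)
 assert vn_fixture.verify_on_setup()
 # Caller owns cleanup since setUp() was called directly:
 # vn_fixture.cleanUp()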
Code Example #34
0
File: base.py Project: pltf/contrail-test
 def create_st(self, connections=None, verify=True):
     connections = connections or self.connections
     st_fixture = self.create_fixture(SvcTemplateFixture,
                                      connections=connections,
                                      st_name=get_random_name(
                                          connections.project_name),
                                      svc_img_name='tiny_nat_fw',
                                      service_type='firewall',
                                      if_details={
                                          'management': {},
                                          'left': {},
                                          'right': {}
                                      },
                                      service_mode='in-network-nat',
                                      svc_scaling=False)
     if st_fixture and verify:
         assert st_fixture.verify_on_setup(), 'ST verification failed'
     return st_fixture
Code Example #35
0
 def __init__(self,
              connections,
              name=None,
              namespace='default',
              metadata=None,
              spec=None):
     self.logger = connections.logger or contrail_logging.getLogger(
         __name__)
     self.inputs = connections.inputs
     self.name = name or metadata.get('name') or get_random_name('nad')
     self.namespace = namespace
     self.k8s_client = connections.k8s_client
     self.metadata = {} if metadata is None else metadata
     self.spec = {} if spec is None else spec
     self.already_exists = None
     self.connections = connections
     self.vnc_lib = connections.get_vnc_lib_h()
     self.agent_inspect = connections.agent_inspect
Code Example #36
0
 def __init__(self, connections, vdns_fq_name, virtual_DNS_record_data,
              **kwargs):
     dns_record_data = virtual_DNS_record_data
     name = kwargs.get('virtual_DNS_record_name', None)
     domain = vdns_fq_name
     self._api = kwargs.get('option', 'contrail')
     self.inputs = connections.inputs
     if name:
         uid = self._check_if_present(connections, name, [domain])
         if uid:
             super(VdnsRecordFixture,
                   self).__init__(connections=connections, uuid=uid)
             return
     else:
         name = get_random_name("vdnsRecord")
     self._construct_contrail_params(name, domain, dns_record_data, kwargs)
     super(VdnsRecordFixture, self).__init__(connections=connections,
                                             params=self._params)
Code Example #37
0
File: base.py Project: vedujoshi/contrail-test-ci
 def create_vm(self,
               vn_fixture,
               vm_name=None,
               node_name=None,
               flavor='contrail_flavor_small',
               image_name='ubuntu-traffic',
               port_ids=[]):
     if not vm_name:
         vm_name = 'vm-%s' % (get_random_name(vn_fixture.vn_name))
     return self.useFixture(
         VMFixture(project_name=self.inputs.project_name,
                   connections=self.connections,
                   vn_obj=vn_fixture.obj,
                   vm_name=vm_name,
                   image_name=image_name,
                   flavor=flavor,
                   node_name=node_name,
                   port_ids=port_ids))
Code Example #38
0
File: service.py Project: vvelpula/contrail-test
    def __init__(self,
                 connections,
                 name=None,
                 namespace='default',
                 metadata=None,
                 spec=None):
        self.logger = connections.logger or contrail_logging.getLogger(
            __name__)
        self.name = name or metadata.get('name') or get_random_name('service')
        self.namespace = namespace
        self.k8s_client = connections.k8s_client
        self.vnc_api_h = connections.vnc_lib
        self.metadata = {} if metadata is None else metadata
        self.spec = {} if spec is None else spec
        self.v1_h = self.k8s_client.v1_h
        self.connections = connections

        self.already_exists = None
Code Example #39
0
File: config.py Project: mmithun/contrail-test-ci
 def create_service_vms(self, vns, service_mode='transparent', max_inst=1,
         svc_img_name=None, service_type='firewall',
         hosts=[]):
     non_docker_zones = [x for x in self.nova_h.zones if x != 'nova/docker']
     svm_fixtures = []
     svc_img_name = svc_img_name or SVC_TYPE_PROPS[service_type][service_mode]
     for i in range(max_inst):
         svm_name = get_random_name("pt_svm" + str(i))
         svm_fixture = self.config_and_verify_vm(
             svm_name,
             image_name=svc_img_name,
             vns=vns,
             node_name=hosts[i%len(hosts)] if hosts else None,
             zone=random.choice(non_docker_zones))
         svm_fixtures.append(svm_fixture)
         if service_type == 'analyzer':
             svm_fixture.disable_interface_policy()
     return svm_fixtures
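A hedged usage sketch for the helper above, assuming the management, left and right VN fixtures already exist and that SVC_TYPE_PROPS maps ('firewall', 'in-network') to a bootable service image in this environment.

 def config_firewall_svms_sketch(self, mgmt_vn, left_vn, right_vn):
     # Sketch only: two in-network firewall SVMs, zone chosen by the helper
     return self.create_service_vms(vns=[mgmt_vn, left_vn, right_vn],
                                    service_mode='in-network',
                                    service_type='firewall',
                                    max_inst=2)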
Code Example #40
0
File: base.py Project: mmithun/contrail-test-ci
 def create_st(self, connections=None, verify=True):
     connections = connections or self.connections
     st_fixture = self.create_fixture(SvcTemplateFixture,
         connections=connections,
         inputs=connections.inputs,
         domain_name=connections.domain_name,
         st_name=get_random_name(connections.project_name),
         svc_img_name='tiny_nat_fw',
         svc_type='firewall',
         if_list=[['management', False, False],
                  ['left', False, False],
                  ['right', False, False]],
         svc_mode='in-network-nat',
         svc_scaling=False,
         ordered_interfaces=True)
     if st_fixture and verify:
         assert st_fixture.verify_on_setup(), 'ST verification failed'
     return st_fixture
Code Example #41
0
 def config_basic(self):
     vn_name = get_random_name('bgpaas_vn')
     vn_subnets = [get_random_cidr()]
     vn_fixture = self.create_vn(vn_name, vn_subnets)
     test_vm = self.create_vm(vn_fixture,
                              'test_vm',
                              image_name='ubuntu-traffic')
     assert test_vm.wait_till_vm_is_up()
     bgpaas_vm1 = self.create_vm(vn_fixture,
                                 'bgpaas_vm1',
                                 image_name='vsrx')
     assert bgpaas_vm1.wait_till_vm_is_up()
     ret_dict = {
         'vn_fixture': vn_fixture,
         'test_vm': test_vm,
         'bgpaas_vm1': bgpaas_vm1,
     }
     return ret_dict
Code Example #42
0
 def __init__(self, **kwargs):
     super(HealthCheckFixture, self).__init__(self, **kwargs)
     self.name = kwargs.get('name') or get_random_name(self.project_name)
     self.uuid = kwargs.get('uuid', None)
     self.hc_type = kwargs.get('hc_type') or 'link-local'
     self.status = kwargs.get('enabled') or True
     self.probe_type = kwargs.get('probe_type') or 'PING'
     self.delay = kwargs.get('delay', None)
     self.timeout = kwargs.get('timeout', None)
     self.max_retries = kwargs.get('max_retries', None)
     self.http_method = kwargs.get('http_method', None)
     self.http_url = kwargs.get('http_url', 'local-ip')
     self.http_codes = kwargs.get('http_codes', None)
     self.created = False
     if self.inputs.verify_thru_gui():
         self.browser = self.connections.browser
         self.browser_openstack = self.connections.browser_openstack
         self.webui = WebuiTest(self.connections, self.inputs)
Code Example #43
0
    def __init__(self, *args, **kwargs):
        '''
        queue_uuid : UUID of QosQueue object
        '''
        super(QosForwardingClassFixture, self).__init__(self, *args, **kwargs)
        self.name = kwargs.get('name', get_random_name('fc'))
        self.fc_id = kwargs.get('fc_id', None)
        self.dscp = kwargs.get('dscp', None)
        self.dot1p = kwargs.get('dot1p', None)
        self.exp = kwargs.get('exp', None)
        self.uuid = kwargs.get('uuid', None)
        self.queue_uuid = kwargs.get('queue_uuid', None)

        self.is_already_present = False
        self.obj = None
        self.fq_name = None
        self.verify_is_run = False
        self.id = {}
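A hedged sketch of constructing the forwarding-class fixture above from a test, following the useFixture pattern used elsewhere in these examples; the DSCP value and fc_id are arbitrary, and passing connections this way is assumed to be accepted by the base fixture.

    # Sketch only: map forwarding-class id 1 to DSCP 46 (EF)
    fc_fixture = self.useFixture(
        QosForwardingClassFixture(connections=self.connections,
                                  name='fc-ef',
                                  fc_id=1,
                                  dscp=46))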
Code Example #44
0
    def setup_http_service(self,
                           name=None,
                           namespace='default',
                           labels=None,
                           metadata=None,
                           spec=None,
                           type=None,
                           external_ips=None,
                           frontend_port=80,
                           nodePort=None,
                           backend_port=80):
        '''
        A simple helper method to create a service

        Note: nginx continues to listen on port 80 even if the target port
        is different, so it is recommended not to change backend_port for now.
        '''
        metadata = metadata or {}
        spec = spec or {}
        name = name or get_random_name('nginx-svc')
        metadata.update({'name': name})
        selector_dict = {}
        labels = labels or {}
        d1 = {'protocol': 'TCP', 'port': int(frontend_port), 'targetPort': int(backend_port)}
        if nodePort:
            d1['nodePort'] = int(nodePort)
        spec.update({
            'ports': [d1]
        })
        if labels:
            selector_dict = {'selector': labels}
            spec.update(selector_dict)
        if type:
            type_dict = {'type': type}
            spec.update(type_dict)
        if external_ips:
            external_ips_dict = {'external_i_ps': external_ips}
            spec.update(external_ips_dict)
        return self.useFixture(ServiceFixture(
            connections=self.connections,
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec))
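A brief sketch of driving the helper above to expose pods through a NodePort service; the label, namespace and port values are placeholders.

    # Sketch only: front pods labelled app=webserver on node port 30080
    service = self.setup_http_service(namespace='default',
                                      labels={'app': 'webserver'},
                                      type='NodePort',
                                      frontend_port=80,
                                      nodePort=30080,
                                      backend_port=80)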
Code Example #45
0
    def test_basic_policy_allow_deny(self):
        '''
        Create 2 VNs, allow ICMP traffic between them and validate with pings
        Update the policy to deny the same traffic
        Check that pings fail
        '''
        vn1_fixture = self.create_vn()
        vn2_fixture = self.create_vn()
        #        vn1_name = get_random_name('vn1')
        #        vn1_subnets = ['192.168.10.0/24']
        policy_name = get_random_name('policy1')
        rules = [
            {
                'direction': '<>',
                'simple_action': 'pass',
                'protocol': 'icmp',
                'source_network': vn1_fixture.vn_name,
                'dest_network': vn2_fixture.vn_name,
            },
        ]

        policy_fixture = self.setup_policy_between_vns(vn1_fixture,
                                                       vn2_fixture, rules)
        assert vn1_fixture.verify_on_setup()
        assert vn2_fixture.verify_on_setup()

        vm1_fixture = self.create_vm(vn1_fixture)
        vm2_fixture = self.create_vm(vn2_fixture)
        vm1_fixture.wait_till_vm_is_up()
        vm2_fixture.wait_till_vm_is_up()
        assert vm1_fixture.ping_with_certainty(
            vm2_fixture.vm_ip), ('Ping failed between VNs with allow-policy')

        # Deny the same traffic
        policy_id = policy_fixture.policy_obj['policy']['id']
        rules[0]['simple_action'] = 'deny'
        policy_entries = policy_fixture.policy_obj['policy']['entries']
        policy_entries['policy_rule'][0]['action_list'][
            'simple_action'] = 'deny'
        p_rules = {'policy': {'entries': policy_entries}}
        policy_fixture.update_policy(policy_id, p_rules)
        assert vm1_fixture.ping_with_certainty(
            vm2_fixture.vm_ip,
            expectation=False), ('Ping passed between VNs with deny-policy')
Code Example #46
0
File: test_domain.py Project: nuthanc/tf-test
 def test_perms_with_diff_users_in_diff_domains(self):
     ''' 1) Create domain d1, user1 and project1
         2) Attach user1 to d1 and project1 as 'admin'
         3) Try to create domain d2 with d1 creds; it should not be allowed
         4) Create domain d2, user2 and project2
         5) Attach user2 to domain d2 as 'admin' and to project2 as '_member_'
         6) Create VN1 under Project1
         7) project2 shouldn't be able to read VN1 using project2 creds'''
     username1 = get_random_name('TestUser-1')
     password1 = get_random_name('TestUser-1')
     username2 = get_random_name('TestUser-2')
     password2 = get_random_name('TestUser-2')
     project_name1 = get_random_name('TestProject-1')
     project_name2 = get_random_name('TestProject-2')
     domain1 = get_random_name('TestDomain-1')
     domain2 = get_random_name('TestDomain-2')
     domain_fix1 = self.create_domain(domain1)
     project_fix1 = self.create_project(
         domain1,project_name1,username1,password1)
     self.admin_connections.auth.create_user(user=username1, password=password1,
         tenant_name=project_name1, domain_name=domain1)
     self.admin_connections.auth.add_user_to_domain(username1,'admin',domain1)
     domain_fix1.set_user_creds(username1,password1)
     self.admin_connections.auth.add_user_to_project(username1,project_name1,'admin')
     domain1_conn = domain_fix1.get_domain_connections(username1,password1,project_name1)
     try:
         obj = domain1_conn.auth.create_domain(domain_name=domain1)
     except:
         obj = None
     assert not obj, 'Domain created with user domain creds, it should not be allowed. Test failed'
     domain_fix2 = self.create_domain(domain2)
     project_fix2 = self.create_project(
         domain2,project_name2,username2,password2)
     self.admin_connections.auth.create_user(user=username2, password=password2,
         tenant_name=project_name2, domain_name=domain2)
     self.admin_connections.auth.add_user_to_domain(username2,'admin',domain2)
     self.admin_connections.auth.add_user_to_project(username2,project_name2,'_member_')
     proj_conn1 = project_fix1.get_project_connections()
     proj_conn2 = project_fix2.get_project_connections()
     vn1_fixture = self.create_vn(project_fix1,proj_conn1,'p1-vn1',['10.2.2.0/24'])
     assert not self.read_vn(proj_conn2,vn1_fixture.uuid)
Code Example #47
0
 def create_bgpaas(self,
                   bgpaas_shared='false',
                   autonomous_system='64512',
                   bgpaas_ip_address=None,
                   address_families=['inet', 'inet6'],
                   verify=True):
     '''
     Calls the BGPaaS Fixture to create the object
     '''
     bgpaas_fixture = self.useFixture(
         BGPaaSFixture(connections=self.connections,
                       name=get_random_name(self.project_name),
                       bgpaas_shared=bgpaas_shared,
                       autonomous_system=autonomous_system,
                       bgpaas_ip_address=bgpaas_ip_address,
                       address_families=address_families))
     if verify:
         bgpaas_fixture.verify_on_setup()
     return bgpaas_fixture
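A hedged sketch tying this helper to the config_basic() flow in Code Example #41: create the BGPaaS object peering with the vSRX VM's address; binding the VM interface to the BGPaaS object is assumed to be done by separate helpers not shown here.

 # Sketch only: reuse the fixtures returned by config_basic()
 ret_dict = self.config_basic()
 bgpaas_vm1 = ret_dict['bgpaas_vm1']
 bgpaas_fixture = self.create_bgpaas(bgpaas_shared='true',
                                     autonomous_system='64600',
                                     bgpaas_ip_address=bgpaas_vm1.vm_ip,
                                     address_families=['inet'])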
Code Example #48
0
    def __init__(self, connections, **kwargs):
        domain = connections.domain_name
        prj = kwargs.get('project_name') or connections.project_name
        prj_fqn = domain + ':' + prj
        name = kwargs.get('ipam_name')
        self._api = kwargs.get('option', 'contrail')
        self.inputs = connections.inputs

        if name:
            uid = self._check_if_present(connections, name, [domain, prj])
            if uid:
                super(IPAMFixture, self).__init__(connections=connections,
                                                  uuid=uid)
                return
        else:
            name = get_random_name(prj)
        self._construct_contrail_params(name, prj_fqn, kwargs)
        super(IPAMFixture, self).__init__(connections=connections,
                                          params=self._params)
Code Example #49
0
 def create_only_vm(cls,
                    vn_fixture,
                    vm_name=None,
                    node_name=None,
                    flavor='contrail_flavor_small',
                    image_name='ubuntu-traffic',
                    port_ids=[]):
     if not vm_name:
         vm_name = 'vm-%s' % (get_random_name(vn_fixture.vn_name))
     vm_obj = VMFixture(project_name=cls.inputs.project_name,
                        connections=cls.connections,
                        vn_obj=vn_fixture.obj,
                        vm_name=vm_name,
                        image_name=image_name,
                        flavor=flavor,
                        node_name=node_name,
                        port_ids=port_ids)
     vm_obj.setUp()
     return vm_obj
Code Example #50
0
 def setup_interface_route_table(self,
                                 obj=None,
                                 name=None,
                                 cleanup=True,
                                 **kwargs):
     '''
     Create interface route table and optionally add it to obj
     obj : Example : PortFixture instance
     '''
     name = name or get_random_name('irtb')
     intf_route_table = InterfaceRouteTableFixture(
         name=name, cleanup=cleanup, connections=self.connections, **kwargs)
     intf_route_table.setUp()
     if cleanup:
         self.sleep(1)
         self.addCleanup(intf_route_table.cleanUp)
     if obj:
         self.add_interface_route_table(obj, intf_route_table.obj, cleanup)
     return intf_route_table
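A minimal sketch of the helper above, assuming port_fixture is an existing PortFixture instance and that the underlying InterfaceRouteTableFixture accepts a prefixes keyword through **kwargs (an assumption, not shown in the snippet).

 # Sketch only: attach a route table with one static prefix to a port
 irt = self.setup_interface_route_table(obj=port_fixture,
                                        name='static-routes',
                                        prefixes=['20.20.20.0/24'])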
Code Example #51
0
File: config.py Project: nuthanc/tf-test
 def create_service_vms(self, vns, service_mode='transparent', max_inst=1,
         svc_img_name=None, service_type='firewall',
         hosts=[]):
     valid_zones = [x for x in self.orch.get_zones() if x not in ['nova/docker', 'nova-baremetal']]
     svm_fixtures = []
     svc_img_name = svc_img_name or SVC_TYPE_PROPS[service_type][service_mode]
     for i in range(max_inst):
         svm_name = get_random_name("pt_svm" + str(i))
         svm_fixture = self.config_vm_only(
             svm_name,
             image_name=svc_img_name,
             vns=vns,
             node_name=hosts[i%len(hosts)] if hosts else None,
             zone=random.choice(valid_zones))
         svm_fixtures.append(svm_fixture)
         if service_type == 'analyzer':
             svm_fixture.disable_interface_policy()
     self.verify_vms(svm_fixtures)
     return svm_fixtures
Code Example #52
0
    def __init__(self, *args, **kwargs):
        super(QosConfigFixture, self).__init__(self, *args, **kwargs)
        self.name = kwargs.get('name') or get_random_name('qos_config')
        self.qos_config_type = kwargs.get('qos_config_type') or 'project'
        self.uuid = kwargs.get('uuid', None)
        self.dscp_mapping = kwargs.get('dscp_mapping', {})
        self.dot1p_mapping = kwargs.get('dot1p_mapping', {})
        self.exp_mapping = kwargs.get('exp_mapping', {})
        self.vmi_uuid = kwargs.get('vmi_uuid', None)
        self.vn_uuid = kwargs.get('vn_uuid', None)
        self.default_fc_id = kwargs.get('default_fc_id', 0)

        self.is_already_present = False
        self.parent_obj = None
        self.id = {}

        if self.inputs.verify_thru_gui():
            self.webui = WebuiTest(self.connections, self.inputs)
            self.global_flag = args[0]
Code Example #53
0
 def _create_fixed_ips(self):
     lst = []
     for ip in getattr(self, '_fixed_ips'):
         args = {
             'type': 'OS::ContrailV2::InstanceIp',
             'name': get_random_name(),
             # The original snippet referenced an undefined 'kwargs' here;
             # the subnet id is assumed to come from the fixed-ip dict itself.
             'subnet_uuid': ip.get('subnet_id'),
             'virtual_machine_interface_refs': [self.uuid],
             'virtual_network_refs': [self._vn_id],
         }
         if 'ip_address' in ip:
             args['instance_ip_address'] = ip['ip_address']
         args['instance_ip_secondary'] = ip.get('instance_ip_secondary',
                                                 False)
         fix = self.useFixture(InstanceIpFixture(
             connections=self.connections,
             params=args))
         lst.append(fix)
     self._fixed_ips = lst
Code Example #54
0
File: bms_fixture.py Project: pltf/contrail-test
 def __init__(self, connections, name, is_ironic_node=False, **kwargs):
     ''' Either VN or Port Fixture is mandatory '''
     self.connections = connections
     self.inputs = connections.inputs
     self.logger = connections.logger
     self.name = name
     self.is_ironic_node = is_ironic_node
     bms_dict = self.inputs.bms_data[name]
     self.interfaces = kwargs.get('interfaces') or bms_dict['interfaces']
     self.mgmt_ip = kwargs.get('mgmt_ip') or bms_dict[
         'mgmt_ip']  # Host IP, optional
     self.username = kwargs.get('username') or bms_dict['username']
     self.password = kwargs.get('password') or bms_dict['password']
     self.namespace = get_random_name('ns')
     self.bms_ip = kwargs.get('bms_ip')  # BMS VMI IP
     self.bms_ip6 = kwargs.get('bms_ip6')  # BMS VMI IPv6
     self.bms_ip_netmask = kwargs.get('bms_ip_netmask', None)
     self.bms_ip6_netmask = kwargs.get('bms_ip6_netmask', None)
     self.vn_fixture = kwargs.get('vn_fixture')
     self.bms_gw_ip = kwargs.get('bms_gw_ip', None)
     self.bms_gw_ip6 = kwargs.get('bms_gw_ip6', None)
     self.bms_mac = kwargs.get('bms_mac')  # BMS VMI Mac
     self.static_ip = kwargs.get('static_ip',
                                 bool(not self.inputs.get_csn()))
     self.port_fixture = kwargs.get('port_fixture')
     self.fabric_fixture = kwargs.get('fabric_fixture')
     self.security_groups = kwargs.get('security_groups')  #UUID List
     self.vnc_h = connections.orch.vnc_h
     self.vlan_id = self.port_fixture.vlan_id if self.port_fixture else \
                    kwargs.get('vlan_id') or 0
     self.port_profiles = kwargs.get('port_profiles') or list()
     self.tor_port_vlan_tag = kwargs.get('tor_port_vlan_tag')
     self._port_group_name = kwargs.get('port_group_name', None)
     self.bond_name = kwargs.get(
         'bond_name') or 'bond%s' % get_random_string(
             2, chars=string.ascii_letters)
     self.bms_created = False
     self.bond_created = False
     self.mvlanintf = None
     self._interface = None
     self.ironic_node_obj = None
     self.ironic_node_id = None
     self.copied_files = dict()
Code Example #55
0
 def onboard_existing_fabric(self, fabric_dict, wait_for_finish=True):
     interfaces = {'physical': [], 'logical': []}
     devices = list()
     name = get_random_name('fabric')
     fq_name = ['default-global-system-config',
                'existing_fabric_onboard_template']
     payload = {'fabric_fq_name': ["default-global-system-config", name],
                'node_profiles': [{"node_profile_name": profile}
                    for profile in fabric_dict.get('node_profiles')\
                                   or NODE_PROFILES],
                'device_auth': [{"username": cred['username'],
                                 "password": cred['password']}
                    for cred in fabric_dict['credentials']],
                'overlay_ibgp_asn': fabric_dict['namespaces']['asn'][0]['min'],
                'management_subnets': [{"cidr": mgmt["cidr"]}
                    for mgmt in fabric_dict['namespaces']['management']]
               }
     self.logger.info('Onboarding existing fabric %s %s'%(name, payload))
     execution_id = self.vnc_h.execute_job(fq_name, payload)
     status, fabric = self._get_fabric_fixture(name)
     assert fabric, 'Create fabric seems to have failed'
     self.addCleanup(self.cleanup_fabric, fabric, devices, interfaces)
     if wait_for_finish:
         status = self.wait_for_job_to_finish(':'.join(fq_name), execution_id)
         assert status, 'job %s to create fabric failed'%execution_id
         for device in fabric.fetch_associated_devices() or []:
             device_fixture = PhysicalDeviceFixture(connections=self.connections,
                                                    name=device)
             device_fixture.read()
             device_fixture.add_csn()
             devices.append(device_fixture)
         for device in devices:
             for port in device.get_physical_ports():
                 pif = PhysicalInterfaceFixture(uuid=port, connections=self.connections)
                 pif.read()
                 interfaces['physical'].append(pif)
         for pif in interfaces['physical']:
             for port in pif.get_logical_ports():
                 lif = LogicalInterfaceFixture(uuid=port, connections=self.connections)
                 lif.read()
                 interfaces['logical'].append(lif)
     return (fabric, devices, interfaces)
Code Example #56
0
    def test_sg_multiproject(self):
        """
        Description: Test SG across projects
        Steps:
            1. define the topology for the test
            2. create the resources as defined in the topo
            3. verify the traffic
        Pass criteria: step 3 should pass
        """

        topology_class_name = None
        user = '******' + get_random_name()
        result = True
        msg = []
        if not topology_class_name:
            topology_class_name = sdn_sg_test_topo.sdn_topo_config_multiproject

        self.logger.info("Scenario for the test used is: %s" %
                         (topology_class_name))

        topo = topology_class_name(username=user, password=user)
        self.topo = topo

        #
        # Test setup: Configure policy, VN, & VM
        # return {'result':result, 'msg': err_msg, 'data': [self.topo, config_topo]}
        # Returned topo is of following format:
        # config_topo= {'policy': policy_fixt, 'vn': vn_fixture, 'vm': vm_fixture}
        topo_objs = {}
        config_topo = {}
        setup_obj = self.useFixture(sdnTopoSetupFixture(
            self.connections, topo))
        out = setup_obj.sdn_topo_setup(config_option=self.option)
        self.assertEqual(out['result'], True, out['msg'])
        if out['result']:
            topo_objs, config_topo, vm_fip_info = out['data']

        self.start_traffic_and_verify_multiproject(topo_objs,
                                                   config_topo,
                                                   traffic_reverse=False)

        return True
Code Example #57
0
File: namespace.py Project: vvelpula/tf-test
 def __init__(self, connections, name=None, isolation=False,
              custom_isolation = False, ip_fabric_snat=False, ip_fabric_forwarding=False, fq_network_name = None):
     self.connections = connections
     self.logger = connections.logger or contrail_logging.getLogger(
         __name__)
     self.name = name or get_random_name('namespace')
     self.k8s_client = connections.k8s_client
     self.vnc_api_h = connections.vnc_lib
     self.isolation = isolation
     self.ip_fabric_snat = ip_fabric_snat
     self.ip_fabric_forwarding = ip_fabric_forwarding
     self.custom_isolation = custom_isolation
     self.fq_network_name = fq_network_name
     self.api_s_obj = None
     self.project_name = None
     self.project_fq_name = None
     self.inputs = self.connections.inputs
     self.project_isolation = True
     self.verify_is_run = False
     self.created = False
Code Example #58
0
 def create_hc(self,
               hc_type='link-local',
               probe_type='PING',
               delay=3,
               timeout=5,
               max_retries=2,
               http_url='local-ip',
               verify=True):
     hc_fixture = self.useFixture(
         HealthCheckFixture(connections=self.connections,
                            name=get_random_name(self.project_name),
                            hc_type=hc_type,
                            delay=delay,
                            probe_type=probe_type,
                            timeout=timeout,
                            max_retries=max_retries,
                            http_url=http_url))
     if verify:
         hc_fixture.verify_on_setup()
     return hc_fixture
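A short sketch of an HTTP-probe variant created through the helper above; accepting 'HTTP' as probe_type is an assumption based on the http_url parameter, not something shown in the snippet.

 # Sketch only: an HTTP health check polling the instance's local IP
 hc_fixture = self.create_hc(probe_type='HTTP',
                             http_url='local-ip',
                             delay=2,
                             timeout=3,
                             max_retries=3)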
Code Example #59
0
    def __init__ (self, connections,
                 **kwargs):
        name = kwargs.get('name', None)
        domain = 'default-global-system-config'
        project = 'default-global-qos-config'
        prj_fqn = domain + ':' + project
        self._api = kwargs.get('option', 'quantum')
        self.inputs = connections.inputs

        if name:
            uid = self._check_if_present(connections, name, [domain, project])
            if uid:
                super(QosQueueFixture, self).__init__(connections=connections,
                                                      uuid=uid)
                return
        else:
            name = get_random_name("QosQueue")
        self._construct_contrail_params(name, prj_fqn, kwargs)
        super(QosQueueFixture, self).__init__(connections=connections,
                                       params=self._params)
Code Example #60
0
def start_tcpdump_for_vm_intf(obj, vm_fix, vn_fq_name, filters='-v', pcap_on_vm=False, vm_intf='eth0', svm=False):
    if not pcap_on_vm:
        compute_ip = vm_fix.vm_node_ip
        compute_user = obj.inputs.host_data[compute_ip]['username']
        compute_password = obj.inputs.host_data[compute_ip]['password']
        vm_tapintf = obj.orch.get_vm_tap_interface(vm_fix.tap_intf[vn_fq_name])
        return start_tcpdump_for_intf(compute_ip, compute_user,
            compute_password, vm_tapintf, filters, logger=obj.logger)
    else:
        pcap = '/tmp/%s.pcap' % (get_random_name())
        tcpdump_cmd = 'tcpdump -ni %s -U %s -w %s 1>/dev/null 2>/dev/null'
        if svm:
            tcpdump_cmd = '/usr/local/sbin/' + tcpdump_cmd
        cmd_to_tcpdump = [ tcpdump_cmd % (vm_intf, filters, pcap) ]
        pidfile = pcap + '.pid'
        vm_fix_pcap_pid_files =[]
        for vm_fixture in vm_fix:
            vm_fixture.run_cmd_on_vm(cmds=cmd_to_tcpdump, as_daemon=True, pidfile=pidfile, as_sudo=True)
            vm_fix_pcap_pid_files.append((vm_fixture, pcap, pidfile))
        return vm_fix_pcap_pid_files
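A hedged usage sketch for the function above: one call captures on the compute node's tap interface, the other captures inside the guest VMs with pcap_on_vm=True; the filters and interface names are placeholders, and stopping or collecting the captures is assumed to be handled by matching stop/verify helpers not shown here.

# Sketch only: compute-node capture on the VM's tap interface
handle = start_tcpdump_for_vm_intf(self, vm1_fixture,
                                   vn1_fixture.vn_fq_name, filters='-v icmp')

# Sketch only: capture inside the guest VMs themselves (returns (vm, pcap, pidfile) tuples)
vm_pcap_pid_files = start_tcpdump_for_vm_intf(self, [vm1_fixture, vm2_fixture],
                                              None, filters='icmp',
                                              pcap_on_vm=True, vm_intf='eth0')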