Code Example #1
 def restart_dhcp_server(cls, file_name, fabric_name, job_ctx):
     vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                      auth_token=job_ctx.get('auth_token'))
     headers = {
         'fabric_name': fabric_name,
         'file_name': file_name,
         'action': 'delete'
     }
     vnc_api.amqp_publish(exchange=cls.ZTP_EXCHANGE,
         exchange_type=cls.ZTP_EXCHANGE_TYPE,
         routing_key=cls.ZTP_REQUEST_ROUTING_KEY, headers=headers,
         payload={})
     return { 'status': 'success' }
Code Example #2
 def _publish_file(cls, name, contents, action, routing_key,
                   fabric_name, job_ctx):
     vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                      auth_token=job_ctx.get('auth_token'))
     headers = {
         'fabric_name': fabric_name,
         'file_name': name,
         'action': action
     }
     vnc_api.amqp_publish(exchange=cls.ZTP_EXCHANGE,
         exchange_type=cls.ZTP_EXCHANGE_TYPE,
         routing_key=routing_key, headers=headers,
         payload=contents)
     return { 'status': 'success' }
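
Both helpers above wrap the same amqp_publish pattern and differ only in the routing key and payload they send. A minimal standalone sketch of that pattern follows; the exchange name, exchange type, routing key, fabric name, file name, and token are placeholders, not values taken from these examples.

# Minimal sketch of the publish pattern used by the two helpers above.
# All literal values here are assumptions for illustration only.
from vnc_api.vnc_api import VncApi

vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                 auth_token='<keystone-token>')          # normally job_ctx.get('auth_token')
vnc_api.amqp_publish(exchange='device_ztp_exchange',     # assumed exchange name
                     exchange_type='direct',             # assumed exchange type
                     routing_key='device_ztp.request',   # assumed routing key
                     headers={'fabric_name': 'fab01',
                              'file_name': 'dhcpd.conf',
                              'action': 'create'},
                     payload='<file contents>')          # whatever _publish_file gets as `contents`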
Code Example #3
    def __init__(self):
        admin_user = cfg.CONF.keystone_authtoken.admin_user
        admin_password = cfg.CONF.keystone_authtoken.admin_password
        admin_tenant_name = cfg.CONF.keystone_authtoken.admin_tenant_name
        api_srvr_ip = cfg.CONF.APISERVER.api_server_ip
        api_srvr_port = cfg.CONF.APISERVER.api_server_port
        api_srvr_use_ssl = cfg.CONF.APISERVER.use_ssl
        try:
            auth_host = cfg.CONF.keystone_authtoken.auth_host
        except cfg.NoSuchOptError:
            auth_host = "127.0.0.1"

        try:
            auth_protocol = cfg.CONF.keystone_authtoken.auth_protocol
        except cfg.NoSuchOptError:
            auth_protocol = "http"

        try:
            auth_port = cfg.CONF.keystone_authtoken.auth_port
        except cfg.NoSuchOptError:
            auth_port = "35357"

        try:
            auth_url = cfg.CONF.keystone_authtoken.auth_url
        except cfg.NoSuchOptError:
            auth_url = "/v2.0/tokens"

        try:
            auth_type = cfg.CONF.keystone_authtoken.auth_type
        except cfg.NoSuchOptError:
            auth_type = "keystone"

        try:
            api_server_url = cfg.CONF.APISERVER.api_server_url
        except cfg.NoSuchOptError:
            api_server_url = "/"

        # Retry till an api-server is up
        connected = False
        while not connected:
            try:
                self._api = VncApi(admin_user, admin_password, admin_tenant_name,
                                   api_srvr_ip, api_srvr_port, api_server_url,
                                   auth_host=auth_host, auth_port=auth_port,
                                   auth_protocol=auth_protocol, auth_url=auth_url,
                                   auth_type=auth_type, wait_for_connect=True,
                                   api_server_use_ssl=api_srvr_use_ssl)
                connected = True
            except requests.exceptions.RequestException:
                time.sleep(3)

        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)
Code Example #4
 def read_dhcp_leases(cls, ipam_subnets, file_name, fabric_name, job_ctx,
                      payload_key, payload_value, action='create'):
     vnc_api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                      auth_token=job_ctx.get('auth_token'))
     headers = {
         'fabric_name': fabric_name,
         'file_name': file_name,
         'action': action
     }
     payload = {
         'ipam_subnets': ipam_subnets
     }
     payload[payload_key] = payload_value
     return vnc_api.amqp_request(exchange=cls.ZTP_EXCHANGE,
         exchange_type=cls.ZTP_EXCHANGE_TYPE,
         routing_key=cls.ZTP_REQUEST_ROUTING_KEY,
         response_key=cls.ZTP_RESPONSE_ROUTING_KEY + fabric_name,
         headers=headers, payload=payload)
Code Example #5
    def get_ztp_dhcp_config(cls, job_ctx, fabric_uuid):
        dhcp_config = {}
        try:
            vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                            auth_token=job_ctx.get('auth_token'))
            fabric = vncapi.fabric_read(id=fabric_uuid)
            fabric_dict = vncapi.obj_to_dict(fabric)

            # From here we get the 'management' type virtual network
            vn_uuid = None
            virtual_network_refs = fabric_dict.get('virtual_network_refs') or []
            for virtual_net_ref in virtual_network_refs:
                if 'management' in virtual_net_ref['attr']['network_type']:
                    vn_uuid = virtual_net_ref['uuid']
                    break
            if vn_uuid is None:
                raise NoIdError("Cannot find mgmt virtual network on fabric")

            virtual_net = vncapi.virtual_network_read(id=vn_uuid)
            virtual_net_dict = vncapi.obj_to_dict(virtual_net)

            # Get the IPAM attached to the virtual network
            ipam_refs = virtual_net_dict.get('network_ipam_refs')
            if ipam_refs:
                ipam_ref = ipam_refs[0]
                ipam = vncapi.network_ipam_read(id=ipam_ref['uuid'])
                ipam_dict = vncapi.obj_to_dict(ipam)
                ipam_subnets = ipam_dict.get('ipam_subnets')
                if ipam_subnets:
                    dhcp_config['ipam_subnets'] = ipam_subnets.get('subnets')

            # Get static ip configuration for physical routers
            pr_refs = fabric.get_physical_router_back_refs()
            pr_uuids = [ref['uuid'] for ref in pr_refs]
            static_ips = {}
            for pr_uuid in pr_uuids:
                pr = vncapi.physical_router_read(id=pr_uuid)
                pr_dict = vncapi.obj_to_dict(pr)
                mac = pr_dict.get('physical_router_management_mac')
                ip = pr_dict.get('physical_router_management_ip')
                if mac and ip:
                    static_ips[ip] = mac
            if static_ips:
                dhcp_config['static_ips'] = static_ips
        except Exception as ex:
            logging.error("Error getting ZTP DHCP configuration: {}".format(ex))

        return dhcp_config
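
For reference, the dict returned by get_ztp_dhcp_config has roughly the shape sketched below; the keys mirror the code above, but all values are invented for illustration.

# Illustrative shape of the returned dhcp_config (placeholder values only):
dhcp_config = {
    'ipam_subnets': [
        {'subnet': {'ip_prefix': '10.1.1.0', 'ip_prefix_len': 24},
         'default_gateway': '10.1.1.1'}
    ],
    'static_ips': {
        '10.1.1.11': '00:11:22:33:44:55'   # management IP -> management MAC
    }
}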
Code Example #6
 def __init__(self, user, passwd, project_name, inputs, domain='default-domain'):
     self.inputs = inputs
     self.user = user
     self.passwd = passwd
     self.domain = domain
     self.project_name = project_name
     self.vnc = VncApi(username=user, password=passwd,
                       tenant_name=project_name,
                       api_server_host=self.inputs.cfgm_ip,
                       api_server_port=self.inputs.api_server_port)
Code Example #7
 def __init__(self):
     # TODO: parse configuration for api-server:port and auth
     self._api = VncApi()
     self._pool_manager = \
         loadbalancer_pool.LoadbalancerPoolManager(self._api)
     self._vip_manager = virtual_ip.VirtualIpManager(self._api)
     self._member_manager = \
         loadbalancer_member.LoadbalancerMemberManager(self._api)
     self._monitor_manager = \
         loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
             self._api)
Code Example #8
    def get_pr_subnet(cls, job_ctx, fabric_uuid, device_fq_name):
        api = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                     auth_token=job_ctx.get('auth_token'))
        fabric = api.fabric_read(id=fabric_uuid)
        fabric_dict = api.obj_to_dict(fabric)

        vn_uuid = None
        virtual_network_refs = fabric_dict.get('virtual_network_refs') or []
        for virtual_net_ref in virtual_network_refs:
            if 'management' in virtual_net_ref['attr']['network_type']:
                vn_uuid = virtual_net_ref['uuid']
                break
        if vn_uuid is None:
            raise NoIdError("Cannot find mgmt virtual network on fabric")

        virtual_net = api.virtual_network_read(id=vn_uuid)
        virtual_net_dict = api.obj_to_dict(virtual_net)

        subnets = None
        ipam_refs = virtual_net_dict.get('network_ipam_refs')
        if ipam_refs:
            ipam_ref = ipam_refs[0]
            ipam = api.network_ipam_read(id=ipam_ref['uuid'])
            ipam_dict = api.obj_to_dict(ipam)
            ipam_subnets = ipam_dict.get('ipam_subnets')
            if ipam_subnets:
                subnets = ipam_subnets.get('subnets')

        gateway = None
        cidr = None
        if subnets:
            pr = api.physical_router_read(fq_name=device_fq_name)
            pr_dict = api.obj_to_dict(pr)
            ip = pr_dict.get('physical_router_management_ip')
            ip_addr = IPAddress(ip)
            for subnet in subnets:
                inner_subnet = subnet.get('subnet')
                cidr = inner_subnet.get('ip_prefix') + '/' + str(inner_subnet.get('ip_prefix_len'))
                if ip_addr in IPNetwork(cidr) and subnet.get('default_gateway'):
                    gateway = subnet.get('default_gateway')
                    break
        if cidr and gateway:
            return { 'cidr': cidr, 'gateway': gateway }

        raise Error("Cannot find cidr and gateway for device: %s" % str(device_fq_name))
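
The subnet match in get_pr_subnet hinges on netaddr containment; the check it builds on can be exercised on its own (addresses are placeholders).

# Standalone version of the containment test used above (placeholder addresses).
from netaddr import IPAddress, IPNetwork

ip_addr = IPAddress('10.1.1.11')                 # a device management IP
print(ip_addr in IPNetwork('10.1.1.0/24'))       # True  -> this subnet's gateway is returned
print(ip_addr in IPNetwork('10.1.2.0/24'))       # False -> keep scanning the subnet list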
Code Example #9
    def get_ztp_tftp_config(cls, job_ctx, fabric_uuid):
        tftp_config = {}
        try:
            vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                            auth_token=job_ctx.get('auth_token'))
            fabric = vncapi.fabric_read(id=fabric_uuid)
            fabric_dict = vncapi.obj_to_dict(fabric)
            fabric_creds = fabric_dict.get('fabric_credentials')
            if fabric_creds:
                device_creds = fabric_creds.get('device_credential')
                if device_creds:
                    dev_cred = device_creds[0]
                    password = JobVncApi.decrypt_password(
                        encrypted_password=dev_cred['credential']['password'],
                        admin_password=job_ctx.get(
                            'vnc_api_init_params').get(
                            'admin_password'))
                    tftp_config['password'] = password
        except Exception as ex:
            logging.error("Error getting ZTP TFTP configuration: {}".format(ex))

        return tftp_config
Code Example #10
 def __init__(self):
     admin_user = cfg.CONF.keystone_authtoken.admin_user
     admin_password = cfg.CONF.keystone_authtoken.admin_password
     admin_tenant_name = cfg.CONF.keystone_authtoken.admin_tenant_name
     api_srvr_ip = cfg.CONF.APISERVER.api_server_ip
     api_srvr_port = cfg.CONF.APISERVER.api_server_port
     self._api = VncApi(admin_user, admin_password, admin_tenant_name,
                        api_srvr_ip, api_srvr_port, api_auth_protocol="https")
     self._pool_manager = \
         loadbalancer_pool.LoadbalancerPoolManager(self._api)
     self._vip_manager = virtual_ip.VirtualIpManager(self._api)
     self._member_manager = \
         loadbalancer_member.LoadbalancerMemberManager(self._api)
     self._monitor_manager = \
         loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
             self._api)
Code Example #11
def main():
    client=VncApi()
    virtual_networks=client.virtual_networks_list()
    virtual_machine_interfaces=client.virtual_machine_interfaces_list()
    virtual_machines=client.virtual_machines_list()
    instance_ips=client.instance_ips_list()
    projects=client.projects_list()
    domains=client.domains_list()
    floating_ip_pools=client.floating_ip_pools_list()
    access_control_lists=client.access_control_lists_list()
    print domains
    for project in projects['projects']:
        id=project['uuid']
        name=project['fq_name']
        print id,name
    print projects
    print virtual_networks
    print virtual_machine_interfaces
    print virtual_machines
    print instance_ips
    print  access_control_lists
    print  floating_ip_pools
Code Example #12
    def __init__(self, cfg, name):
        if cfg is None:
            raise KeyError("Missing required args: cfg")
        if name is None:
            raise KeyError("Missing required args: name")

        self._name = name
        self._timeout = cfg['wait_for_job']['timeout']
        self._max_retries = cfg['wait_for_job']['max_retries']
        self._logger = SanityBase._init_logging(cfg['log'], name)
        self._api_server = cfg['api_server']
        self._analytics = cfg['analytics']
        self._api = VncApi(
            api_server_host=self._api_server['host'],
            api_server_port=self._api_server['port'],
            username=self._api_server['username'],
            password=self._api_server['password'],
            tenant_name=self._api_server['tenant'])
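
The constructor above pulls everything from a single cfg dict. A minimal sketch of the assumed layout is below; hosts, ports, and credentials are placeholders.

# Assumed cfg layout for the SanityBase constructor (placeholder values only).
cfg = {
    'api_server': {'host': '10.0.0.1', 'port': 8082,
                   'username': 'admin', 'password': '<password>',
                   'tenant': 'admin'},
    'analytics': {'host': '10.0.0.1', 'port': 8081},
    'wait_for_job': {'timeout': 10, 'max_retries': 30},
    'log': {'level': 'DEBUG',
            'console': 'INFO',
            'file': {'dir': '/var/log', 'level': 'DEBUG'}},
}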
Code Example #13
File: vcenter.py Project: Ankitja/contrail-test
class VcenterAuth(OrchestratorAuth):

    def __init__(self, user, passwd, project_name, inputs, domain='default-domain'):
        self.inputs = inputs
        self.user = user
        self.passwd = passwd
        self.domain = domain
        self.project_name = project_name
        use_ssl = self.inputs.api_protocol == 'https'
        self.vnc = VncApi(username=user, password=passwd,
                          tenant_name=project_name,
                          api_server_host=self.inputs.cfgm_ip,
                          api_server_port=self.inputs.api_server_port,
                          api_server_use_ssl=use_ssl)

    def get_project_id(self, project_name=None, domain_id=None):
       if not project_name:
           project_name = self.project_name
       fq_name = [unicode(self.domain), unicode(project_name)]
       obj = self.vnc.project_read(fq_name=fq_name)
       if obj:
           return obj.get_uuid()
       return None

    def reauth(self):
        raise Exception('Unimplemented interface')

    def create_project(self, name):
        raise Exception('Unimplemented interface')

    def delete_project(self, name):
        raise Exception('Unimplemented interface')

    def create_user(self, user, passwd):
        raise Exception('Unimplemented interface')

    def delete_user(self, user):
        raise Exception('Unimplemented interface')

    def add_user_to_project(self, user, project):
        raise Exception('Unimplemented interface')
Code Example #14
    def get_ztp_config(cls, job_ctx, fabric_uuid):
        ztp_config = {}
        try:
            vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                            auth_token=job_ctx.get('auth_token'))
            fabric = vncapi.fabric_read(id=fabric_uuid)
            fabric_dict = vncapi.obj_to_dict(fabric)
            fabric_creds = fabric_dict.get('fabric_credentials')
            if fabric_creds:
                device_creds = fabric_creds.get('device_credential')
                if device_creds:
                    dev_cred = device_creds[0]
                    ztp_config['password'] = dev_cred['credential']['password']

            # From here we get the 'management' type virtual network
            vn_uuid = None
            virtual_network_refs = fabric_dict.get('virtual_network_refs') or []
            for virtual_net_ref in virtual_network_refs:
                if "management" in virtual_net_ref['attr']['network_type']:
                    vn_uuid = virtual_net_ref['uuid']
                    break
            if vn_uuid is None:
                raise NoIdError("Cannot find mgmt virtual network on fabric")

            virtual_net = vncapi.virtual_network_read(id=vn_uuid)
            virtual_net_dict = vncapi.obj_to_dict(virtual_net)

            # Get the IPAM attached to the virtual network
            ipam_refs = virtual_net_dict.get('network_ipam_refs')
            if ipam_refs:
                ipam_ref = ipam_refs[0]
                ipam = vncapi.network_ipam_read(id=ipam_ref['uuid'])
                ipam_dict = vncapi.obj_to_dict(ipam)
                ipam_subnets = ipam_dict.get('ipam_subnets')
                if ipam_subnets:
                    ztp_config['ipam_subnets'] = ipam_subnets.get('subnets')
        except NoIdError:
            logging.error("Cannot find mgmt virtual network")
        except Exception as ex:
            logging.error("Error getting ZTP configuration: {}".format(ex))

        return ztp_config
Code Example #15
    def initial_processing(self, concurrent):
        self.serial_num_flag = False
        self.all_serial_num = []
        serial_num = []
        self.per_greenlet_percentage = None

        self.job_ctx['current_task_index'] = 2

        try:
            total_percent = self.job_ctx.get('playbook_job_percentage')
            if total_percent:
                total_percent = float(total_percent)

            # Calculate the total percentage for this entire greenlet-based task.
            # This equals the percentage allotted to this task in the weightage
            # array, taken out of the total job percentage. For example: if the
            # task weightage array is [10, 85, 5] and the total job % is 95,
            # then the 2nd task's effective total percentage is 85% of 95%.
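            # Worked through with those numbers: the 2nd task's share is
            #   95 * 85 / 100 = 80.75
            # i.e. 80.75% of the overall job is attributed to this task.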
            total_task_percentage = self.module.calculate_job_percentage(
                self.job_ctx.get('total_task_count'),
                task_seq_number=self.job_ctx.get('current_task_index'),
                total_percent=total_percent,
                task_weightage_array=self.job_ctx.get(
                    'task_weightage_array'))[0]

            # Based on the number of greenlets spawned (i.e num of sub tasks)
            # split the total_task_percentage equally amongst the greenlets.
            self.logger.info("Number of greenlets: {} and total_percent: "
                             "{}".format(concurrent, total_task_percentage))
            self.per_greenlet_percentage = \
                self.module.calculate_job_percentage(
                    concurrent, total_percent=total_task_percentage)[0]
            self.logger.info("Per greenlet percent: "
                             "{}".format(self.per_greenlet_percentage))

            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.logger.info("Percentage calculation failed with error "
                             "{}".format(str(ex)))

        try:
            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.module.results['failed'] = True
            self.module.results['msg'] = "Failed to connect to API server " \
                "due to error: %s"\
                % str(ex)
            self.module.exit_json(**self.module.results)

        # get credentials and serial number if greenfield
        if self.total_retry_timeout:
            # get device credentials
            fabric = self.vncapi.fabric_read(id=self.fabric_uuid)
            fabric_object = self.vncapi.obj_to_dict(fabric)
            self.credentials = fabric_object.get('fabric_credentials').get(
                'device_credential')

            # get serial numbers
            fabric_namespace_obj_list = self.vncapi.fabric_namespaces_list(
                parent_id=self.fabric_uuid, detail=True)
            fabric_namespace_list = self.vncapi.obj_to_dict(
                fabric_namespace_obj_list)

            for namespace in fabric_namespace_list:
                if namespace.get('fabric_namespace_type') == "SERIAL_NUM":
                    self.serial_num_flag = True
                    serial_num.append(namespace.get(
                        'fabric_namespace_value').get('serial_num'))

            if len(serial_num) > 1:
                for outer_list in serial_num:
                    for sn in outer_list:
                        self.all_serial_num.append(sn)

        else:
            self.credentials = self.module.params['credentials']

        for cred in self.credentials:
            if cred.get('credential', {}).get('password'):
                cred['credential']['password'] = JobVncApi.decrypt_password(
                    encrypted_password=cred.get('credential', {}).get('password'),
                    admin_password=self.job_ctx.get('vnc_api_init_params').get(
                        'admin_password'))
Code Example #16
        self.cidr_net, self.cidr_mask = cidr.split("/")

    def get_next_cidr(self):
        ip_network_next = self.ip_network.next()[0]
        ip_addr_next = ipaddr.IPAddress(ip_network_next)
        cidr = ip_addr_next._explode_shorthand_ip_string()
        cidr = cidr + "/" + self.cidr_mask
        self.ip_network = IPNetwork(cidr)
        return cidr


cidr_obj = CIDR(cidr)

vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.87.64.129',
                 api_server_port=8082,
                 auth_host='10.87.64.129')

vlan_tag = 2
proj_obj = vnc_lib.project_read(fq_name=tenant_name)
ipam_obj = vnc_lib.network_ipam_read(fq_name=ipam_fq_name)

sec_grp_obj = vnc_lib.security_group_read(
    fq_name=[u'default-domain', u'admin', u'default'])

hcs = proj_obj.get_service_health_checks()
for hc in hcs:
    #import pdb;pdb.set_trace()
    hc_obj = vnc_lib.service_health_check_read(id=hc['uuid'])
    prop = hc_obj.get_service_health_check_properties()
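
The get_next_cidr helper at the top of this example steps to the adjacent subnet of the same prefix length; the netaddr call it builds on can be tried on its own (addresses are placeholders).

# Standalone sketch of the "next adjacent CIDR" step (placeholder addresses).
from netaddr import IPNetwork

net = IPNetwork('10.1.1.0/24')
print(net.next())       # 10.1.2.0/24 - adjacent subnet of the same size
print(net.next()[0])    # 10.1.2.0    - its first address, as used in get_next_cidr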
Code Example #17
       self.cidr = cidr
       self.ip_network = IPNetwork(self.cidr)
       self.ip_addr = ipaddr.IPAddress(IPNetwork(self.cidr)[0])
       self.cidr_net,self.cidr_mask = cidr.split("/")

   def get_next_cidr(self):
       ip_network_next = self.ip_network.next()[0]
       ip_addr_next = ipaddr.IPAddress(ip_network_next)
       cidr = ip_addr_next._explode_shorthand_ip_string()
       cidr = cidr + "/" + self.cidr_mask
       self.ip_network = IPNetwork(cidr)
       return cidr

cidr_obj = CIDR(cidr)

vnc_lib = VncApi(username='******', password='******', tenant_name='admin', api_server_host='10.87.64.129', api_server_port=8082, auth_host='10.87.64.129')

vlan_tag = 2 
proj_obj = vnc_lib.project_read(fq_name=tenant_name)
ipam_obj = vnc_lib.network_ipam_read(fq_name=ipam_fq_name)

sec_grp_obj = vnc_lib.security_group_read(fq_name = [u'default-domain', u'admin', u'default'])


interfaces = proj_obj.get_virtual_machine_interfaces()
for intf in interfaces:
    for name in intf['to']:
        if 'vmi.st0' in name:
            continue
        if 'vmi' in name: 
            print intf
Code Example #18
 def _init_vnc_api(job_ctx):
     return VncApi(
         auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
         auth_token=job_ctx.get('auth_token')
     )
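
A caller inside a job playbook would typically pass the job context it receives straight through to this helper. A minimal, hypothetical invocation is sketched below; the token and fabric name are placeholders.

# Hypothetical usage of the helper above (placeholder token and fabric name).
job_ctx = {'auth_token': '<keystone-token>'}
vnc_api = _init_vnc_api(job_ctx)
fabric = vnc_api.fabric_read(fq_name=['default-global-system-config', 'fab01'])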
Code Example #19
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name = ['default-domain', 'admin']
ipam_fq_name = ['default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.87.64.129',
                 api_server_port=8082,
                 auth_host='5.5.5.251')

#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
#import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid'])
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1)
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)
Code Example #20
File: create_2.py Project: nuthanc/bgpaas_scale
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name  = ['default-domain','admin']
ipam_fq_name = [ 'default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******', password='******', tenant_name='admin', api_server_host='10.0.0.40', api_server_port=8082, auth_host='192.168.24.14')
#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
    #import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid']) 
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1) 
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)


sec_grp_obj = vnc_lib.security_group_read(fq_name = [u'default-domain', u'admin', u'default'])

vmi_fq_name  = [ 'default-domain', 'admin', '3c16b4a7-3613-41b0-828e-9b2e10e41003'] 
Code Example #21
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name = ['default-domain', 'admin']
ipam_fq_name = ['default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.87.64.129',
                 api_server_port=8082,
                 auth_host='10.87.64.129')

#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
#import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid'])
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1)
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)
Code Example #22
class LoadBalancerPluginDb(LoadBalancerPluginBase):

    def __init__(self):
        admin_user = cfg.CONF.keystone_authtoken.admin_user
        admin_password = cfg.CONF.keystone_authtoken.admin_password
        admin_tenant_name = cfg.CONF.keystone_authtoken.admin_tenant_name
        api_srvr_ip = cfg.CONF.APISERVER.api_server_ip
        api_srvr_port = cfg.CONF.APISERVER.api_server_port
        api_srvr_use_ssl = cfg.CONF.APISERVER.use_ssl
        try:
            auth_host = cfg.CONF.keystone_authtoken.auth_host
        except cfg.NoSuchOptError:
            auth_host = "127.0.0.1"

        try:
            auth_protocol = cfg.CONF.keystone_authtoken.auth_protocol
        except cfg.NoSuchOptError:
            auth_protocol = "http"

        try:
            auth_port = cfg.CONF.keystone_authtoken.auth_port
        except cfg.NoSuchOptError:
            auth_port = "35357"

        try:
            auth_url = cfg.CONF.keystone_authtoken.auth_url
        except cfg.NoSuchOptError:
            auth_url = "/v2.0/tokens"

        try:
            auth_type = cfg.CONF.keystone_authtoken.auth_type
        except cfg.NoSuchOptError:
            auth_type = "keystone"

        try:
            api_server_url = cfg.CONF.APISERVER.api_server_url
        except cfg.NoSuchOptError:
            api_server_url = "/"

        # Retry till an api-server is up
        connected = False
        while not connected:
            try:
                self._api = VncApi(admin_user, admin_password, admin_tenant_name,
                                   api_srvr_ip, api_srvr_port, api_server_url,
                                   auth_host=auth_host, auth_port=auth_port,
                                   auth_protocol=auth_protocol, auth_url=auth_url,
                                   auth_type=auth_type, wait_for_connect=True,
                                   api_server_use_ssl=api_srvr_use_ssl)
                connected = True
            except requests.exceptions.RequestException:
                time.sleep(3)

        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)

    def get_api_client(self):
        return self._api

    def get_vips(self, context, filters=None, fields=None):
        return self._vip_manager.get_collection(context, filters, fields)

    def get_vip(self, context, id, fields=None):
        return self._vip_manager.get_resource(context, id, fields)

    def create_vip(self, context, vip):
        try:
            return self._vip_manager.create(context, vip)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='vip', msg=str(ex))

    def update_vip(self, context, id, vip):
        return self._vip_manager.update(context, id, vip)

    def delete_vip(self, context, id):
        return self._vip_manager.delete(context, id)

    def get_pools(self, context, filters=None, fields=None):
        return self._pool_manager.get_collection(context, filters, fields)

    def get_pool(self, context, id, fields=None):
        return self._pool_manager.get_resource(context, id, fields)

    def create_pool(self, context, pool):
        try:
            return self._pool_manager.create(context, pool)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='pool', msg=str(ex))

    def update_pool(self, context, id, pool):
        return self._pool_manager.update(context, id, pool)

    def delete_pool(self, context, id):
        return self._pool_manager.delete(context, id)

    def stats(self, context, pool_id):
        stats = {
            'bytes_in': '0',
            'bytes_out': '0',
            'active_connections': '0',
            'total_connections': '0',
        }

        endpoint = "http://%s:%s" % (cfg.CONF.COLLECTOR.analytics_api_ip,
                                     cfg.CONF.COLLECTOR.analytics_api_port)
        analytics = analytics_client.Client(endpoint)
        path = "/analytics/uves/service-instance/"
        fqdn_uuid = "%s?cfilt=UveLoadbalancer" % pool_id
        try:
            lb_stats = analytics.request(path, fqdn_uuid)
            pool_stats = lb_stats['UveLoadbalancer']['pool_stats']
        except Exception:
            pool_stats = []

        for pool_stat in pool_stats:
            stats['bytes_in'] = str(int(stats['bytes_in']) + int(pool_stat['bytes_in']))
            stats['bytes_out'] = str(int(stats['bytes_out']) + int(pool_stat['bytes_out']))
            stats['active_connections'] = str(int(stats['active_connections']) + int(pool_stat['current_sessions']))
            stats['total_connections'] = str(int(stats['total_connections']) + int(pool_stat['total_sessions']))
        return {'stats': stats}

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """ Associate an health monitor with a pool.
        """
        m = health_monitor['health_monitor']
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except vnc_exc.NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=m['id'])
        except vnc_exc.NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=m['id'])

        if not context.is_admin:
            tenant_id = str(uuid.UUID(context.tenant_id))
            if tenant_id != pool.parent_uuid or \
                    tenant_id != monitor.parent_uuid:
                raise n_exc.NotAuthorized()

        pool_refs = monitor.get_loadbalancer_pool_back_refs()
        if pool_refs is not None:
            for ref in pool_refs:
                if ref['uuid'] == pool_id:
                    raise loadbalancer.PoolMonitorAssociationExists(
                        monitor_id=m['id'], pool_id=pool_id)

        pool.add_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

        res = {
            'id': monitor.uuid,
            'tenant_id': monitor.parent_uuid.replace('-', '')
        }
        return res

    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        """ Query a specific pool, health_monitor association.
        """
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except vnc_exc.NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        if not context.is_admin and tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

        res = {
            'pool_id': pool_id,
            'monitor_id': id,
            'status': self._pool_manager._get_object_status(pool),
            'tenant_id': pool.parent_uuid.replace('-', '')
        }
        return self._pool_manager._fields(res, fields)

    def delete_pool_health_monitor(self, context, id, pool_id):
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except vnc_exc.NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        if not context.is_admin and tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=id)
        except vnc_exc.NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs():
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

        pool.del_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

    def get_members(self, context, filters=None, fields=None):
        return self._member_manager.get_collection(context, filters, fields)

    def get_member(self, context, id, fields=None):
        return self._member_manager.get_resource(context, id, fields)

    def create_member(self, context, member):
        try:
            return self._member_manager.create(context, member)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='member', msg=str(ex))

    def update_member(self, context, id, member):
        return self._member_manager.update(context, id, member)

    def delete_member(self, context, id):
        return self._member_manager.delete(context, id)

    def get_health_monitors(self, context, filters=None, fields=None):
        return self._monitor_manager.get_collection(context, filters, fields)

    def get_health_monitor(self, context, id, fields=None):
        return self._monitor_manager.get_resource(context, id, fields)

    def create_health_monitor(self, context, health_monitor):
        try:
            return self._monitor_manager.create(context, health_monitor)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='health_monitor', msg=str(ex))

    def update_health_monitor(self, context, id, health_monitor):
        return self._monitor_manager.update(context, id, health_monitor)

    def delete_health_monitor(self, context, id):
        return self._monitor_manager.delete(context, id)
Code Example #23
class SanityBase(object):
    """Base class for fabric ansible sanity tests"""

    @staticmethod
    def _init_logging(cfg, name):
        logger = logging.getLogger('sanity_test')
        logger.setLevel(cfg['level'])

        file_handler = logging.FileHandler(
            '%s/fabric_ansibile_%s.log' % (cfg['file']['dir'], name), mode='w')
        file_handler.setLevel(cfg['file']['level'])
        console_handler = logging.StreamHandler()
        console_handler.setLevel(cfg['console'])

        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-8s %(message)s',
            datefmt='%Y/%m/%d %H:%M:%S')
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        return logger
    # end _init_logging

    def test(self):
        """Override this method in the derived class"""
        pass

    def __init__(self, cfg, name):
        if cfg is None:
            raise KeyError("Missing required args: cfg")
        if name is None:
            raise KeyError("Missing required args: name")

        self._name = name
        self._timeout = cfg['wait_for_job']['timeout']
        self._max_retries = cfg['wait_for_job']['max_retries']
        self._logger = SanityBase._init_logging(cfg['log'], name)
        self._api_server = cfg['api_server']
        self._analytics = cfg['analytics']
        self._api = VncApi(
            api_server_host=self._api_server['host'],
            api_server_port=self._api_server['port'],
            username=self._api_server['username'],
            password=self._api_server['password'],
            tenant_name=self._api_server['tenant'])
    # end __init__

    def add_mgmt_ip_namespace(self, fab, name, cidrs):
        """add management ip prefixes as fabric namespace"""
        ns_name = 'mgmt_ip-' + name
        self._logger.info(
            'Adding management ip namespace "%s" to fabric "%s" ...',
            ns_name, fab.name)

        subnets = []
        for cidr in cidrs:
            ip_prefix = cidr.split('/')
            subnets.append({
                'ip_prefix': ip_prefix[0],
                'ip_prefix_len': ip_prefix[1]
            })
        ns_fq_name = fab.fq_name + [ns_name]
        namespace = FabricNamespace(
            name=ns_name,
            fq_name=ns_fq_name,
            parent_type='fabric',
            fabric_namespace_type='IPV4-CIDR',
            fabric_namespace_value={
                'ipv4_cidr': {
                    'subnet': subnets
                },
            }
        )
        namespace.set_tag_list([{'to': ['label=fabric-management-ip']}])
        try:
            ns_uuid = self._api.fabric_namespace_create(namespace)
            namespace = self._api.fabric_namespace_read(id=ns_uuid)
        except RefsExistError:
            self._logger.warn(
                "Fabric namespace '%s' already exists", ns_name)
            namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)

        self._logger.debug(
            "Fabric namespace created:\n%s",
            pprint.pformat(self._api.obj_to_dict(namespace), indent=4))
        return namespace
    # end _add_mgmt_ip_namespace

    def add_asn_namespace(self, fab, asn):
        """add AS number as fabric namespace"""
        ns_name = "asn_%d" % asn
        self._logger.info(
            'Adding ASN namespace "%s" to fabric "%s" ...',
            ns_name, fab.name)

        ns_fq_name = fab.fq_name + [ns_name]
        namespace = FabricNamespace(
            name=ns_name,
            fq_name=ns_fq_name,
            parent_type='fabric',
            fabric_namespace_type='ASN',
            fabric_namespace_value={
                'asn': {
                    'asn': [asn]
                }
            }
        )
        namespace.set_tag_list([{'to': ['label=fabric-as-number']}])
        try:
            ns_uuid = self._api.fabric_namespace_create(namespace)
            namespace = self._api.fabric_namespace_read(id=ns_uuid)
        except RefsExistError:
            self._logger.warn(
                "Fabric namespace '%s' already exists", ns_name)
            namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)

        self._logger.debug(
            "Fabric namespace created:\n%s",
            pprint.pformat(self._api.obj_to_dict(namespace), indent=4))
        return namespace
    # end _add_asn_namespace

    def create_image(self, img_name, img_uri, img_version,
                     img_family, img_vendor):
        """create image"""
        img_fqname = None
        # device_fqname = None
        try:
            self._logger.info('Creating image: %s', img_name)
            img_fqname = ['default-global-system-config', img_name]
            image = DeviceImage(
                name=img_name,
                fq_name=img_fqname,
                parent_type='global-system-config',
                device_image_file_uri=img_uri,
                device_image_os_version=img_version,
                device_image_device_family=img_family,
                device_image_vendor_name=img_vendor
            )
            img_uuid = self._api.device_image_create(image)
            image = self._api.device_image_read(id=img_uuid)

        except RefsExistError:
            self._logger.warn("Image '%s' already exists", img_name)
            image = self._api.device_image_read(fq_name=img_fqname)

        self._logger.debug(
            "Image created:\n%s",
            pprint.pformat(self._api.obj_to_dict(image), indent=4))
        return image

    # end create_image_and_device

    def cleanup_fabric(self, fab_name):
        """delete fabric including all prouters in the fabric"""
        try:
            self._logger.info('Deleting fabric "%s" ...', fab_name)
            fab_fqname = ['default-global-system-config', fab_name]
            fab = self._api.fabric_read(fq_name=fab_fqname)

            # delete all namespaces in this fabric
            fab_namespaces = self._api.fabric_namespaces_list(
                parent_id=fab.uuid)
            for namespace in fab_namespaces.get('fabric-namespaces') or []:
                self._logger.debug(
                    "Delete namespace: %s", namespace.get('fq_name'))
                self._api.fabric_namespace_delete(namespace.get('fq_name'))

            # delete fabric
            self._logger.debug("Delete fabric: %s", fab_fqname)
            self._api.fabric_delete(fab_fqname)

            # delete all prouters in this fabric
            for prouter in fab.get_physical_router_back_refs() or []:
                self._delete_prouter(prouter.get('uuid'))

        except NoIdError:
            self._logger.warn('Fabric "%s" not found', fab_name)
    # end cleanup_fabric

    def cleanup_image(self, img_name):
        # image cleanup
        self._logger.info("Clean up image and prouter from db")
        try:
            img_fqname = ['default-global-system-config', img_name]
            img = self._api.device_image_read(fq_name=img_fqname)
            self._logger.debug(
                "Delete Image: %s", img_fqname)
            self._api.device_image_delete(img_fqname)

        except NoIdError:
            self._logger.warn('Image "%s" not found', img_name)

    def _delete_prouter(self, uuid):
        prouter = self._api.physical_router_read(id=uuid)

        # delete all physical and logical interfaces
        ifds = self._api.physical_interfaces_list(parent_id=uuid)
        for ifd in ifds.get('physical-interfaces')  or []:
            # delete all child logical interfaces
            ifls = self._api.logical_interfaces_list(parent_id=ifd.get('uuid'))
            for ifl in ifls.get('logical-interfaces') or []:
                self._logger.debug(
                    "Delete logical interface: %s", ifl.get('fq_name'))
                self._api.logical_interface_delete(ifl.get('fq_name'))

            # delete the physical interface
            self._logger.debug(
                "Delete physical interface: %s", ifd.get('fq_name'))
            self._api.physical_interface_delete(ifd.get('fq_name'))

        # delete the prouter
        self._logger.debug(
            "Delete physical router: %s", prouter.get_fq_name())
        self._api.physical_router_delete(prouter.get_fq_name())

        # delete corresponding bgp routers
        for bgp_router_ref in prouter.get_bgp_router_refs() or []:
            self._logger.debug(
                "Delete bgp router: %s", bgp_router_ref.get('to'))
            self._api.bgp_router_delete(bgp_router_ref.get('to'))
    # end _delete_prouter

    @staticmethod
    def _get_job_status_query_payload(job_execution_id, status):
        return {
            'start_time': 'now-5m',
            'end_time': 'now',
            'select_fields': ['MessageTS', 'Messagetype'],
            'table': 'ObjectJobExecutionTable',
            'where': [
                [
                    {
                        'name': 'ObjectId',
                        'value': "%s:%s" % (job_execution_id, status),
                        'op': 1
                    }
                ]
            ]
        }
    # end _get_job_status_query_payload

    @staticmethod
    def _check_job_status(url, job_execution_id, job_status):
        payload = SanityBase._get_job_status_query_payload(job_execution_id,
                                                           job_status)
        r = requests.post(url, json=payload)
        if r.status_code == 200:
            response = r.json()
            if len(response['value']) > 0:
                assert response['value'][0]['Messagetype'] == 'JobLog'
                return True
        return False
    # end _post_for_json_response

    def _wait_for_job_to_finish(self, job_name, job_execution_id):
        completed = "SUCCESS"
        failed = "FAILURE"
        url = "http://%s:%d/analytics/query" %\
              (self._analytics['host'], self._analytics['port'])
        retry_count = 0
        while True:
            # check if job completed successfully
            if SanityBase._check_job_status(url, job_execution_id, completed):
                self._logger.debug("%s job '%s' finished", job_name,
                                   job_execution_id)
                break
            # check if job failed
            if SanityBase._check_job_status(url, job_execution_id, failed):
                self._logger.debug("%s job '%s' failed", job_name,
                                   job_execution_id)
                raise Exception("%s job '%s' failed" %
                                (job_name, job_execution_id))
            if retry_count > self._max_retries:
                raise Exception("Timed out waiting for '%s' job to complete" %
                                job_name)
            retry_count += 1
            time.sleep(self._timeout)
    # end _wait_for_job_to_finish

    @staticmethod
    def _get_jobs_query_payload(job_execution_id, last_log_ts):
        now = time.time() * 1000000
        #print "***************** now=%i, last_log_ts=%i" % (now, last_log_ts)
        return {
            'start_time': int('%i' % last_log_ts),
            'end_time': int('%i' % now),
            'select_fields': ['MessageTS', 'Messagetype', 'ObjectId',
                              'ObjectLog'],
            'sort': 1,
            'sort_fields': ['MessageTS'],
            'table': 'ObjectJobExecutionTable',
            'where': [
                [
                    {
                        'name': 'ObjectId',
                        'value': "%s" % (job_execution_id),
                        'op': 7
                    },
                    {
                        'name': 'Messagetype',
                        'value': 'JobLog',
                        'op': 1
                    }
                ]
            ]
        }

    @staticmethod
    def _display_job_records(url, job_execution_id, last_log_ts,
                             percentage_complete, fabric_fq_name,
                             job_template_fq_name):
        log_ts = last_log_ts
        payload = SanityBase._get_jobs_query_payload(job_execution_id,
                                                     last_log_ts)
        r = requests.post(url, json=payload)
        if r.status_code == 200:
            response = r.json()
            if len(response['value']) > 0:
                # sort log entries by MessageTS
                log_entries = response['value']
                for log_entry in log_entries:
                    log_msg = json.loads(json.dumps\
                                  (xmltodict.parse(log_entry['ObjectLog'])))
                    log_text = log_msg['JobLog']['log_entry']\
                        ['JobLogEntry']['message']['#text']
                    log_device_name = log_msg['JobLog']['log_entry']\
                        ['JobLogEntry'].get('device_name')
                    if log_device_name:
                        log_device_name = log_device_name.get('#text')
                    log_details = log_msg['JobLog']['log_entry']\
                        ['JobLogEntry'].get('details')
                    if log_details:
                        log_details = log_details.get('#text')
                    log_ts_us = int(log_entry['MessageTS'])
                    log_ts_ms = log_ts_us / 1000
                    log_ts_sec = log_ts_ms / 1000
                    log_ts_sec_gm = time.gmtime(log_ts_sec)
                    log_ts_fmt = time.strftime("%m/%d/%Y %H:%M:%S",
                                               log_ts_sec_gm) + ".%s" % \
                                               (str(log_ts_ms))[-3:]
                    if log_device_name:
                        print("[{}%] {}: [{}] {}".format(percentage_complete,
                                                             log_ts_fmt,
                                                             log_device_name,
                                                             log_text))
                    else:
                        print("[{}%] {}: {}".format(percentage_complete,
                                                             log_ts_fmt,
                                                             log_text))
                    print
                    if log_details:
                        pprint.pprint("[{}%] {}: ==> {}".format(percentage_complete,
                                                            log_ts_fmt,
                                                            log_details))
                        print
                    log_ts = (log_ts_us + 1)
                return True, log_ts
        else:
            print("RESPONSE: {}".format(r))
            log_ts = time.time() * 1000000
        return False, log_ts

    def _display_prouter_state(self, prouter_states, fabric_fq_name,
                               job_template_fq_name):
        fabric_fqname = ':'.join(map(str, fabric_fq_name))
        job_template_fqname = ':'.join(map(str, job_template_fq_name))

        for prouter_name, prouter_state in prouter_states.iteritems():
            prouter_fqname = "default-global-system-config:%s" % prouter_name
            url = "http://%s:%d/analytics/uves/job-execution/%s:%s:%s?flat" %\
                  (self._analytics['host'],
                   self._analytics['port'],
                   prouter_fqname,
                   fabric_fqname,
                   job_template_fqname
                   )
            r = requests.get(url)
            if r.status_code == 200:
                response = r.json()
                jobex = response.get('PhysicalRouterJobExecution')
                if jobex:
                    new_prouter_state = jobex.get('prouter_state')
                    if isinstance(new_prouter_state, list):
                        prouter_entry = [e for e in new_prouter_state if \
                                         "FabricAnsible" in e[1]]
                        new_prouter_state = prouter_entry[0][0]
                    if new_prouter_state != prouter_state:
                        prouter_states[prouter_name] = new_prouter_state
                        pprint.pprint("-----> {} state: {} <-----".\
                                      format(prouter_name, new_prouter_state))
                        print("")
            else:
                print("BAD RESPONSE for {}: {}".format(prouter_name, r))

    def _wait_and_display_job_progress(self, job_name, job_execution_id,
                                       fabric_fq_name, job_template_fq_name,
                                       prouter_name_list=None):
        prouter_states = {}
        if prouter_name_list:
            for prouter_name in prouter_name_list:
                prouter_states[prouter_name] = ""

        completed = "SUCCESS"
        failed = "FAILURE"
        url = "http://%s:%d/analytics/query" %\
              (self._analytics['host'], self._analytics['port'])
        retry_count = 0
        last_log_ts = time.time() * 1000000
        while True:
            # get job percentage complete
            percentage_complete = self._get_job_percentage_complete\
                (job_execution_id, fabric_fq_name, job_template_fq_name)
            # display job records
            status, last_log_ts = SanityBase._display_job_records\
                (url, job_execution_id, last_log_ts, percentage_complete,
                 fabric_fq_name, job_template_fq_name)
            if status:
                self._logger.debug("%s job '%s' log records non-zero status",
                                   job_name, job_execution_id)
            # Display prouter state, if applicable
            self._display_prouter_state(prouter_states, fabric_fq_name,
                                        job_template_fq_name)
            # check if job completed successfully
            if SanityBase._check_job_status(url, job_execution_id, completed):
                self._logger.debug("%s job '%s' finished", job_name,
                                   job_execution_id)
                break
            # check if job failed
            if SanityBase._check_job_status(url, job_execution_id, failed):
                self._logger.debug("%s job '%s' failed", job_name,
                                   job_execution_id)
                raise Exception("%s job '%s' failed" %
                                (job_name, job_execution_id))

            # Check for timeout
            if retry_count > self._max_retries:
                raise Exception("Timed out waiting for '%s' job to complete" %
                                job_name)
            retry_count += 1
            time.sleep(self._timeout)

    def _get_job_percentage_complete(self, job_execution_id, fabric_fq_name,
                                     job_template_fq_name):
        url = "http://%s:%d/analytics/uves/job-execution/%s:%s:%s:%s" %\
              (self._analytics['host'], self._analytics['port'],
               fabric_fq_name[0], fabric_fq_name[1],
               job_template_fq_name[0], job_template_fq_name[1])
        r = requests.get(url)
        if r.status_code == 200:
            response = r.json()
            job_uve = response.get('FabricJobExecution')
            if job_uve:
                percomp = "?"
                for pc in job_uve['percentage_completed']:
                    if job_execution_id in pc[1]:
                        percomp = pc[0]["#text"]
                        break
                return percomp
            else:
                return "??"
        else:
            return "???"

    def discover_fabric_device(self, fab):
        """Discover all devices specified by the fabric management namespaces
        """
        self._logger.info('Discover devices in fabric "%s" ...', fab.fq_name)
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'discover_device_template'],
            job_input={'fabric_uuid': fab.uuid}
        )

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Device discovery job started with execution id: %s",
            job_execution_id)
        self._wait_for_job_to_finish('Device discovery', job_execution_id)

        fab = self._api.fabric_read(fab.fq_name)
        discovered_prouter_refs = fab.get_physical_router_back_refs()
        self._logger.debug(
            "Disovered devices:\n%s",
            pprint.pformat(discovered_prouter_refs, indent=4))

        msg = "Discovered following devices in fabric '%s':" % fab.fq_name
        discovered_prouters = []
        for prouter_ref in discovered_prouter_refs:
            prouter = self._api.physical_router_read(prouter_ref.get('to'))
            discovered_prouters.append(prouter)
            msg += "\n - %s (%s)" % (
                prouter.name, prouter.physical_router_management_ip)

        self._logger.info(msg)
        return discovered_prouters
    # end discover_fabric_device

    def device_import(self, prouters):
        """import device inventories for the prouters specified in the
        argument"""
        self._logger.info("Import all discovered prouters in the fabric ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'device_import_template'],
            job_input={},
            device_list=[prouter.uuid for prouter in prouters]
        )

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Device import job started with execution id: %s", job_execution_id)
        self._wait_for_job_to_finish('Device import', job_execution_id)

        for prouter in prouters:
            ifd_refs = self._api.physical_interfaces_list(
                parent_id=prouter.uuid)
            self._logger.info(
                "Imported %d physical interfaces to prouter: %s",
                len(ifd_refs.get('physical-interfaces')), prouter.name)
    # end device_import

    def underlay_config(self, prouters):
        """deploy underlay config to prouters in the fabric ..."""
        self._logger.info("Deploy underlay config to prouters in fabric ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'generate_underlay_template'],
            job_input={
                'enable_lldp': 'true'
            },
            device_list=[prouter.uuid for prouter in prouters]
        )

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Underlay config job started with execution id: %s",
            job_execution_id)
        self._wait_for_job_to_finish('Underlay config', job_execution_id)
    # end underlay_config

    def image_upgrade(self, image, device, fabric):
        """upgrade the physical routers with specified images"""
        self._logger.info("Upgrade image on the physical router ...")
        job_template_fq_name = [
            'default-global-system-config',
            'image_upgrade_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={'image_uuid': image.uuid},
            device_list=[device.uuid]
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Image upgrade job started with execution id: %s", job_execution_id)
        self._wait_and_display_job_progress('Image upgrade', job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name)

    # end image_upgrade

    def image_upgrade_maintenance_mode(self, device_list, image_upgrade_list,
                                       advanced_params, upgrade_mode,
                                       fabric, prouter_name_list):
        job_template_fq_name = [
            'default-global-system-config', 'hitless_upgrade_strategy_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'image_devices': image_upgrade_list,
                'advanced_parameters': advanced_params,
                'upgrade_mode': upgrade_mode,
                'fabric_uuid': fabric.uuid
            },
            device_list=device_list
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Maintenance mode upgrade job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Image upgrade', job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name,
                                            prouter_name_list=prouter_name_list)
    #end image_upgrade_maintenance_mode

    def container_cleanup(self, fabric_fq_name, container_name):
        job_template_fq_name = [
            'default-global-system-config',
            'container_cleanup_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'fabric_fq_name': fabric_fq_name,
                'container_name': container_name
            })
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Container cleanup job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Container cleanup',
                                            job_execution_id,
                                            fabric_fq_name,
                                            job_template_fq_name)
    #end container_cleanup


    def activate_maintenance_mode(self, device_uuid, mode,
                                  fabric, advanced_parameters,
                                  prouter_name_list):
        job_template_fq_name = [
            'default-global-system-config', 'maintenance_mode_activate_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'device_uuid': device_uuid,
                'fabric_uuid': fabric.uuid,
                'mode': mode,
                'advanced_parameters': advanced_parameters
            },
            device_list=[device_uuid]
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Maintenance mode activation job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Maintenance mode activation',
                                            job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name,
                                            prouter_name_list=prouter_name_list)
    #end activate_maintenance_mode

    def deactivate_maintenance_mode(self, device_uuid, fabric,
                                    advanced_parameters, prouter_name_list):
        job_template_fq_name = [
            'default-global-system-config', 'maintenance_mode_deactivate_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'device_uuid': device_uuid,
                'fabric_uuid': fabric.uuid,
                'advanced_parameters': advanced_parameters
            },
            device_list=[device_uuid]
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Maintenance mode deactivation job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Maintenance mode deactivation',
                                            job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name,
                                            prouter_name_list=prouter_name_list)
    #end deactivate_maintenance_mode

    def ztp(self, fabric_uuid):
        """run ztp for a fabric"""
        self._logger.info("Running ZTP for fabric...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'ztp_template'],
            job_input={'fabric_uuid': fabric_uuid, 'device_count': 1}
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "ZTP job started with execution id: %s", job_execution_id)
        self._wait_for_job_to_finish('ZTP', job_execution_id)

    # end ztp

    def workflow_abort(self, job_execution_ids, abort_mode, sleep_time):
        time.sleep(sleep_time)
        status = self._api.abort_job(
            job_input={
                'job_execution_ids': job_execution_ids,
                'abort_mode': abort_mode
            }
        )
        return status
    # end workflow_abort
    
    def _exit_with_error(self, errmsg):
        self._logger.error(errmsg)
        sys.exit(1)
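
The fabric workflow methods above are the building blocks of a fabric onboarding
flow. A minimal driver sketch follows; it assumes 'sanity' is an instance of the
class above and 'fab' a fabric object created elsewhere, and every name in it is
illustrative rather than part of the original example.

    # Hypothetical driver chaining a few of the workflow methods defined above.
    # 'sanity' and 'fab' are assumed to exist; all names are illustrative.
    prouters = sanity.discover_fabric_device(fab)  # runs discover_device_template
    sanity.device_import(prouters)                 # imports physical interfaces
    sanity.underlay_config(prouters)               # pushes underlay config (LLDP enabled)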
Code Example #24
0
class LoadBalancerPluginDb(LoadBalancerPluginBase):
    def __init__(self):
        # TODO: parse configuration for api-server:port and auth
        self._api = VncApi()
        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)

    def get_api_client(self):
        return self._api

    def get_vips(self, context, filters=None, fields=None):
        return self._vip_manager.get_collection(context, filters, fields)

    def get_vip(self, context, id, fields=None):
        return self._vip_manager.get_resource(context, id, fields)

    def create_vip(self, context, vip):
        return self._vip_manager.create(context, vip)

    def update_vip(self, context, id, vip):
        return self._vip_manager.update(context, id, vip)

    def delete_vip(self, context, id):
        return self._vip_manager.delete(context, id)

    def get_pools(self, context, filters=None, fields=None):
        return self._pool_manager.get_collection(context, filters, fields)

    def get_pool(self, context, id, fields=None):
        return self._pool_manager.get_resource(context, id, fields)

    def create_pool(self, context, pool):
        return self._pool_manager.create(context, pool)

    def update_pool(self, context, id, pool):
        return self._pool_manager.update(context, id, pool)

    def delete_pool(self, context, id):
        return self._pool_manager.delete(context, id)

    def stats(self, context, pool_id):
        pass

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """ Associate an health monitor with a pool.
        """
        m = health_monitor['health_monitor']
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=m['id'])
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=m['id'])

        if not context.is_admin:
            tenant_id = context.tenant_id
            if tenant_id != pool.parent_uuid or \
                    tenant_id != monitor.parent_uuid:
                raise n_exc.NotAuthorized()

        pool_refs = monitor.get_loadbalancer_pool_back_refs()
        if pool_refs is not None:
            for ref in pool_refs:
                if ref['uuid'] == pool_id:
                    raise loadbalancer.PoolMonitorAssociationExists(
                        monitor_id=m['id'], pool_id=pool_id)

        pool.add_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

        res = {'id': monitor.uuid, 'tenant_id': monitor.parent_uuid}
        return res

    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        """ Query a specific pool, health_monitor association.
        """
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        if not context.is_admin and context.tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(monitor_id=id,
                                                              pool_id=pool_id)

        res = {
            'pool_id': pool_id,
            'monitor_id': id,
            'status': self._pool_manager._get_object_status(pool),
            'tenant_id': pool.parent_uuid
        }
        return self._pool_manager._fields(res, fields)

    def delete_pool_health_monitor(self, context, id, pool_id):
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        if not context.is_admin and context.tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=id)
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(monitor_id=id,
                                                              pool_id=pool_id)

        pool.del_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

    def get_members(self, context, filters=None, fields=None):
        return self._member_manager.get_collection(context, filters, fields)

    def get_member(self, context, id, fields=None):
        return self._member_manager.get_resource(context, id, fields)

    def create_member(self, context, member):
        return self._member_manager.create(context, member)

    def update_member(self, context, id, member):
        return self._member_manager.update(context, id, member)

    def delete_member(self, context, id):
        return self._member_manager.delete(context, id)

    def get_health_monitors(self, context, filters=None, fields=None):
        return self._monitor_manager.get_collection(context, filters, fields)

    def get_health_monitor(self, context, id, fields=None):
        return self._monitor_manager.get_resource(context, id, fields)

    def create_health_monitor(self, context, health_monitor):
        return self._monitor_manager.create(context, health_monitor)

    def update_health_monitor(self, context, id, health_monitor):
        return self._monitor_manager.update(context, id, health_monitor)

    def delete_health_monitor(self, context, id):
        return self._monitor_manager.delete(context, id)
Code Example #25
0
    def initial_processing(self, concurrent):
        self.serial_num_flag = False
        self.all_serial_num = []
        serial_num = []
        self.per_greenlet_percentage = None

        self.job_ctx['current_task_index'] = 2

        try:
            total_percent = self.job_ctx.get('playbook_job_percentage')
            if total_percent:
                total_percent = float(total_percent)

            # Calculate the total percentage for this entire greenlet-based
            # task. It equals the share allotted to this task in the task
            # weightage array, taken out of the total job percentage. For
            # example, if the task weightage array is [10, 85, 5] and the
            # total job percentage is 95, then the 2nd task's effective total
            # percentage is 85% of 95%, i.e. 80.75%.
            total_task_percentage = self.module.calculate_job_percentage(
                self.job_ctx.get('total_task_count'),
                task_seq_number=self.job_ctx.get('current_task_index'),
                total_percent=total_percent,
                task_weightage_array=self.job_ctx.get(
                    'task_weightage_array'))[0]

            # Based on the number of greenlets spawned (i.e. the number of
            # sub-tasks), split the total_task_percentage equally amongst the
            # greenlets.
            self.logger.info("Number of greenlets: {} and total_percent: "
                             "{}".format(concurrent, total_task_percentage))
            self.per_greenlet_percentage = \
                self.module.calculate_job_percentage(
                    concurrent, total_percent=total_task_percentage)[0]
            self.logger.info("Per greenlet percent: "
                             "{}".format(self.per_greenlet_percentage))

            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.logger.info("Percentage calculation failed with error "
                             "{}".format(str(ex)))

        try:
            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.module.results['failed'] = True
            self.module.results['msg'] = "Failed to connect to API server " \
                "due to error: %s"\
                % str(ex)
            self.module.exit_json(**self.module.results)

        # get credentials and serial number if greenfield
        if self.total_retry_timeout:
            # get device credentials
            fabric = self.vncapi.fabric_read(id=self.fabric_uuid)
            fabric_object = self.vncapi.obj_to_dict(fabric)
            self.credentials = fabric_object.get('fabric_credentials').get(
                'device_credential')

            # get serial numbers
            fabric_namespace_obj_list = self.vncapi.fabric_namespaces_list(
                parent_id=self.fabric_uuid, detail=True)
            fabric_namespace_list = self.vncapi.obj_to_dict(
                fabric_namespace_obj_list)

            for namespace in fabric_namespace_list:
                if namespace.get('fabric_namespace_type') == "SERIAL_NUM":
                    self.serial_num_flag = True
                    serial_num.append(
                        namespace.get('fabric_namespace_value').get(
                            'serial_num'))

            if len(serial_num) > 1:
                for outer_list in serial_num:
                    for sn in outer_list:
                        self.all_serial_num.append(sn)

        else:
            self.credentials = self.module.params['credentials']
Code Example #26
0
class DeviceInfo(object):
    output = {}

    def __init__(self, module):
        self.module = module
        self.logger = module.logger
        self.job_ctx = module.job_ctx
        self.fabric_uuid = module.params['fabric_uuid']
        self.total_retry_timeout = float(module.params['total_retry_timeout'])

    def initial_processing(self, concurrent):
        self.serial_num_flag = False
        self.all_serial_num = []
        serial_num = []
        self.per_greenlet_percentage = None

        self.job_ctx['current_task_index'] = 2

        try:
            total_percent = self.job_ctx.get('playbook_job_percentage')
            if total_percent:
                total_percent = float(total_percent)

            # Calculate the total percentage for this entire greenlet-based
            # task. It equals the share allotted to this task in the task
            # weightage array, taken out of the total job percentage. For
            # example, if the task weightage array is [10, 85, 5] and the
            # total job percentage is 95, then the 2nd task's effective total
            # percentage is 85% of 95%, i.e. 80.75%.
            total_task_percentage = self.module.calculate_job_percentage(
                self.job_ctx.get('total_task_count'),
                task_seq_number=self.job_ctx.get('current_task_index'),
                total_percent=total_percent,
                task_weightage_array=self.job_ctx.get(
                    'task_weightage_array'))[0]

            # Based on the number of greenlets spawned (i.e. the number of
            # sub-tasks), split the total_task_percentage equally amongst the
            # greenlets.
            self.logger.info("Number of greenlets: {} and total_percent: "
                             "{}".format(concurrent, total_task_percentage))
            self.per_greenlet_percentage = \
                self.module.calculate_job_percentage(
                    concurrent, total_percent=total_task_percentage)[0]
            self.logger.info("Per greenlet percent: "
                             "{}".format(self.per_greenlet_percentage))

            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.logger.info("Percentage calculation failed with error "
                             "{}".format(str(ex)))

        try:
            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.module.results['failed'] = True
            self.module.results['msg'] = "Failed to connect to API server " \
                "due to error: %s"\
                % str(ex)
            self.module.exit_json(**self.module.results)

        # get credentials and serial number if greenfield
        if self.total_retry_timeout:
            # get device credentials
            fabric = self.vncapi.fabric_read(id=self.fabric_uuid)
            fabric_object = self.vncapi.obj_to_dict(fabric)
            self.credentials = fabric_object.get('fabric_credentials').get(
                'device_credential')

            # get serial numbers
            fabric_namespace_obj_list = self.vncapi.fabric_namespaces_list(
                parent_id=self.fabric_uuid, detail=True)
            fabric_namespace_list = self.vncapi.obj_to_dict(
                fabric_namespace_obj_list)

            for namespace in fabric_namespace_list:
                if namespace.get('fabric_namespace_type') == "SERIAL_NUM":
                    self.serial_num_flag = True
                    serial_num.append(
                        namespace.get('fabric_namespace_value').get(
                            'serial_num'))

            if len(serial_num) > 1:
                for outer_list in serial_num:
                    for sn in outer_list:
                        self.all_serial_num.append(sn)

        else:
            self.credentials = self.module.params['credentials']

    def ping_sweep(self, host):
        try:
            ping_output = subprocess.Popen(
                ['ping', '-W', '1', '-c', '1', host], stdout=subprocess.PIPE)
            ping_output.wait()
            return ping_output.returncode == 0
        except Exception as ex:
            self.logger.error(
                "ERROR: SUBPROCESS.POPEN failed with error {}".format(str(ex)))
            return False

    # end ping_sweep

    def _get_device_vendor(self, oid, vendor_mapping):
        for vendor in vendor_mapping:
            if vendor.get('oid') in oid:
                return vendor.get('vendor')
        return None

    # end _get_device_vendor

    def oid_mapping(self, host, pysnmp_output):
        matched_oid_mapping = {}
        matched_oid = None
        device_family_info = self.module.params['device_family_info']
        vendor_mapping = self.module.params['vendor_mapping']

        if pysnmp_output.get('ansible_sysobjectid'):
            vendor = self._get_device_vendor(
                pysnmp_output['ansible_sysobjectid'], vendor_mapping)
            if not vendor:
                self.logger.info(
                    "Vendor for host {} not supported".format(host))
            else:
                device_family = next(element for element in device_family_info
                                     if element['vendor'] == vendor)
                if device_family:
                    try:
                        matched_oid = next(
                            item for item in device_family['snmp_probe']
                            if item['oid'] ==
                            pysnmp_output['ansible_sysobjectid'])
                    except StopIteration:
                        pass
                    if matched_oid:
                        matched_oid_mapping = matched_oid.copy()
                        matched_oid_mapping['hostname'] = \
                            pysnmp_output['ansible_sysname']
                        matched_oid_mapping['host'] = host
                        matched_oid_mapping['vendor'] = vendor
                    else:
                        self.logger.info(
                            "OID {} not present in the given list of device "
                            "info for the host {}".format(
                                pysnmp_output['ansible_sysobjectid'], host))
        return matched_oid_mapping

    # end oid_mapping

    def _parse_xml_response(self, xml_response, oid_mapped):
        xml_response = xml_response.split('">')
        output = xml_response[1].split('<cli')
        final = etree.fromstring(output[0])
        if final.find('hardware-model') is not None:
            oid_mapped['product'] = final.find('hardware-model').text
        if final.find('os-name') is not None:
            oid_mapped['family'] = final.find('os-name').text
        if final.find('os-version') is not None:
            oid_mapped['os-version'] = final.find('os-version').text
        if final.find('serial-number') is not None:
            oid_mapped['serial-number'] = final.find('serial-number').text
        if final.find('host-name') is not None:
            oid_mapped['hostname'] = final.find('host-name').text

    # end _parse_xml_response

    def _ssh_connect(self, ssh_conn, username, password, hostname, commands,
                     oid_mapped):
        try:
            ssh_conn.connect(username=username,
                             password=password,
                             hostname=hostname)
            oid_mapped['username'] = username
            oid_mapped['password'] = password
            oid_mapped['host'] = hostname
        except Exception as ex:
            self.logger.info("Could not connect to host {}: {}".format(
                hostname, str(ex)))
            return False

        try:
            if commands:
                num_commands = len(commands) - 1
                for index, command in enumerate(commands):
                    stdin, stdout, stderr = ssh_conn.exec_command(
                        command['command'])
                    response = stdout.read()
                    if (not stdout and stderr) or (response is None) \
                            or ('error' in response):
                        self.logger.info(
                            "Command {} failed on host {}:{}".format(
                                command['command'], hostname, stderr))
                        if index == num_commands:
                            raise RuntimeError(
                                "All commands failed on host {}".format(
                                    hostname))
                    else:
                        break
                self._parse_xml_response(response, oid_mapped)
            return True
        except RuntimeError as rex:
            self.logger.info("RunTimeError: {}".format(str(rex)))
            return False
        except Exception as ex:
            self.logger.info("SSH failed for host {}: {}".format(
                hostname, str(ex)))
            return False
    # end _ssh_connect

    def get_device_info_ssh(self, host, oid_mapped, credentials):
        # find a credential that matches this host
        status = False
        device_family_info = self.module.params['device_family_info']

        sshconn = paramiko.SSHClient()
        sshconn.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            for info in device_family_info:
                for cred in credentials:
                    status = self._ssh_connect(sshconn,
                                               cred['credential']['username'],
                                               cred['credential']['password'],
                                               host, info['ssh_probe'],
                                               oid_mapped)
                    if status:
                        oid_mapped['vendor'] = info['vendor']
                        break
        finally:
            sshconn.close()
            return status

    # end get_device_info_ssh

    def _detailed_cred_check(self, host, oid_mapped, credentials):
        remove_null = []
        ssh_conn = paramiko.SSHClient()
        ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        index = 0

        # Check that each credentials dict has both username and password
        # defined. If either is missing, remove the entire entry from the
        # list: SSH connectivity cannot be checked with just a username or
        # just a password.
        for creds in credentials[index:]:
            for user_pwd in creds.values():
                if isinstance(user_pwd, dict):
                    if user_pwd.get('username') and user_pwd.get('password'):
                        index += 1
                        break
                    else:
                        credentials.remove(creds)
                        break

        # For each credentials dict in the list, drop keys whose value is
        # None, so that only keys with values are retained.
        for single_dict in credentials:
            remove_null.append(
                dict([(dkey, ddata) for dkey, ddata in single_dict.iteritems()
                      if ddata]))

        # Sort the dicts by number of keys, in descending order, so the dict
        # with the most keys comes first and the one with the fewest comes
        # last.
        prioritized_creds = sorted(remove_null, key=len, reverse=True)
        try:
            for device_cred in prioritized_creds:
                oid_vendor = oid_mapped['vendor']
                oid_family = oid_mapped['family']
                device_family = device_cred.get('device_family', None)
                vendor = device_cred.get('vendor', None)
                cred = device_cred.get('credential', None)
                username = cred.get('username', None) if cred else None
                password = cred.get('password', None) if cred else None

                if device_family and not vendor:
                    continue
                if vendor and vendor.lower() != oid_vendor.lower():
                    continue
                if vendor and device_family and device_family not in \
                        oid_family:
                    continue
                if not username or not password:
                    continue

                response = self._ssh_connect(ssh_conn, username, password,
                                             host, None, oid_mapped)
                if response:
                    return True

                self.logger.info(
                    "Credential for '{}' didn't work on host '{}'".format(
                        username, host))
            return False
        finally:
            ssh_conn.close()

    # end _detailed_cred_check

    def _pr_object_create_update(self, oid_mapped, fq_name, update):
        pr_uuid = None
        msg = None
        try:
            os_version = oid_mapped.get('os-version', None)
            serial_num = oid_mapped.get('serial-number', None)
            physicalrouter = PhysicalRouter(
                parent_type='global-system-config',
                fq_name=fq_name,
                physical_router_management_ip=oid_mapped.get('host'),
                physical_router_vendor_name=oid_mapped.get('vendor'),
                physical_router_product_name=oid_mapped.get('product'),
                physical_router_device_family=oid_mapped.get('family'),
                physical_router_vnc_managed=True,
                physical_router_user_credentials={
                    'username': oid_mapped.get('username'),
                    'password': oid_mapped.get('password')
                })
            if update:
                pr_unicode_obj = self.vncapi.physical_router_update(
                    physicalrouter)
                if pr_unicode_obj:
                    pr_obj_dict = ast.literal_eval(pr_unicode_obj)
                    pr_uuid = pr_obj_dict['physical-router']['uuid']
                    msg = "Updated device info for: {} : {} : {}".format(
                        oid_mapped.get('host'), fq_name[1],
                        oid_mapped.get('product'))
                    self.logger.info("Updated device info for: {} : {}".format(
                        oid_mapped.get('host'), pr_uuid))
            else:
                pr_uuid = self.vncapi.physical_router_create(physicalrouter)
                msg = "Discovered device details: {} : {} : {}".format(
                    oid_mapped.get('host'), fq_name[1],
                    oid_mapped.get('product'))
                self.logger.info("Device created with uuid- {} : {}".format(
                    oid_mapped.get('host'), pr_uuid))
                self.module.send_prouter_object_log(fq_name, "DISCOVERED",
                                                    os_version, serial_num)
        except (RefsExistError, Exception) as ex:
            if isinstance(ex, RefsExistError):
                return REF_EXISTS_ERROR, None
            self.logger.error("VNC create failed with error: {}".format(
                str(ex)))
            return False, None

        self.module.send_job_object_log(
            msg,
            JOB_IN_PROGRESS,
            None,
            job_success_percent=self.per_greenlet_percentage)
        return True, pr_uuid

    def device_info_processing(self, host, oid_mapped):
        valid_creds = False
        return_code = True

        if not oid_mapped.get('family') or not oid_mapped.get('vendor'):
            self.logger.info("Could not retrieve family/vendor info for "
                             "the host: {}, not creating PR "
                             "object".format(host))
            self.logger.info("vendor: {}, family: {}".format(
                oid_mapped.get('vendor'), oid_mapped.get('family')))
            oid_mapped = {}

        if oid_mapped.get('host'):
            valid_creds = self._detailed_cred_check(host, oid_mapped,
                                                    self.credentials)

        if not valid_creds and oid_mapped:
            self.logger.info("No credentials matched for host: {}, nothing "
                             "to update in DB".format(host))
            oid_mapped = {}

        if oid_mapped:
            if self.serial_num_flag:
                if oid_mapped.get('serial-number') not in \
                        self.all_serial_num:
                    self.logger.info(
                        "Serial number {} for host {} not present "
                        "in fabric_namespace, nothing to update "
                        "in DB".format(oid_mapped.get('serial-number'), host))
                    return

            if oid_mapped.get('hostname') is None:
                oid_mapped['hostname'] = oid_mapped.get('serial-number')

            fq_name = [
                'default-global-system-config',
                oid_mapped.get('hostname')
            ]
            return_code, pr_uuid = self._pr_object_create_update(
                oid_mapped, fq_name, False)
            if return_code == REF_EXISTS_ERROR:
                physicalrouter = self.vncapi.physical_router_read(
                    fq_name=fq_name)
                phy_router = self.vncapi.obj_to_dict(physicalrouter)
                if (phy_router.get('physical_router_management_ip') ==
                        oid_mapped.get('host')):
                    self.logger.info(
                        "Device with same mgmt ip already exists {}".format(
                            phy_router.get('physical_router_management_ip')))
                    return_code, pr_uuid = self._pr_object_create_update(
                        oid_mapped, fq_name, True)
                else:
                    fq_name = [
                        'default-global-system-config',
                        oid_mapped.get('hostname') + '_' +
                        oid_mapped.get('host')
                    ]
                    return_code, pr_uuid = self._pr_object_create_update(
                        oid_mapped, fq_name, False)
                    if return_code == REF_EXISTS_ERROR:
                        self.logger.debug("Object already exists")
            if return_code is True:
                self.vncapi.ref_update("physical_router", pr_uuid, "fabric",
                                       self.fabric_uuid, None, "ADD")
                self.logger.info(
                    "Fabric updated with physical router info for "
                    "host: {}".format(host))
                temp = {}
                temp['device_management_ip'] = oid_mapped.get('host')
                temp['device_fqname'] = fq_name
                temp['device_username'] = oid_mapped.get('username')
                temp['device_password'] = oid_mapped.get('password')
                temp['device_family'] = oid_mapped.get('family')
                temp['device_vendor'] = oid_mapped.get('vendor')
                temp['device_product'] = oid_mapped.get('product')
                temp['device_serial_number'] = oid_mapped.get('serial-number')
                DeviceInfo.output.update({pr_uuid: temp})
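
As rough orientation, a hypothetical driver loop for the DeviceInfo class above
might look like the sketch below. The names 'module', 'candidate_hosts' and
'snmp_facts' (the Ansible module wrapper, a list of management IPs to probe,
and per-host SNMP output) are assumptions for illustration and are not part of
the original example; the real playbook drives this concurrently via greenlets.

    # Hypothetical, sequential usage of DeviceInfo; all names are illustrative.
    info = DeviceInfo(module)              # 'module' is the Ansible module wrapper
    info.initial_processing(concurrent=4)  # loads credentials / serial numbers
    for host in candidate_hosts:           # candidate management IPs to probe
        if not info.ping_sweep(host):
            continue
        mapped = info.oid_mapping(host, snmp_facts.get(host, {}))
        if not mapped:
            # fall back to SSH probing when SNMP gave no usable OID match
            info.get_device_info_ssh(host, mapped, info.credentials)
        info.device_info_processing(host, mapped)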
Code Example #27
0
    def __init__(self):
        admin_user = cfg.CONF.keystone_authtoken.admin_user
        admin_password = cfg.CONF.keystone_authtoken.admin_password
        admin_tenant_name = cfg.CONF.keystone_authtoken.admin_tenant_name
        api_srvr_ip = cfg.CONF.APISERVER.api_server_ip
        api_srvr_port = cfg.CONF.APISERVER.api_server_port
        try:
            auth_host = cfg.CONF.keystone_authtoken.auth_host
        except cfg.NoSuchOptError:
            auth_host = "127.0.0.1"

        try:
            auth_protocol = cfg.CONF.keystone_authtoken.auth_protocol
        except cfg.NoSuchOptError:
            auth_protocol = "http"

        try:
            auth_port = cfg.CONF.keystone_authtoken.auth_port
        except cfg.NoSuchOptError:
            auth_port = "35357"

        try:
            auth_url = cfg.CONF.keystone_authtoken.auth_url
        except cfg.NoSuchOptError:
            auth_url = "/v2.0/tokens"

        try:
            auth_type = cfg.CONF.keystone_authtoken.auth_type
        except cfg.NoSuchOptError:
            auth_type = "keystone"

        try:
            api_server_url = cfg.CONF.APISERVER.api_server_url
        except cfg.NoSuchOptError:
            api_server_url = "/"

        # Retry till the api-server is up
        connected = False
        while not connected:
            try:
                self._api = VncApi(admin_user,
                                   admin_password,
                                   admin_tenant_name,
                                   api_srvr_ip,
                                   api_srvr_port,
                                   api_server_url,
                                   auth_host=auth_host,
                                   auth_port=auth_port,
                                   auth_protocol=auth_protocol,
                                   auth_url=auth_url,
                                   auth_type=auth_type,
                                   wait_for_connect=True)
                connected = True
            except requests.exceptions.RequestException as e:
                time.sleep(3)

        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)
Code Example #28
0
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import (
    Fabric,
    FabricNamespace,
    VirtualNetwork,
    NetworkIpam
)

    
vnc_api = VncApi()

#import pdb; pdb.set_trace()
namespaces = vnc_api.fabric_namespaces_list(detail=True)
for ns in namespaces:
    vnc_api.fabric_namespace_delete(id=ns.uuid)

fabs = vnc_api.fabrics_list(detail=True)
for fab in fabs:
    # remove fabric->vn refs
    fab.set_virtual_network_list([])
    vnc_api.fabric_update(fab)
    
    # remove fabric->node_profile refs     
    fab.set_node_profile_list([])
    vnc_api.fabric_update(fab)
   
    # remove fabric
    vnc_api.fabric_delete(id=fab.uuid)

role_configs = vnc_api.role_configs_list(detail=True)
for role_config in role_configs:
Code Example #29
0
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name  = ['default-domain','admin']
ipam_fq_name = [ 'default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******', password='******', tenant_name='admin',
                 api_server_host='10.0.0.40', api_server_port=8082,
                 auth_host='192.168.24.14')

#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
    #import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid']) 
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1) 
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)


hc_objs = []
k = 0
Code Example #30
0
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import (Fabric, FabricNamespace,
                                         VirtualNetwork, NetworkIpam)

vnc_api = VncApi()

#import pdb; pdb.set_trace()
namespaces = vnc_api.fabric_namespaces_list(detail=True)
for ns in namespaces:
    vnc_api.fabric_namespace_delete(id=ns.uuid)

fabs = vnc_api.fabrics_list(detail=True)
for fab in fabs:
    # remove fabric->vn refs
    fab.set_virtual_network_list([])
    vnc_api.fabric_update(fab)

    # remove fabric->node_profile refs
    fab.set_node_profile_list([])
    vnc_api.fabric_update(fab)

    # remove fabric
    vnc_api.fabric_delete(id=fab.uuid)

role_configs = vnc_api.role_configs_list(detail=True)
for role_config in role_configs:
    vnc_api.role_config_delete(id=role_config.uuid)

node_profiles = vnc_api.node_profiles_list(detail=True)
for node_profile in node_profiles:
    node_profile.set_hardware_list([])
Code Example #31
0
from vnc_api.vnc_api import VncApi
import json

api = VncApi()
GSC = 'default-global-system-config'


def dump(res_type, fq_name):
    obj = api._object_read(res_type=res_type, fq_name=fq_name)
    dumpobj(obj)


def dumpobj(obj):
    print json.dumps(api.obj_to_dict(obj), indent=4)


def dumplist(res_type, detail=False):
    refs = api._objects_list(res_type)
    if refs:
        refs = refs.get(res_type + 's')
    if detail:
        obj_list = []
        for ref in refs or []:
            obj = api._object_read(res_type, id=ref.get('uuid'))
            obj_list.append(api.obj_to_dict(obj))
        print json.dumps({'objs': obj_list}, indent=4)
    else:
        print json.dumps(refs, indent=4)
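
# A brief usage sketch for the helpers above (hypothetical, illustrative
# arguments; assumes the API server is reachable with default VncApi() settings):
#
#   dump('physical-router', [GSC, 'leaf-1'])   # dump one object by FQ name
#   dumplist('fabric', detail=True)            # dump all fabrics in detail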


def dump_pr(name):
Code Example #32
0
class LoadBalancerPluginDb(LoadBalancerPluginBase):
    def __init__(self):
        admin_user = cfg.CONF.keystone_authtoken.admin_user
        admin_password = cfg.CONF.keystone_authtoken.admin_password
        admin_tenant_name = cfg.CONF.keystone_authtoken.admin_tenant_name
        api_srvr_ip = cfg.CONF.APISERVER.api_server_ip
        api_srvr_port = cfg.CONF.APISERVER.api_server_port
        try:
            auth_host = cfg.CONF.keystone_authtoken.auth_host
        except cfg.NoSuchOptError:
            auth_host = "127.0.0.1"

        try:
            auth_protocol = cfg.CONF.keystone_authtoken.auth_protocol
        except cfg.NoSuchOptError:
            auth_protocol = "http"

        try:
            auth_port = cfg.CONF.keystone_authtoken.auth_port
        except cfg.NoSuchOptError:
            auth_port = "35357"

        try:
            auth_url = cfg.CONF.keystone_authtoken.auth_url
        except cfg.NoSuchOptError:
            auth_url = "/v2.0/tokens"

        try:
            auth_type = cfg.CONF.keystone_authtoken.auth_type
        except cfg.NoSuchOptError:
            auth_type = "keystone"

        try:
            api_server_url = cfg.CONF.APISERVER.api_server_url
        except cfg.NoSuchOptError:
            api_server_url = "/"

        # Retry till the api-server is up
        connected = False
        while not connected:
            try:
                self._api = VncApi(admin_user,
                                   admin_password,
                                   admin_tenant_name,
                                   api_srvr_ip,
                                   api_srvr_port,
                                   api_server_url,
                                   auth_host=auth_host,
                                   auth_port=auth_port,
                                   auth_protocol=auth_protocol,
                                   auth_url=auth_url,
                                   auth_type=auth_type,
                                   wait_for_connect=True)
                connected = True
            except requests.exceptions.RequestException as e:
                time.sleep(3)

        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)

    def get_api_client(self):
        return self._api

    def get_vips(self, context, filters=None, fields=None):
        return self._vip_manager.get_collection(context, filters, fields)

    def get_vip(self, context, id, fields=None):
        return self._vip_manager.get_resource(context, id, fields)

    def create_vip(self, context, vip):
        try:
            return self._vip_manager.create(context, vip)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='vip', msg=str(ex))

    def update_vip(self, context, id, vip):
        return self._vip_manager.update(context, id, vip)

    def delete_vip(self, context, id):
        return self._vip_manager.delete(context, id)

    def get_pools(self, context, filters=None, fields=None):
        return self._pool_manager.get_collection(context, filters, fields)

    def get_pool(self, context, id, fields=None):
        return self._pool_manager.get_resource(context, id, fields)

    def create_pool(self, context, pool):
        try:
            return self._pool_manager.create(context, pool)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='pool', msg=str(ex))

    def update_pool(self, context, id, pool):
        return self._pool_manager.update(context, id, pool)

    def delete_pool(self, context, id):
        return self._pool_manager.delete(context, id)

    def stats(self, context, pool_id):
        stats = {
            'bytes_in': '0',
            'bytes_out': '0',
            'active_connections': '0',
            'total_connections': '0',
        }

        endpoint = "http://%s:%s" % (cfg.CONF.COLLECTOR.analytics_api_ip,
                                     cfg.CONF.COLLECTOR.analytics_api_port)
        analytics = analytics_client.Client(endpoint)
        path = "/analytics/uves/service-instance/"
        fqdn_uuid = "%s?cfilt=UveLoadbalancer" % pool_id
        try:
            lb_stats = analytics.request(path, fqdn_uuid)
            pool_stats = lb_stats['UveLoadbalancer']['pool_stats']
        except Exception:
            pool_stats = []

        for pool_stat in pool_stats:
            stats['bytes_in'] = str(
                int(stats['bytes_in']) + int(pool_stat['bytes_in']))
            stats['bytes_out'] = str(
                int(stats['bytes_out']) + int(pool_stat['bytes_out']))
            stats['active_connections'] = str(
                int(stats['active_connections']) +
                int(pool_stat['current_sessions']))
            stats['total_connections'] = str(
                int(stats['total_connections']) +
                int(pool_stat['total_sessions']))
        return {'stats': stats}

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """ Associate an health monitor with a pool.
        """
        m = health_monitor['health_monitor']
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=m['id'])
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=m['id'])

        if not context.is_admin:
            tenant_id = str(uuid.UUID(context.tenant_id))
            if tenant_id != pool.parent_uuid or \
                    tenant_id != monitor.parent_uuid:
                raise n_exc.NotAuthorized()

        pool_refs = monitor.get_loadbalancer_pool_back_refs()
        if pool_refs is not None:
            for ref in pool_refs:
                if ref['uuid'] == pool_id:
                    raise loadbalancer.PoolMonitorAssociationExists(
                        monitor_id=m['id'], pool_id=pool_id)

        pool.add_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

        res = {
            'id': monitor.uuid,
            'tenant_id': monitor.parent_uuid.replace('-', '')
        }
        return res

    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        """ Query a specific pool, health_monitor association.
        """
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        if not context.is_admin and tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(monitor_id=id,
                                                              pool_id=pool_id)

        res = {
            'pool_id': pool_id,
            'monitor_id': id,
            'status': self._pool_manager._get_object_status(pool),
            'tenant_id': pool.parent_uuid.replace('-', '')
        }
        return self._pool_manager._fields(res, fields)

    def delete_pool_health_monitor(self, context, id, pool_id):
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        if not context.is_admin and tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=id)
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(monitor_id=id,
                                                              pool_id=pool_id)

        pool.del_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

    def get_members(self, context, filters=None, fields=None):
        return self._member_manager.get_collection(context, filters, fields)

    def get_member(self, context, id, fields=None):
        return self._member_manager.get_resource(context, id, fields)

    def create_member(self, context, member):
        try:
            return self._member_manager.create(context, member)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='member', msg=str(ex))

    def update_member(self, context, id, member):
        return self._member_manager.update(context, id, member)

    def delete_member(self, context, id):
        return self._member_manager.delete(context, id)

    def get_health_monitors(self, context, filters=None, fields=None):
        return self._monitor_manager.get_collection(context, filters, fields)

    def get_health_monitor(self, context, id, fields=None):
        return self._monitor_manager.get_resource(context, id, fields)

    def create_health_monitor(self, context, health_monitor):
        try:
            return self._monitor_manager.create(context, health_monitor)
        except vnc_exc.PermissionDenied as ex:
            raise n_exc.BadRequest(resource='health_monitor', msg=str(ex))

    def update_health_monitor(self, context, id, health_monitor):
        return self._monitor_manager.update(context, id, health_monitor)

    def delete_health_monitor(self, context, id):
        return self._monitor_manager.delete(context, id)
Code Example #33
0
    def __init__(self, dm_logger=None, args=None):
        self._args = args
        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(
            int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(
            float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(
            bool(self._args.push_delay_enable))

        self._chksum = ""
        if self._args.collectors:
            self._chksum = hashlib.md5(''.join(
                self._args.collectors)).hexdigest()

        # Initialize logger
        self.logger = dm_logger or DeviceManagerLogger(args)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user,
                    args.admin_password,
                    args.admin_tenant_name,
                    args.api_server_ip,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)
        """ @sighup
        Handle of SIGHUP for collector list config change
        """
        gevent.signal(signal.SIGHUP, self.sighup_handler)

        # Initialize amqp
        self._vnc_amqp = DMAmqpHandle(self.logger, self.REACTION_MAP,
                                      self._args)
        self._vnc_amqp.establish()

        # Initialize cassandra
        self._object_db = DMCassandraDB.get_instance(self, _zookeeper_client)
        DBBaseDM.init(self, self.logger, self._object_db)
        DBBaseDM._sandesh = self.logger._sandesh

        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            VirtualNetworkDM.locate(obj['uuid'], obj)

        for obj in RoutingInstanceDM.list_obj():
            RoutingInstanceDM.locate(obj['uuid'], obj)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        for obj in pr_obj_list:
            PhysicalRouterDM.locate(obj['uuid'], obj)

        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._object_db.handle_pr_deletes(pr_uuid_set)

        for obj in PortTupleDM.list_obj():
            PortTupleDM.locate(obj['uuid'], obj)

        for obj in PhysicalInterfaceDM.list_obj():
            PhysicalInterfaceDM.locate(obj['uuid'], obj)

        for obj in LogicalInterfaceDM.list_obj():
            LogicalInterfaceDM.locate(obj['uuid'], obj)

        for obj in VirtualMachineInterfaceDM.list_obj():
            VirtualMachineInterfaceDM.locate(obj['uuid'], obj)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            vmi_set = set()
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
                    vmi_set |= pi.virtual_machine_interfaces
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._object_db.handle_pnf_resource_deletes(si_uuid_set)

        for obj in si_obj_list:
            ServiceInstanceDM.locate(obj['uuid'], obj)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()

        DeviceManager._device_manager = self
        self._vnc_amqp._db_resync_done.set()
        try:
            gevent.joinall(self._vnc_amqp._vnc_kombu.greenlets())
        except KeyboardInterrupt:
            DeviceManager.destroy_instance()
            raise
class LoadBalancerPluginDb(LoadBalancerPluginBase):

    def __init__(self):
        admin_user = cfg.CONF.keystone_authtoken.admin_user
        admin_password = cfg.CONF.keystone_authtoken.admin_password
        admin_tenant_name = cfg.CONF.keystone_authtoken.admin_tenant_name
        api_srvr_ip = cfg.CONF.APISERVER.api_server_ip
        api_srvr_port = cfg.CONF.APISERVER.api_server_port
        try:
            auth_host = cfg.CONF.keystone_authtoken.auth_host
        except cfg.NoSuchOptError:
            auth_host = "127.0.0.1"

        try:
            auth_protocol = cfg.CONF.keystone_authtoken.auth_protocol
        except cfg.NoSuchOptError:
            auth_protocol = "http"

        try:
            auth_port = cfg.CONF.keystone_authtoken.auth_port
        except cfg.NoSuchOptError:
            auth_port = "35357"

        try:
            auth_url = cfg.CONF.keystone_authtoken.auth_url
        except cfg.NoSuchOptError:
            auth_url = "/v2.0/tokens"

        try:
            auth_type = cfg.CONF.keystone_authtoken.auth_type
        except cfg.NoSuchOptError:
            auth_type = "keystone"

        try:
            api_server_url = cfg.CONF.APISERVER.api_server_url
        except cfg.NoSuchOptError:
            api_server_url = "/"

        # Retry until the API server is up
        connected = False
        while not connected:
            try:
                self._api = VncApi(
                     admin_user, admin_password, admin_tenant_name,
                     api_srvr_ip, api_srvr_port, api_server_url,
                     auth_host=auth_host, auth_port=auth_port,
                     auth_protocol=auth_protocol, auth_url=auth_url,
                     api_auth_protocol="https")
                connected = True
            except requests.exceptions.RequestException as e:
                time.sleep(3)

        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)

    def get_api_client(self):
        return self._api

    def get_vips(self, context, filters=None, fields=None):
        return self._vip_manager.get_collection(context, filters, fields)

    def get_vip(self, context, id, fields=None):
        return self._vip_manager.get_resource(context, id, fields)

    def create_vip(self, context, vip):
        return self._vip_manager.create(context, vip)

    def update_vip(self, context, id, vip):
        return self._vip_manager.update(context, id, vip)

    def delete_vip(self, context, id):
        return self._vip_manager.delete(context, id)

    def get_pools(self, context, filters=None, fields=None):
        return self._pool_manager.get_collection(context, filters, fields)

    def get_pool(self, context, id, fields=None):
        return self._pool_manager.get_resource(context, id, fields)

    def create_pool(self, context, pool):
        return self._pool_manager.create(context, pool)

    def update_pool(self, context, id, pool):
        return self._pool_manager.update(context, id, pool)

    def delete_pool(self, context, id):
        return self._pool_manager.delete(context, id)

    def stats(self, context, pool_id):
        pass

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """ Associate an health monitor with a pool.
        """
        m = health_monitor['health_monitor']
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=m['id'])
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=m['id'])

        if not context.is_admin:
            tenant_id = str(uuid.UUID(context.tenant_id))
            if tenant_id != pool.parent_uuid or \
                    tenant_id != monitor.parent_uuid:
                raise n_exc.NotAuthorized()

        pool_refs = monitor.get_loadbalancer_pool_back_refs()
        if pool_refs is not None:
            for ref in pool_refs:
                if ref['uuid'] == pool_id:
                    raise loadbalancer.PoolMonitorAssociationExists(
                        monitor_id=m['id'], pool_id=pool_id)

        pool.add_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

        res = {
            'id': monitor.uuid,
            'tenant_id': monitor.parent_uuid.replace('-', '')
        }
        return res

    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        """ Query a specific pool, health_monitor association.
        """
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        if not context.is_admin and tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

        res = {
            'pool_id': pool_id,
            'monitor_id': id,
            'status': self._pool_manager._get_object_status(pool),
            'tenant_id': pool.parent_uuid.replace('-', '')
        }
        return self._pool_manager._fields(res, fields)

    def delete_pool_health_monitor(self, context, id, pool_id):
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        if not context.is_admin and tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=id)
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

        pool.del_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

    def get_members(self, context, filters=None, fields=None):
        return self._member_manager.get_collection(context, filters, fields)

    def get_member(self, context, id, fields=None):
        return self._member_manager.get_resource(context, id, fields)

    def create_member(self, context, member):
        return self._member_manager.create(context, member)

    def update_member(self, context, id, member):
        return self._member_manager.update(context, id, member)

    def delete_member(self, context, id):
        return self._member_manager.delete(context, id)

    def get_health_monitors(self, context, filters=None, fields=None):
        return self._monitor_manager.get_collection(context, filters, fields)

    def get_health_monitor(self, context, id, fields=None):
        return self._monitor_manager.get_resource(context, id, fields)

    def create_health_monitor(self, context, health_monitor):
        return self._monitor_manager.create(context, health_monitor)

    def update_health_monitor(self, context, id, health_monitor):
        return self._monitor_manager.update(context, id, health_monitor)

    def delete_health_monitor(self, context, id):
        return self._monitor_manager.delete(context, id)
コード例 #35
0
ファイル: zone.py プロジェクト: nuthanc/bgpaas_scale
from vnc_api.vnc_api import VncApi
from vnc_api.vnc_api import ControlNodeZone
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import time

vnc = VncApi(api_server_host="10.87.64.129",
             auth_host='5.5.5.251',
             username='******',
             password='******',
             tenant_name='admin')
fq_name = 'default-global-system-config'
gsc_obj = vnc.global_system_config_read(
    id='b411d9a8-3f89-457c-864f-882ad37dc726')
#gsc_name = "default-global-system-config"

bgp_routers = ['5b4s2.novalocal', '5b4s6', '5b4s4']


def create_zones():
    for i in range(0, 2):
        cnz_name = "test-cnz-%s" % i
        cnz = ControlNodeZone(name=cnz_name, parent_obj=gsc_obj)
        vnc.control_node_zone_create(cnz)
        print("zone %s created" % cnz_name)


def delete_zones():
    for i in range(0, 2):
        cnz_name = ["default-global-system-config", "test-cnz-%s" % i]
        vnc.control_node_zone_delete(fq_name=cnz_name)
        print("zone %s deleted" % cnz_name[-1])
コード例 #36
0
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name = ['default-domain', 'admin']
ipam_fq_name = ['default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.87.64.129',
                 api_server_port=8082,
                 auth_host='5.5.5.251')

#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
#import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid'])
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1)
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)
コード例 #37
0
        job_log_utils = JobLogUtils(
            sandesh_instance_id=job_input_json['job_execution_id'],
            config_args=job_input_json['args'])
        logger = job_log_utils.config_logger
    except Exception as exp:
        print >> sys.stderr, "Failed to initialize logger due "\
                             "to Exception: %s" % traceback.format_exc()
        sys.exit("Exiting due to logger initialization error: %s" % repr(exp))

    # initialize _vnc_api instance
    vnc_api = None
    try:
        auth_token = job_input_json['auth_token']

        vnc_api = VncApi(auth_token=auth_token)
        logger.info("VNC api is initialized using the auth token passed.")
    except Exception as exp:
        logger.error(
            MsgBundle.getMessage(MsgBundle.VNC_INITIALIZATION_ERROR,
                                 exc_msg=traceback.format_exc()))
        msg = MsgBundle.getMessage(MsgBundle.VNC_INITIALIZATION_ERROR,
                                   exc_msg=repr(exp))
        job_log_utils.send_job_log(job_input_json['job_template_fq_name'],
                                   job_input_json['job_execution_id'],
                                   job_input_json.get('fabric_fq_name'), msg,
                                   JobStatus.FAILURE)
        sys.exit(msg)

    # invoke job manager
    try:
コード例 #38
0
        self.cidr_net, self.cidr_mask = cidr.split("/")

    def get_next_cidr(self):
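        # Advance to the next network of the same prefix length and return
        # it as a CIDR string, e.g. (illustrative values) 10.0.0.0/24 ->
        # 10.0.1.0/24.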
        ip_network_next = self.ip_network.next()[0]
        ip_addr_next = ipaddr.IPAddress(ip_network_next)
        cidr = ip_addr_next._explode_shorthand_ip_string()
        cidr = cidr + "/" + self.cidr_mask
        self.ip_network = IPNetwork(cidr)
        return cidr


cidr_obj = CIDR(cidr)

vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.0.0.40',
                 api_server_port=8082,
                 auth_host='192.168.24.14')

vlan_tag = 2
proj_obj = vnc_lib.project_read(fq_name=tenant_name)
ipam_obj = vnc_lib.network_ipam_read(fq_name=ipam_fq_name)

sec_grp_obj = vnc_lib.security_group_read(
    fq_name=[u'default-domain', u'admin', u'default'])

for i in xrange(0, 450):
    #for i in xrange(0,3):
    vn_name = "VN.hc.st" + str(i)
    #    import pdb;pdb.set_trace()
    #    networks = proj_obj.get_virtual_networks()
コード例 #39
0
def get_vnc_api_instance(wait_for_connect=True):
    """ Instantiates a VncApi object from configured parameters

    Reads all necessary configuration options from the Neutron and Contrail
    core plugin configuration files and instantiates a VncApi object with
    them. If the authentication strategy is not defined, OpenStack Keystone
    authentication is used by default.

    :param wait_for_connect: retry the connection indefinitely when the HTTP
        return code is 503
    :returns: VncApi object instance
    """
    api_server_host = cfg.CONF.APISERVER.api_server_ip.split()
    api_server_port = cfg.CONF.APISERVER.api_server_port
    api_server_base_url = cfg.CONF.APISERVER.api_server_base_url
    api_server_use_ssl = cfg.CONF.APISERVER.use_ssl
    api_server_ca_file = cfg.CONF.APISERVER.cafile
    api_server_cert_file = cfg.CONF.APISERVER.certfile
    api_server_key_file = cfg.CONF.APISERVER.keyfile

    # If VNC API needs authentication, use the same authentication strategy
    # as Neutron (defaults to Keystone). If not, don't provide credentials.
    if vnc_api_is_authenticated(api_server_host):
        auth_strategy = cfg.CONF.auth_strategy
        if auth_strategy not in VncApi.AUTHN_SUPPORTED_STRATEGIES:
            raise c_exc.AuthStrategyNotSupported(auth_strategy=auth_strategy)
        auth_strategy = constants.KEYSTONE_AUTH
    else:
        auth_strategy = 'noauth'

    identity_uri = None
    auth_token_url = None
    auth_protocol = None
    auth_host = None
    auth_port = None
    auth_url = None
    auth_version = None
    auth_cafile = None
    auth_certfile = None
    auth_keyfile = None
    admin_user = None
    admin_password = None
    admin_tenant_name = None
    domain_name = None
    if auth_strategy == constants.KEYSTONE_AUTH:
        try:
            ks_auth_url = cfg.CONF.keystone_authtoken.auth_url
        except cfg.NoSuchOptError:
            ks_auth_url = None
        # If APISERVER.auth_token_url is defined, prefer it to the
        # keystone_authtoken section
        auth_token_url = cfg.CONF.APISERVER.auth_token_url
        if auth_token_url is not None:
            auth_token_url_parsed = urlparse(auth_token_url)
            auth_protocol = auth_token_url_parsed.scheme
            auth_host = auth_token_url_parsed.hostname
            auth_port = auth_token_url_parsed.port
            auth_url = auth_token_url_parsed.path
        elif ks_auth_url:
            # If keystone_authtoken.auth_url is defined, prefer it to
            # keystone_authtoken.identity_uri
            auth_url_parsed = urlparse(ks_auth_url)
            auth_protocol = auth_url_parsed.scheme
            auth_host = auth_url_parsed.hostname
            auth_port = auth_url_parsed.port
        else:
            # If keystone_authtoken.identity_uri is defined, prefer it to
            # specific authtoken parameters
            identity_uri = cfg.CONF.keystone_authtoken.identity_uri
            if identity_uri is not None:
                identity_uri_parsed = urlparse(identity_uri)
                auth_protocol = identity_uri_parsed.scheme
                auth_host = identity_uri_parsed.hostname
                auth_port = identity_uri_parsed.port
                auth_admin_prefix = identity_uri_parsed.path
            else:
                auth_protocol = cfg.CONF.keystone_authtoken.auth_protocol
                auth_host = cfg.CONF.keystone_authtoken.auth_host
                auth_port = cfg.CONF.keystone_authtoken.auth_port
                auth_admin_prefix =\
                    cfg.CONF.keystone_authtoken.auth_admin_prefix
                identity_uri = '%s://%s:%s/%s' % (auth_protocol, auth_host,
                                                  auth_port, auth_admin_prefix)
            # If no Keystone API version is defined in identity_uri or in the
            # specific param auth_version, use version 2.0.
            if constants.KEYSTONE_V2_REGEX.search(identity_uri):
                auth_version = constants.KEYSTONE_V2_API_VERSION
                auth_token_url = '%s/tokens' % (identity_uri)
            elif constants.KEYSTONE_V3_REGEX.search(identity_uri):
                auth_version = constants.KEYSTONE_V3_API_VERSION
                auth_token_url = '%s/tokens' % (identity_uri)
            else:
                auth_version = cfg.CONF.keystone_authtoken.auth_version or \
                    constants.KEYSTONE_V2_API_VERSION
                auth_token_url = '%s/%s/tokens' % (identity_uri, auth_version)
            auth_url = '%s/%s/tokens' % (auth_admin_prefix, auth_version)
        auth_cafile = cfg.CONF.keystone_authtoken.cafile
        auth_certfile = cfg.CONF.keystone_authtoken.certfile
        auth_keyfile = cfg.CONF.keystone_authtoken.keyfile
        (admin_user, admin_password, admin_tenant_name,
         domain_name) = get_keystone_auth_info()

    return VncApi(
        api_server_host=api_server_host,
        api_server_port=api_server_port,
        api_server_url=api_server_base_url,
        api_server_use_ssl=api_server_use_ssl,
        apicafile=api_server_ca_file,
        apicertfile=api_server_cert_file,
        apikeyfile=api_server_key_file,
        auth_type=auth_strategy,
        auth_protocol=auth_protocol,
        auth_host=auth_host,
        auth_port=auth_port,
        auth_url=auth_url,
        auth_token_url=auth_token_url,
        kscafile=auth_cafile,
        kscertfile=auth_certfile,
        kskeyfile=auth_keyfile,
        username=admin_user,
        password=admin_password,
        tenant_name=admin_tenant_name,
        domain_name=domain_name,
        wait_for_connect=wait_for_connect,
        connection_timeout=cfg.CONF.APISERVER.connection_timeout,
        timeout=cfg.CONF.APISERVER.timeout,
    )
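
The version-detection comments above derive a Keystone token URL from identity_uri. The snippet below is a minimal, standalone sketch of that derivation and is not part of the plugin: the two regular expressions are assumed stand-ins for constants.KEYSTONE_V2_REGEX and constants.KEYSTONE_V3_REGEX, and the fallback ignores the auth_version option for brevity.

import re

# Assumed stand-ins for constants.KEYSTONE_V2_REGEX / KEYSTONE_V3_REGEX.
KEYSTONE_V2_REGEX = re.compile(r'/v2\.0/?$')
KEYSTONE_V3_REGEX = re.compile(r'/v3/?$')


def token_url_for(identity_uri):
    # A version suffix is already present in the URI: append /tokens.
    if KEYSTONE_V2_REGEX.search(identity_uri) or \
            KEYSTONE_V3_REGEX.search(identity_uri):
        return '%s/tokens' % identity_uri
    # No version in the URI: fall back to v2.0, as the plugin does.
    return '%s/v2.0/tokens' % identity_uri


print(token_url_for('http://127.0.0.1:35357/v2.0'))  # .../v2.0/tokens
print(token_url_for('http://127.0.0.1:35357'))        # .../v2.0/tokens
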
コード例 #40
0
class DeviceInfo(object):
    output = {}

    def __init__(self, module):
        self.module = module
        self.logger = module.logger
        self.job_ctx = module.job_ctx
        self.fabric_uuid = module.params['fabric_uuid']
        self.total_retry_timeout = float(module.params['total_retry_timeout'])
        self._job_file_write = JobFileWrite(self.logger)

    def initial_processing(self, concurrent):
        self.serial_num_flag = False
        self.all_serial_num = []
        serial_num = []
        self.per_greenlet_percentage = None

        self.job_ctx['current_task_index'] = 2

        try:
            total_percent = self.job_ctx.get('playbook_job_percentage')
            if total_percent:
                total_percent = float(total_percent)

            # Calculate the total percentage of this entire greenlet-based
            # task. This equals the percentage allotted to this task in the
            # weightage array, taken out of the total job percentage. For
            # example: if the task weightage array is [10, 85, 5] and the
            # total job % is 95, then the 2nd task's effective total
            # percentage is 85% of 95%.
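            # Illustrative arithmetic for the example above (values are not
            # taken from any real job input): 85% of 95% is 0.85 * 95 = 80.75,
            # which would be this task's effective total percentage.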
            total_task_percentage = self.module.calculate_job_percentage(
                self.job_ctx.get('total_task_count'),
                task_seq_number=self.job_ctx.get('current_task_index'),
                total_percent=total_percent,
                task_weightage_array=self.job_ctx.get(
                    'task_weightage_array'))[0]

            # Based on the number of greenlets spawned (i.e. the number of
            # sub-tasks), split the total_task_percentage equally amongst
            # the greenlets.
            self.logger.info("Number of greenlets: {} and total_percent: "
                             "{}".format(concurrent, total_task_percentage))
            self.per_greenlet_percentage = \
                self.module.calculate_job_percentage(
                    concurrent, total_percent=total_task_percentage)[0]
            self.logger.info("Per greenlet percent: "
                             "{}".format(self.per_greenlet_percentage))

            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.logger.info("Percentage calculation failed with error "
                             "{}".format(str(ex)))

        try:
            self.vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                                 auth_token=self.job_ctx.get('auth_token'))
        except Exception as ex:
            self.module.results['failed'] = True
            self.module.results['msg'] = "Failed to connect to API server " \
                "due to error: %s"\
                % str(ex)
            self.module.exit_json(**self.module.results)

        # get credentials and serial number if greenfield
        if self.total_retry_timeout:
            # get device credentials
            fabric = self.vncapi.fabric_read(id=self.fabric_uuid)
            fabric_object = self.vncapi.obj_to_dict(fabric)
            self.credentials = fabric_object.get('fabric_credentials').get(
                'device_credential')

            # get serial numbers
            fabric_namespace_obj_list = self.vncapi.fabric_namespaces_list(
                parent_id=self.fabric_uuid, detail=True)
            fabric_namespace_list = self.vncapi.obj_to_dict(
                fabric_namespace_obj_list)

            for namespace in fabric_namespace_list:
                if namespace.get('fabric_namespace_type') == "SERIAL_NUM":
                    self.serial_num_flag = True
                    serial_num.append(namespace.get(
                        'fabric_namespace_value').get('serial_num'))

            if len(serial_num) > 1:
                for outer_list in serial_num:
                    for sn in outer_list:
                        self.all_serial_num.append(sn)

        else:
            self.credentials = self.module.params['credentials']

        for cred in self.credentials:
            if cred.get('credential', {}).get('password'):
                cred['credential']['password'] = JobVncApi.decrypt_password(
                    encrypted_password=cred.get('credential', {}).get('password'),
                    admin_password=self.job_ctx.get('vnc_api_init_params').get(
                        'admin_password'))

    def ping_sweep(self, host):
        try:
            ping_output = subprocess.Popen(
                ['ping', '-W', '1', '-c', '1', host], stdout=subprocess.PIPE)
            ping_output.wait()
            return ping_output.returncode == 0
        except Exception as ex:
            self.logger.error("ERROR: SUBPROCESS.POPEN failed with error {}"
                              .format(str(ex)))
            return False
    # end _ping_sweep

    def _get_device_vendor(self, oid, vendor_mapping):
        for vendor in vendor_mapping:
            if vendor.get('oid') in oid:
                return vendor.get('vendor')
        return None
    # end _get_device_vendor

    def oid_mapping(self, host, pysnmp_output):
        matched_oid_mapping = {}
        matched_oid = None
        device_family_info = self.module.params['device_family_info']
        vendor_mapping = self.module.params['vendor_mapping']

        if pysnmp_output.get('ansible_sysobjectid'):
            vendor = self._get_device_vendor(
                pysnmp_output['ansible_sysobjectid'],
                vendor_mapping)
            if not vendor:
                self.logger.info("Vendor for host {} not supported".format(
                    host))
            else:
                device_family = next(
                    element for element in device_family_info
                    if element['vendor'] == vendor)
                if device_family:
                    try:
                        matched_oid = next(
                            item for item in device_family['snmp_probe']
                            if item['oid'] == pysnmp_output[
                                'ansible_sysobjectid'])
                    except StopIteration:
                        pass
                    if matched_oid:
                        matched_oid_mapping = matched_oid.copy()
                        matched_oid_mapping['hostname'] = \
                            pysnmp_output['ansible_sysname']
                        matched_oid_mapping['host'] = host
                        matched_oid_mapping['vendor'] = vendor
                    else:
                        self.logger.info(
                            "OID {} not present in the given list of device "
                            "info for the host {}".format(
                                pysnmp_output['ansible_sysobjectid'], host))
        return matched_oid_mapping
    # end _oid_mapping

    def _parse_xml_response(self, xml_response, oid_mapped):
        xml_response = xml_response.split('">')
        output = xml_response[1].split('<cli')
        final = etree.fromstring(output[0])
        if final.find('hardware-model') is not None:
            oid_mapped['product'] = final.find('hardware-model').text
        if final.find('os-name') is not None:
            oid_mapped['family'] = final.find('os-name').text
        if final.find('os-version') is not None:
            oid_mapped['os-version'] = final.find('os-version').text
        if final.find('serial-number') is not None:
            oid_mapped['serial-number'] = final.find('serial-number').text
        if final.find('host-name') is not None:
            oid_mapped['hostname'] = final.find('host-name').text
    # end _parse_xml_response

    def _ssh_connect(self, ssh_conn, username, password, hostname,
                     commands, oid_mapped):
        try:
            ssh_conn.connect(
                username=username,
                password=password,
                hostname=hostname)
            oid_mapped['username'] = username
            oid_mapped['password'] = password
            oid_mapped['host'] = hostname
        except Exception as ex:
            self.logger.info(
                "Could not connect to host {}: {}".format(
                    hostname, str(ex)))
            return False

        try:
            if commands:
                num_commands = len(commands) - 1
                for index, command in enumerate(commands):
                    stdin, stdout, stderr = ssh_conn.exec_command(
                        command['command'])
                    response = stdout.read()
                    if (not stdout and stderr) or (
                            response is None) or ('error' in response):
                        self.logger.info(
                            "Command {} failed on host {}:{}"
                            .format(command['command'], hostname, stderr))
                        if index == num_commands:
                            raise RuntimeError("All commands failed on host {}"
                                               .format(hostname))
                    else:
                        break
                self._parse_xml_response(response, oid_mapped)
            return True
        except RuntimeError as rex:
            self.logger.info("RunTimeError: {}".format(str(rex)))
            return False
        except Exception as ex:
            self.logger.info("SSH failed for host {}: {}".format(hostname,
                                                                 str(ex)))
            return False
    # end _ssh_connect

    def get_device_info_ssh(self, host, oid_mapped, credentials):
        # find a credential that matches this host
        status = False
        device_family_info = self.module.params['device_family_info']

        sshconn = paramiko.SSHClient()
        sshconn.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        try:
            for info in device_family_info:
                for cred in credentials:
                    status = self._ssh_connect(
                        sshconn,
                        cred['credential']['username'],
                        cred['credential']['password'],
                        host,
                        info['ssh_probe'],
                        oid_mapped)
                    if status:
                        oid_mapped['vendor'] = info['vendor']
                        break
        finally:
            sshconn.close()
            return status
    # end _get_device_info_ssh

    def _detailed_cred_check(self, host, oid_mapped, credentials):
        remove_null = []
        ssh_conn = paramiko.SSHClient()
        ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        index = 0

        # Check whether each credentials dict has both username and password
        # defined. If either is missing, remove the entire entry from the
        # list: SSH connectivity cannot be checked with only a username or
        # only a password.
        for creds in credentials[index:]:
            for user_pwd in creds.values():
                if isinstance(user_pwd, dict):
                    if user_pwd.get('username') and user_pwd.get('password'):
                        index += 1
                        break
                    else:
                        credentials.remove(creds)
                        break

        # For each credentials dict in the list, drop keys whose value is
        # None; only keys with values are retained.
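        # Illustrative (hypothetical) example: {'vendor': 'juniper',
        # 'device_family': None} is reduced to {'vendor': 'juniper'}.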
        for single_dict in credentials:
            remove_null.append(
                dict([(dkey, ddata)
                      for dkey, ddata in single_dict.iteritems() if ddata]))

        # Sort by the number of keys in each dict, from most to fewest: the
        # resulting list has the dict with the most keys first and the one
        # with the fewest last.
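        # Illustrative (hypothetical) ordering: an entry with 'credential',
        # 'vendor' and 'device_family' keys is tried before one that only
        # has 'credential'.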
        prioritized_creds = sorted(remove_null, key=len, reverse=True)
        try:
            for device_cred in prioritized_creds:
                oid_vendor = oid_mapped['vendor']
                oid_family = oid_mapped['family']
                device_family = device_cred.get('device_family', None)
                vendor = device_cred.get('vendor', None)
                cred = device_cred.get('credential', None)
                username = cred.get('username', None) if cred else None
                password = cred.get('password', None) if cred else None

                if device_family and not vendor:
                    continue
                if vendor and vendor.lower() != oid_vendor.lower():
                    continue
                if vendor and device_family and device_family not in \
                        oid_family:
                    continue
                if not username or not password:
                    continue

                response = self._ssh_connect(
                    ssh_conn,
                    username,
                    password,
                    host,
                    None,
                    oid_mapped)
                if response:
                    return True

                self.logger.info(
                    "Credential for '{}' didn't work on host '{}'".format(
                        cred['credential']['username'], host))
            return False
        finally:
            ssh_conn.close()
    # end _ssh_check

    def _pr_object_create_update(
            self,
            oid_mapped,
            fq_name,
            update):
        pr_uuid = None
        msg = None
        try:
            os_version = oid_mapped.get('os-version', None)
            serial_num = oid_mapped.get('serial-number', None)
            physicalrouter = PhysicalRouter(
                parent_type='global-system-config',
                fq_name=fq_name,
                physical_router_management_ip=oid_mapped.get('host'),
                physical_router_vendor_name=oid_mapped.get('vendor'),
                physical_router_product_name=oid_mapped.get('product'),
                physical_router_device_family=oid_mapped.get('family'),
                physical_router_vnc_managed=True,
                physical_router_hostname=fq_name[-1],
                display_name=fq_name[-1],
                physical_router_serial_number=serial_num,
                physical_router_managed_state='active',
                physical_router_user_credentials={
                    'username': oid_mapped.get('username'),
                    'password': oid_mapped.get('password')
                }
            )
            if update:
                pr_unicode_obj = self.vncapi.physical_router_update(
                    physicalrouter)
                if pr_unicode_obj:
                    pr_obj_dict = ast.literal_eval(pr_unicode_obj)
                    pr_uuid = pr_obj_dict['physical-router']['uuid']
                    msg = "Discovered %s:\n   Host name: %s\n   Vendor: %s\n" \
                          "   Model: %s" % (
                        oid_mapped.get('host'),
                        fq_name[1],
                        oid_mapped.get('vendor'),
                        oid_mapped.get('product')
                    )
                    self.logger.info("Discovered {} : {}".format(
                        oid_mapped.get('host'), pr_uuid
                    ))
            else:
                # underlay_managed flag should only be set at physical-router
                # creation time
                physicalrouter.set_physical_router_underlay_managed(
                    self.job_ctx.get('job_input').get('manage_underlay', True)
                )
                pr_uuid = self.vncapi.physical_router_create(physicalrouter)
                msg = "Discovered device details: {} : {} : {}".format(
                    oid_mapped.get('host'), fq_name[1], oid_mapped.get(
                        'product'))
                self.logger.info("Device created with uuid- {} : {}".format(
                    oid_mapped.get(
                        'host'), pr_uuid))
            self.module.send_prouter_object_log(fq_name, "DISCOVERED",
                                                os_version, serial_num)
        except(RefsExistError, Exception) as ex:
            if isinstance(ex, RefsExistError):
                return REF_EXISTS_ERROR, None
            self.logger.error("VNC create failed with error: {}".format(str(
                ex)))
            return False, None

        self.module.send_job_object_log(
            msg,
            JOB_IN_PROGRESS,
            None,
            job_success_percent=self.per_greenlet_percentage)
        self.discovery_percentage_write()
        return True, pr_uuid

    def get_hostname_from_job_input(self, serial_num):
        hostname = None
        devices_to_ztp = self.job_ctx.get('job_input').get('device_to_ztp')
        for device_info in devices_to_ztp:
            if device_info.get('serial_number') == serial_num:
                hostname = device_info.get('hostname')
                break
        return hostname

    def device_info_processing(self, host, oid_mapped):
        valid_creds = False
        return_code = True

        if not oid_mapped.get('family') or not oid_mapped.get('vendor'):
            self.logger.info("Could not retrieve family/vendor info for "
                             "the host: {}, not creating PR "
                             "object".format(host))
            self.logger.info("vendor: {}, family: {}".format(
                oid_mapped.get('vendor'), oid_mapped.get('family')))
            oid_mapped = {}

        if oid_mapped.get('host'):
            valid_creds = self._detailed_cred_check(host, oid_mapped,
                                                    self.credentials)

        if not valid_creds and oid_mapped:
            self.logger.info("No credentials matched for host: {}, nothing "
                             "to update in DB".format(host))
            oid_mapped = {}

        if oid_mapped:
            if self.serial_num_flag:
                if oid_mapped.get('serial-number') not in \
                        self.all_serial_num:
                    self.logger.info(
                        "Serial number {} for host {} not present "
                        "in fabric_namespace, nothing to update "
                        "in DB".format(
                            oid_mapped.get('serial-number'), host))
                    return

            # Use the user-input hostname if there is one. If it is None,
            # check for the hostname derived from the device system info. If
            # that is also missing, set the hostname to the serial number.
            user_input_hostname = None
            if self.job_ctx.get('job_input').get('device_to_ztp') is not None:
                user_input_hostname = \
                    self.get_hostname_from_job_input(oid_mapped.get(
                        'serial-number'))
            if user_input_hostname is not None:
                oid_mapped['hostname'] = user_input_hostname
            elif oid_mapped.get('hostname') is None:
                oid_mapped['hostname'] = oid_mapped.get('serial-number')

            fq_name = [
                'default-global-system-config',
                oid_mapped.get('hostname')]
            return_code, pr_uuid = self._pr_object_create_update(
                oid_mapped, fq_name, False)
            if return_code == REF_EXISTS_ERROR:
                physicalrouter = self.vncapi.physical_router_read(
                    fq_name=fq_name)
                phy_router = self.vncapi.obj_to_dict(physicalrouter)
                if (phy_router.get('physical_router_management_ip')
                        == oid_mapped.get('host')):
                    self.logger.info(
                        "Device with same mgmt ip already exists {}".format(
                            phy_router.get('physical_router_management_ip')))
                    return_code, pr_uuid = self._pr_object_create_update(
                        oid_mapped, fq_name, True)
                else:
                    fq_name = [
                        'default-global-system-config',
                        oid_mapped.get('hostname') +
                        '_' +
                        oid_mapped.get('host')]
                    return_code, pr_uuid = self._pr_object_create_update(
                        oid_mapped, fq_name, False)
                    if return_code == REF_EXISTS_ERROR:
                        self.logger.debug("Object already exists")
            if return_code is True:
                self.vncapi.ref_update(
                    "physical_router", pr_uuid, "fabric", self.fabric_uuid,
                    None, "ADD")
                self.logger.info(
                    "Fabric updated with physical router info for "
                    "host: {}".format(host))
                temp = {}
                temp['device_management_ip'] = oid_mapped.get('host')
                temp['device_fqname'] = fq_name
                temp['device_username'] = oid_mapped.get('username')
                temp['device_password'] = oid_mapped.get('password')
                temp['device_family'] = oid_mapped.get('family')
                temp['device_vendor'] = oid_mapped.get('vendor')
                temp['device_product'] = oid_mapped.get('product')
                temp['device_serial_number'] = oid_mapped.get('serial-number')
                DeviceInfo.output.update({pr_uuid: temp})

    def discovery_percentage_write(self):
        if self.module.results.get('percentage_completed'):
            exec_id = self.job_ctx.get('job_execution_id')
            pb_id = self.job_ctx.get('unique_pb_id')
            self._job_file_write.write_to_file(
                exec_id, pb_id, JobFileWrite.JOB_PROGRESS,
                str(self.module.results.get('percentage_completed'))
            )
コード例 #41
0
class VncMod(object):
    """
    This class encaptulates vnc crud apis and some action apis in the Ansible
    module to make it easy for Ansible playbook to invoke vnc apis
    """
    JOB_IN_PROGRESS = 1

    def __init__(self, module):
        self.cls = None

        # Fetch module params
        self.job_ctx = module.params['job_ctx']
        self.object_type = module.params['object_type']
        self.object_op = module.params['object_op']
        self.object_dict = module.params['object_dict']
        self.object_list = module.params['object_list']
        self.update_obj = module.params['update_obj_if_present']
        self.enable_job_ctx = module.params['enable_job_ctx']

        # additional validation on top of argument_spec
        self._validate_params()

        # initialize vnc_lib
        self._init_vnc_lib()

    # end __init__

    def _validate_params(self):
        if self.enable_job_ctx:
            required_keys = [
                'auth_token', 'job_template_fqname', 'job_execution_id',
                'config_args', 'job_input'
            ]
        else:
            required_keys = ['auth_token']
        for key in required_keys:
            if key not in self.job_ctx or self.job_ctx.get(key) is None:
                raise ValueError("Missing job context param: %s" % key)

    # end _validate_params

    def _init_vnc_lib(self):
        # Instantiate the VNC library
        # Retry for some time, until the API server is up
        errmsg = None
        self.vnc_lib = None
        for i in range(0, 10):
            try:
                self.vnc_lib = VncApi(
                    auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                    auth_token=self.job_ctx.get('auth_token'))
                break
            except Exception as ex:
                time.sleep(10)
                errmsg = "Failed to connect to API server due to error: %s"\
                    % str(ex)

        if self.vnc_lib is None:
            raise RuntimeError(errmsg)

    # end __init_vnc_lib

    def do_oper(self):
        """
        vnc db crud operations
        """
        # Get the class name from object type
        cls_name = camelize(self.object_type)

        # Get the class object
        self.cls = self._str_to_class(cls_name)

        if self.cls is None:
            raise ValueError("Invalid object_type: %s" % self.object_type)

        # Create the object if it does not exist; otherwise update it
        results = None
        if self.object_op == 'create':
            results = self._create_oper()

        elif self.object_op == 'update':
            results = self._update_oper()

        elif self.object_op == 'bulk_create':
            results = self._bulk_create_oper()

        elif self.object_op == 'bulk_update':
            results = self._bulk_update_oper()

        elif self.object_op == 'delete':
            results = self._delete_oper()

        elif self.object_op == 'read':
            results = self._read_oper()

        elif self.object_op == 'list':
            results = self._list_oper()

        elif self.object_op == 'ref_update' or self.object_op == 'ref_delete':
            results = self._ref_update_delete_oper()

        elif self.object_op == 'fq_name_to_id':
            results = self._fq_name_to_id_oper()

        elif self.object_op == 'id_to_fq_name':
            results = self._id_to_fq_name_oper()
        else:
            raise ValueError("Unsupported operation '%s' for object type '%s'",
                             self.object_op, self.object_type)

        return results

    # end do_oper

    def _obtain_vnc_method(self, operation, prepend_object_type=True):
        method_name = self.object_type + operation \
            if prepend_object_type else operation
        method = self._str_to_vnc_method(method_name)
        if method is None:
            raise ValueError("Operation '%s' is not supported on '%s'",
                             self.object_op, self.object_type)

        return method

    # end _obtain_vnc_method

    def _create_single(self, obj_dict):
        method = self._obtain_vnc_method('_create')
        results = dict()
        try:
            if obj_dict.get('uuid') is None:
                obj_dict['uuid'] = None
            instance_obj = self.cls.from_dict(**obj_dict)
            obj_uuid = method(instance_obj)
            results['uuid'] = obj_uuid
        except RefsExistError as ex:
            if self.update_obj:
                # Try to update the object if it already exists and the
                # 'update_obj_if_present' flag is set
                results = self._update_single(obj_dict)
            else:
                # This is the case where the caller does not want to update
                # the object. Set failed to True and let the caller decide.
                results['failed'] = True
                results['msg'] = \
                    "Failed to create object in database as object exists "\
                    "with same uuid '%s' or fqname '%s'" % \
                    (obj_dict.get('uuid'), obj_dict.get('fq_name'))
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to create object (uuid='%s', fq_name='%s') in the "\
                "database due to error: %s" % \
                (obj_dict.get('uuid'), obj_dict.get('fq_name'), str(ex))
        return results

    # end _create_single

    def _update_single(self, ob_dict):
        method = self._obtain_vnc_method('_update')
        results = dict()
        try:
            uuid = ob_dict.get('uuid')
            fq_name = ob_dict.get('fq_name')

            if uuid and not fq_name:
                # Get the fq_name from uuid
                ob_dict['fq_name'] = self.vnc_lib.id_to_fq_name(uuid)
            elif fq_name and not uuid:
                ob_dict['uuid'] = None

            instance_obj = self.cls.from_dict(**ob_dict)
            obj = method(instance_obj)
            obj_name = self.object_type.replace('_', '-')
            results['uuid'] = \
                ast.literal_eval(obj).get(obj_name).get('uuid')
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to update object (uuid='%s', fq_name='%s') in the "\
                "database due to error: %s" % (uuid, fq_name, str(ex))
        return results

    # end _update_single

    def _create_oper(self):
        results = dict()
        self.object_list = [self.object_dict]
        res = self._bulk_create_oper()
        results['uuid'] = res['list_uuids'][0] if res['list_uuids'] else None
        return results

    # end _create_oper

    def _bulk_create_oper(self):
        results = dict()
        results['list_uuids'] = []
        for ob_dict in self.object_list:
            res = self._create_single(ob_dict)
            if res.get('failed'):
                results['failed'] = True
                results['msg'] = res.get('msg')
                break
            else:
                results['list_uuids'].append(res['uuid'])
        return results

    # end _bulk_create_oper

    def _update_oper(self):
        results = dict()
        self.object_list = [self.object_dict]
        res = self._bulk_update_oper()
        results['uuid'] = res['list_uuids'][0] if res['list_uuids'] else None
        return results

    # end _update_oper

    def _bulk_update_oper(self):
        results = dict()
        results['list_uuids'] = []
        for ob_dict in self.object_list:
            res = self._update_single(ob_dict)
            if res.get('failed'):
                results['failed'] = True
                results['msg'] = res.get('msg')
                break
            else:
                results['list_uuids'].append(res['uuid'])
        return results

    # end _bulk_update_oper

    def _delete_oper(self):
        method = self._obtain_vnc_method('_delete')
        results = dict()
        obj_uuid = self.object_dict.get('uuid')
        obj_fq_name = self.object_dict.get('fq_name')

        try:
            if obj_uuid:
                method(id=obj_uuid)
            elif obj_fq_name:
                method(fq_name=obj_fq_name)
            else:
                results['failed'] = True
                results['msg'] = \
                    "Either uuid or fq_name should be present for delete"
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to delete object (uuid='%s', fq_name='%s') from the "\
                "database due to error: %s" % (obj_uuid, obj_fq_name, str(ex))
        return results

    # end _delete_oper

    def _read_oper(self):
        method = self._obtain_vnc_method('_read')
        results = dict()
        obj_uuid = self.object_dict.get('uuid')
        obj_fq_name = self.object_dict.get('fq_name')

        try:
            if obj_uuid:
                obj = method(id=obj_uuid)
            elif obj_fq_name:
                if isinstance(obj_fq_name, list):
                    obj = method(fq_name=obj_fq_name)
                else:
                    # convert str object to list
                    obj = method(fq_name=ast.literal_eval(obj_fq_name))
            else:
                results['failed'] = True
                results['msg'] = \
                    "Either uuid or fq_name should be present for read"
                return results

            results['obj'] = self.vnc_lib.obj_to_dict(obj)
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to read object (uuid='%s', fq_name='%s') from the "\
                "database due to error: %s" % (obj_uuid, obj_fq_name, str(ex))
        return results

    # end _read_oper

    def _list_oper(self):
        method = self._obtain_vnc_method('s_list')
        results = dict()
        filters = self.object_dict.get('filters')
        fields = self.object_dict.get('fields')
        back_ref_id = self.object_dict.get('back_ref_id')
        detail = self.object_dict.get('detail')
        try:
            if detail == 'True':
                objs = method(back_ref_id=back_ref_id,
                              filters=filters,
                              fields=fields,
                              detail=True)
                results['obj'] = []
                for obj in objs:
                    results['obj'].append(self.vnc_lib.obj_to_dict(obj))
            else:
                obj = method(back_ref_id=back_ref_id,
                             filters=filters,
                             fields=fields)
                results['obj'] = obj
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to list objects due to error: %s" % str(ex)
        return results

    # end _list_oper

    def _ref_update_delete_oper(self):
        results = dict()
        method = self._obtain_vnc_method(self.object_op, False)

        # Get the input params from the object dict
        obj_type = self.object_type.replace('_', '-')
        obj_uuid = self.object_dict.get('obj_uuid')
        ref_type = self.object_dict.get('ref_type')
        ref_uuid = self.object_dict.get('ref_uuid')
        ref_fqname = self.object_dict.get('ref_fqname')

        try:
            if self.object_op == 'ref_update':
                obj_uuid = method(obj_type, obj_uuid, ref_type, ref_uuid,
                                  ref_fqname, 'ADD')
            else:
                obj_uuid = method(obj_type, obj_uuid, ref_type, ref_uuid,
                                  ref_fqname, 'DELETE')
            results['uuid'] = obj_uuid
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to update ref (%s, %s) -> (%s, %s, %s) due to error: %s"\
                % (obj_type, obj_uuid, ref_type, ref_uuid, ref_fqname, str(ex))
        return results

    # _ref_update_delete_oper

    def _fq_name_to_id_oper(self):
        method = self._obtain_vnc_method(self.object_op, False)
        results = dict()
        try:
            obj_type = self.object_type.replace('_', '-')
            if isinstance(self.object_dict.get('fq_name'), list):
                obj_fq_name = self.object_dict.get('fq_name')
            else:
                # convert str object to list
                obj_fq_name = ast.literal_eval(self.object_dict.get('fq_name'))
            obj_uuid = method(obj_type, obj_fq_name)
            results['uuid'] = obj_uuid
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to retrieve uuid for (%s, %s) due to error: %s"\
                % (obj_type, obj_fq_name, str(ex))
        return results

    # _fq_name_to_id_oper

    def _id_to_fq_name_oper(self):
        method = self._obtain_vnc_method(self.object_op, False)
        results = dict()
        try:
            obj_uuid = self.object_dict.get('uuid')
            obj_fq_name = method(obj_uuid)
            results['fq_name'] = obj_fq_name
        except Exception as ex:
            results['failed'] = True
            results['msg'] = \
                "Failed to retrive fq_name by uuid '%s' due to error: %s"\
                % (obj_uuid, str(ex))
        return results

    # end _id_to_fq_name_oper

    def _str_to_class(self, cls_name):
        return getattr(vnc_api.gen.resource_client, cls_name, None)

    # end _str_to_class

    def _str_to_vnc_method(self, method_name):
        return getattr(self.vnc_lib, method_name, None)
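    # end _str_to_vnc_method

The module above is a thin wrapper around the generated VncApi client. As a minimal sketch of the calls it dispatches to (host, port and the example fq_name are placeholders, not taken from the module):

from vnc_api.vnc_api import VncApi

vnc_lib = VncApi(api_server_host='127.0.0.1', api_server_port=8082)

# fq_name <-> uuid translation, as wrapped by _fq_name_to_id_oper / _id_to_fq_name_oper
vn_uuid = vnc_lib.fq_name_to_id(
    'virtual-network',
    ['default-domain', 'default-project', 'default-virtual-network'])
print(vnc_lib.id_to_fq_name(vn_uuid))

# ref ADD / DELETE, as wrapped by _ref_update_delete_oper (uuids are placeholders)
# vnc_lib.ref_update('virtual-machine-interface', vmi_uuid,
#                    'security-group', sg_uuid, None, 'ADD')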
Code example #42
class LoadBalancerPluginDb(LoadBalancerPluginBase):

    def __init__(self):
        # TODO: parse configuration for api-server:port and auth
        self._api = VncApi()
        self._pool_manager = \
            loadbalancer_pool.LoadbalancerPoolManager(self._api)
        self._vip_manager = virtual_ip.VirtualIpManager(self._api)
        self._member_manager = \
            loadbalancer_member.LoadbalancerMemberManager(self._api)
        self._monitor_manager = \
            loadbalancer_healthmonitor.LoadbalancerHealthmonitorManager(
                self._api)

    def get_api_client(self):
        return self._api

    def get_vips(self, context, filters=None, fields=None):
        return self._vip_manager.get_collection(context, filters, fields)

    def get_vip(self, context, id, fields=None):
        return self._vip_manager.get_resource(context, id, fields)

    def create_vip(self, context, vip):
        return self._vip_manager.create(context, vip)

    def update_vip(self, context, id, vip):
        return self._vip_manager.update(context, id, vip)

    def delete_vip(self, context, id):
        return self._vip_manager.delete(context, id)

    def get_pools(self, context, filters=None, fields=None):
        return self._pool_manager.get_collection(context, filters, fields)

    def get_pool(self, context, id, fields=None):
        return self._pool_manager.get_resource(context, id, fields)

    def create_pool(self, context, pool):
        return self._pool_manager.create(context, pool)

    def update_pool(self, context, id, pool):
        return self._pool_manager.update(context, id, pool)

    def delete_pool(self, context, id):
        return self._pool_manager.delete(context, id)

    def stats(self, context, pool_id):
        pass

    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """ Associate an health monitor with a pool.
        """
        m = health_monitor['health_monitor']
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=m['id'])
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=m['id'])

        if not context.is_admin:
            tenant_id = context.tenant_id
            if tenant_id != pool.parent_uuid or \
                    tenant_id != monitor.parent_uuid:
                raise n_exc.NotAuthorized()

        pool_refs = monitor.get_loadbalancer_pool_back_refs()
        if pool_refs is not None:
            for ref in pool_refs:
                if ref['uuid'] == pool_id:
                    raise loadbalancer.PoolMonitorAssociationExists(
                        monitor_id=m['id'], pool_id=pool_id)

        pool.add_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

        res = {
            'id': monitor.uuid,
            'tenant_id': monitor.parent_uuid
        }
        return res

    def get_pool_health_monitor(self, context, id, pool_id, fields=None):
        """ Query a specific pool, health_monitor association.
        """
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)
        if not context.is_admin and context.tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

        res = {
            'pool_id': pool_id,
            'monitor_id': id,
            'status': self._pool_manager._get_object_status(pool),
            'tenant_id': pool.parent_uuid
        }
        return self._pool_manager._fields(res, fields)

    def delete_pool_health_monitor(self, context, id, pool_id):
        try:
            pool = self._api.loadbalancer_pool_read(id=pool_id)
        except NoIdError:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)
        if not context.is_admin and context.tenant_id != pool.parent_uuid:
            raise loadbalancer.PoolNotFound(pool_id=pool_id)

        try:
            monitor = self._api.loadbalancer_healthmonitor_read(id=id)
        except NoIdError:
            raise loadbalancer.HealthMonitorNotFound(monitor_id=id)

        in_list = False
        for mref in pool.get_loadbalancer_healthmonitor_refs() or []:
            if mref['uuid'] == id:
                in_list = True
                break

        if not in_list:
            raise loadbalancer.PoolMonitorAssociationNotFound(
                monitor_id=id, pool_id=pool_id)

        pool.del_loadbalancer_healthmonitor(monitor)
        self._api.loadbalancer_pool_update(pool)

    def get_members(self, context, filters=None, fields=None):
        return self._member_manager.get_collection(context, filters, fields)

    def get_member(self, context, id, fields=None):
        return self._member_manager.get_resource(context, id, fields)

    def create_member(self, context, member):
        return self._member_manager.create(context, member)

    def update_member(self, context, id, member):
        return self._member_manager.update(context, id, member)

    def delete_member(self, context, id):
        return self._member_manager.delete(context, id)

    def get_health_monitors(self, context, filters=None, fields=None):
        return self._monitor_manager.get_collection(context, filters, fields)

    def get_health_monitor(self, context, id, fields=None):
        return self._monitor_manager.get_resource(context, id, fields)

    def create_health_monitor(self, context, health_monitor):
        return self._monitor_manager.create(context, health_monitor)

    def update_health_monitor(self, context, id, health_monitor):
        return self._monitor_manager.update(context, id, health_monitor)

    def delete_health_monitor(self, context, id):
        return self._monitor_manager.delete(context, id)
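
A hypothetical usage sketch of the plugin DB above; 'context' stands in for the Neutron request context, and the pool name filter is illustrative only:

db = LoadBalancerPluginDb()

# List pools visible to the caller, then print their LBaaS v1 dict fields.
pools = db.get_pools(context, filters={'name': ['web-pool']})
for pool in pools:
    print(pool['id'], pool['name'])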
Code example #43
    def __init__(self, args=None):
        self._args = args

        # Initialize discovery client
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(
                self._args.disc_server_ip,
                self._args.disc_server_port,
                ModuleNames[Module.DEVICE_MANAGER])
        
        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(bool(self._args.push_delay_enable))

        self._sandesh = Sandesh()
        # Reset the sandesh send rate limit value
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                self._args.sandesh_send_rate_limit)
        module = Module.DEVICE_MANAGER
        module_name = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        self.table = "ObjectConfigNode"
        instance_id = INSTANCE_ID_DEFAULT
        hostname = socket.gethostname()
        self._sandesh.init_generator(
            module_name, hostname, node_type_name, instance_id,
            self._args.collectors, 'to_bgp_context',
            int(args.http_server_port),
            ['cfgm_common', 'device_manager.sandesh'], self._disc)
        self._sandesh.set_logging_params(enable_local_log=args.log_local,
                                         category=args.log_category,
                                         level=args.log_level,
                                         file=args.log_file,
                                         enable_syslog=args.use_syslog,
                                         syslog_facility=args.syslog_facility)
        PhysicalRouterDM._sandesh = self._sandesh
        ConnectionState.init(
            self._sandesh, hostname, module_name, instance_id,
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.table)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user, args.admin_password,
                    args.admin_tenant_name, args.api_server_ip,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        rabbit_servers = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        self._db_resync_done = gevent.event.Event()

        q_name = 'device_manager.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_servers, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.config_log, rabbit_use_ssl =
                                         self._args.rabbit_use_ssl,
                                         kombu_ssl_version =
                                         self._args.kombu_ssl_version,
                                         kombu_ssl_keyfile =
                                         self._args.kombu_ssl_keyfile,
                                         kombu_ssl_certfile =
                                         self._args.kombu_ssl_certfile,
                                         kombu_ssl_ca_certs =
                                         self._args.kombu_ssl_ca_certs)

        self._cassandra = DMCassandraDB.getInstance(self, _zookeeper_client)

        DBBaseDM.init(self, self._sandesh.logger(), self._cassandra)
        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            vn = VirtualNetworkDM.locate(obj['uuid'], obj)
            if vn is not None and vn.routing_instances is not None:
                for ri_id in vn.routing_instances:
                    ri_obj = RoutingInstanceDM.locate(ri_id)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._cassandra.handle_pr_deletes(pr_uuid_set)

        for obj in PortTupleDM.list_obj():
            PortTupleDM.locate(obj['uuid'], obj)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            vmi_set = set()
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
                    vmi_set |= pi.virtual_machine_interfaces
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._cassandra.handle_pnf_resource_deletes(si_uuid_set)

        for obj in si_obj_list:
            ServiceInstanceDM.locate(obj['uuid'], obj)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()

        self._db_resync_done.set()
        gevent.joinall(self._vnc_kombu.greenlets())
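
The retry-until-connected loop above is a reusable pattern. A sketch that factors it out, assuming (as the code above does) that only connection failures and haproxy 503s (ResourceExhaustionError) should be retried; the import path for ResourceExhaustionError is assumed:

import time
import requests
from vnc_api.vnc_api import VncApi
from cfgm_common.exceptions import ResourceExhaustionError


def connect_vnc_api(args, retry_interval=3):
    """Keep retrying until the API server answers (mirrors the loop above)."""
    while True:
        try:
            return VncApi(args.admin_user, args.admin_password,
                          args.admin_tenant_name, args.api_server_ip,
                          args.api_server_port,
                          api_server_use_ssl=args.api_server_use_ssl)
        except requests.exceptions.ConnectionError:
            time.sleep(retry_interval)
        except ResourceExhaustionError:  # haproxy returns 503 when overloaded
            time.sleep(retry_interval)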
Code example #44
class SanityBase(object):
    """Base class for fabric ansible sanity tests"""

    @staticmethod
    def _init_logging(cfg, name):
        logger = logging.getLogger('sanity_test')
        logger.setLevel(cfg['level'])

        file_handler = logging.FileHandler(
            '%s/fabric_ansible_%s.log' % (cfg['file']['dir'], name), mode='w')
        file_handler.setLevel(cfg['file']['level'])
        console_handler = logging.StreamHandler()
        console_handler.setLevel(cfg['console'])

        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-8s %(message)s',
            datefmt='%Y/%m/%d %H:%M:%S')
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        return logger
    # end _init_logging

    def test(self):
        """Override this method in the derived class"""
        pass

    def __init__(self, cfg, name):
        if cfg is None:
            raise KeyError("Missing required args: cfg")
        if name is None:
            raise KeyError("Missing required args: name")

        self._name = name
        self._timeout = cfg['wait_for_job']['timeout']
        self._max_retries = cfg['wait_for_job']['max_retries']
        self._logger = SanityBase._init_logging(cfg['log'], name)
        self._api_server = cfg['api_server']
        self._analytics = cfg['analytics']
        self._api = VncApi(
            api_server_host=self._api_server['host'],
            api_server_port=self._api_server['port'],
            username=self._api_server['username'],
            password=self._api_server['password'],
            tenant_name=self._api_server['tenant'])
    # end __init__

    def create_fabric(self, fab_name, prouter_passwords):
        """create fabric with list of device passwords"""
        self._logger.info('Creating fabric: %s', fab_name)
        fq_name = ['default-global-system-config', fab_name]
        fab = Fabric(
            name=fab_name,
            fq_name=fq_name,
            parent_type='global-system-config',
            fabric_credentials={
                'device_credential': [{
                    'credential': {
                        'username': '******', 'password': passwd
                    },
                    'vendor': 'Juniper',
                    'device_family': None
                } for passwd in prouter_passwords]
            }
        )
        try:
            fab_uuid = self._api.fabric_create(fab)
            fab = self._api.fabric_read(id=fab_uuid)
        except RefsExistError:
            self._logger.warn("Fabric '%s' already exists", fab_name)
            fab = self._api.fabric_read(fq_name=fq_name)

        self._logger.debug(
            "Fabric created:\n%s",
            pprint.pformat(self._api.obj_to_dict(fab), indent=4))
        return fab
    # end _create_fabric

    def add_mgmt_ip_namespace(self, fab, name, cidrs):
        """add management ip prefixes as fabric namespace"""
        ns_name = 'mgmt_ip-' + name
        self._logger.info(
            'Adding management ip namespace "%s" to fabric "%s" ...',
            ns_name, fab.name)

        subnets = []
        for cidr in cidrs:
            ip_prefix = cidr.split('/')
            subnets.append({
                'ip_prefix': ip_prefix[0],
                'ip_prefix_len': ip_prefix[1]
            })
        ns_fq_name = fab.fq_name + [ns_name]
        namespace = FabricNamespace(
            name=ns_name,
            fq_name=ns_fq_name,
            parent_type='fabric',
            fabric_namespace_type='IPV4-CIDR',
            fabric_namespace_value={
                'ipv4_cidr': {
                    'subnet': subnets
                },
            }
        )
        namespace.set_tag_list([{'to': ['label=fabric-management-ip']}])
        try:
            ns_uuid = self._api.fabric_namespace_create(namespace)
            namespace = self._api.fabric_namespace_read(id=ns_uuid)
        except RefsExistError:
            self._logger.warn(
                "Fabric namespace '%s' already exists", ns_name)
            namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)

        self._logger.debug(
            "Fabric namespace created:\n%s",
            pprint.pformat(self._api.obj_to_dict(namespace), indent=4))
        return namespace
    # end _add_mgmt_ip_namespace

    def add_asn_namespace(self, fab, asn):
        """add AS number as fabric namespace"""
        ns_name = "asn_%d" % asn
        self._logger.info(
            'Adding ASN namespace "%s" to fabric "%s" ...',
            ns_name, fab.name)

        ns_fq_name = fab.fq_name + [ns_name]
        namespace = FabricNamespace(
            name=ns_name,
            fq_name=ns_fq_name,
            parent_type='fabric',
            fabric_namespace_type='ASN',
            fabric_namespace_value={
                'asn': {
                    'asn': [asn]
                }
            }
        )
        namespace.set_tag_list([{'to': ['label=fabric-as-number']}])
        try:
            ns_uuid = self._api.fabric_namespace_create(namespace)
            namespace = self._api.fabric_namespace_read(id=ns_uuid)
        except RefsExistError:
            self._logger.warn(
                "Fabric namespace '%s' already exists", ns_name)
            namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)

        self._logger.debug(
            "Fabric namespace created:\n%s",
            pprint.pformat(self._api.obj_to_dict(namespace), indent=4))
        return namespace
    # end _add_asn_namespace

    def create_image(self, img_name, img_uri, img_version,
                     img_family, img_vendor):
        """create image"""
        img_fqname = None
        # device_fqname = None
        try:
            self._logger.info('Creating image: %s', img_name)
            img_fqname = ['default-global-system-config', img_name]
            image = DeviceImage(
                name=img_name,
                fq_name=img_fqname,
                parent_type='global-system-config',
                device_image_file_uri=img_uri,
                device_image_os_version=img_version,
                device_image_device_family=img_family,
                device_image_vendor_name=img_vendor
            )
            img_uuid = self._api.device_image_create(image)
            image = self._api.device_image_read(id=img_uuid)

        except RefsExistError:
            self._logger.warn("Image '%s' already exists", img_name)
            image = self._api.device_image_read(fq_name=img_fqname)

        self._logger.debug(
            "Image created:\n%s",
            pprint.pformat(self._api.obj_to_dict(image), indent=4))
        return image

    # end create_image

    def cleanup_fabric(self, fab_name):
        """delete fabric including all prouters in the fabric"""
        try:
            self._logger.info('Deleting fabric "%s" ...', fab_name)
            fab_fqname = ['default-global-system-config', fab_name]
            fab = self._api.fabric_read(fq_name=fab_fqname)

            # delete all namespaces in this fabric
            fab_namespaces = self._api.fabric_namespaces_list(
                parent_id=fab.uuid)
            for namespace in fab_namespaces.get('fabric-namespaces') or []:
                self._logger.debug(
                    "Delete namespace: %s", namespace.get('fq_name'))
                self._api.fabric_namespace_delete(namespace.get('fq_name'))

            # delete fabric
            self._logger.debug("Delete fabric: %s", fab_fqname)
            self._api.fabric_delete(fab_fqname)

            # delete all prouters in this fabric
            for prouter in fab.get_physical_router_back_refs() or []:
                self._delete_prouter(prouter.get('uuid'))

        except NoIdError:
            self._logger.warn('Fabric "%s" not found', fab_name)
    # end cleanup_fabric

    def cleanup_image(self, img_name):
        # image cleanup
        self._logger.info("Clean up image and prouter from db")
        try:
            img_fqname = ['default-global-system-config', img_name]
            img = self._api.device_image_read(fq_name=img_fqname)
            self._logger.debug(
                "Delete Image: %s", img_fqname)
            self._api.device_image_delete(img_fqname)

        except NoIdError:
            self._logger.warn('Image "%s" not found', img_name)

    def _delete_prouter(self, uuid):
        prouter = self._api.physical_router_read(id=uuid)

        # delete all physical and logical interfaces
        ifds = self._api.physical_interfaces_list(parent_id=uuid)
        for ifd in ifds.get('physical-interfaces')  or []:
            # delete all child logical interfaces
            ifls = self._api.logical_interfaces_list(parent_id=ifd.get('uuid'))
            for ifl in ifls.get('logical-interfaces') or []:
                self._logger.debug(
                    "Delete logical interface: %s", ifl.get('fq_name'))
                self._api.logical_interface_delete(ifl.get('fq_name'))

            # delete the physical interface
            self._logger.debug(
                "Delete physical interface: %s", ifd.get('fq_name'))
            self._api.physical_interface_delete(ifd.get('fq_name'))

        # delete the prouter
        self._logger.debug(
            "Delete physical router: %s", prouter.get_fq_name())
        self._api.physical_router_delete(prouter.get_fq_name())

        # delete corresponding bgp routers
        for bgp_router_ref in prouter.get_bgp_router_refs() or []:
            self._logger.debug(
                "Delete bgp router: %s", bgp_router_ref.get('to'))
            self._api.bgp_router_delete(bgp_router_ref.get('to'))
    # end _delete_prouter

    @staticmethod
    def _get_job_status_query_payload(job_execution_id, status):
        return {
            'start_time': 'now-5m',
            'end_time': 'now',
            'select_fields': ['MessageTS', 'Messagetype'],
            'table': 'ObjectJobExecutionTable',
            'where': [
                [
                    {
                        'name': 'ObjectId',
                        'value': "%s:%s" % (job_execution_id, status),
                        'op': 1
                    }
                ]
            ]
        }
    # end _get_job_status_query_payload

    @staticmethod
    def _check_job_status(url, job_execution_id, job_status):
        payload = SanityBase._get_job_status_query_payload(job_execution_id,
                                                           job_status)
        r = requests.post(url, json=payload)
        if r.status_code == 200:
            response = r.json()
            if len(response['value']) > 0:
                assert response['value'][0]['Messagetype'] == 'JobLog'
                return True
        return False
    # end _check_job_status

    def _wait_for_job_to_finish(self, job_name, job_execution_id):
        completed = "SUCCESS"
        failed = "FAILURE"
        url = "http://%s:%d/analytics/query" %\
              (self._analytics['host'], self._analytics['port'])
        retry_count = 0
        while True:
            # check if job completed successfully
            if SanityBase._check_job_status(url, job_execution_id, completed):
                self._logger.debug("%s job '%s' finished", job_name,
                                   job_execution_id)
                break
            # check if job failed
            if SanityBase._check_job_status(url, job_execution_id, failed):
                self._logger.debug("%s job '%s' failed", job_name,
                                   job_execution_id)
                raise Exception("%s job '%s' failed" %
                                (job_name, job_execution_id))
            if retry_count > self._max_retries:
                raise Exception("Timed out waiting for '%s' job to complete" %
                                job_name)
            retry_count += 1
            time.sleep(self._timeout)
    # end _wait_for_job_to_finish

    @staticmethod
    def _get_jobs_query_payload(job_execution_id, last_log_ts):
        now = time.time() * 1000000
        #print "***************** now=%i, last_log_ts=%i" % (now, last_log_ts)
        return {
            'start_time': int('%i' % last_log_ts),
            'end_time': int('%i' % now),
            'select_fields': ['MessageTS', 'Messagetype', 'ObjectId',
                              'ObjectLog'],
            'sort': 1,
            'sort_fields': ['MessageTS'],
            'table': 'ObjectJobExecutionTable',
            'where': [
                [
                    {
                        'name': 'ObjectId',
                        'value': "%s" % (job_execution_id),
                        'op': 7
                    },
                    {
                        'name': 'Messagetype',
                        'value': 'JobLog',
                        'op': 1
                    }
                ]
            ]
        }

    @staticmethod
    def _display_job_records(url, job_execution_id, last_log_ts,
                             percentage_complete, fabric_fq_name,
                             job_template_fq_name):
        log_ts = last_log_ts
        payload = SanityBase._get_jobs_query_payload(job_execution_id,
                                                     last_log_ts)
        r = requests.post(url, json=payload)
        if r.status_code == 200:
            response = r.json()
            if len(response['value']) > 0:
                # sort log entries by MessageTS
                log_entries = response['value']
                for log_entry in log_entries:
                    log_msg = json.loads(json.dumps\
                                  (xmltodict.parse(log_entry['ObjectLog'])))
                    log_text = log_msg['JobLog']['log_entry']\
                        ['JobLogEntry']['message']['#text']
                    log_device_name = log_msg['JobLog']['log_entry']\
                        ['JobLogEntry'].get('device_name')
                    if log_device_name:
                        log_device_name = log_device_name.get('#text')
                    log_details = log_msg['JobLog']['log_entry']\
                        ['JobLogEntry'].get('details')
                    if log_details:
                        log_details = log_details.get('#text')
                    log_ts_us = int(log_entry['MessageTS'])
                    log_ts_ms = log_ts_us / 1000
                    log_ts_sec = log_ts_ms / 1000
                    log_ts_sec_gm = time.gmtime(log_ts_sec)
                    log_ts_fmt = time.strftime("%m/%d/%Y %H:%M:%S",
                                               log_ts_sec_gm) + ".%s" % \
                                               (str(log_ts_ms))[-3:]
                    if log_device_name:
                        print("[{}%] {}: [{}] {}".format(percentage_complete,
                                                             log_ts_fmt,
                                                             log_device_name,
                                                             log_text))
                    else:
                        print("[{}%] {}: {}".format(percentage_complete,
                                                             log_ts_fmt,
                                                             log_text))
                    print
                    if log_details:
                        pprint.pprint("[{}%] {}: ==> {}".format(percentage_complete,
                                                            log_ts_fmt,
                                                            log_details))
                        print
                    log_ts = (log_ts_us + 1)
                return True, log_ts
        else:
            print("RESPONSE: {}".format(r))
            log_ts = time.time() * 1000000
        return False, log_ts

    def _display_prouter_state(self, prouter_states, fabric_fq_name,
                               job_template_fq_name):
        fabric_fqname = ':'.join(map(str, fabric_fq_name))
        job_template_fqname = ':'.join(map(str, job_template_fq_name))

        for prouter_name, prouter_state in prouter_states.iteritems():
            prouter_fqname = "default-global-system-config:%s" % prouter_name
            url = "http://%s:%d/analytics/uves/job-execution/%s:%s:%s?flat" %\
                  (self._analytics['host'],
                   self._analytics['port'],
                   prouter_fqname,
                   fabric_fqname,
                   job_template_fqname
                   )
            r = requests.get(url)
            if r.status_code == 200:
                response = r.json()
                jobex = response.get('PhysicalRouterJobExecution')
                if jobex:
                    new_prouter_state = jobex.get('prouter_state')
                    if isinstance(new_prouter_state, list):
                        prouter_entry = [e for e in new_prouter_state if \
                                         "FabricAnsible" in e[1]]
                        new_prouter_state = prouter_entry[0][0]
                    if new_prouter_state != prouter_state:
                        prouter_states[prouter_name] = new_prouter_state
                        pprint.pprint("-----> {} state: {} <-----".\
                                      format(prouter_name, new_prouter_state))
                        print("")
            else:
                print("BAD RESPONSE for {}: {}".format(prouter_name, r))

    def _wait_and_display_job_progress(self, job_name, job_execution_id,
                                       fabric_fq_name, job_template_fq_name,
                                       prouter_name_list=None):
        prouter_states = {}
        if prouter_name_list:
            for prouter_name in prouter_name_list:
                prouter_states[prouter_name] = ""

        completed = "SUCCESS"
        failed = "FAILURE"
        url = "http://%s:%d/analytics/query" %\
              (self._analytics['host'], self._analytics['port'])
        retry_count = 0
        last_log_ts = time.time() * 1000000
        while True:
            # get job percentage complete
            percentage_complete = self._get_job_percentage_complete\
                (job_execution_id, fabric_fq_name, job_template_fq_name)
            # display job records
            status, last_log_ts = SanityBase._display_job_records\
                (url, job_execution_id, last_log_ts, percentage_complete,
                 fabric_fq_name, job_template_fq_name)
            if status:
                self._logger.debug("%s job '%s' log records non-zero status",
                                   job_name, job_execution_id)
            # Display prouter state, if applicable
            self._display_prouter_state(prouter_states, fabric_fq_name,
                                        job_template_fq_name)
            # check if job completed successfully
            if SanityBase._check_job_status(url, job_execution_id, completed):
                self._logger.debug("%s job '%s' finished", job_name,
                                   job_execution_id)
                break
            # check if job failed
            if SanityBase._check_job_status(url, job_execution_id, failed):
                self._logger.debug("%s job '%s' failed", job_name,
                                   job_execution_id)
                raise Exception("%s job '%s' failed" %
                                (job_name, job_execution_id))

            # Check for timeout
            if retry_count > self._max_retries:
                raise Exception("Timed out waiting for '%s' job to complete" %
                                job_name)
            retry_count += 1
            time.sleep(self._timeout)

    def _get_job_percentage_complete(self, job_execution_id, fabric_fq_name,
                                     job_template_fq_name):
        url = "http://%s:%d/analytics/uves/job-execution/%s:%s:%s:%s" %\
              (self._analytics['host'], self._analytics['port'],
               fabric_fq_name[0], fabric_fq_name[1],
               job_template_fq_name[0], job_template_fq_name[1])
        r = requests.get(url)
        if r.status_code == 200:
            response = r.json()
            job_uve = response.get('FabricJobExecution')
            if job_uve:
                percomp = "?"
                for pc in job_uve['percentage_completed']:
                    if job_execution_id in pc[1]:
                        percomp = pc[0]["#text"]
                        break
                return percomp
            else:
                return "??"
        else:
            return "???"

    def discover_fabric_device(self, fab):
        """Discover all devices specified by the fabric management namespaces
        """
        self._logger.info('Discover devices in fabric "%s" ...', fab.fq_name)
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'discover_device_template'],
            job_input={'fabric_uuid': fab.uuid}
        )

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Device discovery job started with execution id: %s",
            job_execution_id)
        self._wait_for_job_to_finish('Device discovery', job_execution_id)

        fab = self._api.fabric_read(fab.fq_name)
        discovered_prouter_refs = fab.get_physical_router_back_refs()
        self._logger.debug(
            "Disovered devices:\n%s",
            pprint.pformat(discovered_prouter_refs, indent=4))

        msg = "Discovered following devices in fabric '%s':" % fab.fq_name
        discovered_prouters = []
        for prouter_ref in discovered_prouter_refs:
            prouter = self._api.physical_router_read(prouter_ref.get('to'))
            discovered_prouters.append(prouter)
            msg += "\n - %s (%s)" % (
                prouter.name, prouter.physical_router_management_ip)

        self._logger.info(msg)
        return discovered_prouters
    # end discover_fabric_device

    def device_import(self, prouters):
        """import device inventories for the prouters specified in the
        argument"""
        self._logger.info("Import all discovered prouters in the fabric ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'device_import_template'],
            job_input={},
            device_list=[prouter.uuid for prouter in prouters]
        )

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Device import job started with execution id: %s", job_execution_id)
        self._wait_for_job_to_finish('Device import', job_execution_id)

        for prouter in prouters:
            ifd_refs = self._api.physical_interfaces_list(
                parent_id=prouter.uuid)
            self._logger.info(
                "Imported %d physical interfaces to prouter: %s",
                len(ifd_refs.get('physical-interfaces')), prouter.name)
    # end device_import

    def underlay_config(self, prouters):
        """deploy underlay config to prouters in the fabric ..."""
        self._logger.info("Deploy underlay config to prouters in fabric ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'generate_underlay_template'],
            job_input={
                'enable_lldp': 'true'
            },
            device_list=[prouter.uuid for prouter in prouters]
        )

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Device import job started with execution id: %s", job_execution_id)
        self._wait_for_job_to_finish('Underlay config', job_execution_id)
    # end underlay_config

    def image_upgrade(self, image, device, fabric):
        """upgrade the physical routers with specified images"""
        self._logger.info("Upgrade image on the physical router ...")
        job_template_fq_name = [
            'default-global-system-config',
            'image_upgrade_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={'image_uuid': image.uuid},
            device_list=[device.uuid]
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Image upgrade job started with execution id: %s", job_execution_id)
        self._wait_and_display_job_progress('Image upgrade', job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name)

    # end image_upgrade

    def image_upgrade_maintenance_mode(self, device_list, image_upgrade_list,
                                       advanced_params, upgrade_mode,
                                       fabric, prouter_name_list):
        job_template_fq_name = [
            'default-global-system-config', 'hitless_upgrade_strategy_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'image_devices': image_upgrade_list,
                'advanced_parameters': advanced_params,
                'upgrade_mode': upgrade_mode,
                'fabric_uuid': fabric.uuid
            },
            device_list=device_list
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Maintenance mode upgrade job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Image upgrade', job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name,
                                            prouter_name_list=prouter_name_list)
    #end image_upgrade_maintenance_mode

    def activate_maintenance_mode(self, device_uuid, mode,
                                  fabric, advanced_parameters,
                                  prouter_name_list):
        job_template_fq_name = [
            'default-global-system-config', 'maintenance_mode_activate_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'device_uuid': device_uuid,
                'fabric_uuid': fabric.uuid,
                'mode': mode,
                'advanced_parameters': advanced_parameters
            },
            device_list=[device_uuid]
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Maintenance mode activation job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Maintenance mode activation',
                                            job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name,
                                            prouter_name_list=prouter_name_list)
    #end activate_maintenance_mode

    def deactivate_maintenance_mode(self, device_uuid, fabric,
                                    advanced_parameters, prouter_name_list):
        job_template_fq_name = [
            'default-global-system-config', 'maintenance_mode_deactivate_template']
        job_execution_info = self._api.execute_job(
            job_template_fq_name=job_template_fq_name,
            job_input={
                'device_uuid': device_uuid,
                'fabric_uuid': fabric.uuid,
                'advanced_parameters': advanced_parameters
            },
            device_list=[device_uuid]
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "Maintenance mode deactivation job started with execution id: %s",
            job_execution_id)
        self._wait_and_display_job_progress('Maintenance mode deactivation',
                                            job_execution_id,
                                            fabric.fq_name,
                                            job_template_fq_name,
                                            prouter_name_list=prouter_name_list)
    #end deactivate_maintenance_mode

    def ztp(self, fabric_uuid):
        """run ztp for a fabric"""
        self._logger.info("Running ZTP for fabric...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'ztp_template'],
            job_input={'fabric_uuid': fabric_uuid, 'device_count': 1}
        )
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info(
            "ZTP job started with execution id: %s", job_execution_id)
        self._wait_for_job_to_finish('ZTP', job_execution_id)

    # end ztp

    def _exit_with_error(self, errmsg):
        self._logger.error(errmsg)
        sys.exit(1)
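
A derived sanity test would typically chain these helpers together. A hypothetical sketch, with the fabric name, device password, management CIDR and ASN as placeholders:

class SanityTestUnderlay(SanityBase):
    """Hypothetical end-to-end test built on the helpers above."""

    def __init__(self, cfg):
        SanityBase.__init__(self, cfg, 'sanity_test_underlay')

    def test(self):
        fab = self.create_fabric('fab01', ['device-password'])
        self.add_mgmt_ip_namespace(fab, 'lab', ['10.1.1.0/24'])
        self.add_asn_namespace(fab, 64512)
        prouters = self.discover_fabric_device(fab)
        self.device_import(prouters)
        self.underlay_config(prouters)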
Code example #45
File: hc_new_2.py  Project: nuthanc/bgpaas_scale
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name = ['default-domain', 'admin']
ipam_fq_name = ['default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.87.64.129',
                 api_server_port=8082,
                 auth_host='10.87.64.129')

#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
#import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid'])
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1)
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)
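
The commented-out block above outlines a health-check tuning pass. Untangled, and assuming the health checks are children of the tenant project read via project_read, it would look roughly like this (not verified against a live cluster):

proj_obj = vnc_lib.project_read(fq_name=tenant_name)
for hc in proj_obj.get_service_health_checks() or []:
    hc_obj = vnc_lib.service_health_check_read(id=hc['uuid'])
    prop = hc_obj.get_service_health_check_properties()
    # Tighten probe timers; the values are examples only.
    prop.set_delay(1)
    prop.set_delayUsecs(10000)
    prop.set_max_retries(5)
    prop.set_timeoutUsecs(10000)
    hc_obj.set_service_health_check_properties(prop)
    vnc_lib.service_health_check_update(hc_obj)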
Code example #46
class SanityBase(object):
    """Base class for fabric ansible sanity tests"""
    @staticmethod
    def _init_logging(cfg, name):
        logger = logging.getLogger('sanity_test')
        logger.setLevel(cfg['level'])

        file_handler = logging.FileHandler('%s/fabric_ansible_%s.log' %
                                           (cfg['file']['dir'], name),
                                           mode='w')
        file_handler.setLevel(cfg['file']['level'])
        console_handler = logging.StreamHandler()
        console_handler.setLevel(cfg['console'])

        formatter = logging.Formatter(
            '%(asctime)s %(levelname)-8s %(message)s',
            datefmt='%Y/%m/%d %H:%M:%S')
        file_handler.setFormatter(formatter)
        console_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        logger.addHandler(console_handler)

        return logger

    # end _init_logging

    def test(self):
        """Override this method in the derived class"""
        pass

    def __init__(self, cfg, name):
        if cfg is None:
            raise KeyError("Missing required args: cfg")
        if name is None:
            raise KeyError("Missing required args: name")

        self._name = name
        self._timeout = cfg['wait_for_job']['timeout']
        self._max_retries = cfg['wait_for_job']['max_retries']
        self._logger = SanityBase._init_logging(cfg['log'], name)
        self._api_server = cfg['api_server']
        self._analytics = cfg['analytics']
        self._api = VncApi(api_server_host=self._api_server['host'],
                           api_server_port=self._api_server['port'],
                           username=self._api_server['username'],
                           password=self._api_server['password'],
                           tenant_name=self._api_server['tenant'])

    # end __init__

    def create_fabric(self, fab_name, prouter_passwords):
        """create fabric with list of device passwords"""
        self._logger.info('Creating fabric: %s', fab_name)
        fq_name = ['default-global-system-config', fab_name]
        fab = Fabric(name=fab_name,
                     fq_name=fq_name,
                     parent_type='global-system-config',
                     fabric_credentials={
                         'device_credential': [{
                             'credential': {
                                 'username': '******',
                                 'password': passwd
                             },
                             'vendor': 'Juniper',
                             'device_family': None
                         } for passwd in prouter_passwords]
                     })
        try:
            fab_uuid = self._api.fabric_create(fab)
            fab = self._api.fabric_read(id=fab_uuid)
        except RefsExistError:
            self._logger.warn("Fabric '%s' already exists", fab_name)
            fab = self._api.fabric_read(fq_name=fq_name)

        self._logger.debug(
            "Fabric created:\n%s",
            pprint.pformat(self._api.obj_to_dict(fab), indent=4))
        return fab

    # end _create_fabric

    def add_mgmt_ip_namespace(self, fab, name, cidrs):
        """add management ip prefixes as fabric namespace"""
        ns_name = 'mgmt_ip-' + name
        self._logger.info(
            'Adding management ip namespace "%s" to fabric "%s" ...', ns_name,
            fab.name)

        subnets = []
        for cidr in cidrs:
            ip_prefix = cidr.split('/')
            subnets.append({
                'ip_prefix': ip_prefix[0],
                'ip_prefix_len': ip_prefix[1]
            })
        ns_fq_name = fab.fq_name + [ns_name]
        namespace = FabricNamespace(name=ns_name,
                                    fq_name=ns_fq_name,
                                    parent_type='fabric',
                                    fabric_namespace_type='IPV4-CIDR',
                                    fabric_namespace_value={
                                        'ipv4_cidr': {
                                            'subnet': subnets
                                        },
                                    })
        namespace.set_tag_list([{'to': ['label=fabric-management_ip']}])
        try:
            ns_uuid = self._api.fabric_namespace_create(namespace)
            namespace = self._api.fabric_namespace_read(id=ns_uuid)
        except RefsExistError:
            self._logger.warn("Fabric namespace '%s' already exists", ns_name)
            namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)

        self._logger.debug(
            "Fabric namespace created:\n%s",
            pprint.pformat(self._api.obj_to_dict(namespace), indent=4))
        return namespace

    # end _add_mgmt_ip_namespace

    def add_asn_namespace(self, fab, asn):
        """add AS number as fabric namespace"""
        ns_name = "asn_%d" % asn
        self._logger.info('Adding ASN namespace "%s" to fabric "%s" ...',
                          ns_name, fab.name)

        ns_fq_name = fab.fq_name + [ns_name]
        namespace = FabricNamespace(
            name=ns_name,
            fq_name=ns_fq_name,
            parent_type='fabric',
            fabric_namespace_type='ASN',
            fabric_namespace_value={'asn': {
                'asn': [asn]
            }})
        namespace.set_tag_list([{'to': ['label=fabric-as_number']}])
        try:
            ns_uuid = self._api.fabric_namespace_create(namespace)
            namespace = self._api.fabric_namespace_read(id=ns_uuid)
        except RefsExistError:
            self._logger.warn("Fabric namespace '%s' already exists", ns_name)
            namespace = self._api.fabric_namespace_read(fq_name=ns_fq_name)

        self._logger.debug(
            "Fabric namespace created:\n%s",
            pprint.pformat(self._api.obj_to_dict(namespace), indent=4))
        return namespace

    # end _add_asn_namespace

    def create_image(self, img_name, img_uri, img_version, img_family,
                     img_vendor):
        """create image"""
        img_fqname = None
        # device_fqname = None
        try:
            self._logger.info('Creating image: %s', img_name)
            img_fqname = ['default-global-system-config', img_name]
            image = DeviceImage(name=img_name,
                                fq_name=img_fqname,
                                parent_type='global-system-config',
                                device_image_file_uri=img_uri,
                                device_image_os_version=img_version,
                                device_image_device_family=img_family,
                                device_image_vendor_name=img_vendor)
            img_uuid = self._api.device_image_create(image)
            image = self._api.device_image_read(id=img_uuid)

        except RefsExistError:
            self._logger.warn("Image '%s' already exists", img_name)
            image = self._api.device_image_read(fq_name=img_fqname)

        self._logger.debug(
            "Image created:\n%s",
            pprint.pformat(self._api.obj_to_dict(image), indent=4))
        return image

    # end create_image

    def cleanup_fabric(self, fab_name):
        """delete fabric including all prouters in the fabric"""
        try:
            self._logger.info('Deleting fabric "%s" ...', fab_name)
            fab_fqname = ['default-global-system-config', fab_name]
            fab = self._api.fabric_read(fq_name=fab_fqname)

            # delete all namespaces in this fabric
            fab_namespaces = self._api.fabric_namespaces_list(
                parent_id=fab.uuid)
            for namespace in fab_namespaces.get('fabric-namespaces') or []:
                self._logger.debug("Delete namespace: %s",
                                   namespace.get('fq_name'))
                self._api.fabric_namespace_delete(namespace.get('fq_name'))

            # delete fabric
            self._logger.debug("Delete fabric: %s", fab_fqname)
            self._api.fabric_delete(fab_fqname)

            # delete all prouters in this fabric
            for prouter in fab.get_physical_router_refs() or []:
                self._delete_prouter(prouter.get('uuid'))

        except NoIdError:
            self._logger.warn('Fabric "%s" not found', fab_name)

    # end cleanup_fabric

    def cleanup_image(self, img_name):
        # image cleanup
        self._logger.info("Clean up image and prouter from db")
        try:
            img_fqname = ['default-global-system-config', img_name]
            img = self._api.device_image_read(fq_name=img_fqname)
            self._logger.debug("Delete Image: %s", img_fqname)
            self._api.device_image_delete(img_fqname)

        except NoIdError:
            self._logger.warn('Image "%s" not found', img_name)

    def _delete_prouter(self, uuid):
        prouter = self._api.physical_router_read(id=uuid)

        # delete all physical and logical interfaces
        ifds = self._api.physical_interfaces_list(parent_id=uuid)
        for ifd in ifds.get('physical-interfaces') or []:
            # delete all child logical interfaces
            ifls = self._api.logical_interfaces_list(parent_id=ifd.get('uuid'))
            for ifl in ifls.get('logical-interfaces') or []:
                self._logger.debug("Delete logical interface: %s",
                                   ifl.get('fq_name'))
                self._api.logical_interface_delete(ifl.get('fq_name'))

            # delete the physical interface
            self._logger.debug("Delete physical interface: %s",
                               ifd.get('fq_name'))
            self._api.physical_interface_delete(ifd.get('fq_name'))

        # delete the prouter
        self._logger.debug("Delete physical router: %s", prouter.get_fq_name())
        self._api.physical_router_delete(prouter.get_fq_name())

        # delete corresponding bgp routers
        for bgp_router_ref in prouter.get_bgp_router_refs() or []:
            self._logger.debug("Delete bgp router: %s",
                               bgp_router_ref.get('to'))
            self._api.bgp_router_delete(bgp_router_ref.get('to'))

    # end _delete_prouter

    @staticmethod
    def _get_job_status_query_payload(job_execution_id, status):
        return {
            'start_time':
            'now-5m',
            'end_time':
            'now',
            'select_fields': ['MessageTS', 'Messagetype'],
            'table':
            'ObjectJobExecutionTable',
            'where': [[{
                'name': 'ObjectId',
                'value': "%s:%s" % (job_execution_id, status),
                'op': 1
            }]]
        }

    # end _get_job_status_query_payload

    @staticmethod
    def _check_job_status(url, job_execution_id, job_status):
        payload = SanityBase._get_job_status_query_payload(
            job_execution_id, job_status)
        r = requests.post(url, json=payload)
        if r.status_code == 200:
            response = r.json()
            if len(response['value']) > 0:
                assert response['value'][0]['Messagetype'] == 'JobLog'
                return True
        return False

    # end _check_job_status

    def _wait_for_job_to_finish(self, job_name, job_execution_id):
        completed = "SUCCESS"
        failed = "FAILURE"
        url = "http://%s:%d/analytics/query" %\
              (self._analytics['host'], self._analytics['port'])
        retry_count = 0
        while True:
            # check if job completed successfully
            if SanityBase._check_job_status(url, job_execution_id, completed):
                self._logger.debug("%s job '%s' finished", job_name,
                                   job_execution_id)
                break
            # check if job failed
            if SanityBase._check_job_status(url, job_execution_id, failed):
                self._logger.debug("%s job '%s' failed", job_name,
                                   job_execution_id)
                raise Exception("%s job '%s' failed" %
                                (job_name, job_execution_id))
            if retry_count > self._max_retries:
                raise Exception("Timed out waiting for '%s' job to complete" %
                                job_name)
            retry_count += 1
            time.sleep(self._timeout)

    # end _wait_for_job_to_finish

    def discover_fabric_device(self, fab):
        """Discover all devices specified by the fabric management namespaces
        """
        self._logger.info('Discover devices in fabric "%s" ...', fab.fq_name)
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'discover_device_template'
            ],
            job_input={'fabric_uuid': fab.uuid})

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug(
            "Device discovery job started with execution id: %s",
            job_execution_id)
        self._wait_for_job_to_finish('Device discovery', job_execution_id)

        fab = self._api.fabric_read(fab.fq_name)
        discovered_prouter_refs = fab.get_physical_router_refs()
        self._logger.debug("Disovered devices:\n%s",
                           pprint.pformat(discovered_prouter_refs, indent=4))

        msg = "Discovered following devices in fabric '%s':" % fab.fq_name
        discovered_prouters = []
        for prouter_ref in discovered_prouter_refs:
            prouter = self._api.physical_router_read(prouter_ref.get('to'))
            discovered_prouters.append(prouter)
            msg += "\n - %s (%s)" % (prouter.name,
                                     prouter.physical_router_management_ip)

        self._logger.info(msg)
        return discovered_prouters

    # end discover_fabric_device

    def device_import(self, prouters):
        """import device inventories for the prouters specified in the
        argument"""
        self._logger.info("Import all discovered prouters in the fabric ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'device_import_template'
            ],
            job_input={},
            device_list=[prouter.uuid for prouter in prouters])

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug("Device import job started with execution id: %s",
                           job_execution_id)
        self._wait_for_job_to_finish('Device import', job_execution_id)

        for prouter in prouters:
            ifd_refs = self._api.physical_interfaces_list(
                parent_id=prouter.uuid)
            self._logger.info("Imported %d physical interfaces to prouter: %s",
                              len(ifd_refs.get('physical-interfaces')),
                              prouter.name)

    # end device_import

    def underlay_config(self, prouters):
        """deploy underlay config to prouters in the fabric ..."""
        self._logger.info("Deploy underlay config to prouters in fabric ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'generate_underlay_template'
            ],
            job_input={'enable_lldp': 'true'},
            device_list=[prouter.uuid for prouter in prouters])

        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.debug("Device import job started with execution id: %s",
                           job_execution_id)
        self._wait_for_job_to_finish('Underlay config', job_execution_id)

    # end underlay_config

    def image_upgrade(self, image, device):
        """upgrade the physical routers with specified images"""
        self._logger.info("Upgrade image on the physical router ...")
        job_execution_info = self._api.execute_job(
            job_template_fq_name=[
                'default-global-system-config', 'image_upgrade_template'
            ],
            job_input={'image_uuid': image.uuid},
            device_list=[device.uuid])
        job_execution_id = job_execution_info.get('job_execution_id')
        self._logger.info("Image upgrade job started with execution id: %s",
                          job_execution_id)
        self._wait_for_job_to_finish('Image upgrade', job_execution_id)

    # end image_upgrade

    def _exit_with_error(self, errmsg):
        self._logger.error(errmsg)
        sys.exit(1)
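
A minimal driver sketch (not part of the original source) showing how the SanityBase helpers above could be chained; the SanityTest subclass, its config argument, and the fabric name 'fab01' are assumptions made for illustration:

    # hypothetical subclass supplying self._api, self._logger, retry settings, etc.
    sanity = SanityTest(config)
    fab = sanity._api.fabric_read(
        fq_name=['default-global-system-config', 'fab01'])
    prouters = sanity.discover_fabric_device(fab)  # runs discover_device_template
    sanity.device_import(prouters)                 # imports interfaces per device
    sanity.underlay_config(prouters)               # runs generate_underlay_template
    # a single device could then be upgraded with a previously created image object:
    # sanity.image_upgrade(image, prouters[0])
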
Code example #47
0
def module_process(module):
    concurrent = module.params['pool_size']
    fabric_uuid = module.params['fabric_uuid']
    all_hosts = []

    if module.params['subnets']:
        for subnet in module.params['subnets']:
            try:
                ip_net = IPNetwork(subnet)
                all_hosts.extend(list(ip_net))
            except Exception as ex:
                _exit_with_error(
                    module,
                    "ERROR: Invalid subnet \"%s\" (%s)" % (subnet, str(ex)))

    if module.params['hosts']:
        for host in module.params['hosts']:
            try:
                ipaddr = socket.gethostbyname(host)
                all_hosts.append(ipaddr)
            except Exception as ex:
                _exit_with_error(
                    module,
                    "ERROR: Invalid ip address \"%s\" (%s)" % (host, str(ex)))

    # Verify that a community string is provided when using SNMP v2/v2c
    if module.params['version'] == "v2" or module.params['version'] == "v2c":
        if module.params['community'] is None:
            _exit_with_error(
                module,
                "ERROR: Community not set when using snmp version 2")

    if module.params['version'] == "v3":
        _exit_with_error(module, "ERROR: snmp version 3 is not supported")

    module.results['msg'] = "Prefix(es) to be discovered: " + \
        ','.join(module.params['subnets'])
    module.send_job_object_log(module.results.get('msg'), JOB_IN_PROGRESS,
                               None)

    if len(all_hosts) < concurrent:
        concurrent = len(all_hosts)

    threadpool = pool.Pool(concurrent)

    try:
        vncapi = VncApi(auth_type=VncApi._KEYSTONE_AUTHN_STRATEGY,
                        auth_token=module.job_ctx.get('auth_token'))
        for host in all_hosts:
            threadpool.start(
                Greenlet(_device_info_processing, str(host), vncapi, module,
                         fabric_uuid))
        threadpool.join()
    except Exception as ex:
        module.results['failed'] = True
        module.results['msg'] = "Failed to connect to API server due to error: %s"\
            % str(ex)
        module.exit_json(**module.results)

    module.results['msg'] = "Device discovery complete"
    module.send_job_object_log(module.results.get('msg'), JOB_IN_PROGRESS,
                               None)
    module.exit_json(**module.results)
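
A sketch (assumed, not taken from the original playbook) of the task parameters module_process() consumes, based on the keys it reads above; all values are placeholders:

    # hypothetical contents of module.params for the SNMP device discovery module
    example_params = {
        'fabric_uuid': 'fabric-uuid-placeholder',  # fabric to attach devices to
        'pool_size': 32,                           # greenlet pool size
        'subnets': ['10.1.1.0/24'],                # CIDRs to sweep
        'hosts': ['qfx-leaf1.example.net'],        # extra hosts resolved via DNS
        'version': 'v2c',                          # 'v3' is rejected above
        'community': 'public',                     # required for v2/v2c
    }
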
Code example #48
0
    def __init__(self, dm_logger=None, args=None):
        DeviceManager._device_manager = self
        self._args = args
        PushConfigState.set_push_mode(int(self._args.push_mode))
        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(
            int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(
            float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(
            bool(self._args.push_delay_enable))

        self._chksum = ""
        if self._args.collectors:
            self._chksum = hashlib.md5(''.join(
                self._args.collectors)).hexdigest()

        # Initialize logger
        self.logger = dm_logger or DeviceManagerLogger(args)

        # Register Plugins
        try:
            DeviceConf.register_plugins()
        except DeviceConf.PluginsRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error("Internal error while registering plugins: " +
                              str(e) + tb)

        # Register Ansible Plugins
        try:
            AnsibleBase.register_plugins()
        except AnsibleBase.PluginsRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error(
                "Internal error while registering ansible plugins: " + str(e) +
                tb)

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = args.api_server_ip.split(',')
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user,
                    args.admin_password,
                    args.admin_tenant_name,
                    api_server_list,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)
        """ @sighup
        Handle of SIGHUP for collector list config change
        """
        gevent.signal(signal.SIGHUP, self.sighup_handler)

        # Initialize amqp
        self._vnc_amqp = DMAmqpHandle(self.logger, self.REACTION_MAP,
                                      self._args)
        self._vnc_amqp.establish()

        # Initialize cassandra
        self._object_db = DMCassandraDB.get_instance(self, _zookeeper_client)
        DBBaseDM.init(self, self.logger, self._object_db)
        DBBaseDM._sandesh = self.logger._sandesh

        for obj in GlobalSystemConfigDM.list_obj():
            GlobalSystemConfigDM.locate(obj['uuid'], obj)

        for obj in NodeProfileDM.list_obj():
            NodeProfileDM.locate(obj['uuid'], obj)

        for obj in RoleConfigDM.list_obj():
            RoleConfigDM.locate(obj['uuid'], obj)

        for obj in GlobalVRouterConfigDM.list_obj():
            GlobalVRouterConfigDM.locate(obj['uuid'], obj)

        for obj in VirtualNetworkDM.list_obj():
            VirtualNetworkDM.locate(obj['uuid'], obj)

        dci_obj_list = DataCenterInterconnectDM.list_obj()
        for obj in dci_obj_list or []:
            DataCenterInterconnectDM.locate(obj['uuid'], obj)

        for obj in FabricDM.list_obj():
            FabricDM.locate(obj['uuid'], obj)

        for obj in FabricNamespaceDM.list_obj():
            FabricNamespaceDM.locate(obj['uuid'], obj)

        for obj in LogicalRouterDM.list_obj():
            LogicalRouterDM.locate(obj['uuid'], obj)

        for obj in RoutingInstanceDM.list_obj():
            RoutingInstanceDM.locate(obj['uuid'], obj)

        for obj in FloatingIpPoolDM.list_obj():
            FloatingIpPoolDM.locate(obj['uuid'], obj)

        for obj in BgpRouterDM.list_obj():
            BgpRouterDM.locate(obj['uuid'], obj)

        for obj in PortTupleDM.list_obj():
            PortTupleDM.locate(obj['uuid'], obj)

        for obj in PhysicalInterfaceDM.list_obj():
            PhysicalInterfaceDM.locate(obj['uuid'], obj)

        for obj in LinkAggregationGroupDM.list_obj():
            LinkAggregationGroupDM.locate(obj['uuid'], obj)

        for obj in LogicalInterfaceDM.list_obj():
            LogicalInterfaceDM.locate(obj['uuid'], obj)

        pr_obj_list = PhysicalRouterDM.list_obj()
        for obj in pr_obj_list:
            PhysicalRouterDM.locate(obj['uuid'], obj)

        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._object_db.handle_pr_deletes(pr_uuid_set)

        dci_uuid_set = set([dci_obj['uuid'] for dci_obj in dci_obj_list])
        self._object_db.handle_dci_deletes(dci_uuid_set)

        for obj in VirtualMachineInterfaceDM.list_obj():
            VirtualMachineInterfaceDM.locate(obj['uuid'], obj)

        for obj in SecurityGroupDM.list_obj():
            SecurityGroupDM.locate(obj['uuid'], obj)

        for obj in AccessControlListDM.list_obj():
            AccessControlListDM.locate(obj['uuid'], obj)

        for obj in pr_obj_list:
            pr = PhysicalRouterDM.locate(obj['uuid'], obj)
            li_set = pr.logical_interfaces
            vmi_set = set()
            for pi_id in pr.physical_interfaces:
                pi = PhysicalInterfaceDM.locate(pi_id)
                if pi:
                    li_set |= pi.logical_interfaces
                    vmi_set |= pi.virtual_machine_interfaces
            for li_id in li_set:
                li = LogicalInterfaceDM.locate(li_id)
                if li and li.virtual_machine_interface:
                    vmi_set |= set([li.virtual_machine_interface])
            for vmi_id in vmi_set:
                vmi = VirtualMachineInterfaceDM.locate(vmi_id)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._object_db.handle_pnf_resource_deletes(si_uuid_set)

        for obj in si_obj_list:
            ServiceInstanceDM.locate(obj['uuid'], obj)

        for obj in InstanceIpDM.list_obj():
            InstanceIpDM.locate(obj['uuid'], obj)

        for obj in FloatingIpDM.list_obj():
            FloatingIpDM.locate(obj['uuid'], obj)

        for vn in VirtualNetworkDM.values():
            vn.update_instance_ip_map()

        for obj in ServiceEndpointDM.list_obj():
            ServiceEndpointDM.locate(obj['uuid'], obj)

        for obj in ServiceConnectionModuleDM.list_obj():
            ServiceConnectionModuleDM.locate(obj['uuid'], obj)

        for obj in ServiceObjectDM.list_obj():
            ServiceObjectDM.locate(obj['uuid'], obj)

        for obj in NetworkDeviceConfigDM.list_obj():
            NetworkDeviceConfigDM.locate(obj['uuid'], obj)

        for obj in E2ServiceProviderDM.list_obj():
            E2ServiceProviderDM.locate(obj['uuid'], obj)

        for obj in PeeringPolicyDM.list_obj():
            PeeringPolicyDM.locate(obj['uuid'], obj)

        for pr in PhysicalRouterDM.values():
            pr.set_config_state()
            pr.uve_send()

        self._vnc_amqp._db_resync_done.set()
        try:
            gevent.joinall(self._vnc_amqp._vnc_kombu.greenlets())
        except KeyboardInterrupt:
            DeviceManager.destroy_instance()
            raise
Code example #49
0
File: create_3.py Project: nuthanc/bgpaas_scale
import sys
import ipaddr
from netaddr import IPNetwork
from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import *
from vnc_api.gen.resource_xsd import *
import random

#tenant_name  = ['default-domain','bgpaas-scale-3']
#ipam_fq_name = [ 'default-domain', 'bgpaas-scale-3', 'bgpaas-3.ipam']

tenant_name = ['default-domain', 'admin']
ipam_fq_name = ['default-domain', 'default-project', 'default-network-ipam']
vnc_lib = VncApi(username='******',
                 password='******',
                 tenant_name='admin',
                 api_server_host='10.87.64.129',
                 api_server_port=8082,
                 auth_host='5.5.5.251')
#hcs = proj_obj.get_service_health_checks()
#for hc  in hcs:
#import pdb;pdb.set_trace()
#    hc_obj=vnc_lib.service_health_check_read(id=hc['uuid'])
#    prop = hc_obj.get_service_health_check_properties()
#    prop.set_delay(delay=1)
#    prop.set_delayUsecs(delayUsecs=10000)
##    prop.set_timeout(timeout=2)
#    prop.set_max_retries(max_retries=5)
#    prop.set_timeoutUsecs(timeoutUsecs=10000)
#    hc_obj.set_service_health_check_properties(prop)
#    vnc_lib.service_health_check_update(hc_obj)
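
A hedged sketch (not in the original create_3.py) of how the commented-out health-check tuning above could be driven, assuming the project object is fetched with project_read():

    proj_obj = vnc_lib.project_read(fq_name=tenant_name)
    for hc in proj_obj.get_service_health_checks() or []:
        hc_obj = vnc_lib.service_health_check_read(id=hc['uuid'])
        prop = hc_obj.get_service_health_check_properties()
        prop.set_delay(delay=1)              # shorten the probe interval
        prop.set_max_retries(max_retries=5)  # allow more failed probes
        hc_obj.set_service_health_check_properties(prop)
        vnc_lib.service_health_check_update(hc_obj)
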
Code example #50
0
#!/usr/bin/python

import logging
import os
import sys

from vnc_api.vnc_api import VncApi
from vnc_api.gen.resource_client import PhysicalRouter

DEFAULT_LOG_PATH = '/var/log/contrail/dnsmasq.log'
LOGGING_FORMAT = \
    '%(asctime)s.%(msecs)03d %(name)s [%(levelname)s]:  %(message)s'
DATE_FORMAT = "%m/%d/%Y %H:%M:%S"

vnc_api = VncApi(username=os.environ['KEYSTONE_AUTH_ADMIN_USER'],
                 password=os.environ['KEYSTONE_AUTH_ADMIN_PASSWORD'],
                 tenant_name=os.environ['KEYSTONE_AUTH_ADMIN_TENANT'],
                 api_server_host=(os.environ['CONTROLLER_NODES']).split(','))


def main():
    logging.basicConfig(filename=DEFAULT_LOG_PATH,
                        level=logging.INFO,
                        format=LOGGING_FORMAT,
                        datefmt=DATE_FORMAT)
    logger = logging.getLogger("dnsmasq")
    logger.setLevel(logging.INFO)

    if sys.argv[1] == 'read':
        # read from DB mac:ip
        mac_ip = []
        filters = {}
Code example #51
0
    def __init__(self,
                 dm_logger=None,
                 args=None,
                 zookeeper_client=None,
                 amqp_client=None):
        """Physical Router init routine."""
        DeviceManager._instance = self
        self._args = args
        self._amqp_client = amqp_client
        self.logger = dm_logger or DeviceManagerLogger(args)
        self._vnc_amqp = DMAmqpHandle(self.logger, self.REACTION_MAP,
                                      self._args)

        PushConfigState.set_push_mode(int(self._args.push_mode))
        PushConfigState.set_repush_interval(int(self._args.repush_interval))
        PushConfigState.set_repush_max_interval(
            int(self._args.repush_max_interval))
        PushConfigState.set_push_delay_per_kb(
            float(self._args.push_delay_per_kb))
        PushConfigState.set_push_delay_max(int(self._args.push_delay_max))
        PushConfigState.set_push_delay_enable(
            bool(self._args.push_delay_enable))

        self._chksum = ""
        if self._args.collectors:
            self._chksum = hashlib.md5(''.join(
                self._args.collectors)).hexdigest()

        # Register Plugins
        try:
            DeviceConf.register_plugins()
        except DeviceConf.PluginsRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error("Internal error while registering plugins: " +
                              str(e) + tb)

        # Register Ansible Plugins
        try:
            AnsibleBase.register_plugins()
        except AnsibleBase.PluginsRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error(
                "Internal error while registering ansible plugins: " + str(e) +
                tb)

        # Register Feature Plugins
        try:
            FeatureBase.register_plugins()
        except FeatureBase.PluginRegistrationFailed as e:
            self.logger.error("Exception: " + str(e))
        except Exception as e:
            tb = traceback.format_exc()
            self.logger.error(
                "Internal error while registering feature plugins: " + str(e) +
                tb)
            raise e

        # Retry till API server is up
        connected = False
        self.connection_state_update(ConnectionStatus.INIT)
        api_server_list = args.api_server_ip.split(',')
        while not connected:
            try:
                self._vnc_lib = VncApi(
                    args.admin_user,
                    args.admin_password,
                    args.admin_tenant_name,
                    api_server_list,
                    args.api_server_port,
                    api_server_use_ssl=args.api_server_use_ssl)
                connected = True
                self.connection_state_update(ConnectionStatus.UP)
            except requests.exceptions.ConnectionError as e:
                # Update connection info
                self.connection_state_update(ConnectionStatus.DOWN, str(e))
                time.sleep(3)
            except ResourceExhaustionError:  # haproxy throws 503
                time.sleep(3)

        if PushConfigState.is_push_mode_ansible():
            FabricManager.initialize(args, dm_logger, self._vnc_lib)

        # Initialize cassandra
        self._object_db = DMCassandraDB.get_instance(zookeeper_client,
                                                     self._args, self.logger)
        DBBaseDM.init(self, self.logger, self._object_db)
        DBBaseDM._sandesh = self.logger._sandesh

        # DBBaseDM.init should be called before Initializing amqp
        # Initialize amqp
        self._vnc_amqp.establish()
        GlobalSystemConfigDM.locate_all()
        FeatureFlagDM.locate_all()
        FlowNodeDM.locate_all()
        FeatureDM.locate_all()
        PhysicalRoleDM.locate_all()
        OverlayRoleDM.locate_all()
        RoleDefinitionDM.locate_all()
        FeatureConfigDM.locate_all()
        NodeProfileDM.locate_all()
        RoleConfigDM.locate_all()
        GlobalVRouterConfigDM.locate_all()
        VirtualNetworkDM.locate_all()
        DataCenterInterconnectDM.locate_all()
        FabricDM.locate_all()
        FabricNamespaceDM.locate_all()
        LogicalRouterDM.locate_all()
        RoutingInstanceDM.locate_all()
        FloatingIpPoolDM.locate_all()
        BgpRouterDM.locate_all()
        PhysicalInterfaceDM.locate_all()
        LogicalInterfaceDM.locate_all()
        IntentMapDM.locate_all()
        PhysicalRouterDM.locate_all()
        LinkAggregationGroupDM.locate_all()
        VirtualPortGroupDM.locate_all()
        PortDM.locate_all()
        TagDM.locate_all()
        NetworkIpamDM.locate_all()
        VirtualMachineInterfaceDM.locate_all()
        SecurityGroupDM.locate_all()
        AccessControlListDM.locate_all()
        PortProfileDM.locate_all()
        StormControlProfileDM.locate_all()
        TelemetryProfileDM.locate_all()
        SflowProfileDM.locate_all()
        GrpcProfileDM.locate_all()
        ServiceInstanceDM.locate_all()
        ServiceApplianceSetDM.locate_all()
        ServiceApplianceDM.locate_all()
        ServiceTemplateDM.locate_all()
        PortTupleDM.locate_all()
        InstanceIpDM.locate_all()
        FloatingIpDM.locate_all()

        for vn in list(VirtualNetworkDM.values()):
            vn.update_instance_ip_map()

        ServiceEndpointDM.locate_all()
        ServiceConnectionModuleDM.locate_all()
        ServiceObjectDM.locate_all()
        NetworkDeviceConfigDM.locate_all()
        E2ServiceProviderDM.locate_all()
        PeeringPolicyDM.locate_all()
        InterfaceRouteTableDM.locate_all()
        RoutingPolicyDM.locate_all()

        pr_obj_list = PhysicalRouterDM.list_obj()
        pr_uuid_set = set([pr_obj['uuid'] for pr_obj in pr_obj_list])
        self._object_db.handle_pr_deletes(pr_uuid_set)

        dci_obj_list = DataCenterInterconnectDM.list_obj()
        dci_uuid_set = set([dci_obj['uuid'] for dci_obj in dci_obj_list])
        self._object_db.handle_dci_deletes(dci_uuid_set)

        si_obj_list = ServiceInstanceDM.list_obj()
        si_uuid_set = set([si_obj['uuid'] for si_obj in si_obj_list])
        self._object_db.handle_pnf_resource_deletes(si_uuid_set)

        for pr in list(PhysicalRouterDM.values()):
            pr.set_config_state()
            pr.uve_send()

        self._vnc_amqp._db_resync_done.set()

        gevent.joinall(self._vnc_amqp._vnc_kombu.greenlets())