Example #1
0
def main():
    """Entry point: reset memcached, seed the server cache, then run the
    statistics scheduler until the process is stopped."""
    logger.info("Clean memcached before init")
    host, port = get_memcached_config()
    cache = MemcachedNFV(host, port)
    cache.clean_memcached()
    cache.disconnect()

    # Populate the server list cache via the gateway API.
    init_server_cached_list_api()

    # Interval (seconds) between statistic-collection runs.
    poll_seconds = int(read_config_json_file(NFV_CONFIG_PATH)['interval'])

    scheduler = BlockingScheduler()
    scheduler.configure(
        executors={
            'default': {
                'type': 'threadpool',
                'max_workers': 20
            },
            'processpool': ProcessPoolExecutor(max_workers=5)
        },
        job_defaults={'coalesce': False, 'max_instances': 3},
    )
    scheduler.add_job(scheduler_get_statistic_job,
                      'interval',
                      seconds=poll_seconds)
    scheduler.start()  # blocks here
def get_used_addresses_on_bam(memcache):
    """Return the (ipv4, ipv6) addresses already in use on the BAM.

    Served from memcached when present; otherwise rebuilt by walking the
    BAM's server list and cached back under the "used_ipv4"/"used_ipv6"
    keys as comma-joined strings.

    :param memcache: MemcachedNFV-like client (get_network/set_network).
    :return: tuple (ipv4_addresses, ipv6_addresses). On a rebuild these
        are lists of address strings; on a cache hit they are whatever
        memcache.get_network returns — presumably lists (see scale_out,
        which appends to the same values) — TODO confirm in MemcachedNFV.
    """
    used_ipv4 = memcache.get_network("used_ipv4")
    used_ipv6 = memcache.get_network("used_ipv6")
    # Cache hit is keyed on IPv4 only; the cached IPv6 value is returned
    # alongside it even if empty.
    if used_ipv4:
        return used_ipv4, used_ipv6
    # BAM unreachable: report nothing in use rather than failing.
    if not is_check_available_bam():
        return [], []
    data_config = read_config_json_file(NFV_CONFIG_PATH)
    configuration_name = data_config['bam_config_name']
    configuration_id = get_configuration_id(configuration_name)
    g.user.logger.info(
        'Get list server of configure_id {}'.format(configuration_id))
    servers = get_list_servers(configuration_id)
    ipv4_addresses, ipv6_addresses = [], []
    for server in servers:
        # Management address plus optional per-service v4/v6 addresses,
        # de-duplicated as they are collected.
        server_mng_ip = get_list_ip_in_bam(server, "defaultInterfaceAddress")
        server_srv_ipv4 = get_list_ip_in_bam(server, "servicesIPv4Address")
        server_srv_ipv6 = get_list_ip_in_bam(server, "servicesIPv6Address")
        if server_mng_ip not in ipv4_addresses:
            ipv4_addresses.append(server_mng_ip)
        if server_srv_ipv4 is not None and server_srv_ipv4 not in ipv4_addresses:
            ipv4_addresses.append(server_srv_ipv4)
        if server_srv_ipv6 is not None and server_srv_ipv6 not in ipv6_addresses:
            ipv6_addresses.append(server_srv_ipv6)
    # Cache the rebuilt lists (comma-joined) for subsequent calls.
    if ipv4_addresses:
        ipv4_str = ",".join(ipv4_addresses)
        memcache.set_network("used_ipv4", ipv4_str)
    if ipv6_addresses:
        ipv6_str = ",".join(ipv6_addresses)
        memcache.set_network("used_ipv6", ipv6_str)
    return ipv4_addresses, ipv6_addresses
Example #3
0
 def get_log_level(self):
     """Return the numeric logging level configured in NFV_CONFIG_PATH.

     Reads the textual "log_level" entry and converts it with
     map_text_log_level.
     :return: mapped log level
     """
     config = read_config_json_file(NFV_CONFIG_PATH)
     return map_text_log_level(config["log_level"])
Example #4
0
def init_server_cached_list_api():
    """[Call api to gateway init server bam and bdds to memcached]

    Sends the gateway request against the first configured BAM IP and
    logs the gateway's response.
    """
    config = read_config_json_file(NFV_CONFIG_PATH)
    logger.info("Statistics collection-init_server_cached_list_api")
    bam_ip = config['bam'][0]['ip']
    response = gateway_access.request_data(
        'gateway_nfv_plugin/init_server_cached_list', bam_ip)
    logger.info(
        "Statistics collection-init_server_cached_list_api - {}".format(
            response))
def init_server_cached_list():
    """[init_server_cached_list: call api get servers in bam with CONFIGURATION_NAME]

    Loads the NFV config, fetches the configuration's server list from
    the BAM, then registers the BAM, every BDDS, and the VM host in
    memcached so later statistic runs can enumerate them.

    Returns:
        [json] -- [
                    Success:    return  {"Status": "SUCCESS"}
                    Fail:       return  {"Status": "FAIL"}]
    """
    try:
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        configuration_name = data_config['bam_config_name']
        g.user.logger.debug(
            'Init_server_cached_list - configuration_name: {}'.format(
                configuration_name))
        configuration_id = gateway_nfv_management.get_configuration_id(
            configuration_name)
        g.user.logger.info(
            'Get list server of configure_id {}'.format(configuration_id))
        list_servers = gateway_nfv_management.get_list_servers(
            configuration_id)
        g.user.logger.info(
            'Init_server_cached_list - Number of get list server: {}'.format(
                len(list_servers)))
        # Init memcached
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        # Set bam server info to memcached server
        bam_ip = data_config['bam'][0]['ip']
        bam_name = data_config['bam'][0]['name']
        mem_nfv.set_server({
            'name': bam_name,
            'ipv4_address': bam_ip
        }, ServerType.BAM)
        # Set bdds server info to memcached server
        # Each BDDS entry is cached together with the configured UDF names.
        list_udf_name = [udf['name'] for udf in data_config['udfs_for_server']]
        for server in list_servers:
            mem_nfv.set_server(server, ServerType.BDDS, bam_ip, list_udf_name)
        # Set VM_HOST server info to memcached server
        vm_host_ip = data_config["vm_host_ip"]
        vm_name = data_config['vm_host_name']
        mem_nfv.set_server({
            'name': vm_name,
            'ipv4_address': vm_host_ip
        }, ServerType.VM_HOST)
        mem_nfv.disconnect()
    except Exception as exception:
        # Any failure (missing config key, BAM/API error, memcached error)
        # is logged and reported as FAIL instead of propagating.
        g.user.logger.error('Init_server_cached_list - {}'.format(exception))
        g.user.logger.error(traceback.format_exc())
        return jsonify({"Status": "FAIL"})
    return jsonify({"Status": "SUCCESS"})
def get_metadata():
    """Build the server metadata string from NFV_CONFIG_PATH.

    Each entry in the config's "udfs_for_server" list contributes one
    "name=default_value" pair; pairs are joined with '|'.

    :return: metadata string, e.g. "can_scale_in=True|owner=nfv"
        (empty string when no UDFs are configured).
    """
    data_config = read_config_json_file(NFV_CONFIG_PATH)
    # Single join over a generator instead of the former append loop.
    return '|'.join(
        '{}={}'.format(udf['name'], udf['default_value'])
        for udf in data_config['udfs_for_server'])
def get_memcached_config():
    """[Get memcached config from nfv_config.json]
    Raises:
        KeyError -- [Can not get config memcached from nfv_config.json]
        ValueError -- [memcached_port is not a valid integer]
    Returns:
        [String] -- [memcached_host]
        [Int] -- [memcached_port]
    """

    data_config = read_config_json_file(NFV_CONFIG_PATH)
    try:
        memcached_host = data_config["memcached_host"]
        memcached_port = data_config["memcached_port"]
    except KeyError:
        raise KeyError("Can not get config memcached from nfv_config.json")
    try:
        # Previously the conversion happened outside any try block, so a
        # non-numeric port raised an uninformative ValueError.
        return memcached_host, int(memcached_port)
    except (TypeError, ValueError):
        raise ValueError(
            "memcached_port in nfv_config.json is not a valid integer: "
            "{!r}".format(memcached_port))
def is_check_available_bam():
    """
    Check bam available by probing http://<bam_ip> of the first
    configured BAM.
    :return:
    [True]- boolean (HTTP 200 received)
    [False]- boolean (unreachable or non-200 response)
    """
    data_config = read_config_json_file(NFV_CONFIG_PATH)
    bam_ip = data_config['bam'][0]['ip']
    try:
        check_status = urllib.request.urlopen(f"http://{bam_ip}").getcode()
        if check_status == 200:
            g.user.logger.info('Available for BAM IP : %s' % bam_ip)
            return True
    except URLError as ex:
        g.user.logger.info(f"Not available for BAM IP : {bam_ip} {ex}")
        return False
    # Fix: a non-200 response previously fell through and implicitly
    # returned None; treat anything other than HTTP 200 as unavailable.
    g.user.logger.info(
        f"Not available for BAM IP : {bam_ip} (HTTP {check_status})")
    return False
Example #9
0
def get_memcached_config():
    """[Get memcached config from NFV_CONFIG_PATH]
    Raises:
        KeyError -- [Can not get config memcached from NFV_CONFIG_PATH]
    Returns:
        [String] -- [memcached_host]
        [Int] -- [memcached_port]
    """
    data_config = read_config_json_file(NFV_CONFIG_PATH)
    try:
        memcached_host = data_config['memcached_host']
        memcached_port = data_config['memcached_port']
    except KeyError:
        raise KeyError("Can not get config memcached from NFV_CONFIG_PATH")
    # Logged before the int() conversion so a bad port value is still
    # visible in the debug log.
    logger.debug("Memcached_host: {} - Memcached_port: {}".format(
        memcached_host, memcached_port))
    return memcached_host, int(memcached_port)
def call_k1_api(result_object, timeout=1):
    """[Call K1 API]

    Builds a payload from *result_object* and POSTs it as JSON to the
    endpoint configured under "k1_api" in NFV_CONFIG_PATH. Best-effort:
    all failures are logged and swallowed.

    :param result_object: statistic result used to build the payload;
        nothing is sent when it is falsy.
    :param timeout: request timeout in seconds, default 1
    :return: None
    """
    if not result_object:
        logger.debug("Result_object is none")
        return

    payload = prepare_payload_for_k1(result_object)
    logger.info(f'Payload of K1: {payload}')

    # Skip the call entirely when every KPI in the payload is empty.
    if is_kpi_none(payload):
        logger.info('KPIs are none. Do not call api k1')
        return

    data_config = read_config_json_file(NFV_CONFIG_PATH)
    headers = {
        'Content-Type': 'application/json;charset=UTF-8',
    }
    try:
        host = data_config['k1_api']['address']
        port = data_config['k1_api']['port']
        uri = data_config['k1_api']['uri']
        response = requests.post(f"http://{host}:{port}{uri}",
                                 headers=headers,
                                 data=json.dumps(payload),
                                 timeout=timeout)
        result_call = {
            'content': response.content.decode('utf-8'),
            'status_code': response.status_code
        }
        logger.info(f'Result call api k1: {result_call}')
    except KeyError as key_error:
        # Fix: the original format string had two placeholders but only
        # one argument, raising IndexError inside this handler.
        logger.error("Cannot get {} in config file {}".format(
            key_error, NFV_CONFIG_PATH))
        logger.debug(traceback.format_exc())
    except requests.RequestException:
        # The k1_api keys resolved above (a missing key raises KeyError
        # first), so indexing data_config here cannot fail.
        logger.error("Cannot request api to {}".format(
            data_config['k1_api']['address']))
        logger.error("Payload of the failed request: {}".format(payload))
        logger.debug(traceback.format_exc())
def get_available_addresses(management_network, service_network):
    """Select free management/service addresses and reserve them in memcached.

    :param management_network: dict whose "ip_v4" sub-dict is mandatory and
        "ip_v6" optional; sub-dicts are parsed by get_network_detail into
        (cidr, start_ip, end_ip, gateway).
    :param service_network: same shape; both IPv4 and IPv6 parts optional.
    :return: dict with a "management" entry (ip_v4 and, when configured,
        ip_v6) and optionally a "service" entry, each carrying cidr,
        gateway and the selected address in "addr/prefix" form.
    :raises ValueError: when the management IPv4 data is missing or has
        any empty field.
    """
    management_ipv4_network = management_network.get("ip_v4", {})
    if not management_ipv4_network:
        raise ValueError('Invalid management network data')
    management_ipv6_network = management_network.get("ip_v6", {})
    mgnt_ipv4_cidr, mgnt_ipv4_start_ip, mgnt_ipv4_end_ip, mgnt_ipv4_gateway = get_network_detail(
        management_ipv4_network)
    mngt_ipv4_net_size = mgnt_ipv4_cidr.split("/")[1]
    mgnt_ipv6_cidr, mgnt_ipv6_start_ip, mgnt_ipv6_end_ip, mgnt_ipv6_gateway = get_network_detail(
        management_ipv6_network)
    # mngt_ipv6_net_size is only bound when an IPv6 CIDR exists; every
    # later use is guarded by the same mgnt_ipv6_cidr check.
    if mgnt_ipv6_cidr:
        mngt_ipv6_net_size = mgnt_ipv6_cidr.split("/")[1]
    if all(e != "" for e in [
            mgnt_ipv4_cidr, mgnt_ipv4_start_ip, mgnt_ipv4_end_ip,
            mgnt_ipv4_gateway
    ]):
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        # NOTE(review): this connection is never explicitly disconnected,
        # unlike other call sites in this file — confirm intentional.
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        srv_ipv4_network = service_network.get("ip_v4", {})
        srv_ipv6_network = service_network.get("ip_v6", {})
        srv_cidr, srv_start_ip, srv_end_ip, srv_gateway = get_network_detail(
            srv_ipv4_network)
        srv_ipv6_cidr, srv_ipv6_start_ip, srv_ipv6_end_ip, srv_ipv6_gateway = get_network_detail(
            srv_ipv6_network)
        # Gateways count as "used" so they can never be handed out.
        used_ipv4, used_ipv6 = get_used_addresses_on_bam(mem_nfv)
        if mgnt_ipv4_gateway:
            used_ipv4.append(mgnt_ipv4_gateway)
        if mgnt_ipv6_gateway:
            used_ipv6.append(mgnt_ipv6_gateway)
        if srv_gateway:
            used_ipv4.append(srv_gateway)
        if srv_ipv6_gateway:
            used_ipv6.append(srv_ipv6_gateway)

        mgnt_ipv4 = get_ip_in_list_in_used_ips(mem_nfv, mgnt_ipv4_start_ip,
                                               mgnt_ipv4_end_ip, used_ipv4)
        mgnt_ipv4_str = mgnt_ipv4 + "/" + mngt_ipv4_net_size
        # Reserve each candidate under its own key for 1800s, presumably so
        # concurrent requests do not pick the same address — TODO confirm
        # against MemcachedNFV.set_network.
        mem_nfv.set_network(mgnt_ipv4, "0", 1800)
        if mgnt_ipv6_cidr:
            mgnt_ipv6 = get_ip_in_list_in_used_ips(mem_nfv, mgnt_ipv6_start_ip,
                                                   mgnt_ipv6_end_ip, used_ipv6)
            mgnt_ipv6_str = mgnt_ipv6 + "/" + mngt_ipv6_net_size
            mem_nfv.set_network(mgnt_ipv6, "0", 1800)
        srv_ip_str = ""
        srv_ipv6_str = ""
        srv_ip = ""
        srv_ipv6_ip = ""
        if srv_cidr:
            srv_ip = get_ip_in_list_in_used_ips(mem_nfv, srv_start_ip,
                                                srv_end_ip, used_ipv4)
        if srv_ipv6_cidr:
            srv_ipv6_ip = get_ip_in_list_in_used_ips(mem_nfv,
                                                     srv_ipv6_start_ip,
                                                     srv_ipv6_end_ip,
                                                     used_ipv6)
        if srv_ip:
            srv_ipv4_net_size = srv_cidr.split("/")[1]
            srv_ip_str = srv_ip + "/" + srv_ipv4_net_size
            mem_nfv.set_network(srv_ip, "0", 1800)
        if srv_ipv6_ip:
            srv_ipv6_net_size = srv_ipv6_cidr.split("/")[1]
            srv_ipv6_str = srv_ipv6_ip + "/" + srv_ipv6_net_size
            mem_nfv.set_network(srv_ipv6_ip, "0", 1800)
        # Assemble the response payload.
        result = dict()
        result["management"] = {}
        result["management"]["ip_v4"] = {
            "cidr": mgnt_ipv4_cidr,
            "gateway": mgnt_ipv4_gateway,
            "management_ipv4": mgnt_ipv4_str
        }
        if mgnt_ipv6_cidr:
            result["management"]["ip_v6"] = {
                "cidr": mgnt_ipv6_cidr,
                "gateway": mgnt_ipv6_gateway,
                "management_ipv6": mgnt_ipv6_str
            }
        if srv_cidr or srv_ipv6_cidr:
            result["service"] = {}
            if srv_cidr:
                tmp = {
                    "cidr": srv_cidr,
                    "gateway": srv_gateway,
                    "service_ipv4": srv_ip_str
                }
                result["service"]["ip_v4"] = tmp
            if srv_ipv6_cidr:
                tmp = {
                    "cidr": srv_ipv6_cidr,
                    "gateway": srv_ipv6_gateway,
                    "service_ipv6": srv_ipv6_str
                }
                result["service"]["ip_v6"] = tmp
        return result
    else:
        raise ValueError('Invalid management network data')
def scale_out(data):
    """Add a new BDDS server to the BAM, deploy DNS to it, and cache it.

    Phases: validate config/server availability, add the server to the
    BAM with assembled properties, create deployment roles per DNS view,
    deploy DNS config and wait, optionally configure anycast, then update
    the memcached server list and used-address bookkeeping.

    :param data: request in json
    for example:
    {
    "server_name": "bdd240s",
    "mgnt_server_ip": "192.168.88.240",
    "service_server_ipv4": "192.168.89.240",
    "service_server_ipv6": fdac::12,
    "service_server_netmask": 24,
    "service_server_v6_prefix": 64,
    "metadata": "can_scale_in=True"
    }
    :return: successful message (Flask JSON response + HTTP status code)
    """
    try:
        g.user.logger.debug("Scale out request data: {}".format(data))
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        g.user.logger.debug("NFV config data: {}".format(data_config))
        config_name = data_config['bam_config_name']
        config_id = get_configuration_id(config_name)
        if not config_id:
            return jsonify({
                "status": "Failed",
                "message": "Configuration id not found!"
            }), 404

        g.user.logger.info('Starting check available server')
        server_ip = data['mgnt_server_ip']
        server_ipv6 = None
        # Verify the target VM answers over SSH before touching the BAM.
        avail_server = is_check_available_server(
            server_ip, data_config['server_ssh_username'],
            data_config['server_ssh_password'])
        if not avail_server:
            return jsonify({
                "status": "Failed",
                "message": "No available server ip!"
            }), 404

        g.user.logger.info('Starting add server')
        # BAM server properties are a '|'-separated key=value string;
        # optional pieces (metadata, service v4/v6) are appended below.
        server_properties = f"password={process_password.decrypt_password(data_config['server_deployment_password'])}|connected=true|upgrade=False"
        try:
            if data['metadata']:
                server_properties = f"{server_properties}|{data['metadata']}"
        except KeyError as exception:
            # Missing "metadata" is tolerated here (logged and skipped).
            g.user.logger.error(str(exception))
            g.user.logger.error(traceback.format_exc())

        try:
            if (int(data['service_server_netmask']) <=
                    32) and (int(data['service_server_netmask']) >
                             0) and data['service_server_ipv4']:
                server_properties = f"{server_properties}|servicesIPv4Address={data['service_server_ipv4']}|servicesIPv4Netmask={cidr_to_netmask(data['service_server_netmask'])}"
        except KeyError as exception:
            g.user.logger.error(str(exception))
            g.user.logger.error(traceback.format_exc())

        try:
            if data['service_server_v6_prefix'] and data['service_server_ipv6']:
                server_ipv6 = data['service_server_ipv6'] + \
                              '/' + data['service_server_v6_prefix']
                server_properties = f"{server_properties}|servicesIPv6Address={data['service_server_ipv6']}|servicesIPv6Subnet={data['service_server_v6_prefix']}"
        except KeyError as exception:
            g.user.logger.error(str(exception))
            g.user.logger.error(traceback.format_exc())

        # Add the server to the BAM, with a capacity profile when one is
        # configured.
        if data_config['server_cap_profile']:
            g.user.logger.debug(
                f"Add server name {data['server_name']} server_ip {server_ip} profile {data_config['server_cap_profile']} properties {server_properties}"
            )
            server_id = add_server(server_ip=server_ip,
                                   server_name=data['server_name'],
                                   config_id=config_id,
                                   profile=data_config['server_cap_profile'],
                                   properties=server_properties)
        else:
            g.user.logger.debug(
                f"Add server name {data['server_name']} server_ip {server_ip} properties {server_properties}"
            )
            server_id = add_server(server_ip=server_ip,
                                   server_name=data['server_name'],
                                   config_id=config_id,
                                   properties=server_properties)

        g.user.logger.info('Starting create deployment roles')
        # At least one view must get a deployment role or the whole
        # operation is reported as failed.
        deploy_role = False
        if data_config['dns_view_names']:
            role_type = data.get("deploy_role",
                                 data_config['server_deploy_role'])
            for view_name in data_config['dns_view_names']:
                g.user.logger.debug(
                    f"Create deployment role for server {data['server_name']} view_name {view_name} role_type {role_type}"
                )
                role_id = create_deployment_roles(
                    server_name=data['server_name'],
                    server_id=server_id,
                    config_id=config_id,
                    view_name=view_name,
                    role_type=role_type)
                if not role_id:
                    g.user.logger.info(
                        'Cannot create deployment role for view name: %s' %
                        view_name)
                    continue
                deploy_role = True
        else:
            g.user.logger.info('dns_view_names not found!')
        if not deploy_role:
            g.user.logger.info('Cannot create any deployment roles!')
            return jsonify({
                "status": "Failed",
                "message": "Create deployment role failed"
            }), 500

        g.user.logger.info('Starting add raw option')
        g.user.logger.info(
            'Starting deploy DNS configuration for server name: %s' %
            data['server_name'])
        # Deployment failures are logged but do not abort the scale-out.
        deploy_server = deploy_server_config(server_id)
        if not deploy_server:
            g.user.logger.info('Deploy DNS configuration is failed!')

        g.user.logger.info(
            'Starting wait for DNS deployment for server name: %s' %
            data['server_name'])
        deploy_status = wait_for_deployment(server_id)
        g.user.logger.info(
            'Deployment status for server name %s, deploy status id %s' %
            (data['server_name'], deploy_status))

        # Anycast setup is optional and driven purely by config presence.
        if 'anycast_config' in data_config:
            g.user.logger.info('Starting configure any cast')
            configure_anycast(server_ip, server_ipv6,
                              data_config['server_ssh_username'],
                              data_config['server_ssh_password'],
                              data_config['anycast_config'])

        g.user.logger.info('Adding server to cache list')
        # Add BDDS to memcached
        bam_ip = data_config['bam'][0]['ip']
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        # NOTE(review): unlike the guarded access above, a request without
        # "metadata" raises KeyError here and lands in the outer handler
        # after the server was already added to the BAM — confirm intended.
        list_udf_name = [
            value.split('=')[0].strip()
            for value in data['metadata'].split('|')
        ]
        mem_nfv.set_server(
            {
                'id':
                server_id,
                'name':
                data['server_name'],
                'type':
                ServerType.BDDS,
                'properties':
                f"defaultInterfaceAddress={server_ip}|{server_properties}"
            }, ServerType.BDDS, bam_ip, list_udf_name)
        g.user.logger.info(
            f"SUCCESS: Add server to memcached with info 'id': {server_id}, 'name': {data['server_name']}, 'type': {ServerType.BDDS}, 'bam_ip': {bam_ip}"
        )
        g.user.logger.debug(f"'properties': {server_properties}")

        # Remove used candidate address on memcache
        mem_nfv.clean_network(data["mgnt_server_ip"])
        mem_nfv.clean_network(data["service_server_ipv4"])
        mem_nfv.clean_network(data["service_server_ipv6"])

        # Add addresses to used list on memcache
        used_ipv4_memcache = mem_nfv.get_network("used_ipv4")
        used_ipv6_memcache = mem_nfv.get_network("used_ipv6")
        used_ipv4_memcache.append(data["mgnt_server_ip"])
        if data["service_server_ipv4"]:
            used_ipv4_memcache.append(data["service_server_ipv4"])
        if data["service_server_ipv6"]:
            used_ipv6_memcache.append(data["service_server_ipv6"])
            ipv6_str = ",".join(used_ipv6_memcache)
            mem_nfv.set_network("used_ipv6", ipv6_str)
        ipv4_str = ",".join(used_ipv4_memcache)
        mem_nfv.set_network("used_ipv4", ipv4_str)

    except Exception as exception:
        # Catch-all boundary: any failure after partial progress is
        # reported as a 500 without rolling back earlier BAM changes.
        g.user.logger.error(str(exception))
        g.user.logger.error(
            f"Failed: Haven't add server to mem cached {exception}")
        g.user.logger.error(traceback.format_exc())
        return jsonify({
            "status": "Failed",
            "message": "Scale out failed",
            "error": str(exception)
        }), 500
    return jsonify({
        "status": "Successful",
        "message": "Scale out successfully",
        "error": ""
    }), 200
def scale_in(data):
    """Remove a BDDS server: strip roles, redeploy DNS, delete, uncache.

    Phases: resolve the server by name, delete its deployment roles,
    optionally stop anycast, redeploy DNS config and wait, delete the
    server entity from the BAM, then drop it from memcached.

    :param data: request in json
    example
    {
        "server_name": "bdd240s"
    }
    :return: successful message (Flask JSON response + HTTP status code)
    """
    try:
        # NOTE(review): log text says "Scale out" — copy-paste from
        # scale_out; runtime string left unchanged here.
        g.user.logger.debug("Scale out request data: {}".format(data))
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        g.user.logger.debug("NFV config data: {}".format(data_config))
        config_name = data_config['bam_config_name']
        config_id = get_configuration_id(config_name)
        if not config_id:
            return jsonify({"message": "Configuration id not found!"}), 404

        server_name = data['server_name']
        server = get_server_by_name(config_id, server_name)
        server_id = server['id']
        if server_id == 0:
            return jsonify({
                "status":
                "Failed",
                "message":
                "Scale in failed",
                "error":
                str('Server {} not found!'.format(server_name))
            }), 403
        # Extract the management IP from the '|'-separated properties;
        # defaultInterfaceAddress may be the first or second entry.
        if server['properties'].split('|')[0].split(
                '=')[0] == "defaultInterfaceAddress":
            server_ip = server['properties'].split('|')[0].split('=')[1]
        else:
            server_ip = server['properties'].split('|')[1].split('=')[1]
        # delete server roles
        g.user.logger.info('Starting remove server roles for server name: %s' %
                           server_name)
        remove_roles = delete_server_roles(server_id)
        if not remove_roles:
            g.user.logger.info(
                'Remove server roles failed! Starting stop anycast service ...'
            )

        # Anycast teardown is optional and driven by config presence.
        if 'anycast_config' in data_config:
            stop_anycast_service(server_ip, data_config['server_ssh_username'],
                                 data_config['server_ssh_password'])

        # deploy DNS server
        g.user.logger.info('Starting deploy DNS server for server name: %s' %
                           server_name)
        deploy_server = deploy_server_config(server_id)
        if not deploy_server:
            g.user.logger.info('Deploy DNS server is failed!')

        # wait for deployment
        g.user.logger.info(
            'Starting wait for DNS deployment for server name: %s' %
            server_name)
        deploy_status = wait_for_deployment(server_id)
        g.user.logger.info(
            'Deployment status for server name %s, deploy status id %s' %
            (server_name, deploy_status))
        g.user.logger.info('Anycast service stopped')

        # delete server from BAM
        g.user.logger.info('Deleting server %s from BAM' % server_name)
        delete_server = delete_entity(server_id)
        if not delete_server:
            g.user.logger.info('Delete server %s failed' % server_name)

        g.user.logger.info('Deleting server from cache list')

        bam_ip = data_config['bam'][0]['ip']
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        mem_nfv.delete_server(server_id, ServerType.BDDS, bam_ip)
        g.user.logger.info(
            f"SUCCESS: Delete server from memcached with info 'id': {server_id}"
        )

    except Exception as exception:
        # Catch-all boundary: log and report a 500; earlier BAM changes
        # are not rolled back.
        g.user.logger.error(str(exception))
        g.user.logger.error(f"Failed: Can't delete server from memcached")
        g.user.logger.error(traceback.format_exc())
        return jsonify({
            "status": "Failed",
            "message": "Scale in failed",
            "error": str(exception)
        }), 500
    return jsonify({
        "status": "Successful",
        "message": "Scale in successfully",
        "error": ""
    }), 200
Example #14
0
def collect_statistics(mem_nfv):
    """
    Collect statistics for every server cached in memcached.

    Builds one work item per BDDS/BAM/VM-host (type, name, address, its
    SNMP config section, plus — for BDDS only — the cached udf data),
    then fans get_server_statistic out over a 10-worker pool.

    :param mem_nfv: MemcachedNFV client holding the cached server lists
    :return: Array of statistic results
    """
    # Get server list from memcached
    list_bdds, list_bam, list_vmhosts = mem_nfv.get_list_servers()

    logger.debug("List_bdds: {}\nList_bam: {}\nList_vmhost: {}".format(
        list_bdds, list_bam, list_vmhosts))
    snmp_config = read_config_json_file(SNMP_CONFIG_PATH)
    logger.debug("Snmp config: {}".format(snmp_config))
    list_servers = []

    logger.info(f'List BDDS Size: {len(list_bdds)}')
    logger.info(f'Begin loop through list_bdds')
    for bdds in list_bdds:
        logger.debug(f'BDDS: {bdds}')
        bdds_config_name = get_snmp_server_config_name(snmp_config, bdds.name)
        logger.debug(f'BDDS config name {bdds_config_name}')
        try:
            # A server missing from the SNMP config raises KeyError and is
            # skipped instead of aborting the whole collection.
            list_servers.append({
                'server_type':
                ServerType.BDDS,
                'server_name':
                bdds.name,
                'address':
                bdds.ipv4_address,
                'snmp_config_data':
                snmp_config[bdds_config_name],
                'udf':
                bdds.udf
            })
        except KeyError as exception:
            logger.error(f'Exception Key Error {exception}')
            logger.error(traceback.format_exc())
            continue
    logger.info(f'List BAM Size: {len(list_bam)}')
    logger.info(f'Begin loop through list_bam')
    for bam in list_bam:
        # Outer try also guards attribute access (e.g. bam.ipv4_address),
        # which the BDDS loop above does not do.
        try:
            logger.debug(f'BAM: {bam}')
            logger.debug(f'bam_name: {bam.name}')
            bam_config_name = get_snmp_server_config_name(
                snmp_config, bam.name)
            logger.debug(f'Bam config name {bam_config_name}')
            try:
                logger.info(
                    f'Begin Append BAM server list Server {list_servers}  ')
                list_servers.append({
                    'server_type':
                    ServerType.BAM,
                    'server_name':
                    bam.name,
                    'address':
                    bam.ipv4_address,
                    'snmp_config_data':
                    snmp_config[bam_config_name]
                })
                logger.info(f'End append BAM ===> List Server {list_servers}')
            except KeyError as exception:
                logger.error(f'Exception Key Error {exception}')
                logger.error(traceback.format_exc())
                continue
        except Exception as exception:
            logger.info(f'Cant get bam.ipv4_address: {exception}')
    logger.info(f'List VMHOST Size: {len(list_vmhosts)}')
    logger.info(f'Begin loop through list_vmhosts')
    for vm_host in list_vmhosts:
        try:
            logger.debug(f'VM_HOST: {vm_host}')
            logger.debug(f'vm_name: {vm_host.name}')
            vm_host_config_name = get_snmp_server_config_name(
                snmp_config, vm_host.name)
            logger.debug(f'VM_HOST config name {vm_host_config_name}')
            try:
                logger.info(
                    f'Begin Append VM_HOST server list Server {list_servers}  '
                )
                list_servers.append({
                    'server_type':
                    ServerType.VM_HOST,
                    'server_name':
                    vm_host.name,
                    'address':
                    vm_host.ipv4_address,
                    'snmp_config_data':
                    snmp_config[vm_host_config_name]
                })
                logger.info(
                    f'End append  VM_HOST  ===> List Server {list_servers}')
            except KeyError as exception:
                logger.error(f'Exception Key Error {exception}')
                logger.error(traceback.format_exc())
                continue
        except Exception as exception:
            logger.info(f'Can not get vm_host.ip_address: {exception}')

    logger.info(f'Begin get statistic with list server {list_servers}')
    # Fan out the per-server statistic calls; map preserves input order.
    result = []
    with PoolExecutor(max_workers=10) as executor:
        for result_object in executor.map(get_server_statistic, list_servers):
            result.append(result_object)
    return result