def test_set_server_vmhost_successful(self, mock_server_type):
     # pylint: disable=missing-docstring
     # set_server for a VMHost entry should complete without raising.
     server_type = "VMHost"
     mock_server_type.VMHost.return_value = server_type
     vm_host_server = dict(name=mock.Mock(), ipv4_address=mock.Mock())
     MemcachedNFV.client = mock.Mock()
     nfv_cache = MemcachedNFV(mock.Mock(), mock.Mock())
     nfv_cache.set_server(vm_host_server, server_type)
 def test_set_server_bam_exception(self, mock_server_type):
     # pylint: disable=missing-docstring
     # Exercise set_server for a BAM entry with a mocked client.
     server_type = "BAM"
     mock_server_type.BAM.return_value = server_type
     bam_server = dict(name=mock.Mock(), ipv4_address=mock.Mock())
     MemcachedNFV.client = mock.Mock()
     nfv_cache = MemcachedNFV(mock.Mock(), mock.Mock())
     nfv_cache.set_server(bam_server, server_type)
 def test__get_connection(self, mock_base):
     # pylint: disable=missing-docstring
     # Replace the client factory with a non-callable value so that
     # _get_connection is expected to raise.
     mock_base.Client = True
     MemcachedNFV.client = mock.Mock()
     nfv_cache = MemcachedNFV(mock.Mock(), mock.Mock())
     with self.assertRaises(Exception):
         nfv_cache._get_connection()
 def test_get_server_bam(self, mock_server_type):
     # pylint: disable=missing-docstring
     # get_server should handle a cache miss (client.get returns None).
     server_id = 334498
     server_type = "BAM"
     mock_server_type.BAM.return_value = server_type
     server = None
     # Replace the class-level client with a mock BEFORE configuring it,
     # so this test is self-contained instead of depending on a sibling
     # test having already assigned MemcachedNFV.client = mock.Mock().
     MemcachedNFV.client = mock.Mock()
     MemcachedNFV.client.get.return_value = server
     memcached_nfv = MemcachedNFV(mock.Mock(), mock.Mock())
     memcached_nfv.get_server(server_id, server_type)
 def test_delete_server_bam(self, mock_server_type):
     # pylint: disable=missing-docstring
     # delete_server should remove the "<type>|<bam_ip>|<id>" cache key.
     server_id = 334498
     server_type = "BDDS"
     bam_ip = None
     mock_server_type.BDDS.return_value = server_type
     nfv_cache = MemcachedNFV(mock.Mock(), mock.Mock())
     cache_key = "|".join([str(server_type), str(bam_ip), str(server_id)])
     nfv_cache.client.delete(cache_key)
     nfv_cache.delete_server(server_id, server_type)
 def test_get_list_servers(self, mock_server_type):
     # pylint: disable=missing-docstring
     # get_list_server_keys is replaced with a MagicMock; its call result
     # iterates as empty, so all three category lists come back empty.
     # (A previously-present `key` mock with a dangling
     # `.split.return_value.return_value` chain was dead setup — it was
     # never passed to the code under test — and has been removed.)
     memcached_nfv = MemcachedNFV(mock.Mock(), mock.Mock())
     memcached_nfv.get_list_server_keys = mock.MagicMock()
     actual = memcached_nfv.get_list_servers()
     expect = ([], [], [])
     self.assertEqual(expect, actual)
 def test_set_server_bam_successful(self, mock_log, mock_server_type):
     # pylint: disable=missing-docstring
     # On success, set_server logs "Added <type>|<ip> to memcache".
     server_type = "BAM"
     mock_server_type.BAM.return_value = server_type
     bam_server = {'name': mock.Mock(), 'ipv4_address': mock.Mock()}
     MemcachedNFV.client = mock.Mock()
     nfv_cache = MemcachedNFV(mock.Mock(), mock.Mock())
     nfv_cache.set_server(bam_server, server_type)
     expected_key = "{}|{}".format(mock_server_type.BAM,
                                   bam_server['ipv4_address'])
     mock_log.assert_called_with('Added {} to memcache'.format(expected_key))
Example #8
0
def scheduler_get_statistic_job():
    """
    Scheduler job: collect statistics through a short-lived memcached client.

    Connects using the configured memcached host/port, gathers statistics
    with collect_statistics(), and always releases the connection — even
    when collection raises — before logging the result.
    """
    # init
    memcached_host, memcached_port = get_memcached_config()
    mem_nfv = MemcachedNFV(memcached_host, memcached_port)
    try:
        statistics = collect_statistics(mem_nfv)
    finally:
        # Close the connection even if collect_statistics() raises,
        # so repeated scheduler runs do not leak connections.
        mem_nfv.disconnect()
    # Lazy %-args: the message is only rendered when INFO is enabled.
    logger.info('Get statistic: %s', statistics)
    def test_get_server_exception(self, mock_log, mock_server_type):
        # pylint: disable=missing-docstring
        # get_server should propagate the exception raised by client.get.
        server_id = 334498
        server_type = "BDDS"
        mock_server_type.BDDS.return_value = server_type
        # NOTE(review): relies on MemcachedNFV.client already being a mock
        # (assigned by a sibling test); other tests set
        # MemcachedNFV.client = mock.Mock() explicitly first — confirm.
        MemcachedNFV.client.get.side_effect = Exception("exception")
        memcached_nfv = MemcachedNFV(mock.Mock(), mock.Mock())

        with self.assertRaises(Exception) as exception:
            memcached_nfv.get_server(server_id, server_type)
            # NOTE(review): unreachable — get_server raises on the line
            # above, so this assertion never executes. It likely belongs
            # outside the with-block (and should probably format
            # exception.exception, not the context manager itself).
            mock_log.assert_called_with(
                'Added {} to memcached'.format(exception))
def init_server_cached_list():
    """Fetch the configured BAM/BDDS/VM-host servers and prime memcached.

    Reads the NFV config, resolves the configuration id by name, fetches
    the server list from BAM, and writes BAM, BDDS and VM_HOST entries
    into memcached.

    Returns:
        json -- {"Status": "SUCCESS"} on success,
                {"Status": "FAIL"} when any step raises.
    """
    try:
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        configuration_name = data_config['bam_config_name']
        g.user.logger.debug(
            'Init_server_cached_list - configuration_name: {}'.format(
                configuration_name))
        configuration_id = gateway_nfv_management.get_configuration_id(
            configuration_name)
        g.user.logger.info(
            'Get list server of configure_id {}'.format(configuration_id))
        list_servers = gateway_nfv_management.get_list_servers(
            configuration_id)
        g.user.logger.info(
            'Init_server_cached_list - Number of get list server: {}'.format(
                len(list_servers)))
        # Init memcached
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        try:
            # Set bam server info to memcached server
            bam_ip = data_config['bam'][0]['ip']
            bam_name = data_config['bam'][0]['name']
            mem_nfv.set_server({
                'name': bam_name,
                'ipv4_address': bam_ip
            }, ServerType.BAM)
            # Set bdds server info to memcached server
            list_udf_name = [
                udf['name'] for udf in data_config['udfs_for_server']
            ]
            for server in list_servers:
                mem_nfv.set_server(server, ServerType.BDDS, bam_ip,
                                   list_udf_name)
            # Set VM_HOST server info to memcached server
            vm_host_ip = data_config["vm_host_ip"]
            vm_name = data_config['vm_host_name']
            mem_nfv.set_server({
                'name': vm_name,
                'ipv4_address': vm_host_ip
            }, ServerType.VM_HOST)
        finally:
            # Always release the memcached connection, even when one of the
            # set_server calls above fails (previously a failure skipped
            # disconnect() and leaked the connection; the outer except
            # still returns the FAIL payload).
            mem_nfv.disconnect()
    except Exception as exception:
        g.user.logger.error('Init_server_cached_list - {}'.format(exception))
        g.user.logger.error(traceback.format_exc())
        return jsonify({"Status": "FAIL"})
    return jsonify({"Status": "SUCCESS"})
 def test_init_memcached_nfv_failed(self, mock_log, mock_base,
                                    mock_get_connetion):
     # pylint: disable=missing-docstring
     # When _get_connection raises during __init__, the constructor is
     # expected to log (two log calls) rather than propagate the error.
     # (Removed a stale commented-out assert_called_with block.)
     host = mock.Mock()
     port = mock.Mock()
     exception_msg = "Cannot connect to memcached"
     mock_get_connetion.side_effect = Exception(exception_msg)
     memcached_nfv = MemcachedNFV(host, port)
     self.assertEqual(mock_log.call_count, 2)
Example #12
0
def main():
    """Reset the cache, seed the server list, then run the stats scheduler."""
    # Wipe any stale entries before re-initialising the cache.
    logger.info("Clean memcached before init")
    cache_host, cache_port = get_memcached_config()
    cache_client = MemcachedNFV(cache_host, cache_port)
    cache_client.clean_memcached()
    cache_client.disconnect()

    # Seed the cached server list.
    init_server_cached_list_api()

    # Periodic statistics collection, interval taken from the NFV config.
    config = read_config_json_file(NFV_CONFIG_PATH)
    poll_seconds = int(config['interval'])

    pool_config = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    scheduler = BlockingScheduler()
    scheduler.configure(executors=pool_config,
                        job_defaults={'coalesce': False, 'max_instances': 3})
    scheduler.add_job(scheduler_get_statistic_job,
                      'interval',
                      seconds=poll_seconds)
    # Blocks the current thread and runs scheduled jobs.
    scheduler.start()
def get_available_addresses(management_network, service_network):
    """Pick free management/service addresses and reserve them in memcached.

    Args:
        management_network: dict with optional "ip_v4"/"ip_v6" sub-dicts,
            each consumed by get_network_detail().
        service_network: dict with the same shape for the service side.

    Returns:
        dict with a "management" entry (always "ip_v4", "ip_v6" only when
        an IPv6 CIDR is configured) and an optional "service" entry; each
        holds cidr, gateway, and the chosen address in "ip/size" form.

    Raises:
        ValueError: when the management IPv4 network data is missing or
            incomplete.
    """
    management_ipv4_network = management_network.get("ip_v4", {})
    if not management_ipv4_network:
        raise ValueError('Invalid management network data')
    management_ipv6_network = management_network.get("ip_v6", {})
    mgnt_ipv4_cidr, mgnt_ipv4_start_ip, mgnt_ipv4_end_ip, mgnt_ipv4_gateway = get_network_detail(
        management_ipv4_network)
    mngt_ipv4_net_size = mgnt_ipv4_cidr.split("/")[1]
    mgnt_ipv6_cidr, mgnt_ipv6_start_ip, mgnt_ipv6_end_ip, mgnt_ipv6_gateway = get_network_detail(
        management_ipv6_network)
    # mngt_ipv6_net_size is only bound when an IPv6 CIDR exists; every
    # later use is guarded by the same `if mgnt_ipv6_cidr:` condition.
    if mgnt_ipv6_cidr:
        mngt_ipv6_net_size = mgnt_ipv6_cidr.split("/")[1]
    if all(e != "" for e in [
            mgnt_ipv4_cidr, mgnt_ipv4_start_ip, mgnt_ipv4_end_ip,
            mgnt_ipv4_gateway
    ]):
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        srv_ipv4_network = service_network.get("ip_v4", {})
        srv_ipv6_network = service_network.get("ip_v6", {})
        srv_cidr, srv_start_ip, srv_end_ip, srv_gateway = get_network_detail(
            srv_ipv4_network)
        srv_ipv6_cidr, srv_ipv6_start_ip, srv_ipv6_end_ip, srv_ipv6_gateway = get_network_detail(
            srv_ipv6_network)
        # Addresses already taken on BAM; gateways are appended so they
        # are never handed out as candidates.
        used_ipv4, used_ipv6 = get_used_addresses_on_bam(mem_nfv)
        if mgnt_ipv4_gateway:
            used_ipv4.append(mgnt_ipv4_gateway)
        if mgnt_ipv6_gateway:
            used_ipv6.append(mgnt_ipv6_gateway)
        if srv_gateway:
            used_ipv4.append(srv_gateway)
        if srv_ipv6_gateway:
            used_ipv6.append(srv_ipv6_gateway)

        # set_network(ip, "0", 1800): presumably reserves the candidate in
        # memcached for 1800 seconds so concurrent callers do not pick the
        # same address — TODO confirm set_network semantics.
        mgnt_ipv4 = get_ip_in_list_in_used_ips(mem_nfv, mgnt_ipv4_start_ip,
                                               mgnt_ipv4_end_ip, used_ipv4)
        mgnt_ipv4_str = mgnt_ipv4 + "/" + mngt_ipv4_net_size
        mem_nfv.set_network(mgnt_ipv4, "0", 1800)
        if mgnt_ipv6_cidr:
            mgnt_ipv6 = get_ip_in_list_in_used_ips(mem_nfv, mgnt_ipv6_start_ip,
                                                   mgnt_ipv6_end_ip, used_ipv6)
            mgnt_ipv6_str = mgnt_ipv6 + "/" + mngt_ipv6_net_size
            mem_nfv.set_network(mgnt_ipv6, "0", 1800)
        srv_ip_str = ""
        srv_ipv6_str = ""
        srv_ip = ""
        srv_ipv6_ip = ""
        if srv_cidr:
            srv_ip = get_ip_in_list_in_used_ips(mem_nfv, srv_start_ip,
                                                srv_end_ip, used_ipv4)
        if srv_ipv6_cidr:
            srv_ipv6_ip = get_ip_in_list_in_used_ips(mem_nfv,
                                                     srv_ipv6_start_ip,
                                                     srv_ipv6_end_ip,
                                                     used_ipv6)
        if srv_ip:
            srv_ipv4_net_size = srv_cidr.split("/")[1]
            srv_ip_str = srv_ip + "/" + srv_ipv4_net_size
            mem_nfv.set_network(srv_ip, "0", 1800)
        if srv_ipv6_ip:
            srv_ipv6_net_size = srv_ipv6_cidr.split("/")[1]
            srv_ipv6_str = srv_ipv6_ip + "/" + srv_ipv6_net_size
            mem_nfv.set_network(srv_ipv6_ip, "0", 1800)
        # Assemble the response payload; optional sections are only added
        # when the corresponding CIDR was configured.
        result = dict()
        result["management"] = {}
        result["management"]["ip_v4"] = {
            "cidr": mgnt_ipv4_cidr,
            "gateway": mgnt_ipv4_gateway,
            "management_ipv4": mgnt_ipv4_str
        }
        if mgnt_ipv6_cidr:
            result["management"]["ip_v6"] = {
                "cidr": mgnt_ipv6_cidr,
                "gateway": mgnt_ipv6_gateway,
                "management_ipv6": mgnt_ipv6_str
            }
        if srv_cidr or srv_ipv6_cidr:
            result["service"] = {}
            if srv_cidr:
                tmp = {
                    "cidr": srv_cidr,
                    "gateway": srv_gateway,
                    "service_ipv4": srv_ip_str
                }
                result["service"]["ip_v4"] = tmp
            if srv_ipv6_cidr:
                tmp = {
                    "cidr": srv_ipv6_cidr,
                    "gateway": srv_ipv6_gateway,
                    "service_ipv6": srv_ipv6_str
                }
                result["service"]["ip_v6"] = tmp
        # NOTE(review): mem_nfv is never disconnected on this path —
        # confirm whether MemcachedNFV manages its own connection reuse.
        return result
    else:
        raise ValueError('Invalid management network data')
def scale_out(data):
    """
    Add a new BDDS server to BAM, deploy its DNS config, and cache it.

    :param data: request in json
    for example:
    {
    "server_name": "bdd240s",
    "mgnt_server_ip": "192.168.88.240",
    "service_server_ipv4": "192.168.89.240",
    "service_server_ipv6": "fdac::12",
    "service_server_netmask": 24,
    "service_server_v6_prefix": 64,
    "metadata": "can_scale_in=True"
    }
    :return: (flask json response, http status) — 200 on success,
        404 when the configuration or server is unavailable,
        500 when any step raises.
    """
    try:
        g.user.logger.debug("Scale out request data: {}".format(data))
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        g.user.logger.debug("NFV config data: {}".format(data_config))
        config_name = data_config['bam_config_name']
        config_id = get_configuration_id(config_name)
        if not config_id:
            return jsonify({
                "status": "Failed",
                "message": "Configuration id not found!"
            }), 404

        g.user.logger.info('Starting check available server')
        server_ip = data['mgnt_server_ip']
        server_ipv6 = None
        # Verify the target host is reachable over SSH before adding it.
        avail_server = is_check_available_server(
            server_ip, data_config['server_ssh_username'],
            data_config['server_ssh_password'])
        if not avail_server:
            return jsonify({
                "status": "Failed",
                "message": "No available server ip!"
            }), 404

        g.user.logger.info('Starting add server')
        # BAM server properties string, built up as "key=value|key=value".
        server_properties = f"password={process_password.decrypt_password(data_config['server_deployment_password'])}|connected=true|upgrade=False"
        # Optional request fields below are each probed with try/except
        # KeyError so a missing field only logs and is skipped.
        try:
            if data['metadata']:
                server_properties = f"{server_properties}|{data['metadata']}"
        except KeyError as exception:
            g.user.logger.error(str(exception))
            g.user.logger.error(traceback.format_exc())

        try:
            # Only accept a netmask in (0, 32] together with an IPv4 address.
            if (int(data['service_server_netmask']) <=
                    32) and (int(data['service_server_netmask']) >
                             0) and data['service_server_ipv4']:
                server_properties = f"{server_properties}|servicesIPv4Address={data['service_server_ipv4']}|servicesIPv4Netmask={cidr_to_netmask(data['service_server_netmask'])}"
        except KeyError as exception:
            g.user.logger.error(str(exception))
            g.user.logger.error(traceback.format_exc())

        try:
            if data['service_server_v6_prefix'] and data['service_server_ipv6']:
                server_ipv6 = data['service_server_ipv6'] + \
                              '/' + data['service_server_v6_prefix']
                server_properties = f"{server_properties}|servicesIPv6Address={data['service_server_ipv6']}|servicesIPv6Subnet={data['service_server_v6_prefix']}"
        except KeyError as exception:
            g.user.logger.error(str(exception))
            g.user.logger.error(traceback.format_exc())

        # Add the server, with the capacity profile when one is configured.
        if data_config['server_cap_profile']:
            g.user.logger.debug(
                f"Add server name {data['server_name']} server_ip {server_ip} profile {data_config['server_cap_profile']} properties {server_properties}"
            )
            server_id = add_server(server_ip=server_ip,
                                   server_name=data['server_name'],
                                   config_id=config_id,
                                   profile=data_config['server_cap_profile'],
                                   properties=server_properties)
        else:
            g.user.logger.debug(
                f"Add server name {data['server_name']} server_ip {server_ip} properties {server_properties}"
            )
            server_id = add_server(server_ip=server_ip,
                                   server_name=data['server_name'],
                                   config_id=config_id,
                                   properties=server_properties)

        g.user.logger.info('Starting create deployment roles')
        # deploy_role is True once at least one view got a deployment role.
        deploy_role = False
        if data_config['dns_view_names']:
            # Request may override the configured default role type.
            role_type = data.get("deploy_role",
                                 data_config['server_deploy_role'])
            for view_name in data_config['dns_view_names']:
                g.user.logger.debug(
                    f"Create deployment role for server {data['server_name']} view_name {view_name} role_type {role_type}"
                )
                role_id = create_deployment_roles(
                    server_name=data['server_name'],
                    server_id=server_id,
                    config_id=config_id,
                    view_name=view_name,
                    role_type=role_type)
                if not role_id:
                    g.user.logger.info(
                        'Cannot create deployment role for view name: %s' %
                        view_name)
                    continue
                deploy_role = True
        else:
            g.user.logger.info('dns_view_names not found!')
        if not deploy_role:
            g.user.logger.info('Cannot create any deployment roles!')
            return jsonify({
                "status": "Failed",
                "message": "Create deployment role failed"
            }), 500

        g.user.logger.info('Starting add raw option')
        g.user.logger.info(
            'Starting deploy DNS configuration for server name: %s' %
            data['server_name'])
        # Deployment failures below are logged but do not abort scale-out.
        deploy_server = deploy_server_config(server_id)
        if not deploy_server:
            g.user.logger.info('Deploy DNS configuration is failed!')

        g.user.logger.info(
            'Starting wait for DNS deployment for server name: %s' %
            data['server_name'])
        deploy_status = wait_for_deployment(server_id)
        g.user.logger.info(
            'Deployment status for server name %s, deploy status id %s' %
            (data['server_name'], deploy_status))

        if 'anycast_config' in data_config:
            g.user.logger.info('Starting configure any cast')
            configure_anycast(server_ip, server_ipv6,
                              data_config['server_ssh_username'],
                              data_config['server_ssh_password'],
                              data_config['anycast_config'])

        g.user.logger.info('Adding server to cache list')
        # Add BDDS to memcached
        bam_ip = data_config['bam'][0]['ip']
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        # NOTE(review): data['metadata'] is accessed unguarded here, while
        # the earlier access was wrapped in try/except KeyError — a request
        # without metadata would 500 at this point; confirm intent.
        list_udf_name = [
            value.split('=')[0].strip()
            for value in data['metadata'].split('|')
        ]
        mem_nfv.set_server(
            {
                'id':
                server_id,
                'name':
                data['server_name'],
                'type':
                ServerType.BDDS,
                'properties':
                f"defaultInterfaceAddress={server_ip}|{server_properties}"
            }, ServerType.BDDS, bam_ip, list_udf_name)
        g.user.logger.info(
            f"SUCCESS: Add server to memcached with info 'id': {server_id}, 'name': {data['server_name']}, 'type': {ServerType.BDDS}, 'bam_ip': {bam_ip}"
        )
        g.user.logger.debug(f"'properties': {server_properties}")

        # Remove used candidate address on memcache
        mem_nfv.clean_network(data["mgnt_server_ip"])
        mem_nfv.clean_network(data["service_server_ipv4"])
        mem_nfv.clean_network(data["service_server_ipv6"])

        # Add addresses to used list on memcache
        # (stored as comma-separated strings under "used_ipv4"/"used_ipv6").
        used_ipv4_memcache = mem_nfv.get_network("used_ipv4")
        used_ipv6_memcache = mem_nfv.get_network("used_ipv6")
        used_ipv4_memcache.append(data["mgnt_server_ip"])
        if data["service_server_ipv4"]:
            used_ipv4_memcache.append(data["service_server_ipv4"])
        if data["service_server_ipv6"]:
            used_ipv6_memcache.append(data["service_server_ipv6"])
            ipv6_str = ",".join(used_ipv6_memcache)
            mem_nfv.set_network("used_ipv6", ipv6_str)
        ipv4_str = ",".join(used_ipv4_memcache)
        mem_nfv.set_network("used_ipv4", ipv4_str)

    except Exception as exception:
        g.user.logger.error(str(exception))
        g.user.logger.error(
            f"Failed: Haven't add server to mem cached {exception}")
        g.user.logger.error(traceback.format_exc())
        return jsonify({
            "status": "Failed",
            "message": "Scale out failed",
            "error": str(exception)
        }), 500
    return jsonify({
        "status": "Successful",
        "message": "Scale out successfully",
        "error": ""
    }), 200
def scale_in(data):
    """
    Remove a BDDS server from BAM and evict it from the memcached cache.

    :param data: request in json
    example
    {
        "server_name": "bdd240s"
    }
    :return: (flask json response, http status) — 200 on success,
        404 when the configuration id is missing, 403 when the server is
        not found, 500 when any step raises.
    """
    try:
        g.user.logger.debug("Scale out request data: {}".format(data))
        data_config = read_config_json_file(NFV_CONFIG_PATH)
        g.user.logger.debug("NFV config data: {}".format(data_config))
        config_name = data_config['bam_config_name']
        config_id = get_configuration_id(config_name)
        if not config_id:
            return jsonify({"message": "Configuration id not found!"}), 404

        server_name = data['server_name']
        server = get_server_by_name(config_id, server_name)
        server_id = server['id']
        if server_id == 0:
            return jsonify({
                "status":
                "Failed",
                "message":
                "Scale in failed",
                "error":
                str('Server {} not found!'.format(server_name))
            }), 403
        # Extract the management IP from the "key=value|..." properties
        # string; it is expected in the first or second segment.
        # NOTE(review): assumes properties always has the IP in one of the
        # first two segments — confirm against BAM's property layout.
        if server['properties'].split('|')[0].split(
                '=')[0] == "defaultInterfaceAddress":
            server_ip = server['properties'].split('|')[0].split('=')[1]
        else:
            server_ip = server['properties'].split('|')[1].split('=')[1]
        # delete server roles
        g.user.logger.info('Starting remove server roles for server name: %s' %
                           server_name)
        remove_roles = delete_server_roles(server_id)
        if not remove_roles:
            g.user.logger.info(
                'Remove server roles failed! Starting stop anycast service ...'
            )

        if 'anycast_config' in data_config:
            stop_anycast_service(server_ip, data_config['server_ssh_username'],
                                 data_config['server_ssh_password'])

        # deploy DNS server
        g.user.logger.info('Starting deploy DNS server for server name: %s' %
                           server_name)
        # Deployment failures are logged but do not abort the scale-in.
        deploy_server = deploy_server_config(server_id)
        if not deploy_server:
            g.user.logger.info('Deploy DNS server is failed!')

        # wait for deployment
        g.user.logger.info(
            'Starting wait for DNS deployment for server name: %s' %
            server_name)
        deploy_status = wait_for_deployment(server_id)
        g.user.logger.info(
            'Deployment status for server name %s, deploy status id %s' %
            (server_name, deploy_status))
        g.user.logger.info('Anycast service stopped')

        # delete server from BAM
        g.user.logger.info('Deleting server %s from BAM' % server_name)
        delete_server = delete_entity(server_id)
        if not delete_server:
            g.user.logger.info('Delete server %s failed' % server_name)

        g.user.logger.info('Deleting server from cache list')

        # Evict the BDDS entry from memcached.
        # NOTE(review): mem_nfv is never disconnected on this path —
        # confirm whether MemcachedNFV manages connection reuse itself.
        bam_ip = data_config['bam'][0]['ip']
        memcached_host = data_config['memcached_host']
        memcached_port = int(data_config['memcached_port'])
        mem_nfv = MemcachedNFV(memcached_host, memcached_port)
        mem_nfv.delete_server(server_id, ServerType.BDDS, bam_ip)
        g.user.logger.info(
            f"SUCCESS: Delete server from memcached with info 'id': {server_id}"
        )

    except Exception as exception:
        g.user.logger.error(str(exception))
        g.user.logger.error(f"Failed: Can't delete server from memcached")
        g.user.logger.error(traceback.format_exc())
        return jsonify({
            "status": "Failed",
            "message": "Scale in failed",
            "error": str(exception)
        }), 500
    return jsonify({
        "status": "Successful",
        "message": "Scale in successfully",
        "error": ""
    }), 200
 def test_clean_memcached(self):
     # pylint: disable=missing-docstring
     # clean_memcached should flush everything via the underlying client.
     nfv_cache = MemcachedNFV(mock.Mock(), mock.Mock())
     nfv_cache.client.flush_all()
     nfv_cache.clean_memcached()
 def test_disconnect(self):
     # pylint: disable=missing-docstring
     # Previously this test only closed the mocked client directly and
     # never invoked disconnect(), so the method under test was not
     # exercised at all — now it is actually called.
     MemcachedNFV.client = mock.Mock()
     memcached_nfv = MemcachedNFV(mock.Mock(), mock.Mock())
     memcached_nfv.client.close()
     memcached_nfv.disconnect()