Example No. 1
def nsx_delete_security_groups(resource, event, trigger, **kwargs):
    if kwargs['force'] is False:
        if nsx_list_security_groups(resource, event, trigger, **kwargs):
            msg = ('Do you want to delete the following NSX firewall '
                   'sections/nsgroups?')
            user_confirm = admin_utils.query_yes_no(msg, default='no')

            if user_confirm is False:
                LOG.info(_LI('NSX security groups cleanup aborted by user'))
                return

    sections = firewall.list_sections()
    # NOTE(roeyc): We use -2 indexing because we don't want to delete
    # the default firewall sections.
    if sections:
        NON_DEFAULT_SECURITY_GROUPS = -2
        for section in sections[:NON_DEFAULT_SECURITY_GROUPS]:
            LOG.info(_LI("Deleting firewall section %(display_name)s, "
                         "section id %(id)s"),
                     {'display_name': section['display_name'],
                      'id': section['id']})
            firewall.delete_section(section['id'])

    nsgroups = firewall.list_nsgroups()
    if nsgroups:
        for nsgroup in [nsg for nsg in nsgroups
                        if not utils.is_internal_resource(nsg)]:
            LOG.info(_LI("Deleting ns-group %(display_name)s, "
                         "ns-group id %(id)s"),
                     {'display_name': nsgroup['display_name'],
                      'id': nsgroup['id']})
            firewall.delete_nsgroup(nsgroup['id'])
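A standalone sketch (with hypothetical section names) of why the sections[:-2] slice above spares the defaults, assuming the backend lists the default firewall sections last, as the NOTE in the code implies:

# Hypothetical section list; NSX is assumed to return the defaults last.
sections = ['sg-web', 'sg-db', 'Default Layer3 Section',
            'Default Layer2 Section']
NON_DEFAULT_SECURITY_GROUPS = -2
print(sections[:NON_DEFAULT_SECURITY_GROUPS])
# ['sg-web', 'sg-db'] - the two default sections are preserved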
Example No. 2
def neutron_delete_security_groups(resource, event, trigger, **kwargs):
    if kwargs['force'] is False:
        if neutron_list_security_groups(resource, event, trigger, **kwargs):
            msg = ('Do you want to delete the following neutron '
                   'security groups?')
            user_confirm = admin_utils.query_yes_no(msg, default='no')
            if user_confirm is False:
                LOG.info(_LI('Neutron security groups cleanup aborted by '
                             'user'))
                return

    security_groups = neutron_sg.get_security_groups()
    if not security_groups:
        return

    for security_group in security_groups:
        try:
            LOG.info(_LI('Trying to delete %(sg_id)s'),
                     {'sg_id': security_group['id']})
            neutron_sg.delete_security_group(security_group['id'])
            LOG.info(_LI("Deleted security group name: %(name)s id: %(id)s"),
                     {'name': security_group['name'],
                      'id': security_group['id']})
        except Exception as e:
            LOG.warning(str(e))
Example No. 3
def output_formatter(resource_name, resources_list, attrs):
    """Method to format the output response from NSX/Neutron.

    Depending on the --fmt cli option we format the output as
    JSON or as a table.
    """
    LOG.info(_LI('%(resource_name)s'), {'resource_name': resource_name})
    if not resources_list:
        LOG.info(_LI('No resources found'))
        return ''

    fmt = cfg.CONF.fmt
    if fmt == 'psql':
        tableout = prettytable.PrettyTable(attrs)
        tableout.padding_width = 1
        tableout.align = "l"
        for resource in resources_list:
            resource_list = []
            for attr in attrs:
                resource_list.append(resource.get(attr))
            tableout.add_row(resource_list)
        return tableout

    elif fmt == 'json':
        js_output = {}
        js_output[resource_name] = []
        for resource in resources_list:
            result = {}
            for attr in attrs:
                result[attr] = resource[attr]
            js_output[resource_name].append(result)
        return jsonutils.dumps(js_output, sort_keys=True, indent=4)
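A minimal standalone sketch of the 'json' branch above, using the stdlib json module in place of oslo's jsonutils (dumps() takes the same keyword arguments here); the resource data is made up:

import json

resources_list = [{'id': 'net-1', 'name': 'web'},
                  {'id': 'net-2', 'name': 'db'}]
attrs = ['name', 'id']
js_output = {'networks': [{attr: res[attr] for attr in attrs}
                          for res in resources_list]}
print(json.dumps(js_output, sort_keys=True, indent=4))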
Example No. 4
def nsx_update_switch(resource, event, trigger, **kwargs):
    nsxv = utils.get_nsxv_client()
    if not kwargs.get('property'):
        LOG.error(_LE("Need to specify dvs-id parameter and "
                      "attribute to update. Add --property dvs-id=<dvs-id> "
                      "--property teamingpolicy=<policy>"))
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    dvs_id = properties.get('dvs-id')
    if not dvs_id:
        LOG.error(_LE("Need to specify dvs-id. "
                      "Add --property dvs-id=<dvs-id>"))
        return
    h, switch = nsxv.get_vdn_switch(dvs_id)
    policy = properties.get('teamingpolicy')
    if policy:
        if switch['teamingPolicy'] == policy:
            LOG.info(_LI("Policy already set!"))
            return
        LOG.info(_LI("Updating NSXv switch %(dvs)s teaming policy to "
                     "%(policy)s"), {'dvs': dvs_id, 'policy': policy})
        switch['teamingPolicy'] = policy
        switch = nsxv.update_vdn_switch(switch)
        LOG.info(_LI("Switch value after update: %s"), switch)
    else:
        LOG.error(_LE("No teaming policy set. "
                      "Add --property teamingpolicy=<policy>"))
        LOG.info(_LI("Current switch value is: %s"), switch)
Example No. 5
def list_missing_networks(resource, event, trigger, **kwargs):
    """List neutron networks that are missing the NSX backend network
    """
    plugin = db_base_plugin_v2.NeutronDbPluginV2()
    admin_cxt = neutron_context.get_admin_context()
    neutron_networks = plugin.get_networks(admin_cxt)
    networks = []
    for net in neutron_networks:
        neutron_id = net['id']
        # get the network nsx id from the mapping table
        nsx_id = get_network_nsx_id(admin_cxt, neutron_id)
        if not nsx_id:
            # skip external networks
            pass
        else:
            try:
                utils.get_connected_nsxlib().logical_switch.get(nsx_id)
            except nsx_exc.ResourceNotFound:
                networks.append({
                    'name': net['name'],
                    'neutron_id': neutron_id,
                    'nsx_id': nsx_id
                })
    if len(networks) > 0:
        title = _LI("Found %d internal networks missing from the NSX "
                    "manager:") % len(networks)
        LOG.info(
            formatters.output_formatter(title, networks,
                                        ['name', 'neutron_id', 'nsx_id']))
    else:
        LOG.info(_LI("All internal networks exist on the NSX manager"))
Example No. 6
def nsx_clean_spoofguard_policy(resource, event, trigger, **kwargs):
    """Delete spoofguard policy"""
    errmsg = ("Need to specify policy-id. Add --property "
              "policy-id=<policy-id>")
    if not kwargs.get('property'):
        LOG.error(_LE("%s"), errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    policy_id = properties.get('policy-id')
    if not policy_id:
        LOG.error(_LE("%s"), errmsg)
        return
    try:
        nsxv.get_spoofguard_policy(policy_id)
    except exceptions.NeutronException as e:
        LOG.error(_LE("Unable to retrieve policy %(p)s: %(e)s"), {
            'p': policy_id,
            'e': str(e)
        })
    else:
        confirm = admin_utils.query_yes_no(
            "Do you want to delete spoofguard-policy: %s" % policy_id,
            default="no")
        if not confirm:
            LOG.info(_LI("spoofguard-policy deletion aborted by user"))
            return
        try:
            nsxv.delete_spoofguard_policy(policy_id)
        except Exception as e:
            LOG.error(_LE("%s"), str(e))
        LOG.info(_LI('spoofguard-policy successfully deleted.'))
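Example No. 7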
def nsx_clean_spoofguard_policy(resource, event, trigger, **kwargs):
    """Delete spoofguard policy"""
    errmsg = ("Need to specify policy-id. Add --property "
              "policy-id=<policy-id>")
    if not kwargs.get('property'):
        LOG.error(_LE("%s"), errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    policy_id = properties.get('policy-id')
    if not policy_id:
        LOG.error(_LE("%s"), errmsg)
        return
    try:
        nsxv.get_spoofguard_policy(policy_id)
    except exceptions.NeutronException as e:
        LOG.error(_LE("Unable to retrieve policy %(p)s: %(e)s"),
                  {'p': policy_id, 'e': str(e)})
    else:
        confirm = admin_utils.query_yes_no(
            "Do you want to delete spoofguard-policy: %s" % policy_id,
            default="no")
        if not confirm:
            LOG.info(_LI("spoofguard-policy deletion aborted by user"))
            return
        try:
            nsxv.delete_spoofguard_policy(policy_id)
        except Exception as e:
            LOG.error(_LE("%s"), str(e))
        LOG.info(_LI('spoofguard-policy successfully deleted.'))
Example No. 8
def nsx_list_missing_spoofguard_policies(resource, event, trigger, **kwargs):
    """List missing spoofguard policies on NSXv.

    Spoofguard policies that have a binding in the Neutron DB but no
    corresponding policy on the NSXv backend.
    """
    props = kwargs.get('property')
    reverse = True if props and props[0] == 'reverse' else False
    if reverse:
        LOG.info(
            _LI("Spoofguard policies on NSXv but not present in "
                "Neutron Db"))
    else:
        LOG.info(
            _LI("Spoofguard policies in Neutron Db but not present "
                "on NSXv"))
    missing_policies = get_missing_spoofguard_policy_mappings(reverse)
    if not missing_policies:
        LOG.info(
            _LI("\nNo missing spoofguard policies found."
                "\nNeutron DB and NSXv backend are in sync\n"))
    else:
        LOG.info(missing_policies)
        missing_policies = [{'policy_id': pid} for pid in missing_policies]
        LOG.info(
            formatters.output_formatter(constants.SPOOFGUARD_POLICY,
                                        missing_policies, ['policy_id']))
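The reverse flag just flips the direction of a set difference; a sketch with hypothetical policy ids:

neutron_policies = {'policy-1', 'policy-2'}
nsxv_policies = {'policy-2', 'policy-3'}
# Default direction: in the Neutron DB but not on NSXv
print(neutron_policies - nsxv_policies)  # {'policy-1'}
# reverse: on NSXv but not in the Neutron DB
print(nsxv_policies - neutron_policies)  # {'policy-3'}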
Example No. 9
def list_missing_routers(resource, event, trigger, **kwargs):
    """List neutron routers that are missing the NSX backend router
    """
    plugin = RoutersPlugin()
    admin_cxt = neutron_context.get_admin_context()
    neutron_routers = plugin.get_routers(admin_cxt)
    router_client = get_router_client()
    routers = []
    for router in neutron_routers:
        neutron_id = router['id']
        # get the router nsx id from the mapping table
        nsx_id = nsx_db.get_nsx_router_id(admin_cxt.session, neutron_id)
        if not nsx_id:
            routers.append({
                'name': router['name'],
                'neutron_id': neutron_id,
                'nsx_id': None
            })
        else:
            try:
                router_client.get(nsx_id)
            except nsx_exc.ResourceNotFound:
                routers.append({
                    'name': router['name'],
                    'neutron_id': neutron_id,
                    'nsx_id': nsx_id
                })
    if len(routers) > 0:
        title = _LI("Found %d routers missing from the NSX "
                    "manager:") % len(routers)
        LOG.info(
            formatters.output_formatter(title, routers,
                                        ['name', 'neutron_id', 'nsx_id']))
    else:
        LOG.info(_LI("All routers exist on the NSX manager"))
Example No. 10
def nsx_list_missing_edges(resource, event, trigger, **kwargs):
    """List missing edges and networks serviced by those edges.

    Missing edges are NSXv edges that have a binding in Neutron DB
    but are currently missing from the NSXv backend.
    """
    LOG.info(
        _LI("NSXv edges present in Neutron DB but not present "
            "on the NSXv backend\n"))
    missing_edges = get_missing_edges()
    if not missing_edges:
        LOG.info(
            _LI("\nNo edges are missing."
                "\nNeutron DB and NSXv backend are in sync\n"))
    else:
        data = [('edge_id', 'network_id')]
        for edge in missing_edges:
            # Retrieve all networks which are serviced by this edge.
            edge_serviced_networks = get_router_edge_vnic_bindings(edge)
            if not edge_serviced_networks:
                # If the edge is missing on the backend but no network
                # is serviced by this edge, output N/A.
                data.append((edge, 'N/A'))
            for bindings in edge_serviced_networks:
                data.append((edge, bindings.network_id))
        LOG.info(formatters.tabulate_results(data))
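formatters.tabulate_results receives a list of tuples whose first element is the header row; a hypothetical stand-in showing that data shape rendered with plain string padding:

data = [('edge_id', 'network_id'),
        ('edge-12', 'net-1'),
        ('edge-17', 'N/A')]
widths = [max(len(str(row[col])) for row in data)
          for col in range(len(data[0]))]
for row in data:
    print('  '.join(str(cell).ljust(width)
                    for cell, width in zip(row, widths)))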
Example No. 11
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    if is_user_port(port, check_dev_id=True):
        network_id = port["network_id"]
        network = plugin.get_network(context, network_id)
        if network[external_net.EXTERNAL]:
            LOG.info(_LI("Network %s is external: nothing to do"),
                     network_id)
            return
        subnet_id = port["fixed_ips"][0]['subnet_id']
        host_data = {
            "instance_id": port["device_id"],
            "tenant_id": port["tenant_id"],
            "ip_address": port["fixed_ips"][0]['ip_address']
        }
        LOG.info(_LI("Configuring metadata entry for port %s"), port)
        if not is_delete:
            handler = plugin.lsn_manager.lsn_port_meta_host_add
        else:
            handler = plugin.lsn_manager.lsn_port_meta_host_remove
        try:
            handler(context, network_id, subnet_id, host_data)
        except p_exc.PortConfigurationError:
            with excutils.save_and_reraise_exception():
                if not is_delete:
                    db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                        plugin, context, port['id'])
        LOG.info(_LI("Metadata for port %s configured successfully"),
                 port['id'])
Example No. 12
def neutron_delete_security_groups(resource, event, trigger, **kwargs):
    if kwargs['force'] is False:
        if neutron_list_security_groups(resource, event, trigger, **kwargs):
            msg = ('Do you want to delete the following neutron '
                   'security groups?')
            user_confirm = admin_utils.query_yes_no(msg, default='no')
            if user_confirm is False:
                LOG.info(
                    _LI('Neutron security groups cleanup aborted by '
                        'user'))
                return

    security_groups = neutron_sg.get_security_groups()
    if not security_groups:
        return

    for security_group in security_groups:
        try:
            LOG.info(_LI('Trying to delete %(sg_id)s'),
                     {'sg_id': security_group['id']})
            neutron_sg.delete_security_group(security_group['id'])
            LOG.info(_LI("Deleted security group name: %(name)s id: %(id)s"), {
                'name': security_group['name'],
                'id': security_group['id']
            })
        except Exception as e:
            LOG.warning(str(e))
Example No. 13
def nsx_delete_security_groups(resource, event, trigger, **kwargs):
    if kwargs['force'] is False:
        if nsx_list_security_groups(resource, event, trigger, **kwargs):
            msg = ('Do you want to delete the following NSX firewall '
                   'sections/nsgroups?')
            user_confirm = admin_utils.query_yes_no(msg, default='no')

            if user_confirm is False:
                LOG.info(_LI('NSX security groups cleanup aborted by user'))
                return

    sections = firewall.list_sections()
    # NOTE(gangila): We use -1 indexing because trying to delete the default
    # security group on NSX Manager raises an exception.
    if sections:
        NON_DEFAULT_SECURITY_GROUPS = -1
        for section in sections[:NON_DEFAULT_SECURITY_GROUPS]:
            LOG.info(_LI("Deleting firewall section %(display_name)s, "
                         "section id %(id)s"),
                     {'display_name': section['display_name'],
                      'id': section['id']})
            firewall.delete_section(section['id'])

    nsgroups = firewall.list_nsgroups()
    if nsgroups:
        for nsgroup in nsgroups:
            LOG.info(_LI("Deleting ns-group %(display_name)s, "
                         "ns-group id %(id)s"),
                     {'display_name': nsgroup['display_name'],
                      'id': nsgroup['id']})
            firewall.delete_nsgroup(nsgroup['id'])
Example No. 14
def get_metadata_status(resource, event, trigger, **kwargs):
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        net_id = properties.get('network_id')
    else:
        net_id = None

    edgeapi = utils.NeutronDbClient()
    edge_list = nsxv_db.get_nsxv_internal_edges_by_purpose(
        edgeapi.context.session,
        vcns_constants.InternalEdgePurposes.INTER_EDGE_PURPOSE)
    md_rtr_ids = [edge['router_id'] for edge in edge_list]
    router_bindings = nsxv_db.get_nsxv_router_bindings(
        edgeapi.context.session,
        filters={'router_id': md_rtr_ids})
    edge_ids = [b['edge_id'] for b in router_bindings]
    _md_member_status('Metadata edge appliance: %s members', edge_ids)

    if net_id:
        as_provider_data = nsxv_db.get_edge_vnic_bindings_by_int_lswitch(
            edgeapi.context.session, net_id)
        providers = [asp['edge_id'] for asp in as_provider_data]
        if providers:
            LOG.info(_LI('Metadata providers for network %s'), net_id)
            _md_member_status('Edge  %s', providers)
        else:
            LOG.info(_LI('No providers found for network %s'), net_id)
Example No. 15
def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
    """Update Metadata proxy for NSXv3 CrossHairs."""

    nsx_version = utils.get_connected_nsxlib().get_version()
    if not nsx_utils.is_nsx_version_1_1_0(nsx_version):
        LOG.info(_LI("This utility is not available for NSX version %s"),
                 nsx_version)
        return

    metadata_proxy_uuid = None
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        metadata_proxy_uuid = properties.get('metadata_proxy_uuid')
    if not metadata_proxy_uuid:
        LOG.error(_LE("metadata_proxy_uuid is not defined"))
        return

    cfg.CONF.set_override('dhcp_agent_notification', False)
    cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3')
    cfg.CONF.set_override('metadata_proxy_uuid', metadata_proxy_uuid, 'nsx_v3')

    plugin = utils.NsxV3PluginWrapper()
    nsx_client = utils.get_nsxv3_client()
    port_resource = resources.LogicalPort(nsx_client)

    # For each Neutron network, check if it is an internal metadata network.
    # If yes, delete the network and associated router interface.
    # Otherwise, create a logical switch port with MD-Proxy attachment.
    for network in neutron_client.get_networks():
        if _is_metadata_network(network):
            # It is a metadata network, find the attached router,
            # remove the router interface and the network.
            filters = {'device_owner': const.ROUTER_INTERFACE_OWNERS,
                       'fixed_ips': {
                           'subnet_id': [network['subnets'][0]],
                           'ip_address': [nsx_rpc.METADATA_GATEWAY_IP]}}
            ports = neutron_client.get_ports(filters=filters)
            if not ports:
                continue
            router_id = ports[0]['device_id']
            interface = {'subnet_id': network['subnets'][0]}
            plugin.remove_router_interface(router_id, interface)
            LOG.info(_LI("Removed metadata interface on router %s"), router_id)
            plugin.delete_network(network['id'])
            LOG.info(_LI("Removed metadata network %s"), network['id'])
        else:
            lswitch_id = neutron_client.net_id_to_lswitch_id(network['id'])
            if not lswitch_id:
                continue
            tags = nsx_utils.build_v3_tags_payload(
                network, resource_type='os-neutron-net-id',
                project_name='admin')
            name = nsx_utils.get_name_and_uuid('%s-%s' % (
                'mdproxy', network['name'] or 'network'), network['id'])
            port_resource.create(
                lswitch_id, metadata_proxy_uuid, tags=tags, name=name,
                attachment_type=nsx_constants.ATTACHMENT_MDPROXY)
            LOG.info(_LI("Enabled native metadata proxy for network %s"),
                     network['id'])
Example No. 16
    def _ensure_default_l2_gateway(self, resource, event, trigger, **kwargs):
        """
        Create a default logical L2 gateway.

        Create a logical L2 gateway in the neutron database if the
        default_bridge_cluster config parameter is set and if it is
        not previously created. If not set, return.
        """
        def_l2gw_name = cfg.CONF.nsx_v3.default_bridge_cluster
        # Return if no default_bridge_cluster set in config
        if not def_l2gw_name:
            LOG.info(
                _LI("NSX: Default bridge cluster not configured "
                    "in nsx.ini. No default L2 gateway created."))
            return
        admin_ctx = context.get_admin_context()

        def_l2gw_uuid = (self._core_plugin.nsxlib.bridge_cluster.
                         get_id_by_name_or_id(def_l2gw_name))

        # Optimistically create the default L2 gateway in neutron DB
        device = {
            'device_name': def_l2gw_uuid,
            'interfaces': [{
                'name': 'default-bridge-cluster'
            }]
        }
        def_l2gw = {'name': 'default-l2gw', 'devices': [device]}
        l2gw_dict = {self.gateway_resource: def_l2gw}
        self.create_l2_gateway(admin_ctx, l2gw_dict)
        l2_gateway = super(NsxV3Driver,
                           self).create_l2_gateway(admin_ctx, l2gw_dict)
        # Verify that only one default L2 gateway is created
        def_l2gw_exists = False
        l2gateways = self._get_l2_gateways(admin_ctx)
        for l2gateway in l2gateways:
            # Since we ensure L2 gateway is created with only 1 device, we use
            # the first device in the list.
            if l2gateway['devices'][0]['device_name'] == def_l2gw_uuid:
                if def_l2gw_exists:
                    LOG.info(_LI("Default L2 gateway is already created."))
                    try:
                        # Try deleting this duplicate default L2 gateway
                        self.validate_l2_gateway_for_delete(
                            admin_ctx, l2gateway['id'])
                        super(NsxV3Driver,
                              self).delete_l2_gateway(admin_ctx,
                                                      l2gateway['id'])
                    except l2gw_exc.L2GatewayInUse:
                        # If the L2 gateway we are trying to delete is in
                        # use then we should delete the L2 gateway which
                        # we just created ensuring there is only one
                        # default L2 gateway in the database.
                        super(NsxV3Driver,
                              self).delete_l2_gateway(admin_ctx,
                                                      l2_gateway['id'])
                else:
                    def_l2gw_exists = True
        return l2_gateway
Example No. 17
def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
    """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3

    After using api_replay to migrate the neutron data from NSX-V to NSX-T
    we need to update the VM ports to use OpaqueNetwork instead of
    DistributedVirtualPortgroup
    """
    # Connect to the DVS manager, using the configuration parameters
    try:
        dvs_mng = dvs.DvsManager()
    except Exception as e:
        LOG.error(
            _LE("Cannot connect to the DVS: Please update the [dvs] "
                "section in the nsx.ini file: %s"), e)
        return

    # Go over all the compute ports from the plugin
    admin_cxt = neutron_context.get_admin_context()
    port_filters = {'device_owner': ['compute:None']}
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

    for port in neutron_ports:
        device_id = port.get('device_id')

        # get the vm moref & spec from the DVS
        vm_moref = dvs_mng.get_vm_moref_obj(device_id)
        vm_spec = dvs_mng.get_vm_spec(vm_moref)

        # Go over the VM interfaces and check whether they should be updated
        update_spec = False
        for prop in vm_spec.propSet:
            if (prop.name == 'network'
                    and hasattr(prop.val, 'ManagedObjectReference')):
                for net in prop.val.ManagedObjectReference:
                    if net._type == 'DistributedVirtualPortgroup':
                        update_spec = True

        if not update_spec:
            LOG.info(_LI("No need to update the spec of vm %s"), device_id)
            continue

        # find the old interface by its MAC address and detach it
        device = get_vm_network_device(dvs_mng, vm_moref, port['mac_address'])
        if device is None:
            LOG.warning(_LW("No device with MAC address %s exists on the VM"),
                        port['mac_address'])
            continue
        device_type = device.__class__.__name__

        LOG.info(_LI("Detaching old interface from VM %s"), device_id)
        dvs_mng.detach_vm_interface(vm_moref, device)

        # add the new interface as OpaqueNetwork
        LOG.info(_LI("Attaching new interface to VM %s"), device_id)
        nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id'])
        dvs_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
                                    nsx_net_id, device_type)
Example No. 18
def delete_backend_network(resource, event, trigger, **kwargs):
    """Delete a backend network by its moref
    """
    errmsg = ("Need to specify moref property. Add --property moref=<moref>")
    if not kwargs.get('property'):
        LOG.error(_LE("%s"), errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    moref = properties.get('moref')
    if not moref:
        LOG.error(_LE("%s"), errmsg)
        return

    backend_name = get_networks_name_map().get(moref)
    if not backend_name:
        LOG.error(_LE("Failed to find the backend network %(moref)s"),
                  {'moref': moref})
        return

    # Note: if the backend network is attached to other backend objects,
    # such as a VM, the deletion may fail and throw an exception

    nsxv = utils.get_nsxv_client()
    if moref.startswith(PORTGROUP_PREFIX):
        # get the dvs id from the backend name:
        dvs_id = get_dvs_id_from_backend_name(backend_name)
        if not dvs_id:
            LOG.error(
                _LE("Failed to find the DVS id of backend network "
                    "%(moref)s"), {'moref': moref})
        else:
            try:
                nsxv.delete_port_group(dvs_id, moref)
            except Exception as e:
                LOG.error(
                    _LE("Failed to delete backend network %(moref)s : "
                        "%(e)s"), {
                            'moref': moref,
                            'e': e
                        })
            else:
                LOG.info(_LI("Backend network %(moref)s was deleted"),
                         {'moref': moref})
    else:
        # Virtual wire
        try:
            nsxv.delete_virtual_wire(moref)
        except Exception as e:
            LOG.error(
                _LE("Failed to delete backend network %(moref)s : "
                    "%(e)s"), {
                        'moref': moref,
                        'e': e
                    })
        else:
            LOG.info(_LI("Backend network %(moref)s was deleted"),
                     {'moref': moref})
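The moref prefix check above routes port groups and virtual wires to different delete calls; 'dvportgroup' is the vSphere managed-object prefix PORTGROUP_PREFIX presumably holds (an assumption here):

PORTGROUP_PREFIX = 'dvportgroup'  # assumed value of the constant

for moref in ('dvportgroup-1234', 'virtualwire-99'):
    if moref.startswith(PORTGROUP_PREFIX):
        print('%s -> delete_port_group' % moref)
    else:
        print('%s -> delete_virtual_wire' % moref)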
Example No. 19
def nsx_update_switch(resource, event, trigger, **kwargs):
    nsxv = utils.get_nsxv_client()
    if not kwargs.get('property'):
        LOG.error(
            _LE("Need to specify dvs-id parameter and "
                "attribute to update. Add --property dvs-id=<dvs-id> "
                "--property teamingpolicy=<policy>"))
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    dvs_id = properties.get('dvs-id')
    if not dvs_id:
        LOG.error(
            _LE("Need to specify dvs-id. "
                "Add --property dvs-id=<dvs-id>"))
        return
    try:
        h, switch = nsxv.get_vdn_switch(dvs_id)
    except exceptions.ResourceNotFound:
        LOG.error(_LE("DVS %s not found"), dvs_id)
        return
    supported_policies = [
        'ETHER_CHANNEL', 'LOADBALANCE_LOADBASED', 'LOADBALANCE_SRCID',
        'LOADBALANCE_SRCMAC', 'FAILOVER_ORDER', 'LACP_ACTIVE', 'LACP_PASSIVE',
        'LACP_V2'
    ]
    policy = properties.get('teamingpolicy')
    if policy in supported_policies:
        if switch['teamingPolicy'] == policy:
            LOG.info(_LI("Policy already set!"))
            return
        LOG.info(
            _LI("Updating NSXv switch %(dvs)s teaming policy to "
                "%(policy)s"), {
                    'dvs': dvs_id,
                    'policy': policy
                })
        switch['teamingPolicy'] = policy
        try:
            switch = nsxv.update_vdn_switch(switch)
        except exceptions.VcnsApiException as e:
            desc = jsonutils.loads(e.response)
            details = desc.get('details')
            if details.startswith("No enum constant"):
                LOG.error(_LE("Unknown teaming policy %s"), policy)
            else:
                LOG.error(_LE("Unexpected error occurred: %s"), details)
            return

        LOG.info(_LI("Switch value after update: %s"), switch)
    else:
        LOG.info(_LI("Current switch value is: %s"), switch)
        LOG.error(
            _LE("Invalid teaming policy. "
                "Add --property teamingpolicy=<policy>"))
        LOG.error(_LE("Possible values: %s"), ', '.join(supported_policies))
Example No. 20
    def __init__(self, plugin):
        # Mapping from type name to DriverManager
        self.drivers = {}

        LOG.info(_LI("Configured router type driver names: %s"),
                 ROUTER_TYPE_DRIVERS)
        super(RouterTypeManager, self).__init__(
            'vmware_nsx.neutron.nsxv.router_type_drivers',
            ROUTER_TYPE_DRIVERS,
            invoke_on_load=True,
            invoke_args=(plugin,))
        LOG.info(_LI("Loaded type driver names: %s"), self.names())
        self._register_types()
        self._check_tenant_router_types(cfg.CONF.nsxv.tenant_router_types)
Example No. 21
def list_missing_dhcp_bindings(resource, event, trigger, **kwargs):
    """List missing DHCP bindings from NSXv backend.

    Missing DHCP bindings are those that exist in the Neutron DB
    but are not present on the corresponding NSXv Edge.
    """

    for (edge_id, __) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge(
            neutron_db.context.session):
        LOG.info(_LI("%s"), "=" * 60)
        LOG.info(_LI("For edge: %s"), edge_id)
        nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id)
        neutron_dhcp_static_bindings = \
            neutron_get_static_bindings_by_edge(edge_id)
        LOG.info(_LI("# of DHCP bindings in Neutron DB: %s"),
                 len(neutron_dhcp_static_bindings))
        LOG.info(_LI("# of DHCP bindings on NSXv backend: %s"),
                 len(nsx_dhcp_static_bindings))
        missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings
        if not missing:
            LOG.info(_LI("No missing DHCP bindings found."))
            LOG.info(_LI("Neutron DB and NSXv backend are in sync"))
        else:
            LOG.info(_LI("Missing DHCP bindings:"))
            LOG.info(_LI("%s"), pprint.pformat(missing))
Example No. 22
    def __init__(self, plugin):
        # Mapping from type name to DriverManager
        self.drivers = {}

        LOG.info(_LI("Configured router type driver names: %s"),
                 ROUTER_TYPE_DRIVERS)
        super(RouterTypeManager,
              self).__init__('vmware_nsx.neutron.nsxv.router_type_drivers',
                             ROUTER_TYPE_DRIVERS,
                             invoke_on_load=True,
                             invoke_args=(plugin, ))
        LOG.info(_LI("Loaded type driver names: %s"), self.names())
        self._register_types()
        self._check_tenant_router_types(cfg.CONF.nsxv.tenant_router_types)
Example No. 23
def list_missing_dhcp_bindings(resource, event, trigger, **kwargs):
    """List missing DHCP bindings from NSXv backend.

    Missing DHCP bindings are those that exist in the Neutron DB
    but are not present on the corresponding NSXv Edge.
    """

    for (edge_id, __) in nsxv_db.get_nsxv_dhcp_bindings_count_per_edge(
            neutron_db.context.session):
        LOG.info(_LI("%s"), "=" * 60)
        LOG.info(_LI("For edge: %s"), edge_id)
        nsx_dhcp_static_bindings = nsx_get_static_bindings_by_edge(edge_id)
        neutron_dhcp_static_bindings = \
            neutron_get_static_bindings_by_edge(edge_id)
        LOG.info(_LI("# of DHCP bindings in Neutron DB: %s"),
                 len(neutron_dhcp_static_bindings))
        LOG.info(_LI("# of DHCP bindings on NSXv backend: %s"),
                 len(nsx_dhcp_static_bindings))
        missing = neutron_dhcp_static_bindings - nsx_dhcp_static_bindings
        if not missing:
            LOG.info(_LI("No missing DHCP bindings found."))
            LOG.info(_LI("Neutron DB and NSXv backend are in sync"))
        else:
            LOG.info(_LI("Missing DHCP bindings:"))
            LOG.info(_LI("%s"), pprint.pformat(missing))
Example No. 24
def handle_port_dhcp_access(plugin, context, port, action):
    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
             {"action": action, "resource": port})
    if port["device_owner"] == const.DEVICE_OWNER_DHCP:
        network_id = port["network_id"]
        if action == "create_port":
            # at this point the port must have a subnet and a fixed ip
            subnet_id = port["fixed_ips"][0]['subnet_id']
            subnet = plugin.get_subnet(context, subnet_id)
            subnet_data = {
                "mac_address": port["mac_address"],
                "ip_address": subnet['cidr'],
                "subnet_id": subnet['id']
            }
            try:
                plugin.lsn_manager.lsn_port_dhcp_setup(
                    context, network_id, port['id'], subnet_data, subnet)
            except p_exc.PortConfigurationError:
                LOG.error(_LE("Error while configuring DHCP for "
                              "port %s"), port['id'])
                raise n_exc.NeutronException()
        elif action == "delete_port":
            plugin.lsn_manager.lsn_port_dispose(context, network_id,
                                                port['mac_address'])
    elif port["device_owner"] != const.DEVICE_OWNER_DHCP:
        if port.get("fixed_ips"):
            # do something only if there are IPs and DHCP is enabled
            subnet_id = port["fixed_ips"][0]['subnet_id']
            if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
                             "to do"), subnet_id)
                return
            host_data = {
                "mac_address": port["mac_address"],
                "ip_address": port["fixed_ips"][0]['ip_address']
            }
            network_id = port["network_id"]
            if action == "create_port":
                handler = plugin.lsn_manager.lsn_port_dhcp_host_add
            elif action == "delete_port":
                handler = plugin.lsn_manager.lsn_port_dhcp_host_remove
            try:
                handler(context, network_id, subnet_id, host_data)
            except p_exc.PortConfigurationError:
                with excutils.save_and_reraise_exception():
                    if action == 'create_port':
                        db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                            plugin, context, port['id'])
    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
Example No. 25
    def _handle_request(self):
        '''First level request handling.'''
        attempt = 0
        timeout = 0
        response = None
        while response is None and attempt <= self._retries:
            eventlet.greenthread.sleep(timeout)
            attempt += 1

            req = self._issue_request()
            # automatically raises any exceptions returned.
            if isinstance(req, httplib.HTTPResponse):
                timeout = 0
                if attempt <= self._retries:
                    if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
                        continue
                    elif req.status == httplib.SERVICE_UNAVAILABLE:
                        timeout = 0.5
                        continue
                    # else fall through to return the error code

                LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
                          ": %(status)s",
                          {'rid': self._rid(), 'method': self._method,
                           'url': self._url, 'status': req.status})
                self._request_error = None
                response = req
            else:
                LOG.info(_LI('[%(rid)d] Error while handling request: '
                             '%(req)s'),
                         {'rid': self._rid(), 'req': req})
                self._request_error = req
                response = None
        return response
Example No. 26
def nsx_update_edge(resource, event, trigger, **kwargs):
    """Update edge properties"""
    usage_msg = _LE("Need to specify edge-id parameter and "
                    "attribute to update. Add --property edge-id=<edge-id> "
                    "and --property highavailability=<True/False> or "
                    "--property size=<size> or --property appliances=True")
    if not kwargs.get('property'):
        LOG.error(usage_msg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    if not properties.get('edge-id'):
        LOG.error(
            _LE("Need to specify edge-id. "
                "Add --property edge-id=<edge-id>"))
        return
    LOG.info(_LI("Updating NSXv edge: %(edge)s with properties\n%(prop)s"), {
        'edge': properties.get('edge-id'),
        'prop': properties
    })
    if properties.get('highavailability'):
        change_edge_ha(properties['highavailability'].lower() == "true",
                       properties['edge-id'])
    elif properties.get('size'):
        change_edge_appliance_size(properties)
    elif (properties.get('appliances')
          and properties.get('appliances').lower() == "true"):
        change_edge_appliance(properties['edge-id'])
    else:
        # no attribute was specified
        LOG.error(usage_msg)
Example No. 27
def nsx_clean_backup_edge(resource, event, trigger, **kwargs):
    """Delete backup edge"""
    errmsg = ("Need to specify edge-id property. Add --property "
              "edge-id=<edge-id>")
    if not kwargs.get('property'):
        LOG.error(_LE("%s"), errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    edge_id = properties.get('edge-id')
    if not edge_id:
        LOG.error(_LE("%s"), errmsg)
        return
    try:
        edge = nsxv.get_edge(edge_id)
    except exceptions.NeutronException as x:
        LOG.error(_LE("%s"), str(x))
    else:
        # edge[0] is response status code
        # edge[1] is response body
        backup_edges = [e['id'] for e in get_nsxv_backup_edges()]
        if (not edge[1]['name'].startswith('backup-')
            or edge[1]['id'] not in backup_edges):
            LOG.error(
                _LE('Edge: %s is not a backup edge; aborting delete'), edge_id)
            return

        confirm = admin_utils.query_yes_no(
            "Do you want to delete edge: %s" % edge_id, default="no")
        if not confirm:
            LOG.info(_LI("Backup edge deletion aborted by user"))
            return
        _delete_edge_from_nsx_and_neutron(edge_id, edge[1]['name'])
Example No. 28
    def acquire_connection(self, auto_login=True, headers=None, rid=-1):
        '''Check out an available HTTPConnection instance.

        Blocks until a connection is available.
        :param auto_login: automatically log in before returning conn
        :param headers: headers to pass on to the login attempt
        :param rid: request id passed in from request eventlet.
        :returns: An available HTTPConnection instance or None if no
                 api_providers are configured.
        '''
        if not self._api_providers:
            LOG.warning(_LW("[%d] no API providers currently available."), rid)
            return None
        if self._conn_pool.empty():
            LOG.debug("[%d] Waiting to acquire API client connection.", rid)
        priority, conn = self._conn_pool.get()
        now = time.time()
        if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
                         "seconds; reconnecting."),
                     {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                      'sec': now - conn.last_used})
            conn = self._create_connection(*self._conn_params(conn))

        conn.last_used = now
        conn.priority = priority  # stash current priority for release
        qsize = self._conn_pool.qsize()
        LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
                  "connection(s) available.",
                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                   'qsize': qsize})
        if auto_login and self.auth_cookie(conn) is None:
            self._wait_for_login(conn, headers)
        return conn
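A minimal sketch of the idle-timeout check in acquire_connection, with a hypothetical 60-second timeout standing in for cfg.CONF.conn_idle_timeout (Python 3 stdlib queue):

import queue
import time

CONN_IDLE_TIMEOUT = 60  # assumed value of cfg.CONF.conn_idle_timeout

class Conn(object):
    last_used = time.time() - 120  # idle for two minutes

pool = queue.PriorityQueue()
pool.put((0, Conn()))
priority, conn = pool.get()
now = time.time()
if getattr(conn, 'last_used', now) < now - CONN_IDLE_TIMEOUT:
    print('idle for %0.2f s; would reconnect' % (now - conn.last_used))
conn.last_used = now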
Example No. 29
    def __init__(self, **kwargs):
        self._required_attributes = REQUIRED_ATTRIBUTES[:]
        self._important_attributes = IMPORTANT_ATTRIBUTES[:]
        self._deprecated_attributes = {}
        self._sanity_check(kwargs)

        for opt, val in six.iteritems(self._deprecated_attributes):
            LOG.deprecated(
                _("Attribute '%s' has been deprecated or moved "
                  "to a new section. See new configuration file "
                  "for details."), opt)
            depr_func = getattr(self, '_process_%s' % opt, None)
            if depr_func:
                depr_func(val)

        # If everything went according to plan these two lists should be empty
        if self._required_attributes:
            raise exceptions.InvalidClusterConfiguration(
                invalid_attrs=self._required_attributes)
        if self._important_attributes:
            LOG.info(
                _LI("The following cluster attributes were "
                    "not specified: %s'"), self._important_attributes)
        # The API client will be explicitly created by users of this class
        self.api_client = None
Example No. 30
    def _retry_task(self, task):
        delay = 0.5
        max_retries = max(cfg.CONF.nsxv.retries, 1)
        args = task.userdata.get('args', [])
        kwargs = task.userdata.get('kwargs', {})
        retry_number = task.userdata['retry_number']
        retry_command = task.userdata['retry_command']
        try:
            retry_command(*args, **kwargs)
        except Exception as exc:
            LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s",
                      {'name': task.name,
                       'exc': exc,
                       'retry': retry_number})
            retry_number += 1
            if retry_number > max_retries:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Failed to %s"), task.name)
            else:
                task.userdata['retry_number'] = retry_number
                # Sleep twice as long as the previous retry
                tts = (2 ** (retry_number - 1)) * delay
                time.sleep(min(tts, 60))
                return task_constants.TaskStatus.PENDING
        LOG.info(_LI("Task %(name)s completed."), {'name': task.name})
        return task_constants.TaskStatus.COMPLETED
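The retry sleep above doubles with every attempt and is capped at 60 seconds; a quick print of the schedule for delay = 0.5:

delay = 0.5
for retry_number in range(1, 9):
    tts = (2 ** (retry_number - 1)) * delay
    print('retry %d: sleep %.1fs' % (retry_number, min(tts, 60)))
# retry 1: 0.5s, retry 2: 1.0s, retry 3: 2.0s, ...
# retry 8: 60.0s (capped from 64s)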
Example No. 31
    def _fetch_nsx_data_chunk(self, sp):
        base_chunk_size = sp.chunk_size
        chunk_size = base_chunk_size + sp.extra_chunk_size
        LOG.info(_LI("Fetching up to %s resources "
                     "from NSX backend"), chunk_size)
        fetched = ls_count = lr_count = lp_count = 0
        lswitches = lrouters = lswitchports = []
        if sp.ls_cursor or sp.ls_cursor == 'start':
            (lswitches, sp.ls_cursor, ls_count) = self._fetch_data(
                self.LS_URI, sp.ls_cursor, chunk_size)
            fetched = len(lswitches)
        if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start':
            (lrouters, sp.lr_cursor, lr_count) = self._fetch_data(
                self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0))
        fetched += len(lrouters)
        if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start':
            (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data(
                self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0))
        fetched += len(lswitchports)
        if sp.current_chunk == 0:
            # No cursors were provided, so it must be possible to
            # calculate the total amount of data to fetch
            sp.total_size = ls_count + lr_count + lp_count
        LOG.debug("Total data size: %d", sp.total_size)
        sp.chunk_size = self._get_chunk_size(sp)
        # Calculate chunk size adjustment
        sp.extra_chunk_size = sp.chunk_size - base_chunk_size
        LOG.debug("Fetched %(num_lswitches)d logical switches, "
                  "%(num_lswitchports)d logical switch ports, "
                  "%(num_lrouters)d logical routers",
                  {'num_lswitches': len(lswitches),
                   'num_lswitchports': len(lswitchports),
                   'num_lrouters': len(lrouters)})
        return (lswitches, lrouters, lswitchports)
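A generic, self-contained sketch of the cursor-driven paging pattern _fetch_data follows (the real NSX call signature is assumed): fetch up to a page of items, then carry the returned cursor into the next call until it is exhausted:

def fetch_data(items, cursor, page_size):
    # 'start' means begin at the first item, mirroring the sentinel above
    start = 0 if cursor == 'start' else cursor
    page = items[start:start + page_size]
    next_cursor = (start + page_size
                   if start + page_size < len(items) else None)
    return page, next_cursor, len(items)

items = list(range(7))
cursor, fetched = 'start', []
while cursor is not None:
    page, cursor, total = fetch_data(items, cursor, 3)
    fetched.extend(page)
print('%s (total=%d)' % (fetched, total))  # [0, 1, 2, 3, 4, 5, 6] (total=7)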
Example No. 32
    def run(self):
        while True:
            try:
                if self._stopped:
                    # Gracefully terminate this thread if the _stopped
                    # attribute was set to true
                    LOG.info(_LI("Stopping TaskManager"))
                    break

                # get a task from queue, or timeout for periodic status check
                task = self._get_task()
                if task.resource_id in self._tasks:
                    # this resource already has some tasks under processing;
                    # append the task to the same queue for ordered processing
                    self._enqueue(task)
                    continue

                try:
                    self._main_thread_exec_task = task
                    self._execute(task)
                finally:
                    self._main_thread_exec_task = None
                    if task.status is None:
                        # The thread was killed during _execute(). To ensure
                        # the task is aborted correctly, put it back on the
                        # queue.
                        self._enqueue(task)
                    elif task.status != constants.TaskStatus.PENDING:
                        self._result(task)
                    else:
                        self._enqueue(task)
            except Exception:
                LOG.exception(_LE("TaskManager terminating because "
                                  "of an exception"))
                break
Example No. 33
    def _retry_task(self, task):
        delay = 0.5
        max_retries = max(cfg.CONF.nsxv.retries, 1)
        args = task.userdata.get('args', [])
        kwargs = task.userdata.get('kwargs', {})
        retry_number = task.userdata['retry_number']
        retry_command = task.userdata['retry_command']
        try:
            retry_command(*args, **kwargs)
        except Exception as exc:
            LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s", {
                'name': task.name,
                'exc': exc,
                'retry': retry_number
            })
            retry_number += 1
            if retry_number > max_retries:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Failed to %s"), task.name)
            else:
                task.userdata['retry_number'] = retry_number
                # Sleep twice as long as the previous retry
                tts = (2 ** (retry_number - 1)) * delay
                time.sleep(min(tts, 60))
                return task_constants.TaskStatus.PENDING
        LOG.info(_LI("Task %(name)s completed."), {'name': task.name})
        return task_constants.TaskStatus.COMPLETED
Example No. 34
def delete_old_dhcp_edge(context, old_edge_id, bindings):
    LOG.info(_LI("Deleting the old DHCP edge: %s"), old_edge_id)
    # use one of the router-ids from the bindings for the deletion
    dhcp_names = [binding['router_id'] for binding in bindings]
    dhcp_name = dhcp_names[0]
    with locking.LockManager.get_lock(old_edge_id):
        # Delete from NSXv backend
        # (using the first dhcp name as the "router name")
        # Note: if we free the router instead of deleting it, it will be
        # reused immediately as the new one, so it is better to delete it.
        try:
            nsxv.delete_edge(old_edge_id)
        except Exception as e:
            LOG.warning(_LW("Failed to delete the old edge %(id)s: %(e)s"), {
                'id': old_edge_id,
                'e': e
            })
            # Continue the process anyway; the edge may already have been
            # deleted on the backend

        try:
            # Remove bindings from Neutron DB
            nsxv_db.delete_nsxv_router_binding(context.session, dhcp_name)
            nsxv_db.clean_edge_vnic_binding(context.session, old_edge_id)
        except Exception as e:
            LOG.warning(
                _LW("Failed to delete the old edge %(id)s from the "
                    "DB : %(e)s"), {
                        'id': old_edge_id,
                        'e': e
                    })
Example No. 35
    def run(self):
        while True:
            try:
                if self._stopped:
                    # Gracefully terminate this thread if the _stopped
                    # attribute was set to true
                    LOG.info(_LI("Stopping TaskManager"))
                    break

                # get a task from queue, or timeout for periodic status check
                task = self._get_task()
                if task.resource_id in self._tasks:
                    # this resource already has some tasks under processing;
                    # append the task to the same queue for ordered processing
                    self._enqueue(task)
                    continue

                try:
                    self._main_thread_exec_task = task
                    self._execute(task)
                finally:
                    self._main_thread_exec_task = None
                    if task.status is None:
                        # The thread was killed during _execute(). To ensure
                        # the task is aborted correctly, put it back on the
                        # queue.
                        self._enqueue(task)
                    elif task.status != constants.TaskStatus.PENDING:
                        self._result(task)
                    else:
                        self._enqueue(task)
            except Exception:
                LOG.exception(_LE("TaskManager terminating because "
                                  "of an exception"))
                break
Example No. 36
def recreate_vdr_dhcp_edge(context, plugin, edge_manager, old_edge_id,
                           vdr_router_id):
    """Handle the edge recreation of a VDR router DHCP.
    """
    # delete the old bindings
    nsxv_db.delete_vdr_dhcp_binding(context.session, vdr_router_id)

    # Add each interface port of this router to a new edge:
    intf_ports = plugin._get_port_by_device_id(context, vdr_router_id,
                                               l3_db.DEVICE_OWNER_ROUTER_INTF)
    for port in intf_ports:
        fixed_ips = port.get("fixed_ips", [])
        if len(fixed_ips) > 0:
            fixed_ip = fixed_ips[0]
            subnet_id = fixed_ip['subnet_id']
            subnet = plugin.get_subnet(context, subnet_id)
        do_metadata = False
        for fixed_ip in fixed_ips:
            if fixed_ip['ip_address'] == subnet['gateway_ip']:
                do_metadata = True

        if do_metadata:
            edge_manager.configure_dhcp_for_vdr_network(
                context, subnet['network_id'], vdr_router_id)

    new_binding = nsxv_db.get_vdr_dhcp_binding_by_vdr(context.session,
                                                      vdr_router_id)
    if new_binding:
        LOG.info(_LI("VDR router %(vdr_id)s was moved to edge %(edge_id)s"), {
            'vdr_id': vdr_router_id,
            'edge_id': new_binding['dhcp_edge_id']
        })
    else:
        LOG.error(_LE("VDR router %(vdr_id)s was not moved to a new edge"),
                  {'vdr_id': vdr_router_id})
Example No. 37
    def add_port_group(self, net_id, vlan_tag=None):
        """Add a new port group to the configured DVS."""
        pg_spec = self._get_port_group_spec(net_id, vlan_tag)
        task = self._session.invoke_api(self._session.vim,
                                        'CreateDVPortgroup_Task',
                                        self._dvs_moref,
                                        spec=pg_spec)
        try:
            # NOTE(garyk): cache the returned moref
            self._session.wait_for_task(task)
        except Exception:
            # NOTE(garyk): handle more specific exceptions
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    _LE('Failed to create port group for '
                        '%(net_id)s with tag %(tag)s.'),
                    {'net_id': net_id, 'tag': vlan_tag})
        LOG.info(
            _LI("%(net_id)s with tag %(vlan_tag)s created on %(dvs)s."),
            {'net_id': net_id,
             'vlan_tag': vlan_tag,
             'dvs': dvs_utils.dvs_name_get()})
Example No. 38
    def _ensure_default_l2_gateway(self):
        """
        Create a default logical L2 gateway.

        Create a logical L2 gateway in the neutron database if the
        default_bridge_cluster config parameter is set and if it is
        not previously created. If not set, return.
        """
        def_l2gw_name = cfg.CONF.nsx_v3.default_bridge_cluster
        # Return if no default_bridge_cluster set in config
        if not def_l2gw_name:
            LOG.info(_LI("NSX: Default bridge cluster not configured "
                         "in nsx.ini. No default L2 gateway created."))
            return
        admin_ctx = context.get_admin_context()

        def_l2gw_uuid = nsxlib.get_bridge_cluster_id_by_name_or_id(
            def_l2gw_name)

        # Optimistically create the default L2 gateway in neutron DB
        device = {'device_name': def_l2gw_uuid,
                  'interfaces': [{'name': 'default-bridge-cluster'}]}
        def_l2gw = {'name': 'default-l2gw',
                    'devices': [device]}
        l2gw_dict = {self.gateway_resource: def_l2gw}
        l2_gateway = self.create_l2_gateway(admin_ctx, l2gw_dict)
        # Verify that only one default L2 gateway is created
        def_l2gw_exists = False
        l2gateways = self._get_l2_gateways(admin_ctx)
        for l2gateway in l2gateways:
            # Since we ensure L2 gateway is created with only 1 device, we use
            # the first device in the list.
            if l2gateway['devices'][0]['device_name'] == def_l2gw_uuid:
                if def_l2gw_exists:
                    LOG.info(_LI("Default L2 gateway is already created."))
                    try:
                        # Try deleting this duplicate default L2 gateway
                        self.delete_l2_gateway(admin_ctx, l2gateway['id'])
                    except l2gw_exc.L2GatewayInUse:
                        # If the L2 gateway we are trying to delete is in
                        # use then we should delete the L2 gateway which
                        # we just created ensuring there is only one
                        # default L2 gateway in the database.
                        self.delete_l2_gateway(admin_ctx, l2_gateway['id'])
                else:
                    def_l2gw_exists = True
        return l2_gateway
Example No. 39
    def __init__(self):
        LOG.info(_LI("Loading VMware NSX-V Qos Service Plugin"))
        super(NsxVQosPlugin, self).__init__()

        if not cfg.CONF.nsxv.use_dvs_features:
            error = _("Cannot use the NSX-V QoS plugin without "
                      "enabling the dvs features")
            raise nsx_exc.NsxPluginException(err_msg=error)
Exemplo n.º 40
0
    def func_desc(*args, **kwargs):
        component = '[%s]' % func.__name__.split('_')[0].upper()
        op_desc = [n.capitalize() for n in func.__name__.split('_')[1:]]
        LOG.info(_LI('==== %(component)s %(operation)s ===='), {
            'component': component,
            'operation': ' '.join(op_desc)
        })
        return func(*args, **kwargs)
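
func_desc is evidently the inner wrapper of a logging decorator: it derives a component tag and an operation description from the decorated function's name, logs a banner, and delegates. A hedged reconstruction of the enclosing decorator; the outer name log_operation is an assumption, not taken from the snippet:

import functools
import logging

LOG = logging.getLogger(__name__)


def log_operation(func):
    @functools.wraps(func)
    def func_desc(*args, **kwargs):
        component = '[%s]' % func.__name__.split('_')[0].upper()
        op_desc = [n.capitalize() for n in func.__name__.split('_')[1:]]
        LOG.info('==== %(component)s %(operation)s ====',
                 {'component': component, 'operation': ' '.join(op_desc)})
        return func(*args, **kwargs)
    return func_desc


@log_operation
def nsx_list_edges():
    # Calling this would log: ==== [NSX] List Edges ====
    pass
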
Exemplo n.º 41
0
def handle_network_dhcp_access(plugin, context, network, action):
    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
             {"action": action, "resource": network})
    if action == 'create_network':
        network_id = network['id']
        if network.get(external_net.EXTERNAL):
            LOG.info(_LI("Network %s is external: no LSN to create"),
                     network_id)
            return
        plugin.lsn_manager.lsn_create(context, network_id)
    elif action == 'delete_network':
        # NOTE(armando-migliaccio): on delete_network, network
        # is just the network id
        network_id = network
        plugin.lsn_manager.lsn_delete_by_network(context, network_id)
    LOG.info(_LI("Logical Services Node for network "
                 "%s configured successfully"), network_id)
Exemplo n.º 42
0
def nsx_list_orphaned_edges(resource, event, trigger, **kwargs):
    """List orphaned Edges on NSXv.

    Orphaned edges are NSXv edges that exist on NSXv backend but
    don't have a corresponding binding in Neutron DB
    """
    LOG.info(_LI("NSXv edges present on NSXv backend but not present "
                 "in Neutron DB\n"))
    orphaned_edges = get_orphaned_edges()
    if not orphaned_edges:
        LOG.info(_LI("\nNo orphaned edges found."
                     "\nNeutron DB and NSXv backend are in sync\n"))
    else:
        LOG.info(constants.ORPHANED_EDGES)
        data = [('edge_id',)]
        for edge in orphaned_edges:
            data.append((edge,))
        LOG.info(formatters.tabulate_results(data))
Exemplo n.º 44
0
    def test_apirequest_start(self):
        for i in range(10):
            a = request.EventletApiRequest(
                self.client, self.url)
            a._handle_request = mock.Mock()
            a.start()
            eventlet.greenthread.sleep(0.1)
            LOG.info(_LI('_handle_request called: %s'),
                     a._handle_request.called)
        request.EventletApiRequest.joinall()
Exemplo n.º 45
0
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    LOG.info(_LI("Handle metadata access via router: %(r)s and "
                 "interface %(i)s"), {'r': router_id, 'i': interface})
    if interface:
        try:
            plugin.get_port(context, interface['port_id'])
            is_enabled = True
        except n_exc.NotFound:
            is_enabled = False
        subnet_id = interface['subnet_id']
        try:
            plugin.lsn_manager.lsn_metadata_configure(
                context, subnet_id, is_enabled)
        except p_exc.NsxPluginException:
            with excutils.save_and_reraise_exception():
                if is_enabled:
                    l3_db.L3_NAT_db_mixin.remove_router_interface(
                        plugin, context, router_id, interface)
    LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
Exemplo n.º 46
0
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    # This handles instances that support DHCP option 121 and are
    # created in a DHCP-enabled but isolated network. It is useful
    # only when there is no network namespace support.
    plugin_cfg = getattr(cfg.CONF, plugin.cfg_group)
    if (plugin_cfg.metadata_mode == config.MetadataModes.INDIRECT and
        port.get('device_owner') == const.DEVICE_OWNER_DHCP):
        if not port.get('fixed_ips'):
            # If port does not have an IP, the associated subnet is in
            # deleting state.
            LOG.info(_LI('Port %s has no IP due to subnet in deleting state'),
                     port['id'])
            return
        fixed_ip = port['fixed_ips'][0]
        query = context.session.query(models_v2.Subnet)
        subnet = query.filter(
            models_v2.Subnet.id == fixed_ip['subnet_id']).one()
        # If subnet does not have a gateway, do not create metadata
        # route. This is done via the enable_isolated_metadata
        # option if desired.
        if not subnet.get('gateway_ip'):
            LOG.info(_LI('Subnet %s does not have a gateway, the '
                         'metadata route will not be created'),
                     subnet['id'])
            return
        metadata_routes = [r for r in subnet.routes
                           if r['destination'] == METADATA_DHCP_ROUTE]
        if metadata_routes:
            # We should have only a single metadata route at any time
            # because the route logic forbids two routes with the same
            # destination. Update next hop with the provided IP address
            if not is_delete:
                metadata_routes[0].nexthop = fixed_ip['ip_address']
            else:
                context.session.delete(metadata_routes[0])
        else:
            # add the metadata route
            route = models_v2.SubnetRoute(
                subnet_id=subnet.id,
                destination=METADATA_DHCP_ROUTE,
                nexthop=fixed_ip['ip_address'])
            context.session.add(route)
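
METADATA_DHCP_ROUTE is not defined in the snippet; since the code manages host routes pushed to instances through DHCP option 121 (classless static routes), it is presumably the link-local metadata service destination. An assumed definition, not confirmed by the snippet:

# Assumption: the host-route destination for the metadata service.
METADATA_DHCP_ROUTE = '169.254.169.254/32'
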
Exemplo n.º 47
0
    def test_api_providers_non_none_api_providers(self):
        r = request.GetApiProvidersRequestEventlet(self.client)
        r.value = mock.Mock()
        r.value.body = """{
          "results": [
            { "roles": [
              { "role": "api_provider",
                "listen_addr": "pssl:1.1.1.1:1" }]}]}"""
        r.successful = mock.Mock(return_value=True)
        LOG.info(_LI('%s'), r.api_providers())
        self.assertIsNotNone(r.api_providers())
Exemplo n.º 48
0
    def _check_tenant_router_types(self, types):
        self.tenant_router_types = []
        for router_type in types:
            if router_type in self.drivers:
                self.tenant_router_types.append(router_type)
            else:
                msg = _("No type driver for tenant router_type: %s. "
                        "Service terminated!") % router_type
                LOG.error(msg)
                raise SystemExit(msg)
        LOG.info(_LI("Tenant router_types: %s"), self.tenant_router_types)
Exemplo n.º 49
0
def main(argv=sys.argv[1:]):
    _init_cfg()
    _init_resource_plugin()
    nsx_plugin_in_use = _get_plugin()
    LOG.info(_LI('NSX Plugin in use: %s'), nsx_plugin_in_use)

    _validate_resource_choice(cfg.CONF.resource, nsx_plugin_in_use)
    _validate_op_choice(cfg.CONF.operation, nsx_plugin_in_use)

    registry.notify(cfg.CONF.resource, cfg.CONF.operation, 'nsxadmin',
                    force=cfg.CONF.force, property=cfg.CONF.property)
Exemplo n.º 50
0
def get_port(cluster, network, port, relations=None):
    LOG.info(_LI("get_port() %(network)s %(port)s"),
             {'network': network, 'port': port})
    uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
    if relations:
        uri += "relations=%s" % relations
    try:
        return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
    except exception.NotFound as e:
        LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
        raise exception.PortNotFoundOnNetwork(
            port_id=port, net_id=network)
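
For illustration, the concatenation above yields URIs of the following shape; the ids and the relation name are made up:

network = 'f3f7c2a0'
port = 'ab12cd34'
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
uri += "relations=%s" % 'LogicalPortStatus'
# uri == '/ws.v1/lswitch/f3f7c2a0/lport/ab12cd34?relations=LogicalPortStatus'
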
Exemplo n.º 51
0
    def stop(self):
        if self._thread is None:
            return
        self._stopped = True
        self._thread.kill()
        self._thread = None
        # Stop looping call and abort running tasks
        self._monitor.stop()
        if self._monitor_busy:
            self._monitor.wait()
        self._abort()
        LOG.info(_LI("TaskManager terminated"))
Exemplo n.º 52
0
def nsx_delete_orphaned_edges(resource, event, trigger, **kwargs):
    """Delete orphaned edges from NSXv backend"""
    orphaned_edges = get_orphaned_edges()
    LOG.info(_LI("Before delete; Orphaned Edges: %s"), orphaned_edges)

    if not kwargs['force']:
        if len(orphaned_edges):
            user_confirm = admin_utils.query_yes_no("Do you want to delete "
                                                    "orphaned edges",
                                                    default="no")
            if not user_confirm:
                LOG.info(_LI("NSXv Edge deletion aborted by user"))
                return

    nsxv = utils.get_nsxv_client()
    for edge in orphaned_edges:
        LOG.info(_LI("Deleting edge: %s"), edge)
        nsxv.delete_edge(edge)

    LOG.info(_LI("After delete; Orphaned Edges: \n%s"),
        pprint.pformat(get_orphaned_edges()))
Exemplo n.º 53
0
    def _run(self):
        '''Method executed within green thread.'''
        if self._request_timeout:
            # No timeout exception escapes the with block.
            with eventlet.timeout.Timeout(self._request_timeout, False):
                return self._handle_request()

            LOG.info(_LI('[%d] Request timeout.'), self._rid())
            self._request_error = Exception(_('Request timeout'))
            return None
        else:
            return self._handle_request()
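
The control flow here hinges on an eventlet subtlety: constructing Timeout with exception=False makes the with-block exit silently when the timer fires, so execution falls through to the timeout logging instead of an exception propagating. A self-contained demonstration of the idiom; the sleep stands in for a slow request:

import eventlet
import eventlet.timeout


def run_with_timeout():
    with eventlet.timeout.Timeout(0.2, False):
        eventlet.sleep(5)       # pretend this is a slow request
        return 'finished'       # normal completion returns from here
    # Reached only when the timeout cancelled the block above.
    return 'timed out'


print(run_with_timeout())       # -> timed out
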
Exemplo n.º 54
0
    def set_state(self, state):
        if self.state != state:
            LOG.info(_LI("Endpoint '%(ep)s' changing from state"
                         " '%(old)s' to '%(new)s'"),
                     {'ep': self.provider,
                      'old': self.state,
                      'new': state})
        old_state = self._state
        self._state = state

        self._last_updated = datetime.datetime.now()

        return old_state
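
set_state reads self.state but writes self._state, which implies the class exposes state as a read-only property backed by _state. A minimal stand-in making that pairing explicit; the class name and initial values are illustrative:

import datetime


class Endpoint(object):
    """Sketch showing only the state plumbing assumed by set_state."""

    def __init__(self, provider):
        self.provider = provider
        self._state = 'INITIALIZED'
        self._last_updated = datetime.datetime.now()

    @property
    def state(self):
        return self._state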