Example #1
def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
    """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3

    After using api_replay to migrate the neutron data from NSX-V to NSX-T
    we need to update the VM ports to use OpaqueNetwork instead of
    DistributedVirtualPortgroup
    """
    # Connect to the DVS manager, using the configuration parameters
    try:
        dvs_mng = dvs.DvsManager()
    except Exception as e:
        LOG.error(
            _LE("Cannot connect to the DVS: Please update the [dvs] "
                "section in the nsx.ini file: %s"), e)
        return

    # Go over all the compute ports from the plugin
    admin_cxt = neutron_context.get_admin_context()
    port_filters = {'device_owner': ['compute:None']}
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

    for port in neutron_ports:
        device_id = port.get('device_id')

        # get the vm moref & spec from the DVS
        vm_moref = dvs_mng.get_vm_moref_obj(device_id)
        vm_spec = dvs_mng.get_vm_spec(vm_moref)

        # Go over the VM interfaces and check if it should be updated
        update_spec = False
        for prop in vm_spec.propSet:
            if (prop.name == 'network'
                    and hasattr(prop.val, 'ManagedObjectReference')):
                for net in prop.val.ManagedObjectReference:
                    if net._type == 'DistributedVirtualPortgroup':
                        update_spec = True

        if not update_spec:
            LOG.info(_LI("No need to update the spec of vm %s"), device_id)
            continue

        # find the old interface by its MAC address and detach it
        device = get_vm_network_device(dvs_mng, vm_moref, port['mac_address'])
        if device is None:
            LOG.warning(_LW("No device with MAC address %s exists on the VM"),
                        port['mac_address'])
            continue
        device_type = device.__class__.__name__

        LOG.info(_LI("Detaching old interface from VM %s"), device_id)
        dvs_mng.detach_vm_interface(vm_moref, device)

        # add the new interface as OpaqueNetwork
        LOG.info(_LI("Attaching new interface to VM %s"), device_id)
        nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id'])
        dvs_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
                                    nsx_net_id, device_type)
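
get_vm_network_device is referenced above but not shown. Below is a minimal sketch of one way it could work, reusing the spec lookup already used above and assuming the spec also carries the vSphere 'config.hardware.device' property; the property path and attribute names are assumptions, so adjust them to the real DvsManager API.

def get_vm_network_device(dvs_mng, vm_moref, mac_address):
    # Hedged sketch: walk the VM's virtual hardware devices and return the
    # NIC whose MAC matches the neutron port, or None if nothing matches.
    vm_spec = dvs_mng.get_vm_spec(vm_moref)
    if not vm_spec:
        return None
    for prop in vm_spec.propSet:
        if prop.name != 'config.hardware.device':
            continue
        for device in prop.val.VirtualDevice:
            if getattr(device, 'macAddress', None) == mac_address:
                return device
    return None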
Example #2
def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
    """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3

    After using api_replay to migrate the neutron data from NSX-V to NSX-T
    we need to update the VM ports to use OpaqueNetwork instead of
    DistributedVirtualPortgroup
    """
    # Connect to the DVS manager, using the configuration parameters
    try:
        vm_mng = dvs.VMManager()
    except Exception as e:
        LOG.error(
            "Cannot connect to the DVS: Please update the [dvs] "
            "section in the nsx.ini file: %s", e)
        return

    port_filters = {}
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        project = properties.get('project-id')
        if project:
            port_filters['project_id'] = [project]

    # Go over all the ports from the plugin
    admin_cxt = neutron_context.get_admin_context()
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

    for port in neutron_ports:
        # skip non-compute ports
        if (not port.get('device_owner').startswith(
                const.DEVICE_OWNER_COMPUTE_PREFIX)):
            continue
        device_id = port.get('device_id')

        # get the vm moref & spec from the DVS
        vm_moref = vm_mng.get_vm_moref_obj(device_id)
        vm_spec = vm_mng.get_vm_spec(vm_moref)
        if not vm_spec:
            LOG.error("Failed to get the spec of vm %s", device_id)
            continue

        # Go over the VM interfaces and check if it should be updated
        update_spec = False
        for prop in vm_spec.propSet:
            if (prop.name == 'network'
                    and hasattr(prop.val, 'ManagedObjectReference')):
                for net in prop.val.ManagedObjectReference:
                    if (net._type == 'DistributedVirtualPortgroup'
                            or net._type == 'Network'):
                        update_spec = True

        if not update_spec:
            LOG.info("No need to update the spec of vm %s", device_id)
            continue

        # find the old interface by its MAC address and detach it
        device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
        if device is None:
            LOG.warning("No device with MAC address %s exists on the VM",
                        port['mac_address'])
            continue
        device_type = device.__class__.__name__

        LOG.info("Detaching old interface from VM %s", device_id)
        vm_mng.detach_vm_interface(vm_moref, device)

        # add the new interface as OpaqueNetwork
        LOG.info("Attaching new interface to VM %s", device_id)
        nsx_net_id = get_network_nsx_id(admin_cxt.session, port['network_id'])
        vm_mng.attach_vm_interface(vm_moref, port['id'], port['mac_address'],
                                   nsx_net_id, device_type)
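
For context, the --property options are turned into a key/value mapping before the project-id filter is applied. The snippet below is only an illustration of the shape admin_utils.parse_multi_keyval_opt is expected to return; it is not the real helper, which may validate its input differently.

def parse_multi_keyval_opt(opt_values):
    # Illustrative only, e.g. ['project-id=abc123', 'net-name=VM Network']
    #   -> {'project-id': 'abc123', 'net-name': 'VM Network'}
    result = {}
    for keyval in opt_values:
        key, _sep, value = keyval.partition('=')
        result[key] = value
    return result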
Example #3
def list_missing_ports(resource, event, trigger, **kwargs):
    """List neutron ports that are missing the NSX backend port
    And ports with wrong switch profiles
    """
    admin_cxt = neutron_context.get_admin_context()
    filters = v3_utils.get_plugin_filters(admin_cxt)
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=filters)
        port_client, profile_client = get_port_and_profile_clients()

        # get pre-defined profile ids
        dhcp_profile_id = get_dhcp_profile_id(profile_client)
        dhcp_profile_key = resources.SwitchingProfileTypes.SWITCH_SECURITY
        spoofguard_profile_id = get_spoofguard_profile_id(profile_client)
        spoofguard_profile_key = resources.SwitchingProfileTypes.SPOOF_GUARD
        qos_profile_key = resources.SwitchingProfileTypes.QOS

        problems = []
        for port in neutron_ports:
            neutron_id = port['id']
            # get the network nsx id from the mapping table
            nsx_id = get_port_nsx_id(admin_cxt.session, neutron_id)
            if not nsx_id:
                # skip external ports
                pass
            else:
                try:
                    nsx_port = port_client.get(nsx_id)
                except nsx_exc.ResourceNotFound:
                    problems.append({
                        'neutron_id': neutron_id,
                        'nsx_id': nsx_id,
                        'error': 'Missing from backend'
                    })
                    continue

                # Port found on backend!
                # Check that it has all the expected switch profiles.
                # create a dictionary of the current profiles:
                profiles_dict = {}
                for prf in nsx_port['switching_profile_ids']:
                    profiles_dict[prf['key']] = prf['value']

                # DHCP port: neutron dhcp profile should be attached
                # to logical ports created for neutron DHCP but not
                # for native DHCP.
                if (port.get('device_owner') == const.DEVICE_OWNER_DHCP
                        and not cfg.CONF.nsx_v3.native_dhcp_metadata):
                    prf_id = profiles_dict[dhcp_profile_key]
                    if prf_id != dhcp_profile_id:
                        add_profile_mismatch(problems, neutron_id, nsx_id,
                                             prf_id, "DHCP security")

                # Port with QoS policy: a matching profile should be attached
                qos_policy_id = qos_utils.get_port_policy_id(
                    admin_cxt, neutron_id)
                if qos_policy_id:
                    qos_profile_id = nsx_db.get_switch_profile_by_qos_policy(
                        admin_cxt.session, qos_policy_id)
                    prf_id = profiles_dict[qos_profile_key]
                    if prf_id != qos_profile_id:
                        add_profile_mismatch(problems, neutron_id, nsx_id,
                                             prf_id, "QoS")

                # Port with security & fixed ips/address pairs:
                # neutron spoofguard profile should be attached
                port_sec, has_ip = plugin._determine_port_security_and_has_ip(
                    admin_cxt, port)
                addr_pair = port.get(addr_apidef.ADDRESS_PAIRS)
                if port_sec and (has_ip or addr_pair):
                    prf_id = profiles_dict[spoofguard_profile_key]
                    if prf_id != spoofguard_profile_id:
                        add_profile_mismatch(problems, neutron_id, nsx_id,
                                             prf_id, "Spoof Guard")

    if len(problems) > 0:
        title = ("Found internal ports misconfiguration on the "
                 "NSX manager:")
        LOG.info(
            formatters.output_formatter(title, problems,
                                        ['neutron_id', 'nsx_id', 'error']))
    else:
        LOG.info("All internal ports verified on the NSX manager")
Example #4
def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
    """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3

    After using api_replay to migrate the neutron data from NSX-V to NSX-T
    we need to update the VM ports to use OpaqueNetwork instead of
    DistributedVirtualPortgroup
    """
    # Connect to the DVS manager, using the configuration parameters
    try:
        vm_mng = dvs.VMManager()
    except Exception as e:
        LOG.error("Cannot connect to the DVS: Please update the [dvs] "
                  "section in the nsx.ini file: %s", e)
        return

    port_filters = {}
    net_name = 'VM Network'
    host_moref = respool_moref = datastore_moref = None
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        project = properties.get('project-id')
        if project:
            port_filters['project_id'] = [project]
        net_name = properties.get('net-name', 'VM Network')
        LOG.info("Common network name for migration %s", net_name)
        host_moref = properties.get('host-moref')
        # TODO(garyk): We can explore the option of passing the cluster
        # moref, which would remove the need for the host-moref and the
        # resource pool moref.
        respool_moref = properties.get('respool-moref')
        datastore_moref = properties.get('datastore-moref')
    # The target host is mandatory since the VMs are relocated to it below
    if not host_moref:
        LOG.error("Unable to migrate with no host")
        return

    # Go over all the ports from the plugin
    admin_cxt = neutron_context.get_admin_context()
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

    for port in neutron_ports:
        # skip non-compute ports
        if (not port.get('device_owner').startswith(
            const.DEVICE_OWNER_COMPUTE_PREFIX)):
            continue
        device_id = port.get('device_id')

        # get the vm moref & spec from the DVS
        vm_moref = vm_mng.get_vm_moref_obj(device_id)
        vm_spec = vm_mng.get_vm_spec(vm_moref)
        if not vm_spec:
            LOG.error("Failed to get the spec of vm %s", device_id)
            continue

        # Go over the VM interfaces and check if it should be updated
        update_spec = False
        for prop in vm_spec.propSet:
            if (prop.name == 'network' and
                hasattr(prop.val, 'ManagedObjectReference')):
                for net in prop.val.ManagedObjectReference:
                    if (net._type == 'DistributedVirtualPortgroup' or
                        net._type == 'Network'):
                        update_spec = True

        if not update_spec:
            LOG.info("No need to update the spec of vm %s", device_id)
            continue

        device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
        if device is None:
            LOG.warning("No device with MAC address %s exists on the VM",
                        port['mac_address'])
            continue

        # Update interface to be common network
        devices = [vm_mng.update_vm_network(device, name=net_name)]
        LOG.info("Update instance %s to common network", device_id)
        vm_mng.update_vm_interface(vm_moref, devices=devices)
        LOG.info("Migrate instance %s to host %s", device_id, host_moref)
        vm_mng.relocate_vm(vm_moref, host_moref=host_moref,
                           datastore_moref=datastore_moref,
                           respool_moref=respool_moref)
        LOG.info("Update instance %s to opaque network", device_id)
        device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
        vif_info = {'nsx_id': get_network_nsx_id(admin_cxt.session,
                                                 port['network_id']),
                    'iface_id': port['id']}
        devices = [vm_mng.update_vm_opaque_spec(vif_info, device)]
        vm_mng.update_vm_interface(vm_moref, devices=devices)
        LOG.info("Instance %s successfully migrated!", device_id)
Example #5
def migrate_compute_ports_vms(resource, event, trigger, **kwargs):
    """Update the VMs ports on the backend after migrating nsx-v -> nsx-v3

    After using api_replay to migrate the neutron data from NSX-V to NSX-T
    we need to update the VM ports to use OpaqueNetwork instead of
    DistributedVirtualPortgroup
    """
    # Connect to the DVS manager, using the configuration parameters
    try:
        vm_mng = dvs.VMManager()
    except Exception as e:
        LOG.error(
            "Cannot connect to the DVS: Please update the [dvs] "
            "section in the nsx.ini file: %s", e)
        return

    port_filters = {}
    net_name = 'VM Network'
    host_moref = respool_moref = datastore_moref = None
    if kwargs.get('property'):
        properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
        project = properties.get('project-id')
        if project:
            port_filters['project_id'] = [project]
        net_name = properties.get('net-name', 'VM Network')
        LOG.info("Common network name for migration %s", net_name)
        host_moref = properties.get('host-moref')
        # TODO(garyk): We can explore the option of passing the cluster
        # moref, which would remove the need for the host-moref and the
        # resource pool moref.
        respool_moref = properties.get('respool-moref')
        datastore_moref = properties.get('datastore-moref')
    # The target host is mandatory since the VMs are relocated to it below
    if not host_moref:
        LOG.error("Unable to migrate with no host")
        return

    # Go over all the ports from the plugin
    admin_cxt = neutron_context.get_admin_context()
    with PortsPlugin() as plugin:
        neutron_ports = plugin.get_ports(admin_cxt, filters=port_filters)

    for port in neutron_ports:
        # skip non-compute ports
        if (not port.get('device_owner').startswith(
                const.DEVICE_OWNER_COMPUTE_PREFIX)):
            continue
        device_id = port.get('device_id')

        # get the vm moref & spec from the DVS
        vm_moref = vm_mng.get_vm_moref_obj(device_id)
        vm_spec = vm_mng.get_vm_spec(vm_moref)
        if not vm_spec:
            LOG.error("Failed to get the spec of vm %s", device_id)
            continue

        # Go over the VM interfaces and check if it should be updated
        update_spec = False
        for prop in vm_spec.propSet:
            if (prop.name == 'network'
                    and hasattr(prop.val, 'ManagedObjectReference')):
                for net in prop.val.ManagedObjectReference:
                    if (net._type == 'DistributedVirtualPortgroup'
                            or net._type == 'Network'):
                        update_spec = True

        if not update_spec:
            LOG.info("No need to update the spec of vm %s", device_id)
            continue

        device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
        if device is None:
            LOG.warning("No device with MAC address %s exists on the VM",
                        port['mac_address'])
            continue

        # Update interface to be common network
        devices = [vm_mng.update_vm_network(device, name=net_name)]
        LOG.info("Update instance %s to common network", device_id)
        vm_mng.update_vm_interface(vm_moref, devices=devices)
        LOG.info("Migrate instance %s to host %s", device_id, host_moref)
        vm_mng.relocate_vm(vm_moref,
                           host_moref=host_moref,
                           datastore_moref=datastore_moref,
                           respool_moref=respool_moref)
        LOG.info("Update instance %s to opaque network", device_id)
        device = get_vm_network_device(vm_mng, vm_moref, port['mac_address'])
        vif_info = {
            'nsx_id': get_network_nsx_id(admin_cxt.session,
                                         port['network_id']),
            'iface_id': port['id']
        }
        devices = [vm_mng.update_vm_opaque_spec(vif_info, device)]
        vm_mng.update_vm_interface(vm_moref, devices=devices)
        LOG.info("Instance %s successfully migrated!", device_id)