Example #1
import os
import shutil

import utils

import pluginlib_nova

pluginlib_nova.configure_logging('workarounds')


def _copy_vdis(sr_path, staging_path, vdi_uuids):
    seq_num = 0
    for vdi_uuid in vdi_uuids:
        src = os.path.join(sr_path, "%s.vhd" % vdi_uuid)
        dst = os.path.join(staging_path, "%d.vhd" % seq_num)
        shutil.copyfile(src, dst)
        seq_num += 1


def safe_copy_vdis(session, sr_path, vdi_uuids, uuid_stack):
    staging_path = utils.make_staging_area(sr_path)
    try:
        _copy_vdis(sr_path, staging_path, vdi_uuids)
        return utils.import_vhds(sr_path, staging_path, uuid_stack)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == '__main__':
    utils.register_plugin_calls(safe_copy_vdis)
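
For context, a call registered this way is reachable from the compute node through XenAPI's host.call_plugin. The sketch below is a minimal, hypothetical caller; the host URL, credentials, "workarounds" plugin name, and argument encoding are all assumptions.

import XenAPI

# Hypothetical caller-side sketch; assumes the file above is installed
# in dom0 as the "workarounds" plugin.
session = XenAPI.Session('https://xenserver.example.com')
session.login_with_password('root', 'password')
try:
    host = session.xenapi.host.get_all()[0]
    # host.call_plugin(host_ref, plugin, function, args): args values
    # must be strings; nova serializes its real arguments before sending.
    result = session.xenapi.host.call_plugin(
        host, 'workarounds', 'safe_copy_vdis', {'params': '<serialized>'})
finally:
    session.logout()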
Example #2
def inject(session, sr_path, vdi_uuid, boot_menu_url, ip_address, netmask,
           gateway, dns, mkisofs_cmd):
    iso_filename = '%s.img' % os.path.join(sr_path, 'iso', vdi_uuid)

    # Create staging area so we have a unique path but remove it since
    # shutil.copytree will recreate it
    staging_path = utils.make_staging_area(sr_path)
    utils.cleanup_staging_area(staging_path)

    try:
        _unbundle_iso(sr_path, iso_filename, staging_path)

        # Write Configs
        _write_file(
            os.path.join(staging_path, 'netcfg.ipxe'), NETCFG_IPXE % {
                "ip_address": ip_address,
                "netmask": netmask,
                "gateway": gateway,
                "dns": dns,
                "boot_menu_url": boot_menu_url
            })

        _write_file(os.path.join(staging_path, 'isolinux.cfg'), ISOLINUX_CFG)

        _create_iso(mkisofs_cmd, iso_filename, staging_path)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == "__main__":
    utils.register_plugin_calls(inject)
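
NETCFG_IPXE itself is not part of this excerpt; it is a %-style template whose named placeholders are filled from the dict above. A toy illustration of that substitution (the template text here is invented):

# Toy stand-in for the elided NETCFG_IPXE template.
TEMPLATE = "set net0/ip %(ip_address)s\nset net0/netmask %(netmask)s\n"
print(TEMPLATE % {"ip_address": "10.0.0.5", "netmask": "255.255.255.0"})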
Example #3
# NOTE: XenServer still only supports Python 2.4 in its dom0 userspace
# which means the Nova xenapi plugins must use only Python 2.4 features
"""Returns the version of the nova plugins"""

import utils

# MAJOR VERSION: Incompatible changes
# MINOR VERSION: Compatible changes, new plugins, etc

# NOTE(sfinucan): 2.0 will be equivalent to the last in the 1.x stream

# 1.0 - Initial version.
# 1.1 - New call to check GC status
# 1.2 - Added support for pci passthrough devices
# 1.3 - Add vhd2 functions for doing glance operations by url
# 1.4 - Add support of Glance v2 api
# 1.5 - Added function for network configuration on ovs bridge
# 1.6 - Add function for network configuration on Linux bridge
# 1.7 - Add Partition utilities plugin
# 1.8 - Add support for calling plug-ins with the .py suffix
PLUGIN_VERSION = "1.8"


def get_version(session):
    return PLUGIN_VERSION


if __name__ == '__main__':
    utils.register_plugin_calls(get_version)
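
Given the MAJOR/MINOR convention spelled out in the comments, a caller can gate features on the reported version. A minimal sketch of such a check (the helper name and required version are illustrative):

def is_compatible(plugin_version, required='1.3'):
    # Same MAJOR is compatible; MINOR only ever adds features, so it
    # just has to be at least the required one.
    plugin_major, plugin_minor = [int(x) for x in plugin_version.split('.')]
    req_major, req_minor = [int(x) for x in required.split('.')]
    return plugin_major == req_major and plugin_minor >= req_minor

print(is_compatible(PLUGIN_VERSION))  # -> True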
Example #4
        rsync_cmd = ["/usr/bin/rsync", "-av", "--progress", "-e", ssh_cmd,
                     staging_path, '%s@%s:%s' % (user, host, dest_path)]

    # NOTE(hillad): rsync's progress is carriage returned, requiring
    # universal_newlines for real-time output.

    rsync_proc = utils.make_subprocess(rsync_cmd, stdout=True, stderr=True,
                                       universal_newlines=True)
    while True:
        rsync_progress = rsync_proc.stdout.readline()
        if not rsync_progress:
            break
        logging.debug("[%s] %s" % (instance_uuid, rsync_progress))

    utils.finish_subprocess(rsync_proc, rsync_cmd)


def transfer_vhd(session, instance_uuid, host, vdi_uuid, sr_path, seq_num):
    """Rsyncs a VHD to an adjacent host."""
    staging_path = utils.make_staging_area(sr_path)
    try:
        utils.prepare_staging_area(sr_path, staging_path, [vdi_uuid],
                                   seq_num=seq_num)
        _rsync_vhds(instance_uuid, host, staging_path)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == '__main__':
    utils.register_plugin_calls(move_vhds_into_sr, transfer_vhd)
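
The NOTE about carriage returns is the subtle part: rsync --progress keeps rewriting one status line terminated by \r, so without universal_newlines the readline() loop would see nothing until the pipe closes. A standalone sketch of the same read loop (source and destination paths are placeholders):

import subprocess

# universal_newlines=True translates rsync's \r-terminated progress
# updates into line breaks, so each one arrives via readline() promptly.
proc = subprocess.Popen(
    ['/usr/bin/rsync', '-av', '--progress', '/tmp/src/', '/tmp/dst/'],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
    universal_newlines=True)
for line in iter(proc.stdout.readline, ''):
    print(line.rstrip())
proc.wait()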
Example #5
    staging_path = utils.make_staging_area(sr_path)
    try:
        tarball_filename = os.path.basename(torrent_path).replace(
            '.torrent', '')
        tarball_path = os.path.join(staging_path, tarball_filename)

        # Download tarball into staging area
        _download(torrent_path, staging_path, torrent_listen_port_start,
                  torrent_listen_port_end, torrent_download_stall_cutoff)

        # Extract the tarball into the staging area
        _extract_tarball(tarball_path, staging_path)

        # Move the VHDs from the staging area into the storage repository
        vdi_list = utils.import_vhds(sr_path, staging_path, uuid_stack)

        # Seed image for others in the swarm
        _seed_if_needed(seed_cache_path, tarball_path, torrent_path,
                        torrent_seed_duration, torrent_seed_chance,
                        torrent_listen_port_start, torrent_listen_port_end,
                        torrent_max_seeder_processes_per_host)
    finally:
        utils.cleanup_staging_area(staging_path)

    return vdi_list


if __name__ == '__main__':
    utils.register_plugin_calls(download_vhd)
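
The tarball downloaded above lands in the staging area under the torrent's basename minus its ".torrent" suffix, which is exactly what tarball_path reconstructs; a quick check (the file name is illustrative):

import os

torrent_path = '/images/ubuntu.tarball.torrent'
print(os.path.basename(torrent_path).replace('.torrent', ''))
# -> ubuntu.tarball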
Example #6
        # TODO(mfedosin): remove this check when v1 is deprecated.
        if api_version == 1:
            _download_tarball_by_url_v1(sr_path, staging_path, image_id,
                                        endpoint, extra_headers)
        else:
            _download_tarball_by_url_v2(sr_path, staging_path, image_id,
                                        endpoint, extra_headers)

        # Move the VHDs from the staging area into the storage repository
        return utils.import_vhds(sr_path, staging_path, uuid_stack)
    finally:
        utils.cleanup_staging_area(staging_path)


def upload_vhd2(session, vdi_uuids, image_id, endpoint, sr_path,
                extra_headers, properties, api_version=1):
    """Bundle the VHDs comprising an image and then stream them into
    Glance.
    """
    staging_path = utils.make_staging_area(sr_path)
    try:
        utils.prepare_staging_area(sr_path, staging_path, vdi_uuids)
        # TODO(mfedosin): remove this check when v1 is deprecated.
        if api_version == 1:
            _upload_tarball_by_url_v1(staging_path, image_id, endpoint,
                                      extra_headers, properties)
        else:
            _upload_tarball_by_url_v2(staging_path, image_id, endpoint,
                                      extra_headers, properties)
    finally:
        utils.cleanup_staging_area(staging_path)


if __name__ == "__main__":
    utils.register_plugin_calls(download_vhd2, upload_vhd2)
Example #7
import re

import utils


def _read_proc_net():
    # Each data line of /proc/net/dev holds per-interface rx/tx counters.
    f = open('/proc/net/dev', 'r')
    try:
        return f.readlines()
    finally:
        f.close()


def _get_bandwidth_from_proc():
    devs = [l.strip() for l in _read_proc_net()]
    # ignore headers
    devs = devs[2:]
    vif_pattern = re.compile(r"^vif(\d+)\.(\d+)")
    dlist = [d.split(':', 1) for d in devs if vif_pattern.match(d)]
    devmap = dict()
    for name, stats in dlist:
        slist = stats.split()
        dom, vifnum = name[3:].split('.', 1)
        dev = devmap.get(dom, {})
        # Note, we deliberately swap in and out, as instance traffic
        # shows up inverted due to going through the bridge. (mdragon)
        dev[vifnum] = dict(bw_in=int(slist[8]), bw_out=int(slist[0]))
        devmap[dom] = dev
    return devmap


def fetch_all_bandwidth(session):
    return _get_bandwidth_from_proc()


if __name__ == '__main__':
    utils.register_plugin_calls(fetch_all_bandwidth)
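
For reference, each data row of /proc/net/dev is "iface: <8 receive fields> <8 transmit fields>" with byte counts in columns 0 and 8, so the swap described in the comment is visible on a canned line (the numbers are made up):

# Sample /proc/net/dev row for domain 1, vif 0 (values invented).
sample = "vif1.0: 4096 10 0 0 0 0 0 0 8192 20 0 0 0 0 0 0"
name, stats = sample.split(':', 1)
fields = stats.split()
# Receive bytes become bw_out and transmit bytes become bw_in, since
# instance traffic shows up inverted on the dom0 side of the bridge.
print({'bw_in': int(fields[8]), 'bw_out': int(fields[0])})
# -> {'bw_in': 8192, 'bw_out': 4096}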
Example #8
import logging
import os

import utils

import pluginlib_nova as pluginlib


def _mkfs(fs, path, label):
    # Signature matches the _mkfs(fs_type, partition_path, fs_label)
    # call in mkfs() below.
    if fs == 'swap':
        args = ['mkswap']
    elif fs == 'ext3':
        args = ['mkfs', '-t', fs]
        # Pass -F so mkfs does not prompt when run on a non-block device.
        args.extend(['-F'])
        if label:
            args.extend(['-L', label])
    else:
        raise pluginlib.PluginError("Partition type %s not supported" % fs)
    args.append(path)
    utils.run_command(args)


def mkfs(session, dev, partnum, fs_type, fs_label):
    dev_path = utils.make_dev_path(dev)

    out = utils.run_command(['kpartx', '-avspp', dev_path])
    try:
        logging.info('kpartx output: %s' % out)
        mapperdir = os.path.join('/dev', 'mapper')
        dev_base = os.path.basename(dev)
        partition_path = os.path.join(mapperdir, "%sp%s" % (dev_base, partnum))
        _mkfs(fs_type, partition_path, fs_label)
    finally:
        # Always remove partitions otherwise we can't unplug the VBD
        utils.run_command(['kpartx', '-dvspp', dev_path])


if __name__ == "__main__":
    utils.register_plugin_calls(wait_for_dev, make_partition, mkfs)
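
kpartx -a exposes each partition of dev_path under /dev/mapper as <device>p<partnum>, which is why partition_path is assembled the way it is; the kpartx -d in the finally block tears those mappings down again so the VBD can be unplugged. A quick sketch of the naming (the device name is illustrative):

import os

# e.g. device "xvdb", partition 1 -> /dev/mapper/xvdbp1
dev_base = os.path.basename('/dev/xvdb')
print(os.path.join('/dev', 'mapper', "%sp%s" % (dev_base, 1)))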
Example #9
def get_pci_type(session, pci_device):
    # A virtual function exposes a "physfn" link back to its parent
    # device; a physical function exposes "virtfn<N>" links instead.
    output = _run_command(
        ["ls", "/sys/bus/pci/devices/" + pci_device + "/"])

    if "physfn" in output:
        return "type-VF"
    if "virtfn" in output:
        return "type-PF"
    return "type-PCI"


if __name__ == "__main__":
    # Support both serialized and non-serialized plugin approaches
    _, methodname = xmlrpclib.loads(sys.argv[1])
    if methodname in ['query_gc', 'get_pci_device_details', 'get_pci_type',
                      'network_config']:
        utils.register_plugin_calls(query_gc,
                                    get_pci_device_details,
                                    get_pci_type,
                                    network_config)

    XenAPIPlugin.dispatch(
        {"host_data": host_data,
         "set_host_enabled": set_host_enabled,
         "host_shutdown": host_shutdown,
         "host_reboot": host_reboot,
         "host_start": host_start,
         "host_join": host_join,
         "get_config": get_config,
         "set_config": set_config,
         "iptables_config": iptables_config,
         "host_uptime": host_uptime})