Example #1
def install_ssh_keys(config, *hosts):
    """
    Generate an SSH key pair and install the public and private keys on the
    given hosts to make cold migration work.
    :param config: CloudFerry config
    :param hosts: hosts on which to install the keys
    """
    ssh_user = config.cloud.ssh_user
    ssh_password = config.cloud.ssh_sudo_password
    home_path = cfglib.CONF.evacuation.nova_home_path
    nova_user = cfglib.CONF.evacuation.nova_user
    ssh_config = '\\n'.join(['UserKnownHostsFile /dev/null',
                             'StrictHostKeyChecking no'])
    ssh_path = '/'.join([home_path, '.ssh'])
    ssh_backup_base = '/'.join([home_path, '.ssh_backup'])

    key = RSA.generate(2048, os.urandom)
    public_key = key.exportKey('OpenSSH').replace('\n', '\\n')
    private_key = key.exportKey('PEM').replace('\n', '\\n')

    ssh_backups = {}
    for host in hosts:
        runner = remote_runner.RemoteRunner(host, ssh_user,
                                            password=ssh_password,
                                            sudo=True)
        ssh_backup_path = '/'.join([ssh_backup_base,
                                    os.urandom(8).encode('hex')])
        try:
            runner.run('test -e "{path}"', path=ssh_path)
            runner.run('mkdir -p {backup_base}', backup_base=ssh_backup_base)
            runner.run('mv "{path}" "{backup_path}"', path=ssh_path,
                       backup_path=ssh_backup_path)
            ssh_backups[host] = ssh_backup_path
        except remote_runner.RemoteExecutionError:
            LOG.debug("Dot SSH directory not found, skipping backup")

        runner.run('mkdir -p "{path}"', path=ssh_path)
        runner.run('echo -e "{key}" > "{path}"', key=public_key,
                   path='/'.join([ssh_path, 'authorized_keys']))
        runner.run('echo -e "{key}" > "{path}"', key=private_key,
                   path='/'.join([ssh_path, 'id_rsa']))
        runner.run('echo -e "{config}" > "{path}"', config=ssh_config,
                   path='/'.join([ssh_path, 'config']))
        runner.run('chmod 0600 "{path}"', path='/'.join([ssh_path, 'id_rsa']))
        runner.run('chown -R "{user}:{user}" "{path}"',
                   user=nova_user, path=ssh_path)
    try:
        yield
    finally:
        for host in hosts:
            runner = remote_runner.RemoteRunner(host, ssh_user,
                                                password=ssh_password,
                                                sudo=True)
            runner.run('rm -rf "{path}"', path=ssh_path)
            ssh_backup_path = ssh_backups.get(host)
            if ssh_backup_path is not None:
                runner.run_ignoring_errors(
                    'mv "{backup_path}" "{path}"',
                    backup_path=ssh_backup_path, path=ssh_path)
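
The yield inside a try/finally here indicates that install_ssh_keys is used as a context manager (presumably wrapped with contextlib.contextmanager, although the decorator is not shown in this excerpt). A minimal usage sketch under that assumption, with hypothetical host names and a hypothetical migration step:

# Hypothetical usage: the keys exist only inside the with-block; the finally
# branch above removes them and restores any backed-up .ssh directories.
with install_ssh_keys(config, 'compute-01', 'compute-02'):
    perform_cold_migration()  # placeholder for the work that needs the keys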
Example #2
def fix_post_cobalt_ephemeral_disk(config, instance):
    """
    Merge ephemeral disk chain if it was broken by cobalt migrate
    :param config: cloud configuration
    :param instance: VM instance (as returned by NovaClient)
    """

    host = getattr(instance, INSTANCE_HOST_ATTRIBUTE)
    instance_name = getattr(instance, INSTANCE_NAME_ATTRIBUTE)

    ssh_user = config.cloud.ssh_user
    ssh_password = config.cloud.ssh_sudo_password
    runner = remote_runner.RemoteRunner(host, ssh_user,
                                        password=ssh_password,
                                        sudo=True)
    blkinfo = runner.run('virsh domblklist {name}', name=instance_name)
    for line in blkinfo.splitlines():
        if instance.id not in line:
            continue
        tokens = line.split()
        if len(tokens) < 2:
            continue
        _, path = tokens[:2]
        if instance.id not in path:
            continue
        cobalt_base_path = path + '.base'
        info = runner.run('qemu-img info {input}', input=path)
        if cobalt_base_path not in info:
            continue
        merge_path = path + '.merge'
        runner.run('qemu-img convert -f qcow2 -O qcow2 {input} {output} && '
                   'mv {output} {input}',
                   input=path, output=merge_path)
Example #3
    def restore_image(self, image_id, host, filename):
        """
        Processing image file: copy from source to destination,
        create glance image

        :param image_id: image ID from source
        :param image_host: host of image from source
        :param diff: diff file of root disk for instance
        :return: new image if image is created
        """
        LOG.debug('Processing an image %s from host %s and filename %s',
                  image_id, host, filename)

        image_file_info = self.src_cloud.qemu_img.get_info(filename, host)
        image_resource = self.dst_cloud.resources[utils.IMAGE_RESOURCE]
        runner = remote_runner.RemoteRunner(host, self.cfg.src.ssh_user,
                                            self.cfg.src.ssh_sudo_password,
                                            True)
        file_size = files.remote_file_size(runner, filename)
        with files.RemoteStdout(
                host,
                self.cfg.src.ssh_user,
                'dd if={filename}',
                filename=image_file_info.backing_filename) as f:
            fp = file_proxy.FileProxy(f.stdout,
                                      name='image %s' % image_id,
                                      size=file_size)
            new_image = image_resource.create_image(
                id=image_id,
                name='restored image %s from host %s' % (image_id, host),
                container_format='bare',
                disk_format=image_file_info.format,
                is_public=True,
                data=fp)
            return new_image
Example #4
    def _ssh_connectivity_between_controllers(self):
        src_host = self.cfg.src.ssh_host
        src_user = self.cfg.src.ssh_user
        dst_host = self.cfg.dst.ssh_host
        dst_user = self.cfg.dst.ssh_user

        LOG.info("Checking ssh connectivity between '%s' and '%s'", src_host,
                 dst_host)

        rr = remote_runner.RemoteRunner(src_host, src_user)

        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')

        cmd = "ssh {opts} {user}@{host} 'echo ok'".format(opts=ssh_opts,
                                                          user=dst_user,
                                                          host=dst_host)

        try:
            rr.run(cmd)
        except remote_runner.RemoteExecutionError:
            msg = ("No ssh connectivity between source host '{src_host}' and "
                   "destination host '{dst_host}'. Make sure you have keys "
                   "and correct configuration on these nodes. To verify run "
                   "'{ssh_cmd}' from '{src_host}' node")
            msg = msg.format(src_host=src_host, dst_host=dst_host, ssh_cmd=cmd)
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)
Example #5
    def copy(self, context, source_object, destination_object):
        src_user = context.cfg.src.ssh_user
        dst_user = context.cfg.dst.ssh_user

        src_host = source_object.host
        dst_host = destination_object.host

        rr = remote_runner.RemoteRunner(src_host, src_user)

        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')

        try:
            progress_view = ""
            if files.is_installed(rr, "pv"):
                src_file_size = files.remote_file_size(rr, source_object.path)
                progress_view = "pv --size {size} --progress | ".format(
                    size=src_file_size)

            copy = ("dd if={src_file} | {progress_view} "
                    "ssh {ssh_opts} {dst_user}@{dst_host} "
                    "'dd of={dst_device}'")
            rr.run(
                copy.format(src_file=source_object.path,
                            dst_user=dst_user,
                            dst_host=dst_host,
                            ssh_opts=ssh_opts,
                            dst_device=destination_object.path,
                            progress_view=progress_view))
        except remote_runner.RemoteExecutionError as e:
            msg = "Cannot copy {src_object} to {dst_object}: {error}"
            msg = msg.format(src_object=source_object,
                             dst_object=destination_object,
                             error=e.message)
            raise CopyFailed(msg)
Example #6
    def get_volume_object(self, context, volume_id):
        """:raises: VolumeObjectNotFoundError in case object is not found"""
        controller = context.cloud_config.cloud.ssh_host
        user = context.cloud_config.cloud.ssh_user
        paths = context.cloud_config.storage.nfs_mount_point_bases
        volume_template = context.cloud_config.storage.volume_name_template

        volume_pattern = generate_volume_pattern(volume_template, volume_id)

        rr = remote_runner.RemoteRunner(controller, user, ignore_errors=True)

        for mount_point in paths:
            # errors are ignored to avoid "Filesystem loop detected" messages
            # which don't matter anyway
            find = "find {mount_point} -name '{volume_pattern}' 2>/dev/null"
            res = rr.run(find.format(mount_point=mount_point,
                                     volume_pattern=volume_pattern))

            if res:
                # there should only be one file matching
                path = res.stdout.splitlines().pop()
                return copy_mechanisms.CopyObject(host=controller, path=path)

        msg = ("Volume object for volume '{volume_id}' not found. Either "
               "volume exists in DB, but is not present on storage, or "
               "'nfs_mount_point_bases' is set incorrectly in config")
        raise base.VolumeObjectNotFoundError(msg.format(volume_id=volume_id))
Example #7
    def __init__(self, init, cloud=None):
        super(ReCreateBootImage, self).__init__(init, cloud)
        self.src_user = self.cfg.src.ssh_user
        src_password = self.cfg.src.ssh_sudo_password
        self.src_host = self.cfg.src.ssh_host
        self.dst_user = self.cfg.dst.ssh_user
        dst_password = self.cfg.dst.ssh_sudo_password
        self.dst_host = self.cfg.dst.ssh_host
        self.src_runner = remote_runner.RemoteRunner(self.src_host,
                                                     self.src_user,
                                                     password=src_password,
                                                     sudo=True)
        self.dst_runner = remote_runner.RemoteRunner(self.dst_host,
                                                     self.dst_user,
                                                     password=dst_password,
                                                     sudo=True)
Example #8
    def runner(self, host, position, gateway=None):
        """
        Alias for creating a RemoteRunner

        :param host: Host
        :param position: 'src' or 'dst' cloud
        :param gateway: Gateway for a runner
        :return: RemoteRunner
        """
        key = (host, position)
        runner = self._runners_cache.get(key)
        if runner is None:
            if position == 'src':
                user = CONF.src.ssh_user
                password = CONF.src.ssh_sudo_password
            else:
                user = CONF.dst.ssh_user
                password = CONF.dst.ssh_sudo_password
            runner = remote_runner.RemoteRunner(
                host,
                user,
                password=password,
                sudo=True,
                gateway=gateway)
            self._runners_cache[key] = runner
        return runner
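
A brief usage sketch of this cached-runner helper, from inside a hypothetical owning task object; repeated calls with the same (host, position) pair return the same RemoteRunner instance:

# Hypothetical usage: the first call builds a sudo-enabled RemoteRunner from
# CONF.src credentials; the second call is served from self._runners_cache.
runner = self.runner('compute-01', 'src')
assert runner is self.runner('compute-01', 'src')
runner.run('hostname')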
Example #9
    def run(self, info=None, **kwargs):
        faulty_hosts = []

        for cloud in [self.src_cloud, self.dst_cloud]:
            nova_compute = cloud.resources[utils.COMPUTE_RESOURCE]
            hosts = nova_compute.get_compute_hosts()

            for host in hosts:
                runner = remote_runner.RemoteRunner(
                    host, cloud.cloud_config.cloud.ssh_user)

                qemu_version_cmd = ("kvm -version | "
                                    "sed -E 's/QEMU emulator version "
                                    "([0-9]\\.[0-9]\\.?[0-9]?).*/\\1/'")

                version = runner.run(qemu_version_cmd)

                if self.REQUIRED_QEMU_VERSION not in version:
                    faulty_hosts.append(host)

        if faulty_hosts:
            msg = ("qemu must be upgraded to v{required} on following hosts: "
                   "{hosts}").format(required=self.REQUIRED_QEMU_VERSION,
                                     hosts=faulty_hosts)
            LOG.error(msg)
            raise RuntimeError(msg)
Example #10
    def test_remote_runner_raises_error_if_errors_are_not_ignored(self):
        rr = remote_runner.RemoteRunner('host',
                                        'user',
                                        'password',
                                        ignore_errors=False)

        self.assertRaises(remote_runner.RemoteExecutionError, rr.run,
                          "non existing failing command")
Example #11
    def test_root_user_does_not_sudo(self, _, sudo, run):
        rr = remote_runner.RemoteRunner('host',
                                        'root',
                                        key='key',
                                        sudo=True,
                                        ignore_errors=False)
        rr.run('cmd')

        assert not sudo.called
        assert run.called
Example #12
    def __init__(self, config, cloud):
        self.config = config
        self.host = config.cloud.host
        self.cloud = cloud
        self.identity_client = cloud.resources['identity']
        self.filter_tenant_id = None
        self.filter_image = []
        # get mysql settings
        self.mysql_connector = cloud.mysql_connector('glance')
        self.runner = remote_runner.RemoteRunner(self.host,
                                                 self.config.cloud.ssh_user)
        super(GlanceImage, self).__init__(config)
Example #13
    def execute(self, cmd, internal_host=None, host_exec=None,
                ignore_errors=False, sudo=False):
        host = host_exec if host_exec else self.host
        runner = \
            remote_runner.RemoteRunner(host,
                                       self.cloud.ssh_user,
                                       password=self.cloud.ssh_sudo_password,
                                       sudo=sudo,
                                       ignore_errors=ignore_errors)
        if internal_host:
            return self.execute_on_inthost(runner, str(cmd), internal_host)
        else:
            return runner.run(str(cmd))
Example #14
    def test_errors_are_suppressed_for_run_ignoring_errors(self, *_):
        rr = remote_runner.RemoteRunner('host',
                                        'user',
                                        'password',
                                        sudo=True,
                                        ignore_errors=False)

        try:
            rr.run_ignoring_errors("failing command")

            self.assertFalse(rr.ignore_errors)
        except remote_runner.RemoteExecutionError as e:
            self.fail("run_ignoring_errors must not raise exceptions: %s" % e)
Example #15
def get_ips(init_host, compute_host, ssh_user):
    runner = remote_runner.RemoteRunner(host=compute_host,
                                        user=ssh_user,
                                        gateway=init_host)
    cmd = ("ifconfig | awk -F \"[: ]+\" \'/inet addr:/ "
           "{ if ($4 != \"127.0.0.1\") print $4 }\'")
    out = runner.run(cmd)
    list_ips = []
    for info in out.split():
        try:
            ipaddr.IPAddress(info)
        except ValueError:
            continue
        list_ips.append(info)
    return list_ips
Example #16
def has_ssh_connectivity(connection_user, user, key, src_host, dst_host):
    """:returns: True if `user@src_host` can ssh into `dst_host` with `key`"""

    rr = remote_runner.RemoteRunner(src_host,
                                    connection_user,
                                    timeout=5)

    try:
        ssh = ("ssh -i {key} "
               "-o StrictHostKeyChecking=no "
               "-o UserKnownHostsFile=/dev/null "
               "{user}@{dst_host} 'echo'")
        rr.run(ssh.format(key=key, user=user, dst_host=dst_host))
        return True
    except remote_runner.RemoteExecutionError:
        return False
Example #17
    def check_access(self, node):
        cfg = self.cloud.cloud_config.cloud
        gateway = cfg.ssh_host
        runner = remote_runner.RemoteRunner(node,
                                            cfg.ssh_user,
                                            password=cfg.ssh_sudo_password,
                                            timeout=60,
                                            gateway=gateway)
        try:
            with api.settings(abort_on_prompts=True):
                runner.run('echo')
        except remote_runner.RemoteExecutionError as e:
            LOG.debug('Check access error: %s', e, exc_info=True)
            LOG.warning(
                "SSH connection from '%s' to '%s' failed with error: "
                "'%s'", gateway, node, e.message)
Example #18
    def check_access(self, node):
        ssh_access_failed = False

        cfg = self.cloud.cloud_config.cloud
        runner = remote_runner.RemoteRunner(node, cfg.ssh_user,
                                            password=cfg.ssh_sudo_password)
        gateway = self.cloud.getIpSsh()
        ssh_attempts = self.cloud.cloud_config.migrate.ssh_connection_attempts

        try:
            with settings(gateway=gateway, connection_attempts=ssh_attempts):
                runner.run('echo')
        except Exception as error:
            LOG.error("SSH connection from '%s' to '%s' failed with error: "
                      "'%s'", gateway, node, error.message)
            ssh_access_failed = True

        return ssh_access_failed
Example #19
def file_transfer_engine(config, host, user, password):
    """Factory which either returns RSYNC copier if `rsync` is available on
    destination compute, or `scp` otherwise"""
    copier = RsyncCopier
    if config.migrate.ephemeral_copy_backend == 'rsync':
        try:
            src_runner = remote_runner.RemoteRunner(host,
                                                    user,
                                                    password=password,
                                                    sudo=True)
            LOG.debug("Checking if rsync is installed")
            src_runner.run("rsync --help &>/dev/null")
            LOG.debug("Using rsync copy")
        except remote_runner.RemoteExecutionError:
            LOG.debug("rsync is not available, using scp copy")
            copier = ScpCopier
    elif config.migrate.ephemeral_copy_backend == 'scp':
        copier = ScpCopier
    return copier
Example #20
def remove_bbcp(cloud):
    """
    Remove bbcp from the hosts memorized in the hosts_with_bbcp variable.

    :param cloud: object of a cloud
    """
    if cloud.position == 'src':
        user = CONF.src.ssh_user
        sudo_password = CONF.src.ssh_sudo_password
    else:
        user = CONF.dst.ssh_user
        sudo_password = CONF.dst.ssh_sudo_password

    LOG.info("Removing the bbcp files from hosts of '%s' cloud: %s",
             cloud.position, cloud.hosts_with_bbcp)
    cmd = 'rm -f {path}'
    for host in cloud.hosts_with_bbcp:
        runner = remote_runner.RemoteRunner(host, user, password=sudo_password,
                                            sudo=True)
        runner.run_ignoring_errors(cmd, path=BBCP_PATH)
Example #21
    def transfer(self, data):
        src_host = data['host_src']
        src_path = data['path_src']
        dst_host = data['host_dst']
        dst_path = data['path_dst']

        src_user = self.cfg.src.ssh_user
        dst_user = self.cfg.dst.ssh_user
        src_password = self.cfg.src.ssh_sudo_password

        src_runner = remote_runner.RemoteRunner(src_host,
                                                src_user,
                                                password=src_password,
                                                sudo=True)

        ssh_cipher = ssh_util.get_cipher_option()
        ssh_opts = ["UserKnownHostsFile=/dev/null", "StrictHostKeyChecking=no"]

        rsync = ("rsync "
                 "--partial "
                 "--inplace "
                 "--perms "
                 "--times "
                 "--compress "
                 "--verbose "
                 "--progress "
                 "--rsh='ssh {ssh_opts} {ssh_cipher}' "
                 "{source_file} "
                 "{dst_user}@{dst_host}:{dst_path}").format(
            ssh_cipher=ssh_cipher,
            ssh_opts=" ".join(["-o {}".format(opt) for opt in ssh_opts]),
            source_file=src_path,
            dst_user=dst_user,
            dst_host=dst_host,
            dst_path=dst_path)

        src_runner.run_repeat_on_errors(rsync)
Example #22
def _remote_runner(cloud):
    return remote_runner.RemoteRunner(cloud[CFG].get(HOST),
                                      cloud[CFG].ssh_user,
                                      cloud[CFG].ssh_sudo_password,
                                      sudo=True,
                                      gateway=cloud[CFG].get(SSH_HOST))
Example #23
    def run(self, **kwargs):
        cfg = self.cloud.cloud_config.cloud
        runner = remote_runner.RemoteRunner(cfg.ssh_host, cfg.ssh_user)

        temp_dir_name = os.popen('mktemp -dt check_band_XXXX').read().rstrip()
        temp_file_name = str(uuid.uuid4())

        claimed_bandw = self.cloud.cloud_config.initial_check.claimed_bandwidth
        test_file_size = self.cloud.cloud_config.initial_check.test_file_size

        ssh_user = self.cloud.cloud_config.cloud.ssh_user

        factor = self.cloud.cloud_config.initial_check.factor
        req_bandwidth = claimed_bandw * factor

        local_file_path = os.path.join(temp_dir_name, temp_file_name)
        remote_file_path = os.path.join(temp_dir_name, temp_file_name)

        scp_upload = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(), '',
                                     ssh_user, cfg.ssh_host, remote_file_path,
                                     temp_dir_name)

        scp_download = cmd_cfg.scp_cmd(ssh_util.get_cipher_option(),
                                       local_file_path, ssh_user, cfg.ssh_host,
                                       temp_dir_name, '')

        with files.RemoteDir(runner, temp_dir_name):
            try:
                with utils.forward_agent(env.key_filename):
                    dd_command = cmd_cfg.dd_full('/dev/zero', remote_file_path,
                                                 1, 0, test_file_size)
                    self.cloud.ssh_util.execute(dd_command)

                    LOG.info("Checking upload speed... Wait please.")
                    period_upload = utils.timer(subprocess.call,
                                                str(scp_upload),
                                                shell=True)

                    LOG.info("Checking download speed... Wait please.")
                    period_download = utils.timer(subprocess.call,
                                                  str(scp_download),
                                                  shell=True)
            finally:
                if len(temp_dir_name) > 1:
                    os.system("rm -rf {}".format(temp_dir_name))
                else:
                    raise RuntimeError('Wrong dirname %s, stopping' %
                                       temp_dir_name)

        # To have Megabits per second
        upload_speed = test_file_size / period_upload * 8
        download_speed = test_file_size / period_download * 8

        if upload_speed < req_bandwidth or download_speed < req_bandwidth:
            raise RuntimeError(
                'Bandwidth is not OK. '
                'Claimed bandwidth: %s Mb/s. '
                'Required speed: %s Mb/s. '
                'Actual upload speed: %.2f Mb/s. '
                'Actual download speed: %.2f Mb/s. '
                'Aborting migration...' %
                (claimed_bandw, req_bandwidth, upload_speed, download_speed))

        LOG.info(
            "Bandwith is OK. "
            "Required speed: %.2f Mb/s. "
            "Upload speed: %.2f Mb/s. "
            "Download speed: %.2f Mb/s", req_bandwidth, upload_speed,
            download_speed)
Example #24
    def transfer(self, data):
        src_host = data['host_src']
        src_path = data['path_src']
        dst_host = data['host_dst']
        dst_path = data['path_dst']

        src_user = self.cfg.src.ssh_user
        dst_user = self.cfg.dst.ssh_user
        block_size = self.cfg.migrate.ssh_chunk_size
        num_retries = self.cfg.migrate.retry
        src_password = self.cfg.src.ssh_sudo_password
        dst_password = self.cfg.dst.ssh_sudo_password

        src_runner = remote_runner.RemoteRunner(src_host,
                                                src_user,
                                                password=src_password,
                                                sudo=True)
        dst_runner = remote_runner.RemoteRunner(dst_host,
                                                dst_user,
                                                password=dst_password,
                                                sudo=True)

        file_size = files.remote_file_size_mb(src_runner, src_path)

        partial_files = []

        src_md5 = remote_md5_sum(src_runner, src_path)

        num_blocks = int(math.ceil(float(file_size) / block_size))

        src_temp_dir = os.path.join(os.path.basename(src_path), '.cf.copy')
        dst_temp_dir = os.path.join(os.path.basename(dst_path), '.cf.copy')

        with files.RemoteDir(src_runner, src_temp_dir) as src_temp, \
                files.RemoteDir(dst_runner, dst_temp_dir) as dst_temp:
            for i in xrange(num_blocks):
                part = os.path.basename(src_path) + '.part{i}'.format(i=i)
                part_path = os.path.join(src_temp.dirname, part)
                remote_split_file(src_runner, src_path, part_path, i,
                                  block_size)
                gzipped_path = remote_gzip(src_runner, part_path)
                gzipped_filename = os.path.basename(gzipped_path)
                dst_gzipped_path = os.path.join(dst_temp.dirname,
                                                gzipped_filename)

                verified_file_copy(src_runner, dst_runner, dst_user,
                                   gzipped_path, dst_gzipped_path, dst_host,
                                   num_retries)

                remote_unzip(dst_runner, dst_gzipped_path)
                partial_files.append(os.path.join(dst_temp.dirname, part))

            for i in xrange(num_blocks):
                remote_join_file(dst_runner, dst_path, partial_files[i], i,
                                 block_size)

        dst_md5 = remote_md5_sum(dst_runner, dst_path)

        if src_md5 != dst_md5:
            message = ("Error copying file from '{src_host}:{src_file}' "
                       "to '{dst_host}:{dst_file}'").format(
                src_file=src_path, src_host=src_host, dst_file=dst_path,
                dst_host=dst_host)
            LOG.error(message)
            remote_rm_file(dst_runner, dst_path)
            raise FileCopyFailure(message)
Example #25
    def run(self, info=None, **kwargs):
        new_id, instance = info[utils.INSTANCES_TYPE].items()[0]
        old_id = instance['old_id']

        dst_compute = self.dst_cloud.resources[utils.COMPUTE_RESOURCE]
        src_compute = self.src_cloud.resources[utils.COMPUTE_RESOURCE]

        dst_compute.change_status('active', instance_id=new_id)

        dst_instance = dst_compute.get_instance(new_id)
        src_instance = src_compute.get_instance(old_id)

        # do not attempt to live migrate inactive instances
        if src_instance.status.lower() not in ['active', 'verify_resize']:
            LOG.debug(
                "Skipping live migration of VM '%s', because it's "
                "inactive", src_instance.name)
            return

        src_host = instance_host(src_instance)
        dst_host = instance_host(dst_instance)

        src_runner = remote_runner.RemoteRunner(src_host,
                                                cfglib.CONF.src.ssh_user)
        dst_runner = remote_runner.RemoteRunner(dst_host,
                                                cfglib.CONF.dst.ssh_user)

        src_libvirt = libvirt.Libvirt(src_runner)
        dst_libvirt = libvirt.Libvirt(dst_runner)

        src_virsh_name = instance_libvirt_name(src_instance)
        dst_virsh_name = instance_libvirt_name(dst_instance)

        src_vm_xml = src_libvirt.get_xml(src_virsh_name)
        dst_vm_xml = dst_libvirt.get_xml(dst_virsh_name)

        src_vm_xml.disk_file = dst_vm_xml.disk_file
        src_vm_xml.serial_file = dst_vm_xml.serial_file
        src_vm_xml.console_file = dst_vm_xml.console_file
        src_vm_xml.interfaces = dst_vm_xml.interfaces

        dst_backing_file = dst_libvirt.get_backing_file(new_id)
        src_backing_file = src_libvirt.get_backing_file(old_id)
        migration_backing_file = os.path.join(
            libvirt.nova_instances_path, '_base',
            'migration_disk_{}'.format(old_id))
        dst_compute.wait_for_status(new_id, dst_compute.get_status, 'active')

        with files.RemoteTempFile(src_runner,
                                  "migrate-{}".format(old_id),
                                  src_vm_xml.dump()) as migration_file,\
                files.RemoteSymlink(src_runner,
                                    src_backing_file,
                                    migration_backing_file),\
                files.RemoteSymlink(dst_runner,
                                    dst_backing_file,
                                    migration_backing_file),\
                ubuntu.StopNovaCompute(dst_runner),\
                libvirt.QemuBackingFileMover(src_libvirt.runner,
                                             migration_backing_file,
                                             old_id):

            destroyer = libvirt.DestNovaInstanceDestroyer(
                dst_libvirt, dst_compute, dst_virsh_name, dst_instance.id)
            try:
                destroyer.do()
                src_libvirt.live_migrate(src_virsh_name, dst_host,
                                         migration_file.filename)
            except remote_runner.RemoteExecutionError:
                destroyer.undo()
            finally:
                dst_libvirt.move_backing_file(dst_backing_file, new_id)
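
Taken together, these examples exercise a small, consistent RemoteRunner surface. The sketch below consolidates the constructor arguments and run variants that appear above, with hypothetical hosts and credentials; the keyword names are exactly those seen in the examples, not a verified full signature:

# Constructor arguments observed in the examples: host and user are positional;
# password=, key=, sudo=, gateway=, timeout= and ignore_errors= appear as keywords.
runner = remote_runner.RemoteRunner('compute-01',              # hypothetical host
                                    'cloud_user',              # hypothetical user
                                    password='sudo_password',  # hypothetical credentials
                                    sudo=True,
                                    gateway='controller-01',
                                    timeout=60,
                                    ignore_errors=False)

# Run variants used in the examples above:
out = runner.run('echo ok')                                    # raises RemoteExecutionError on failure
runner.run_ignoring_errors('rm -f "{path}"', path='/tmp/foo')  # failures are suppressed
runner.run_repeat_on_errors('echo retried')                    # retried on failure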