Exemplo n.º 1
0
def dump_db(cloud, database=ALL_DATABASES):
    """Dump a MySQL database (or all databases) to a local file.

    The dump is produced by mysqldump, executed either locally or on the
    remote DB host depending on ``run_mysqldump_locally`` config, and the
    output is written to a file named by the ``db_dump_filename`` template.

    :param cloud: cloud object carrying configuration and position
    :param database: database name to dump; defaults to all databases
    """
    config = cloud.cloud_config
    mysql_cfg = config.mysql

    parts = ["mysqldump {database}", "--user={user}"]
    if mysql_cfg.db_password:
        parts.append("--password={password}")

    db_host = get_db_host(config)
    if config.mysqldump.run_mysqldump_locally:
        # local runs connect over TCP, so host and port must be explicit
        parts.append("--port={port}")
        parts.append("--host={host}")
        run = local.run
    else:
        runner = remote_runner.RemoteRunner(
            db_host,
            config.cloud.ssh_user,
            password=config.cloud.ssh_sudo_password,
            mute_stdout=True)
        run = runner.run

    command = ' '.join(parts).format(
        database=database,
        user=mysql_cfg.db_user,
        password=mysql_cfg.db_password,
        port=mysql_cfg.db_port,
        host=mysql_cfg.db_host)
    dump = run(command)

    dump_label = 'all_databases' if database == ALL_DATABASES else database
    template = config.mysqldump.db_dump_filename
    out_path = template.format(database=dump_label,
                               time=time.time(),
                               position=cloud.position)
    with open(out_path, 'w') as f:
        f.write(dump)
    def _ssh_connectivity_between_controllers(self):
        """Verify the source controller can ssh into the destination one.

        Runs a trivial command from the source host towards the
        destination host; on failure the migration is aborted.

        :raises exception.AbortMigrationError: if ssh connectivity fails
        """
        source = self.cfg.src
        dest = self.cfg.dst

        LOG.info("Checking ssh connectivity between '%s' and '%s'",
                 source.ssh_host, dest.ssh_host)

        # host key checks disabled: we only probe key-based reachability
        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')
        cmd = "ssh {opts} {user}@{host} 'echo ok'".format(
            opts=ssh_opts, user=dest.ssh_user, host=dest.ssh_host)

        runner = remote_runner.RemoteRunner(source.ssh_host, source.ssh_user)
        try:
            runner.run(cmd)
        except remote_runner.RemoteExecutionError:
            msg = ("No ssh connectivity between source host '{src_host}' and "
                   "destination host '{dst_host}'. Make sure you have keys "
                   "and correct configuration on these nodes. To verify run "
                   "'{ssh_cmd}' from '{src_host}' node")
            msg = msg.format(src_host=source.ssh_host,
                             dst_host=dest.ssh_host,
                             ssh_cmd=cmd)
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)
Exemplo n.º 3
0
    def restore_image(self, image_id, host, filename):
        """Restore a glance image on the destination cloud from a file
        residing on a source-cloud host.

        The image's backing file is streamed from the source host with
        ``dd`` over ssh and fed directly into a newly created glance
        image on the destination.

        :param image_id: image ID from source
        :param host: source-cloud host holding the image file
        :param filename: path of the image file on that host
        :return: new image if image is created
        """
        LOG.debug('Processing an image %s from host %s and filename %s',
                  image_id, host, filename)

        image_file_info = self.src_cloud.qemu_img.get_info(filename, host)
        image_resource = self.dst_cloud.resources[utils.IMAGE_RESOURCE]
        runner = remote_runner.RemoteRunner(host, self.cfg.src.ssh_user,
                                            self.cfg.src.ssh_sudo_password,
                                            True)
        file_size = files.remote_file_size(runner, filename)
        # The dd command template was corrupted in this copy of the file
        # ("dd if=(unknown)"); '{filename}' matches the keyword argument
        # passed to RemoteStdout below (the backing file reported by
        # qemu-img).
        with files.RemoteStdout(
                host,
                self.cfg.src.ssh_user,
                'dd if={filename}',
                filename=image_file_info.backing_filename) as f:
            # FileProxy wraps the remote stdout stream so glance can read
            # it like a file and report progress against file_size.
            fp = file_proxy.FileProxy(f.stdout,
                                      name='image %s' % image_id,
                                      size=file_size)
            new_image = image_resource.create_image(
                id=image_id,
                name='restored image %s from host %s' % (image_id, host),
                container_format='bare',
                disk_format=image_file_info.format,
                is_public=True,
                data=fp)
            return new_image
Exemplo n.º 4
0
    def copy(self, context, source_object, destination_object):
        """Copy a source file into a destination block device via ssh.

        The data is piped: ``dd`` reads the source file, optionally goes
        through ``pv`` for progress (when installed on the source host),
        and a remote ``dd`` on the destination writes the target device.

        :raises CopyFailed: if the remote pipeline fails
        """
        src_host = source_object.host
        dst_host = destination_object.host
        src_user = context.cfg.src.ssh_user
        dst_user = context.cfg.dst.ssh_user

        runner = remote_runner.RemoteRunner(src_host, src_user)

        # host key checks are disabled for the inner ssh hop
        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')

        try:
            progress_view = ""
            if files.is_installed(runner, "pv"):
                size = files.remote_file_size(runner, source_object.path)
                progress_view = "pv --size {size} --progress | ".format(
                    size=size)

            copy = ("dd if={src_file} | {progress_view} "
                    "ssh {ssh_opts} {dst_user}@{dst_host} "
                    "'dd of={dst_device}'")
            command = copy.format(src_file=source_object.path,
                                  dst_user=dst_user,
                                  dst_host=dst_host,
                                  ssh_opts=ssh_opts,
                                  dst_device=destination_object.path,
                                  progress_view=progress_view)
            runner.run(command)
        except remote_runner.RemoteExecutionError as e:
            msg = "Cannot copy {src_object} to {dst_object}: {error}"
            msg = msg.format(src_object=source_object,
                             dst_object=destination_object,
                             error=e.message)
            raise CopyFailed(msg)
Exemplo n.º 5
0
    def run(self, **kwargs):
        """Check that every compute host runs the required qemu version.

        Queries each compute host of both clouds over ssh and collects
        those whose reported version does not contain
        ``REQUIRED_QEMU_VERSION``.

        :raises RuntimeError: listing all hosts with the wrong version
        """
        # command is loop-invariant: extract the version string from kvm
        qemu_version_cmd = ("kvm -version | "
                            "sed -E 's/QEMU emulator version "
                            "([0-9]\\.[0-9]\\.?[0-9]?).*/\\1/'")

        faulty_hosts = []
        for cloud in (self.src_cloud, self.dst_cloud):
            nova_compute = cloud.resources[utils.COMPUTE_RESOURCE]
            for host in nova_compute.get_compute_hosts():
                runner = remote_runner.RemoteRunner(
                    host, cloud.cloud_config.cloud.ssh_user)
                version = runner.run(qemu_version_cmd)
                if self.REQUIRED_QEMU_VERSION not in version:
                    faulty_hosts.append(host)

        if faulty_hosts:
            msg = ("qemu must be upgraded to v{required} on following hosts: "
                   "{hosts}").format(required=self.REQUIRED_QEMU_VERSION,
                                     hosts=faulty_hosts)
            LOG.error(msg)
            raise RuntimeError(msg)
Exemplo n.º 6
0
    def get_volume_object(self, context, volume_id):
        """Locate the backing file of a volume on the NFS mount points.

        Searches each configured mount point base for a file matching
        the volume name pattern.

        :raises: VolumeObjectNotFoundError in case object is not found
        """
        cloud_cfg = context.cloud_config.cloud
        storage_cfg = context.cloud_config.storage

        pattern = generate_volume_pattern(
            storage_cfg.volume_name_template, volume_id)

        runner = remote_runner.RemoteRunner(
            cloud_cfg.ssh_host, cloud_cfg.ssh_user,
            ignore_errors=True, sudo=True,
            password=cloud_cfg.ssh_sudo_password)

        find = "find {mount_point} -name '{volume_pattern}' 2>/dev/null"
        for mount_point in storage_cfg.nfs_mount_point_bases:
            # errors are ignored to avoid "Filesystem loop detected" messages
            # which don't matter anyways
            res = runner.run(find.format(mount_point=mount_point,
                                         volume_pattern=pattern))
            if res:
                # there should only be one file matching
                path = res.stdout.splitlines().pop()
                return copy_mechanisms.CopyObject(host=cloud_cfg.ssh_host,
                                                  path=path)

        msg = ("Volume object for volume '{volume_id}' not found. Either "
               "volume exists in DB, but is not present on storage, or "
               "'nfs_mount_point_bases' is set incorrectly in config")
        raise base.VolumeObjectNotFoundError(msg.format(volume_id=volume_id))
Exemplo n.º 7
0
    def test_root_user_does_not_sudo(self, _, sudo, run):
        # When the ssh user is already root, RemoteRunner must execute the
        # command directly instead of wrapping it in sudo, even though
        # sudo=True was requested. (sudo/run are mocks injected by the
        # test decorators, which are not visible in this chunk.)
        rr = remote_runner.RemoteRunner('host', 'root',
                                        key='key', sudo=True,
                                        ignore_errors=False)
        rr.run('cmd')

        assert not sudo.called
        assert run.called
Exemplo n.º 8
0
    def test_remote_runner_raises_error_if_errors_are_not_ignored(self, _):
        # With ignore_errors=False a failing remote command must surface
        # as RemoteExecutionError rather than being swallowed.
        rr = remote_runner.RemoteRunner('host',
                                        'user',
                                        'password',
                                        ignore_errors=False)

        self.assertRaises(remote_runner.RemoteExecutionError, rr.run,
                          "non existing failing command")
Exemplo n.º 9
0
    def get_provider_location(context, host, path):
        """Return the filesystem backing *path* on *host*.

        Runs ``df`` on the remote host; returns None when no filesystem
        information is available for the path.
        """
        cloud_cfg = context.cloud_config.cloud

        runner = remote_runner.RemoteRunner(
            host, cloud_cfg.ssh_user,
            password=cloud_cfg.ssh_sudo_password,
            sudo=True,
            ignore_errors=True)

        df = files.remote_df(runner, path=path)
        if not df:
            return None
        return df[0]['filesystem']
Exemplo n.º 10
0
    def test_errors_are_suppressed_for_run_ignoring_errors(
            self, *_):
        # run_ignoring_errors() must suppress remote failures even when
        # the runner was created with ignore_errors=False, and must
        # restore the original ignore_errors flag afterwards.
        rr = remote_runner.RemoteRunner('host', 'user', 'password', sudo=True,
                                        ignore_errors=False)

        try:
            rr.run_ignoring_errors("failing command")

            self.assertFalse(rr.ignore_errors)
        except remote_runner.RemoteExecutionError as e:
            self.fail("run_ignoring_errors must not raise exceptions: %s" % e)
Exemplo n.º 11
0
    def transfer(self, data):
        """Run the wrapped copier, temporarily granting full permissions
        on the destination path so the copy can write to it.

        :param data: dict with at least 'host_dst' and 'path_dst' keys
        """
        runner = remote_runner.RemoteRunner(
            host=data['host_dst'],
            user=self.config.dst.ssh_user,
            password=self.config.dst.ssh_sudo_password,
            sudo=True)

        # permissions are restored when the context manager exits
        with files.grant_all_permissions(runner, data['path_dst']):
            self.copier.transfer(data)
Exemplo n.º 12
0
 def __init__(self, config, cloud):
     """Glance image migration resource.

     Caches the cloud configuration, identity client, a mysql connector
     for the 'glance' database and a RemoteRunner bound to the cloud's
     controller (ssh_host).
     """
     self.config = config
     self.ssh_host = config.cloud.ssh_host
     self.cloud = cloud
     self.identity_client = cloud.resources['identity']
     # optional filters applied when listing images; None/[] = no filtering
     self.filter_tenant_id = None
     self.filter_image = []
     # get mysql settings
     self.mysql_connector = cloud.mysql_connector('glance')
     self.runner = remote_runner.RemoteRunner(self.ssh_host,
                                              self.config.cloud.ssh_user)
     self._image_filter = None
     super(GlanceImage, self).__init__(config)
Exemplo n.º 13
0
def has_ssh_connectivity(connection_user, user, key, src_host, dst_host,
                         timeout=5):
    """:returns: True if `user@src_host` can ssh into `dst_host` with `key`

    :param connection_user: user to reach `src_host` itself
    :param user: user for the `src_host` -> `dst_host` hop
    :param key: private key path on `src_host` used for the hop
    :param timeout: seconds to wait when connecting to `src_host`
        (previously hard-coded to 5; kept as the default)
    """

    rr = remote_runner.RemoteRunner(src_host, connection_user,
                                    timeout=timeout)

    try:
        # host key checks disabled: we only probe whether the key works
        ssh = ("ssh -i {key} "
               "-o StrictHostKeyChecking=no "
               "-o UserKnownHostsFile=/dev/null "
               "{user}@{dst_host} 'echo'")
        rr.run(ssh.format(key=key, user=user, dst_host=dst_host))
        return True
    except remote_runner.RemoteExecutionError:
        return False
Exemplo n.º 14
0
def get_ips(init_host, compute_host, ssh_user):
    """Return the non-loopback IP addresses of a compute host.

    Connects to `compute_host` through `init_host` as an ssh gateway,
    parses `ifconfig` output and keeps only the tokens that parse as
    valid IP addresses.
    """
    runner = remote_runner.RemoteRunner(host=compute_host, user=ssh_user,
                                        gateway=init_host)
    cmd = ('ifconfig | awk -F "[: ]+" \'/inet addr:/ '
           '{ if ($4 != "127.0.0.1") print $4 }\'')
    output = runner.run(cmd)

    addresses = []
    for token in output.split():
        try:
            ipaddr.IPAddress(token)
        except ValueError:
            # not an IP address — awk can emit other fields; skip it
            continue
        addresses.append(token)
    return addresses
Exemplo n.º 15
0
 def check_access(self, node):
     """Check that *node* is reachable over ssh through the cloud gateway.

     Failures are logged as warnings instead of raised, so all nodes can
     be probed and reported.
     """
     cfg = self.cloud.cloud_config.cloud
     gateway = cfg.ssh_host
     runner = remote_runner.RemoteRunner(node,
                                         cfg.ssh_user,
                                         password=cfg.ssh_sudo_password,
                                         timeout=60,
                                         gateway=gateway)
     try:
         # abort_on_prompts makes fabric fail fast instead of hanging on
         # an interactive password/host-key prompt
         with api.settings(abort_on_prompts=True):
             runner.run('echo')
     except remote_runner.RemoteExecutionError as e:
         LOG.debug('Check access error: %s', e, exc_info=True)
         LOG.warning(
             "SSH connection from '%s' to '%s' failed with error: "
             "'%s'", gateway, node, e.message)
Exemplo n.º 16
0
 def execute(self,
             cmd,
             internal_host=None,
             host_exec=None,
             ignore_errors=False,
             sudo=False):
     """Execute *cmd* on a remote host.

     :param cmd: command to run (converted to str before execution)
     :param internal_host: if given, the command is relayed from the
         runner's host to this internal host instead
     :param host_exec: overrides the default host (self.host) to connect to
     :param ignore_errors: don't raise on non-zero exit status
     :param sudo: run the command under sudo
     :return: output of the executed command
     """
     host = host_exec if host_exec else self.host
     runner = \
         remote_runner.RemoteRunner(host,
                                    self.cloud.ssh_user,
                                    password=self.cloud.ssh_sudo_password,
                                    sudo=sudo,
                                    ignore_errors=ignore_errors)
     if internal_host:
         return self.execute_on_inthost(runner, str(cmd), internal_host)
     else:
         return runner.run(str(cmd))
Exemplo n.º 17
0
    def __init__(self, config, cloud):
        """Glance image migration resource.

        Caches the cloud configuration, identity client, a mysql
        connector for the 'glance' database, a RemoteRunner bound to the
        cloud's controller, a tenant name mapper and the migration state
        notifier with the cloud's observers attached.
        """
        self.config = config
        self.ssh_host = config.cloud.ssh_host
        self.cloud = cloud
        self.identity_client = cloud.resources['identity']
        # optional filters applied when listing images; None/[] = no filter
        self.filter_tenant_id = None
        self.filter_image = []
        # get mysql settings
        self.mysql_connector = cloud.mysql_connector('glance')
        self.runner = remote_runner.RemoteRunner(self.ssh_host,
                                                 self.config.cloud.ssh_user)
        self._image_filter = None
        self.tenant_name_map = mapper.Mapper('tenant_map')
        self.state_notifier = notifiers.MigrationStateNotifier()
        for o in cloud.migration_observers:
            self.state_notifier.add_observer(o)

        super(GlanceImage, self).__init__(config)
Exemplo n.º 18
0
    def runner(self, host, position, gateway=None, **kwargs):
        """
        Alias for creating a RemoteRunner

        :param host: Host
        :param position: 'src' or 'dst' cloud
        :param gateway: Gateway for a runner
        :return: RemoteRunner
        """
        # pick credentials from the side of the migration we are on
        cloud_cfg = CONF.src if position == 'src' else CONF.dst
        return remote_runner.RemoteRunner(host,
                                          cloud_cfg.ssh_user,
                                          password=cloud_cfg.ssh_sudo_password,
                                          sudo=True,
                                          gateway=gateway,
                                          **kwargs)
Exemplo n.º 19
0
def remove_bbcp(cloud):
    """
    Remove the bbcp files from the hosts recorded in the cloud's
    ``hosts_with_bbcp`` attribute.

    :param cloud: object of a cloud
    """
    side = CONF.src if cloud.position == 'src' else CONF.dst
    user = side.ssh_user
    sudo_password = side.ssh_sudo_password

    LOG.info("Removing the bbcp files from hosts of '%s' cloud: %s",
             cloud.position, cloud.hosts_with_bbcp)
    cmd = 'rm -f {path}'
    for host in cloud.hosts_with_bbcp:
        runner = remote_runner.RemoteRunner(host,
                                            user,
                                            password=sudo_password,
                                            sudo=True)
        # best-effort cleanup: a missing binary must not fail the run
        runner.run_ignoring_errors(cmd, path=BBCP_PATH)
Exemplo n.º 20
0
 def reuse_source_volume(self, src_volume):
     """Creates volume on destination with same id from source"""
     volume_id = src_volume['id']
     original_size = src_volume['size']
     # the source volume's backing file must already be present on the
     # destination cloud's storage
     src_volume_object = self.dst_cinder_backend.get_volume_object(
         self.dst_cloud, volume_id)
     LOG.debug("Backing file for source volume on destination cloud: %s",
               src_volume_object)
     # create a throwaway 1GB volume just to obtain a DB record and a
     # backing path; its id is rewritten to the source volume's id below
     fake_volume = copy.deepcopy(src_volume)
     fake_volume['size'] = 1
     dst_volume, dst_volume_object = self._create_volume(fake_volume)
     user = self.dst_cloud.cloud_config.cloud.ssh_user
     password = self.dst_cloud.cloud_config.cloud.ssh_sudo_password
     rr = remote_runner.RemoteRunner(dst_volume_object.host, user,
                                     password=password, sudo=True,
                                     ignore_errors=True)
     # drop the freshly created backing file; the pre-existing source
     # backing file is reused instead
     files.remote_rm(rr, dst_volume_object.path)
     dst_cinder = self.dst_cloud.resources[utils.STORAGE_RESOURCE]
     dst_db = cinder_db.CinderDBBroker(dst_cinder.mysql_connector)
     dst_db.update_volume_id(dst_volume.id, volume_id)
     if original_size > 1:
         # quota usages were charged for a 1GB volume; account the rest
         inc_size = original_size - 1
         project_id = dst_db.get_cinder_volume(volume_id).project_id
         dst_db.inc_quota_usages(project_id, 'gigabytes', inc_size)
         # the string 'None' (not None) is how an absent type shows up here
         volume_type = (None
                        if dst_volume.volume_type == 'None'
                        else dst_volume.volume_type)
         if volume_type:
             dst_db.inc_quota_usages(project_id,
                                     'gigabytes_%s' % volume_type, inc_size)
     # point the DB record at the reused source backing file and restore
     # the real size
     provider_location = self.dst_cinder_backend.get_provider_location(
         self.dst_cloud,
         dst_volume_object.host,
         src_volume_object.path
     )
     dst_db.update_volume(volume_id, provider_location=provider_location,
                          size=original_size)
     return dst_cinder.get_volume_by_id(volume_id)
Exemplo n.º 21
0
def fix_post_cobalt_ephemeral_disk(config, instance):
    """
    Merge ephemeral disk chain if it was broken by cobalt migrate
    :param config: cloud configuration
    :param instance: VM instance (as returned by NovaClient)
    """

    host = getattr(instance, INSTANCE_HOST_ATTRIBUTE)
    instance_name = getattr(instance, INSTANCE_NAME_ATTRIBUTE)

    ssh_user = config.cloud.ssh_user
    ssh_password = config.cloud.ssh_sudo_password
    runner = remote_runner.RemoteRunner(host,
                                        ssh_user,
                                        password=ssh_password,
                                        sudo=True)
    # list block devices attached to the instance's libvirt domain
    blkinfo = runner.run('virsh domblklist {name}', name=instance_name)
    for line in blkinfo.splitlines():
        # only disks whose path contains the instance id are candidates
        if instance.id not in line:
            continue
        tokens = line.split()
        if len(tokens) < 2:
            continue
        _, path = tokens[:2]
        if instance.id not in path:
            continue
        # cobalt leaves a '<disk>.base' backing file behind; only disks
        # still referencing it need their chain flattened
        cobalt_base_path = path + '.base'
        info = runner.run('qemu-img info {input}', input=path)
        if cobalt_base_path not in info:
            continue
        merge_path = path + '.merge'
        # flatten the backing chain into merge_path, then replace the
        # original disk with it.
        # NOTE(review): the two fragments concatenate to '... &&mv ...';
        # the shell parses '&&mv' as '&& mv', but a space would be
        # clearer — confirm intent
        runner.run(
            'qemu-img convert -f qcow2 -O qcow2 {input} {output} &&'
            'mv {output} {input}',
            input=path,
            output=merge_path)
Exemplo n.º 22
0
    def copy(self, context, source_object, destination_object):
        """Copy a source file into a destination block device through an
        ssh tunnel.

        The transfer runs as three detached screen sessions:
        1) destination: netcat listens on an auxiliary port and pipes
           incoming data to ``dd`` writing the target device;
        2) source: an ssh tunnel forwards the auxiliary port to the
           destination host;
        3) source: ``dd`` pushes the file into the local end of the
           tunnel, optionally through ``pv`` for progress display.

        :raises CopyFailed: if any remote command fails
        """
        cfg_src = context.cfg.src
        cfg_dst = context.cfg.dst

        src_user = cfg_src.ssh_user
        dst_user = cfg_dst.ssh_user

        src_host = source_object.host
        dst_host = destination_object.host

        rr_src = remote_runner.RemoteRunner(src_host,
                                            src_user,
                                            sudo=True,
                                            password=cfg_src.ssh_sudo_password)
        rr_dst = remote_runner.RemoteRunner(dst_host,
                                            dst_user,
                                            sudo=True,
                                            password=cfg_dst.ssh_sudo_password)

        # host key checks disabled for the tunnel's ssh hop
        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')

        # Choose auxiliary port for SSH tunnel
        # (random pick from the configured range allows concurrent copies)
        aux_port_start, aux_port_end = \
            context.cfg.migrate.ssh_transfer_port.split('-')
        aux_port = random.randint(int(aux_port_start), int(aux_port_end))

        session_name = self._generate_session_name()
        try:
            progress_view = ""
            if files.is_installed(rr_src, "pv"):
                src_file_size = files.remote_file_size(rr_src,
                                                       source_object.path)
                progress_view = "pv --size {size} --progress | ".format(
                    size=src_file_size)

            # First step: prepare netcat listening on aux_port on dst and
            # forwarding all the data to block device
            # ('sleep 1' gives screen time to detach before we continue)
            rr_dst.run(
                'screen -S {session_name} -d -m /bin/bash -c '
                '\'nc -l {aux_port} | dd of={dst_device} bs=64k\'; '
                'sleep 1',
                session_name=session_name,
                aux_port=aux_port,
                dst_device=destination_object.path)

            # Second step: create SSH tunnel between source and destination
            rr_src.run(
                'screen -S {session_name} -d -m ssh {ssh_opts} -L'
                ' {aux_port}:127.0.0.1:{aux_port} '
                '{dst_user}@{dst_host}; sleep 1',
                session_name=session_name,
                ssh_opts=ssh_opts,
                aux_port=aux_port,
                dst_user=dst_user,
                dst_host=dst_host)

            # Third step: push data through the tunnel
            rr_src.run(
                '/bin/bash -c \'dd if={src_file} bs=64k | '
                '{progress_view} nc 127.0.0.1 {aux_port}\'',
                aux_port=aux_port,
                progress_view=progress_view,
                src_file=source_object.path)

        except remote_runner.RemoteExecutionError as e:
            msg = "Cannot copy {src_object} to {dst_object}: {error}"
            msg = msg.format(src_object=source_object,
                             dst_object=destination_object,
                             error=e.message)
            raise CopyFailed(msg)
        finally:
            # best-effort cleanup of both screen sessions, success or not
            try:
                rr_src.run('screen -X -S {session_name} quit || true',
                           session_name=session_name)
                rr_dst.run('screen -X -S {session_name} quit || true',
                           session_name=session_name)
            except remote_runner.RemoteExecutionError:
                LOG.error('Failed to close copy sessions', exc_info=True)
Exemplo n.º 23
0
    def run(self, info=None, **kwargs):
        """Live-migrate a source VM onto its pre-created destination twin.

        Rewrites the source domain XML so its disk/serial/console paths
        and interfaces match the destination instance, links a shared
        backing-file name on both hosts, stops nova-compute on the
        destination, destroys the placeholder destination domain and then
        performs the libvirt live migration.
        """
        # info maps new (destination) instance id -> instance record that
        # carries the original source id
        new_id, instance = list(info[utils.INSTANCES_TYPE].items())[0]
        old_id = instance['old_id']

        dst_compute = self.dst_cloud.resources[utils.COMPUTE_RESOURCE]
        src_compute = self.src_cloud.resources[utils.COMPUTE_RESOURCE]

        dst_compute.change_status('active', instance_id=new_id)

        dst_instance = dst_compute.get_instance(new_id)
        src_instance = src_compute.get_instance(old_id)

        # do not attempt to live migrate inactive instances
        if src_instance.status.lower() not in ['active', 'verify_resize']:
            LOG.debug(
                "Skipping live migration of VM '%s', because it's "
                "inactive", src_instance.name)
            return

        src_host = instance_host(src_instance)
        dst_host = instance_host(dst_instance)

        src_runner = remote_runner.RemoteRunner(src_host, CONF.src.ssh_user)
        dst_runner = remote_runner.RemoteRunner(dst_host, CONF.dst.ssh_user)

        src_libvirt = libvirt.Libvirt(src_runner)
        dst_libvirt = libvirt.Libvirt(dst_runner)

        src_virsh_name = instance_libvirt_name(src_instance)
        dst_virsh_name = instance_libvirt_name(dst_instance)

        src_vm_xml = src_libvirt.get_xml(src_virsh_name)
        dst_vm_xml = dst_libvirt.get_xml(dst_virsh_name)

        # graft destination paths/interfaces into the source domain XML so
        # the migrated domain points at destination resources
        src_vm_xml.disk_file = dst_vm_xml.disk_file
        src_vm_xml.serial_file = dst_vm_xml.serial_file
        src_vm_xml.console_file = dst_vm_xml.console_file
        src_vm_xml.interfaces = dst_vm_xml.interfaces

        # both hosts symlink their backing file under the same shared name
        # so libvirt accepts the migrated disk chain
        dst_backing_file = dst_libvirt.get_backing_file(new_id)
        src_backing_file = src_libvirt.get_backing_file(old_id)
        migration_backing_file = os.path.join(
            libvirt.nova_instances_path, '_base',
            'migration_disk_{}'.format(old_id))
        timeout = CONF.migrate.boot_timeout
        dst_compute.wait_for_status(new_id,
                                    dst_compute.get_status,
                                    'active',
                                    timeout=timeout)

        # context managers: temp XML on source, symlinks on both sides,
        # nova-compute stopped on destination, backing file mover —
        # teardown happens in reverse order when the block exits
        with files.RemoteTempFile(src_runner,
                                  "migrate-{}".format(old_id),
                                  src_vm_xml.dump()) as migration_file,\
                files.RemoteSymlink(src_runner,
                                    src_backing_file,
                                    migration_backing_file),\
                files.RemoteSymlink(dst_runner,
                                    dst_backing_file,
                                    migration_backing_file),\
                ubuntu.StopNovaCompute(dst_runner),\
                libvirt.QemuBackingFileMover(src_libvirt.runner,
                                             migration_backing_file,
                                             old_id):

            # the destination placeholder domain must be destroyed before
            # the source domain can migrate in under the same name
            destroyer = libvirt.DestNovaInstanceDestroyer(
                dst_libvirt, dst_compute, dst_virsh_name, dst_instance.id)
            try:
                destroyer.do()
                src_libvirt.live_migrate(src_virsh_name, dst_host,
                                         migration_file.filename)
            except remote_runner.RemoteExecutionError:
                # migration failed: restore the destroyed destination domain
                destroyer.undo()
            finally:
                dst_libvirt.move_backing_file(dst_backing_file, new_id)
Exemplo n.º 24
0
def install_ssh_keys(config, *hosts):
    """
    Generate and put public and private SSH keys to hosts that are listed in
    hosts to make cold migration work.

    Acts as a generator-based context manager (note the ``yield`` below;
    presumably decorated with @contextmanager outside this view — confirm):
    keys are installed on entry, and removed/restored from backups on exit.

    :param config: CloudFerry config
    :param hosts: list of hosts where to install keys
    """
    ssh_user = config.cloud.ssh_user
    ssh_password = config.cloud.ssh_sudo_password
    home_path = cfglib.CONF.evacuation.nova_home_path
    nova_user = cfglib.CONF.evacuation.nova_user
    # '\\n' survives shell quoting so 'echo -e' expands it to newlines
    ssh_config = '\\n'.join(
        ['UserKnownHostsFile /dev/null', 'StrictHostKeyChecking no'])
    ssh_path = '/'.join([home_path, '.ssh'])
    ssh_backup_base = '/'.join([home_path, '.ssh_backup'])

    # one fresh keypair shared by all hosts so they can reach each other
    key = RSA.generate(2048, os.urandom)
    public_key = key.exportKey('OpenSSH').replace('\n', '\\n')
    private_key = key.exportKey('PEM').replace('\n', '\\n')

    ssh_backups = {}
    for host in hosts:
        runner = remote_runner.RemoteRunner(host,
                                            ssh_user,
                                            password=ssh_password,
                                            sudo=True)
        # random suffix avoids collisions between repeated runs
        ssh_backup_path = '/'.join(
            [ssh_backup_base, os.urandom(8).encode('hex')])
        try:
            # back up an existing .ssh directory before replacing it
            runner.run('test -e "{path}"', path=ssh_path)
            runner.run('mkdir -p {backup_base}', backup_base=ssh_backup_base)
            runner.run('mv "{path}" "{backup_path}"',
                       path=ssh_path,
                       backup_path=ssh_backup_path)
            ssh_backups[host] = ssh_backup_path
        except remote_runner.RemoteExecutionError:
            LOG.debug("Dot SSH directory not found, skipping backup")

        runner.run('mkdir -p "{path}"', path=ssh_path)
        runner.run('echo -e "{key}" > "{path}"',
                   key=public_key,
                   path='/'.join([ssh_path, 'authorized_keys']))
        runner.run('echo -e "{key}" > "{path}"',
                   key=private_key,
                   path='/'.join([ssh_path, 'id_rsa']))
        runner.run('echo -e "{config}" > "{path}"',
                   config=ssh_config,
                   path='/'.join([ssh_path, 'config']))
        # ssh refuses private keys that are not 0600
        runner.run('chmod 0600 "{path}"', path='/'.join([ssh_path, 'id_rsa']))
        runner.run('chown -R "{user}:{user}" "{path}"',
                   user=nova_user,
                   path=ssh_path)
    try:
        yield
    finally:
        # always remove the installed keys and restore any backups
        for host in hosts:
            runner = remote_runner.RemoteRunner(host,
                                                ssh_user,
                                                password=ssh_password,
                                                sudo=True)
            runner.run('rm -rf "{path}"', path=ssh_path)
            ssh_backup_path = ssh_backups.get(host)
            if ssh_backup_path is not None:
                runner.run_ignoring_errors('mv "{backup_path}" "{path}"',
                                           backup_path=ssh_backup_path,
                                           path=ssh_path)
Exemplo n.º 25
0
def _remote_runner(cloud):
    """Build a sudo-enabled RemoteRunner for the given cloud entry,
    connecting through its configured ssh gateway host."""
    cloud_cfg = cloud[CFG]
    return remote_runner.RemoteRunner(cloud_cfg.get(HOST),
                                      cloud_cfg.ssh_user,
                                      cloud_cfg.ssh_sudo_password,
                                      sudo=True,
                                      gateway=cloud_cfg.get(SSH_HOST))
Exemplo n.º 26
0
    def run(self, **kwargs):
        """Measure scp bandwidth to/from the cloud controller and abort
        if it is below the required fraction of the claimed bandwidth.

        A sparse test file is created remotely, downloaded and then
        uploaded via scp; both transfers are timed and converted to
        megabits per second.

        :raises RuntimeError: when measured speed is below required
        """
        cfg = self.cloud.cloud_config.cloud
        runner = remote_runner.RemoteRunner(cfg.ssh_host, cfg.ssh_user)

        # temp dirs are cleaned up automatically by the context managers
        with files.LocalTempDir('check_band_XXXX') as local_dir:
            with files.RemoteTempDir(runner, 'check_band_XXXX') as remote_dir:
                filename = str(uuid.uuid4())
                local_filepath = os.path.join(local_dir.dirname, filename)
                remote_filepath = os.path.join(remote_dir.dirname, filename)
                claimed_bandw = self.cloud.cloud_config.initial_check.\
                    claimed_bandwidth
                filesize = self.cloud.cloud_config.initial_check.test_file_size
                factor = self.cloud.cloud_config.initial_check.factor
                req_bandwidth = claimed_bandw * factor

                scp_download = "scp {ssh_opts} {user}@{host}:{filepath} " \
                               "{dirname}"
                scp_upload = "scp {ssh_opts} {filepath} {user}@{host}:" \
                             "{dirname}"
                # seek-only dd creates a sparse file: instant to create,
                # full size on the wire
                dd_command = "dd if=/dev/zero of={filepath} bs=1 count=0 " \
                             "seek={filesize}"
                runner.run(dd_command,
                           filepath=remote_filepath,
                           filesize=filesize)

                LOG.info("Checking download speed... Wait please.")
                period_download = utils.timer(
                    local.run,
                    scp_download.format(
                        ssh_opts=ssh_util.default_ssh_options(),
                        user=cfg.ssh_user,
                        host=cfg.ssh_host,
                        filepath=remote_filepath,
                        dirname=local_dir.dirname))

                LOG.info("Checking upload speed... Wait please.")
                # uploads the file the download step just fetched
                period_upload = utils.timer(
                    local.run,
                    scp_upload.format(ssh_opts=ssh_util.default_ssh_options(),
                                      filepath=local_filepath,
                                      user=cfg.ssh_user,
                                      host=cfg.ssh_host,
                                      dirname=remote_dir.dirname))

        # To have Megabits per second
        # NOTE(review): assumes utils.timer returns a float so the
        # division is not integer division — confirm
        upload_speed = filesize / period_upload * 8
        download_speed = filesize / period_download * 8

        if upload_speed < req_bandwidth or download_speed < req_bandwidth:
            raise RuntimeError(
                'Bandwidth is not OK. '
                'Claimed bandwidth: %s Mb/s. '
                'Required speed: %s Mb/s. '
                'Actual upload speed: %.2f Mb/s. '
                'Actual download speed: %.2f Mb/s. '
                'Aborting migration...' %
                (claimed_bandw, req_bandwidth, upload_speed, download_speed))

        LOG.info(
            "Bandwith is OK. "
            "Required speed: %.2f Mb/s. "
            "Upload speed: %.2f Mb/s. "
            "Download speed: %.2f Mb/s", req_bandwidth, upload_speed,
            download_speed)