Example #1
0
 def _cleanup(self, source_vm, destination_vm):
     """Tear down the screen sessions left behind on the compute hosts.

     Only hosts where a session was actually started — tracked via the
     ``started_on_src_host`` / ``started_on_dst_host`` flags — are touched.
     """
     targets = [
         (self.started_on_src_host, self.src_cloud, source_vm),
         (self.started_on_dst_host, self.dst_cloud, destination_vm),
     ]
     for started, cloud, vm in targets:
         if not started:
             continue
         with remote.RemoteExecutor(cloud, vm.compute_node) as executor:
             self._close_screen_session(executor)
Example #2
0
    def discover_one(self, uuid):
        """Discover a single server by UUID and persist it locally.

        Returns the stored server model, or ``None`` when the server's
        compute host is not in the list of available hosts.  Raises
        ``discover.NotFound`` when the server does not exist in the cloud.
        """
        compute_client = clients.compute_client(self.cloud)
        try:
            raw_server = self.retry(
                compute_client.servers.get, uuid,
                expected_exceptions=[exceptions.NotFound])
        except exceptions.NotFound:
            raise discover.NotFound()

        # Servers on unavailable hosts cannot be inspected any further.
        host = getattr(raw_server, EXT_ATTR_HOST)
        if host not in self._list_available_compute_hosts(compute_client):
            LOG.warning('Skipping server %s, host not available.', host)
            return None

        # Convert the raw nova payload into the model representation.
        server = self.load_from_cloud(raw_server)
        # Ephemeral disk details are only visible from the hypervisor host.
        with remote.RemoteExecutor(self.cloud,
                                   server.hypervisor_host) as remote_executor:
            _populate_ephemeral_disks(remote_executor, server)

        # Persist the server (and image membership, when required).
        with model.Session() as session:
            session.store(server)
            if _need_image_membership(server):
                member_uuid = image.ImageMember.make_uuid(
                    server.image, server.tenant)
                server.image_membership = self.find_obj(
                    image.ImageMember, member_uuid)
        return server
Example #3
0
 def upload_server_image(self, boot_disk_info, dst_image_id):
     """Upload a server's boot disk into the destination image service.

     The image file is streamed from the source compute host straight
     into the Glance v1 endpoint using curl over SSH.  Returns ``True``
     when the upload reports HTTP 200, ``False`` otherwise.
     """
     src_cloud = self.config.clouds[self.migration.source]
     dst_cloud = self.config.clouds[self.migration.destination]
     token = clients.get_token(dst_cloud.credential, dst_cloud.scope)
     endpoint = clients.get_endpoint(dst_cloud.credential, dst_cloud.scope,
                                     consts.ServiceType.IMAGE)
     _reset_dst_image_status(self)
     with remote.RemoteExecutor(src_cloud,
                                boot_disk_info['host']) as remote_executor:
         # Double braces keep curl's %{http_code} intact through .format().
         curl_output = remote_executor.sudo(
             'curl -X PUT -w "\\n\\n<http_status=%{{http_code}}>" '
             '-H "X-Auth-Token: {token}" '
             '-H "Content-Type: application/octet-stream" '
             '-H "x-image-meta-disk_format: {disk_format}" '
             '-H "x-image-meta-size: {image_size}" '
             '--upload-file "{image_path}" '
             '"{endpoint}/v1/images/{image_id}"',
             token=token,
             endpoint=endpoint,
             image_id=dst_image_id,
             image_path=boot_disk_info['base_path'],
             disk_format=boot_disk_info['base_format'],
             image_size=boot_disk_info['base_size'])
         status = re.search(r'<http_status=(\d+)>', curl_output)
         if status is not None and int(status.group(1)) == 200:
             return True
         LOG.error('Failed to upload image: %s', curl_output)
         return False
Example #4
0
    def discover_all(self):
        """Discover every server in the cloud and store them locally.

        Servers are grouped by hypervisor host so that ephemeral disk
        information can be collected with one SSH session per host.
        """
        compute_client = clients.compute_client(self.cloud)
        avail_hosts = self._list_available_compute_hosts(compute_client)
        servers = {}

        # Nova caps a single list call at osapi_max_limit entries (1000
        # by default), so enumerate servers tenant by tenant.
        for tenant in self._get_tenants():
            LOG.debug('Discovering servers from cloud "%s" tenant "%s"',
                      self.cloud.name, tenant.name)
            raw_server_list = self.retry(
                compute_client.servers.list,
                search_opts={
                    'all_tenants': True,
                    'tenant_id': tenant.id,
                },
                returns_iterable=True)
            for raw_server in raw_server_list:
                host = getattr(raw_server, EXT_ATTR_HOST)
                if host not in avail_hosts:
                    LOG.warning(
                        'Skipping server %s in tenant %s, host not '
                        'available.', host, tenant.name)
                    continue
                # Convert server data to model conforming format
                server = self.load_from_cloud(raw_server)
                hyper_host = getattr(raw_server, EXT_ATTR_HYPER_HOST)
                servers.setdefault(hyper_host, []).append(server)

        # Ephemeral disk details are only visible from the hypervisor host.
        # TODO: work with different servers in parallel
        for hyper_host, host_servers in servers.items():
            LOG.debug(
                'Getting ephemeral disks information from cloud %s '
                'host %s', self.cloud.name, hyper_host)
            with remote.RemoteExecutor(self.cloud,
                                       hyper_host) as remote_executor:
                for server in host_servers:
                    _populate_ephemeral_disks(remote_executor, server)

        # Persist everything in one local database session.
        with model.Session() as session:
            for host_servers in servers.values():
                for server in host_servers:
                    session.store(server)
                    if _need_image_membership(server):
                        member_uuid = image.ImageMember.make_uuid(
                            server.image, server.tenant)
                        server.image_membership = self.find_obj(
                            image.ImageMember, member_uuid)
Example #5
0
 def migrate(self, **kwargs):
     """Hot-plug a virtio NIC on a local bridge into the target VM.

     Creates the ``cn_local`` bridge on the VM's compute node (if it
     does not already exist), assigns it an unused address, brings it
     up, and attaches a bridge interface with a random MAC to the
     instance via virsh.
     """
     target_vm = kwargs.get(self.var_name)
     bridge = 'cn_local'
     with remote.RemoteExecutor(self.cloud,
                                target_vm.compute_node) as executor:
         # '|| true' keeps the setup idempotent when the bridge/address
         # is already present.
         executor.sudo('brctl addbr {bridge} || true', bridge=bridge)
         executor.sudo('ip addr add {cidr} dev {bridge} || true',
                       bridge=bridge,
                       cidr=_first_unused_address(self.cloud))
         executor.sudo('ip link set dev {bridge} up', bridge=bridge)
         executor.sudo(
             'virsh attach-interface {instance} --type bridge '
             '--source {bridge} --mac {mac_address} '
             '--model virtio',
             instance=target_vm.instance_name,
             bridge=bridge,
             mac_address=_random_mac())
Example #6
0
 def remote_executor(self, hostname, key_file=None, ignore_errors=False):
     """Context manager yielding a ``RemoteExecutor`` for *hostname*.

     The configured SSH key and the optional *key_file* argument are
     loaded into the SSH agent up front; the connection is always
     closed on exit, even if the body raises.
     """
     # pylint: disable=no-member
     settings = self.ssh_settings
     key_files = [k for k in (settings.key_file, key_file)
                  if k is not None]
     if key_files:
         utils.ensure_ssh_key_added(key_files)
     # NOTE(review): only settings.key_file is handed to RemoteExecutor;
     # the *key_file* argument is merely added to the agent above —
     # confirm this asymmetry is intentional.
     try:
         yield remote.RemoteExecutor(
             hostname, settings.username,
             sudo_password=settings.sudo_password,
             gateway=settings.gateway,
             connection_attempts=settings.connection_attempts,
             cipher=settings.cipher,
             key_file=settings.key_file,
             ignore_errors=ignore_errors)
     finally:
         remote.RemoteExecutor.close_connection(hostname)
Example #7
0
    def discover_one(self, uuid):
        """Discover a compute node (identified by its hostname) and store it.

        Interface discovery is best-effort: when the remote command
        fails, the node is stored with an empty interface mapping.
        """
        hostname = uuid
        interfaces = {}

        with remote.RemoteExecutor(self.cloud, hostname) as remote_executor:
            try:
                output = remote_executor.sudo('ip addr show')
                interfaces = _parse_interfaces(output)
            except remote.RemoteFailure:
                # Warn briefly; full traceback only at debug level.
                LOG.warn('Unable to get network interfaces of node: %s',
                         hostname)
                LOG.debug('Unable to get network interfaces of node: %s',
                          hostname,
                          exc_info=True)
                interfaces = {}

        # Persist the node in the local database.
        node_data = {
            'hostname': hostname,
            'interfaces': interfaces,
        }
        with model.Session() as session:
            compute_node = self.load_from_cloud(node_data)
            session.store(compute_node)
        return compute_node
Example #8
0
    def migrate(self, source_obj, source_vm, destination_vm, *args, **kwargs):
        """Stream a volume's block device from source VM to destination VM.

        Builds a netcat-over-SSH pipeline: the destination side listens
        (inside a detached screen session) for zstd-compressed data and
        writes it to /dev/vdb; the source side forwards a local port to
        the destination host and streams its /dev/vdb through zstd into
        that port.  The ``started_on_*_host`` flags record which screen
        sessions ``_cleanup`` must tear down afterwards.
        """
        # Session name is unique per volume so parallel transfers don't clash.
        self.session_name = 'vol_{}_{}'.format(source_obj.object_id.cloud,
                                               source_obj.object_id.id)
        port = _allocate_port(source_vm.hypervisor_hostname, self.src_cloud)
        src_ip = source_vm.metadata['internal_address']
        dst_ip = destination_vm.metadata['internal_address']
        listen_ip = _first_unused_address(self.src_cloud).ip
        dst_private_key = self.dst_cloud.ssh_settings.private_key
        agent = remote.SSHAgent()

        try:
            if dst_private_key is not None:
                agent.start()
                agent.add_key(dst_private_key)

            # Destination side: receive, decompress and write to /dev/vdb.
            # Runs in a detached screen so it outlives this SSH connection;
            # 'sleep 1' gives screen time to spawn before we disconnect.
            with remote.RemoteExecutor(self.dst_cloud,
                                       destination_vm.compute_node) as dst_re:
                _wait_ip_accessible(self.dst_cloud, dst_re, dst_ip)
                key_path = _deploy_pkey(dst_re)
                dst_re.run(
                    'screen -S {session} -d -m '
                    'ssh -o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no -i {key_path} '
                    'root@{dst_ip} /bin/sh -c '
                    '"\'nc -l {dst_ip} 11111 | '
                    '/usr/local/bin/zstd -d | '
                    'dd of=/dev/vdb bs=512k\'"; sleep 1',
                    session=self.session_name,
                    key_path=key_path,
                    dst_ip=dst_ip)
                self.started_on_dst_host = True

            # Source side: forward the allocated local port to the
            # destination's nc listener, then push the volume through it.
            with remote.RemoteExecutor(self.src_cloud,
                                       source_vm.compute_node) as src_re:
                _wait_ip_accessible(self.src_cloud, src_re, src_ip)
                key_path = _deploy_pkey(src_re)
                # Port forwarding to remote machine
                src_re.run(
                    'screen -S {session} -d -m ssh -N '
                    '-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no '
                    '-L {listen_ip}:{listen_port}:{forward_ip}:11111 '
                    '{dst_user}@{dst_address}; sleep 1',
                    agent=agent,
                    session=self.session_name,
                    listen_ip=listen_ip,
                    listen_port=port,
                    forward_ip=dst_ip,
                    dst_address=dst_re.hostname,
                    dst_user=self.dst_cloud.ssh_settings.username)
                self.started_on_src_host = True

                LOG.info('Starting to transfer %dGb volume %s',
                         source_obj.size, source_obj.object_id)
                data_transfer_start = time.time()
                # Blocking transfer: dd reads /dev/vdb, pv reports rate
                # every 30s, zstd compresses, nc ships via the forwarded port.
                src_re.run(
                    'ssh -t -o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no -i {key_path} '
                    'root@{src_ip} /bin/sh -c '
                    '"\'dd if=/dev/vdb bs=512k | pv -r -i 30 | '
                    '/usr/local/bin/zstd | '
                    'nc -w 5 {listen_ip} {listen_port}\'"',
                    session=self.session_name,
                    key_path=key_path,
                    listen_port=port,
                    listen_ip=listen_ip,
                    src_ip=src_ip)
                data_transfer_dur = time.time() - data_transfer_start

                LOG.info(
                    'Transferred %dGb volume in %.1f seconds '
                    '(avg. speed: %.2fMb/s)', source_obj.size,
                    data_transfer_dur,
                    source_obj.size * 1024 / data_transfer_dur)
        finally:
            # Always close screen sessions and stop the SSH agent, even
            # when the transfer fails partway through.
            self._cleanup(source_vm, destination_vm)
            agent.terminate()