Beispiel #1
0
    def _test(self, type_, unit, total, used, requested, limit):
        """Check whether ``requested`` units of a resource fit under ``limit``.

        Logs the current totals, then returns None when the claim is safe
        (or when no limit is configured) and a translated failure message
        string otherwise.
        """
        LOG.info(_LI('Total %(type)s: %(total)d %(unit)s, used: %(used).02f '
                     '%(unit)s'),
                 {'type': type_, 'total': total, 'unit': unit, 'used': used},
                 instance=self.instance)

        if limit is None:
            # No limit configured: the resource is treated as unlimited.
            LOG.info(_LI('%(type)s limit not specified, defaulting to '
                         'unlimited'), {'type': type_}, instance=self.instance)
            return

        headroom = limit - used

        # Oversubscribed resource policy info:
        LOG.info(_LI('%(type)s limit: %(limit).02f %(unit)s, '
                     'free: %(free).02f %(unit)s'),
                 {'type': type_, 'limit': limit, 'free': headroom,
                  'unit': unit},
                 instance=self.instance)

        if requested <= headroom:
            # Claim fits; implicit success is signalled by returning None.
            return

        return (_('Free %(type)s %(free).02f '
                  '%(unit)s < requested %(requested)d %(unit)s') %
                {'type': type_, 'free': headroom, 'unit': unit,
                 'requested': requested})
Beispiel #2
0
    def __exit__(self, ex_type, ex_value, ex_traceback):
        """Translate exceptions raised in the managed block into Faults."""
        if not ex_value:
            # Nothing was raised; nothing to suppress or translate.
            return True

        if isinstance(ex_value, exception.Forbidden):
            msg = ex_value.format_message()
            raise Fault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            # Propagate untouched so API version negotiation can handle it.
            raise
        elif isinstance(ex_value, exception.Invalid):
            msg = ex_value.format_message()
            raise Fault(exception.ConvertedException(code=ex_value.code,
                                                     explanation=msg))
        elif isinstance(ex_value, TypeError):
            LOG.error(_LE('Exception handling resource: %s'), ex_value,
                      exc_info=(ex_type, ex_value, ex_traceback))
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), ex_value)
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
Beispiel #3
0
    def __exit__(self, ex_type, ex_value, ex_traceback):
        """Map exceptions from the wrapped block onto API Fault responses."""
        if not ex_value:
            return True

        # Each branch below raises, so plain ``if`` statements are
        # equivalent to the usual elif chain.
        if isinstance(ex_value, exception.Forbidden):
            raise Fault(webob.exc.HTTPForbidden(
                explanation=ex_value.format_message()))

        if isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            # Re-raise as-is; the versioning machinery handles this one.
            raise

        if isinstance(ex_value, exception.Invalid):
            raise Fault(exception.ConvertedException(
                code=ex_value.code,
                explanation=ex_value.format_message()))

        if isinstance(ex_value, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_LE('Exception handling resource: %s'), ex_value,
                      exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())

        if isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), ex_value)
            raise ex_value

        if isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
Beispiel #4
0
    def setup_basic_filtering(self, instance, network_info):
        """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
        LOG.info(_LI('Called setup_basic_filtering in nwfilter'),
                 instance=instance)

        if self.handle_security_groups:
            # No point in setting up a filter set that we'll be overriding
            # anyway.
            return

        LOG.info(_LI('Ensuring static filters'), instance=instance)
        self._ensure_static_filters()

        # Pre-compute both filter variants; each VIF picks one below.
        base_filters = {
            False: self.get_base_filter_list(instance, False),
            True: self.get_base_filter_list(instance, True),
        }

        for vif in network_info:
            # A VIF on any subnet with a DHCP server gets the DHCP variant.
            has_dhcp = any(subnet.get_meta('dhcp_server')
                           for subnet in vif['network']['subnets'])
            filter_xml = self._get_instance_filter_xml(instance,
                                                       base_filters[has_dhcp],
                                                       vif)
            self._define_filter(filter_xml)
Beispiel #5
0
def find_guest_agent(base_dir):
    """Try to locate a guest agent under ``base_dir``.

    Looks for the agent file configured via ``CONF.xenserver.agent_path``;
    its presence indicates the image can reconfigure its own networking
    from xenstore data.

    :param base_dir: root of the mounted guest filesystem to search.
    :returns: True if a network-injection-capable guest agent is present,
        False otherwise (including when agents are disabled via config).
    """
    if CONF.xenserver.disable_agent:
        return False

    agent_rel_path = CONF.xenserver.agent_path
    agent_path = os.path.join(base_dir, agent_rel_path)
    if os.path.isfile(agent_path):
        # The presence of the guest agent
        # file indicates that this instance can
        # reconfigure the network from xenstore data,
        # so manipulation of files in /etc is not
        # required
        # BUG FIX: the adjacent string literals previously rendered as
        # "...will not bemanipulated" (missing space between fragments).
        LOG.info(_LI('XenServer tools installed in this '
                     'image are capable of network injection.  '
                     'Networking files will not be '
                     'manipulated'))
        return True
    xe_daemon_filename = os.path.join(base_dir, 'usr', 'sbin', 'xe-daemon')
    if os.path.isfile(xe_daemon_filename):
        LOG.info(_LI('XenServer tools are present '
                     'in this image but are not capable '
                     'of network injection'))
    else:
        LOG.info(_LI('XenServer tools are not '
                     'installed in this image'))
    return False
Beispiel #6
0
 def _report_final_resource_view(self, resources):
     """Report final calculate of physical memory, used virtual memory,
     disk, usable vCPUs, used virtual CPUs and PCI devices,
     including instance calculations and in-progress resource claims. These
     values will be exposed via the compute node table to the scheduler.
     """
     # Only log the vCPU totals when the driver actually reported them.
     vcpus = resources['vcpus']
     if vcpus:
         tcpu = vcpus
         ucpu = resources['vcpus_used']
         LOG.info(_LI("Total usable vcpus: %(tcpu)s, "
                     "total allocated vcpus: %(ucpu)s"),
                     {'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
     else:
         # No vCPU data available; report zeros in the summary below
         # rather than omitting the fields.
         tcpu = 0
         ucpu = 0
     # pci_device_pools may be absent from resources; .get() yields None.
     pci_device_pools = resources.get('pci_device_pools')
     LOG.info(_LI("Final resource view: "
                  "name=%(node)s "
                  "phys_ram=%(phys_ram)sMB "
                  "used_ram=%(used_ram)sMB "
                  "phys_disk=%(phys_disk)sGB "
                  "used_disk=%(used_disk)sGB "
                  "total_vcpus=%(total_vcpus)s "
                  "used_vcpus=%(used_vcpus)s "
                  "pci_stats=%(pci_stats)s"),
              {'node': self.nodename,
               'phys_ram': resources['memory_mb'],
               'used_ram': resources['memory_mb_used'],
               'phys_disk': resources['local_gb'],
               'used_disk': resources['local_gb_used'],
               'total_vcpus': tcpu,
               'used_vcpus': ucpu,
               'pci_stats': pci_device_pools})
Beispiel #7
0
    def sync_instance_info(self, context, host_name, instance_uuids):
        """Receives the uuids of the instances on a host.

        This method is periodically called by the compute nodes, which send a
        list of all the UUID values for the instances on that node. This is
        used by the scheduler's HostManager to detect when its view of the
        compute node's instances is out of sync.
        """
        host_info = self._instance_info.get(host_name)
        if not host_info:
            # First contact from this host: build its instance view fresh.
            self._recreate_instance_info(context, host_name)
            LOG.info(
                _LI("Received a sync request from an unknown host '%s'. "
                    "Re-created its InstanceList."), host_name)
            return

        known_uuids = set(host_info["instances"].keys())
        reported_uuids = set(instance_uuids)
        if known_uuids != reported_uuids:
            # Our view has drifted; rebuild it from the database.
            self._recreate_instance_info(context, host_name)
            LOG.info(
                _LI("The instance sync for host '%s' did not match. "
                    "Re-created its InstanceList."), host_name)
            return

        host_info["updated"] = True
        LOG.info(_LI("Successfully synced instances from host '%s'."),
                 host_name)
Beispiel #8
0
def fetch_image_ova(context, instance, session, vm_name, ds_name,
                    vm_folder_ref, res_pool_ref):
    """Download the OVA image from the glance image server to the
    Nova compute node.

    The OVA is first written to a temporary file, then unpacked: the
    .ovf descriptor is read to learn the vmdk's name, and that vmdk is
    streamed into a newly imported vApp.  The imported VM is immediately
    unregistered afterwards (only its uploaded disk is wanted).

    :raises: ImageUnacceptable if no vmdk could be extracted from the OVA.
    """
    image_ref = instance.image_ref
    LOG.debug(
        "Downloading OVA image file %(image_ref)s to the ESX "
        "as VM named '%(vm_name)s'", {
            'image_ref': image_ref,
            'vm_name': vm_name
        },
        instance=instance)

    metadata = IMAGE_API.get(context, image_ref)
    file_size = int(metadata['size'])

    vm_import_spec = _build_import_spec_for_import_vapp(
        session, vm_name, ds_name)

    read_iter = IMAGE_API.download(context, image_ref)
    ova_fd, ova_path = tempfile.mkstemp()

    try:
        # NOTE(arnaud): Look to eliminate first writing OVA to file system
        with os.fdopen(ova_fd, 'w') as fp:
            for chunk in read_iter:
                fp.write(chunk)
        with tarfile.open(ova_path, mode="r") as tar:
            vmdk_name = None
            for tar_info in tar:
                if tar_info and tar_info.name.endswith(".ovf"):
                    extracted = tar.extractfile(tar_info.name)
                    xmlstr = extracted.read()
                    vmdk_name = get_vmdk_name_from_ovf(xmlstr)
                elif vmdk_name and tar_info.name.startswith(vmdk_name):
                    # Actual file name is <vmdk_name>.XXXXXXX
                    # NOTE(review): this relies on the .ovf member preceding
                    # the vmdk in the archive -- this branch can only fire
                    # once vmdk_name has been set above.
                    extracted = tar.extractfile(tar_info.name)
                    write_handle = rw_handles.VmdkWriteHandle(
                        session, session._host, session._port, res_pool_ref,
                        vm_folder_ref, vm_import_spec, file_size)
                    start_transfer(context,
                                   extracted,
                                   file_size,
                                   write_file_handle=write_handle)
                    extracted.close()
                    LOG.info(_LI("Downloaded OVA image file %(image_ref)s"),
                             {'image_ref': instance.image_ref},
                             instance=instance)
                    imported_vm_ref = write_handle.get_imported_vm()
                    # The import registered a VM; unregister it so only the
                    # uploaded disk remains behind.
                    session._call_method(session.vim, "UnregisterVM",
                                         imported_vm_ref)
                    LOG.info(_LI("The imported VM was unregistered"),
                             instance=instance)
                    return
            raise exception.ImageUnacceptable(
                reason=_("Extracting vmdk from OVA failed."),
                image_id=image_ref)
    finally:
        # Always clean up the temporary OVA file.
        os.unlink(ova_path)
Beispiel #9
0
    def handshake(self, req, connect_info, sockets):
        """Execute hypervisor-specific vnc auth handshaking (if needed).

        Connects to the console host/port and, when an
        ``internal_access_path`` is given, issues an HTTP CONNECT and
        validates the proxy's response before wiring the client and
        server sockets together in ``sockets``.
        """
        host = connect_info['host']
        port = int(connect_info['port'])

        server = eventlet.connect((host, port))

        # Handshake as necessary
        if connect_info.get('internal_access_path'):
            server.sendall("CONNECT %s HTTP/1.1\r\n\r\n" %
                           connect_info['internal_access_path'])

            data = ""
            while True:
                b = server.recv(1)
                if b:
                    data += b
                    if data.find("\r\n\r\n") != -1:
                        # BUG FIX: str.find() returns -1 when "200" is
                        # absent, which is truthy, so the previous
                        # ``if not ...find("200"):`` check let real errors
                        # through (it only fired when "200" sat at offset
                        # 0).  Treat a missing "200" in the status line as
                        # the failure case.
                        if data.split("\r\n")[0].find("200") == -1:
                            LOG.info(_LI("Error in handshake format: %s"),
                                     data)
                            return
                        break

                if not b or len(data) > 4096:
                    LOG.info(_LI("Error in handshake: %s"), data)
                    return

        client = req.environ['eventlet.input'].get_socket()
        client.sendall("HTTP/1.1 200 OK\r\n\r\n")
        sockets['client'] = client
        sockets['server'] = server
Beispiel #10
0
        def inner_verify_checksum():
            """Verify ``base_file`` against its stored checksum.

            Returns True when the checksum matches (or was verified
            recently enough to skip re-hashing), False on a mismatch, and
            None when no stored checksum exists.  ``base_file``,
            ``img_id`` and ``create_if_missing`` come from the enclosing
            scope.
            """
            (stored_checksum,
             stored_timestamp) = read_stored_checksum(base_file,
                                                      timestamped=True)
            if stored_checksum:
                # NOTE(mikal): Checksums are timestamped. If we have recently
                # checksummed (possibly on another compute node if we are using
                # shared storage), then we don't need to checksum again.
                if (stored_timestamp and time.time() - stored_timestamp <
                        CONF.libvirt.checksum_interval_seconds):
                    return True

                # NOTE(mikal): If there is no timestamp, then the checksum was
                # performed by a previous version of the code.
                if not stored_timestamp:
                    # Re-store the value so it gains a timestamp for next time.
                    write_stored_info(base_file,
                                      field='sha1',
                                      value=stored_checksum)

                current_checksum = _hash_file(base_file)

                if current_checksum != stored_checksum:
                    LOG.error(
                        _LE('image %(id)s at (%(base_file)s): image '
                            'verification failed'), {
                                'id': img_id,
                                'base_file': base_file
                            })
                    return False

                else:
                    return True

            else:
                LOG.info(
                    _LI('image %(id)s at (%(base_file)s): image '
                        'verification skipped, no hash stored'), {
                            'id': img_id,
                            'base_file': base_file
                        })

                # NOTE(mikal): If the checksum file is missing, then we should
                # create one. We don't create checksums when we download images
                # from glance because that would delay VM startup.
                if CONF.libvirt.checksum_base_images and create_if_missing:
                    LOG.info(
                        _LI('%(id)s (%(base_file)s): generating '
                            'checksum'), {
                                'id': img_id,
                                'base_file': base_file
                            })
                    write_stored_checksum(base_file)

                return None
Beispiel #11
0
    def _create_config_drive(self, instance, injected_files, admin_password,
                             network_info):
        """Build a config drive ISO (optionally converted to a VHD) for the
        instance and return the path of the file to attach.
        """
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.UnsupportedConfigDriveFormatException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)

        LOG.info(_LI('Using config drive for instance'), instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            # Expose the admin password through the config drive metadata.
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(
            instance, content=injected_files, extra_md=extra_md,
            network_info=network_info)

        instance_path = self._pathutils.get_instance_dir(instance.name)
        configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
        LOG.info(_LI('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso}, instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Creating config drive failed with '
                                  'error: %s'), e, instance=instance)

        if CONF.hyperv.config_drive_cdrom:
            # Attach the ISO directly as a CD-ROM image.
            return configdrive_path_iso

        # Otherwise convert the ISO into a VHD the guest can use as a disk,
        # then drop the intermediate ISO.
        configdrive_path = os.path.join(instance_path, 'configdrive.vhd')
        utils.execute(CONF.hyperv.qemu_img_cmd, 'convert', '-f', 'raw',
                      '-O', 'vpc', configdrive_path_iso, configdrive_path,
                      attempts=1)
        self._pathutils.remove(configdrive_path_iso)
        return configdrive_path
Beispiel #12
0
    def _create_config_drive(self, instance, injected_files, admin_password,
                             network_info):
        """Generate a config drive for the instance.

        Produces ``configdrive.iso`` in the instance directory and, unless
        CD-ROM config drives are configured, converts it to
        ``configdrive.vhd``.  Returns the path of the resulting file.
        """
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.UnsupportedConfigDriveFormatException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)

        LOG.info(_LI('Using config drive for instance'), instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            # Ship the admin password in the config drive metadata.
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(
            instance, content=injected_files, extra_md=extra_md,
            network_info=network_info)

        instance_path = self._pathutils.get_instance_dir(instance.name)
        iso_path = os.path.join(instance_path, 'configdrive.iso')
        LOG.info(_LI('Creating config drive at %(path)s'),
                 {'path': iso_path}, instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(iso_path)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Creating config drive failed with '
                                  'error: %s'),
                              e, instance=instance)

        if CONF.hyperv.config_drive_cdrom:
            return iso_path

        # Convert the ISO into a VHD and remove the intermediate file.
        vhd_path = os.path.join(instance_path, 'configdrive.vhd')
        utils.execute(CONF.hyperv.qemu_img_cmd, 'convert', '-f', 'raw',
                      '-O', 'vpc', iso_path, vhd_path, attempts=1)
        self._pathutils.remove(iso_path)
        return vhd_path
Beispiel #13
0
    def _remove_old_enough_file(self,
                                base_file,
                                maxage,
                                remove_sig=True,
                                remove_lock=True):
        """Remove a single swap or base file if it is old enough."""
        exists, age = self._get_age_of_file(base_file)
        if not exists:
            return

        if age < maxage:
            LOG.info(_LI('Base or swap file too young to remove: %s'),
                     base_file)
            return

        LOG.info(_LI('Removing base or swap file: %s'), base_file)
        try:
            os.remove(base_file)
            if remove_sig:
                # Drop the companion info/signature file too, if present.
                signature = get_info_filename(base_file)
                if os.path.exists(signature):
                    os.remove(signature)
        except OSError as e:
            LOG.error(_LE('Failed to remove %(base_file)s, '
                          'error was %(error)s'),
                      {'base_file': base_file, 'error': e})

        # Lock cleanup is attempted even when removing the file failed,
        # matching the original best-effort behaviour.
        if remove_lock:
            try:
                # NOTE(jichenjc) The lock file will be constructed first
                # time the image file was accessed. the lock file looks
                # like patron-9e881789030568a317fad9daae82c5b1c65e0d4a
                # or patron-03d8e206-6500-4d91-b47d-ee74897f9b4e
                # according to the original file name
                lock_file = os.path.split(base_file)[-1]
                lockutils.remove_external_lock_file(
                    lock_file,
                    lock_file_prefix='patron-',
                    lock_path=self.lock_path)
            except OSError as e:
                LOG.debug('Failed to remove %(lock_file)s, '
                          'error was %(error)s',
                          {'lock_file': lock_file, 'error': e})
Beispiel #14
0
    def _error(self, inner, req):
        """Convert an unhandled exception into a wsgi.Fault response.

        Logs the error, emits an API-fault notification, and only fills
        in the Fault's explanation for exceptions marked ``safe``.
        """
        LOG.exception(_LE("Caught error: %s"), unicode(inner))

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            # Some exceptions carry code=None; normalize to a server error.
            status = 500

        LOG.info(_LI("%(url)s returned with HTTP %(status)d"),
                 dict(url=req.url, status=status))
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            user_locale = req.best_match_language()
            outer.explanation = '%s: %s' % (
                inner.__class__.__name__,
                translate(inner.message, user_locale))

        notifications.send_api_fault(req.url, status, inner)
        return wsgi.Fault(outer)
Beispiel #15
0
    def instance_for_image(imgfile, imgfmt, partition):
        """Return a VFS implementation able to handle the given image.

        Prefers the libguestfs-backed VFSGuestFS, falling back to
        VFSLocalFS when guestfs cannot be loaded at all.
        """
        LOG.debug("Instance for image imgfile=%(imgfile)s "
                  "imgfmt=%(imgfmt)s partition=%(partition)s",
                  {'imgfile': imgfile, 'imgfmt': imgfmt,
                   'partition': partition})

        vfs = None
        try:
            LOG.debug("Using primary VFSGuestFS")
            vfs = importutils.import_object(
                "patron.virt.disk.vfs.guestfs.VFSGuestFS",
                imgfile, imgfmt, partition)
            if not VFS.guestfs_ready:
                # Inspect for capabilities and keep
                # track of the result only if succeeded.
                vfs.inspect_capabilities()
                VFS.guestfs_ready = True
            return vfs
        except exception.PatronException:
            if vfs is None:
                # guestfs itself could not be loaded: fall through to the
                # local-filesystem implementation below.
                LOG.info(_LI("Unable to import guestfs, "
                             "falling back to VFSLocalFS"))
            else:
                # guestfs loaded but capability inspection failed.
                raise

        return importutils.import_object(
            "patron.virt.disk.vfs.localfs.VFSLocalFS",
            imgfile, imgfmt, partition)
Beispiel #16
0
def write_stored_info(target, field=None, value=None):
    """Write information about an image.

    Persists ``field``/``value`` (plus a ``<field>-timestamp`` entry) into
    the image's info file under an external lock.  A falsy ``field`` is a
    no-op.
    """
    if not field:
        return

    info_file = get_info_filename(target)
    LOG.info(_LI('Writing stored info to %s'), info_file)
    fileutils.ensure_tree(os.path.dirname(info_file))

    lock_name = 'info-%s' % os.path.split(target)[-1]
    lock_path = os.path.join(CONF.instances_path, 'locks')

    @utils.synchronized(lock_name, external=True, lock_path=lock_path)
    def _locked_write(path, key, val):
        # Merge into any existing info so other fields are preserved.
        info = {}
        if os.path.exists(path):
            with open(path, 'r') as f:
                info = _read_possible_json(f.read(), path)

        info[key] = val
        info['%s-timestamp' % key] = time.time()

        with open(path, 'w') as f:
            f.write(jsonutils.dumps(info))

    _locked_write(info_file, field, value)
Beispiel #17
0
    def authorize_console(self, context, token, console_type, host, port,
                          internal_access_path, instance_uuid,
                          access_url=None):
        """Cache a console auth token and its connection details.

        Stores the token in memcached and refreshes the per-instance list
        of still-valid tokens.
        """
        token_dict = {'token': token,
                      'instance_uuid': instance_uuid,
                      'console_type': console_type,
                      'host': host,
                      'port': port,
                      'internal_access_path': internal_access_path,
                      'access_url': access_url,
                      'last_activity_at': time.time()}
        data = jsonutils.dumps(token_dict)

        # We need to log the warning message if the token is not cached
        # successfully, because the failure will cause the console for
        # instance to not be usable.
        if not self.mc.set(token.encode('UTF-8'),
                           data, CONF.console_token_ttl):
            LOG.warning(_LW("Token: %(token)s failed to save into memcached."),
                        {'token': token})

        # Rebuild the per-instance token list, dropping entries that have
        # already expired from the cache, then add the new token.
        live_tokens = [tok
                       for tok in self._get_tokens_for_instance(instance_uuid)
                       if self.mc.get(tok.encode('UTF-8'))]
        live_tokens.append(token)

        if not self.mc.set(instance_uuid.encode('UTF-8'),
                           jsonutils.dumps(live_tokens)):
            LOG.warning(_LW("Instance: %(instance_uuid)s failed to save "
                            "into memcached"),
                        {'instance_uuid': instance_uuid})

        LOG.info(_LI("Received Token: %(token)s, %(token_dict)s"),
                 {'token': token, 'token_dict': token_dict})
Beispiel #18
0
    def associate_floating_ip(self,
                              context,
                              instance,
                              floating_address,
                              fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.

        Ensures floating ip is allocated to the project in context.
        Does not verify ownership of the fixed ip. Caller is assumed to have
        checked that the instance is properly owned.

        """
        orig_instance_uuid = self.floating_manager.associate_floating_ip(
            context, floating_address, fixed_address, affect_auto_assigned)

        if not orig_instance_uuid:
            return

        # The address was stolen from another instance; log it and
        # invalidate that instance's cached network info.
        LOG.info(_LI('re-assign floating IP %(address)s from '
                     'instance %(instance_id)s'),
                 dict(address=floating_address,
                      instance_id=orig_instance_uuid))
        orig_instance = objects.Instance.get_by_uuid(context,
                                                     orig_instance_uuid)

        # purge cached nw info for the original instance
        base_api.update_instance_cache_with_nw_info(self, context,
                                                    orig_instance)
Beispiel #19
0
    def attach_volume(self, context, **kwargs):
        """Shadows the device and passes an unencrypted version to the
        instance.

        Transparent disk encryption is achieved by mounting the volume via
        dm-crypt and passing the resulting device to the instance. The
        instance is unaware of the underlying encryption due to modifying the
        original symbolic link to refer to the device mounted by dm-crypt.
        """
        raw_key = self._get_key(context).get_encoded()
        # LUKS uses a passphrase rather than a raw key -- convert to string
        passphrase = ''.join(hex(x).replace('0x', '') for x in raw_key)

        try:
            self._open_volume(passphrase, **kwargs)
        except processutils.ProcessExecutionError as e:
            if e.exit_code != 1 or is_luks(self.dev_path):
                raise
            # the device has never been formatted; format it and try again
            LOG.info(_LI("%s is not a valid LUKS device;"
                         " formatting device for first use"),
                     self.dev_path)
            self._format_volume(passphrase, **kwargs)
            self._open_volume(passphrase, **kwargs)

        # modify the original symbolic link to refer to the decrypted device
        utils.execute('ln', '--symbolic', '--force',
                      '/dev/mapper/%s' % self.dev_name, self.symlink_path,
                      run_as_root=True, check_exit_code=True)
Beispiel #20
0
    def attach_volume(self, context, **kwargs):
        """Shadows the device and passes an unencrypted version to the
        instance.

        Transparent disk encryption is achieved by mounting the volume via
        dm-crypt and passing the resulting device to the instance. The
        instance is unaware of the underlying encryption due to modifying the
        original symbolic link to refer to the device mounted by dm-crypt.
        """
        encoded_key = self._get_key(context).get_encoded()
        # LUKS uses a passphrase rather than a raw key -- convert to string
        passphrase = ''.join(hex(byte).replace('0x', '')
                             for byte in encoded_key)

        try:
            self._open_volume(passphrase, **kwargs)
        except processutils.ProcessExecutionError as e:
            first_use = e.exit_code == 1 and not is_luks(self.dev_path)
            if not first_use:
                raise
            # the device has never been formatted; format it and try again
            LOG.info(
                _LI("%s is not a valid LUKS device;"
                    " formatting device for first use"), self.dev_path)
            self._format_volume(passphrase, **kwargs)
            self._open_volume(passphrase, **kwargs)

        # modify the original symbolic link to refer to the decrypted device
        utils.execute('ln',
                      '--symbolic',
                      '--force',
                      '/dev/mapper/%s' % self.dev_name,
                      self.symlink_path,
                      run_as_root=True,
                      check_exit_code=True)
Beispiel #21
0
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

           :return: True if the instance was shutdown within time limit,
                    False otherwise.
        """
        LOG.debug("Performing Soft shutdown on instance", instance=instance)

        remaining = timeout
        while remaining > 0:
            # Ask the guest to shut down and wait up to wait_time for it to
            # power off; retry until the budget in ``remaining`` runs out.
            wait_time = min(retry_interval, remaining)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          remaining, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except vmutils.HyperVException as e:
                # The call fails while the instance is still booting;
                # back off and try again.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            remaining -= retry_interval

        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
Beispiel #22
0
 def unfilter_instance(self, instance, network_info):
     """Tear down this instance's filters, if it was being filtered."""
     info = self.instance_info.pop(instance.id, None)
     if info:
         self.remove_filters_for_instance(instance)
         self.iptables.apply()
     else:
         LOG.info(_LI('Attempted to unfilter instance which is not '
                      'filtered'), instance=instance)
def downgrade(migrate_engine):
    """Drop the index created by upgrade(), if it is present."""
    meta, table, index = _get_table_index(migrate_engine)
    if index:
        index.drop(migrate_engine)
    else:
        # Index already gone (or never created); nothing to do.
        LOG.info(_LI('Skipped removing %s because no such index exists'),
                 INDEX_NAME)
Beispiel #24
0
def _translate_volume_summary_view(context, vol):
    """Maps keys for volumes summary view."""
    # Only attached volumes get a detailed attachment record.
    if vol['attach_status'] == 'attached':
        attachments = [_translate_attachment_detail_view(vol['id'],
                                                         vol['instance_uuid'],
                                                         vol['mountpoint'])]
    else:
        attachments = [{}]

    # Prefer the human-readable type name when the type object is loaded.
    if vol['volume_type_id'] and vol.get('volume_type'):
        volume_type = vol['volume_type']['name']
    else:
        volume_type = vol['volume_type_id']

    d = {
        'id': vol['id'],
        'status': vol['status'],
        'size': vol['size'],
        'availabilityZone': vol['availability_zone'],
        'createdAt': vol['created_at'],
        'attachments': attachments,
        'displayName': vol['display_name'],
        'displayDescription': vol['display_description'],
        'volumeType': volume_type,
        'snapshotId': vol['snapshot_id'],
    }
    LOG.info(_LI("vol=%s"), vol, context=context)

    d['metadata'] = vol.get('volume_metadata') or {}

    return d
Beispiel #25
0
    def create(self, req, body):
        """Creates a new snapshot."""
        context = req.environ['patron.context']
        authorize(context)

        # A 'snapshot' payload is mandatory.
        if not self.is_valid_body(body, 'snapshot'):
            raise exc.HTTPBadRequest(explanation=_("snapshot not specified"))

        snapshot = body['snapshot']
        volume_id = snapshot['volume_id']

        LOG.info(_LI("Create snapshot from volume %s"), volume_id,
                 context=context)

        # 'force' must be a strict boolean string/value.
        force = snapshot.get('force', False)
        try:
            force = strutils.bool_from_string(force, strict=True)
        except ValueError:
            raise exc.HTTPBadRequest(
                explanation=_("Invalid value '%s' for force.") % force)

        create_func = (self.volume_api.create_snapshot_force if force
                       else self.volume_api.create_snapshot)

        new_snapshot = create_func(context, volume_id,
                                   snapshot.get('display_name'),
                                   snapshot.get('display_description'))

        retval = _translate_snapshot_detail_view(context, new_snapshot)
        return {'snapshot': retval}
Beispiel #26
0
    def update_instance_info(self, context, host_name, instance_info):
        """Receives an InstanceList object from a compute node.

        This method receives information from a compute node when it starts up,
        or when its instances have changed, and updates its view of hosts and
        instances with it.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            # Known host: merge the reported instances into our cache.
            inst_dict = host_info.get("instances")
            for instance in instance_info.objects:
                # Overwrite the entry (if any) with the new info.
                inst_dict[instance.uuid] = instance
            host_info["updated"] = True
        else:
            instances = instance_info.objects
            if len(instances) > 1:
                # This is a host sending its full instance list, so use it.
                host_info = self._instance_info[host_name] = {}
                host_info["instances"] = {instance.uuid: instance
                                          for instance in instances}
                host_info["updated"] = True
            else:
                # A single instance from an unknown host cannot be assumed
                # to be that host's full list, so rebuild its info instead.
                self._recreate_instance_info(context, host_name)
                LOG.info(_LI("Received an update from an unknown host '%s'. "
                             "Re-created its InstanceList."), host_name)
Beispiel #27
0
    def _claim_test(self, resources, limits=None):
        """Test if this claim can be satisfied given available resources and
        optional oversubscription limits

        This should be called before the compute node actually consumes the
        resources required to execute the claim.

        :param resources: available local compute node resources
        :param limits: optional dict of oversubscription limits; a missing
            or None entry means the corresponding resource is unlimited
        :returns: None if resources are available to claim
        :raises: exception.ComputeResourcesUnavailable if any resource
            test fails
        """
        if not limits:
            limits = {}

        # If an individual limit is None, the resource will be considered
        # unlimited:
        memory_mb_limit = limits.get('memory_mb')
        disk_gb_limit = limits.get('disk_gb')
        numa_topology_limit = limits.get('numa_topology')

        # Pass the params separately so interpolation is deferred until the
        # record is actually emitted (lazy logging), and use the _LI marker
        # consistently with the rest of the module's LOG.info calls.
        LOG.info(_LI("Attempting claim: memory %(memory_mb)d MB, "
                     "disk %(disk_gb)d GB"),
                 {'memory_mb': self.memory_mb, 'disk_gb': self.disk_gb},
                 instance=self.instance)

        # Each test returns None on success or a reason string on failure.
        reasons = [self._test_memory(resources, memory_mb_limit),
                   self._test_disk(resources, disk_gb_limit),
                   self._test_numa_topology(resources, numa_topology_limit),
                   self._test_pci()]
        reasons = reasons + self._test_ext_resources(limits)
        reasons = [r for r in reasons if r is not None]
        if reasons:
            raise exception.ComputeResourcesUnavailable(
                reason="; ".join(reasons))

        LOG.info(_LI('Claim successful'), instance=self.instance)
Beispiel #28
0
    def detach_volume(self, connection_info, instance_name, mountpoint):
        """Detach volume storage to VM instance."""
        LOG.debug("Detach_volume: %(instance_name)s, %(mountpoint)s",
                  {'instance_name': instance_name, 'mountpoint': mountpoint})

        vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)

        device_number = volume_utils.get_device_number(mountpoint)
        vbd_ref = volume_utils.find_vbd_by_number(self._session, vm_ref,
                                                  device_number)

        if vbd_ref is None:
            # NOTE(sirp): If we don't find the VBD then it must have been
            # detached previously.
            LOG.warning(_LW('Skipping detach because VBD for %s was '
                            'not found'), instance_name)
            return

        self._detach_vbds_and_srs(vm_ref, [vbd_ref])
        LOG.info(_LI('Mountpoint %(mountpoint)s detached from instance'
                     ' %(instance_name)s'),
                 {'instance_name': instance_name, 'mountpoint': mountpoint})
Beispiel #29
0
    def _attach_volume(self, connection_info, vm_ref=None, instance_name=None,
                       dev_number=None, hotplug=False):
        """Connect the volume's SR and VDI, optionally attaching to a VM.

        Returns a (sr_uuid, vdi_uuid) tuple; on any failure the SR is
        forgotten before the exception propagates.
        """
        self._check_is_supported_driver_type(connection_info)

        connection_data = connection_info['data']
        sr_ref, sr_uuid = self._connect_to_volume_provider(connection_data,
                                                           instance_name)
        try:
            vdi_ref = self._connect_hypervisor_to_volume(sr_ref,
                                                         connection_data)
            vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
            LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)

            if vm_ref:
                self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
                                          dev_number, hotplug)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(sirp): Forgetting the SR will have the effect of
                # cleaning up the VDI and VBD records, so no need to handle
                # that explicitly.
                volume_utils.forget_sr(self._session, sr_ref)
        return (sr_uuid, vdi_uuid)
Beispiel #30
0
    def filter_all(self, cells, filter_properties):
        """Override filter_all() which operates on the full list
        of cells...

        When a 'target_cell' scheduler hint is present and the caller is
        authorized, the build is routed directly to that cell instead of
        filtering normally.
        """
        scheduler_hints = filter_properties.get('scheduler_hints')
        if not scheduler_hints:
            return cells

        # This filter only makes sense at the top level, as a full
        # cell name is specified.  So we pop 'target_cell' out of the
        # hints dict.
        cell_name = scheduler_hints.pop('target_cell', None)
        if not cell_name:
            return cells

        # This authorization is after popping off target_cell, so
        # that in case this fails, 'target_cell' is not left in the
        # dict when child cells go to schedule.
        if not self.authorized(filter_properties['context']):
            # No filtering, if not authorized.
            return cells

        LOG.info(
            _LI("Forcing direct route to %(cell_name)s because "
                "of 'target_cell' scheduler hint"), {'cell_name': cell_name})

        scheduler = filter_properties['scheduler']
        if cell_name == filter_properties['routing_path']:
            # Target is this very cell; schedule locally.
            return [scheduler.state_manager.get_my_state()]
        ctxt = filter_properties['context']

        # Hand the build off to the target cell directly.  The implicit
        # None return (no cells) is deliberate: nothing remains to filter.
        scheduler.msg_runner.build_instances(
            ctxt, cell_name, filter_properties['host_sched_kwargs'])
Beispiel #31
0
    def _attach_volume(self,
                       connection_info,
                       vm_ref=None,
                       instance_name=None,
                       dev_number=None,
                       hotplug=False):
        """Connect the volume's SR and VDI, optionally attaching to a VM.

        :param connection_info: volume driver connection details; must be
            of a supported driver type.
        :param vm_ref: if given, the VDI is also attached to this VM.
        :param instance_name: instance name passed to the volume provider.
        :param dev_number: device number for the attachment.
        :param hotplug: whether to plug the VBD into a running VM.
        :returns: tuple of (sr_uuid, vdi_uuid).
        """
        self._check_is_supported_driver_type(connection_info)

        connection_data = connection_info['data']
        sr_ref, sr_uuid = self._connect_to_volume_provider(
            connection_data, instance_name)
        try:
            vdi_ref = self._connect_hypervisor_to_volume(
                sr_ref, connection_data)
            vdi_uuid = self._session.VDI.get_uuid(vdi_ref)
            LOG.info(_LI('Connected volume (vdi_uuid): %s'), vdi_uuid)

            if vm_ref:
                self._attach_volume_to_vm(vdi_ref, vm_ref, instance_name,
                                          dev_number, hotplug)

            return (sr_uuid, vdi_uuid)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(sirp): Forgetting the SR will have the effect of
                # cleaning up the VDI and VBD records, so no need to handle
                # that explicitly.
                volume_utils.forget_sr(self._session, sr_ref)
Beispiel #32
0
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance.

        Resolves the group name/ID, then appends the group to the
        security group list of every port owned by the instance.

        :raises: exception.NoUniqueMatch if the name is ambiguous.
        :raises: exception.SecurityGroupCannotBeApplied if a port does not
            meet the security requirements.
        """
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") % {
                             'name': security_group_name,
                             'project': context.project_id
                         })
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                # Re-raise with the original traceback.  The previous
                # three-expression raise is Python 2-only syntax and is a
                # SyntaxError on Python 3; six.reraise works on both.
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(
                    _LW("Cannot add security group %(name)s to "
                        "%(instance)s since the port %(port_id)s "
                        "does not meet security requirements"), {
                            'name': security_group_name,
                            'instance': instance.uuid,
                            'port_id': port['id']
                        })
                raise exception.SecurityGroupCannotBeApplied()
            # Ports may come back without a security_groups key.
            port.setdefault('security_groups', [])
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(
                    _LI("Adding security group %(security_group_id)s to "
                        "port %(port_id)s"), {
                            'security_group_id': security_group_id,
                            'port_id': port['id']
                        })
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
Beispiel #33
0
def write_stored_info(target, field=None, value=None):
    """Write information about an image."""
    if not field:
        # Nothing to record.
        return

    info_file = get_info_filename(target)
    LOG.info(_LI('Writing stored info to %s'), info_file)
    fileutils.ensure_tree(os.path.dirname(info_file))

    lock_name = 'info-%s' % os.path.split(target)[-1]
    lock_path = os.path.join(CONF.instances_path, 'locks')

    @utils.synchronized(lock_name, external=True, lock_path=lock_path)
    def write_file(info_file, field, value):
        # Merge into any existing info so other fields survive the write.
        stored = {}
        if os.path.exists(info_file):
            with open(info_file, 'r') as f:
                stored = _read_possible_json(f.read(), info_file)

        # Record the value alongside a timestamp of when it was written.
        stored[field] = value
        stored['%s-timestamp' % field] = time.time()

        with open(info_file, 'w') as f:
            f.write(jsonutils.dumps(stored))

    write_file(info_file, field, value)
Beispiel #34
0
    def filter_all(self, cells, filter_properties):
        """Override filter_all() which operates on the full list
        of cells...
        """
        hints = filter_properties.get('scheduler_hints')
        if not hints:
            return cells

        # Pop 'target_cell' out of the hints: this filter only makes sense
        # at the top level, where a full cell name is specified, and the
        # hint must not leak into child-cell scheduling.
        cell_name = hints.pop('target_cell', None)
        if not cell_name:
            return cells

        # Authorize after popping, so a failure here still leaves
        # 'target_cell' removed from the hints dict.
        ctxt = filter_properties['context']
        if not self.authorized(ctxt):
            # Unauthorized callers get normal, unfiltered behavior.
            return cells

        LOG.info(_LI("Forcing direct route to %(cell_name)s because "
                     "of 'target_cell' scheduler hint"),
                 {'cell_name': cell_name})

        scheduler = filter_properties['scheduler']
        if cell_name == filter_properties['routing_path']:
            return [scheduler.state_manager.get_my_state()]

        scheduler.msg_runner.build_instances(ctxt, cell_name,
                filter_properties['host_sched_kwargs'])
Beispiel #35
0
    def migrate_instance_start(self, context, instance_uuid,
                               floating_addresses,
                               rxtx_factor=None, project_id=None,
                               source=None, dest=None):
        # We only care if floating_addresses are provided and we're
        # switching hosts
        if not floating_addresses or (source and source == dest):
            return

        LOG.info(_LI("Starting migration network for instance %s"),
                 instance_uuid)
        for address in floating_addresses:
            floating_ip = objects.FloatingIP.get_by_address(context, address)

            if self._is_stale_floating_ip_address(context, floating_ip):
                LOG.warning(_LW("Floating ip address |%(address)s| no longer "
                                "belongs to instance %(instance_uuid)s. "
                                "Will not migrate it "),
                            {'address': address,
                             'instance_uuid': instance_uuid})
                continue

            interface = CONF.public_interface or floating_ip.interface
            fixed_ip = floating_ip.fixed_ip
            self.l3driver.remove_floating_ip(floating_ip.address,
                                             fixed_ip.address,
                                             interface,
                                             fixed_ip.network)

            # NOTE(wenjianhn): Make this address will not be bound to public
            # interface when restarts patron-network on dest compute node
            floating_ip.host = None
            floating_ip.save()
Beispiel #36
0
    def migrate_instance_finish(self, context, instance_uuid,
                                floating_addresses, host=None,
                                rxtx_factor=None, project_id=None,
                                source=None, dest=None):
        """Re-bind an instance's floating IPs on the destination host
        once a migration completes.
        """
        # We only care if floating_addresses are provided and we're
        # switching hosts
        if host and not dest:
            # 'host' is an alternate way callers supply the destination.
            dest = host
        if not floating_addresses or (source and source == dest):
            return

        LOG.info(_LI("Finishing migration network for instance %s"),
                 instance_uuid)

        for address in floating_addresses:
            floating_ip = objects.FloatingIP.get_by_address(context, address)

            # Skip addresses that were reassigned while migrating.
            if self._is_stale_floating_ip_address(context, floating_ip):
                LOG.warning(_LW("Floating ip address |%(address)s| no longer "
                                "belongs to instance %(instance_uuid)s. "
                                "Will not setup it."),
                            {'address': address,
                             'instance_uuid': instance_uuid})
                continue

            # Record the new owning host before programming the rules.
            floating_ip.host = dest
            floating_ip.save()

            interface = CONF.public_interface or floating_ip.interface
            fixed_ip = floating_ip.fixed_ip
            self.l3driver.add_floating_ip(floating_ip.address,
                                          fixed_ip.address,
                                          interface,
                                          fixed_ip.network)
Beispiel #37
0
 def _preserve_multipath_id(self, connection_info):
     """Copy the stored multipath_id into *connection_info*, if present."""
     old_info = self['connection_info']
     if not (old_info and 'data' in old_info):
         return
     old_data = old_info['data']
     if 'multipath_id' in old_data:
         connection_info['data']['multipath_id'] = old_data['multipath_id']
         LOG.info(_LI('preserve multipath_id %s'),
                  connection_info['data']['multipath_id'])
Beispiel #38
0
    def _attach_volume_to_vm(self, vdi_ref, vm_ref, instance_name, dev_number,
                             hotplug):
        """Create a VBD for the volume VDI and optionally hot-plug it."""
        LOG.debug('Attach_volume vdi: %(vdi_ref)s vm: %(vm_ref)s',
                  {'vdi_ref': vdi_ref, 'vm_ref': vm_ref})

        # osvol is added to the vbd so we can spot which vbds are volumes
        vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
                                      dev_number, bootable=False, osvol=True)

        if hotplug:
            # NOTE(johngarbutt) can only call VBD.plug on a running vm
            if not vm_utils.is_vm_shutdown(self._session, vm_ref):
                LOG.debug("Plugging VBD: %s", vbd_ref)
                self._session.VBD.plug(vbd_ref, vm_ref)

        LOG.info(_LI('Dev %(dev_number)s attached to'
                     ' instance %(instance_name)s'),
                 {'instance_name': instance_name, 'dev_number': dev_number})
Beispiel #39
0
 def _preserve_multipath_id(self, connection_info):
     # Carry the multipath_id from the previously stored connection_info
     # forward into the new connection_info so the multipath device is
     # not lost across a re-attachment.
     if self['connection_info'] and 'data' in self['connection_info']:
         if 'multipath_id' in self['connection_info']['data']:
             connection_info['data']['multipath_id'] =\
                 self['connection_info']['data']['multipath_id']
             LOG.info(_LI('preserve multipath_id %s'),
                      connection_info['data']['multipath_id'])
Beispiel #40
0
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance.

        Resolves the group name/ID, removes it from every instance port
        that carries it, and raises "not found" if no port did.
        """
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                # Re-raise preserving the traceback.  The previous
                # three-expression raise is Python 2-only syntax and a
                # SyntaxError on Python 3; six.reraise works on both.
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance the security
                # group should be on both ports since it was added this way if
                # done through the patron api. In case it is not a 404 is only
                # raised if the security group is not found on any of the
                # ports on the instance.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                # The previous message said "Adding ... to port", which was
                # a copy-paste error: this path removes the group.
                LOG.info(_LI("Removing security group %(security_group_id)s "
                             "from port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance.uuid})
            self.raise_not_found(msg)
Beispiel #41
0
    def update_instance_info(self, context, host_name, instance_info):
        """Receives an InstanceList object from a compute node.

        This method receives information from a compute node when it starts up,
        or when its instances have changed, and updates its view of hosts and
        instances with it.
        """
        host_info = self._instance_info.get(host_name)
        if host_info:
            # Known host: fold the reported instances into the cache,
            # overwriting any existing entries.
            cached = host_info.get("instances")
            for inst in instance_info.objects:
                cached[inst.uuid] = inst
            host_info["updated"] = True
            return

        reported = instance_info.objects
        if len(reported) <= 1:
            # A lone instance from an unknown host cannot be trusted as a
            # full list; rebuild this host's info from scratch instead.
            self._recreate_instance_info(context, host_name)
            LOG.info(
                _LI("Received an update from an unknown host '%s'. "
                    "Re-created its InstanceList."), host_name)
            return

        # A full instance list from a new host: adopt it wholesale.
        self._instance_info[host_name] = {
            "instances": {inst.uuid: inst for inst in reported},
            "updated": True,
        }
Beispiel #42
0
    def _error(self, inner, req):
        """Map a caught exception to a WSGI fault response.

        :param inner: the exception being handled.
        :param req: the request that triggered it.
        :returns: a wsgi.Fault wrapping the appropriate HTTP error.
        """
        # six.text_type exists on both Python 2 and 3; the bare unicode()
        # builtin used previously does not exist on Python 3.
        LOG.exception(_LE("Caught error: %s"), six.text_type(inner))

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        # Default a missing or explicitly-None status code to a 500.
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            user_locale = req.best_match_language()
            inner_msg = translate(inner.message, user_locale)
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            inner_msg)

        notifications.send_api_fault(req.url, status, inner)
        return wsgi.Fault(outer)
Beispiel #43
0
        def inner_verify_checksum():
            # Verify base_file (a closure variable) against its stored
            # checksum.  Returns True on match or recent verification,
            # False on mismatch, None when no checksum is stored.
            (stored_checksum, stored_timestamp) = read_stored_checksum(
                base_file, timestamped=True)
            if stored_checksum:
                # NOTE(mikal): Checksums are timestamped. If we have recently
                # checksummed (possibly on another compute node if we are using
                # shared storage), then we don't need to checksum again.
                if (stored_timestamp and
                    time.time() - stored_timestamp <
                        CONF.libvirt.checksum_interval_seconds):
                    return True

                # NOTE(mikal): If there is no timestamp, then the checksum was
                # performed by a previous version of the code.
                # Rewrite it so future runs get a timestamp.
                if not stored_timestamp:
                    write_stored_info(base_file, field='sha1',
                                      value=stored_checksum)

                current_checksum = _hash_file(base_file)

                if current_checksum != stored_checksum:
                    LOG.error(_LE('image %(id)s at (%(base_file)s): image '
                                  'verification failed'),
                              {'id': img_id,
                               'base_file': base_file})
                    return False

                else:
                    return True

            else:
                LOG.info(_LI('image %(id)s at (%(base_file)s): image '
                             'verification skipped, no hash stored'),
                         {'id': img_id,
                          'base_file': base_file})

                # NOTE(mikal): If the checksum file is missing, then we should
                # create one. We don't create checksums when we download images
                # from glance because that would delay VM startup.
                if CONF.libvirt.checksum_base_images and create_if_missing:
                    LOG.info(_LI('%(id)s (%(base_file)s): generating '
                                 'checksum'),
                             {'id': img_id,
                              'base_file': base_file})
                    write_stored_checksum(base_file)

                return None
Beispiel #44
0
    def _inner_get_dev(self):
        """Try to bind self.image to a free nbd device.

        :returns: True on success (self.device and self.linked are set),
                  False on failure (self.error describes the problem).
        """
        device = self._allocate_nbd()
        if not device:
            # No free nbd device was available.
            return False

        # NOTE(mikal): qemu-nbd will return an error if the device file is
        # already in use.
        LOG.debug('Get nbd device %(dev)s for %(imgfile)s', {
            'dev': device,
            'imgfile': self.image
        })
        _out, err = utils.trycmd('qemu-nbd',
                                 '-c',
                                 device,
                                 self.image,
                                 run_as_root=True)
        if err:
            self.error = _('qemu-nbd error: %s') % err
            LOG.info(_LI('NBD mount error: %s'), self.error)
            return False

        # NOTE(vish): this forks into another process, so give it a chance
        # to set up before continuing
        # Poll for the kernel's pid file, which appears once the nbd
        # device is actually connected.
        pidfile = "/sys/block/%s/pid" % os.path.basename(device)
        for _i in range(CONF.timeout_nbd):
            if os.path.exists(pidfile):
                self.device = device
                break
            time.sleep(1)
        else:
            # for/else: the device never showed up within the timeout.
            self.error = _('nbd device %s did not show up') % device
            LOG.info(_LI('NBD mount error: %s'), self.error)

            # Cleanup
            _out, err = utils.trycmd('qemu-nbd',
                                     '-d',
                                     device,
                                     run_as_root=True)
            if err:
                LOG.warning(
                    _LW('Detaching from erroneous nbd device returned '
                        'error: %s'), err)
            return False

        self.error = ''
        self.linked = True
        return True
Beispiel #45
0
    def create(self, req, server_id, body):
        """Attach an interface to an instance.

        :param req: the API request; its optional 'interfaceAttachment'
            body may carry net_id, port_id and/or a fixed IP.
        :param server_id: the instance to attach the interface to.
        :returns: the show() view of the newly attached interface.
        """
        context = req.environ['patron.context']
        authorize(context)

        network_id = None
        port_id = None
        req_ip = None
        if body:
            attachment = body['interfaceAttachment']
            network_id = attachment.get('net_id', None)
            port_id = attachment.get('port_id', None)
            try:
                req_ip = attachment['fixed_ips'][0]['ip_address']
            except Exception:
                # fixed_ips is optional; fall back to no requested IP.
                pass

        # net_id and port_id are mutually exclusive, and a requested IP
        # only makes sense together with a network.
        if network_id and port_id:
            msg = _("Must not input both network_id and port_id")
            raise exc.HTTPBadRequest(explanation=msg)
        if req_ip and not network_id:
            msg = _("Must input network_id when request IP address")
            raise exc.HTTPBadRequest(explanation=msg)

        if req_ip:
            try:
                netaddr.IPAddress(req_ip)
            except netaddr.AddrFormatError as e:
                raise exc.HTTPBadRequest(explanation=six.text_type(e))

        try:
            instance = common.get_instance(self.compute_api,
                                           context, server_id)
            LOG.info(_LI("Attach interface"), instance=instance)
            vif = self.compute_api.attach_interface(context,
                instance, network_id, port_id, req_ip)
        except (exception.PortNotFound,
                exception.NetworkNotFound) as e:
            raise exc.HTTPNotFound(explanation=e.format_message())
        except (exception.FixedIpAlreadyInUse,
                exception.NoMoreFixedIps,
                exception.PortInUse,
                exception.NetworkDuplicated,
                exception.NetworkAmbiguous,
                exception.PortNotUsable) as e:
            raise exc.HTTPBadRequest(explanation=e.format_message())
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except NotImplementedError:
            msg = _("Network driver does not support this function.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        except exception.InterfaceAttachFailed:
            # The exception was previously bound to an unused name and its
            # details discarded; log it so the root cause is not lost
            # before returning a generic 500 to the caller.
            LOG.exception(_LE("Failed to attach interface"))
            msg = _("Failed to attach interface")
            raise webob.exc.HTTPInternalServerError(explanation=msg)
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'attach_interface', server_id)

        return self.show(req, server_id, vif['id'])
Beispiel #46
0
 def _log_and_attach(bdm):
     """Log the boot volume and mountpoint, then perform the attach."""
     context = attach_args[0]
     instance = attach_args[1]
     details = {'volume_id': bdm.volume_id,
                'mountpoint': bdm['mount_device']}
     LOG.info(_LI('Booting with volume %(volume_id)s at %(mountpoint)s'),
              details, context=context, instance=instance)
     bdm.attach(*attach_args, **attach_kwargs)
Beispiel #47
0
 def unfilter_instance(self, instance, network_info):
     # Drop the tracked filter state for this instance; only rebuild the
     # iptables rules when something was actually removed.
     if self.instance_info.pop(instance.id, None):
         self.remove_filters_for_instance(instance)
         self.iptables.apply()
     else:
         LOG.info(_LI('Attempted to unfilter instance which is not '
                      'filtered'),
                  instance=instance)
Beispiel #48
0
def get_wsgi_server():
    """Build and return the WSGI server fronting the XCP VNC proxy."""
    LOG.info(_LI("Starting patron-xvpvncproxy node (version %s)"),
              version.version_string_with_package())
    server = wsgi.Server("XCP VNC Proxy",
                         XCPVNCProxy(),
                         protocol=SafeHttpProtocol,
                         host=CONF.xvpvncproxy_host,
                         port=CONF.xvpvncproxy_port)
    return server
def upgrade(migrate_engine):
    """Create the index unless an equivalent index already exists."""
    meta, table, existing = _get_table_index(migrate_engine)
    if existing:
        LOG.info(_LI('Skipped adding %s because an equivalent index'
                     ' already exists.'), INDEX_NAME)
        return
    # Resolve the column objects for the configured index columns.
    index_columns = []
    for col_name in INDEX_COLUMNS:
        index_columns.append(getattr(table.c, col_name))
    Index(INDEX_NAME, *index_columns).create(migrate_engine)
Beispiel #50
0
 def check_token(self, context, token):
     """Return the decoded token if it is cached and still validates.

     Returns None when the token is absent from the cache or fails
     validation.
     """
     cached = self.mc.get(token.encode('UTF-8'))
     is_valid = cached is not None
     LOG.info(_LI("Checking Token: %(token)s, %(token_valid)s"),
              {'token': token, 'token_valid': is_valid})
     if not is_valid:
         return None
     decoded = jsonutils.loads(cached)
     if self._validate_token(context, decoded):
         return decoded
Beispiel #51
0
    def _age_and_verify_cached_images(self, context, all_instances, base_dir):
        """Classify cached base images under base_dir and purge unused ones.

        Matches images currently in use (self.used_images) to base files on
        disk by a SHA-1 fingerprint of the image id, marks whatever remains
        in self.unexplained_images as removable, and — when
        self.remove_unused_base_images is set — removes those files.
        """
        LOG.debug('Verify base images')
        # Determine what images are on disk because they're in use
        for img in self.used_images:
            # The base-file name is derived from the image id's SHA-1.
            # NOTE(review): sha1() of img assumes img is a byte string
            # (Python 2 semantics) — confirm before porting to Python 3.
            fingerprint = hashlib.sha1(img).hexdigest()
            LOG.debug('Image id %(id)s yields fingerprint %(fingerprint)s',
                      {'id': img,
                       'fingerprint': fingerprint})
            for result in self._find_base_file(base_dir, fingerprint):
                base_file, image_small, image_resized = result
                self._handle_base_image(img, base_file)

                # Only the full-size, unresized base file counts as an
                # "original" for later bookkeeping.
                if not image_small and not image_resized:
                    self.originals.append(base_file)

        # Elements remaining in unexplained_images might be in use
        inuse_backing_images = self._list_backing_images()
        for backing_path in inuse_backing_images:
            if backing_path not in self.active_base_files:
                self.active_base_files.append(backing_path)

        # Anything left is an unknown base image
        for img in self.unexplained_images:
            LOG.warn(_LW('Unknown base file: %s'), img)
            self.removable_base_files.append(img)

        # Dump these lists
        if self.active_base_files:
            LOG.info(_LI('Active base files: %s'),
                     ' '.join(self.active_base_files))
        if self.corrupt_base_files:
            LOG.info(_LI('Corrupt base files: %s'),
                     ' '.join(self.corrupt_base_files))

        if self.removable_base_files:
            LOG.info(_LI('Removable base files: %s'),
                     ' '.join(self.removable_base_files))

            # Deletion is opt-in; otherwise we only report candidates.
            if self.remove_unused_base_images:
                for base_file in self.removable_base_files:
                    self._remove_base_file(base_file)

        # That's it
        LOG.debug('Verification complete')
Beispiel #52
0
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance.

        Resolves the group by name or id via Neutron, finds every port whose
        device_id is the instance's uuid, and appends the group id to each
        port's security_groups list.

        Raises NoUniqueMatch when the name is ambiguous, a not-found error
        when the group or the instance's ports are missing, and
        SecurityGroupCannotBeApplied when a port does not meet the
        requirements for security groups.
        """

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            # Capture exc_info now so the original traceback can be
            # re-raised from the else branch below.
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                # Python 2 three-expression raise: re-raise with the
                # original traceback preserved.
                raise exc_info[0], exc_info[1], exc_info[2]
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            # Skip-and-fail rather than partially apply: any unsuitable
            # port aborts the whole operation.
            if not self._has_security_group_requirements(port):
                LOG.warning(_LW("Cannot add security group %(name)s to "
                                "%(instance)s since the port %(port_id)s "
                                "does not meet security requirements"),
                            {'name': security_group_name,
                             'instance': instance.uuid,
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))