Example #1
def add_neutron_nsx_port_mapping(session, neutron_id,
                                 nsx_switch_id, nsx_port_id):
    session.begin(subtransactions=True)
    try:
        mapping = nsx_models.NeutronNsxPortMapping(
            neutron_id, nsx_switch_id, nsx_port_id)
        session.add(mapping)
        session.commit()
    except db_exc.DBDuplicateEntry:
        with excutils.save_and_reraise_exception() as ctxt:
            session.rollback()
            # do not complain if the same exact mapping is being added,
            # otherwise re-raise because even though it is possible for the
            # same neutron port to map to different back-end ports over time,
            # this should not occur whilst a mapping already exists
            current = get_nsx_switch_and_port_id(session, neutron_id)
            if current[1] == nsx_port_id:
                LOG.debug("Port mapping for %s already available",
                          neutron_id)
                ctxt.reraise = False
    except db_exc.DBError:
        with excutils.save_and_reraise_exception():
            # rollback for any other db error
            session.rollback()
    return mapping
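All of the examples on this page revolve around oslo.utils' excutils.save_and_reraise_exception context manager. A minimal, self-contained sketch of its two modes (assuming only that oslo.utils is installed; demo_save_and_reraise is an illustrative name, not from any project above):

from oslo_utils import excutils


def demo_save_and_reraise(suppress=False):
    """Run cleanup while an exception is saved, then optionally re-raise.

    By default the saved exception is re-raised once the block exits;
    setting ctxt.reraise = False swallows it, exactly as Example #1 does
    for a benign duplicate mapping.
    """
    try:
        raise ValueError("boom")
    except ValueError:
        with excutils.save_and_reraise_exception() as ctxt:
            # Cleanup/logging goes here; the original traceback is kept.
            if suppress:
                ctxt.reraise = False


demo_save_and_reraise(suppress=True)   # returns quietly
try:
    demo_save_and_reraise(suppress=False)
except ValueError:
    print("re-raised as expected")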
Example #2
    def set_data(self, data, size=None):
        self.send_notification('image.prepare', self.repo)

        notify_error = self.notifier.error
        try:
            self.repo.set_data(data, size)
        except glance_store.StorageFull as e:
            msg = (_("Image storage media is full: %s") %
                   utils.exception_to_str(e))
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg)
        except glance_store.StorageWriteDenied as e:
            msg = (_("Insufficient permissions on image storage media: %s")
                   % utils.exception_to_str(e))
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPServiceUnavailable(explanation=msg)
        except ValueError as e:
            msg = (_("Cannot save data for image %(image_id)s: %(error)s") %
                   {'image_id': self.repo.image_id,
                    'error': utils.exception_to_str(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPBadRequest(
                explanation=utils.exception_to_str(e))
        except exception.Duplicate as e:
            msg = (_("Unable to upload duplicate image data for image"
                     "%(image_id)s: %(error)s") %
                   {'image_id': self.repo.image_id,
                    'error': utils.exception_to_str(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPConflict(explanation=msg)
        except exception.Forbidden as e:
            msg = (_("Not allowed to upload image data for image %(image_id)s:"
                     " %(error)s") % {'image_id': self.repo.image_id,
                                      'error': utils.exception_to_str(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPForbidden(explanation=msg)
        except exception.NotFound as e:
            msg = (_("Image %(image_id)s could not be found after upload."
                     " The image may have been deleted during the upload:"
                     " %(error)s") % {'image_id': self.repo.image_id,
                                      'error': utils.exception_to_str(e)})
            _send_notification(notify_error, 'image.upload', msg)
            raise webob.exc.HTTPNotFound(explanation=utils.exception_to_str(e))
        except webob.exc.HTTPError as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Failed to upload image data for image %(image_id)s"
                         " due to HTTP error: %(error)s") %
                       {'image_id': self.repo.image_id,
                        'error': utils.exception_to_str(e)})
                _send_notification(notify_error, 'image.upload', msg)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                msg = (_("Failed to upload image data for image %(image_id)s "
                         "due to internal error: %(error)s") %
                       {'image_id': self.repo.image_id,
                        'error': utils.exception_to_str(e)})
                _send_notification(notify_error, 'image.upload', msg)
        else:
            self.send_notification('image.upload', self.repo)
            self.send_notification('image.activate', self.repo)
Example #3
    def migrate_share_to_host(self, context, share_id, host,
                              force_host_copy, notify, request_spec,
                              filter_properties=None):
        """Ensure that the host exists and can accept the share."""

        def _migrate_share_set_error(self, context, ex, request_spec):
            self._set_share_state_and_notify(
                'migrate_share_to_host',
                {'task_state': constants.TASK_STATE_MIGRATION_ERROR},
                context, ex, request_spec)

        try:
            tgt_host = self.driver.host_passes_filters(context, host,
                                                       request_spec,
                                                       filter_properties)

        except exception.NoValidHost as ex:
            with excutils.save_and_reraise_exception():
                _migrate_share_set_error(self, context, ex, request_spec)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _migrate_share_set_error(self, context, ex, request_spec)
        else:
            share_ref = db.share_get(context, share_id)
            try:
                share_rpcapi.ShareAPI().migration_start(
                    context, share_ref, tgt_host, force_host_copy, notify)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    _migrate_share_set_error(self, context, ex, request_spec)
Example #4
    def _get_reply(self, operation_id, ovsdb_identifier):
        count = 0
        while count <= n_const.MAX_RETRIES:
            response = self._recv_data(ovsdb_identifier)
            LOG.debug("Response from OVSDB server = %s", str(response))
            if response:
                try:
                    json_m = jsonutils.loads(response)
                    self.responses.append(json_m)
                    method_type = json_m.get('method', None)
                    if method_type == "echo" and self.enable_manager:
                        self.ovsdb_dicts.get(ovsdb_identifier).send(
                            jsonutils.dumps(
                                {"result": json_m.get("params", None),
                                 "error": None, "id": json_m['id']}))
                    else:
                        if self._process_response(operation_id):
                            return True
                except Exception as ex:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_LE("Exception while receiving the "
                                          "response for the write request:"
                                          " [%s]"), ex)
            count += 1
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Could not obtain response from the OVSDB server "
                          "for the request"))
Example #5
    def _disconnect_host(self, host_name, vol_name):
        """Return value indicates if host was deleted on array or not"""
        try:
            self._array.disconnect_host(host_name, vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if err.code == 400:
                    # Happens if the host and volume are not connected.
                    ctxt.reraise = False
                    LOG.error(_LE("Disconnection failed with message: "
                                  "%(msg)s."), {"msg": err.text})
        if (GENERATED_NAME.match(host_name) and
            not self._array.list_host_connections(host_name,
                                                  private=True)):
            LOG.info(_LI("Deleting unneeded host %(host_name)r."),
                     {"host_name": host_name})
            try:
                self._array.delete_host(host_name)
            except purestorage.PureHTTPError as err:
                with excutils.save_and_reraise_exception() as ctxt:
                    if err.code == 400 and ERR_MSG_NOT_EXIST in err.text:
                        # Happens if the host is already deleted.
                        # This is fine though, just treat it as a warning.
                        ctxt.reraise = False
                        LOG.warning(_LW("Purity host deletion failed: "
                                        "%(msg)s."), {"msg": err.text})
            return True

        return False
Example #6
def _iscsi_setup_and_handle_errors(address, port, iqn, lun):
    """Function that yields an iSCSI target device to work on.

    :param address: The iSCSI IP address.
    :param port: The iSCSI port number.
    :param iqn: The iSCSI qualified name.
    :param lun: The iSCSI logical unit number.
    """
    dev = get_dev(address, port, iqn, lun)
    discovery(address, port)
    login_iscsi(address, port, iqn)
    if not is_block_device(dev):
        raise exception.InstanceDeployFailure(_("Parent device '%s' not found")
                                              % dev)
    try:
        yield dev
    except processutils.ProcessExecutionError as err:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Deploy to address %s failed."), address)
            LOG.error(_LE("Command: %s"), err.cmd)
            LOG.error(_LE("StdOut: %r"), err.stdout)
            LOG.error(_LE("StdErr: %r"), err.stderr)
    except exception.InstanceDeployFailure as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Deploy to address %s failed."), address)
            LOG.error(e)
    finally:
        logout_iscsi(address, port, iqn)
        delete_iscsi(address, port, iqn)
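Example #6 contains a bare yield, so the full source presumably wraps this function with @contextlib.contextmanager (the decorator is not shown above). A hypothetical call site, with the target details purely illustrative:

# Hypothetical usage; work_on_device is an illustrative stand-in.
with _iscsi_setup_and_handle_errors('192.0.2.10', 3260,
                                    'iqn.2004-04.example:target', 0) as dev:
    work_on_device(dev)
# Errors raised inside the block are logged and re-raised, and the
# finally clause guarantees logout_iscsi/delete_iscsi always run.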
Example #7
File: vm.py Project: Juniper/nova
def delete_lpar(adapter, instance):
    """Delete an LPAR.

    :param adapter: The adapter for the pypowervm API.
    :param instance: The nova instance corresponding to the lpar to delete.
    """
    lpar_uuid = get_pvm_uuid(instance)
    # Attempt to delete the VM. To avoid failures due to open vterm, we will
    # attempt to close the vterm before issuing the delete.
    try:
        LOG.info('Deleting virtual machine.', instance=instance)
        # Ensure any vterms are closed.  Will no-op otherwise.
        vterm.close_vterm(adapter, lpar_uuid)
        # Run the LPAR delete
        resp = adapter.delete(pvm_lpar.LPAR.schema_type, root_id=lpar_uuid)
        LOG.info('Virtual machine delete status: %d', resp.status,
                 instance=instance)
        return resp
    except pvm_exc.HttpError as e:
        with excutils.save_and_reraise_exception(logger=LOG) as sare:
            if e.response and e.response.status == 404:
                # LPAR is already gone - don't fail
                sare.reraise = False
                LOG.info('Virtual Machine not found', instance=instance)
            else:
                LOG.error('HttpError deleting virtual machine.',
                          instance=instance)
    except pvm_exc.Error:
        with excutils.save_and_reraise_exception(logger=LOG):
            # Attempting to close vterm did not help so raise exception
            LOG.error('Virtual machine delete failed: LPARID=%s', lpar_uuid)
Example #8
    def _create_ha_network(self, context, tenant_id):
        admin_ctx = context.elevated()

        args = {'network':
                {'name': constants.HA_NETWORK_NAME % tenant_id,
                 'tenant_id': '',
                 'shared': False,
                 'admin_state_up': True,
                 'status': constants.NET_STATUS_ACTIVE}}
        self._add_ha_network_settings(args['network'])

        network = self._core_plugin.create_network(admin_ctx, args)
        try:
            ha_network = self._create_ha_network_tenant_binding(admin_ctx,
                                                                tenant_id,
                                                                network['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network['id'])

        try:
            self._create_ha_subnet(admin_ctx, network['id'], tenant_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network['id'])

        return ha_network
Example #9
    def create_router(self, host, username, password, rbridge_id, router_id):
        """create vrf and associate vrf."""
        router_id = router_id[0:11]
        vrf_name = template.OS_VRF_NAME.format(id=router_id)
        rd = router_id + ":" + router_id
        try:
            mgr = self.connect(host, username, password)
            self.create_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("NETCONF error"))
                self.close_session()
        try:
            # For Nos5.0.0
            self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
            self.configure_address_family_for_vrf(mgr, rbridge_id, vrf_name)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                try:
                    # This is done because on 4.0.0 the rd doesn't accept
                    # alphabetic characters or hyphens
                    rd = "".join(i for i in router_id if i in "0123456789")
                    rd = rd[:4] + ":" + rd[:4]
                    self.configure_rd_for_vrf(mgr, rbridge_id, vrf_name, rd)
                    self.configure_address_family_for_vrf_v1(mgr,
                                                             rbridge_id,
                                                             vrf_name)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_LE("NETCONF error"))
                        self.close_session()

                ctxt.reraise = False
Example #10
def create_object_with_dependency(creator, dep_getter, dep_creator,
                                  dep_id_attr):
    """Creates an object that binds to a dependency while handling races.

    creator is a function that is expected to take the result of either
    dep_getter or dep_creator.

    The result of dep_getter and dep_creator must have an attribute named by
    dep_id_attr, which is used to determine if the dependency changed during
    object creation.

    dep_getter should return None if the dependency does not exist.

    dep_creator can raise a DBDuplicateEntry to indicate that a concurrent
    create of the dependency occurred and the process will restart to get the
    concurrently created one.

    This function will return both the created object and the dependency it
    used/created.

    This function protects against all of the cases where the dependency can
    be concurrently removed by catching exceptions and restarting the
    process of creating the dependency if one no longer exists. It will
    give up after neutron.db.api.MAX_RETRIES and raise the exception it
    encounters after that.

    TODO(kevinbenton): currently this does not try to delete the dependency
    it created. This matches the semantics of the HA network logic it is used
    for but it should be modified to cleanup in the future.
    """
    result, dependency, dep_id = None, None, None
    for attempts in range(1, db_api.MAX_RETRIES + 1):
        # we go to max + 1 here so the exception handlers can raise their
        # errors at the end
        try:
            dependency = dep_getter() or dep_creator()
            dep_id = getattr(dependency, dep_id_attr)
        except db_exc.DBDuplicateEntry:
            # dependency was concurrently created.
            with excutils.save_and_reraise_exception() as ctx:
                if attempts < db_api.MAX_RETRIES:
                    # sleep for a random time between 0 and 1 second to
                    # make sure a concurrent worker doesn't retry again
                    # at exactly the same time
                    time.sleep(random.uniform(0, 1))
                    ctx.reraise = False
                    continue
        try:
            result = creator(dependency)
            break
        except Exception:
            with excutils.save_and_reraise_exception() as ctx:
                # check if dependency we tried to use was removed during
                # object creation
                if attempts < db_api.MAX_RETRIES:
                    dependency = dep_getter()
                    if not dependency or dep_id != getattr(dependency,
                                                           dep_id_attr):
                        ctx.reraise = False
    return result, dependency
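A sketch of how this retry helper might be driven. The get_network, create_network, and bind_port callables below are hypothetical stand-ins for the HA-network logic the docstring mentions, not real Neutron APIs:

# Hypothetical wiring: the dependency is fetched or created first, then
# the dependent object is created against it; DBDuplicateEntry and a
# vanished dependency both trigger a retry inside the helper.
binding, network = create_object_with_dependency(
    creator=lambda net: bind_port(port_id, net.id),
    dep_getter=lambda: get_network(tenant_id),
    dep_creator=lambda: create_network(tenant_id),
    dep_id_attr='id')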
Example #11
    def get_nameserver_info(self, ssh_pool):
        """Get name server data from fabric.

        This method will return the connected node port wwn list (local
        and remote) for the given switch fabric.

        :param ssh_pool: SSH connections for the current fabric
        """
        cli_output = None
        nsinfo_list = []
        try:
            cli_output = self._get_switch_data(ssh_pool,
                                               zone_constant.NS_SHOW)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting nsshow info for fabric"))
        if cli_output:
            nsinfo_list = self._parse_ns_output(cli_output)
        try:
            cli_output = self._get_switch_data(ssh_pool,
                                               zone_constant.NS_CAM_SHOW)

        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting nscamshow"))
        if cli_output:
            nsinfo_list.extend(self._parse_ns_output(cli_output))
        LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
        return nsinfo_list
Example #12
    def execute(self, file_path, **kwargs):

        target_format = CONF.image_conversion.output_format
        # TODO(jokke): Once we support other schemes we need to take them into
        # account and handle the paths here.
        src_path = file_path.split('file://')[-1]
        dest_path = "%(path)s.%(target)s" % {'path': src_path,
                                             'target': target_format}
        self.dest_path = dest_path

        try:
            stdout, stderr = putils.trycmd("qemu-img", "info",
                                           "--output=json",
                                           src_path,
                                           prlimit=utils.QEMU_IMG_PROC_LIMITS,
                                           log_errors=putils.LOG_ALL_ERRORS,)
        except OSError as exc:
            with excutils.save_and_reraise_exception():
                exc_message = encodeutils.exception_to_unicode(exc)
                msg = ("Failed to do introspection as part of image "
                       "conversion for %(iid)s: %(err)s")
                LOG.error(msg, {'iid': self.image_id, 'err': exc_message})

        if stderr:
            raise RuntimeError(stderr)

        metadata = json.loads(stdout)
        source_format = metadata.get('format')
        virtual_size = metadata.get('virtual-size', 0)
        image = self.image_repo.get(self.image_id)
        image.virtual_size = virtual_size

        if source_format == target_format:
            LOG.debug("Source is already in target format, "
                      "not doing conversion for %s", self.image_id)
            self.image_repo.save(image)
            return file_path

        try:
            stdout, stderr = putils.trycmd('qemu-img', 'convert',
                                           '-f', source_format,
                                           '-O', target_format,
                                           src_path, dest_path,
                                           log_errors=putils.LOG_ALL_ERRORS)
        except OSError as exc:
            with excutils.save_and_reraise_exception():
                exc_message = encodeutils.exception_to_unicode(exc)
                msg = "Failed to do image conversion for %(iid)s: %(err)s"
                LOG.error(msg, {'iid': self.image_id, 'err': exc_message})

        if stderr:
            raise RuntimeError(stderr)

        image.disk_format = target_format
        image.container_format = 'bare'
        self.image_repo.save(image)

        os.remove(src_path)

        return "file://%s" % dest_path
Example #13
def authorize(context, action, target, do_raise=True):
    """Verifies that the action is valid on the target in this context.

    :param context: instance of
        nova.api.openstack.placement.context.RequestContext
    :param action: string representing the action to be checked
        this should be colon separated for clarity, i.e.
        ``placement:resource_providers:list``
    :param target: dictionary representing the object of the action;
        for object creation this should be a dictionary representing the
        owner of the object e.g. ``{'project_id': context.project_id}``.
    :param do_raise: if True (the default), raises PolicyNotAuthorized;
        if False, returns False
    :raises nova.api.openstack.placement.exception.PolicyNotAuthorized: if
        verification fails and do_raise is True.
    :returns: non-False value (not necessarily "True") if authorized, and the
        exact value False if not authorized and do_raise is False.
    """
    init()
    credentials = context.to_policy_values()
    try:
        # NOTE(mriedem): The "action" kwarg is for the PolicyNotAuthorized exc.
        return _ENFORCER_PLACEMENT.authorize(
            action, target, credentials, do_raise=do_raise,
            exc=exception.PolicyNotAuthorized, action=action)
    except policy.PolicyNotRegistered:
        with excutils.save_and_reraise_exception():
            LOG.exception('Policy not registered')
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.debug('Policy check for %(action)s failed with credentials '
                      '%(credentials)s',
                      {'action': action, 'credentials': credentials})
Example #14
    def _action_recorder(self, action, expected_exceptions=tuple()):
        '''Return a context manager to record the progress of an action.

        Upon entering the context manager, the state is set to IN_PROGRESS.
        Upon exiting, the state will be set to COMPLETE if no exception was
        raised, or FAILED otherwise. Non-exit exceptions will be translated
        to ResourceFailure exceptions.

        Expected exceptions are re-raised, with the Resource left in the
        IN_PROGRESS state.
        '''
        try:
            self.state_set(action, self.IN_PROGRESS)
            yield
        except expected_exceptions as ex:
            with excutils.save_and_reraise_exception():
                LOG.debug('%s', six.text_type(ex))
        except Exception as ex:
            LOG.info('%(action)s: %(info)s', {"action": action,
                                              "info": six.text_type(self)},
                     exc_info=True)
            failure = exception.ResourceFailure(ex, self, action)
            self.state_set(action, self.FAILED, six.text_type(failure))
            raise failure
        except:  # noqa
            with excutils.save_and_reraise_exception():
                try:
                    self.state_set(action, self.FAILED, '%s aborted' % action)
                except Exception:
                    LOG.exception(_LE('Error marking resource as failed'))
        else:
            self.state_set(action, self.COMPLETE)
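Because _action_recorder yields, the full Heat source presumably decorates it with @contextlib.contextmanager (omitted above). A hypothetical call site showing the intended flow:

# Hypothetical usage; handle_create and the expected exception type are
# illustrative. State moves to IN_PROGRESS on entry; COMPLETE if the
# block succeeds; expected exceptions re-raise leaving IN_PROGRESS; any
# other exception is recorded as FAILED and wrapped in ResourceFailure.
with self._action_recorder(self.CREATE,
                           expected_exceptions=(SomeExpectedError,)):
    self.handle_create()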
Example #15
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception(_LE("Neutron Error:"))
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                   " any ports") % instance.uuid)
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance the security
                # group should be on both ports since it was added this way if
                # done through the nova api. In case it is not a 404 is only
                # raised if the security group is not found on any of the
                # ports on the instance.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(_LI("Adding security group %(security_group_id)s to "
                             "port %(port_id)s"),
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Neutron Error:"))
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance.uuid})
            self.raise_not_found(msg)
Example #16
    def container_start(self, context, instance, image_meta, injected_files,
                        admin_password, network_info=None, block_device_info=None,
                        flavor=None):
        LOG.info(_LI('Spawning new instance'), instance=instance)
        if self.client.container_defined(instance.uuid):
            raise exception.InstanceExists(name=instance.uuid)

        try:
            LOG.debug('Fetching image from Glance.')
            self.image.fetch_image(context, instance, image_meta)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to create image for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        try:
            LOG.debug('Setting up container profiles')
            self.setup_container(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to setup container for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        try:
            LOG.debug('Setup Networking')
            self._start_network(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to setup network for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        try:
            LOG.debug('Start container')
            self._start_container(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to start container for: %(instance)s'),
                          {'instance': instance.uuid})
                self.container_destroy(context, instance, network_info,
                                       block_device_info,
                                       destroy_disks=None, migrate_data=None)

        def _wait_for_boot():
            state = self.container_info(instance)
            if state == power_state.RUNNING:
                LOG.info(_LI("Instance spawned successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
        timer.start(interval=0.6).wait()
Example #17
    def driver_detach(self, context, instance, volume_api, virt_driver):
        connection_info = self['connection_info']
        mp = self['mount_device']
        volume_id = self.volume_id

        LOG.info('Attempting to driver detach volume %(volume_id)s from '
                 'mountpoint %(mp)s', {'volume_id': volume_id, 'mp': mp},
                 instance=instance)
        try:
            if not virt_driver.instance_exists(instance):
                LOG.warning('Detaching volume from unknown instance',
                            instance=instance)

            encryption = encryptors.get_encryption_metadata(context,
                    volume_api, volume_id, connection_info)
            virt_driver.detach_volume(context, connection_info, instance, mp,
                                      encryption=encryption)
        except exception.DiskNotFound as err:
            LOG.warning('Ignoring DiskNotFound exception while '
                        'detaching volume %(volume_id)s from '
                        '%(mp)s : %(err)s',
                        {'volume_id': volume_id, 'mp': mp,
                         'err': err}, instance=instance)
        except exception.DeviceDetachFailed as err:
            with excutils.save_and_reraise_exception():
                LOG.warning('Guest refused to detach volume %(vol)s',
                            {'vol': volume_id}, instance=instance)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Failed to detach volume '
                              '%(volume_id)s from %(mp)s',
                              {'volume_id': volume_id, 'mp': mp},
                              instance=instance)
                volume_api.roll_detaching(context, volume_id)
Example #18
File: api.py Project: apporc/cinder
    def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot):
        try:
            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                context, cgsnapshot.id)

            if not snapshots:
                msg = _("Cgsnahost is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for snapshot in snapshots:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['cgsnapshot'] = cgsnapshot
                kwargs['consistencygroup'] = group
                kwargs['snapshot'] = snapshot
                volume_type_id = snapshot.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since cgsnapshot is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           snapshot.volume_size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Error occurred when creating volume "
                                      "entry from snapshot in the process of "
                                      "creating consistency group %(group)s "
                                      "from cgsnapshot %(cgsnap)s."),
                                  {'group': group.id,
                                   'cgsnap': cgsnapshot.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating consistency "
                                  "group %(group)s from cgsnapshot "
                                  "%(cgsnap)s."),
                              {'group': group.id,
                               'cgsnap': cgsnapshot.id})

        volumes = self.db.volume_get_all_by_group(context,
                                                  group.id)
        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'],
                                  {'host': group.get('host')})

        self.volume_rpcapi.create_consistencygroup_from_src(
            context, group, cgsnapshot)
Example #19
File: api.py Project: apporc/cinder
    def _create_cg_from_source_cg(self, context, group, source_cg):
        try:
            source_vols = self.db.volume_get_all_by_group(context,
                                                          source_cg.id)

            if not source_vols:
                msg = _("Source CG is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for source_vol in source_vols:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['source_cg'] = source_cg
                kwargs['consistencygroup'] = group
                kwargs['source_volume'] = source_vol
                volume_type_id = source_vol.get('volume_type_id')
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since source_cg is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           source_vol['size'],
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Error occurred when creating cloned "
                                      "volume in the process of creating "
                                      "consistency group %(group)s from "
                                      "source CG %(source_cg)s."),
                                  {'group': group.id,
                                   'source_cg': source_cg.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating consistency "
                                  "group %(group)s from source CG "
                                  "%(source_cg)s."),
                              {'group': group.id,
                               'source_cg': source_cg.id})

        volumes = self.db.volume_get_all_by_group(context,
                                                  group.id)
        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'],
                                  {'host': group.host})

        self.volume_rpcapi.create_consistencygroup_from_src(context, group,
                                                            None, source_cg)
Example #20
    def _ensure_connected(self):
        if self._socket is None:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(1)
            try:
                s.connect(("", self.port))
            except socket.error as e:
                with excutils.save_and_reraise_exception():
                    s.close()
                    if e.errno in (errno.ECONNREFUSED, errno.ENOTCONN,
                                   errno.ECONNRESET):
                        # Don't bother with further connections...
                        self.dead = True
            read_pipe = s.makefile("rb", 0)
            write_pipe = s.makefile("wb", 0)
            try:
                msg = self._do_recv(read_pipe=read_pipe)
                su.schema_validate(msg, SCHEMAS[CHALLENGE])
                if msg != CHALLENGE:
                    raise IOError("Challenge expected not received")
                else:
                    pieces = _encode_message(self.auth_key,
                                             CHALLENGE_RESPONSE,
                                             self.identity)
                    self._do_send_and_ack(pieces, write_pipe=write_pipe,
                                          read_pipe=read_pipe)
            except Exception:
                with excutils.save_and_reraise_exception():
                    s.close()
            else:
                self._socket = s
                self._read_pipe = read_pipe
                self._write_pipe = write_pipe
Example #21
    def create_volume_from_snapshot(self, volume, snapshot):
        try:
            ds_snapshot_uuid = self._get_metadata_value(
                snapshot, self.METADATA_DS_SNAPSHOT_UUID)

            out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
                                   'clone_snapshot',
                                   1,
                                   src_lun_uuid=snapshot['volume']['name'],
                                   snapshot_uuid=ds_snapshot_uuid,
                                   cloned_lun_name=volume['name'],
                                   clone_type='CINDER')

            self.check_response(out)

        except exception.SnapshotMetadataNotFound:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to get snapshot UUID. [%s]'),
                              snapshot['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to create_volume_from_snapshot. '
                                  '[%s]'),
                              snapshot['id'])

        if not self._check_lun_status_normal(volume['name']):
            message = (_('Volume [%(vol)s] snapshot [%(snapshot)s] status '
                         'is not healthy.') %
                       {'vol': snapshot['volume']['name'],
                        'snapshot': ds_snapshot_uuid})
            raise exception.VolumeDriverException(message=message)

        if snapshot['volume_size'] < volume['size']:
            self.extend_volume(volume, volume['size'])
Example #22
    def consume_in_thread(self):
        """Runs the ZmqProxy service."""
        ipc_dir = CONF.rpc_zmq_ipc_dir
        consume_in = "tcp://%s:%s" % \
            (CONF.rpc_zmq_bind_address,
             CONF.rpc_zmq_port)
        consumption_proxy = InternalContext(None)

        try:
            os.makedirs(ipc_dir)
        except os.error:
            if not os.path.isdir(ipc_dir):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Required IPC directory does not exist at"
                                " %s"), ipc_dir)
        try:
            self.register(consumption_proxy,
                          consume_in,
                          zmq.PULL)
        except zmq.ZMQError:
            if os.access(ipc_dir, os.X_OK):
                with excutils.save_and_reraise_exception():
                    LOG.error(_("Permission denied to IPC directory at"
                                " %s"), ipc_dir)
            with excutils.save_and_reraise_exception():
                LOG.error(_("Could not create ZeroMQ receiver daemon. "
                            "Socket may already be in use."))

        super(ZmqProxy, self).consume_in_thread()
Example #23
def update_image(image_path, volume_id, hs_img_id):
    cmd_out = None
    cmd_err = None
    output = None
    try:
        cmd_arg = {}
        cmd_arg['operation'] = 'update_image'
        cmd_arg['image_path'] = image_path
        cmd_arg['volume_id'] = volume_id
        cmd_arg['hs_image_id'] = hs_img_id
        # create a json for cmd argument
        cmdarg_json = json.dumps(cmd_arg)

        (cmd_out, cmd_err) = hscli.hsexecute(cmdarg_json)

        # cmd_err should be None in case of successful execution of cmd
        if not cmd_err:
            output = process_cmd_out(cmd_out)
        else:
            LOG.error("Error %s in execution of update_image",
                      cmd_err)
            raise exception.UnableToExecuteHyperScaleCmd(
                command=cmdarg_json)
    except exception.UnableToExecuteHyperScaleCmd:
        with excutils.save_and_reraise_exception():
            LOG.debug("Unable to execute update_image", exc_info=True)

    except exception.UnableToProcessHyperScaleCmdOutput:
        with excutils.save_and_reraise_exception():
            LOG.debug("Unable to process update_image output",
                      exc_info=True)
    return output
Example #24
    def _create_ha_network(self, context, tenant_id):
        admin_ctx = context.elevated()

        args = {
            "network": {
                "name": constants.HA_NETWORK_NAME % tenant_id,
                "tenant_id": "",
                "shared": False,
                "admin_state_up": True,
            }
        }
        self._add_ha_network_settings(args["network"])
        network = p_utils.create_network(self._core_plugin, admin_ctx, args)

        try:
            ha_network = self._create_ha_network_tenant_binding(admin_ctx, tenant_id, network["id"])
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network["id"])

        try:
            self._create_ha_subnet(admin_ctx, network["id"], tenant_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._core_plugin.delete_network(admin_ctx, network["id"])

        return ha_network
Example #25
def episodic_snap(meta):

    cmd_out = None
    cmd_err = None
    out_meta = None
    try:
        cmd_arg = {}
        cmd_arg['operation'] = 'episodic_snap'
        cmd_arg['metadata'] = meta
        # create a json for cmd argument
        cmdarg_json = json.dumps(cmd_arg)

        # call hscli for episodic_snap
        (cmd_out, cmd_err) = hscli.hsexecute(cmdarg_json)

        # cmd_err should be None in case of successful execution of cmd
        if not cmd_err:
            processed_output = process_cmd_out(cmd_out)
            out_meta = processed_output.get('payload')
        else:
            LOG.error("Error %s in processing episodic_snap",
                      cmd_err)
            raise exception.UnableToExecuteHyperScaleCmd(
                command=cmdarg_json)
    except exception.UnableToExecuteHyperScaleCmd:
        with excutils.save_and_reraise_exception():
            LOG.debug("Unable to execute episodic_snap", exc_info=True)

    except exception.UnableToProcessHyperScaleCmdOutput:
        with excutils.save_and_reraise_exception():
            LOG.debug("Unable to process episodic_snap output",
                      exc_info=True)
    return out_meta
Example #26
    def update_vip(self, context, old_vip, vip, pool_mapping, vip_mapping):
        LOG.debug('Updating VIP %s to %s', old_vip, vip)

        edge_id = vip_mapping['edge_id']
        edge_vip_id = vip_mapping['edge_vse_id']
        app_profile_id = vip_mapping['edge_app_profile_id']
        app_profile = convert_lbaas_app_profile(
            vip['name'], vip.get('session_persistence') or {},
            vip.get('protocol'))
        try:
            self.vcns.update_app_profile(edge_id, app_profile_id, app_profile)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self._lb_driver.vip_failed(context, vip)
                LOG.error(_LE('Failed to update app profile on edge: %s'),
                          edge_id)

        edge_vip = convert_lbaas_vip(vip, app_profile_id, pool_mapping)
        try:
            self.vcns.update_vip(edge_id, edge_vip_id, edge_vip)
            self._lb_driver.vip_successful(context, vip)
        except nsxv_exc.VcnsApiException:
            with excutils.save_and_reraise_exception():
                self._lb_driver.vip_failed(context, vip)
                LOG.error(_LE('Failed to update vip on edge: %s'), edge_id)
Example #27
    def delete_vip(self, context, vip, vip_mapping):
        LOG.debug('Deleting VIP %s', vip)

        if not vip_mapping:
            LOG.error(_LE('No mapping found for vip %s'), vip['id'])
        else:
            edge_id = vip_mapping['edge_id']
            edge_vse_id = vip_mapping['edge_vse_id']
            app_profile_id = vip_mapping['edge_app_profile_id']

            try:
                self.vcns.delete_vip(edge_id, edge_vse_id)
                self._del_vip_as_secondary_ip(edge_id, vip['address'])
                self._del_vip_fw_rule(edge_id, vip_mapping['edge_fw_rule_id'])
            except nsxv_exc.ResourceNotFound:
                LOG.error(_LE('vip not found on edge: %s'), edge_id)
            except nsxv_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    self._lb_driver.vip_failed(context, vip)
                    LOG.error(
                        _LE('Failed to delete vip on edge: %s'), edge_id)

            try:
                self.vcns.delete_app_profile(edge_id, app_profile_id)
            except nsxv_exc.ResourceNotFound:
                LOG.error(_LE('app profile not found on edge: %s'), edge_id)
            except nsxv_exc.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    self._lb_driver.vip_failed(context, vip)
                    LOG.error(
                        _LE('Failed to delete app profile on Edge: %s'),
                        edge_id)

        self._lb_driver.delete_vip_successful(context, vip)
Example #28
def get_datanode_id():

    dnid = None
    cmd_out = None
    cmd_err = None
    try:
        cmd_arg = {'operation': 'get_datanode_id'}
        # create a json for cmd argument
        cmdarg_json = json.dumps(cmd_arg)

        # call hscli for get_datanode_id
        (cmd_out, cmd_err) = hscli.hsexecute(cmdarg_json)

        # cmd_err should be None in case of successful execution of cmd
        if not cmd_err:
            processed_output = process_cmd_out(cmd_out)
            dnid = processed_output.get('payload')
        else:
            LOG.error("Error %s in getting datanode hypervisor id",
                      cmd_err)
            raise exception.UnableToExecuteHyperScaleCmd(
                command=cmdarg_json)
    except exception.UnableToExecuteHyperScaleCmd:
        with excutils.save_and_reraise_exception():
            LOG.debug("Unable to execute get_datanode_id", exc_info=True)

    except exception.UnableToProcessHyperScaleCmdOutput:
        with excutils.save_and_reraise_exception():
            LOG.debug("Unable to process get_datanode_id output",
                      exc_info=True)
    return dnid
Example #29
def _execute_nm_command(task, data, command_func, parse_func=None):
    """Execute Intel Node Manager command via send_raw().

    :param task: a TaskManager instance.
    :param data: a dict with data passed to vendor's method.
    :param command_func: a function that returns raw command bytes.
    :param parse_func: a function that parses returned raw bytes.
    :raises: IPMIFailure if Intel Node Manager is not detected on a node or if
             an error happens during command execution.
    :returns: a dict with parsed output or None if command does not return
              user's info.
    """
    try:
        channel, address = _get_nm_address(task)
    except exception.IPMIFailure as e:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Can not obtain Intel Node Manager address for '
                              'node %(node)s: %(err)s'),
                          {'node': task.node.uuid, 'err': six.text_type(e)})
    driver_info = task.node.driver_info
    driver_info['ipmi_bridging'] = 'single'
    driver_info['ipmi_target_channel'] = channel
    driver_info['ipmi_target_address'] = address
    cmd = _command_to_string(command_func(data))
    out = ipmi.send_raw(task, cmd)[0]
    if parse_func:
        try:
            return parse_func(out.split())
        except exception.IPMIFailure as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Error in returned data for node %(node)s: '
                                  '%(err)s'), {'node': task.node.uuid,
                                               'err': six.text_type(e)})
Example #30
    def _create_ismview_dir(self,
                            ismview_dir,
                            diskarray_name,
                            driver_name,
                            host):
        """Create ismview directory."""
        filename = diskarray_name
        if filename == '':
            filename = driver_name + '_' + host

        ismview_path = os.path.join(ismview_dir, filename)
        LOG.debug('ismview_path=%s.', ismview_path)
        try:
            if os.path.exists(ismview_path):
                os.remove(ismview_path)
        except OSError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if e.errno == errno.ENOENT:
                    ctxt.reraise = False

        try:
            os.makedirs(ismview_dir)
        except OSError as e:
            with excutils.save_and_reraise_exception() as ctxt:
                if e.errno == errno.EEXIST:
                    ctxt.reraise = False

        return ismview_path
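Example #30's errno checks implement "tolerate only the benign failure" by flipping ctxt.reraise. The same idea can be expressed with the standard library alone; a self-contained sketch of the pattern (not the original driver's code):

import contextlib
import errno
import os


def ensure_fresh_path(directory, filename):
    """Remove a stale file and make sure its directory exists.

    Mirrors Example #30: a missing file (ENOENT) and an already existing
    directory (EEXIST) are tolerated; any other OSError propagates.
    """
    path = os.path.join(directory, filename)
    with contextlib.suppress(FileNotFoundError):  # ENOENT is benign
        os.remove(path)
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:  # only EEXIST is benign
            raise
    return path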
Example #31
    def initialize_connection(self, volume, connector):
        # Initialize_connection will find or create a server identified by the
        # connector on the Dell backend.  It will then map the volume to it
        # and return the properties as follows..
        # {'driver_volume_type': 'iscsi',
        #  'data': {'target_discovered': False,
        #           'target_iqn': preferred iqn,
        #           'target_iqns': all iqns,
        #           'target_portal': preferred portal,
        #           'target_portals': all portals,
        #           'target_lun': preferred lun,
        #           'target_luns': all luns,
        #          }}

        # We use id to name the volume name as it is a
        # known unique name.
        volume_name = volume.get('id')
        provider_id = volume.get('provider_id')
        islivevol = self._is_live_vol(volume)
        initiator_name = connector.get('initiator')
        multipath = connector.get('multipath', False)
        LOG.info(
            'initialize_connection: %(vol)s:%(pid)s:'
            '%(intr)s. Multipath is %(mp)r', {
                'vol': volume_name,
                'pid': provider_id,
                'intr': initiator_name,
                'mp': multipath
            })

        with self._client.open_connection() as api:
            try:
                # Find the volume on the storage center. Note that if this
                # is live volume and we are swapped this will be the back
                # half of the live volume.
                scvolume = api.find_volume(volume_name, provider_id, islivevol)
                if scvolume:
                    # Get the SSN it is on.
                    ssn = scvolume['instanceId'].split('.')[0]
                    # Find our server.
                    scserver = api.find_server(initiator_name, ssn)
                    # No? Create it.
                    if scserver is None:
                        scserver = api.create_server(
                            [initiator_name],
                            self.configuration.dell_server_os, ssn)

                    # if we have a server and a volume lets bring them
                    # together.
                    if scserver is not None:
                        mapping = api.map_volume(scvolume, scserver)
                        if mapping is not None:
                            # Since we just mapped our volume we had best
                            # update our sc volume object.
                            scvolume = api.get_volume(scvolume['instanceId'])
                            # Our return.
                            iscsiprops = {}

                            # Three cases that should all be satisfied with the
                            # same return of Target_Portal and Target_Portals.
                            # 1. Nova is calling us so we need to return the
                            #    Target_Portal stuff.  It should ignore the
                            #    Target_Portals stuff.
                            # 2. OS brick is calling us in multipath mode so we
                            #    want to return Target_Portals.  It will ignore
                            #    the Target_Portal stuff.
                            # 3. OS brick is calling us in single path mode so
                            #    we want to return Target_Portal and
                            #    Target_Portals as alternates.
                            iscsiprops = api.find_iscsi_properties(scvolume)

                            # If this is a live volume we need to map up our
                            # secondary volume. Note that if we have failed
                            # over we do not wish to do this.
                            if islivevol:
                                sclivevolume = api.get_live_volume(provider_id)
                                # Only map if we are not failed over.
                                if (sclivevolume and not api.is_failed_over(
                                        provider_id, sclivevolume)):
                                    secondaryprops = self.initialize_secondary(
                                        api, sclivevolume, initiator_name)
                                    # Combine with iscsiprops
                                    iscsiprops['target_iqns'] += (
                                        secondaryprops['target_iqns'])
                                    iscsiprops['target_portals'] += (
                                        secondaryprops['target_portals'])
                                    iscsiprops['target_luns'] += (
                                        secondaryprops['target_luns'])

                            # Return our iscsi properties.
                            iscsiprops['discard'] = True
                            return {
                                'driver_volume_type': 'iscsi',
                                'data': iscsiprops
                            }
            # Re-raise any backend exception.
            except exception.VolumeBackendAPIException:
                with excutils.save_and_reraise_exception():
                    LOG.error('Failed to initialize connection')
            # If there is a data structure issue then detail the exception
            # and bail with a Backend Exception.
            except Exception as error:
                LOG.error(error)
                raise exception.VolumeBackendAPIException(error)

        # We get here because our mapping is None or we have no valid iqn
        # to return, so blow up.
        raise exception.VolumeBackendAPIException(_('Unable to map volume'))
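
For reference, the three-case comment above implies a payload that carries both the single-path keys and their multipath plurals, so Nova and os-brick can each pick what they need. A minimal sketch of such a return, with made-up addresses and IQNs (the real values come from api.find_iscsi_properties()):

# Hypothetical illustration only; keys follow the usual os-brick
# iSCSI connection-properties convention.
iscsiprops = {
    'target_discovered': False,
    'target_iqn': 'iqn.2002-03.com.example:t0',        # case 1 (Nova)
    'target_portal': '192.0.2.10:3260',
    'target_lun': 1,
    'target_iqns': ['iqn.2002-03.com.example:t0',      # cases 2 and 3
                    'iqn.2002-03.com.example:t1'],
    'target_portals': ['192.0.2.10:3260', '192.0.2.11:3260'],
    'target_luns': [1, 1],
    'discard': True,
}
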
Example #32
    def _get_node_data(self):
        """Get and verify node configuration."""

        # Get storage system name and id
        ssh_cmd = ['svcinfo', 'lssystem', '-delim', '!']
        attributes = self._execute_command_and_parse_attributes(ssh_cmd)
        if not attributes or 'name' not in attributes:
            msg = _('Could not get system name.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        self._system_name = attributes['name']
        self._system_id = attributes['id']

        # Validate value of open_access_enabled flag, for now only
        # support when open_access_enabled is off
        if not attributes or attributes.get('open_access_enabled') != 'off':
            msg = _('open_access_enabled is not off.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Validate that the array exists
        pool = FLASHSYSTEM_VOLPOOL_NAME
        ssh_cmd = ['svcinfo', 'lsmdiskgrp', '-bytes', '-delim', '!', pool]
        attributes = self._execute_command_and_parse_attributes(ssh_cmd)
        if not attributes:
            msg = _('Unable to parse attributes.')
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
        if 'status' not in attributes or attributes['status'] == 'offline':
            msg = (_('Array does not exist or is offline. '
                     'Current status of array is %s.') %
                   attributes.get('status'))
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)

        # Get the iSCSI names of the FlashSystem nodes
        ssh_cmd = ['svcinfo', 'lsnode', '-delim', '!']
        out, err = self._ssh(ssh_cmd)
        self._assert_ssh_return(out.strip(), '_get_node_data', ssh_cmd, out,
                                err)

        nodes = out.strip().splitlines()
        self._assert_ssh_return(nodes, '_get_node_data', ssh_cmd, out, err)
        header = nodes.pop(0)
        for node_line in nodes:
            try:
                node_data = self._get_hdr_dic(header, node_line, '!')
            except exception.VolumeBackendAPIException:
                with excutils.save_and_reraise_exception():
                    self._log_cli_output_error('_get_node_data', ssh_cmd, out,
                                               err)
            try:
                node = {
                    'id': node_data['id'],
                    'name': node_data['name'],
                    'IO_group': node_data['IO_group_id'],
                    'WWNN': node_data['WWNN'],
                    'status': node_data['status'],
                    'WWPN': [],
                    'protocol': None,
                    'iscsi_name': node_data['iscsi_name'],
                    'config_node': node_data['config_node'],
                    'ipv4': [],
                    'ipv6': [],
                }
                if node['status'] == 'online':
                    self._storage_nodes[node['id']] = node
            except KeyError:
                self._handle_keyerror('lsnode', header)
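
_get_hdr_dic itself is not shown in this example; a plausible implementation simply zips the '!'-delimited header fields against one data row. A hypothetical sketch, not the driver's actual helper:

def get_hdr_dic_sketch(header, row, delim='!'):
    # Pair each header field with the matching value in the row;
    # a length mismatch means the CLI output was malformed.
    attrs = header.split(delim)
    values = row.split(delim)
    if len(attrs) != len(values):
        raise ValueError('header/row field count mismatch')
    return dict(zip(attrs, values))

# get_hdr_dic_sketch('id!name!status', '1!node1!online')
# -> {'id': '1', 'name': 'node1', 'status': 'online'}
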
Example #33
    def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name,
                         dest_vdisk_id):
        """Copy data from src vdisk to dest vdisk.

        To be able to copy data between vdisks, we must ensure that both
        vdisks have been mapped to host. If vdisk has not been mapped,
        it must be mapped firstly. When data copy completed, vdisk
        should be restored to previous mapped or non-mapped status.
        """

        LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.', {
            'src': src_vdisk_name,
            'dest': dest_vdisk_name
        })

        connector = utils.brick_get_connector_properties()
        (src_map, src_lun_id) = self._is_vdisk_map(src_vdisk_name, connector)
        (dest_map, dest_lun_id) = self._is_vdisk_map(dest_vdisk_name,
                                                     connector)

        src_map_device = None
        src_properties = None
        dest_map_device = None
        dest_properties = None

        try:
            if not src_map:
                src_lun_id = self._map_vdisk_to_host(src_vdisk_name, connector)
            if not dest_map:
                dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name,
                                                      connector)
            src_properties = self._get_vdisk_map_properties(
                connector, src_lun_id, src_vdisk_name, src_vdisk_id,
                self._get_vdisk_params(None))
            src_map_device = self._scan_device(src_properties)

            dest_properties = self._get_vdisk_map_properties(
                connector, dest_lun_id, dest_vdisk_name, dest_vdisk_id,
                self._get_vdisk_params(None))
            dest_map_device = self._scan_device(dest_properties)

            src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name)

            # vdisk capacity is bytes, translate into MB
            size_in_mb = int(src_vdisk_attr['capacity']) / units.Mi
            volume_utils.copy_volume(src_map_device['path'],
                                     dest_map_device['path'], size_in_mb,
                                     self.configuration.volume_dd_blocksize)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to copy %(src)s to %(dest)s.'), {
                    'src': src_vdisk_name,
                    'dest': dest_vdisk_name
                })
        finally:
            if not dest_map:
                self._unmap_vdisk_from_host(dest_vdisk_name, connector)
                self._remove_device(dest_properties, dest_map_device)
            if not src_map:
                self._unmap_vdisk_from_host(src_vdisk_name, connector)
                self._remove_device(src_properties, src_map_device)

        LOG.debug('leave: _copy_vdisk_data: %(src)s -> %(dest)s.', {
            'src': src_vdisk_name,
            'dest': dest_vdisk_name
        })
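
The docstring's map-copy-restore contract boils down to a small reusable pattern: map only what is not mapped yet, and undo only what you mapped yourself. A hypothetical distillation (all names are made up):

def copy_with_temporary_mapping(ensure_mapped, copy_data, unmap):
    # ensure_mapped() returns True only if it created a new mapping;
    # unmap() runs solely for mappings created here, so pre-existing
    # mappings are left exactly as they were found.
    created = ensure_mapped()
    try:
        copy_data()
    finally:
        if created:
            unmap()
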
Example #34
def create_test_server(clients,
                       validatable=False,
                       validation_resources=None,
                       tenant_network=None,
                       wait_until=None,
                       volume_backed=False,
                       name=None,
                       flavor=None,
                       image_id=None,
                       **kwargs):
    """Common wrapper utility returning a test server.

    This method is a common wrapper returning a test server that can be
    made pingable or sshable.

    :param clients: Client manager which provides OpenStack Tempest clients.
    :param validatable: Whether the server will be pingable or sshable.
    :param validation_resources: Resources created for the connection to the
        server. Include a keypair, a security group and an IP.
    :param tenant_network: Tenant network to be used for creating a server.
    :param wait_until: Server status to wait for the server to reach after
        its creation.
    :param volume_backed: Whether the server is volume backed or not.
                          If this is true, a volume will be created and
                          create server will be requested with
                          'block_device_mapping_v2' populated with below
                          values:
                          --------------------------------------------
                          bd_map_v2 = [{
                              'uuid': volume['volume']['id'],
                              'source_type': 'volume',
                              'destination_type': 'volume',
                              'boot_index': 0,
                              'delete_on_termination': True}]
                          kwargs['block_device_mapping_v2'] = bd_map_v2
                          ---------------------------------------------
                          If server needs to be booted from volume with other
                          combination of bdm inputs than mentioned above, then
                          pass the bdm inputs explicitly as kwargs and image_id
                          as empty string ('').
    :param name: Name of the server to be provisioned. If not defined a random
        string ending with '-instance' will be generated.
    :param flavor: Flavor of the server to be provisioned. If not defined,
        CONF.compute.flavor_ref will be used instead.
    :param image_id: ID of the image to be used to provision the server. If not
        defined, CONF.compute.image_ref will be used instead.
    :returns: a (body, servers) tuple
    """

    # TODO(jlanoux) add support of wait_until PINGABLE/SSHABLE

    if name is None:
        name = data_utils.rand_name(__name__ + "-instance")
    if flavor is None:
        flavor = CONF.compute.flavor_ref
    if image_id is None:
        image_id = CONF.compute.image_ref

    kwargs = fixed_network.set_networks_kwarg(tenant_network, kwargs) or {}

    multiple_create_request = (max(kwargs.get('min_count', 0),
                                   kwargs.get('max_count', 0)) > 1)

    if CONF.validation.run_validation and validatable:
        # As a first implementation, multiple pingable or sshable servers will
        # not be supported
        if multiple_create_request:
            msg = ("Multiple pingable or sshable servers not supported at "
                   "this stage.")
            raise ValueError(msg)

        if 'security_groups' in kwargs:
            kwargs['security_groups'].append(
                {'name': validation_resources['security_group']['name']})
        else:
            try:
                kwargs['security_groups'] = [{
                    'name':
                    validation_resources['security_group']['name']
                }]
            except KeyError:
                LOG.debug("No security group provided.")

        if 'key_name' not in kwargs:
            try:
                kwargs['key_name'] = validation_resources['keypair']['name']
            except KeyError:
                LOG.debug("No key provided.")

        if CONF.validation.connect_method == 'floating':
            if wait_until is None:
                wait_until = 'ACTIVE'

        if 'user_data' not in kwargs:
            # If nothing overrides the default user data script then run
            # a simple script on the host to print networking info. This is
            # to aid in debugging ssh failures.
            script = '''
                     #!/bin/sh
                     echo "Printing {user} user authorized keys"
                     cat ~{user}/.ssh/authorized_keys || true
                     '''.format(user=CONF.validation.image_ssh_user)
            script_clean = textwrap.dedent(script).lstrip().encode('utf8')
            script_b64 = base64.b64encode(script_clean)
            kwargs['user_data'] = script_b64

    if volume_backed:
        volume_name = data_utils.rand_name(__name__ + '-volume')
        volumes_client = clients.volumes_v2_client
        params = {
            'name': volume_name,
            'imageRef': image_id,
            'size': CONF.volume.volume_size
        }
        volume = volumes_client.create_volume(**params)
        waiters.wait_for_volume_resource_status(volumes_client,
                                                volume['volume']['id'],
                                                'available')

        bd_map_v2 = [{
            'uuid': volume['volume']['id'],
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': True
        }]
        kwargs['block_device_mapping_v2'] = bd_map_v2

        # Since this is boot from volume an image does not need
        # to be specified.
        image_id = ''

    body = clients.servers_client.create_server(name=name,
                                                imageRef=image_id,
                                                flavorRef=flavor,
                                                **kwargs)

    # handle the case of multiple servers
    if multiple_create_request:
        # Get servers created which name match with name param.
        body_servers = clients.servers_client.list_servers()
        servers = [s for s in body_servers['servers']
                   if s['name'].startswith(name)]
    else:
        body = rest_client.ResponseBody(body.response, body['server'])
        servers = [body]

    # The name of the method to associate a floating IP to a server is too
    # long for PEP8 compliance so:
    assoc = clients.compute_floating_ips_client.associate_floating_ip_to_server

    if wait_until:
        for server in servers:
            try:
                waiters.wait_for_server_status(clients.servers_client,
                                               server['id'], wait_until)

                # Multiple validatable servers are not supported for now.
                # Their creation would already have failed on the
                # multiple_create_request check above.
                if CONF.validation.run_validation and validatable:
                    if CONF.validation.connect_method == 'floating':
                        assoc(floating_ip=validation_resources['floating_ip']
                              ['ip'],
                              server_id=servers[0]['id'])

            except Exception:
                with excutils.save_and_reraise_exception():
                    for server in servers:
                        try:
                            clients.servers_client.delete_server(server['id'])
                        except Exception:
                            LOG.exception('Deleting server %s failed',
                                          server['id'])
                    for server in servers:
                        # NOTE(artom) If the servers were booted with volumes
                        # and with delete_on_termination=False we need to wait
                        # for the servers to go away before proceeding with
                        # cleanup, otherwise we'll attempt to delete the
                        # volumes while they're still attached to servers that
                        # are in the process of being deleted.
                        try:
                            waiters.wait_for_server_termination(
                                clients.servers_client, server['id'])
                        except Exception:
                            LOG.exception('Server %s failed to delete in time',
                                          server['id'])

    return body, servers
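
A typical invocation from a test case might look like the following; base.BaseV2ComputeTest and self.os_primary are assumptions about the caller's Tempest fixtures, not part of this utility:

class BootFromVolumeTest(base.BaseV2ComputeTest):

    def test_boot_from_volume(self):
        # create_test_server builds the volume and the bdm_v2 mapping,
        # then waits for the server to reach ACTIVE before returning.
        body, servers = create_test_server(self.os_primary,
                                           volume_backed=True,
                                           wait_until='ACTIVE')
        self.addCleanup(self.os_primary.servers_client.delete_server,
                        servers[0]['id'])
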
Example #35
def create_object_with_dependency(creator, dep_getter, dep_creator,
                                  dep_id_attr, dep_deleter):
    """Creates an object that binds to a dependency while handling races.

    creator is a function that is expected to take the result of either
    dep_getter or dep_creator.

    The result of dep_getter and dep_creator must have the attribute named
    by dep_id_attr, which is used to determine whether the dependency
    changed during object creation.

    dep_deleter will be called with the result of dep_creator if the
    creator function fails for a non-dependency reason or the retries are
    exceeded.

    dep_getter should return None if the dependency does not exist.

    dep_creator can raise a DBDuplicateEntry to indicate that a concurrent
    create of the dependency occurred and the process will restart to get the
    concurrently created one.

    This function will return both the created object and the dependency it
    used/created.

    This function protects against all of the cases where the dependency can
    be concurrently removed by catching exceptions and restarting the
    process of creating the dependency if one no longer exists. It will
    give up after neutron.db.api.MAX_RETRIES and raise the exception it
    encounters after that.
    """
    result, dependency, dep_id, made_locally = None, None, None, False
    for attempts in range(1, db_api.MAX_RETRIES + 1):
        # we go to max + 1 here so the exception handlers can raise their
        # errors at the end
        try:
            dependency = dep_getter()
            if not dependency:
                dependency = dep_creator()
                made_locally = True
            dep_id = getattr(dependency, dep_id_attr)
        except db_exc.DBDuplicateEntry:
            # dependency was concurrently created.
            with excutils.save_and_reraise_exception() as ctx:
                if attempts < db_api.MAX_RETRIES:
                    # sleep for a random time between 0 and 1 second to
                    # make sure a concurrent worker doesn't retry again
                    # at exactly the same time
                    time.sleep(random.uniform(0, 1))
                    ctx.reraise = False
                    continue
        try:
            result = creator(dependency)
            break
        except Exception:
            with excutils.save_and_reraise_exception() as ctx:
                # check if dependency we tried to use was removed during
                # object creation
                if attempts < db_api.MAX_RETRIES:
                    dependency = dep_getter()
                    if not dependency or dep_id != getattr(dependency,
                                                           dep_id_attr):
                        ctx.reraise = False
                        continue
                # we have exceeded retries or have encountered a non-dependency
                # related failure so we try to clean up the dependency if we
                # created it before re-raising
                if made_locally and dependency:
                    try:
                        dep_deleter(dependency)
                    except Exception:
                        LOG.exception(_LE("Failed cleaning up dependency %s"),
                                      dep_id)
    return result, dependency
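
A toy invocation with in-memory stand-ins for the real database accessors; every name below is made up for illustration:

_store = {}


class _Dep(object):
    id = 1


def _dep_getter():
    return _store.get('dep')


def _dep_creator():
    _store['dep'] = _Dep()
    return _store['dep']


def _dep_deleter(dep):
    _store.pop('dep', None)


def _creator(dep):
    return {'bound_to': dep.id}


result, dep = create_object_with_dependency(
    _creator, _dep_getter, _dep_creator, 'id', _dep_deleter)
# result == {'bound_to': 1}; dep is the dependency created on the way.
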
Example #36
def call(*args, **kwargs):
    try:
        return func(*args, **kwargs)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            self.logger(e)
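
This fragment is only the inner wrapper; the enclosing decorator is not shown. A hedged reconstruction of what it plausibly looks like (class and method names are guesses, not the project's actual source):

import functools

from oslo_utils import excutils


class ExceptionLogger(object):

    def __init__(self, logger):
        self.logger = logger

    def __call__(self, func):
        # Log the exception through the configured callable, then
        # re-raise it with the original traceback intact.
        @functools.wraps(func)
        def call(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    self.logger(e)
        return call
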
Example #37
    def upload(self, req, image_id, data, size):
        image_repo = self.gateway.get_repo(req.context)
        image = None
        refresher = None
        cxt = req.context
        try:
            image = image_repo.get(image_id)
            image.status = 'saving'
            try:
                if CONF.data_api == 'glance.db.registry.api':
                    # create a trust if backend is registry
                    try:
                        # request user plugin for current token
                        user_plugin = req.environ.get('keystone.token_auth')
                        roles = []
                        # use roles from request environment because they
                        # are not transformed to lower-case unlike cxt.roles
                        for role_info in req.environ.get(
                                'keystone.token_info')['token']['roles']:
                            roles.append(role_info['name'])
                        refresher = trust_auth.TokenRefresher(
                            user_plugin, cxt.tenant, roles)
                    except Exception as e:
                        LOG.info(
                            _LI("Unable to create trust: %s "
                                "Use the existing user token."),
                            encodeutils.exception_to_unicode(e))

                image_repo.save(image, from_state='queued')
                image.set_data(data, size)

                try:
                    image_repo.save(image, from_state='saving')
                except exception.NotAuthenticated:
                    if refresher is not None:
                        # request a new token to update an image in database
                        cxt.auth_token = refresher.refresh_token()
                        image_repo = self.gateway.get_repo(req.context)
                        image_repo.save(image, from_state='saving')
                    else:
                        raise

                try:
                    # release resources required for re-auth
                    if refresher is not None:
                        refresher.release_resources()
                except Exception as e:
                    LOG.info(
                        _LI("Unable to delete trust %(trust)s: %(msg)s"), {
                            "trust": refresher.trust_id,
                            "msg": encodeutils.exception_to_unicode(e)
                        })

            except (glance_store.NotFound, exception.ImageNotFound,
                    exception.Conflict):
                msg = (_("Image %s could not be found after upload. "
                         "The image may have been deleted during the "
                         "upload, cleaning up the chunks uploaded.") %
                       image_id)
                LOG.warning(msg)
                # NOTE(sridevi): Cleaning up the uploaded chunks.
                try:
                    image.delete()
                except exception.ImageNotFound:
                    # NOTE(sridevi): Ignore this exception
                    pass
                raise webob.exc.HTTPGone(explanation=msg,
                                         request=req,
                                         content_type='text/plain')
            except exception.NotAuthenticated:
                msg = (_("Authentication error - the token may have "
                         "expired during file upload. Deleting image data for "
                         "%s.") % image_id)
                LOG.debug(msg)
                try:
                    image.delete()
                except exception.NotAuthenticated:
                    # NOTE: Ignore this exception
                    pass
                raise webob.exc.HTTPUnauthorized(explanation=msg,
                                                 request=req,
                                                 content_type='text/plain')
        except ValueError as e:
            LOG.debug("Cannot save data for image %(id)s: %(e)s", {
                'id': image_id,
                'e': encodeutils.exception_to_unicode(e)
            })
            self._restore(image_repo, image)
            raise webob.exc.HTTPBadRequest(
                explanation=encodeutils.exception_to_unicode(e))

        except glance_store.StoreAddDisabled:
            msg = _("Error in store configuration. Adding images to store "
                    "is disabled.")
            LOG.exception(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPGone(explanation=msg,
                                     request=req,
                                     content_type='text/plain')

        except exception.InvalidImageStatusTransition as e:
            msg = encodeutils.exception_to_unicode(e)
            LOG.exception(msg)
            raise webob.exc.HTTPConflict(explanation=e.msg, request=req)

        except exception.Forbidden as e:
            msg = ("Not allowed to upload image data for image %s" % image_id)
            LOG.debug(msg)
            raise webob.exc.HTTPForbidden(explanation=msg, request=req)

        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

        except glance_store.StorageFull as e:
            msg = _("Image storage media "
                    "is full: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except exception.StorageQuotaFull as e:
            msg = _("Image exceeds the storage "
                    "quota: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except exception.ImageSizeLimitExceeded as e:
            msg = _("The incoming image is "
                    "too large: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except glance_store.StorageWriteDenied as e:
            msg = _("Insufficient permissions on image "
                    "storage media: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                                   request=req)

        except cursive_exception.SignatureVerificationError as e:
            msg = (
                _LE("Signature verification failed for image %(id)s: %(e)s") %
                {
                    'id': image_id,
                    'e': encodeutils.exception_to_unicode(e)
                })
            LOG.error(msg)
            self._delete(image_repo, image)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        except webob.exc.HTTPGone as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to upload image data due to HTTP error"))

        except webob.exc.HTTPError as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to upload image data due to HTTP error"))
                self._restore(image_repo, image)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    _LE("Failed to upload image data due to "
                        "internal error"))
                self._restore(image_repo, image)
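
Most handlers above fall back to self._restore before raising an HTTP error. A sketch consistent with how it is called here, assuming LOG and encodeutils from the surrounding module (hedged, not verbatim Glance source):

def _restore(self, image_repo, image):
    # Put the image back to 'queued' so the data can be re-uploaded,
    # and never let a restore failure mask the original exception.
    try:
        if image_repo and image:
            image.status = 'queued'
            image_repo.save(image)
    except Exception as e:
        LOG.exception("Unable to restore image %(id)s: %(e)s",
                      {'id': image.image_id,
                       'e': encodeutils.exception_to_unicode(e)})
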
Example #38
    def _create_volume_from(self, volume, src_obj):
        src_virtual_disk = self._get_virtual_disk_for(src_obj,
                                                      raise_not_found=True)

        if src_virtual_disk.DiskStatus != 'Online':
            LOG.warning(
                "Attempting to create a volume from virtual disk "
                "%(disk)s that is in %(state)s state.", {
                    'disk': src_virtual_disk.Id,
                    'state': src_virtual_disk.DiskStatus
                })

        volume_options = self._get_volume_options(volume)
        profile_id = self._get_storage_profile_id(
            volume_options[self.DATACORE_STORAGE_PROFILE_KEY])
        pool_names = volume_options[self.DATACORE_DISK_POOLS_KEY]

        volume_virtual_disk = self._create_virtual_disk_copy(
            src_virtual_disk,
            volume['id'],
            volume['display_name'],
            profile_id=profile_id,
            pool_names=pool_names)

        volume_logical_disk = datacore_utils.get_first(
            lambda disk: disk.VirtualDiskId == volume_virtual_disk.Id,
            self._api.get_logical_disks())

        try:
            volume_virtual_disk = self._set_virtual_disk_size(
                volume_virtual_disk, self._get_size_in_bytes(volume['size']))

            disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
            if disk_type == self.DATACORE_MIRRORED_DISK:
                pools = self._get_available_disk_pools(pool_names)
                selected_pool = datacore_utils.get_first_or_default(
                    lambda pool: (
                        pool.ServerId != volume_logical_disk.ServerHostId
                        and pool.Id != volume_logical_disk.PoolId),
                    pools, None)
                if selected_pool:
                    logical_disk = self._api.create_pool_logical_disk(
                        selected_pool.Id, 'Striped',
                        volume_virtual_disk.Size.Value)
                    self._api.bind_logical_disk(volume_virtual_disk.Id,
                                                logical_disk.Id, 'Second',
                                                True, False, True)
                else:
                    msg = _("Can not create mirrored virtual disk. "
                            "Suitable disk pools not found.")
                    LOG.error(msg)
                    raise cinder_exception.VolumeDriverException(message=msg)

            volume_virtual_disk = self._await_virtual_disk_online(
                volume_virtual_disk.Id)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Creation of volume %(volume)s failed.",
                              {'volume': volume['id']})
                try:
                    self._api.delete_virtual_disk(volume_virtual_disk.Id, True)
                except datacore_exception.DataCoreException as e:
                    LOG.warning(
                        "An error occurred on a cleanup after failed "
                        "creation of volume %(volume)s: %(error)s.", {
                            'volume': volume['id'],
                            'error': e
                        })

        return {'provider_location': volume_virtual_disk.Id}
Example #39
    def _update_snat_v6_addrs_after_intf_update(self, resource, event,
                                                trigger, context, subnets,
                                                port, router_id,
                                                new_interface, **kwargs):
        if new_interface:
            # _add_csnat_on_interface_create handler deals with new ports
            return
        # if not a new interface, the interface was added to a new subnet,
        # which is the first in this list
        subnet = subnets[0]
        if not subnet or subnet['ip_version'] != 6:
            return
        # NOTE: For IPv6 additional subnets added to the same
        # network we need to update the CSNAT port with respective
        # IPv6 subnet
        # Add new prefix to an existing ipv6 csnat port with the
        # same network id if one exists
        admin_ctx = context.elevated()
        router = self._get_router(admin_ctx, router_id)
        cs_port = self._find_v6_router_port_by_network_and_device_owner(
            router, subnet['network_id'], const.DEVICE_OWNER_ROUTER_SNAT)
        if not cs_port:
            return
        new_fixed_ip = {'subnet_id': subnet['id']}
        fixed_ips = list(cs_port['fixed_ips'])
        fixed_ips.append(new_fixed_ip)
        try:
            updated_port = self._core_plugin.update_port(
                admin_ctx, cs_port['id'], {'port': {
                    'fixed_ips': fixed_ips
                }})
        except Exception:
            with excutils.save_and_reraise_exception():
                # we need to try to undo the updated router
                # interface from above so it's not out of sync
                # with the csnat port.
                # TODO(kevinbenton): switch to taskflow to manage
                # these rollbacks.
                @db_api.retry_db_errors
                def revert():
                    # TODO(kevinbenton): even though we get the
                    # port each time, there is a potential race
                    # where we update the port with stale IPs if
                    # another interface operation is occurring at
                    # the same time. This can be fixed in the
                    # future with a compare-and-swap style update
                    # using the revision number of the port.
                    p = self._core_plugin.get_port(admin_ctx, port['id'])
                    rollback_fixed_ips = [
                        ip for ip in p['fixed_ips']
                        if ip['subnet_id'] != subnet['id']
                    ]
                    upd = {'port': {'fixed_ips': rollback_fixed_ips}}
                    self._core_plugin.update_port(admin_ctx, port['id'], upd)

                try:
                    revert()
                except Exception:
                    LOG.exception(
                        _LE("Failed to revert change "
                            "to router port %s."), port['id'])
        LOG.debug("CSNAT port updated for IPv6 subnet: %s", updated_port)
Example #40
    def download(self,
                 context,
                 image_id,
                 data=None,
                 dst_path=None,
                 trusted_certs=None):
        """Calls out to Glance for data and writes data."""
        if CONF.glance.allowed_direct_url_schemes and dst_path is not None:
            image = self.show(context, image_id, include_locations=True)
            for entry in image.get('locations', []):
                loc_url = entry['url']
                loc_meta = entry['metadata']
                o = urlparse.urlparse(loc_url)
                xfer_mod = self._get_transfer_module(o.scheme)
                if xfer_mod:
                    try:
                        xfer_mod.download(context, o, dst_path, loc_meta)
                        LOG.info("Successfully transferred using %s", o.scheme)
                        return
                    except Exception:
                        LOG.exception("Download image error")

        try:
            image_chunks = self._client.call(context,
                                             2,
                                             'data',
                                             args=(image_id, ))
        except Exception:
            _reraise_translated_image_exception(image_id)

        if image_chunks.wrapped is None:
            # None is a valid return value, but there's nothing we can do
            # with an image that has no associated data
            raise exception.ImageUnacceptable(
                image_id=image_id, reason='Image has no associated data')

        # Retrieve properties for verification of Glance image signature
        verifier = self._get_verifier(context, image_id, trusted_certs)

        close_file = False
        if data is None and dst_path:
            data = open(dst_path, 'wb')
            close_file = True

        if data is None:

            # Perform image signature verification
            if verifier:
                try:
                    for chunk in image_chunks:
                        verifier.update(chunk)
                    verifier.verify()

                    LOG.info(
                        'Image signature verification succeeded '
                        'for image: %s', image_id)

                except cryptography.exceptions.InvalidSignature:
                    with excutils.save_and_reraise_exception():
                        LOG.error(
                            'Image signature verification failed '
                            'for image: %s', image_id)
            return image_chunks
        else:
            try:
                for chunk in image_chunks:
                    if verifier:
                        verifier.update(chunk)
                    data.write(chunk)
                if verifier:
                    verifier.verify()
                    LOG.info(
                        'Image signature verification succeeded '
                        'for image %s', image_id)
            except cryptography.exceptions.InvalidSignature:
                data.truncate(0)
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        'Image signature verification failed '
                        'for image: %s', image_id)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    LOG.error("Error writing to %(path)s: %(exception)s", {
                        'path': dst_path,
                        'exception': ex
                    })
            finally:
                if close_file:
                    # Ensure that the data is pushed all the way down to
                    # persistent storage. This ensures that in the event of a
                    # subsequent host crash we don't have running instances
                    # using a corrupt backing file.
                    data.flush()
                    self._safe_fsync(data)
                    data.close()
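
self._safe_fsync is presumably a tolerant wrapper around os.fsync, since data can be any file-like object. A hedged sketch of that helper:

import io
import os


def _safe_fsync(fh):
    # fsync only when the object is backed by a real file descriptor;
    # in-memory buffers and pipes are skipped rather than raising.
    try:
        os.fsync(fh.fileno())
    except (AttributeError, OSError, io.UnsupportedOperation):
        pass
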
Example #41
    def retype(self, context, topic, volume_id,
               request_spec, filter_properties=None, volume=None):
        """Schedule the modification of a volume's type.

        :param context: the request context
        :param topic: the topic listened on
        :param volume_id: the ID of the volume to retype
        :param request_spec: parameters for this retype request
        :param filter_properties: parameters to filter by
        :param volume: the volume object to retype
        """

        self._wait_for_scheduler()

        # FIXME(dulek): Remove this in v3.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the
            # volume by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        def _retype_volume_set_error(self, context, ex, request_spec,
                                     volume_ref, msg, reservations):
            if reservations:
                QUOTAS.rollback(context, reservations)
            previous_status = (
                volume_ref.previous_status or volume_ref.status)
            volume_state = {'volume_state': {'status': previous_status}}
            self._set_volume_state_and_notify('retype', volume_state,
                                              context, ex, request_spec, msg)

        reservations = request_spec.get('quota_reservations')
        old_reservations = request_spec.get('old_reservations', None)
        new_type = request_spec.get('volume_type')
        if new_type is None:
            msg = _('New volume type not specified in request_spec.')
            ex = exception.ParameterNotFound(param='volume_type')
            _retype_volume_set_error(self, context, ex, request_spec,
                                     volume, msg, reservations)

        # Default migration policy is 'never'
        migration_policy = request_spec.get('migration_policy')
        if not migration_policy:
            migration_policy = 'never'

        try:
            tgt_host = self.driver.find_retype_host(context, request_spec,
                                                    filter_properties,
                                                    migration_policy)
        except exception.NoValidHost as ex:
            msg = (_("Could not find a host for volume %(volume_id)s with "
                     "type %(type_id)s.") %
                   {'type_id': new_type['id'], 'volume_id': volume.id})
            _retype_volume_set_error(self, context, ex, request_spec,
                                     volume, msg, reservations)
        except Exception as ex:
            with excutils.save_and_reraise_exception():
                _retype_volume_set_error(self, context, ex, request_spec,
                                         volume, None, reservations)
        else:
            volume_rpcapi.VolumeAPI().retype(context, volume,
                                             new_type['id'], tgt_host,
                                             migration_policy,
                                             reservations,
                                             old_reservations)
Example #42
    def create_volume(self, volume):
        """Creates a volume.

        :param volume: Volume object
        :return: Dictionary of changes to the volume object to be persisted
        """

        volume_options = self._get_volume_options(volume)

        disk_type = volume_options[self.DATACORE_DISK_TYPE_KEY]
        if disk_type == self.DATACORE_MIRRORED_DISK:
            logical_disk_count = 2
            virtual_disk_type = 'MultiPathMirrored'
        elif disk_type == self.DATACORE_SINGLE_DISK:
            logical_disk_count = 1
            virtual_disk_type = 'NonMirrored'
        else:
            msg = _("Virtual disk type '%s' is not valid.") % disk_type
            LOG.error(msg)
            raise cinder_exception.VolumeDriverException(message=msg)

        profile_id = self._get_storage_profile_id(
            volume_options[self.DATACORE_STORAGE_PROFILE_KEY])

        pools = datacore_utils.get_distinct_by(
            lambda pool: pool.ServerId,
            self._get_available_disk_pools(
                volume_options[self.DATACORE_DISK_POOLS_KEY]))

        if len(pools) < logical_disk_count:
            msg = _("Suitable disk pools were not found for "
                    "creating virtual disk.")
            LOG.error(msg)
            raise cinder_exception.VolumeDriverException(message=msg)

        disk_size = self._get_size_in_bytes(volume['size'])

        logical_disks = []
        virtual_disk = None
        try:
            for logical_disk_pool in pools[:logical_disk_count]:
                logical_disks.append(
                    self._api.create_pool_logical_disk(logical_disk_pool.Id,
                                                       'Striped', disk_size))

            virtual_disk_data = self._api.build_virtual_disk_data(
                volume['id'], virtual_disk_type, disk_size,
                volume['display_name'], profile_id)

            virtual_disk = self._api.create_virtual_disk_ex2(
                virtual_disk_data, logical_disks[0].Id,
                logical_disks[1].Id if logical_disk_count == 2 else None, True)

            virtual_disk = self._await_virtual_disk_online(virtual_disk.Id)

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception("Creation of volume %(volume)s failed.",
                              {'volume': volume['id']})
                try:
                    if virtual_disk:
                        self._api.delete_virtual_disk(virtual_disk.Id, True)
                    else:
                        for logical_disk in logical_disks:
                            self._api.delete_logical_disk(logical_disk.Id)
                except datacore_exception.DataCoreException as e:
                    LOG.warning(
                        "An error occurred on a cleanup after failed "
                        "creation of volume %(volume)s: %(error)s.", {
                            'volume': volume['id'],
                            'error': e
                        })

        return {'provider_location': virtual_disk.Id}
Example #43
    def update(self,
               context,
               group,
               name,
               description,
               add_volumes,
               remove_volumes,
               allow_empty=False):
        """Update consistency group."""
        add_volumes_list = []
        remove_volumes_list = []
        if add_volumes:
            add_volumes = add_volumes.strip(',')
            add_volumes_list = add_volumes.split(',')
        if remove_volumes:
            remove_volumes = remove_volumes.strip(',')
            remove_volumes_list = remove_volumes.split(',')

        invalid_uuids = []
        for uuid in add_volumes_list:
            if uuid in remove_volumes_list:
                invalid_uuids.append(uuid)
        if invalid_uuids:
            msg = _("UUIDs %s are in both add and remove volume "
                    "list.") % invalid_uuids
            raise exception.InvalidVolume(reason=msg)

        # Validate name.
        if name == group.name:
            name = None

        # Validate description.
        if description == group.description:
            description = None
        self._check_update(group, name, description, add_volumes,
                           remove_volumes, allow_empty)

        fields = {'updated_at': timeutils.utcnow()}

        # Update name and description in db now. No need to send them over
        # through an RPC call.
        if allow_empty:
            if name is not None:
                fields['name'] = name
            if description is not None:
                fields['description'] = description
        else:
            if name:
                fields['name'] = name
            if description:
                fields['description'] = description

        # NOTE(geguileo): We will use the updating status in the CG as a lock
        # mechanism to prevent volume add/remove races with other APIs while
        # we figure out if we really need to add or remove volumes.
        if add_volumes or remove_volumes:
            fields['status'] = c_fields.ConsistencyGroupStatus.UPDATING

            # We cannot modify the members of this CG if the CG is being used
            # to create another CG or a CGsnapshot is being created
            filters = [
                ~db.cg_creating_from_src(cg_id=group.id),
                ~db.cgsnapshot_creating_from_src()
            ]
        else:
            filters = []

        expected = {'status': c_fields.ConsistencyGroupStatus.AVAILABLE}
        if not group.conditional_update(fields, expected, filters):
            msg = _("Cannot update consistency group %s, status must be "
                    "available, and it cannot be the source for an ongoing "
                    "CG or CG Snapshot creation.") % group.id
            raise exception.InvalidConsistencyGroup(reason=msg)

        # Now the CG is "locked" for updating
        try:
            # Validate volumes in add_volumes and remove_volumes.
            add_volumes_new = self._validate_add_volumes(
                context, group.volumes, add_volumes_list, group)
            remove_volumes_new = self._validate_remove_volumes(
                group.volumes, remove_volumes_list, group)

            self._check_update(group, name, description, add_volumes_new,
                               remove_volumes_new, allow_empty)
        except Exception:
            # If we have an error on the volume_lists we must return status to
            # available as we were doing before removing API races
            with excutils.save_and_reraise_exception():
                group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
                group.save()

        # Do an RPC call only if the update request includes
        # adding/removing volumes. add_volumes_new and remove_volumes_new
        # are strings of volume UUIDs separated by commas with no spaces
        # in between.
        if add_volumes_new or remove_volumes_new:
            self.volume_rpcapi.update_consistencygroup(
                context,
                group,
                add_volumes=add_volumes_new,
                remove_volumes=remove_volumes_new)
        # If there are no new volumes to add or remove and we had changed
        # the status to updating, turn it back to available
        elif group.status == c_fields.ConsistencyGroupStatus.UPDATING:
            group.status = c_fields.ConsistencyGroupStatus.AVAILABLE
            group.save()
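
group.conditional_update is effectively a compare-and-swap expressed as a single UPDATE: the write succeeds only if the row still matches the expected status and filters, so concurrent updaters cannot both win. A conceptual SQLAlchemy-flavored sketch (assumed names, not the actual cinder/oslo code):

def conditional_update_sketch(session, model, obj_id, fields, expected):
    # The WHERE clause encodes `expected`; a row count of zero means
    # another caller changed the row first and we lost the race.
    query = session.query(model).filter_by(id=obj_id, **expected)
    updated = query.update(fields, synchronize_session=False)
    return bool(updated)
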
Example #44
    def initialize_connection(self, volume, connector):
        """Perform necessary work to make a FC connection.

        To be able to create an FC connection from a given host to a
        volume, we must:
        1. Translate the given WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)

        """
        volume_name = self._get_target_vol(volume)

        # Check if a host object is defined for this host name
        host_name = self._assistant.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to InStorage/MCS
            host_name = self._assistant.create_host(connector)

        volume_attributes = self._assistant.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        lun_id = self._assistant.map_vol_to_host(volume_name, host_name, True)

        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error('Did not find expected column name in '
                      'lsvdisk: %s.', e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not io_group_nodes:
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') % {
                             'gid': IO_group,
                             'vol': volume_name
                         })
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(
                    'initialize_connection: Did not find a '
                    'preferred node for volume %s.', volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume.id

            conn_wwpns = self._assistant.get_conn_fc_wwpns(host_name)

            # If conn_wwpns is empty, then that means that there were
            # no target ports with visibility to any of the initiators
            # so we return all target ports.
            if len(conn_wwpns) == 0:
                for node in self._state['storage_nodes'].values():
                    conn_wwpns.extend(node['WWPN'])

            properties['target_wwn'] = conn_wwpns

            i_t_map = self.make_initiator_target_all2all_map(
                connector['wwpns'], conn_wwpns)
            properties['initiator_target_map'] = i_t_map

        except Exception:
            with excutils.save_and_reraise_exception():
                self._do_terminate_connection(volume, connector)
                LOG.error(
                    'initialize_connection: Failed '
                    'to collect return '
                    'properties for volume %(vol)s and connector '
                    '%(conn)s.\n', {
                        'vol': volume,
                        'conn': connector
                    })

        info = {
            'driver_volume_type': 'fibre_channel',
            'data': properties,
        }
        fczm_utils.add_fc_zone(info)
        return info
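
make_initiator_target_all2all_map pairs every initiator WWPN with every target WWPN, which is the dict shape FC zone managers consume. A sketch of the shape only; the real helper may also normalize WWPN formatting:

def make_initiator_target_all2all_map_sketch(initiator_wwpns,
                                             target_wwpns):
    # Zone each initiator to every target port.
    return {initiator: list(target_wwpns)
            for initiator in initiator_wwpns}
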
Example #45
    def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot_id):
        try:
            cgsnapshot = objects.CGSnapshot.get_by_id(context, cgsnapshot_id)
            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                context, cgsnapshot.id)

            if not snapshots:
                msg = _("Cgsnahost is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for snapshot in snapshots:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['cgsnapshot'] = cgsnapshot
                kwargs['consistencygroup'] = group
                kwargs['snapshot'] = snapshot
                volume_type_id = snapshot.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since cgsnapshot is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context, snapshot.volume_size, None,
                                           None, **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(
                            _LE("Error occurred when creating volume "
                                "entry from snapshot in the process of "
                                "creating consistency group %(group)s "
                                "from cgsnapshot %(cgsnap)s."), {
                                    'group': group.id,
                                    'cgsnap': cgsnapshot.id
                                })
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(
                        _LE("Error occurred when creating consistency "
                            "group %(group)s from cgsnapshot "
                            "%(cgsnap)s."), {
                                'group': group.id,
                                'cgsnap': cgsnapshot.id
                            })

        volumes = self.db.volume_get_all_by_group(context, group.id)
        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'],
                                  {'host': group.get('host')})

        self.volume_rpcapi.create_consistencygroup_from_src(
            context, group, cgsnapshot)
Example #46
    def setup_server(self, network_info, metadata=None):
        """Set up and configure share server.

        Sets up and configures share server with given network parameters.
        """
        # Only support single security service with type 'active_directory'
        vdm_name = network_info['server_id']
        vlan_id = network_info['segmentation_id']
        active_directory = None
        allocated_interfaces = []

        if network_info.get('security_services'):
            is_valid, active_directory = self._get_valid_security_service(
                network_info['security_services'])

            if not is_valid:
                raise exception.EMCVmaxXMLAPIError(err=active_directory)

        try:
            if not self._vdm_exist(vdm_name):
                LOG.debug(
                    'Share server %s not found, creating '
                    'share server...', vdm_name)
                self._get_context('VDM').create(vdm_name, self.mover_name)

            devices = self.get_managed_ports()

            for net_info in network_info['network_allocations']:
                random.shuffle(devices)
                ip_version = net_info['ip_version']
                interface = {
                    'name': net_info['id'][-12:],
                    'device_name': devices[0],
                    'ip': net_info['ip_address'],
                    'mover_name': self.mover_name,
                    'vlan_id': vlan_id if vlan_id else -1,
                }
                if ip_version == 6:
                    interface['ip_version'] = ip_version
                    interface['net_mask'] = six.text_type(
                        utils.cidr_to_prefixlen(network_info['cidr']))
                else:
                    interface['net_mask'] = utils.cidr_to_netmask(
                        network_info['cidr'])

                self._get_context('MoverInterface').create(interface)

                allocated_interfaces.append(interface)

            cifs_interface = allocated_interfaces[0]
            nfs_interface = allocated_interfaces[1]
            if active_directory:
                self._configure_active_directory(active_directory, vdm_name,
                                                 cifs_interface)

            self._get_context('VDM').attach_nfs_interface(
                vdm_name, nfs_interface['name'])

            return {
                'share_server_name': vdm_name,
                'cifs_if': cifs_interface['ip'],
                'nfs_if': nfs_interface['ip'],
            }

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception('Could not set up server.')
                server_details = self._construct_backend_details(
                    vdm_name, allocated_interfaces)
                self.teardown_server(server_details,
                                     network_info['security_services'])
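Note how setup_server appends each interface to allocated_interfaces only after its creation succeeds, so the failure handler can tear down exactly what exists. A hedged sketch of that bookkeeping, with allocate and teardown as stand-in callables:

from oslo_utils import excutils


def setup_with_rollback(resources, allocate, teardown):
    """Allocate resources one by one; on failure, tear down only what
    was actually allocated, then re-raise the original exception.
    """
    allocated = []
    try:
        for res in resources:
            allocate(res)
            allocated.append(res)  # record only successful allocations
        return allocated
    except Exception:
        with excutils.save_and_reraise_exception():
            # Undo in reverse order; the original exception still wins.
            for res in reversed(allocated):
                teardown(res)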
Example #47
    def _cast_create_consistencygroup(self, context, group, request_spec_list,
                                      filter_properties_list):

        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type', None)
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id', None)

                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # Make sure we don't pass an empty dict onward.
                    specs = None

                volume_properties = {
                    # Size must be populated for the scheduler.
                    'size': 0,
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id',
                                                          None),
                    'display_description': request_spec.get('description',
                                                            None),
                    'display_name': request_spec.get('name', None),
                    'volume_type_id': volume_type_id,
                }

                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(
                        _LE("Error occurred when building "
                            "request spec list for consistency group "
                            "%s."), group.id)

        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_consistencygroup(
            context,
            CONF.volume_topic,
            group,
            request_spec_list=request_spec_list,
            filter_properties_list=filter_properties_list)
Example #48
    def _create_cg_from_source_cg(self, context, group, source_cgid):
        try:
            source_cg = objects.ConsistencyGroup.get_by_id(
                context, source_cgid)
            source_vols = self.db.volume_get_all_by_group(
                context, source_cg.id)

            if not source_vols:
                msg = _("Source CG is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for source_vol in source_vols:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['source_cg'] = source_cg
                kwargs['consistencygroup'] = group
                kwargs['source_volume'] = source_vol
                volume_type_id = source_vol.get('volume_type_id')
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since source_cg is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If an error occurs, taskflow will handle rollback of the
                # quota and removal of the volume entry in the db.
                try:
                    self.volume_api.create(context, source_vol['size'], None,
                                           None, **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(
                            _LE("Error occurred when creating cloned "
                                "volume in the process of creating "
                                "consistency group %(group)s from "
                                "source CG %(source_cg)s."), {
                                    'group': group.id,
                                    'source_cg': source_cg.id
                                })
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(
                        _LE("Error occurred when creating consistency "
                            "group %(group)s from source CG "
                            "%(source_cg)s."), {
                                'group': group.id,
                                'source_cg': source_cg.id
                            })

        volumes = self.db.volume_get_all_by_group(context, group.id)
        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'], {'host': group.host})

        self.volume_rpcapi.create_consistencygroup_from_src(
            context, group, None, source_cg)
Example #49
    def create(self,
               context,
               name,
               description,
               cg_volume_types,
               availability_zone=None):
        check_policy(context, 'create')

        volume_type_list = cg_volume_types.split(',')

        # NOTE: Admin context is required to get extra_specs of volume_types.
        req_volume_types = (self.db.volume_types_get_by_name_or_id(
            context.elevated(), volume_type_list))

        req_volume_type_ids = ""
        for voltype in req_volume_types:
            req_volume_type_ids = (req_volume_type_ids + voltype.get('id') +
                                   ",")
        if not req_volume_type_ids:
            req_volume_type_ids = None

        availability_zone = self._extract_availability_zone(availability_zone)
        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'availability_zone': availability_zone,
            'status': c_fields.ConsistencyGroupStatus.CREATING,
            'name': name,
            'description': description,
            'volume_type_id': req_volume_type_ids
        }
        group = None
        try:
            group = objects.ConsistencyGroup(context=context, **kwargs)
            group.create()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error occurred when creating consistency group"
                        " %s."), name)

        request_spec_list = []
        filter_properties_list = []
        for req_volume_type in req_volume_types:
            request_spec = {
                'volume_type': req_volume_type.copy(),
                'consistencygroup_id': group.id
            }
            filter_properties = {}
            request_spec_list.append(request_spec)
            filter_properties_list.append(filter_properties)

        # Update quota for consistencygroups
        self.update_quota(context, group, 1)

        self._cast_create_consistencygroup(context, group, request_spec_list,
                                           filter_properties_list)

        return group
Example #50
    def create_from_src(self,
                        context,
                        name,
                        description=None,
                        cgsnapshot_id=None,
                        source_cgid=None):
        check_policy(context, 'create')

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': c_fields.ConsistencyGroupStatus.CREATING,
            'name': name,
            'description': description,
            'cgsnapshot_id': cgsnapshot_id,
            'source_cgid': source_cgid,
        }

        group = None
        try:
            group = objects.ConsistencyGroup(context=context, **kwargs)
            group.create(cg_snap_id=cgsnapshot_id, cg_id=source_cgid)
        except exception.ConsistencyGroupNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Source CG %(source_cg)s not found when "
                        "creating consistency group %(cg)s from "
                        "source."), {
                            'cg': name,
                            'source_cg': source_cgid
                        })
        except exception.CgSnapshotNotFound:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("CG snapshot %(cgsnap)s not found when creating "
                        "consistency group %(cg)s from source."), {
                            'cg': name,
                            'cgsnap': cgsnapshot_id
                        })
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error occurred when creating consistency group"
                        " %(cg)s from cgsnapshot %(cgsnap)s."), {
                            'cg': name,
                            'cgsnap': cgsnapshot_id
                        })

        # Update quota for consistencygroups
        self.update_quota(context, group, 1)

        if not group.host:
            msg = _("No host to create consistency group %s.") % group.id
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        if cgsnapshot_id:
            self._create_cg_from_cgsnapshot(context, group, cgsnapshot_id)
        elif source_cgid:
            self._create_cg_from_source_cg(context, group, source_cgid)

        return group
Example #51
    def _update(self, request, id, body, **kwargs):
        body = Controller.prepare_request_body(request.context,
                                               body,
                                               False,
                                               self._resource,
                                               self._attr_info,
                                               allow_bulk=self._allow_bulk)
        action = self._plugin_handlers[self.UPDATE]
        # Load object to check authz
        # but pass only attributes in the original body and required
        # by the policy engine to the policy 'brain'
        field_list = [
            name for (name, value) in six.iteritems(self._attr_info)
            if (value.get('required_by_policy') or value.get('primary_key')
                or 'default' not in value)
        ]
        # Ensure policy engine is initialized
        policy.init()
        parent_id = kwargs.get(self._parent_id_name)
        orig_obj = self._item(request,
                              id,
                              field_list=field_list,
                              parent_id=parent_id)
        orig_object_copy = copy.copy(orig_obj)
        orig_obj.update(body[self._resource])
        # Make a list of attributes to be updated to inform the policy engine
        # which attributes are set explicitly so that it can distinguish them
        # from the ones that are set to their default values.
        orig_obj[const.ATTRIBUTES_TO_UPDATE] = body[self._resource].keys()
        try:
            policy.enforce(request.context,
                           action,
                           orig_obj,
                           pluralized=self._collection)
        except oslo_policy.PolicyNotAuthorized:
            with excutils.save_and_reraise_exception() as ctxt:
                # If a tenant is modifying its own object, it's safe to return
                # a 403. Otherwise, pretend that it doesn't exist to avoid
                # giving away information.
                if request.context.tenant_id != orig_obj['tenant_id']:
                    ctxt.reraise = False
            msg = _('The resource could not be found.')
            raise webob.exc.HTTPNotFound(msg)

        obj_updater = getattr(self._plugin, action)
        kwargs = {self._resource: body}
        if parent_id:
            kwargs[self._parent_id_name] = parent_id
        obj = obj_updater(request.context, id, **kwargs)
        # Usually an update operation does not alter resource usage, but
        # since there might be side effects it is worth checking for changes
        # in resource usage here as well (e.g., a tenant port is created
        # when a router interface is added).
        resource_registry.set_resources_dirty(request.context)

        result = {self._resource: self._view(request.context, obj)}
        notifier_method = self._resource + '.update.end'
        self._notifier.info(request.context, notifier_method, result)
        self._send_dhcp_notification(request.context, result, notifier_method)
        self._send_nova_notification(action, orig_object_copy, result)
        return result
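The PolicyNotAuthorized handler above demonstrates the suppress-and-substitute idiom: setting ctxt.reraise = False inside the context manager cancels the re-raise so a less revealing error can be raised after the with block. A stripped-down sketch under assumed names (NotAuthorized, NotFound, and is_owner are hypothetical, not the Neutron types):

from oslo_utils import excutils


class NotAuthorized(Exception):
    pass


class NotFound(Exception):
    pass


def enforce_or_hide(check, is_owner):
    try:
        check()
    except NotAuthorized:
        with excutils.save_and_reraise_exception() as ctxt:
            if not is_owner:
                # Mask the authorization failure so outsiders cannot
                # probe for the existence of other tenants' resources.
                ctxt.reraise = False
        # Reached only when the re-raise was cancelled above.
        raise NotFound('The resource could not be found.')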
Example #52
    def create_from_src(self,
                        context,
                        name,
                        description=None,
                        cgsnapshot_id=None,
                        source_cgid=None):
        check_policy(context, 'create')
        cgsnapshot = None
        orig_cg = None
        if cgsnapshot_id:
            try:
                cgsnapshot = objects.CGSnapshot.get_by_id(
                    context, cgsnapshot_id)
            except exception.CgSnapshotNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("CG snapshot %(cgsnap)s not found when "
                            "creating consistency group %(cg)s from "
                            "source."), {
                                'cg': name,
                                'cgsnap': cgsnapshot_id
                            })
            else:
                orig_cg = cgsnapshot.consistencygroup

        source_cg = None
        if source_cgid:
            try:
                source_cg = objects.ConsistencyGroup.get_by_id(
                    context, source_cgid)
            except exception.ConsistencyGroupNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Source CG %(source_cg)s not found when "
                            "creating consistency group %(cg)s from "
                            "source."), {
                                'cg': name,
                                'source_cg': source_cgid
                            })

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': c_fields.ConsistencyGroupStatus.CREATING,
            'name': name,
            'description': description,
            'cgsnapshot_id': cgsnapshot_id,
            'source_cgid': source_cgid,
        }

        if orig_cg:
            kwargs['volume_type_id'] = orig_cg.volume_type_id
            kwargs['availability_zone'] = orig_cg.availability_zone
            kwargs['host'] = orig_cg.host

        if source_cg:
            kwargs['volume_type_id'] = source_cg.volume_type_id
            kwargs['availability_zone'] = source_cg.availability_zone
            kwargs['host'] = source_cg.host

        group = None
        try:
            group = objects.ConsistencyGroup(context=context, **kwargs)
            group.create()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error occurred when creating consistency group"
                        " %(cg)s from cgsnapshot %(cgsnap)s."), {
                            'cg': name,
                            'cgsnap': cgsnapshot_id
                        })

        # Update quota for consistencygroups
        self.update_quota(context, group, 1)

        if not group.host:
            msg = _("No host to create consistency group %s.") % group.id
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        if cgsnapshot:
            self._create_cg_from_cgsnapshot(context, group, cgsnapshot)
        elif source_cg:
            self._create_cg_from_source_cg(context, group, source_cg)

        return group
Example #53
    def _process_router(self, ri):
        """Process a router, apply latest configuration and update router_info.

        Get the router dict from  RouterInfo and proceed to detect changes
        from the last known state. When new ports or deleted ports are
        detected, `internal_network_added()` or `internal_networks_removed()`
        are called accordingly. Similarly changes in ex_gw_port causes
         `external_gateway_added()` or `external_gateway_removed()` calls.
        Next, floating_ips and routes are processed. Also, latest state is
        stored in ri.internal_ports and ri.ex_gw_port for future comparisons.

        :param ri : RouterInfo object of the router being processed.
        :return:None
        :raises: networking_cisco.plugins.cisco.cfg_agent.cfg_exceptions.
        DriverException if the configuration operation fails.
        """
        try:
            ex_gw_port = ri.router.get('gw_port')
            ri.ha_info = ri.router.get('ha_info', None)
            internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])

            existing_port_ids = set([p['id'] for p in ri.internal_ports])
            current_port_ids = set(
                [p['id'] for p in internal_ports if p['admin_state_up']])
            new_ports = [
                p for p in internal_ports
                if p['id'] in (current_port_ids - existing_port_ids)
            ]
            old_ports = [
                p for p in ri.internal_ports if p['id'] not in current_port_ids
            ]

            new_port_ids = [p['id'] for p in new_ports]
            old_port_ids = [p['id'] for p in old_ports]
            list_port_ids_up = []
            LOG.debug("++ new_port_ids = %s" % (pp.pformat(new_port_ids)))
            LOG.debug("++ old_port_ids = %s" % (pp.pformat(old_port_ids)))

            for p in new_ports:
                self._set_subnet_info(p)
                self._internal_network_added(ri, p, ex_gw_port)
                ri.internal_ports.append(p)
                list_port_ids_up.append(p['id'])

            for p in old_ports:
                self._internal_network_removed(ri, p, ri.ex_gw_port)
                ri.internal_ports.remove(p)

            if ex_gw_port and not ri.ex_gw_port:
                self._set_subnet_info(ex_gw_port)
                self._external_gateway_added(ri, ex_gw_port)
                list_port_ids_up.append(ex_gw_port['id'])
            elif not ex_gw_port and ri.ex_gw_port:
                self._external_gateway_removed(ri, ri.ex_gw_port)

            self._send_update_port_statuses(list_port_ids_up,
                                            l3_constants.PORT_STATUS_ACTIVE)
            if ex_gw_port:
                self._process_router_floating_ips(ri, ex_gw_port)

            ri.ex_gw_port = ex_gw_port
            self._routes_updated(ri)
        except cfg_exceptions.DriverException as e:
            with excutils.save_and_reraise_exception():
                self.updated_routers.update([ri.router_id])
                LOG.error(e)
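The new/old port computation in _process_router is plain set arithmetic over port ids. The same step in isolation, as a sketch (diff_ports is a hypothetical helper, not part of the agent):

def diff_ports(previous_ports, current_ports):
    """Return (new, removed) ports given the last-known and current lists."""
    prev_ids = {p['id'] for p in previous_ports}
    cur_ids = {p['id'] for p in current_ports if p['admin_state_up']}
    # Ports present now but unknown before are new; known ports that
    # disappeared from the current set were removed.
    new_ports = [p for p in current_ports if p['id'] in cur_ids - prev_ids]
    old_ports = [p for p in previous_ports if p['id'] not in cur_ids]
    return new_ports, old_ports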
Example #54
    def backup(self, backup, volume_file, backup_metadata=True):
        """Backup the given volume.

           If backup['parent_id'] is given, then an incremental backup
           is performed.
        """
        if self.chunk_size_bytes % self.sha_block_size_bytes:
            err = _('Chunk size is not a multiple of '
                    'the block size used for creating hashes.')
            raise exception.InvalidBackup(reason=err)

        # Read the shafile of the parent backup if backup['parent_id']
        # is given.
        parent_backup_shafile = None
        parent_backup = None
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(self.context,
                                                     backup.parent_id)
            parent_backup_shafile = self._read_sha256file(parent_backup)
            parent_backup_shalist = parent_backup_shafile['sha256s']
            if (parent_backup_shafile['chunk_size'] !=
                    self.sha_block_size_bytes):
                err = (_('Hash block size has changed since the last '
                         'backup. New hash block size: %(new)s. Old hash '
                         'block size: %(old)s. Do a full backup.')
                       % {'old': parent_backup_shafile['chunk_size'],
                          'new': self.sha_block_size_bytes})
                raise exception.InvalidBackup(reason=err)
            # If the volume size increased since the last backup, fail
            # the incremental backup and ask user to do a full backup.
            if backup.size > parent_backup.size:
                err = _('Volume size increased since the last '
                        'backup. Do a full backup.')
                raise exception.InvalidBackup(reason=err)

        if sys.platform == 'win32':
            # When dealing with Windows physical disks, we need the exact
            # size of the disk. Attempting to read past this boundary will
            # lead to an IOError exception. At the same time, we cannot
            # seek to the end of file.
            win32_disk_size = self._get_win32_phys_disk_size(volume_file.name)

        (object_meta, object_sha256, extra_metadata, container,
         volume_size_bytes) = self._prepare_backup(backup)

        counter = 0
        total_block_sent_num = 0

        # There are two mechanisms to send the progress notification.
        # 1. The notifications are periodically sent in a certain interval.
        # 2. The notifications are sent after a certain number of chunks.
        # Both of them are working simultaneously during the volume backup,
        # when "chunked" backup drivers are deployed.
        def _notify_progress():
            self._send_progress_notification(self.context, backup,
                                             object_meta,
                                             total_block_sent_num,
                                             volume_size_bytes)
        timer = loopingcall.FixedIntervalLoopingCall(
            _notify_progress)
        if self.enable_progress_timer:
            timer.start(interval=self.backup_timer_interval)

        sha256_list = object_sha256['sha256s']
        shaindex = 0
        is_backup_canceled = False
        while True:
            # First of all, we check the status of this backup. If it
            # has been changed to deleting or has been deleted, we cancel
            # the backup process so the forced delete can proceed.
            with backup.as_read_deleted():
                backup.refresh()
            if backup.status in (fields.BackupStatus.DELETING,
                                 fields.BackupStatus.DELETED):
                is_backup_canceled = True
                # To avoid leaving stray chunks behind once the deletion
                # completes, clean up the chunk objects again.
                self.delete_backup(backup)
                LOG.debug('Cancel the backup process of %s.', backup.id)
                break
            data_offset = volume_file.tell()

            if sys.platform == 'win32':
                read_bytes = min(self.chunk_size_bytes,
                                 win32_disk_size - data_offset)
            else:
                read_bytes = self.chunk_size_bytes
            data = volume_file.read(read_bytes)

            if data == b'':
                break

            # Calculate new shas with the datablock.
            shalist = eventlet.tpool.execute(self._calculate_sha, data)
            sha256_list.extend(shalist)

            # If parent_backup is not None, that means an incremental
            # backup will be performed.
            if parent_backup:
                # Find the extent that needs to be backed up.
                extent_off = -1
                for idx, sha in enumerate(shalist):
                    if sha != parent_backup_shalist[shaindex]:
                        if extent_off == -1:
                            # Start of new extent.
                            extent_off = idx * self.sha_block_size_bytes
                    else:
                        if extent_off != -1:
                            # We've reached the end of extent.
                            extent_end = idx * self.sha_block_size_bytes
                            segment = data[extent_off:extent_end]
                            self._backup_chunk(backup, container, segment,
                                               data_offset + extent_off,
                                               object_meta,
                                               extra_metadata)
                            extent_off = -1
                    shaindex += 1

                # The last extent extends to the end of data buffer.
                if extent_off != -1:
                    extent_end = len(data)
                    segment = data[extent_off:extent_end]
                    self._backup_chunk(backup, container, segment,
                                       data_offset + extent_off,
                                       object_meta, extra_metadata)
                    extent_off = -1
            else:  # Do a full backup.
                self._backup_chunk(backup, container, data, data_offset,
                                   object_meta, extra_metadata)

            # Notifications
            total_block_sent_num += self.data_block_num
            counter += 1
            if counter == self.data_block_num:
                # Send the notification to Ceilometer when the chunk
                # number reaches the data_block_num.  The backup percentage
                # is put in the metadata as the extra information.
                self._send_progress_notification(self.context, backup,
                                                 object_meta,
                                                 total_block_sent_num,
                                                 volume_size_bytes)
                # Reset the counter
                counter = 0

        # Stop the timer.
        timer.stop()
        # If backup has been cancelled we have nothing more to do
        # but timer.stop().
        if is_backup_canceled:
            return
        # All the data has been sent, so the backup percentage reaches 100.
        self._send_progress_end(self.context, backup, object_meta)

        object_sha256['sha256s'] = sha256_list
        if backup_metadata:
            try:
                self._backup_metadata(backup, object_meta)
            # Whatever goes wrong, we want to log, cleanup, and re-raise.
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Backup volume metadata failed.")
                    self.delete_backup(backup)

        self._finalize_backup(backup, container, object_meta, object_sha256)
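The incremental branch of backup() coalesces consecutive blocks whose SHA digests differ from the parent backup into extents before uploading them. The same idea in isolation, as a sketch (changed_extents is a hypothetical helper; it assumes the parent hash list covers every new block):

def changed_extents(new_hashes, parent_hashes, block_size):
    """Yield (offset, length) runs of blocks whose hashes changed."""
    extent_off = None
    for idx, digest in enumerate(new_hashes):
        if digest != parent_hashes[idx]:
            if extent_off is None:
                extent_off = idx * block_size   # start of a new extent
        elif extent_off is not None:
            # End of the current extent: emit it and reset.
            yield extent_off, idx * block_size - extent_off
            extent_off = None
    if extent_off is not None:
        # The last extent extends to the end of the data.
        yield extent_off, len(new_hashes) * block_size - extent_off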
Example #55
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""

        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") % {
                             'name': security_group_name,
                             'project': context.project_id
                         })
                self.raise_not_found(msg)
            else:
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Neutron Error:")

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)

        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning(
                    "Cannot add security group %(name)s to "
                    "%(instance)s since the port %(port_id)s "
                    "does not meet security requirements", {
                        'name': security_group_name,
                        'instance': instance.uuid,
                        'port_id': port['id']
                    })
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(
                    "Adding security group %(security_group_id)s to "
                    "port %(port_id)s", {
                        'security_group_id': security_group_id,
                        'port_id': port['id']
                    })
                neutron.update_port(port['id'], {'port': updated_port})
            except n_exc.NeutronClientException as e:
                exc_info = sys.exc_info()
                if e.status_code == 400:
                    raise exception.SecurityGroupCannotBeApplied(
                        six.text_type(e))
                else:
                    six.reraise(*exc_info)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Neutron Error:")
Example #56
    def add(self,
            image_id,
            image_file,
            image_size,
            context=None,
            verifier=None):
        """Stores an image file with supplied identifier to the backend
        storage system and returns a tuple containing information
        about the stored image.

        :param image_id: The opaque image identifier
        :param image_file: The image data to write, as a file-like object
        :param image_size: The size of the image data to write, in bytes
        :param verifier: An object used to verify signatures for images
        :retval: tuple of URL in backing store, bytes written, checksum
                and a dictionary with storage system specific information
        :raises: `glance.common.exceptions.Duplicate` if the image already
                existed
                `glance.common.exceptions.UnexpectedStatus` if the upload
                request returned an unexpected status. The expected responses
                are 201 Created and 200 OK.
        """
        ds = self.select_datastore(image_size)
        image_file = _Reader(image_file, verifier)
        headers = {}
        if image_size > 0:
            headers.update({'Content-Length': image_size})
            data = image_file
        else:
            data = utils.chunkiter(image_file, CHUNKSIZE)
        loc = StoreLocation(
            {
                'scheme': self.scheme,
                'server_host': self.server_host,
                'image_dir': self.store_image_dir,
                'datacenter_path': ds.datacenter.path,
                'datastore_name': ds.name,
                'image_id': image_id
            }, self.conf)
        # NOTE(arnaud): use a decorator when the config is not tied to self
        cookie = self._build_vim_cookie_header(True)
        headers = dict(headers)
        headers.update({'Cookie': cookie})
        session = new_session(self.api_insecure, self.ca_file)

        url = loc.https_url
        try:
            response = session.put(url, data=data, headers=headers)
        except IOError as e:
            # TODO(sigmavirus24): Figure out what the new exception type would
            # be in requests.
            # When a session is not authenticated, the socket is closed by
            # the server after sending the response. http_client has an open
            # issue with https that raises Broken Pipe
            # error instead of returning the response.
            # See http://bugs.python.org/issue16062. Here, we log the error
            # and continue to look into the response.
            msg = _LE('Communication error sending http %(method)s request '
                      'to the url %(url)s.\n'
                      'Got IOError %(e)s') % {
                          'method': 'PUT',
                          'url': url,
                          'e': e
                      }
            LOG.error(msg)
            raise exceptions.BackendException(msg)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    _LE('Failed to upload content of image '
                        '%(image)s'), {'image': image_id})

        res = response.raw
        if res.status == requests.codes.conflict:
            raise exceptions.Duplicate(
                _("Image file %(image_id)s already "
                  "exists!") % {'image_id': image_id})

        if res.status not in (requests.codes.created, requests.codes.ok):
            msg = (_LE('Failed to upload content of image %(image)s. '
                       'The request returned an unexpected status: %(status)s.'
                       '\nThe response body:\n%(body)s') % {
                           'image': image_id,
                           'status': res.status,
                           'body': getattr(res, 'body', None)
                       })
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        return (loc.get_uri(), image_file.size,
                image_file.checksum.hexdigest(), {})
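When image_size is not known up front, the store above streams the body through utils.chunkiter rather than setting Content-Length. A minimal sketch of what such a chunk iterator plausibly looks like (an assumption about the helper, not its verbatim source):

def chunkiter(fp, chunk_size=65536):
    """Yield fixed-size chunks from a file-like object until EOF."""
    while True:
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        yield chunk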
Example #57
    def start(self):
        """Start serving a WSGI application.

        :returns: None
        """
        # The server socket object will be closed after the server exits,
        # but the underlying file descriptor will remain open and would
        # then produce bad file descriptor errors. So we duplicate the
        # socket object to keep its file descriptor usable.

        dup_socket = self._socket.dup()
        dup_socket.setsockopt(socket.SOL_SOCKET,
                              socket.SO_REUSEADDR, 1)
        # sockets can hang around forever without keepalive
        dup_socket.setsockopt(socket.SOL_SOCKET,
                              socket.SO_KEEPALIVE, 1)

        # This option isn't available in the OS X version of eventlet
        if hasattr(socket, 'TCP_KEEPIDLE'):
            dup_socket.setsockopt(socket.IPPROTO_TCP,
                                  socket.TCP_KEEPIDLE,
                                  CONF.tcp_keepidle)

        if self._use_ssl:
            try:
                ca_file = CONF.ssl_ca_file
                cert_file = CONF.ssl_cert_file
                key_file = CONF.ssl_key_file

                if cert_file and not os.path.exists(cert_file):
                    raise RuntimeError(
                        _("Unable to find cert_file : %s") % cert_file)

                if ca_file and not os.path.exists(ca_file):
                    raise RuntimeError(
                        _("Unable to find ca_file : %s") % ca_file)

                if key_file and not os.path.exists(key_file):
                    raise RuntimeError(
                        _("Unable to find key_file : %s") % key_file)

                # self._use_ssl is already known to be true here, so only
                # the cert/key presence needs checking.
                if not cert_file or not key_file:
                    raise RuntimeError(
                        _("When running server in SSL mode, you must "
                          "specify both a cert_file and key_file "
                          "option value in your configuration file"))
                ssl_kwargs = {
                    'server_side': True,
                    'certfile': cert_file,
                    'keyfile': key_file,
                    'cert_reqs': ssl.CERT_NONE,
                }

                if CONF.ssl_ca_file:
                    ssl_kwargs['ca_certs'] = ca_file
                    ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED

                dup_socket = eventlet.wrap_ssl(dup_socket,
                                               **ssl_kwargs)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Failed to start %(name)s on %(host)s"
                                  ":%(port)s with SSL support"),
                              {'name': self.name, 'host': self.host,
                               'port': self.port})

        wsgi_kwargs = {
            'func': eventlet.wsgi.server,
            'sock': dup_socket,
            'site': self.app,
            'protocol': self._protocol,
            'custom_pool': self._pool,
            'log': self._wsgi_logger,
            'log_format': CONF.wsgi_log_format,
            'debug': False,
            'keepalive': CONF.wsgi_keep_alive,
            'socket_timeout': self.client_socket_timeout
        }

        if self._max_url_len:
            wsgi_kwargs['url_length_limit'] = self._max_url_len

        self._server = eventlet.spawn(**wsgi_kwargs)
Example #58
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") % {
                             'name': security_group_name,
                             'project': context.project_id
                         })
                self.raise_not_found(msg)
            else:
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Neutron Error:")

        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)

        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance, the
                # security group should be present on every port, since that
                # is how the nova api adds it. If it is missing from a port,
                # skip that port; a "not found" error is raised only when the
                # security group is absent from all of the instance's ports.
                continue

            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info(
                    "Removing security group %(security_group_id)s from "
                    "port %(port_id)s", {
                        'security_group_id': security_group_id,
                        'port_id': port['id']
                    })
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Neutron Error:")
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") % {
                         'security_group_name': security_group_name,
                         'instance': instance.uuid
                     })
            self.raise_not_found(msg)
Example #59
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from snapshot."""

        LOG.debug("Create volume from snapshot")
        model_update = {}
        try:
            LOG.debug(
                "Clone new volume %(t_id)s from snapshot with id"
                " %(s_id)s", {
                    "t_id": volume['id'],
                    "s_id": volume['snapshot_id']
                })
            # 1. Make a call to DN
            # Check if current_dn_owner is set.
            # Route the snapshot creation request to current_dn_owner

            rt_key = None

            # Get metadata for volume
            snap_vol = snapshot['volume']
            metadata = snap_vol['metadata']
            rt_key = self._get_volume_metadata_value(metadata,
                                                     'current_dn_owner')
            if rt_key is None:
                rt_key = self.dn_routing_key

            util.message_data_plane(
                rt_key,
                'hyperscale.storage.dm.volume.clone.create',
                pool_name=POOL_NAME,
                display_name=util.get_guid_with_curly_brackets(volume['id']),
                version_name=util.get_guid_with_curly_brackets(
                    volume['snapshot_id']),
                volume_raw_size=volume['size'],
                volume_qos=1,
                parent_volume_guid=util.get_guid_with_curly_brackets(
                    snapshot['volume_id']),
                user_id=util.get_guid_with_curly_brackets(volume['user_id']),
                project_id=util.get_guid_with_curly_brackets(
                    volume['project_id']),
                volume_guid=util.get_guid_with_curly_brackets(volume['id']))

            LOG.debug("Volume created successfully on data node")

            # Get metadata for volume
            volume_metadata = self._get_volume_metadata(volume)
            parent_cur_dn = self._get_volume_metadata_value(
                metadata, 'current_dn_ip')

            metadata_update = {}
            metadata_update['snapshot_id'] = snapshot['id']
            metadata_update['parent_volume_guid'] = (
                util.get_guid_with_curly_brackets(snapshot['volume_id']))
            metadata_update['Primary_datanode_ip'] = parent_cur_dn
            metadata_update['current_dn_owner'] = rt_key
            metadata_update['current_dn_ip'] = parent_cur_dn

            # 2. Choose a potential replica here.
            # The actual decision to have potential replica is made in NOVA.
            rt_key, rt_dn_ip = self._select_rt(volume,
                                               volume_metadata,
                                               only_select=True)

            if rt_key and rt_dn_ip:
                metadata_update['Potential_secondary_key'] = rt_key
                metadata_update['Potential_secondary_ip'] = rt_dn_ip

        except (exception.UnableToExecuteHyperScaleCmd,
                exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in creating volume from snapshot')
        except exception.InvalidMetadataType:
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception updating metadata in create'
                              ' volume from snapshot')

        volume_metadata.update(metadata_update)

        volume['provider_location'] = PROVIDER_LOCATION
        model_update = {
            'provider_location': volume['provider_location'],
            'metadata': volume_metadata
        }

        return model_update
Example #60
        def wrapper(*args, **kwargs):
            if args and isinstance(args[0], test.BaseTestCase):
                test_obj = args[0]
            else:
                raise rbac_exceptions.RbacResourceSetupFailed(
                    '`rbac_rule_validation` decorator can only be applied to '
                    'an instance of `tempest.test.BaseTestCase`.')

            allowed = True
            disallowed_rules = []
            for rule in rules:
                _allowed = _is_authorized(test_obj, service, rule,
                                          extra_target_data)
                if not _allowed:
                    disallowed_rules.append(rule)
                allowed = allowed and _allowed

            if disallowed_rules:
                # Choose the first disallowed rule and expect the error
                # code corresponding to it.
                first_error_index = rules.index(disallowed_rules[0])
                exp_error_code = expected_error_codes[first_error_index]
                LOG.debug("%s: Expecting %d to be raised for policy name: %s",
                          test_func.__name__, exp_error_code,
                          disallowed_rules[0])
            else:
                exp_error_code = expected_error_codes[0]

            expected_exception, irregular_msg = _get_exception_type(
                exp_error_code)

            caught_exception = None
            test_status = 'Allowed'

            try:
                test_func(*args, **kwargs)
            except rbac_exceptions.RbacInvalidServiceException:
                with excutils.save_and_reraise_exception():
                    msg = ("%s is not a valid service." % service)
                    # FIXME(felipemonteiro): This test_status is logged too
                    # late. Need a function to log it before re-raising.
                    test_status = ('Error, %s' % (msg))
                    LOG.error(msg)
            except (expected_exception,
                    rbac_exceptions.BasePatroleResponseBodyException) \
                    as actual_exception:
                caught_exception = actual_exception
                test_status = 'Denied'

                if irregular_msg:
                    LOG.warning(irregular_msg, test_func.__name__,
                                ', '.join(rules), service)

                if allowed:
                    msg = ("User with roles %s was not allowed to perform the "
                           "following actions: %s. Expected allowed actions: "
                           "%s. Expected disallowed actions: %s." %
                           (roles, sorted(rules),
                            sorted(set(rules) - set(disallowed_rules)),
                            sorted(disallowed_rules)))
                    LOG.error(msg)
                    raise rbac_exceptions.RbacUnderPermissionException(
                        "%s Exception was: %s" % (msg, actual_exception))
            except Exception as actual_exception:
                caught_exception = actual_exception

                if _check_for_expected_mismatch_exception(
                        expected_exception, actual_exception):
                    LOG.error(
                        'Expected and actual exceptions do not match. '
                        'Expected: %s. Actual: %s.', expected_exception,
                        actual_exception.__class__)
                    raise rbac_exceptions.RbacExpectedWrongException(
                        expected=expected_exception,
                        actual=actual_exception.__class__,
                        exception=actual_exception)
                else:
                    with excutils.save_and_reraise_exception():
                        exc_info = sys.exc_info()
                        error_details = six.text_type(exc_info[1])
                        msg = ("An unexpected exception has occurred during "
                               "test: %s. Exception was: %s" %
                               (test_func.__name__, error_details))
                        test_status = 'Error, %s' % (error_details)
                        LOG.error(msg)
            else:
                if not allowed:
                    msg = (
                        "OverPermission: Role %s was allowed to perform the "
                        "following disallowed actions: %s" %
                        (roles, sorted(disallowed_rules)))
                    LOG.error(msg)
                    raise rbac_exceptions.RbacOverPermissionException(msg)
            finally:
                if CONF.patrole_log.enable_reporting:
                    RBACLOG.info(
                        "[Service]: %s, [Test]: %s, [Rules]: %s, "
                        "[Expected]: %s, [Actual]: %s", service,
                        test_func.__name__, ', '.join(rules),
                        "Allowed" if allowed else "Denied", test_status)

                # Sanity-check that ``override_role`` was called to eliminate
                # false-positives and bad test flows resulting from exceptions
                # getting raised too early, too late or not at all, within
                # the scope of an RBAC test.
                _validate_override_role_called(
                    test_obj, actual_exception=caught_exception)
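Every example in this collection leans on the same oslo.utils context manager, so its core mechanics are worth spelling out. A simplified, hypothetical re-implementation (the real class in oslo_utils.excutils additionally logs dropped exceptions and supports force_reraise() and constructor options):

import sys

import six


class save_and_reraise_exception(object):
    """Sketch of the idiom: capture the in-flight exception on entry and,
    unless the body raised or reraise was set to False, re-raise it on exit.
    """

    def __init__(self):
        self.reraise = True

    def __enter__(self):
        # Must be entered while an exception is being handled, i.e.
        # inside an except block.
        self.type_, self.value, self.tb = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # The cleanup body itself raised; let the new exception
            # propagate (the real implementation logs the dropped one).
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)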