Example #1
    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""

        LOG.debug("Enter SolidFire unmanage...")
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(
                _("Account for Volume ID %s was not found on "
                  "the SolidFire Cluster while attempting "
                  "unmanage operation!") % volume['id'])
            raise exception.SolidFireAPIException("Failed to find account "
                                                  "for volume.")

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.strtime()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {
            'volumeID': int(sf_vol['volumeID']),
            'attributes': attributes
        }

        data = self._issue_api_request('ModifyVolume', params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
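All of the examples on this page center on timeutils.strtime. As a rough orientation, here is a minimal, self-contained sketch of the assumed behavior: format a given (or the current UTC) datetime as a string with an ISO-like default format. The default format string is an assumption, not something stated in the examples.

import datetime

# Minimal sketch of the assumed timeutils.strtime behavior (not the real
# implementation); the default format below is an assumption.
def strtime(at=None, fmt='%Y-%m-%dT%H:%M:%S.%f'):
    if at is None:
        at = datetime.datetime.utcnow()
    return at.strftime(fmt)

print(strtime())                    # e.g. '2014-06-01T12:00:00.000000'
print(strtime(fmt='%Y%m%d%H%M%S'))  # e.g. '20140601120000', as in the backup examples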
Example #2
    def unmanage(self, volume):
        """Mark SolidFire Volume as unmanaged (export from Cinder)."""

        LOG.debug("Enter SolidFire unmanage...")
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_("Account for Volume ID %s was not found on "
                        "the SolidFire Cluster while attempting "
                        "unmanage operation!") % volume['id'])
            raise exception.SolidFireAPIException("Failed to find account "
                                                  "for volume.")

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        export_time = timeutils.strtime()
        attributes = sf_vol['attributes']
        attributes['os_exported_at'] = export_time
        params = {'volumeID': int(sf_vol['volumeID']),
                  'attributes': attributes}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
Example #3
    def to_dict(self):
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))

        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'project_name': self.project_name,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class,
                'service_catalog': self.service_catalog,
                'tenant': self.tenant,
                'user': self.user,
                'user_identity': user_idt}
Example #4
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(context.elevated(), volume_id)
            if volume["status"] == "attaching":
                if volume["instance_uuid"] and volume["instance_uuid"] != instance_uuid:
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if volume["attached_host"] and volume["attached_host"] != host_name:
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if volume_metadata.get("attached_mode") and volume_metadata.get("attached_mode") != mode:
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume["status"] != "available":
                msg = _("status must be available or attaching")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self._notify_about_volume_usage(context, volume, "attach.start")
            self.db.volume_update(
                context,
                volume_id,
                {
                    "instance_uuid": instance_uuid,
                    "attached_host": host_name,
                    "status": "attaching",
                    "attach_time": timeutils.strtime(),
                },
            )
            self.db.volume_admin_metadata_update(context.elevated(), volume_id, {"attached_mode": mode}, False)

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id, {"status": "error_attaching"})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)

            if volume_metadata.get("readonly") == "True" and mode != "ro":
                self.db.volume_update(context, volume_id, {"status": "error_attaching"})
                raise exception.InvalidVolumeAttachMode(mode=mode, volume_id=volume_id)
            try:
                # NOTE(flaper87): Verify the driver is enabled
                # before going forward. The exception will be caught
                # and the volume status updated.
                utils.require_driver_initialized(self.driver)

                self.driver.attach_volume(context, volume, instance_uuid, host_name_sanitized, mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id, {"status": "error_attaching"})

            volume = self.db.volume_attached(
                context.elevated(), volume_id, instance_uuid, host_name_sanitized, mountpoint
            )
            self._notify_about_volume_usage(context, volume, "attach.end")
Example #5
    def to_dict(self):
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))

        return {'user_id': self.user_id,
                'project_id': self.project_id,
                'project_name': self.project_name,
                'domain': self.domain,
                'user_domain': self.user_domain,
                'project_domain': self.project_domain,
                'is_admin': self.is_admin,
                'read_deleted': self.read_deleted,
                'roles': self.roles,
                'remote_address': self.remote_address,
                'timestamp': timeutils.strtime(self.timestamp),
                'request_id': self.request_id,
                'auth_token': self.auth_token,
                'quota_class': self.quota_class,
                'service_catalog': self.service_catalog,
                'tenant': self.tenant,
                'user': self.user,
                'user_identity': user_idt}
Example #6
 def _generate_swift_object_name_prefix(self, backup):
     az = "az_%s" % self.az
     backup_name = "%s_backup_%s" % (az, backup["id"])
     volume = "volume_%s" % (backup["volume_id"])
     timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
     prefix = volume + "/" + timestamp + "/" + backup_name
     LOG.debug("_generate_swift_object_name_prefix: %s" % prefix)
     return prefix
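For context, a hedged, self-contained illustration of the prefix layout built by _generate_swift_object_name_prefix above; all IDs are made up.

import datetime

# Illustrative values only; real IDs come from the backup and volume records.
az = 'az_nova'
backup_id = '4c3f0a9e'
volume_id = '735d9e1a'
timestamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
prefix = 'volume_%s/%s/%s_backup_%s' % (volume_id, timestamp, az, backup_id)
print(prefix)  # e.g. volume_735d9e1a/20140601120000/az_nova_backup_4c3f0a9e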
Example #7
 def _generate_swift_object_name_prefix(self, backup):
     az = 'az_%s' % self.az
     backup_name = '%s_backup_%s' % (az, backup['id'])
     volume = 'volume_%s' % (backup['volume_id'])
     timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
     prefix = volume + '/' + timestamp + '/' + backup_name
     LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix)
     return prefix
Example #8
 def _generate_swift_object_name_prefix(self, backup):
     az = 'az_%s' % self.az
     backup_name = '%s_backup_%s' % (az, backup['id'])
     volume = 'volume_%s' % (backup['volume_id'])
     timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
     prefix = volume + '/' + timestamp + '/' + backup_name
     LOG.debug(_('_generate_swift_object_name_prefix: %s') % prefix)
     return prefix
Example #9
    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """

        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException("Manage existing volume "
                                                  "requires 'source-id'.")

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid, 'limit': 1}
        data = self._issue_api_request('ListActiveVolumes', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
        sf_ref = data['result']['volumes'][0]

        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = {}
        if (self.configuration.sf_allow_tenant_qos
                and volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        import_time = timeutils.strtime(volume['created_at'])
        attributes = {
            'uuid': volume['id'],
            'is_clone': 'False',
            'os_imported_at': import_time,
            'old_name': sfname
        }
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {
            'name': volume['name'],
            'volumeID': sf_ref['volumeID'],
            'accountID': sfaccount['accountID'],
            'enable512e': self.configuration.sf_emulate_512,
            'attributes': attributes,
            'qos': qos
        }

        data = self._issue_api_request('ModifyVolume', params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return self._get_model_info(sfaccount, sf_ref['volumeID'])
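A hedged illustration of the external_ref argument that manage_existing above expects; the values are made up. 'source-id' is required and must point at an active volume on the SolidFire cluster, while 'name' is optional and is only recorded as the old name.

# Made-up reference; in practice this comes from the 'cinder manage' request.
external_ref = {'source-id': 42, 'name': 'legacy-sf-volume'}
# driver.manage_existing(volume, external_ref)  # volume is the Cinder volume dict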
Example #10
    def manage_existing(self, volume, external_ref):
        """Manages an existing SolidFire Volume (import to Cinder).

        Renames the Volume to match the expected name for the volume.
        Also need to consider things like QoS, Emulation, account/tenant.
        """

        sfid = external_ref.get('source-id', None)
        sfname = external_ref.get('name', None)
        if sfid is None:
            raise exception.SolidFireAPIException("Manage existing volume "
                                                  "requires 'source-id'.")

        # First get the volume on the SF cluster (MUST be active)
        params = {'startVolumeID': sfid,
                  'limit': 1}
        data = self._issue_api_request('ListActiveVolumes', params)
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)
        sf_ref = data['result']['volumes'][0]

        sfaccount = self._create_sfaccount(volume['project_id'])

        attributes = {}
        qos = {}
        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        import_time = timeutils.strtime(volume['created_at'])
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'os_imported_at': import_time,
                      'old_name': sfname}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': volume['name'],
                  'volumeID': sf_ref['volumeID'],
                  'accountID': sfaccount['accountID'],
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        data = self._issue_api_request('ModifyVolume',
                                       params, version='5.0')
        if 'result' not in data:
            raise exception.SolidFireAPIDataException(data=data)

        return self._get_model_info(sfaccount, sf_ref['volumeID'])
Example #11
 def to_dict(self):
     return {'user_id': self.user_id,
             'project_id': self.project_id,
             'is_admin': self.is_admin,
             'read_deleted': self.read_deleted,
             'roles': self.roles,
             'remote_address': self.remote_address,
             'timestamp': timeutils.strtime(self.timestamp),
             'request_id': self.request_id,
             'auth_token': self.auth_token,
             'quota_class': self.quota_class}
Example #12
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        create_time = timeutils.strtime(volume['created_at'])
        attributes = {'uuid': volume['id'],
                      'is_clone': 'False',
                      'created_at': create_time}
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {'name': 'UUID-%s' % volume['id'],
                  'accountID': None,
                  'sliceCount': slice_count,
                  'totalSize': int(volume['size'] * units.Gi),
                  'enable512e': self.configuration.sf_emulate_512,
                  'attributes': attributes,
                  'qos': qos}

        # NOTE(jdg): Check if we're a migration tgt, if so
        # use the old volume-id here for the SF Name
        migration_status = volume.get('migration_status', None)
        if migration_status and 'target' in migration_status:
            k, v = migration_status.split(':')
            params['name'] = 'UUID-%s' % v
            params['attributes']['migration_uuid'] = volume['id']
            params['attributes']['uuid'] = v

        return self._do_volume_create(volume['project_id'], params)
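A hedged, self-contained illustration of the migration-target handling at the end of create_volume above: when migration_status has the form 'target:<original-volume-uuid>', the SolidFire volume is named after the original UUID. The value below is made up.

# Made-up value; real migration_status comes from the Cinder volume record.
migration_status = 'target:1b9c0494-5b0a-4bdd-b8a9-1b6e1f1a2a3b'
k, v = migration_status.split(':')
print('UUID-%s' % v)  # UUID-1b9c0494-5b0a-4bdd-b8a9-1b6e1f1a2a3b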
Example #13
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host'] and volume['attached_host'] !=
                        host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            now = timeutils.strtime()
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "attached_host": host_name,
                                   "status": "attaching",
                                   "attach_time": now})

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context,
                                      volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)
            try:
                self.driver.attach_volume(context,
                                          volume,
                                          instance_uuid,
                                          host_name_sanitized,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context,
                                          volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(),
                                    volume_id,
                                    instance_uuid,
                                    host_name_sanitized,
                                    mountpoint)
Example #14
 def to_dict(self):
     return {
         'user_id': self.user_id,
         'project_id': self.project_id,
         'is_admin': self.is_admin,
         'read_deleted': self.read_deleted,
         'roles': self.roles,
         'remote_address': self.remote_address,
         'timestamp': timeutils.strtime(self.timestamp),
         'request_id': self.request_id,
         'auth_token': self.auth_token,
         'quota_class': self.quota_class
     }
Example #15
 def to_dict(self):
     return {'user_id': self.user_id,
             'project_id': self.project_id,
             'project_name': self.project_name,
             'is_admin': self.is_admin,
             'read_deleted': self.read_deleted,
             'roles': self.roles,
             'remote_address': self.remote_address,
             'timestamp': timeutils.strtime(self.timestamp),
             'request_id': self.request_id,
             'auth_token': self.auth_token,
             'quota_class': self.quota_class,
             'service_catalog': self.service_catalog,
             'tenant': self.tenant,
             'user': self.user}
Example #16
 def to_dict(self):
     return {'user_id': self.user_id,
             'project_id': self.project_id,
             'project_name': self.project_name,
             'is_admin': self.is_admin,
             'read_deleted': self.read_deleted,
             'roles': self.roles,
             'remote_address': self.remote_address,
             'timestamp': timeutils.strtime(self.timestamp),
             'request_id': self.request_id,
             'auth_token': self.auth_token,
             'quota_class': self.quota_class,
             'service_catalog': self.service_catalog,
             'tenant': self.tenant,
             'user': self.user}
Example #17
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid']
                        and volume['instance_uuid'] != instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host']
                        and volume['attached_host'] != host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            now = timeutils.strtime()
            self.db.volume_update(
                context, volume_id, {
                    "instance_uuid": instance_uuid,
                    "attached_host": host_name,
                    "status": "attaching",
                    "attach_time": now
                })

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)
            try:
                self.driver.attach_volume(context, volume, instance_uuid,
                                          host_name_sanitized, mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            self.db.volume_attached(context.elevated(), volume_id,
                                    instance_uuid, host_name_sanitized,
                                    mountpoint)
Example #18
    def create_volume(self, volume):
        """Create volume on SolidFire device.

        The account is where CHAP settings are derived from, volume is
        created and exported.  Note that the new volume is immediately ready
        for use.

        One caveat here is that an existing user account must be specified
        in the API call to create a new volume.  We use a set algorithm to
        determine account info based on passed in cinder volume object.  First
        we check to see if the account already exists (and use it), or if it
        does not already exist, we'll go ahead and create it.

        """
        slice_count = 1
        attributes = {}
        qos = {}

        if (self.configuration.sf_allow_tenant_qos
                and volume.get('volume_metadata') is not None):
            qos = self._set_qos_presets(volume)

        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        create_time = timeutils.strtime(volume['created_at'])
        attributes = {
            'uuid': volume['id'],
            'is_clone': 'False',
            'created_at': create_time
        }
        if qos:
            for k, v in qos.items():
                attributes[k] = str(v)

        params = {
            'name': 'UUID-%s' % volume['id'],
            'accountID': None,
            'sliceCount': slice_count,
            'totalSize': int(volume['size'] * self.GB),
            'enable512e': self.configuration.sf_emulate_512,
            'attributes': attributes,
            'qos': qos
        }

        return self._do_volume_create(volume['project_id'], params)
Example #19
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        Returns a boolean indicating whether the retype occurred.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities (Not Used).

        """
        qos = {}
        attributes = {}

        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)

        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        attributes = sf_vol['attributes']
        attributes['retyped_at'] = timeutils.strtime()
        params = {'volumeID': sf_vol['volumeID']}
        qos = self._set_qos_by_volume_type(ctxt, new_type['id'])

        if qos:
            params['qos'] = qos
            for k, v in qos.items():
                attributes[k] = str(v)
            params['attributes'] = attributes

        self._issue_api_request('ModifyVolume', params)
        return True
Example #20
    def retype(self, ctxt, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        Returns a boolean indicating whether the retype occurred.

        :param ctxt: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities (Not Used).

        """
        qos = {}
        attributes = {}

        sfaccount = self._get_sfaccount(volume['project_id'])
        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)

        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=volume['id'])

        attributes = sf_vol['attributes']
        attributes['retyped_at'] = timeutils.strtime()
        params = {'volumeID': sf_vol['volumeID']}
        qos = self._set_qos_by_volume_type(ctxt, new_type['id'])

        if qos:
            params['qos'] = qos
            for k, v in qos.items():
                attributes[k] = str(v)
            params['attributes'] = attributes

        self._issue_api_request('ModifyVolume', params)
        return True
Example #21
    with _cd(stack_path + '/tempest'):
        # NOTE(jdg): We're not using putils here intentionally because we want
        # to wait and do some things that we typically don't do in OpenStack
        proc = subprocess.Popen(['./run_tests.sh', './tempest/api/volume/*'],
                                stdout=subprocess.PIPE, shell=True)
        (out, err) = proc.communicate()

    return out, err


if __name__ == '__main__':

    if len(sys.argv) == 1:
        sys.argv.append('-h')

    time_stamp = timeutils.strtime()
    (options, args) = _process_options()
    print('Gathering devstack env info...')
    devstack_info = _get_devstack_info(options.devstack_path)
    stack_path = _get_stack_path(devstack_info['local_rc'])
    print('Set stack_path to %s' % stack_path)
    print('Gathering cinder env info...')
    cinder_info = _get_cinder_info(stack_path + '/cinder')
    results_file = '/tmp/test.out'
    print('Running tempest api volume tests...')
    out, err = _run_tempest_api_tests(stack_path)

    print('Stdout results:')
    for line in out.split('\n'):
        print(line)
Example #22
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(
                context.elevated(), volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid']
                        and volume['instance_uuid'] != instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host']
                        and volume['attached_host'] != host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if (volume_metadata.get('attached_mode')
                        and volume_metadata.get('attached_mode') != mode):
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self._notify_about_volume_usage(context, volume, "attach.start")
            self.db.volume_update(
                context, volume_id, {
                    "instance_uuid": instance_uuid,
                    "attached_host": host_name,
                    "status": "attaching",
                    "attach_time": timeutils.strtime()
                })
            self.db.volume_admin_metadata_update(context.elevated(), volume_id,
                                                 {"attached_mode": mode},
                                                 False)

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)

            if volume_metadata.get('readonly') == 'True' and mode != 'ro':
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidVolumeAttachMode(mode=mode,
                                                        volume_id=volume_id)
            try:
                self.driver.attach_volume(context, volume, instance_uuid,
                                          host_name_sanitized, mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            volume = self.db.volume_attached(context.elevated(), volume_id,
                                             instance_uuid,
                                             host_name_sanitized, mountpoint)
            self._notify_about_volume_usage(context, volume, "attach.end")
Example #23
    def _do_clone_volume(self, src_uuid, src_project_id, v_ref):
        """Create a clone of an existing volume.

        Currently snapshots are the same as clones on the SF cluster.
        Due to the way the SF cluster works there's no loss in efficiency
        or space usage between the two.  The only thing different right
        now is the restore snapshot functionality which has not been
        implemented in the pre-release version of the SolidFire Cluster.

        """
        attributes = {}
        qos = {}

        sfaccount = self._get_sfaccount(src_project_id)
        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(src_uuid, params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=src_uuid)

        if src_project_id != v_ref['project_id']:
            sfaccount = self._create_sfaccount(v_ref['project_id'])

        if v_ref.get('size', None):
            new_size = v_ref['size']
        else:
            new_size = v_ref['volume_size']

        params = {'volumeID': int(sf_vol['volumeID']),
                  'name': 'UUID-%s' % v_ref['id'],
                  'newSize': int(new_size * units.Gi),
                  'newAccountID': sfaccount['accountID']}
        data = self._issue_api_request('CloneVolume', params)

        if (('result' not in data) or ('volumeID' not in data['result'])):
            msg = _("API response: %s") % data
            raise exception.SolidFireAPIException(msg)
        sf_volume_id = data['result']['volumeID']

        if (self.configuration.sf_allow_tenant_qos and
                v_ref.get('volume_metadata') is not None):
            qos = self._set_qos_presets(v_ref)

        ctxt = context.get_admin_context()
        type_id = v_ref.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        # NOTE(jdg): all attributes are copied via clone, need to do an update
        # to set any that were provided
        params = {'volumeID': sf_volume_id}

        create_time = timeutils.strtime(v_ref['created_at'])
        attributes = {'uuid': v_ref['id'],
                      'is_clone': 'True',
                      'src_uuid': src_uuid,
                      'created_at': create_time}
        if qos:
            params['qos'] = qos
            for k, v in qos.items():
                attributes[k] = str(v)

        params['attributes'] = attributes
        data = self._issue_api_request('ModifyVolume', params)

        model_update = self._get_model_info(sfaccount, sf_volume_id)
        if model_update is None:
            mesg = _('Failed to get model update from clone')
            raise exception.SolidFireAPIException(mesg)

        return (data, sfaccount, model_update)
Example #24
        def do_attach():
            # check the volume status before attaching
            volume = self.db.volume_get(context, volume_id)
            volume_metadata = self.db.volume_admin_metadata_get(
                context.elevated(), volume_id)
            if volume['status'] == 'attaching':
                if (volume['instance_uuid'] and volume['instance_uuid'] !=
                        instance_uuid):
                    msg = _("being attached by another instance")
                    raise exception.InvalidVolume(reason=msg)
                if (volume['attached_host'] and volume['attached_host'] !=
                        host_name):
                    msg = _("being attached by another host")
                    raise exception.InvalidVolume(reason=msg)
                if (volume_metadata.get('attached_mode') and
                        volume_metadata.get('attached_mode') != mode):
                    msg = _("being attached by different mode")
                    raise exception.InvalidVolume(reason=msg)
            elif volume['status'] != "available":
                msg = _("status must be available")
                raise exception.InvalidVolume(reason=msg)

            # TODO(jdg): attach_time column is currently varchar
            # we should update this to a date-time object
            # also consider adding detach_time?
            self._notify_about_volume_usage(context, volume,
                                            "attach.start")
            self.db.volume_update(context, volume_id,
                                  {"instance_uuid": instance_uuid,
                                   "attached_host": host_name,
                                   "status": "attaching",
                                   "attach_time": timeutils.strtime()})
            self.db.volume_admin_metadata_update(context.elevated(),
                                                 volume_id,
                                                 {"attached_mode": mode},
                                                 False)

            if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidUUID(uuid=instance_uuid)

            host_name_sanitized = utils.sanitize_hostname(
                host_name) if host_name else None

            volume = self.db.volume_get(context, volume_id)

            if volume_metadata.get('readonly') == 'True' and mode != 'ro':
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_attaching'})
                raise exception.InvalidVolumeAttachMode(mode=mode,
                                                        volume_id=volume_id)
            try:
                self.driver.attach_volume(context,
                                          volume,
                                          instance_uuid,
                                          host_name_sanitized,
                                          mountpoint)
            except Exception:
                with excutils.save_and_reraise_exception():
                    self.db.volume_update(context, volume_id,
                                          {'status': 'error_attaching'})

            volume = self.db.volume_attached(context.elevated(),
                                             volume_id,
                                             instance_uuid,
                                             host_name_sanitized,
                                             mountpoint)
            self._notify_about_volume_usage(context, volume, "attach.end")
Example #25
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ but it isn't callable as list().
        return six.text_type(value)
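A hedged, self-contained illustration of what to_primitive above is expected to produce for a payload holding a datetime and a tuple: the datetime becomes a strtime()-style string (format assumed) and the tuple becomes a list, so json.dumps succeeds. The expected result is built by hand here rather than by calling the function.

import datetime
import json

payload = {'created_at': datetime.datetime(2014, 6, 1, 12, 0, 0),
           'sizes': (1, 2, 3)}
# Hand-built equivalent of to_primitive(payload) under the stated assumptions.
expected = {'created_at': payload['created_at'].strftime('%Y-%m-%dT%H:%M:%S.%f'),
            'sizes': list(payload['sizes'])}
print(json.dumps(expected))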
Example #26
        # NOTE(jdg): We're not using putils here intentionally because we want
        # to wait and do some things that we typically don't do in OpenStack
        proc = subprocess.Popen(['./run_tests.sh', './tempest/api/volume/*'],
                                stdout=subprocess.PIPE,
                                shell=True)
        (out, err) = proc.communicate()

    return out, err


if __name__ == '__main__':

    if len(sys.argv) == 1:
        sys.argv.append('-h')

    time_stamp = timeutils.strtime()
    (options, args) = _process_options()
    print('Gathering devstack env info...')
    devstack_info = _get_devstack_info(options.devstack_path)
    stack_path = _get_stack_path(devstack_info['local_rc'])
    print('Set stack_path to %s' % stack_path)
    print('Gathering cinder env info...')
    cinder_info = _get_cinder_info(stack_path + '/cinder')
    results_file = '/tmp/test.out'
    print('Running tempest api volume tests...')
    out, err = _run_tempest_api_tests(stack_path)

    print('Stdout results:')
    for line in out.split('\n'):
        print(line)
Example #27
    def _do_clone_volume(self, src_uuid, src_project_id, v_ref):
        """Create a clone of an existing volume.

        Currently snapshots are the same as clones on the SF cluster.
        Due to the way the SF cluster works there's no loss in efficiency
        or space usage between the two.  The only thing different right
        now is the restore snapshot functionality which has not been
        implemented in the pre-release version of the SolidFire Cluster.

        """
        attributes = {}
        qos = {}

        sfaccount = self._get_sfaccount(src_project_id)
        params = {'accountID': sfaccount['accountID']}

        sf_vol = self._get_sf_volume(src_uuid, params)
        if sf_vol is None:
            raise exception.VolumeNotFound(volume_id=src_uuid)

        if src_project_id != v_ref['project_id']:
            sfaccount = self._create_sfaccount(v_ref['project_id'])

        params = {'volumeID': int(sf_vol['volumeID']),
                  'name': 'UUID-%s' % v_ref['id'],
                  'newSize': int(v_ref['size'] * self.GB),
                  'newAccountID': sfaccount['accountID']}
        data = self._issue_api_request('CloneVolume', params)

        if (('result' not in data) or ('volumeID' not in data['result'])):
            msg = _("API response: %s") % data
            raise exception.SolidFireAPIException(msg)
        sf_volume_id = data['result']['volumeID']

        if (self.configuration.sf_allow_tenant_qos and
                v_ref.get('volume_metadata') is not None):
            qos = self._set_qos_presets(v_ref)

        ctxt = context.get_admin_context()
        type_id = v_ref.get('volume_type_id', None)
        if type_id is not None:
            qos = self._set_qos_by_volume_type(ctxt, type_id)

        # NOTE(jdg): all attributes are copied via clone, need to do an update
        # to set any that were provided
        params = {'volumeID': sf_volume_id}

        create_time = timeutils.strtime(v_ref['created_at'])
        attributes = {'uuid': v_ref['id'],
                      'is_clone': 'True',
                      'src_uuid': src_uuid,
                      'created_at': create_time}
        if qos:
            params['qos'] = qos
            for k, v in qos.items():
                attributes[k] = str(v)

        params['attributes'] = attributes
        data = self._issue_api_request('ModifyVolume', params)

        model_update = self._get_model_info(sfaccount, sf_volume_id)
        if model_update is None:
            mesg = _('Failed to get model update from clone')
            raise exception.SolidFireAPIException(mesg)

        return (data, sfaccount, model_update)
Example #28
def to_primitive(value,
                 convert_instances=False,
                 convert_datetime=True,
                 level=0,
                 max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # 572754 <type 'NoneType'>
    # 460353 <type 'int'>
    # 379632 <type 'unicode'>
    # 274610 <type 'str'>
    # 199918 <type 'dict'>
    # 114200 <type 'datetime.datetime'>
    #  51817 <type 'bool'>
    #  26164 <type 'list'>
    #   6491 <type 'float'>
    #    283 <type 'tuple'>
    #     19 <type 'long'>
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            return value.data
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ but it isn't callable as list().
        return six.text_type(value)
Example #29
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
             inspect.isfunction, inspect.isgeneratorfunction,
             inspect.isgenerator, inspect.istraceback, inspect.isframe,
             inspect.iscode, inspect.isbuiltin, inspect.isroutine,
             inspect.isabstract]
    for test in nasty:
        if test(value):
            return unicode(value)

    # value of itertools.count doesn't get caught by inspects
    # above and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return unicode(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if isinstance(value, (list, tuple)):
            return [recursive(v) for v in value]
        elif isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        else:
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ but it isn't callable as list().
        return unicode(value)
Example #30
    def _heal_volume_status(self, context):

        TIME_SHIFT_TOLERANCE = 3

        heal_interval = CONF.volume_sync_interval

        if not heal_interval:
            return

        curr_time = time.time()
        LOG.info(_('Cascade info: last volume update time:%s'),
                 self._last_info_volume_state_heal)
        LOG.info(_('Cascade info: heal interval:%s'), heal_interval)
        LOG.info(_('Cascade info: curr_time:%s'), curr_time)

        if self._last_info_volume_state_heal + heal_interval > curr_time:
            return
        self._last_info_volume_state_heal = curr_time

        cinderClient = self._get_cinder_cascaded_admin_client()

        try:
            if self._change_since_time is None:
                search_opt = {'all_tenants': True}
                volumes = cinderClient.volumes.list(search_opts=search_opt)
                volumetypes = cinderClient.volume_types.list()
                LOG.info(_('Cascade info: change since time is none, '
                           'volumes: %s'), volumes)
            else:
                change_since_isotime = \
                    timeutils.parse_isotime(self._change_since_time)
                changes_since_timestamp = change_since_isotime - \
                    datetime.timedelta(seconds=TIME_SHIFT_TOLERANCE)
                timestr = time.mktime(changes_since_timestamp.timetuple())
                new_change_since_isotime = \
                    timeutils.iso8601_from_timestamp(timestr)

                search_op = {'all_tenants': True,
                             'changes-since': new_change_since_isotime}
                volumes = cinderClient.volumes.list(search_opts=search_op)
                volumetypes = cinderClient.volume_types.list()
                LOG.info(_('Cascade info: search time is not none, '
                           'volumes: %s'), volumes)

            self._change_since_time = timeutils.isotime()

            if len(volumes) > 0:
                LOG.debug(_('Updated the volumes %s'), volumes)

            for volume in volumes:
                volume_id = volume._info['metadata']['logicalVolumeId']
                volume_status = volume._info['status']
                if volume_status == "in-use":
                    self.db.volume_update(context, volume_id,
                                          {'status': volume._info['status'],
                                           'attach_status': 'attached',
                                           'attach_time': timeutils.strtime()
                                           })
                elif volume_status == "available":
                    self.db.volume_update(context, volume_id,
                                          {'status': volume._info['status'],
                                           'attach_status': 'detached',
                                           'instance_uuid': None,
                                           'attached_host': None,
                                           'mountpoint': None,
                                           'attach_time': None
                                           })
                else:
                    self.db.volume_update(context, volume_id,
                                          {'status': volume._info['status']})
                LOG.info(_('Cascade info: Updated the volume %s status from '
                           'cinder-proxy'), volume_id)

            vol_types = self.db.volume_type_get_all(context, inactive=False)
            for volumetype in volumetypes:
                volume_type_name = volumetype._info['name']
                if volume_type_name not in vol_types.keys():
                    extra_specs = volumetype._info['extra_specs']
                    self.db.volume_type_create(
                        context,
                        dict(name=volume_type_name, extra_specs=extra_specs))

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to sync volume status to db.'))
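A hedged, self-contained sketch of the 'changes-since' back-shift used in _heal_volume_status above: the previously stored timestamp is pushed back TIME_SHIFT_TOLERANCE seconds so volumes updated right around the last poll are not missed. Plain datetime calls stand in for the oslo timeutils helpers.

import datetime

TIME_SHIFT_TOLERANCE = 3
last_poll = datetime.datetime(2014, 6, 1, 12, 0, 0)  # illustrative value
shifted = last_poll - datetime.timedelta(seconds=TIME_SHIFT_TOLERANCE)
print(shifted.isoformat())  # 2014-06-01T11:59:57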
Example #31
def to_primitive(value,
                 convert_instances=False,
                 convert_datetime=True,
                 level=0,
                 max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    """
    nasty = [
        inspect.ismodule, inspect.isclass, inspect.ismethod,
        inspect.isfunction, inspect.isgeneratorfunction, inspect.isgenerator,
        inspect.istraceback, inspect.isframe, inspect.iscode,
        inspect.isbuiltin, inspect.isroutine, inspect.isabstract
    ]
    for test in nasty:
        if test(value):
            return unicode(value)

    # value of itertools.count doesn't get caught by inspects
    # above and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return unicode(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if isinstance(value, (list, tuple)):
            return [recursive(v) for v in value]
        elif isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in value.iteritems())
        elif convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif hasattr(value, 'iteritems'):
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        else:
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ but it isn't callable as list().
        return unicode(value)