def _wrap(self, *args, **kwargs):
    """Call the wrapped Session method, translating DB-layer exceptions.

    Translates driver/SQLAlchemy errors into this project's DB exception
    hierarchy; OperationalError is deliberately re-raised unwrapped.
    """
    try:
        assert issubclass(
            self.__class__, sqlalchemy.orm.session.Session), (
            '_wrap_db_error() can only be applied to methods of '
            'subclasses of sqlalchemy.orm.session.Session.')
        return f(self, *args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    except sqla_exc.OperationalError as op_err:
        _raise_if_db_connection_lost(op_err, self.bind)
        _raise_if_deadlock_error(op_err, self.bind.dialect.name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.IntegrityError as int_err:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(int_err, self.bind.dialect.name)
        raise exception.DBError(int_err)
    except Exception as unknown_err:
        LOG.exception(_LE('DB exception wrapped.'))
        raise exception.DBError(unknown_err)
def storage_pool_create(context, values, update=False):
    """Create storage pool records, merging services into existing rows.

    For each pool dict in *values*: if a matching pool exists its services
    are merged and the row updated; otherwise a new row is inserted with a
    fresh UUID. Returns the list of resulting pool dicts.
    """
    session = get_session()
    pool_results = []
    with session.begin():
        try:
            for pool in values:
                try:
                    for pool_info in storage_pool_get(context, pool,
                                                      session):
                        pool_info['services'] = merge_services(
                            pool_info.get('services'),
                            pool.get('services'))
                        updates = dict(pool_info)
                        # id is the row key; never part of the update.
                        del updates['id']
                        updates['updated_at'] = timeutils.utcnow()
                        model_query(context, models.StoragePools,
                                    session=session,
                                    read_deleted="no").\
                            filter_by(id=pool_info['id']).\
                            update(updates)
                        pool_results.append(pool_info)
                except exception.StoragePoolNotFound:
                    # No existing row: insert a brand new pool record.
                    new_pool = dict(pool)
                    new_pool['id'] = str(uuid.uuid4())
                    pool_ref = models.StoragePools()
                    pool_ref.update(dict(dict(deleted=False), **new_pool))
                    pool_ref.save(session=session)
                    pool_results.append(new_pool)
        except Exception as exc:
            raise db_exc.DBError(exc)
    return pool_results
def fake_db_qos_specs_create(context, values):
    """Fake DB stub: raise for sentinel names, otherwise succeed silently."""
    if values['name'] == 'DupQoSName':
        raise exception.QoSSpecsExists(specs_id=values['name'])
    if values['name'] == 'FailQoSName':
        raise db_exc.DBError()
def storage_pool_delete(context, values):
    """Remove services from matching pools, soft-deleting emptied rows."""
    session = get_session()
    with session.begin():
        try:
            for pool in values:
                for pool_info in storage_pool_get(context, pool, session):
                    if pool.get('services'):
                        pool_info['services'] = delete_services(
                            pool_info.get('services'),
                            pool.get('services'))
                    else:
                        pool_info['services'] = None
                    keys = ('id', 'pool', 'backend_name',
                            'storage_backend_id', 'storage_tier_id')
                    filters = {key: pool.get(key)
                               for key in keys if pool.get(key)}
                    # delete record if it doesn't have any entries in
                    # services, otherwise just change services
                    if pool_info.get('services') and \
                            pool_info['services'] != "":
                        model_query(context, models.StoragePools,
                                    session=session).\
                            filter_by(**filters). \
                            update({'services': pool_info['services']})
                    else:
                        model_query(context, models.StoragePools,
                                    session=session).\
                            filter_by(**filters). \
                            update({'deleted': True,
                                    'deleted_at': timeutils.utcnow(),
                                    'updated_at':
                                        literal_column('updated_at')})
        except Exception as exc:
            raise db_exc.DBError(exc)
def update_record(context, record, **kwargs):
    """Apply **kwargs as attribute updates to *record* and persist it.

    Only keys that name existing attributes on the record are applied;
    unknown keys are silently ignored.

    :param context: request context passed to get_session
    :param record: ORM object to mutate and re-add to the session
    :raises os_db_exception.DBError: wrapping any failure during
        attribute assignment or the session transaction
    """
    session = get_session(context)
    try:
        # BUG FIX: dict.iteritems() only exists on Python 2 and raises
        # AttributeError on Python 3; items() works on both.
        for key, value in kwargs.items():
            if hasattr(record, key):
                setattr(record, key, value)
        with session.begin(subtransactions=True):
            session.add(record)
    except Exception as e:
        raise os_db_exception.DBError(e)
def _raise_for_remaining_DBAPIError(error, match, engine_name, is_disconnect):
    """Filter for remaining DBAPIErrors.

    Filter for remaining DBAPIErrors and wrap if they represent a
    disconnect error.
    """
    if is_disconnect:
        raise exception.DBConnectionError(error)
    else:
        # Pass 'error' as a lazy logging argument instead of eagerly
        # %-interpolating: formatting is deferred to the logging
        # framework, per the standard logging best practice.
        LOG.exception(_LE('DBAPIError exception wrapped from %s'), error)
        raise exception.DBError(error)
def storage_backend_create(context, values, update=False):
    """Create a storage backend record or update an existing one.

    If a matching backend exists, its capability/config specs and driver
    are refreshed; otherwise a new row (plus spec entries) is inserted.
    Returns the resulting backend info dict.

    :raises db_exc.DBError: wrapping any unexpected failure; the
        project's StorageBackend* exceptions propagate unchanged.
    """
    session = get_session()
    with session.begin():
        try:
            # check if storage system exists
            _info = _find_storage_backend(context, values, update, session)
            if _info:
                # Record already exists: refresh specs and driver info.
                _info['capability_specs'] = values.get('capability_specs')
                if not _info.get('capability_specs_id') and values.get(
                        'capability_specs'):
                    _info['capability_specs_id'] = str(uuid.uuid4())
                _info['config_specs'] = values.get('config_specs')
                if not _info.get('config_specs_id') and values.get(
                        'config_specs'):
                    _info['config_specs_id'] = str(uuid.uuid4())
                if not _info.get('driver') and values.get('driver'):
                    _info['driver'] = values.get('driver')
                # BUG FIX: this is a module-level function, so 'self' is
                # undefined and 'self._storage_backend_update(...)' would
                # raise NameError. Call the module-level helper directly
                # (matches the _find_storage_backend naming convention).
                _storage_backend_update(context, _info, session)
            else:
                _info = values
                # generate uuid if it doesn't exist
                if not _info.get('id'):
                    _info['id'] = str(uuid.uuid4())
                if _info.get('capability_specs'):
                    _info['capability_specs_id'] = str(uuid.uuid4())
                if values.get('config_specs'):
                    _info['config_specs_id'] = str(uuid.uuid4())
                # create storage system
                storage_backend_ref = models.StorageBackends()
                storage_backend_ref.update(_info)
                storage_backend_ref.save(session=session)
                # insert all specification entries for QoS specs
                if values.get('capability_specs'):
                    _storage_backend_capability_specs_update_or_create(
                        context, _info, session)
                # insert all specification entries for QoS specs
                if values.get('config_specs'):
                    _storage_backend_config_specs_update_or_create(
                        context, _info, session)
        except (exception.StorageBackendExists,
                exception.StorageBackendNotFound,
                exception.StorageBackendMissingKey):
            raise
        except Exception as e:
            raise db_exc.DBError(e)
    return _info
def storage_tier_create(context, values, update=False):
    """Create or update a storage tier under its parent backend.

    Looks up the parent backend, then inserts a new tier row or refreshes
    the existing one, creating capability-spec entries as needed. Returns
    the resulting tier dict.
    """
    session = get_session()
    with session.begin():
        try:
            backend = _find_storage_backend(
                context,
                dict(name=values.get('backend_name'),
                     id=values.get('backend_id')),
                True, session)
            tier_ref = {
                'storage_backend_id': backend['id'],
                'name': values.get('tier_name')
            }
            # check if tier exists - exception is raised if it insert and
            # if it already exists
            existing = _find_storage_tier(context, tier_ref, update,
                                          session)
            if existing:
                tier_ref = existing
                if values.get('capability_specs'):
                    if not tier_ref['capability_specs_id']:
                        tier_ref['capability_specs_id'] = str(uuid.uuid4())
            else:
                # Use the caller-supplied tier id when present, otherwise
                # mint a fresh one (original falsy-check preserved by or).
                tier_ref['id'] = values.get('tier_id') or str(uuid.uuid4())
                if values.get('capability_specs'):
                    tier_ref['capability_specs_id'] = str(uuid.uuid4())
            storage_tier_ref = models.StorageBackendTiers()
            storage_tier_ref.update(tier_ref)
            storage_tier_ref.save(session=session)
            # insert all specification entries for QoS specs
            if values.get('capability_specs'):
                tier_ref['capability_specs'] = values.get(
                    'capability_specs')
                _storage_tier_capability_specs_update_or_create(
                    context, tier_ref, session)
        except (exception.StorageBackendExists,
                exception.StorageBackendNotFound,
                exception.StorageBackendMissingKey,
                exception.StorageTierExists,
                exception.StorageTierNotFound,
                exception.StorageTierMissingKey):
            raise
        except Exception as exc:
            raise db_exc.DBError(exc)
    return tier_ref
def wrapper(*args, **kwargs):
    """Retry f on DBConnectionError with exponential backoff.

    Gives up and wraps in DBError once the retry budget is exhausted;
    a budget of -1 retries forever.
    """
    interval = self.retry_interval
    attempts_left = self.max_retries
    while True:
        try:
            return f(*args, **kwargs)
        except exception.DBConnectionError as conn_err:
            if attempts_left == 0:
                LOG.exception(_LE('DB exceeded retry limit.'))
                raise exception.DBError(conn_err)
            if attempts_left != -1:
                attempts_left -= 1
            LOG.exception(_LE('DB connection error.'))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(interval)
            if self.inc_retry_interval:
                interval = min(interval * 2, self.max_retry_interval)
def fake_db_associate_get(context, id):
    """Fake lookup: fail on the 'Trouble' sentinel, else return fixtures."""
    if id == 'Trouble':
        raise db_exc.DBError()
    return [
        {'name': 'type-1', 'id': 'id-1'},
        {'name': 'type-2', 'id': 'id-2'},
    ]
def __init__(self):
    # First sync attempt raises a DB error, the second returns no rows.
    sync_mock = mock.Mock()
    sync_mock.side_effect = [db_exc.DBError(), []]
    self._cell_data_sync = sync_mock
    super(TestCellStateManagerDB, self).__init__()
def update(self):
    """Always fail with a DBError wrapping an IntegrityError."""
    inner = exc.IntegrityError('a', 'a', 'a')
    raise db_exception.DBError(inner_exception=inner)
def fake_db_disassociate_all(context, id):
    """Fake stub: fail on the 'Trouble' sentinel, otherwise a no-op."""
    if id == 'Trouble':
        raise db_exc.DBError()
def fake_db_update(context, specs_id, values):
    # Fake DB stub: unconditionally simulates a database failure.
    raise db_exc.DBError()
def _raise_for_all_others(error, match, engine_name, is_disconnect):
    # Catch-all error filter: log the original exception with traceback,
    # then wrap it in the project's generic DBError.
    LOG.exception(_LE('DB exception wrapped.'))
    raise exception.DBError(error)
def fake_db_disassociate(context, id, type_id):
    """Fake stub: raise on sentinel values, otherwise succeed silently."""
    if id == 'Trouble':
        raise db_exc.DBError()
    if type_id == 'NotFound':
        raise exception.VolumeTypeNotFound(volume_type_id=type_id)
def test_create_port_catch_db_error(self):
    """create_port should surface a DBError raised by the DB layer."""
    db_error = db_exc.DBError()
    self._test__port_action_with_failures(exc=db_error,
                                          action='create_port')