def register_models(base=BASE):
    """Register Models and create properties."""
    try:
        with contextlib.closing(db_session.get_engine().connect()) as conn:
            base.metadata.create_all(conn)
    except sqlalchemy.exc.DBAPIError as e:
        LOG.error(_("Database registration exception: %s"), e)
        LOG.error(_("Statement: %s"), e.statement)
        return False
    return True
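# A minimal usage sketch: call register_models() once at service startup
# so every table declared on BASE exists before requests are served (the
# startup-hook name below is illustrative, not part of this module):
def _init_database():
    if not register_models():
        raise RuntimeError("database schema registration failed")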
def job_state_humanize(state):
    if state in constants.JOB_STATUS_SUCCESS_LIST:
        return ''
    elif state in constants.JOB_STATUS_FAILURE_LIST:
        return ''  # _(u"Failed")
    elif state in constants.JOB_STATUS_RUNNING_LIST:
        return _(u"Running")
    elif state in constants.JOB_STATUS_PENDING_LIST:
        return _(u"Pending")
    return ''
def _mysql_check_effective_sql_mode(engine):
    """Logs a message based on the effective SQL mode for MySQL connections."""
    realmode = _mysql_get_effective_sql_mode(engine)
    if realmode is None:
        LOG.warning(_('Unable to detect effective SQL mode'))
        return
    LOG.debug('MySQL server mode set to %s', realmode)
    # 'TRADITIONAL' mode enables several other modes, so
    # we need a substring match here
    if not ('TRADITIONAL' in realmode.upper() or
            'STRICT_ALL_TABLES' in realmode.upper()):
        LOG.warning(_("MySQL SQL mode is '%s', "
                      "consider enabling TRADITIONAL or STRICT_ALL_TABLES"),
                    realmode)
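# _mysql_get_effective_sql_mode() is called above but not shown in this
# excerpt.  A minimal sketch, modeled on the oslo-incubator helper this
# module mirrors (the body is a reconstruction, not verified against this
# codebase):
def _mysql_get_effective_sql_mode(engine):
    """Return the effective SQL mode for connections from the engine pool,
    or None if it cannot be determined.
    """
    # Even when never set by our own config, the server may still be
    # operating in a specific SQL mode per the server configuration.
    row = engine.execute("SHOW VARIABLES LIKE 'sql_mode'").fetchone()
    if row is None:
        return None
    return row[1]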
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
    """Ensures that MySQL and DB2 connections are alive.

    Borrowed from:
    http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
    """
    cursor = dbapi_conn.cursor()
    try:
        ping_sql = 'select 1'
        if engine.name == 'ibm_db_sa':
            # DB2 requires a table expression
            ping_sql = 'select 1 from (values (1)) AS t1'
        cursor.execute(ping_sql)
    except Exception as ex:
        if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
            msg = _('Database server has gone away: %s') % ex
            LOG.warning(msg)
            # if the database server has gone away, all connections in the
            # pool have become invalid and we can safely close all of them
            # here, rather than waste time checking every single connection
            engine.dispose()
            # this will be handled by SQLAlchemy and will force it to create
            # a new connection and retry the original action
            raise sqla_exc.DisconnectionError(msg)
        else:
            raise
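# For reference, create_engine() below attaches this listener through a
# 'checkout' pool event; this small helper just restates that wiring (the
# helper name itself is illustrative):
def _attach_ping_listener(engine):
    # run the liveness check each time a connection leaves the pool
    sqlalchemy.event.listen(engine, 'checkout',
                            functools.partial(_ping_listener, engine))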
def _wrap_db_error(f):
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            assert issubclass(
                self.__class__, sqlalchemy.orm.session.Session), (
                '_wrap_db_error() can only be applied to methods of '
                'subclasses of sqlalchemy.orm.session.Session.')
            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violations and
        # wrap them with our own DBDuplicateEntry exception. A unique
        # constraint violation surfaces as an IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs, so we must do this ourselves. Also, some tables (for
            # example instance_types) have more than one unique constraint,
            # so we have to parse the names of the columns whose values
            # violate the constraint out of the error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
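# A hedged sketch of how _wrap_db_error is typically applied, modeled on
# the oslo-era Session subclass this module mirrors (the exact class may
# differ in this project; it is illustrative):
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class, to avoid monkey-patching sqlalchemy.orm."""

    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)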
def unregister_models(base=BASE):
    """Unregister Models, useful for clearing out data before testing."""
    try:
        with contextlib.closing(db_session.get_engine().connect()) as conn:
            base.metadata.drop_all(conn)
    except Exception as err:
        LOG.error(_("Database exception"))
        LOG.exception(err)
def status_from_state(vm_state, task_state='default'):
    """Given vm_state and task_state, return a status string."""
    task_map = _STATE_MAP.get(vm_state, dict(default='UNKNOWN'))
    status = task_map.get(task_state, task_map['default'])
    if status == "UNKNOWN":
        LOG.error(_("status is UNKNOWN from vm_state=%(vm_state)s "
                    "task_state=%(task_state)s. Bad upgrade or db "
                    "corrupted?"),
                  {'vm_state': vm_state, 'task_state': task_state})
    return status
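# _STATE_MAP itself is not shown in this excerpt.  For illustration, its
# shape is a two-level dict keyed by vm_state, then task_state, with a
# per-vm_state 'default' entry (the values below are assumptions modeled
# on Nova's mapping, not this project's actual table):
_STATE_MAP_EXAMPLE = {
    'active': {'default': 'ACTIVE', 'rebooting': 'REBOOT'},
    'stopped': {'default': 'SHUTOFF'},
}
# With that shape, status_from_state('active', 'rebooting') returns
# 'REBOOT', and any unmapped task_state falls back to the 'default' entry.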
def commit(self, context, reservations, user_id=None):
    """Commit reservations.

    :param context: The request context, for access checks.
    :param reservations: A list of the reservation UUIDs, as
                         returned by the reserve() method.
    """
    try:
        self._driver.commit(context, reservations, user_id=user_id)
    except Exception:
        # NOTE(Vek): Ignoring exceptions here is safe, because the
        # usage resynchronization and the reservation expiration
        # mechanisms will resolve the issue. The exception is
        # logged, however, because this is less than optimal.
        LOG.exception(_("Failed to commit reservations %s"), reservations)
        return
    LOG.debug("Committed reservations %s", reservations)
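# A hedged sketch of the surrounding reserve/commit flow; `quota_engine`
# stands in for an instance of this engine class, and the reserve() and
# rollback() calls and resource keywords are assumptions modeled on
# Nova-style quota engines:
def _example_quota_flow(quota_engine, context):
    reservations = quota_engine.reserve(context, instances=1, cores=2)
    try:
        pass  # perform the work the reservation covers
    except Exception:
        quota_engine.rollback(context, reservations)
        raise
    quota_engine.commit(context, reservations)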
ALLOW_START = [STOPPED]
ALLOW_SHUTDOWN = [ACTIVE]
ALLOW_RENAME = [ACTIVE]
ALLOW_RESIZE = [ACTIVE]
ALLOW_PAUSE = [ACTIVE]
ALLOW_UNPAUSE = [PAUSED]
ALLOW_SUSPEND = [ACTIVE]
ALLOW_RESUME = [SUSPENDED]
ALLOW_BACKUP = [ACTIVE]
ALLOW_CHANGE_PASSWD = [STOPPED]
ALLOW_CHANGE_IOPS = [ACTIVE]
ALLOW_ATTACH_VOLUME = [ACTIVE, STOPPED]

# OpenStack Client Actions
ACTIONS = (
    _('start'),
    _('change'),
    _('modify'),
    _('pause'),
    _('reboot'),
    _('resume'),
    _('shutdown'),
    _('suspend'),
    _('unsuspend'),
    _('unpause'),
    _('backup'),
    _('change_admin_password'),
    _('change_iops'),
)

ALLOW_ACTION_MAP = {
class DBInvalidUnicodeParameter(Exception):
    message = _("Invalid Parameter: "
                "Unicode is not supported by the current database.")
# -*- coding: utf-8 -*-
from nebula.core.i18n import _

import math

_B = _(u"B")
_KB = _(u"KB")
_MB = _(u"MB")
_GB = _(u"GB")
_TB = _(u"TB")
_PB = _(u"PB")


def bt_to_g(bytes):
    """Convert a byte count (int or numeric string) to gigabytes,
    rounded up to two decimal places.
    """
    if isinstance(bytes, basestring):
        bytes = int(bytes)
    return math.ceil(bytes / 1024.0 / 1024 / 1024 * 100) / 100


def human_size(size_bytes):
    """Format a size in bytes into a 'human' file size, e.g. bytes, KB,
    MB, GB, TB, PB.

    Note that bytes/KB will be reported in whole numbers but MB and
    above will have greater precision, e.g. 1 byte, 43 bytes, 443 KB,
    4.3 MB, 4.43 GB, etc.
    """
    if size_bytes == 1:
        # because I really hate unnecessary plurals
        return "1 byte"

    suffixes_table = [(_B, 0), (_KB, 0), (_MB, 1),
                      (_GB, 2), (_TB, 2), (_PB, 2)]
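    # NOTE: what follows is a reconstruction sketch of the conventional
    # lookup loop, not the verified original body: divide until the value
    # fits a unit, then format with that unit's precision.
    num = float(size_bytes)
    for suffix, precision in suffixes_table:
        if num < 1024.0:
            break
        num /= 1024.0
    if precision == 0:
        return "%d %s" % (num, suffix)
    return "%.*f %s" % (precision, num, suffix)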
def gettext(text):
    return _(text)
# coding=utf-8
from nebula.core.i18n import _

QUOTA_NAMES = [
    _('instances'),
    _('cores'),
    _('ram'),
    _('images'),
    _('floating_ips'),
    _('fixed_ips'),
    _('metadata_items'),
    _('injected_files'),
    _('injected_file_content_bytes'),
    _('injected_file_path_length'),
    _('instance_attach_volumes'),
    _('instance_attach_ports'),
    _('instance_backups'),
    _('volume_backups'),
    _('volume_capacity'),
    _('instance_cores_min'),
    _('instance_cores_max'),
    _('instance_ram_min'),
    _('instance_ram_max'),
    _('network_vlan_min'),
    _('network_vlan_max'),
    _('virtual_routers'),
    _('firewalls'),
    _('bandwidth_tx'),
    _('bandwidth_rx'),
    _('binding_publicips'),
portal_bp.add_url_rule('/cdh/datainchange',
                       view_func=CDHChange.as_view('datainchange'),
                       methods=['POST', 'GET'])
portal_bp.add_url_rule('/cdh/datainreport',
                       view_func=CDHReport.as_view('datainreport'),
                       methods=['POST'])

from .empty import (
    EMPTYView
)

portal_bp.add_url_rule('/empty',
                       view_func=EMPTYView.as_view('empty'),
                       methods=['GET'],
                       defaults={'view_desc': _(u"Empty - Going...")})

from .etltools import (
    ETLToolsView,
    ETLChange,
    ETLReport,
    ETLServerDelete,
    ETLToolsGetPage,
    ETLTaskRun,
    ETLJobRun,
    ETLTaskLog,
    ETLJobLog,
)

portal_bp.add_url_rule('/etltools',
                       view_func=ETLToolsView.as_view('etltools'),
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  idle_timeout=1800, echo=None, connection_debug=0,
                  max_pool_size=None, max_overflow=None, pool_timeout=None,
                  sqlite_synchronous=True, connection_trace=False,
                  max_retries=10, retry_interval=10, **kwargs):
    """Return a new SQLAlchemy engine."""
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)

    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }

    logger = logging.getLogger('sqlalchemy.engine')

    using_scope = CONF.portal.using_scope
    if using_scope:
        LOG.info('* Updating engine strategy to threadlocal')
        engine_args['strategy'] = 'threadlocal'

    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)

    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool

        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout

    if echo is not None:
        engine_args['echo'] = echo

    if kwargs.get('executor') is not None:
        engine_args.update(kwargs)

    engine = sqlalchemy.create_engine(sql_connection, **engine_args)

    if using_scope:
        LOG.info('* Updating engine pool to _use_threadlocal')
        engine.pool._use_threadlocal = True

    # Since it's just a mock connection, we do not need to set up events
    if engine.__class__.__name__ == 'MockConnection':
        return engine

    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)

    if engine.name in ['mysql', 'ibm_db_sa']:
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            if mysql_sql_mode:
                _mysql_set_mode_callback(engine, mysql_sql_mode)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)

    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()

    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        if not _is_db_connection_error(e.args[0]):
            raise

        remaining = max_retries
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
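# A hedged usage sketch; the connection URL and option values are
# illustrative only (max_retries=-1 makes the initial-connection retry
# loop run forever):
def _example_create_engine():
    return create_engine('mysql://nebula:secret@127.0.0.1/nebula',
                         mysql_sql_mode='TRADITIONAL',
                         idle_timeout=3600,
                         max_pool_size=10,
                         max_retries=-1)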