def __init__(self, server_side_cursors=False, use_native_unicode=True, **kwargs):
    PGDialect.__init__(self, **kwargs)
    self.server_side_cursors = server_side_cursors
    self.use_native_unicode = use_native_unicode
    self.supports_unicode_binds = use_native_unicode
def __init__(self, server_side_cursors=False, use_native_unicode=True, **kwargs):
    PGDialect.__init__(self, **kwargs)
    self.server_side_cursors = server_side_cursors
    self.use_native_unicode = use_native_unicode
    self.supports_unicode_binds = use_native_unicode
    # Record the installed psycopg2 version as a tuple of ints, e.g. (2, 9, 3),
    # so version-dependent behaviour can be gated on it later.
    if self.dbapi and hasattr(self.dbapi, '__version__'):
        m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', self.dbapi.__version__)
        if m:
            self.psycopg2_version = tuple(
                int(x) for x in m.group(1, 2, 3) if x is not None)
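# Standalone sketch of the version parsing above, shown outside the dialect so
# it can be run directly: the optional third capture group lets the regex accept
# both two- and three-part version strings (the sample versions are placeholders).
import re

for version in ('2.9.3', '2.8'):
    m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', version)
    print(tuple(int(x) for x in m.group(1, 2, 3) if x is not None))
# -> (2, 9, 3), then (2, 8)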
class Db_Postgis(BaseDb):
    """Functionality for using sqlalchemy to connect to a PostGIS database."""

    preparer = PGIdentifierPreparer(PGDialect())

    @classmethod
    def create_engine(cls, pgurl):
        def _on_checkout(dbapi_connection, connection_record, connection_proxy):
            with dbapi_connection.cursor() as dbcur:
                dbcur.execute("SET timezone='UTC';")
                dbcur.execute("SET intervalstyle='iso_8601';")
                # don't drop precision from floats near the edge of their supported range
                dbcur.execute("SET extra_float_digits = 3;")

        pgurl = cls._append_query_to_url(pgurl, {"fallback_application_name": "kart"})
        engine = sqlalchemy.create_engine(pgurl, module=psycopg2, poolclass=cls._pool_class())
        sqlalchemy.event.listen(engine, "checkout", _on_checkout)
        return engine

    @classmethod
    def list_tables(cls, sess, db_schema=None):
        if db_schema is not None:
            name_clause = "c.relname"
            schema_clause = "n.nspname = :db_schema"
            params = {"db_schema": db_schema}
        else:
            name_clause = "format('%s.%s', n.nspname, c.relname)"
            schema_clause = "n.nspname NOT IN ('information_schema', 'pg_catalog', 'tiger', 'topology')"
            params = {}

        r = sess.execute(
            sqlalchemy.text(
                f"""
                SELECT {name_clause} as name, obj_description(c.oid, 'pg_class') as title
                FROM pg_catalog.pg_class c
                INNER JOIN pg_namespace n ON n.oid = c.relnamespace
                WHERE c.relkind IN ('r', 'v')
                    AND {schema_clause}
                    AND c.relname NOT LIKE '_kart_%'
                    AND c.oid NOT IN (
                        SELECT d.objid
                        FROM pg_catalog.pg_extension AS e
                        INNER JOIN pg_catalog.pg_depend AS d ON (d.refobjid = e.oid)
                        WHERE d.deptype = 'e' AND e.extname = 'postgis'
                    )
                ORDER BY {name_clause};
                """
            ),
            params,
        )
        return {row["name"]: row["title"] for row in r}

    @classmethod
    def db_schema_searchpath(cls, sess):
        return sess.scalar("SELECT current_schemas(true);")
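# Hypothetical usage sketch for the class above. The connection URL is a
# placeholder and sessionmaker is standard SQLAlchemy; everything else
# (BaseDb, _append_query_to_url, _pool_class) is assumed from the snippet
# and requires a reachable PostGIS database.
from sqlalchemy.orm import sessionmaker

engine = Db_Postgis.create_engine("postgresql://user:pass@localhost:5432/kart_db")
sess = sessionmaker(bind=engine)()
try:
    print(Db_Postgis.list_tables(sess, db_schema="public"))
finally:
    sess.close()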
async def init_pool(self):
    # dsn for connections
    dsn = 'postgresql://%s:%s@%s:%s/%s' % (
        self._config['user'], self._config['password'],
        self._config['host'], self._config['port'],
        self._config['database'])
    # create pool
    self._pool = await create_pool(
        dsn=dsn,
        min_size=self._config.get('min_size', 5),
        max_size=self._config.get('max_size', 10),
        dialect=PGDialect())
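# Minimal sketch of the DSN string built above, runnable on its own; the
# config values are placeholders, not real credentials.
config = {'user': 'postgres', 'password': 'secret',
          'host': 'localhost', 'port': 5432, 'database': 'mydb'}
dsn = 'postgresql://%s:%s@%s:%s/%s' % (
    config['user'], config['password'],
    config['host'], config['port'], config['database'])
print(dsn)  # postgresql://postgres:secret@localhost:5432/mydb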
def __init__(self, project=None, rpc=None):
    """
    Args:
        project (str, optional): A Project Identifier
        rpc (Connect, optional): An RPC Connection object
    """
    if rpc:
        self.rpc = rpc
    else:
        self.rpc = Connect()

    if project:
        try:
            # See if this is a project ID already
            uuid.UUID(project)
            self._project_id = six.text_type(project)
        except ValueError:
            if '/' in project:
                # This is a path lookup
                self._project_id = self.rpc.analyze.project.lookup_by_full_path(path=project)
            else:
                # This is a name lookup
                self._project_id = self.rpc.analyze.project.lookup_by_name(name=project)
    else:
        self._project_id = rpc.project_id

    _dialect_kind = 'greenplum'  # This should come from the project primary database setting eventually
    if _dialect_kind == 'greenplum':
        self.dialect = GreenplumDialect()
    elif _dialect_kind == 'hana':
        self.dialect = HANABaseDialect()
    elif _dialect_kind == 'hive':
        raise Exception('Hive not supported from plaidtools currently.')
    elif _dialect_kind == 'spark':
        raise Exception('Spark not supported from plaidtools currently.')
    elif _dialect_kind == 'oracle':
        raise Exception('Oracle not supported from plaidtools currently.')
    elif _dialect_kind == 'mssql':
        raise Exception('MS SQL Server not supported from plaidtools currently.')
    else:
        self.dialect = PGDialect()
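# Standalone sketch of the ID-vs-name test used above: uuid.UUID() raises
# ValueError for anything that is not a valid UUID string. The helper name
# is hypothetical, introduced only for illustration.
import uuid

def looks_like_project_id(value):
    try:
        uuid.UUID(value)
        return True
    except ValueError:
        return False

print(looks_like_project_id('12345678-1234-5678-1234-567812345678'))  # True
print(looks_like_project_id('Sales/Quarterly Model'))                 # False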
def import_schema(self, schema, srv_options, options, restriction_type, restricts):
    """
    Reflects the remote schema.
    """
    metadata = MetaData()
    url = _parse_url_from_options(srv_options)
    engine = create_engine(url)
    dialect = PGDialect()
    if restriction_type == 'limit':
        only = restricts
    elif restriction_type == 'except':
        only = lambda t, _: t not in restricts
    else:
        only = None
    metadata.reflect(bind=engine, schema=schema, only=only)
    to_import = []
    for _, table in sorted(metadata.tables.items()):
        ftable = TableDefinition(table.name)
        ftable.options['schema'] = schema
        ftable.options['tablename'] = table.name
        for c in table.c:
            # Force collation to None to prevent incompatibilities
            setattr(c.type, "collation", None)
            # If the type is specialized, call the generic
            # superclass method
            if type(c.type) in CONVERSION_MAP:
                class_name = CONVERSION_MAP[type(c.type)]
                old_args = c.type.__dict__
                c.type = class_name()
                c.type.__dict__.update(old_args)
            if c.primary_key:
                ftable.options['primary_key'] = c.name
            ftable.columns.append(ColumnDefinition(
                c.name,
                type_name=c.type.compile(dialect)))
        to_import.append(ftable)
    return to_import
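# Standalone sketch of the type-compilation step above: compiling a SQLAlchemy
# type against PGDialect yields its PostgreSQL type name as a string.
from sqlalchemy import types
from sqlalchemy.dialects.postgresql.base import PGDialect

dialect = PGDialect()
print(types.Integer().compile(dialect))   # INTEGER
print(types.String(50).compile(dialect))  # VARCHAR(50)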
from pybigquery.sqlalchemy_bigquery import BigQueryDialect
from snowflake.sqlalchemy.snowdialect import SnowflakeDialect
from sqlalchemy.dialects.mysql.base import MySQLDialect
from sqlalchemy.dialects.postgresql.base import PGDialect

from panoramic.cli.husky.service.types.enums import HuskyQueryRuntime

RUNTIME_DIALECTS = {
    HuskyQueryRuntime.snowflake: SnowflakeDialect(),
    HuskyQueryRuntime.bigquery: BigQueryDialect(),
    HuskyQueryRuntime.mysql: MySQLDialect(),
    HuskyQueryRuntime.postgres: PGDialect(),
}
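# Hypothetical usage sketch: the mapping picks a dialect instance per runtime,
# which can then be handed to SQLAlchemy's compiler. The select([...]) form
# matches SQLAlchemy 1.x; the table and column names are placeholders.
from sqlalchemy import column, select, table

stmt = select([column('id')]).select_from(table('events'))
dialect = RUNTIME_DIALECTS[HuskyQueryRuntime.postgres]
print(stmt.compile(dialect=dialect))  # SELECT id FROM events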