예제 #1
0
 def __init__(self, postgres_url, inst_data=None):
     """Run storage backed by the Postgres database at *postgres_url*.

     Ensures the run-storage tables exist before returning.
     """
     self._inst_data = check.opt_inst_param(inst_data, 'inst_data', ConfigurableClassData)
     self.postgres_url = postgres_url
     # AUTOCOMMIT + NullPool: each operation gets a fresh connection instead of
     # holding pooled connections open for the life of this object.
     self._engine = create_engine(
         self.postgres_url,
         isolation_level='AUTOCOMMIT',
         poolclass=db.pool.NullPool,
     )
     RunStorageSqlMetadata.create_all(self._engine)
예제 #2
0
 def wipe_storage(mysql_url):
     """Drop every run-storage table from the MySQL database at *mysql_url*."""
     throwaway_engine = create_engine(
         mysql_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
     )
     try:
         RunStorageSqlMetadata.drop_all(throwaway_engine)
     finally:
         # Release the connection even when drop_all raises.
         throwaway_engine.dispose()
예제 #3
0
 def create_clean_storage(postgres_url):
     """Drop any existing run-storage tables, then return a fresh PostgresRunStorage."""
     throwaway_engine = create_engine(
         postgres_url, isolation_level='AUTOCOMMIT', poolclass=db.pool.NullPool
     )
     try:
         RunStorageSqlMetadata.drop_all(throwaway_engine)
     finally:
         # Dispose unconditionally so a failed drop does not leak the connection.
         throwaway_engine.dispose()
     return PostgresRunStorage(postgres_url)
예제 #4
0
 def create_clean_storage(postgres_url, should_autocreate_tables=True):
     """Drop any existing run-storage tables, then return a fresh PostgresRunStorage.

     *should_autocreate_tables* is forwarded unchanged to the storage constructor.
     """
     throwaway_engine = create_engine(
         postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
     )
     try:
         RunStorageSqlMetadata.drop_all(throwaway_engine)
     finally:
         # Dispose unconditionally so a failed drop does not leak the connection.
         throwaway_engine.dispose()
     return PostgresRunStorage(postgres_url, should_autocreate_tables)
예제 #5
0
    def __init__(self, postgres_url, inst_data=None):
        """Postgres-backed run storage; ensures the schema exists up front."""
        self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
        self.postgres_url = postgres_url
        # AUTOCOMMIT + NullPool: open a fresh connection per operation rather
        # than keeping pooled connections alive for the instance's lifetime.
        self._engine = create_engine(
            self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
        )
        with self.connect() as conn:
            RunStorageSqlMetadata.create_all(conn)
예제 #6
0
    def __init__(self, postgres_url, inst_data=None):
        """Run storage on Postgres; creates any missing tables on construction."""
        self.postgres_url = postgres_url
        self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)

        # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
        self._engine = create_engine(
            self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
        )

        with self.connect() as conn:
            RunStorageSqlMetadata.create_all(conn)
예제 #7
0
    def __init__(self, mysql_url, inst_data=None):
        """Run storage backed by MySQL; bootstraps the schema on first use.

        Args:
            mysql_url (str): Connection URL for the MySQL database.
            inst_data: Optional ``ConfigurableClassData`` carried for the
                configurable-class machinery; not inspected here.
        """
        self._inst_data = check.opt_inst_param(inst_data, "inst_data",
                                               ConfigurableClassData)
        self.mysql_url = mysql_url

        # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
        self._engine = create_engine(
            self.mysql_url,
            isolation_level="AUTOCOMMIT",
            poolclass=db.pool.NullPool,
        )

        # Cache consulted when checking/building secondary indexes.
        self._index_migration_cache = {}
        # The bound method is passed uncalled so the retry helper can invoke
        # it again on transient connection failures.
        table_names = retry_mysql_connection_fn(
            db.inspect(self._engine).get_table_names)

        # Fresh database: create the schema and stamp the alembic revision so
        # later migrations start from a known baseline.
        if "runs" not in table_names:
            with self.connect() as conn:
                alembic_config = mysql_alembic_config(__file__)
                retry_mysql_creation_fn(
                    lambda: RunStorageSqlMetadata.create_all(conn))
                stamp_alembic_rev(alembic_config, conn)
            # A just-created schema is fully up to date, so mark the
            # secondary indexes as built.
            self.build_missing_indexes()

        super().__init__()
예제 #8
0
    def __init__(self, postgres_url, inst_data=None):
        """Run storage backed by Postgres; bootstraps the schema on first use.

        Args:
            postgres_url (str): Connection URL for the Postgres database.
            inst_data: Optional ``ConfigurableClassData`` carried for the
                configurable-class machinery; not inspected here.
        """
        self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
        self.postgres_url = postgres_url

        # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
        self._engine = create_engine(
            self.postgres_url,
            isolation_level="AUTOCOMMIT",
            poolclass=db.pool.NullPool,
        )

        # Cache consulted when checking/building secondary indexes.
        self._index_migration_cache = {}
        # Wrapped in a lambda so the retry helper can re-run the inspection
        # on transient connection failures.
        table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())

        # Stamp and create tables if there's no previously stamped revision and the main table
        # doesn't exist (since we used to not stamp postgres storage when it was first created)
        if "runs" not in table_names:
            with self.connect() as conn:
                retry_pg_creation_fn(lambda: RunStorageSqlMetadata.create_all(conn))

                # This revision may be shared by any other dagster storage classes using the same DB
                stamp_alembic_rev(pg_alembic_config(__file__), conn)

            # mark all secondary indexes as built
            self.build_missing_indexes()

        super().__init__()
예제 #9
0
 def _init_db(self):
     """Create all run-storage tables and stamp the alembic head revision."""
     with self.connect() as conn, conn.begin():
         RunStorageSqlMetadata.create_all(conn)
         # This revision may be shared by any other dagster storage classes using the same DB
         stamp_alembic_rev(pg_alembic_config(__file__), conn)
예제 #10
0
 def __init__(self, postgres_url, inst_data=None):
     """Postgres-backed run storage; ensures the schema exists on construction."""
     self._inst_data = check.opt_inst_param(inst_data, 'inst_data', ConfigurableClassData)
     self.postgres_url = postgres_url
     # get_engine() scopes a disposable engine to this block.
     with self.get_engine() as engine:
         RunStorageSqlMetadata.create_all(engine)
예제 #11
0
 def _init_db(self):
     """Create all run-storage tables and stamp the alembic head revision."""
     with self.connect() as conn, conn.begin():
         RunStorageSqlMetadata.create_all(conn)
         # Stamp so future migrations start from the current head revision.
         stamp_alembic_rev(mysql_alembic_config(__file__), conn)