# These snippets assume SQLAlchemy (`create_engine`, and `db` as the sqlalchemy
# module) plus Dagster's `check`, `ConfigurableClassData`,
# `ScheduleStorageSqlMetadata`, and the alembic helpers are imported at module scope.
def __init__(self, postgres_url, inst_data=None):
    self.postgres_url = postgres_url
    self._engine = create_engine(
        self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
    )
    ScheduleStorageSqlMetadata.create_all(self._engine)
    self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
def wipe_storage(mysql_url):
    engine = create_engine(mysql_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool)
    try:
        ScheduleStorageSqlMetadata.drop_all(engine)
    finally:
        engine.dispose()
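# Usage sketch (an assumption, not from the source): wipe_storage is suited to
# resetting a disposable test database between runs; the URL is a placeholder.
#
#   wipe_storage("mysql://test:test@localhost:3306/test")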
def create_clean_storage(postgres_url, should_autocreate_tables=True):
    engine = create_engine(postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool)
    try:
        ScheduleStorageSqlMetadata.drop_all(engine)
    finally:
        engine.dispose()
    return PostgresScheduleStorage(postgres_url, should_autocreate_tables)
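# Usage sketch (an assumption, not from the source): create_clean_storage drops
# any existing schedule tables and returns a fresh storage, which makes it a
# natural test fixture. The URL below is a placeholder.
#
#   storage = create_clean_storage("postgresql://test:test@localhost:5432/test")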
def _init_db(self):
    with self.connect() as conn:
        with conn.begin():
            ScheduleStorageSqlMetadata.create_all(conn)
            stamp_alembic_rev(pg_alembic_config(__file__), conn)

    # mark all the data migrations as applied
    self.migrate()
    self.optimize()
def create_clean_storage(postgres_url):
    engine = create_engine(postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool)
    try:
        ScheduleStorageSqlMetadata.drop_all(engine)
    finally:
        engine.dispose()
    return PostgresScheduleStorage(postgres_url)
def __init__(self, postgres_url, inst_data=None):
    self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
    self.postgres_url = postgres_url

    # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
    self._engine = create_engine(
        self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
    )

    with self.connect() as conn:
        ScheduleStorageSqlMetadata.create_all(conn)
def __init__(self, postgres_url, should_autocreate_tables=True, inst_data=None):
    self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
    self.postgres_url = postgres_url
    self.should_autocreate_tables = check.bool_param(
        should_autocreate_tables, "should_autocreate_tables"
    )

    # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
    self._engine = create_engine(
        self.postgres_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
    )

    table_names = retry_pg_connection_fn(lambda: db.inspect(self._engine).get_table_names())

    missing_main_table = "schedules" not in table_names and "jobs" not in table_names
    if self.should_autocreate_tables and missing_main_table:
        with self.connect() as conn:
            alembic_config = pg_alembic_config(__file__)
            retry_pg_creation_fn(lambda: ScheduleStorageSqlMetadata.create_all(conn))

            # This revision may be shared by any other dagster storage classes using the same DB
            stamp_alembic_rev(alembic_config, conn)

    super().__init__()
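# The retry helpers used above are not shown in this section. Below is a
# plausible sketch of a connection-retry wrapper, assuming it retries a callable
# on transient connection failures with a short sleep. The name, retry budget,
# and wait interval are assumptions, not dagster's actual implementation.
import time

import sqlalchemy.exc

def retry_pg_connection_fn_sketch(fn, retry_limit=5, retry_wait=0.2):
    # Retry fn() on transient OperationalErrors (e.g. the database is still
    # starting up), sleeping between attempts; re-raise once retries run out.
    while True:
        try:
            return fn()
        except sqlalchemy.exc.OperationalError:
            if retry_limit <= 0:
                raise
            retry_limit -= 1
            time.sleep(retry_wait)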
def __init__(self, mysql_url, inst_data=None):
    self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
    self.mysql_url = mysql_url

    # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
    self._engine = create_engine(
        self.mysql_url, isolation_level="AUTOCOMMIT", poolclass=db.pool.NullPool
    )

    table_names = retry_mysql_connection_fn(db.inspect(self._engine).get_table_names)
    if "jobs" not in table_names:
        with self.connect() as conn:
            alembic_config = mysql_alembic_config(__file__)
            retry_mysql_creation_fn(lambda: ScheduleStorageSqlMetadata.create_all(conn))
            stamp_alembic_rev(alembic_config, conn)

    super().__init__()
def __init__(self, postgres_url, inst_data=None):
    self.postgres_url = postgres_url
    with self.get_engine() as engine:
        ScheduleStorageSqlMetadata.create_all(engine)
    self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)
def _init_db(self):
    with self.connect() as conn:
        with conn.begin():
            ScheduleStorageSqlMetadata.create_all(conn)
            stamp_alembic_rev(pg_alembic_config(__file__), conn)