Example #1
@contextmanager
def create_pg_connection(engine, dunder_file, storage_type_desc=None):
    check.inst_param(engine, "engine", sqlalchemy.engine.Engine)
    check.str_param(dunder_file, "dunder_file")
    check.opt_str_param(storage_type_desc, "storage_type_desc")

    # Normalize the optional description so it splices cleanly into the
    # migration error message below
    if storage_type_desc:
        storage_type_desc += " "
    else:
        storage_type_desc = ""

    conn = None
    try:
        # Retry connection to gracefully handle transient connection issues
        conn = retry_pg_connection_fn(engine.connect)
        with handle_schema_errors(
            conn,
            get_alembic_config(dunder_file),
            msg="Postgres {}storage requires migration".format(storage_type_desc),
        ):
            yield conn
    finally:
        if conn:
            conn.close()
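Because the body yields (and is wrapped in @contextmanager), callers use it as a context manager. A minimal usage sketch; the engine URL and the query below are illustrative, not from the source:

engine = sqlalchemy.create_engine("postgresql://user:password@localhost/dagster")  # hypothetical URL
with create_pg_connection(engine, __file__, storage_type_desc="run") as conn:
    conn.execute("SELECT 1")  # work against the migrated schema goes here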
Example #2
    @staticmethod
    def from_local(base_dir, inst_data=None):
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "runs")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)

        should_mark_indexes = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                RunStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                should_mark_indexes = True

        run_storage = SqliteRunStorage(conn_string, inst_data)

        if should_mark_indexes:
            # mark all secondary indexes
            run_storage.build_missing_indexes()

        return run_storage
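A hedged usage sketch, assuming the snippet above is the @staticmethod on Dagster's SqliteRunStorage; the path is illustrative:

run_storage = SqliteRunStorage.from_local("/tmp/dagster/runs")  # illustrative path
# On first use this creates, stamps, and indexes the database; subsequent
# calls find a stamped alembic revision and skip the setup branch.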
Example #3
    @classmethod
    def from_local(cls, base_dir, inst_data=None):
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "schedules")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)

        should_migrate_data = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                ScheduleStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                should_migrate_data = True

        schedule_storage = cls(conn_string, inst_data)
        if should_migrate_data:
            schedule_storage.migrate()
            schedule_storage.optimize()

        return schedule_storage
Example #4
    @contextmanager
    def connect(self, run_id=None):
        with self._db_lock:
            check.str_param(run_id, "run_id")

            conn_string = self.conn_string_for_run_id(run_id)
            engine = create_engine(conn_string, poolclass=NullPool)

            # Lazily create and stamp the per-run database on first access
            if run_id not in self._initialized_dbs:
                self._initdb(engine)
                self._initialized_dbs.add(run_id)

            conn = engine.connect()

            try:
                with handle_schema_errors(
                    conn,
                    get_alembic_config(__file__),
                    msg="SqliteEventLogStorage for run {run_id}".format(run_id=run_id),
                ):
                    yield conn
            finally:
                conn.close()
            engine.dispose()
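This, too, is a generator-based context manager; a call site looks roughly like the sketch below (the storage instance and run id are assumptions):

with event_log_storage.connect(run_id="some-run-id") as conn:
    conn.execute("SELECT count(*) FROM event_logs")  # illustrative query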
Example #5
    def __init__(self, postgres_url, inst_data=None):
        self._inst_data = check.opt_inst_param(inst_data, "inst_data",
                                               ConfigurableClassData)
        self.postgres_url = check.str_param(postgres_url, "postgres_url")
        self._disposed = False

        self._event_watcher = PostgresEventWatcher(self.postgres_url)

        # Default to not holding any connections open to prevent accumulating connections per DagsterInstance
        self._engine = create_engine(self.postgres_url,
                                     isolation_level="AUTOCOMMIT",
                                     poolclass=db.pool.NullPool)
        self._secondary_index_cache = {}

        table_names = db.inspect(self._engine).get_table_names()
        if "event_logs" not in table_names:
            with self.connect() as conn:
                alembic_config = get_alembic_config(__file__)
                retry_pg_creation_fn(
                    lambda: SqlEventLogStorageMetadata.create_all(conn))

                # This revision may be shared by any other dagster storage classes using the same DB
                stamp_alembic_rev(alembic_config, self._engine)
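Construction is then a one-liner. Assuming the __init__ above belongs to Dagster's PostgresEventLogStorage, with an illustrative URL:

event_log_storage = PostgresEventLogStorage("postgresql://user:password@localhost:5432/dagster")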
Example #6
    def upgrade(self):
        # Run alembic migrations directly against the engine
        alembic_config = get_alembic_config(__file__)
        run_alembic_upgrade(alembic_config, self._engine)
Example #7
    def upgrade(self):
        # Run alembic migrations inside a managed connection
        alembic_config = get_alembic_config(__file__)
        with self.connect() as conn:
            run_alembic_upgrade(alembic_config, conn)
Example #8
    def upgrade(self):
        # Acquire the engine through a context manager before migrating
        alembic_config = get_alembic_config(__file__)
        with self.get_engine() as engine:
            run_alembic_upgrade(alembic_config, engine)
Example #9
def pg_alembic_config(dunder_file):
    return get_alembic_config(dunder_file,
                              config_path="../alembic/alembic.ini",
                              script_path="../alembic/")
Example #10
    def _alembic_upgrade(self, rev="head"):
        alembic_config = get_alembic_config(__file__)
        with self.connect() as conn:
            run_alembic_upgrade(alembic_config, conn, rev=rev)
Example #11
 def _alembic_downgrade(self, rev="head"):
     alembic_config = get_alembic_config(__file__)
     with self.connect() as conn:
         run_alembic_downgrade(alembic_config, conn, rev=rev)
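A hedged roundtrip sketch using the two helpers above; the storage instance is an assumption, and "base" / "head" are standard alembic revision aliases:

storage._alembic_downgrade(rev="base")  # unwind every migration
storage._alembic_upgrade(rev="head")    # re-apply up to the latest revision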