Example #1
0
    def from_local(cls, base_dir, inst_data=None):
        """Build a run storage backed by a SQLite db rooted at ``base_dir``.

        Ensures the directory and the 'runs' database exist, stamps the
        alembic revision when the schema is freshly created, backfills the
        ``instance_info`` table on older databases, and runs migrate/optimize
        on a brand-new storage before returning it.
        """
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "runs")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)

        needs_index_migration = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                # Fresh database: create all tables and record the schema revision.
                RunStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                needs_index_migration = True

            # Databases created before instance_info existed need it added.
            if "instance_info" not in db.inspect(engine).get_table_names():
                InstanceInfo.create(engine)

        storage = cls(conn_string, inst_data)

        if needs_index_migration:
            storage.migrate()
            storage.optimize()

        return storage
Example #2
0
 def _init_db(self):
     """Create the event-log SQLite database and stamp its alembic revision.

     Ensures the base directory exists, creates all tables (idempotent), and
     stamps the current alembic head if the db carries no revision yet.
     """
     mkdir_p(self._base_dir)
     engine = create_engine(self._conn_string, poolclass=NullPool)
     SqlEventLogStorageMetadata.create_all(engine)
     engine.execute('PRAGMA journal_mode=WAL;')
     alembic_config = get_alembic_config(__file__)
     # Fix: use a context manager so the connection is closed instead of
     # leaked, and stamp against the connection (not the engine) to match
     # the connection used for the revision check.
     with engine.connect() as connection:
         db_revision, head_revision = check_alembic_revision(alembic_config, connection)
         if not (db_revision and head_revision):
             stamp_alembic_rev(alembic_config, connection)
Example #3
0
    def from_local(base_dir, inst_data=None):
        """Construct a SqliteScheduleStorage rooted at ``base_dir``.

        Creates the directory and the 'schedules' SQLite database if needed,
        and stamps the alembic revision on a freshly-created database.
        """
        check.str_param(base_dir, 'base_dir')
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, 'schedules')
        engine = create_engine(conn_string, poolclass=NullPool)
        engine.execute('PRAGMA journal_mode=WAL;')
        ScheduleStorageSqlMetadata.create_all(engine)
        alembic_config = get_alembic_config(__file__)
        # Fix: use a context manager so the connection is closed instead of
        # leaked, and stamp against the connection (not the engine) to match
        # the connection used for the revision check.
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                stamp_alembic_rev(alembic_config, connection)

        return SqliteScheduleStorage(conn_string, inst_data)
Example #4
0
    def from_local(base_dir, inst_data=None):
        """Return a SqliteRunStorage for the 'runs' db under ``base_dir``.

        Creates the database, its tables, and the alembic revision stamp
        on first use; subsequent calls find an already-stamped schema and
        leave it untouched.
        """
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "runs")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            schema_is_stamped = bool(db_revision and head_revision)
            if not schema_is_stamped:
                # Fresh database: build the schema and record the revision.
                RunStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)

        return SqliteRunStorage(conn_string, inst_data)
Example #5
0
    def _initdb(self, engine):
        """Initialize the event-log schema, retrying on concurrent-init races.

        Multiple processes (e.g. dagit and a user code process) may race to
        initialize the same SQLite database. Errors that indicate another
        process already did (or is doing) the work are retried with a short
        sleep, up to a fixed retry budget; anything else propagates.
        """
        alembic_config = get_alembic_config(__file__)

        retries_remaining = 10

        # Error substrings that signal a benign concurrent-initialization
        # race rather than a real failure.
        recoverable_markers = (
            "table asset_keys already exists",
            "table secondary_indexes already exists",
            "table event_logs already exists",
            "database is locked",
            "table alembic_version already exists",
            "UNIQUE constraint failed: alembic_version.version_num",
        )

        while True:
            try:
                SqlEventLogStorageMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")

                with engine.connect() as connection:
                    db_revision, head_revision = check_alembic_revision(alembic_config, connection)

                if not (db_revision and head_revision):
                    stamp_alembic_rev(alembic_config, engine)

                return
            except (db.exc.DatabaseError, sqlite3.DatabaseError, sqlite3.OperationalError) as exc:
                err_msg = str(exc)

                # Unrecognized error: not a concurrency artifact, re-raise.
                if not any(marker in err_msg for marker in recoverable_markers):
                    raise

                # Retry budget exhausted: give up and surface the error.
                if retries_remaining == 0:
                    raise

                logging.info(
                    "SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "
                    "retrying ({retry_limit} retries left). Exception: {str_exc}".format(
                        retry_limit=retries_remaining, str_exc=err_msg
                    )
                )
                time.sleep(0.2)
                retries_remaining -= 1
Example #6
0
    def _init_db(self):
        """Create the event-log database if needed and reindex when fresh.

        When the database carries no alembic revision yet, the schema is
        created, WAL mode is enabled, the revision is stamped, and all
        secondary indexes are marked via the reindex helpers.
        """
        mkdir_p(self._base_dir)
        engine = create_engine(self._conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)

        freshly_created = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                # No revision stamp: this is a brand-new database.
                SqlEventLogStorageMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                freshly_created = True

        if freshly_created:
            # mark all secondary indexes
            self.reindex_events()
            self.reindex_assets()
Example #7
0
    def from_local(base_dir, inst_data=None):
        """Return a SqliteRunStorage for the 'runs' db under ``base_dir``.

        On first use the schema is created, WAL mode is enabled, and the
        alembic revision is stamped; the new storage then builds any
        missing secondary indexes before being returned.
        """
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "runs")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)

        freshly_created = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                # No revision stamp: brand-new database, build the schema.
                RunStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                freshly_created = True

        storage = SqliteRunStorage(conn_string, inst_data)

        if freshly_created:
            # mark all secondary indexes
            storage.build_missing_indexes()

        return storage
Example #8
0
    def from_local(cls, base_dir, inst_data=None):
        """Build a schedule storage backed by the 'schedules' SQLite db.

        Creates the database and stamps its alembic revision on first use;
        a freshly-created storage is migrated and optimized before being
        returned.
        """
        check.str_param(base_dir, "base_dir")
        mkdir_p(base_dir)
        conn_string = create_db_conn_string(base_dir, "schedules")
        engine = create_engine(conn_string, poolclass=NullPool)
        alembic_config = get_alembic_config(__file__)

        freshly_created = False
        with engine.connect() as connection:
            db_revision, head_revision = check_alembic_revision(alembic_config, connection)
            if not (db_revision and head_revision):
                # No revision stamp: brand-new database, build the schema.
                ScheduleStorageSqlMetadata.create_all(engine)
                engine.execute("PRAGMA journal_mode=WAL;")
                stamp_alembic_rev(alembic_config, connection)
                freshly_created = True

        storage = cls(conn_string, inst_data)
        if freshly_created:
            storage.migrate()
            storage.optimize()

        return storage