Example #1
0
def test_create_db_new_file(tmp_path, caplog):
    """Creating a DB at a fresh path makes the file and logs missing migrations."""
    db_file = tmp_path / "foo.db"
    orm.create_db(str(db_file))
    # The call should have created the file on disk.
    assert db_file.exists()
    assert "Missing migrations:" in caplog.text
    # A brand-new file has every migration missing; seeing the first one
    # ("0001") in the log implies all the later ones were missing too.
    assert "0001" in caplog.text
Example #2
0
def test_create_db_failure(broken_db, caplog):
    """A failed migration reverts the DB from the timestamped backup file."""
    # orm.db.init changes the file so we can't look at a "before" hash.

    # Attempt the migration; it is expected to fail on the broken DB.
    orm.create_db(str(broken_db))

    # Both the original file and its backup must exist and have identical
    # contents (the revert restored the pre-migration state).
    expected_backup = Path(f"{broken_db}.20190228_150202")
    assert broken_db.exists()
    assert expected_backup.exists()
    assert _hash_file(broken_db) == _hash_file(expected_backup)

    # Finally, verify the logged failure/revert sequence.
    assert "Missing migrations:" in caplog.text
    assert "Created database backup" in caplog.text
    assert "Failed to apply database migrations" in caplog.text
    assert "Reverting to backup file" in caplog.text
def test_migration_0005_to_0006(db_0005_with_data):
    """
    There is an issue with upgrading from migration 0005 to 0006, introduced
    in #143 / 6d6b050d4bf47d5b3cdc07fef8321c54861cfea1.

    The issue is tracked in #158. Basically the upgrade would drop the
    contents of the `datapoint` table, likely because of the `ON DELETE
    CASCADE` that was added in migration 0005.
    """
    def _snapshot():
        # Grab the current contents of the tables we care about, plus the
        # list of applied migrations, in a single connection.
        with orm.db:
            metrics = orm.db.execute_sql('SELECT * FROM "Metric"').fetchall()
            data = orm.db.execute_sql('SELECT * FROM "Datapoint"').fetchall()
            applied = orm.db.execute_sql(
                'SELECT * FROM "migration_history"').fetchall()
        return metrics, data, applied

    # Verify we have data to begin with.
    metrics_before, data_before, applied_before = _snapshot()
    assert len(metrics_before) != 0
    assert len(data_before) != 0
    # Make sure we do not have migration 0006 applied yet.
    msg = "Migration 0006 applied when it shouldn't be."
    assert not any("0006" in row[1] for row in applied_before), msg

    # Then upgrade to 0006.
    # Note: we can't use manager.upgrade, as that doesn't reproduce the issue
    orm.create_db(str(db_0005_with_data))

    metrics_after, data_after, applied_after = _snapshot()

    # Ensure that migration 0006 *is* applied now.
    msg = "Migration 0006 is not applied, it should be."
    assert any("0006" in row[1] for row in applied_after), msg

    # And that the data survived the upgrade unchanged.
    assert len(metrics_after) != 0
    assert metrics_after == metrics_before
    assert len(data_after) != 0
    assert data_after == data_before
Example #4
0
def app(tmp_path):
    """
    A test app.

    Needed to do things like creating a test client.
    """
    database_path = Path(tmp_path) / "test.db"

    # Stub out create_db while the app is constructed. Letting it run now
    # would build a database at `app.config['DATABASE']` (typically
    # `./internal.db`), which is not what we want for tests.
    with patch('trendlines.orm.create_db', MagicMock()):
        flask_app = create_app()

    # `create_db` modifies orm.db directly, so calling it afterwards is
    # sufficient. Technically whatever DATABASE value is set in
    # default_config.py or TRENDLINES_CONFIG_FILE gets made first, but that
    # doesn't matter for these tests.
    create_db(str(database_path))
    flask_app.testing = True
    yield flask_app
Example #5
0
def create_app():
    """
    Primary application factory.

    Builds the Flask app, loads configuration (package defaults first, then
    an optional config file named by the ``CFG_VAR`` environment variable),
    registers the page and API blueprints, creates/migrates the database
    file, and wires per-request database connection handling.

    Returns
    -------
    flask.Flask
        The fully configured application instance.
    """
    _logging.setup_logging(logger)

    logger.debug("Creating app.")
    app = Flask(__name__)
    app.config.from_object('trendlines.default_config')

    try:
        app.config.from_envvar(CFG_VAR)
        # Lazy %-style args: formatting is skipped when INFO is disabled.
        logger.info("Loaded config file '%s'", os.environ[CFG_VAR])
    except FileNotFoundError:
        msg = "Failed to load config file. The file %s='%s' was not found."
        logger.warning(msg, CFG_VAR, os.environ[CFG_VAR])
    except RuntimeError as err:
        # Flask's error for missing env var is sufficient.
        logger.warning(str(err))
    except Exception:
        # Catch-all boundary: log (typo fixed: "occured" -> "occurred")
        # and keep going with the defaults rather than crashing startup.
        logger.warning("An unknown error occurred while reading from the"
                       " config file. See debug stack trace for details.")
        logger.debug(format_exc())

    logger.debug("Registering blueprints.")
    app.register_blueprint(routes.pages)

    # Initialize flask-rest-api for OpenAPI (Swagger) documentation
    routes.api_class.init_app(app)
    routes.api_class.register_blueprint(routes.api)
    routes.api_class.register_blueprint(routes.api_datapoint)
    routes.api_class.register_blueprint(routes.api_metric)

    # Create the database file and populate initial tables if needed.
    orm.create_db(app.config['DATABASE'])

    # If I redesign the architecture a bit, then these could be moved so
    # that they only act on the `api` blueprint instead of the entire app.
    #
    # Also, note that this is safe for multiple simultaneous requests, since
    # the database object is thread local:
    #   "Peewee uses thread local storage to manage connection state, so
    #    this pattern can be used with multi-threaded WSGI servers."
    # http://docs.peewee-orm.com/en/latest/peewee/example.html#establishing-a-database-connection
    @app.before_request
    def before_request():
        """
        Attach the ORM to the flask ``g`` object before every request.

        Why not just use ``from orm import db`` most of the time? Well, it's
        because:
        (a) that's how I'm used to from SQLAlchemy
        (b) I may need ``db`` somewhere where I *can't* import ``orm``
        (c) because that's how the example PeeWee project is set up. /shrug.
            https://github.com/coleifer/peewee/blob/master/examples/twitter/app.py#L152
        """
        g.db = orm.db
        try:
            g.db.connect()
        except OperationalError:
            # Connection is already open (peewee raises when reconnecting);
            # that's fine, just reuse it.
            pass

    @app.after_request
    def after_request(response):
        # Close the per-request connection and pass the response through.
        g.db.close()
        return response

    return app
Example #6
0
def test_create_db_logs_and_raises_operational_error(caplog):
    """An unusable path raises OperationalError *and* logs the failure."""
    with pytest.raises(OperationalError):
        orm.create_db("foo")

    # The log should mention both the failure and the offending path.
    log_output = caplog.text
    assert "Unable to create/open database file" in log_output
    assert "foo" in log_output
Example #7
0
def test_create_db(tmp_path):
    """Happy path: create_db makes the file at the requested location."""
    db_file = tmp_path / "foo.db"
    orm.create_db(str(db_file))
    # The file should exist once create_db returns.
    assert db_file.exists()
Example #8
0
def test_create_db_run_migrations_in_docker(outdated_db, caplog):
    """Migrations succeed even when the default migration dir is unreadable."""
    orm.create_db(str(outdated_db))
    # The fallback path should be logged, followed by the success messages.
    for fragment in ("Failed to open default migration directory",
                     "Success",
                     "Successfully applied database migrations"):
        assert fragment in caplog.text
Example #9
0
def test_create_db_run_migrations_in_docker_fails(outdated_db, caplog):
    """A PermissionError propagates and no success messages are logged."""
    with pytest.raises(PermissionError):
        orm.create_db(str(outdated_db))
    log_output = caplog.text
    assert "Failed to open default migration directory" in log_output
    # Nothing should claim success when the migration blew up.
    assert "Success" not in log_output
    assert "Successfully applied database migrations" not in log_output
Example #10
0
def test_create_db_migration_failure_on_new_file(tmp_path, caplog):
    """A migration failure on a brand-new file is reported in the log."""
    db_file = tmp_path / "foo.db"
    orm.create_db(str(db_file))
    assert "Failed to apply database migrations to the new file" in caplog.text
Example #11
0
def test_create_db_operational_error_and_file_exists(up_to_date_db, caplog):
    """An existing-but-unconnectable DB raises and logs the condition."""
    with pytest.raises(OperationalError):
        orm.create_db(str(up_to_date_db))

    # The log should explain that the file exists but can't be opened.
    assert "but we're unable to connect" in caplog.text
Example #12
0
def test_create_db_applies_missing_migrations(outdated_db, caplog):
    """An out-of-date DB gets backed up and its migrations applied."""
    orm.create_db(str(outdated_db))
    # Expect the full detect -> backup -> apply sequence in the log.
    for fragment in ("Missing migrations:",
                     "Created database backup",
                     "Successfully applied database migrations"):
        assert fragment in caplog.text
Example #13
0
def test_create_db_nothing_to_do(up_to_date_db, caplog):
    """A fully migrated DB results in a no-op, and says so in the log."""
    orm.create_db(str(up_to_date_db))
    assert "Database is up to date." in caplog.text