def import_metadata(verbose: bool = False):
    """
    Import metadata for measurements.
    """
    data_path = get_data_directory()

    fn = data_path / "WiscAr_metadata.xlsx"
    assert fn.exists()

    app = sparrow.get_app()
    importer = MetadataImporter(app, fn, verbose=verbose)
    def __init__(self, app=None, **kwargs):
        if app is None:
            app = sparrow.get_app()
        self.app = app
        self.db = self.app.database

        self.m = self.db.model
        print_sql = kwargs.pop("print_sql", False)
        self.verbose = kwargs.pop("verbose", False)

        # This is somewhat unsatisfying: we shouldn't have to read the data
        # directory directly from the environment here.
        self.basedir = environ.get("SPARROW_DATA_DIR", None)

        # Track new, dirty, and deleted models between commits
        # (also lets us turn off change-tracking for speed)
        self.__dirty = set()
        self.__new = set()
        self.__deleted = set()

        @event.listens_for(self.db.session, "before_flush")
        def on_before_flush(session, flush_context, instances):
            self.__dirty |= set(session.dirty)
            self.__new |= set(session.new)
            self.__deleted |= set(session.deleted)

        @event.listens_for(self.db.session, "after_commit")
        def on_after_commit(session):
            self.__dirty = set()
            self.__new = set()
            self.__deleted = set()

        if print_sql:

            @event.listens_for(self.db.engine,
                               "after_cursor_execute",
                               named=True)
            def receive_after_cursor_execute(**kw):
                statement = kw.pop("statement")
                if statement.startswith("SELECT"):
                    return
                secho(str(statement).strip())

        if self.file_type is not None:
            v = self.db.get_or_create(self.m.data_file_type, id=self.file_type)
            self.add(v)
            self.db.session.commit()

        # Deprecated alias for the model namespace
        self.models = self.m
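
    # A minimal sketch (not part of the original class) showing how the change
    # sets tracked by the listeners above could be surfaced. `pending_changes`
    # is a hypothetical helper name used only for illustration.
    def pending_changes(self):
        """Return models touched since the last commit, grouped by change type."""
        return {
            "new": set(self.__new),
            "dirty": set(self.__dirty),
            "deleted": set(self.__deleted),
        }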
def import_noblesse(
    redo: bool = False,
    stop_on_error: bool = False,
    verbose: bool = False,
    show_data: bool = False,
):
    """
    Import WiscAr Noblesse spectrometer data (ArArCalc files) in bulk.
    """
    data_base = get_data_directory()
    data_path = data_base / "Noblesse-test-data"

    # Make sure we are working in the data directory (for some reason this is important)
    # TODO: fix in sparrow
    chdir(str(data_base))

    app = sparrow.get_app()
    importer = NoblesseImporter(app, verbose=verbose, show_data=show_data)
    # TODO: fix for both xls and xlsx files
    importer.iterfiles(data_path.glob("**/*.xlsx"), redo=redo)
    importer.iterfiles(data_path.glob("**/*.xls"), redo=redo)
def import_map(
    redo: bool = False,
    stop_on_error: bool = False,
    verbose: bool = False,
    show_data: bool = False,
):
    """
    Import WiscAr MAP spectrometer data (ArArCalc files) in bulk.
    """
    data_base = get_data_directory()
    data_path = data_base / "MAP-Irradiations"

    # Make sure we are working in the data directory (for some reason this is important)
    # TODO: fix in sparrow
    chdir(str(data_base))

    app = sparrow.get_app()
    importer = MAPImporter(app, verbose=verbose, show_data=show_data)
    importer.iterfiles(data_path.glob("**/*.xls"), redo=redo)

    # Clean up data inconsistencies
    fp = relative_path(__file__, "sql", "clean-data.sql")
    app.database.exec_sql(fp)
def pychron_import_command(redo: bool = False):
    """Import PyChron Interpreted Age files."""
    app = sparrow.get_app()
    importer = PyChronImporter(app, verbose=True)
    importer.import_all(redo=redo)
from celery import Celery
from celery.signals import after_setup_logger
from sparrow_utils import setup_stderr_logs
import sparrow
import redis
import json
import time

_app = sparrow.get_app()
_app.setup_database()

app = _app.plugins.get("task-manager").celery

queue = redis.StrictRedis(host="broker", port=6379, db=0)
channel = queue.pubsub()


@app.task(bind=True, name="background-task")
def background_task(self):
    # Publish a progress update each second, then a final success message
    for tick in range(5):
        time.sleep(1)
        print("Hello, world")
        queue.publish("task:background_task", json.dumps({"progress": tick}))
    queue.publish("task:background_task", json.dumps({"success": True}))
    return True
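
# A minimal sketch (not part of the original module) of how a consumer could
# listen for the progress messages published by background_task above. It
# assumes the same Redis broker host and "task:background_task" channel.
def watch_background_task():
    r = redis.StrictRedis(host="broker", port=6379, db=0)
    pubsub = r.pubsub()
    pubsub.subscribe("task:background_task")
    for message in pubsub.listen():
        # Skip subscribe confirmations; only handle published payloads
        if message["type"] != "message":
            continue
        payload = json.loads(message["data"])
        print(payload)
        if payload.get("success"):
            break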


# celery = Celery("tasks", broker="redis://broker//")

# # Expire results quickly so that we don't fill up Redi's broker queue
# celery.conf.result_expires = 60