Example #1
def make_app(cfg, baselayer_handlers, baselayer_settings):
    """Create and return a `tornado.web.Application` object with specified
    handlers and settings.

    Parameters
    ----------
    cfg : Config
        Loaded configuration.  Can be specified with '--config'
        (multiple uses allowed).
    baselayer_handlers : list
        Tornado handlers needed for baselayer to function.
    baselayer_settings : dict
        Settings needed for baselayer to function.

    """
    if cfg['cookie_secret'] == 'abc01234':
        print('!' * 80)
        print('  Your server is insecure. Please update the secret string ')
        print('  in the configuration file!')
        print('!' * 80)

    handlers = baselayer_handlers + [
        #    (r'/some_url(/.*)?', MyTornadoHandler),
        (r'/example_compute', ExampleComputationHandler)
    ]

    settings = baselayer_settings
    settings.update({})  # Specify any additional Tornado settings here

    app = tornado.web.Application(handlers, **settings)
    models.init_db(**cfg['database'])
    model_util.create_tables()
    app.cfg = cfg

    return app
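A minimal launch sketch for this factory (a hedged illustration, not baselayer's actual entry point; the `cfg['ports.app']` key and the empty handler/settings arguments are assumptions):

import tornado.ioloop

# Hypothetical launch: in practice baselayer supplies the handlers and settings.
app = make_app(cfg, baselayer_handlers=[], baselayer_settings={})
app.listen(cfg['ports.app'])  # assumed config key, mirroring other examples here
tornado.ioloop.IOLoop.current().start()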
Example #2
def make_app(cfg,
             baselayer_handlers,
             baselayer_settings,
             process=None,
             env=None):
    """Create and return a `tornado.web.Application` object with specified
    handlers and settings.

    Parameters
    ----------
    cfg : Config
        Loaded configuration.  Can be specified with '--config'
        (multiple uses allowed).
    baselayer_handlers : list
        Tornado handlers needed for baselayer to function.
    baselayer_settings : dict
        Settings needed for baselayer to function.
    process : int
        When launching multiple app servers, which number is this?
    env : argparse.Namespace
        Environment in which the app was launched.  Currently only has
        one attribute, `debug`---True if launched with `--debug`.

    """
    if cfg['cookie_secret'] == 'abc01234':
        print('!' * 80)
        print('  Your server is insecure. Please update the secret string ')
        print('  in the configuration file!')
        print('!' * 80)

    handlers = baselayer_handlers + [
        #    (r'/some_url(/.*)?', MyTornadoHandler),
        (r'/example_compute', ExampleComputationHandler),
        (r'/push_notification', PushNotificationHandler)
    ]

    settings = baselayer_settings
    settings.update({})  # Specify any additional Tornado settings here

    app = tornado.web.Application(handlers, **settings)
    models.init_db(**cfg['database'])

    if process == 0:
        model_util.create_tables(add=env.debug)

    app.cfg = cfg

    return app
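Since only process 0 creates tables, a multi-server launch must hand each server its index. A hypothetical sketch (the `--process` flag and port offset are illustrative, not from the source):

import argparse

# Hypothetical launcher: only the process-0 server touches the schema.
parser = argparse.ArgumentParser()
parser.add_argument('--process', type=int, default=0)
args, _ = parser.parse_known_args()

app = make_app(cfg, baselayer_handlers, baselayer_settings,
               process=args.process, env=env)
app.listen(cfg['ports.app'] + args.process)  # assumed per-process port offset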
Example #3
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    connectable = init_db(**cfg['database'])

    with connectable.connect() as connection:
        context.configure(connection=connection,
                          target_metadata=target_metadata)

        with context.begin_transaction():
            context.run_migrations()
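This is the standard Alembic env.py "online" pattern, with the engine coming from baselayer's `init_db` rather than `engine_from_config`. For completeness, the usual offline counterpart (straight from the Alembic template, so `config`, `context`, and `target_metadata` are the Alembic-provided names) looks like:

def run_migrations_offline():
    """Run migrations in 'offline' mode, emitting SQL to stdout."""
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url, target_metadata=target_metadata,
                      literal_binds=True)

    with context.begin_transaction():
        context.run_migrations()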
Example #4
import asyncio
import json
from datetime import timedelta

import requests
import sqlalchemy as sa

from baselayer.app.models import init_db
from baselayer.app.env import load_env
from skyportal.models import (
    DBSession,
    FollowupRequest,
    FacilityTransactionRequest,
)

env, cfg = load_env()

init_db(**cfg['database'])

request_session = requests.Session()
request_session.trust_env = (
    False  # Otherwise pre-existing netrc config will override auth headers
)

WAIT_TIME_BETWEEN_QUERIES = timedelta(seconds=120)


class FacilityQueue(asyncio.Queue):
    async def load_from_db(self):
        # Load items from database into queue

        with DBSession() as session:
            requests = (
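The query is cut off here, but the surrounding code shows the intended shape: an `asyncio.Queue` subclass refilled from the database and drained on a timer. A self-contained sketch of that consumer pattern (all names below are illustrative, not from the source):

import asyncio
from datetime import timedelta

WAIT = timedelta(seconds=120)  # mirrors WAIT_TIME_BETWEEN_QUERIES above

async def drain(queue: asyncio.Queue):
    # Pull one item at a time, then back off before polling again.
    while True:
        item = await queue.get()
        print('submitting request:', item)  # placeholder for the facility HTTP call
        queue.task_done()
        await asyncio.sleep(WAIT.total_seconds())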
Example #5
import os
import pathlib
from datetime import datetime

import pytest

from baselayer.app import models
from baselayer.app.config import load_config
from baselayer.app.test_util import set_server_url
from skyportal.models import (
    DBSession,
    Source,
    Candidate,
    Role,
    User,
    Allocation,
    FollowupRequest,
)

import astroplan

print("Loading test configuration from _test_config.yaml")
basedir = pathlib.Path(os.path.dirname(__file__))
cfg = load_config([(basedir / "../../test_config.yaml").absolute()])
set_server_url(f'http://localhost:{cfg["ports.app"]}')
print("Setting test database to:", cfg["database"])
models.init_db(**cfg["database"])

# Add a "test factory" User so that all factory-generated comments have a
# proper author, if it doesn't already exist (the user may already be in
# there if running the test server and running tests individually)
if not DBSession.query(User).filter(User.username == "test factory").scalar():
    DBSession.add(User(username="******"))
    DBSession.commit()


def pytest_runtest_setup(item):
    # Print timestamp when running each test
    print(datetime.now().strftime('[%H:%M:%S] '), end='')


# set up a hook to be able to check if a test has failed
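The hook itself is cut off; the canonical pattern from the pytest documentation stashes each phase's report on the test item so fixtures can later check e.g. `item.rep_call.failed`:

import pytest

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Run all other hooks to obtain the report object, then attach it
    # to the item as rep_setup / rep_call / rep_teardown.
    outcome = yield
    rep = outcome.get_result()
    setattr(item, 'rep_' + rep.when, rep)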
Example #6
import argparse
import uuid
import os

from baselayer.app.config import load_config
from baselayer.app.models import init_db
from cesium_app import model_util

cfg_paths = [os.path.join(os.path.dirname(__file__),
                          '../config.yaml.defaults'),
             os.path.join(os.path.dirname(__file__),
                          '../cesium_web/config.yaml.defaults')]

cfg = load_config(cfg_paths)
conn = init_db(**cfg['database'])


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('bot_name')

    args = parser.parse_args()

    token = model_util.create_token(
        [], 1, args.bot_name)

    token_path = os.path.abspath(os.path.join(
        cfg['paths']['cesium_web_login_token_folder'], 'cesium_web_token'))
    if not os.path.exists(os.path.dirname(token_path)):
        print(f'{os.path.dirname(token_path)} does not exist - creating it now')
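The script is truncated here; a plausible continuation, creating the folder and writing the token to disk (hypothetical, inferred from the print above):

        os.makedirs(os.path.dirname(token_path), exist_ok=True)

    with open(token_path, 'w') as f:  # assumed: persist the token for cesium_web
        f.write(token)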
Example #7
def setup_survey_db():
    # if os.getcwd().endswith('survey_app'):
    #     os.chdir('./cesium_web')
    env, cfg = load_env()
    for data_dir in cfg['paths'].values():
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)

    db_session = init_db(**baselayer.app.config.load_config()['database'])
    # Drop & create tables
    with status('Dropping and re-creating tables'):
        drop_tables()
        create_tables()

    # Add testuser
    with status('Adding testuser'):
        u = models.User(username='******')
        models.DBSession().add(u)
        models.DBSession().commit()

    # Add project
    with status('Adding project'):
        p = models.Project(name='Survey Classifier', users=[u])
        models.DBSession().add(p)
        models.DBSession().commit()

    # Add datasets
    with status('Adding datasets'):
        for dataset_name, ts_data_dir in [
                ['Survey Light Curve Data',
                 'survey_classifier_data/data/lightcurves'],
                ['ASAS',
                 'survey_classifier_data/data/ASAS_lcs'],
                ['Noisified to CoRoT',
                 'survey_classifier_data/data/noisified_CoRoT_lcs'],
                ['Noisified to HATNet',
                 'survey_classifier_data/data/noisified_HATNet_lcs'],
                ['Noisified to Hipparcos',
                 'survey_classifier_data/data/noisified_Hipparcos_lcs'],
                ['Noisified to KELT',
                 'survey_classifier_data/data/noisified_KELT_lcs'],
                ['Noisified to Kepler',
                 'survey_classifier_data/data/noisified_Kepler_lcs'],
                ['Noisified to LINEAR',
                 'survey_classifier_data/data/noisified_LINEAR_lcs'],
                ['Noisified to OGLE-III',
                 'survey_classifier_data/data/noisified_OGLE-III_lcs'],
                ['Noisified to SuperWASP',
                 'survey_classifier_data/data/noisified_SuperWASP_lcs'],
                ['Noisified to TrES',
                 'survey_classifier_data/data/noisified_TrES_lcs']]:

            ts_paths = []
            # As these are only ever accessed to determine meta features, only
            # copy first ten (arbitrary) TS
            for src in glob.glob(os.path.join(os.path.abspath(ts_data_dir),
                                              '*.npz'))[:10]:
                # Add the path to the copied file in cesium data directory
                ts_paths.append(os.path.abspath(shutil.copy(
                    os.path.abspath(src), cfg['paths']['ts_data_folder'])))
            try:
                meta_features = list(load_ts(ts_paths[0])
                                     .meta_features.keys())
            except IndexError:  # No TS data on disk
                meta_features = None
            files = [models.DatasetFile(uri=ts_path) for ts_path in ts_paths]
            dataset = models.Dataset(name=dataset_name, project=p, files=files,
                                     meta_features=meta_features)
            models.DBSession().add_all(files + [dataset])
            models.DBSession().commit()
            print(f'Added dataset {dataset.id}')

    # Add featuresets
    fset_dict = {}
    for fset_name, orig_fset_path, features_list in [
            ['Survey LC Cadence/Error Features',
             './survey_classifier_data/data/survey_lc_features.npz',
             CADENCE_FEATS],
            ['ASAS',
             './survey_classifier_data/data/ASAS_features.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['CoRoT',
             './survey_classifier_data/data/noisified_CoRoT_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['HATNet',
             './survey_classifier_data/data/noisified_HATNet_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['Hipparcos',
             './survey_classifier_data/data/noisified_Hipparcos_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['KELT',
             './survey_classifier_data/data/noisified_KELT_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['Kepler',
             './survey_classifier_data/data/noisified_Kepler_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['LINEAR',
             './survey_classifier_data/data/noisified_LINEAR_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['OGLE-III',
             './survey_classifier_data/data/noisified_OGLE-III_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['SuperWASP',
             './survey_classifier_data/data/noisified_SuperWASP_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS],
            ['TrES',
             './survey_classifier_data/data/noisified_TrES_features_100.npz',
             GENERAL_FEATS + LOMB_SCARGLE_FEATS]]:
        fset_path = os.path.abspath(
            shutil.copy(os.path.abspath(orig_fset_path),
                        cfg['paths']['features_folder']))
        fset = models.Featureset(name=fset_name, file_uri=fset_path,
                                 project=p, features_list=features_list,
                                 task_id=None, finished=datetime.datetime.now())
        models.DBSession().add(fset)
        models.DBSession().commit()
        # fset.task_id = None
        # fset.finished = datetime.datetime.now()
        # fset.save()
        fset_dict[fset_name] = fset
        print(f'Added featureset {fset.id}')

    # Add models
    # TODO: Add actual model params
    for model_name, orig_model_path, model_type, params, fset_name in [
            ['Survey LCs RFC',
             os.path.abspath('./survey_classifier_data/data/survey_classifier.pkl'),
             'RandomForestClassifier', {}, 'Survey LC Cadence/Error Features'],
            ['ASAS',
             os.path.abspath('./survey_classifier_data/data/ASAS_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'ASAS'],
            ['CoRoT',
             os.path.abspath('./survey_classifier_data/data/noisified_CoRoT_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'CoRoT'],
            ['HATNet',
             os.path.abspath('./survey_classifier_data/data/noisified_HATNet_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'HATNet'],
            ['Hipparcos',
             os.path.abspath('./survey_classifier_data/data/noisified_Hipparcos_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'Hipparcos'],
            ['KELT',
             os.path.abspath('./survey_classifier_data/data/noisified_KELT_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'KELT'],
            ['Kepler',
             os.path.abspath('./survey_classifier_data/data/noisified_Kepler_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'Kepler'],
            ['LINEAR',
             os.path.abspath('./survey_classifier_data/data/noisified_LINEAR_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'LINEAR'],
            ['OGLE-III',
             os.path.abspath('./survey_classifier_data/data/noisified_OGLE-III_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'OGLE-III'],
            ['SuperWASP',
             os.path.abspath('./survey_classifier_data/data/noisified_SuperWASP_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'SuperWASP'],
            ['TrES',
             os.path.abspath('./survey_classifier_data/data/noisified_TrES_model_compressed.pkl'),
             'RandomForestClassifier', {}, 'TrES']]:
        model_path = os.path.abspath(
            shutil.copy(orig_model_path, cfg['paths']['models_folder']))
        model = models.Model(name=model_name, file_uri=model_path,
                             featureset_id=fset_dict[fset_name].id, project=p,
                             project_id=p.id,
                             params=params, type=model_type, task_id=None,
                             finished=datetime.datetime.now())
        models.DBSession().add(model)
        models.DBSession().commit()
        # model.task_id = None
        # model.finished = datetime.datetime.now()
        # model.save()
        print(f'Added model {model.id}')
    print(cfg)
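A minimal entry point, assuming the module is meant to be run directly:

if __name__ == '__main__':
    setup_survey_db()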
Example #8
import os
import pathlib

import pytest
from psycopg2 import OperationalError
from baselayer.app import models
from baselayer.app.config import load_config
from baselayer.app.test_util import (driver, MyCustomWebDriver, set_server_url,
                                     reset_state)
from skyportal.tests.fixtures import TMP_DIR, SourceFactory, GroupFactory, UserFactory
from skyportal.model_util import create_token

print('Loading test configuration from _test_config.yaml')
basedir = pathlib.Path(os.path.dirname(__file__))
cfg = load_config([(basedir / '../../test_config.yaml').absolute()])
set_server_url(f'http://localhost:{cfg["ports:app"]}')
print('Setting test database to:', cfg['database'])
models.init_db(**cfg['database'])


@pytest.fixture()
def public_group():
    return GroupFactory()


@pytest.fixture()
def public_source(public_group):
    return SourceFactory(groups=[public_group])


@pytest.fixture()
def public_sources_205(public_group):
    return [SourceFactory(groups=[public_group]) for _ in range(205)]
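A hypothetical test showing how these fixtures compose: requesting `public_source` builds `public_group` too, since the former depends on the latter:

def test_source_belongs_to_group(public_source, public_group):
    # SourceFactory attaches the group at construction time (see above).
    assert public_group in public_source.groups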
Example #9
import os
import time
from datetime import datetime

import yaml
from dateutil.parser import parse as parse_time

from baselayer.app.env import load_env
from baselayer.app.models import CronJobRun, DBSession, init_db
from baselayer.log import make_log

log = make_log("cron")

env, cfg = load_env()
jobs = cfg.get("cron", [])

init_db(**cfg["database"])

timestamp_file = ".jobs_timestamps.yaml"


class TimeCache:
    def __init__(self):
        if os.path.exists(timestamp_file):
            with open(timestamp_file) as f:
                timestamps = yaml.full_load(f)["timestamps"]
        else:
            timestamps = {}

        self.ts = timestamps

    def should_run(self, key, interval, limit=None):
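The method body is cut off; a standalone sketch of what such a check might do, assuming `self.ts` maps job keys to the `datetime` of the last run and `interval` is in minutes (both assumptions):

from datetime import datetime, timedelta

def should_run_sketch(ts, key, interval, limit=None):
    # Run if the job never ran, or its interval has elapsed; `limit`
    # (a cap on total runs) is accepted but not modeled here.
    last = ts.get(key)
    if last is None:
        return True
    return datetime.now() - last >= timedelta(minutes=interval)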
Example #10
        help="Save JUnit xml output to `test-results/junit.xml`",
    )
    parser.add_argument("--headless",
                        action="store_true",
                        help="Run browser headlessly")
    args = parser.parse_args()

    # Initialize the test database connection
    log("Connecting to test database")
    from baselayer.app.config import load_config
    from baselayer.app.models import init_db

    basedir = pathlib.Path(os.path.dirname(__file__)) / ".." / ".."
    cfg = load_config([basedir / TEST_CONFIG])
    app_name = cfg["app.factory"].split(".")[0]
    engine = init_db(**cfg["database"])
    engine.connect()

    if args.test_spec is not None:
        test_spec = args.test_spec
    else:
        test_spec = basedir / app_name / "tests"

    if args.xml:
        test_outdir = basedir / "test-results"
        if not test_outdir.exists():
            test_outdir.mkdir()
        xml = f"--junitxml={test_outdir}/junit.xml"
    else:
        xml = ""