def post_fork(server, worker):
    server.log.info("Worker spawned (pid: %s)", worker.pid)
    # Imports are deferred so each forked worker builds its own connections
    from util.cass import _init
    from preload_database.database import create_engine_from_url, create_scoped_session
    from ooi_data.postgres.model import MetadataBase
    # worker_lock is assumed to be a module-level lock defined in this
    # gunicorn config; it serializes connection setup across hooks
    with worker_lock:
        worker.log.debug('Connecting worker to cassandra')
        _init()
        worker.log.debug('Connected worker to cassandra')
        # Passing None presumably falls back to the project's default database URL
        engine = create_engine_from_url(None)
        Session = create_scoped_session(engine)
        # Attach a query property so models can be queried without an explicit session
        MetadataBase.query = Session.query_property()
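
For context, here is a minimal sketch of the surrounding gunicorn config module this hook would live in; the worker_lock definition and the bind/workers values are assumptions, not taken from the original:

# gunicorn_config.py (hypothetical sketch): post_fork above expects a
# module-level worker_lock to exist in the same config file.
from threading import Lock

bind = '0.0.0.0:5000'   # assumed listen address
workers = 4             # assumed worker count

# Shared by the fork-time hooks; serializes per-worker connection setup
worker_lock = Lock()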
Example #3
import logging
import os

import pandas as pd

from preload_database.database import create_engine_from_url, create_scoped_session
from ooi_data.postgres.model import Parameter, MetadataBase
from util.asset_management import AssetEvents
from util.common import StreamKey, TimeRange, StreamEngineException, InvalidParameterException, read_size_config
from util.csvresponse import CsvGenerator
from util.jsonresponse import JsonResponse
from util.netcdf_generator import NetcdfGenerator
from util.netcdf_utils import rename_glider_lat_lon
from util.stream_dataset import StreamDataset
from util.stream_request import StreamRequest, SIZE_ESTIMATES
from util.calc import execute_stream_request, validate

TEST_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(TEST_DIR, 'data')

engine = create_engine_from_url(None)
session = create_scoped_session(engine)
MetadataBase.query = session.query_property()

logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)
metadata = pd.read_csv(os.path.join(DATA_DIR, 'stream_metadata.csv'))


def get_available_time_range(sk):
    rows = metadata[(metadata.subsite == sk.subsite) &
                    (metadata.node == sk.node) &
                    (metadata.sensor == sk.sensor) &
                    (metadata.method == sk.method) &
                    (metadata.stream == sk.stream.name)]
    # 'first' and 'last' are assumed column names in stream_metadata.csv
    for index, row in rows.iterrows():
        return TimeRange(row['first'], row['last'] + 1)
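
The query_property() assignment above is what lets the declarative models be queried without threading a session through every call; a small sketch of that usage (parameter id 193 is an arbitrary, hypothetical key, not from the original):

# With MetadataBase.query bound to the scoped session, mapped models such as
# Parameter can be queried directly by primary key.
param = Parameter.query.get(193)  # hypothetical id
print(param.name if param is not None else 'no such parameter')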
Example #4
import logging
import os
import shutil
import tempfile
import unittest

import numpy as np
import xarray as xr
from ooi_data.postgres.model import MetadataBase

from preload_database.database import create_engine_from_url, create_scoped_session
from util.aggregation import aggregate_netcdf_group

logging.basicConfig()
log = logging.getLogger()
log.setLevel(logging.DEBUG)

engine = create_engine_from_url(None)
session = create_scoped_session(engine)
MetadataBase.query = session.query_property()

TEST_DIR = os.path.dirname(__file__)
DATA_DIR = os.path.join(TEST_DIR, 'data')


class AggregationTest(unittest.TestCase):
    def setUp(self):
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_aggregate_netcdf_group_simple(self):
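        # A minimal sketch of a body, assuming only xarray's public API; the
        # original excerpt is cut off here, and aggregate_netcdf_group's
        # signature is not assumed.
        ds = xr.Dataset({'temperature': ('obs', np.arange(5.0))},
                        coords={'obs': np.arange(5)})
        path = os.path.join(self.tempdir, 'group_0000.nc')
        ds.to_netcdf(path)
        # Round-trip the file to confirm the fixture is well formed
        with xr.open_dataset(path) as loaded:
            self.assertEqual(loaded.temperature.size, 5)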
Example #5
#!/usr/bin/env python
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker

from engine.routes import app
from util.cass import _init
from ooi_data.postgres.model import MetadataBase
from preload_database.database import create_engine_from_url

# Engine for the metadata Postgres database (credentials masked in the original)
engine = create_engine_from_url(r'postgresql://*****:*****@localhost/metadata')
Session = scoped_session(
    sessionmaker(autocommit=False, autoflush=False, bind=engine))
# Attach a query property so models can be queried without an explicit session
MetadataBase.query = Session.query_property()

# Initialize the Cassandra connection
_init()
# The reloader must be disabled so stream engine
# runs on the main thread
app.run(debug=True, use_reloader=False)
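
One detail to note when sharing a scoped_session with a Flask app this way: the session should be released when each request ends. A minimal sketch using Flask's teardown hook, registered before app.run (the hook is an assumption; the original does not show one):

@app.teardown_appcontext
def shutdown_session(exception=None):
    # Return this context's session to the pool so connections are not leaked
    Session.remove()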