示例#1
0
 def test_create_check_delete(self):
     """Round-trip test: create the temp database, verify, delete, verify gone."""
     # DB should be absent to start off with.
     # BUGFIX(style): compare truthiness directly rather than `== False` (PEP8 E712).
     assert not db_utils.check_database_exists(testdb_temp_url)
     # Now create it
     db_utils.create_empty_database(default_admin_db_url, testdb_temp_url.database)
     assert db_utils.check_database_exists(testdb_temp_url)
     # And delete it again
     db_utils.delete_database(default_admin_db_url, testdb_temp_url.database)
     assert not db_utils.check_database_exists(testdb_temp_url)
示例#2
0
 def test_create_check_delete(self):
     """Round-trip test: create the temp database, verify, delete, verify gone."""
     # DB should be absent to start off with.
     # BUGFIX(style): direct truthiness instead of `== False` / `== True` (PEP8 E712).
     assert not db_utils.check_database_exists(testdb_temp_url)
     # Now create it
     db_utils.create_empty_database(default_admin_db_url,
                                    testdb_temp_url.database)
     assert db_utils.check_database_exists(testdb_temp_url)
     # And delete it again
     db_utils.delete_database(default_admin_db_url,
                              testdb_temp_url.database)
     assert not db_utils.check_database_exists(testdb_temp_url)
示例#3
0
def empty_db_connection():
    """
    Connect to db, create tables and begin connection-level (outer) transaction.

    Will also create the database first if it does not exist.

    Maybe overkill, but a neat trick, cf:
    http://alextechrants.blogspot.co.uk/2013/08/unit-testing-sqlalchemy-apps.html
    We use a connection-level (non-ORM) transaction that encloses everything
    **including the table creation**.
    This ensures that the models and queries are always in sync for testing.

    Note that the transaction must be explicitly rolled back. An alternative
    approach would be to delete all tables, but presumably that would
    take slightly longer than a rollback (be interesting to check).

    Yields:
        A live SQLAlchemy connection with an open outer transaction.
    """
    # Could be parameterized, but then it wouldn't make sense as a session-level fixture
    # Unless PyTest is smart enough to maintain fixtures in parallel?
    # test_db_url = getattr(request.module, "test_db_url", testdb_empty_url)
    test_db_url = testdb_empty_url
    if not db_utils.check_database_exists(test_db_url):
        db_utils.create_empty_database(default_admin_db_url, test_db_url.database)
    engine = create_engine(test_db_url)
    connection = engine.connect()
    transaction = connection.begin()
    try:
        # Create tables (will be rolled back to clean).
        # BUGFIX: wrap in try/finally — previously an exception raised during
        # table creation (or thrown into the generator) skipped the rollback,
        # leaking the connection and engine.
        db_utils.create_tables_and_indexes(connection)
        # Return to test function
        yield connection
    finally:
        # TearDown: undo everything, including table creation.
        transaction.rollback()
        connection.close()
        engine.dispose()
示例#4
0
def main():
    """Read a single VOEvent packet from stdin and insert it into the database.

    Returns:
        0 on success, 1 if the insert failed (the failure is logged).

    Raises:
        RuntimeError: if the target database does not exist.
    """
    args = handle_args()
    logger = setup_logging(args.logfile_path)
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
    if not db_utils.check_database_exists(dburl):
        raise RuntimeError("Database not found")
    # Read raw bytes on Py3 (stdin.buffer); plain read on Py2.
    if six.PY3:
        stdin = sys.stdin.buffer.read()
    else:
        stdin = sys.stdin.read()  # Py2

    v = voeventparse.loads(stdin)

    session = Session(bind=create_engine(dburl))
    try:
        conv.safe_insert_voevent(session, v)
        session.commit()
    except Exception:
        # BUGFIX: was a bare `except:` that swallowed everything (including
        # SystemExit/KeyboardInterrupt) and then fell through to the success
        # log and `return 0`. Now we log the failure and return non-zero.
        logger.exception(
            "Could not insert packet with ivorn {} into {}".format(
                v.attrib['ivorn'], args.dbname))
        return 1
    finally:
        # BUGFIX: session was never closed.
        session.close()

    logger.info("Loaded packet with ivorn {} into {}".format(
        v.attrib['ivorn'], args.dbname))
    return 0
示例#5
0
def empty_db_connection():
    """
    Connect to db, create tables and begin connection-level (outer) transaction.

    Will also create the database first if it does not exist.

    Maybe overkill, but a neat trick, cf:
    http://alextechrants.blogspot.co.uk/2013/08/unit-testing-sqlalchemy-apps.html
    We use a connection-level (non-ORM) transaction that encloses everything
    **including the table creation**.
    This ensures that the models and queries are always in sync for testing.

    Note that the transaction must be explicitly rolled back. An alternative
    approach would be to delete all tables, but presumably that would
    take slightly longer than a rollback (be interesting to check).

    Yields:
        A live SQLAlchemy connection with an open outer transaction.
    """
    # Could be parameterized, but then it wouldn't make sense as a session-level fixture
    # Unless PyTest is smart enough to maintain fixtures in parallel?
    # test_db_url = getattr(request.module, "test_db_url", testdb_empty_url)
    test_db_url = testdb_empty_url
    if not db_utils.check_database_exists(test_db_url):
        db_utils.create_empty_database(default_admin_db_url,
                                       test_db_url.database)
    engine = create_engine(test_db_url)
    connection = engine.connect()
    transaction = connection.begin()
    try:
        # Create tables (will be rolled back to clean).
        # BUGFIX: try/finally guarantees teardown — previously an exception
        # during table creation (or thrown into the generator) skipped the
        # rollback, leaking the connection and engine.
        db_utils.create_tables_and_indexes(connection)
        # Return to test function
        yield connection
    finally:
        # TearDown: undo everything, including table creation.
        transaction.rollback()
        connection.close()
        engine.dispose()
示例#6
0
def test_ingest_packet():
    """End-to-end check: the ingest script adds exactly one packet to the corpus DB."""
    # The corpus database must already exist:
    assert db_utils.check_database_exists(testdb_corpus_url)
    engine = sqlalchemy.create_engine(testdb_corpus_url)
    session = sqlalchemy.orm.Session(bind=engine)
    from voeventdb.server import __path__ as root_path
    script_path = os.path.join(root_path[0], 'bin', 'voeventdb_ingest_packet.py')

    print("Testing script at ", script_path)
    print("Using executable:", sys.executable)

    # Run the script as a subprocess, feeding one fake packet via stdin.
    n_before = session.query(Voevent).count()
    cmd = [
        script_path,
        '-d={}'.format(testdb_corpus_url.database),
        '-l={}'.format('/tmp/vdbingest-test.log'),
    ]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    packet = fake.heartbeat_packets(n_packets=1)[0]
    proc.communicate(voeventparse.dumps(packet))
    proc.wait()

    assert proc.returncode == 0
    assert session.query(Voevent).count() == n_before + 1
示例#7
0
def main():
    """Create the named database if absent, then set up its tables and indexes.

    Returns:
        0 on success.
    """
    args = handle_args()
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
    if not db_utils.check_database_exists(dburl):
        db_utils.create_empty_database(dbconfig.default_admin_db_url,
                                       args.dbname)
        # BUGFIX: only report creation when we actually created the database;
        # previously this logged '"..." created.' even for a pre-existing DB.
        logger.info('Database "{}" created.'.format(dburl.database))
    engine = create_engine(dburl)
    # BUGFIX: close the connection instead of leaking it.
    connection = engine.connect()
    try:
        db_utils.create_tables_and_indexes(connection)
    finally:
        connection.close()
    return 0
示例#8
0
def main():
    """Create the named database if absent, then set up its tables and indexes.

    Returns:
        0 on success.
    """
    args = handle_args()
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
    if not db_utils.check_database_exists(dburl):
        db_utils.create_empty_database(dbconfig.default_admin_db_url,
                                       args.dbname)
        # BUGFIX: moved inside the branch — previously logged '"..." created.'
        # even when the database already existed.
        logger.info('Database "{}" created.'.format(dburl.database))
    engine = create_engine(dburl)
    # BUGFIX: close the connection instead of leaking it.
    connection = engine.connect()
    try:
        db_utils.create_tables_and_indexes(connection)
    finally:
        connection.close()
    return 0
def main():
    """Dump VOEvent packets from the database into one or more bzip2 tarballs.

    Packets are fetched in batches of ``args.nsplit`` and written via
    ``write_tarball``; with ``--nsplit`` each batch goes to a numbered archive.

    Returns:
        0 on success.

    Raises:
        RuntimeError: if the target database does not exist.
    """
    args = handle_args()
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
    if not db_utils.check_database_exists(dburl):
        raise RuntimeError("Database not found")

    filecount = 1
    n_packets_written = 0

    def get_tarfile_path():
        # Numbered suffix per batch when splitting, single archive otherwise.
        if args.nsplit:
            suffix = '.{0:03d}.tar.bz2'.format(filecount)
        else:
            suffix = '.tar.bz2'
        return args.tarfile_pathstem + suffix

    session = Session(bind=create_engine(dburl))
    try:
        # Prefetch mode pulls only the columns needed for dumping.
        if args.prefetch:
            qry = session.query(Voevent.ivorn, Voevent.xml)
        else:
            qry = session.query(Voevent)

        if args.all:
            logger.info("Dumping **all** packets currently in database")
        else:
            qry = qry.filter(Voevent.author_datetime < args.end)
            if args.start is not None:
                qry = qry.filter(Voevent.author_datetime >= args.start)
                logger.info("Fetching packets from {}".format(args.start))
            else:
                logger.info("Fetching packets from beginning of time")
            logger.info("...until: {}".format(args.end))
        # Stable ordering so limit/offset pagination is consistent.
        qry = qry.order_by(Voevent.id)

        n_matching = qry.count()
        logger.info("Dumping {} packets".format(n_matching))
        start_time = datetime.datetime.now()
        while n_packets_written < n_matching:
            logger.debug("Fetching batch of up to {} packets".format(args.nsplit))
            voevents = qry.limit(args.nsplit).offset(n_packets_written).all()

            n_written = write_tarball(voevents, get_tarfile_path())
            if not n_written:
                # BUGFIX: guard against an infinite loop if a batch writes
                # nothing (previously the offset would never advance).
                logger.warning("No packets written in this batch; stopping early")
                break
            n_packets_written += n_written
            elapsed = (datetime.datetime.now() - start_time).total_seconds()
            # BUGFIX: rate is packets per second, but the label claimed
            # 'kilopacket/s' — corrected the label.
            logger.info(
                "{} packets dumped so far, in {} ({:.0f} packet/s)".format(
                    n_packets_written,
                    elapsed,
                    n_packets_written / elapsed
                ))
            filecount += 1
    finally:
        # BUGFIX: session was not closed if an error occurred mid-dump.
        session.close()
    logger.info("Wrote {} packets".format(n_packets_written))
    return 0
def main():
    """Dump VOEvent packets from the database into one or more bzip2 tarballs.

    Packets are fetched in batches of ``args.nsplit`` and written via
    ``write_tarball``; with ``--nsplit`` each batch goes to a numbered archive.

    Returns:
        0 on success.

    Raises:
        RuntimeError: if the target database does not exist.
    """
    args = handle_args()
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
    if not db_utils.check_database_exists(dburl):
        raise RuntimeError("Database not found")

    filecount = 1
    n_packets_written = 0

    def get_tarfile_path():
        # Numbered suffix per batch when splitting, single archive otherwise.
        if args.nsplit:
            suffix = '.{0:03d}.tar.bz2'.format(filecount)
        else:
            suffix = '.tar.bz2'
        return args.tarfile_pathstem + suffix

    session = Session(bind=create_engine(dburl))
    try:
        # Prefetch mode pulls only the columns needed for dumping.
        if args.prefetch:
            qry = session.query(Voevent.ivorn, Voevent.xml)
        else:
            qry = session.query(Voevent)

        if args.all:
            logger.info("Dumping **all** packets currently in database")
        else:
            qry = qry.filter(Voevent.author_datetime < args.end)
            if args.start is not None:
                qry = qry.filter(Voevent.author_datetime >= args.start)
                logger.info("Fetching packets from {}".format(args.start))
            else:
                logger.info("Fetching packets from beginning of time")
            logger.info("...until: {}".format(args.end))
        # Stable ordering so limit/offset pagination is consistent.
        qry = qry.order_by(Voevent.id)

        n_matching = qry.count()
        logger.info("Dumping {} packets".format(n_matching))
        start_time = datetime.datetime.now()
        while n_packets_written < n_matching:
            logger.debug("Fetching batch of up to {} packets".format(args.nsplit))
            voevents = qry.limit(args.nsplit).offset(n_packets_written).all()

            n_written = write_tarball(voevents, get_tarfile_path())
            if not n_written:
                # BUGFIX: guard against an infinite loop if a batch writes
                # nothing (previously the offset would never advance).
                logger.warning("No packets written in this batch; stopping early")
                break
            n_packets_written += n_written
            elapsed = (datetime.datetime.now() - start_time).total_seconds()
            # BUGFIX: rate is packets per second, but the label claimed
            # 'kilopacket/s' — corrected the label.
            logger.info(
                "{} packets dumped so far, in {} ({:.0f} packet/s)".format(
                    n_packets_written, elapsed, n_packets_written / elapsed))
            filecount += 1
    finally:
        # BUGFIX: session was not closed if an error occurred mid-dump.
        session.close()
    logger.info("Wrote {} packets".format(n_packets_written))
    return 0
示例#11
0
def main(dbname, check, tarballs):
    """Load VOEvent packets from the given tarballs into the named database.

    Args:
        dbname: Target database name.
        check: If true, check for duplicate packets while loading.
        tarballs: Iterable of tarball file paths.

    Returns:
        0 on success.

    Raises:
        RuntimeError: if the target database does not exist.
    """
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, dbname)
    if not db_utils.check_database_exists(dburl):
        raise RuntimeError("Database not found")

    # BUGFIX: create the engine once — previously a new engine (and its
    # connection pool) was created per tarball and never disposed.
    engine = create_engine(dburl)
    with click.progressbar(tarballs) as tarball_bar:
        for tbpath in tarball_bar:
            session = Session(bind=engine)
            try:
                n_parsed, n_loaded = ingest.load_from_tarfile(
                    session, tarfile_path=tbpath, check_for_duplicates=check)
                logger.info("Loaded {} packets into {} from {}".format(
                    n_loaded, dbname, tbpath))
            finally:
                # BUGFIX: close the session even if loading raises.
                session.close()
    return 0
def main(dbname, check, tarballs):
    """Load VOEvent packets from the given tarballs into the named database.

    Args:
        dbname: Target database name.
        check: If true, check for duplicate packets while loading.
        tarballs: Iterable of tarball file paths.

    Returns:
        0 on success.

    Raises:
        RuntimeError: if the target database does not exist.
    """
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, dbname)
    if not db_utils.check_database_exists(dburl):
        raise RuntimeError("Database not found")

    # BUGFIX: create the engine once — previously a new engine (and its
    # connection pool) was created per tarball and never disposed.
    engine = create_engine(dburl)
    with click.progressbar(tarballs) as tarball_bar:
        for tbpath in tarball_bar:
            session = Session(bind=engine)
            try:
                n_parsed, n_loaded = ingest.load_from_tarfile(
                        session,
                        tarfile_path=tbpath,
                        check_for_duplicates=check)
                logger.info("Loaded {} packets into {} from {}".format(
                        n_loaded, dbname, tbpath))
            finally:
                # BUGFIX: close the session even if loading raises.
                session.close()
    return 0
def main():
    """Read a single VOEvent packet from stdin and insert it into the database.

    Returns:
        0 on success, 1 if the insert failed (the failure is logged).

    Raises:
        RuntimeError: if the target database does not exist.
    """
    args = handle_args()
    logger = setup_logging(args.logfile_path)
    dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, args.dbname)
    if not db_utils.check_database_exists(dburl):
        raise RuntimeError("Database not found")

    # NOTE(review): sys.stdin.read() yields str on Py3; the sibling variant
    # reads sys.stdin.buffer (bytes). Confirm voeventparse.loads accepts str.
    stdin = sys.stdin.read()
    v = voeventparse.loads(stdin)

    session = Session(bind=create_engine(dburl))
    try:
        conv.safe_insert_voevent(session, v)
        session.commit()
    except Exception:
        # BUGFIX: was a bare `except:` that swallowed everything and then fell
        # through to the success log and `return 0`. Now log and return 1.
        logger.exception("Could not insert packet with ivorn {} into {}".format(
            v.attrib['ivorn'], args.dbname))
        return 1
    finally:
        # BUGFIX: session was never closed.
        session.close()

    logger.info("Loaded packet with ivorn {} into {}".format(
        v.attrib['ivorn'], args.dbname))
    return 0
示例#14
0
def test_ingest_packet():
    """End-to-end check: the ingest script adds exactly one packet to the corpus DB."""
    # The corpus database must already exist:
    assert db_utils.check_database_exists(testdb_corpus_url)
    engine = sqlalchemy.create_engine(testdb_corpus_url)
    session = sqlalchemy.orm.Session(bind=engine)
    from voeventdb.server import __path__ as root_path

    script_path = os.path.join(root_path[0], "bin", "voeventdb_ingest_packet.py")

    print("Testing script at ", script_path)
    print("Using executable:", sys.executable)

    # Run the script as a subprocess, feeding one fake packet via stdin.
    n_before = session.query(Voevent).count()
    cmd = [
        script_path,
        "-d={}".format(testdb_corpus_url.database),
        "-l={}".format("/tmp/vdbingest-test.log"),
    ]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE)
    packet = fake.heartbeat_packets(n_packets=1)[0]
    proc.communicate(voeventparse.dumps(packet))
    proc.wait()

    assert proc.returncode == 0
    assert session.query(Voevent).count() == n_before + 1
示例#15
0
 def test_starting_conditions(self):
     """Sanity-check: admin DB reachable, temp DB not yet created."""
     # Make sure we have access to an admin database, and the tempdb
     # has not already been created.
     # BUGFIX(style): direct truthiness instead of `== False`/`== True` (PEP8 E712).
     assert not db_utils.check_database_exists(testdb_temp_url)
     assert db_utils.check_database_exists(default_admin_db_url)
示例#16
0
# logger = logging.getLogger(__name__)
# Use Celery's task logger so records carry task context.
logger = get_task_logger(__name__)

# If the dummy-mode env var is set (to any value at all), monkey-patch the
# outgoing comms functions with stubs so email / VOEvent sends are suppressed.
dummy_email_mode = os.environ.get(fps_env_vars.use_dummy_mode, None)
if dummy_email_mode is not None:
    fps.comms.email.send_email = fps.comms.email.dummy_email_send_function
    fps.comms.comet.send_voevent = fps.comms.comet.dummy_send_to_comet_stub
    logger.warning("Dummy stub-functions engaged!")

# Database name from the environment, falling back to the corpus test DB.
voeventdb_dbname = os.environ.get(fps_env_vars.voeventdb_dbname,
                                  dbconfig.testdb_corpus_url.database)

dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params,
                             voeventdb_dbname)
# Fail fast at import time if the configured database is missing.
if not db_utils.check_database_exists(dburl):
    raise RuntimeError(
        "voeventdb database not found: {}".format(voeventdb_dbname))
# Module-level engine shared by tasks in this worker process.
dbengine = create_engine(dburl)


@fps_app.task()
def process_voevent_celerytask(bytestring):
    """
    Process the voevent using the 'voevent_logic'

    i.e. the function defined in
    `fourpisky.scripts.process_voevent`.

    Args:
        bytestring: Raw VOEvent packet bytes, parsed with voeventparse.
    """
    # Parse the raw packet into a voevent object.
    v = voeventparse.loads(bytestring)
    # NOTE(review): the visible body only parses and logs the ivorn; the
    # processing described in the docstring presumably follows beyond this
    # excerpt — confirm against the full source.
    logger.debug("Load for processing: " + v.attrib['ivorn'])
示例#17
0
from comet.icomet import IHandler
import comet.log as log
import os
import voeventparse
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import voeventdb.server.database.config as dbconfig
from voeventdb.server.database import db_utils
import voeventdb.server.database.convenience as dbconv


# Database name from the environment, falling back to the corpus test DB.
voeventdb_dbname = os.environ.get("VOEVENTDB_DBNAME",
                                  dbconfig.testdb_corpus_url.database)

dburl = dbconfig.make_db_url(dbconfig.default_admin_db_params, voeventdb_dbname)
# NOTE(review): a missing database is only warned about here (no hard failure);
# create_engine below is still attempted regardless — confirm that is intended.
if not db_utils.check_database_exists(dburl):
    log.warn("voeventdb database not found: {}".format(
        voeventdb_dbname))
# Module-level engine shared by handler instances.
dbengine = create_engine(dburl)

# NOTE(review): `implementer` and `IPlugin` are not in this module's visible
# import block (expected from zope.interface / twisted.plugin) — confirm they
# are imported in the full source.
@implementer(IPlugin, IHandler)
class VoeventdbInserter(object):
    # Plugin name used by Comet to select this handler.
    name = "voeventdb-insert"

    # When the handler is called, it is passed an instance of
    # comet.utility.xml.xml_document.
    def __call__(self, event):
        """
        Add an event to the celery processing queue
        """
        # NOTE(review): the visible body only logs the ivorn; the actual
        # insert/queueing presumably follows beyond this excerpt — confirm.
        log.debug("Passing to voeventdb: %s" % (event.attrib['ivorn'],))
示例#18
0
 def test_starting_conditions(self):
     """Sanity-check: admin DB reachable, temp DB not yet created."""
     # Make sure we have access to an admin database, and the tempdb
     # has not already been created.
     # BUGFIX(style): direct truthiness instead of `== False`/`== True` (PEP8 E712).
     assert not db_utils.check_database_exists(testdb_temp_url)
     assert db_utils.check_database_exists(default_admin_db_url)