Example #1
    def __init__(self):
        """
        Initialize tornado server for receiving/grading submissions
        """
        endpoint = ARGS.endpoint or os.environ.get("OTTER_ENDPOINT", None)
        assert endpoint is not None, "no endpoint address provided"
        assert os.path.isdir(OTTER_SERVICE_DIR), "{} does not exist".format(
            OTTER_SERVICE_DIR)
        settings = dict(
            google_oauth={
                "key": ARGS.google_key or os.environ.get("GOOGLE_CLIENT_KEY", None),
                "secret": ARGS.google_secret or os.environ.get("GOOGLE_CLIENT_SECRET", None)
            },
            notebook_dir=os.path.join(OTTER_SERVICE_DIR, "submissions"),
            auth_redirect_uri=os.path.join(endpoint, "auth/callback"))
        handlers = [(r"/submit", SubmissionHandler),
                    (r"/auth/google", GoogleOAuth2LoginHandler),
                    (r"/auth/callback", GoogleOAuth2LoginHandler),
                    (r"/auth", LoginHandler)]
        tornado.web.Application.__init__(self, handlers, **settings)

        # Initialize database session
        self.db = queries.TornadoSession(
            queries.uri(host=ARGS.db_host,
                        port=ARGS.db_port,
                        dbname='otter_db',
                        user=ARGS.db_user,
                        password=ARGS.db_pass))
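
Since the application stores the TornadoSession on self.db, handlers can issue non-blocking queries against it. A minimal sketch of such a handler (the handler class and table name are assumptions, not part of the original example); note that results from a TornadoSession hold a pooled connection until freed:

from tornado import gen
import tornado.web


class StatusHandler(tornado.web.RequestHandler):  # hypothetical handler

    @gen.coroutine
    def get(self):
        # self.application.db is the TornadoSession created in __init__ above
        results = yield self.application.db.query(
            "SELECT count(*) AS n FROM submissions")  # assumed table name
        self.write({"submission_count": results.items()[0]["n"]})
        results.free()  # release the pooled connection held by the results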
Example #2
 def __init__(self):
     settings = dict(auth_redirect_uri=None)
     handlers = [(r"/auth/google", start.GoogleOAuth2LoginHandler),
                 (r"/auth/callback",
                  start.GoogleOAuth2LoginHandler),
                 (r"/auth", start.LoginHandler)]
     Application.__init__(self, handlers, **settings)
     self.db = queries.TornadoSession(queries.uri(**dsn))
Example #3
 def setUp(self):
     super(TornadoSessionIntegrationTests, self).setUp()
     self.session = queries.TornadoSession(queries.uri('localhost',
                                                       5432,
                                                       'postgres',
                                                       'postgres'),
                                           pool_max_size=10,
                                           io_loop=self.io_loop)
Example #4
def getPartitionCount(master, database, username, password, table):
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    with queries.Session(hawqURI) as session:
        queryDDL = "SELECT count(*) AS partitions FROM   pg_inherits i WHERE  i.inhparent = '" + table + "'::regclass;"
        result = session.query(queryDDL)
        partitions = result.items()[0]["partitions"]

    return partitions
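
Building the query by concatenating the table name leaves this open to SQL injection. Since queries forwards a parameters dict to psycopg2's pyformat placeholders, the regclass lookup can bind the table name as a value instead; a minimal sketch of that variant (renamed to mark it as hypothetical):

def getPartitionCountSafe(master, database, username, password, table):
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    with queries.Session(hawqURI) as session:
        # the table name is bound as a value and cast to regclass server-side
        result = session.query(
            "SELECT count(*) AS partitions FROM pg_inherits i"
            " WHERE i.inhparent = %(table)s::regclass",
            {"table": table})
        return result.items()[0]["partitions"]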
Example #5
def executeQueries(master, database, username, password, queryList, hostsFile, explain, adminUser, adminPassword, scale,
                   emailAddress=""):
    loggerInfo = buildReportLogger("queries")
    reportName = loggerInfo[0]
    report = loggerInfo[1]
    explainString = ""
    if explain:
        explainString = "explain analyze "
    startString = "Query Execution Phase"
    uniInfoLog(startString, report)
    header = "Executing HAWQ Queries for a "+str(scale)+" Data Set"
    uniInfoLog(header, report)
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    queryLocations = []

    if int(queryList[0]) != 0:

        # Loop
        for queryNum in queryList:
            uniInfoLog("Running Query " + queryNum, report)

            if int(queryNum) < 10:
                queryNum = "0" + queryNum
            queryLocations.append('./hawq-ddl/queries/' + scale + '/query_' + str(queryNum) + '.sql')
    else:
        uniInfoLog("Running all Queries", report)
        queryLocations = sorted(glob.glob("./hawq-ddl/queries/" + scale + "/*.sql"))

    with queries.Session(hawqURI) as session:
        for query in queryLocations:
            uniInfoLog(clearBuffers(hostsFile, adminUser, adminPassword), report)
            ddlFile = open(query, "r")
            queryName = ((query.split("/")[4]).split(".")[0]).rstrip()
            queryString = ddlFile.read()
            queryString = explainString + queryString
            startTime = time.time()
            result = session.query(queryString)
            if explain:
                queryPlan = ""
                uniInfoLog(result.query, report)
                for row in result:
                    queryPlan += str(row['QUERY PLAN']) + "\n"
                uniInfoLog(queryPlan, report)
            stopTime = time.time()
            queryTime = stopTime - startTime
            results = "Query Complete: %s   Execution Time(s): %0.2f  Rows Returned: %s" % (
                queryName, queryTime, str(result.count()))
            uniInfoLog(results, report)
            if emailAddress:
                Email.sendEmail(emailAddress, results[:25], results)
        if emailAddress:
            messageLines = []
            with open(reportName, "r") as reportMsg:
                for line in reportMsg.readlines():
                    messageLines.append(line)
                message = " ".join(messageLines)
                Email.sendEmail(emailAddress, "Query Final Report: " + (reportName.split('/')[2])[:-4], message)
Example #6
def getDatabase(master, username, password):
    hawqURI = queries.uri(master, port=5432, dbname='gpadmin', user=username, password=password)
    try:
        dbName = input("Please Enter a Name for the HAWQ TPC-DS Database:")
        with queries.Session(hawqURI) as session:
            result = session.query("create database " + dbName)
    except psycopg2.ProgrammingError:
        print("Database already exists.")
    return dbName
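
Instead of treating the ProgrammingError as the signal that the database exists, the check can be made explicit with a parameterized query against pg_database; a small sketch (a hypothetical helper, not part of the original):

def databaseExists(session, dbName):
    # pg_database lists every database in the cluster
    result = session.query("SELECT 1 FROM pg_database WHERE datname = %(name)s",
                           {"name": dbName})
    return result.count() > 0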
Example #7
    def __init__(self):

        pqsql_uri = queries.uri(options.pqsql_host, options.pqsql_port,
                                options.pqsql_db, options.pqsql_user,
                                options.pqsql_password)

        self.pool = queries.TornadoSession(pqsql_uri,
                                           pool_max_size=options.pool_max_size,
                                           pool_idle_ttl=options.pool_idle_ttl)
        self.cur = ''
Example #8
def createTables(master, database, username, password):
    pbLogger.info("---------------------------------")
    pbLogger.info("Creating HAWQ Internal Tables")
    pbLogger.info("---------------------------------")
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    tableList = sorted(glob.glob('./hawq-ddl/hawq/*.sql'))

    with queries.Session(hawqURI) as session:
        for table in tableList:
            ddlFile = open(table, "r")
            tableName = (table.split("/")[3]).split(".")[0]
            print "Creating Table: " + tableName
            tableDDL = ddlFile.read()
            result = session.query(tableDDL)
Example #9
def createPXFTables(master, database, username, password, scale, base, namenode):
    pbLogger.info("---------------------------------")
    pbLogger.info("Creating HAWQ PXF External Tables")
    pbLogger.info("---------------------------------")
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    tableList = sorted(glob.glob('./hawq-ddl/pxf/*.sql'))

    with queries.Session(hawqURI) as session:
        for table in tableList:
            ddlFile = open(table, "r")
            tableName = (table.split("/")[3]).split(".")[0]
            print "Creating PXF External Table: " + tableName
            tableDDL = ddlFile.read()
            tableDDL = tableDDL.replace("$NAMENODE", namenode)
            tableDDL = tableDDL.replace("$SCALE", scale)
            tableDDL = tableDDL.replace("$BASE", base[1:])
            result = session.query(tableDDL)
Example #10
def analyzeHawqTables(master, database, username, password, emailAddress=""):
    loggerInfo = buildReportLogger("analyze")
    reportName = loggerInfo[0]
    report = loggerInfo[1]
    startString = "Analyze Database Tables to Generate Statistics"
    uniInfoLog(startString, report)
    header = "Analyzing HAWQ Tables"
    uniInfoLog(header, report)

    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    with queries.Session(hawqURI) as session:
        uniInfoLog("Analyze Dimension Tables", report)
        for table in dimensionTables:
            ddlString = "Analyze " + table
            startTime = datetime.datetime.now()
            uniInfoLog("Start " + ddlString + ": " + str(startTime), report)
            result = session.query(ddlString)
            stopTime = datetime.datetime.now()
            resultString = "Completed " + ddlString + ": " + str(stopTime) + " Elapsed Time: " + str(
                stopTime - startTime)
            uniInfoLog(resultString, report)
            if emailAddress:
                Email.sendEmail(emailAddress, ddlString + " Complete", resultString)
        uniInfoLog("Analyze Fact Tables", report)

        for table in factTables:
            ddlString = "analyze " + table
            startTime = datetime.datetime.now()
            uniInfoLog("Start " + ddlString + ": " + str(startTime), report)
            result = session.query(ddlString)
            stopTime = datetime.datetime.now()
            resultString = "Completed " + ddlString + ": " + str(stopTime) + " Elapsed Time: " + str(
                stopTime - startTime)
            uniInfoLog(resultString, report)
            if emailAddress:
                Email.sendEmail(emailAddress, ddlString + " Complete", resultString)

        if emailAddress:
            messageLines = []
            with open(reportName, "r") as reportMsg:
                for line in reportMsg.readlines():
                    messageLines.append(line)
                message = " ".join(messageLines)
                Email.sendEmail(emailAddress, "Table Analyze Final Report: " + (reportName.split('/')[2])[:-4], message)
Example #11
 def __init__(self):
     handlers = [(r"/submit", SubmissionHandler),
                 (r"/google_auth", GoogleOAuth2LoginHandler)]
     with open("conf.yml") as f:
         config = yaml.safe_load(f)
     settings = dict(google_oauth={
         'key': config['google_auth_key'],
         'secret': config['google_auth_secret'],
     },
                     notebook_dir=config['notebook_dir'],
                     auth_redirect_uri=config['auth_redirect_uri'])
     tornado.web.Application.__init__(self, handlers, **settings)
     # Initialize database session
     self.db = queries.TornadoSession(
         queries.uri(host=config['db_host'],
                     port=config['db_port'],
                     dbname='otter_db',
                     user=config['db_user'],
                     password=config['db_pass']))
Example #12
def connection(host='localhost',
               port=5432,
               dbname='postgres',
               user='******',
               password=None):
    """ Create a new Database connection

    Args:
        host (str): ip address or hostname of running postgres process
        port (int): port the running postgres is listening on
        dbname (str): name of database to connect to
        user (str): credentials username authorized to connect to db
        password (str): credentials password to authenticate user

    Returns:
        queries.Session: new database connection
    """
    connection_string = queries.uri(host, port, dbname, user, password)
    return queries.Session(connection_string)
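
A usage sketch for the helper above (the host, user, and query are illustrative; queries.Session is also a context manager, so the connection returns to the pool on exit):

with connection(host='db.example.com', user='app') as session:
    for row in session.query('SELECT version() AS v'):
        print(row['v'])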
Example #13
    def test_polling_stops_after_connection_error(self):
        # Abort the test right away if postgres isn't running.
        yield self.assertPostgresConnected()

        # Use an invalid user to force an OperationalError during connection
        bad_uri = queries.uri(os.getenv('PGHOST', 'localhost'),
                              int(os.getenv('PGPORT', '5432')), 'invalid')
        session = queries.TornadoSession(bad_uri)

        self.count = 0
        real_poll_connection = session._poll_connection

        def count_polls(*args, **kwargs):
            self.count += 1
            real_poll_connection(*args, **kwargs)
        session._poll_connection = count_polls

        with self.assertRaises(queries.OperationalError):
            yield session.query('SELECT 1')
        yield gen.sleep(0.05)
        self.assertLess(self.count, 20)
Example #14
def loadDatabase():
    dbURI = queries.uri(os.environ.get("DBHOST"),
                        port=os.environ.get("DBPORT"),
                        dbname="gpadmin",
                        user="******",
                        password="******")
    with queries.Session(dbURI) as session:
        result = session.query("drop table if exists customers CASCADE ;")
        result = session.query(
            "create table customers(existingLines int,birthDate date,creditAmount int,guarantors int,creditDuration int,cardNumber text,existingLinesBank int,city text,typeResidence int,zip text,employmentType int,mostValAsset int,streetAddress text,state text,creditPercent int,phoneNumber text,latitude float,employmentLength int,accountBalanceStatus int,job text ,paymentStatusPrevCredit int,emailAddress text,purpose int,foreignWorker int,sexMaritalStatus int,creditability int,firstName text,accountBalance float,lastName text,age int,longitude float,savingsValue int,socialsecurityNumber text,dependents int,customerNumber bigint,durationAddess int,telephoneAvail int) with (appendonly=true) DISTRIBUTED RANDOMLY;"
        )

        with open('./data/customers.csv') as csvfile:
            reader = csv.reader(csvfile)
            next(reader, None)
            for row in reader:
                rowString = "'" + "','".join(row) + "'"
                result = session.query("insert into customers VALUES (" +
                                       rowString + ");")

                # This is to post new customers.   Not implementing yet.
        result = session.query("drop table if exists transactions CASCADE ;")
        result = session.query(
            "drop table if exists transactions_hive CASCADE ;")

        result = session.query(
            "create table transactions(city text,zip integer,amount float,flagged int,state text,longitude float,id text,streetaddress text,latitude float,transactiontimestamp timestamp,customerNumber bigint) with (appendonly=true) DISTRIBUTED RANDOMLY;"
        )
        result = session.query(
            "create table transactions_hive(like transactions);")

        result = session.query(
            "drop external table if exists transactions_pxf CASCADE ;")
        result = session.query(
            "create external table transactions_pxf(like transactions) LOCATION('pxf://"
            + os.environ.get("DBHOST") +
            ":51200/scdf/*.txt?PROFILE=HDFSTextSimple') FORMAT 'CSV' (QUOTE '''')  LOG ERRORS INTO err_transactions SEGMENT REJECT LIMIT 500;"
        )
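
The row-by-row inserts above splice CSV values directly into the SQL string, which breaks on embedded quotes. Assuming queries forwards a parameter sequence to psycopg2 the way a parameters dict is forwarded, the loop body can bind the values positionally instead; a minimal sketch of that safer form:

for row in reader:
    # one %s placeholder per CSV column; psycopg2 quotes the values
    placeholders = ", ".join(["%s"] * len(row))
    result = session.query("insert into customers VALUES (" + placeholders + ")",
                           row)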
Example #15
def loadHawqTables(master, username, password, database, emailAddress):
    loggerInfo = buildReportLogger("load")
    reportName = loggerInfo[0]
    report = loggerInfo[1]
    uniInfoLog("Load HAWQ Internal Tables", report)

    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    loadList = sorted(glob.glob('./hawq-ddl/load/*.sql'))

    for load in loadList:
        ddlFile = open(load, "r")
        tableName = ((load.split("/")[3]).split(".")[0])[:-5]
        loadDDL = ddlFile.read()
        startTime = datetime.datetime.now()
        pxfName = tableName
        if tableName in factTables:
            tableName = tableName + "_nopart"
        uniInfoLog("Starting Load of " + tableName, report)
        with queries.Session(hawqURI) as session:
            result = session.query(loadDDL)
        stopTime = datetime.datetime.now()
        uniInfoLog("Completed Load of " + tableName, report)
        uniInfoLog("Load Time: " + str(stopTime - startTime), report)
        rowsPXF = rowCount(master, database, username, password, pxfName + "_pxf")
        rows = rowCount(master, database, username, password, tableName)
        uniInfoLog("Expected Rows: " + str(rowsPXF), report)
        uniInfoLog("Actual Rows  : " + str(rows), report)
        if emailAddress:
            Email.sendEmail(emailAddress, "Completed Load of " + tableName, "Loaded " + str(rows) + " Rows")
    if emailAddress:
        messageLines = []
        with open(reportName, "r") as reportMsg:
            for line in reportMsg.readlines():
                messageLines.append(line)
            message = " ".join(messageLines)
            Email.sendEmail(emailAddress, "Table Load Final Report: " + (reportName.split('/')[2])[:-4], message)
Example #16
def loadTrainingSets():
    print "LOADING TRAINING DATA SETS"
    dbURI = queries.uri(os.environ.get("DBHOST"),
                        port=os.environ.get("DBPORT"),
                        dbname="gpadmin",
                        user="******",
                        password="******")
    with queries.Session(dbURI) as session:
        result = session.query("drop table if exists customers_train CASCADE;")
        result = session.query(
            "create table customers_train(existingLines int,birthDate date,creditAmount int,guarantors int,creditDuration int,cardNumber text,existingLinesBank int,city text,typeResidence int,zip text,employmentType int,mostValAsset int,streetAddress text,state text,creditPercent int,phoneNumber text,latitude float,employmentLength int,accountBalanceStatus int,job text ,paymentStatusPrevCredit int,emailAddress text,purpose int,foreignWorker int,sexMaritalStatus int,creditability int,firstName text,accountBalance float,lastName text,age int,longitude float,savingsValue int,socialsecurityNumber text,dependents int,customerNumber bigint,durationAddess int,telephoneAvail int) with (appendonly=true) DISTRIBUTED RANDOMLY;"
        )

        with open('./data/customers-training.csv') as csvfile:
            reader = csv.reader(csvfile)
            next(reader, None)
            for row in reader:
                rowString = "'" + "','".join(row) + "'"
                result = session.query("insert into customers_train VALUES (" +
                                       rowString + ");")

        result = session.query(
            "drop table if exists transactions_train CASCADE;")
        result = session.query(
            "create table transactions_train(city text,zip integer,amount float,flagged int,state text,longitude float,id text,streetaddress text,latitude float,transactiontimestamp timestamp,customerNumber bigint) with (appendonly=true) DISTRIBUTED RANDOMLY;"
        )

        with open('./data/transactions-training.csv') as csvfile:
            reader = csv.reader(csvfile)
            next(reader, None)
            for row in reader:
                rowString = "'" + "','".join(row) + "'"
                result = session.query(
                    "insert into transactions_train VALUES (" + rowString +
                    ");")
    print "TRAINING DATA LOADED"
Example #17
def launch_migration():
    SECRETS = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           'secrets.json')

    with open(SECRETS, 'r') as f:
        raw = json.loads(f.read().strip())

    DB_INFO = {
        'host': raw["DATABASE_HOST"],
        'port': 5432,
        'dbname': raw["DATABASE_NAME"],
        'user': raw["DATABASE_USER"],
        'password': raw["DATABASE_PASS"],
    }
    db_endpoint = queries.uri(**DB_INFO)
    print('connection string')
    import sqlalchemy as sa
    print('doing engine')
    db_engine = sa.create_engine(db_endpoint)
    print('creating stuff')
    db.create_all(bind=db_engine)
    print('stopping stuff')
    db_engine.dispose()
    print('done')
Example #18
 def pg_uri(self):
     return queries.uri(os.getenv('PGHOST', 'localhost'),
                        int(os.getenv('PGPORT', '5432')), 'postgres')
Example #19
def partitionTables(master, parts, username, password, database, orientation, byPart, compression, rowGroupSize,
                    emailAddress=""):
    loggerInfo = buildReportLogger("partitioning")
    reportName = loggerInfo[0]
    report = loggerInfo[1]
    startString = "Partitioning Tables into " + str(parts) + " Day Partitions in " + orientation + " Format"
    uniInfoLog(startString, report)
    if orientation.upper() == "PARQUET":
        # orientation = "PARQUET,ROWGROUPSIZE=1073741823,COMPRESSTYPE=snappy"
        orientation = "PARQUET,ROWGROUPSIZE=" + rowGroupSize + ",COMPRESSTYPE=" + compression

    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    if byPart:
        loadList = sorted(glob.glob('./hawq-ddl/load-partbypart/*.sql'))
    else:
        loadList = sorted(glob.glob('./hawq-ddl/load-part/*.sql'))

    tableList = sorted(glob.glob('./hawq-ddl/hawq-part/*.sql'))
    with queries.Session(hawqURI) as session:
        for table in tableList:
            ddlFile = open(table, "r")
            tableName = (table.split("/")[3]).split(".")[0]
            createStatus = "Creating Table: " + tableName
            uniInfoLog(createStatus, report)
            tableDDL = ddlFile.read()
            tableDDL = tableDDL.replace("$PARTS", parts)
            tableDDL = tableDDL.replace("$ORIENTATION", orientation)
            result = session.query(tableDDL)
            createStatus = "Table Created: " + tableName
            uniInfoLog(createStatus, report)
            if emailAddress:
                Email.sendEmail(emailAddress, createStatus, createStatus)


    # Hard Coded for now because Schema is HardCoded as well
    startDate = 2450815
    endDate = 2453005
    totalDays = endDate - startDate

    for load in loadList:
        ddlFile = open(load, "r")
        loadDDL = ddlFile.read()
        if byPart:
            tableName = ((load.split("/")[3]).split(".")[0])[:-13]
        else:
            tableName = ((load.split("/")[3]).split(".")[0])[:-5]
        loadStatus = "Loading: " + tableName
        uniInfoLog(loadStatus, report)
        ddlFile = open(load, "r")
        loadDDL = ddlFile.read()
        if byPart:
            partCount = getPartitionCount(master, database, username, password, "inventory")
            partStart = startDate

            for partNum in range(2, partCount + 1):
                modDDL = loadDDL
                # with queries.Session(hawqURI) as session:

                partName = tableName + "_1_prt_" + str(partNum)
                # End of part is num days in the part added to the first day
                partEnd = partStart + (int(parts) - 1)
                modDDL = modDDL.replace("$PARTNAME", str(partName))
                modDDL = modDDL.replace("$PARTVALUE1", str(partStart))
                modDDL = modDDL.replace("$PARTVALUE2", str(partEnd))

                with queries.Session(hawqURI) as session:
                    result = session.query(modDDL)
                partStart = partEnd + 1
                createStatus = "Table Partition Loaded: " + partName
                uniInfoLog(createStatus, report)
            createStatus = "Table Loaded: " + tableName
            uniInfoLog(createStatus, report)
            if emailAddress:
                Email.sendEmail(emailAddress, createStatus, createStatus)
                # alternative:
                # SELECT partitionboundary, partitiontablename, partitionname, partitionlevel, partitionrank FROM pg_partitions WHERE tablename='catalog_returns';

        else:
            with queries.Session(hawqURI) as session:
                result = session.query(loadDDL)
            createStatus = "Table Loaded: " + tableName
            uniInfoLog(createStatus, report)
            if emailAddress:
                Email.sendEmail(emailAddress, createStatus, createStatus)

    if emailAddress:
        messageLines = []
        with open(reportName, "r") as reportMsg:
            for line in reportMsg.readlines():
                messageLines.append(line)
            message = " ".join(messageLines)
            Email.sendEmail(emailAddress, "Repartition Final Report: " + (reportName.split('/')[2])[:-4], message)
Example #20
 def setUp(self):
     uri = queries.uri('localhost', 5432, 'postgres', 'postgres')
     try:
         self.session = queries.Session(uri, pool_max_size=10)
     except queries.OperationalError as error:
         raise unittest.SkipTest(str(error).split('\n')[0])
Example #21
 def uri(self):
     return queries.uri(self.host, int(self.port),
                        self.database, self.username, self.password)
Example #22
 def test_default_uri(self):
     expectation = 'postgresql://postgres@localhost:5432/postgres'
     self.assertEqual(queries.uri(), expectation)
Example #23
 def test_default_uri(self):
     expectation = 'postgresql://postgres@localhost:5432/postgres'
     self.assertEqual(queries.uri(), expectation)
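
For comparison, supplying a password yields a credentialed URI. A sketch in the same style as the two tests above (the expected string follows from the format they assert, but this exact case is an assumption):

 def test_uri_with_password(self):
     expectation = 'postgresql://foo:bar@baz:5433/qux'
     self.assertEqual(queries.uri('baz', 5433, 'qux', 'foo', 'bar'),
                      expectation)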
Example #24
def main():
    '''
        Manage Asynchronous Number General ORGs

        Organizations of Roman Generality.
    '''
    # Now we can run a few servers and processes

    # Server daemons and ports
    server_push_port = "5556"
    server_pub_port = "5558"

    # System frontend & backend routers and ports
    frontend_port = "4144"
    backend_port = "4188"

    # Servers
    Process(target=server_push, args=(server_push_port,)).start()
    Process(target=server_pub, args=(server_pub_port,)).start()
    Process(target=server_router, args=(frontend_port, backend_port,)).start()

    # Clients
    Process(target=client, args=(server_push_port, server_pub_port,)).start()

    # Start background tasks
    def start(task, *args):
        process = Process(target=task, args=args)
        process.daemon = True
        process.start()
    for i in range(NBR_CLIENTS):
        start(client_task, i)
    for i in range(NBR_WORKERS):
        start(worker_task, i)

    # Initialize main loop state
    count = NBR_CLIENTS
    
    # daemon options
    opts = options.options()

    # Set document database
    document = motor.MotorClient(opts.mongo_host, opts.mongo_port).mango

    # Set memcached backend
    memcache = mc.Client(
        [opts.memcached_host],
        binary=opts.memcached_binary,
        behaviors={
            "tcp_nodelay": opts.memcached_tcp_nodelay,
            "ketama": opts.memcached_ketama
        }
    )

    # Set SQL URI
    postgresql_uri = queries.uri(
        host=opts.sql_host,
        port=opts.sql_port,
        dbname=opts.sql_database,
        user=opts.sql_user,
        password=None
    )

    # Set kvalue database
    kvalue = False

    # Set default cache
    global cache
    cache = memcache

    # Set SQL session
    global sql
    sql = queries.TornadoSession(uri=postgresql_uri)

    # Set default database
    global db
    db = document

    # logging database hosts
    logging.info('MongoDB server: {0}:{1}'.format(opts.mongo_host, opts.mongo_port))
    logging.info('PostgreSQL server: {0}:{1}'.format(opts.sql_host, opts.sql_port))

    # Ensure indexes
    if opts.ensure_indexes:
        logging.info('Ensuring indexes...')
        indexes.ensure_indexes(db)
        logging.info('DONE.')

    # base url
    base_url = opts.base_url

    # system cache
    cache_enabled = opts.cache_enabled

    if cache_enabled:
        logging.info('Memcached server: {0}:{1}'.format(opts.memcached_host, opts.memcached_port))

    # mango web application daemon
    application = web.Application(

        [
            # Mango system knowledge (quotes) and realtime events.
            (r'/system/?', MangoHandler),

            # Basic-Auth session
            (r'/login/?', LoginHandler),
            (r'/logout/?', LogoutHandler),

            # ORGs records
            (r'/orgs/(?P<account>.+)/records/?', accounts.RecordsHandler),
            (r'/orgs/(?P<account>.+)/records/page/(?P<page_num>\d+)/?', accounts.RecordsHandler),

            # ORGs teams
            (r'/orgs/(?P<account>.+)/teams/page/(?P<page_num>\d+)/?', accounts.TeamsHandler),
            (r'/orgs/(?P<account>.+)/teams/(?P<team_uuid>.+)/?', accounts.TeamsHandler),
            (r'/orgs/(?P<account>.+)/teams/?', accounts.TeamsHandler),

            # ORGs members
            (r'/orgs/(?P<account>.+)/members/page/(?P<page_num>\d+)/?', accounts.MembersHandler),
            (r'/orgs/(?P<account>.+)/members/(?P<user>.+)/?', accounts.MembersHandler),
            (r'/orgs/(?P<account>.+)/members/?', accounts.MembersHandler),

            # Organizations of Random Generality.
            (r'/orgs/?', accounts.OrgsHandler),
            (r'/orgs/(?P<account>.+)/?', accounts.OrgsHandler),

            # Users records 
            (r'/users/(?P<account>.+)/records/?', accounts.RecordsHandler),
            (r'/users/(?P<account>.+)/records/page/(?P<page_num>\d+)/?', accounts.RecordsHandler),

            # Users billing routes
            (r'/users/(?P<account>.+)/routes/?', accounts.RoutesHandler),

            # Users
            (r'/users/?', accounts.UsersHandler),
            (r'/users/(?P<account>.+)/?', accounts.UsersHandler),

            # Records
            (r'/records/start/(?P<start>.*)/end/(?P<end>.*)/?', records.Handler),
            (r'/records/start/(?P<start>.*)/?', records.Handler),
            (r'/records/end/(?P<end>.*)/?', records.Handler),
            (r'/records/page/(?P<page_num>\d+)/?', records.Handler),

            # Public records 
            (r'/records/public/?', records.PublicHandler),
            (r'/records/public/page/(?P<page_num>\d+)/?', records.PublicHandler),

            # Unassigned records
            (r'/records/unassigned/?', records.UnassignedHandler),
            (r'/records/unassigned/page/(?P<page_num>\d+)/?', records.UnassignedHandler),

            # Records summary
            # (r'/records/summary/<lapse>/<value>/?', records.SummaryHandler),

            # Return last (n) of lapse
            # (r'/records/summary/<lapse>/lasts/(?P<int>\d+)/?', records.SummaryHandler),

            # Statistical projection based on the previous data.
            # (r'/records/summary/<lapse>/nexts/(?P<int>\d+)/?', records.SummaryHandler),

            # Records summary
            (r'/records/summary/start/(?P<start>.*)/end/(?P<end>.*)/?', records.SummaryHandler),
            
            (r'/records/summary/start/(?P<start>.*)/?', records.SummaryHandler),
            
            (r'/records/summary/end/(?P<end>.*)/?', records.SummaryHandler),

            (r'/records/summary/(?P<lapse>.*)/start/(?P<start>.*)/end/(?P<end>.*)/?', records.SummaryHandler),
            (r'/records/summary/(?P<lapse>.*)/start/(?P<start>.*)/?', records.SummaryHandler),
            (r'/records/summary/(?P<lapse>.*)/end/(?P<end>.*)/?', records.SummaryHandler),

            # Return last (n) of lapse
            # (r'/records/summary/(?P<lapse>.*)/lasts/(?P<int>\d+)/?', records.SummaryHandler),

            (r'/records/summary/(?P<lapse>.*)/?', records.SummaryHandler),
            (r'/records/summary/?', records.SummaryHandler),

            # Records summaries
            (r'/records/summaries/start/(?P<start>.*)/end/(?P<end>.*)/?', records.SummariesHandler),
            (r'/records/summaries/start/(?P<start>.*)/?', records.SummariesHandler),
            (r'/records/summaries/end/(?P<end>.*)/?', records.SummariesHandler),

            (r'/records/summaries/(?P<lapse>.*)/start/(?P<start>.*)/end/(?P<end>.*)/?', records.SummariesHandler),
            (r'/records/summaries/(?P<lapse>.*)/start/(?P<start>.*)/?', records.SummariesHandler),
            (r'/records/summaries/(?P<lapse>.*)/end/(?P<end>.*)/?', records.SummariesHandler),

            (r'/records/summaries/(?P<lapse>.*)/?', records.SummariesHandler),
            (r'/records/summaries/?', records.SummariesHandler),

            # Records
            (r'/records/(?P<record_uuid>.+)/?', records.Handler),
            (r'/records/?', records.Handler),

            # Records
            (r'/tasks/(?P<task_uuid>.+)/?', tasks.Handler),
            (r'/tasks/?', tasks.Handler),

            # Billings
            (r'/billings/(?P<billing_uuid>.+)/?', billings.RecordsHandler),
            (r'/billings/?', billings.RecordsHandler),

            # Billings records
            (r'/billings/records/start/(?P<start>.*)/end/(?P<end>.*)/?', billings.RecordsHandler),
            (r'/billings/records/start/(?P<start>.*)/?', billings.RecordsHandler),
            (r'/billings/records/end/(?P<end>.*)/?', billings.RecordsHandler),
            (r'/billings/records/?', billings.RecordsHandler)
        ],

        # system database
        db=db,

        # system cache
        cache=cache,

        # cache enabled flag
        cache_enabled=cache_enabled,

        # document datastorage
        document=document,

        # kvalue datastorage
        kvalue=kvalue,

        # sql datastorage
        sql=sql,

        # debug mode
        debug=opts.debug,

        # application domain
        domain=opts.domain,

        # application timezone
        timezone=opts.timezone,

        # pagination page size
        page_size=opts.page_size,

        # cookie settings
        cookie_secret=opts.cookie_secret,

        # login url
        login_url='/login/'
    )

    # Mango periodic cast callbacks
    periodic_records = PeriodicCast(periodic_get_records, 5000)
    periodic_records.start()

    # Setting up mango processor
    application.listen(opts.port)
    logging.info('Listening on http://%s:%s' % (opts.host, opts.port))
    ioloop.IOLoop.instance().start()
Example #25
def main():
    '''
        Manage Asynchronous Number General ORGs

        Organizations of Roman Generality.
    '''
    # Now we can run a few servers and processes

    # Server daemons and ports
    server_push_port = "5556"
    server_pub_port = "5558"

    # System frontend & backend routers and ports
    frontend_port = "4144"
    backend_port = "4188"

    # Servers
    Process(target=server_push, args=(server_push_port, )).start()
    Process(target=server_pub, args=(server_pub_port, )).start()
    Process(target=server_router, args=(
        frontend_port,
        backend_port,
    )).start()

    # Clients
    Process(target=client, args=(
        server_push_port,
        server_pub_port,
    )).start()

    # Start background tasks
    def start(task, *args):
        process = Process(target=task, args=args)
        process.daemon = True
        process.start()

    for i in range(NBR_CLIENTS):
        start(client_task, i)
    for i in range(NBR_WORKERS):
        start(worker_task, i)

    # Initialize main loop state
    count = NBR_CLIENTS

    # daemon options
    opts = options.options()

    # Set document database
    document = motor.MotorClient(opts.mongo_host, opts.mongo_port).mango

    # Set memcached backend
    memcache = mc.Client([opts.memcached_host],
                         binary=opts.memcached_binary,
                         behaviors={
                             "tcp_nodelay": opts.memcached_tcp_nodelay,
                             "ketama": opts.memcached_ketama
                         })

    # Set SQL URI
    postgresql_uri = queries.uri(host=opts.sql_host,
                                 port=opts.sql_port,
                                 dbname=opts.sql_database,
                                 user=opts.sql_user,
                                 password=None)

    # Set kvalue database
    kvalue = False

    # Set default cache
    global cache
    cache = memcache

    # Set SQL session
    global sql
    sql = queries.TornadoSession(uri=postgresql_uri)

    # Set default database
    global db
    db = document

    # logging database hosts
    logging.info('MongoDB server: {0}:{1}'.format(opts.mongo_host,
                                                  opts.mongo_port))
    logging.info('PostgreSQL server: {0}:{1}'.format(opts.sql_host,
                                                     opts.sql_port))

    # Ensure indexes
    if opts.ensure_indexes:
        logging.info('Ensuring indexes...')
        indexes.ensure_indexes(db)
        logging.info('DONE.')

    # base url
    base_url = opts.base_url

    # system cache
    cache_enabled = opts.cache_enabled

    if cache_enabled:
        logging.info('Memcached server: {0}:{1}'.format(
            opts.memcached_host, opts.memcached_port))

    # mango web application daemon
    application = web.Application(
        [
            # Mango system knowledge (quotes) and realtime events.
            (r'/system/?', MangoHandler),

            # Basic-Auth session
            (r'/login/?', LoginHandler),
            (r'/logout/?', LogoutHandler),

            # ORGs records
            (r'/orgs/(?P<account>.+)/records/?', accounts.RecordsHandler),
            (r'/orgs/(?P<account>.+)/records/page/(?P<page_num>\d+)/?',
             accounts.RecordsHandler),

            # ORGs teams
            (r'/orgs/(?P<account>.+)/teams/page/(?P<page_num>\d+)/?',
             accounts.TeamsHandler),
            (r'/orgs/(?P<account>.+)/teams/(?P<team_uuid>.+)/?',
             accounts.TeamsHandler),
            (r'/orgs/(?P<account>.+)/teams/?', accounts.TeamsHandler),

            # ORGs members
            (r'/orgs/(?P<account>.+)/members/page/(?P<page_num>\d+)/?',
             accounts.MembersHandler),
            (r'/orgs/(?P<account>.+)/members/(?P<user>.+)/?',
             accounts.MembersHandler),
            (r'/orgs/(?P<account>.+)/members/?', accounts.MembersHandler),

            # Organizations of Random Generality.
            (r'/orgs/?', accounts.OrgsHandler),
            (r'/orgs/(?P<account>.+)/?', accounts.OrgsHandler),

            # Users records
            (r'/users/(?P<account>.+)/records/?', accounts.RecordsHandler),
            (r'/users/(?P<account>.+)/records/page/(?P<page_num>\d+)/?',
             accounts.RecordsHandler),

            # Users billing routes
            (r'/users/(?P<account>.+)/routes/?', accounts.RoutesHandler),

            # Users
            (r'/users/?', accounts.UsersHandler),
            (r'/users/(?P<account>.+)/?', accounts.UsersHandler),

            # Records
            (r'/records/start/(?P<start>.*)/end/(?P<end>.*)/?', records.Handler
             ),
            (r'/records/start/(?P<start>.*)/?', records.Handler),
            (r'/records/end/(?P<end>.*)/?', records.Handler),
            (r'/records/page/(?P<page_num>\d+)/?', records.Handler),

            # Public records
            (r'/records/public/?', records.PublicHandler),
            (r'/records/public/page/(?P<page_num>\d+)/?',
             records.PublicHandler),

            # Unassigned records
            (r'/records/unassigned/?', records.UnassignedHandler),
            (r'/records/unassigned/page/(?P<page_num>\d+)/?',
             records.UnassignedHandler),

            # Records summary
            # (r'/records/summary/<lapse>/<value>/?', records.SummaryHandler),

            # Return last (n) of lapse
            # (r'/records/summary/<lapse>/lasts/(?P<int>\d+)/?', records.SummaryHandler),

            # Statistical projection based on the previous data.
            # (r'/records/summary/<lapse>/nexts/(?P<int>\d+)/?', records.SummaryHandler),

            # Records summary
            (r'/records/summary/start/(?P<start>.*)/end/(?P<end>.*)/?',
             records.SummaryHandler),
            (r'/records/summary/start/(?P<start>.*)/?',
             records.SummaryHandler),
            (r'/records/summary/end/(?P<end>.*)/?', records.SummaryHandler),
            (r'/records/summary/(?P<lapse>.*)/start/(?P<start>.*)/end/(?P<end>.*)/?',
             records.SummaryHandler),
            (r'/records/summary/(?P<lapse>.*)/start/(?P<start>.*)/?',
             records.SummaryHandler),
            (r'/records/summary/(?P<lapse>.*)/end/(?P<end>.*)/?',
             records.SummaryHandler),

            # Return last (n) of lapse
            # (r'/records/summary/(?P<lapse>.*)/lasts/(?P<int>\d+)/?', records.SummaryHandler),
            (r'/records/summary/(?P<lapse>.*)/?', records.SummaryHandler),
            (r'/records/summary/?', records.SummaryHandler),

            # Records summaries
            (r'/records/summaries/start/(?P<start>.*)/end/(?P<end>.*)/?',
             records.SummariesHandler),
            (r'/records/summaries/start/(?P<start>.*)/?',
             records.SummariesHandler),
            (r'/records/summaries/end/(?P<end>.*)/?',
             records.SummariesHandler),
            (r'/records/summaries/(?P<lapse>.*)/start/(?P<start>.*)/end/(?P<end>.*)/?',
             records.SummariesHandler),
            (r'/records/summaries/(?P<lapse>.*)/start/(?P<start>.*)/?',
             records.SummariesHandler),
            (r'/records/summaries/(?P<lapse>.*)/end/(?P<end>.*)/?',
             records.SummariesHandler),
            (r'/records/summaries/(?P<lapse>.*)/?', records.SummariesHandler),
            (r'/records/summaries/?', records.SummariesHandler),

            # Records
            (r'/records/(?P<record_uuid>.+)/?', records.Handler),
            (r'/records/?', records.Handler),

            # Records
            (r'/tasks/(?P<task_uuid>.+)/?', tasks.Handler),
            (r'/tasks/?', tasks.Handler),

            # Billings
            (r'/billings/(?P<billing_uuid>.+)/?', billings.RecordsHandler),
            (r'/billings/?', billings.RecordsHandler),

            # Billings records
            (r'/billings/records/start/(?P<start>.*)/end/(?P<end>.*)/?',
             billings.RecordsHandler),
            (r'/billings/records/start/(?P<start>.*)/?',
             billings.RecordsHandler),
            (r'/billings/records/end/(?P<end>.*)/?', billings.RecordsHandler),
            (r'/billings/records/?', billings.RecordsHandler)
        ],

        # system database
        db=db,

        # system cache
        cache=cache,

        # cache enabled flag
        cache_enabled=cache_enabled,

        # document datastorage
        document=document,

        # kvalue datastorage
        kvalue=kvalue,

        # sql datastorage
        sql=sql,

        # debug mode
        debug=opts.debug,

        # application domain
        domain=opts.domain,

        # application timezone
        timezone=opts.timezone,

        # pagination page size
        page_size=opts.page_size,

        # cookie settings
        cookie_secret=opts.cookie_secret,

        # login url
        login_url='/login/')

    # Mango periodic cast callbacks
    periodic_records = PeriodicCast(periodic_get_records, 5000)
    periodic_records.start()

    # Setting up mango processor
    application.listen(opts.port)
    logging.info('Listening on http://%s:%s' % (opts.host, opts.port))
    ioloop.IOLoop.instance().start()
Example #26
def main():
    '''
        Starfruit communication carambolas.
    '''
    log = logging.getLogger(__name__)
    # daemon options
    opts = options.options()
    # Come on dude, grow this fruit up
    #parser = create_parser(
    #    description="Tornado-based AMI starfruit client")
    # more options base on argument parser
    #more_options, args = parse_args(parser)

    # Set document database
    document = motor.MotorClient(opts.mongo_host, opts.mongo_port).starfruit

    # Set memcached backend
    memcache = mc.Client(
        [opts.memcached_host],
        binary=opts.memcached_binary,
        behaviors={
            "tcp_nodelay": opts.memcached_tcp_nodelay,
            "ketama": opts.memcached_ketama
        }
    )

    # Set SQL URI
    postgresql_uri = queries.uri(
        host=opts.sql_host,
        port=opts.sql_port,
        dbname=opts.sql_database,
        user=opts.sql_user,
        password=None
    )

    # Set kvalue database
    global kvalue
    kvalue = kvalue

    # Set default cache
    global cache
    cache = memcache

    # Set SQL session
    global sql
    sql = queries.TornadoSession(uri=postgresql_uri)

    # Set default database
    global db
    db = document
    
    system_uuid = uuid.uuid4()
    # logging system spawned
    logging.info('Starfruit system {0} spawned'.format(system_uuid))

    # logging database hosts
    logging.info('MongoDB server: {0}:{1}'.format(opts.mongo_host, opts.mongo_port))
    logging.info('PostgreSQL server: {0}:{1}'.format(opts.sql_host, opts.sql_port))

    # Ensure indexes
    if opts.ensure_indexes:
        logging.info('Ensuring indexes...')
        indexes.ensure_indexes(db)
        logging.info('DONE.')

    # base url
    base_url = opts.base_url

    # system cache
    cache_enabled = opts.cache_enabled
    if cache_enabled:
        logging.info('Memcached server: {0}:{1}'.format(opts.memcached_host, opts.memcached_port))

    external_log = opts.external_log
    if external_log:
        global logger
        logger = zmq_external_logger()

    # starfruit web application daemon
    application = web.Application(

        [
            # Starfruit system knowledge (quotes) <-- on realtime events.
            (r'/system/?', CarambolaHandler),

            # Asterisks
            (r'/asterisks/(?P<asterisk_uuid>.+)/?', asterisks.Handler),
            (r'/asterisks/?', asterisks.Handler),
        ],

        # system database
        db=db,

        # system cache
        cache=cache,

        # cache enabled flag
        cache_enabled=cache_enabled,

        # document datastorage
        document=document,

        # kvalue datastorage
        kvalue=kvalue,

        # sql datastorage
        sql=sql,

        # debug mode
        debug=opts.debug,

        # application domain
        domain=opts.domain,

        # application timezone
        timezone=opts.timezone,

        # pagination page size
        page_size=opts.page_size,

        # cookie settings
        cookie_secret=opts.cookie_secret,

        # login url
        login_url='/login/'
    )

    # Starfruit periodic cast callbacks
    #periodic_records = PeriodicCast(periodic_get_records, 5000)
    #periodic_records.start()

    # Setting up starfruit HTTP listener
    application.listen(opts.port)
    logging.info('Listening on http://%s:%s' % (opts.host, opts.port))
    
    loop = ioloop.IOLoop.instance()

    # Setting up starfruit protocol listener

    more_options = {
        'username': opts.ami_user,
        'secret': opts.ami_secret,
        'host': opts.ami_host,
        'port': opts.ami_port
    }
    
    proto = StarFruitProtocol(loop, more_options)

    adapter = TornadoAdapter(proto)
    logging.info('Listening starfruit on tcp://%s:%s' % (opts.ami_host, opts.ami_port))
    stream = IOStream(socket.socket(), loop)

    stream.connect((opts.ami_host, int(opts.ami_port)),
                   lambda: adapter.bind_stream(stream))

    try:
        loop.start()
    except KeyboardInterrupt:
        pass
Example #27
 def setUp(self):
     super(TornadoSessionIntegrationTests, self).setUp()
     self.session = queries.TornadoSession(queries.uri(
         'localhost', 5432, 'postgres', 'postgres'),
                                           pool_max_size=10,
                                           io_loop=self.io_loop)
Example #28
 def setUp(self):
     uri = queries.uri('localhost', 5432, 'postgres', 'postgres')
     try:
         self.session = queries.Session(uri, pool_max_size=10)
     except queries.OperationalError as error:
         raise unittest.SkipTest(str(error).split('\n')[0])
Example #29
 def test_uri_without_password(self):
     expectation = 'postgresql://foo@baz:5433/qux'
     self.assertEqual(queries.uri('baz', 5433, 'qux', 'foo'), expectation)
Example #30
from os import environ

import queries
from dotenv import load_dotenv

load_dotenv()

URI = queries.uri(host=environ['PG_HOST'],
                  port=environ['PG_PORT'],
                  dbname=environ['PG_DBNAME'],
                  user=environ['PG_USER'],
                  password=environ['PG_PASSWORD'])


def get_session():
    return queries.Session(URI)
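
A usage sketch for get_session() (the query is illustrative; using the session as a context manager releases the pooled connection on exit):

with get_session() as session:
    for row in session.query('SELECT now() AS ts'):
        print(row['ts'])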
Example #31
 def test_uri_without_password(self):
     expectation = 'postgresql://foo@baz:5433/qux'
     self.assertEqual(queries.uri('baz', 5433, 'qux', 'foo'),
                      expectation)
Example #32
 def __init__(self):
     settings = dict(notebook_dir="test_submissions")
     handlers = [(r"/submit", start.SubmissionHandler)]
     Application.__init__(self, handlers, **settings)
     self.db = queries.TornadoSession(queries.uri(**dsn))
Example #33
def main():
    # daemon options
    opts = options.options()

    @gen.coroutine
    def check_tree():
        os.environ['HOME'] = '/opt/treehouse/'
        process = Popen([treehouse_rel, "ping", "."], stdout=PIPE)
        (output, err) = process.communicate()
        exit_code = process.wait()
        if 'not responding to pings' in output:
            logging.error(output)
            process = Popen([treehouse_rel, "start", "."], stdout=PIPE)
            (output, err) = process.communicate()
            exit_code = process.wait()
        elif 'pong' in output:
            # ping pong yeah!
            pass
        else:
            global von_count
            von_count += 1
            logging.error(von_count)
            if von_count > 5:
                # Crash circusd monitor
                circus = Popen(["/etc/init.d/circusd", "stop", "."], stdout=PIPE)
                (output, err) = circus.communicate()

    # Set memcached backend
    memcache = mc.Client(
        [opts.memcached_host],
        binary=opts.memcached_binary,
        behaviors={
            "tcp_nodelay": opts.memcached_tcp_nodelay,
            "ketama": opts.memcached_ketama
        }
    )

    # Set SQL URI
    postgresql_uri = queries.uri(
        host=opts.sql_host,
        port=opts.sql_port,
        dbname='forge',
        user=opts.sql_user,
        password=None
    )

    # Set system uuid
    global system_uuid
    system_uuid = system_uuid
    # Set treehouse release
    global treehouse_rel
    treehouse_rel = treehouse_rel
    # Set kvalue database
    global kvalue
    kvalue = kvalue
    # Set default cache
    global cache
    cache = memcache
    # Set SQL session
    global sql
    sql = queries.TornadoSession(uri=postgresql_uri)
    # Set default database
    global db
    db = document

    # logging system spawned
    logging.info('Treehouse system {0} spawned'.format(system_uuid))

    # logging database hosts
    logging.info('PostgreSQL server: {0}:{1}'.format(opts.sql_host, opts.sql_port))

    # base url
    base_url = opts.base_url

    # system cache
    cache_enabled = opts.cache_enabled
    if cache_enabled:
        logging.info('Memcached server: {0}:{1}'.format(opts.memcached_host, opts.memcached_port))

    # treehouse web application daemon
    application = web.Application(

        [
            # Treehouse system knowledge (quotes) and realtime events.
            (r'/tree/?', TreeHandler),

            # experiment with WS
            (r'/ws/alerts', TreeWSHandler),

            # Imps resource
            (r'/imps/(?P<imp_uuid>.+)/?', imps.Handler),
            (r'/imps/?', imps.Handler),

            # Indexes resource
            (r'/indexes/(?P<index_uuid>.+)/?', indexes.Handler),
            (r'/indexes/?', indexes.Handler),

            # Nodes resource
            (r'/nodes/(?P<node_uuid>.+)/?', nodes.Handler),
            (r'/nodes/?', nodes.Handler)
            
        ],

        # system database
        db=db,
        # system cache
        cache=cache,
        # cache enabled flag
        cache_enabled=cache_enabled,
        # kvalue datastorage
        kvalue=kvalue,
        # sql datastorage
        sql=sql,
        # debug mode
        debug=opts.debug,
        # application domain
        domain=opts.domain,
        # application timezone
        timezone=opts.timezone,
        # pagination page size
        page_size=opts.page_size,
        # cookie settings
        cookie_secret=opts.cookie_secret,
        # login url
        login_url='/login/'
    )
    # Treehouse periodic cast callbacks

    check_node_tree = PeriodicCast(check_tree, 5000)
    check_node_tree.start()

    # Setting up treehouse processor
    application.listen(opts.port)
    logging.info('Listening on http://%s:%s' % (opts.host, opts.port))
    loop = ioloop.IOLoop.instance()

    # Process heartbeat SUB/PUB
    #loop.add_callback(subscriber)
    #loop.add_callback(publisher, opts.treehouse_host)

    loop.start()
Example #34
def rowCount(master, database, username, password, table):
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    with queries.Session(hawqURI) as session:
        result = session.query("select count(*) from " + table + ";")
        return str(result.items()[0]['count'])
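
As in Example #4, the table name here is an identifier, and identifiers cannot be bound as query parameters; only values can. A common mitigation is to validate the name's shape before formatting it into the SQL; a minimal sketch (a hypothetical variant that assumes simple, unqualified table names):

def rowCountSafe(master, database, username, password, table):
    # identifiers cannot be parameterized, so reject anything but plain names
    if not table.replace("_", "").isalnum():
        raise ValueError("unexpected table name: " + table)
    hawqURI = queries.uri(master, port=5432, dbname=database, user=username, password=password)
    with queries.Session(hawqURI) as session:
        result = session.query("select count(*) from " + table)
        return str(result.items()[0]['count'])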