Example 1
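    # Fill ten streams with synthetic data, then fetch three of them in one call;
    # db_multiple should return one array per requested stream (and an empty
    # array for streams with no data in the range).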
    def test_multi(self):
        streams = range(1, int(1e4), int(1e3))
        for i in streams:
            self.infill_stream(i)

        rdb.db_setup('localhost', port)
        fetch = random.sample(streams, 3)
        data = rdb.db_multiple(fetch, 0, 10000)

        # check grabbing three random streams
        self.assertEqual(len(data), 3)
        for dv in data:
            self.assertEqual(dv.shape, (10000, 2))
            self.assertEqual(np.sum(dv[:, 0] - np.arange(0, 10000)), 0)
            self.assertEqual(np.sum(dv[:, 1] - np.arange(0, 10000)), 0)

        # grab some streams without data
        data = rdb.db_multiple([2, 3, 4, 6], 0, 10000)
        self.assertEqual(len(data), 4)
        for dv in data:
            self.assertEqual(dv.shape, (0, 2))
Example 2
import sys
import time
import readingdb as rdb
import _readingdb

import numpy as np

print "using readingdb", rdb.__file__
print _readingdb.__file__

end = 1304102690

rdb.db_setup('localhost', 4242)
db = rdb.db_open(host='localhost', port=4242)
# db = rdb.db_open()

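# Convenience wrappers: take the first result array returned by db_next/db_prev
# and convert it to a plain Python list.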
def next(id, ref, n=1): 
    return rdb.db_next(id, ref, n=n, conn=db)[0].tolist()

def prev(id, ref, n=1, conn=db): 
    return rdb.db_prev(id, ref, n=n, conn=db)[0].tolist()

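# Total readings written to substream 1 by the -a option below (1000 buckets of 100).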
S1MAX = 1000 * 100
if len(sys.argv) == 1:
    print "%s [-a | -r | -n | -d | -c]" % sys.argv[0]
elif sys.argv[1] == '-a':
    # substream 1 has every bucket filled
    for i in range(0, 1000):
        data = [(x, x, x) for x in xrange(i * 100, i * 100 + 100)]
        rdb.db_add(db, 1, data)
Example 3
import sys
import time
import readingdb as rdb
import _readingdb

import numpy as np

print "using readingdb", rdb.__file__
print _readingdb.__file__

end = 1304102690
PORT = 4242

rdb.db_setup("localhost", PORT)
db = rdb.db_open(host="localhost", port=PORT)
# db = rdb.db_open()


def next(id, ref, n=1):
    return rdb.db_next(id, ref, n=n, conn=db)[0].tolist()


def prev(id, ref, n=1, conn=db):
    return rdb.db_prev(id, ref, n=n, conn=db)[0].tolist()


S1MAX = 1000 * 100
if len(sys.argv) == 1:
    print "%s [-a | -r | -n | -d | -c]" % sys.argv[0]
elif sys.argv[1] == "-a":
    # substream 1 has every bucket filled
Example 4
#!/usr/bin/python

import readingdb as rdb
import sys
import time

rdb.db_setup("localhost", 4242)
a = rdb.db_open("localhost")

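# db_prev with a far-future reference time returns the most recent reading
# stored for stream 1; [0][0][0] picks the timestamp out of the first result row.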
b = rdb.db_prev(1, 100000000000, conn=a)
lasttime = b[0][0][0]

rdb.db_close(a)

print(lasttime)

# ltime = file('tempfiles/lasttime', 'w')
# ltime.write(str(lasttime))
# ltime.close()

sys.exit()
Example 5
import readingdb as rdb
rdb.db_setup('localhost', 4242)
a = rdb.db_open('localhost')

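# Write 100 readings into stream 1, read them back over the same time range,
# then close the connection.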
rdb.db_add(a, 1, [(x, 0, x) for x in xrange(0, 100)])
print rdb.db_query(1, 0, 100, conn=a)
rdb.db_close(a)
Example 6
    parser.add_option(
        "-n", "--no-action", dest="noop", default=False, action="store_true", help="don't actually insert the data"
    )
    parser.add_option("-f", "--map-file", dest="mapfile", default=False, help="import using a map file")
    opts, hosts = parser.parse_args()
    if len(hosts) != 2:
        parser.print_help()
        sys.exit(1)

    old_db = parse_netloc(hosts[0])
    new_db = parse_netloc(hosts[1])

    print "Importing data from %s:%i to %s:%i" % (old_db + new_db)
    print "substream: %i" % int(opts.substream)

    rdb4.db_setup(old_db[0], old_db[1])
    db0 = rdb4.db_open(host=old_db[0], port=old_db[1])
    db1 = rdb4.db_open(host=new_db[0], port=new_db[1])

    # rdb4.db_substream(db0, int(opts.substream))
    # rdb4.db_substream(db1, int(opts.substream))

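    # Without the zero option, import everything newer than `ago` hours before now;
    # with it, import everything older than that cutoff instead.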
    if not opts.zero:
        IMPORT_START = int(time.time()) - (int(opts.ago) * 3600)
        IMPORT_STOP = 2 ** 32 - 10
    else:
        IMPORT_START = 1
        IMPORT_STOP = int(time.time()) - (int(opts.ago) * 3600)
    print "Importing from %i to %i" % (IMPORT_START, IMPORT_STOP)

    if not opts.mapfile:
Example 7
from smap.archiver.data import SmapData
from smap.core import Timeseries

# materializer specific imports
from wrappers import *
from mat_utils import *

# misc imports
import json
import sys
import readingdb
import shelve
from ast import literal_eval

# config
readingdb.db_setup('localhost', 4242)
URL_TO_USE = "http://localhost:8079/api/query?"
QUERYSTR_TO_USE = "select * where not has Metadata/Extra/Operator"
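# i.e. every stream that does not yet carry a Metadata/Extra/Operator tag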

REPUBLISH_LISTEN_ON = False
# end config

class Materializer:
    def __init__(self):
        """ Initialize our list of existing streams, we eventually want to
        allow this to be loaded from file. Additionally, here we load 
        information about which operators should be applied to which drivers
        including an 'all' field."""

        self.republisher = None
        self.stream_persist = StreamShelf('stream')