def run(self):
            """Dequeue requests until we're done working"""
            if self.host == None:
                db = rdb4.db_open()
            else:
                db = rdb4.db_open(host=self.host[0], port=self.host[1])
            while True:
                try:
                    request = self.parent.requests.get_nowait()
                except Queue.Empty:
                    break
                if request.has_key("substream"):
                    rdb4.db_substream(db, request['substream'])
                else:
                    rdb4.db_substream(db, 0)

                first = True
                result = []
                last = []

                while first or len(last) == 10000:
                    last = rdb4.db_query(db, int(request['streamid']),
                                         int(request['starttime']),
                                         int(request['endtime']))
                    first = False
                    print len(last)
                    result.extend(last)
                    if not self.full: break

                if self.parent.as_numpy and len(result) > 0:
                    result = np.array(result)
                    result = result[:,[0,2]]                      
                self.parent.returns[request['streamid']] = result

            rdb4.db_close(db)
Beispiel #2
0
        def run(self):
            """Dequeue requests until we're done working"""
            if self.host == None:
                db = rdb4.db_open()
            else:
                db = rdb4.db_open(host=self.host[0], port=self.host[1])
            while True:
                try:
                    request = self.parent.requests.get_nowait()
                except Queue.Empty:
                    break
                if request.has_key("substream"):
                    rdb4.db_substream(db, request['substream'])
                else:
                    rdb4.db_substream(db, 0)

                first = True
                result = []
                last = []

                while first or len(last) == 10000:
                    last = rdb4.db_query(db, int(request['streamid']),
                                         int(request['starttime']),
                                         int(request['endtime']))
                    first = False
                    print len(last)
                    result.extend(last)
                    if not self.full: break

                if self.parent.as_numpy and len(result) > 0:
                    result = np.array(result)
                    result = result[:, [0, 2]]
                self.parent.returns[request['streamid']] = result

            rdb4.db_close(db)
    def test_simple(self):
        """Fill stream 1 and verify every reading round-trips intact."""
        self.infill_stream(1)

        readings = rdb.db_query(self.conn, 1, 0, 10000)
        self.assertEqual(len(readings), 10000)
        # Both the timestamp and value fields were written equal to the index.
        for expected, reading in enumerate(readings):
            self.assertEqual(reading[0], expected)
            self.assertEqual(reading[1], expected)
Beispiel #4
0
    def test_simple(self):
        """Fill stream 1 and verify every reading round-trips intact."""
        self.infill_stream(1)

        readings = rdb.db_query(self.conn, 1, 0, 10000)
        self.assertEqual(len(readings), 10000)
        # Both the timestamp and value fields were written equal to the index.
        for expected, reading in enumerate(readings):
            self.assertEqual(reading[0], expected)
            self.assertEqual(reading[1], expected)
Beispiel #5
0
# Total readings written to substream 1: 1000 buckets x 100 readings each.
S1MAX = 1000 * 100
# Command-line driver; assumes `sys`, `rdb`, and an open `db` connection
# are set up earlier in the file (not visible in this excerpt).
if len(sys.argv) == 1:
    # No mode given: print usage.
    print "%s [-a | -r | -n | -d | -c]" % sys.argv[0]
elif sys.argv[1] == '-a':
    # substream 1 has every bucket filled
    for i in range(0, 1000):
        data = [(x, x, x) for x in xrange(i * 100, i * 100 + 100)]
        rdb.db_add(db, 1, data)

    # substream 2 has points one hour apart
    for i in range(0, 10000):
        rdb.db_add(db, 2, [(i * 3600, 0, i * 3600)])
elif sys.argv[1] == '-r':
    # test that we read back what we wrote
    # db_query returns a list of per-stream result arrays; expect one.
    d = rdb.db_query(1, 0, 10000)
    assert len(d) == 1
    d = d[0].tolist()
    assert len(d) == 10000

    # Each reading's timestamp and value equal its index.
    for i in xrange(0, 10000):
        assert d[i][0] == i
        assert d[i][1] == i

    d = rdb.db_query(2, 0, 3600 * 10000)
    assert len(d) == 1
    d = d[0].tolist()
    print d[0:10]
    for i in xrange(0, 10000):
        # Substream 2 points were written one hour (3600 s) apart.
        assert d[i][0] == i * 3600
# The '-d' handler is truncated at this point in the excerpt.
elif sys.argv[1] == '-d':
Beispiel #6
0
#!/usr/bin/python
# Benchmark one pooled bulk query across streams 1..10000 and optionally
# dump the result for offline inspection.

import readingdb as rdb
import sys
import time

rdb.db_setup('localhost', 4242)
a = rdb.db_open('localhost')

# SECURITY NOTE: eval() on a command-line argument executes arbitrary
# code.  Callers are trusted here, but ast.literal_eval would be safer.
debug = eval(sys.argv[1])

# Time a single pooled range-query over a very large window.
starttime = time.time()
temp = rdb.db_query(list(range(1, 10001)), 0, 1000000000000)
endtime = time.time()
completiontime = endtime - starttime

if debug:
    # Dump [streamid, timestamp] pairs for every non-empty stream.
    processed = [list(stream) for stream in temp]
    for x in range(len(processed)):
        # Stream ids are 1-based; temp is indexed from 0.
        processed[x] = [[x + 1, int(z[0])] for z in processed[x]]
    # `with` guarantees the handle is closed even if a write fails
    # (the original used file()/close() and could leak on error).
    with open('tempfiles/debugout', 'w') as debugout:
        for line in processed:
            if line != []:
                debugout.write(str(line) + "\n")

rdb.db_close(a)
Beispiel #7
0
import readingdb as rdb
rdb.db_setup('localhost', 4242)
a = rdb.db_open('localhost')

rdb.db_add(a, 1, [(x, 0, x) for x in xrange(0, 100)])
print rdb.db_query(1, 0, 100, conn=a)
rdb.db_close(a)
Beispiel #8
0
import readingdb as rdb

# specify default host/port
rdb.db_setup('localhost', 4242)

# create a connection
db = rdb.db_open('localhost')

# add data.  the tuples are  (timestamp, seqno, value)
rdb.db_add(db, 1, [(x, 0, x) for x in xrange(0, 100)])

# read back the data we just wrote using the existing connection
# the args are streamid, start_timestamp, end_timestamp
print rdb.db_query(1, 0, 100, conn=db)
# close
rdb.db_close(db)

# read back the data again using a connection pool this time.  You can
# specify a list of streamids to range-query multiple streams at once.
rdb.db_query([1], 0, 100) 
Beispiel #9
0
        user=settings.DATABASE_USER,
    )
    rdb = readingdb.db_open()
    loader = TsdbLoader()
    loader.start()

    for s in Stream.objects.all().order_by("id"):
        if s.id <= 1:
            next
        first = True
        data = []
        startts = 0
        extra = "source=%s path=%s" % (s.subscription, s.path())
        print extra
        while first or len(data) == 10000:
            first = False
            data = readingdb.db_query(rdb, s.id, startts, 2 ** 31)

            if len(data) > 0:
                startts = data[-1][0]
                loader.q.put({"method": "put", "id": s.id, "data": data, "extra": extra})

            print startts
            # print data
        break
    loader.q.put({"method": "quit"})
    loader.join()

    readingdb.db_close(rdb)
    db.close()
Beispiel #10
0
import readingdb as rdb
import sys
import time

rdb.db_setup('localhost', 4242)
a = rdb.db_open('localhost')

# SECURITY NOTE: eval() on command-line arguments executes arbitrary
# code.  Callers are trusted here, but ast.literal_eval would be safer.
debug = eval(sys.argv[2])

# argv[1] is a (stream_count, starttime, endtime) sequence literal.
params = eval(sys.argv[1])
streams = params[0]
qstarttime = params[1]
qendtime = params[2]

# Time one pooled bulk query across streams 1..streams.
starttime = time.time()
temp = rdb.db_query(list(range(1, 1 + streams)), qstarttime, qendtime)
endtime = time.time()
completiontime = endtime - starttime


if debug:
    # Dump [streamid, timestamp] pairs for every non-empty stream.
    processed = [list(stream) for stream in temp]
    for x in range(len(processed)):
        # Stream ids are 1-based; temp is indexed from 0.
        processed[x] = [[x + 1, int(z[0])] for z in processed[x]]
    # `with` guarantees the handle is closed even if a write fails
    # (the original used file()/close() and could leak on error).
    with open('tempfiles/debugout', 'w') as debugout:
        for line in processed:
            if line != []:
                debugout.write(str(line) + "\n")
Beispiel #11
0
        import_map = [(x, x) for x in xrange(start, int(opts.maxid))]
    else:
        # Map-file format: "to_id from_id [from_id ...]"; '#' starts a comment.
        import_map = []
        with open(opts.mapfile, "r") as fp:
            for line in fp.readlines():
                # Strip trailing comments, then split ids on whitespace.
                # NOTE(review): non-raw "\#" relies on Python passing the
                # unknown escape through; a raw string would be cleaner.
                line = re.sub("\#.*$", "", line)
                ids = re.split("[ \t]+", line)
                for id in ids[1:]:
                    import_map.append((int(ids[0]), int(id)))

    # Copy each source stream (on db0) into its destination id (on db1).
    for to_stream, from_stream in import_map:
        print "starting %i <- %i" % (to_stream, from_stream)
        first = True
        vec = [(IMPORT_START,)]
        t = tic()
        # Single bulk query over the whole import window (limit effectively
        # unbounded); db_query returns a list of per-stream result arrays.
        data = rdb4.db_query(from_stream, IMPORT_START, IMPORT_STOP, limit=100000000, conn=db0)
        if not len(data):
            continue
        data = data[0]
        print "received", data.shape
        toc(t)

        t = tic()
        if opts.noop:
            continue
        # Re-insert in chunks of 100 readings per db_add call; a short
        # final chunk ends the loop (body truncated in this excerpt).
        bound = (int(data.shape[0]) / 100) + 1
        for i in xrange(0, bound):
            vec = (data[(i * 100) : (i * 100) + 100, :]).tolist()
            # print time.ctime(vec[0][0])
            rdb4.db_add(db1, to_stream, map(tuple, vec))
            if len(vec) < 100:
Beispiel #12
0
                    user=settings.DATABASE_USER)
    rdb = readingdb.db_open()
    loader = TsdbLoader()
    loader.start()
    
    for s in Stream.objects.all().order_by('id'):
        if s.id <= 1:
            next
        first = True
        data = []
        startts = 0
        extra = 'source=%s path=%s' % (s.subscription, s.path())
        print extra
        while first or len(data) == 10000:
            first = False
            data = readingdb.db_query(rdb, s.id, startts, 2 ** 31)

            if len(data) > 0:
                startts = data[-1][0]
                loader.q.put({'method': 'put',
                              'id': s.id,
                              'data': data,
                              'extra': extra})
                
            print startts
            # print data
        break
    loader.q.put({'method': 'quit'})
    loader.join()
    
    readingdb.db_close(rdb)
Beispiel #13
0
        import_map = [(x, x) for x in xrange(start, int(opts.maxid))]
    else:
        # Map-file format: "to_id from_id [from_id ...]"; '#' starts a comment.
        import_map = []
        with open(opts.mapfile, "r") as fp:
            for line in fp.readlines():
                # Strip trailing comments, then split ids on whitespace.
                # NOTE(review): non-raw '\#' relies on Python passing the
                # unknown escape through; a raw string would be cleaner.
                line = re.sub('\#.*$', '', line)
                ids = re.split('[ \t]+', line)
                for id in ids[1:]:
                    import_map.append((int(ids[0]), int(id)))

    # Copy each source stream (on db0) into its destination id (on db1).
    for to_stream, from_stream in import_map:
        print "starting %i <- %i" % (to_stream, from_stream) 
        first = True
        vec = [(IMPORT_START,)]
        t = tic()
        # Single bulk query over the whole import window (limit effectively
        # unbounded); db_query returns a list of per-stream result arrays.
        data = rdb4.db_query(from_stream, IMPORT_START, IMPORT_STOP, limit=100000000, conn=db0)
        if not len(data): continue
        data = data[0]
        print "received", data.shape
        toc(t)

        t = tic()
        if opts.noop: continue
        # Re-insert in chunks of 100 readings per db_add call; a short
        # final chunk means the stream is exhausted.
        bound = (int(data.shape[0]) / 100) + 1
        for i in xrange(0, bound):
            vec = (data[(i*100):(i*100) + 100, :]).tolist()
            # print time.ctime(vec[0][0])
            rdb4.db_add(db1, to_stream, map(tuple, vec))
            if len(vec) < 100: break
        print "inserted", to_stream
        toc(t)
Beispiel #14
0
# Total readings written to substream 1: 1000 buckets x 100 readings each.
S1MAX = 1000 * 100
# Command-line driver; assumes `sys`, `rdb`, and an open `db` connection
# are set up earlier in the file (not visible in this excerpt).
if len(sys.argv) == 1:
    # No mode given: print usage.
    print "%s [-a | -r | -n | -d | -c]" % sys.argv[0]
elif sys.argv[1] == '-a':
    # substream 1 has every bucket filled
    for i in range(0, 1000):
        data = [(x, x, x) for x in xrange(i * 100, i * 100 + 100)]
        rdb.db_add(db, 1, data)

    # substream 2 has points one hour apart
    for i in range(0, 10000):
        rdb.db_add(db, 2, [(i * 3600, 0, i * 3600)])
elif sys.argv[1] == '-r':
    # test that we read back what we wrote
    # db_query returns a list of per-stream result arrays; expect one.
    d = rdb.db_query(1, 0, 10000)
    assert len(d) == 1
    d = d[0].tolist()
    assert len(d) == 10000

    # Each reading's timestamp and value equal its index.
    for i in xrange(0, 10000):
        assert d[i][0] == i
        assert d[i][1] == i

    d = rdb.db_query(2, 0, 3600 * 10000)
    assert len(d) == 1
    d = d[0].tolist()
    print d[0:10]
    for i in xrange(0, 10000):
        # Substream 2 points were written one hour (3600 s) apart.
        assert d[i][0] == i * 3600
# The '-d' handler is truncated at this point in the excerpt.
elif sys.argv[1] == '-d':