Code example #1
 def __init__(self, host='0.0.0.0', port=3000, client=0):
     self.channel = grpc.insecure_channel('%s:%d' % (host, port))
     self.stub = datastore_pb2_grpc.DatastoreStub(self.channel)
     self.db = rocksdb.DB("client" + str(client) + ".db",
                          rocksdb.Options(create_if_missing=True))
Code example #2
 def test_allow_concurrent_memtable_write(self):
     opts = rocksdb.Options()
     self.assertEqual(opts.allow_concurrent_memtable_write, True)
     opts.allow_concurrent_memtable_write = False
     self.assertEqual(opts.allow_concurrent_memtable_write, False)
Code example #3
 def setUp(self):
     opts = rocksdb.Options(create_if_missing=True)
     self._clean()
     self.db = rocksdb.DB("/tmp/test", opts)
Code example #4
File: test_db.py Project: zuarbase/python-rocksdb
 def setUp(self):
     opts = rocksdb.Options(create_if_missing=True)
     opts.prefix_extractor = StaticPrefix()
     self._clean()
     self.db = rocksdb.DB('/tmp/test', opts)
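
StaticPrefix is referenced above but not shown. A minimal sketch of such a prefix extractor, following the SliceTransform interface documented for python-rocksdb (the fixed prefix length of 5 bytes is an assumption):

class StaticPrefix(rocksdb.interfaces.SliceTransform):
    def name(self):
        return b'static'

    def transform(self, src):
        # Use the first five bytes of every key as its prefix
        return (0, 5)

    def in_domain(self, src):
        # Only keys of at least five bytes have a prefix
        return len(src) >= 5

    def in_range(self, dst):
        return len(dst) == 5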
Code example #5
 def test_row_cache(self):
     opts = rocksdb.Options()
     self.assertIsNone(opts.row_cache)
     opts.row_cache = cache = rocksdb.LRUCache(2 * 1024 * 1024)
     self.assertEqual(cache, opts.row_cache)
Code example #6
 def __init__(self, host='0.0.0.0', port=PORT):
     self.channel = grpc.insecure_channel('%s:%d' % (host, port))
     self.stub = replicator_pb2_grpc.ReplicatorStub(self.channel)
     self.db = rocksdb.DB("client-{}.db".format(CLIENT_ID),
                          rocksdb.Options(create_if_missing=True))
Code example #7
File: test_db.py Project: zuarbase/python-rocksdb
 def test_unicode_path(self):
     name = b'/tmp/M\xc3\xbcnchen'.decode('utf8')
     rocksdb.DB(name, rocksdb.Options(create_if_missing=True))
     self.addCleanup(shutil.rmtree, name)
     self.assertTrue(os.path.isdir(name))
Code example #8
 def __init__(self, path='./storage.db', with_index=True):
     super().__init__(with_index=with_index)
     self._db = rocksdb.DB(path, rocksdb.Options(create_if_missing=True))
Code example #9
def generate_csv(BLOCK_PATH, INDEX_PATH, start):
    """
    Processes a chunk of Bitcoin blocks and returns the values that will be written into the csv files

    :param BLOCK_PATH:  str, the path to the Bitcoin blocks
    :param INDEX_PATH:  str, the path to the LevelDB Bitcoin index
    :param start:       int, the block height to start at
    :return:            tuple, a tuple of lists; each entry in a list corresponds to one row in the csv file
    """

    # Connect to the Transaction Output Database. No workarounds are required, as RocksDB natively supports concurrent reads.
    opts = rocksdb.Options()
    db = rocksdb.DB(DB_PATH, opts, read_only=True)

    # Load Blockchain, ignore Read Locks imposed by other instances of the process
    blockchain = Blockchain(BLOCK_PATH, ignoreLocks=True)
    blockchain = blockchain.get_ordered_blocks(INDEX_PATH,
                                               start=start,
                                               end=start + 1000)

    # Create output lists
    address_data = []
    blocks_data = []
    transaction_data = []
    before_data = []
    belongs_data = []
    receives_data = []
    sends_data = []

    for block in blockchain:
        # Get Block parameters
        block_height = block.height
        block_hash = block.hash
        block_timestamp = block.header.timestamp.strftime('%Y-%m-%dT%H:%M')
        block_date = block.header.timestamp.strftime('%Y-%m-%d')
        previous_block_hash = block.header.previous_block_hash

        # Append block data to lists. Note: List of lists, as the csv writer will interpret each list
        # as a new row in the file.
        blocks_data.append([block_hash, block_height, block_timestamp])
        before_data.append([previous_block_hash, block_hash, 'PRECEDES'])
        for tx in block.transactions:
            tx_id = tx.txid
            # Initialize summing variables
            inSum = 0
            outSum = 0
            inDegree = 0
            for o in range(len(tx.outputs)):
                try:
                    addr = tx.outputs[o].addresses[0].address
                    val = tx.outputs[o].value
                    outSum += val
                    receives_data.append([tx_id, val, o, addr, 'RECEIVES'])
                    address_data.append([addr])
                # Some transactions contain irregular outputs (Spam, Attacks on Bitcoin,...). These will be ignored.
                except Exception:
                    val = tx.outputs[o].value
                    outSum += val
            tx_in = tx.inputs
            # Coinbase transactions (newly generated coins) have no sending address. So there's no need to look it up.
            if not tx.is_coinbase():
                # Iterate over all transaction inputs
                for i in tx_in:
                    inDegree += 1
                    # Get hash of the transaction the coins have been last spent in
                    in_hash = i.transaction_hash
                    # Get the index of the transaction output the coins have been last spent in
                    in_index = i.transaction_index
                    try:
                        # Retrieve last spending transaction from database
                        in_transaction = pickle.loads(db.get(in_hash.encode()))
                        # Get value and receiving address of last transaction (i.e. spending address in this tx)
                        in_value = in_transaction[in_index][0]
                        in_address = in_transaction[in_index][1]
                        # Append data to return list
                        sends_data.append(
                            [in_address, in_value, tx_id, 'SENDS'])
                        inSum += in_value
                    # Catch exceptions that might occur when dealing with certain kinds of ominous transactions.
                    # This is very rare and should not break everything.
                    except Exception as e:
                        print(e)
                        continue
                    del in_transaction, in_address, in_value, in_hash, in_index
            else:
                # Simplified parsing for coinbase transactions
                sends = [[
                    "coinbase",
                    sum(map(lambda x: x.value, tx.outputs)), tx_id, 'SENDS'
                ]]
                inSum = sends[0][1]
                inDegree = 1

            # In-degree is the number of sending addresses, out-degree the number of tx outputs
            outDegree = len(tx.outputs)

            transaction_data.append([
                tx_id,
                str(block_date)[0:10], inDegree, outDegree, inSum, outSum
            ])
            belongs_data.append([tx_id, block_hash, 'BELONGS_TO'])

    # Return Lists

    return (address_data, blocks_data, transaction_data, before_data,
            belongs_data, receives_data, sends_data)
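
Each list returned by generate_csv corresponds to one csv file, with each inner list forming one row. A sketch of how the result might be written out with the standard csv module (the output file names are assumptions, not part of the original project):

import csv

rows = generate_csv(BLOCK_PATH, INDEX_PATH, start=0)
names = ['addresses', 'blocks', 'transactions', 'before',
         'belongs', 'receives', 'sends']
for name, data in zip(names, rows):
    with open(name + '.csv', 'w', newline='') as f:
        # writerows treats each inner list as one csv row
        csv.writer(f).writerows(data)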
Code example #10
def get_columns(db_loc, name):
    cols = rocksdb.list_column_families(
        os.path.join(db_loc, name),
        rocksdb.Options(),
    )
    return cols
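
list_column_families inspects a database on disk without opening it and returns the column family names as bytes. A short usage sketch (the path, name, and resulting families are hypothetical):

cols = get_columns('/var/data', 'mydb')
print(cols)  # e.g. [b'default', b'index']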
Code example #11
def test_open_skiplist_memtable_factory():
    clean_db()
    opts = rocksdb.Options()
    opts.memtable_factory = rocksdb.SkipListMemtableFactory()
    opts.create_if_missing = True
    test_db = rocksdb.DB("/tmp/test", opts)
Code example #12
def get(scriptid):
    db = rocksdb.DB("assign1.db", rocksdb.Options(create_if_missing=True))
    filename = db.get(scriptid.encode()).decode()
    r1 = str(os.path.join(UPLOAD_FOLDER, filename))
    resp = subprocess.check_output(['python3.6', r1])
    return resp, 200
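
db.get returns None for a missing key, so the .decode() call above raises AttributeError for unknown ids. A defensive variant (the 404 response is an assumption):

def get(scriptid):
    db = rocksdb.DB("assign1.db", rocksdb.Options(create_if_missing=True))
    value = db.get(scriptid.encode())
    if value is None:
        return 'script not found', 404
    r1 = str(os.path.join(UPLOAD_FOLDER, value.decode()))
    return subprocess.check_output(['python3.6', r1]), 200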
Code example #13
 def __init__(self, dbname, model=None):
     self.dbname = dbname
     self.model = model
     self.opts = rocksdb.Options()
     self.db = None
Code example #14
File: server.py Project: hchen0402/cmpe273
 def __init__(self):
     self.db = rocksdb.DB("assignment2_master.db", rocksdb.Options(create_if_missing=True))
Code example #15
File: readDB.py Project: manhnvan/place_crawling
import rocksdb
import json
import ast
from pymongo import MongoClient

if __name__ == '__main__':

    opts = rocksdb.Options()
    opts.create_if_missing = True
    opts.compaction_pri = rocksdb.CompactionPri.by_compensated_size
    opts.compression = rocksdb.CompressionType.no_compression
    opts.compaction_style = 'level'
    db = rocksdb.DB('/home/micheal/data/datawarehouse.hot', opts)

    client = MongoClient(
        'mongodb+srv://admin:[email protected]/myFirstDatabase?retryWrites=true&w=majority'
    )

    mongodb = client['datawarehouse']

    place_model = mongodb.places

    it = db.itervalues()
    it.seek_to_first()
    for item in list(it):

        try:
            data = ast.literal_eval(item.decode('utf-8'))
            doc = {
                "place_name": data['place_name'],
                "place_info_url": data['place_info_url'],
            }
            # The original snippet is truncated here; a plausible completion
            # (assumption) is to insert the document into the places collection:
            place_model.insert_one(doc)
        except (ValueError, SyntaxError, KeyError):
            # Skip records that fail to parse or lack the expected fields
            continue
Code example #16
 def __init__(self, host='0.0.0.0', port=PORT):
     self.db = rocksdb.DB("replicator_db.db",
                          rocksdb.Options(create_if_missing=True))
     self.channel = grpc.insecure_channel('%s:%d' % (host, port))
     self.stub = datastore_pb2.ReplicatorStub(self.channel)
Code example #17
# Creates a DB called inverted_index and adds all the key-value mappings from the inverted_index.txt file
import rocksdb

db = rocksdb.DB("inverted_index.db", rocksdb.Options(create_if_missing=True))

filepath = 'inverted_index.txt'
batch = rocksdb.WriteBatch()
with open(filepath) as fp:
    # Process every line, including the first, and stop at EOF
    line = fp.readline()
    while line:
        # Turn "(key,value)" into key and value, splitting at the first comma
        strs = line.strip("\n")
        strs = strs.strip("()")
        strs = strs.split(',', 1)
        batch.put(bytes(strs[0], 'utf-8'), bytes(strs[-1], 'utf-8'))
        line = fp.readline()

    print("*************** Writing ***************")
    db.write(batch)

    print("**************** Print content ****************")
    it = db.iteritems()
    it.seek_to_first()

    print(list(it))
Code example #18
 def __init__(self):
     print("INIT FROM DB")
     self.db = rocksdb.DB("db/script.db", rocksdb.Options(create_if_missing=True))
Code example #19
File: test_db.py Project: zuarbase/python-rocksdb
 def setUp(self):
     opts = rocksdb.Options()
     opts.create_if_missing = True
     opts.merge_operator = AssocCounter()
     self._clean()
     self.db = rocksdb.DB('/tmp/test', opts)
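
AssocCounter is defined elsewhere in the test suite. A plausible implementation, modeled on the AssociativeMergeOperator example in the python-rocksdb documentation (an integer counter kept per key):

class AssocCounter(rocksdb.interfaces.AssociativeMergeOperator):
    def merge(self, key, existing_value, value):
        # Merge by summing the decoded integers; on the first merge
        # there is no existing value yet
        if existing_value:
            s = int(existing_value) + int(value)
            return (True, str(s).encode('ascii'))
        return (True, value)

    def name(self):
        return b'AssocCounter'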
Code example #20
 def create_tmp_rocksdb_db(self) -> 'rocksdb.DB':
     import rocksdb
     directory = tempfile.mkdtemp()
     self.tmpdirs.append(directory)
     options = rocksdb.Options(create_if_missing=True, error_if_exists=True)
     return rocksdb.DB(directory, options)
Code example #21
File: test_db.py Project: zuarbase/python-rocksdb
 def setUp(self):
     opts = rocksdb.Options()
     opts.create_if_missing = True
     opts.comparator = SimpleComparator()
     self._clean()
     self.db = rocksdb.DB('/tmp/test', opts)
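
SimpleComparator (like the IntegerComparator in code example #27) is also defined elsewhere. A minimal sketch of a custom comparator, following python-rocksdb's Comparator interface (comparing keys as integers is an assumption):

class SimpleComparator(rocksdb.interfaces.Comparator):
    def compare(self, a, b):
        a, b = int(a), int(b)
        if a < b:
            return -1
        if a > b:
            return 1
        return 0

    def name(self):
        # The comparator name is stored in the database; an existing
        # database can only be reopened with a comparator of the same name
        return b'simple'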
Code example #22
File: assignment1.py Project: avnigulati/CMPE-273
def get_file(script_id):
    db = rocksdb.DB("assignment1.db", rocksdb.Options(create_if_missing=True))
    foo_id = db.get(script_id.encode('utf-8'))
    response = subprocess.check_output(["python3.6", "-c", foo_id])
    return response
Code example #23
    def test_table_factory(self):
        opts = rocksdb.Options()
        self.assertIsNone(opts.table_factory)

        opts.table_factory = rocksdb.BlockBasedTableFactory()
        opts.table_factory = rocksdb.PlainTableFactory()
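
BlockBasedTableFactory also accepts tuning parameters. A configuration sketch based on the python-rocksdb tutorial (the bloom filter bits and cache sizes are illustrative values):

opts = rocksdb.Options(create_if_missing=True)
opts.table_factory = rocksdb.BlockBasedTableFactory(
    filter_policy=rocksdb.BloomFilterPolicy(10),
    block_cache=rocksdb.LRUCache(2 * (1024 ** 3)),
    block_cache_compressed=rocksdb.LRUCache(500 * (1024 ** 2)))
db = rocksdb.DB('/tmp/test', opts)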
Code example #24
'''
################################## slave.py #############################
# 
################################## slave.py #############################
'''
import grpc
import replicator_pb2
import argparse
import rocksdb 

PORT = 3000

slavedb = rocksdb.DB("slavedb1.db", rocksdb.Options(create_if_missing=True))

class Slave():
    
    def __init__(self, host='0.0.0.0', port=PORT):
        self.channel = grpc.insecure_channel('%s:%d' % (host, port))
        self.stub = replicator_pb2.ReplicatorStub(self.channel)
        
    def run(self):
        
        action = self.stub.slaveConnector(replicator_pb2.SlaveRequest())
        for a in action:
            if a.action == 'put':
                print("# Put {} : {} to slave db".format(a.key, a.value))
                #inserting data into slave db
                slavedb.put(a.key.encode(), a.value.encode())
                print("# Successfully added data to slavedb")
                
                # fetch value from slave db to check (the original snippet is
                # truncated here; this line is a plausible completion)
                print(slavedb.get(a.key.encode()))
Code example #25
 def test_enable_write_thread_adaptive_yield(self):
     opts = rocksdb.Options()
     self.assertEqual(opts.enable_write_thread_adaptive_yield, True)
     opts.enable_write_thread_adaptive_yield = False
     self.assertEqual(opts.enable_write_thread_adaptive_yield, False)
Code example #26
 def __init__(self):
     self.db = rocksdb.DB("lab1.db", rocksdb.Options(create_if_missing=True))
Code example #27
 def open(self):
     opts = rocksdb.Options()
     opts.create_if_missing = True
     opts.comparator = IntegerComparator()
     self._db = rocksdb.DB(self._db_path, opts)
Code example #28
def _load_rocksdb_with_embeddings(_db, _size):
    for _ in range(_size):
        _db.put(str.encode(str(uuid.uuid1())), _random_embedding().tobytes())


def _import_df_2_rocksdb(_db, _df):
    for index, row in _df.iterrows():
        upc_number = row['upc_number']
        embedding = row['embedding']

        _db.put(str.encode(str(upc_number)), embedding.tobytes())


if __name__ == "__main__":
    _db = rocksdb.DB("test.db", rocksdb.Options(create_if_missing=True))
    # _df = _read_feather_with_embeddings()

    _load_rocksdb_with_embeddings(_db, 100000)
    # _import_df_2_rocksdb(_db, _df)

    # 1154613188
    # logger.debug(np.frombuffer(_db.get(str.encode("999221"))))

    it = _db.iterkeys()
    it.seek_to_first()
    all_keys = list(it)
    # # logger.debug(all_keys)
    logger.debug(len(all_keys))

    logger.debug(all_keys[234])
Code example #29
 def setUp(self):
     opts = rocksdb.Options()
     opts.create_if_missing = True
     opts.merge_operator = StringAppendOperator()
     self._clean()
     self.db = rocksdb.DB('/tmp/test', opts)
Code example #30
def upload_file():
    f = request.files['data']
    db = rocksdb.DB("mydb.db", rocksdb.Options(create_if_missing=True))
    key = uuid.uuid4().hex
    # f.stream.read() already returns bytes, so no .encode() is needed
    db.put(key.encode('utf-8'), f.stream.read())
    return jsonify(scriptid=key), 201