def __init__(self, fp, dataDir, settings, chain):
        """Initialise the stake pool controller.

        fp       -- open log file handle used by logmt()
        dataDir  -- pool data directory (holds 'stakepooldb' and 'poolDebug')
        settings -- parsed settings dict for this pool
        chain    -- chain name; 'mainnet' or a test chain
        """
        self.is_running = True
        self.fail_code = 0

        self.fp = fp
        self.dataDir = dataDir
        self.settings = settings
        self.core_version = None  # Set during start()
        self.daemon_running = False

        self.blockBuffer = 100  # Work n blocks from the tip to avoid forks, should be > COINBASE_MATURITY

        self.mode = settings.get('mode', 'master')
        self.binDir = os.path.expanduser(settings['ghostbindir'])
        self.ghostDataDir = os.path.expanduser(settings['ghostdatadir'])
        self.chain = chain
        self.debug = settings.get('debug', DEBUG)

        # Bech32 human-readable prefix for pool stake addresses.
        self.poolAddrHrp = 'gcs' if self.chain == 'mainnet' else 'tpcs'

        self.poolAddr = settings['pooladdress']
        self.poolAddrReward = settings['rewardaddress']

        self.poolHeight = settings.get('startheight', 0)

        self.maxOutputsPerTx = settings.get('maxoutputspertx', 48)

        # Default parameters
        self.poolFeePercent = 2
        self.stakeBonusPercent = 5

        self.payoutThreshold = int(0.5 * COIN)
        self.minBlocksBetweenPayments = 100  # Minimum number of blocks between payment runs

        self.minOutputValue = int(
            0.1 * COIN
        )  # Ignore any outputs of lower value when accumulating rewards
        self.tx_fee_per_kb = None
        self.smsg_fee_rate_target = None

        self.dbPath = os.path.join(dataDir, 'stakepooldb')

        # Open (or create) the pool DB; an existing 'current_height' record
        # means this is a restart and we resume from the stored height.
        db = plyvel.DB(self.dbPath, create_if_missing=True)
        n = db.get(bytes([DBT_DATA]) + b'current_height')
        if n is None:
            logmt(self.fp, 'First run\n')
            db.put(
                bytes([DBT_DATA]) + b'db_version',
                struct.pack('>i', CURRENT_DB_VERSION))
        else:
            self.poolHeight = struct.unpack('>i', n)[0]

        self.lastHeightParametersSet = -1
        self.setParameters(self.poolHeight)

        # Subscribe to 'hashblock' notifications from the daemon over ZMQ.
        self.zmqContext = zmq.Context()
        self.zmqSubscriber = self.zmqContext.socket(zmq.SUB)

        self.zmqSubscriber.connect(self.settings['zmqhost'] + ':' +
                                   str(self.settings['zmqport']))
        self.zmqSubscriber.setsockopt_string(zmq.SUBSCRIBE, 'hashblock')

        # The debug CSV header is written only when the debug dir is first
        # created; an existing dir is reused as-is.
        self.debugDir = os.path.join(dataDir, 'poolDebug')
        if self.debug and not os.path.exists(self.debugDir):
            os.makedirs(self.debugDir)
            with open(os.path.join(self.debugDir, 'pool.csv'), 'a') as fp:
                fp.write(
                    'height,blockReward,blockOutput,poolReward,poolRewardTotal,poolCoinTotal,Disbursed,fees,totalFees\n'
                )

        if self.mode == 'master':
            # Owner-withdrawal settings are optional; any error here leaves
            # have_withdrawal_info False and withdrawals disabled.
            try:
                self.min_blocks_between_withdrawals = self.settings[
                    'poolownerwithdrawal']['frequency']
                assert (self.min_blocks_between_withdrawals > self.blockBuffer)
                self.owner_withdrawal_addr = self.settings[
                    'poolownerwithdrawal']['address']
                assert (self.settings['poolownerwithdrawal']['reserve'] >=
                        0.005)
                assert (self.settings['poolownerwithdrawal']['threshold'] >=
                        0.0)
                self.have_withdrawal_info = True
            except Exception:
                traceback.print_exc()
                self.have_withdrawal_info = False

            # If pool was synced in observer mode 'pool_fees_detected' may be higher than 'pool_fees'
            # 'pool_fees_detected' is tracked at chain tip - buffer, while 'pool_fees' is tracked as the pool makes transactions
            n = db.get(bytes([DBT_DATA]) + b'pool_fees_detected')
            pool_fees_detected = 0 if n is None else int.from_bytes(n, 'big')

            dbkey = bytes([DBT_DATA]) + b'pool_fees'
            n = db.get(dbkey)
            pool_fees = 0 if n is None else int.from_bytes(n, 'big')

            if pool_fees_detected > pool_fees:
                logmt(
                    self.fp,
                    'Replacing pool_fees with pool_fees_detected: %s, %s' %
                    (format8(pool_fees), format8(pool_fees_detected)))
                db.put(dbkey, pool_fees_detected.to_bytes(8, 'big'))
        else:
            self.have_withdrawal_info = False

        # Pool/reward addresses are pinned in the DB on first run; after that
        # the stored values take precedence over the settings file.
        addr = db.get(bytes([DBT_DATA]) + b'pool_addr')
        if addr is not None:
            self.poolAddr = bech32Encode(self.poolAddrHrp, addr)
        else:
            db.put(
                bytes([DBT_DATA]) + b'pool_addr',
                bech32Decode(self.poolAddrHrp, self.poolAddr))

        addr = db.get(bytes([DBT_DATA]) + b'reward_addr')
        if addr is not None:
            self.poolAddrReward = encodeAddress(addr)
        else:
            db.put(
                bytes([DBT_DATA]) + b'reward_addr',
                decodeAddress(self.poolAddrReward))

        n = db.get(bytes([DBT_DATA]) + b'db_version')
        self.db_version = 0 if n is None else struct.unpack('>i', n)[0]
        db.close()

        # Wait for daemon to start
        # (polls up to ~5s; if the cookie never appears the open() below
        # raises FileNotFoundError)
        authcookiepath = os.path.join(
            self.ghostDataDir, '' if self.chain == 'mainnet' else self.chain,
            '.cookie')
        for i in range(10):
            if not os.path.exists(authcookiepath):
                time.sleep(0.5)
        with open(authcookiepath) as fp:
            self.rpc_auth = fp.read()

        # Todo: Read rpc port from .conf file
        self.rpc_port = settings.get(
            'rpcport', 51725 if self.chain == 'mainnet' else 51925)
示例#2
0
#!/usr/bin/python3

import plyvel
import json
import sys
from os import path

# Validate command-line arguments before consuming stdin, so a usage error
# is reported even when nothing is piped in (the old order read stdin first).
if len(sys.argv) < 3:
    sys.stderr.write("configure.py chrome-dir ext-id")
    sys.exit(1)

chrome_dir = sys.argv[1]
ext_id = sys.argv[2]

# JSON object of settings to install, read from stdin.
data = json.load(sys.stdin)

# Write every key/value pair into the extension's LevelDB settings store;
# close the DB even if a put fails part-way.
db_path = path.join(chrome_dir, "Default/Local Extension Settings", ext_id)
db = plyvel.DB(db_path, create_if_missing=True)
try:
    for k, v in data.items():
        db.put(bytes(k, encoding='utf-8'), bytes(v, encoding='utf-8'))
finally:
    db.close()
def num2Bytes(n):
    """Serialise integer *n* as exactly 8 big-endian bytes."""
    return int.to_bytes(n, 8, "big")


def addressFromHex(hex_str):
    """Build a tron Address object from its hex representation."""
    address = tronapi.common.account.Address()
    return address.from_hex(hex_str)


def addressFromBytes(addr):
    # NOTE(review): known to fail on raw (non-text) address bytes with
    # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xb6 in position 3 --
    # per the original author's note; bytes.decode() assumes b2hs() returned
    # UTF-8 text, which raw address bytes are not.
    return tronapi.common.account.Address().from_hex(bytes.decode(b2hs(addr)))


# db = plyvel.DB("/data2/20210425/output-directory/database/trans")
# Open the tron block-index and block stores (read/write default).
blockIndexDB = plyvel.DB(
    "/data2/20210425/output-directory/database/block-index")
blockDB = plyvel.DB("/data2/20210425/output-directory/database/block")

# Peek at the first key/value of the block index.
# b2l() is defined elsewhere in this script -- presumably bytes -> int.
blockIndexIT = blockIndexDB.iterator()

k, v = next(blockIndexIT)
k = b2l(k)
print(k)
vs = binascii.hexlify(v)
print(vs)

# And the last one, via a reversed iterator.
blockIndexITrev = blockIndexDB.iterator(reverse=True)

k, v = next(blockIndexITrev)
k = b2l(k)
print(k)
示例#4
0
 def post_init(self):
     """Open (creating if needed) the LevelDB store at self.data_path."""
     import plyvel
     self._db = plyvel.DB(self.data_path, create_if_missing=True)
示例#5
0
 def open(self):
     """Open the instance's LevelDB directory, creating it if missing."""
     # NOTE(review): the old "check if pickle exists" comment did not match
     # the code -- only the LevelDB store is opened here.
     self.db = plyvel.DB(self.name + '/leveldb/', create_if_missing=True) 
示例#6
0
def _init_db(db):
    """Open and return the LevelDB database at path *db* (made absolute)."""
    return plyvel.DB(os.path.abspath(db))
    def writelines(self, datas):
        """Write *datas* to the wrapped stream and flush immediately."""
        stream = self.stream
        stream.writelines(datas)
        stream.flush()

    def __getattr__(self, attr):
        """Delegate unknown attribute lookups to the wrapped stream."""
        stream = self.stream
        return getattr(stream, attr)


import sys
# Wrap stdout so every write is flushed immediately.
sys.stdout = Unbuffered(sys.stdout)

# ======== DO NOT TOUCH ==================

# Single-line (danju) and couplet (shuangju) gzip-JSON corpora.
level_dan = plyvel.DB("./danju-gzipJson.ldb",
                      create_if_missing=True,
                      compression=None)
level_shuang = plyvel.DB("./shuangju-gzipJson.ldb",
                         create_if_missing=True,
                         compression=None)

Data = glob.glob("./json-zhcn/poet.song.*.json") + glob.glob(
    "./json-zhcn/poet.tang.*.json")

#======= build the character sets first, then turn them into regexes ==========

Comma = ",,。、!!??"  # delimiters: commas / enumeration comma / full stop / exclamation and question marks
NotComma = "".join(
    list(set(zhon.hanzi.punctuation + string.punctuation) - set(Comma)))

Comma = "[%s]" % (re.escape(Comma))
示例#8
0
    with levelDBMapLock:
        if index not in levelDBMap:
            db = plyvel.DB(index, compression=None)
            blockIndexes = [
                DBBlockIndex(format_hash(k[1:]), v) for k, v in db.iterator()
                if k[0] == ord('b')
            ]
            db.close()
            blockIndexes.sort(key=lambda x: x.height)
            levelDBMap[index] = blockIndexes
        return levelDBMap[index]


# Monkey-patch the blockchain module to use the cached index loader above.
blockchain.__getBlockIndexes = loadIndex

# txid -> block lookup store; must already exist (create_if_missing=False).
pldb = plyvel.DB('/blockchain/tx_to_block/', create_if_missing=False)
pldbLock = threading.Lock()

app.logger.debug("OK")


class IllegalState(Exception):
    """Application-defined error; raised elsewhere in this service."""
    pass


@app.before_request
def before_request():
    """Stamp request start time; g.request_time() formats elapsed seconds."""
    g.request_start_time = time.time()
    g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)

示例#9
0
def LevelDBAggregator(manager_params, status_queue, batch_size=100):
    """Receive <key, value> pairs from other processes and write them to the
    central LevelDB database.

    Executes until a message arrives on <status_queue> (the TaskManager's
    KILL signal), then drains outstanding work and shuts down gracefully.
    This process should never be terminated un-gracefully.

    <manager_params> TaskManager configuration parameters
    <status_queue>   queue connected to the TaskManager, used for control
    <batch_size>     size of the write batch

    NOTE(review): `counter` and `commit_time` are plain ints here;
    process_script/drain_queue receive copies and cannot update them in this
    scope, so the size- and time-triggered commits below appear to never
    fire -- confirm against process_script's actual interface.
    """

    # sets up logging connection
    logger = loggingclient(*manager_params['logger_address'])

    # sets up the serversocket to start accepting connections
    sock = serversocket()
    status_queue.put(sock.sock.getsockname())  # let TM know location
    sock.start_accepting()

    # sets up DB connection
    db_path = os.path.join(manager_params['data_directory'], 'javascript.ldb')
    db = plyvel.DB(db_path,
            create_if_missing = True,
            lru_cache_size = 10**9,
            write_buffer_size = 128*10**4,
            compression = None)
    batch = db.write_batch()

    counter = 0  # number of executions made since last write
    commit_time = 0  # keep track of time since last write
    while True:
        # received KILL command from TaskManager
        if not status_queue.empty():
            status_queue.get()
            sock.close()
            drain_queue(sock.queue, batch, db, counter, logger)
            break

        # no command for now -> sleep to avoid pegging CPU on blocking get
        if sock.queue.empty():
            time.sleep(0.1)

            # commit every five seconds to avoid blocking the db for too long
            if counter > 0 and time.time() - commit_time > 5:
                batch.write()
                batch = db.write_batch()
            continue

        # process record
        script = sock.queue.get()
        process_script(script, batch, db, counter, logger)

        # batch commit if necessary
        if counter >= batch_size:
            counter = 0
            commit_time = time.time()
            batch.write()
            batch = db.write_batch()

    # finishes work and gracefully stops
    batch.write()
    db.close()
                        help='how number of url this program will scrape')
    parser.add_argument('--mode', help='you can specify mode...')
    parser.add_argument('--refresh', help='create snapshot(true|false)')
    parser.add_argument('--file', help='input filespath')
    parser.add_argument('--active', help='spcific active thread number')

    args_obj = vars(parser.parse_args())
    depth = (lambda x: int(x) if x else 10)(args_obj.get('depth'))
    mode = (lambda x: x if x else 'undefined')(args_obj.get('mode'))
    refresh = (lambda x: False
               if x == 'false' else True)(args_obj.get('refresh'))
    active = (lambda x: 15 if x == None else int(x))(args_obj.get('active'))
    filename = args_obj.get('file')

    if mode == 'scrape' or mode == 'level':
        db = plyvel.DB('pixiv_htmls', create_if_missing=True)
        """
    SnapshotDealデーモンが出力するデータをもとにスクレイピングを行う
    NOTE: SQLの全アクセスは非常に動作コストが高く、推奨されない
    NOTE: Snapshotが何もない場合、initialize_parse_and_map_data_to_local_dbを呼び出して初期化を行う
    """
        seed = "http://www.pixiv.net/member_illust.php?mode=medium&illust_id=60675452"

        def save_links(links):
            """ 一応 pickleで保存 """
            open("links.pkl", "wb").write(pickle.dumps(links))

        try:
            links = pickle.loads(open("links.pkl", "rb").read())
            links.add(seed)
        except FileNotFoundError as e:
示例#11
0
 def __init__(self):
     """Create a throw-away DB in a temp directory (removed with the object)."""
     self.tmpdir = tempfile.TemporaryDirectory()
     self.db = DB_ENGINE.DB(self.tmpdir.name, create_if_missing=True)
示例#12
0
#!/usr/bin/env python3
from datetime import datetime
import plyvel

print('Recovery Start : ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# Opening the DB is the whole job -- presumably this relies on LevelDB's
# open-time recovery of half-written log/manifest files; the handle is
# deliberately discarded.  TODO confirm.
plyvel.DB('./.storage/db_icon_dex',
          create_if_missing=True,
          max_open_files=1024)
print('Recovery End : ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
示例#13
0
def main():
    """Print the value stored under the key given as the first CLI argument."""
    my_DataBese = plyvel.DB('test.ldb', create_if_missing=True)
    # Bug fix: sys.argc does not exist in Python -- the argument vector is
    # sys.argv.  print() with parentheses also parses on both Python 2 and 3.
    print(my_DataBese.get(sys.argv[1]))
示例#14
0
import json
import gzip
import plyvel
import pickle

my_db = plyvel.DB('database', create_if_missing = True)
# Bug fix: gzip.open()'s third positional parameter is compresslevel, not an
# encoding -- 'utf-8' was being passed (and silently ignored) there.  Open in
# binary mode and keep the explicit per-line decode below.
with gzip.open('artist.json.gz', 'rb') as f:
    for line in f:
        obj = json.loads(line.decode('utf-8'))
        name = obj['name']
        # Fall back to '無い' ("none") when the record has no area field.
        area = obj.get('area', '無い')

        my_db.put(pickle.dumps(name), pickle.dumps(area))
my_db.close()
示例#15
0
 def make_db(path: str, create_if_missing: bool = True) -> plyvel.DB:
     """Open (optionally creating) a LevelDB database at *path*.

     The directory is created with exist_ok=True, which avoids the
     check-then-create race of the previous os.path.exists() test.
     """
     os.makedirs(path, exist_ok=True)
     return plyvel.DB(path, create_if_missing=create_if_missing)
示例#16
0
def initialize():
    """Open the user's LevelDB store, creating it on first use."""
    return plyvel.DB('/home/jiapeng/leveldb', create_if_missing=True)
示例#17
0
LOG = []     # The log as a list of dictionaries
AUTHORS = [] # The list of author names
DATES = []   # The first days the authors commit
FONT = ImageFont.truetype('/usr/share/fonts/truetype/ubuntu-font-family/Ubuntu-M.ttf', 12)
LONGEST_NAME = 0
LONGEST_DATE = 0
NUM_WEEKS = 0
FIRST_DATE = None
LAST_DATE = None

def find_week(date):
    # Week index of *date* relative to FIRST_DATE (set in the main block).
    return int((date - FIRST_DATE).days / 7)

if __name__ == '__main__':
    commitdb = plyvel.DB('../data/commits/', create_if_missing=True)
    orgs = commitdb.prefixed_db(b'zalando')
    project = orgs.prefixed_db(b'nakadi')

    # Data preparation
    for date, commit_data in project:
        commit = json.loads(commit_data)

        # Convert the date to a datetime obj
        dt = parser.parse(date)
        if not FIRST_DATE: FIRST_DATE = dt
        LAST_DATE = dt

        # Check if we know the name
        # Check if this is the longest name
        if not commit['commit']['author']['name'] in AUTHORS:
示例#18
0
from core import core, transmission
from storage import config
import plyvel
import json
import os

# Chain store: path from $DB_PATH_OVERRIDE or the config file.
db = plyvel.DB(os.getenv('DB_PATH_OVERRIDE', config.get('database', 'path')), create_if_missing=True)

def get_head():
	'''
	Get the blockId of the newest block.

	NOTE(review): on a fresh database db.get() returns None and the
	.decode() call raises AttributeError -- confirm first-run handling.
	'''
	return db.get('head'.encode('utf-8')).decode('utf-8')
def _set_head(blockId):
    """Internal helper: persist *blockId* as the newest block."""
    key = 'head'.encode('utf-8')
    db.put(key, blockId.encode('utf-8'))

def put_block(block):
    """Append a core/Transmission block to the chain.

    Raises TypeError if *block* is not a Transmission, and ValueError if it
    does not extend the current head.
    """
    if not isinstance(block, transmission.Transmission):
        raise TypeError('Not a Core.Transmission block object')

    # Explicit check instead of `assert`, which is stripped under python -O
    # and would silently allow forked chains.
    if block.previous_hash != get_head():
        raise ValueError('Block does not extend the current head')

    db.put(block.transmission_hash.encode('utf-8'), block.to_json().encode('utf-8'))
    _set_head(block.transmission_hash)
	
示例#19
0
 def init_leveldb(self):
     """Open every LevelDB store used for the train/test (anti-)GPS data."""
     results = "/home/bourne/Workstation/AntiGPS/results/"
     self.db_attack = plyvel.DB(results + "train_data_attack/")
     self.db_noattack = plyvel.DB(results + "train_data_noattack/")
     self.db_attack_test = plyvel.DB(results + "test_data_attack/")
     self.db_noattack_test = plyvel.DB(results + "test_data_noattack/")
     self.db_attack_poi = plyvel.DB(results + "train_data_attack_poi/")
     self.db_noattack_poi = plyvel.DB(results + "train_data_noattack_poi/")
     self.db_attack_test_poi = plyvel.DB(results + "test_data_attack_poi/")
     self.db_noattack_test_poi = plyvel.DB(results + "test_data_noattack_poi/")
     self.db_partial_attack = plyvel.DB(results + "test_data_partial_attack/")
     self.db_partial_attack_google = plyvel.DB(results + "test_data_partial_attack_google/")
     # Only this store is created on demand; the others must already exist
     # (plyvel's create_if_missing defaults to False).
     self.db_partial_attack_poi = plyvel.DB(results + "test_data_partial_attack_poi/", create_if_missing=True)
示例#20
0
#!/usr/bin/python3

import plyvel
import pickle
import sys
import time
import block
import os

# Walk every block in the store, printing each block's merkle tree (or a
# marker for empty/root-only trees) and timing the scan.
ts = time.time()
db = plyvel.DB("/home/ben/mof-bc")
blocks = 0
with db.iterator() as it:
    for k, v in it:
        if k == b'last':
            continue
        blocks += 1
        # Bug fix: the unpickled value used to be bound to the name `block`,
        # shadowing the imported `block` module after the first iteration.
        blk = pickle.loads(v)
        if blk.merkle_tree.root != 'root':
            blk.merkle_tree.print_tree_txs()
        else:
            print(blk, '-> root')
tf = time.time()
print(blocks, 'blocks')
print("Time taken", tf-ts, "seconds")
示例#21
0
    for x in args:
        while x:
            ret += bytes([(x & 0x7f) | (int(x > 0x07f) << 7)])
            x >>= 7
    return ret


def unvarint(bs):
    """Decode *bs* (least-significant 7-bit group first) into an integer."""
    value = 0
    for octet in bs[::-1]:
        value = (value << 7) | (octet & 0x7f)
    return value


db = plyvel.DB('../server/run')
# list(db.iterator(start=b'M'+varint(1,1), stop=b'M'+varint(1,2)))

# Shorthand decoders for the stored messages (presumably protobuf-generated
# classes -- FromString is the protobuf parse entry point; confirm).
ssm = lambda x: SignedServerMessage.FromString(x)
sm = lambda x: SignedServerMessage.ServerMessage.FromString(x)


def SSM(*args):
    # Decode all SignedServerMessages under the b'M' + varint(*args) key
    # prefix; the stop key increments the last component by one so the scan
    # covers exactly the given prefix range.
    return [
        ssm(v)
        for (k, v) in db.iterator(start=b'M' + varint(*args),
                                  stop=b'M' +
                                  varint(*(list(args[:-1]) + [args[-1] + 1])))
    ]

示例#22
0
        if (dty < 3):
            num += 1
            dtyc = np_utils.to_categorical(dty, 2)
            dtyc.shape = (1, 2)
            df = (dtx, dtyc, log)
            df1 = pickle.dumps(df, protocol=2)
            db.put(struct.pack('>l', num), df1)
            del (dtx)
            del (df)
            del (df1)
    return (num)


if __name__ == '__main__':

    db = plyvel.DB('../eegdb5_10/', create_if_missing=True)

    # Every .vhdr header file under ../Raw is read and written to the DB.
    a = glob.glob('../Raw/*.vhdr')
    j = 0
    k = str(len(a))
    num = 0
    for i in a:
        j += 1
        log = str(j) + "/" + k + " " + str(i)
        # -0.5s..0.5s window per event; putdb returns the running record
        # count so DB keys stay sequential across files.
        alldata, allid = readraweeg(i, (-.5, .5))
        num = putdb(alldata, allid, db, j, log, num=num)
    # Random index over all records (with replacement, via randint).
    indexdb = np.random.randint(num, size=num)
    np.savetxt("indexdb5_10.csv", indexdb)
    print(num)
    db.close()
示例#23
0
# Flush any cached state from previous runs.
red = redis.Redis(host="localhost", port=6379, db=0)
red.flushall()

blockchain = Blockchain("datas")

# force the creation of the index
if os.path.exists("super-big-index.pickle"):
    os.remove("super-big-index.pickle")

print("Creating index")
# Pulling one block forces get_ordered_blocks to build and cache the index.
next(
    blockchain.get_ordered_blocks("datas/index",
                                  cache="super-big-index.pickle"))
print("Index created")

# txid -> block lookup store; must already exist (create_if_missing=False).
pldb = plyvel.DB('tx_to_block/', create_if_missing=False)


class IllegalState(Exception):
    """Application-defined error; raised elsewhere in this service."""
    pass


@app.before_request
def before_request():
    """Stamp request start time; g.request_time() formats elapsed seconds."""
    g.request_start_time = time.time()
    g.request_time = lambda: "%.5fs" % (time.time() - g.request_start_time)


def get_block_transactions(block_height):
    blockchain = Blockchain("datas")
示例#24
0
def create_benchmark_archive(blk_hash):
    """Build a reproducible benchmark tarball for block *blk_hash*.

    Writes the raw block plus a LevelDB of the coins spent by its inputs
    (serialised in the chainstate CCoins format), tars the result
    deterministically and xz-compresses it.

    NOTE(review): Python 2 code (print statements, dict.has_key) -- do not
    run under Python 3 without porting.
    """
    blk = json.loads(subprocess.check_output([BITCOINZ_CLI, 'getblock', blk_hash]))
    print 'Height: %d' % blk['height']
    print 'Transactions: %d' % len(blk['tx'])

    os.mkdir('benchmark')
    with open('benchmark/block-%d.dat' % blk['height'], 'wb') as f:
        f.write(binascii.unhexlify(subprocess.check_output([BITCOINZ_CLI, 'getblock', blk_hash, 'false']).strip()))

    txs = [json.loads(subprocess.check_output([BITCOINZ_CLI, 'getrawtransaction', tx, '1'])
                     ) for tx in blk['tx']]

    # JoinSplit-containing blocks are not supported by this format; bail out.
    js_txs = len([tx for tx in txs if len(tx['vjoinsplit']) > 0])
    if js_txs:
        print 'Block contains %d JoinSplit-containing transactions' % js_txs
        return

    inputs = [(x['txid'], x['vout']) for tx in txs for x in tx['vin'] if x.has_key('txid')]
    print 'Total inputs: %d' % len(inputs)

    # Group the spent outputs by source transaction: txid -> [vout indices].
    unique_inputs = {}
    for i in sorted(inputs):
        if unique_inputs.has_key(i[0]):
            unique_inputs[i[0]].append(i[1])
        else:
            unique_inputs[i[0]] = [i[1]]
    print 'Unique input transactions: %d' % len(unique_inputs)

    db_path = 'benchmark/block-%d-inputs' % blk['height']
    db = plyvel.DB(db_path, create_if_missing=True)
    wb = db.write_batch()
    bar = progressbar.ProgressBar(redirect_stdout=True)
    print 'Collecting input coins for block'
    for tx in bar(unique_inputs.keys()):
        rawtx = json.loads(subprocess.check_output([BITCOINZ_CLI, 'getrawtransaction', tx, '1']))

        # Size/code of the unspentness bitvector covering vout[2:].
        mask_size = 0
        mask_code = 0
        b = 0
        while 2+b*8 < len(rawtx['vout']):
            zero = True
            i = 0
            while i < 8 and 2+b*8+i < len(rawtx['vout']):
                if 2+b*8+i in unique_inputs[tx]:
                    zero = False
                i += 1
            if not zero:
                mask_size = b + 1
                mask_code += 1
            b += 1

        coinbase = len(rawtx['vin']) == 1 and 'coinbase' in rawtx['vin'][0]
        first = len(rawtx['vout']) > 0 and 0 in unique_inputs[tx]
        second = len(rawtx['vout']) > 1 and 1 in unique_inputs[tx]
        code = 8*(mask_code - (0 if first or second else 1)) + \
            (1 if coinbase else 0) + \
            (2 if first else 0) + \
            (4 if second else 0)

        coins = bytearray()
        # Serialized format:
        # - VARINT(nVersion)
        coins.extend(encode_varint(rawtx['version']))
        # - VARINT(nCode)
        coins.extend(encode_varint(code))
        # - unspentness bitvector, for vout[2] and further; least significant byte first
        for b in range(mask_size):
            avail = 0
            i = 0
            while i < 8 and 2+b*8+i < len(rawtx['vout']):
                if 2+b*8+i in unique_inputs[tx]:
                    avail |= (1 << i)
                i += 1
            coins.append(avail)
        # - the non-spent CTxOuts (via CTxOutCompressor)
        for i in range(len(rawtx['vout'])):
            if i in unique_inputs[tx]:
                coins.extend(encode_varint(compress_amount(int(rawtx['vout'][i]['valueZat']))))
                coins.extend(compress_script(
                    binascii.unhexlify(rawtx['vout'][i]['scriptPubKey']['hex'])))
        # - VARINT(nHeight)
        coins.extend(encode_varint(json.loads(
            subprocess.check_output([BITCOINZ_CLI, 'getblockheader', rawtx['blockhash']])
            )['height']))

        # Key is 'c' + byte-reversed txid, matching the chainstate layout.
        db_key = b'c' + bytes(binascii.unhexlify(tx)[::-1])
        db_val = bytes(coins)
        wb.put(db_key, db_val)

    wb.write()
    db.close()

    # Make reproducible archive
    # (drop the volatile LOG file, sort entries, strip metadata via filter)
    os.remove('%s/LOG' % db_path)
    files = subprocess.check_output(['find', 'benchmark']).strip().split('\n')
    archive_name = 'block-%d.tar' % blk['height']
    tar = tarfile.open(archive_name, 'w')
    for name in sorted(files):
        tar.add(name, recursive=False, filter=deterministic_filter)
    tar.close()
    subprocess.check_call(['xz', '-6', archive_name])
    print 'Created archive %s.xz' % archive_name
    subprocess.call(['rm', '-r', 'benchmark'])
def extraction_core_txs_optimized_four(directory, args):
    """Extract the address list from the <directory>/txs LevelDB.

    Streams every key, detecting address boundaries by prefix comparison,
    and writes the '-'-separated address dump (optionally sorted, one per
    line after post-processing) to args.output_file.

    NOTE(review): keys are compared against str literals ("a", "-"); this
    matches Python 2, where LevelDB keys are str.  Under Python 3 keys are
    bytes and these comparisons would never match -- confirm runtime.
    """
    logging.info("processing db dir: " + directory + "/txs")
    logging.info("algo: extract-core optimized 4")
    if (args.sorted):
        logging.info("sorting: True")
    else:
        logging.info("sorting: False")
    if not args.liteonmemory:
        logging.info("intensive on memory, but fastest.")
    else:
        logging.info("lite on memory, but slower.")
    number_of_addreses = 0
    count = 0
    show_interval = 1000
    # Sentinel "current address" that can never match a real key.
    currentaddress = ["thisisatemplateusedforbootstrapping-", 0]
    currentaddresslen = len(currentaddress)
    list_of_addresses = ""
    #logging.info("--------------------------------" )
    if not args.dry_run:
        output_file = codecs.open(args.output_file, 'w', 'utf8')
    for key, value in plyvel.DB(directory + "/txs", create_if_missing=False):
        #print key[0][0:2]
        # Keys are grouped; stop at the first key outside the address group.
        if key[2] != "a":
            break
        # New address encountered: remember it (in memory or on disk).
        if not currentaddress == key[5:currentaddresslen + 5]:
            currentaddress = key[5:6 + (key[5:].index("-"))]
            currentaddresslen = len(currentaddress)
            number_of_addreses += 1
            if not args.liteonmemory:
                list_of_addresses = list_of_addresses + currentaddress
            else:
                if not args.dry_run:
                    output_file.write(currentaddress)
        count += 1
        # Progress log with exponential back-off.
        if (count % show_interval) == 0:
            logging.info("processed txs: " + "\t" + str(count))
            show_interval = show_interval * 2
    if not args.dry_run:
        if not args.liteonmemory:
            output_file.write(list_of_addresses)
        output_file.close()
        # Post-process the '-'-separated dump into one address per line.
        if (args.sorted):
            logging.info('sorting addresses')
            subprocess.call([
                "tr '-' '\n' < " + args.output_file + " | sort -o " +
                args.output_file
            ],
                            shell=True)
        else:
            logging.info('processing file')
            subprocess.call([
                "tr '-' '\n' < " + args.output_file + " > " + "temp_" +
                args.output_file
            ],
                            shell=True)
            subprocess.call([
                "mv -f" + " temp_" + args.output_file + "  " + args.output_file
            ],
                            shell=True)
    #subprocess.call(["awk -F'-' '{print $2}' " + args.output_file + " | sort -u -o " + args.output_file], shell=True)
        logging.info('output file: ' + args.output_file)
    logging.info('# of transactions: ' + str(count))
    logging.info('   # of addresses: ' + str(number_of_addreses))
    logging.info("total time: " + '\033[92m' +
                 (datetime.datetime.now() -
                  start_time).__str__().split('.')[0] + '\033[0m' + ' on ' +
                 datetime.datetime.today().strftime('%b, %d, %Y'))
    logging.info('\033[92m' + 'done.' + '\033[0m')
    print("")
    print("")
    exit(0)
示例#26
0
文件: nsrl.py 项目: nikolaifa/NSRL
 def __init__(self, db, records, **kwargs):
     """Open the LevelDB at *db*; extra kwargs are forwarded to plyvel.DB.

     Bug fix: create_if_missing=True was passed alongside **kwargs, so any
     caller who supplied create_if_missing themselves crashed with a
     duplicate-keyword TypeError.  It is now a default the caller can
     override.
     """
     kwargs.setdefault('create_if_missing', True)
     self.db = plyvel.DB(db, **kwargs)
示例#27
0
def main():
    # Python 2 snippet (print statement; str keys are bytes on py2).
    # Looks up the hard-coded artist name key in the test DB.
    my_DataBese = plyvel.DB('test.ldb', create_if_missing=True)
    print my_DataBese.get('黒木メイサ')
示例#28
0
import plyvel
import utils2
import types
import re
import time

import timeout_decorator

# Shared module-level namespace holding the corpus DB handles.
g = types.SimpleNamespace()

# Couplet (shuangju) and single-line (danju) gzip-JSON corpora.
g.shuangju = plyvel.DB("./shuangju-gzipJson.ldb", compression='snappy')
g.danju = plyvel.DB("./danju-gzipJson.ldb", compression='snappy')


def danshuangTransfer(startKeys, targetKeys, mode):
    # All keys should be order-independent.
    # mode selects which corpus the start keys index:
    #   SHUANGSTART -- start keys index the couplet (shuang) corpus
    #   DANSTART    -- start keys index the single-line (dan) corpus

    if mode == "SHUANGSTART":
        startShuang = {i: list() for i in startKeys}
        startDan = {}
    elif mode == "DANSTART":
        startShuang = {}
        startDan = {i: list() for i in startKeys}
    else:
        raise Exception("Unknown MODE: <%s>" % mode)

    targetDan = {i: list() for i in targetKeys}
    targetShuang = {i: list() for i in targetKeys}

    print(targetDan, targetShuang, startDan, startShuang, sep="\n")
示例#29
0
import plyvel
import json

db = plyvel.DB('./db_test/', create_if_missing=True)

# Store "name\tid" -> area (or the literal 'None') for each artist record.
with open('artist.json') as f:
    for l in f:
        js = json.loads(l)
        key = js['name'] + '\t' + str(js['id'])
        value = js.get('area')
        if value is None:  # idiom fix: compare to None with `is`, not `==`
            value = 'None'
        db.put(key.encode(), value.encode())
db.close()  # fix: the DB handle was never closed
    def processBlock(self, height):
        """Process the block at *height*: record payments and pool rewards,
        and run payment / owner-withdrawal batches when they are due.

        Skips (and logs) if the pool DB is already at or past *height*.
        """
        logmt(self.fp, 'processBlock height %d' % (height))

        reward = callrpc(self.rpc_port, self.rpc_auth, 'getblockreward', [
            height,
        ])

        db = plyvel.DB(self.dbPath, create_if_missing=True)

        # Already processed?  Resync the in-memory height and bail out.
        n = db.get(bytes([DBT_DATA]) + b'current_height')
        if n is not None:
            poolDBHeight = struct.unpack('>i', n)[0]
            if poolDBHeight >= height:
                logmt(
                    self.fp, 'Block %d already processed, pooldb height %d' %
                    (height, poolDBHeight))
                self.poolHeight = poolDBHeight
                db.close()
                return

        self.setParameters(height)

        # No coinstake means nothing to account for; just advance the height.
        if 'coinstake' not in reward:
            # logm('No coinstake txn found in block ' + str(height))
            db.put(
                bytes([DBT_DATA]) + b'current_height',
                struct.pack('>i', height))
            db.close()
            self.poolHeight = height
            return

        batchBalances = dict()
        b = db.write_batch(transaction=True)
        b.put(bytes([DBT_DATA]) + b'current_height', struct.pack('>i', height))

        self.findPayments(height, reward['coinstake'], db, b, batchBalances)

        # Find the pool reward output; only the first match is processed.
        for out in reward['outputs']:
            try:
                if self.poolAddrReward == out['script']['spendaddr']:
                    if out['value'] != reward['blockreward']:
                        logmt(
                            self.fp,
                            'WARNING: Pool reward mismatch at height %d\n' %
                            (height))
                    try:
                        self.processPoolBlock(height, reward, db, b,
                                              batchBalances)
                    except Exception:
                        exc_type, exc_value, exc_tb = sys.exc_info()
                        traceback.print_exception(exc_type, exc_value, exc_tb)
                        traceback.print_exception(exc_type,
                                                  exc_value,
                                                  exc_tb,
                                                  file=self.fp)
                        self.fp.flush()
                    break
            except Exception:
                # Best-effort: outputs lacking a script/spendaddr field are
                # silently skipped.
                pass

        b.write()

        # Run payments at most once every minBlocksBetweenPayments blocks.
        n = db.get(bytes([DBT_DATA]) + b'last_payment_run')
        lastPaymentRunHeight = 0 if n is None else struct.unpack('>i', n)[0]
        if lastPaymentRunHeight + self.minBlocksBetweenPayments <= height:
            with db.write_batch(transaction=True) as b:
                self.processPayments(height, db, b)

        # Owner withdrawal, likewise throttled by block count.
        if self.have_withdrawal_info:
            n = db.get(bytes([DBT_DATA]) + b'last_withdrawal_run')
            last_withdrawal_run = 0 if n is None else struct.unpack('>i', n)[0]
            if last_withdrawal_run + self.min_blocks_between_withdrawals <= height:
                with db.write_batch(transaction=True) as b:
                    self.processPoolRewardWithdrawal(height, db, b)

        db.close()
        self.poolHeight = height