Example #1
import bsddb.db
import errno
import os

dbdir = "dbtest-env"  # assumed value; the original snippet begins mid-file without defining dbdir
try:
    os.makedirs(dbdir)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise e
env = bsddb.db.DBEnv()
envflags = bsddb.db.DB_CREATE|bsddb.db.DB_RECOVER|bsddb.db.DB_INIT_LOCK|bsddb.db.DB_INIT_LOG|bsddb.db.DB_INIT_TXN|bsddb.db.DB_INIT_MPOOL|bsddb.db.DB_THREAD
print "open dbenv in", dbdir
env.open(dbdir, envflags)

allow_uncommitted = False
db = bsddb.db.DB(env)
db.set_flags(bsddb.db.DB_DUPSORT|bsddb.db.DB_RECNUM)
dbflags = bsddb.db.DB_CREATE|bsddb.db.DB_AUTO_COMMIT|bsddb.db.DB_THREAD
#dbflags = bsddb.db.DB_CREATE|bsddb.db.DB_AUTO_COMMIT|bsddb.db.DB_THREAD|bsddb.db.DB_MULTIVERSION
if allow_uncommitted:
    dbflags = dbflags|bsddb.db.DB_READ_UNCOMMITTED
dbfile = dbdir + "/dbtest.db4"
print "open db", dbfile
db.open(dbfile, dbtype=bsddb.db.DB_BTREE, flags=dbflags)

#seq_flags = bsddb.db.DB_CREATE|bsddb.db.DB_THREAD
#uidseq = bsddb.db.DBSequence(db)
#uidseq.open("uidnumber", None, seq_flags)
#usnseq = bsddb.db.DBSequence(db)
#usnseq.open("usn", None, seq_flags)

def loaddb():
    for ii in xrange(0,10):
        db.put("key" + str(ii), "data" + str(ii))
    flags = bsddb.db.DB_NODUPDATA
#    flags = bsddb.db.DB_NOOVERWRITE
    for ii in xrange(0,10):
        db.put("multikey", "multidata" + str(ii), None, flags)
loaddb()
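
The DB_DUPSORT flag set above means the ten "multidata" values end up as sorted duplicates under the single key "multikey". A quick way to see that is to walk them with a cursor inside an explicit transaction; the read-back below is a sketch added here, not part of the original example:

# Read-back sketch (not in the original example): walk the sorted duplicates
# stored under "multikey" using a transactional cursor.
txn = env.txn_begin()
cursor = db.cursor(txn)
rec = cursor.set("multikey")            # position on the first duplicate
print "duplicates under 'multikey':", cursor.count()
while rec is not None:
    print rec                           # (key, data) tuple
    rec = cursor.next_dup()             # next record with the same key
cursor.close()
txn.commit()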
Example #2
from __future__ import print_function  # needed for print(..., file=...) under Python 2
import bsddb.db
import os
import struct
import sys

prog = os.path.basename(sys.argv[0])  # assumed: the snippet begins mid-file and never defines prog

wallet_filename = os.path.abspath(sys.argv[1])

with open(wallet_filename, "rb") as wallet_file:
    wallet_file.seek(12)
    if wallet_file.read(
            8) != b"\x62\x31\x05\x00\x09\x00\x00\x00":  # BDB magic, Btree v9
        print(prog + ": error: file is not a Bitcoin Core wallet",
              file=sys.stderr)
        sys.exit(1)

db_env = bsddb.db.DBEnv()
db_env.open(os.path.dirname(wallet_filename),
            bsddb.db.DB_CREATE | bsddb.db.DB_INIT_MPOOL)
db = bsddb.db.DB(db_env)
db.open(wallet_filename, b"main", bsddb.db.DB_BTREE, bsddb.db.DB_RDONLY)
mkey = db.get(b"\x04mkey\x01\x00\x00\x00")
db.close()
db_env.close()

if not mkey:
    raise ValueError(
        "Encrypted master key #1 not found in the Bitcoin Core wallet file.\n"
        +
        "(is this wallet encrypted? is this a standard Bitcoin Core wallet?)")

# This is a little fragile because it assumes the encrypted key and salt sizes are
# 48 and 8 bytes long respectively, which although currently true may not always be:
# (it will loudly fail if this isn't the case; if smarter it could gracefully succeed):
encrypted_master_key, salt, method, iter_count = struct.unpack_from(
    "< 49p 9p I I", mkey)
Example #3
from __future__ import print_function  # needed for print(..., file=...) under Python 2
import bsddb.db
import os
import struct
import sys

prog = os.path.basename(sys.argv[0])  # assumed: the snippet never defines prog

if len(sys.argv) != 2 or sys.argv[1].startswith("-"):
    print("usage:", prog, "BITCOINCORE_WALLET_FILE", file=sys.stderr)
    sys.exit(2)

wallet_filename = os.path.abspath(sys.argv[1])

with open(wallet_filename, "rb") as wallet_file:
    wallet_file.seek(12)
    if wallet_file.read(8) != b"\x62\x31\x05\x00\x09\x00\x00\x00":  # BDB magic, Btree v9
        print(prog+": error: file is not a Bitcoin Core wallet", file=sys.stderr)
        sys.exit(1)

db_env = bsddb.db.DBEnv()
db_env.open(os.path.dirname(wallet_filename), bsddb.db.DB_CREATE | bsddb.db.DB_INIT_MPOOL)
db = bsddb.db.DB(db_env)
db.open(wallet_filename, b"main", bsddb.db.DB_BTREE, bsddb.db.DB_RDONLY)
mkey = db.get(b"\x04mkey\x01\x00\x00\x00")
db.close()
db_env.close()

if not mkey:
    raise ValueError("Encrypted master key #1 not found in the Bitcoin Core wallet file.\n"+
                     "(is this wallet encrypted? is this a standard Bitcoin Core wallet?)")

# This is a little fragile because it assumes the encrypted key and salt sizes are
# 48 and 8 bytes long respectively, which although currently true may not always be:
# (it will loudly fail if this isn't the case; if smarter it could gracefully succeed):
encrypted_master_key, salt, method, iter_count = struct.unpack_from("< 49p 9p I I", mkey)
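
# Sketch (not in the original script): a more forgiving parse that honours the
# length prefixes instead of assuming 48- and 8-byte fields, addressing the
# "fragile" caveat above. It still assumes both prefixes fit in a single byte,
# which holds for the current sizes.
offset = 0
key_len = struct.unpack_from("<B", mkey, offset)[0]             # encrypted key length
encrypted_master_key = mkey[offset + 1:offset + 1 + key_len]
offset += 1 + key_len
salt_len = struct.unpack_from("<B", mkey, offset)[0]            # salt length
salt = mkey[offset + 1:offset + 1 + salt_len]
offset += 1 + salt_len
method, iter_count = struct.unpack_from("<II", mkey, offset)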
if method != 0:
    print(prog+": warning: unexpected Bitcoin Core key derivation method", str(method), file=sys.stderr)
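
Derivation method 0 is, to the best of my knowledge, Bitcoin Core's SHA-512 variant of EVP_BytesToKey: hash the passphrase and salt once, re-hash the digest iter_count - 1 times, then split the result into an AES-256 key and a CBC IV. The helper below is an illustrative sketch of that scheme, not part of the original extract script; passphrase is a hypothetical bytes argument.

import hashlib

def derive_key_iv(passphrase, salt, iter_count):
    # Sketch of the method-0 derivation (assumption based on Bitcoin Core's
    # CCrypter; not part of the original example).
    derived = hashlib.sha512(passphrase + salt).digest()
    for _ in range(iter_count - 1):
        derived = hashlib.sha512(derived).digest()
    return derived[:32], derived[32:48]   # 32-byte AES-256 key, 16-byte CBC IV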
Example #4
    def load(self, datadir, prog): # evaluate the given scomp program

        if len(self.varctx) > 0:
            prog = self.replvars(prog)

        # get the list of stats
        statsmatchers = []
        if prog.has_key('stats'):
            for x in prog['stats']:
                x = '^' + x.replace('*', '.*') + '$'
                r = re.compile(x)
                statsmatchers.append(r)
        else:
            statsmatchers.append(re.compile('^.*$'))

        # load specified configs
        self.configs = {}
        self.configlist = []

        if prog.has_key('accept'):
            self.accept_rules = prog['accept']
        else:
            self.accept_rules = []

        multisep = False
        if prog.has_key('options') and 'multisep' in prog['options']:
            multisep = True

        if False:
            try:
                db = bsddb.db.DB()
                path = '/tmp/scomp_cache_' + os.path.abspath(datadir).replace('/', '_') + '.db'
                db.open(path, flags=bsddb.db.DB_CREATE, dbtype=bsddb.db.DB_HASH)
            except:
                db = None
        else:
            db = None

        benchsets = []
        benchsets_present = []
        benchsets_absent = {}
        confignames = []
        statssets = []
        for c in prog['configs']:
            longname = c[0]
            shortname = c[1]

            l = []
            if longname.find('*') != -1:
                r = re.compile(longname.replace('*', '(.*)'))
                l = []
                for d in os.listdir(datadir):
                    if os.path.isdir(datadir + '/' + d):
                        m = r.match(d)
                        if m is None: continue
                        wildcard = m.groups()[0]
                        s = shortname.replace('$1', wildcard)
                        l.append( (d, s) )

            else:
                l = [ (longname, shortname) ]

            for p in l:
                cfg = runs.Config(db, statsmatchers, datadir + '/' + p[0], self.accept_rules, None, None, None, multisep)
                self.configs[p[1]] = cfg
                self.configlist.append(p[1])
                self.exprs[p[1]] = {}
                confignames.append(p[1])
                benchsets.append(set(cfg.benches))
                benchsets_present.append(cfg.benches_present)
                statssets.append(cfg.stats)


        # take union of all available benches
        self.benches = reduce(lambda x,y: x | y, benchsets)
        # also produce list of benches for which all results are present
        self.benches_present = reduce(lambda x,y: x & y, benchsets_present)

        # determine which configs are missing each partial result
        for i in range(len(confignames)):
            for absent in (self.benches - benchsets_present[i]):
                if not benchsets_absent.has_key(absent): benchsets_absent[absent] = set()
                benchsets_absent[absent].add(confignames[i])

        if prog.has_key('badbenches'):
            self.badbenches = prog['badbenches']
        else:
            self.badbenches = []

        if prog.has_key('benchonly') and prog['benchonly'] != '':
            if multisep:
                self.benches = set()
                for d in glob.glob(datadir + '/*/%s/sim.*.out' % (prog['benchonly'])):
                    parts = d.split('.')
                    i = int(parts[-2])
                    self.benches.add(prog['benchonly'] + ('.%d' % i))
            else:
                self.benches = set([prog['benchonly']])

        if prog.has_key('benchmap'):
            self.benchmap = prog['benchmap']
        else:
            self.benchmap = {}
            for k in self.benches:
                self.benchmap[k] = k

        # option: exclude all benchmarks for which some results are not
        # present.
        if prog.has_key('options') and prog['options'].has_key('exclude_partial'):
            for b in self.benches:
                if not b in self.benches_present:
                    print "Excluding benchmark with partial results:", b, "(missing: ", ','.join(benchsets_absent[b]), ")"
            self.benches = self.benches_present

        stats_any = reduce(lambda x,y: x | y, statssets)
        stats_all = reduce(lambda x,y: x & y, statssets)

        self.stats = []
        for m in statsmatchers:
            matching = filter(lambda x: m.match(x), stats_any)
            for x in matching:
                if not x in self.stats:
                    self.stats.append(x)

        # read the list of exprs, matching and applying to appropriate configs
        self.exprlist = []
        if prog.has_key('exprs'):
            for e in prog['exprs']:
                config_filter, statlist = e
                r = re.compile(config_filter.replace('*', '.*'))
                for c in self.configlist:
                    if r.match(c):
                        for s in statlist:
                            name, expr = s
                            if not name in self.exprlist: self.exprlist.append(name)
                            self.exprs[c][name] = Expr()
                            self.exprs[c][name].parse(expr, c)

        # parse exprs, construct dependences
        deps = {} # dict of lists
        affects = {} # dict of lists
        for c in self.configlist:
            for name in self.exprs[c].keys():
                fullname = c + '.' + name
                affects[fullname] = []
                deps[fullname] = self.exprs[c][name].deps

        # get forward flow from backward flow
        for x in deps.keys():
            for source in deps[x]:
                if affects.has_key(source):
                    affects[source].append(x)
                    
        self.affects = affects

        if prog.has_key('sheets'):
            self.out_sheets = prog['sheets']
        else:
            self.out_sheets = [ ['all', 'full', ['*']] ]

        if prog.has_key('plots'):
            self.out_plots = prog['plots']
        else:
            self.out_plots = []
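
load() is driven entirely by the keys it probes on prog. The dictionary below is a hypothetical illustration shaped only after the keys read above; the stat names and the expression string are invented for the example, and the real expression syntax depends on Expr.parse().

# Hypothetical scomp program, covering the keys load() looks for above.
example_prog = {
    'stats':   ['sim.cycles', 'cache.*.misses'],          # glob-style stat patterns
    'configs': [('baseline', 'base'),                     # (longname, shortname) pairs
                ('tuned_*', 'tuned-$1')],                 # '*' expands per directory, '$1' reuses the match
    'accept':  [],                                        # accept_rules handed to runs.Config
    'options': {'exclude_partial': True},                 # drop benches with missing results
    'exprs':   [('*', [('ipc', 'sim.insts / sim.cycles')])],  # per-config derived stats
    'sheets':  [['all', 'full', ['*']]],                  # same shape as the default above
    'plots':   [],
}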