def main(args):
    root, options = parse_args(args)
    print(options)

    t0 = dt.now()
    t = get_directory_tree(root, exclude_dirs=options['exclude-dirs'])
    t1 = dt.now()

    delta_t = (t1 - t0).total_seconds()
    print('%f sec to scan %s / %s files'
          % (delta_t, format_bytes(t.size), get_total_children(t)))

    data = {
        'tree': tree_to_dict(t),
        'root': os.path.realpath(root),
        'host': os.getenv('MACHINE', socket.gethostname())
    }
    # pprint(data)
    # print(jsonpickle.encode(t))
    # print_directory_tree(t)

    width = 800
    height = 600
    # rects = compute_rectangles(t, [0, width], [0, height])
    # render_tk(rects, width, height, title=os.path.realpath(root))
    # render_class(rects, width, height, title=os.path.realpath(root))
    render_class(t, compute_rectangles, width, height,
                 title=os.path.realpath(root))
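# format_bytes() is used by nearly every snippet here but defined elsewhere.
# A minimal sketch of what it plausibly does (hypothetical -- the real helper
# may differ in rounding, units, or separator):
def format_bytes(n):
    """Render a byte count as a human-readable string, e.g. 1536 -> '1.5 KB'."""
    for unit in ('B', 'KB', 'MB', 'GB', 'TB', 'PB'):
        if abs(n) < 1024.0:
            return '%.1f %s' % (n, unit)
        n /= 1024.0
    return '%.1f EB' % n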
def update_sharelist(self):
    self.sharelist.clear()
    self.folders = {}
    for (shareid, share) in filesharing.shares.items():
        for (sharepath, ftype) in share.list_recursively().items():
            size = ''
            if ftype == FTYPE_FILE:
                nativepath = share.native_path(sharepath)
                size = format_bytes(filesize(nativepath))
            self.add_item(share.meta, shareid, sharepath, size,
                          ftype == FTYPE_DIRECTORY)
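# This snippet assumes FTYPE_FILE / FTYPE_DIRECTORY constants and a
# filesize() helper defined elsewhere. A minimal sketch, assuming filesize()
# simply wraps os.path.getsize (hypothetical -- the real constants may be
# ints or enums rather than strings):
import os

FTYPE_FILE = 'file'
FTYPE_DIRECTORY = 'directory'

def filesize(path):
    """Size of the file at `path` in bytes."""
    return os.path.getsize(path)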
def update_stats(self, stats_dict):
    """Update the progress_stats dict from the given dictionary."""
    assert isinstance(stats_dict, dict)

    for key in stats_dict:
        if key in self.progress_stats:
            value = stats_dict[key]
            #if not isinstance(value, basestring) or not value:
                #self.progress_stats[key] = self.default_values[key]
            #else:
            self.progress_stats[key] = value

    # Extract extra stuff
    if "playlist_index" in stats_dict:
        self.playlist_index_changed = True

    if "filename" in stats_dict:
        # Reset filenames, extensions & filesizes lists when changing playlist item
        if self.playlist_index_changed:
            self.filenames = []
            self.extensions = []
            self.filesizes = []
            self.playlist_index_changed = False
        self.filenames.append(stats_dict["filename"])

    if "extension" in stats_dict:
        self.extensions.append(stats_dict["extension"])

    if "path" in stats_dict:
        self.path = stats_dict["path"]

    if "filesize" in stats_dict:
        if stats_dict["percent"] == "100%" and len(self.filesizes) < len(self.filenames):
            filesize = stats_dict["filesize"].lstrip("~")  # HLS downloader etc
            self.filesizes.append(to_bytes(filesize))

    if "status" in stats_dict:
        # If we are post processing, try to calculate the size of
        # the output file since youtube-dl does not
        if stats_dict["status"] == self.ACTIVE_STAGES[2] and len(self.filesizes) == 2:
            post_proc_filesize = self.filesizes[0] + self.filesizes[1]
            self.filesizes.append(post_proc_filesize)
            self.progress_stats["filesize"] = format_bytes(post_proc_filesize)
        self._set_stage(stats_dict["status"])
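# A hedged usage sketch: update_stats() expects the key/value pairs the
# youtube-dl output parser emits. The keys below mirror the ones the method
# reads; the values and the `downloader` instance are made up for
# illustration, and the "status" string is only a guess at ACTIVE_STAGES[2]:
downloader.update_stats({
    "filename": "video.f137.mp4",
    "extension": ".mp4",
    "percent": "100%",
    "filesize": "~120.5MiB",   # HLS downloader prefixes sizes with '~'
    "status": "Post Processing",
})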
def __str__(self):
    size = format_bytes(self.size)
    info_list = [size]
    if self.children:
        children = '%d children' % len(self.children)
        info_list.append(children)
    if 'lines' in self.details:
        details = '%d lines' % self.details['lines']
        info_list.append(details)
    info = ', '.join(info_list)
    return '<%s: %s>' % (self.name, info)
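# For a directory node named 'src' of about 1.2 MB with 14 children, the
# repr above reads something like (illustrative; the exact size text depends
# on format_bytes):
#
#   <src: 1.2 MB, 14 children>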
def query_results(self, user, allresults, metadict, ctx):
    if allresults is None:
        notification.notify('Unable to query shares from %s' % user.tag(), True)
        return
    for (shareid, sharepath, fsize, ftype) in allresults:
        meta = metadict[shareid]
        size = ''
        if ftype == FTYPE_FILE:
            size = format_bytes(fsize)
        self.add_item(meta, user, shareid, sharepath, size,
                      ftype == FTYPE_DIRECTORY)
        self.items += 1
    self.title.set_text('Showing %d items' % self.items)
# tree stuff
from utils import format_bytes

path = abspath(treepath)

t0 = dt.now()
t = gettree(path)
t1 = dt.now()
# printtree(t)

if isdir(path):
    drawtree(t, canv, [3, w - 4], [3, h - 4])
    t2 = dt.now()

    delta_t = (t1 - t0).total_seconds()
    print('%f sec to scan %s / %s files'
          % (delta_t, format_bytes(t[1]), get_total_children(t)))
    print('%f sec to draw' % (t2 - t1).total_seconds())

    root.wm_attributes("-topmost", 1)
    root.focus_force()
    root.lift()
    root.after(100, lambda: root.focus_force())
    root.mainloop()
else:
    print("input is not a directory ???")

if __name__ == '__main__':
    # if executing standalone, call main
    pass
    # main()
async def processor(self):
    utxo_sequence = 0
    stxo_sequence = 0
    tx_sequence = 0
    best_fee = 1
    dbs = set()
    dbs_childs = set()
    outputs, inputs, transactions = self.refresh_stat()
    truncate_dbs_table = True

    best_fee_hourly = ListCache(60 * 60)
    best_fee_4h = ListCache(60 * 60 * 4)
    async with self.db_pool.acquire() as conn:
        rows = await conn.fetch(
            "SELECT minute, transactions->'feeRate'->'best' as best "
            "FROM mempool_analytica ORDER BY minute DESC LIMIT 240;")
    c = 0
    for row in rows:
        if row["best"] is not None:
            if c < 60:
                best_fee_hourly.set(float(row["best"]))
            best_fee_4h.set(float(row["best"]))
        c += 1

    while True:
        try:
            if not self.bootstrap_completed:
                async with self.db_pool.acquire() as conn:
                    v = await conn.fetchval(
                        "SELECT value FROM service "
                        "WHERE name = 'bootstrap_completed' LIMIT 1;")
                if v == '1':
                    self.bootstrap_completed = True
                    self.log.info("Mempool analytica task started")
                    async with self.db_pool.acquire() as conn:
                        self.last_day = await conn.fetchval(
                            "SELECT max(day) FROM mempool_analytica "
                            "WHERE day IS NOT NULL;")
                        self.last_hour = await conn.fetchval(
                            "SELECT max(hour) FROM mempool_analytica "
                            "WHERE day IS NOT NULL;")
                    if self.last_day is None:
                        self.last_day = 0
                    if self.last_hour is None:
                        self.last_hour = 0
                else:
                    await asyncio.sleep(60)

            q = time.time()
            await self.load_block_map()

            async with self.db_pool.acquire() as conn:
                async with conn.transaction():
                    last_hash = await conn.fetchval(
                        "SELECT height FROM blocks ORDER BY height DESC LIMIT 1;")
                    stxo = await conn.fetch(
                        "SELECT tx_id, out_tx_id, address, amount, pointer, "
                        "sequence, outpoint, id "
                        "FROM connector_unconfirmed_stxo "
                        "WHERE id > $1;", stxo_sequence)
                    utxo = await conn.fetch(
                        "SELECT out_tx_id as tx_id, address, amount, id "
                        "FROM connector_unconfirmed_utxo "
                        "WHERE id > $1;", utxo_sequence)
                    tx = await conn.fetch(
                        "SELECT tx_id, size, b_size, rbf, fee, "
                        "amount, segwit, timestamp, id FROM unconfirmed_transaction "
                        "WHERE id > $1;", tx_sequence)
                    row = await conn.fetchval(
                        "SELECT min(feerate) "
                        "FROM (SELECT feerate, sum((size + b_size * 4) / 4) "
                        "      OVER (ORDER BY feerate DESC) as block "
                        "      FROM unconfirmed_transaction) t "
                        "WHERE block <= 920000;")
                    if row is not None:
                        best_fee = row

            if last_hash != self.last_hash:
                # a new block arrived: reset all incremental state
                self.last_hash = last_hash
                outputs, inputs, transactions = self.refresh_stat()
                utxo_sequence = 0
                stxo_sequence = 0
                tx_sequence = 0
                dbs = set()
                dbs_childs = set()
                truncate_dbs_table = True

            if not tx:
                s_minute = int(time.time()) // 60
                if s_minute % 60 == 0 and self.last_hour < s_minute // 60:
                    s_hour = s_minute // 60
                    self.last_hour = s_hour
                    if s_hour % 24 == 0 and self.last_day < s_hour // 24:
                        s_day = s_hour // 24
                        self.last_day = s_day
                    else:
                        s_day = None
                else:
                    s_hour = None
                    s_day = None
                async with self.db_pool.acquire() as conn:
                    async with conn.transaction():
                        await conn.execute(
                            "INSERT INTO mempool_analytica "
                            "(minute, hour, day, inputs, outputs, transactions)"
                            " VALUES "
                            "($1, $2, $3, $4, $5, $6) "
                            "ON CONFLICT (minute) "
                            "DO UPDATE SET "
                            "   inputs = $4,"
                            "   outputs = $5, "
                            "   transactions = $6",
                            s_minute, s_hour, s_day, json.dumps(inputs),
                            json.dumps(outputs), json.dumps(transactions))

            txsi = set()
            txso = set()
            dbs_outs = set()
            dbs_set = set()

            if tx:
                inputs["count"] += len(stxo)
                for row in stxo:
                    if stxo_sequence < row["id"]:
                        stxo_sequence = row["id"]
                    if row["sequence"] > 0:
                        dbs_outs.add(row["outpoint"])
                        dbs_set.add(row["tx_id"])
                    txsi.add(row["tx_id"])

                    inputs["amount"]["total"] += row["amount"]
                    if inputs["amount"]["max"]["value"] is None or \
                            inputs["amount"]["max"]["value"] < row["amount"]:
                        inputs["amount"]["max"]["value"] = row["amount"]
                        inputs["amount"]["max"]["txId"] = rh2s(row["tx_id"])
                    if inputs["amount"]["min"]["value"] is None or \
                            inputs["amount"]["min"]["value"] > row["amount"]:
                        inputs["amount"]["min"]["value"] = row["amount"]
                        inputs["amount"]["min"]["txId"] = rh2s(row["tx_id"])

                    try:
                        inputs["typeMap"][row["address"][0]]["count"] += 1
                        inputs["typeMap"][row["address"][0]]["amount"] += row["amount"]
                    except KeyError:
                        inputs["typeMap"][row["address"][0]] = {
                            "count": 1, "amount": row["amount"]}

                    amount = row["amount"]
                    key = None if amount == 0 else str(math.floor(math.log10(amount)))
                    try:
                        inputs["amountMap"][key]["count"] += 1
                        inputs["amountMap"][key]["amount"] += row["amount"]
                    except KeyError:
                        inputs["amountMap"][key] = {
                            "count": 1, "amount": row["amount"]}

                    try:
                        key = time.time() - self.block_map_timestamp[row["pointer"] >> 39]
                        if key < 3600:
                            key = "1h"
                        elif key < 43200:
                            key = "12h"
                        elif key < 86400:
                            key = "1d"
                        elif key < 259200:
                            key = "3d"
                        elif key < 604800:
                            key = "1w"
                        elif key < 2592000:
                            key = "1m"
                        else:
                            key = "%sy" % (int(key // 31536000) + 1)
                    except Exception:
                        key = None
                    try:
                        inputs["ageMap"][key]["count"] += 1
                        inputs["ageMap"][key]["amount"] += row["amount"]
                    except KeyError:
                        inputs["ageMap"][key] = {
                            "count": 1, "amount": row["amount"]}

                async with self.db_pool.acquire() as conn:
                    dbs_rows = await conn.fetch(
                        "SELECT tx_id, outpoint "
                        "FROM connector_unconfirmed_stxo "
                        "WHERE outpoint = ANY($1);", dbs_outs)

                # an outpoint seen twice means a competing spend
                out_map = set()
                for row in dbs_rows:
                    if row["outpoint"] in out_map:
                        if row["tx_id"] in dbs_set:
                            dbs.add(row["tx_id"])
                    else:
                        out_map.add(row["outpoint"])

                # propagate double-spend status to descendant transactions
                l_dbs_size = 0
                while True:
                    for row in stxo:
                        if row["out_tx_id"] in dbs or row["out_tx_id"] in dbs_childs:
                            if row["tx_id"] not in dbs:
                                dbs_childs.add(row["tx_id"])
                    if l_dbs_size != len(dbs_childs):
                        l_dbs_size = len(dbs_childs)
                    else:
                        break

                outputs["count"] += len(utxo)
                for row in utxo:
                    if utxo_sequence < row["id"]:
                        utxo_sequence = row["id"]
                    txso.add(row["tx_id"])
                    outputs["amount"]["total"] += row["amount"]
                    if outputs["amount"]["max"]["value"] is None or \
                            outputs["amount"]["max"]["value"] < row["amount"]:
                        outputs["amount"]["max"]["value"] = row["amount"]
                        outputs["amount"]["max"]["txId"] = rh2s(row["tx_id"])
                    if outputs["amount"]["min"]["value"] is None or \
                            outputs["amount"]["min"]["value"] > row["amount"]:
                        if row["amount"] > 0:
                            outputs["amount"]["min"]["value"] = row["amount"]
                            outputs["amount"]["min"]["txId"] = rh2s(row["tx_id"])

                    try:
                        outputs["typeMap"][row["address"][0]]["count"] += 1
                        outputs["typeMap"][row["address"][0]]["amount"] += row["amount"]
                    except KeyError:
                        outputs["typeMap"][row["address"][0]] = {
                            "count": 1, "amount": row["amount"]}

                    amount = row["amount"]
                    key = None if amount == 0 else str(math.floor(math.log10(amount)))
                    try:
                        outputs["amountMap"][key]["count"] += 1
                        outputs["amountMap"][key]["amount"] += row["amount"]
                    except KeyError:
                        outputs["amountMap"][key] = {
                            "count": 1, "amount": row["amount"]}

                transactions["doublespend"]["count"] = len(dbs)
                transactions["doublespendChilds"]["count"] = len(dbs_childs)
                transactions["count"] += len(tx)

                dbs_records = deque()
                for row in tx:
                    v_size = math.ceil((row["b_size"] * 3 + row["size"]) / 4)
                    if tx_sequence < row["id"]:
                        tx_sequence = row["id"]

                    if row["tx_id"] in dbs:
                        transactions["doublespend"]["amount"] += row["amount"]
                        transactions["doublespend"]["size"] += row["size"]
                        transactions["doublespend"]["vSize"] += v_size
                        dbs_records.append((row["tx_id"], row["timestamp"], 0))
                    if row["tx_id"] in dbs_childs:
                        transactions["doublespendChilds"]["amount"] += row["amount"]
                        transactions["doublespendChilds"]["size"] += row["size"]
                        transactions["doublespendChilds"]["vSize"] += v_size
                        dbs_records.append((row["tx_id"], row["timestamp"], 1))

                    if row["amount"] > 0:
                        transactions["amount"]["total"] += row["amount"]
                        if transactions["amount"]["max"]["value"] is None or \
                                transactions["amount"]["max"]["value"] < row["amount"]:
                            transactions["amount"]["max"]["value"] = row["amount"]
                            transactions["amount"]["max"]["txId"] = rh2s(row["tx_id"])
                        if transactions["amount"]["min"]["value"] is None or \
                                transactions["amount"]["min"]["value"] > row["amount"]:
                            transactions["amount"]["min"]["value"] = row["amount"]
                            transactions["amount"]["min"]["txId"] = rh2s(row["tx_id"])

                    if row["fee"] is not None:
                        transactions["fee"]["total"] += row["fee"]
                        if transactions["fee"]["max"]["value"] is None or \
                                transactions["fee"]["max"]["value"] < row["fee"]:
                            transactions["fee"]["max"]["value"] = row["fee"]
                            transactions["fee"]["max"]["txId"] = rh2s(row["tx_id"])
                        if transactions["fee"]["min"]["value"] is None or \
                                transactions["fee"]["min"]["value"] > row["fee"]:
                            transactions["fee"]["min"]["value"] = row["fee"]
                            transactions["fee"]["min"]["txId"] = rh2s(row["tx_id"])

                        fee_rate = math.ceil(row["fee"] / v_size)
                        if transactions["feeRate"]["max"]["value"] is None or \
                                transactions["feeRate"]["max"]["value"] < fee_rate:
                            transactions["feeRate"]["max"]["value"] = fee_rate
                            transactions["feeRate"]["max"]["txId"] = rh2s(row["tx_id"])
                        if transactions["feeRate"]["min"]["value"] is None or \
                                transactions["feeRate"]["min"]["value"] > fee_rate:
                            transactions["feeRate"]["min"]["value"] = fee_rate
                            transactions["feeRate"]["min"]["txId"] = rh2s(row["tx_id"])

                        # bucket fee rates more coarsely as they grow
                        key = fee_rate
                        if key > 10 and key < 20:
                            key = math.floor(key / 2) * 2
                        elif key > 20 and key < 200:
                            key = math.floor(key / 10) * 10
                        elif key > 200:
                            key = math.floor(key / 25) * 25
                        try:
                            transactions["feeRateMap"][key]["count"] += 1
                            transactions["feeRateMap"][key]["size"] += row["size"]
                            transactions["feeRateMap"][key]["vSize"] += v_size
                        except KeyError:
                            transactions["feeRateMap"][key] = {
                                "count": 1, "size": row["size"], "vSize": v_size}

                    if row["rbf"]:
                        transactions["rbfCount"] += 1
                    if row["segwit"]:
                        transactions["segwitCount"] += 1

                    if row["size"]:
                        transactions["size"]["total"] += row["size"]
                        transactions["vSize"]["total"] += v_size
                        if transactions["size"]["max"]["value"] is None or \
                                transactions["size"]["max"]["value"] < row["size"]:
                            transactions["size"]["max"]["value"] = row["size"]
                            transactions["size"]["max"]["txId"] = rh2s(row["tx_id"])
                        if transactions["vSize"]["max"]["value"] is None or \
                                transactions["vSize"]["max"]["value"] < v_size:
                            transactions["vSize"]["max"]["value"] = v_size
                            transactions["vSize"]["max"]["txId"] = rh2s(row["tx_id"])
                        if transactions["size"]["min"]["value"] is None or \
                                transactions["size"]["min"]["value"] > row["size"]:
                            transactions["size"]["min"]["value"] = row["size"]
                            transactions["size"]["min"]["txId"] = rh2s(row["tx_id"])
                        if transactions["vSize"]["min"]["value"] is None or \
                                transactions["vSize"]["min"]["value"] > v_size:
                            transactions["vSize"]["min"]["value"] = v_size
                            transactions["vSize"]["min"]["txId"] = rh2s(row["tx_id"])

                if transactions["vSize"]["total"] > 1000000 / 10:
                    transactions["feeRate"]["best"] = round(best_fee, 2)
                else:
                    transactions["feeRate"]["best"] = 1

                async with self.db_pool.acquire() as conn:
                    async with conn.transaction():
                        if truncate_dbs_table:
                            await conn.execute("truncate table mempool_dbs;")
                            truncate_dbs_table = False
                        await conn.copy_records_to_table(
                            'mempool_dbs',
                            columns=["tx_id", "timestamp", "child"],
                            records=dbs_records)

                        s_minute = int(time.time()) // 60
                        if s_minute % 60 == 0 and self.last_hour < s_minute // 60:
                            s_hour = s_minute // 60
                            self.last_hour = s_hour
                            if s_hour % 24 == 0 and self.last_day < s_hour // 24:
                                s_day = s_hour // 24
                                self.last_day = s_day
                            else:
                                s_day = None
                        else:
                            s_hour = None
                            s_day = None

                        if self.last_minute != s_minute or \
                                transactions["feeRate"]["bestHourly"] == 1:
                            best_fee_hourly.set(transactions["feeRate"]["best"])
                            f = 0
                            for i in best_fee_hourly.items:
                                f += i
                            f4 = 0
                            for i in best_fee_4h.items:
                                f4 += i
                            if len(best_fee_hourly.items):
                                transactions["feeRate"]["bestHourly"] = round(
                                    f / len(best_fee_hourly.items), 2)
                            else:
                                transactions["feeRate"]["bestHourly"] = \
                                    transactions["feeRate"]["best"]
                            if len(best_fee_4h.items):
                                transactions["feeRate"]["best4h"] = round(
                                    f4 / len(best_fee_4h.items), 2)
                            else:
                                transactions["feeRate"]["best4h"] = \
                                    transactions["feeRate"]["best"]

                        await conn.execute(
                            "INSERT INTO mempool_analytica "
                            "(minute, hour, day, inputs, outputs, transactions)"
                            " VALUES "
                            "($1, $2, $3, $4, $5, $6) "
                            "ON CONFLICT (minute) "
                            "DO UPDATE SET "
                            "   inputs = $4,"
                            "   outputs = $5, "
                            "   transactions = $6",
                            s_minute, s_hour, s_day, json.dumps(inputs),
                            json.dumps(outputs), json.dumps(transactions))

                        if s_hour is not None:
                            self.log.warning(
                                "Mempool analytica hourly point saved %s" % s_hour)

                q = time.time() - q
                self.log.info(
                    "Mempool transactions %s; STXO: %s; UTXO %s; DBS %s; round time %s;"
                    % (transactions["count"], inputs["count"], outputs["count"],
                       transactions["doublespend"]["count"] +
                       transactions["doublespendChilds"]["count"],
                       round(q, 4)))
                if q < 1:
                    await asyncio.sleep(1 - q)
                if q > 10:
                    self.log.warning("Mempool analytica is too slow %s" % q)

                if self.last_minute != s_minute or \
                        transactions["feeRate"]["best4h"] == 1:
                    self.last_minute = s_minute
                    self.log.debug(
                        "Mempool TX %s; STXO %s; UTXO %s; DBS %s; %s; %s; "
                        "Best fee %s/%s/%s; Round time %s;"
                        % (transactions["count"], inputs["count"],
                           outputs["count"],
                           transactions["doublespend"]["count"] +
                           transactions["doublespendChilds"]["count"],
                           format_bytes(transactions["size"]["total"]),
                           format_vbytes(transactions["vSize"]["total"]),
                           transactions["feeRate"]["best"],
                           transactions["feeRate"]["bestHourly"],
                           transactions["feeRate"]["best4h"],
                           round(q, 4)))
            else:
                await asyncio.sleep(2)

            # assert len(tx) == len(txsi)
            # assert len(tx) == len(txso)
            #
            # async with self.db_pool.acquire() as conn:
            #     v = await conn.fetch("SELECT invalid_transaction.tx_id FROM invalid_transaction "
            #                          " JOIN connector_unconfirmed_stxo ON connector_unconfirmed_stxo.tx_id = invalid_transaction.tx_id "
            #                          " ;")
            #     k = [t["tx_id"] for t in v]
            #     for t in v:
            #         print(rh2s(t["tx_id"]))
            #     v = await conn.fetch("SELECT outpoint, sequence FROM connector_unconfirmed_stxo WHERE tx_id = ANY($1);", k)
            #     print("u", len(v))
            #     uu = set()
            #     pp = set()
            #     for r in v:
            #         uu.add(r["outpoint"])
            #         pp.add((r["outpoint"], r["sequence"]))
            #     v = await conn.fetch("SELECT outpoint, sequence FROM invalid_stxo WHERE tx_id = ANY($1);", k)
            #     print("i", len(v))
            #     ii = set()
            #     for r in v:
            #         ii.add((r["outpoint"], r["sequence"]))
            #     e = 0
            #     for i in ii:
            #         if i[0] not in uu:
            #             print("none", i[1])
            #         else:
            #             e += 1
            #     print(">>", e)
            #
            #     v = await conn.fetch("SELECT count(*) from connector_unconfirmed_utxo WHERE out_tx_id = ANY($1);", k)
            #     print("connector_unconfirmed_utxo", v)
            #     v = await conn.fetch("SELECT count(*) from unconfirmed_transaction WHERE tx_id = ANY($1);", k)
            #     print("unconfirmed_transaction", v)
            #     v = await conn.fetch("SELECT count(*) from unconfirmed_transaction_map WHERE tx_id = ANY($1);", k)
            #     print("unconfirmed_transaction_map", v)
            #     ff = 0
            #     for i in pp:
            #         v = await conn.fetchval("SELECT count(*) from invalid_stxo WHERE outpoint = $1 and sequence = $2;", i[0], i[1])
            #         ff += v
            #     print("ff", ff)
            #     ll = list()
            #     v = await conn.fetch("SELECT outpoint, sequence, out_tx_id, tx_id, input_index, address, amount, pointer from connector_unconfirmed_stxo WHERE tx_id = ANY($1);", k)
            #     for i in v:
            #         ll.append((i["outpoint"], i["sequence"], i["out_tx_id"], i["tx_id"],
            #                    i["input_index"], i["address"], i["amount"], i["pointer"]))
            #     print("ll", len(ll))
            #     try:
            #         # await conn.copy_records_to_table('invalid_stxo',
            #         #                                  columns=["outpoint", "sequence", "out_tx_id", "tx_id",
            #         #                                           "input_index", "address", "amount", "pointer"],
            #         #                                  records=ll)
            #         # print("iok")
            #         ### v = await conn.fetch("DELETE FROM connector_unconfirmed_stxo WHERE tx_id = ANY($1);", k)
            #     except Exception as err:
            #         print(err)
            #         # await asyncio.sleep(50000)
            #     v = await conn.fetch("DELETE FROM unconfirmed_transaction_map WHERE tx_id = ANY($1);", k)
            #     print(v)
            #     # v = await conn.fetch("DELETE FROM connector_unconfirmed_stxo WHERE tx_id = ANY($1);", k)
            #     # print(v)
            #     v = await conn.fetch("SELECT tx_id FROM connector_unconfirmed_stxo WHERE tx_id = ANY($1);", k)
            #     print(v)
            #     v = await conn.fetch("SELECT out_tx_id FROM connector_unconfirmed_utxo WHERE out_tx_id = ANY($1);", k)
            #     print(v)
            #     v = await conn.fetch("DELETE FROM connector_unconfirmed_utxo WHERE out_tx_id = ANY($1);", k)
            #     print(v)
            #     v = await conn.fetch("SELECT out_tx_id FROM connector_unconfirmed_utxo WHERE out_tx_id = ANY($1);", k)
            #     print(v)
            #     if v == []:
            #         await conn.fetch("DELETE FROM unconfirmed_transaction WHERE tx_id = ANY($1);", k)

        except asyncio.CancelledError:
            self.log.warning("Mempool analytica task canceled")
            break
        except Exception as err:
            self.log.error("Mempool analytica task error: %s" % err)
            print(traceback.format_exc())
            await asyncio.sleep(10)
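# The processor above relies on a ListCache(window_seconds) rolling cache
# exposing a .set(value) method and an .items attribute. A minimal sketch of
# such a cache, assuming it retains only values younger than the window
# (hypothetical -- the real class may be ring-buffer based):
import time

class ListCache:
    def __init__(self, window):
        self.window = window          # retention window in seconds
        self._records = []            # list of (timestamp, value)

    def set(self, value):
        now = time.time()
        self._records.append((now, value))
        # drop entries that have aged out of the window
        self._records = [(t, v) for (t, v) in self._records
                         if now - t <= self.window]

    @property
    def items(self):
        return [v for (_, v) in self._records]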
    packages.add(arg)

chanko = Chanko()
candidates = chanko.get_package_candidates(packages, nodeps)
if len(candidates) == 0:
    print "Nothing to get..."
    return

bytes = 0
for candidate in candidates:
    bytes += candidate.bytes
    print candidate.filename

print "Amount of packages: %i" % len(candidates)
print "Need to get %s of archives" % format_bytes(bytes)

if pretend:
    return

if not force:
    print "Do you want to continue [y/N]?",
    if raw_input() not in ['Y', 'y']:
        print "aborted by user"
        return

result = chanko.get_packages(candidates=candidates)
if result:
    plan_name = "nodeps" if nodeps else "main"
    chanko.plan.update(packages, plan_name)
def compute_rectangles(node, xlim, ylim, recurse_level=0, dir_level=0, rects=None):
    # this function has to handle 3 cases:
    # - single file: return a single rect
    # - full directory: return single rect, divide and recurse on child files
    # - "file group": return no rect, divide and recurse on child files
    #
    # tree.path
    # tree.size
    # tree.children = [tree, tree, ...]
    # TODO: need to enforce append order of rects such that
    # drawing in iteration order ensures proper z ordering
    if rects is None:
        # avoid a shared mutable default argument: fresh list per top-level call
        rects = []

    if (dir_level > max_filesystem_depth
            or xlim[1] - xlim[0] < min_box_size
            or ylim[1] - ylim[0] < min_box_size):
        return rects

    if isinstance(node, list):
        node_type = 'file_group'
    elif len(node.children) > 0:
        node_type = 'directory'
    else:
        node_type = 'file'

    if node_type in ['directory', 'file']:
        # - define a box and text
        # - if directory, compute new padded bounds for child boxes
        txt = node.path if recurse_level == 0 else node.name
        txt += ' (%s)' % format_bytes(node.size)
        rect = {
            'x': xlim[0] + 1,
            'y': ylim[0] + 1,
            'dx': xlim[1] - xlim[0],
            'dy': ylim[1] - ylim[0],
            'depth': dir_level,
            'text': txt,
            'type': node_type,
        }
        rects.append(rect)

        if node_type == 'directory':
            # directory padding
            ylim[0] = ylim[0] + text_size + dir_text_offset
            ylim[1] = ylim[1] - 3
            xlim[0] = xlim[0] + 3
            xlim[1] = xlim[1] - 3

    if node_type in ['file_group', 'directory']:
        # - divide children into nearly equal parts,
        # - recurse on both halves
        if node_type == 'directory':
            children = node.children
            subdir_level = 1
            total_size = node.size
        else:
            children = node
            subdir_level = 0
            total_size = sum(x.size for x in node)

        groupA, xA, yA, groupB, xB, yB = squarify(children, xlim, ylim,
                                                  total_size=total_size)
        # recurse, threading the shared rects list through both halves
        compute_rectangles(groupA, xA, yA, recurse_level + 1,
                           dir_level + subdir_level, rects)
        compute_rectangles(groupB, xB, yB, recurse_level + 1,
                           dir_level + subdir_level, rects)

    return rects
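# compute_rectangles() assumes a squarify() helper that splits the children
# into two groups and the rectangle into two sub-rectangles proportional to
# the groups' total sizes. A minimal sketch, assuming the split runs across
# the longer axis (hypothetical -- the real helper may balance differently):
def squarify(children, xlim, ylim, total_size):
    """Split `children` into two groups and `xlim`/`ylim` into two boxes
    whose areas are proportional to the groups' total sizes."""
    zero = lambda lim: [lim[0], lim[0]]   # zero-width span; recursion stops on it
    if not children:
        return [], zero(xlim), zero(ylim), [], zero(xlim), zero(ylim)
    if len(children) == 1:
        # a lone child fills the whole box; the second group is empty
        return children[0], list(xlim), list(ylim), [], zero(xlim), zero(ylim)

    # greedily fill group A until it holds about half the bytes,
    # always leaving at least one child for group B
    group_a, size_a = [], 0
    for child in children[:-1]:
        group_a.append(child)
        size_a += child.size
        if size_a >= total_size / 2:
            break
    group_b = children[len(group_a):]

    frac = (size_a / total_size) if total_size else 0.5
    if xlim[1] - xlim[0] >= ylim[1] - ylim[0]:
        # split across the wider axis
        x_mid = xlim[0] + (xlim[1] - xlim[0]) * frac
        return (group_a, [xlim[0], x_mid], list(ylim),
                group_b, [x_mid, xlim[1]], list(ylim))
    y_mid = ylim[0] + (ylim[1] - ylim[0]) * frac
    return (group_a, list(xlim), [ylim[0], y_mid],
            group_b, list(xlim), [y_mid, ylim[1]])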
def parse_log_file(self):
    self.runtime = None
    self.sent = None
    self.received = None
    self.speed = None

    logger.debug('Parsing log file for snapshot. %r' % {
        'volume_id': self.volume.id,
        'snapshot_id': self.id,
    })

    try:
        with open(self.log_path) as log:
            start_time = None
            line = log.readline()

            try:
                line_split = shlex.split(line)
                if len(line_split) >= 2:
                    # Get epoch time
                    epoch = line_split[0] + 'T' + line_split[1]
                    epoch = datetime.datetime.strptime(epoch,
                        '%Y/%m/%dT%H:%M:%S')
                    start_time = int(time.mktime(epoch.timetuple()))
                else:
                    logger.warning('Failed to get snapshot start ' +
                        'time from log, line split length invalid. %r' % {
                            'volume_id': self.volume.id,
                            'snapshot_id': self.id,
                            'log_line': line,
                        })
            except ValueError:
                logger.warning('Failed to get snapshot start ' +
                    'time from log, value error. %r' % {
                        'volume_id': self.volume.id,
                        'snapshot_id': self.id,
                        'log_line': line,
                    })

            # Get last kilobyte of file
            log.seek(0, os.SEEK_END)
            file_size = log.tell()
            log.seek(max(file_size - 1024, 0))
            lines = log.readlines()

            # Find rsync sent command line output
            for line in lines:
                try:
                    line_split = shlex.split(line)
                except ValueError:
                    continue
                if len(line_split) < 10:
                    continue

                # Get rsync command
                command = line_split[3]

                if command == 'sent':
                    if start_time:
                        # Get runtime
                        epoch = line_split[0] + 'T' + line_split[1]
                        epoch = datetime.datetime.strptime(epoch,
                            '%Y/%m/%dT%H:%M:%S')
                        epoch = int(time.mktime(epoch.timetuple()))
                        self.runtime = utils.format_seconds(
                            epoch - start_time)

                    # Get snapshot info
                    try:
                        self.sent = utils.format_bytes(line_split[4])
                    except ValueError:
                        logger.warning('Failed to get sent bytes ' +
                            'from snapshot log, value error. %r' % {
                                'volume_id': self.volume.id,
                                'snapshot_id': self.id,
                                'log_line': line,
                            })

                    try:
                        self.received = utils.format_bytes(line_split[7])
                    except ValueError:
                        logger.warning('Failed to get received bytes ' +
                            'from snapshot log, value error. %r' % {
                                'volume_id': self.volume.id,
                                'snapshot_id': self.id,
                                'log_line': line,
                            })

                    try:
                        self.speed = utils.format_bytes(
                            line_split[9]) + '/sec'
                    except ValueError:
                        logger.warning('Failed to get transfer speed ' +
                            'from snapshot log, value error. %r' % {
                                'volume_id': self.volume.id,
                                'snapshot_id': self.id,
                                'log_line': line,
                            })
    except IOError:
        logger.debug('Failed to read log file for ' +
            'snapshot, IOError. %r' % {
                'volume_id': self.volume.id,
                'snapshot_id': self.id,
            })
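# For reference, the parser above assumes rsync log lines of roughly this
# shape (illustrative example, not taken from a real log):
#
#   2024/01/15 04:00:01 [1234] sent 1048576 bytes  received 2048 bytes  349525.33 bytes/sec
#
# shlex.split() then yields tokens where [0]/[1] carry the timestamp,
# [3] == 'sent', [4] the sent bytes, [7] the received bytes and [9] the
# transfer speed -- exactly the indices read in parse_log_file().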
hasrecord = (len(records) > 0)

if hasrecord:
    totalprice = float(bytesin + bytesout) / 1000 / 1000 / 1000 * price_per_gb
    starttime = records[-1].time
    starthash = records[-1].hashstr
    endtime = records[0].time
    endhash = records[0].hashstr
    log = add_log(title="BillCalc", params=[
        ("user", username),
        ("starttime", starttime.strftime(timefmt)),
        ("starthash", starthash),
        ("endtime", endtime.strftime(timefmt)),
        ("endhash", endhash),
        ("bytesin", bytesin),
        ("bytesout", bytesout),
        ("duration", duration),
        ("totalprice", totalprice)
    ])

    print("Usage for user [%s]:" % username)
    print("    From <%s> to <%s>" % (starttime.strftime("%Y-%m-%d %H:%M:%S"),
                                     endtime.strftime("%Y-%m-%d %H:%M:%S")))
    print("    Total upload:   " + format_bytes(bytesin))
    print("    Total download: " + format_bytes(bytesout))
    print("    Total usage:    " + format_seconds(duration))
    print("    Total price:    " + str(totalprice))
    print("    Hash code:      " + log.hashstr)
else:
    print("No usage for user [%s] since <%s>"
          % (username,
             datetime.datetime.strptime(starttime, timefmt).strftime("%Y-%m-%d %H:%M:%S")
             if starttime else "ever"))
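# Worked example of the price formula above: 3 GB in + 2 GB out at
# price_per_gb = 0.5 gives (3e9 + 2e9) / 1000 / 1000 / 1000 * 0.5 = 2.5
# (note the decimal 1000^3 GB convention, not 1024^3 GiB).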