Example #1
0
def main():
    inputHandle = loadFile("08/input.txt")
    content = inputHandle.read()
    inputList = list(content)
    inputIntList = list(map(int, inputList))

    horizontalChunks = list(chunks(inputIntList, 25))
    layerChunks = list(chunks(horizontalChunks, 6))

    leastZeros = None
    leastZerosIndex = None
    for lIndex, layer in enumerate(layerChunks):
        zeroCount = 0
        for line in layer:
            zeroCount += line.count(0)
        if leastZeros is None or zeroCount < leastZeros:
            leastZeros = zeroCount
            leastZerosIndex = lIndex

    ones = 0
    twos = 0
    for line in layerChunks[leastZerosIndex]:
        ones += line.count(1)
        twos += line.count(2)
    print(ones * twos)
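
Note: every example on this page depends on a user-defined chunks helper that the page itself never shows. Below is a minimal sketch consistent with how it is called throughout (fixed-size consecutive slices, usable wherever a list is expected); loadFile is likewise assumed to be a thin wrapper around open. Both names are taken from the call sites above, not from any shown source.

def chunks(lst, n):
    # split lst into consecutive slices of length n (the last may be shorter)
    return [lst[i:i + n] for i in range(0, len(lst), n)]

def loadFile(path):
    # assumed to simply open the puzzle input for reading
    return open(path)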
Example #2
0
def main():
    inputHandle = loadFile("08/input.txt")
    content = inputHandle.read()
    inputList = list(content)
    inputIntList = list(map(int, inputList))

    horizontalChunks = list(chunks(inputIntList, 25))
    layerChunks = list(chunks(horizontalChunks, 6))
    layerChunks.reverse()

    # Flatten image
    imageValues = None
    for i, layer in enumerate(layerChunks):
        if i == 0:
            imageValues = layer
            continue
        for ii, row in enumerate(layer):
            for iii, digit in enumerate(row):
                if digit != 2:
                    imageValues[ii][iii] = digit

    # Convert to RGBa
    for ii, row in enumerate(imageValues):
        for iii, digit in enumerate(row):
            if digit == 0:  # Black
                imageValues[ii][iii] = (0, 0, 0, 255)
            if digit == 1:  # White
                imageValues[ii][iii] = (255, 255, 255, 255)
            if digit == 2:  # Transparent
                imageValues[ii][iii] = (0, 0, 0, 0)

    flattened = [val for sublist in imageValues for val in sublist]
    img = Image.new("RGBA", (25, 6))
    img.putdata(flattened)
    img.show()
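
To see why layerChunks is reversed first: compositing back-to-front lets every non-transparent digit of a nearer layer simply overwrite the pixel beneath it. A tiny hypothetical run of the same loop on a 2x2 image with two layers:

layers = [[[2, 1], [0, 2]],   # front layer; 2 means transparent
          [[0, 0], [1, 1]]]   # back layer
layers.reverse()              # composite from the back forwards
image = [row[:] for row in layers[0]]
for layer in layers[1:]:
    for y, row in enumerate(layer):
        for x, digit in enumerate(row):
            if digit != 2:    # opaque pixels overwrite whatever is beneath
                image[y][x] = digit
print(image)                  # [[0, 1], [0, 1]]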
Example #3
0
def iter_page_data():
    """
    *in template.html*
    for tr_repos in tabulated_repos:
        for filename, tubled_inforows in tr_repos:
            for string, url, do_href in tubled_inforow:
    """
    content_tinydb = load_db()
    all_repo = content_tinydb.all()
    all_repo = [r for r in all_repo if r['gif_success']]
    sortkey_dict = {'most_stars': "stargazers_count",
                    'most_forks': "forks",
                    'recently_updated': "updated_at", }
    for filename, headline_menu in iter_headline():
        sortkey = sortkey_dict[filename]
        all_repo.sort(key=lambda repo: repo[sortkey], reverse=True)
        chunked_repos = chunks(all_repo, 9)
        max_page_num = len(chunked_repos)
        for page_index, nine_repo in enumerate(chunked_repos):
            tubled_inforows = [to_tubled_inforow(repo) for repo in nine_repo]
            tabulated_repos = chunks(tubled_inforows, 3)
            yield filename, page_index + 1, headline_menu, tabulated_repos, max_page_num, 30
    sortkey = "stargazers_count"
    all_repo.sort(key=lambda repo: repo[sortkey], reverse=True)
    user_tags_dict = {d['username'].lower(): d['tags']
                      for d in location_db.all()}
    # print(user_tags_dict.keys())
    tag_users_dict = {}
    for username, tags in user_tags_dict.items():
        for tag in tags:
            tag_users_dict.setdefault(tag, []).append(username)
    user_repos_dict = {}
    for repo in all_repo:
        username = repo['full_name'].split('/')[0].lower()
        # print(username)
        user_repos_dict.setdefault(username, []).append(repo)
    for tag, count in tags_info:
        usernames = tag_users_dict[tag]
        tag_repos = []
        for username in usernames:
            tag_repos.extend(user_repos_dict.get(username, []))
            # print(username)
        chunked_repos = chunks(tag_repos, 9)
        max_page_num = len(chunked_repos)
        for page_index, nine_repo in enumerate(chunked_repos):
            tubled_inforows = [to_tubled_inforow(repo) for repo in nine_repo]
            tabulated_repos = chunks(tubled_inforows, 3)
            yield 'location-' + tag, page_index + 1, deactivated_headline, tabulated_repos, max_page_num, 30
    yield 'locations', '0', deactivated_headline, [], 1, 9999999
Example #4
0
	def query_utxo_set(self, txout):
		if not isinstance(txout, list):
			txout = [txout]
		txids = [h[:64] for h in txout]
		txids = list(set(txids)) #remove duplicates
		#self.BLOCKR_MAX_ADDR_REQ_COUNT = 2
		if len(txids) > self.BLOCKR_MAX_ADDR_REQ_COUNT:
			txids = common.chunks(txids, self.BLOCKR_MAX_ADDR_REQ_COUNT)
		else:
			txids = [txids]
		data = []
		for ids in txids:
			blockr_url = 'http://' + self.blockr_domain + '.blockr.io/api/v1/tx/info/'
			blockr_data = json.loads(btc.make_request(blockr_url + ','.join(ids)))['data']
			if not isinstance(blockr_data, list):
				blockr_data = [blockr_data]
			data += blockr_data
		result = []
		for txo in txout:
			txdata = [d for d in data if d['tx'] == txo[:64]][0]
			vout = [v for v in txdata['vouts'] if v['n'] == int(txo[65:])][0]
			if vout['is_spent'] == 1:
				result.append(None)
			else:
				result.append({'value': int(Decimal(vout['amount'])*Decimal('1e8')),
					'address': vout['address'], 'script': vout['extras']['script']})
		return result
Example #5
0
def dbxref_dict(server, seqfeature_ids):
    db_qv = {}
    for feat_chunk in chunks(seqfeature_ids, 900):
        #sql = "SELECT s.seqfeature_id, d.dbname || ':' || d.accession AS kegg_id "\
        #        "FROM seqfeature_dbxref s "\
        #        "JOIN dbxref d USING(dbxref_id) "\
        #        "WHERE s.seqfeature_id IN ({})".format(generate_placeholders(len(feat_chunk)))
        sql = "SELECT s.seqfeature_id, d.dbname, d.accession, t.name, dqv.value "\
                "FROM seqfeature_dbxref s "\
                "JOIN dbxref d USING(dbxref_id) "\
                "LEFT JOIN dbxref_qualifier_value dqv USING(dbxref_id) "\
                "LEFT JOIN term t USING(term_id) "\
                "WHERE s.seqfeature_id IN ({}) "\
                "ORDER BY s.seqfeature_id, d.dbname, s.rank".format(generate_placeholders(len(feat_chunk)))
        for seqfeature_id, dbname, dbxref, name, value in server.adaptor.execute_and_fetchall(sql, tuple(feat_chunk)):
        #for seqfeature_id, dbxref in server.adaptor.execute_and_fetchall(sql, tuple(feat_chunk)):
            try:
                db_qv[seqfeature_id][dbname] = dbxref
            except KeyError:
                db_qv[seqfeature_id] = {}
                db_qv[seqfeature_id][dbname] = dbxref

            if name:
                db_qv[seqfeature_id][name] = value
    return db_qv
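
A chunk size of 900 plausibly keeps each IN (...) list under SQLite's default limit of 999 bound parameters; that limit is an assumption about the backend, since the code only shows the chunking. The generate_placeholders helper is also not shown; a minimal sketch consistent with its use here:

def generate_placeholders(n):
    # one placeholder per bound parameter, e.g. '%s, %s, %s' for n = 3;
    # the real helper may emit '?' instead, depending on the driver's paramstyle
    return ', '.join(['%s'] * n)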
Example #6
0
 def query_utxo_set(self, txout):
     if not isinstance(txout, list):
         txout = [txout]
     txids = [h[:64] for h in txout]
     txids = list(set(txids))  #remove duplicates
     #self.BLOCKR_MAX_ADDR_REQ_COUNT = 2
     if len(txids) > self.BLOCKR_MAX_ADDR_REQ_COUNT:
         txids = common.chunks(txids, self.BLOCKR_MAX_ADDR_REQ_COUNT)
     else:
         txids = [txids]
     data = []
     for ids in txids:
         blockr_url = 'https://' + self.blockr_domain + '.blockr.io/api/v1/tx/info/'
         blockr_data = json.loads(
             btc.make_request(blockr_url + ','.join(ids)))['data']
         if not isinstance(blockr_data, list):
             blockr_data = [blockr_data]
         data += blockr_data
     result = []
     for txo in txout:
         txdata = [d for d in data if d['tx'] == txo[:64]][0]
         vout = [v for v in txdata['vouts'] if v['n'] == int(txo[65:])][0]
         if vout['is_spent'] == 1:
             result.append(None)
         else:
             result.append({
                 'value':
                 int(Decimal(vout['amount']) * Decimal('1e8')),
                 'address':
                 vout['address'],
                 'script':
                 vout['extras']['script']
             })
     return result
Example #7
0
def qv_dict(server, seqfeature_ids):
    qv = {}
    for feat_chunk in chunks(seqfeature_ids, 900):
        feat_chunk2 = tuple(feat_chunk)
        qual_select_sql = 'SELECT seqfeature_id, name, value FROM seqfeature_qualifier_value qv JOIN term t ON t.term_id = qv.term_id WHERE seqfeature_id IN ({})'.format(generate_placeholders(len(feat_chunk)))

        taxonomy_sql = 'SELECT seqfeature_id, bioentry.name, biodatabase.name, lineage.lineage FROM seqfeature JOIN bioentry USING(bioentry_id) JOIN biodatabase USING(biodatabase_id) LEFT JOIN lineage ON taxon_id = lineage.id WHERE seqfeature_id IN ({})'.format(generate_placeholders(len(feat_chunk)))
        for seqfeature_id, contig, namespace, lineage in server.adaptor.execute_and_fetchall(taxonomy_sql, feat_chunk2):
            try:
                if lineage:
                    qv[seqfeature_id]['taxonomy'] = lineage
                    qv[seqfeature_id]['organism'] = lineage.split(';')[-1]

                qv[seqfeature_id]['bioentry'] = contig
                qv[seqfeature_id]['sample'] = namespace
            except KeyError:
                qv[seqfeature_id] = {}
                if lineage:
                    qv[seqfeature_id]['taxonomy'] = lineage
                    qv[seqfeature_id]['organism'] = lineage.split(';')[-1]

                qv[seqfeature_id]['bioentry'] = contig
                qv[seqfeature_id]['sample'] = namespace

        for seqfeature_id, name, value in server.adaptor.execute_and_fetchall(qual_select_sql, feat_chunk2):
            if not name:
                continue
            try:
                qv[seqfeature_id][name] = value
            except KeyError:
                qv[seqfeature_id] = {}
                qv[seqfeature_id][name] = value

    return qv
Example #8
0
def qv_dict(server, seqfeature_ids):
    qv = {}
    for feat_chunk in chunks(seqfeature_ids, 900):
        feat_chunk2 = tuple(feat_chunk)
        qual_select_sql = 'SELECT seqfeature_id, name, value FROM seqfeature_qualifier_value qv, term t WHERE seqfeature_id IN ({}) AND t.term_id = qv.term_id'.format(generate_placeholders(len(feat_chunk)))

        taxonomy_sql = 'SELECT seqfeature_id, lineage.lineage FROM seqfeature JOIN bioentry USING(bioentry_id) JOIN lineage ON taxon_id = lineage.id WHERE seqfeature_id IN ({})'.format(generate_placeholders(len(feat_chunk)))
        for seqfeature_id, lineage in server.adaptor.execute_and_fetchall(taxonomy_sql, feat_chunk2):
            try:
                qv[seqfeature_id]['taxonomy'] = lineage
                qv[seqfeature_id]['organism'] = lineage.split(';')[-1]
            except KeyError:
                qv[seqfeature_id] = {}
                qv[seqfeature_id]['taxonomy'] = lineage
                qv[seqfeature_id]['organism'] = lineage.split(';')[-1]

        for seqfeature_id, name, value in server.adaptor.execute_and_fetchall(qual_select_sql, feat_chunk2):
            if not name:
                continue
            try:
                qv[seqfeature_id][name] = value
            except KeyError:
                qv[seqfeature_id] = {}
                qv[seqfeature_id][name] = value

    return qv
Example #9
0
def dbxref_dict(server, seqfeature_ids):
    db_qv = {}
    for feat_chunk in chunks(seqfeature_ids, 900):
        #sql = "SELECT s.seqfeature_id, d.dbname || ':' || d.accession AS kegg_id "\
        #        "FROM seqfeature_dbxref s "\
        #        "JOIN dbxref d USING(dbxref_id) "\
        #        "WHERE s.seqfeature_id IN ({})".format(generate_placeholders(len(feat_chunk)))
        sql = "SELECT s.seqfeature_id, d.dbname, d.accession, t.name, dqv.value "\
                "FROM seqfeature_dbxref s "\
                "JOIN dbxref d USING(dbxref_id) "\
                "LEFT JOIN dbxref_qualifier_value dqv USING(dbxref_id) "\
                "LEFT JOIN term t USING(term_id) "\
                "WHERE s.seqfeature_id IN ({}) "\
                "ORDER BY s.seqfeature_id, d.dbname, s.rank".format(generate_placeholders(len(feat_chunk)))
        for seqfeature_id, dbname, dbxref, name, value in server.adaptor.execute_and_fetchall(sql, tuple(feat_chunk)):
        #for seqfeature_id, dbxref in server.adaptor.execute_and_fetchall(sql, tuple(feat_chunk)):
            try:
                db_qv[seqfeature_id][dbname] = dbxref
            except KeyError:
                db_qv[seqfeature_id] = {}
                db_qv[seqfeature_id][dbname] = dbxref

            if name:
                db_qv[seqfeature_id][name] = value
    return db_qv
Example #10
0
async def send_to_remote(delta: Delta) -> None:
    """
    Send files to remote with adb push.
    adb is run in parallel to increase speed.
    This can be turned off by setting batch_size to 1.
    NOTE: in order to actually preserve metadata, we have to send and touch the file.
    I hate you Android.
    """
    base_adb_command = make_adb_command('push')

    def run_cmd(file_stat: FileStat):
        source = file_stat.filename
        destination = delta.remote_path / file_stat.relname
        adb_command = [*base_adb_command, source, destination.as_posix()]
        return run_adb_command(adb_command)

    with bar(len(delta.upload),
             f'INFO:{__name__}:Pushing files to remote') as progress:
        for chunk in chunks(delta.upload, config.adb_batch_size):
            send_queue = map(run_cmd, chunk)
            send_result = await asyncio.gather(*send_queue)
            filtered_chunk = [
                stat for stat, (errored, _) in zip(chunk, send_result)
                if not errored
            ]

            await touch_files(delta, filtered_chunk)
            progress.update(config.adb_batch_size)
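
The loop above bounds concurrency by awaiting one adb_batch_size-sized batch of pushes at a time rather than gathering everything at once. The same pattern in isolation, with a hypothetical work coroutine standing in for run_adb_command:

import asyncio

def chunks(lst, n):
    return [lst[i:i + n] for i in range(0, len(lst), n)]

async def work(item):
    await asyncio.sleep(0)   # stand-in for an adb subprocess call
    return (False, item)     # (errored, output), mirroring send_to_remote

async def push_all(items, batch_size=4):
    results = []
    for batch in chunks(items, batch_size):
        # at most batch_size coroutines are in flight per iteration
        results += await asyncio.gather(*map(work, batch))
    return results

print(asyncio.run(push_all(list(range(10)))))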
Example #11
0
def make_wallets(n, wallet_structures=None, mean_amt=1, sdev_amt=0):
    '''n: number of wallets to be created
       wallet_structures: array of n arrays, each subarray
       specifying the number of addresses to be populated with coins
       at each depth (for now, this will only populate coins into 'receive' addresses)
       mean_amt: the number of coins (in btc units) in each address as above
       sdev_amt: if randomness in amounts is desired, specify here.
       Returns: a dict of dicts of form {0:{'seed':seed,'wallet':Wallet object},1:..,}'''
    if len(wallet_structures) != n:
        raise Exception("Number of wallets doesn't match wallet structures")
    seeds = common.chunks(binascii.hexlify(os.urandom(15 * n)), n)
    wallets = {}
    for i in range(n):
        wallets[i] = {
            'seed': seeds[i],
            'wallet': common.Wallet(seeds[i], max_mix_depth=5)
        }
        for j in range(5):
            for k in range(wallet_structures[i][j]):
                deviation = sdev_amt * random.random()
                amt = mean_amt - sdev_amt / 2.0 + deviation
                if amt < 0: amt = 0.001
                common.bc_interface.grab_coins(
                    wallets[i]['wallet'].get_receive_addr(j), amt)
    return wallets
Example #12
0
 def applyArgs(self, args):
     config.color = args.color
     config.warningsOnly = args.warnings_only
     config.throttle = args.throttle
     config.useCheckClassNames = args.useCheckClassNames
     if args.verbose:
         log.startLogging(sys.stdout)
     self.chunks = list(common.chunks(args.addresses, 100))
     self.args = args
Example #13
0
 def applyArgs(self, args):
     config.color = args.color
     config.warningsOnly = args.warnings_only
     config.throttle = args.throttle
     config.useCheckClassNames = args.useCheckClassNames
     if args.verbose:
         log.startLogging(sys.stdout)
     self.chunks = list(common.chunks(args.addresses, 100))
     self.args = args
Example #14
0
async def del_remote(delta: Delta) -> None:
    """Delete files from remote with the rm command"""
    with bar(len(delta.remove),
             f'INFO:{__name__}:Deleting remote files') as progress:
        base_adb_command = make_adb_command('shell')
        for chunk in chunks(delta.remove, config.command_batch_size):
            adb_command = [
                *base_adb_command, 'rm', '-r',
                *[shlex.quote(file_stat.filename) for file_stat in chunk]
            ]
            await run_adb_command(adb_command)
            progress.update(config.command_batch_size)
Example #15
0
def mix_pop(population):
    # gather() population into root node

    all_pop = COMM.gather(population, root=0)

    print("MIXING")
    if rank == 0:
        # flatten gathered list of lists
        all_pop = [b for val in all_pop for b in val]
        print(all_pop[0])
        np.random.shuffle(all_pop)  # shuffle population
        # chunk it for transmission
        all_pop = list(chunks(all_pop, len(all_pop) // size))
    new_pop = COMM.scatter(all_pop, root=0)
    return new_pop
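
One caveat with this use of chunks: COMM.scatter needs exactly size pieces at the root, but slicing by len(all_pop) // size yields one extra short chunk whenever the population is not evenly divisible by size. If that can happen, an even split such as numpy.array_split avoids the mismatch (a sketch, not part of the original code):

import numpy as np

def even_chunks(lst, n_parts):
    # np.array_split always returns exactly n_parts pieces,
    # whose lengths differ by at most one
    return [part.tolist() for part in np.array_split(lst, n_parts)]

print(even_chunks(list(range(10)), 3))  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]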
Example #16
0
    def setUp(self):
        # create 7 new random wallets.
        # put about 10 coins in each, spread over random mixdepths
        # in units of 0.5
        seeds = common.chunks(binascii.hexlify(os.urandom(15 * 7)), 7)
        self.wallets = {}
        for i in range(7):
            self.wallets[i] = {'seed': seeds[i],
                               'wallet': common.Wallet(seeds[i], max_mix_depth=5)}
        # adding coins somewhat randomly, spread over all 5 depths
        for i in range(7):
            w = self.wallets[i]['wallet']
            for j in range(5):
                for k in range(4):
                    base = 0.001 if i == 6 else 1.0
                    amt = base + random.random()  # average is 0.5 for tumbler, else 1.5
                    common.bc_interface.grab_coins(w.get_receive_addr(j), amt)
Example #17
0
async def send_to_local(delta: Delta) -> None:
    """Pull files from remote to local"""
    base_adb_command = [*make_adb_command('pull'), '-a']

    def run_cmd(file_stat: FileStat):
        source = file_stat.filename
        destination = delta.local_path / file_stat.relname
        # mkdir parent path just in case because adb on Windows will error
        destination.parent.mkdir(parents=True, exist_ok=True)
        adb_command = [*base_adb_command, source, str(destination)]
        return run_adb_command(adb_command)

    with bar(len(delta.upload),
             f'INFO:{__name__}:Pulling files to local') as progress:
        for chunk in chunks(delta.upload, config.adb_batch_size):
            send_queue = map(run_cmd, chunk)
            await asyncio.gather(*send_queue)
            progress.update(config.adb_batch_size)
Example #18
0
File: env_path.py  Project: SiegeLord/dil
def append2PATH(paths, tmp_path):
  """ Appends the given argument to the PATH in the registry.
      The paths argument can contain multiple paths separated by ';'. """
  from common import is_win32, Path, call_read, subprocess, \
    chunks, tounicode, tounicodes
  paths, tmp_path = tounicodes((paths, tmp_path))
  sep = ";"
  path_list = paths.split(sep)

  # 1. Get the current PATH value.
  echo_cmd = ["wine", "cmd.exe", "/c", "echo", "%PATH%"][is_win32:]
  CUR_PATH = call_read(echo_cmd).rstrip('\r\n') # Remove trailing \r\n.
  CUR_PATH = tounicode(CUR_PATH)
  if not is_win32 and '/' in paths: # Convert the Unix paths to Windows paths.
    winepath = lambda p: call_read(["winepath", "-w", p])[:-1]
    path_list = map(winepath, path_list)
    path_list = tounicodes(path_list)

  # 2. Avoid appending the same path(s) again. Strip '\' before comparing.
  op_list = [p.rstrip('\\') for p in CUR_PATH.split(sep)]
  path_list = filter((lambda p: p.rstrip('\\') not in op_list), path_list)
  if not path_list: return

  # 3. Create a temporary reg file.
  paths = sep.join(path_list)
  NEW_PATH = CUR_PATH + sep + paths + '\0' # Join with the current PATH.
  if 1 or is_win32: # Encode in hex.
    NEW_PATH = NEW_PATH.encode('utf-16')[2:].encode('hex')
    NEW_PATH = ','.join(chunks(NEW_PATH, 2)) # Comma separated byte values.
    var_type = 'hex(2)'
  #else: # Escape backslashes and wrap in quotes.
    #NEW_PATH = '"%s"' % NEW_PATH.replace('\\', '\\\\')
    #var_type = 'str(2)'
  # Write to "tmp_path/newpath.reg".
  tmp_reg = Path(tmp_path)/"newpath.reg"
  tmp_reg.open("w", "utf-16").write("""Windows Registry Editor Version 5.00\r
\r
[HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\
Control\\Session Manager\\Environment]\r
"Path"=%s:%s\r\n""" % (var_type, NEW_PATH))

  # 4. Apply the reg file to the registry. "/s" means run silently.
  regedit = ["wine", "regedit.exe", "/s", tmp_reg][is_win32:]
  subprocess.call(regedit)
Example #19
0
def append2PATH(paths, tmp_path=""):
  """ Appends the given argument to the PATH variable in the Windows registry.
      The paths argument can contain multiple paths separated by ';'. """
  from common import is_win32, Path, call_proc, call_read, chunks
  sep = ";"
  # Split by sep, make absolute and normalize.
  path_list = map(lambda p: Path(p).abspath.normpath, paths.split(sep))

  # 1. Get the current PATH value.
  echo_cmd = ["wine", "cmd.exe", "/c", "echo", "%PATH%"][is_win32:]
  CUR_PATH = call_read(echo_cmd).rstrip('\r\n') # Remove trailing \r\n.
  if not is_win32 and '/' in paths: # Convert the Unix paths to Windows paths.
    winepath = lambda p: call_read("winepath", "-w", p)[:-1]
    path_list = map(winepath, path_list)

  # 2. Avoid appending the same path(s) again. Strip '\' before comparing.
  op_list = [p.rstrip('\\') for p in CUR_PATH.split(sep)]
  path_list = filter((lambda p: p.rstrip('\\') not in op_list), path_list)
  if not path_list: return

  # 3. Create a temporary reg file.
  paths = sep.join(path_list)
  NEW_PATH = CUR_PATH + sep + paths + '\0' # Join with the current PATH.
  if 1 or is_win32: # Encode in hex.
    NEW_PATH = NEW_PATH.encode('utf-16')[2:].encode('hex')
    NEW_PATH = ','.join(chunks(NEW_PATH, 2)) # Comma separated byte values.
    var_type = 'hex(2)'
  #else: # Escape backslashes and wrap in quotes.
    #NEW_PATH = '"%s"' % NEW_PATH.replace('\\', '\\\\')
    #var_type = 'str(2)'
  # Write to "tmp_path/newpath.reg".
  tmp_reg = Path(tmp_path)/"newpath.reg"
  tmp_reg.write("""Windows Registry Editor Version 5.00\r
\r
[HKEY_LOCAL_MACHINE\\System\\CurrentControlSet\\\
Control\\Session Manager\\Environment]\r
"Path"=%s:%s\r\n""" % (var_type, NEW_PATH), encoding="utf-16")

  # 4. Apply the reg file to the registry. "/s" means run silently.
  regedit = ["wine", "regedit.exe", "/s", tmp_reg][is_win32:]
  call_proc(regedit)
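
Both append2PATH variants are Python 2 (str.encode('hex') no longer exists). For reference, a sketch of step 3's encoding in Python 3 syntax: REG_EXPAND_SZ ('hex(2)') data is the UTF-16 bytes of the string rendered as comma-separated hex byte values; utf-16-le avoids the BOM that the code above strips with [2:].

def chunks(s, n):
    return [s[i:i + n] for i in range(0, len(s), n)]

NEW_PATH = "C:\\foo;C:\\bar\0"
hexed = NEW_PATH.encode('utf-16-le').hex()
print(','.join(chunks(hexed, 2)))  # 43,00,3a,00,5c,00,...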
Example #20
0
def make_wallets(n, wallet_structures=None, mean_amt=1, sdev_amt=0):
    '''n: number of wallets to be created
       wallet_structures: array of n arrays, each subarray
       specifying the number of addresses to be populated with coins
       at each depth (for now, this will only populate coins into 'receive' addresses)
       mean_amt: the number of coins (in btc units) in each address as above
       sdev_amt: if randomness in amounts is desired, specify here.
       Returns: a dict of dicts of form {0:{'seed':seed,'wallet':Wallet object},1:..,}'''
    if len(wallet_structures) != n:
        raise Exception("Number of wallets doesn't match wallet structures")
    seeds = common.chunks(binascii.hexlify(os.urandom(15 * n)), n)
    wallets = {}
    for i in range(n):
        wallets[i] = {'seed': seeds[i],
                      'wallet': common.Wallet(seeds[i], max_mix_depth=5)}
        for j in range(5):
            for k in range(wallet_structures[i][j]):
                deviation = sdev_amt * random.random()
                amt = mean_amt - sdev_amt / 2.0 + deviation
                if amt < 0:
                    amt = 0.001
                common.bc_interface.grab_coins(
                    wallets[i]['wallet'].get_receive_addr(j), amt)
    return wallets
Example #21
0
async def get_remote_files(path: str,
                           max_depth: int = 1,
                           output_dict: FileStatDict = {}) -> FileStatDict:
    """
    This uses find. May not be available in most systems, but I don't care.
    Updated to use find and stat separately with chunking to reduce overhead
    """
    base_adb_command = make_adb_command('shell')
    adb_command = [
        *base_adb_command, 'find', path, '-type', 'f', '-maxdepth',
        str(max_depth)
    ]
    log_adb_command(adb_command)

    errored, find_output = await run_adb_command(adb_command,
                                                 combine=False,
                                                 bypass_dry_run=True)
    if errored:
        raise Exception('Unable to run find command on remote')

    find_output = find_output.splitlines()
    with bar(len(find_output),
             f'INFO:{__name__}:Creating remote file list') as slider:
        for find_slice in chunks(find_output, config.command_batch_size):
            adb_command = [
                *base_adb_command, 'stat', '-c', '%N:%s:%Y',
                *map(shlex.quote, find_slice)
            ]
            log_adb_command(adb_command)

            errored, output = await run_adb_command(adb_command,
                                                    combine=False,
                                                    bypass_dry_run=True)
            if not errored:
                for line in output.splitlines():
                    file_stat = RemoteFileStat(path, line, ':')
                    output_dict[file_stat.relname] = file_stat
            slider.update(config.command_batch_size)

    return output_dict
Example #22
0
 def plot_stats_for_system(self,sys_name,stat,
                           max_responses_per_fig:int=5,
                           subplot_kwargs={}
                           ):
     """
     Produces a plot of a given statistic, taken across multiple analyses
     for a specified sub-system
     
     Required:
     
     * `sys_name`, string giving name of system
     
     * `stat`, string to specify statistic to be plotted. E.g. 'absmax'
     
     Optional:
         
     * `max_responses_per_fig`, integer to denote maximum number of
       responses to be plotted in each figure. If None, all responses will
       be plotted via a single figure. The default value (=5) should give
       nice plots in most cases.
       
       _Users are advised to tweak the appearance 
       of figures, e.g. using the `pyplot.subplots_adjust()` method._
       
     * `subplot_kwargs`, dict of keyword arguments to be passed to 
       `pyplot.subplots()` method, to customise subplots (e.g. share axes)        
       
     """
     
     # Re-collate statistics as required
     if self.stats_df is None:
         stats_df = self.collate_stats()
     else:
         stats_df = self.stats_df
     
     # Slice for requested stat
     try:
         stats_df = stats_df.xs(stat,level=-1,axis=1)
     except KeyError:
         raise KeyError("Invalid statistic selected!")
         
     # Get stats for just this system
     df_thissys = stats_df.xs(sys_name,level=0,axis=0)
         
     # Obtain responses names for this subsystem
     response_names = df_thissys.index.values
     nResponses = len(response_names)
     
     if max_responses_per_fig is None:
         max_responses_per_fig = nResponses
     
     fig_list = []
     
     for _response_names in chunks(response_names,max_responses_per_fig):
         
         # Create figure, with one subplot per response
         fig, axlist = plt.subplots(len(_response_names),
                                    sharex=True,
                                    **subplot_kwargs)
         
          if len(_response_names) == 1:
              axlist = [axlist]  # subplots() returns a bare Axes when nrows=1

          fig_list.append(fig)
     
         for i, (r,ax) in enumerate(zip(_response_names,axlist)):
                             
             # Get series for this response
             df = df_thissys.loc[r]
             
             # Reshape such that index will be x-variable for plot
             df = df.unstack()
             
             # Make plot
             ax = df.plot(ax=ax,legend=False)
             
              ax.set_ylabel(r,
                            rotation=0,
                            fontsize='small',
                            horizontalalignment='right',
                            verticalalignment='center')
             
             if i==0:
                 # Add legend to figure
                 fig.legend(ax.lines, df.columns,fontsize='x-small') 
                 
         fig.subplots_adjust(left=0.15,right=0.95)
         fig.align_ylabels()
                 
     return fig_list