def merge_port_trees():
    # Overlay every configured ports tree onto ${PORTS_OVERLAY} using hard
    # links; trees are applied in config['port_trees'] order, so later trees
    # override ports supplied by earlier ones.
    for i in config['port_trees']:
        info(e('Merging ports tree ${i}'))
        # Optional UID/GID override files at the root of each tree.
        uids = "%s/%s" % (i, "UIDs")
        gids = "%s/%s" % (i, "GIDs")
        # Ports live two levels deep: <category>/<portname>.
        for p in glob('${i}/*/*'):
            portpath = '/'.join(p.split('/')[-2:])
            if portpath.startswith('Mk'):
                # Mk/ infrastructure entries are merged file-by-file instead
                # of wholesale replacement, so existing Mk content survives.
                if os.path.isdir(e('${PORTS_OVERLAY}/${portpath}')):
                    sh('cp -lf ${p}/* ${PORTS_OVERLAY}/${portpath}/')
                else:
                    sh('cp -l ${p} ${PORTS_OVERLAY}/${portpath}')
            else:
                # Regular port: drop any existing copy, then hard-link the
                # new one in (cp -l avoids duplicating file data).
                sh('rm -rf ${PORTS_OVERLAY}/${portpath}')
                sh('mkdir -p ${PORTS_OVERLAY}/${portpath}')
                sh('cp -lr ${p}/ ${PORTS_OVERLAY}/${portpath}')
        # Install the tree's UID/GID override files, if present.
        if os.path.exists(uids):
            sh('rm -f ${PORTS_OVERLAY}/UIDs')
            sh('cp -l ${uids} ${PORTS_OVERLAY}/UIDs')
        if os.path.exists(gids):
            sh('rm -rf ${PORTS_OVERLAY}/GIDs')
            sh('cp -l ${gids} ${PORTS_OVERLAY}/GIDs')
def merge_port_trees():
    # Overlay each configured ports tree onto ${PORTS_OVERLAY}; trees are
    # applied in config['port_trees'] order, so later ones win.
    for i in config['port_trees']:
        info(e('Merging ports tree ${i}'))
        # Ports are laid out as <category>/<portname> below the tree root.
        for p in glob('${i}/*/*'):
            portpath = '/'.join(p.split('/')[-2:])
            # Replace any existing copy wholesale, then hard-link the new
            # port in (cp -l avoids duplicating file data on disk).
            sh('rm -rf ${PORTS_OVERLAY}/${portpath}')
            sh('mkdir -p ${PORTS_OVERLAY}/${portpath}')
            sh('cp -lr ${p}/ ${PORTS_OVERLAY}/${portpath}')
def show_matches(text_pattern, path_pattern, re_options, recursive, names_only,
                 counts, invert_match, suppress_prefix, max_lines, match_only):
    """Show matches of regular expression given by <text_pattern> and regex
    options <re_options> in files matched by <path_pattern>.

    If not <path_pattern> then read from stdin.
    If <recursive> then recurse search through sub-directories.
    If <names_only> then show only file names and not lines.
    If <counts> then show file names and line counts.
    If <invert_match> then show files that don't match.
    If <suppress_prefix> then don't show line numbers and file names.
    If <match_only> then show only contents of match, not whole line.
    """
    regex = re.compile(text_pattern, re_options)

    def is_match(line):
        return regex.search(line) is not None

    # match-string -> occurrence count, reported at the end when match_only.
    unique_matches = {}
    if invert_match:
        names_only = True

    # Fix: get_match previously took (line, unique_matches) but was called
    # with a single argument in the stdin and suppress_prefix branches,
    # raising TypeError. Close over unique_matches instead.
    if match_only:
        def get_match(line):
            # Record every hit on the line; return them joined for display.
            matches = [m.group(0) for m in regex.finditer(line)]
            for s in matches:
                unique_matches[s] = unique_matches.get(s, 0) + 1
            return ', '.join(matches)

        def output(s):
            # Per-line output suppressed; only the summary below is printed.
            pass
    else:
        def get_match(line):
            return line

        def output(s):
            print(s)

    if not path_pattern:
        # stdin case
        for _, line in _get_matches_for_file(sys.stdin, is_match, max_lines):
            output(get_match(line))
    else:
        # file pattern cases
        for path in utils.glob(path_pattern, recursive=recursive):
            if names_only:
                # XOR with invert_match: print the name when (mis)match state
                # agrees with what the caller asked for.
                if bool(any(_get_matches_for_path(path, is_match, max_lines))) != invert_match:
                    output(path)
            elif counts:
                # sum() avoids materializing the whole match list.
                n = sum(1 for _ in _get_matches_for_path(path, is_match, max_lines))
                print('%s:%d' % (path, n))
            elif suppress_prefix:
                for _, line in _get_matches_for_path(path, is_match, max_lines):
                    output(get_match(line))
            else:
                for j, line in _get_matches_for_path(path, is_match, max_lines):
                    output('%s:%d:%s' % (path, j, get_match(line)))

    if match_only:
        # Fix: the original printed the literal string 'unique_matches' and
        # then had a bare no-op expression; print the collected tallies.
        print('-' * 80)
        print('unique matches:')
        for s in sorted(unique_matches):
            print('%s: %d' % (s, unique_matches[s]))
def main():
    # Interactive/playground mode: keep the VM alive until operator presses
    # RETURN, then kill it and exit without running the test schedule.
    if e('${PLAYGROUND}'):
        info('Type RETURN to kill VM')
        input()
        vm_proc.kill()
        return
    tests_total = len(glob('${TESTS_ROOT}/os/*.py'))
    tests_success = []
    tests_failure = []
    for t in sorted(glob('${TESTS_ROOT}/os/*.py')):
        testname = os.path.splitext(os.path.basename(t))[0]
        logfile = objdir('logs/test-${testname}.log')
        info('Running test {0} (logfile {1})', testname, logfile)
        # Each test file is loaded as a module exposing run(); run() receives
        # a callable that executes a command on the VM over ssh, logging to
        # the per-test logfile. (imp is deprecated in modern Python.)
        mod = imp.load_source(testname, t)
        success, reason = mod.run(lambda x: ssh(x, logfile))
        # Give VM a while if panic happened
        time.sleep(2)
        if vm_proc.returncode is not None:
            # VM crashed!
            error('Test {0} caused VM crash', testname)
        if success is None:
            # None signals the test asked to abort the remaining schedule.
            error('Test {0} returned aborted test schedule: {1}', testname, reason)
        elif success:
            tests_success.append(testname)
        else:
            info('Failed: {0}', reason)
            tests_failure.append(testname)
    vm_proc.kill()
    # Final summary.
    info('{0} total tests', tests_total)
    info('{0} successes', len(tests_success))
    info('{0} failures', len(tests_failure))
    if len(tests_failure) > 0:
        info('Failed tests: {0}', ', '.join(tests_failure))
def main():
    # Interactive/playground mode: keep the VM alive until operator presses
    # RETURN, then kill it and exit without running the test schedule.
    if e('${PLAYGROUND}'):
        info('Type RETURN to kill VM')
        input()
        vm_proc.kill()
        return
    tests_total = len(glob('${TESTS_ROOT}/trueos/*.py'))
    tests_success = []
    tests_failure = []
    for t in sorted(glob('${TESTS_ROOT}/trueos/*.py')):
        testname = os.path.splitext(os.path.basename(t))[0]
        logfile = objdir('logs/test-${testname}.log')
        info('Running test {0} (logfile {1})', testname, logfile)
        # Each test file is loaded as a module exposing run(); run() receives
        # a callable that executes a command on the VM over ssh, logging to
        # the per-test logfile. (imp is deprecated in modern Python.)
        mod = imp.load_source(testname, t)
        success, reason = mod.run(lambda x: ssh(x, logfile))
        # Give VM a while if panic happened
        time.sleep(2)
        if vm_proc.returncode is not None:
            # VM crashed!
            error('Test {0} caused VM crash', testname)
        if success is None:
            # None signals the test asked to abort the remaining schedule.
            error('Test {0} returned aborted test schedule: {1}', testname, reason)
        elif success:
            tests_success.append(testname)
        else:
            info('Failed: {0}', reason)
            tests_failure.append(testname)
    vm_proc.kill()
    # Final summary.
    info('{0} total tests', tests_total)
    info('{0} successes', len(tests_success))
    info('{0} failures', len(tests_failure))
    if len(tests_failure) > 0:
        info('Failed tests: {0}', ', '.join(tests_failure))
def main():
    """List every build profile found under ${BUILD_PROFILES}, marking the
    currently selected one (${PROFILE})."""
    info("Available profiles:")
    # NOTE: `i` is expanded by name inside e("${i}/...") — keep its name.
    for i in glob("${BUILD_PROFILES}/*"):
        parsed = load_file(e("${i}/config.pyd"), os.environ)
        # Directories without a parsable config.pyd are skipped silently.
        if parsed is None:
            continue
        prof = parsed["profile"]
        marker = " [selected]" if e("${PROFILE}") == prof["name"] else ""
        info('* {0}{1}', prof["name"], marker)
        info('\tDescription: {0}', prof["description"])
        info('\tOwner: {0}', prof["owner"])
        info('\tStatus: {0}', prof["status"])
def main():
    # Collect every built package matching ${package}, copy them to ${host}
    # and force-install them there with pkg(8).
    pkg_paths = []
    pkg_names = []
    for i in glob('${OBJDIR}/ports/packages/*/All/${package}*.txz'):
        pkg_paths.append(i)
        pkg_names.append(os.path.basename(i))
    # Space-separated argument strings for the scp/ssh command lines below;
    # sh() expands ${pkg_paths}/${pkg_dest_paths} from these locals by name.
    pkg_dest_paths = ' '.join([os.path.join('/tmp', i) for i in pkg_names])
    pkg_paths = ' '.join(pkg_paths)
    pkg_names = ' '.join(pkg_names)
    # Host key verification is disabled for unattended operation.
    sh('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${pkg_paths} ${host}:/tmp/' )
    sh('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t ${host} pkg add -f ${pkg_dest_paths}' )
def populate_ufsroot():
    # Strip the installer UFS root: delete everything under each purge_dirs
    # entry except files listed in files_to_preserve, then create the
    # configured symlinks pointing into /rescue.
    info('Populating UFS root')
    for i in purge_dirs:
        for name in glob('${INSTUFS_DESTDIR}${i}/*'):
            name = os.path.basename(name)
            if e('${i}/${name}') in files_to_preserve:
                continue
            # Clear file flags (e.g. immutable) first so rm cannot fail.
            sh('chflags -f 0 ${INSTUFS_DESTDIR}${i}/${name}')
            sh('rm -f ${INSTUFS_DESTDIR}${i}/${name}')
    # symlinks maps link name -> destination path inside the UFS root.
    for k, v in symlinks.items():
        p = os.path.join('/rescue', k)
        sh('ln -s ${p} ${INSTUFS_DESTDIR}${v}')
def proc_corpus(wav_dir, feat_dir):
    """Extract features for every whispered/normal utterance pair in the corpus.

    Writes a "feature.list" manifest pairing each normal-speech feature path
    with its whispered counterpart, and submits both extractions to a worker
    pool. Whispered files with no matching normal recording are skipped.
    """
    list_path = path.join(feat_dir, "feature.list")
    utils.ensure_dir_for(list_path)
    pending = []
    with open(list_path, "w", encoding="utf8") as manifest, \
            Executor(max_workers=None) as pool:
        for whisper_wav in utils.glob(wav_dir, ".*whisper10/.*.wav",
                                      relpath=False, noext=False):
            # The normal-speech counterpart lives in the parallel100 subset.
            norm_wav = whisper_wav.replace("whisper10", "parallel100")
            if not path.exists(norm_wav):
                continue
            norm_feat = utils.replace_ext(
                norm_wav.replace(wav_dir, feat_dir), ".mcc")
            whisper_feat = utils.replace_ext(
                whisper_wav.replace(wav_dir, feat_dir), ".mcc")
            print(f"{norm_feat} {whisper_feat}", file=manifest)
            for src_wav, dst_feat in ((norm_wav, norm_feat),
                                      (whisper_wav, whisper_feat)):
                pending.append(pool.submit(proc_feat, src_wav, dst_feat))
        # Block until every submitted extraction has finished.
        wait(pending)
def get_blocks_names(self, client='*', block_type='*', number='*'):
    """
    Return list of paths of blocks matching the given arguments.

    Args:
        client (str, optional): The client directory.
        block_type (str, optional): The required block type.
        number (str, optional): The required block number.

    Returns:
        list of str: list of matching paths.
    """
    # Build the block-file pattern for this virtual file, then match it
    # under the temp directory for the requested client.
    pattern = build_file_name(
        block_type=block_type,
        name=self.virtual_file,
        number=number,
    )
    return glob(self.temp, client, pattern)
def main():
    # Absolute package paths to copy, and their basenames (remote /tmp names).
    pkg_paths = []
    pkg_names = []

    def append_packages(name):
        # Collect every built .txz whose filename starts with `name`.
        for i in glob('${OBJDIR}/ports/packages/*/All/'+'{0}*.txz'.format(name)):
            pkg_paths.append(i)
            pkg_names.append(os.path.basename(i))

    if 'install_latest' in sys.argv:
        # Derive package names from the latest bulk build logs.
        for i in glob('${OBJDIR}/ports/logs/bulk/*/latest/logs/*.log'):
            current_pkg = os.path.basename(i).rsplit('.log')[0]
            append_packages(current_pkg)
    else:
        append_packages('${package}')
    # Space-separated argument strings; sh() expands ${pkg_paths} and
    # ${pkg_dest_paths} from these locals by name.
    pkg_dest_paths = ' '.join([os.path.join('/tmp', i) for i in pkg_names])
    pkg_paths = ' '.join(pkg_paths)
    pkg_names = ' '.join(pkg_names)
    # Host key verification disabled for unattended operation; packages are
    # copied to the target and force-installed with pkg(8).
    sh('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${pkg_paths} ${host}:/tmp/')
    sh('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t ${host} pkg add -f ${pkg_dest_paths}')
def main():
    # Absolute package paths to copy, and their basenames (remote /tmp names).
    pkg_paths = []
    pkg_names = []

    def append_packages(name):
        # Collect every built .txz whose filename starts with `name`.
        for i in glob('${OBJDIR}/ports/data/packages/*/All/'+'{0}*.txz'.format(name)):
            pkg_paths.append(i)
            pkg_names.append(os.path.basename(i))

    if 'install_latest' in sys.argv:
        # Derive package names from the latest bulk build logs.
        for i in glob('${OBJDIR}/ports/data/logs/bulk/*/latest/logs/*.log'):
            current_pkg = os.path.basename(i).rsplit('.log')[0]
            append_packages(current_pkg)
    else:
        append_packages('${package}')
    # Space-separated argument strings; sh() expands ${pkg_paths} and
    # ${pkg_dest_paths} from these locals by name.
    pkg_dest_paths = ' '.join([os.path.join('/tmp', i) for i in pkg_names])
    pkg_paths = ' '.join(pkg_paths)
    pkg_names = ' '.join(pkg_names)
    # Host key verification disabled for unattended operation; packages are
    # copied to the target and force-installed with pkg(8).
    sh('scp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ${pkg_paths} ${host}:/tmp/')
    sh('ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -t ${host} pkg add -f ${pkg_dest_paths}')
def stage_non_installed_ports():
    """
    If a port was given the `install` key and it was set to False, then
    the port was not installed but we still should save the associated
    *.txz file in the final release directory.
    """
    config = load_profile_config()
    glob_pattern = '/*.txz'
    non_installed_ports = []
    for i in config.ports:
        # Entries may be plain names or dict-like objects; only dict-like
        # entries can carry install=False.
        if isinstance(i, dict) and not i.get('install', True):
            # NOTE(review): attribute access (i.package / i.name) on
            # something that passed isinstance(i, dict) — presumably these
            # are attr-dict objects from the profile loader; confirm.
            if 'package' in i:
                non_installed_ports.append(i.package)
            else:
                non_installed_ports.append(i.name)
    if non_installed_ports:
        sh('mkdir -p ${RELEASE_STAGEDIR}/${BUILD_ARCH_SHORT}/${NON_INSTALLED_DIR}')
        # Locate the "All" packages directory under the ports data tree.
        pkgdir = subprocess.run(
            ['find', objdir('ports/data/packages'), '-name', 'All', '-type', 'd'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if pkgdir.stdout:
            pkgdir = pkgdir.stdout.decode().strip()
        # NOTE(review): if find produced no output, pkgdir is still the
        # CompletedProcess object here, and `pkgdir + glob_pattern` below
        # would raise TypeError — confirm whether this loop was meant to be
        # guarded by the `if pkgdir.stdout` above.
        for i in non_installed_ports:
            # port name will have the directory it resides in
            # so make sure to remove it if it's there
            if '/' in i:
                i = i.split('/')[1]
            for t in glob(pkgdir + glob_pattern):
                pkg = t.split('/')[-1]
                if pkg.startswith(i):
                    sh('cp ${t} ${RELEASE_STAGEDIR}/${BUILD_ARCH_SHORT}/${NON_INSTALLED_DIR}')
def append_packages(name):
    # Collect every built .txz package whose filename starts with `name`.
    # NOTE: pkg_paths and pkg_names are not defined here — this function
    # relies on them existing in an enclosing or global scope.
    for i in glob('${OBJDIR}/ports/packages/*/All/'+'{0}*.txz'.format(name)):
        pkg_paths.append(i)
        pkg_names.append(os.path.basename(i))
def run(self):
    """Execute the logic thread.

    Message pump: consumes command dicts from self.logic_queue until a
    'kill' message arrives, performing local block-storage operations and
    queueing any protocol replies onto self.network_queue.
    """
    while self.running:
        message = self.logic_queue.get()
        message_type = message['type']
        self.logger.info(
            'received message of type %s' % message_type)
        # store a block
        if message_type == 'send_block':
            file_name = build_file_name(
                name=message['name'],
                number=message['number'],
                block_type=message['block_type'])
            file_path = os.path.join(self.data_path, file_name)
            content = message['content']
            try:
                with open(file_path, 'wb') as f:
                    f.write(content)
            except:
                # NOTE(review): bare except — also traps SystemExit /
                # KeyboardInterrupt; consider `except Exception:`.
                self.logger.exception(
                    'an error acurred while trying to write to file:\n')
        # send a block to the server
        elif message_type == 'ask_block':
            file_name = build_file_name(
                name=message['name'],
                number=message['number'],
                block_type=message['block_type'])
            for block in glob(self.data_path, file_name):
                try:
                    with open(block, 'rb') as f:
                        content = f.read()
                except:
                    # Best effort: log and continue with remaining blocks.
                    self.logger.exception(
                        'an error acurred while reading file: %s' % block)
                else:
                    # Reconstruct block metadata from the on-disk file name.
                    real_file = os.path.basename(block)
                    block_info = parse_file_name(real_file)
                    net_message = protocol.client.block(
                        block_type=block_info['block_type'],
                        name=block_info['name'],
                        number=block_info['number'],
                        content=content)
                    thread_message = protocol.thread.send(
                        message=net_message)
                    self.network_queue.put(thread_message)
            # announce the server that all block were sent
            name = message['name']
            net_message_finished = protocol.client.file_sent(name)
            thread_message_finished = protocol.thread.send(
                message=net_message_finished)
            self.network_queue.put(thread_message_finished)
        # delete blocks
        elif message_type == 'delete_block':
            # NOTE(review): duplicated "file_name = file_name =" — harmless
            # but almost certainly a typo.
            file_name = file_name = build_file_name(
                name=message['name'],
                number=message['number'],
                block_type=message['block_type'])
            block_list = glob(self.data_path, file_name)
            for file in block_list:
                os.remove(file)
        # send the disk state to the server
        elif message_type == 'ask_disk_state':
            total = diskutil.total()
            free = diskutil.free()
            net_message = protocol.client.disk_state(
                total=total, free=free)
            message = protocol.thread.send(
                message=net_message)
            self.network_queue.put(message)
        # send the storage state to the server
        elif message_type == 'ask_storage_state':
            # NOTE(review): on Python 3 the chained map() objects are lazy;
            # presumably protocol.client.storage_state consumes them
            # immediately — confirm, otherwise materialize with list().
            block_list = glob(self.data_path, '*_*.*')
            block_list = map(os.path.basename, block_list)
            block_list = map(parse_file_name, block_list)
            net_message = protocol.client.storage_state(blocks=block_list)
            message = protocol.thread.send(
                message=net_message)
            self.network_queue.put(message)
        # end the thread
        elif message_type == 'kill':
            # Forward the kill to the network thread, then stop this loop.
            self.network_queue.put(message)
            self.running = False
def run(self):
    """Execute the logic thread.

    Message pump: consumes command dicts from self.logic_queue until a
    'kill' message arrives, performing local block-storage operations and
    queueing any protocol replies onto self.network_queue.
    """
    while self.running:
        message = self.logic_queue.get()
        message_type = message['type']
        self.logger.info('received message of type %s' % message_type)

        # store a block
        if message_type == 'send_block':
            file_name = build_file_name(name=message['name'],
                                        number=message['number'],
                                        block_type=message['block_type'])
            file_path = os.path.join(self.data_path, file_name)
            content = message['content']
            try:
                with open(file_path, 'wb') as f:
                    f.write(content)
            # Fix: was a bare `except:`, which also traps SystemExit and
            # KeyboardInterrupt; narrow to Exception, keep best-effort log.
            except Exception:
                self.logger.exception(
                    'an error acurred while trying to write to file:\n')

        # send a block to the server
        elif message_type == 'ask_block':
            file_name = build_file_name(name=message['name'],
                                        number=message['number'],
                                        block_type=message['block_type'])
            for block in glob(self.data_path, file_name):
                try:
                    with open(block, 'rb') as f:
                        content = f.read()
                except Exception:
                    # Best effort: log and continue with remaining blocks.
                    self.logger.exception(
                        'an error acurred while reading file: %s' % block)
                else:
                    # Reconstruct block metadata from the on-disk file name.
                    real_file = os.path.basename(block)
                    block_info = parse_file_name(real_file)
                    net_message = protocol.client.block(
                        block_type=block_info['block_type'],
                        name=block_info['name'],
                        number=block_info['number'],
                        content=content)
                    thread_message = protocol.thread.send(
                        message=net_message)
                    self.network_queue.put(thread_message)
            # announce the server that all blocks were sent
            name = message['name']
            net_message_finished = protocol.client.file_sent(name)
            thread_message_finished = protocol.thread.send(
                message=net_message_finished)
            self.network_queue.put(thread_message_finished)

        # delete blocks
        elif message_type == 'delete_block':
            # Fix: removed the duplicated "file_name = file_name =" typo.
            file_name = build_file_name(name=message['name'],
                                        number=message['number'],
                                        block_type=message['block_type'])
            block_list = glob(self.data_path, file_name)
            for file in block_list:
                os.remove(file)

        # send the disk state to the server
        elif message_type == 'ask_disk_state':
            total = diskutil.total()
            free = diskutil.free()
            net_message = protocol.client.disk_state(total=total, free=free)
            message = protocol.thread.send(message=net_message)
            self.network_queue.put(message)

        # send the storage state to the server
        elif message_type == 'ask_storage_state':
            # Fix: the chained map() objects are lazy on Python 3;
            # materialize so the message carries concrete data.
            block_list = [parse_file_name(os.path.basename(b))
                          for b in glob(self.data_path, '*_*.*')]
            net_message = protocol.client.storage_state(blocks=block_list)
            message = protocol.thread.send(message=net_message)
            self.network_queue.put(message)

        # end the thread
        elif message_type == 'kill':
            # Forward the kill to the network thread, then stop this loop.
            self.network_queue.put(message)
            self.running = False
def append_packages(name):
    # Collect every built .txz package whose filename starts with `name`.
    # NOTE: pkg_paths and pkg_names are not defined here — this function
    # relies on them existing in an enclosing or global scope.
    for i in glob('${OBJDIR}/ports/data/packages/*/All/'+'{0}*.txz'.format(name)):
        pkg_paths.append(i)
        pkg_names.append(os.path.basename(i))
def keep_wrkdirs():
    # When SAVE_DEBUG is set, drop an empty .keep marker into every port
    # directory under the overlay — presumably so later cleanup passes
    # preserve the work directories for debugging; confirm against callers.
    if e('${SAVE_DEBUG}'):
        for p in glob('${PORTS_OVERLAY}/*/*'):
            if os.path.isdir(p):
                setfile('${p}/.keep', '')