def main():
    # Build a paged, human-readable index of all discoverable GLU modules,
    # grouped by heading, with each module's __abstract__ wrapped to 78 cols.
    modules = find_glu_modules()
    out = StringIO()
    out.write('Welcome to GLU: The Genotype Library and Utilities\n')
    if not modules:
        out.write('\nNo modules found!\n')
        return
    # FIXME: Figure out what to do when width is too large
    # Column width: longest module name (m[1]) plus padding.
    width = max(len(m[1]) for m in modules)+4
    inheading = None
    for heading,name,module in modules:
        # Emit a section header each time the heading changes
        # (modules are assumed pre-grouped by heading -- TODO confirm).
        if heading != inheading:
            inheading = heading
            out.write('\n%s:\n\n' % heading)
        abstract = getattr(module,'__abstract__','')
        # First wrapped line is indented only by what the name doesn't fill.
        initial_width = max(0,width-len(name)-4)
        text = textwrap.wrap(abstract, 78,
                             initial_indent=' '*initial_width,
                             subsequent_indent=' '*width)
        out.write(' %s %s\n' % (name,'\n'.join(text)))
    pydoc.pager(out.getvalue())
def printout(max_lines, result):
    """Render *result*: print inline (colorized) when short enough, else page it.

    A result fits inline when max_lines is positive and the generated
    output has fewer than max_lines newlines.
    """
    text = generate_output(result)
    fits_inline = max_lines > 0 and text.count('\n') < max_lines
    if not fits_inline:
        pager(text)
        return
    # Wrap in the color code for this result (default: reset).
    color = codes.get(result.code, ENDC)
    print(color + text + ENDC)
def main():
    """CLI entry point: show a project's build log, or list its builds.

    Fixes vs. the original: the log file is opened via a context manager
    (it was previously never closed), and an empty build directory now
    produces a parser error instead of an unhandled ValueError from max().
    """
    parser = argparse.ArgumentParser()
    parser.description = DESCRIPTION
    parser.add_argument('-l', '--list', dest='lst', action='store_true',
                        default=False, help='List build results')
    parser.add_argument('name', help='Name of project')
    parser.add_argument('build_id', help='Build number (defaults to last)',
                        nargs='?')
    args = parser.parse_args()
    project_name = args.name
    config = Config()
    if not config.has_project(project_name):
        parser.error('Invalid project name {}'.format(project_name))
    log_base_dir = Builder.get_log_base_dir(config, project_name)
    if args.lst:
        list_builds(log_base_dir)
        return 0
    if args.build_id is None:
        # Default to the most recent (highest-numbered) build directory.
        ids = [int(x) for x in os.listdir(log_base_dir) if x.isdigit()]
        if not ids:
            parser.error('No builds found for {}'.format(project_name))
        build_id = str(max(ids))
    else:
        build_id = args.build_id
    log_dir = os.path.join(log_base_dir, build_id)
    log_path = os.path.join(log_dir, Builder.BUILD_LOG_NAME)
    # Context manager ensures the log file handle is closed promptly.
    with open(log_path, 'rt') as log_file:
        pager(log_file.read())
def page_string(str_to_page, pager_cmd):
    """
    Page str_to_page via the pager.

    Tries to do a bit of fail-safe checking. For example, if the command
    starts with less but less doesn't appear to be installed on the system,
    it will resort to the pydoc.pager method.
    """
    # By default, we expect the command to be `less -R`. If that is the
    # pager_cmd, but they don't have less on their machine, odds are they're
    # just using the default value. In this case the pager will fail, so we'll
    # just go via pydoc.pager, which tries to do smarter checking that we
    # don't want to bother trying to replicate.
    # (Removed a leftover commented-out ipdb debugging line.)
    use_fallback = pager_cmd is None or pager_cmd == FLAG_FALLBACK
    if not use_fallback and pager_cmd.startswith('less'):
        # stealing this check from pydoc.getpager(): probe for `less`.
        if hasattr(os, 'system') and os.system('(less) 2>/dev/null') != 0:
            # no less!
            use_fallback = True
    if use_fallback:
        pydoc.pager(str_to_page)
    else:
        # Otherwise, obey the user.
        pydoc.pipepager(str_to_page, cmd=pager_cmd)
def search(args: object) -> int:
    """Search every account in args.filename for args.search_term; page matches."""
    # Read the accounts dictionary and the key used to decrypt entries.
    accounts_dict, _, master_key = read_file(args.filename)
    needle = args.search_term.lower()
    # Accumulate the printable form of every matching account.
    matches = []
    for encrypted in accounts_dict.values():
        # Decrypt each account in turn.
        account_dict = crypt_to_dict(encrypted, master_key)
        # The string representation of a dict is good enough for
        # searching in.
        if needle in str(account_dict):
            matches.append('\n' + dict_to_str(account_dict))
    import pydoc
    pydoc.pager(''.join(matches))
    return 0
def list_info(args: object) -> int:
    """List the info in the account or file.

    Returns 0 in all cases. (The original was annotated ``-> int`` but fell
    off the end returning None on success; the sibling PassFile variant of
    this function returns 0, so the trailing ``return 0`` is restored.)
    """
    filename = args.filename
    account = args.account
    # Read the accounts dictionary into accounts_dict.
    accounts_dict, _, master_key = read_file(filename)
    account_str = ''
    if account == 'ALL':
        # List all accounts.
        for account_data in accounts_dict.values():
            account_dict = crypt_to_dict(account_data, master_key)
            if account_dict:
                account_str += '\n' + dict_to_str(account_dict)
    else:
        # Get the sha512 hash of the account name.
        hashed_account = hash_name(account)
        account_data = accounts_dict.get(hashed_account, '')
        # If there was no account data exit.
        if not account_data:
            print("Account %s not found." % account)
            return 0
        account_str = dict_to_str(crypt_to_dict(account_data, master_key))
    import pydoc
    pydoc.pager(account_str)
    return 0
def __call__(self, parser, namespace, values, option_string=None):
    """Page the parser's full help text through pydoc, then exit the program."""
    buf = StringIO()
    parser.print_help(buf)
    help_text = buf.getvalue()
    buf.close()
    pydoc.pager(help_text)
    # parser.exit() raises SystemExit; nothing after this runs.
    parser.exit()
def print_sorted_donor_list(donor_objects, input_string):
    """Page the donor list sorted by the field chosen via *input_string*.

    Choices "2" (weight) and "13" (age) sort numerically; every other
    choice maps to an attribute name and sorts with attrgetter.
    """
    field_by_choice = {
        "1": "name",
        "3": "gender",
        "4": "dateofbirth",
        "5": "lastdonationdate",
        "6": "wassick",
        "7": "uniqueid",
        "8": "expofid",
        "9": "bloodtype",
        "10": "hemoglobin",
        "11": "emailaddress",
        "12": "mobilnumber",
    }
    if input_string == "2":
        ordered = sorted(donor_objects, key=lambda d: int(d.weight))
    elif input_string == "13":
        ordered = sorted(donor_objects, key=lambda d: int(d.age))
    else:
        ordered = sorted(donor_objects,
                         key=attrgetter(field_by_choice[input_string]))
    chunks = []
    for donor in ordered:
        chunks.append("------------------------------\n")
        chunks.append(donor.data_out() + "\n")
        chunks.append("------------------------------\n")
    pydoc.pager("".join(chunks))
    input("\n Press (ENTER) to go back")
    clear()
def search_in_donors():
    """Prompt for a search string and page every matching donor record.

    Fixes vs. the original: the CSV file is read inside a ``with`` block
    (it was left to the GC before), and ``del content[0]`` is guarded so a
    completely empty file no longer raises IndexError.
    """
    with open("Data/donors.csv", "r") as f:
        content = [line.strip() for line in f]
    # Drop the CSV header row if present; an empty file previously crashed here.
    if content:
        del content[0]
    if len(content) < 1:
        print("\n No entry found\n")
        input("\n Press (ENTER) to go back")
        clear()
        return None
    string_to_search = input("Search for donor: ")
    # Case-insensitive substring match against the raw CSV line
    # (lines are assumed upper-cased on disk -- TODO confirm).
    found_items = [row for row in content if string_to_search.upper() in row]
    donor_object_list = []
    for row in found_items:
        fields = row.split(",")
        donor = Donor()
        donor.name = fields[0]
        donor.weight = fields[1]
        donor.dateofbirth = fields[3]
        donor.emailaddress = fields[-2]
        donor.age = donor.donor_age()
        donor_object_list.append(donor)
    text = ""
    for donor in donor_object_list:
        text += "------------------------------\n"
        text += donor.data_out() + "\n"
        text += "------------------------------\n"
    pydoc.pager(text)
    input("\n Press (ENTER) to go back")
    clear()
def _rest_pager(x): out = x[2] terminal_size = shutil.get_terminal_size(25).lines; if terminal_size <= len(out.split('\n')): pydoc.pager(out) else: print(out)
def _run(self):
    # List images, applying name/id/user/metadata filters, then print them
    # (buffered through a pager when --more is requested).
    withmeta = bool(self['meta'] or self['meta_like'])
    withuser = bool(self['user_id'] or self['user_name'])
    # Server-side detail is required whenever we filter on meta or user.
    detail = self['detail'] or withmeta or withuser
    images = self.client.list_images(detail)
    images = self._filter_by_name(images)
    images = self._filter_by_id(images)
    if withuser:
        images = self._filter_by_user(images)
    if withmeta:
        images = self._filter_by_metadata(images)
    if self['detail'] and not (
            self['json_output'] or self['output_format']):
        # Resolve both user and tenant ids to names for pretty output.
        images = self._add_name(self._add_name(images, 'tenant_id'))
    elif detail and not self['detail']:
        # Detail was fetched only for filtering; strip it back down to
        # the permanent fields before display.
        for img in images:
            for key in set(img).difference(self.PERMANENTS):
                img.pop(key)
    kwargs = dict(with_enumeration=self['enum'])
    if self['limit']:
        images = images[:self['limit']]
    if self['more']:
        # Collect output in memory so it can be paged afterwards.
        kwargs['out'] = StringIO()
        kwargs['title'] = ()
    self._print(images, **kwargs)
    if self['more']:
        pager(kwargs['out'].getvalue())
def print_genres():
    """
    Print a genre listing.

    Uses python's built-in paging mechanism to print a genre listing
    to the terminal.
    """
    # Python 2 code: relies on `unicode`, dict.iteritems(), and integer `/`.
    # Keep only real (unicode, non-placeholder) genre names.
    genres = dict(filter(
        lambda x: type(x[1]) is unicode and x[1] != "<not-set>",
        eyed3.id3.genres.iteritems()))
    longest = len(max(genres.values(), key=len))
    # 3 for genre ID, 2*1 for spaces.
    genre_cols = term_size()[0] / (3 + 1 + 1 + longest)

    def pack_n(container, n):
        """
        Generator returning slices of n from container's elements.
        """
        idx = 0
        while idx < len(container)-n:
            yield container[idx:idx+n]
            idx += n
        # Final (possibly short) slice.
        yield container[idx:]

    # Format each genre as "ID name", pack them into rows of genre_cols
    # columns, and page the result.
    pydoc.pager(
        "\n".join(
            [" ".join(x) for x in pack_n(
                ["{:3} {:{width}}".format(a[0], a[1], width=longest)
                 for a in genres.iteritems()],
                genre_cols)]))
def func(parser, options, args):
    """Show the tree diff
    """
    args = git.ls_files(args)
    directory.cd_to_topdir()
    if options.revs:
        # -r accepts "rev" or "rev1..rev2"; anything else is an error.
        rev_list = options.revs.split('..')
        rev_list_len = len(rev_list)
        if rev_list_len == 1:
            rev1 = rev_list[0]
            rev2 = None
        elif rev_list_len == 2:
            rev1 = rev_list[0]
            rev2 = rev_list[1]
        else:
            parser.error('incorrect parameters to -r')
    else:
        rev1 = 'HEAD'
        rev2 = None
    if not options.stat:
        options.diff_flags.extend(color_diff_flags())
    # `rev and git_id(...)` keeps None revs as None.
    diff_str = git.diff(args,
                        rev1 and git_id(crt_series, rev1),
                        rev2 and git_id(crt_series, rev2),
                        diff_flags = options.diff_flags)
    if options.stat:
        out.stdout_raw(gitlib.diffstat(diff_str) + '\n')
    else:
        if diff_str:
            pager(diff_str)
def get_changelog(pkg_name:str, interactive:bool=False, output:bool=False,
                  paged_output:bool=False, no_local:bool=False,
                  max_download_size:int=0):
    """
    Returns changelog for given package name, if any, and if within
    size-restrictions

    :param pkg_name: package whose changelog to fetch
    :param interactive: re-raise SystemExit instead of swallowing it
    :param output: also print/page the changelog (not just return it)
    :param paged_output: page instead of plain print when outputting
    :param no_local: passed through to the fetcher -- presumably skips the
        locally installed changelog; TODO confirm
    :param max_download_size: override the fetcher's size limit when > 0
    """
    changelog = None
    try:
        # Lazily initialize the module-level apt_changelog object.
        if not apt_changelog:
            __init__(interactive)
        if int(max_download_size) > 0:
            apt_changelog.max_download_size = int(max_download_size)
        else:
            apt_changelog.max_download_size = apt_changelog.max_download_size_default
        changelog = apt_changelog.get_changelog(pkg_name, no_local)
    except SystemExit:
        # Only propagate exits in interactive mode.
        if interactive:
            raise
    except KeyboardInterrupt:
        sys.exit(130)
    else:
        # Output only happens when fetching succeeded.
        if output:
            if not changelog:
                # empty changelog
                apt_changelog.exit_on_fail(7)
            if paged_output:
                try:
                    from pydoc import pager
                    pager(changelog)
                except Exception as e:
                    _generic_exception_handler(e)
                    paged_output = False
            else:
                print(changelog)
    return changelog
def _run(self): withmeta = bool(self["meta"] or self["meta_like"]) withuser = bool(self["user_id"] or self["user_name"]) detail = self["detail"] or withmeta or withuser images = self.client.list_images(detail) images = self._filter_by_name(images) images = self._filter_by_id(images) if withuser: images = self._filter_by_user(images) if withmeta: images = self._filter_by_metadata(images) if self["detail"] and not self["output_format"]: images = self._add_name(self._add_name(images, "tenant_id")) elif detail and not self["detail"]: for img in images: for key in set(img).difference(["id", "name"]): img.pop(key) kwargs = dict(with_enumeration=self["enum"]) if self["limit"]: images = images[: self["limit"]] if self["more"]: kwargs["out"] = StringIO() kwargs["title"] = () self.print_(images, **kwargs) if self["more"]: pager(kwargs["out"].getvalue())
def launch_program(self): """launch the main program""" # Make pythia8 print "Running make for pythia8 directory" misc.compile(cwd=os.path.join(self.running_dir, os.path.pardir), mode='cpp') if self.model_dir: print "Running make in %s" % self.model_dir misc.compile(cwd=self.model_dir, mode='cpp') # Finally run make for executable makefile = self.executable.replace("main_","Makefile_") print "Running make with %s" % makefile misc.compile(arg=['-f', makefile], cwd=self.running_dir, mode='cpp') print "Running " + self.executable output = open(os.path.join(self.running_dir, self.name), 'w') if not self.executable.startswith('./'): self.executable = os.path.join(".", self.executable) subprocess.call([self.executable], stdout = output, stderr = output, cwd=self.running_dir) # Display the cross-section to the screen path = os.path.join(self.running_dir, self.name) pydoc.pager(open(path).read()) print "Output of the run is found at " + \ os.path.realpath(os.path.join(self.running_dir, self.name))
def list_info(args: object) -> int:
    """List one account (or all accounts, for 'ALL') from the file and page it."""
    with PassFile(args.filename) as passfile:
        wanted = args.account
        if wanted == 'ALL':
            # Concatenate the printable form of every account.
            account_str = ''.join(
                '\n' + dict_to_str(acct) for acct in passfile.accounts())
        else:
            if wanted not in passfile:
                print("Account %s not found." % wanted)
                return 0
            account_str = dict_to_str(passfile.get(wanted))
    import pydoc
    pydoc.pager(account_str)
    return 0
def main():
    """Entry point for the `rfcs` command-line tool.

    Subcommands: search, info, text (optionally paged), url.
    """
    parser = argparse.ArgumentParser(prog='rfcs', description=description)
    parser.add_argument('--version', action='store_true')
    subparsers = parser.add_subparsers(title='subcommands', dest='command')

    search_cmd = subparsers.add_parser(
        'search', help='search for RFCs matching a query')
    search_cmd.add_argument('query', type=str, help='search query')
    search_cmd.add_argument(
        '--maxresults', type=int, dest='N', metavar='N', default=5,
        help='maximum number of displayed results')

    info_cmd = subparsers.add_parser(
        'info', help='get information on a particular RFC')
    info_cmd.add_argument('rfc', type=int, help='number of RFC')

    text_cmd = subparsers.add_parser(
        'text', help='view the text of a particular RFC')
    text_cmd.add_argument('rfc', type=int, help='number of RFC')
    text_cmd.add_argument(
        '--nopager', action='store_true',
        help='write to stdout instead of opening pager')

    url_cmd = subparsers.add_parser(
        'url', help='get a URL to view a particular RFC')
    url_cmd.add_argument('rfc', type=int, help='number of RFC')
    url_cmd.add_argument(
        '--format', choices=['text', 'html', 'pdf', 'bibtex'],
        default='text')

    args = parser.parse_args()
    if args.version:
        print(__version__)
    elif args.command == 'search':
        print(search(args.query, args.N))
    elif args.command == 'info':
        print(info(args.rfc))
    elif args.command == 'text':
        if args.nopager:
            print(text(args.rfc))
        else:
            pydoc.pager(text(args.rfc))
    elif args.command == 'url':
        print(url(args.rfc, args.format))
    else:
        # No subcommand given: show usage.
        parser.print_help()
def doc2(thing, title="Python Library Documentation: %s", forceload=0): """Display text documentation, given an object or a path to an object.""" import types try: object, name = pydoc.resolve(thing, forceload) desc = pydoc.describe(object) module = mygetmodule(object) if name and "." in name: desc += " in " + name[: name.rfind(".")] elif module and module is not object: desc += " in module " + module.__name__ if not ( inspect.ismodule(object) or inspect.isclass(object) or inspect.isroutine(object) or isinstance(object, property) ): # If the passed object is a piece of data or an instance, # document its available methods instead of its value. # if this is a instance of used defined old-style class if type(object) == types.InstanceType: object = object.__class__ else: object = type(object) desc += " object" pydoc.pager(title % desc + "\n\n" + pydoc.text.document(object, name)) except (ImportError, pydoc.ErrorDuringImport), value: print value
def func(parser, options, args):
    """Show commit log and diff
    """
    applied = crt_series.get_applied()
    unapplied = crt_series.get_unapplied()
    if options.applied:
        patches = applied
    elif options.unapplied:
        patches = unapplied
    elif len(args) == 0:
        patches = ['HEAD']
    else:
        if len(args) == 1 and args[0].find('..') == -1 \
               and not crt_series.patch_exists(args[0]):
            # it might be just a commit id
            patches = args
        else:
            patches = parse_patches(args, applied + unapplied +\
                                    crt_series.get_hidden(), len(applied))
    if options.diff_opts:
        diff_flags = options.diff_opts.split()
    else:
        diff_flags = []
    # Resolve each patch name to its commit id, then concatenate the
    # pretty-printed commits and page the result.
    commit_ids = [git_id(patch) for patch in patches]
    commit_str = '\n'.join([git.pretty_commit(commit_id,
                                              diff_flags=diff_flags)
                            for commit_id in commit_ids])
    if commit_str:
        pager(commit_str)
def help(self, request):
    # Page help for either a callable (uses its docstring) or a named
    # option of the global _img object. Python 2 code (print statement).
    global _img
    topbar = '_' * 72 + '\n'    # 72-character divider
    if hasattr(request, '__name__'):
        # Function-like object: show its bolded name plus docstring.
        pydoc.pager(topbar + 'Help on ' + pydoc.text.bold(request.__name__)
                    + ':\n\n' + pydoc.getdoc(request))
    else:
        # Otherwise treat the request as an option name.
        opts = _img.opts.__class__.__dict__
        try:
            opt = opts[request]
            desc_list = str(opt.doc()).split('\n')
            desc = '\n\n'.join(desc_list)
            default_val = opt._default
            if isinstance(default_val, str):
                valstr = "'" + default_val + "'"
            else:
                valstr = str(default_val)
            default_val_text = 'Default value: ' + valstr
            if opt.group() != None and opt.group() != 'hidden':
                group_text = '\nBelongs to group: ' + opt.group()
            else:
                group_text = ''
            # Re-wrap the description to 72 columns before paging.
            desc_text = lofar.bdsm.interface.wrap(desc, 72)
            desc_text = '\n'.join(desc_text)
            pydoc.pager(topbar + 'Help on the ' + pydoc.text.bold(request)
                        + ' parameter:\n\n' + default_val_text + group_text
                        + '\n\n' + desc_text)
        except(KeyError):
            print "Parameter '" + request + "' not recognized."
def uncaught_main():
    """Dispatch the first CLI argument to the matching MyPlan method.

    Fixes vs. the original: the whitelisted command is dispatched via
    getattr() instead of building and eval()-ing a source string (same
    behavior, no dynamic code evaluation), and a large block of
    commented-out optparse scaffolding was removed.
    """
    num_args = len(sys.argv) - 1
    if num_args < 1:
        writeln("No command specified. Try help")
        return
    cmd = to_unicode(sys.argv[1], input_encoding)
    if cmd == 'help':
        pydoc.pager(help_text)
        return
    # Constructing the plan performs any needed reset.
    p = MyPlan(cmd == 'reset')
    if cmd == 'reset':
        return  # we don't need to do anything
    if cmd in ['change', 'fetch', 'follow', 'following', 'info', 'post',
               'stdin', 'unfollow']:
        # Whitelisted command names map 1:1 to MyPlan methods.
        getattr(p, cmd)()
    else:
        writeln("Unrecognised command: " + cmd)
        return
def from_statement():
    # Demo lesson (Python 2): fetch an HTML page explaining `import`/`from`,
    # convert it to plain text, and page it. User-facing strings are Dutch
    # and are preserved verbatim.
    os.system('clear')
    print "Het 'from' statement importeert een module."
    print ""
    print "Nadat je op enter hebt gedrukt vind je een html fetch van een pagina wat uitlegt wat import en from doet."
    print ""
    print "Bron: http://effbot.org/zone/import-confusion.htm"
    print ""
    print "In deze functie staan technieken om html om te zetten naar plaintext en ook om de plaintext door een pager te halen (pydoc)."
    raw_input(">Hit enter to continue...")  # hit enter to continue
    print ""
    print ""
    print ""
    print ""
    print ""
    print ""
    html_fetch = urllib2.urlopen('http://effbot.org/zone/import-confusion.htm')  # the urllib2 module fetches http pages.
    # print http_fetch.info()  # info() would fetch the html headers.
    raw_html = html_fetch.read()  # read() returns the raw html, like reading a normal file.
    plain_text = html2text.html2text(raw_html)  # the html2text parser converts the html to plain text.
    pydoc.pager(plain_text)
    html_fetch.close()
    raw_input(">Hit enter to continue...")  # hit enter to continue
    os.system('clear')
    print_keywords()
    start()
def search(args: object) -> int:
    """Page every account whose dict repr contains args.search_term (case-insensitive)."""
    needle = args.search_term.lower()
    # Accumulate the printable form of each matching account.
    found = []
    with PassFile(args.filename) as passfile:
        for account_dict in passfile.accounts():
            # The string representation of a dict is good enough for
            # searching in.
            if needle in str(account_dict):
                found.append("\n" + dict_to_str(account_dict))
    import pydoc
    pydoc.pager("".join(found))
    return 0
def func(parser, options, args):
    """Show commit log and diff
    """
    if options.applied:
        patches = crt_series.get_applied()
    elif options.unapplied:
        patches = crt_series.get_unapplied()
    elif len(args) == 0:
        patches = ['HEAD']
    elif '..' in ' '.join(args):
        # patch ranges
        applied = crt_series.get_applied()
        unapplied = crt_series.get_unapplied()
        patches = parse_patches(args, applied + unapplied + \
                                crt_series.get_hidden(), len(applied))
    else:
        # individual patches or commit ids
        patches = args
    if not options.stat:
        options.diff_flags.extend(color_diff_flags())
    # Resolve each patch to a commit id, pretty-print, and join.
    commit_ids = [git_id(crt_series, patch) for patch in patches]
    commit_str = '\n'.join([git.pretty_commit(commit_id,
                                              flags = options.diff_flags)
                            for commit_id in commit_ids])
    if options.stat:
        commit_str = gitlib.diffstat(commit_str)
    if commit_str:
        pager(commit_str)
def search(args: object) -> int:
    """Search decrypted accounts' keys and values for args.search_term; page matches."""
    # Read the accounts dictionary and the decryption key.
    accounts_dict, _, master_key = read_file(args.filename)
    needle = args.search_term.lower()
    matched = []
    for encrypted in accounts_dict.values():
        # Only accounts the master key can decrypt are searchable.
        account_dict = crypt_to_dict(encrypted, master_key)
        if not account_dict:
            continue
        # Match case-insensitively against every key and value; each
        # account is added at most once.
        if any(needle in key.lower() or needle in value.lower()
               for key, value in account_dict.items()):
            matched.append("\n" + dict_to_str(account_dict))
    import pydoc
    pydoc.pager("".join(matched))
    return 0
def initializeOoniprobe(global_options):
    # First-run setup (Python 2, interactive): show the informed-consent
    # document, require an explicit "yes", then build the initial config.
    print("It looks like this is the first time you are running ooniprobe")
    if not sys.stdin.isatty():
        # Interactive consent is mandatory; refuse to run without a tty.
        print("ERROR: STDIN is not attached to a tty. Quiting.")
        sys.exit(8)
    print("Please take a minute to read through the informed consent documentation and "
          "understand what are the risks associated with running ooniprobe.")
    print("Press enter to continue...")
    raw_input()
    with open(os.path.join(OONIPROBE_ROOT, 'ui', 'consent-form.md')) as f:
        consent_form_text = ''.join(f.readlines())
    from pydoc import pager
    pager(consent_form_text)
    answer = ""
    # Loop until the user types exactly "yes" (case-insensitive).
    while answer.lower() != "yes":
        print('Type "yes" if you are fully aware of the risks associated with using ooniprobe and you wish to proceed')
        answer = raw_input("> ")
    print("")
    print("Now help us configure some things!")
    # Yes/no prompts below follow the (Y/n)/(y/N) default conventions.
    answer = raw_input('Should we upload measurements to a collector? (Y/n) ')
    should_upload = True
    if answer.lower().startswith("n"):
        should_upload = False
    answer = raw_input('Should we include your IP in measurements? (y/N) ')
    include_ip = False
    if answer.lower().startswith("y"):
        include_ip = True
    answer = raw_input('Should we include your ASN (your network) in '
                       'measurements? (Y/n) ')
    include_asn = True
    if answer.lower().startswith("n"):
        include_asn = False
    answer = raw_input('Should we include your Country in '
                       'measurements? (Y/n) ')
    include_country = True
    if answer.lower().startswith("n"):
        include_country = False
    answer = raw_input('How would you like reports to be uploaded? (onion, '
                       'https, cloudfront) ')
    preferred_backend = 'onion'
    if answer.lower().startswith("https"):
        preferred_backend = 'https'
    elif answer.lower().startswith("cloudfront"):
        preferred_backend = 'cloudfront'
    config.create_config_file(include_ip=include_ip,
                              include_asn=include_asn,
                              include_country=include_country,
                              should_upload=should_upload,
                              preferred_backend=preferred_backend)
    config.set_initialized()
    print("ooniprobe is now initialized. You can begin using it!")
def choose_backup(self, sftp):
    # Scan the remote SFTP directory for backup folders (directories that
    # contain a parseable manifest.json) and let the user pick one.
    # Python 2 code (raw_input). Returns a BackupEntry or None.
    backups = []
    for fname in sftp.listdir():
        try:
            # Skip non-directories.
            if not stat.S_ISDIR(sftp.stat(fname).st_mode):
                continue
        except IOError:
            continue
        try:
            manifest = sftp.open(os.path.join(fname, 'manifest.json'), 'r')
            data = json.loads(manifest.read())
            manifest.close()
        except IOError:
            # Missing/unreadable manifest: not a backup folder.
            continue
        except ValueError:
            # Malformed JSON: skip.
            continue
        backups.append(self.BackupEntry(
            fname,
            data['created-at'],
            data['build'],
            data['with-data'],
            data['compression'] if 'compression' in data else False)
        )
    if len(backups) == 0:
        print('No backups found in given directory!')
        return None
    if len(backups) == 1:
        # Single candidate: just confirm it.
        backup = backups[0]
        print('Found single backup in given directory:')
        self.print_backup_details(backup)
        if raw_input('Restore whole FreeNAS installation from that backup? (y/n): ').lower() == 'y':
            return backup
        else:
            return None
    # Multiple candidates: newest first, page a table, then prompt.
    backups.sort(key=lambda x: x.created_at, reverse=True)
    buffer = ' {:<40}{:<24}{:<12}\n'.format('Backup name', 'Backup timestamp', 'With data?')
    for idx, i in enumerate(backups, start=1):
        buffer += '{:>2}. {:<40}{:<24}{:<12}\n'.format(idx, i.name, i.created_at, 'yes' if i.with_data else 'no')
    pydoc.pager(buffer)
    choose = raw_input('Type backup no. to restore or leave field blank to use newest one: ')
    # Blank/non-numeric input selects the newest backup (index 0).
    if not choose.strip().isdigit():
        idx = 0
    else:
        idx = int(choose.strip()) - 1
    backup = backups[idx]
    print('Backup details:')
    self.print_backup_details(backup)
    if raw_input('Restore whole FreeNAS installation from that backup? (y/n): ').lower() == 'y':
        return backup
    else:
        return None
def inspect_source(obj):
    """Page the source code of *obj*; fall back to help() when unavailable.

    Fix: the original used a bare ``except:``, which also swallowed
    KeyboardInterrupt and SystemExit. inspect.getsourcelines() raises
    TypeError for builtins/C objects and OSError when source can't be
    found, so only those trigger the help() fallback now.
    """
    import inspect
    import pydoc
    try:
        source_lines = inspect.getsourcelines(obj)[0]
    except (TypeError, OSError):
        # No Python source available for this object.
        return help(obj)
    pydoc.pager(''.join(source_lines))
    return None
def pager(filename,reclen,totrec):
    # Display the temp file: paged when it exceeds the terminal size,
    # printed inline otherwise; optionally copied to a log handle, then
    # the file is deleted. Python 2 code (print statement).
    # LINES/COLUMNS and _FD are module globals -- assumed to hold the
    # terminal dimensions and an optional open log file; TODO confirm.
    if totrec > int(LINES) or reclen > int(COLUMNS):
        pydoc.pager(open(filename).read())
    else:
        print open(filename).read()
    if _FD:
        # NOTE(review): the file is re-read for the log copy and the
        # handles are never explicitly closed; relies on CPython refcounting.
        _FD.write(open(filename).read())
    os.unlink(filename)
class LocalSandbox(Sandbox):
    # Sandbox implementation backed by a local SDRROOT directory tree.
    # Python 2 code (old-style raise/except, iteritems/has_key, prints).

    def __init__(self, sdrroot):
        super(LocalSandbox, self).__init__()
        # Registries of launched components/services, keyed by instance name.
        self.__components = {}
        self.__services = {}
        self._sdrroot = LocalSdrRoot(sdrroot)
        # Shared ComponentHost process, created lazily.
        self.__container = None

    def _getComponentHost(self):
        # Lazily launch the shared ComponentHost on first use.
        if self.__container is None:
            self.__container = self._launchComponentHost()
        return self.__container

    def _launchComponentHost(self, instanceName=None):
        # Directly create the sandbox object instead of going through launch()
        profile = self._sdrroot.domPath('/mgr/rh/ComponentHost/ComponentHost.spd.xml')
        spd, scd, prf = self._sdrroot.readProfile(profile)
        if instanceName is None:
            instanceName = self._createInstanceName('ComponentHost', 'resource')
        refid = str(uuid4())
        comp = ComponentHost(self, profile, spd, scd, prf, instanceName, refid, None)

        # Likewise, since the specific component type is known, create the
        # launcher directly. The deployment root is overridden to point to the
        # root of the local filesystem; all component paths provided to the
        # component host will be absolute.
        execparams = {'RH::DEPLOYMENT_ROOT': '/'}
        comp._launcher = LocalComponentLauncher(execparams, {}, True, {}, None, None, None, False)
        comp._kick()
        return comp

    def _getComponentContainer(self, componentType):
        # Services live in their own registry; everything else is a component.
        if componentType == 'service':
            return self.__services
        else:
            return self.__components

    def _createInstanceName(self, softpkgName, componentType='resource'):
        # Use one-up counter to make instance name unique.
        container = self._getComponentContainer(componentType)
        counter = len(container) + 1
        while True:
            name = '%s_%d' % (softpkgName.replace('.', '_'), counter)
            if name not in container:
                return name
            counter += 1

    def _checkInstanceName(self, instanceName, componentType='resource'):
        # Ensure instance name is unique.
        container = self._getComponentContainer(componentType)
        return instanceName not in container

    def _checkInstanceId(self, refid, componentType):
        # Ensure refid is unique.
        container = self._getComponentContainer(componentType)
        for component in container.values():
            if refid == component._refid:
                return False
        return True

    def _createLauncher(self, comptype, execparams, initProps, initialize,
                        configProps, debugger, window, timeout, shared, stdout):
        # Map the component type string to the matching local launcher class;
        # unknown types yield None.
        if comptype == 'resource':
            clazz = LocalComponentLauncher
        elif comptype in ('device', 'loadabledevice', 'executabledevice'):
            clazz = LocalDeviceLauncher
        elif comptype == 'service':
            clazz = LocalServiceLauncher
        else:
            return None
        return clazz(execparams, initProps, initialize, configProps, debugger,
                     window, timeout, shared, stdout)

    def getComponents(self):
        return self.__components.values()

    def getServices(self):
        return self.__services.values()

    def _registerComponent(self, component):
        # Add the component to the sandbox state.
        self.__components[component._instanceName] = component

    def _unregisterComponent(self, component):
        name = component._instanceName
        if name in self.__components:
            del self.__components[name]

    def _addService(self, service):
        self.__services[service._instanceName] = service

    def getComponent(self, name):
        return self.__components.get(name, None)

    def retrieve(self, name):
        # Alias of getComponent().
        return self.__components.get(name, None)

    def getComponentByRefid(self, refid):
        # Linear scan; refids are checked unique at creation time.
        for component in self.__components.itervalues():
            if refid == component._refid:
                return component
        return None

    def getService(self, name):
        return self.__services.get(name, None)

    def getSdrRoot(self):
        return self._sdrroot

    def setSdrRoot(self, path):
        # Validate new root: must contain both dom/ and dev/ subtrees.
        if not os.path.isdir(path):
            raise RuntimeError, 'invalid SDRROOT, directory does not exist'
        if not os.path.isdir(os.path.join(path, 'dom')):
            raise RuntimeError, 'invalid SDRROOT, dom directory does not exist'
        if not os.path.isdir(os.path.join(path, 'dev')):
            raise RuntimeError, 'invalid SDRROOT, dev directory does not exist'
        self._sdrroot = LocalSdrRoot(path)

    def shutdown(self):
        # Tear everything down; individual failures are logged, not raised.
        ConnectionManager.instance().cleanup()
        self.stop()

        # Clean up all components
        for name, component in self.__components.items():
            log.debug("Releasing component '%s'", name)
            try:
                component.releaseObject()
            except:
                log.debug("Component '%s' raised an exception while exiting", name)
        self.__components = {}

        # Terminate all services
        for name, service in self.__services.items():
            log.debug("Terminating service '%s'", name)
            try:
                service._terminate()
            except:
                log.debug("Service '%s' raised an exception while terminating", name)
        self.__services = {}

        # Clean up the component host
        if self.__container:
            log.debug('Releasing component host')
            try:
                self.__container.releaseObject()
            except:
                log.debug('Component host raised an exception while terminating')
            self.__container = None
        super(LocalSandbox, self).shutdown()

    def browse(self, searchPath=None, objType=None, withDescription=False):
        # Walk the SDRROOT (or an explicit searchPath) for *spd.xml profiles,
        # group them by namespace, and page a formatted listing.
        if not searchPath:
            if objType == None or objType == "all":
                pathsToSearch = [os.path.join(self.getSdrRoot().getLocation(), 'dom', 'components'), \
                                 os.path.join(self.getSdrRoot().getLocation(), 'dev', 'devices'), \
                                 os.path.join(self.getSdrRoot().getLocation(), 'dev', 'services')]
            elif objType == "components":
                pathsToSearch = [os.path.join(self.getSdrRoot().getLocation(), 'dom', 'components')]
            elif objType == "devices":
                pathsToSearch = [os.path.join(self.getSdrRoot().getLocation(), 'dev', 'devices')]
            elif objType == "services":
                pathsToSearch = [os.path.join(self.getSdrRoot().getLocation(), 'dev', 'services')]
            else:
                raise ValueError, "'%s' is not a valid object type" % objType
        else:
            pathsToSearch = [searchPath]
        output_text = ""
        for path in pathsToSearch:
            rsrcDict = {}
            # NOTE(review): str.rstrip returns a new string; this call has
            # no effect as written.
            path.rstrip("/")
            objType = path.split("/")[-1]
            if objType == "components":
                pathPrefix = "$SDRROOT/dom/components"
            elif objType == "devices":
                pathPrefix = "$SDRROOT/dev/devices"
            elif objType == "services":
                pathPrefix = "$SDRROOT/dev/services"
            else:
                pathPrefix = path
            for root, dirs, fnames in os.walk(path):
                for filename in fnmatch.filter(fnames, "*spd.xml"):
                    filename = os.path.join(root, filename)
                    try:
                        spd = parsers.spd.parse(filename)
                        # Namespace is the directory path between the search
                        # root and the softpkg's own directory.
                        full_namespace = root[root.find(path) + len(path) + 1:]
                        namespace = full_namespace[:full_namespace.find(spd.get_name())]
                        if namespace == '':
                            namespace = pathPrefix
                        if rsrcDict.has_key(namespace) == False:
                            rsrcDict[namespace] = []
                        if withDescription == True:
                            new_item = {}
                            new_item['name'] = spd.get_name()
                            if spd.description == None:
                                # Fall back to the first implementation's
                                # description, ignoring the code-generator
                                # boilerplate text.
                                if spd.get_implementation()[0].description == None or \
                                   spd.get_implementation()[0].description == "The implementation contains descriptive information about the template for a software component.":
                                    new_item['description'] = None
                                else:
                                    new_item['description'] = spd.get_implementation()[0].description.encode("utf-8")
                            else:
                                new_item['description'] = spd.description
                            rsrcDict[namespace].append(new_item)
                        else:
                            rsrcDict[namespace].append(spd.get_name())
                    except Exception, e:
                        print str(e)
                        print 'Could not parse %s', filename
            for key in sorted(rsrcDict.iterkeys()):
                if key == pathPrefix:
                    output_text += "************************ " + str(key) + " ***************************\n"
                else:
                    output_text += "************************ " + str(pathPrefix + "/" + key) + " ***************************\n"
                value = rsrcDict[key]
                value.sort()
                if withDescription == True:
                    for item in value:
                        if item['description']:
                            output_text += str(item['name']) + " - " + str(item['description']) + "\n"
                        else:
                            output_text += str(item['name']) + "\n"
                        output_text += "--------------------------------------\n"
                else:
                    # Pad to a multiple of 4 and render in four columns.
                    l = value
                    while len(l) % 4 != 0:
                        l.append(" ")
                    split = len(l) / 4
                    l1 = l[0:split]
                    l2 = l[split:2 * split]
                    l3 = l[2 * split:3 * split]
                    l4 = l[3 * split:4 * split]
                    for v1, v2, v3, v4 in zip(l1, l2, l3, l4):
                        output_text += '%-30s%-30s%-30s%-30s\n' % (v1, v2, v3, v4)
                    output_text += "\n"
        pydoc.pager(output_text)
def check_targets(player, targets, from_file, output_format, show_exceeds,
                  show_initials, show_on_market, show_faulty_targets, show_targets):
    """Compare the player's holdings/ICO positions against configured targets.

    Targets are loaded from each file in `from_file` (one per line) and from
    the comma-separated `targets` arguments, then checked against the
    player's current state.  The resulting tables (market positions and/or
    in-progress ICOs) are rendered with `tabulate` and shown via pydoc.pager.
    The `show_*` flags select which sections are computed and displayed.
    """
    # Nothing was requested — refuse to do the (potentially slow) checks.
    if not show_initials and not show_on_market and not show_targets:
        click.echo("Not showing anything", err=True)
        raise click.exceptions.Exit(99)
    parsed_targets = Targets()
    parsed_targets.init_macros()
    # Load target definitions from files, one per line; a parse failure
    # reports the offending line and re-raises.
    for file in from_file:
        with file:
            for line in file:
                line = line.rstrip()
                try:
                    parsed_targets.load_line(line)
                except:
                    click.echo(f"error when parsing line `{line}'", err=True)
                    raise
    # Command-line targets may pack several definitions separated by commas.
    for target in targets:
        for t in target.split(","):
            parsed_targets.load_line(t)
    parsed_targets.cleanup_macros()
    if show_targets:
        # Just dump the parsed targets and fall through.
        for key in sorted(parsed_targets.keys()):
            parsed_target = parsed_targets[key]
            click.echo(f"{key} = {parsed_target}")
    elif show_faulty_targets:
        # A target is "faulty" when a min bound exceeds its max bound.
        for key in sorted(parsed_targets.keys()):
            parsed_target = parsed_targets[key]
            if (None not in (parsed_target.holding_min, parsed_target.holding_max)
                    and parsed_target.holding_min > parsed_target.holding_max) or \
               (None not in (parsed_target.tower_min, parsed_target.tower_max)
                    and parsed_target.tower_min > parsed_target.tower_max):
                click.echo(f"{key} = {parsed_target}")
    if not any(parsed_targets.values()):
        print("no target specified at all")
        raise click.exceptions.Exit(11)
    in_initial = []
    # `checks` = character ids still needing a lookup; `initialized` = rows
    # already resolved against market state.
    checks, initialized = _check_current(parsed_targets, player, show_exceeds,
                                         show_on_market)
    if checks:
        for c in batch_character_info(player, checks):
            cid = c.character_id
            parsed_target = parsed_targets[cid]
            # NOTE(review): assumes holding_min and tower_min are both non-None
            # for every checked target — confirm upstream guarantees this.
            stocks_for_me = parsed_target.holding_min + parsed_target.tower_min
            if isinstance(c, TICO):
                # Character is still in its ICO phase.
                if show_initials:
                    try:
                        my_initial = get_my_ico(player, c.id)
                        my_investment = my_initial.amount
                    except ServerSentError as e:
                        # "Not participating in this ICO yet" is expected;
                        # anything else is a real error.
                        if e.message != '尚未参加ICO。':
                            raise
                        my_investment = 0
                    total_investment = c.total
                    total_investors = c.users
                    end_date = c.end.replace(tzinfo=None)
                    colored_end_date = time_color(end_date)
                    # Current level is bounded by both total money and head count.
                    lo_lv, up_lv = sorted(
                        (ico_now_level_by_investment(total_investment),
                         ico_now_level_by_investors(total_investors)))
                    if lo_lv < 1:
                        lo_lv = 1
                    # One output row per reachable level (plus one beyond).
                    for level in range(lo_lv, up_lv + 2):
                        min_investment = ico_minimal_investment_for_level(level)
                        min_investors = ico_minimal_investors_for_level(level)
                        offerings = ico_offerings_for_level(level)
                        more_investment = max(0, min_investment - total_investment)
                        more_investors = max(0, min_investors - total_investors)
                        stocks_for_others = offerings - stocks_for_me
                        # NOTE(review): the 5000-per-extra-investor assumption
                        # is hard-coded — confirm against game rules.
                        investment_others_part = total_investment - my_investment + more_investors * 5000
                        investment_my_part = max(
                            math.ceil(investment_others_part / stocks_for_others * stocks_for_me),
                            ico_minimal_investment_for_level(level) /
                            ico_offerings_for_level(level) * stocks_for_me)
                        more_investment_my_part = investment_my_part - my_investment
                        # First element is the sort key (end date, level).
                        in_initial.append([
                            (end_date, level),
                            [
                                f"#{cid}", c.name, colored_end_date,
                                f"{parsed_target}({stocks_for_me})",
                                level_colors(level),
                                f"{offerings}",
                                f"{my_investment}",
                                f"{total_investment}",
                                f"{total_investors}",
                                fall_to_met(more_investment),
                                fall_to_met(more_investors),
                                f"{investment_my_part}",
                                fall_to_met(more_investment_my_part),
                            ]
                        ])
                    # Separator row after each ICO's level block.
                    in_initial.append([(end_date, 100), []])
            else:
                # Already on the market: show unless already matching.
                if show_on_market:
                    if parsed_target.check(0, 0) != ('match', 'match'):
                        initialized[cid] = [
                            f"#{cid}", c.name, str(parsed_target),
                            parsed_target.colored_comparison(0, 0)
                        ]
                checks.remove(cid)
    if not initialized and not in_initial:
        click.echo("Nothing to show", err=True)
        raise click.exceptions.Exit(99)
    # Render both sections into one buffer so the pager is invoked once.
    with io.StringIO() as output:
        if initialized:
            print("In market:", file=output)
            print(tabulate(
                [initialized[key] for key in sorted(initialized.keys())],
                ('CID', 'Name', 'Target', 'Actual'), output_format),
                file=output)
        if in_initial and initialized:
            print(file=output)
        if in_initial:
            print("ICOs:", file=output)
            sorted_in_initial = [
                x[1] for x in sorted(in_initial, key=lambda x: x[0])
            ]
            # Drop trailing separator rows.
            while not sorted_in_initial[-1]:
                del sorted_in_initial[-1]
            print(tabulate(
                sorted_in_initial,
                ('CID', '名字', '结束时间', '目标', 'Lv', '总发行',
                 colored('自投入₵', 'yellow'), colored('总投入₵', 'yellow'),
                 colored('人数', 'yellow'), '还需₵', '还需人数', '目标投入₵',
                 '还需投入₵'),
                output_format, disable_numparse=True),
                file=output)
        pydoc.pager(output.getvalue())
    # NOTE(review): this looks unreachable — the same condition already
    # raised Exit(99) above; confirm before removing.
    if not in_initial and not initialized:
        click.echo("Nothing to show", err=True)
def echo_with_pager(output):
    """Send *output* to the user's pager (plain print when stdout is not a tty)."""
    pydoc.pager(output)
def print_tree(minmax=False):
    """Page the rendered game tree of the global `alice` object.

    minmax: when True render with the min-max ordering, otherwise max-min.
    """
    renderer = alice.print_tree_minmax if minmax else alice.print_tree_maxmin
    pydoc.pager('\n'.join(renderer(0, level_down=99)))
def paginate(text):
    """Display *text* one screenful at a time using the system pager."""
    pydoc.pager(text)
"--page", dest="page", action="store_true", help="use pager to scroll output", ) args = parser.parse_args() from rich.console import Console with open(args.path, "rt", encoding="utf-8") as markdown_file: markdown = Markdown( markdown_file.read(), justify="full" if args.justify else "left", code_theme=args.code_theme, hyperlinks=args.hyperlinks, inline_code_lexer=args.inline_code_lexer, ) if args.page: import pydoc import io console = Console(file=io.StringIO(), force_terminal=args.force_color, width=args.width) console.print(markdown) pydoc.pager(console.file.getvalue()) # type: ignore else: console = Console(force_terminal=args.force_color, width=args.width) console.print(markdown)
def parse_cl_vars():
    """Construct the argparse parser, parse sys.argv, and return the result as a dict.

    Parsing happens in two passes: the top-level parser consumes the first
    noun plus all options, then a sub-parser consumes an optional sub-noun
    (for "hdl") and the asset name.  Help/usage/error output is routed
    through pydoc.pager so the long help text is scrollable.

    Returns:
        dict mapping option names to parsed values; "noun" is collapsed to
        "<noun>-<subnoun>" when a sub-noun was given, and "" when absent.
    """
    description = ("""
    Utility for displaying/recording FPGA resource utilization for HDL OpenCPI assets.\n
    Usage Examples: \n
    show utilization for a single worker (using build results from all platforms):
        ocpidev utilization worker <worker-name>
    show utilization for a single worker (using build results from a single platform):
        ocpidev utilization worker <worker-name> --hdl-platform <hdl-platform>
    show utilization for a single worker (using build results from a single target):
        ocpidev utilization worker <worker-name> --hdl-target <hdl-target>
    show utilization for all workers (in the current project/library/etc):
        ocpidev utilization workers
    show utilization for a single worker in a named library:
        ocpidev utilization worker <worker-name> -l <library>
    show utilization for a single HDL Platform:
        ocpidev utilization hdl platform <platform-name>
    show utilization for all HDL Platforms (in the current project):
        ocpidev utilization hdl platforms
    show utilization for a single HDL Assembly:
        ocpidev utilization hdl assembly <assembly-name>
    show utilization for all HDL Assemblies (in the current project):
        ocpidev utilization hdl assemblies
    record utilization for a single HDL Assembly in LaTeX format:
        ocpidev utilization hdl assembly <assembly-name> --format=latex
    show utilization for all supported assets in a project:
        ocpidev utilization project
    record utilization for all supported assets in a project in LaTeX format:
        ocpidev utilization project --format=latex
    """)
    parser = argparse.ArgumentParser(description=description,
                                     formatter_class=argparse.RawTextHelpFormatter)
    # Page help/usage instead of dumping it straight to the terminal.
    parser.print_help = types.MethodType(
        lambda self, _=None: pydoc.pager("\n" + self.format_help()), parser)
    parser.print_usage = types.MethodType(
        lambda self, _=None: pydoc.pager("\n" + self.format_usage()), parser)
    # This displays the error AND help screen when there is a usage error or no
    # arguments provided.
    # FIX: use sys.exit (as the rest of this function does) rather than the
    # site-module exit() builtin, which is not guaranteed to exist.
    parser.error = types.MethodType(
        lambda self, error_message: (
            pydoc.pager(error_message + "\n\n" + self.format_help()),
            sys.exit(1)
        ), parser)
    parser.add_argument("noun", type=str, nargs='?',
                        help="This is either the noun to show or the" +
                        " authoring model to operate on. If choosing an authoring model " +
                        "(hdl), there are secondary nouns that can follow.\nValid " +
                        "nouns are: " + ", ".join(FIRST_NOUNS) + "\nValid secondary " +
                        "nouns for 'hdl' are: " + ", ".join(SUBNOUNS['hdl']),
                        choices=FIRST_NOUNS)
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Be verbose with output.")
    parser.add_argument("--format", dest="output_format", default="table",
                        choices=ReportableAsset.valid_formats,
                        help='Format to output utilization information. "latex" results in ' +
                        'silent stdout, and all output goes to "utilization.inc" files ' +
                        'in the directories for the assets acted on.')
    parser.add_argument("--hdl-platform", metavar="HDL_PLAT", dest="hdl_plats",
                        action="append",
                        help="Specify which HDL platform from the list of buildable " +
                        "platforms to show utilization for.")
    parser.add_argument("--hdl-target", metavar="HDL_TGT", dest="hdl_tgts",
                        action="append",
                        help="Specify which HDL target from the list of buildable " +
                        "targets to show utilization for. Only valid for workers (not assemblies)")
    parser.add_argument("-d", dest="cur_dir", default=os.path.curdir,
                        help="Change directory to the specified path before proceeding. " +
                        "Changing directory may have no effect for some commands.")
    parser.add_argument("-l", "--library", dest="library", default=None,
                        help="Specify the component library in which this operation will be " +
                        "performed.")
    parser.add_argument("--hdl-library", dest="hdl_library", default=None,
                        help="Specify the hdl library in which this operation will be " +
                        "performed.")
    parser.add_argument("-P", dest="hdl_plat_dir", default=None,
                        help="Specify the hdl platform subdirectory to operate in.")

    first_pass_args, remaining_args0 = parser.parse_known_args()

    # Create a subparser with similar initialization as the top-level parser.
    # This will parse sub-nouns (e.g. after authoring model) and the actual
    # name/asset to act on.
    subparser = argparse.ArgumentParser(description=description)
    subparser.print_help = parser.print_help
    subparser.print_usage = parser.print_usage
    subparser.error = parser.error

    # If this is a sub-parser-noun, add an argument to handle the list of subnouns.
    first_noun = first_pass_args.noun
    if first_noun in SUB_PARSER_NOUNS:
        subnouns = SUBNOUNS[first_pass_args.noun]
        subparser.add_argument("subnoun", type=str,
                               help='The sub-noun to operate on (after ' +
                               'the specified first noun "' + first_noun +
                               '"). Options are ' + '"' + ", ".join(subnouns) + '".',
                               choices=subnouns)

    # Finally, parse the actual name of the asset to act on.
    subparser.add_argument("name", default=".", type=str, action="store", nargs='?',
                           help="This is the name of the asset to show utilization for.")
    args, remaining_args1 = subparser.parse_known_args(remaining_args0,
                                                       namespace=first_pass_args)
    vars_args = vars(args)
    # Collapse "<noun> <subnoun>" into a single "noun-subnoun" key for callers.
    if "subnoun" in vars_args:
        vars_args["noun"] = first_pass_args.noun + "-" + vars_args["subnoun"]
    if vars_args["noun"] is None:
        vars_args["noun"] = ""
    if remaining_args1:
        ocpiutil.logging.error("invalid options were used: " +
                               " ".join(remaining_args1))
        sys.exit(1)
    return vars_args
# List every available article, then let the user pick one by message id.
for mid, sub in subjects:  # (message id, subject)
    print(mid, sub)

while True:
    choice = input(
        "Which article would you like to read now? Provide it's message id, or skip to quit: "
    )
    if not choice:
        break
    for mid, sub in subjects:
        if str(mid) != choice:
            continue
        # Show the chosen article on a separate pager screen.  The article is
        # fetched without re-validating the message id.
        stuff = n.article(mid)
        # A single str.join would be faster, but each body line must be
        # decoded individually so undecodable lines can be skipped.
        text = '%s\n%s\n' % (stuff[1][0], stuff[1][1])
        for line in stuff[1][2]:
            try:
                text += line.decode('unicode_escape') + '\n'
            except UnicodeDecodeError:
                pass
        pydoc.pager(text)
        break
print("Thank you.")
def get_pages(user, max_page, quiet, nt):
    """Fetch pages 1..max_page for *user* and show them joined in the pager."""
    pages = [get_page(user, number, quiet, nt) for number in range(1, max_page + 1)]
    pydoc.pager("\n".join(pages))
def interactiveMode(file=None):
    """Run the interactive PE-analysis shell.

    file: optional path of the PE file to analyze; "~" is expanded.  When
    omitted (or not found) the user must load one with the `file` command.
    Commands are read in a loop until "q"/"quit"; each dispatches to the
    PeAnalyzer / SignatureMatcher / VirusTotalClient helpers.
    """

    def _read_command():
        # Prompt until the user actually submits a line; a bare ctrl-D (EOF)
        # is rejected so the shell is only left via q/quit.
        while True:
            try:
                return input(">> ")
            except EOFError:
                print("Please confirm with enter, don't use ctrl-D")

    peAnalyzer = None
    matcher = None
    vt = None
    print("Entering interactive mode...")
    if file is None:
        print("Please specify file to analyze or type help")
    else:
        file = file.replace("~", os.path.expanduser("~"))
        if not os.path.isfile(file):
            print(constants.BLUE + "Could not find the specified file %s" % file + constants.RESET)
        else:
            peAnalyzer = PeAnalyzer(file)
            matcher = SignatureMatcher(file)
            vt = VirusTotalClient(file)

    def complete(text, state):
        # Readline tab-completion over filesystem paths (with "~" expansion).
        text = text.replace("~", os.path.expanduser("~"))
        return (glob.glob(text + '*') + [None])[state]

    readline.set_completer_delims(' \t\n;')
    readline.parse_and_bind("tab: complete")
    readline.set_completer(complete)

    user_in = _read_command()
    while user_in != "q" and user_in != "quit":
        if user_in.startswith("file ") or user_in.startswith("f "):
            # File is specified
            args = user_in.split(" ")
            if len(args) > 2:
                print("Please use the command only with one argument: file|f <filename>")
            else:
                file = args[1]
                file = file.replace("~", os.path.expanduser("~"))
                if not os.path.isfile(file):
                    print(constants.BLUE + "Could not find the specified file %s" % file + constants.RESET)
                else:
                    peAnalyzer = PeAnalyzer(file)
                    matcher = SignatureMatcher(file)
                    vt = VirusTotalClient(file)
        elif user_in.startswith("yara ") or user_in.startswith("y "):
            args = user_in.split(" ")
            if len(args) > 2:
                print("Please use the command only with one argument: yara|y <rule-file>")
            else:
                yarafile = args[1]
                yarafile = yarafile.replace("~", os.path.expanduser("~"))
                checkYara(file, yarafile)
        elif user_in == "header" or user_in == "h":
            print("Printing header")
            peAnalyzer.printHeaderInformation()
        elif user_in == "sections":
            print("Printing sections")
            peAnalyzer.printSections()
        elif user_in == "imports" or user_in == "i":
            peAnalyzer.printImportInformation()
        elif user_in == "exports" or user_in == "e":
            peAnalyzer.printExports()
        elif user_in == "resources" or user_in == "r":
            blacklistedResources = peAnalyzer.blacklistedResources()
            print("Blacklisted resources found: " + str(blacklistedResources) if len(blacklistedResources) > 0 else "No blacklisted resources found")
            peAnalyzer.showAllResources()
        elif user_in == "virusTotal" or user_in == "v":
            print(vt.printReport())
        elif user_in == "tlsCallbacks" or user_in == "t":
            peAnalyzer.printTLS()
        elif user_in == "relocations":
            peAnalyzer.printRelocations()
        elif user_in == "strings -a":
            # All strings can be huge — page them.
            pydoc.pager(peAnalyzer.printAllStrings())
        elif user_in == "strings -b":
            peAnalyzer.getBlacklistedStrings()
        elif user_in == "dump_resources" or user_in == "d":
            peAnalyzer.dumpResourcesToFile()
        elif user_in == "signatures" or user_in == "s":
            packers = matcher.findPackers()
            if len(packers):
                print(constants.RED + "The signature of the following packer was found: " + str(packers) + constants.RESET)
            else:
                print(constants.GREEN + "No packer signature was found in the PE file" + constants.RESET)
        elif user_in == "indicators":
            collectIndicators(vt, peAnalyzer, matcher)
        elif user_in == "indicators -a":
            # NOTE(review): passes the builtin `all` as a truthy "show all"
            # flag — confirm collectIndicators' expected argument type.
            collectIndicators(vt, peAnalyzer, matcher, all)
        elif user_in == "urls" or user_in == "u":
            urls = peAnalyzer.findURLS()
            if len(urls) > 0:
                print("The following (maybe non-malicious) URLs have been found:")
                for url in urls:
                    print("\t" + url)
            else:
                print("No URL found in the file's strings")
        else:
            # Unknown command (or explicit "help"): print the command reference.
            if user_in != "help":
                print("Command '" + user_in + "' is unknown.")
            print("Known commands:")
            # FIX: this line started with "\v" (a vertical-tab escape); the
            # intended text is a tab plus "v/virusTotal" like the other entries.
            print("\tv/virusTotal - submit the file to VirusTotal and display a summary of the result")
            print("\tf/file <filename> - specify which file should be analyzed")
            print("\tq/quit - quit the program")
            print("\tindicators - show indicators of malware in the PE file")
            print("\tindicators -a - show indicators of malware in the PE file (show all checks)")
            print("\th/header - show information extracted from the header")
            print("\ti/imports - show imports of the PE file")
            print("\te/exports - show exports of the PE file")
            print("\tr/resources - show resources of the PE file")
            print("\td/dump_resources - Dump every resource to a file in folder ./resources/ with resource ID or resource name as filename ")
            print("\tt/tlsCallbacks - show TLS callback addresses of the PE file")
            print("\trelocations - show relocation table of the PE file")
            print("\ts/signatures - find signatures of malicious patterns or packers in the PE file")
            print("\tsections - show all sections in the file")
            print("\tstrings -a - show all strings we can find in the PE file")
            print("\tstrings -b - show blacklisted strings we can find in the PE file")
            print("\tu/urls - list all URLs found in the PE file")
            print("\thelp - print this help text")
        user_in = _read_command()
def help_commands(args):
    """Page the help text for every registered logonmgr command."""
    sections = ['logonmgr commands:\n']
    for command in command_help:
        sections.append('\n' + str(command_help[command]) + '\n')
    pydoc.pager(''.join(sections))
def __call__(self, request=None):
    """Page project documentation for objects of `h`'s type; otherwise defer to pydoc.Helper.

    NOTE(review): the exact-type comparison against type(h) deliberately
    excludes subclasses — presumably `h` is a module-level reference object;
    confirm against the surrounding file.
    """
    if type(request)==type(h):
        # Same concrete type as `h`: show the module header, the object's
        # docstring, and its rendered documentation in the pager.
        pydoc.pager(header+request.__doc__+"\n\n\n"+doc_asstring(request))
    else:
        # Anything else gets the standard pydoc help behavior.
        pydoc.Helper.__call__(self,request)
def _print_output(self, output_lines):
    # Join the collected lines, then either page or print them.  Paging is
    # used only when running interactively, the user enabled a pager in
    # settings, and the text is considered "long".
    # NOTE(review): len(output) counts characters while TERM_SIZE[0] looks
    # like a terminal dimension — confirm this comparison is the intended
    # length heuristic.
    output = '\n'.join(output_lines)
    if interactive and getattr(settings, 'pager', False) and len(output) > TERM_SIZE[0]:
        pager(output)
    else:
        print output
import pydoc

# Demo: hand a long string to the system pager (plain print when not a tty).
long_text = "SOME_VERY_LONG_TEXT"
pydoc.pager(long_text)
# Build the subject listing (newest first) from the raw IMAP FETCH response.
for item in data:
    if not isinstance(item, tuple):
        continue
    mid = item[0].decode('unicode_escape').split()[0]
    for line in item[1].decode('unicode_escape').split('\r\n'):
        if line.startswith('Subject:'):
            sub = line
            break
    titles.append(mid + ' - ' + sub)

listing = 'Use message id to access a specific email.\n\n' + '\n'.join(titles[::-1])
print(listing)

while True:
    mid = input(
        "Enter the mid of the message you want to read, or skip to quit: ")
    if not mid:
        break
    rsp, data = recvSvr.fetch(mid, '(RFC822)')
    stuff = data[0][1].decode('unicode_escape').split('\r\n')
    # Locate the first "From:" header; the three lines from there are shown.
    for i, line in enumerate(stuff):
        if line.startswith('From:'):
            break
    headers = stuff[i:i + 3]
    # Everything from the first blank line onward is the body (blank line included).
    body = stuff[stuff.index(''):]
    pydoc.pager('\n'.join(headers + body))
print("Thanks for using the client. Exit done.")
def page(text):
    """Show *text* through the system pager."""
    pydoc.pager(text)
# Condor status summary: run each configured query, stack their outputs with
# a blank line after every section, and page the combined report.
items = [
    StatusItemSchedd(
        cmd=["condor_status", "-schedd"],
        header=[0],
        separator="",
        match="lpcschedd",
    ),
    StatusItemSubmitters(
        cmd=["condor_status", "-submitters"],
        header=[0],
        separator="",
        match="lpcschedd",
        replace=[("group_cmslpc.", ""), ("@fnal.gov", "")],
    ),
    StatusItemUserprio(
        cmd=["condor_userprio", "-grouporder"],
        header=[1, 2],
        separator="",
        match="group_cmslpc",
        replace=[("group_cmslpc.", ""), ("@fnal.gov", ""),
                 ("User Name", "User__Name"), ("In Use", "In__Use"),
                 ("Total Usage", "Total__Usage"), ("Time Since", "Time__Since"),
                 ("Last Usage", "Last__Usage")],
    ),
]
report_lines = []
for status_item in items:
    report_lines.extend(status_item.call())
    report_lines.append('')
pydoc.pager('\n'.join(report_lines))