def process_command(c):
    """Dispatch one main-menu choice.

    Returns False when the user chose option 9 (quit) so the caller's
    loop stops; returns True otherwise.
    """
    if c == 1 or c == 2:
        # 1 = food eaten, 2 = exercise done.
        ate = c == 1
        calories = click.prompt('Number of calories', type=int)
        action = "eat" if ate else "do"
        note = click.prompt('What did you {}?'.format(action))
        create_entry(calories, note, ate)
    elif c == 3:
        weight = click.prompt('Enter your new weight', type=int)
        create_entry(weight)
    elif c == 4:
        #TODO: Lookup items via pager
        pass
    elif c == 5:
        click.echo('(F)ood or (E)xercise? ')
        c = click.getchar().lower()
        # BUG FIX: the char is lower-cased above, so the old comparison
        # `c == 'F'` could never be True and every shortcut was recorded
        # as exercise. Compare against the lowercase letter.
        ate = c == 'f'
        calories = click.prompt('Number of calories', type=int)
        note = click.prompt('Name this entry')
        create_entry(calories, note, ate)
        add_shortcut(calories, note)
    elif c == 6:
        show_status()
    elif c == 7:
        show_log()
    elif c == 8:
        # Open the user's config file in their editor.
        click.edit(filename=bogg_utils.CONFIG_PATH)
    elif c == 9:
        return False
    return True
def clip(save, edit, view, overwrite):
    """convert html in the clipboard to markdown"""
    html = get_clipboard_html()
    if html is None:
        click.echo('No html in the clipboard')
        return

    target = save
    if target is None:
        # No destination given: just print the converted markdown.
        click.echo(html2md.html_to_markdown(html).strip())
        return

    if not target.endswith('.md'):
        click.echo('Note must have extension ".md"')
        return

    note = util.abs_path(target)
    if os.path.exists(note) and not overwrite:
        click.echo('Note already exists at "{}" (specify `--overwrite` to overwrite)'.format(note))
        return

    # Localize remote images, convert, and write the note out.
    rewritten = parsers.rewrite_external_images(html, note)
    markdown = html2md.html_to_markdown(rewritten).strip()
    with open(note, 'w') as f:
        f.write(markdown)

    if edit:
        click.edit(filename=note)
    if view:
        compile_note(note, '/tmp', view=True)
def clip(save, edit, browser, overwrite):
    """convert html in the clipboard to markdown"""
    html = clipboard.get_clipboard_html()
    if html is None:
        click.echo('No html in the clipboard')
        return

    if save is None:
        # No destination given: print the conversion and stop.
        click.echo(html2md.html_to_markdown(html).strip())
        return

    if not save.endswith('.md'):
        click.echo('Note must have extension ".md"')
        return

    note = Note(save)
    if os.path.exists(note.path.abs) and not overwrite:
        click.echo('Note already exists at "{}" (specify `--overwrite` to overwrite)'.format(note.path.abs))
        return

    # Localize remote images, convert, and persist the note.
    rewritten = parsers.rewrite_external_images(html, note)
    note.write(html2md.html_to_markdown(rewritten).strip())

    if browser:
        click.launch('http://localhost:{0}/{1}'.format(conf.PORT, note.path.rel))
    if edit:
        click.edit(filename=note.path.abs)
def edit(ctx, pass_name):
    """Insert a new password or edit an existing one using the editor specified by either EDITOR or VISUAL or falling back on the platform default if both are not set. """
    try:
        text = ctx.obj.get_key(pass_name)
    except FileNotFoundError:
        # New entry: start from an empty buffer.
        text = ''
    except StoreNotInitialisedError:
        click.echo(MSG_STORE_NOT_INITIALISED_ERROR)
        return 1
    except PermissionError:
        click.echo(MSG_PERMISSION_ERROR)
        return 1

    if 'EDITOR' in os.environ:
        text = click.edit(text=text, editor=os.environ['EDITOR'])
    else:
        text = click.edit(text=text)

    if text is None:
        # Editor closed without saving.
        click.echo('Password unchanged.')
        return 1

    ctx.obj.set_key(pass_name, text, force=True)
def search(query, browser, include_pdf):
    """search through notes"""
    matches = []
    hits = nomadic.search(query, delimiters=(Fore.RED, Fore.RESET),
                          include_pdf=include_pdf)
    for idx, (note, highlights) in enumerate(hits):
        rel_path = note.path.rel
        matches.append(rel_path)

        # Show all the results.
        header = '[' + Fore.GREEN + str(idx) + Fore.RESET + '] '
        echo('\n' + header + Fore.BLUE + rel_path + Fore.RESET)
        for highlight in highlights:
            echo(highlight)
        echo('\n---')

    if not matches:
        echo('\nNo results for ' + Fore.RED + query + Fore.RESET + '\n')
        return

    # Ask for an id and open the file in the editor or browser.
    choice = click.prompt('Select a note', type=int)
    rel_path = matches[choice]
    abs_path = os.path.join(conf.ROOT, rel_path)
    if os.path.splitext(rel_path)[1] == '.pdf':
        click.launch(abs_path)
    elif browser:
        click.launch('http://localhost:{0}/{1}'.format(conf.PORT, rel_path))
    else:
        click.edit(filename=abs_path)
def display_info(obj, ids, text, abstract):
    # Print the stored attributes of each Writing record in `ids`.
    info('Info ' + ' '.join(ids))
    with obj.get('app').test_request_context():
        for _id in ids:
            target = models.Writing.query.get(_id)
            if not target:
                error('ID {} not found'.format(_id))
                continue
            for prop in sqlalchemy.inspect(target).attrs:
                key = prop.key
                # Skip excluded, private, and long-form fields.
                if key in DEFAULTS.get('excluded'):
                    continue
                if key.startswith('_') or key in ('text', 'abstract'):
                    continue
                # Dates get a short display format; everything else
                # (including plain strings) is echoed as-is.
                if isinstance(prop.value, datetime):
                    val = prop.value.strftime('%m/%d/%Y')
                else:
                    val = prop.value
                click.echo(click.style(key, bold=True), nl=False)
                click.echo(' ', nl=False)
                click.echo(val)
            if text:
                click.edit(text=target.text)
                click.echo('Please note that no changes were saved. To edit, please use the <update> command.')
            if abstract:
                click.edit(text=target.abstract)
                click.echo('Please note that no changes were saved. To edit, please use the <update> command.')
def search(query):
    """ Search through notes. """
    found = []
    for idx, (note, highlights) in enumerate(nomadic.index.search(query)):
        rel = note.path.rel
        found.append(rel)

        # Show all the results.
        header = '[' + Fore.GREEN + str(idx) + Fore.RESET + '] '
        echo('\n' + header + Fore.BLUE + rel + Fore.RESET)
        echo(highlights)
        echo('\n---')

    if not found:
        echo('\nNo results for ' + Fore.RED + query + Fore.RESET + '\n')
        return

    # Ask for an id and open the file in the default editor.
    selection = click.prompt('Select a note', type=int)
    rel = found[selection]
    if os.path.splitext(rel)[1] == '.pdf':
        click.launch(rel)
    else:
        click.edit(filename=os.path.join(conf.ROOT, rel))
def add(db, fullname, password, random, pattern, interactive, comment, force, copy):
    # Generate or prompt for the password when one was not supplied.
    if random or pattern:
        password = genpass(pattern=pattern or db.config['genpass_pattern'])
    elif not password:
        password = click.prompt('Password [empty]', hide_input=True,
                                confirmation_prompt=True, show_default=False,
                                default="")

    # Refuse to clobber an existing credential unless forced.
    if db.credential(fullname=fullname) and not force:
        message = u"Credential {} already exists. --force to overwrite".format(
            fullname)
        raise click.ClickException(click.style(message, fg='yellow'))

    encrypted = encrypt(password, recipient=db.config['recipient'],
                        homedir=db.config['homedir'])
    db.add(fullname=fullname, password=encrypted, comment=comment)

    if interactive:
        click.edit(filename=db.filename(fullname))
    if copy:
        clipboard.copy(password)
        click.secho('Password copied to clipboard', fg='yellow')

    db.repo.commit(message=u'Added {}{}'.format(
        fullname, ' [--force]' if force else ''))
def config(context, key, value, edit):
    """
    Get and set configuration options.

    If value is not provided, the content of the key is displayed. Else,
    the given value is set.

    You can edit the config file with an editor with the `--edit` option.

    Example:

    \b
    $ watson config backend.token 7e329263e329
    $ watson config backend.token
    7e329263e329
    """
    watson = context.obj
    # Keep a reference to the currently-loaded config so it can be
    # restored if an --edit session produces an invalid file.
    config = watson.config

    if edit:
        click.edit(filename=watson.config_file, extension='.ini')

        try:
            # Setting config to None forces a re-read on next access;
            # accessing it validates the freshly edited file.
            watson.config = None
            watson.config
        except WatsonCliError:
            # Edited file is invalid: restore and persist the previous
            # config, then propagate the error to the user.
            watson.config = config
            watson.save()
            raise
        return

    if not key:
        # No key given (and not editing): show the command help.
        click.echo(context.get_help())
        return

    try:
        section, option = key.split('.')
    except ValueError:
        raise click.ClickException(
            "The key must have the format 'section.option'"
        )

    if value is None:
        # Read mode: print the stored value or fail loudly.
        if not config.has_section(section):
            raise click.ClickException("No such section {}".format(section))

        if not config.has_option(section, option):
            raise click.ClickException(
                "No such option {} in {}".format(option, section)
            )

        click.echo(config.get(section, option))
    else:
        # Write mode: create the section on demand, then persist.
        if not config.has_section(section):
            config.add_section(section)

        config.set(section, option, value)
        watson.config = config
        watson.save()
def redis(install,edit_config,docker_operate,rundir,instance):
    """ install redis by docker mode"""
    # Exactly one action is performed per invocation, in priority order.
    if install:
        redis_serv.docker_install(rundir, instance)
        return
    if docker_operate:
        redis_serv.docker_op(rundir, instance, docker_operate)
        return
    if edit_config:
        # Open the instance's compose file in the user's editor.
        click.edit(filename="{0}/{1}/docker-compose.yml".format(rundir, instance))
def upconfig():
    """Copy over default config file, open in editor"""
    config_path = os.path.join(active_sitefolder, 'settings.cfg')
    # Only open an existing, valid config; otherwise install the default.
    if not conf.cfg_check(config_path):
        misc.copy_config()
    else:
        click.edit(filename=config_path)
def edit():
    # Create the configuration file with defaults on first use.
    if not configuration_path.is_file():
        if not configuration_directory.is_dir():
            configuration_directory.mkdir(parents=True)
        configuration_path.touch()
        configuration_path.write_text(
            yaml.dump(default_configuration, default_flow_style=False))
        logger.debug(f'Configuration file created in {configuration_path}')
    # Hand the file to the user's editor.
    click.edit(filename=str(configuration_path))
def radius(install,edit_config, docker_operate,rundir,instance,worker_num,release):
    """ install toughradius by docker mode"""
    # NOTE(review): for the 'commcial' (sic) release the licence is prompted
    # for but never used and no install is performed — this branch looks
    # unfinished; confirm the intended behavior before relying on it.
    if install and release == 'commcial':
        licence = click.prompt('Please enter your commcial licence:', default='')
    elif install and release in ('dev','stable'):
        # Normal dockerized install for dev/stable releases.
        radius_serv.docker_install(rundir,instance,worker_num,release)
    elif docker_operate:
        # Pass the requested docker operation (start/stop/...) through.
        radius_serv.docker_op(rundir,instance,docker_operate)
    elif edit_config:
        # Open the instance's compose file in the user's editor.
        click.edit(filename="{0}/{1}/docker-compose.yml".format(rundir,instance))
def config(edit):
    '''Show/modify user configuration'''
    if not edit:
        print(Configuration.from_file())
        return
    try:
        click.edit(filename=Configuration.find_config_file())
    except GTDException:
        # Raised when there is no configuration file to open.
        click.secho("Could not find config file! Please run onboard if you haven't already", fg='red')
def wlan(install,scale,edit_config, docker_operate,rundir,instance,worker_num):
    """ install toughwlan by docker mode"""
    # One action per invocation, checked in priority order.
    if install:
        wlan_serv.docker_install(rundir, instance, worker_num)
        return
    if docker_operate:
        wlan_serv.docker_op(rundir, instance, docker_operate)
        return
    if edit_config:
        click.edit(filename="{0}/{1}/docker-compose.yml".format(rundir, instance))
        return
    if scale:
        wlan_serv.docker_scale(rundir, instance, worker_num)
def edit(text=''):
    """Use click to call a text editor for editing a text"""
    try:
        # Prefer nano in restricted mode when it is available.
        result = click.edit(text, editor='nano -t', require_save=False)
    except click.ClickException:
        # nano unavailable: fall back to the platform default editor.
        result = click.edit(text, require_save=False)
    except click.Abort:
        # User aborted: hand back the original text unchanged.
        return text
    return result
def native_wlan(install,initdb,upgrade,edit_config,release):
    """ install toughwlan by native mode"""
    # NOTE(review): for the 'commcial' (sic) release the licence is prompted
    # for but never used and no install runs — confirm this branch is
    # actually complete.
    if install and release == 'commcial':
        licence = click.prompt('Please enter your commcial licence:', default='')
    elif install and release in ('dev','stable'):
        wlan_serv.native_install(release)
    elif initdb:
        # Initialise the database schema.
        wlan_serv.native_initdb()
    elif upgrade:
        wlan_serv.native_upgrade(release)
    elif edit_config:
        # Open the native-mode config file in the user's editor.
        click.edit(filename="/etc/toughwlan.json")
def cli_get(slug, edit):
    """Get information about a tag."""
    tag = tag_get(slug)
    posts = tag['posts']

    click.secho('{name}'.format(**tag), fg='green')
    for entry in posts:
        click.echo('{title} - {file_path}'.format(**entry))

    if not edit:
        return
    # Open every post carrying this tag in the editor.
    for entry in posts:
        click.edit(filename=entry['file_path'])
def edit_ignores(repo):
    """Open an existing or a new ignores file in an editor.

    Must be called from inside an existing repository.
    """
    try:
        repo.open(default_repo_path())
    except PathError as err:
        # Not inside a repository: report the problem and bail out.
        error(err)
        abort()
    click.edit(filename=repo.ignore_path)
def new(notebook, note):
    """create a new note"""
    nb = select_notebook(notebook)
    if nb is None:
        echo("The notebook `{0}` doesn't exist.".format(notebook))
        return

    # Assume Markdown if no ext specified.
    if not os.path.splitext(note)[1]:
        note += '.md'

    click.edit(filename=os.path.join(nb.path.abs, note))
def pop(api, drop):
    """Edit or delete the latest tweet."""
    latest_tweet = get_latest_tweet(api)
    if not latest_tweet:
        click.echo('No tweet found.', err=True)
        return None

    if drop:
        # Delete the latest tweet outright.
        return api.DestroyStatus(status_id=latest_tweet.id)

    edited = click.edit(latest_tweet.text)
    if edited is None:
        # Editor exited without saving: leave the tweet untouched.
        return latest_tweet

    edited = edited.strip()
    if not edited:
        # Emptied content means delete.
        return api.DestroyStatus(status_id=latest_tweet.id)

    # Post the replacement first, then remove the old tweet.
    tweet = api.PostUpdate(edited)
    api.DestroyStatus(status_id=latest_tweet.id)
    return tweet
def edit_text(edit_type: Optional[str] = 'default', data: Optional[dict] = None) -> str:
    """Edit data with external editor.

    Args:
        edit_type: Template to use in editor
        data: Mapping of values to pass to the template (the previous
            ``Optional[str]`` annotation was wrong — the value is used as
            a dict and rendered into the template)

    Returns:
        User supplied text

    Raises:
        EmptyMessageError: No message given
        EmptyMessageError: Message not edited
    """
    template = get_template('edit', '{}.mkd'.format(edit_type))
    comment_char = utils.get_git_config_val('core.commentchar', '#')
    if not data:
        data = {}
    # Expose the comment character to the template so rendered comment
    # lines match what the filter below strips back out.
    data['comment_char'] = comment_char

    text = click.edit(template.render(data), require_save=True,
                      extension='.mkd')
    if text:
        # NOTE(review): joining with '' concatenates the surviving lines
        # without newlines — fine for single-line messages, but confirm
        # multi-line messages are not expected here before changing it.
        text = ''.join(filter(lambda s: not s.startswith(comment_char),
                              text.splitlines())).strip()

    if not text:
        raise EmptyMessageError('No message given')

    return text.strip()
def cli(env, title, subject_id, body, hardware_identifier, virtual_identifier):
    """Create a support ticket."""
    ticket_mgr = SoftLayer.TicketManager(env.client)

    # No body supplied: let the user write one interactively.
    if body is None:
        body = click.edit('\n\n' + ticket.TEMPLATE_MSG)

    created_ticket = ticket_mgr.create_ticket(title=title, body=body,
                                              subject=subject_id)

    # Optionally attach hardware and/or a virtual server to the ticket.
    if hardware_identifier:
        hardware_mgr = SoftLayer.HardwareManager(env.client)
        hardware_id = helpers.resolve_id(hardware_mgr.resolve_ids,
                                         hardware_identifier, 'hardware')
        ticket_mgr.attach_hardware(created_ticket['id'], hardware_id)

    if virtual_identifier:
        vs_mgr = SoftLayer.VSManager(env.client)
        vs_id = helpers.resolve_id(vs_mgr.resolve_ids,
                                   virtual_identifier, 'VS')
        ticket_mgr.attach_virtual_server(created_ticket['id'], vs_id)

    env.fout(ticket.get_ticket_results(ticket_mgr, created_ticket['id']))
def commit(repo, files, message):
    """Commits outstanding changes.

    Commit changes to the given files into the repository. You will need to
    "repo push" to push up your changes to other repositories.

    If a list of files is omitted, all changes reported by "repo status" will
    be committed.
    """
    if message:
        # Message(s) supplied on the command line (tuple of strings).
        msg = '\n'.join(message)
    else:
        # No message given: open an editor pre-seeded with the file list.
        marker = '# Files to be committed:'
        hint = ['', '', marker, '#'] + ['# U %s' % path for path in files]
        message = click.edit('\n'.join(hint))
        if message is None:
            click.echo('Aborted!')
            return
        # Everything from the marker on is a hint, not part of the message.
        msg = message.split(marker)[0].rstrip()
        if not msg:
            click.echo('Aborted! Empty commit message')
            return
    click.echo('Files to be committed: %s' % (files,))
    click.echo('Commit message:\n' + msg)
def execute(self):
    """Fetch the group, let the user edit it as JSON, then run the update.

    Generator-style flow: each ``yield`` hands a pending call to the
    driving code, which resumes this generator with the call's result.
    """
    # Current group representation as returned by the API.
    content = yield self.instance.view_group(*self.args, **self.kwargs)
    # Open the pretty-printed JSON in the user's editor; click.edit
    # returns None when the editor is closed without saving.
    updated = click.edit(json.dumps(content, indent=4))
    if updated is not None:
        # Editor was saved: parse the result and delegate to the base
        # class, which performs the actual update with `updated` set.
        updated = json.loads(updated)
        self.kwargs['updated'] = updated
        yield super(Edit, self).execute()
def edit_yaml(content='', footer=''):
    # Sentinel separating the editable YAML from the ignored footer.
    MARKER = '# Everything below is ignored\n\n'
    buffer = content + '\n\n' + MARKER + footer
    edited = click.edit(buffer, extension='.yaml')
    if edited is None:
        # Editor closed without saving.
        return None
    return yaml.safe_load(edited.split(MARKER, 1)[0].rstrip('\n'))
def config(ctx, key, value, remove, edit):
    """Get or set config item."""
    conf = ctx.obj["conf"]

    if not edit and not key:
        raise click.BadArgumentUsage("You have to specify either a key or use --edit.")

    if edit:
        return click.edit(filename=conf.config_file)

    if remove:
        try:
            conf.cfg.remove_option(key[0], key[1])
        except Exception as e:
            logger.debug(e)
        else:
            # Only persist when the removal actually succeeded.
            conf.write_config()
        return

    if not value:
        # Read mode: print the current value if present.
        try:
            click.echo(conf.cfg.get(key[0], key[1]))
        except Exception as e:
            logger.debug(e)
        return

    # Write mode: create the section on demand, then persist.
    section, option = key[0], key[1]
    if not conf.cfg.has_section(section):
        conf.cfg.add_section(section)
    conf.cfg.set(section, option, value)
    conf.write_config()
def edit_template(template_resource, **context):
    """Render *template_resource* with *context* and open it in an editor.

    Returns the edited text, or None if the editor was closed without
    saving (click.edit semantics).
    """
    try:
        # click.edit expects the extension with a leading dot (its default
        # is '.txt'); previously the bare suffix was passed, producing temp
        # files like 'editor-xxxxyaml' that defeat editor syntax detection.
        extension = "." + template_resource.rsplit(".", 1)[1]
    except IndexError:
        # Resource name contains no dot: let click use its default.
        extension = None
    rendered_content = render_resource(template_resource, **context)
    return click.edit(rendered_content, extension=extension)
def update(rtype):
    """Updates clusters or resources"""
    if rtype == "cluster":
        # list of clusters from yaml
        ddict = dict(enumerate(udata.clusters(), start=1))
    elif rtype == "resource":
        ddict = dict(enumerate(udata.resources(), start=1))
    else:
        print("Unexpected type")
        # BUG FIX: previously fell through with ddict=None and crashed
        # on the iteration below; bail out instead.
        return

    # .iteritems() was Python-2 only; .items() works on both.
    for num, val in ddict.items():
        print("{0}) {1}".format(num, val))

    num = click.prompt("Choose to update", type=int)
    click.edit(filename=udata.get_filepath(name=ddict[num], rtype=rtype))
def open_external_editor(filename=None, sql=None):
    """Open external editor, wait for the user to type in their query,
    return the query.

    :return: list with one tuple, query as first element.
    """
    message = None
    # Only the first whitespace-delimited token is treated as the filename.
    if filename:
        filename = filename.strip().split(' ', 1)[0]
    sql = sql or ''
    MARKER = '# Type your query above this line.\n'

    # Populate the editor buffer with the partial sql (if available) and a
    # placeholder comment.
    query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
                       filename=filename, extension='.sql')

    if filename:
        # The user edited an on-disk file: read back its final contents.
        try:
            with open(filename, encoding='utf-8') as f:
                query = f.read()
        except IOError:
            message = 'Error reading file: %s.' % filename

    if query is None:
        # Don't return None for the caller to deal with; empty string is ok.
        query = sql
    else:
        query = query.split(MARKER, 1)[0].rstrip('\n')

    return (query, message)
def bash(name, open, force, description, body, from_alias, flowdeps, source_bash_helpers):
    """Create a bash custom command"""
    if name.endswith(".sh"):
        LOGGER.warning("Removing the extra .sh so that clk won't confuse it"
                       " with a command name.")
        # BUG FIX: the old slice `name[:len(".sh")]` kept only the first
        # three characters of the name; strip the suffix instead.
        name = name[:-len(".sh")]
    script_path = Path(config.customcommands.profile.location) / "bin" / name
    makedirs(script_path.parent)
    if script_path.exists() and not force:
        raise click.UsageError(f"Won't overwrite {script_path} unless"
                               " explicitly asked so with --force")
    options = []
    arguments = []
    flags = []
    remaining = ""
    args = ""
    if from_alias:
        # Seed the script body with the commands the alias used to run.
        if body:
            body = body + "\n"
        body = body + "\n".join(
            config.main_command.path + " " + " ".join(map(quote, command))
            for command in config.settings["alias"][from_alias]["commands"])
        flowdeps = list(flowdeps) + get_flow_commands_to_run(from_alias)
        alias_cmd = get_command(from_alias)
        if description:
            description = description + "\n"
        description = description + f"Converted from the alias {from_alias}"

        def guess_type(param):
            # Map a click parameter type to the clk descriptor syntax.
            if type(param.type) == click.Choice:
                return json.dumps(list(param.type.choices))
            elif param.type == int:
                return "int"
            elif param.type == float:
                return "float"
            else:
                return "str"

        # Re-create each alias parameter as an option/flag/argument
        # descriptor plus the shell snippet forwarding its value.
        for param in alias_cmd.params:
            if type(param) == Option:
                if param.is_flag:
                    flags.append(
                        f"F:{','.join(param.opts)}:{param.help}:{param.default}"
                    )
                    args += f"""
if [ "${{{config.main_command.path.upper()}___{param.name.upper()}}}" == "True" ]
then
    args+=({param.opts[-1]})
fi"""
                else:
                    options.append(
                        f"O:{','.join(param.opts)}:{guess_type(param)}:{param.help}"
                    )
                    args += f"""
if [ -n "${{{config.main_command.path.upper()}___{param.name.upper()}}}" ]
then
    args+=({param.opts[-1]} "${{{config.main_command.path.upper()}___{param.name.upper()}}}")
fi"""
            elif type(param) == Argument:
                if param.nargs == -1:
                    remaining = param.help
                else:
                    arguments.append(
                        f"A:{','.join(param.opts)}:{guess_type(param)}:{param.help}"
                    )
                    args += f"""
args+=("${{{config.main_command.path.upper()}___{param.name.upper()}}}")
"""
    if args:
        args = """# Build the arguments of the last command of the alias
args=()""" + args
        body += ' "${args[@]}"'
    if remaining:
        # Forward any leftover positional arguments verbatim.
        body += ' "${@}"'
    if flowdeps:
        flowdeps_str = "flowdepends: " + ", ".join(flowdeps) + "\n"
    else:
        flowdeps_str = ""
    if options:
        options_str = "\n".join(options) + "\n"
    else:
        options_str = ""
    if arguments:
        arguments_str = "\n".join(arguments) + "\n"
    else:
        arguments_str = ""
    if flags:
        flags_str = "\n".join(flags) + "\n"
    else:
        flags_str = ""
    if remaining:
        remaining_str = f"N:{remaining}\n"
    else:
        remaining_str = ""
    # Emit the final executable script: usage block, help handler, then
    # the reconstructed argument plumbing and body.
    script_path.write_text(f"""#!/bin/bash -eu

source "${{CLK_INSTALL_LOCATION}}/commands/customcommand/_clk.sh"

clk_usage () {{
    cat<<EOF
$0

{description}
--
{flowdeps_str}{options_str}{flags_str}{arguments_str}{remaining_str}EOF
}}

clk_help_handler "$@"

{args}

{body}
""")
    chmod(script_path, 0o755)
    if open:
        click.edit(filename=str(script_path))
def refactor( current_dir: Path, paths_to_refactor: Iterable[Path] ) -> Union[List[Path], None, NoReturn]: """ * refactors all of the provided paths. * checks that the refactored paths are valid. - prompts the user to try again or quit the program if the paths were not refactored appropriately, or the editor was closed before saving. :param current_dir: [Path] to the current directory :param paths_to_refactor: [List(Path)] to refactor :return: list(Path) with refactored names. - or None, if the editor was not closed properly """ # GET: new file names # - new_names_str will be equal to None, if the editor is closed # before saving the file. names_list = [f.name for f in paths_to_refactor] new_names_str = click.edit(f"Directory: {str(current_dir)}\n" + "\n".join(names_list)) # CHECK: if editor was saved if new_names_str is not None: # GET: list of names to perform refactoring on. # - ignores the first line in the file that contains # the directories name. new_names_list = new_names_str.split("\n")[1:] # GET: new paths refactored_paths = [ ptr.parent / nnl for ptr, nnl in zip(paths_to_refactor, new_names_list) ] # CHECK: to see if the new paths are valid valid = check_valid_paths(current_dir, paths_to_refactor, refactored_paths) # Prompt user when: # 1. Valid file names were not provided # 2. The editor was not saved if new_names_str is None or not valid: given = "default" while given not in "rcx": given = click.prompt("[Retry|Continue|Exit]? [r|c|x]") print() if given == "r": return refactor(current_dir, paths_to_refactor) elif given == "x": click.echo( click.style(f'Aborted Refactoring: {current_dir}', fg='red')) sys.exit(0) else: return return refactored_paths
def config(edit, test_se):
    """tvol's config information.

    Show information of where various files are, (config.ini, database)
    and a list of the search engines and the url's they use.
    """
    # --edit: open the config file and stop.
    if edit:
        click.edit(filename=Config.user_config)
        return
    # --test-se: exercise the given search engine and stop.
    if test_se:
        search = Search()
        search.test_each(test_se)
        return

    import shutil

    # Shared styling for the section headers below.
    title = 'green'
    bold = True
    ul = True

    # file locations
    click.echo()
    click.secho('File locations:', fg=title, bold=bold, underline=ul)
    click.echo()
    click.echo('config file: %s' % Config.user_config)
    click.echo('Database file: %s' % Config.db_file)
    click.echo('NZB staging dir: %s' % Config.staging)
    click.echo('TV dir: %s' % Config.tv_dir)
    click.echo('Alt client: %s' % Config.client)
    click.echo('Magnet dir: %s' % Config.magnet_dir)
    click.echo('Template: %s' % Config.template)
    click.echo()
    # Show where the helper scripts resolve to on PATH.
    for script in ['tvol', 'transmission_done', 'deluge_done']:
        loc = shutil.which(script)
        script = script + ':'
        click.echo('%s %s' % (script.ljust(18), loc))

    # search engines
    click.echo()
    click.secho('Search engines:', fg=title, bold=bold, underline=ul)
    search = Search()
    engines_types = [search.torrent_engines, search.newsgroup_engines]
    for engines in engines_types:
        for engine in engines:
            click.echo()
            click.secho(engine.Provider.name, bold=True, nl=False)
            click.echo(' (%s)' % engine.Provider.shortname)
            for url in engine.Provider.provider_urls:
                click.echo(' %s' % url)

    # blacklisted search engines
    if Config.blacklist:
        click.echo()
        click.secho('Search engine blacklist:', fg=title, bold=bold, underline=ul)
        click.echo()
        for bl in Config.blacklist:
            click.echo(bl)

    # ip addresses
    click.echo()
    click.secho('Ip address information:', fg=title, bold=bold, underline=ul)
    click.echo()
    l = Location()
    click.echo('Your public ip address:')
    click.secho(' %s' % l.ip, bold=True)
    if Config.ip:
        click.echo()
        click.echo('Your whitelisted ip addresses:')
        # Compare only the first `parts_to_match` octets of the public ip.
        short = '.'.join(l.ip.split('.')[:Config.parts_to_match])
        for ip in Config.ip:
            color = None
            if ip.startswith(short):
                color = 'green'
            click.secho(' %s' % ip, fg=color)
def build(cls, path=None, rule_type=None, required_only=True, save=True, verbose=False, **kwargs):
    """Build a rule from data and prompts."""
    from .misc import schema_prompt

    if verbose and path:
        click.echo(f'[+] Building rule for {path}')

    # Work on a copy so provided kwargs are never mutated for the caller.
    kwargs = copy.deepcopy(kwargs)

    # Flatten nested 'metadata'/'rule' payloads into a single flat dict.
    if 'rule' in kwargs and 'metadata' in kwargs:
        kwargs.update(kwargs.pop('metadata'))
        kwargs.update(kwargs.pop('rule'))

    # Resolve the rule type: argument > kwargs > interactive prompt.
    rule_type = rule_type or kwargs.get('type') or \
        click.prompt('Rule type ({})'.format(', '.join(CurrentSchema.RULE_TYPES)),
                     type=click.Choice(CurrentSchema.RULE_TYPES))

    schema = CurrentSchema.get_schema(role=rule_type)
    props = schema['properties']
    opt_reqs = schema.get('required', [])
    contents = {}
    skipped = []

    for name, options in props.items():
        if name == 'type':
            contents[name] = rule_type
            continue

        # these are set at package release time
        if name == 'version':
            continue

        if required_only and name not in opt_reqs:
            continue

        # build this from technique ID
        if name == 'threat':
            threat_map = []
            while click.confirm('add mitre tactic?'):
                tactic = schema_prompt('mitre tactic name', type='string',
                                       enum=tactics, required=True)
                technique_ids = schema_prompt(
                    f'technique or sub-technique IDs for {tactic}',
                    type='array', required=False,
                    enum=list(matrix[tactic])) or []
                try:
                    threat_map.append(
                        build_threat_map_entry(tactic, *technique_ids))
                except KeyError as e:
                    # Unknown technique id: skip this tactic's entry.
                    click.secho(
                        f'Unknown ID: {e.args[0]} - entry not saved for: {tactic}',
                        fg='red', err=True)
                    continue
                except ValueError as e:
                    click.secho(f'{e} - entry not saved for: {tactic}',
                                fg='red', err=True)
                    continue

            if len(threat_map) > 0:
                contents[name] = threat_map
            continue

        if name == 'threshold':
            # Prompt each threshold sub-field individually.
            contents[name] = {
                n: schema_prompt(f'threshold {n}',
                                 required=n in options['required'],
                                 **opts.copy())
                for n, opts in options['properties'].items()
            }
            continue

        if kwargs.get(name):
            # A value was passed in: confirm/normalize it via the prompt.
            contents[name] = schema_prompt(name, value=kwargs.pop(name))
            continue

        result = schema_prompt(name, required=name in opt_reqs, **options.copy())
        if result:
            # Don't persist optional values the user left at the default.
            if name not in opt_reqs and result == options.get('default', ''):
                skipped.append(name)
                continue
            contents[name] = result

    suggested_path = os.path.join(
        RULES_DIR, contents['name'])  # TODO: UPDATE BASED ON RULE STRUCTURE
    path = os.path.realpath(
        path or input('File path for rule [{}]: '.format(suggested_path))
        or suggested_path)

    rule = None

    try:
        rule = cls(path, {'rule': contents})
    except kql.KqlParseError as e:
        if e.error_msg == 'Unknown field':
            warning = (
                'If using a non-ECS field, you must update "ecs{}.non-ecs-schema.json" under `beats` or '
                '`legacy-endgame` (Non-ECS fields should be used minimally).'
                .format(os.path.sep))
            click.secho(e.args[0], fg='red', err=True)
            click.secho(warning, fg='yellow', err=True)
            click.pause()

        # if failing due to a query, loop until resolved or terminated
        while True:
            try:
                contents['query'] = click.edit(contents['query'],
                                               extension='.eql')
                rule = cls(path, {'rule': contents})
            except kql.KqlParseError as e:
                click.secho(e.args[0], fg='red', err=True)
                click.pause()

                if e.error_msg.startswith("Unknown field"):
                    # get the latest schema for schema errors
                    clear_caches()
                    ecs.get_kql_schema(indexes=contents.get("index", []))
                continue
            break

    if save:
        rule.save(verbose=True, as_rule=True)

    if skipped:
        print(
            'Did not set the following values because they are un-required when set to the default value'
        )
        print(' - {}'.format('\n - '.join(skipped)))

    # rta_mappings.add_rule_to_mapping_file(rule)
    # click.echo('Placeholder added to rule-mapping.yml')

    click.echo(
        'Rule will validate against the latest ECS schema available (and beats if necessary)'
    )
    click.echo(
        ' - to have a rule validate against specific ECS schemas, add them to metadata->ecs_versions'
    )
    click.echo(
        ' - to have a rule validate against a specific beats schema, add it to metadata->beats_version'
    )

    return rule
def main(csv_name):
    # Interactive vocabulary-note triage loop: restores unfinished notes,
    # looks up context and translations via external tools (ag, dict),
    # lets the user edit each entry, and appends rows to an Anki CSV.
    welcome = """
   ______ ___     __    __
  /_  __/___ /   |  ____  / /__
   / /  / __ \/ /| | / __ \/ //_/
  / /  / /_/ / ___ |/ / / / ,<
 /_/   \____/_/  |_/_/ /_/_/|_|
"""
    click.secho(welcome, fg="blue")
    titles = list(get_titles())
    undone = list()
    # unfinished notes from previous sessions
    with open("undone.txt", "r") as f:
        for word in f:
            word = word.strip()
            if not word:
                continue
            #assert len(word.split()) == 1
            undone.append(word)
    # NOTE(review): the restore/selection nesting below is ambiguous in the
    # original flattened source — as written, a fresh run with an empty
    # undone.txt never reaches book selection; confirm intended flow.
    if undone:
        click.echo(f"""Restore session with {len(undone)} notes?\n""")
        prompt = click.getchar()
        if prompt != 'y':
            # User declined restoring: pick a book and read new notes.
            click.echo("Select a book")
            for i, title in enumerate(titles):
                click.echo(f"{i} {title}")
            while True:
                try:
                    n = int(input("number of title\n"))
                    title = titles[n]
                    break
                except:
                    pass
            new = read_notes(notes_file, log_file, title)
            undone.extend(new)
    if not undone:
        click.echo("No new notes!")
        return
    books = os.listdir(os.path.join(os.curdir, "books"))
    click.echo(
        """These books are available for context searching. If you don't find what you want convert your epub with ebook-convert .epub .txt and put in the ./books/ directory\n Make your choice: """
    )
    for i, book in enumerate(books):
        click.echo(f"{i} {book}")
    #b = input("""These books are available for context searching. If you don't
    #find what you want convert your epub with ebook-convert .epub
    #.txt and put in the ./books/ directory\n Make your choice: """)
    #try:
    #booktxt = books[int(b)]
    #except:
    #booktxt = None
    family = input("Name the family of the words:\n") or title
    with open(csv_name, "a+") as csvfile, open("savednotes", "a+") as savednotes:
        try:
            notewriter = csv.writer(csvfile, delimiter="|")
            while undone:
                note = undone.pop().lower()
                example = ""
                pos = ""
                #if booktxt:
                # Grep the books directory for a sentence using the note.
                args = f"ag --ignore-case -w {note} books/".split()
                p = subprocess.Popen(args, stdout=subprocess.PIPE)
                output, error = p.communicate()
                if error:
                    raise Exception(error)
                output = output.decode("utf-8")
                example = re.search(f"[^.:?!]*{note}[^.?!]*\.", output)
                f_example = ""
                if example:
                    example = example.group()
                    pos = determine_pos(example, note)
                    # truncate the context to 10 words before/after
                    ex = example.split()
                    try:
                        ind = ex.index(note)
                        example = " ".join(
                            ex[max(0, ind - 10):min(len(ex) - 1, ind + 10)])
                    except:
                        pass
                    # Highlight the note word in the displayed example.
                    f_example = " ".join(
                        map(
                            lambda x: click.style(x, fg="blue")
                            if x.lower() == note else x, ex))
                click.clear()
                click.echo(f"{click.style(note, fg='blue')}\n {f_example}\n")
                click.echo(
                    """'c' to change note\n 'd' to delete note\n 's' to save note\n else anything different to continue"""
                )
                prompt = click.getchar()
                if prompt == "d":
                    # discard
                    continue
                elif prompt == "c":
                    # change the nte
                    new_note = input("New: ").strip().lower()
                elif prompt == "s":
                    savednotes.write(note + "\n")
                    continue
                else:
                    new_note = note
                # Look up the translation in the offline dictionary.
                args = f"dict -d fd-eng-deu {new_note}".split()
                p = subprocess.Popen(args, stdout=subprocess.PIPE)
                output, error = p.communicate()
                output = output.decode("utf-8")
                if error:
                    raise Exception(error)
                if output:
                    # Split the phonetic transcription off the dict output.
                    phonetic = re.search(r"/(.*)/", output)
                    truncated = re.search(r"/([^/]*)$", output)
                    try:
                        phonetic = phonetic.group(1).strip()
                        output = truncated.group(1).strip()
                    except:
                        pass
                else:
                    phonetic = ""
                click.echo(
                    f"{click.style(pos, fg='green')} /{phonetic}/ \n {output} \n"
                )
                separator = "### Text below will be ignored ###"
                text = f"""{output}\n\n\n {separator}\n {new_note} /{phonetic}/\n{example}"""
                click.launch(f'https://dict.leo.org/german-english/{new_note}')
                edited = click.edit(text)
                # Fall back to the raw dict output when the editor was
                # closed without saving.
                edited = output if not edited else edited
                translation = edited.split(separator)[0].rstrip()
                npos = input("Adjust pos?\n")
                if npos:
                    pos = npos
                #translation = input("Translation: ")
                #args = f"dict {note}".split()
                #p = subprocess.Popen(args)
                #output, error = p.communicate()
                #if error:
                #raise Exception(error)
                #click.echo(output)
                #ee = input("Description (E):\n")
                ee = ""
                if not pos:
                    pos = input("POS: ")
                if not example:
                    example = input("Example: ")
                # Wrap the note word in a span for Anki templates.
                l, m, r = example.partition(note)
                example = l + "<span class={{POS}}>" + m + "</span>" + r
                #E, G, Pos, ex , not, note, ee, notg, norev, fam, phon
                notewriter.writerow([
                    new_note, translation, pos, example, "", "", ee, "", "",
                    family, phonetic
                ])
        except Exception as e:
            raise e
        finally:
            # Persist the in-flight note plus anything still pending so the
            # session can be restored next run.
            with open("undone.txt", "w") as f:
                f.write(note + "\n")
                for u in undone:
                    f.write(u + "\n")
    click.echo("Everything done. Import the csv to anki")
def edit(profile_name):
    """Opens the profiles configuration file in the default editor"""
    config_file = '{}/{}.yaml'.format(manager.getConfigPath(), profile_name)
    click.edit(filename=config_file)
def launch(self, job_template=None, monitor=False, timeout=None,
           no_input=True, extra_vars=None, **kwargs):
    """Launch a new job based on a job template.

    Creates a new job in Ansible Tower, immediately starts it, and
    returns back an ID in order for its status to be monitored.

    :param job_template: primary key or name of the job template to launch.
    :param monitor: if True, block and monitor the job after starting it.
    :param timeout: optional monitoring timeout in seconds.
    :param no_input: if True, never prompt the user interactively.
    :param extra_vars: runtime extra variables (tuple/list of strings).
    :param kwargs: may carry ``tags``, ``use_job_endpoint`` and per-resource
        overrides for the entries of PROMPT_LIST.
    """
    # Get the job template from Ansible Tower.
    # This is used as the baseline for starting the job.
    tags = kwargs.get('tags', None)
    use_job_endpoint = kwargs.pop('use_job_endpoint', False)
    jt_resource = get_resource('job_template')
    jt = jt_resource.get(job_template)

    # Update the job data by adding an automatically-generated job name,
    # and removing the ID.
    data = copy(jt)
    data['job_template'] = data.pop('id')
    data['name'] = '%s [invoked via. Tower CLI]' % data['name']
    if tags:
        data['job_tags'] = tags

    # Initialize an extra_vars list that starts with the job template
    # preferences first, if they exist
    extra_vars_list = []
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        # But only do this for versions before 2.3
        debug.log('Getting version of Tower.', header='details')
        r = client.get('/config/')
        if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
            extra_vars_list = [data['extra_vars']]

    # Add the runtime extra_vars to this list
    if extra_vars:
        extra_vars_list += list(extra_vars)  # accept tuples

    # If the job template requires prompting for extra variables,
    # do so (unless --no-input is set).
    if data.pop('ask_variables_on_launch', False) and not no_input \
            and not extra_vars:
        # If JT extra_vars are JSON, echo them to user as YAML
        initial = parser.process_extra_vars(
            [data['extra_vars']], force_json=False)
        initial = '\n'.join((
            '# Specify extra variables (if any) here as YAML.',
            '# Lines beginning with "#" denote comments.',
            initial,
        ))
        extra_vars = click.edit(initial) or ''
        # Only use the edited buffer if the user actually changed it.
        if extra_vars != initial:
            extra_vars_list = [extra_vars]

    # Data is starting out with JT variables, and we only want to
    # include extra_vars that come from the algorithm here.
    data.pop('extra_vars', None)

    # Replace/populate data fields if prompted.
    modified = set()
    for resource in PROMPT_LIST:
        # NOTE(review): `and` binds tighter than `or`, so this parses as
        # (ask_..._on_launch and not no_input) or use_job_endpoint —
        # i.e. use_job_endpoint forces this branch for every resource.
        # Verify that is the intended precedence.
        if data.pop('ask_' + resource + '_on_launch', False) \
                and not no_input or use_job_endpoint:
            resource_object = kwargs.get(resource, None)
            if type(resource_object) == types.Related:
                resource_class = get_resource(resource)
                # NOTE(review): `.get(resource)` looks up the resource by its
                # own name rather than by the provided value — confirm.
                resource_object = resource_class.get(resource).\
                    pop('id', None)
            if resource_object is None:
                if not use_job_endpoint:
                    debug.log(
                        '{0} is asked at launch but not provided'.format(
                            resource), header='warning')
            elif resource != 'tags':
                data[resource] = resource_object
                modified.add(resource)

    # Dump extra_vars into JSON string for launching job
    if len(extra_vars_list) > 0:
        data['extra_vars'] = parser.process_extra_vars(
            extra_vars_list, force_json=True)

    # In Tower 2.1 and later, we create the new job with
    # /job_templates/N/launch/; in Tower 2.0 and before, there is a two
    # step process of posting to /jobs/ and then /jobs/N/start/.
    supports_job_template_launch = False
    if 'launch' in jt['related']:
        supports_job_template_launch = True

    # Create the new job in Ansible Tower.
    start_data = {}
    if supports_job_template_launch and not use_job_endpoint:
        endpoint = '/job_templates/%d/launch/' % jt['id']
        if 'extra_vars' in data and len(data['extra_vars']) > 0:
            start_data['extra_vars'] = data['extra_vars']
        if tags:
            start_data['job_tags'] = data['job_tags']
        for resource in PROMPT_LIST:
            if resource in modified:
                start_data[resource] = data[resource]
    else:
        # Legacy two-step flow: create the job, then hit its /start/ route.
        debug.log('Creating the job.', header='details')
        job = client.post('/jobs/', data=data).json()
        job_id = job['id']
        endpoint = '/jobs/%d/start/' % job_id

    # There's a non-trivial chance that we are going to need some
    # additional information to start the job; in particular, many jobs
    # rely on passwords entered at run-time.
    #
    # If there are any such passwords on this job, ask for them now.
    debug.log('Asking for information necessary to start the job.',
              header='details')
    job_start_info = client.get(endpoint).json()
    for password in job_start_info.get('passwords_needed_to_start', []):
        start_data[password] = getpass('Password for %s: ' % password)

    # Actually start the job.
    debug.log('Launching the job.', header='details')
    self._pop_none(kwargs)
    kwargs.update(start_data)
    job_started = client.post(endpoint, data=kwargs)

    # If this used the /job_template/N/launch/ route, get the job
    # ID from the result.
    if supports_job_template_launch and not use_job_endpoint:
        job_id = job_started.json()['job']

    # Get some information about the running job to print
    result = self.status(pk=job_id, detail=True)
    result['changed'] = True

    # If we were told to monitor the job once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(job_id, timeout=timeout)

    return result
def editor(content='', marker='# Enter a description, markdown is allowed!\n'):
    """Open the user's editor pre-filled with *content* above *marker*.

    Returns the edited text above the marker line (trailing newlines
    stripped), or ``None`` when the editor exits without saving.
    """
    message = click.edit('{}\n\n{}'.format(content, marker))
    if message is None:
        return None
    # Everything before the first occurrence of the marker is the payload.
    head, _sep, _tail = message.partition(marker)
    return head.rstrip('\n')
def modify_fstab(self, be_mountpoint: str):
    """Rewrite the boot environment's /etc/fstab so its ESP bind-mount
    entry points at the new boot environment entry.

    Works on a temp copy (fstab.zedenv.new), optionally lets the user edit
    it, then (unless noop) backs up and replaces the real /etc/fstab.
    """
    be_fstab = os.path.join(be_mountpoint, "etc/fstab")
    temp_fstab = os.path.join(be_mountpoint, "fstab.zedenv.new")

    # Work on a temp copy so the real fstab is only touched at the end.
    try:
        shutil.copy(be_fstab, temp_fstab)
    except PermissionError as e:
        ZELogger.log(
            {
                "level": "EXCEPTION",
                "message": f"Require Privileges to write to {temp_fstab}\n{e}"
            }, exit_on_error=True)
    except IOError as e:
        ZELogger.log(
            {
                "level": "EXCEPTION",
                "message": f"IOError writing to {temp_fstab}\n{e}"
            }, exit_on_error=True)

    replace_pattern = r'(^{esp}/{env}/?)(.*)(\s.*{boot}\s.*$)'.format(
        esp=self.esp, env=self.env_dir, boot=self.boot_mountpoint)
    target = re.compile(replace_pattern)

    """
    Find match for:

    $esp/$env_dir/$boot_environment $boot_location <fstab stuff>
    eg:
    /mnt/efi/env/default-3 /boot none  rw,defaults,bind 0 0
    """
    with open(temp_fstab) as in_f:
        lines = in_f.readlines()
        # First (index, match) pair whose line matches the bind-mount pattern.
        match = next(((i, target.search(m)) for i, m in enumerate(lines)
                      if target.search(m)), None)

    """
    Replace BE name with new one
    """
    if match:
        old_fstab_entry = lines[match[0]]
        new_fstab_entry = re.sub(replace_pattern,
                                 r"\1" + self.new_entry + r"\3",
                                 lines[match[0]])
        lines[match[0]] = new_fstab_entry
        with open(temp_fstab, 'w') as out_f:
            out_f.writelines(lines)
    else:
        ZELogger.log({
            "level": "INFO",
            "message": (f"Couldn't find bindmounted directory to replace, your system "
                        "may not be configured for boot environments with systemdboot.")
        })

    # NOTE(review): if no match was found above, old_fstab_entry /
    # new_fstab_entry are unbound, yet the final log below references them —
    # a NameError would be raised when not self.noop. Confirm intended flow.
    if not self.noop:
        # Back up the original fstab before overwriting it.
        try:
            shutil.copy(be_fstab, f"{be_fstab}.bak")
        except PermissionError as e:
            ZELogger.log(
                {
                    "level": "EXCEPTION",
                    "message": f"Require Privileges to write to {be_fstab}.bak\n{e}"
                }, exit_on_error=True)
        except IOError as e:
            ZELogger.log(
                {
                    "level": "EXCEPTION",
                    "message": f"IOError writing to {be_fstab}.bak\n{e}"
                }, exit_on_error=True)

        # Give the user a chance to review/adjust the generated fstab.
        if not self.noconfirm:
            if click.confirm(
                    "Would you like to edit the generated 'fstab'?",
                    default=True):
                click.edit(filename=temp_fstab)

        # Promote the temp copy to the real /etc/fstab.
        try:
            shutil.copy(temp_fstab, be_fstab)
        except PermissionError as e:
            ZELogger.log(
                {
                    "level": "EXCEPTION",
                    "message": f"Require Privileges to write to {be_fstab}\n{e}"
                }, exit_on_error=True)
        except IOError as e:
            ZELogger.log(
                {
                    "level": "EXCEPTION",
                    "message": f"IOError writing to {be_fstab}\n{e}"
                }, exit_on_error=True)

        ZELogger.log({
            "level": "INFO",
            "message": (f"Replaced fstab entry:\n{old_fstab_entry}\nWith new entry:\n"
                        f"{new_fstab_entry}\nIn the boot environment's "
                        f"'/etc/fstab'. A copy of the original "
                        "'/etc/fstab' can be found at '/etc/fstab.bak'.\n")
        })
def _editor(self):
    """Hand the widget's current text to an external editor and store the
    edited result (stripped) back into the widget.

    Nothing changes if the editor is closed without saving.
    """
    # Clear the screen before yielding control to the external editor.
    self._parent._loop.screen.clear()
    edited = click.edit(self.get_edit_text())
    if edited is None:
        return
    self.set_edit_text(edited.strip())
def edit_bootloader_entry(self, temp_esp: str):
    """Create (and optionally let the user edit) a systemd-boot loader
    entry for the new boot environment under a temporary ESP tree.

    Uses the old entry's config as a template when one exists; otherwise
    writes a best-guess entry.
    """
    temp_entries_dir = os.path.join(temp_esp, "loader/entries")
    temp_bootloader_file = os.path.join(temp_entries_dir,
                                        f"{self.new_entry}.conf")

    real_entries_dir = os.path.join(self.esp, "loader/entries")
    real_bootloader_file = os.path.join(real_entries_dir,
                                        f"{self.old_entry}.conf")

    try:
        os.makedirs(temp_entries_dir)
    except PermissionError as e:
        ZELogger.log(
            {
                "level": "EXCEPTION",
                "message": f"Require Privileges to write to {temp_entries_dir}\n{e}"
            }, exit_on_error=True)
    except OSError as os_err:
        ZELogger.log({
            "level": "EXCEPTION",
            "message": os_err
        }, exit_on_error=True)

    config_entries = os.listdir(real_entries_dir)

    # Fallback template used when no existing entry can serve as a base.
    entry_guess_list = [
        f"title Boot Environment [{self.boot_environment}]\n",
        f"linux /env/{self.new_entry}/vmlinuz-linux\n",
        f"initrd /env/{self.new_entry}/initramfs-linux.img\n",
        f"options zfs={self.be_root}/{self.boot_environment}\n"
    ]

    # Entry basenames matching the old (or, failing that, new) entry name.
    config_matches = [
        en.split(".conf")[0] for en in config_entries
        if en.split(".conf")[0] == (self.old_entry or self.new_entry)
    ]

    old_conf = True if self.old_entry in config_matches else False
    new_conf = True if self.new_entry in config_matches else False

    if old_conf and (self.old_boot_environment == self.boot_environment):
        ZELogger.log({
            "level": "INFO",
            "message": (f"Attempting to activate same boot environment while config "
                        f"'{self.old_entry}.conf' "
                        "already exists. Will not modify old configuration.\n")
        })
    elif new_conf:
        ZELogger.log({
            "level": "INFO",
            "message": (f"Attempting to activate boot environment while config for "
                        f"'{self.new_entry}.conf' already exists. "
                        "Will not modify old configuration.\n")
        })
    else:
        if old_conf:
            ZELogger.log({
                "level": "INFO",
                "message": (f"Using existing entry {self.old_entry} as template "
                            f"taking best guess at creating one at "
                            f"{self.new_entry}.conf\n")
            })
            # NOTE(review): the file handle shadows the `old_conf` boolean
            # here; harmless since the flag isn't read again, but confusing.
            with open(real_bootloader_file, "r") as old_conf:
                old_conf_list = old_conf.readlines()

            # Rewrite the old BE name to the new one, line by line.
            new_entry_list = [
                l.replace(self.old_boot_environment, self.boot_environment)
                for l in old_conf_list
            ]
        else:
            entry_guess_full = '\n'.join(entry_guess_list)
            ZELogger.log({
                "level": "INFO",
                "message": (f"You have no matching bootloader entries in {real_entries_dir}, "
                            f"taking best guess at creating one at {real_bootloader_file}:\n"
                            f"{entry_guess_full}.\n")
            })
            new_entry_list = entry_guess_list

        if not self.noop:
            with open(temp_bootloader_file, "w") as boot_conf:
                boot_conf.writelines(new_entry_list)

            if not self.noconfirm:
                if click.confirm(
                        "Would you like to edit the generated bootloader config?",
                        default=True):
                    click.edit(filename=temp_bootloader_file)
def get_description():
    """Prompt for a free-form description via the user's editor.

    Returns the text typed above the marker line with trailing newlines
    removed, or ``None`` when the editor is closed without saving.
    """
    marker = "# Write your description above"
    buffer = click.edit("\n\n" + marker)
    if buffer is None:
        return None
    description, _sep, _rest = buffer.partition(marker)
    return description.rstrip("\n")
def launch(self, job_template, tags=None, monitor=False, timeout=None,
           no_input=True, extra_vars=None):
    """Launch a new job based on a job template.

    Creates a new job in Ansible Tower, immediately starts it, and
    returns back an ID in order for its status to be monitored.

    :param job_template: primary key or name of the job template.
    :param tags: optional job tags.
    :param monitor: if True, monitor the job after starting it.
    :param timeout: optional monitoring timeout in seconds.
    :param no_input: if True, never prompt the user interactively.
    :param extra_vars: extra variables string, or a file-like object.
    """
    # Get the job template from Ansible Tower.
    # This is used as the baseline for starting the job.
    jt_resource = get_resource('job_template')
    jt = jt_resource.get(job_template)

    # Update the job data by adding an automatically-generated job name,
    # and removing the ID.
    data = copy(jt)
    data['job_template'] = data.pop('id')
    data['name'] = '%s [invoked via. Tower CLI]' % data['name']
    if tags:
        data['job_tags'] = tags

    # If the job template requires prompting for extra variables,
    # do so (unless --no-input is set).
    if extra_vars:
        # Accept either a raw string or an open file-like object.
        if hasattr(extra_vars, 'read'):
            extra_vars = extra_vars.read()
        data['extra_vars'] = extra_vars
    elif data.pop('ask_variables_on_launch', False) and not no_input:
        initial = data['extra_vars']
        initial = '\n'.join((
            '# Specify extra variables (if any) here.',
            '# Lines beginning with "#" are ignored.',
            initial,
        ))
        extra_vars = click.edit(initial) or ''
        # Strip the instructional comment lines back out of the result.
        extra_vars = '\n'.join([i for i in extra_vars.split('\n')
                                if not i.startswith('#')])
        data['extra_vars'] = extra_vars

    # In Tower 2.1 and later, we create the new job with
    # /job_templates/N/launch/; in Tower 2.0 and before, there is a two
    # step process of posting to /jobs/ and then /jobs/N/start/.
    supports_job_template_launch = False
    if 'launch' in jt['related']:
        supports_job_template_launch = True

    # Create the new job in Ansible Tower.
    start_data = {}
    if supports_job_template_launch:
        endpoint = '/job_templates/%d/launch/' % jt['id']
        if 'extra_vars' in data:
            start_data['extra_vars'] = data['extra_vars']
        if tags:
            start_data['job_tags'] = data['job_tags']
    else:
        # Legacy two-step flow: create the job, then hit its /start/ route.
        debug.log('Creating the job.', header='details')
        job = client.post('/jobs/', data=data).json()
        job_id = job['id']
        endpoint = '/jobs/%d/start/' % job_id

    # There's a non-trivial chance that we are going to need some
    # additional information to start the job; in particular, many jobs
    # rely on passwords entered at run-time.
    #
    # If there are any such passwords on this job, ask for them now.
    debug.log('Asking for information necessary to start the job.',
              header='details')
    job_start_info = client.get(endpoint).json()
    for password in job_start_info.get('passwords_needed_to_start', []):
        start_data[password] = getpass('Password for %s: ' % password)

    # Actually start the job.
    debug.log('Launching the job.', header='details')
    result = client.post(endpoint, start_data)

    # If this used the /job_template/N/launch/ route, get the job
    # ID from the result.
    if supports_job_template_launch:
        job_id = result.json()['job']

    # If we were told to monitor the job once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(job_id, timeout=timeout)

    # Return the job ID.
    return {
        'changed': True,
        'id': job_id,
    }
def launch(self, job_template=None, monitor=False, wait=False,
           timeout=None, no_input=True, extra_vars=None, **kwargs):
    """Launch a new job based on a job template.

    Creates a new job in Ansible Tower, immediately starts it, and
    returns back an ID in order for its status to be monitored.

    =====API DOCS=====
    Launch a new job based on a job template.

    :param job_template: Primary key or name of the job template to launch new job.
    :type job_template: str
    :param monitor: Flag that if set, immediately calls ``monitor`` on the newly
                    launched job rather than exiting with a success.
    :type monitor: bool
    :param wait: Flag that if set, monitor the status of the job, but do not print
                 while job is in progress.
    :type wait: bool
    :param timeout: If provided with ``monitor`` flag set, this attempt will time out
                    after the given number of seconds.
    :type timeout: int
    :param no_input: Flag that if set, suppress any requests for input.
    :type no_input: bool
    :param extra_vars: yaml formatted texts that contains extra variables to pass on.
    :type extra_vars: array of strings
    :param diff_mode: Specify diff mode for job template to run.
    :type diff_mode: bool
    :param limit: Specify host limit for job template to run.
    :type limit: str
    :param tags: Specify tagged actions in the playbook to run.
    :type tags: str
    :param skip_tags: Specify tagged actions in the playbook to omit.
    :type skip_tags: str
    :param job_type: Specify job type for job template to run.
    :type job_type: str
    :param verbosity: Specify verbosity of the playbook run.
    :type verbosity: int
    :param inventory: Specify machine credential for job template to run.
    :type inventory: str
    :param credential: Specify machine credential for job template to run.
    :type credential: str
    :returns: Result of subsequent ``monitor`` call if ``monitor`` flag is on;
              Result of subsequent ``wait`` call if ``wait`` flag is on;
              Result of subsequent ``status`` call if none of the two flags are on.
    :rtype: dict
    =====API DOCS=====
    """
    # Get the job template from Ansible Tower.
    # This is used as the baseline for starting the job.
    tags = kwargs.get('tags', None)
    jt_resource = get_resource('job_template')
    jt = jt_resource.get(job_template)

    # Update the job data by adding an automatically-generated job name,
    # and removing the ID.
    data = {}
    if tags:
        data['job_tags'] = tags

    # Initialize an extra_vars list that starts with the job template
    # preferences first, if they exist
    extra_vars_list = []
    # NOTE(review): `data` was just created as {} (or only 'job_tags'),
    # so 'extra_vars' can never be in it here — this pre-2.4 branch is
    # dead code. Confirm whether it should read from `jt` instead.
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        # But only do this for versions before 2.3
        debug.log('Getting version of Tower.', header='details')
        r = client.get('/config/')
        if LooseVersion(r.json()['version']) < LooseVersion('2.4'):
            extra_vars_list = [data['extra_vars']]

    # Add the runtime extra_vars to this list
    if extra_vars:
        extra_vars_list += list(extra_vars)  # accept tuples

    # If the job template requires prompting for extra variables,
    # do so (unless --no-input is set).
    if jt.get('ask_variables_on_launch', False) and not no_input \
            and not extra_vars:
        # If JT extra_vars are JSON, echo them to user as YAML
        initial = parser.process_extra_vars([jt['extra_vars']],
                                            force_json=False)
        initial = '\n'.join((
            '# Specify extra variables (if any) here as YAML.',
            '# Lines beginning with "#" denote comments.',
            initial,
        ))
        extra_vars = click.edit(initial) or ''
        # Only use the edited buffer if the user actually changed it.
        if extra_vars != initial:
            extra_vars_list = [extra_vars]

    # Data is starting out with JT variables, and we only want to
    # include extra_vars that come from the algorithm here.
    data.pop('extra_vars', None)

    # Replace/populate data fields if prompted.
    modified = set()
    for resource in PROMPT_LIST:
        if jt.pop('ask_' + resource + '_on_launch', False) and not no_input:
            resource_object = kwargs.get(resource, None)
            if type(resource_object) == types.Related:
                resource_class = get_resource(resource)
                resource_object = resource_class.get(resource).pop(
                    'id', None)
            if resource_object is None:
                debug.log('{0} is asked at launch but not provided'.format(
                    resource), header='warning')
            elif resource != 'tags':
                data[resource] = resource_object
                modified.add(resource)

    # Dump extra_vars into JSON string for launching job
    if len(extra_vars_list) > 0:
        data['extra_vars'] = parser.process_extra_vars(extra_vars_list,
                                                       force_json=True)

    # Create the new job in Ansible Tower.
    start_data = {}
    endpoint = '/job_templates/%d/launch/' % jt['id']
    if 'extra_vars' in data and len(data['extra_vars']) > 0:
        start_data['extra_vars'] = data['extra_vars']
    if tags:
        start_data['job_tags'] = data['job_tags']
    for resource in PROMPT_LIST:
        if resource in modified:
            start_data[resource] = data[resource]

    # There's a non-trivial chance that we are going to need some
    # additional information to start the job; in particular, many jobs
    # rely on passwords entered at run-time.
    #
    # If there are any such passwords on this job, ask for them now.
    debug.log('Asking for information necessary to start the job.',
              header='details')
    job_start_info = client.get(endpoint).json()
    for password in job_start_info.get('passwords_needed_to_start', []):
        start_data[password] = getpass('Password for %s: ' % password)

    # Actually start the job.
    debug.log('Launching the job.', header='details')
    self._pop_none(kwargs)
    kwargs.update(start_data)
    job_started = client.post(endpoint, data=kwargs)

    # Get the job ID from the result.
    job_id = job_started.json()['id']

    # If returning json indicates any ignored fields, display it in
    # verbose mode.
    if job_started.text == '':
        ignored_fields = {}
    else:
        ignored_fields = job_started.json().get('ignored_fields', {})
    has_ignored_fields = False
    for key, value in ignored_fields.items():
        if value and value != '{}':
            if not has_ignored_fields:
                debug.log('List of ignored fields on the server side:',
                          header='detail')
                has_ignored_fields = True
            debug.log('{0}: {1}'.format(key, value))

    # Get some information about the running job to print
    result = self.status(pk=job_id, detail=True)
    result['changed'] = True

    # If we were told to monitor the job once it started, then call
    # monitor from here.
    if monitor:
        return self.monitor(job_id, timeout=timeout)
    elif wait:
        return self.wait(job_id, timeout=timeout)

    return result
def main(**kwargs):
    """Encrypt or decrypt text/files with AES-128-CBC.

    Expected kwargs (supplied by the click command): ``file`` (open file or
    None), ``key``, ``text``, ``edit``, ``encode`` (falsy means decode).

    Fixes applied: user-facing typo 'enncrypted' -> 'encrypted'; the
    placeholder variable ``_`` renamed to ``result``; the unreachable
    trailing else-branch (original comment: "this condition never occurs")
    removed; ``isFile`` renamed to snake_case.
    """
    is_file = kwargs["file"] is not None

    # --edit only makes sense when there is a file (or a key) to work with.
    if kwargs["edit"] and not (is_file or kwargs["key"]):
        raise click.UsageError(
            "'--edit' flag is only allowed with '-f' / '--file'."
            "\nhelp:\t'--edit'\n\t'-f' / '--file' [required]\n\t'-t' / '--text' [optional]"
        )

    if not kwargs["key"]:
        kwargs["key"] = click.prompt("Key", hide_input=True)

    # No text, no file, not editing: collect the text via the editor.
    if not kwargs["text"] and (not kwargs["edit"] and not is_file):
        print("[bold blue]Opening editor[/bold blue]")
        click.pause()
        kwargs["text"] = click.edit(text="")
        if kwargs["text"] in (None, ""):
            print("[bold red]error:[/bold red] Text is empty.")
            exit(1)
        kwargs["text"] = kwargs["text"].strip("\n")

    key_err = "[bold red]error:[/bold red] Invalid Key."

    if kwargs["edit"]:
        # Decrypt the file, let the user edit the plaintext, re-encrypt.
        try:
            deciphered = Crypto.Aes_128_cbc_pass().decrypt(
                kwargs["file"].read().strip(), kwargs["key"]
            )
        except Exception:
            print(key_err)
            exit(1)
        kwargs["text"] = click.edit(text=deciphered)
        if kwargs["text"] is None:  # editor closed without saving
            exit(1)
        ciphered = Crypto.Aes_128_cbc_pass().encrypt(kwargs["text"], kwargs["key"])
        kwargs["file"].truncate(0)
        kwargs["file"].seek(0)
        kwargs["file"].write(ciphered)
    elif not is_file:
        # Plain text mode: print the result, never touch the filesystem.
        if kwargs["encode"]:
            result = Crypto.Aes_128_cbc_pass().encrypt(kwargs["text"], kwargs["key"])
            print(f"Final result: [bold yellow]{result}[/bold yellow]")
        else:  # decode
            try:
                result = Crypto.Aes_128_cbc_pass().decrypt(kwargs["text"], kwargs["key"])
            except Exception:
                print(key_err)
                exit(1)
            print(f"Final result: [bold green]{result}[/bold green]")
    else:
        # File mode: transform the file contents in place.
        if kwargs["encode"]:
            result = Crypto.Aes_128_cbc_pass().encrypt(
                kwargs["file"].read(), kwargs["key"]
            )
            msg = "[bold green]File is now encrypted.[/bold green]"
        else:  # decode
            try:
                result = Crypto.Aes_128_cbc_pass().decrypt(
                    kwargs["file"].read().strip(), kwargs["key"]
                )
            except Exception:
                print(key_err)
                exit(1)
            msg = "[bold green]File is now decrypted.[/bold green]"
        kwargs["file"].truncate(0)
        kwargs["file"].seek(0)
        kwargs["file"].write(result)
        print(msg)
def card_repl(self, card: dict) -> bool:
    '''card_repl displays a command-prompt based UI for modifying a card,
    with tab-completion and suggestions. It is the logic behind "gtd review"
    and the "-e" flag in "gtd add"

    It makes assumptions about what a user might want to do with a card:
    - Are there attachments? Maybe you want to open them.
    - Does there appear to be a URL in the title? You might want to attach it.
    - Are there no tags? Maybe you want to add some.

    Returns:
        boolean: move forwards or backwards in the deck of cards
    '''
    # ANSI color toggles; empty strings when color output is disabled.
    on = Colors.yellow if self.config.color else ''
    off = Colors.reset if self.config.color else ''
    self.display.show_card(card)

    # Pre-REPL conveniences: attachments, URL-in-title, missing tags.
    if self.config.prompt_for_open_attachments and card['badges'][
            'attachments']:
        if prompt_for_confirmation(f'{on}Open attachments?{off}', False):
            with DevNullRedirect():
                for url in [
                        a['url'] for a in card.fetch_attachments()
                        if a['url']
                ]:
                    webbrowser.open(url)
    if re.search(VALID_URL_REGEX, card['name']):
        if prompt_for_confirmation(
                f'{on}Link in title detected, want to attach it & rename?{off}',
                True):
            card.title_to_link()
    if self.config.prompt_for_untagged_cards and not card['labels']:
        print(f'{on}No tags on this card yet, want to add some?{off}')
        card.add_labels(self._label_choices)

    # Command name -> help text; also feeds the fuzzy completer.
    commands = {
        'archive': 'mark this card as closed',
        'attach': 'add, delete, or open attachments',
        'change-list': 'move this to a different list on the same board',
        'comment': 'add a comment to this card',
        'delete': 'permanently delete this card',
        'duedate': 'add a due date or change the due date',
        'description': 'change the description of this card (desc)',
        'help': 'display this help output (h)',
        'move': 'move to a different board and list (m)',
        'next': 'move to the next card (n)',
        'open': 'open all links on this card (o)',
        'prev': 'go back to the previous card (p)',
        'print': 're-display this card',
        'rename': 'change title of this card',
        'tag': 'add or remove tags on this card (t)',
        'unarchive': 'mark this card as open',
        'quit': 'exit program',
    }
    command_completer = FuzzyWordCompleter(commands.keys())
    while True:
        user_input = prompt('gtd.py > ', completer=command_completer)
        if user_input in ['q', 'quit']:
            raise GTDException(0)
        elif user_input in ['n', 'next']:
            return True
        elif user_input in ['p', 'prev']:
            return False
        elif user_input == 'print':
            # Refresh from the server before re-displaying.
            card.fetch()
            self.display.show_card(card)
        elif user_input in ['o', 'open']:
            with DevNullRedirect():
                for url in [
                        a['url'] for a in card.fetch_attachments()
                        if a['url'] is not None
                ]:
                    webbrowser.open(url)
        elif user_input in ['desc', 'description']:
            card.change_description()
        elif user_input == 'delete':
            card.delete()
            print('Card deleted')
            return True
        elif user_input == 'attach':
            card.manipulate_attachments()
        elif user_input == 'archive':
            card.set_closed(True)
            print('Card archived')
            return True
        elif user_input == 'unarchive':
            card.set_closed(False)
            print('Card returned to board')
        elif user_input in ['t', 'tag']:
            card.add_labels(self._label_choices)
        elif user_input == 'rename':
            # TODO optional form 'rename New name of card'
            card.rename()
        elif user_input == 'duedate':
            card.set_due_date()
        elif user_input in ['h', 'help']:
            for cname, cdesc in commands.items():
                print('{0:<16}| {1}{2}{3}'.format(cname, on, cdesc, off))
        elif user_input == 'change-list':
            if card.move_to_list(self._list_choices):
                return True
        elif user_input in ['m', 'move']:
            self.move_between_boards(card)
        elif user_input == 'comment':
            # TODO Optional form 'comment Contents of a comment'
            new_comment = click.edit(text='<Comment here>', require_save=True)
            if new_comment:
                card.comment(new_comment)
            else:
                click.secho('Change the text & save to post the comment',
                            fg='red')
        else:
            print(
                f'{on}{user_input}{off} is not a command, type "{on}help{off}" to view available commands'
            )
def configure():
    """Ensure a configuration file exists, then open it in an editor."""
    needs_bootstrap = not config
    if needs_bootstrap:
        # First run: write a fresh default config before editing it.
        logger.info('generating new config {}'.format(config_filename))
        generate_config(config_filename)
    click.edit(filename=config_filename)
def config(context, key, value, edit):
    """
    Get and set configuration options.

    If value is not provided, the content of the key is displayed. Else,
    the given value is set.

    You can edit the config file with an editor with the `--edit` option.

    Example:

    \b
    $ watson config backend.token 7e329263e329
    $ watson config backend.token 7e329263e329
    """
    watson = context.obj
    # Keep a reference to the currently-loaded config so it can be
    # restored if the edited file turns out to be invalid.
    wconfig = watson.config

    if edit:
        try:
            with open(watson.config_file) as fp:
                rawconfig = fp.read()
        except (IOError, OSError):
            # No config file yet: start the editor with an empty buffer.
            rawconfig = ''
        newconfig = click.edit(text=rawconfig, extension='.ini')
        if newconfig:
            safe_save(watson.config_file, newconfig)
        try:
            watson.config = None
            watson.config  # triggers reloading config from file
        except _watson.ConfigurationError as exc:
            # Roll back to the previous (valid) configuration.
            watson.config = wconfig
            watson.save()
            raise WatsonCliError(str(exc))
        return

    if not key:
        click.echo(context.get_help())
        return

    try:
        section, option = key.split('.')
    except ValueError:
        raise click.ClickException(
            "The key must have the format 'section.option'"
        )

    if value is None:
        # Read mode: print the requested option.
        if not wconfig.has_section(section):
            raise click.ClickException("No such section {}".format(section))

        if not wconfig.has_option(section, option):
            raise click.ClickException(
                "No such option {} in {}".format(option, section)
            )

        click.echo(wconfig.get(section, option))
    else:
        # Write mode: create the section if needed, set, and persist.
        if not wconfig.has_section(section):
            wconfig.add_section(section)

        wconfig.set(section, option, value)
        watson.config = wconfig
        watson.save()
def edit_config(app):
    """Open the application's configuration file in the default editor.

    Args:
        app: application object exposing ``_config_file_path``.
    """
    click.edit(filename=app._config_file_path)
def mainloop(file, savedump, database, cfgfile, records, orderby, compact,
             known, verbose, nosense, translate, destlang):
    """Get user Japanese input then parse it and record new words into database.

    Reads sentences (from an editor session or from *file*), runs them
    through KNP for morphological analysis, looks each morpheme up in
    Jamdict, optionally machine-translates via the Baidu API, and writes
    both a markdown dump (*savedump*) and an SQLite word table (*database*).
    Note: several flags (*translate*, *nosense*, *compact*) are string
    "true"/"false" values, not booleans.
    """
    jmd = Jamdict()
    knp = KNP()

    # Load the known-words list: "word,pos1|pos2" lines; '#' lines are comments.
    knownlist = {}
    with open(known, 'r') as reader:
        lines = reader.readlines()
        for line in lines:
            if re.match("^#", line):
                continue
            entry = line.split(",")
            if len(entry) == 2:
                knownlist[entry[0].strip()] = entry[1].strip()

    appid = ""
    appkey = ""
    if translate == "true":
        # See https://fanyi-api.baidu.com/
        # See https://fanyi-api.baidu.com/api/trans/product/desktop?req=developer
        # See https://docs.python.org/3/library/configparser.html
        config = configparser.ConfigParser()
        config.read(cfgfile)
        # Set your own appid/appkey.
        appid = config['api.fanyi.baidu.com']['appid']
        appkey = config['api.fanyi.baidu.com']['appkey']

    jumandict = sqlite3.connect(database)
    dictcursor = jumandict.cursor()
    dictcursor.execute("CREATE TABLE IF NOT EXISTS words (id INTEGER PRIMARY KEY, name TEXT UNIQUE, desc TEXT, count INTEGER)")

    dumper = open(savedump, 'w')
    dumper.write("# 日语学习记录\n\n")

    while True:
        userinputs = ""
        if file == "":
            # Interactive mode: confirm, show recent/frequent words, then
            # collect input via the user's editor.
            try:
                if not click.confirm('想要进入编辑器输入日文句子或段落进行分析吗?'):
                    continue
            except EOFError:
                print("\n你选择退出了哦!")
                break
            except click.Abort:
                print("\n你选择退出了哦!")
                break
            if records > 0:
                # NOTE(review): orderby/records are interpolated directly into
                # SQL via .format(); safe only if they come from a restricted
                # CLI choice — verify, or use parameterized queries.
                rows = dictcursor.execute("SELECT id, name, desc, count FROM words ORDER BY {} DESC LIMIT {}".format(orderby, records)).fetchall()
                words = len(rows)
                if words > 0:
                    if orderby == "id":
                        print("最近保存过的{}个单词(最近优先排序):".format(words))
                    else:
                        print("出现频率最高的{}个单词(高频优先排序):".format(words))
                    count = 0
                    for row in rows:
                        print('{} [{} ({}次)]:\n'.format(row[0], row[1], row[3]))
                        print(row[2])
            userinputs = click.edit()
            if userinputs is None:
                print("你啥也没输入啊!")
                continue
        else:
            # Batch mode: read the whole input file once.
            with open(file, 'r') as reader:
                lines = reader.readlines()
                userinputs = "".join(lines)

        if translate == "true":
            # For list of language codes, please refer to `https://api.fanyi.baidu.com/doc/21`
            from_lang = 'jp'
            to_lang = destlang
            endpoint = 'http://api.fanyi.baidu.com'
            path = '/api/trans/vip/translate'
            url = endpoint + path
            salt = random.randint(32768, 65536)
            sign = make_md5(appid + userinputs + str(salt) + appkey)
            # Build request
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            payload = {'appid': appid, 'q': userinputs, 'from': from_lang, 'to': to_lang, 'salt': salt, 'sign': sign}
            # Send request
            r = requests.post(url, params=payload, headers=headers)
            result = r.json()
            # Show response
            print("=================================")
            print(userinputs)
            dumper.write("```\n")
            dumper.write(userinputs)
            print("=================================")
            dumper.write("=================================\n")
            trans_result = result["trans_result"]
            for i in range(len(trans_result)):
                dst = trans_result[i]["dst"]
                print(dst)
                dumper.write(dst + "\n")
            dumper.write("```\n")

        # Split on the Japanese full stop, re-appending it to each sentence.
        inputsentences = [x+"。" for x in userinputs.split("。") if x.strip() != ""]
        for userinput in inputsentences:
            userinput = userinput.strip()
            userinput = userinput.encode('utf-8','surrogatepass').decode('utf-8')
            print("=================================")
            print(userinput)
            dumper.write("## "+ userinput + "\n\n")
            result = knp.parse(userinput.replace("\n", ""))
            dumper.write("```\n")
            dumper.write(userinput + "\n")
            # Print phrases staircase-style, wrapping at ~80 columns.
            length = 0
            for bnst in result.bnst_list():  # visit each phrase (bunsetsu)
                phrase = "".join(mrph.midasi for mrph in bnst.mrph_list())
                phrase = phrase.replace("\␣", " ")
                print(" " * length + phrase)
                dumper.write(" " * length + phrase + "\n")
                length = length + len(phrase)
                if length > 80:
                    length = 0
            dumper.write("```\n")
            print("=================================")

            for mrph in result.mrph_list():  # visit each morpheme
                # Check whether this morpheme is already a known word with a
                # matching part of speech.
                found = False
                for known in knownlist.keys():
                    if mrph.genkei == known:
                        types = knownlist[known].split("|")
                        for type in types:
                            if mrph.hinsi == type:
                                found = True
                                break
                # Skip known words (in quiet mode) and special tokens.
                if ((found == True) and (verbose == "none")) or (mrph.hinsi == "特殊"):
                    continue

                # Assemble a one-line morphological summary.
                message = "ID:{}".format(mrph.mrph_id)
                if mrph.midasi:
                    message += ", 词汇:{}".format(mrph.midasi)
                if mrph.yomi:
                    message += ", 读法:{}".format(mrph.yomi)
                if mrph.genkei:
                    message += ", 原形:{}".format(mrph.genkei)
                if mrph.hinsi and mrph.hinsi != "*":
                    message += ", 词性:{}".format(mrph.hinsi)
                if mrph.bunrui and mrph.bunrui != "*":
                    message += ", 词性细分:{}".format(mrph.bunrui)
                if mrph.katuyou1 and mrph.katuyou1 != "*":
                    message += ", 活用型:{}".format(mrph.katuyou1)
                if mrph.katuyou2 and mrph.katuyou2 != "*":
                    message += ", 活用形:{}".format(mrph.katuyou2)
                if mrph.imis and mrph.imis != "NIL":
                    message += ", {}".format(mrph.imis)  # semantic info
                elif mrph.repname:
                    message += ", 代表符号:{}".format(mrph.repname)
                print("\t" + message)
                dumper.write("### " + message + "\n")

                if nosense == "true" or (found == True and verbose == "half"):
                    continue

                # use exact matching to find exact meaning
                dictcheck = jmd.lookup(mrph.genkei)
                if len(dictcheck.entries) == 0:
                    dictcheck = jmd.lookup(mrph.midasi)
                if len(dictcheck.entries) == 0:
                    dictcheck = jmd.lookup(mrph.yomi)
                if len(dictcheck.entries) > 0:
                    desc = ""
                    print("\n")
                    dumper.write("\n")
                    for entry in dictcheck.entries:
                        text = ""
                        if compact == "true":
                            text = entry.text(compact=False, no_id=True)
                            text = re.sub('[`\']', '"', text)
                            print(text)
                        else:
                            # Build "kana (kanji)" header plus a numbered
                            # list of senses.
                            tmp = []
                            if entry.kana_forms:
                                tmp.append(entry.kana_forms[0].text)
                            if entry.kanji_forms:
                                tmp.append("({})".format(entry.kanji_forms[0].text))
                            header = " ".join(tmp)
                            tmp = []
                            if entry.senses:
                                for sense, idx in zip(entry.senses, range(len(entry.senses))):
                                    tmps = [str(x) for x in sense.gloss]
                                    if sense.pos:
                                        s = '{gloss} ({pos})'.format(gloss='/'.join(tmps), pos=('(%s)' % '|'.join(sense.pos)))
                                    else:
                                        s = '/'.join(tmps)
                                    s = re.sub('[`\']', '"', s)
                                    tmp.append(' {i}. {s}\n'.format(i=idx + 1, s=s))
                                senses = "".join(tmp)
                                print(header)
                                print(senses)
                                text = "**" + header + "**\n" + senses
                            desc = desc + text + "\n"
                            text = re.sub('[|]', '\|', text)
                            dumper.write("- " + text + "\n")
                    # NOTE(review): SQL values are inserted via .format()
                    # with manual quote doubling — consider parameterized
                    # queries ('?') to avoid injection/escaping bugs.
                    dictcursor.execute('INSERT INTO words (name, desc, count) VALUES ("{}", "{}", "{}") ON CONFLICT (name) DO UPDATE SET count = count + 1'
                                       .format(mrph.genkei.replace('"', '""'), desc.replace('"', '""'), 1))
                    jumandict.commit()
        dumper.flush()
        if file != "":
            break  # batch mode processes the file exactly once

    jumandict.close()
    dumper.close()
def config():
    """Open the configuration file in the user's editor."""
    config_path = str(CONFIG_FILE)
    click.edit(filename=config_path)
def remove(ctx, client, sources):
    """Remove files and check repository for potential problems.

    Unlinks the given files/directories from every dataset's metadata,
    untracks them from external (LFS) storage when configured, and finally
    removes them from the git repository with ``git rm -rf``.
    """
    from renku.core.management.git import _expand_directories

    def fmt_path(path):
        """Format path as relative to the client path."""
        abs_path = os.path.abspath(client.path / path)
        try:
            return str(Path(abs_path).relative_to(client.path))
        except ValueError:
            # Reject anything that escapes the project root.
            raise errors.ParameterError(
                f'File {abs_path} is not within the project.'
            )

    # Map each expanded file to the source argument it came from; keys are
    # the individual files, values the user-supplied file-or-directory.
    files = {
        fmt_path(source): fmt_path(file_or_dir)
        for file_or_dir in sources
        for source in _expand_directories((file_or_dir, ))
    }

    # 1. Update dataset metadata files.
    with progressbar(
        client.datasets.values(),
        item_show_func=lambda item: str(item.short_id) if item else '',
        label='Updating dataset metadata',
        width=0,
    ) as bar:
        for dataset in bar:
            remove = []
            for file_ in dataset.files:
                key = file_.path
                filepath = fmt_path(file_.path)
                if filepath in files:
                    remove.append(key)

            if remove:
                for key in remove:
                    dataset.unlink_file(key)
                    client.remove_file(key)

                # Persist the dataset only when something was unlinked.
                dataset.to_yaml()

    # 2. Manage .gitattributes for external storage.
    if client.has_external_storage:
        # Paths currently tracked by LFS among the files being removed.
        tracked = tuple(
            path for path, attr in client.find_attr(*files).items()
            if attr.get('filter') == 'lfs'
        )
        client.untrack_paths_from_storage(*tracked)
        # Attributes still present after untracking are user customizations.
        existing = client.find_attr(*tracked)
        if existing:
            click.echo(WARNING + 'There are custom .gitattributes.\n')
            if click.confirm(
                'Do you want to edit ".gitattributes" now?', default=False
            ):
                click.edit(filename=str(client.path / '.gitattributes'))

    # Finally remove the files.
    # De-duplicate the original source arguments before invoking git.
    final_sources = list(set(files.values()))
    if final_sources:
        run(['git', 'rm', '-rf'] + final_sources, check=True)
def open_config_file():
    """Open Thoth's configuration file."""
    config_name = config.CONFIG_NAME
    with workdir(config_name):
        _LOGGER.debug("Opening configuration file %r", config_name)
        click.edit(filename=config_name)
def edit(watson, id):
    """
    Edit a frame.

    You can specify the frame to edit by its position or by its frame id.
    For example, to edit the second-to-last frame, pass `-2` as the frame
    index. You can get the id of a frame with the `watson log` command.

    If no id or index is given, the frame defaults to the current frame or the
    last recorded frame, if no project is currently running.

    The `$EDITOR` environment variable is used to detect your editor.
    """
    # Formats used to round-trip datetimes through the JSON shown in the
    # editor (arrow-style tokens).
    date_format = 'YYYY-MM-DD'
    time_format = 'HH:mm:ss'
    datetime_format = '{} {}'.format(date_format, time_format)
    local_tz = tz.tzlocal()

    # Resolve the frame to edit: explicit id/index, else the currently
    # running frame, else the most recently recorded one.
    if id:
        frame = get_frame_from_argument(watson, id)
        id = frame.id
    elif watson.is_started:
        # The running frame has no stop time and no id yet.
        frame = Frame(watson.current['start'], None, watson.current['project'],
                      None, watson.current['tags'])
    elif watson.frames:
        frame = watson.frames[-1]
        id = frame.id
    else:
        raise click.ClickException(
            style(
                'error', "No frames recorded yet. It's time to create your "
                "first one!"))

    data = {
        'start': frame.start.format(datetime_format),
        'project': frame.project,
        'tags': frame.tags,
    }

    # Only completed frames (those with an id) carry a stop time.
    if id:
        data['stop'] = frame.stop.format(datetime_format)

    text = json.dumps(data, indent=4, sort_keys=True, ensure_ascii=False)

    output = click.edit(text, extension='.json')

    # click.edit returns None when the editor is closed without saving.
    if not output:
        click.echo("No change made.")
        return

    try:
        data = json.loads(output)
        project = data['project']
        tags = data['tags']
        # Times are entered in local time; normalize to UTC for storage.
        start = arrow.get(data['start'],
                          datetime_format).replace(tzinfo=local_tz).to('utc')
        stop = arrow.get(data['stop'], datetime_format).replace(
            tzinfo=local_tz).to('utc') if id else None
    except (ValueError, RuntimeError) as e:
        raise click.ClickException("Error saving edited frame: {}".format(e))
    except KeyError:
        raise click.ClickException(
            "The edited frame must contain the project, start and stop keys.")

    # Write back: replace the stored frame, or update the running one.
    if id:
        watson.frames[id] = (project, start, stop, tags)
    else:
        watson.current = dict(start=start, project=project, tags=tags)

    watson.save()
    click.echo(
        'Edited frame for project {project} {tags}, from {start} to {stop} '
        '({delta})'.format(
            delta=format_timedelta(stop - start) if stop else '-',
            project=style('project', project),
            tags=style('tags', tags),
            start=style('time', start.to(local_tz).format(time_format)),
            stop=style(
                'time',
                stop.to(local_tz).format(time_format) if stop else '-')))
def edit(self, event, always_save=False, external_edit=False):
    """create an EventEditor and display it

    :param event: event to edit
    :type event: khal.event.Event
    :param always_save: even save the event if it hasn't changed
    :type always_save: bool
    :param external_edit: edit the event's raw icalendar text in an
        external editor (via click.edit) instead of the urwid editor
    :type external_edit: bool
    """
    if event.readonly:
        self.pane.window.alert(
            ('alert', f'Calendar `{event.calendar}` is read-only.'))
        return

    # Remember the event's original date span so the calendar can be
    # refreshed over the union of the old and new ranges after editing.
    if isinstance(event.start_local, dt.datetime):
        original_start = event.start_local.date()
    else:
        original_start = event.start_local
    if isinstance(event.end_local, dt.datetime):
        original_end = event.end_local.date()
    else:
        original_end = event.end_local

    def update_colors(new_start, new_end, everything=False):
        """reset colors in the calendar widget and dates in DayWalker
        between min(new_start, original_start)

        :type new_start: datetime.date
        :type new_end: datetime.date
        :param everything: set to True if event is a recurring one, than
            everything gets reseted
        """
        # TODO cleverer support for recurring events, where more than start and
        # end dates are affected (complicated)
        if isinstance(new_start, dt.datetime):
            new_start = new_start.date()
        if isinstance(new_end, dt.datetime):
            new_end = new_end.date()
        start = min(original_start, new_start)
        end = max(original_end, new_end)
        self.pane.eventscolumn.base_widget.update(start, end, everything)

        # set original focus date
        self.pane.calendar.original_widget.set_focus_date(new_start)
        self.pane.eventscolumn.original_widget.set_focus_date(new_start)

    # Close any editor that is already open before starting a new one.
    if self.editor:
        self.pane.window.backtrack()

    assert not self.editor
    if external_edit:
        # Suspend the urwid main loop while the external editor runs.
        self.pane.window.loop.stop()
        text = click.edit(event.raw)
        self.pane.window.loop.start()
        # click.edit returns None when the editor exits without saving.
        if text is None:
            return
        # KeyErrors can occur here when we destroy DTSTART,
        # otherwise, even broken .ics files seem to be no problem
        new_event = Event.fromString(
            text,
            locale=self._conf['locale'],
            href=event.href,
            calendar=event.calendar,
            etag=event.etag,
        )
        self.pane.collection.update(new_event)
        update_colors(
            new_event.start_local,
            new_event.end_local,
            (event.recurring or new_event.recurring)
        )
    else:
        self.editor = True
        editor = EventEditor(self.pane, event, update_colors,
                             always_save=always_save)

        # Wrap editor and event list in the configured frame style.
        ContainerWidget = linebox[self.pane._conf['view']['frame']]
        new_pane = urwid.Columns([
            ('weight', 2, ContainerWidget(editor)),
            ('weight', 1, ContainerWidget(self.dlistbox))
        ], dividechars=2, focus_column=0)
        new_pane.title = editor.title

        def teardown(data):
            # Mark the editor as closed when the pane is dismissed.
            self.editor = False
        self.pane.window.open(new_pane, callback=teardown)
def new(flat):
    """Interactively create a new invoice.

    Loads ``invoices.yml``, opens the user's editor with a YAML template
    (without per-service hours when *flat* is true), derives a unique
    invoice id from the date, client initials and a per-client sequence
    number, renders the invoice PDF, and writes the updated data back to
    ``invoices.yml``. Prints the rendered PDF's filename.

    :param flat: if true, omit hourly breakdowns (no 'hours'/'total_hours').
    """
    with open('invoices.yml', 'r') as f:
        data = yaml.load(f, Loader=yaml.FullLoader) or {}

    # Template shown to the user in the editor.
    template = {
        'client': '',
        'project': '',
        'rate': 0,
        'services': [{'name': ''} if flat else {'name': '', 'hours': 0}],
    }

    raw_input = click.edit(yaml.dump(template, default_flow_style=False))
    if raw_input is None:
        # Editor closed without saving; abort without touching anything.
        return
    entry = yaml.load(raw_input, Loader=yaml.FullLoader)

    client = entry['client']
    project_name = entry['project']

    # Build a 3-letter initial for the client.
    # FIX: '\W' was an invalid escape sequence (DeprecationWarning, and a
    # SyntaxError in future Python versions) — use a raw string.
    parts = re.split(r'\W+', client)
    if len(parts) < 2:
        initials = client[:3].upper()
    elif len(parts) < 3:
        initials = ''.join([parts[0][0], parts[1][:2]]).upper()
    else:
        initials = ''.join([p[0] for p in parts[:3]]).upper()

    # Compute the project number for this client (zero-padded sequence).
    past_projects = data.get(client, {})
    project_num = str(len(past_projects) + 1).zfill(3)

    # Date of issue, e.g. "January 01, 2024".
    today = datetime.today()
    date = today.strftime('%B %d, %Y')

    # Unique invoice id: DDMMYY-<initials>-<per-client sequence>.
    invoice_id = '{}-{}-{}'.format(
        today.strftime('%d%m%y'), initials, project_num)

    project = {
        'date': date,
        'project': project_name,
        'rate': entry['rate'],
        'services': entry['services'],
    }
    if not flat:
        project['total_hours'] = sum(s['hours'] for s in project['services'])

    data.setdefault(client, {})[invoice_id] = project

    pdfname = render_invoice(invoice_id, client, data[client][invoice_id])

    with open('invoices.yml', 'w') as f:
        f.write(yaml.dump(data, default_flow_style=False))

    print(pdfname)
def edit(customcommand):
    """Edit the given custom command"""
    click.edit(filename=Path(customcommand.customcommand_path))
def _get_commit_message(*keys: str) -> "str | None":
    """Open an editor to compose a commit message seeded with *keys*.

    The editor buffer starts with two blank lines, a marker line, and the
    given keys (one per line) below it; everything from the marker down is
    stripped from the result.

    :returns: the message above the marker with trailing newlines removed,
        or None when the editor was closed without saving.
    """
    MARKER = "# Everything below is ignored\n"
    message = click.edit("\n\n" + MARKER + "\n".join(keys))
    if message is None:
        # FIX: the original fell off the end here, implicitly returning None
        # despite the `-> str` annotation; make the None path explicit and
        # correct the annotation.
        return None
    return message.split(MARKER, 1)[0].rstrip("\n")
def edit_bootloader_default(self, temp_esp: str, overwrite: bool):
    """Point systemd-boot's 'default' entry at the new boot environment.

    Copies ``<esp>/loader/loader.conf`` into the temporary ESP tree,
    rewrites its ``default`` line to ``self.new_entry`` and, unless running
    in noop mode, backs up and replaces the real ``loader.conf``. Fatal
    conditions are reported through ZELogger with ``exit_on_error=True``.

    :param temp_esp: root of the temporary ESP working tree.
    :param overwrite: kept for interface compatibility; not used here.
    """
    real_loader_dir_path = os.path.join(self.esp, "loader")
    temp_loader_dir_path = os.path.join(temp_esp, "loader")

    real_loader_conf_path = os.path.join(real_loader_dir_path, "loader.conf")
    temp_loader_conf_path = os.path.join(temp_loader_dir_path, "loader.conf")

    ZELogger.verbose_log(
        {
            "level": "INFO",
            "message": f"Updating {real_loader_conf_path}\n"
        }, self.verbose)

    # Ensure the temporary loader directory exists before copying into it.
    if not os.path.isdir(temp_loader_dir_path):
        try:
            os.makedirs(temp_loader_dir_path)
        except PermissionError as e:
            ZELogger.log(
                {
                    "level": "EXCEPTION",
                    "message": f"Require Privileges to write to {temp_loader_dir_path}\n{e}"
                }, exit_on_error=True)
        except OSError as os_err:
            ZELogger.log({
                "level": "EXCEPTION",
                "message": os_err
            }, exit_on_error=True)

    if not os.path.isfile(real_loader_conf_path):
        ZELogger.log(
            {
                "level": "EXCEPTION",
                "message": f"Missing file: {real_loader_conf_path}\n"
            }, exit_on_error=True)

    try:
        shutil.copy(real_loader_conf_path, temp_loader_conf_path)
    except PermissionError as e:
        ZELogger.log(
            {
                "level": "EXCEPTION",
                "message": f"Require Privileges to write to '{temp_loader_conf_path}'\n{e}"
            }, exit_on_error=True)
    except IOError as e:
        ZELogger.log(
            {
                "level": "EXCEPTION",
                "message": f"IOError writing to '{temp_loader_conf_path}'\n{e}"
            }, exit_on_error=True)

    with open(temp_loader_conf_path, "r") as loader_conf:
        conf_list = loader_conf.readlines()

    # Locate the line that sets the default entry ("default <entry>").
    line_num = next((i for i, val in enumerate(conf_list)
                     if val.split(' ', 1)[0] == "default"), None)

    # FIX: was `if line_num:` — index 0 is falsy, so a 'default' line at
    # the very top of loader.conf was never rewritten.
    if line_num is not None:
        conf_list[line_num] = f"default {self.new_entry}\n"

    if not self.noop:
        if os.path.isfile(real_loader_conf_path):
            ZELogger.verbose_log(
                {
                    "level": "INFO",
                    "message": (f"File {real_loader_conf_path} already exists, backed up to "
                                f"'{real_loader_conf_path}.bak' and replaced.\n")
                }, self.verbose)

            # Remove a stale backup before moving the current file aside.
            if os.path.isfile(f"{real_loader_conf_path}.bak"):
                try:
                    os.remove(f"{real_loader_conf_path}.bak")
                except PermissionError:
                    ZELogger.log(
                        {
                            "level": "EXCEPTION",
                            "message": (f"Require Privileges to remove "
                                        f"'{real_loader_conf_path}.bak'\n")
                        }, exit_on_error=True)
            try:
                shutil.move(real_loader_conf_path, f"{real_loader_conf_path}.bak")
            except PermissionError:
                ZELogger.log(
                    {
                        "level": "EXCEPTION",
                        "message": (f"Require Privileges to write to "
                                    f"'{real_loader_conf_path}.bak'\n")
                    }, exit_on_error=True)

        with open(real_loader_conf_path, "w") as loader_conf:
            loader_conf.writelines(conf_list)

        if not self.noconfirm:
            if click.confirm(
                    "Would you like to edit the generated 'loader.conf'?",
                    default=True):
                click.edit(filename=real_loader_conf_path)
def cli() -> None:
    """Pick bugs, let the user annotate them in an editor, and save them.

    Runs an edit/parse/confirm loop until the text parses cleanly, the user
    chooses to write the valid subset, or the user cancels.
    """
    # Pick bugs via fzf
    picked_bugs = fuzzy_pick_bug()
    if not picked_bugs:
        return

    # Parsing the bugs
    rst_text = None
    while True:
        # Create .rst template from the list of picked bugs
        # (only on the first pass; later passes re-edit the user's text)
        if rst_text is None:
            rst_text = bugs_to_rst(picked_bugs)

        # Let user fill the template
        rst_text = click.edit(rst_text)

        # If the user did not provide any input (either '' or None) then exit
        if not rst_text:
            return

        # Parse filled in template into bugs and errors
        try:
            items = list(rst_to_bugs(rst_text))
        except SystemMessage:
            char = user_read_character(
                "Could not parse text.",
                "[e]dit/[c]ancel: ",
            )
            if char == "e":
                print()
                continue
            elif char == "c":
                return
            # NOTE(review): any other return value falls through with
            # `items` unbound (NameError below) — presumably
            # user_read_character only returns the listed options; verify.

        only_bugs, only_errs = split_to_types(
            items=items,
            t1=Bug,
            t2=ValidationError,
        )

        # Print the parse results
        print_bugs_and_errors(bugs=only_bugs, errs=only_errs)

        # If no problems encountered then go further
        if not only_errs:
            break

        # else if no valid bugs could be parsed from the text
        if not only_bugs:
            char = user_read_character(
                "No meaningfull bugs found.",
                "[e]dit/[c]ancel: ",
            )
            if char == "e":
                print()
                continue
            elif char == "c":
                return

        # else put the options to the user
        char = user_read_character(
            "Some errors found. Watcha gonna do?",
            "[e]dit/[w]rite valid/[c]ancel: ",
        )
        if char == "e":
            print()
            continue
        elif char == "w":
            # Write only the bugs that parsed successfully.
            break
        elif char == "c":
            return

    # Let the user choose the appropriate dates
    bugs_save_dialog(only_bugs)