def get_experiment_choice(self) -> str:
    """Let the user pick one of the experiments that have been run.

    Shows a raw-select prompt over the experiment folder names; if the
    chosen experiment is not present locally, it is downloaded from s3
    (best checkpoint only) before returning.

    Returns
    -------
    str
        Path of the experiment directory chosen by the user.
    """
    base_dirpath = pathlib.Path(OUTPUT_DIR)
    dirnames = self.get_experiments_folder_names()
    if not dirnames:
        self.msg_printer.fail(
            f"There are no experiments for the model type {self.model_type_answer}"
        )
        exit(1)

    chosen_exp = questionary.rawselect(
        "Please select an experiment",
        choices=[Choice(dirname) for dirname in dirnames],
        qmark="❓",
    ).ask()

    chosen_path = pathlib.Path(OUTPUT_DIR, chosen_exp)
    # First time this experiment is inspected locally: fetch it from s3.
    if not chosen_path.is_dir():
        with self.msg_printer.loading(f"Downloading experiment {chosen_exp} from s3"):
            self.s3util.download_folder(chosen_exp, download_only_best_checkpoint=True)

    return str(base_dirpath.joinpath(chosen_path))
def pick_tasks(items, service_task):
    """Interactively select task lists and a date range, then collect
    completed tasks grouped by completion date.

    Args:
        items: Task-list resources; each must have 'title' and 'id' keys.
        service_task: Google Tasks API service object used to fetch tasks.

    Returns:
        collections.OrderedDict mapping 'yyyy-mm-dd' date strings to lists
        of task titles, sorted in ascending date order.
    """
    # Allow users to select the task lists to move tasks from
    todo_lists = [item['title'] for item in items]
    selected_lists = (questionary.checkbox(
        "Select List",
        choices=todo_lists,
    ).ask() or ['My List'])
    print(f"Moving tasks from {' and '.join(selected_lists)}.")

    # Add a date range
    min_date = questionary.text(
        "Enter minimum date in format (yyyy-mm-dd)").ask()
    min_date = min_date + 'T00:00:00.00Z'
    print(min_date)

    max_date_options = ['Now', 'Custom date']
    selected_max_date = (questionary.rawselect(
        "Select maximum date of date range",
        choices=max_date_options,
    ).ask() or "do nothing")
    if selected_max_date == 'Now':
        max_date = datetime.now().strftime('%Y-%m-%dT%H:%M:%S.00Z')
    else:
        max_date = questionary.text(
            "Enter maximum date in format (yyyy-mm-dd)").ask()
        max_date = max_date + 'T00:00:00.00Z'
    print(max_date)

    # Filter completed tasks only
    completed_tasks = dict()
    for item in items:
        if item['title'] not in selected_lists:
            continue
        task = service_task.tasks().list(tasklist=item['id'],
                                         showHidden=1,
                                         completedMin=min_date,
                                         completedMax=max_date).execute()
        # BUG FIX: the API omits the 'items' key entirely when a list has
        # no matching tasks, so index it defensively instead of task['items'].
        for entry in task.get('items', []):
            # We are concerned with date only, not time, hence splicing the
            # date string to only include date
            day = str(entry['updated'])[0:10]
            if day in completed_tasks:
                completed_tasks[day].append(entry['title'])
            else:
                completed_tasks[day] = [entry['title']]

    # Sorting the tasks in ascending order of updated date
    completed_tasks = collections.OrderedDict(sorted(completed_tasks.items()))
    return completed_tasks
def ask_pystyle(**kwargs):
    """Build the styled rawselect question, then prompt the user for an answer."""
    menu = [
        'Order a pizza',
        'Make a reservation',
        Separator(),
        'Ask opening hours',
        'Talk to the receptionist',
    ]
    return questionary.rawselect(
        'What do you want to do?',
        choices=menu,
        style=custom_style_dope,
        **kwargs,
    ).ask()
def showMain(self):
    """Walk the user through one example of each questionary prompt type."""
    questionary.text("What's your first name").ask()
    questionary.password("What's your secret?").ask()
    questionary.confirm("Are you amazed?").ask()

    # The same menu is shown by both select variants.
    menu = ["Order a pizza", "Make a reservation", "Ask for opening hours"]
    questionary.select("What do you want to do?", choices=menu).ask()
    questionary.rawselect("What do you want to do?", choices=menu).ask()

    questionary.checkbox("Select toppings", choices=["foo", "bar", "bazz"]).ask()
    questionary.path("Path to the projects version file").ask()
def save_qualifying_loans(qualifying_loans):
    """Saves the qualifying loans to a CSV file.

    Args:
        qualifying_loans (list of lists): The qualifying bank loans.
    """
    # BUG FIX: the original never assigned the prompt's answer, so
    # `save_csv_file` was an undefined name, and it offered a single
    # bogus "Y,N" choice that could never equal "y".
    save_csv_file = questionary.rawselect(
        "Do you want to save the qualifying loans to the CSV file?",
        choices=["y", "n"],
    ).ask()
    if save_csv_file == "y":
        # BUG FIX: `raw_input` is Python 2 only; use `input` in Python 3.
        csvpath = input("Enter the path to CSV file ? ").lower()
        # BUG FIX: fixed the `qualiying_loans` typo.
        save_csv(csvpath, qualifying_loans)
    else:
        print("Not saving to csv")
def ask_model_type(self):
    """Asks to chose a model amongst different model types that are
    available in sciwing.

    Returns
    -------
    str
        Model type chosen by the user.
    """
    available_types = self.return_model_type_choices()
    question = questionary.rawselect(
        "We have the following trained models. Chose one",
        qmark="❓",
        choices=available_types,
    )
    return question.ask()
def ask_deletion() -> str:
    """Ask for confirmation before a deletion, since it also decides
    whether the local copy of the folder is removed.

    Returns
    -------
    str
        A yes or no answer to the question.
    """
    answer = questionary.rawselect(
        "Do you also want to delete the file locally. Caution! File will be removed locally",
        qmark="❓",
        choices=[Choice("yes"), Choice("no")],
    ).ask()
    return answer
def ask_pystyle(**kwargs):
    """Prompt the user with a styled rawselect menu and return the answer."""
    options = [
        "Order a pizza",
        "Make a reservation",
        Separator(),
        "Ask opening hours",
        "Talk to the receptionist",
    ]
    # Build the question object, then prompt the user for an answer.
    return questionary.rawselect(
        "What do you want to do?",
        choices=options,
        style=custom_style_dope,
        **kwargs,
    ).ask()
def run_prompt(config: Config, session: Session, old_album: Album, new_album: Album) -> Optional[Album]:
    """Runs the interactive prompt for the given album changes.

    Args:
        config: Moe config.
        session: Current db session.
        old_album: Original album to be added.
        new_album: New album with all metadata changes. Will be compared
            against ``old_album``.

    Returns:
        The album to be added to the library.
    """
    # Nothing changed: no prompt needed.
    if old_album == new_album:
        return old_album

    old_album.merge(new_album.get_existing(session), overwrite_album_info=False)
    print(_fmt_album_changes(old_album, new_album))  # noqa: WPS421

    # Let plugins register their prompt choices, then order them by key.
    prompt_choices: List[PromptChoice] = []
    config.plugin_manager.hook.add_prompt_choice(prompt_choices=prompt_choices)
    prompt_choices.sort(key=operator.attrgetter("shortcut_key"))

    questionary_choices: List[questionary.Choice] = [
        questionary.Choice(
            title=choice.title,
            shortcut_key=choice.shortcut_key,
            value=choice.func,
        )
        for choice in prompt_choices
    ]

    chosen_func = questionary.rawselect(
        "What do you want to do?", choices=questionary_choices).ask()
    if not chosen_func:
        return None
    return chosen_func(
        config=config,
        session=session,
        old_album=old_album,
        new_album=new_album,
    )
def ask_generate_report_or_interact():
    """Ask the user to choose between interacting with the model or
    generating the report for all the experiments.

    Returns
    -------
    str
        Choice of the user as a string.
    """
    question = questionary.rawselect(
        "What would you like to do ",
        qmark="❓",
        choices=[
            Choice("Interact with model", "interact"),
            Choice("Generate report (for all experiments)", "gen-report"),
        ],
    )
    return question.ask()
import questionary
from examples import custom_style_dope

if __name__ == '__main__':
    # One example of each basic prompt type.
    questionary.text("What's your first name").ask()
    questionary.password("What's your secret?").ask()
    questionary.confirm("Are you amazed?").ask()

    menu = ['Order a pizza', 'Make a reservation', 'Ask for opening hours']
    questionary.select("What do you want to do?", choices=menu).ask()
    questionary.rawselect("What do you want to do?", choices=menu).ask()

    questionary.checkbox(
        'Select toppings',
        choices=["foo", "bar", 'bazz'],
        style=custom_style_dope,
    ).ask()
def interact(self):
    """Interact with the user to explore different models.

    This method provides various options for exploration of the
    different models.

    - ``See-Confusion-Matrix`` shows the confusion matrix on the test dataset.
    - ``See-Examples-of-Classification`` is to explore correct and
      mis-classifications. You can provide two class numbers as in, ``2 3``
      and it shows examples in the test dataset where text that belong to
      class ``2`` is classified as class ``3``.
    - ``See-prf-table`` shows the precision recall and fmeasure per class.
    - ``See-text`` - Manually enter text and look at the classification results.

    Returns
    -------
    None
    """
    exp_dir = self.get_experiment_choice()
    exp_dir_path = pathlib.Path(exp_dir)
    build_inference = self.model_type2inf_func[self.model_type_answer]
    client = build_inference(exp_dir)
    client.run_test()

    while True:
        menu = [
            Choice("See-Confusion-Matrix"),
            Choice("See-examples-of-Classifications"),
            Choice("See-prf-table"),
            Choice(title="Enter text ", value="enter_text"),
            Choice("exit"),
        ]
        # The official semeval evaluation only applies to the ScienceIE tagger.
        if self.model_type_answer == "lstm-crf-scienceie-tagger":
            menu.append(Choice("official-results", "semeval_official_results"))

        answer = questionary.rawselect(
            "What would you like to do now", qmark="❓", choices=menu).ask()

        if answer == "See-Confusion-Matrix":
            client.print_confusion_matrix()
        elif answer == "See-examples-of-Classifications":
            pair_str = questionary.text(
                "Enter Two Classes separated by a space. \n [Hint: 1 2]").ask()
            parsed = [int(part) for part in pair_str.split()]
            first_class, second_class = parsed[0], parsed[1]
            sentences = client.get_misclassified_sentences(
                first_class, second_class)
            self.msg_printer.divider(
                f"Sentences with class {first_class} classified as {second_class}"
                .capitalize())
            for sentence in sentences:
                print(sentence)
            self.msg_printer.divider("")
        elif answer == "See-prf-table":
            client.report_metrics()
        elif answer == "enter_text":
            text = questionary.text("Enter Text: ").ask()
            tagged_string = client.on_user_input(text)
            print(tagged_string)
        elif answer == "semeval_official_results":
            dev_folder = pathlib.Path(SCIENCE_IE_DEV_FOLDER)
            pred_folder = pathlib.Path(
                REPORTS_DIR, f"science_ie_{exp_dir_path.stem}_results")
            if not pred_folder.is_dir():
                pred_folder.mkdir()
            client.generate_predict_folder(
                dev_folder=dev_folder, pred_folder=pred_folder)
            calculateMeasures(
                folder_gold=str(dev_folder),
                folder_pred=str(pred_folder),
                remove_anno="rel",
            )
        elif answer == "exit":
            self.msg_printer.text("See you again!")
            exit(0)
import questionary

if __name__ == "__main__":
    # Prompt for an action; fall back to "do nothing" if the user cancels.
    choice = questionary.rawselect(
        "What do you want to do?",
        choices=["Order a pizza", "Make a reservation", "Ask for opening hours"],
    ).ask()
    action = choice or "do nothing"
    print(f"Sorry, I can't {action}. Bye! 🙅")
def interact(self):
    """Interact with the user to explore different models.

    This method provides various options for exploration of the
    different models.

    - ``See-Confusion-Matrix`` shows the confusion matrix on the test dataset.
    - ``See-Examples-of-Classification`` is to explore correct and
      mis-classifications. You can provide two class numbers as in, ``2 3``
      and it shows examples in the test dataset where text that belong to
      class ``2`` is classified as class ``3``.
    - ``See-prf-table`` shows the precision recall and fmeasure per class.
    - ``See-text`` - Manually enter text and look at the classification results.
    """
    self.infer_obj.run_test()
    while True:
        choices = [
            Choice("See-Confusion-Matrix"),
            Choice("See-examples-of-Classifications"),
            Choice("See-prf-table"),
            Choice(title="Enter text ", value="enter_text"),
            Choice(
                title="If this is ScienceIE chose this to generate results",
                value="science-ie-official-results",
            ),
            Choice("exit"),
        ]
        interaction_choice = rawselect("What would you like to do now",
                                       qmark="❓",
                                       choices=choices).ask()
        if interaction_choice == "See-Confusion-Matrix":
            self.infer_obj.print_confusion_matrix()
        elif interaction_choice == "See-examples-of-Classifications":
            misclassification_choice = ask_text(
                "Enter Two Classes separated by a space. \n [Hint: 1 2]").ask()
            two_classes = [
                int(class_) for class_ in misclassification_choice.split()
            ]
            first_class, second_class = two_classes[0], two_classes[1]
            self.infer_obj.get_misclassified_sentences(
                first_class, second_class)
        elif interaction_choice == "See-prf-table":
            self.infer_obj.report_metrics()
        elif interaction_choice == "enter_text":
            text = ask_text("Enter Text: ").ask()
            tagged_string = self.infer_obj.on_user_input(text)
            print(tagged_string)
        # BUG FIX: the menu entry above carries the value
        # "science-ie-official-results", but this branch originally
        # compared against "semeval_official_results" and was unreachable.
        elif interaction_choice == "science-ie-official-results":
            dev_folder = pathlib.Path(SCIENCE_IE_DEV_FOLDER)
            # BUG FIX: the original never called .ask(), so `pred_folder`
            # held a Question object rather than the user's answer.
            pred_folder = ask_text(
                message="Enter the directory path for storing results").ask()
            pred_folder = pathlib.Path(pred_folder)
            if not pred_folder.is_dir():
                pred_folder.mkdir()
            self.infer_obj.generate_predict_folder(dev_folder=dev_folder,
                                                   pred_folder=pred_folder)
            calculateMeasures(
                folder_gold=str(dev_folder),
                folder_pred=str(pred_folder),
                remove_anno="rel",
            )
        elif interaction_choice == "exit":
            self.msg_printer.text("See you again!")
            exit(0)