def palette_menu():
    """Collect the palette-method settings for the image converter.

    Runs the shared conversion prompts first, then asks for the sample grid
    size, the number of palette colors per sample, and whether to rescale
    the image before sampling. Returns the combined answers dict.
    """
    answers = convert_shared_menu()
    sample_factor = cutie.get_number(
        '-= Sample grid major dimension (integer):',
        min_value=1,
        max_value=999,
        allow_float=False)
    print()
    palette_size = cutie.get_number(
        '-= Number of Palette Colors to Return per Sample (8 max, enter 0 for automatic selection):',
        min_value=0,
        max_value=8,
        allow_float=False)
    print()
    palette_rescale = cutie.prompt_yes_or_no(
        '-= Rescale Image before palette sample (reduces processing time)?',
        yes_text="Yes, rescale",
        no_text="No, use original image dimensions",
        default_is_yes=True,
        selected_prefix=CON_P,
        deselected_prefix=DECON_P)
    answers['sample_factor'] = sample_factor
    answers['palette_size'] = palette_size
    answers['palette_rescale'] = palette_rescale
    return answers
def main(self):
    """Entry point: clear the screen, show the banners, and open the app list.

    Prompts the user to continue; on a "yes" answer hands control to
    self._prompt_app_list().
    """
    run('clear')
    for banner in (self._WELCOME, self._GUIDE):
        print(banner)
    question = "Do you want to {}?".format(self._stat.colored('Yellow', 'continue'))
    wants_to_continue = cutie.prompt_yes_or_no(
        question,
        selected_prefix=self._stat.colored('Yellow', ' ▶ '),
        deselected_prefix=' ▷ ')
    if wants_to_continue:
        self._prompt_app_list()
def main():
    """Interactive demo walking through cutie's prompt widgets."""
    if not cutie.prompt_yes_or_no('Are you brave enough to continue?'):
        return
    # Selectable names, interleaved with non-selectable caption rows.
    names = [
        'Kings:',
        'Arthur, King of the Britons',
        'Knights of the Round Table:',
        'Sir Lancelot the Brave',
        'Sir Robin the Not-Quite-So-Brave-as-Sir-Lancelot',
        'Sir Bedevere the Wise',
        'Sir Galahad the Pure',
        'Swedish captions:',
        'Møøse'
    ]
    caption_rows = [0, 2, 7]
    chosen = cutie.select(names, caption_indices=caption_rows, selected_index=8)
    name = names[chosen]
    print(f'Welcome, {name}')
    # Get an integer greater or equal to 0.
    age = cutie.get_number('What is your age?', min_value=0, allow_float=False)
    nemeses_options = [
        'The French',
        'The Police',
        'The Knights Who Say Ni',
        'Women',
        'The Black Knight',
        'The Bridge Keeper',
        'Especially terrifying:',
        'The Rabbit of Caerbannog',
    ]
    print('Choose your nemeses')
    # Multi-select returns the indices of the chosen rows.
    picked = cutie.select_multiple(nemeses_options, caption_indices=[6])
    nemeses = [option for index, option in enumerate(nemeses_options)
               if index in picked]
    # Read input without echoing it.
    quest = cutie.secure_input('What is your quest?')
    print(f'{name}\'s quest (who is {age}) is {quest}.')
    if not nemeses:
        print('He has no nemesis.')
    elif len(nemeses) == 1:
        print(f'His nemesis is {nemeses[0]}.')
    else:
        print(f'His nemeses are {" and ".join(nemeses)}.')
def convert_shared_menu():
    """Run the prompts shared by every image-conversion menu.

    Returns a dict holding the 'autoscale' choice plus the keys contributed
    by map_size_menu() for that choice.
    """
    autoscale = cutie.prompt_yes_or_no(
        'Autoscale with the map\'s major dimension?',
        yes_text="Yes, autoscale",
        no_text="No, I will provide two dimensions",
        default_is_yes=True,
        selected_prefix=CON_P,
        deselected_prefix=DECON_P)
    print()
    answers = {'autoscale': autoscale}
    answers.update(map_size_menu(autoscale))
    return answers
def manage_playlists(user):
    """List, add, and remove playlists, then persist them to .config.ini.

    Parameters
    ----------
    user : user object
        Object containing all user data.
    """
    user.printPlaylists()
    for question, action in (
            ('Would you like to remove a playlist?', user.removePlaylists),
            ('Would you like to add a playlist?', user.addPlaylists)):
        if cutie.prompt_yes_or_no(question):
            action()
    user.printPlaylists()
    # Write the updated playlist list back into the config file.
    parser = ConfigParser()
    parser.read('.config.ini')
    parser['spotify']['playlist_id'] = user.getPlaylistsAsString()
    with open('.config.ini', 'w') as config_file:
        parser.write(config_file)
def main():
    """Small interactive demo of cutie: select, number input, secure input."""
    if not cutie.prompt_yes_or_no('Are you brave enough to continue?'):
        return
    names = [
        'Arthur, King of the Britons',
        'Sir Lancelot the Brave',
        'Sir Robin the Not-Quite-So-Brave-as-Sir-Lancelot',
        'Sir Bedevere the Wise',
        'Sir Galahad the Pure',
        'Møøse'
    ]
    selection = cutie.select(names, selected_index=5)
    name = names[selection]
    print(f'Welcome, {name}')
    age = cutie.get_number('What is your age?', min_value=0, allow_float=False)
    quest = cutie.secure_input('What is your quest?')
    print(f'{name}\'s quest (who is {age}) is {quest}.')
def main():
    """Download PushShift submission dumps for a date range and load them into sqlite."""
    logfile_path = "download_pushshift_dumps.log"
    # Logger will write messages using tqdm.write
    setup_logger_tqdm(logfile_path)
    args = parser.parse_args()

    # Periods arrive as "month,year" strings on the command line.
    month, year = map(int, args.start_period.split(","))
    start_date = datetime.datetime(year, month, 1)
    if args.finish_period:
        month, year = map(int, args.finish_period.split(","))
        end_date = datetime.datetime(year, month, 1)
    else:
        end_date = datetime.datetime.now()

    logger.info("Running Script - PushShift submission dumps to sqlite")
    logger.info("Downloading and processing dumps in the following range:")
    logger.info(start_date.strftime("Start Period: %m-%Y"))
    logger.info(end_date.strftime("End Period: %m-%Y"))

    dumps_directory = os.path.join(args.output_directory, "dumps")
    if os.path.isdir(dumps_directory):
        message = f"Directory '{dumps_directory}' already exists, if there are done files" \
                  " in the directory then these particular months will be skipped. Delete" \
                  " these files or the directory to avoid this."
        logger.info(message)
        if not cutie.prompt_yes_or_no('Do you want to continue?'):
            sys.exit(0)
    os.makedirs(dumps_directory, exist_ok=True)

    logger.info("Building PushShift submission dump file list...")
    url_list = build_file_list(start_date, end_date)

    logger.info("Getting sha256sums")
    sha256sums = get_sha256sums()

    # Download and process each monthly dump in order.
    logger.info("Commencing download and processing into sqlite.")
    results = [
        reddit_processing(url, sha256sums, dumps_directory, args.keep_dumps)
        for url in url_list
    ]
def set_cfg(self): print("\nselect configurations to write\n") # hide udp settings if connected by udp. otherwise you can break the connection. or should we allow it? #skip_indices = UDP_FIELD_INDICES if self.connection_info["type"] == "UDP" else [] #check if it has odometer port or not, then show/hide in options field_names = CFG_FIELD_NAMES[:] field_codes = CFG_FIELD_CODES[:] if not self.has_odo_port: ind = field_codes.index('rport3') field_names.pop(ind) field_codes.pop(ind) options = field_names + ["cancel"] selected_index = cutie.select(options) if options[selected_index] == "cancel": return args = {} name, code = field_names[selected_index], field_codes[selected_index] if code == "orn": # special case: choose between two common options or choose to enter it value = self.select_orientation() elif code in CFG_VALUE_OPTIONS: print("\nselect " + name) options = CFG_VALUE_OPTIONS[code] value = str(options[cutie.select(options)]).encode() else: print("\nenter value for " + name + " " + CFG_FIELD_EXAMPLES[code]) value = input().encode() args[code] = value #if connected by udp, changing udp settings can disconnect - give warning if code in UDP_FIELDS and self.connection_info["type"] == "UDP": change_anyway = cutie.prompt_yes_or_no("Changing UDP settings while connected by UDP may close the connection. Change anyway?") if not change_anyway: return #if setting odometer unit, first set odometer to on, then set the unit if code == "odo": args2 = {"odo": b'on'} resp = self.retry_command(method=self.board.set_cfg_flash, args=[args2], response_types=[b'CFG', b'ERR']) resp = self.retry_command(method=self.board.set_cfg_flash, args=[args], response_types=[b'CFG', b'ERR']) if not proper_response(resp, b'CFG'): show_and_pause("") # proper_response already shows error, just pause to see it.
def start_session():
    """Run a practice loop for one concept, then print session statistics."""
    global token, responses
    questions = 0
    concept = input("Enter the concept you want to practice: ")
    # Keep serving questions until the user declines to continue.
    keep_going = True
    while keep_going:
        next_question(concept)
        questions += 1
        keep_going = cutie.prompt_yes_or_no('Continue?')
    # Basic statistics: each response records the given and correct answers.
    correct_count = sum(1 for obj in responses if obj['response'] == obj['correct'])
    print('Questions answered:', questions)
    print('Correctly answered:', correct_count)
def start_session():
    """Run a question loop, print statistics, and report the session to the server.

    Uses the module-level ``token`` and ``responses`` globals; each entry of
    ``responses`` is expected to carry 'response' and 'correct' keys.
    """
    global token, responses
    questions = 0
    # Continue showing more questions as long as user wants
    while True:
        next_question()
        questions += 1
        if not cutie.prompt_yes_or_no('Continue?'):
            break
    # Show basic statistics.
    # FIX: the original called reduce() with a one-argument lambda, which
    # raises TypeError for more than one response and never counts anything.
    # sum() over the boolean comparisons yields the number of correct answers
    # (and matches the sibling start_session implementation in this codebase).
    print('Questions answered:', questions)
    print('Correctly answered:',
          sum(obj['response'] == obj['correct'] for obj in responses))
    # Pass data to server
    r = requests.post('http://localhost:5000/api/session/end', json={
        'token': token,
        'responses': responses
    })
def table_2_work(imagenette_path, step_3_batch_size):
    """Run the Table 2 (Imagenette) experiment pipeline end to end.

    Steps: (1) train/reuse the Table 1 marking network, (2) mark images per
    percentage, (3) train a target resnet18 from scratch per percentage,
    (4) compute p-values, (5) generate the table.

    Parameters:
        imagenette_path: root directory containing 'train' and 'val' folders.
        step_3_batch_size: batch size used for target-network training.
    """
    logger.info("")
    logger.info("Table 2 Preparation Commencing")
    logger.info("=============================")

    # NOTE(review): the second assignment immediately overwrites the first
    # ([1, 2, 5, 10, 20]) - looks like a smoke-test override left in; confirm
    # which set of percentages is intended before a full run.
    marking_percentages = [1, 2, 5, 10, 20]
    marking_percentages = [0.1, 0.2, 0.3]

    train_images_path = os.path.join(imagenette_path, "train")
    test_images_path = os.path.join(imagenette_path, "val")
    p_values_file = "experiments/table2_imagenette/p_values.pth"

    logger.info("")
    logger.info("Step 1 - Train Marking Network")
    logger.info("------------------------------")
    # Reuses marking network from Table 1 if available
    optimizer = lambda x: torch.optim.AdamW(x)
    epochs = 60
    output_directory = os.path.join("experiments", "table1_imagenette",
                                    "marking_network")
    checkpoint_path = os.path.join(output_directory, "checkpoint.pth")  # Used later
    tensorboard_log_directory = os.path.join("runs", "table1_imagenette",
                                             "marking_network")
    resnet18_imagenette.main(optimizer, train_images_path, test_images_path,
                             output_directory, tensorboard_log_directory,
                             epochs=epochs)
    # Reload the trained weights into a fresh (untrained) resnet18.
    marking_network = torchvision.models.resnet18(pretrained=False,
                                                  num_classes=10)
    marking_network_checkpoint = torch.load(checkpoint_path)
    marking_network.load_state_dict(
        marking_network_checkpoint["model_state_dict"])

    logger.info("")
    logger.info("Step 2 - Image Marking")
    logger.info("----------------------")
    # Reuses marked images from Table 1 if available
    training_set = torchvision.datasets.ImageFolder(train_images_path)
    for marking_percentage in marking_percentages:
        experiment_directory = os.path.join("experiments", "table1_imagenette",
                                            f"{marking_percentage}_percent")
        # A 'marking.complete' sentinel file marks an already-finished run.
        if os.path.exists(
                os.path.join(experiment_directory, "marking.complete")):
            message = f"Marking step already completed for {marking_percentage}%. Do you want to restart this part of " \
                      "the experiment?"
            if not cutie.prompt_yes_or_no(
                    message, yes_text="Restart", no_text="Skip marking step"):
                continue
        tensorboard_log_directory = os.path.join(
            "runs", "table1_imagenette", f"{marking_percentage}_percent",
            "marking")
        # Wipe any partial output before re-running the marking step.
        shutil.rmtree(experiment_directory, ignore_errors=True)
        shutil.rmtree(tensorboard_log_directory, ignore_errors=True)
        do_marking_run_multiclass(marking_percentage, experiment_directory,
                                  tensorboard_log_directory, marking_network,
                                  training_set)

    logger.info("")
    logger.info("Step 3 - Training Target Networks")
    logger.info("---------------------------------")
    for marking_percentage in marking_percentages:
        marked_images_directory = os.path.join(
            "experiments", "table1_imagenette",
            f"{marking_percentage}_percent", "marked_images")
        output_directory = os.path.join("experiments", "table2_imagenette",
                                        f"{marking_percentage}_percent",
                                        "marked_classifier")
        tensorboard_log_directory = os.path.join(
            "runs", "table2_imagenette", f"{marking_percentage}_percent",
            "target")
        # Train resnet18 from scratch
        model = torchvision.models.resnet18(pretrained=False, num_classes=10)
        optimizer = lambda model: torch.optim.AdamW(model.parameters())
        epochs = 60
        dataloader_func = partial(
            train_marked_classifier.get_data_loaders_imagenette,
            train_images_path, test_images_path, marked_images_directory,
            batch_size=step_3_batch_size)
        train_marked_classifier.main(dataloader_func, model, optimizer,
                                     output_directory,
                                     tensorboard_log_directory, epochs=epochs)

    logger.info("")
    logger.info("Step 4 - Calculating p-values")
    logger.info("-----------------------------")
    p_values = calculate_p_values(marking_percentages, checkpoint_path, 2, True)
    torch.save(p_values, p_values_file)
    # Round-trip through disk so later steps read exactly what was persisted.
    p_values = torch.load(p_values_file)

    logger.info("")
    logger.info("Step 5 - Generating Table 2")
    logger.info("---------------------------")
    generate_table_2(marking_percentages, p_values, checkpoint_path)
def main(imagenet_path, step_3_batch_size, mp_args):
    """Run the Table 1 (ImageNet) experiment pipeline end to end.

    Steps: (1) download a pretrained resnet18 as the marking network,
    (2) mark images per percentage, (3) retrain the classifier head per
    percentage with DDP via mp.spawn, (4) compute p-values, (5) generate
    the table.

    Parameters:
        imagenet_path: root directory containing 'train' and 'val' folders.
        step_3_batch_size: batch size for target training and test loading.
        mp_args: multiprocessing/DDP settings; must expose a 'gpus' attribute.
    """
    logger.info("Table 1 Preparation Commencing")
    logger.info("=============================")
    marking_percentages = [1, 2, 5, 10]
    train_images_path = os.path.join(imagenet_path, "train")
    test_images_path = os.path.join(imagenet_path, "val")
    p_values_file = "experiments/table1_imagenet/p_values.pth"

    logger.info("")
    logger.info("Step 1 - Download Marking Network")
    logger.info("------------------------------")
    marking_network = torchvision.models.resnet18(pretrained=True)

    logger.info("")
    logger.info("Step 2 - Image Marking")
    logger.info("----------------------")
    # Parallelized separately
    training_set = torchvision.datasets.ImageFolder(train_images_path)
    for marking_percentage in marking_percentages:
        experiment_directory = os.path.join("experiments", "table1_imagenet",
                                            f"{marking_percentage}_percent")
        # A 'marking.complete' sentinel file marks an already-finished run.
        if os.path.exists(
                os.path.join(experiment_directory, "marking.complete")):
            message = f"Marking step already completed for {marking_percentage}%. Do you want to restart this part of " \
                      "the experiment?"
            if not cutie.prompt_yes_or_no(
                    message, yes_text="Restart", no_text="Skip marking step"):
                continue
        #logger.info("SKIPPING MARKING - Fix this up before sending to DGX")
        #continue
        tensorboard_log_directory = os.path.join(
            "runs", "table1_imagenet", f"{marking_percentage}_percent",
            "marking")
        # Wipe any partial output before re-running the marking step.
        shutil.rmtree(experiment_directory, ignore_errors=True)
        shutil.rmtree(tensorboard_log_directory, ignore_errors=True)
        do_marking_run_multiclass(marking_percentage, experiment_directory,
                                  tensorboard_log_directory, marking_network,
                                  training_set, mp_args)

    logger.info("")
    logger.info("Step 3 - Training Target Networks")
    logger.info("---------------------------------")
    # Parallelized with DDP
    for marking_percentage in marking_percentages:
        marked_images_directory = os.path.join(
            "experiments", "table1_imagenet", f"{marking_percentage}_percent",
            "marked_images")
        output_directory = os.path.join("experiments", "table1_imagenet",
                                        f"{marking_percentage}_percent",
                                        "marked_classifier")
        tensorboard_log_directory = os.path.join(
            "runs", "table1_imagenet", f"{marking_percentage}_percent",
            "target")
        # Load a new pretrained resnet18
        model = torchvision.models.resnet18(pretrained=True)
        # Retrain the fully connected layer only
        for param in model.parameters():
            param.requires_grad = False
        model.fc = nn.Linear(model.fc.in_features, len(training_set.classes))
        optimizer = train_marked_classifier_dist.adamw_logistic
        epochs = 20
        dataloader_func = partial(
            train_marked_classifier_dist.get_data_loaders_imagenet,
            train_images_path, test_images_path, marked_images_directory,
            step_3_batch_size, 1)
        # One DDP worker per GPU; train_marked_classifier_dist.main is the
        # per-process entry point.
        train_args = (mp_args, dataloader_func, model, optimizer,
                      output_directory, tensorboard_log_directory, epochs)
        mp.spawn(train_marked_classifier_dist.main, nprocs=mp_args.gpus,
                 args=train_args)
        #(dataloader_func, model, optimizer, output_directory, tensorboard_log_directory,
        # epochs=epochs)

    logger.info("")
    logger.info("Step 4 - Calculating p-values")
    logger.info("-----------------------------")
    test_set_loader = train_marked_classifier_dist.get_imagenet_test_loader(
        test_images_path, NORMALIZE_IMAGENET, batch_size=step_3_batch_size)
    p_values = calculate_p_values(marking_percentages, step_3_batch_size)
    torch.save(p_values, p_values_file)
    # Round-trip through disk so later steps read exactly what was persisted.
    p_values = torch.load(p_values_file)

    logger.info("")
    logger.info("Step 5 - Generating Table 1")
    logger.info("---------------------------")
    # Get Vanilla Accuracy
    marking_network = torchvision.models.resnet18(pretrained=True)
    marking_network.to("cuda")
    vanilla_accuracy = train_marked_classifier_dist.test_model(
        "cuda", marking_network, test_set_loader)
    # Finish Up
    generate_table_1(marking_percentages, p_values, vanilla_accuracy)
def askYN(*args, **kwargs):
    """Yes/no prompt that auto-answers "Yes" when prompts are globally skipped.

    All arguments are forwarded unchanged to cutie.prompt_yes_or_no.
    """
    if not Config.skipYNPrompts:
        return cutie.prompt_yes_or_no(*args, **kwargs)
    # Prompt skipping is enabled: echo the question with the assumed answer.
    print(*args, "Yes")
    return True
def start_ntrip(self):
    """Prompt for NTRIP caster settings (saved or manual) and start the NTRIP connection.

    Loops until the io thread reports a successful connection (or the user
    cancels). Settings entered manually are saved for reuse.
    """
    success = False
    clear_screen()
    while not success:
        print("Select NTRIP:")
        ntrip_settings = load_ntrip_settings()
        options = ["Manual", "cancel"]
        captions = []
        if ntrip_settings:
            #saved_string = "Saved: " + str(ntrip_settings)
            # Show each saved key/value as a non-selectable caption row under "Saved: ".
            captions = range(1, 1 + len(ntrip_settings))
            saved_vals = ["\t" + str(k) + ": " + str(ntrip_settings[k])
                          for k in ntrip_settings]
            options = ["Saved: "] + saved_vals + options
        selected = options[cutie.select(options, caption_indices=captions)]
        if selected == "cancel":
            return
        elif selected == "Manual":
            caster = input("caster:")
            port = int(cutie.get_number("port:"))
            mountpoint = input("mountpoint:")
            # FIX(review): these three prompt lines were corrupted/redacted
            # ("******") in the retrieved source and were not valid Python.
            # Reconstructed from the variables consumed below - confirm the
            # original prompt strings against upstream.
            username = input("username:")
            password = input("password:")
            send_gga = cutie.prompt_yes_or_no("send gga? (requires gps connection)")
            #TODO - do regular cutie select so style doesn't change?
            ntrip_settings = {"caster": caster, "port": port,
                              "mountpoint": mountpoint, "username": username,
                              "password": password, "gga": send_gga}
            save_ntrip_settings(ntrip_settings)  # TODO - save later on after confirming it works?
        else:  #Saved
            #TODO - if any of these missing, can't load from save - check first and hide saved option?
            caster = ntrip_settings["caster"]
            port = ntrip_settings["port"]
            mountpoint = ntrip_settings["mountpoint"]
            username = ntrip_settings["username"]
            password = ntrip_settings["password"]
            send_gga = ntrip_settings["gga"]
        port = int(port)
        mountpoint = mountpoint.encode()
        #ntrip_target = (caster, port)
        self.ntrip_ip.value = caster.encode()
        self.ntrip_port.value = port
        self.ntrip_gga.value = send_gga  # seems to convert True/False to 1/0

        # _______NTRIP Connection Configs_______
        userAgent = b'NTRIP Anello Client'
        ntrip_version = 1
        ntrip_auth = "Basic"
        #TODO - add more options for these
        if ntrip_version == 1 and ntrip_auth == "Basic":
            auth_str = username + ":" + password
            auth_64 = base64.b64encode(auth_str.encode("ascii"))
            self.ntrip_req.value = b'GET /' + mountpoint + b' HTTP/1.0\r\nUser-Agent: ' + userAgent + b'\r\nAuthorization: Basic ' + auth_64 + b'\r\n\r\n'
        else:
            # TODO make request structure for NTRIP v2, other auth options.
            print("not implemented: version = " + str(ntrip_version) + ", auth = " + str(ntrip_auth))
            self.ntrip_req.value = b''  # will work as False for conditions
        #signal io_thread to connect the ntrip.
        clear_screen()
        self.ntrip_on.value = 1
        self.ntrip_start.value = 1
        #wait for success or fail message
        while self.ntrip_succeed.value == 0:
            continue  # should it time out eventually?
        success = (self.ntrip_succeed.value == 1)  # 0 waiting, 1 succeed, 2 fail
        self.ntrip_succeed.value = 0
        debug_print(success)
def main_menu(img_path=None):
    """Show the top-level menu and collect every answer needed for processing.

    Parameters:
        img_path: optional image file path; when None the user is prompted.

    Returns:
        dict of answers ('op_type', 'img_path', menu-specific keys, 'debug',
        'confirm') driving the selected conversion/tool pipeline. Exits the
        program if the user declines to proceed.
    """
    print()
    print("▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬")
    print(" Image to Shmeppy JSON Converter ")
    print("▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬▬")
    # initialize answers
    answers = {'img_path': img_path, 'map_dim_y': None}
    # Get the op_type
    print(" ==MENU OPTIONS==")
    # List of op_types to select from, including some captions
    op_types = [
        '1. Image Converter',
        ' -Turns an image into a Shmeppy map (.json) - Fills Only, No Edges',
        'Palette Method - sharp tiles, slow',
        'Filter Resize Method- faster, blended tiles',
        ' --------',
        '2. Map Tools',
        ' -Work with Shmeppy map (.json) files',
        'Merge .json files - Fills, Edges, Tokens',
        'Fetch Tokens from .json',
        ' --------',
        '3. Other',
        'Tokenize (Silly - convert image to field of tokens)',
        'Help: Learn More',
        'Exit'
    ]
    # Indices of non-selectable caption rows in op_types.
    captions = [0, 1, 4, 5, 6, 9, 10]
    # Lower-cased so the startswith() dispatch below is case-insensitive.
    answers['op_type'] = op_types[cutie.select(
        op_types, caption_indices=captions, caption_prefix="",
        selected_prefix=SEL_P, deselected_prefix=DESEL_P)].lower()
    # check for early exits
    check_early_exit(answers)
    # check for image path
    if not img_path:
        answers['img_path'] = input("-= Image File Path/Name: ")
    # prompt process type questions
    print()
    if answers['op_type'].startswith('fil'):
        answers.update(filter_menu())
    elif answers['op_type'].startswith('tok'):
        # NOTE(review): tokenize reuses filter_menu() - presumably intentional
        # since tokenize is filter-based, but confirm it shouldn't have its
        # own menu.
        answers.update(filter_menu())
    elif answers['op_type'].startswith('pal'):
        answers.update(palette_menu())
    print()
    answers['debug'] = cutie.prompt_yes_or_no('Debug Mode? ',
                                              selected_prefix=CON_P,
                                              deselected_prefix=DECON_P)
    # confirm to proceed
    print()
    answers['confirm'] = cutie.prompt_yes_or_no('Proceed with Processing? ',
                                                default_is_yes=True,
                                                selected_prefix=CON_P,
                                                deselected_prefix=DECON_P)
    if not answers['confirm']:
        print("Processing terminated, exiting program.")
        exit(0)
    return answers
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import cutie import pprint if cutie.prompt_yes_or_no('Are you brave enough to continue?'): # List of names to select from, including some captions names = [ 'Kings:', 'Arthur, King of the Britons', 'Knights of the Round Table:', 'Sir Lancelot the Brave', 'Sir Robin the Not-Quite-So-Brave-as-Sir-Lancelot', 'Sir Bedevere the Wise', 'Sir Galahad the Pure', 'Swedish captions:', 'Møøse' ] # Names which are captions and thus not selectable captions = [0, 2, 7] # Get the name # name = names[cutie.select(names, caption_indices=captions, selected_index=8)] # print(f'Welcome, {name}') # get multiple selected = cutie.select_multiple(names, caption_indices=captions) pprint.pprint(selected) # Get an integer greater or equal to 0 age = cutie.get_number('What is your age?', min_value=0, allow_float=False) # Get input without showing it being typed
def run_shell(debug=False):
    """Run the CodeComb interactive shell.

    Optionally enables debug logging, runs first-time config and optional
    corpus re-indexing, then loops on search queries until the user types
    'exit'. Each query's results can be opened in an editor.

    Parameters:
        debug: when True, enable INFO-level logging.
    """
    ## Set debug mode
    if debug:
        logging.basicConfig(level=logging.INFO)
        logging.info("Debug mode on")
    ## If the config file not found
    if not os.path.exists(
            os.path.join(os.path.expanduser('~'), "codecomb_config.ini")):
        config_shell()
    ## User may want to re-index
    if cutie.prompt_yes_or_no(
            colored('Index Corpus ? (Use up/down keys) ', 'yellow')):
        #set_format()
        init_corpus()
        input(colored("Press Enter to continue...", "yellow"))
    ## Codecomb REPL
    clrscr()
    ## Title logo
    log('CodeComb', 'green', 'slant', True)
    log('Welcome to CodeComb!!', 'yellow')
    log('Start searching below...', 'yellow')
    user_input = ""
    while user_input != "exit":
        # Prompt-toolkit input with persistent history and auto-suggest.
        user_input = pt(
            'CCmb>',
            history=FileHistory('history.txt'),
            auto_suggest=AutoSuggestFromHistory(),
        )
        if len(user_input.strip()) > 0 and user_input != "exit":
            results = get_query_results_annoyindex(user_input, topn=10)
            # NOTE: the helper signals failure with the literal string "error".
            if results == "error":
                log("Keyword(s) not found!!", 'red')
            else:
                results = json.loads(results)
                logging.info(results)
                questions_list = [
                    res['name'] + "\t" + res['location'] for res in results
                ]
                questions_list.append('back')
                questions_list = [
                    colored(question, 'green') for question in questions_list
                ]
                ## Search results mode
                # Keep re-opening selections until the user picks the final
                # 'back' entry (index len(questions_list) - 1).
                answer = get_list_selection(questions_list)
                while (answer != (len(questions_list) - 1)):
                    answer_row = results[answer]
                    name, loc = answer_row['name'], answer_row['location']
                    logging.info(answer)
                    open_editor(name, loc)
                    clrscr()
                    answer = get_list_selection(questions_list, selected=answer)
    log("Exiting CodeComb ", "yellow")
def process_fresh():
    """Ask whether processing should be limited to tracks tagged [FRESH]."""
    answer = cutie.prompt_yes_or_no(
        'Would you like to only add tracks tagged as [FRESH]?')
    return answer
# Step 1 (continued) - train the marking network via resnet18cifar10.
epochs = 60
resnet18cifar10.main(experiment_name,
                     optimizer,
                     output_directory_root=output_directory_root,
                     epochs=epochs)

# Step 2 - Marking
# Reuse from Table 1 if available.
for marking_percentage in marking_percentages:
    experiment_directory = os.path.join("experiments", "table1",
                                        f"{marking_percentage}_percent")
    # A 'marking.complete' sentinel file marks an already-finished run.
    if os.path.exists(
            os.path.join(experiment_directory, "marking.complete")):
        message = f"Marking step already completed for {marking_percentage}%. Do you want to restart this part of " \
                  "the experiment?"
        if not cutie.prompt_yes_or_no(
                message, yes_text="Restart", no_text="Skip marking step"):
            continue
    tensorboard_log_directory = os.path.join(
        "runs", "table1", f"{marking_percentage}_percent", "marking")
    # Wipe any partial output before re-running the marking step.
    shutil.rmtree(experiment_directory, ignore_errors=True)
    shutil.rmtree(tensorboard_log_directory, ignore_errors=True)
    do_marking_run_multiclass(marking_percentage, experiment_directory,
                              tensorboard_log_directory, augment=False)

# Step 3 - Training Target Networks
for marking_percentage in marking_percentages:
    # do_training_run(f"{marking_percentage}_percent", augment=False)
    do_training_run(f"{marking_percentage}_percent", augment=True)