def main_menu():
    """Display the top-level menu and dispatch to the chosen sub-module.

    Re-displays itself (recursively) after invalid input and after each
    completed action that falls through; option 7 exits the process.
    """
    print("\n-----AUDITORY SHIFT CIPHER MAIN MENU-----")
    print("\n\tOptions:")
    print("\t1. Create Message")
    print("\t2. Encoder Settings")
    print("\t3. Generate Tones")
    print("\t4. Create Audio File")
    print("\t5. Decoder Settings")
    print("\t6. Decode Message")
    print("\t7. Exit")
    try:
        menu_choice = int(input("Please Select an Option: "))
    except ValueError:
        print("\tError: Value Must Be A Number.\n")
        # BUG FIX: must return here. The original fell through after the
        # recursive re-prompt, then read the unbound `menu_choice` and
        # raised NameError.
        return main_menu()
    if menu_choice == 1:
        message.main()
    elif menu_choice == 2:
        # settings_menu code 0 selects the encoder settings page
        settings.settings_menu(0)
    elif menu_choice == 3:
        encoder.main()
    elif menu_choice == 4:
        audio_file.main()
    elif menu_choice == 5:
        # settings_menu code 1 selects the decoder settings page
        settings.settings_menu(1)
    elif menu_choice == 6:
        decoder.main()
    elif menu_choice == 7:
        print("Goodbye...")
        sys.exit(0)
    else:
        print("\nInvalid Option...")
        main_menu()
def test_decodeFileTest(self, safe_input):
    """For every soundfont listed in 'sflist': decode it, stage the generated
    sample sources, and compile/upload the result to the attached Teensy.

    `safe_input` is the patched input mock; its side_effect queue feeds the
    interactive prompts inside decoder.main().
    """
    with open('sflist', 'r') as listing:
        for entry in listing:
            captured = StringIO()
            # Canned answers consumed by decoder.main()'s prompts.
            safe_input.side_effect = [1, 1, 2, 1, 2]
            soundfont = entry.strip()
            # NOTE(review): the original wrote '-o' 'Test' (adjacent string
            # literals), which concatenates to a single '-oTest' argument —
            # possibly a missing comma; kept as-is to preserve behavior.
            cli_args = ['-i', '../Soundfonts/' + soundfont, '-oTest']
            with redirect_stdout(captured):
                decoder.main(cli_args)
            self.assertIn("success", captured.getvalue())
            # Stage the freshly generated sample sources for the Arduino build.
            copyfile('./Test_samples.h', './AutoTest/Test_samples.h')
            copyfile('./Test_samples.cpp', './AutoTest/Test_samples.cpp')
            print("\nNow uploading " + entry + " to the Teensy...\n")
            check_output(arduino_upload_str, shell=True)
            print("\nCompiled and uploaded " + entry + " to Teensy successfully! Next test...\n")
def run_pipeline():
    '''Pipeline for classification of EGFR and ALK test use, result, and method.

    - reported: further classify reports that passed the rule-based filter
      as "reported"
    - positive: ascertain the test result (pos or neg) for reports that pass
      through the reported SVM as "reported"
    - method: ascertain the test method (FISH, MutAnalysis, Other) for reports
      that pass through the reported SVM as "reported"
    - insufficient: ascertain insuff vs unknown for reports labeled
      "not reported" by the rule-based and SVM reported classifiers
    - final class labels are written to FINAL_OUTPUT_DIRECTORY
    '''
    # Rule-based keyword filter: split instances into reported / not-reported.
    # BUG FIX: the original called open(VECTOR_FILE).readlines() without
    # closing the handle; all three files are now context-managed.
    with open(pos_inst, 'w') as pos_out, \
            open(neg_inst, 'w') as neg_out, \
            open(VECTOR_FILE, 'r') as vectors:
        for instance in vectors:
            vec = instance.strip().split('\t')
            if 'NO_KEYWORD_IN_TEXT' in vec:
                neg_out.write(vec[0] + '\tNotReported\n')
            else:
                pos_out.write(vec[0] + '\tReported\n')

    # Loop through SVM classifiers; "positive" and "negative" instances are
    # written to algorithm-specific directories so they can be used by
    # subsequent algorithms.
    for algorithm, label in ALGORITHM_ORDER:
        model_file = algorithm + os.sep + TRAIN_BATCH + '.pkl'
        with open(algorithm + os.sep + 'num_features.txt', 'r') as nf:
            num_features = int(nf.read().strip())
        # BUG FIX: was a Python 2 `print` statement (SyntaxError in py3, and
        # inconsistent with the rest of this file). A single formatted string
        # reproduces the original py2 comma-separated spacing exactly.
        print('classifying with model  %s - %d features'
              % (model_file, num_features))
        # Turn the text feature vector into an integer array.
        vector_to_array.main(TEST_NAME, algorithm, TRAIN_BATCH, VECTOR_FILE)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            fxn()
            decoder.main(num_features, model_file, algorithm, TRAIN_BATCH,
                         TEST_NAME, label)

    final_output.output_final_class_labels(TEST_NAME, FINAL_OUTPUT_DIRECTORY)
def translate(input):
    """Translate English text to Chinese with a tensor2tensor transformer.

    Writes `input` to 'input.en', configures the t2t decoder FLAGS, runs the
    decoder (which writes 'output.zh'), and returns the translated text.
    NOTE: parameter name `input` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    # Write the source text for the decoder to read.
    with open("input.en", "w+", encoding='utf-8') as src:
        src.write(input)

    FLAGS.data_dir = 't2t/data'
    FLAGS.problem = 'translate_enzh_wmt32k'
    FLAGS.model = 'transformer'
    FLAGS.hparams_set = 'transformer_big'
    FLAGS.output_dir = 't2t/train'
    FLAGS.decode_hparams = 'beam_size=1,alpha=0.1'
    FLAGS.decode_from_file = 'input.en'
    FLAGS.decode_to_file = 'output.zh'

    t2t_decoder.main(sys.argv)

    # BUG FIX: the original closed the already-closed input handle a second
    # time and leaked the output handle; `with` closes it deterministically.
    with open("output.zh", encoding='utf-8') as out:
        return out.read()
def click_encoder():
    """GUI button callback: compress the dropped image, then decompress it,
    updating the status/result labels along the way.

    Reads module-level `string` (the dropped image path/data — TODO confirm)
    and widgets `res`, `r_label`, `e_time_label`, `d_time_label`; publishes
    results through globals `r` and `time`.
    """
    # NOTE(review): rebinding global `time` shadows any module-level
    # `time` import — confirm this is intentional.
    global r, time
    if string == "":
        # Nothing was dropped onto the window yet.
        res['text'] = "You must drop an image first"
    else:
        res['text'] = "Đang nén..."  # "Compressing..."
        # encoder.main returns (compression ratio, elapsed time).
        r, time = encoder.main(string[0:])
        r_label['text'] = "Hệ số nén: " + str(r)  # "Compression ratio: "
        e_time_label['text'] = "Thời gian nén: " + str(time)  # "Compression time: "
        res['text'] = "Đang giải nén..."  # "Decompressing..."
        # decoder.main returns the decompression elapsed time.
        time = decoder.main()
        res['text'] = "Kết quả nén và giải nén: "  # "Compression and decompression result: "
        d_time_label['text'] = "Thời gian giải nén: " + str(time)  # "Decompression time: "
import scorer
import gflags
import logger

FLAGS = gflags.FLAGS

if __name__ == '__main__':
    # Parse command-line flags; exit with a usage message on error.
    try:
        argv = FLAGS(sys.argv)
    except gflags.FlagsError as e:
        print('%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS))
        sys.exit(1)

    # Locate the directory containing the running script.
    import __main__
    mydir = os.path.dirname(__main__.__file__)

    # Extract rules only if no previous unfiltered dump exists.
    if os.path.exists(FLAGS.rule_dump):
        logger.writeln('unfiltered rules exists: %s' % FLAGS.rule_dump)
    else:
        extractor.main()

    # Score the filtered rules against the input sentences.
    FLAGS.filter_file = FLAGS.input
    scorer.main()

    # Decode with the per-sentence grammar when available, otherwise the
    # filtered grammar; the monotonic glue grammar is always appended.
    glue_grammar = os.path.join(mydir, 'test-extractor/monotonic_glue.gr')
    primary = FLAGS.filtered if FLAGS.persent is None else FLAGS.persent
    FLAGS.grammars = [primary, glue_grammar]
    decoder.main()