def test_one_link():
    source = "Mickey Mouse"
    destination = "https://en.wikipedia.org/wiki/Albert_Einstein"
    true_source = "Mickey Mouse"
    true_destination = "Albert Einstein"
    source = parse_input(source)
    destination = parse_input(destination)
    assert source == true_source
    assert destination == true_destination
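# A minimal sketch of a parse_input consistent with the test above (hypothetical;
# the real implementation is not shown in this collection). It accepts either a
# plain article title or an English-Wikipedia URL and returns the article title
# with spaces restored.
def parse_input(value):
    prefix = "https://en.wikipedia.org/wiki/"
    if value.startswith(prefix):
        # Strip the URL prefix and turn underscores back into spaces.
        return value[len(prefix):].replace("_", " ")
    return value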
def main():
    if len(sys.argv) != 2:
        print("Usage: {} <path-to-input-file>".format(sys.argv[0]))
        exit(1)

    # Parse inputs
    input_f = sys.argv[1]
    # name of the current case
    fname = os.path.basename(input_f)
    print_sec("Running for case: {}".format(fname))
    if not os.path.isfile(input_f):
        raise FileNotFoundError(input_f)

    # Parse input - use serial form if it's there
    prob = parse_input(input_f)
    prob.kickstart()

    # Process
    print_ssec("computing...")
    out_ids, out_dists = assign(prob, prob.curr_positions.keys(),
                                {ride.id: ride for ride in prob.rides}, 0)
    print("out_ids: ", out_ids)
    print("out_dists: ", out_dists)
    print_("computations done.")

    # Export results
    outfile = "{}_{}".format(int(time.time()), fname)
    export_results(prob, outfile)
    print_("all done, exiting.")
def main():
    if len(sys.argv) != 2:
        print("Usage: {} <path-to-input-file>".format(sys.argv[0]))
        exit(1)

    # Parse inputs
    input_f = sys.argv[1]
    # name of the current case
    fname = os.path.basename(input_f)
    print_sec("Running for case: {}".format(fname))
    if not os.path.isfile(input_f):
        raise FileNotFoundError(input_f)

    # Parse input - use serial form if it's there
    prob = parse_input(input_f)

    # Process
    print_ssec("computing...")
    # TODO
    print_("computations done.")

    # Export results
    outfile = "{}_{}".format(int(time.time()), fname)
    export_results(prob, outfile)
    print_("all done, exiting.")
def ImpFile(ev):
    fn = tkFileDialog.Open(root, filetypes=[('*.txt files', '.txt')]).show()
    if fn == '':
        return
    textbox.delete('1.0', 'end')
    global input_context
    input_context = parse_input(open(fn, 'rt'))
    textbox.insert('1.0', 'Context imported successfully.')
def cv_analysis(args):
    print("##########################################################################")
    print("####               CV Curve Visualization and Analysis               ####")
    print("##########################################################################")
    print("\n")

    print("####                         Reading Input                           ####")
    inputData, smoothData, xlabel, ylabel_a, ylabel_q = parse_input.parse_input(args)
    print("\n")

    print("####                 Peak and integration analysis                   ####")
    if args['perform_analysis']:
        if args['perform_smooth']:
            valid_peak_infos = integrate_peak.integration(args, smoothData)
        else:
            valid_peak_infos = integrate_peak.integration(args, inputData)
    print("\n")

    print("####                         Plot CV curves                          ####")
    plot_cv.plot_cv_normalized_by_area(args, inputData, smoothData, xlabel, ylabel_a)
    if args['perform_analysis']:
        plot_cv.plot_cv_normalized_by_q(args, inputData, smoothData, xlabel,
                                        ylabel_q, valid_peak_infos)
    print("\n")

    print("##########################################################################")
    print("####                              Done!                               ####")
    print("##########################################################################")
def main(args=None):
    """Main function for Cyclus-Trailmap CLI"""
    p = make_parser()
    ns = p.parse_args(args=args)

    if ns.infile[0] is not None:
        commodity_dictionary = cd.build_commod_dictionary()
        facility_dictionary = pi.parse_input(ns.infile[0], commodity_dictionary)
    else:
        print('No input file given!')
def main(argv):
    """
    The main method.

    :param argv: the command line arguments
    """
    # parse the input and check for errors
    valid_args = parse_input.parse_input(argv)

    # check if all the files match
    if valid_args:
        check_all_files_match(valid_args)

    # terminate normally
    sys.exit(0)
def getInputToParse(self):
    # self.input_string = ""
    # self.parse_complete = False
    grammar_list = self.grammar_list[:]
    if len(self.grammar_list) == 0:
        self.showWarningMsg()
    else:
        if len(self.input_string) == 0:
            input_string, ok = QtGui.QInputDialog.getText(
                self.window, "Input", "Enter the input string to parse")
            if ok:
                self.input_string = str(input_string).strip()
        parse_info = parse_input(grammar_list, self.input_string)
        self.showParsing(parse_info)
def main():
    if len(sys.argv) != 2:
        print("Usage: {} <path-to-input-file>".format(sys.argv[0]))
        exit(1)

    # Parse inputs
    input_f = sys.argv[1]
    if not os.path.isfile(input_f):
        raise FileNotFoundError()

    problem = parse_input(input_f)

    # Process

    # Export results
    outfile = "{}_{}".format(int(time.time()), input_f)
    export_results(problem, outfile)
def get_args():
    data = []
    flags = {'c': False, 'i': False, 'b': False, 's': False}
    s = None
    for i in range(1, len(sys.argv)):
        arg = sys.argv[i].strip()
        if arg in ['-b', '-c', '-i', '-s']:
            flags[arg.replace('-', '')] = True
        elif not s:
            s = arg
        else:
            raise ValueError("unexpected extra argument: {}".format(arg))
    if not s:
        raise ValueError("missing input argument")
    data = parse_input(s)
    return data, flags
def prepare_input(config_file):
    # get simulation inputs from configuration file
    (lambd, mu, rho, min_x, max_x, min_y, max_y, min_z, max_z,
     N_x, N_y, N_z, t_0, t_f, N_t, outfile) = parse_input(config_file)

    # Grid spacing in each dimension
    # We divide by N_i - 1 so that we include L_x in our boundary
    dx = (max_x - min_x) / N_x
    dy = (max_y - min_y) / N_y
    dz = (max_z - min_z) / N_z

    # Timestep
    dt = (t_f - t_0) / N_t

    # Grid size in each direction
    L_x = np.float64(max_x) - np.float64(min_x)
    L_y = np.float64(max_y) - np.float64(min_y)
    L_z = np.float64(max_z) - np.float64(min_z)

    return N_x, N_y, N_z, N_t, L_x, L_y, L_z, dx, dy, dz, dt, mu, rho, lambd, t_0, t_f, outfile
def __init__(self, input_file):
    (self.coref_np_list, self.inverse_coref_dict, self.id_to_np, self.np_chunks,
     self.np_chunks_words, self.text, self.max_coref, self.apostophed_nps,
     self.sentences) = parse_input(input_file)
    self.orig_inverse_coref_dict = copy.copy(self.inverse_coref_dict)
    self.ref_dict = {}
    self.method_list = [
        self.handle_alises,
        #self.appositives,
        self.handle_dates,
        self.match_head_nouns,
        self.handle_hyphens,
        #self.special_case_appositives,
        self.anonymous_shuffling,
        self.np_preceded_by_article,
        self.synonym_match,
        self.plain_string_match_on_NPs,
        self.plain_string_match,
        self.get_title_based_matches,
    ]
    self.articles = ["A", "An", "The"]
    self.remove_articles_from_phrase_pattern = re.compile('(a|an|the)\s+(?P<phrase>.*)$')
    self.suffices = ["Corp.", "Co."]
    self.titles = ["Mr.", "Mrs.", "Ms.", "Dr."]
    self.out_filename = os.path.splitext(os.path.basename(input_file))[0] + os.path.extsep + "response"
    self.tag_format = '<COREF ID="(?P<coref_id>%s)">(?P<np>%s)</COREF>'
    self.new_coref_ids = set()
    self.additional_synonyms = {"aircraft": "plane", "plane": "aircraft"}
    self.number_words = set(["one", "two", "three", "four", "five", "six", "seven",
                             "eight", "nine", "ten"])
    self.date_format = re.compile("(?P<date>[0-9][0-9]) (?P<month>jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec) (?P<year>[0-9][0-9][0-9][0-9])")
    self.date_format_1 = re.compile("(?P<month>[0-9][0-9])-(?P<date>[0-9][0-9])-(?P<year>[0-9][0-9])")
    self.days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    self.month_map = {1: ("Jan", "January"), 2: ("Feb", "February"), 3: ("Mar", "March"),
                      4: ("Apr", "April"), 5: ("May", "May"), 6: ("Jun", "June"),
                      7: ("Jul", "July"), 8: ("Aug", "August"), 9: ("Sep", "September"),
                      10: ("Oct", "October"), 11: ("Nov", "November"), 12: ("Dec", "December")}
def get_answer(model, a, b):
    a = float(a)
    b = float(b)
    print("First value:")
    print(a)
    print("Second value:")
    print(b)
    inp = np.array([[a, b]], dtype=np.float64)
    output_raw = model.predict(inp)
    return output_raw[0][0]


if __name__ == "__main__":
    from model_io import load_saved_model

    model_dict = dict()
    model_dict["plus"] = load_saved_model("plus")
    model_dict["minus"] = load_saved_model("minus")
    model_dict["multiply"] = load_saved_model("multiply")
    model_dict["divide"] = load_saved_model("divide")

    while True:
        print("Input first value:")
        a = input()
        print("Input second value:")
        b = input()
        print("Input operator:")
        method = parse_input(input())
        print(get_answer(model_dict[method], a, b))
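# Hypothetical sketch of the operator parsing assumed by the loop above: whatever
# the user types has to map onto one of the model_dict keys ("plus", "minus",
# "multiply", "divide"). The symbol-to-name mapping here is an illustration only,
# not the project's actual parse_input.
def parse_operator(op):
    mapping = {"+": "plus", "-": "minus", "*": "multiply", "/": "divide"}
    return mapping.get(op.strip(), op.strip())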
    # print('{0:3} {1:28} {2:28} {3:>50}'.format(row_count,
    #       str(status_deque).strip('deque()'),
    #       str(symbol_deque).strip('deque()'),
    #       str(input_deque).strip('deque()')))
    # print(row_count, ' ', status_deque, ' ', symbol_deque, ' ', input_deque, ' ')
    next_action = ACTION(status_deque[-1], input_deque[0], SLR1_tabel)
    if next_action == 'acc':
        accept = True
        analyse_result_array[-1].append('accept')
        return next_action, analyse_result_array
    if next_action == 'error':
        return next_action, 0
    if next_action.startswith('S'):
        move_in(status_deque, symbol_deque, input_deque, next_action)
        analyse_result_array[-1].append('move_in')
    elif next_action.startswith('r'):
        destination = do_reduce(status_deque, symbol_deque, input_deque,
                                next_action, SLR1_tabel, grammer_productions_list)
        analyse_result_array[-1].append(
            'reduce:' + grammer_productions_list[int(next_action.strip('r'))])
        if destination == 'error':
            return 'error', 0
    return '?'


if __name__ == '__main__':
    inputed_parse = parse_input()
    SLR1_tabel = gen_table('grammer.txt')
    items_list, grammer_productions_list = getItemCollection("grammer.txt")
    result, analyse_result_array = do_analyse(SLR1_tabel, inputed_parse,
                                              grammer_productions_list)
    print(analyse_result_array)
    print(result)
import os
import sys

import standardize
import evaluate
import parse_input

fname = sys.argv[1]
if os.path.isdir(fname):
    fname = [os.path.join(fname, f) for f in sorted(os.listdir(fname))]
else:
    fname = [fname]

for f in fname:
    print()
    print('Results for file', f)
    data_in = open(f, 'r')
    data, layout = parse_input.parse_input(data_in)
    data_in.close()
    conc, status = standardize.transform_to_concs(data, layout)
    patientrecords = parse_input.get_patient_records(conc, layout)
    for patient, record in sorted(patientrecords.items()):
        test_result = evaluate.evaluate_patient(record)
        if '--exclude-negative' in sys.argv:
            if test_result == 0:
                continue
        if test_result == 1:
            outcome = 'positive'
        elif test_result == 0:
            outcome = 'negative'
        if (ending - ride.nom_val) == ride.t_start:
            self.score += self.B
        if len(rides_remaining) > 0:
            positions = {car_id: self.curr_positions[car_id]}
            distance, task = assign(positions, [car_id], rides_remaining, ending)
            heapq.heappush(heap, (distance[0], task[0]))
            rides_remaining.pop(task[0][1])
        pbar.update(n=len(self.rides) - len(rides_remaining) - pbar.n)
    return self.score


parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
args = parser.parse_args()
# args.input_file = 'data/a_example.in'
# args.input_file = 'data/b_should_be_easy.in'
args.input_file = 'data/c_no_hurry.in'

prob = parse_input(args.input_file)
prob.kickstart()
print(prob)
score = run(prob)
print(score)
from typing import List
import collections


class Solution:
    def minSteps(self, s: str, t: str) -> int:
        a = collections.Counter(s)
        b = collections.Counter(t)
        sum_of_common = 0
        for k in a:
            # a missing key does not raise KeyError; Counter returns 0
            sum_of_common += min(a[k], b[k])
        r = len(s) - sum_of_common
        return r


if __name__ == "__main__":
    import parse_input as fn

    t, lin, lout = fn.parse_input()
    for i in range(t):
        sol = Solution()
        f = lin[i]
        fout = lout[i]
        # TODO: adjust the line below so it matches the function parameters!
        args = [f['s'], f['t']]
        r = sol.minSteps(*args)
        # compare the results
        print('out = %s; r = %s' % (fout, r))
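# Worked example for the counting logic above: with s = "bab" and t = "aba",
# Counter(s) = {'b': 2, 'a': 1} and Counter(t) = {'a': 2, 'b': 1}, so the common
# count is min(2, 1) + min(1, 2) = 2 and minSteps returns len(s) - 2 = 1.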