def create_app():
    """Application factory: build the Flask app with CORS enabled and
    the project preprocessor applied, then hand it back to the caller."""
    application = Flask(__name__)
    CORS(application)
    preprocessor.process(application)
    return application
def get_oldest_file_from(dir):
    """Find the oldest (by modification time) non-backup file in *dir*.

    Runs the preprocessor on that file, then returns a 3-tuple of
    (open file object in 'r+' mode, full path, bare file name).
    The caller is responsible for closing the returned file object.

    Note: *dir* is expected to end with a path separator, because paths
    are built by plain string concatenation (kept for compatibility
    with existing callers).
    """
    files = os.listdir(dir)
    # Initialize the name actually used below; the original initialized
    # `oldest_file` but then read `oldest_file_name` (NameError when the
    # directory holds only '~' backup files).
    oldest_file_name = ""
    oldest_file_mod_time = float("inf")  # clearer than a magic 9999... sentinel
    for entry in files:
        # Bug fix: the original stat'ed `action_dir + entry` — an unrelated
        # global — instead of the directory actually being scanned.
        mod_time = os.stat(dir + entry).st_mtime
        # Skip editor backup files ending in '~'.
        if mod_time < oldest_file_mod_time and not entry.endswith("~"):
            oldest_file_mod_time = mod_time
            oldest_file_name = entry
    fname = dir + oldest_file_name
    preprocessor.process(fname)
    f = open(fname, 'r+')
    return f, fname, oldest_file_name  # file object, full path, bare name
def done(self): network_img = process(capture()) # get images img0 = Image.open("../data/img_process/crop.png").resize( (image_size, image_size)) crop_png = ImageTk.PhotoImage(img0) img1 = Image.open("../data/img_process/process.png").resize( (image_size, image_size)) process_png = ImageTk.PhotoImage(img1) # display images self.update_images(crop_png, process_png) # get number from neural network pixel_array = np.array(network_img, dtype="float32") pixel_array = np.append([], pixel_array) pixel_array = np.array([[x / 255] for x in pixel_array]) last_row = self.network.feedforward(pixel_array) self.user_num = last_row.argmax() self.check()
def main(): parse(process(lex("pi = 3.14"))[0]) for i, a in enumerate(argv[1:]): try: parse(process(lex(a))[0]) except: k = "" if str(i)[-1] == "1": k = "st" elif str(i)[-1] == "2": k = "nd" elif str(i)[-1] == "3": k = "rd" else: k = "th" print("%s%s argument is invalid!" % (i, k)) cli()
def process_table(table, lookup, wikidata_lookup, line_count, coverage_tree, ratings, out=False):
    """Run the geo-interpretation pipeline over one extracted web table.

    Returns a 5-tuple (result, coordinates, headers, rubbish_rows, flag):
    empty containers with flag 0 when preprocessing yields nothing, and
    with flag 1 otherwise (coordinates only when columns were found).
    """
    res, headers, rubbish_rows = pre.process(table['relation'], table['url'])
    # Preprocessing produced nothing usable — bail out early.
    if not res:
        return {}, {}, {}, [], 0
    # No candidate columns: report headers/rubbish but no interpretation.
    if len(res['columns']) == 0:
        return {}, {}, headers, rubbish_rows, 1
    # Test against the gazetteer, pick the best interpretation, geocode it.
    res, data = gazetteer_test(res, lookup, coverage_tree, wikidata_lookup)
    res = choose_interpretation(res, ratings)
    coordinates = coder.get_coordinates(res, data, coverage_tree)
    return res, coordinates, headers, rubbish_rows, 1
def main(argv):
    """Drive the migration pipeline: preprocess the source document,
    transform it with the stylesheet, prune empty assessments, package."""
    (infile_path, stylesheet_path, outdir_path, base_url,
     question_type, diffdir, question_list_file) = get_input(argv)
    log_path = os.path.join(outdir_path, 'migrator.log')
    logging.basicConfig(filename=log_path, level=logging.INFO, filemode='w')
    # Stage 1: preprocessing.
    preprocessed_dom = preprocessor.process(
        infile_path, base_url, outdir_path, question_type, diffdir,
        question_list_file)
    if WRITE_INTERMEDIATE_FILES:
        write_outfile(preprocessed_dom, 'pp_source.xml')
    # Stage 2: XSLT-style transformation and cleanup.
    transformed_etree = transformer.transform_data(preprocessed_dom, stylesheet_path)
    remove_empty_assessments(transformed_etree)
    if WRITE_INTERMEDIATE_FILES:
        write_outfile(transformed_etree, os.path.join(outdir_path, 'out.xml'))
    # Stage 3: emit the final assessment packages.
    packager.package_assessments(transformed_etree, outdir_path)
def main(image):
    """Show the UI, run *image* through the network, report the digit."""
    # open interface
    interface()
    #image = Image.open("../data/img_stream/20180802_115324.jpg") #TEST
    # Preprocess the photo (saved under data/process_img/) into a
    # network-ready numpy array.
    image = process(image)
    # Classify and hand the result to the user-facing output routine.
    num = run_network(image)
    output(num)
def cli(name="left", version="0.1", prompt="| %s v%s |> "):
    """Interactive read-eval-print loop for the interpreter.

    :param name:    interpreter name shown in the prompt
    :param version: version string shown in the prompt
    :param prompt:  %-format template receiving (name, version)

    Exits cleanly on Ctrl-C / EOF; IOError is re-raised; any other
    error is reported via getError() and the loop ends.
    """
    try:
        while True:
            code = input(warn + prompt % (name, version) + endc)
            if len(code) == 0:
                continue
            out = parse(process(lex(code))[0][0])
            if out:
                print(bold + green + str(toStr(out)) + endc)
    except (KeyboardInterrupt, EOFError):
        print(warn + "\n\nProcess finished!\n" + endc)
    except IOError:
        raise
    except Exception:
        # Fix: the original bound `as e` but never used it; getError()
        # presumably reports the active exception itself — confirm.
        getError()
def interface(computer_num):
    """Python 2 console guessing game: the user draws digits until they
    match *computer_num*; each drawing is captured, preprocessed and
    classified by the neural network."""
    #computer_num = 8 #TEST
    num_of_tries = 0
    print("I have a number from 0 to 9."),
    while (True):
        # Wait for the user, then capture and classify their drawn digit.
        raw_input("Please write down your guess and press enter...")
        raw_image = capture()
        processed_image = process(raw_image)
        user_num = run_network(processed_image)
        print "\nYour number is: " + str(user_num)
        correct = raw_input("If correct, press enter. If not, press 'n'... ")
        if correct == "n" or correct == "N":
            # Let the user correct a misclassification by hand.
            user_num = handle_wrong_num()
            # if user wants to try again, repeat loop
            if user_num == -1:
                print
                continue
        num_of_tries += 1
        # Higher/lower feedback; break only on an exact match.
        if user_num < computer_num:
            print("\nYour number is less than mine!")
        elif user_num > computer_num:
            print("\nYour number is greater than mine!")
        elif user_num == computer_num:
            break
    # NOTE(review): num_of_tries is always >= 1 once the loop exits via
    # break, so this first branch looks unreachable — confirm.
    if num_of_tries == 0:
        print "\nThanks for playing!\n"
    else:
        if num_of_tries == 1:
            grammar = " try!\n"
        elif num_of_tries > 1:
            grammar = " tries!\n"
        print "\nCongratulations, you won the game in " + str(num_of_tries) + grammar
def submit(self, contest_id, problem_id, file_path, modify=False):
    """Submit a solution file for *problem_id* in contest *contest_id*.

    :param contest_id: contest the problem belongs to
    :param problem_id: problem identifier on the judge
    :param file_path:  path to the solution source file
    :param modify:     when True, run the source through
                       preprocessor.process before uploading
    :return: dict with 'contest', 'problem' and 'solution' ids scraped
             from the post-submit results-table link (the original
             docstring claimed a bool; the code returns a dict)
    :raises OperationFailedException: when no results-table link is found
            in the response
    """
    print("{} {} {}".format(contest_id, problem_id, file_path))
    url = self.url + 'contest/{}/submit'.format(contest_id)
    mp = multipart_post_form.MultiPartForm()
    mp.add_field('problem', str(problem_id))
    # Basename of the path: everything after the last '/'.
    file_name = re.search('([^/]|\\\\/)*$', file_path).group()
    with open(file_path) as f:
        data = f.read()
    if modify:
        data = preprocessor.process(file_name, data)
    mp.add_file('codefile', file_name, data)
    content = mp.get_content()
    response = self.get_data(url, content, {
        'Content-type': mp.get_content_type(),
        'Content-length': len(content),
    })
    self.raise_errors(response)
    result = {}
    # Scrape contest/problem/solution ids from the results link markup.
    res = re.search('<tr><td><a class="stdlink" href="/contest/(\d*)/results/(\d*)">(\d*)</a>', response)
    if not res:
        raise OperationFailedException(['undefined post-submit error'], response)
    result['contest'] = res.group(1)
    result['problem'] = res.group(2)
    result['solution'] = res.group(3)
    return result
def main():
    """Entry point: validate CLI arguments, then lex/process/parse the source.

    argv[1] is the source file path; argv[2] is a '-<n>' flag giving the
    number of spaces that make up one indentation level.

    Raises IndexError for missing arguments, TypeError for a non-numeric
    space count, and SyntaxError for a malformed flag.
    """
    if len(argv) == 1:
        raise IndexError("Please, give me a source file or type a command.")
    elif len(argv) == 2:
        raise IndexError(
            "You need to specify amount of spaces used for indentation!")
    else:
        if not isNum(argv[2]):
            raise TypeError("Amount of spaces can be only number!")
        if argv[2][0] != "-":
            raise SyntaxError(
                "Syntax for flags is: '-[flagname]', found: %s!" % argv[2])
        lexer.spaceCount = int(argv[2][1:])
        # Bug fix: the file handle was opened and never closed; a context
        # manager releases it even if parsing raises.
        with open(argv[1], "r") as src_file:
            src = src_file.read()
        src = replaceEscapes(src)
        data = lexer.lex(src)
        data = [replaceEscapes(d) for d in data]
        data = process(data)
        data = [replaceEscapes(d) for d in data]
        parse(data)
# Python 2 template-expansion loop (`print` statements, dict.iteritems(),
# `print >>sys.stderr`): optionally enables the configure-dialog directive,
# then for each (template, destination) pair in `output_files` builds
# absolute paths, skips missing templates, creates the destination
# directory, adds per-file DIRNAME/FILENAME directives and runs the
# preprocessor to generate the output file.
# NOTE(review): `options`, `directives`, `output_files` and TEMPLATE_DIR are
# defined outside this visible chunk — presumably earlier in the script;
# confirm. Kept byte-identical below because the chunk's surrounding
# indentation context is not visible.
if options['with-config-dlg']: directives['WITH_CONFIGURE_DIALOG'] = True # Generate the plugin base for infile, outfile in output_files.iteritems(): print 'Processing %s\n' \ ' into %s...' % (infile, outfile) infile = os.path.join(TEMPLATE_DIR, infile) outfile = os.path.join(os.getcwd(), outfile) if not os.path.isfile(infile): print >>sys.stderr, 'Input file does not exist : %s.' % os.path.basename(infile) continue # Make sure the destination directory exists if not os.path.isdir(os.path.split(outfile)[0]): os.makedirs(os.path.split(outfile)[0]) # Variables relative to the generated file directives['DIRNAME'], directives['FILENAME'] = os.path.split(outfile) # Generate the file preprocessor.process(infile, outfile, directives.copy()) print 'Done.' # ex:ts=4:et:
# Token-stream interpreter for the toy language. Walks `toks` with a
# manually advanced index `i`, handling:
#   SYM + COLON      -> function call:   function(name, getArgs(...))
#   SYM + EQ         -> assignment:      evaluate expression, assign()
#   SYM + (DE)APPEND -> += / -= on "NUM:" values via eval(); append or
#                       right-truncate on "STR:" values
#   IF ... COLON     -> evaluate the condition; when False, skip over the
#                       INDENT/DEDENT-balanced block; the `indent` stack
#                       records which branch ran (1 = if-branch taken)
#   ELSE             -> consults the `indent` stack to run or skip its block
#   INDENT/DEDENT/SEMI are consumed as structure; anything else raises
#   SyntaxError.
# NOTE(review): kept byte-identical — the index arithmetic around
# getUnless/getUnlessIndex (the -2/+2/+4 adjustments) is too order-sensitive
# to restyle safely; `vars` here is a module-level symbol table that shadows
# the builtin, presumably defined elsewhere in the file — confirm.
def parse(toks): indent = [] i = 0 while i < len(toks): if tokType(toks[i]) == "SYM": name = toks[i][4:] if toks[i + 1] == "COLON": r = getUnless(toks, "SEMI", i + 2) i = getUnlessIndex(toks, "SEMI", i + 2) args = getArgs(r) function(name, args) elif toks[i + 1] == "EQ": r = getUnless(toks, "SEMI", i + 2) i = getUnlessIndex(toks, "SEMI", i + 2) expr = getArgs(r) expr = generateType(expr) expr = process(expr) val = evaluate(expr) assign(name, val) elif toks[i + 1] == "APPEND" or toks[i + 1] == "DEAPPEND": if name not in vars: raise NameError( "You need to declare variable before you try to append!" ) if tokType(vars[name]) == "NUM": """ Number increase """ mode = toks[i + 1] expr = "" r = getArgs(getUnless(toks, "SEMI", i + 2)) i = getUnlessIndex(toks, "SEMI", i + 2) - 2 for o in r: expr += o if mode == "APPEND": vars[name] = "NUM:" + str( eval(strip(vars[name][4:]) + "+" + strip(expr))) else: vars[name] = "NUM:" + str( eval(strip(vars[name][4:]) + "-" + strip(expr))) i += 2 elif tokType(vars[name]) == "STR": """ String append """ if toks[i + 1] == "APPEND": expr = "" r = getArgs(getUnless(toks, "SEMI", i + 2)) i = getUnlessIndex(toks, "SEMI", i + 2) for o in r: expr += o vars[name] += expr else: """ Next should be number used to remove indexes from string """ expr = "" r = round( float( getArgs(getUnless(toks, "SEMI", i + 2), "NUM")[0])) i = getUnlessIndex(toks, "SEMI", i + 2) if r >= len(vars[name][4:]): ln = len(vars[name][4:]) raise IndexError( "Index {0} is greater than length {1}!".format( r, ln)) vars[name] = "STR:" + vars[name][4:-r] else: raise TypeError( "Invalid type {0}, expecting number or string!".format( tokType(vars[name]))) else: raise SyntaxError( "Unexpected token {0} - expecting a '=' or ':'!".format( toks[i + 1])) elif toks[i] == "IF": r = getUnless(toks, "SEMI", i) i = getUnlessIndex(toks, "SEMI", i) + 2 if r[-1] != "COLON": raise SyntaxError("Syntax for 'if' is: 'if condition:'!") cond = r[1:-1] cond = generateType(getArgs(addAll(cond, 
"COMMA"))) b = toBool(condition(cond)) if b: indent.insert(0, 1) else: bl = 1 while bl != 0: if toks[i] == "INDENT": bl += 1 elif toks[i] == "DEDENT": bl -= 1 i += 1 indent.insert(0, 0) i -= 1 elif toks[i] == "ELSE": if len(indent) == 0: raise SyntaxError("Else without if!") elif indent[0] == 1: if len(toks) > i + 1 and toks[i + 1] != "COLON" and toks[ i + 1] != "IF": raise SyntaxError("Expecting ':' after 'else'!") bl = 1 i += 4 while bl != 0: if toks[i] == "INDENT": bl += 1 elif toks[i] == "DEDENT": bl -= 1 i += 1 del indent[0] i -= 2 else: if len(toks) > i + 1 and toks[i + 1] == "COLON": i += 1 del indent[0] elif len(toks) > i + 1 and toks[i + 1] == "IF": del indent[0] else: raise SyntaxError("Expecting ':' after 'else'!") elif toks[i] == "INDENT": if len(indent) > 0 and indent[0] == -1: raise SyntaxError("Unexpected indent!") elif toks[i] == "DEDENT": pass elif toks[i] == "SEMI": pass else: raise SyntaxError("Unexpected token {0}!".format(toks[i])) i += 1
activation=tf.nn.relu ) dense_input = tf.reshape(inputs=conv_5, [-1, ]) dense_1 = tf.layers.dense(dense_input, units=1164) dense_2 = tf.layers.dense(dense_2, units=100) dense_3 = tf.layers.dense(dense_3, units=50) dense_4 = tf.layers.dense(dense_4, units=10) dense_5 = tf.layers.dense(dense_5, units=1) cost = tf.reduce_sum(tf.pow(predicted_steering - steering, 2)) / 2*len(input_images) optimizer = tf.train.AdamOptimizer().minimize() return tf.initialize_all_variables(), optimizer, { image: input_images } if __name__ == '__main__': print('preprocessing data...') images, steering = prep.process() sess = tf.Session() operations, optimizer, tensor_dict = init_model(sess, images, steering) sess.run(operations) for _ in range(50): sess.run(optimizer, feed_dict=tensor_dict)
f.write('\n'.join(lines))

# Generate the plugin base: for each (template, destination) pair, build
# paths, skip missing templates, ensure the destination directory exists,
# record per-file DIRNAME/FILENAME directives and run the preprocessor.
# NOTE(review): `f`, `lines`, `output_files`, `directives`, `directory` and
# TEMPLATE_DIR are defined above this chunk, outside the visible range.
for infile, outfile in output_files.items():
    # Bug fix: was `output_files.iteritems()` — a Python 2 API that raises
    # AttributeError under Python 3, which this chunk clearly targets
    # (it uses print() calls throughout).
    print('Generating file %s from template %s...' % (outfile, infile))
    file_directives = directives.copy()
    infile = os.path.join(TEMPLATE_DIR, infile)
    outfile = os.path.join(directory, outfile)
    if not os.path.isfile(infile):
        print('Input file %s does not exist: skipping' % os.path.basename(infile))
        continue
    # Make sure the destination directory exists
    if not os.path.isdir(os.path.split(outfile)[0]):
        os.makedirs(os.path.split(outfile)[0])
    # Variables relative to the generated file
    file_directives['DIRNAME'], file_directives['FILENAME'] = os.path.split(
        outfile)
    # Generate the file
    preprocessor.process(infile, outfile, file_directives)

print('Done')

# ex:ts=4:et:
# CSP to STG parser v 0.0.1concept_preview_alpha
#
# Tomasz Chadzynski
# San Jose State University, 2020
#
# This software is provided AS IS and comes with no warranty
#
import sys

import p_types as t
import preprocessor as pp
import parser as pa
import petrify_converter as pc

if __name__ == "__main__":
    # Bug fix: the original printed "Invalid argument" but then fell
    # through and crashed with IndexError on sys.argv[1] when no source
    # file was supplied; exit cleanly instead.
    if len(sys.argv) != 2:
        print("Invalid argument")
        sys.exit(1)

    print("Loading sources: %s" % sys.argv[1])
    model = pp.load(sys.argv[1])

    print('\n################## Begin preprocess #####################\n')
    pp.process(model)

    print('\n################## Begin parsing ########################\n')
    pa.gen_tree(model)

    print('\n################## Writing output for petrify############\n')
    src = pc.to_petrify(model)
    print(src)
    pc.save_file(model, src)
# Boston City Directory OCR parser. Consumes preprocessed directory pages
# line by line, tracking the current surname header (`last_name` /
# `lname_index`) as it walks the alphabetized listing, and classifies each
# line into one of four buckets returned as (lines, errors, broken, died).
# Per-line flow: reject lines with invalid characters; divert "died" lines;
# validate "X see Y" surname jump headers; otherwise tokenize the entry —
# subentries (leading \x97) inherit the current surname, surname-prefixed
# lines update it (with special handling for split prefixes like Mc/Van/De),
# then the remaining tokens are fed through recognize() to pick up initials,
# spouse, widowed and profession fields until an address token hands off to
# parse_addr(). Entries missing first/last/street go to `errors`.
# NOTE(review): kept byte-identical — the surname back-tracking
# (last_lname / dist rollback) and the iterator/count bookkeeping are too
# order-sensitive to restyle safely. Also note the side effects: appends to
# "out" and writes lname_errors.txt in the current working directory.
# `lname_marker`, `lnames`, `fnames`, `nameabbr`, `valid_jump`, `distance`,
# `recognize`, `parse_addr` and the INITIAL/SPOUSE/WIDOWED/OWNER/HOUSE_NUM
# constants are defined elsewhere in the module — presumably above; confirm.
# The `.next()` iterator calls are Python 2 API.
def parse(directory): """ Takes a directory of Boston Directory files and returns a 4-tuple composed of fully parsed lines, partially parsed lines, unparsed errored lines, and unparsed death lines. (lines, errors, broken, died) """ #lines - parsed without errors #errors - partially parsed with fixable errors #broken - unparsed with known dirty data #died - unparsed, names of deceased with dates lines, errors, broken, died = [], [], [], [] last_name = lname_marker[0].capitalize() #lname_index - keeps track of the index of the lastname #we're currently on so we don't have to recalculate it #every time lname_index = 0 filepaths = [] if os.path.isdir(directory): filepaths = os.listdir(directory) filepaths = sorted(map((lambda x: directory+ "/" + x), filepaths)) elif os.path.isfile(directory): filepaths.append(directory) else: raise NotImplementedError lname_error_file = open("lname_errors.txt", 'w') for infile in filepaths: #preprocess the file before we start parsing it # preprocessed = mark2.process(infile) preprocessed = preprocessor.process(infile) with open("out", 'a') as outfile: for x in preprocessed: outfile.write(x) #getting down to the actual parsing for line_no, line in enumerate(preprocessed): line = line.strip() count = -1 #if a line has invalid characters in it, we're not dealing with it. #XXX wishlist: hook in a levenshtein distance calculator to fix #words with invalid characters. if find_errors(line): broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip()}) continue #stripping out death lines, these don't contain addresses if re.search(r'\bdied\b', line): died.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'last':last_name}) continue #some lastname headers have form "Cohig see Cohen, Cofen" etc #this just takes the first part of that line. 
if re.search(r'\bsee\b', line): split_line = line.split() potential_lname = "" for bit in split_line: if bit == "see": break potential_lname += bit.lower() potential_lname = potential_lname.capitalize() if potential_lname.lower() not in lnames: lname_error_file.write("%s,%s,%s\n" % (line.strip(),line_no,infile)) broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'bad jump'}) continue if valid_jump(last_name, potential_lname): dist = distance(lname_index, potential_lname.lower()) if dist != 0: last_name = potential_lname lname_index += dist else: broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'bad jump'}) else: broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'bad jump'}) continue entry = {} entry['filepath'] = infile entry['line_no'] = line_no lineiter = line.split().__iter__() last_chomp = -1 try: chomp = lineiter.next() count += 1 #we've got a subentry, so we should have the #last name from the last line if chomp.startswith("\x97"): first = chomp[1:].lower() first = first.replace("\x97", "") if first in nameabbr: first = nameabbr[first] entry["first"] = first.capitalize() entry["last"] = last_name #if line starts with a last name, then grab the #first name after it as well elif chomp.lower() in lnames: # print "%d %s from\n%s" % (line_no+1, chomp, line) chomp = chomp.capitalize() last_lname = last_name dist = 0 #if the line was misread by OCR or if the lastname is multiple #words not connected by a hyphen, then grab the rest of it if chomp == "Co" or chomp == "De" or chomp == "Del" or chomp == "Des" or chomp == "Di" or chomp == "La" or chomp == "Le" or chomp == "O" or chomp == "Mac" or chomp == "Mi" or chomp == "Mc" or chomp == "Van": plname = chomp for atom in line.split()[1:]: plname += atom.lower() if plname.lower() in lnames: if plname == "Vander": continue chomp = plname break if valid_jump(last_name, chomp): dist = distance(lname_index, chomp.lower()) if 
dist != 0: #XXX neighborhood/lastname clashes last_name = chomp lname_index += dist else: lname_error_file.write("%s,%s,%s\n" % (line.strip(),line_no,infile)) broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'bad jump'}) continue else: lname_error_file.write("%s,%s,%s\n" % (line.strip(),line_no,infile)) broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'bad jump'}) continue chomp = lineiter.next() count += 1 first = chomp.lower() if first in nameabbr: entry["first"] = nameabbr[first].capitalize() elif first in fnames: entry["first"] = first.capitalize() else: #unset last name, what we hit wasn't correct #XXX ideally we'd like to try to parse the line as a subentry, #but for now just error it last_name = last_lname lname_index -= dist broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'unrecognized first name'}) continue entry["last"] = last_name #if it's not a lastname or a subentry, then it's a line #we should have handled in our preprocessor; mark it. else: broken.append({'filepath':infile, 'line_no':line_no, 'line':line.strip(), 'reason':'bad prefix'}) continue chomp = lineiter.next() count += 1 entry["nh"] = "Boston" entry["strsuffix"] = "St" #handling everything else. 
while True: tup = recognize(chomp) if tup is None: if "prof" in entry: entry["prof"] += " " + chomp else: entry["prof"] = chomp #first name initial elif tup[2] == INITIAL: entry[tup[0]] += " " + tup[1] #spousal entry elif tup[2] == SPOUSE: while not chomp.endswith(")"): chomp += " " + lineiter.next() count += 1 name = tup[1].capitalize() if name.lower() in nameabbr: name = nameabbr[name.lower()].capitalize() elif name.lower() in fnames: if "spouse" in entry: entry["spouse"] += " " + chomp.strip("()") else: entry["spouse"] = chomp.strip("()") else: business = chomp.strip("()") entry["business"] = business #widowed entry; deceased's name optionally follows elif tup[2] == WIDOWED: entry[tup[0]] = tup[1] chomp = lineiter.next() count += 1 if chomp.lower() in nameabbr: chomp = nameabbr[chomp.lower()] if chomp.lower() in fnames: entry["spouse"] = chomp.capitalize() else: continue #we've hit the address section, finish up with everything in #parse_addr elif tup[2] is OWNER or tup[2] == HOUSE_NUM: #or chomp.lower() in streets: # addresses = parse_addr(line) # print "dummy bit " + " ".join(line.split()[count:]) addresses = parse_addr("dummy bit " + " ".join(line.split()[count:])) # print "%d %s\n%s" % (line_no+1, line.strip(), addresses) if addresses is None: break entry.update(addresses) break else: entry[tup[0]] = tup[1] last_chomp = tup[2] chomp = lineiter.next() count += 1 except StopIteration: pass if "first" not in entry or "last" not in entry or "street" not in entry: entry['reason'] = 'incomplete' errors.append(entry) continue lines.append(entry) lname_error_file.close() return (lines, errors, broken, died)
# Argument-list evaluator for the toy language: resolves a token list
# `params` into a list of evaluated argument values (strings, "type:"
# prefixes stripped). First pass substitutes variables from `vars` and
# expands builtin calls in place; second pass walks the (mutated) list
# enforcing comma separation while handling:
#   NUM  -> slicing via LS, or expression evaluation up to the next COMMA
#   SYM  -> function calls (SYM COLON args), variable slicing, or nested
#           sub-expressions recursively fed back through getArgs/process
#   STR  -> slicing, '*' string repetition (bounded by sys.maxsize), or a
#           plain literal passed through parseString
# The `t` parameter is accepted but unused here — presumably kept for
# call-site compatibility; confirm.
# NOTE(review): kept byte-identical — the in-place del/insert mutation of
# `params` combined with index resets (`bak`/`i`) makes any restyle likely
# to change evaluation order; `vars` is a module-level symbol table that
# shadows the builtin, defined elsewhere in the file.
def getArgs(params, t=""): args = [] comma = 0 i = 0 while i < len(params): if tokType(params[i]) == "SYM": if params[i][4:] in vars: params[i] = vars[params[i][4:]] elif params[i][4:] in builtins: r = getUnless(params, "SEMI", i + 1) name = params[i][4:] a = getArgs(r[1:]) del params[i:getUnlessIndex(params, "SEMI", i + 1)] params.insert(i, function(name, a)) i += 1 i = 0 while i < len(params): if comma == 1: if params[i] == "COMMA": comma = 0 i += 1 continue else: print(params, params[i]) raise SyntaxError("Expecting a comma!") if tokType(params[i]) == "NUM": if len(params) > i + 1 and params[i + 1] == "LS": s = params[i] bak = i r = generateType(slicer(params, i + 2, s)) i = slicer(params, i + 2, s, 1) del params[bak:i + 1] i = bak params.insert(i, r) #print(params) continue else: r = process(getUnless(params, "COMMA", i)) bak = i i = getUnlessIndex(params, "COMMA", i) expr = r if len(r) < 2: i = bak args.append(evaluate(expr)[4:]) else: #expr = getArgs(r) #print(expr) del params[bak:i + 1] i = bak params.insert(i, evaluate(expr)) #print(params) continue elif tokType(params[i]) == "SYM": if len(params) > i + 1 and params[i + 1] == "COLON": """ Function call """ a = getArgs(getUnless(params, "SEMI", i + 2)) name = params[i][4:] r = function(name, a) bak = i i = getUnlessIndex(params, "SEMI", i + 2) if len(params) > i and tokType(params[i]): expr = getUnless(params, "COMMA", i) expr.insert(0, r) i = getUnlessIndex(params, "COMMA", i) r = "NUM:" + evaluate(expr) del params[bak:i + 1] params.insert(bak, r) i = bak continue else: if params[i][4:] not in vars: raise NameError("Invalid pointer {0}!".format( params[i][4:])) if len(params) > i + 1 and params[i + 1] == "LS": s = vars[params[i][4:]] if tokType(s) != "STR" and tokType(s) != "NUM": raise TypeError( "Only strings and numbers can be sliced, found %s!" 
% tokType(s)) bak = i r = generateType(slicer(params, i + 2, s)) i = slicer(params, i + 2, s, 1) del params[bak:i + 1] i = bak params.insert(i, r) continue else: r = getUnless(params, "COMMA", i) if len(r) > 1: r = getArgs(addAll(r, "COMMA")) r = generateType(r) r = process(r) else: args.append(vars[params[i][4:]][4:]) comma = 1 i += 1 continue args.append(evaluate(r)) i = getUnlessIndex(params, "COMMA", i) comma = 1 continue elif tokType(params[i]) == "STR": s = params[i] if len(params) > i + 1 and params[i + 1] == "LS": bak = i r = generateType(slicer(params, i + 2, s)) i = slicer(params, i + 2, s, 1) del params[bak:i + 1] i = bak params.insert(i, r) continue elif len(params) > i + 1 and params[i + 1][4] == "*": r = getUnless(params, "COMMA", i + 1) i = getUnlessIndex(params, "COMMA", i + 1) expr = getArgs(r)[0] try: j = round(float(eval(expr[1:]))) except: raise SyntaxError("Can't count {0}!".format(expr[1:])) if j > sys.maxsize: raise OverflowError( "Maximal integer is {0}, found {1}!".format( sys.maxsize, j)) s = s * round(float(j)) args.append(parseString(s[4:])) else: args.append(params[i]) comma = 1 i += 1 return args