def post(self):
    """Handle a structure-prediction POST request.

    Expects the request fields ``sequence``, ``sens`` and ``num_matches``;
    runs the prediction pipeline for the sequence and returns a JSON
    response containing the request id, the aligned and raw dot-bracket
    strings, and two base64-encoded result images.  On any failure a
    JSON body with ``"seq": "Error"`` is returned (still HTTP 200 — the
    client apparently keys off that value).
    """
    req_id = None  # renamed from `id`, which shadowed the builtin
    try:
        # Atomically take the next request id from the shared counter.
        global counter
        with counter.get_lock():
            req_id = counter.value
            counter.value += 1

        parser = reqparse.RequestParser()
        parser.add_argument("sequence", required=True)
        parser.add_argument("sens", required=True)
        parser.add_argument("num_matches", required=True)
        args = parser.parse_args()
        seq = args["sequence"]
        sens = float(args["sens"])
        num_matches = int(args["num_matches"])

        logging.info("Seq " + seq + " with id " + str(req_id) + " from ip " + request.remote_addr)
        parse(seq, req_id)
        process(req_id, seq)

        # BUG FIX: the original used Process(target=predict(id)), which
        # CALLS predict() in this process and hands its return value to
        # Process as the target (so the child process did nothing).
        # Pass the callable and its argument instead.
        p = Process(target=predict, args=(req_id,))
        p.start()
        p.join()

        dot_bracket_string = to_string(req_id)
        print(dot_bracket_string)
        # Maybe make threshold a parameter
        aligned_dot = align_sequence(seq, dot_bracket_string, sens, num_matches, 5)

        # Context managers guarantee the image files are closed even if
        # a later step raises (the originals were never closed at all).
        with open("./pics/" + str(req_id) + "_pred.png", "rb") as file1:
            img1 = file1.read()
        with open("./pics/" + str(req_id) + "_binarized.png", "rb") as file2:
            img2 = file2.read()

        resp = make_response(json.dumps({
            "id": req_id,
            "seq": aligned_dot,
            "raw_dot": dot_bracket_string,
            "img1": b64encode(img1).decode('utf-8'),
            "img2": b64encode(img2).decode('utf-8'),
        }), 200)
        resp.headers.extend({
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Credentials': 'true',
            'Access-Control-Allow-Origin': '*'
        })
        return resp
    except Exception as e:
        # Best-effort error response, preserving the original contract.
        print(e)
        resp = make_response(json.dumps({"id": req_id, "seq": "Error"}), 200)
        resp.headers.extend({
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Credentials': 'true',
            'Access-Control-Allow-Origin': '*'
        })
        return resp
def auto_refresh():
    """Background loop: while the server is `running`, push fresh data to
    connected clients roughly every two seconds.

    Reads the module globals ``running``, ``connections`` and ``sio``.
    """
    while running:
        if connections > 0:
            data = parse()
            # Skip the emit when parse() returned nothing.
            if data:
                sio.emit('refresh', data, namespace='/')
        # BUG FIX: the original `continue` on empty data jumped over the
        # sleep, busy-looping and hammering parse(); always pace the loop.
        sleep(2)
def test_uas(self):
    """Check parse() against every user-agent line in the sample file.

    Lines starting with '#' set the expected platform/browser pair for
    the user-agent lines that follow them.
    """
    # `with` guarantees the file is closed even when an assertion fails;
    # the original open()/close() pair leaked the handle on failure.
    with open(self.ua_samples) as f:
        for line in f:  # iterate lazily instead of readlines()
            if line[0] == '#':
                self.platform, self.browser = line[1:].strip().split()
            else:
                ua = parse(line)
                self.assertEqual(
                    ua['platform'], self.platform,
                    msg=' '.join([self.platform, 'expected,', ua['platform'], 'parsed']))
                self.assertEqual(
                    ua['browser'], self.browser,
                    msg=' '.join([self.browser, 'expected,', ua['browser'], 'parsed']))
def translate(self, filename):
    """Translate *filename* into a MASM-style assembly listing.

    Parses the file, walks the resulting tree, then emits a fixed
    three-segment skeleton (data, stack, code) whose ``main:`` label sets
    up the segment registers and exits via INT 21h.  Returns the listing
    text, or '' when errors were collected.
    """
    tree = my_parser.parse(filename)
    print(tree)
    self.parse_node(tree.root)

    errors = []

    stack_section = 'stackSeg SEGMENT\n\tdb 4096 dup (?)\nstackSeg ends\n\n'
    data_section = 'dataSeg SEGMENT\n' + 'dataSeg ends\n\n'

    # Assemble the code segment from its fixed pieces in one pass.
    code_section = ''.join([
        'codeSeg SEGMENT\n \t\t ASSUME cs:code1, ds:dataSeg, ss:stackSeg\n',
        '\t main:\n',
        '\t\t mov ax, dataSeg\n',
        '\t\t mov ds, ax\n',
        '\t\t mov ax, stackSeg\n',
        '\t\t mov ss, ax\n',
        '\t\t mov ax, 0b800h\n',
        '\t\t mov es, ax\n\n',
        '\t\t mov ax,4c00h\n',
        '\t\t int 21h\n\n',
        'codeSeg ends \n\tend main\n',
    ])

    if errors:
        print('Errors:')
        for error in errors:
            print(error)
        return ''
    return data_section + stack_section + code_section
from my_parser import parse

# Run the parser/evaluator over the three bundled test programs.
for idx in range(1, 4):
    with open(f'test{idx}.mylang', 'r', encoding='utf-8') as src:
        # A plain function object serves as a cheap attribute container
        # for the evaluator's variable store (functions accept arbitrary
        # attributes; the callable itself is presumably never invoked).
        context = lambda x: None
        context.variables = {}
        print(f'Проверяем файл test{idx}.mylang:')
        parse(src.read()).eval(context)
g.set((bob, ns.hasDateSubmitted, Literal(t['date']))) #,datatype=XSD.date))) # Object Properties if len(t['author']) > 1: g.set((bob, ns.hasAuthor, author)) if len(t['deiarea']) > 1: g.set((bob, ns.hasDeiArea, deiarea)) for k in keywords: g.add((bob, ns.hasKeyword, k)) for a in advisors: print "aaaa", a g.add((bob, ns.hasAdvisor, a)) thesis = dp.parse("thesis.txt") print 'Populating...\nThesis\' found: ', len(thesis) g = rdflib.Graph() g.parse("thesis.rdf") for t in thesis: addThesis(g, t) #g.serialize(destination='thesis_out.turtle', format='turtle') g.serialize(destination='thesis_out.xml', format='xml')
from my_parser import parse
from graph import graph_info

# Prompt for a page URL, parse the page text, and print its graph stats.
page_url = input('Введите адрес страницы:')
page_text = parse(page_url)
print(graph_info(page_text))
def translate(self, filename):
    """Translate *filename* into a MASM-style assembly listing.

    Emits one ``proc``/``endp`` pair per declared procedure (with either
    ``equ [bp+n]`` parameter aliases or, for EXT parameters, data-segment
    storage), optional program-level data declarations when the global
    flag ``a`` is true, and the fixed ``main:`` startup/exit sequence.
    Returns the listing text, or '' when any CompilationError was
    collected.

    NOTE(review): this function was whitespace-mangled in the source;
    indentation was reconstructed and the ambiguous spots are flagged
    inline — confirm against the original file.
    """
    global a
    tree = my_parser.parse(filename)
    print(tree)
    self.parse_node(tree.root)
    result = ''
    errors = []
    code_section = 'codeSeg SEGMENT\n \t\t ASSUME cs:code1, ds:dataSeg, ss:stackSeg\n'
    data_section = 'dataSeg SEGMENT\n'
    stack_section = 'stackSeg SEGMENT\n\tdb 4096 dup (?)\nstackSeg ends\n\n'
    ext_params = []         # EXT parameter names already given data-segment storage
    proc_identifiers = []   # procedure names seen so far (duplicate detection)
    param_identifiers = []  # parameter names seen so far (duplicate detection)
    data_indefiers = []     # program-level data identifiers emitted so far
    contains_ext = False    # set once any parameter carries the EXT attribute
    #print(str(self.parameter_declarations))
    print(self.program_date)
    for proc_decl in self.procedure_declarations:
        idn_lexem = proc_decl['identifier']
        #print( proc_decl['identifier'])
        # A procedure may not reuse another procedure's name or the program name.
        if idn_lexem.text in proc_identifiers or idn_lexem.text == self.program_identifier:
            errors.append(CompilationError('Generator', idn_lexem.line, idn_lexem.column,
                                           'identifier "{}" already exists'.format(idn_lexem.text)))
            continue
        proc_identifiers.append(idn_lexem.text)
        code_section += idn_lexem.text + ' proc\n'
        # [bp+4] is the first parameter slot (above saved bp and return address).
        total_parameter_length = 4
        for parameter in proc_decl['parameters']:
            # determine type of parameters and its length
            basic_type = None
            compound_type = None
            for attribute in parameter['attributes']:
                print(basic_type)
                if attribute.text == 'INTEGER' or attribute.text == 'FLOAT' \
                        or attribute.text == 'BLOCKFLOAT':
                    # At most one basic type per parameter.
                    if basic_type is not None or basic_type == 'EXT':
                        errors.append(CompilationError('Generator', attribute.line, attribute.column,
                                                       'attribute "{}" can`t be used together with attribute '
                                                       '"{}"'.format(attribute.text, basic_type)))
                    basic_type = attribute.text
                elif attribute.text == 'COMPLEX' or attribute.text == 'SIGNAL':
                    # At most one compound type per parameter.
                    if compound_type is not None:
                        errors.append(CompilationError('Generator', attribute.line, attribute.column,
                                                       'attribute "{}" can`t be used together with attribute '
                                                       '"{}"'.format(attribute.text, compound_type)))
                    compound_type = attribute.text
                elif attribute.text == 'EXT':
                    # EXT requires a basic type to be present already.
                    if basic_type == None:
                        errors.append(CompilationError('Generator', attribute.line, attribute.column,
                                                       'attribute "{}" can`t be used together with attribute '
                                                       '"{}"'.format(attribute.text, compound_type)))
                    contains_ext = True
            # 4 bytes per scalar; COMPLEX doubles the footprint.
            parameter_memory_size = 4
            if basic_type == 'INTEGER' or basic_type == 'FLOAT':
                parameter_memory_size = 4
            if compound_type == 'COMPLEX':
                parameter_memory_size *= 2
            for idn in parameter['identifiers']:
                if idn.text in param_identifiers:
                    errors.append(CompilationError('Generator', idn.line, idn.column,
                                                   'parameter "{}" already defined'.format(idn.text)))
                param_identifiers.append(idn.text)
                if not contains_ext:
                    # Stack-passed parameter: alias it to its [bp+offset] slot.
                    code_section += '\t@{}\t equ \t [bp+{}]\n'.format(idn.text, total_parameter_length)
                    total_parameter_length += parameter_memory_size
                else:
                    # EXT parameter: reserve zero-initialised data-segment storage once.
                    if idn.text not in ext_params:
                        ext_params.append(idn.text)
                        data_section += '\t{} \t db\t{} dup (0)\n'.format(idn.text, parameter_memory_size)
        # Standard prologue/epilogue for the procedure.
        code_section += '\t\t push bp\n'
        code_section += '\t\t mov bp, sp\n\n'
        code_section += '\t\t pop bp\n'
        # retn N pops the stack-passed parameter bytes; plain ret otherwise.
        if total_parameter_length - 4 != 0:
            code_section += '\t\t retn ' + str(total_parameter_length - 4) + '\n'
        else:
            code_section += '\t\t ret\n'
        code_section += idn_lexem.text + ' endp\n\n'
    basic_type = None
    compound_type = None
    # Program-level data declarations, gated on the module-global flag `a`.
    if a == True:
        for indet in self.current_program_date['attributes']:
            print(indet.text)
            if indet.text == 'INTEGER' or indet.text == 'FLOAT' \
                    or indet.text == 'BLOCKFLOAT':
                if basic_type is not None:
                    errors.append(CompilationError('Generator', indet.line, indet.column,
                                                   'attribute "{}" can`t be used together with attribute '
                                                   '"{}"'.format(indet.text, basic_type)))
                basic_type = indet.text
            elif indet.text == 'COMPLEX' or indet.text == 'SIGNAL':
                if compound_type is not None:
                    errors.append(CompilationError('Generator', indet.line, indet.column,
                                                   'attribute "{}" can`t be used together with attribute '
                                                   '"{}"'.format(indet.text, compound_type)))
                compound_type = indet.text
        parameter_memory_size = 4
        if basic_type == 'INTEGER' or basic_type == 'FLOAT':
            parameter_memory_size = 4
        if compound_type == 'COMPLEX':
            parameter_memory_size *= 2
        for indet1 in self.current_program_date['identifiers']:
            # NOTE(review): the if/elif nesting here was ambiguous in the
            # mangled source; reconstructed as: duplicate-check against
            # parameter names when EXT parameters exist, otherwise against
            # previously emitted data identifiers — confirm.
            if contains_ext == True:
                if indet1.text in param_identifiers:
                    errors.append(CompilationError('Generator', indet1.line, indet1.column,
                                                   'identifier "{}" already exists'.format(indet1.text)))
                    continue
            elif indet1.text in data_indefiers:
                errors.append(CompilationError('Generator', indet1.line, indet1.column,
                                               'identifier "{}" already exists'.format(indet1.text)))
                continue
            data_indefiers.append(indet1.text)
            param_identifiers.append(indet1.text)
            print(indet1.text)
            data_section += '\t{} \t db\t{} dup (0)\n'.format(indet1.text, parameter_memory_size)
    # Fixed entry point: set up segment registers, then DOS exit (INT 21h / 4C00h).
    code_section += '\t main:\n'
    code_section += '\t\t mov ax, dataSeg\n'
    code_section += '\t\t mov ds, ax\n'
    code_section += '\t\t mov ax, stackSeg\n'
    code_section += '\t\t mov ss, ax\n'
    code_section += '\t\t mov ax, 0b800h\n'
    code_section += '\t\t mov es, ax\n\n'
    code_section += '\t\t mov ax,4c00h\n'
    code_section += '\t\t int 21h\n\n'
    code_section += 'codeSeg ends \n\tend main\n'
    data_section += 'dataSeg ends\n\n'
    result += data_section
    result += stack_section
    result += code_section
    if len(errors) > 0:
        print('Errors:')
        for error in errors:
            print(error)
        return ''
    else:
        return result
# # matrix = create_matrix((255, 0, 0), h, w) # create_image(matrix, 'test/new2.png') # # matrix = create_random_matrix(h, w) # create_image(matrix, 'test/new3.png') # # matrix = create_schema_matrix(h, w) # create_image(matrix, 'test/new4.png') # # draw(paint.line, (255, 0, 0), "test/new5.png") # draw(paint.line2, (255, 0, 0), "test/new6.png") # draw(paint.line3, (255, 0, 0), "test/new7.png") # draw(paint.line_bresenhema, (255, 0, 0), "test/new8.png") # mod = my_parser.parse() # image = Image.new("RGB", (1000, 1000), (0, 0, 0)) # mod.paint_vertexes(image, (255, 255, 255)) # image.save("test/new9.png", "PNG") # # image = Image.new("RGB", (1000, 1000), (0, 0, 0)) # mod.paint_polygons(image, (255, 255, 255)) # image.save("test/new10.png", "PNG") # image = Image.new("RGB", (1500, 1500), (0, 0, 0)) # mod.init_z_Buffer(1500, 1500) # mod.init_K_t(10000, 10000, 750, 750, np.array([0.005, -0.045, 1.50])) # mod.paint_fill_polygons(image) # image.save("test_laba3/projection_transform.png", "PNG") mod.init_z_Buffer(1500, 1500)
def Perform_placement(cls, student):
    """Evaluate each (expression, placement) pair in :attr:`_expressions`
    against *student*, returning the placement paired with the first
    expression that parses as true (implicitly None when none do)."""
    for expression, placement in cls._expressions:
        if not parse(expression, student):
            continue
        return placement
# NOTE(review): the two display loops below appear to be the tail of a
# function defined above this chunk (likely the routine that dumps machine
# state after execution — exec_res is called in __main__ below); indentation
# reconstructed — confirm against the original file.
    for i in lst:
        if i[:1] == 'R':
            # HACK: eval() on names taken from `lst`; acceptable only if the
            # names come from the compiler itself, never from user input.
            print("{} = {}".format(i, eval(i)))
    print("--- WORD(s) IN MEMORY ---")
    for n in MEM:
        if n is not None:
            print("MEM[{}] = {}".format(ca0, n))
        # NOTE(review): ca0 increment placed at loop level (tracks the memory
        # index even for empty words) — nesting was ambiguous, confirm.
        ca0 = ca0 + 1

# Entry point: ask for a source path, compile it (parse -> lex -> codegen),
# then execute the generated code and dump the machine state.
if __name__ == "__main__":
    path = input("Entrez le chemin du fichier à compiler :")
    listParse = list()
    try:
        listParse = P.parse(path)
    except FileNotFoundError:
        print("Le fichier n'a pas été trouvé.")
    #print(listParse)
    lexemList = L.lexe(listParse)
    #print(lexemList)
    code = CG.codeGen(lexemList)
    exec_res(code)
def analyse():
    """Flask view: parse the page named by the 'adress' query parameter
    and render its graph statistics into answer.html."""
    target = request.args['adress']
    parsed_text = parse(target)
    return render_template(
        "answer.html",
        req=target,
        answer=Markup(graph_info(parsed_text)),
    )