def get_filename_block_as_codepoints(self):
    """
    Build the codepoint list for a cassette "filename block":
    8-byte padded filename + file type + ASCII flag + gap flag.

    TODO: Support tokenized BASIC. Now we only create ASCII BASIC.
    """
    codepoints = []
    # Filename is always 8 bytes, right-padded with spaces.
    codepoints += list(string2codepoint(self.filename.ljust(8, " ")))
    codepoints.append(self.cfg.FTYPE_BASIC)  # one byte file type
    codepoints.append(self.cfg.BASIC_ASCII)  # one byte ASCII flag
    # one byte gap flag (0x00=no gaps, 0xFF=gaps)
    # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4231&p=9110#p9110
    codepoints.append(self.gap_flag)
    # machine code starting/loading address
    if self.file_type != self.cfg.FTYPE_BASIC:  # BASIC programm (0x00)
        # NOTE(review): this wraps the list being BUILT in an iterator and
        # then READS two words from its head instead of appending the
        # start/load addresses; the function then returns the partially
        # consumed iterator instead of a list. Looks copy/pasted from the
        # parsing path — confirm intended behaviour before relying on the
        # non-BASIC branch.
        codepoints = iter(codepoints)
        self.start_address = get_word(codepoints)
        log.info("machine code starting address: %s" % hex(self.start_address))
        self.load_address = get_word(codepoints)
        log.info("machine code loading address: %s" % hex(self.load_address))
    else:
        # not needed in BASIC files
        # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4341&p=9109#p9109
        pass
    log.debug("filename block: %s" % pformat_codepoints(codepoints))
    return codepoints
def image():
    """Flask view: serve the next image/word pair for the current user.

    Users are tracked with a "uuid" cookie. Per request a not-yet-seen word
    is drawn, its pronunciation is synthesized to an MP3, and the response
    body carries the image URL while the word metadata travels in custom
    response headers. Every 10 words the user's image is rotated.

    Relies on module-level state: `images` (uuid -> (image, description))
    and `users` (uuid -> list of seen words).
    """
    uuid_val = request.cookies.get("uuid")
    print("UUID Cookie: ", uuid_val)
    # If the user has never been authenticated
    if (uuid_val is None or uuid_val not in images):
        # Assign user a uuid value
        uuid_val = str(uuid.uuid4())
        uuid_val = uuid_val.replace("-", "")
        # Get image and description pair
        images[uuid_val] = get_image()
    print("uuid_val: ", uuid_val)
    # Get a list of all prev words user has seen or empty list if none
    prev_words = users.get(uuid_val, [])
    # Re-draw until we get a word the user has not seen yet.
    # NOTE(review): this loops forever once the user has seen every word
    # get_word() can produce — confirm the word pool is large enough.
    word = get_word()[0]
    while word in prev_words:
        word = get_word()[0]
    prev_words.append(word)
    # Get prev img and desc for the user
    img_list = images[uuid_val]
    print("Prev words: ", prev_words)
    print("len(prev_words): ", len(prev_words))
    # Change image after 10 words
    if len(prev_words) % 10 == 0:
        print("Entered if")
        prev_img = images[uuid_val][0]
        img_list = get_image()
        print("img_list: ", img_list)
        print("prev_img: ", prev_img)
        # import pdb; pdb.set_trace()
        # Re-roll until the replacement image differs from the previous one.
        while prev_img == img_list[0]:
            img_list = get_image()
        images[uuid_val] = img_list
    users[uuid_val] = prev_words
    img_list = images[uuid_val]
    # Synthesize the word's pronunciation into static/audio/<word>.mp3.
    to_speech(word=word, save_to="static/audio/", filename=word)
    img_path = "images/" + img_list[0]
    audio_path = "./audio/" + word + ".mp3"
    img_desc = img_list[1]
    print("Description: ", img_list[1])
    # Body: URL of the image; metadata goes into custom headers.
    response = make_response(url_for('static', filename=img_path))
    response.headers["img_description"] = img_desc
    response.headers["word"] = word
    response.headers["translation"] = translation(word)["translatedText"]
    response.headers["audio_file"] = url_for('static', filename=audio_path)
    response.set_cookie("uuid", value=uuid_val)
    return response
def get_guid(self, address):
    """Read the 16-byte GUID at *address* via radare2 and split it into
    the classic [Data1, Data2, Data3, Data4...] field layout."""
    # Seek to the GUID location, then dump 16 raw bytes as a JSON list.
    self.r2.cmd("s {addr}".format(addr=address))
    raw_bytes = json.loads(self.r2.cmd("pcj 16"))
    guid = [
        utils.get_dword(bytearray(raw_bytes[0:4])),  # Data1 (32 bit)
        utils.get_word(bytearray(raw_bytes[4:6])),   # Data2 (16 bit)
        utils.get_word(bytearray(raw_bytes[6:8])),   # Data3 (16 bit)
    ]
    # Data4: the trailing 8 bytes stay as individual byte values.
    guid.extend(raw_bytes[8:16])
    return guid
def create_from_wave(self, codepoints):
    """Parse a cassette filename block read from a wave file.

    Extracts filename (bytes 0-7), file type (byte 8), ASCII flag (byte 9)
    and gap flag (byte 10), validating that the combination is supported.
    Raises NotImplementedError for data/binary files and unknown flags.
    """
    log.debug("filename data: %s" % pformat_codepoints(codepoints))
    # Bytes 0-7: space-padded filename.
    raw_filename = codepoints[:8]
    self.filename = codepoints2string(raw_filename).rstrip()
    print "\nFilename: %s" % repr(self.filename)
    # Byte 8: one-byte file type.
    self.file_type = codepoints[8]
    if not self.file_type in self.cfg.FILETYPE_DICT:
        raise NotImplementedError(
            "Unknown file type %s is not supported, yet." % hex(self.file_type))
    log.info("file type: %s" % self.cfg.FILETYPE_DICT[self.file_type])
    if self.file_type == self.cfg.FTYPE_DATA:
        raise NotImplementedError("Data files are not supported, yet.")
    elif self.file_type == self.cfg.FTYPE_BIN:
        raise NotImplementedError("Binary files are not supported, yet.")
    # Byte 9: tokenized vs. ASCII BASIC flag.
    self.ascii_flag = codepoints[9]
    log.info("Raw ASCII flag is: %s" % repr(self.ascii_flag))
    if self.ascii_flag == self.cfg.BASIC_TOKENIZED:
        self.is_tokenized = True
    elif self.ascii_flag == self.cfg.BASIC_ASCII:
        self.is_tokenized = False
    else:
        raise NotImplementedError("Unknown BASIC type: '%s'" % hex(self.ascii_flag))
    log.info("ASCII flag: %s" % self.cfg.BASIC_TYPE_DICT[self.ascii_flag])
    # Byte 10: gap flag.
    self.gap_flag = codepoints[10]
    log.info("gap flag is %s (0x00=no gaps, 0xff=gaps)" % hex(self.gap_flag))
    # machine code starting/loading address
    if self.file_type != self.cfg.FTYPE_BASIC:  # BASIC programm (0x00)
        # NOTE(review): iter(codepoints) restarts at index 0, so get_word()
        # reads the first two FILENAME bytes here rather than bytes 11-14;
        # the parsed addresses look wrong — probably should iterate over
        # codepoints[11:]. Confirm against the tape format documentation.
        codepoints = iter(codepoints)
        self.start_address = get_word(codepoints)
        log.info("machine code starting address: %s" % hex(self.start_address))
        self.load_address = get_word(codepoints)
        log.info("machine code loading address: %s" % hex(self.load_address))
    else:
        # not needed in BASIC files
        # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4341&p=9109#p9109
        pass
    self.file_content = FileContent(self.cfg)
def create_from_wave(self, codepoints):
    """Parse a cassette filename block read from a wave file.

    Extracts filename (bytes 0-7), file type (byte 8), ASCII flag (byte 9)
    and gap flag (byte 10), validating that the combination is supported.
    Raises NotImplementedError for data/binary files and unknown flags.
    """
    log.debug("filename data: %s" % pformat_codepoints(codepoints))
    # Bytes 0-7: space-padded filename.
    raw_filename = codepoints[:8]
    self.filename = codepoints2string(raw_filename).rstrip()
    print "\nFilename: %s" % repr(self.filename)
    # Byte 8: one-byte file type.
    self.file_type = codepoints[8]
    if not self.file_type in self.cfg.FILETYPE_DICT:
        raise NotImplementedError(
            "Unknown file type %s is not supported, yet." % hex(self.file_type)
        )
    log.info("file type: %s" % self.cfg.FILETYPE_DICT[self.file_type])
    if self.file_type == self.cfg.FTYPE_DATA:
        raise NotImplementedError("Data files are not supported, yet.")
    elif self.file_type == self.cfg.FTYPE_BIN:
        raise NotImplementedError("Binary files are not supported, yet.")
    # Byte 9: tokenized vs. ASCII BASIC flag.
    self.ascii_flag = codepoints[9]
    log.info("Raw ASCII flag is: %s" % repr(self.ascii_flag))
    if self.ascii_flag == self.cfg.BASIC_TOKENIZED:
        self.is_tokenized = True
    elif self.ascii_flag == self.cfg.BASIC_ASCII:
        self.is_tokenized = False
    else:
        raise NotImplementedError("Unknown BASIC type: '%s'" % hex(self.ascii_flag))
    log.info("ASCII flag: %s" % self.cfg.BASIC_TYPE_DICT[self.ascii_flag])
    # Byte 10: gap flag.
    self.gap_flag = codepoints[10]
    log.info("gap flag is %s (0x00=no gaps, 0xff=gaps)" % hex(self.gap_flag))
    # machine code starting/loading address
    if self.file_type != self.cfg.FTYPE_BASIC:  # BASIC programm (0x00)
        # NOTE(review): iter(codepoints) restarts at index 0, so get_word()
        # reads the first two FILENAME bytes here rather than bytes 11-14;
        # the parsed addresses look wrong — probably should iterate over
        # codepoints[11:]. Confirm against the tape format documentation.
        codepoints = iter(codepoints)
        self.start_address = get_word(codepoints)
        log.info("machine code starting address: %s" % hex(self.start_address))
        self.load_address = get_word(codepoints)
        log.info("machine code loading address: %s" % hex(self.load_address))
    else:
        # not needed in BASIC files
        # http://archive.worldofdragon.org/phpBB3/viewtopic.php?f=8&t=4341&p=9109#p9109
        pass
    self.file_content = FileContent(self.cfg)
def on_query_completions(self, view, prefix, locations):
    """Sublime Text hook: offer Java member completions after a '.'.

    Resolves the word before the dot either as a local variable (instance
    members) or as a class name (static members), locates its fully
    qualified name via the buffer's imports (falling back to java.lang),
    and returns the member list from the project or the bundled JSON.
    """
    # Only complete in Java buffers.
    if utils.get_language() != "java":return
    ultimo=utils.get_last_character()
    if ultimo=="." and utils.get_language()=="java":
        window=sublime.active_window()
        view=window.active_view()
        # The receiver of the member access: the word just before the '.'.
        word=utils.get_word(-1)
        variables=Java().get_variables()
        tipo=word
        static=True
        # Known local variable -> complete instance members of its type.
        if variables.get(word):
            tipo=variables[word]
            static=False
        # Resolve the fully qualified name from the import statements.
        package=re.findall("import ([\w.]+\.%s);"%tipo, utils.get_text())
        if not package:
            # java.lang.* is imported implicitly; check the bundled JSON.
            posibleRuta=os.path.join(PATH_JSON, "java", "lang", tipo+".json")
            if os.path.exists(posibleRuta):
                package=["java.lang."+tipo]
        if package:
            package=package[0]
            # Prefer a class defined in the current project, if present.
            clase=self.get_project_class(package)
            if clase:
                return utils.get_completion_list(clase["members"])
            ruta=package.replace(".", os.sep)+".json"
            ruta=os.path.join(PATH_JSON, ruta)
            print("ya se determino")
            objeto=utils.load_json(ruta)
            # JSON key "clase" holds static members, "object" instance ones.
            miembros="clase" if static else "object"
            return utils.get_completion_list(objeto[miembros])
def is_php_statement(self, context):
    """Return True when the word at the context iter is 'php' and is
    immediately preceded by the '<?' opening tag."""
    end, word = utils.get_word(context.get_iter())
    if word != 'php' or not end:
        return False
    probe = end.copy()
    if not probe.backward_chars(2):
        return False
    return probe.get_text(end) == '<?'
def do_POST(self):
    """Handle a POST: decode the uploaded image, run letter segmentation
    and word recognition on it, and answer with a JSON label."""
    # Read exactly the declared request body.
    body = self.rfile.read(int(self.headers['content-length']))
    frame = cv2.imdecode(
        numpy.fromstring(body, numpy.uint8), cv2.IMREAD_UNCHANGED)
    # Segment letters, then assemble them into a word.
    letters, frame = get_letters(frame)
    recognized = get_word(letters)
    self.send_response(200)
    self.end_headers()
    self.wfile.write(json.dumps({'label': recognized}).encode())
    return
def on_query_completions(self, view, prefix, locations):
    """Sublime Text hook: offer Go completions for the identifier that
    precedes a freshly typed '.'."""
    if utils.get_language() != "go":
        return
    if utils.get_last_character() != ".":
        return
    catalog = utils.load_json(GO_MAIN_MODULE)
    owner = utils.get_word(-1)
    members = catalog.get(owner)
    # Only answer when the receiver is known to the catalog.
    if members:
        return utils.get_completion_list(members)
def check_is_class_const(self, context):
    """Return True when the word at the context iter has the shape of a
    class-constant access, i.e. exactly one '::' separator."""
    start, word = utils.get_word(context.get_iter())
    if not word:
        return False
    # 'Class::CONST' splits into exactly two parts.
    return len(word.split('::')) == 2
def do_match(self, context):
    """Decide whether this provider should activate: only in PHP/HTML
    buffers, and only for class completion or words longer than 2 chars."""
    language = context.get_iter().get_buffer().get_language()
    if not language:
        return False
    if language.get_id() not in ('php', 'html'):
        return False
    start, word = utils.get_word(context.get_iter())
    is_class = context.get_data(PHP_PROVIDER_IS_CLASS_DATA_KEY)
    return is_class or (word and len(word) > 2)
def do_populate(self, context):
    """Populate the completion context with proposals for the word at the
    cursor; emits an empty proposal list when there is no word."""
    start, word = utils.get_word(context.get_iter())
    # Nothing typed -> nothing to complete.
    if not word:
        context.add_proposals(self, [], True)
        return
    self.move_mark(context.get_iter().get_buffer(), start)
    document = utils.get_document(start)
    language = start.get_buffer().get_language()
    matches = self.get_proposals(document, language, word)
    context.add_proposals(self, matches, True)
def on_query_completions(self, view, prefix, locations):
    """Sublime Text hook: suggest known member functions for the object
    before the '.' in JavaScript/Node buffers; unknown objects get an
    empty entry persisted to the catalog."""
    print(utils.get_language())
    language = utils.get_language()
    if language != "javascript" and language != "nodejs":
        return
    if not utils.is_point():
        return
    # Node gets its own function catalog.
    base = sublime.packages_path() + os.sep + "javascript" + os.sep
    if language == "nodejs":
        json_path = base + "functions_node.json"
    else:
        json_path = base + "functions.json"
    catalog = utils.load_json(json_path)
    owner = utils.get_word(-1)
    if not catalog.get(owner):
        # First sighting of this object: register it with no members.
        catalog[owner] = []
        utils.save_json(json_path, catalog)
        return
    return utils.get_completion_list(catalog[owner])
def check_is_class(self, context):
    """Return True when the word at the cursor is preceded by 'new'.

    Walks backwards over identifier characters ([A-Za-z0-9_:]) to the
    start of the current word, then checks whether the word before it is
    the 'new' keyword (i.e. we are completing a class name).
    """
    piter = context.get_iter()
    start = piter.copy()
    start.backward_char()
    ch = start.get_char()
    # Move to the start of the word.
    # BUGFIX: 'and' binds tighter than 'or', so the original condition
    # `ch.isalnum() or ch == '_' or ch == ':' and not start.starts_line()`
    # applied the start-of-line guard only to the ':' case, letting the
    # loop walk past the beginning of the line on alphanumerics and '_'.
    # Parenthesize so the guard covers every identifier character.
    while (ch.isalnum() or ch == '_' or ch == ':') and not start.starts_line():
        if not start.backward_char():
            break
        ch = start.get_char()
    # Now we check that the previous word is 'new'
    start2, word = utils.get_word(start)
    return word == 'new'
def do_populate(self, context):
    """Add PHP completion proposals, adjusting the replacement start
    position for class / class-constant / bare-cursor situations."""
    is_class = context.get_data(PHP_PROVIDER_IS_CLASS_DATA_KEY)
    is_class_const = context.get_data(PHP_PROVIDER_IS_CLASS_CONST_DATA_KEY)
    is_php_statement = context.get_data(PHP_PROVIDER_IS_PHP_STATEMENT_DATA_KEY)
    start, word = utils.get_word(context.get_iter())
    if not is_class:
        # Without a word (and not completing a class) there is nothing to do.
        if not word:
            context.add_proposals(self, [], True)
            return
    elif is_class_const:
        # FIXME: This should be implemented in activation
        start = context.get_iter()
    elif not word:
        # Class completion with no word typed: replace from the cursor.
        start = context.get_iter()
    proposals = self.get_proposals(is_class, is_class_const, is_php_statement, word)
    self.move_mark(context.get_iter().get_buffer(), start)
    context.add_proposals(self, proposals, True)
def check_is_class(self, context):
    """Return True when the word at the cursor is preceded by 'new'.

    Walks backwards over identifier characters ([A-Za-z0-9_:]) to the
    start of the current word, then checks whether the word before it is
    the 'new' keyword (i.e. we are completing a class name).
    """
    piter = context.get_iter()
    start = piter.copy()
    start.backward_char()
    ch = start.get_char()
    # Move to the start of the word.
    # BUGFIX: 'and' binds tighter than 'or', so the original condition
    # `ch.isalnum() or ch == '_' or ch == ':' and not start.starts_line()`
    # applied the start-of-line guard only to the ':' case, letting the
    # loop walk past the beginning of the line on alphanumerics and '_'.
    # Parenthesize so the guard covers every identifier character.
    while (ch.isalnum() or ch == '_' or ch == ':') and not start.starts_line():
        if not start.backward_char():
            break
        ch = start.get_char()
    # Now we check that the previous word is 'new'
    start2, word = utils.get_word(start)
    return word == 'new'
def do_populate(self, context):
    """Add PHP completion proposals, adjusting the replacement start
    position for class / class-constant / bare-cursor situations."""
    is_class = context.get_data(PHP_PROVIDER_IS_CLASS_DATA_KEY)
    is_class_const = context.get_data(PHP_PROVIDER_IS_CLASS_CONST_DATA_KEY)
    is_php_statement = context.get_data(PHP_PROVIDER_IS_PHP_STATEMENT_DATA_KEY)
    start, word = utils.get_word(context.get_iter())
    if not is_class:
        # Without a word (and not completing a class) there is nothing to do.
        if not word:
            context.add_proposals(self, [], True)
            return
    elif is_class_const:
        # FIXME: This should be implemented in activation
        start = context.get_iter()
    elif not word:
        # Class completion with no word typed: replace from the cursor.
        start = context.get_iter()
    proposals = self.get_proposals(is_class, is_class_const, is_php_statement, word)
    self.move_mark(context.get_iter().get_buffer(), start)
    context.add_proposals(self, proposals, True)
def do_minitest(dmn, vocab, multiple_ans=False, nbr_test=0, log_it=False, state_name=""):
    """Run `nbr_test` mini-test steps on a trained DMN and print results.

    For each step the input facts, the question, the ground-truth answer(s)
    and the model prediction(s) are mapped back to words and printed.
    With multiple_ans=True the model emits several answer tokens per
    question and a cumulative error count is kept. With log_it=True the
    collected episodes are written to a log file whose name is derived
    from `state_name` (which must then be non-empty).
    """
    #data = load_minitest(fname)
    ivocab = dmn.ivocab
    total_facts = []
    total_q = []
    total_ans = []
    total_pred = []
    total_error = 0
    for j in range(0, nbr_test):
        y_true = []
        y_pred = []
        # One forward pass in 'minitest' mode.
        step_data = dmn.step(j, 'minitest')
        answers = step_data["answers"]
        inputs = step_data["inputs"]
        question = step_data["question"]
        if (multiple_ans):
            ret_multiple = step_data["multiple_prediction"]
        else:
            prediction = step_data["prediction"]
        print("-------")
        w_input = []
        w_q = []
        print("==> reconstruction of input and question")
        # Map the vectorized facts and question back to words.
        for i in range(0, np.shape(inputs)[0]):
            w_input.append(utils.get_word(dmn.word2vec, inputs[i]))
        for i in range(0, np.shape(question)[0]):
            w_q.append(utils.get_word(dmn.word2vec, question[i]))
        print("Facts:")
        print(' '.join(w_input))
        print(' '.join(w_q) + "?")
        total_facts.append(w_input)
        total_q.append(w_q)
        if (multiple_ans == False):
            print("==>Right answer is:")
            for x in answers:
                y_true.append(x)
                print(ivocab[x])
            print("==>Answer found by the model is:")
            for x in prediction.argmax(axis=1):
                y_pred.append(x)
                print(ivocab[x])
        else:
            print("==>Right answer is:")
            answers = answers[0]
            for i in range(0, np.shape(answers)[0]):
                ans = (ivocab[answers[i]])
                y_true.append(ans)
            print(' '.join(y_true) + ".")
            total_ans.append(y_true)
            print("==>Multiple answer found are:")
            list_pred = []
            # ret_multiple is indexed (?, answer_slot, vocab): take the
            # argmax per answer slot.
            for i in range(0, np.shape(ret_multiple)[1]):
                pred_temp = ret_multiple[:, i, :]
                for x in pred_temp.argmax(axis=1):
                    list_pred.append(ivocab[x])
            print(' '.join(list_pred) + '. (' + str(np.shape(ret_multiple)[1]) + ' answers)')
            total_pred.append(list_pred)
            total_error = total_error + get_number_difference(
                y_true, list_pred)
    if (log_it):
        if (state_name == ""):
            raise Exception(
                "Wrong or state_name. \n"
                "You must give the babi_id in order to log the results of the minitest"
            )
        infos = state_name.split(".")
        # info = states/dmn_multiple.h5.bs10.babi1.epoch5.test3.24979.state
        # NOTE(review): with the example name above, infos[4] would be
        # 'epoch5' and infos[5] 'test3' — the indices below assume a
        # slightly different layout; confirm against real state file names.
        babi_temp = infos[4]
        epoch_temp = infos[5]
        id = babi_temp[4:]
        epoch_nbr = epoch_temp[5:]
        episode = {
            'C': total_facts,
            'Q': total_q,
            'A': total_ans,
            'AF': total_pred
        }
        babi_id = utils.babi_map[id]
        fname = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'data/minitest_log/en/%s_100k_log_%s.txt' % (babi_id, epoch_nbr))
        write_log_results(fname, episode)
    print("Error is", (total_error))
def add_block_data(self, block_length, data): """ add a block of tokenized BASIC source code lines. >>> cfg = Dragon32Config >>> fc = FileContent(cfg) >>> block = [ ... 0x1e,0x12,0x0,0xa,0x80,0x20,0x49,0x20,0xcb,0x20,0x31,0x20,0xbc,0x20,0x31,0x30,0x0, ... 0x0,0x0] >>> len(block) 19 >>> fc.add_block_data(19,iter(block)) 19 Bytes parsed >>> fc.print_code_lines() 10 FOR I = 1 TO 10 >>> block = iter([ ... 0x1e,0x29,0x0,0x14,0x87,0x20,0x49,0x3b,0x22,0x48,0x45,0x4c,0x4c,0x4f,0x20,0x57,0x4f,0x52,0x4c,0x44,0x21,0x22,0x0, ... 0x0,0x0]) >>> fc.add_block_data(999,block) 25 Bytes parsed ERROR: Block length value 999 is not equal to parsed bytes! >>> fc.print_code_lines() 10 FOR I = 1 TO 10 20 PRINT I;"HELLO WORLD!" >>> block = iter([ ... 0x1e,0x31,0x0,0x1e,0x8b,0x20,0x49,0x0, ... 0x0,0x0]) >>> fc.add_block_data(10,block) 10 Bytes parsed >>> fc.print_code_lines() 10 FOR I = 1 TO 10 20 PRINT I;"HELLO WORLD!" 30 NEXT I Test function tokens in code >>> fc = FileContent(cfg) >>> data = iter([ ... 0x1e,0x4a,0x0,0x1e,0x58,0xcb,0x58,0xc3,0x4c,0xc5,0xff,0x88,0x28,0x52,0x29,0x3a,0x59,0xcb,0x59,0xc3,0x4c,0xc5,0xff,0x89,0x28,0x52,0x29,0x0, ... 0x0,0x0 ... ]) >>> fc.add_block_data(30, data) 30 Bytes parsed >>> fc.print_code_lines() 30 X=X+L*SIN(R):Y=Y+L*COS(R) Test high line numbers >>> fc = FileContent(cfg) >>> data = [ ... 0x1e,0x1a,0x0,0x1,0x87,0x20,0x22,0x4c,0x49,0x4e,0x45,0x20,0x4e,0x55,0x4d,0x42,0x45,0x52,0x20,0x54,0x45,0x53,0x54,0x22,0x0, ... 0x1e,0x23,0x0,0xa,0x87,0x20,0x31,0x30,0x0, ... 0x1e,0x2d,0x0,0x64,0x87,0x20,0x31,0x30,0x30,0x0, ... 0x1e,0x38,0x3,0xe8,0x87,0x20,0x31,0x30,0x30,0x30,0x0, ... 0x1e,0x44,0x27,0x10,0x87,0x20,0x31,0x30,0x30,0x30,0x30,0x0, ... 0x1e,0x50,0x80,0x0,0x87,0x20,0x33,0x32,0x37,0x36,0x38,0x0, ... 0x1e,0x62,0xf9,0xff,0x87,0x20,0x22,0x45,0x4e,0x44,0x22,0x3b,0x36,0x33,0x39,0x39,0x39,0x0,0x0,0x0 ... 
] >>> len(data) 99 >>> fc.add_block_data(99, iter(data)) 99 Bytes parsed >>> fc.print_code_lines() 1 PRINT "LINE NUMBER TEST" 10 PRINT 10 100 PRINT 100 1000 PRINT 1000 10000 PRINT 10000 32768 PRINT 32768 63999 PRINT "END";63999 """ # data = list(data) # # print repr(data) # print_as_hex_list(data) # print_codepoint_stream(data) # sys.exit() # create from codepoint list a iterator data = iter(data) byte_count = 0 while True: try: line_pointer = get_word(data) except (StopIteration, IndexError), err: log.error( "No line pointer information in code line data. (%s)" % err) break # print "line_pointer:", repr(line_pointer) byte_count += 2 if not line_pointer: # arrived [0x00, 0x00] -> end of block break try: line_number = get_word(data) except (StopIteration, IndexError), err: log.error( "No line number information in code line data. (%s)" % err) break
def add_block_data(self, block_length, data): """ add a block of tokenized BASIC source code lines. >>> cfg = Dragon32Config >>> fc = FileContent(cfg) >>> block = [ ... 0x1e,0x12,0x0,0xa,0x80,0x20,0x49,0x20,0xcb,0x20,0x31,0x20,0xbc,0x20,0x31,0x30,0x0, ... 0x0,0x0] >>> len(block) 19 >>> fc.add_block_data(19,iter(block)) 19 Bytes parsed >>> fc.print_code_lines() 10 FOR I = 1 TO 10 >>> block = iter([ ... 0x1e,0x29,0x0,0x14,0x87,0x20,0x49,0x3b,0x22,0x48,0x45,0x4c,0x4c,0x4f,0x20,0x57,0x4f,0x52,0x4c,0x44,0x21,0x22,0x0, ... 0x0,0x0]) >>> fc.add_block_data(999,block) 25 Bytes parsed ERROR: Block length value 999 is not equal to parsed bytes! >>> fc.print_code_lines() 10 FOR I = 1 TO 10 20 PRINT I;"HELLO WORLD!" >>> block = iter([ ... 0x1e,0x31,0x0,0x1e,0x8b,0x20,0x49,0x0, ... 0x0,0x0]) >>> fc.add_block_data(10,block) 10 Bytes parsed >>> fc.print_code_lines() 10 FOR I = 1 TO 10 20 PRINT I;"HELLO WORLD!" 30 NEXT I Test function tokens in code >>> fc = FileContent(cfg) >>> data = iter([ ... 0x1e,0x4a,0x0,0x1e,0x58,0xcb,0x58,0xc3,0x4c,0xc5,0xff,0x88,0x28,0x52,0x29,0x3a,0x59,0xcb,0x59,0xc3,0x4c,0xc5,0xff,0x89,0x28,0x52,0x29,0x0, ... 0x0,0x0 ... ]) >>> fc.add_block_data(30, data) 30 Bytes parsed >>> fc.print_code_lines() 30 X=X+L*SIN(R):Y=Y+L*COS(R) Test high line numbers >>> fc = FileContent(cfg) >>> data = [ ... 0x1e,0x1a,0x0,0x1,0x87,0x20,0x22,0x4c,0x49,0x4e,0x45,0x20,0x4e,0x55,0x4d,0x42,0x45,0x52,0x20,0x54,0x45,0x53,0x54,0x22,0x0, ... 0x1e,0x23,0x0,0xa,0x87,0x20,0x31,0x30,0x0, ... 0x1e,0x2d,0x0,0x64,0x87,0x20,0x31,0x30,0x30,0x0, ... 0x1e,0x38,0x3,0xe8,0x87,0x20,0x31,0x30,0x30,0x30,0x0, ... 0x1e,0x44,0x27,0x10,0x87,0x20,0x31,0x30,0x30,0x30,0x30,0x0, ... 0x1e,0x50,0x80,0x0,0x87,0x20,0x33,0x32,0x37,0x36,0x38,0x0, ... 0x1e,0x62,0xf9,0xff,0x87,0x20,0x22,0x45,0x4e,0x44,0x22,0x3b,0x36,0x33,0x39,0x39,0x39,0x0,0x0,0x0 ... 
] >>> len(data) 99 >>> fc.add_block_data(99, iter(data)) 99 Bytes parsed >>> fc.print_code_lines() 1 PRINT "LINE NUMBER TEST" 10 PRINT 10 100 PRINT 100 1000 PRINT 1000 10000 PRINT 10000 32768 PRINT 32768 63999 PRINT "END";63999 """ # data = list(data) # # print repr(data) # print_as_hex_list(data) # print_codepoint_stream(data) # sys.exit() # create from codepoint list a iterator data = iter(data) byte_count = 0 while True: try: line_pointer = get_word(data) except (StopIteration, IndexError), err: log.error("No line pointer information in code line data. (%s)" % err) break # print "line_pointer:", repr(line_pointer) byte_count += 2 if not line_pointer: # arrived [0x00, 0x00] -> end of block break try: line_number = get_word(data) except (StopIteration, IndexError), err: log.error("No line number information in code line data. (%s)" % err) break
def get_right2_data():
    """Return the current word from utils wrapped in a JSON payload."""
    word = utils.get_word()
    return jsonify({"data": word})