def parse_element_to_packet(fp, k, v):
    """Emit C++ serialization statements ("ToPacket" body) for one struct field.

    fp -- output file object the generated C++ is written to.
    k  -- field name in the generated struct.
    v  -- type spec: a string for a scalar type, a 1-element list for a
          vector ([elem_type]) or a 2-element list for a map ([key, val]).
    """
    if is_inner_type(v):
        # built-in type: stream it directly
        fp.write(" packet << " + k + ";\n")
    elif util.is_list(v) and len(v) == 1:  # vector
        fp.write(" packet << (uint32)" + k + ".size();\n")
        fp.write(" for(int i = 0; i < " + k + ".size(); i++)\n")
        fp.write(" {\n")
        if is_inner_type(v[0]):
            fp.write(" packet << " + k + "[i];\n")
        else:
            # BUG FIX: serialize the current element, not the whole vector
            # (was: k + ".ToPacket(packet)")
            fp.write(" " + k + "[i].ToPacket(packet);\n")
        fp.write(" }\n")
    elif util.is_list(v) and len(v) == 2:  # map
        fp.write(" packet << (uint32)" + k + ".size();\n")
        # BUG FIX: the emitted loop header was missing the "for(" keyword and
        # the parentheses on begin(), producing uncompilable C++
        fp.write(" for(std::map<" + v[0] + "," + v[1] + ">::iterator it = " + k + ".begin(); it != " + k + ".end(); it++)\n")
        fp.write(" {\n")
        if is_inner_type(v[0]):  # built-in key
            fp.write(" packet << it->first;\n")
        else:  # user-defined key
            fp.write(" it->first.ToPacket(packet);\n")
        if is_inner_type(v[1]):  # built-in value
            fp.write(" packet << it->second;\n")
        else:  # user-defined value
            fp.write(" it->second.ToPacket(packet);\n")
        fp.write(" }\n")
    else:
        # user-defined struct field: delegate to its own ToPacket
        fp.write(" " + k + ".ToPacket(packet);\n")
def generate_struct(fp, str, struct_name):
    """Generate a C++ struct with ToPacket/FromPacket methods and write it to fp.

    fp          -- output file object; NOTE: flushed and CLOSED by this function.
    str         -- list of (field_name, type_spec) pairs; the parameter name
                   shadows the builtin but is kept for caller compatibility.
    struct_name -- name of the emitted C++ struct.
    """
    head_str = "struct" + " " + struct_name + " {\n"
    fp.write(head_str)
    if not isinstance(str, list):
        # BUG FIX: Py2 print statement -> print() call (works on both 2 and 3)
        print("generate_struct str is not list")
        return
    # emit the field declarations
    for element in str:
        key = element[0]
        value = element[1]
        if util.is_string(value) and is_inner_type(value):
            # built-in scalar type
            element_str = " " + value + " " + key + ";\n"
        elif util.is_list(value) and len(value) == 1:
            # one-element spec -> std::vector
            element_str = " " + "std::vector<" + value[0] + "> " + key + ";\n"
        elif util.is_list(value) and len(value) == 2:
            # two-element spec -> std::map
            element_str = " " + "std::map<" + value[0] + "," + value[1] + "> " + key + ";\n"
        else:
            # BUG FIX: an unrecognized spec used to reuse the previous field's
            # element_str (or raise NameError on the first field); fail loudly.
            raise ValueError("generate_struct: unsupported type spec for field " + key)
        fp.write(element_str)
    # emit ToPacket
    fp.write(" void ToPacket(INetPacket& packet)\n")
    fp.write(" {\n")
    for element in str:
        parse_element_to_packet(fp, element[0], element[1])
    fp.write(" }\n")
    # emit FromPacket ("size" is shared scratch for all container reads)
    fp.write(" void FromPacket(INetPacket& packet)\n")
    fp.write(" {\n")
    fp.write(" int size;\n")
    for element in str:
        parse_element_from_packet(fp, element[0], element[1])
    fp.write(" }\n")
    fp.write("}\n")
    fp.flush()
    fp.close()
def reach_the_deepest_lists_and_modify(Prg, Group, Fun, Params):
    """Recurse into nested lists and apply Fun in place to each deepest list.

    If the obj has more highlighted words/hits, it's more important
    for the user — Fun modifies the original values inside Group.
    """
    if not (util.is_list(Group) and Group):
        return
    # invariant of the data: if one member is a list, all members are lists
    if util.is_list(Group[0]):
        for SubGroup in Group:
            reach_the_deepest_lists_and_modify(Prg, SubGroup, Fun, Params)
    else:
        # deepest level reached: mutate the original values in Group
        Fun(Prg, Group, Params)
def sentence_from_memory(Prg, Source, LineNum, Strip=False):
    """Return (True, sentence) for line LineNum of the loaded document Source,
    or (False, error_message) when the document/line is unavailable.

    Strip=True strips surrounding whitespace from the returned sentence.
    """

    def _validation_error():
        # Return a non-empty message on the first failed check, "" if all pass.
        if "DocumentObjectsLoaded" not in Prg:
            return f"DocumentsObjectsLoaded not in Prg"
        if Source not in Prg["DocumentObjectsLoaded"]:
            return f"DocumentsObjectsLoaded: {Source} is not loaded"
        if "Sentences" not in Prg["DocumentObjectsLoaded"][Source]:
            return f"DocumentsObjectsLoaded: {Source} no Sentences"
        if not util.is_list(Prg["DocumentObjectsLoaded"][Source]["Sentences"]):
            Sentences = Prg["DocumentObjectsLoaded"][Source]["Sentences"]
            return f"DocumentsObjectsLoaded: incorrect type: Sentences = {str(Sentences)}"
        if len(Prg["DocumentObjectsLoaded"][Source]["Sentences"]) - 1 < LineNum:
            return f"DocumentsObjectsLoaded: {Source} unknown linenum: {LineNum}"
        return ""

    Msg = _validation_error()
    if Msg:
        print(Msg)
        util.log(Prg, Msg)
        return False, Msg
    Line = Prg["DocumentObjectsLoaded"][Source]["Sentences"][LineNum]
    return (True, Line.strip()) if Strip else (True, Line)
def FMOBNDgroup(self):
    """Build the $FMOBND input group for explicitly broken atom pairs.

    Returns "\n" when no bonds are broken; raises TypeError when the
    fragmentation does not yield a list.
    """
    pairs = self._fragmentation.getExplicitlyBreakAtomPairs()
    if not is_list(pairs):
        raise TypeError
    if not pairs:
        # nothing to emit for an empty pair list
        return "\n"
    return " $FMOBND%s\n $END" % self._getBondGroupData(pairs)
def __new__(cls, i, *args, **kw):
    """Coerce i into a NotificationId.

    Accepts an existing NotificationId (returned untouched), a list of
    flag values (bitwise-OR'ed together), or anything int()-convertible.
    """
    if isinstance(i, NotificationId):
        return i
    if is_list(i):
        # combine all member flags into one bitmask
        combined = 0
        for flag in i:
            combined |= int(flag)
        i = combined
    else:
        i = int(i)
        # TODO: check val
    return super(NotificationId, cls).__new__(cls, i, *args, **kw)
def parse_element_to_packet_lua(fp, k, v):
    """Emit Lua serialization ("to_packet" body) statements for one field.

    fp -- output file object the generated Lua is written to.
    k  -- field name; v -- type spec (string scalar, [elem] vector,
    [key, val] map), same convention as the C++ generator.
    """
    func_name = lua_type_map_to_func(v, False)  # encode direction
    if is_inner_type(v):
        fp.write(" packet:" + func_name + "(self." + k.lower() + ")\n")
    elif util.is_list(v) and len(v) == 1:  # vector
        table_name = v[0].lower() + "_list"
        fp.write(" self." + table_name + " = self." + table_name + " or {}\n")
        fp.write(" local len = #self." + table_name + "\n")
        fp.write(" packet:writeInt(len)\n")
        fp.write(" for i = 1,len do\n")
        if is_inner_type(v[0]):
            func_name = lua_type_map_to_func(v[0], False)
            # BUG FIX: write the i-th element, not the whole table
            fp.write(" packet:" + func_name + "(self." + table_name + "[i])\n")
        else:
            # BUG FIX: was "self.<table>to_packet(packet)" — missing the
            # element index and the ':' method-call syntax
            fp.write(" self." + table_name + "[i]:to_packet(packet)\n")
        fp.write(" end\n")
    elif util.is_list(v) and len(v) == 2:  # map
        # BUG FIX: name the record after the field (k) as the decoder
        # (parse_element_packet_to_lua) does, not after the key type (v[0])
        table_name = k.lower() + "_record"
        fp.write(" self." + table_name + " = self." + table_name + " or {}\n")
        # '#' does not count non-sequence keys, so count entries via pairs()
        fp.write(" local len = 0\n")
        fp.write(" for k,v in pairs(self." + table_name + ") do\n")
        fp.write(" len = len + 1\n")
        fp.write(" end\n")
        fp.write(" packet:writeInt(len)\n")
        fp.write(" for k,v in pairs(self." + table_name + ") do\n")
        key_func = lua_type_map_to_func(v[0], False)  # key is a built-in type
        fp.write(" packet:" + key_func + "(k)\n")
        if is_inner_type(v[1]):  # built-in value
            func_name = lua_type_map_to_func(v[1], False)
            fp.write(" packet:" + func_name + "(v)\n")
        else:  # user-defined value
            fp.write(" v:to_packet(packet)\n")
        fp.write(" end\n")
def parse_element_packet_to_lua(fp, k, v):
    """Emit Lua deserialization ("packet_to" body) statements for one field.

    fp -- output file object the generated Lua is written to.
    k  -- field name; v -- type spec (string scalar, [elem] vector,
    [key, val] map), mirroring parse_element_to_packet_lua.
    """
    func_name = lua_type_map_to_func(v, True)  # decode direction
    if is_inner_type(v):
        fp.write(" self." + k.lower() + " = packet:" + func_name + "()\n")
    elif util.is_list(v) and len(v) == 1:  # vector
        fp.write(" local len = packet:readInt()\n")
        table_name = v[0].lower() + "_list"
        # BUG FIX: the appends below target self.<table>, but the original
        # only declared an unused local — initialize the field on self
        fp.write(" self." + table_name + " = {}\n")
        if is_inner_type(v[0]):
            func_name = lua_type_map_to_func(v[0], True)
            # BUG FIX: read all len elements; the original read only one
            fp.write(" for i = 1,len do\n")
            fp.write(" local tmp = packet:" + func_name + "()\n")
            fp.write(" self." + table_name + "[#self." + table_name + " + 1] = tmp\n")
            fp.write(" end\n")
        else:
            fp.write(" for i = 1,len do\n")
            fp.write(" local tmp = require(\"lua.component.protocol." + v[0] + "\").new()\n")
            fp.write(" tmp:packet_to(packet)\n")
            fp.write(" self." + table_name + "[#self." + table_name + " + 1] = tmp\n")
            fp.write(" end\n")
    elif util.is_list(v) and len(v) == 2:  # map
        fp.write(" local len = packet:readInt()\n")
        table_name = k.lower() + "_record"
        # BUG FIX: same as the vector branch — stores go to self.<table>
        fp.write(" self." + table_name + " = {}\n")
        # v[0] (the key) is always a built-in type; v[1] may be user-defined
        key_func = lua_type_map_to_func(v[0], True)
        fp.write(" for i = 1, len do\n")
        fp.write(" local key = packet:" + key_func + "()\n")
        if is_inner_type(v[1]):
            func_name = lua_type_map_to_func(v[1], True)
            fp.write(" local value = packet:" + func_name + "()\n")
        else:
            fp.write(" local value = require(\"lua.component.protocol." + v[1] + "\").new()\n")
            fp.write(" value:packet_to(packet)\n")
        fp.write(" self." + table_name + "[key] = value\n")
        fp.write(" end\n")
def TxtToObj(TokenTxtList, Index, FileSourceBaseName):
    """Recursively convert a nested list of token strings into TokenObj trees.

    NOTE(review): relies on `Prg` and `WordsDetected` as free variables from
    the enclosing scope — presumably a closure; confirm against the caller.
    """
    Converted = []
    for Item in TokenTxtList:
        if util.is_str(Item):
            Converted.append(tokens.TokenObj(
                Item, Index, Prg=Prg, WordsDetected=WordsDetected,
                FileSourceBaseName=FileSourceBaseName))
        if util.is_list(Item):
            # nested group -> nested list of TokenObjs
            Converted.append(TxtToObj(Item, Index, FileSourceBaseName))
    return Converted
def parse_element_from_packet(fp, k, v):
    """Emit C++ deserialization statements ("FromPacket" body) for one field.

    fp -- output file object; k -- field name; v -- type spec (string scalar,
    [elem] vector, [key, val] map). Mirrors parse_element_to_packet.
    Assumes the generated method already declared an `int size;` scratch
    variable (see generate_struct).
    """
    if is_inner_type(v):
        # built-in type: stream it directly
        fp.write(" packet >> " + k + ";\n")
    elif util.is_list(v) and len(v) == 1:  # vector
        fp.write(" packet >> size;\n")
        fp.write(" for(int i = 0; i < size; i++)\n")
        fp.write(" {\n")
        if is_inner_type(v[0]):
            fp.write(" " + v[0] + " value;\n")
            fp.write(" packet >> value;\n")
            fp.write(" " + k + ".push_back(value);\n")
        else:
            fp.write(" " + v[0] + " info;\n")
            fp.write(" info.FromPacket(packet);\n")
            fp.write(" " + k + ".push_back(info);\n")
        fp.write(" }\n")
    elif util.is_list(v) and len(v) == 2:  # map
        fp.write(" packet >> size;\n")
        fp.write(" for(int i = 0; i < size; i++)\n")
        fp.write(" {\n")
        fp.write(" " + v[0] + " key;\n")
        fp.write(" " + v[1] + " value;\n")
        if is_inner_type(v[0]):  # built-in key
            fp.write(" packet >> key;\n")
        else:  # user-defined key
            fp.write(" key.FromPacket(packet);\n")
        if is_inner_type(v[1]):  # built-in value
            fp.write(" packet >> value;\n")
        else:  # user-defined value
            fp.write(" value.FromPacket(packet);\n")
        # NOTE(review): emitted as unqualified make_pair — relies on a
        # `using namespace std;` in the generated code's context; verify.
        fp.write(" " + k + ".insert(make_pair(key,value));\n")
        fp.write(" }\n")
    else:
        # user-defined struct field: delegate to its own FromPacket
        fp.write(" " + k + ".FromPacket(packet);\n")
def _restore(self, obj):
    """Dispatch obj to the restore routine matching its tag or container type.

    Tag checks run in priority order; untagged non-container values are
    returned unchanged.
    """
    if has_tag(obj, tags.ID):
        return self._restore_id(obj)
    if has_tag(obj, tags.REF):
        # Backwards compatibility
        return self._restore_ref(obj)
    if has_tag(obj, tags.TYPE):
        return self._restore_type(obj)
    if has_tag(obj, tags.REPR):
        # Backwards compatibility
        return self._restore_repr(obj)
    if has_tag(obj, tags.OBJECT):
        return self._restore_object(obj)
    if util.is_list(obj):
        return self._restore_list(obj)
    if has_tag(obj, tags.TUPLE):
        return self._restore_tuple(obj)
    if has_tag(obj, tags.SET):
        return self._restore_set(obj)
    if util.is_dictionary(obj):
        return self._restore_dict(obj)
    # plain value: nothing to restore
    return obj
def _get_flattener(self, obj):
    """Return the callable used to flatten obj, or None for unsupported types
    (methods, functions, old-style classes...).
    """
    if util.is_primitive(obj):
        return lambda o: o

    list_recurse = self._list_recurse

    if util.is_list(obj):
        if self._mkref(obj):
            return list_recurse
        # already referenced: push context and emit a reference marker
        self._push()
        return self._getref

    # Tuples and sets are encoded as a "(tuple|set)dict" when unpicklable,
    # otherwise degraded to plain lists.
    if util.is_tuple(obj):
        if not self.unpicklable:
            return list_recurse
        return lambda o: {tags.TUPLE: [self._flatten(v) for v in o]}

    if util.is_set(obj):
        if not self.unpicklable:
            return list_recurse
        return lambda o: {tags.SET: [self._flatten(v) for v in o]}

    if util.is_dictionary(obj):
        return self._flatten_dict_obj
    if util.is_type(obj):
        return _mktyperef
    if util.is_object(obj):
        return self._ref_obj_instance
    return None
def test_positive_types(self):
    """Each util.is_* predicate accepts a value of its own type."""
    positive_samples = [
        (util.is_string, "abcd"),
        (util.is_boolean, True),
        (util.is_integer, 1234),
        (util.is_list, [1, 2, 3, 4]),
        (util.is_dict, {"foo": "bar"}),
    ]
    for predicate, value in positive_samples:
        self.assertTrue(predicate(value))
def test_neg_lists(self):
    """util.is_list rejects non-list values (string, int)."""
    for not_a_list in ("1234", 12345):
        self.assertFalse(util.is_list(not_a_list))
def operator_exec(Tokens, Scope="subsentence", SubSentenceMulti=100, WordPositionMulti=100, CallLevel=0, ProgressBarConsole=None, ProgressBarChange=3):
    """Evaluate OR/AND/THEN operators over Tokens, collapsing each
    (left, operator, right) triple in place into a single result TokenObj.

    Tokens is mutated: nested list groups are first evaluated recursively and
    replaced by their single result, then every operator occurrence folds its
    two neighbors into one combined token. Relies on module-level `Operators`,
    `get_operator_positions` and `TokenObj`.
    """
    for Position in range(0, len(Tokens)):  # expand all groups in all levels first
        Token = Tokens[Position]
        if util.is_list(Token):
            # recursion leaves exactly one collector token in the sub-list
            operator_exec(Token, Scope=Scope, SubSentenceMulti=SubSentenceMulti, WordPositionMulti=WordPositionMulti, CallLevel=CallLevel + 1, ProgressBarConsole=ProgressBarConsole)
            Tokens[Position] = Token[0]
            Tokens[Position].IsGroup = True  # the main, collector, result object is a group
    for Operator in Operators:
        # during operator exec the positions can change, so re-scan for every Op
        OperatorPositions = get_operator_positions(Tokens)
        # while this operator still has an unprocessed position
        while (Operator in OperatorPositions) and OperatorPositions[Operator]:
            # The first progress bar change = 2 because at caller position the bar
            # was already updated with 1; later the change is 3 (default value):
            # three tokens (operator + two operands) are consumed per step.
            if ProgressBarConsole:
                ProgressBarConsole.update(Change=ProgressBarChange)
            OperatorPositionLast = OperatorPositions[Operator].pop()
            ParamLeft = Tokens[OperatorPositionLast - 1]
            ParamRight = Tokens[OperatorPositionLast + 1]
            ###############################################################
            if Operator == "OR":  # slow test: ..eading
                # union of both result sets
                ResultOpExec = ParamLeft.Results + ParamRight.Results
            ###############################################################
            elif Operator == "AND":  # slow test: prefer AND reading AND cards AND the AND yet
                # intersect by scope; iterate the smaller side, hash the bigger
                if len(ParamLeft.Results) >= len(ParamRight.Results):
                    ResultsBig = ParamLeft.Results
                    ResultsSmall = ParamRight.Results
                else:
                    ResultsBig = ParamRight.Results
                    ResultsSmall = ParamLeft.Results
                ResBigScoped = set()
                for ResBig in ResultsBig:
                    ResBigScoped.add(ResBig.Scopes[Scope])
                ResultOpExec = []
                for ResSmall in ResultsSmall:
                    if ResSmall.Scopes[Scope] in ResBigScoped:
                        ResultOpExec.append(ResSmall)
            ###############################################################
            elif Operator == "THEN":
                # left hit counts only if a right hit follows it (by word
                # position) inside the same scope
                ResRightScoped = dict()
                for ResRight in ParamRight.Results:
                    # store the current scope AND the finest (word) scope to
                    # detect the word order later
                    ResRightScope = ResRight.Scopes[Scope]
                    if ResRightScope not in ResRightScoped:
                        ResRightScoped[ResRightScope] = []
                    ResRightScoped[ResRightScope].append(ResRight.Scopes["word"])
                ResultOpExec = []
                if ResRightScoped:  # try to find common elems if something is in Right
                    for ResLeft in ParamLeft.Results:
                        if not ResRightScoped:
                            break
                        ResLeftScope = ResLeft.Scopes[Scope]
                        if ResLeftScope in ResRightScoped:
                            for ResRightScopedWord in ResRightScoped[ResLeftScope]:
                                if ResLeft.Scopes["word"] < ResRightScopedWord:
                                    ResultOpExec.append(ResLeft)
                                    # remove the right scope because one hit is
                                    # enough from one sentence, and it shrinks
                                    # the work for the next loops
                                    del ResRightScoped[ResLeftScope]
                                    break
            ###############################################################
            # fold (left, operator, right) into one combined token in place
            OperatorObj = Tokens[OperatorPositionLast]
            ObjResult = TokenObj(ParamLeft.words() + OperatorObj.words() + ParamRight.words(), Results=ResultOpExec, FileSourceBaseName=ParamLeft.FileSourceBaseName)
            ObjResult.Explain = [ParamLeft, OperatorObj, ParamRight]
            Tokens[OperatorPositionLast - 1] = ObjResult
            Tokens.pop(OperatorPositionLast + 1)
            Tokens.pop(OperatorPositionLast)
def test_is_list(self):
    """util.is_list accepts the simple-list fixture."""
    result = util.is_list(self.lsimple)
    self.assertTrue(result)