def am_resourceidchunk(data, pc):
    """Parse the ResourceId chunk (type 0x00080180) at offset *pc*.

    Appends each resource id (as a hex string) to
    config.global_list['RESOURCEIDS'] and stores the chunk's end offset
    under 'RESOURCEIDCHUNKEND' so the caller can advance its cursor.
    """
    # ChunkType: a ResourceId chunk is identified by the fixed 4 bytes 0x00080180.
    ChunkType = ApUtils.little_endian(data[pc:pc + 4])
    if ApUtils.printhex(ChunkType) == "0x00080180":
        ApUtils.apprint("===========Resolve ResourceId Chunk=============")
        ApUtils.apprint("[Resource]ChunkType is : " + ApUtils.printhex(ChunkType))
    else:
        ApUtils.apprint("[Error]ResourceId Chunk resolve error!")
        exit(-1)
    # Chunk size in bytes (4 bytes, includes the 8-byte header).
    ChunkSize = ApUtils.little_endian(data[pc + 4:pc + 8])
    ApUtils.apprint("[Resource]ChunkSize is : " + ApUtils.printhex(ChunkSize))
    # Number of ids = (size - 8-byte header) / 4.
    # FIX: floor division keeps the count an int on Python 3; plain '/'
    # yields a float there, which range() rejects.  Unchanged on Python 2.
    ResourcesId_num = (int(ApUtils.printhex(ChunkSize), 16) - 8) // 4
    for i in range(ResourcesId_num):
        tmp_hex = ApUtils.printhex(
            ApUtils.little_endian(data[pc + 8 + i * 4:pc + 8 + i * 4 + 4]))
        tmp = int(tmp_hex, 16)
        ApUtils.apprint("[Resource][" + str(i) + "]Id : " + str(tmp) +
                        ",hex : " + tmp_hex)
        config.global_list['RESOURCEIDS'].append(tmp_hex)
    # End offset of this chunk = start offset + declared size.
    config.set_value(
        'RESOURCEIDCHUNKEND',
        config.get_value('RESOURCEIDCHUNKSTAR') +
        int(ApUtils.printhex(ChunkSize), 16))
def owb_initialize_all_on_click(b):
    """Initialize every tab widget from the option file and persist each
    widget's dumped option into the config.

    *b* is the (unused) button/event argument supplied by the widget toolkit.
    """
    def _store(widget):
        # Each widget dumps a single-entry dict: persist that entry.
        dumped = widget.dump()
        key = list(dumped.keys())[0]
        config.set_value(key, dumped[key])

    # FIX: close the option file deterministically (it was left open).
    with open(option_file_path) as option_file:
        options = json.load(option_file)

    # Initialize the parcel data source widget.
    ps.initialize(options)
    _store(ps)

    # Do the same with the data reader tab.
    drt.initialize(options)
    _store(drt)

    # Get the signals available: create the actual time series sources.
    ts_options = drt.dump()
    ts_source_factory = tss.time_serie_source_factory()
    ts_sources = ts_source_factory.get_time_series_sources(ts_options)

    # Build the signal/components dictionary.
    signal_components = {
        ts_source.get_signal_type(): ts_source.get_components()
        for ts_source in ts_sources
    }

    # Initialize the preprocessing tab.
    pt.signal_components = signal_components
    pt.initialize(options)
    _store(pt)

    # Merge the signal/component dictionaries.
    sc_dict = {**signal_components, **(pt.get_signal_components())}

    # Re-initialize the marker detector tab.
    mdt.signal_components = sc_dict
    mdt.initialize(options)
    _store(mdt)

    # Finally the data displayer.
    ddt.signal_components = sc_dict
    ddt.initialize(options)
    _store(ddt)
def wb_save_on_click(b):
    # Persist the widget's single dumped entry into the JSON config.
    # *b* is the unused button/event argument.
    dumped = self.dump()
    first_key = next(iter(dumped))
    config.set_value(first_key, dumped[first_key])
def craw_start(key):
    """Crawl Weibo then Baidu data for *key*, pointing the CSV paths at
    files derived from the keyword first."""
    weibo_dir = cfg.get_value('CSV_FILENAME_WEIBO_DIR')
    cfg.set_value('CSV_FILENAME_WEIBO', weibo_dir + key + '_WEIBO.csv')
    print('craw_start: ', cfg._global_dict)
    WeiBo.sina_crawl(key, url_cnt=4)

    baidu_dir = cfg.get_value('CSV_FILENAME_BAIDU_DIR')
    cfg.set_value('CSV_FILENAME_BAIDU', baidu_dir + key + '.csv')
    print('craw_topTen baidu:', cfg._global_dict)
    craw_baidu(key)
def dropbox():
    """Prompt for a Dropbox access token until a login succeeds, then
    persist the token and mark the integration enabled.

    NOTE(review): on success this sets the "ftp"/"enabled" flag — looks
    copied from the ftp wizard; confirm the intended config section.
    """
    while True:
        config.set_value("dropbox", "token", input("Enter Dropbox access token: "))
        config.write_config()
        functions.clear()
        # FIX: keep only the call that can fail inside try, and catch
        # Exception rather than a bare except (which would also swallow
        # KeyboardInterrupt/SystemExit).
        try:
            login.dropbox_login()
        except Exception:
            print("Failed to log in\n")
        else:
            config.set_value("ftp", "enabled", "true")
            config.write_config()
            print("Successfully logged in\n")
            break
def Replace_Elite(population):
    """Update the global elite list with *population*.

    If the elite list is empty it is seeded with the whole population;
    otherwise each individual that beats the current weakest elite member
    replaces it.  The updated list is stored back under "Elite_list" and
    returned.
    """
    elite = config.get_value("Elite_list")
    if not elite:
        # First call: adopt the whole population as the elite set.
        elite = population
    else:
        for individual in population:
            # FIX: compute min(elite) once per candidate instead of twice.
            weakest = min(elite)
            if individual > weakest:
                elite[elite.index(weakest)] = individual
    config.set_value("Elite_list", elite)
    return elite
def wb_save_on_click(b):
    """Persist the widget output and ensure marker-aggregator/marker-sink
    entries exist in the JSON config.  *b* is the unused button event."""
    dumped = self.dump()
    first_key = next(iter(dumped))
    config.set_value(first_key, dumped[first_key])

    default_sink = {
        "output_file": "./marker_output.csv",
        "include_header": True
    }
    if config.get_value("marker-aggregator") is None:
        # Add an empty key for the marker-aggregator (to be dealt with
        # in a second phase) plus a single default marker-sink.
        config.set_value("marker-aggregator", [{}])
        config.set_value("marker-sink", [default_sink])
    else:
        # An aggregator exists: provide a second sink for its output too.
        agg_sink = {
            "output_file": "./agg_marker_output.csv",
            "include_header": True
        }
        config.set_value("marker-sink", [default_sink, agg_sink])
def main(data, wmin, wmax, pop_size, max_evaluations, prng=None, display=False):
    """Run a PSO search over the time series *data* for window sizes in
    [wmin, wmax], accumulating the best fitness into the global store.

    Returns the evolved ``pso`` instance.
    """
    # Seed a fresh RNG from the wall clock when none is supplied.
    if prng is None:
        prng = Random()
        prng.seed(time())
    v = data
    config.set_value("series_length", len(v))
    problem = timeseriesproblem.timeseriesproblem(dimensions=4,
                                                  v=v,
                                                  wmin=wmin,
                                                  wmax=wmax,
                                                  random=prng)
    ea = pso.pso(prng)
    ea.terminator = inspyred.ec.terminators.evaluation_termination
    ea.topology = inspyred.swarm.topologies.ring_topology
    ea.observer = inspyred.ec.observers.default_observer
    # Optionally seed the initial swarm with chaos-generated candidates.
    seeds = []
    if config.get_value("CHAOS_ALGO") != "None":
        seeds = CHAOS_INIT()
    final_pop = ea.evolve(generator=problem.generator,
                          evaluator=problem.evaluator,
                          pop_size=pop_size,
                          seeds=seeds,
                          bounder=problem.bounder,
                          maximize=problem.maximize,
                          max_evaluations=max_evaluations,
                          neighborhood_size=5)
    if display:
        best = max(final_pop)
        print('Best Solution: \n{0}'.format(str(best)))
        #print('Best Solution: \n{0}'.format(config.get_value("gbestx")))
        if config.get_value("SHOW_MOTIF") == True:
            SHOW_MOTIF(data, best.candidate)
    # Accumulate the run's best fitness into the cross-run sum; "not found"
    # is the sentinel for "no previous value".
    # NOTE(review): statement nesting reconstructed from collapsed source —
    # confirm this runs regardless of *display*.
    if gl.get_value("gbest_sum") != "not found":
        gl.set_value("gbest_sum",
                     gl.get_value("gbest_sum") + config.get_value("gbest"))
    else:
        gl.set_value("gbest_sum", config.get_value("gbest"))
    return ea
def am_tagchunk_end(data, pc):
    """Parse an AXML end-tag chunk (type 0x00100103) at offset *pc*.

    Logs every field and stores the chunk's end offset under
    'ENDTAGCHUNKEND' so the caller can advance its cursor.
    """
    # ChunkType: an end-tag chunk is identified by the fixed 4 bytes 0x00100103.
    ChunkType = ApUtils.little_endian(data[pc:pc + 4])
    if ApUtils.printhex(ChunkType) == "0x00100103":
        ApUtils.apprint(" ===========Resolve Tag Chunk End=============")
        ApUtils.apprint(" [endTag" + str(config.get_value('TAGCOUNTEND')) +
                        "]ChunkType is : " + ApUtils.printhex(ChunkType))
    else:
        ApUtils.apprint(" [Error" + str(config.get_value('TAGCOUNTEND')) +
                        "]Tag Chunk end resolve error!")
        exit(-1)
    # Chunk size (4 bytes).
    ChunkSize = ApUtils.little_endian(data[pc + 4:pc + 8])
    ApUtils.apprint(" [endTag" + str(config.get_value('TAGCOUNTEND')) +
                    "]ChunkSize is : " + ApUtils.printhex(ChunkSize))
    # Source line number (4 bytes).
    LineNumber = ApUtils.little_endian(data[pc + 8:pc + 12])
    ApUtils.apprint(" [endTag" + str(config.get_value('TAGCOUNTEND')) +
                    "]LineNumber is : " + ApUtils.printhex(LineNumber))
    # Unknown area (4 bytes), read and ignored.
    Unknown = ApUtils.little_endian(data[pc + 12:pc + 16])
    # Namespace URI (4-byte string-pool index).
    # NOTE(review): `<= len(...)` looks off by one — an index equal to
    # len(STRINGPOOL) would raise IndexError below; confirm intent.
    NameSpcae = ApUtils.little_endian(data[pc + 16:pc + 20])
    if int(ApUtils.printhex(NameSpcae), 16) <= len(
            config.global_list['STRINGPOOL']):
        ApUtils.apprint(" [endTag" + str(config.get_value('TAGCOUNTEND')) +
                        "]NameSpace is : " + config.global_list['STRINGPOOL'][
                            int(ApUtils.printhex(NameSpcae), 16)])
    else:
        ApUtils.apprint(" [endTag" + str(config.get_value('TAGCOUNTEND')) +
                        "]NameSpace is : null")
    # Tag name (4-byte string-pool index).
    Name = ApUtils.little_endian(data[pc + 20:pc + 24])
    NameStr = config.global_list['STRINGPOOL'][int(ApUtils.printhex(Name), 16)]
    ApUtils.apprint(" [endTag" + str(config.get_value('TAGCOUNTEND')) +
                    "]Tag name is : " + NameStr)
    # Advance the end pointer past this chunk.
    config.set_value(
        'ENDTAGCHUNKEND',
        config.get_value('ENDTAGCHUNKSTAR') +
        int(ApUtils.printhex(ChunkSize), 16))
def am_header(data):
    """Validate the 8-byte AXML header (magic number + file size) and
    record the header length and the file-size field's offset in config."""
    # Magic number: first 4 bytes must be 0x03000800 for an AXML file.
    magic_number = data[0:4]
    if ApUtils.printhex(magic_number) != "0x03000800":
        ApUtils.apprint(
            "[Error]This file may not an AndroidManifest.xml file! ")
        exit(-1)
    ApUtils.apprint("===========Resolve Header=============")
    ApUtils.apprint("[Header]Magic number : " + ApUtils.printhex(magic_number))
    # Total file size, 4 bytes little-endian.
    file_size = ApUtils.little_endian(data[4:8])
    ApUtils.apprint("[Header]File size is : " + ApUtils.printhex(file_size))
    # The header is 8 bytes; the size field lives at offset 4.
    config.set_value('AMHEADER', 8)
    config.set_value('FILSIZEINDEX', 4)
def _swarm_selector(self, random, population, args):
    """PSO selector hook: track swarm statistics, and restart the swarm
    when the global best has not improved for TCONV iterations.

    Returns the (possibly regenerated and/or elite-patched) population.
    NOTE(review): block nesting reconstructed from collapsed source —
    confirm which display checks sit inside the first `if`.
    """
    # Record each particle's candidate components for later plotting.
    if (config.get_value("SHOW_SWARM_DISTRIBUTION")):
        for p in population:
            config.get_value("Xi").append(p.candidate[0])
            config.get_value("Wi").append(p.candidate[1])
            config.get_value("Xj").append(p.candidate[2])
            config.get_value("Wj").append(p.candidate[3])
        # Show the initial distribution, then every SHOW_SWARM_CYCLE updates.
        if (config.get_value("SHOW_SWARM_DISTRIBUTION")
            ) and config.get_value("t_updated") == 0:
            utils.show_swarm_distribution()
        if (config.get_value("t_updated") +
                1) % config.get_value("SHOW_SWARM_CYCLE") == 0:
            utils.show_swarm_distribution()
    # Count this update.
    t_updated = config.get_value("t_updated")
    config.set_value("t_updated", t_updated + 1)
    # Stagnation restart: regenerate the swarm after TCONV updates without
    # improvement of the global best.
    if (config.get_value("t_updated") - config.get_value("t_lastupdate") >=
            config.get_value("TCONV")):
        initial_cs = []
        if config.get_value("CHAOS_ALGO") != "None":
            # Chaos-based reinitialization.
            initial_cs = utils.CHAOS_INIT()
        else:
            # Plain random reinitialization via the problem generator.
            num_generated = config.get_value("pop_size")
            i = 0
            initial_cs = []
            while i < num_generated:
                cs = self.generator(random=self._random, args=self._kwargs)
                initial_cs.append(cs)
                i += 1
        self.logger.debug('evaluating initial population')
        initial_fit = self.evaluator(candidates=initial_cs, args=self._kwargs)
        # Rebuild the population, dropping candidates with no fitness.
        population = []
        for cs, fit in zip(initial_cs, initial_fit):
            if fit is not None:
                ind = Individual(cs, self.maximize)
                ind.fitness = fit
                population.append(ind)
            else:
                self.logger.warning(
                    'excluding candidate {0} because fitness received as None'
                    .format(cs))
        self.logger.debug('population size is now {0}'.format(
            len(population)))
        self.archive = []
        # Lower fitness is better here: update the recorded global best.
        if (max(population).fitness < config.get_value("gbest")):
            config.set_value("gbest", max(population).fitness)
            config.set_value("gbestx", max(population))
        # Reset the stagnation timer.
        config.set_value("t_lastupdate", config.get_value("t_updated"))
    # Track convergence history if requested.
    if (config.get_value("SHOW_CONVERGENCE_RATE")):
        config.get_value("CONVERGENCE_RATE_LIST").append(
            config.get_value("gbest"))
    # Optionally splice in the elite individuals.
    if (config.get_value("IF_Elite") == True):
        population = utils.Replace_Elite(population=population)
    return population
def am_namespacechunk_star(data, pc):
    """Parse an AXML start-namespace chunk (type 0x00100100) at *pc*.

    Logs every field and stores the chunk's end offset under
    'STARNAMESPACEEND' so the caller can advance its cursor.
    """
    # ChunkType: fixed 4 bytes 0x00100100 for a start-namespace chunk.
    ChunkType = ApUtils.little_endian(data[pc:pc + 4])
    if ApUtils.printhex(ChunkType) == "0x00100100":
        ApUtils.apprint(
            "===========Resolve Namespace Chunk Start=============")
        ApUtils.apprint("[startNamespace]ChunkType is : " +
                        ApUtils.printhex(ChunkType))
    else:
        ApUtils.apprint("[Error]Namespace Chunk Start resolve error!")
        exit(-1)
    # Chunk size (4 bytes).
    ChunkSize = ApUtils.little_endian(data[pc + 4:pc + 8])
    ApUtils.apprint("[startNamespace]ChunkSize is : " +
                    ApUtils.printhex(ChunkSize))
    # Source line number (4 bytes).
    LineNumber = ApUtils.little_endian(data[pc + 8:pc + 12])
    ApUtils.apprint("[startNamespace]LineNumber is : " +
                    ApUtils.printhex(LineNumber))
    # Unknown area (4 bytes), read and ignored.
    Unknown = ApUtils.little_endian(data[pc + 12:pc + 16])
    # Namespace prefix (4-byte string-pool index).
    Prefix = ApUtils.little_endian(data[pc + 16:pc + 20])
    PrefixName = config.global_list['STRINGPOOL'][int(ApUtils.printhex(Prefix),
                                                      16)]
    ApUtils.apprint("[startNamespace]Prefix is : " + ApUtils.printhex(Prefix) +
                    " , str is : " + PrefixName)
    # Namespace URI (4-byte string-pool index).
    Url = ApUtils.little_endian(data[pc + 20:pc + 24])
    UrlName = config.global_list['STRINGPOOL'][int(ApUtils.printhex(Url), 16)]
    ApUtils.apprint("[startNamespace]Url is : " + ApUtils.printhex(Url) +
                    " , str is : " + UrlName)
    # Advance the end pointer past this chunk.
    config.set_value(
        'STARNAMESPACEEND',
        config.get_value('STARNAMESPACESTAR') +
        int(ApUtils.printhex(ChunkSize), 16))
def update_filename_path(key):
    """Point the three per-keyword CSV paths (Baidu, hotspot, Weibo) at
    files derived from *key*."""
    path_specs = (
        ('CSV_FILENAME_BAIDU_DIR', 'CSV_FILENAME_BAIDU', '.csv'),
        ('CSV_FILENAME_HOTSPOT_DIR', 'CSV_FILENAME_HOTSPOT', '_TOP10.csv'),
        ('CSV_FILENAME_WEIBO_DIR', 'CSV_FILENAME_WEIBO', '_WEIBO.csv'),
    )
    for dir_key, file_key, suffix in path_specs:
        base_dir = cfg.get_value(dir_key, 'NULL')
        cfg.set_value(file_key, base_dir + key + suffix)
def search_theme():
    """Search endpoint (POST).

    Expects a JSON body with:
        keywords:   search keyword
        categories: category filter
        bgn_time:   start time
        end_time:   end time

    Crawls fresh data when the keyword is new, then returns the hotspot
    payload; any non-POST request yields an error string.
    """
    if request.method == 'POST':
        json_data = request.json
        keywords = json_data.get('keywords')
        categories = json_data.get('categories')
        bgn_time = json_data.get('bgn_time')
        end_time = json_data.get('end_time')
        print(keywords, categories, bgn_time, end_time)
        cfg.set_value('KEYWORDS', keywords)
        print('craw_topTen')
        global DEFAULT
        DEFAULT = cfg.get_value('DEFAULT_NAME')
        KEYWORD_LIST = cfg.get_value('KEYWORD_LIST', [DEFAULT])
        print('[search KEYWORD_LIST]: ', KEYWORD_LIST)
        update_filename_path(keywords)
        # Only crawl when we have never seen this keyword before.
        # FIX: idiomatic membership test (`not in` instead of `not x in`).
        if keywords not in KEYWORD_LIST:
            craw_topTen(keywords)
            print("-----CRAW TOPTEN------")
        return get_hotspot()
    else:
        return 'api_search_theme error'
def fetch(from_time: int, sites: List[str] = None) -> List:
    """Fetch questions newer than *from_time* from each site.

    When *sites* is None the module-level SITES list is used; otherwise the
    caller's list is honoured.  Records the sync timestamp under
    "last-sync" and returns the combined question list.
    """
    print()
    print("fetching data...")
    now = int(time.time())
    result = []
    # FIX: the original `else []` discarded an explicitly passed site list,
    # so fetch(t, ["foo"]) silently fetched nothing.
    sites = SITES if sites is None else sites
    total_pages = 0
    quota_remain = 0
    for site in sites:
        questions, pages, quota = _fetch_one_site(site=site,
                                                  from_time=from_time)
        result = result + questions
        total_pages += pages
        # Keep the quota reported by the most recent call.
        quota_remain = quota
    print(
        fg256(
            "grey", "synced pages = {}; remain_quota = {})".format(
                total_pages, quota_remain)))
    config.set_value("last-sync", now)
    return result
def am_textchunk(data, pc):
    """Parse an AXML text chunk (type 0x00100104) at offset *pc*.

    Logs every field and stores the chunk's end offset under
    'TEXTCHUNKEND' so the caller can advance its cursor.
    """
    # ChunkType: fixed 4 bytes 0x00100104 for a text chunk.
    ChunkType = ApUtils.little_endian(data[pc:pc + 4])
    if ApUtils.printhex(ChunkType) == "0x00100104":
        ApUtils.apprint(" ===========Resolve Text Chunk=============")
        ApUtils.apprint(" [textChunk]" + ApUtils.printhex(ChunkType))
    else:
        ApUtils.apprint(" [Error]Text Chunk resolve error!")
        exit(-1)
    # Chunk size (4 bytes).
    ChunkSize = ApUtils.little_endian(data[pc + 4:pc + 8])
    ApUtils.apprint(" [textChunk]ChunkSize is : " +
                    ApUtils.printhex(ChunkSize))
    # Source line number (4 bytes).
    LineNumber = ApUtils.little_endian(data[pc + 8:pc + 12])
    ApUtils.apprint(" [textChunk]LineNumber is : " +
                    ApUtils.printhex(LineNumber))
    # Unknown area (4 bytes), read and ignored.
    Unknown = ApUtils.little_endian(data[pc + 12:pc + 16])
    # Text name (4-byte string-pool index).
    Name = ApUtils.little_endian(data[pc + 16:pc + 20])
    NameStr = config.global_list['STRINGPOOL'][int(ApUtils.printhex(Name), 16)]
    ApUtils.apprint(" [textChunk]Text name is : " + NameStr)
    # Two further unknown 4-byte areas, read and ignored.
    Unknown1 = ApUtils.little_endian(data[pc + 20:pc + 24])
    Unknown2 = ApUtils.little_endian(data[pc + 24:pc + 28])
    # Advance the end pointer past this chunk.
    config.set_value(
        'TEXTCHUNKEND',
        config.get_value('TEXTCHUNKSTAR') +
        int(ApUtils.printhex(ChunkSize), 16))
def folders():
    """Interactive wizard: ask for the data and builds folder names,
    persist them in the config, and create the folders if missing."""
    functions.clear()
    print("Please follow the setup wizard\n")
    valid_folder_name = re.compile("[-.a-zA-Z0-9]+$")

    def _prompt_folder(prompt, default, config_key):
        # Loop until the user enters nothing (accept default) or a name
        # matching the allowed character set.
        while True:
            name = input(prompt)
            if name == "":
                config.set_value("folders", config_key, default)
                return
            elif valid_folder_name.match(name):
                config.set_value("folders", config_key, name)
                return
            functions.clear()
            print("Invalid input. Please use [- . A-Z a-z 0-9]\n")

    # Same prompt strings as before; logic deduplicated into the helper.
    _prompt_folder("Enter data folder name (default: data): ",
                   "data", "data-folder")
    _prompt_folder("Enter builds folder name(default: builds): ",
                   "builds", "builds-folder")
    functions.clear()
    # Check if the folders already exist; if not, create them.
    if not os.path.isdir(config.get_folder('data')):
        os.mkdir(config.get_folder('data'))
    if not os.path.isdir(config.get_folder('builds')):
        os.mkdir(config.get_folder('builds'))
    config.write_config()
def owb_initialize_all_on_click(b):
    """Initialize every widget/tab from the option file and persist each
    widget's dumped option into the config.

    *b* is the (unused) button/event argument supplied by the widget
    toolkit.  Each widget's dump() yields a single-entry dict whose key is
    the config key to store.
    """
    # Open the option file.
    # NOTE(review): the file handle is never closed — consider a
    # with-statement in a later pass.
    optionFile = open(option_file_path)
    options = json.load(optionFile)
    # Initialize the parcel data source widget.
    ps.initialize(options)
    _dict = ps.dump()
    key = list(_dict.keys())[0]
    value = _dict[key]
    config.set_value(key, value)
    # Do the same with the data reader tab.
    drt.initialize(options)
    _dict = drt.dump()
    key = list(_dict.keys())[0]
    value = _dict[key]
    config.set_value(key, value)
    # Get the signals available:
    # create the actual time series sources.
    ts_options = drt.dump()
    ts_source_factory = tss.time_serie_source_factory()
    ts_sources = ts_source_factory.get_time_series_sources(ts_options)
    # Build the signal/components dictionary.
    signal_components = {}
    for ts_source in ts_sources:
        signal_components[
            ts_source.get_signal_type()] = ts_source.get_components()
    # Initialize the preprocessing tab (needs the components found above).
    pt.signal_components = signal_components
    pt.initialize(options)
    _dict = pt.dump()
    key = list(_dict.keys())[0]
    value = _dict[key]
    config.set_value(key, value)
    # Merge the signal/component dictionaries (preprocessing may add some).
    sc_dict = {**signal_components, **(pt.get_signal_components())}
    # Re-initialize the marker detector tab.
    mdt.signal_components = sc_dict
    mdt.initialize(options)
    _dict = mdt.dump()
    key = list(_dict.keys())[0]
    value = _dict[key]
    config.set_value(key, value)
    # Initialize the marker aggregation tab.
    al.signals = list(sc_dict.keys())
    al.initialize(options)
    _dict = al.dump()
    key = list(_dict.keys())[0]
    value = _dict[key]
    config.set_value(key, value)
    # Initialize the scenario evidence widget from the aggregator/detector
    # outputs.
    action_list = al.dump()["marker-aggregator"]
    marker_list = mdt.dump()["marker-detectors"]
    sew.available_markers = scenario_evidence_widget.get_markers_names(
        action_list, marker_list)
    sew.initialize(options["scenario-evidence"])
    _dict = sew.dump()
    config.set_value("scenario-evidence", _dict)
    # Finally the data displayer.
    ddt.signal_components = sc_dict
    ddt.initialize(options)
    _dict = ddt.dump()
    key = list(_dict.keys())[0]
    value = _dict[key]
    config.set_value(key, value)
    print("Initialization Completed!")
def am_tagchunk_star(data, pc):
    """Parse an AXML start-tag chunk (type 0x00100102) at offset *pc*,
    including its attribute table (5 x 4 bytes per attribute).

    Side effects: records the offset of the <application> tag and of its
    `name` attribute (for later rewriting), and stores the chunk's end
    offset under 'STARTTAGCHUNKEND'.
    """
    # ChunkType: fixed 4 bytes 0x00100102 for a start-tag chunk.
    ChunkType = ApUtils.little_endian(data[pc:pc + 4])
    if ApUtils.printhex(ChunkType) == "0x00100102":
        ApUtils.apprint(" ===========Resolve Tag Chunk Start=============")
        ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                        "]ChunkType is : " + ApUtils.printhex(ChunkType))
    else:
        ApUtils.apprint(" [Error" + str(config.get_value('TAGCOUNTSTAR')) +
                        "]Tag Chunk start resolve error!")
        exit(-1)
    # Chunk size (4 bytes).
    ChunkSize = ApUtils.little_endian(data[pc + 4:pc + 8])
    ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                    "]ChunkSize is : " + ApUtils.printhex(ChunkSize))
    # Source line number (4 bytes).
    LineNumber = ApUtils.little_endian(data[pc + 8:pc + 12])
    ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                    "]LineNumber is : " + ApUtils.printhex(LineNumber))
    # Unknown area (4 bytes), read and ignored.
    Unknown = ApUtils.little_endian(data[pc + 12:pc + 16])
    # Namespace URI (4-byte string-pool index).
    # NOTE(review): `<= len(...)` looks off by one; index == len would
    # raise IndexError on the lookup below.
    NameSpcae = ApUtils.little_endian(data[pc + 16:pc + 20])
    if int(ApUtils.printhex(NameSpcae), 16) <= len(
            config.global_list['STRINGPOOL']):
        ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                        "]NameSpace is : " + config.global_list['STRINGPOOL'][
                            int(ApUtils.printhex(NameSpcae), 16)])
    else:
        ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                        "]NameSpace is : null")
    # Tag name (4-byte string-pool index).
    Name = ApUtils.little_endian(data[pc + 20:pc + 24])
    NameStr = config.global_list['STRINGPOOL'][int(ApUtils.printhex(Name), 16)]
    ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                    "]Tag name is : " + NameStr)
    # Remember where the <application> tag starts for later editing.
    if NameStr == "application":
        config.set_value('APPLICATIONPC', pc)
    # Flags: appears meaningless, typically 0x00140014.
    Flags = ApUtils.little_endian(data[pc + 24:pc + 28])
    # Attribute count (4 bytes).
    AttributeCount = ApUtils.little_endian(data[pc + 28:pc + 32])
    ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                    "]Attribute Count is : " +
                    ApUtils.printhex(AttributeCount))
    # Class attribute: also generally unused, 0x00000000.
    ClassAttibute = ApUtils.little_endian(data[pc + 32:pc + 36])
    # Attribute table: each entry is 5 fields x 4 bytes
    # (namespace, name, value-string, type, data).
    for i in range(int(ApUtils.printhex(AttributeCount), 16)):
        att_namespace = ApUtils.little_endian(data[pc + 36 + i * 5 * 4:pc +
                                                   36 + i * 5 * 4 + 4])
        att_name = ApUtils.little_endian(data[pc + 36 + i * 5 * 4 + 4:pc +
                                              36 + i * 5 * 4 + 4 + 4])
        att_valuestr = ApUtils.little_endian(
            data[pc + 36 + i * 5 * 4 + 4 + 4:pc + 36 + i * 5 * 4 + 4 + 4 + 4])
        att_type = ApUtils.little_endian(
            data[pc + 36 + i * 5 * 4 + 4 + 4 + 4:pc + 36 + i * 5 * 4 + 4 + 4 +
                 4 + 4])
        att_data = ApUtils.little_endian(
            data[pc + 36 + i * 5 * 4 + 4 + 4 + 4 + 4:pc + 36 + i * 5 * 4 + 4 +
                 4 + 4 + 4 + 4])
        # Log the attribute.
        ApUtils.apprint(" ----------------------------------------------")
        if int(ApUtils.printhex(att_namespace), 16) <= len(
                config.global_list['STRINGPOOL']):
            ApUtils.apprint(" [startTag" +
                            str(config.get_value('TAGCOUNTSTAR')) +
                            "]Attribute[" + str(i) + "]NameSpace is : " +
                            config.global_list['STRINGPOOL'][int(
                                ApUtils.printhex(att_namespace), 16)])
        else:
            ApUtils.apprint(" [startTag" +
                            str(config.get_value('TAGCOUNTSTAR')) +
                            "]Attribute[" + str(i) + "]NameSpace is null ")
        if int(ApUtils.printhex(att_name), 16) <= len(
                config.global_list['STRINGPOOL']):
            attr_name = config.global_list['STRINGPOOL'][int(
                ApUtils.printhex(att_name), 16)]
            ApUtils.apprint(" [startTag" +
                            str(config.get_value('TAGCOUNTSTAR')) +
                            "]Attribute[" + str(i) + "]Name is : " +
                            attr_name)
            # Record the android:name attribute of <application> so the
            # editor can later rewrite the application class name.
            if NameStr == "application":
                if attr_name == "name":
                    config.set_value('APPLICATIONNAMEFLAG', 1)
                    config.set_value('APPLICATIONNAMEINDEX',
                                     pc + 36 + i * 5 * 4)
        else:
            ApUtils.apprint(" [startTag" +
                            str(config.get_value('TAGCOUNTSTAR')) +
                            "]Attribute[" + str(i) + "]Name is null ")
        if int(ApUtils.printhex(att_valuestr), 16) <= len(
                config.global_list['STRINGPOOL']):
            ApUtils.apprint(" [startTag" +
                            str(config.get_value('TAGCOUNTSTAR')) +
                            "]Attribute[" + str(i) + "]Valus String is : " +
                            config.global_list['STRINGPOOL'][int(
                                ApUtils.printhex(att_valuestr), 16)])
        else:
            ApUtils.apprint(" [startTag" +
                            str(config.get_value('TAGCOUNTSTAR')) +
                            "]Attribute[" + str(i) + "]Valus String is null ")
        # Attribute type lives in the high byte of the type field.
        type = ApUtils.getAttrType(int(ApUtils.printhex(att_type), 16) >> 24)
        ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                        "]Attribute[" + str(i) + "]Type is : " + type)
        # Decode the attribute payload according to that type.
        dataAtt = ApUtils.getAttrData(
            int(ApUtils.printhex(att_type), 16) >> 24, att_data)
        ApUtils.apprint(" [startTag" + str(config.get_value('TAGCOUNTSTAR')) +
                        "]Attribute[" + str(i) + "]Data is : " + dataAtt)
    # Advance the end pointer past this chunk.
    config.set_value(
        'STARTTAGCHUNKEND',
        config.get_value('STARTTAGCHUNKSTAR') +
        int(ApUtils.printhex(ChunkSize), 16))
def craw_topTen(key):
    """Crawl the top-ten hotspot list for *key*, after pointing the
    hotspot CSV path at a file derived from the keyword."""
    hotspot_dir = cfg.get_value('CSV_FILENAME_HOTSPOT_DIR')
    cfg.set_value('CSV_FILENAME_HOTSPOT', hotspot_dir + key + '_TOP10.csv')
    print('craw_topTen weibo:', cfg._global_dict)
    topTen(key)
def am_stringchunk(data, pc):
    """Parse the AXML string chunk (type 0x001c0001) at offset *pc*:
    header fields, per-string/per-style offset tables, then the string
    pool itself (UTF-16 layout when XMLFLAG == 0, UTF-8 layout when 1).

    Fills config.global_list['STRINGPOOL'] and records several offsets in
    config for later rewriting.
    NOTE(review): statement nesting reconstructed from collapsed source.
    """
    # StringChunk type, fixed 4 bytes.
    ChunkType = ApUtils.little_endian(data[pc:pc + 4])
    if ApUtils.printhex(ChunkType) == "0x001c0001":
        ApUtils.apprint("===========Resolve String Chunk=============")
        ApUtils.apprint("[String]ChunkType is : " +
                        ApUtils.printhex(ChunkType))
    else:
        ApUtils.apprint("[Error]String Chunk resolve error!")
        exit(-1)
    # StringChunk size (4 bytes); remember where the size field lives.
    ChunkSize = ApUtils.little_endian(data[pc + 4:pc + 8])
    ApUtils.apprint("[String]ChunkSize is : " + ApUtils.printhex(ChunkSize))
    config.set_value('STRINGCHUNKSIZEINDEX', pc + 4)
    # Number of strings in the chunk (4 bytes).
    StringCount = ApUtils.little_endian(data[pc + 8:pc + 12])
    ApUtils.apprint("[String]StringCount is : " +
                    ApUtils.printhex(StringCount))
    config.set_value('STRINGCHUNKCOUNTINDEX', pc + 8)
    # Number of styles in the chunk (4 bytes).
    StyleCount = ApUtils.little_endian(data[pc + 12:pc + 16])
    ApUtils.apprint("[String]StyleCount is : " + ApUtils.printhex(StyleCount))
    # Unknown field (4 bytes).
    Unknown = ApUtils.little_endian(data[pc + 16:pc + 20])
    # A value of 0x00010000 (== 256 after the little-endian read) marks the
    # UTF-8 ("xml") string encoding.
    if int(ApUtils.printhex(Unknown), 16) == 256:
        ApUtils.apprint("[String]This is XML Flag!")
        config.set_value('XMLFLAG', 1)
    # Offset of the string pool (4 bytes).
    StringPoolOffset = ApUtils.little_endian(data[pc + 20:pc + 24])
    ApUtils.apprint("[String]StringPoolOffset is : " +
                    ApUtils.printhex(StringPoolOffset))
    config.set_value('STRINGPOOLOFFSET', StringPoolOffset)
    config.set_value('STRINGPOOLOFFSETINDEX', pc + 20)
    # Offset of the style pool (4 bytes).
    StylePoolOffset = ApUtils.little_endian(data[pc + 24:pc + 28])
    ApUtils.apprint("[String]StylePoolOffset is : " +
                    ApUtils.printhex(StylePoolOffset))
    config.set_value('STYLEPOOLOFFSET', StylePoolOffset)
    # Per-string offsets (4 bytes each).
    if int(ApUtils.printhex(StringCount), 16) != 0:
        StringOffsets = []
        for i in range(int(ApUtils.printhex(StringCount), 16)):
            StringOffset = ApUtils.little_endian(data[pc + 28 + i * 4:pc +
                                                      28 + i * 4 + 4])
            StringOffsets.append(StringOffset)
            # Keep the index of the last offset entry (for later edits).
            config.set_value('LASTSTRINGOFFSETSINDEX', pc + 28 + i * 4)
        # Accumulated list of per-string offsets.
        config.global_list['STRINGOFFSETS'].extend(StringOffsets)
    else:
        ApUtils.apprint("[String]There is no String")
    # Per-style offsets, located right after the string-offset table.
    if int(ApUtils.printhex(StyleCount), 16) != 0:
        StyleOffsets = []
        for j in range(int(ApUtils.printhex(StyleCount), 16)):
            StyleOffset = ApUtils.little_endian(
                data[pc + 28 + int(ApUtils.printhex(StringCount), 16) * 4 +
                     j * 4:pc + 28 +
                     int(ApUtils.printhex(StringCount), 16) * 4 + j * 4 + 4])
            StyleOffsets.append(StyleOffset)
        # Accumulated list of per-style offsets.
        config.global_list['STYLEOFFSETS'].extend(StyleOffsets)
    else:
        ApUtils.apprint("[String]There is no Style")
    # ======= Decode the strings themselves =======
    count = 0
    if config.get_value('XMLFLAG') == 0:
        # UTF-16 pool layout: 2-byte char count, 2 bytes per char,
        # 2-byte terminator.
        for index in config.global_list['STRINGOFFSETS']:
            str_index = pc + int(
                ApUtils.printhex(config.get_value('STRINGPOOLOFFSET')),
                16) + int(ApUtils.printhex(index), 16)
            str_len = int(
                ApUtils.printhex(
                    ApUtils.little_endian(data[str_index:str_index + 2])), 16)
            str_end = data[str_index + (str_len + 1) * 2:str_index +
                           (str_len + 1) * 2 + 2]
            # Try utf-8 first, then fall back to utf-16.
            try:
                string = ApUtils.read_asc(
                    data[str_index + 2:str_index + 2 +
                         str_len * 2]).decode("utf-8")
            except:
                try:
                    string = ApUtils.read_asc(
                        data[str_index + 2:str_index + 2 +
                             str_len * 2]).decode("utf-16")
                except:
                    string = "String resolve error !!"
            # Track the end of the decoded region.
            config.set_value('STRINGCHUNKEND',
                             str_index + 2 + str_len * 2 + 2)
            ApUtils.apprint(" [StringPool][" + str(count) + "]" + string)
            config.global_list['STRINGPOOL'].append(string)
            count += 1
        # Remember the last string's length and end index for later edits.
        config.set_value('LASTSTRINGLEN', str_len)
        config.set_value('LASTSTRINGINDEX',
                         config.get_value('STRINGCHUNKEND'))
    elif config.get_value('XMLFLAG') == 1:
        # UTF-8 pool layout: length byte at offset +1, one byte per char.
        for index in config.global_list['STRINGOFFSETS']:
            str_index = pc + int(
                ApUtils.printhex(config.get_value('STRINGPOOLOFFSET')),
                16) + int(ApUtils.printhex(index), 16)
            str_len = int(
                ApUtils.printhex(
                    ApUtils.little_endian(data[str_index + 1:str_index + 2])),
                16)
            # Try utf-8 first, then fall back to utf-16.
            try:
                string = ApUtils.read_asc(data[str_index + 2:str_index + 2 +
                                               str_len]).decode("utf-8")
            except:
                try:
                    string = ApUtils.read_asc(
                        data[str_index + 2:str_index + 2 +
                             str_len]).decode("utf-16")
                except:
                    string = "String resolve error !!"
            config.set_value('STRINGCHUNKEND', str_index + str_len + 2 + 1)
            ApUtils.apprint(" [StringPool][" + str(count) + "]" + string)
            config.global_list['STRINGPOOL'].append(string)
            count += 1
        config.set_value('LASTSTRINGLEN', str_len)
        config.set_value('LASTSTRINGINDEX',
                         config.get_value('STRINGCHUNKEND'))
    else:
        ApUtils.apprint("[String]XMLFLAG is Wrong!")
        exit(-1)
# Third-party imports for the TF1 prediction network.
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
import matplotlib.pyplot as plt
from trainDataPD import getData
import config as conf

# Initialise the shared config store and record how many issues to use.
conf._init()
conf.set_value("issue_num", 70)
issue_num = conf.get_value("issue_num")


def create_placeholders(n_x, n_y):
    """Create TF1 feed placeholders shaped (features, batch=None)."""
    X = tf.placeholder(tf.float32, [n_x, None], name="X")
    Y = tf.placeholder(tf.float32, [n_y, None], name="Y")
    return X, Y


def initialize_parameters(n_x, n_y, layer1Num, layer2Num):
    """Create weight/bias variables for a three-layer dense network.

    NOTE(review): this definition appears truncated in this chunk — b3 and
    the return statement are not visible here; do not edit without the
    rest of the file.
    """
    tf.set_random_seed(1)  # fixed random seed for reproducibility
    W1 = tf.get_variable("W1", [layer1Num, n_x],
                         initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b1 = tf.get_variable("b1", [layer1Num, 1],
                         initializer=tf.zeros_initializer())
    W2 = tf.get_variable("W2", [layer2Num, layer1Num],
                         initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b2 = tf.get_variable("b2", [layer2Num, 1],
                         initializer=tf.zeros_initializer())
    W3 = tf.get_variable("W3", [n_y, layer2Num],
                         initializer=tf.contrib.layers.xavier_initializer(seed=1))
def update_chaos_seed_cube():
    """Advance each chaos seed one step of the cubic map x -> 4x^3 - 3x,
    mutating the stored list in place."""
    seeds = config.get_value("CHAOS_SEED")
    for idx, seed in enumerate(seeds):
        seeds[idx] = 4.0 * math.pow(seed, 3) - 3.0 * seed
    config.set_value("CHAOS_SEED", seeds)
def update_chaos_seed_logistic():
    """Advance each chaos seed one step of the logistic map
    x -> 4x(1 - x), mutating the stored list in place."""
    seeds = config.get_value("CHAOS_SEED")
    for idx, seed in enumerate(seeds):
        seeds[idx] = seed * 4.0 * (1 - seed)
    config.set_value("CHAOS_SEED", seeds)
# plt.plot(x, y) # plt.show() stock_normal_X = stock_normal_data[['open', 'high', 'low', 'close', 'volume']] # 'weekday' stock_normal_y = stock_normal_data['close'] train_x, test_x, train_y, test_y = train_test_split(stock_normal_X, stock_normal_y, test_size=0.030, random_state=0, shuffle=False) test_x = stock_normal_X.copy() test_y = stock_normal_y.copy() cfg.set_value(max_close=max_close, min_close=min_close, stock_original_data=stock_data.sort_index(axis=0, ascending=True), test_idx=test_y[cfg.RECEPTIVE:].index) n_dim = train_x.shape[1] training_data = get_format_data(train_x, train_y, False) test_data = get_format_data(test_x, test_y, True) print("[Log_information]", type(training_data), len(training_data), type(test_data), len(test_data)) """final result file operation""" if os.path.exists('fianl_result.csv'): shutil.copyfile('fianl_result.csv', 'fianl_result.csv.bak') os.remove('fianl_result.csv') if not os.path.exists('fianl_result.csv'): print('fianl_result.csv has been saved and delete') for hiden_layer_idx in [1]: # [1, 2, 3, 4, 5] # Tag is :2018.11.23 if hiden_layer_idx == 1:
def global_data_init():
    """Initialise the global CSV directory/path settings, the default
    keyword, and the known-keyword list."""
    topic_dir = os.getcwd() + '/topic/'
    # All three data sources share the same topic directory.
    cfg.set_value("CSV_FILENAME_BAIDU_DIR", topic_dir)
    cfg.set_value("CSV_FILENAME_HOTSPOT_DIR", topic_dir)
    cfg.set_value("CSV_FILENAME_WEIBO_DIR", topic_dir)
    print(topic_dir, topic_dir, topic_dir)

    # Default keyword used before any search happens.
    cfg.set_value('DEFAULT_NAME', '科比')
    default_name = cfg.get_value('DEFAULT_NAME')

    # Default CSV paths derived from the default keyword.
    cfg.set_value("CSV_FILENAME_BAIDU", topic_dir + default_name + ".csv")
    cfg.set_value("CSV_FILENAME_HOTSPOT",
                  topic_dir + default_name + "_TOP10.csv")
    cfg.set_value("CSV_FILENAME_WEIBO",
                  topic_dir + default_name + "_WEIBO.csv")

    # Keywords already crawled, loaded from disk.
    keyword_list = get_keyword_list('data.txt')
    cfg.set_value('KEYWORD_LIST', keyword_list)
    print('read KEYWORD_LIST: ', keyword_list)
def wb_save_on_click(b):
    # Store the widget dump under the "scenario-evidence" key of the JSON
    # config.  *b* is the unused button/event argument.
    config.set_value("scenario-evidence", self.dump())
def resolver(am_path):
    """Drive the full AndroidManifest.xml (binary AXML) parse and return
    the raw file data.

    Parses header -> string chunk -> resource-id chunk, then loops over
    namespace/tag/text chunks until the end of the buffer, advancing the
    cursor via the *END offsets each sub-parser stores in config.
    (Python 2 source: uses print statements.)
    """
    print "[ApEditor]Begin to resolve AndroidManifest.xml"
    # Initialise the parser's global state.
    config.init_global()
    # Read the binary file into its hex representation.
    data_hex = ApUtils.am_read(am_path)
    # Parse the 8-byte header.
    ApResolver.am_header(data_hex)
    # Cursor just past the header.
    pc = config.get_value('AMHEADER')
    # Parse the string chunk.
    ApResolver.am_stringchunk(data_hex, pc)
    pc = config.get_value('STRINGCHUNKEND')
    # Adjust the cursor: scan up to 16 bytes ahead for the resource-id
    # chunk magic (the string chunk may be padded).
    config.set_value('RESOURCEIDCHUNKSTAR', pc)
    for ind in range(16):
        if ApUtils.printhex(
                ApUtils.little_endian(
                    data_hex[pc + ind:pc + ind + 4])) == "0x00080180":
            config.set_value('RESOURCEIDCHUNKSTAR',
                             config.get_value('RESOURCEIDCHUNKSTAR') + ind)
            break
    pc = config.get_value('RESOURCEIDCHUNKSTAR')
    # Parse the resource-id chunk.
    ApResolver.am_resourceidchunk(data_hex, pc)
    pc = config.get_value('RESOURCEIDCHUNKEND')
    #print pc
    # Main loop: dispatch on each chunk's 4-byte type marker.
    while pc < len(data_hex):
        # Namespace start chunk.
        if ApUtils.printhex(ApUtils.little_endian(
                data_hex[pc:pc + 4])) == "0x00100100":
            config.set_value('STARNAMESPACESTAR', pc)
            ApResolver.am_namespacechunk_star(data_hex, pc)
            pc = config.get_value('STARNAMESPACEEND')
        # Tag start chunk.
        elif ApUtils.printhex(ApUtils.little_endian(
                data_hex[pc:pc + 4])) == "0x00100102":
            config.set_value('STARTTAGCHUNKSTAR', pc)
            ApResolver.am_tagchunk_star(data_hex, pc)
            pc = config.get_value('STARTTAGCHUNKEND')
            config.set_value('TAGCOUNTSTAR',
                             config.get_value('TAGCOUNTSTAR') + 1)
        # Tag end chunk.
        elif ApUtils.printhex(ApUtils.little_endian(
                data_hex[pc:pc + 4])) == "0x00100103":
            config.set_value('ENDTAGCHUNKSTAR', pc)
            ApResolver.am_tagchunk_end(data_hex, pc)
            pc = config.get_value('ENDTAGCHUNKEND')
            config.set_value('TAGCOUNTEND',
                             config.get_value('TAGCOUNTEND') + 1)
        # Namespace end chunk.
        elif ApUtils.printhex(ApUtils.little_endian(
                data_hex[pc:pc + 4])) == "0x00100101":
            config.set_value('ENDNAMESPACESTAR', pc)
            ApResolver.am_namespacechunk_end(data_hex, pc)
            pc = config.get_value('ENDNAMESPACEEND')
        # Text chunk.
        elif ApUtils.printhex(ApUtils.little_endian(
                data_hex[pc:pc + 4])) == "0x00100104":
            config.set_value('TEXTCHUNKSTAR', pc)
            ApResolver.am_textchunk(data_hex, pc)
            pc = config.get_value('TEXTCHUNKEND')
        else:
            # Unknown chunk type: abort rather than mis-parse.
            ApUtils.apprint("[error]NO type of the chunk,the pc is " +
                            str(pc))
            exit(-1)
    # Parse finished.
    print "[ApEditor]Resolve AndroidManifest.xml success!!!"
    return data_hex
def ftp():
    """Interactive wizard: collect FTP/FTPS connection settings, verify
    them with a test login, and persist them on success.

    NOTE(review): the source of this function was credential-redacted
    ("******" runs); the username/password prompts and the valid_path
    pattern are reconstructed from the surviving fragments and from the
    parallel folders() wizard — confirm against the original file.
    """
    while True:
        print("Please follow the setup wizard:\n")
        protocol = input("Do you want use ftps instead of ftp? (y, n): ")
        if protocol in ("y", "Y"):
            config.set_value("ftp", "protocol", "ftps")
        elif protocol in ("n", "N"):
            config.set_value("ftp", "protocol", "ftp")
        else:
            functions.clear()
            print("Wrong input, please try again:\n")
            # FIX: restart the wizard instead of falling through to the
            # host prompt with no protocol set.
            continue
        config.set_value("ftp", "host", input("Enter ftp host: "))
        config.set_value("ftp", "username", input("Enter ftp username: "))
        config.set_value("ftp", "password",
                         getpass.getpass("Enter ftp password: "))
        functions.clear()
        valid_path = re.compile("[-./a-zA-Z0-9]+$")
        while True:
            path = input("Enter ftp path (default: root): ")
            if path == "":
                config.set_value("ftp", "path", "/")
                break
            elif valid_path.match(path):
                config.set_value("ftp", "path", path)
                break
            functions.clear()
            print("Invalid input. Please use [/ - . A-Z a-z 0-9]\n")
        # Verify the collected settings with a test login.
        message = login.ftp_login()
        functions.clear()
        if message == "host_error":
            print("Failed to reach host\n")
        elif message == "login_error":
            print("Failed to login\n")
        else:
            config.set_value("ftp", "enabled", "true")
            config.write_config()
            print("Successfully logged in\n")
            break