def ProcessInput(self, keyword):
    """Dispatch a keyboard shortcut to an application launcher.

    Known keywords launch an application; anything else is copied to the
    clipboard and logged.

    :param keyword: short command string typed by the user
        ("e" = Edge, "o" = OneNote, "r" = Revit, "ed" = EditPlus).
    """
    # Raw strings: the UWP app-folder paths contain backslash sequences
    # like "\M" that are invalid escapes in normal string literals and
    # raise warnings on modern Python.
    if keyword == "e":
        os.system(
            r'%windir%\explorer.exe shell:Appsfolder\Microsoft.MicrosoftEdge_8wekyb3d8bbwe!MicrosoftEdge'
        )
    elif keyword == "o":
        os.system(
            r'%windir%\explorer.exe shell:Appsfolder\Microsoft.Office.OneNote_8wekyb3d8bbwe!microsoft.onenoteim'
        )
    elif keyword == "r":
        win32api.ShellExecute(
            0, 'open', 'C:\\Program Files\\Autodesk\\Revit 2019\\Revit.exe',
            '', '', 1)
    elif keyword == "ed":
        win32api.ShellExecute(0, 'open',
                              'E:\\software\\EditPlus 3\\EditPlus.exe', '',
                              '', 1)
    else:
        # Unrecognized keyword: stash it on the clipboard for reuse.
        pyperclip.copy(keyword)
        Log.info(keyword)
def recluster_schedule(lock):
    """Run the daily re-cluster job forever.

    Registers ``job`` to fire at 00:05 every day, then polls the scheduler
    in an endless loop.

    :param lock: multiprocessing condition shared with the main process.
    """
    log = Log('re_cluster', 'logs/')
    schedule.every().day.at("00:05").do(job, lock, log)
    # Poll every 50 s so a one-minute schedule slot can never be missed.
    while True:
        schedule.run_pending()
        log.info('re_cluster_schedule processing...')
        time.sleep(50)
def forcast():
    """Score every still-unclassified movie with the Bayes models and persist it.

    Trains one classifier per category, blends the per-category probabilities
    into a single integer score, and writes it back via MovieDAO.
    """
    Log.info("do forcast")
    # One classifier per category, each trained from the database.
    models = {name: bayes.BayesTrainingFromDB(name)
              for name in ("local", "vr", "skip", "trash")}
    movies = MovieDAO.getMoviesByCondition(
        "local = 0 and trash = 0 and skip = 0")
    conn = SysConst.getConnect()
    for movie in movies:
        token = movie["av_number"] + movie["actor"] + movie["title"]
        p = {name: model.probable(token) for name, model in models.items()}
        # Weighted blend of the category probabilities, scaled to an int score.
        score = round(
            (p["vr"] - p["skip"] * 0.4 - p["trash"] * 0.01 + p["local"] * 0.3)
            * 10000)
        MovieDAO.updateMovieVRForcast(movie["av_number"], score, conn)
    conn.commit()
    conn.close()
def icon_crop(user_id, icon_path, coords): response = {} if not os.path.exists(icon_path): response["status"] = False response["message"] = "Not Found: %s" % icon_path return response image_path, ext = os.path.splitext(icon_path) store_dir = _get_image_dir(ICONS_DIR) thumb_name = "u%s%s%s" % (user_id, str(int(time.time())), ext) thumb_path = os.path.join(store_dir, thumb_name) middle_name = "u%s%sb%s" % (user_id, str(int(time.time())), ext) middle_path = os.path.join(store_dir, middle_name) img = Image.open(icon_path) left, top, width, height = tuple([int(i) for i in coords.split("|")]) box = (left, top, left + width, top + height) img_thumb = img.crop(box) big_size = (ICON_BIG_WIDTH, ICON_BIG_WIDTH) img_thumb = img_thumb.resize(big_size, Image.ANTIALIAS) img_thumb.save(middle_path, quality=150) thumb_size = (ICON_WIDTH, ICON_WIDTH) img_thumb = img_thumb.resize(thumb_size, Image.ANTIALIAS) img_thumb.save(thumb_path, quality=150) try: os.remove(icon_path) except Exception, ex: Log.info(ex)
def icon_crop(user_id, icon_path, coords):
    """Crop a user icon and store a big and a thumbnail version (Python 2).

    Near-duplicate of another icon_crop in this codebase that reads
    ICONS_DIR instead of ICON_PATH -- candidate for consolidation.

    :param user_id: id used in the generated file names.
    :param icon_path: path of the uploaded source image.
    :param coords: "left|top|width|height" crop box, pipe-separated.
    :return: error dict when the source is missing; otherwise None.
    """
    response = {}
    if not os.path.exists(icon_path):
        response["status"] = False
        response["message"] = "Not Found: %s" % icon_path
        return response
    image_path, ext = os.path.splitext(icon_path)
    store_dir = _get_image_dir(ICON_PATH)
    # NOTE(review): time.time() is read twice below; if the second ticks in
    # between, thumb and middle names get different stems -- confirm intent.
    thumb_name = "u%s%s%s" % (user_id, str(int(time.time())), ext)
    thumb_path = os.path.join(store_dir, thumb_name)
    middle_name = "u%s%sb%s" % (user_id, str(int(time.time())), ext)
    middle_path = os.path.join(store_dir, middle_name)
    img = Image.open(icon_path)
    left, top, width, height = tuple([int(i) for i in coords.split("|")])
    box = (left, top, left+width, top+height)
    img_thumb = img.crop(box)
    big_size = (ICON_BIG_WIDTH, ICON_BIG_WIDTH)
    img_thumb = img_thumb.resize(big_size, Image.ANTIALIAS)
    # NOTE(review): JPEG quality above ~95 is clamped by PIL -- confirm 150.
    img_thumb.save(middle_path, quality=150)
    thumb_size = (ICON_WIDTH, ICON_WIDTH)
    img_thumb = img_thumb.resize(thumb_size, Image.ANTIALIAS)
    img_thumb.save(thumb_path, quality=150)
    # Best-effort removal of the original upload; failures only logged.
    try:
        os.remove(icon_path)
    except Exception, ex:
        Log.info(ex)
def renameActor(actor, shortName):
    """Rename an actor's image-cache directory to the actor's short name.

    Does nothing unless the source directory exists and the target does not.

    :param actor: current directory name (full actor name).
    :param shortName: desired directory name (short name).
    """
    src = SysConst.getImageCachePath() + actor
    dst = SysConst.getImageCachePath() + shortName
    if not os.path.exists(src) or os.path.exists(dst):
        return
    Log.info(src + " <--> " + dst)
    os.rename(src, dst)
def rename():
    """Rename every actor's cached picture folder to the actor's short name.

    Any exception stops the batch; it is logged rather than propagated.
    """
    try:
        Log.info("rename pics begins")
        for actor in actorDAO.getAllActorsFully():
            renameActor(actor["name"], actor["short_name"])
    except Exception as err:
        Log.error("rename pics stopped: ")
        Log.exception(err)
def downloadPicsAllActors():
    """Download pictures for all actors, capping the batch at ~1000 files.

    Remaining actors are expected to be picked up by a later run; any
    exception stops the batch and is logged.
    """
    try:
        Log.info("download pics begins")
        downloaded = 0
        for actor in actorDAO.getAllActorsFully():
            downloaded += downloadActor(actor["short_name"])
            if downloaded > 1000:
                # Batch cap reached -- stop for this run.
                break
    except Exception as err:
        Log.error("download pics stopped: ")
        Log.exception(err)
def _merge(self, opponent):
    """Merge this component's archive into *opponent*'s archive file.

    The merged tarball replaces the opponent's file on disk. Always raises
    MergedWarning so the caller knows a merge (not a plain save) happened.

    :param opponent: component whose archive receives the merged result.
    :raises MergedWarning: unconditionally, after a successful merge.
    """
    from util.build_component import merge_components
    import os
    import tarfile
    Log.info('About to merge components.')
    with tarfile.open(self._component_file, 'r:gz') as mine, \
            tarfile.open(opponent._component_file, 'r:gz') as theirs:
        merged_file = merge_components(mine, theirs)
    # Swap the merged archive into the opponent's path.
    os.remove(opponent._component_file)
    os.rename(merged_file, opponent._component_file)
    raise MergedWarning('Merged components')
def _update(self, opponent):
    """Interactively ask whether to override this component with *opponent*.

    Prompts until the user answers; 'y' or 'j' copies the opponent's primary
    key and saves, 'n' (or plain Enter) aborts.

    :param opponent: component providing the replacement primary key.
    """
    while True:
        answer = self._input('Shall I override the component %s? (y/N)' %
                             self.c_id)
        answer = answer.strip().lower()
        # Bug fix: plain Enter used to raise IndexError on answer[0].
        # The prompt advertises N as the default, so empty input means "no".
        if not answer or answer[0] == 'n':
            Log.info('break!')
            return
        if answer[0] in ('y', 'j'):
            Log.info('overriding.')
            self.c_pk = opponent.c_pk
            self.save()
            return
def process_output(self, output, data):
    """Publish *output*, or route an error message when no transcript exists.

    :param output: serialized result to publish, or None on failure.
    :param data: original message dict (used for its "_id" when logging).
    """
    if output is None:
        # No transcript: log, build an error payload, push to the error queue.
        error = "transcript was not generated"
        Log.info(data["_id"], Log.LEVEL_ERROR, error)
        payload = self.process_error(data, error)
        self.publish(payload, config.ERROR_QUEUE_NAME, config.ERROR_ROUTE_KEY,
                     config.ERROR_QUEUE_EXCHANGE_NAME,
                     config.ERROR_QUEUE_EXCHANGE_TYPE)
        return
    self.publish(output, config.PUBLISH_QUEUE_NAME, config.PUBLISH_ROUTE_KEY,
                 config.PUBLISH_QUEUE_EXCHANGE_NAME,
                 config.PUBLISH_QUEUE_EXCHANGE_TYPE)
def __init__(self, tag):
    """Train a two-class ("pos"/"neg") naive-Bayes model from stored rows.

    Builds per-category word counts via self.train(), prunes rare words,
    then converts counts to smoothed word probabilities.

    :param tag: label carried on the instance (stored, not used here).
    """
    self.vocabulary = {}   # word -> total occurrence count across categories
    self.prob = {}         # category -> {word: count, later probability}
    self.totals = {}       # category -> total word count
    #self.stopwords = "、 -=1234567890"
    self.categories = {"pos", "neg"}
    self.tag = tag
    self.totalProb = {}    # category -> prior P(category)
    self.rows = {}         # category -> number of training rows
    Log.info("Counting ...")
    totalRows = 0
    for category in self.categories:
        #print(' category: ' + category)
        (self.prob[category],
         self.totals[category],
         self.rows[category]) = self.train(category)
        totalRows += self.rows[category]
    # Priors from row frequencies.
    for category in self.categories:
        self.totalProb[category] = self.rows[category] / totalRows
    #print(self.totalProb)
    # I am going to eliminate any word in the vocabulary
    # that doesn't occur at least 3 times
    toDelete = []
    for word in self.vocabulary:
        if self.vocabulary[word] < 3:
            # mark word for deletion
            # can't delete now because you can't delete
            # from a list you are currently iterating over
            toDelete.append(word)
    # now delete
    for word in toDelete:
        del self.vocabulary[word]
    # now compute probabilities
    vocabLength = len(self.vocabulary)
    # print("Computing probabilities:")
    for category in self.categories:
        # print(' ' + category)
        # Laplace-style smoothing denominator.
        denominator = self.totals[category] + vocabLength
        for word in self.vocabulary:
            if word in self.prob[category]:
                count = self.prob[category][word]
            else:
                # NOTE(review): unseen words default to count 1, giving them
                # probability (1+1)/denominator. Classic Laplace smoothing
                # uses 0 here -- confirm this offset is intentional.
                count = 1
            self.prob[category][word] = (float(count + 1) / denominator)
def getMagnetFromDB():
    """Fetch magnet links for movies that have none yet (max 20 per run).

    Movies with no magnet found get their last-read time bumped so they are
    retried later. Errors are logged; the collected list is always returned.

    :return: list of magnet URIs found in this run.
    """
    #images = DiskIndex.getAllImages(SysConst.getImageTempPath())
    Log.info("download magnets begins")
    collected = []
    try:
        for movie in MovieDAO.getNoMagnetMovies():
            magnet = getMagnet(movie["av_number"], None)
            if not magnet:
                # Nothing found: record the attempt so the movie is retried.
                MovieDAO.updateMovieLastReadTime(movie["av_number"])
                continue
            Log.info(magnet)
            MovieDAO.updateMovieMagnet2(movie["av_number"], magnet)
            collected.append(magnet)
            if len(collected) >= 20:
                break
    except Exception as err:
        Log.exception(err)
    finally:
        for magnet in collected:
            Log.info(magnet)
        Log.info("download magnets end")
        return collected
def saveImage(av):
    """Download the remote cover image for *av* unless it is already cached.

    :param av: movie dict with "actor", "short_name", "av_number" and
        "remote_cover" keys.
    :return: True when a download was performed, False when the file existed.
    """
    actor = av["actor"]  # unused, but kept so a missing key still raises
    shortName = av["short_name"]
    avNumber = av["av_number"]
    # Rewrite the host, then strip slashes.
    # NOTE(review): replace("//", "") removes EVERY "//", including the one
    # in "http://" -- confirm the URL scheme is intentionally dropped.
    url = (av["remote_cover"]
           .replace("www.nh87.cn", "imgs.nh87.cn")
           .replace("//", ""))
    filePath = getFilePath(shortName, avNumber)
    if checkFile(filePath):
        return False
    Log.info("begin save file: " + filePath)
    Log.info(url)
    saveFile(url, filePath)
    return True
def process_data(self, data):
    """Run the demo service for one queue message and publish the result.

    On any exception the error payload is pushed to the error queue instead.

    :param data: decoded message dict; must contain "_id" for logging.
    """
    # only change this method to call the service layer
    try:
        output = DemoService.doSomething()
        if output != "":
            # NOTE(review): this serializes the incoming *data*, not the
            # service result -- looks like demo-skeleton behavior; confirm.
            output = json.dumps(data)
        else:
            output = None
        Log.info(data["_id"], Log.LEVEL_OUTPUT, "Done")
        self.process_output(output, data)
    except Exception as e:
        Log.info(data["_id"], Log.LEVEL_ERROR, str(e))
        errorData = self.process_error(data, str(e))
        self.publish(errorData, config.ERROR_QUEUE_NAME, config.ERROR_ROUTE_KEY,
                     config.ERROR_QUEUE_EXCHANGE_NAME,
                     config.ERROR_QUEUE_EXCHANGE_TYPE)
def callback(self, channel, method, properties, body):
    """Pika consumer callback: decode the message and process it off-thread.

    The worker runs in a thread while this callback keeps the connection
    alive, so long jobs do not trip pika's heartbeat timeout. The message
    is always acked, even on failure.

    :param channel: pika channel the message arrived on.
    :param method: delivery metadata (used for the ack tag).
    :param properties: message properties (unused).
    :param body: raw JSON message bytes.
    """
    try:
        data = json.loads(body)
        # Workaround: pika times out if processing takes more than ~180 s,
        # so process in a thread and keep pumping the connection here.
        thread = threading.Thread(target=self.process_data, args=(data, ))
        thread.start()
        while thread.is_alive():
            # channel._connection.sleep services heartbeats while we wait.
            # NOTE(review): _connection is a private pika attribute.
            channel._connection.sleep(1.0)
    except ValueError:
        Log.info("000", Log.LEVEL_ERROR, "Invalid Json: " + str(body))
    except Exception as e:
        # NOTE(review): if json.loads itself raised a non-ValueError, `data`
        # is unbound here and data["_id"] would raise NameError -- confirm.
        Log.info(data["_id"], Log.LEVEL_ERROR, str(e))
        errorData = self.process_error(data, str(e))
        self.publish(errorData, config.ERROR_QUEUE_NAME, config.ERROR_ROUTE_KEY,
                     config.ERROR_QUEUE_EXCHANGE_NAME,
                     config.ERROR_QUEUE_EXCHANGE_TYPE)
    finally:
        # Ack unconditionally so the message is never redelivered.
        channel.basic_ack(delivery_tag=method.delivery_tag)
def add(cls, component):
    """ A given package will be added to the local database.

    A package has to be in the ctlweb-format which can be found in our
    docmuentation.

    :param component: path to the packaged component file.
    :return: the newly created object, or None when saving failed
        with a ValueError.
    """
    from os import path
    Log.debug("add(package): adding package to local database")
    # Create Database entry
    data = Component._unpack(component)
    comp = cls.create(data)
    comp._component_file = component
    try:
        comp.save()
    except NoSuchTable:
        # First use: create the table, then retry the save.
        comp.create_table().save()
    except ValueError as e:
        Log.critical('%s' % e.args[0])
        return
    except MergedWarning as e:
        # Raised by the merge path: the component was folded into an
        # existing one rather than inserted.
        Log.info('Merged components')
        return comp
    # Copy package to store
    # NOTE(review): _component_file is set to None and then read back as the
    # copy target -- presumably a property that derives a default store path
    # when None; confirm, otherwise shutil.copy gets None.
    comp._component_file = None  # reset the component to default.
    try:
        comp_log = ComponentLog.get_exactly(comp['c_exe_hash'])
        comp_log.remove()
    except (InstanceNotFoundError, NoSuchTable):
        pass
    import shutil
    try:
        target_name = comp._component_file
        shutil.copy(component, target_name)
    except IOError:
        # NOTE(review): `store` is not defined in this scope -- this line
        # would raise NameError instead of logging; confirm intended name.
        Log.critical("Unable to save component to Manifest store in %s" %
                     store)
        exit(1)
    return comp
def saveMovieToDB(skipActors):
    """Crawl up to 200 actors' pages, store their movies, then re-score.

    :param skipActors: when falsy, first scan for newly appeared actors.
    :return: the movie list of the LAST actor processed (see note below),
        or None when an exception was caught.
    """
    try:
        if not skipActors:
            newActors = ActorFinder.findActors()
            if len(newActors) > 0:
                Log.info("find new actors:")
                Log.info(newActors)
            else:
                Log.info("no found new actor.")
        actors = ActorDAO.getAllActors()
        allMovies = []
        count = 200  # per-run cap on actors crawled
        for actor in actors:
            count = count - 1
            if count < 0:
                break
            Log.info("begin to read: " + str(actor))
            # NOTE(review): allMovies is REASSIGNED each iteration, so only
            # the last actor's movies survive to the return -- confirm
            # whether this should accumulate instead.
            allMovies = indexActor.saveActorToDB(url=actor["url"],
                                                 actor=actor["name"],
                                                 cache=False,
                                                 shortName=actor["short_name"])
            #print("find new movies: " + str(newMovies))
            if len(allMovies) > 0:
                ActorDAO.updateLastReadTime(actor["name"])
                # Throttle between actors to be polite to the remote site.
                time.sleep(10)
            else:
                Log.info("not found " + actor["name"] + "'s movies.")
        forcast()
        return allMovies
    except Exception as e:
        Log.exception(e)
def run_translate():
    """Translate the next batch of movie titles (Japanese -> Chinese).

    Fetches a batch, translates each title and writes it back within one
    connection. Errors are logged, never raised.
    """
    try:
        movies = get_batch_movies()
        if not movies:
            Log.info("nothing to translate")
            return
        Log.info("begin translate movies: " + str(len(movies)))
        with getConnect() as conn:
            for movie in movies:
                translated = Translator.jp_to_cn(movie['title'])
                update_movie_title_cn(conn, movie['av_number'], translated)
        Log.info("translate end")
    except Exception as e:
        Log.exception(e)
def readMagnet(avNumber, skipMagnet):
    """Scrape a torrent-listing page and pick the best magnet link.

    Preference order: a link whose title matches a "special" uploader
    (currently "Thz.la"), otherwise the largest listed file. Known-bad
    magnets are filtered out.

    :param avNumber: id used to build the listing URL.
    :param skipMagnet: NOTE(review): accepted but never used -- confirm.
    :return: a magnet URI, or "" when nothing usable was found.
    """
    #sleep_download_time = 10
    #time.sleep(sleep_download_time)
    #url = "http://www.clpig.org/torrent/"+avNumber+".html"
    #url = "http://www.cilizhuzhu.org/torrent/" + avNumber + ".html"
    url = "http://www.cilizhu2.com/torrent/" + avNumber + ".html"
    Log.info("begin to read: " + url)
    '''
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/51.0.2704.63 Safari/537.36'}
    req = urllib.request.Request(url=url, headers=headers)
    res = urllib.request.urlopen(req)
    html = res.read()
    res.close()
    #html = urllib.request.urlopen(url).read()
    '''
    html = httpfetch2.getHtml2(url)
    content = pq(html).find("div.btsowlist div.row")
    if len(content) == 0:
        Log.info("nothing to be found: " + url)
        return ""
    wrongMagnets = WrongMagnetDAO.findWrongMagnets(avNumber)
    resultList = []
    specialTitles = ["Thz.la"]
    foundSpecial = False
    specialHref = ""
    #print(content)
    for el in content:
        el = pq(el)
        #print(el)
        href = el.find("a").attr("href")
        title = el.find("a").attr("title")
        #print(href, title)
        if not href:
            # Rows without a link mark the end of the listing.
            break
        magnet = getMagnet(href)
        if magnet in wrongMagnets:
            # Skip magnets previously flagged as wrong for this avNumber.
            continue
        for specialTitle in specialTitles:
            if specialTitle in title:
                foundSpecial = True
                specialHref = href
        # NOTE(review): once a special title is seen, NO further rows are
        # added to resultList (this condition stays False) -- confirm.
        if not foundSpecial:
            size = el.find("div.col-lg-1").text()
            sizeNumber = convertToNumber(size)
            resultList.append((href, sizeNumber))
    # NOTE(review): if the special row was the FIRST match, resultList may be
    # empty and this returns "" before the special magnet below -- confirm.
    if len(resultList) == 0:
        Log.info("nothing to be found: " + url)
        return ""
    if foundSpecial:
        return getMagnet(specialHref)
    else:
        # Largest file first.
        resultList.sort(key=lambda item: item[1], reverse=True)
        #print(resultList[0])
        return getMagnet(resultList[0][0])
    #print(readMagnet("ipz806"))
class Spider(object):
    '''
    Spider: a focused web crawler (Python 2 / urllib2 based).

    Reads seed URLs from a file, crawls with a shared queue across worker
    threads, and writes fetched pages under an output directory.
    '''

    def __init__(self,
                 name='Spider',
                 url_list_file='./urls',
                 output_path='./output',
                 interval=1,
                 timeout=1,
                 silent=False):
        '''
        @name: string, name of this crawler
        @url_list_file: string, seed-URL file
        @output_path: string, directory for crawled output
        @interval: int, delay between fetches
        @timeout: int, per-request timeout
        @silent: bool, when True log at INFO instead of DEBUG
        '''
        # Install a cookie-aware urllib2 opener (optional).
        cj = cookielib.LWPCookieJar()
        cookie_support = urllib2.HTTPCookieProcessor(cj)
        self.opener = urllib2.build_opener(cookie_support,
                                           urllib2.HTTPHandler)
        urllib2.install_opener(self.opener)
        # Default request headers (optional).
        self.headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36'
        }
        self.url_list_file = url_list_file
        self.output_path = output_path
        self.interval = interval
        self.timeout = timeout
        level = 'INFO' if silent else 'DEBUG'
        self.log = Log(name, level, 'spider.log', 'a')
        # Thread-shared queue of URLs waiting to be fetched.
        self.queue = Queue()
        # Set of already-seen URLs for deduplication.
        self.url_set = set()
        # Lock guarding shared state across worker threads.
        self.lock = Lock()
        # Regex capturing the scheme+host root of a URL.
        self.base_pattern = r'^(https?:\/\/[a-z0-9\-\.]+)[\/\?]?'
        if not os.path.exists(self.url_list_file):
            # Seed file missing: nothing to crawl, abort the process.
            self.log.error('%s dose not exist. Exit program !'
                           % (self.url_list_file))
            os._exit(0)
        if not os.path.exists(self.output_path):
            # Output directory missing: create it.
            self.log.info('Create new directory %s' % (self.output_path))
            os.makedirs(self.output_path)

    '''
    def __decodeHtml__(self, html):
        """
        Decode Html
        @html: string, 原生html内容
        return: 返回解码后的html内容
        """
        try:
            encoding = chardet.detect(html)['encoding']
            if encoding == 'GB2312':
                encoding = 'GBK'
            else:
                encoding = 'utf-8'
            return html.decode(encoding, 'ignore')
        except Exception, e:
            self.log.error("Decode error: %s.", e.message)
            return None
    '''

    def __request__(self, url, threadID=-1, data=None):
        '''
        Request URL.
        @url: string, URL to fetch
        @threadID: int, id of the calling worker thread (for log lines)
        @data: object, payload for a POST request (None = GET)
        return: response body string, or None on any error
        '''
        try:
            req = urllib2.Request(url, data, self.headers)
            res = urllib2.urlopen(req, timeout=self.timeout).read()
            self.log.debug('Thread-%d Get %s' % (threadID, url))
            return res
        except Exception, e:
            self.log.error('Thread-%d in __requests__: %s %s' %
                           (threadID, e.message, url))
            return None
def log():
    """Smoke-test helper: emit a greeting through the util logger."""
    from util import Log
    message = "Hello from test.py"
    Log.info(message)
# 批量update ids = list(id_vectors.keys()) case = [ 'when {} then {} '.format(ids[i], labels[i]) for i in range(len(ids)) ] case = ''.join(case) sql = "update t_cluster set cluster_id = case id {} end where id in {}".format( case, trans_sqlin(ids)) if db.update(sql): db.commit() logger.info('update t_cluster success, re-cluster complete') else: logger.warning('update t_cluster failed') # todo 是否删除dbscan的噪声数据 cost_time = round(time.time() - start) logger.info('re-cluster cost time %d s' % cost_time) re_cluster = False lock.notify() lock.release() if __name__ == '__main__': process_cond = multiprocessing.Condition() cluster_process = multiprocessing.Process(target=recluster_schedule, args=(process_cond, )) cluster_process.start() logger.info('start re-cluster process') process(process_cond)
def grab_proc(url, rate, camera_id):
    '''
    Frame-grab worker process: detect faces, track them briefly, then upload
    the best crops to SeaweedFS and announce them on Kafka.

    :param url: camera stream URL.
    :param rate: stream frame rate (frames per second).
    :param camera_id: camera identifier carried in log lines and messages.
    :return: None (runs until the grab queue stays empty for 20 s).
    '''
    logger = Log('grab-proc' + str(os.getpid()), 'logs/')
    logger.info('初始化seaweedfs')
    master = WeedClient(config.get('weed', 'host'),
                        config.getint('weed', 'port'))
    logger.info('初始化Kafka')
    kafka = Kafka(bootstrap_servers=config.get('kafka', 'boot_servers'))
    topic = config.get('camera', 'topic')
    face_tool = Face(config.get('api', 'face_server'))
    detect_count = 0  # counts frames between full detections
    # Frames per tracking window (track_internal seconds at `rate` fps).
    frame_internal = track_internal * rate
    trackable = False
    # Start the frame-grabbing thread feeding the bounded queue.
    q = queue.Queue(maxsize=100)
    t = GrabJob(q, camera_id, url, rate,
                Log('grab-proc' + str(os.getpid()) + '-thread', 'logs/'),
                config)
    t.start()
    while True:
        try:
            img = q.get(timeout=20)
            if detect_count % frame_internal == 0:
                # Detection phase: run the face detector on this frame.
                detect_count = 0
                b64 = mat_to_base64(img)
                t1 = time.time()
                detect_result = face_tool.detect(b64)
                logger.info('detect cost time: ',
                            round((time.time() - t1) * 1000), 'ms')
                if detect_result['error_message'] != '601':
                    # NOTE(review): '601' appears to be the SUCCESS code of
                    # this face API -- confirm against the service docs.
                    logger.warning('verifier detector error, error_message:',
                                   detect_result['error_message'])
                    continue
                tracker = cv2.MultiTracker_create()
                latest_imgs = []
                timestamp = round(time.time())
                for face_num in range(detect_result['detect_nums']):
                    tmp = detect_result['detect'][face_num]
                    bbox = (tmp['left'], tmp['top'], tmp['width'],
                            tmp['height'])
                    tracker.add(cv2.TrackerKCF_create(), img, bbox)
                    face_b64 = face_tool.crop(bbox[0], bbox[1], bbox[2],
                                              bbox[3], b64, True)
                    latest_img = {
                        'image_base64': face_b64,
                        'bbox': bbox,
                        'landmark':
                            detect_result['detect'][face_num]['landmark'],
                        'time': timestamp
                    }
                    # Face-quality filter: frontal, good quality, high score.
                    if tmp['sideFace'] == 0 and tmp[
                            'quality'] == 1 and tmp['score'] > 0.95:
                        latest_imgs.append(latest_img)
                # Only enter the tracking phase when a usable face was kept.
                if len(latest_imgs) > 0:
                    trackable = True
                else:
                    trackable = False
            elif trackable:
                # Tracking phase: follow the detected faces across frames.
                ok, bboxs = tracker.update(img)
                if ok and detect_count < frame_internal - 1:
                    if detect_count % 10 == 0:
                        logger.info('tracking..., detect_count = %d' %
                                    detect_count)
                    detect_count += 1
                    continue
                else:
                    # Tracking window over (or lost): publish the faces
                    # captured at detection time.
                    logger.info('tracking over! detect_count = %d' %
                                detect_count)
                    for latest in latest_imgs:
                        logger.info([camera_id], 'track person success!')
                        face_b64 = latest['image_base64']
                        # save img to seaweed fs
                        logger.info([camera_id],
                                    'save grabbed detect_result to seaweed fs')
                        assign = master.assign()
                        logger.info([camera_id], 'assign result:', assign)
                        ret = master.upload(assign['url'], assign['fid'],
                                            base64_to_bytes(face_b64),
                                            assign['fid'] + '.jpg')
                        logger.info([camera_id], 'upload result:', ret)
                        # send to Kafka
                        url = 'http' + ':' + '//' + assign[
                            'url'] + '/' + assign['fid']
                        logger.info('[', camera_id, ']', 'img url:', url)
                        msg = json.dumps({
                            'url': url,
                            'time': latest['time'],
                            'camera_id': camera_id,
                            'landmark': latest['landmark']
                        })
                        logger.info([camera_id], 'send to kafka: ', msg)
                        kafka.send(topic, msg)
                    # Fall back to the detection phase.
                    detect_count = 0
                    trackable = False
                    logger.info('restart detection')
            else:
                # Nothing detected last time: idle until the next detection.
                if detect_count % 10 == 0:
                    logger.info('detect 0 detect_result, do not track',
                                'detect count= ', detect_count)
                detect_count += 1
                continue
        except queue.Empty:
            # Grab thread stalled for 20 s -- treat the stream as dead.
            logger.error('grab queue empty error, exit')
            break
        detect_count += 1
    logger.info('抓图进程终止')
class AutoHelper(object):
    """Game-automation helper driving an Android emulator via ADB.

    Loads a config module, connects ADB and the Baidu OCR client, and keeps
    per-game click positions in ingame.json.
    """

    def __init__(self, config_name='sample_config', *args):
        try:
            # Get the current abspath
            self.path = getcwd()
            # Instantiate util.Log module
            self.log = Log()
            # Import config file
            self.config = importlib.import_module(config_name,
                                                  package='AutoHelper')
            self.log.info(f'Import config success: {config_name}.py.')
            if not self.__verify_config():
                self.log.warning(
                    'Config file is not compliant. Missing args will be blank.'
                )
            # Instantiate util.ADB_client to interact with emulator
            self.adb = ADBClient(self.config.ADB_root, self.config.ADB_host)
            # Instantiate util.Ocr to interact with Baidu-api
            self.ocr = Ocr(self.config.APP_ID, self.config.API_KEY,
                           self.config.SECRET_KEY)
            # Initialize
            with open('ingame.json', mode='r') as file:
                self.ingame = json.load(file)
            self.initialize()
        except ImportError as e:
            self.log.critical(f'Import config error:', e)
        except ADBError as e:
            self.log.critical(f'ADB Error:', e)
        except OCRError as e:
            self.log.critical(f'Connecting to Baidu-api error:', e)
        except Exception as e:
            self.log.warning('Unknown Error:', e)

    # Initializers
    def initialize(self):
        """
        Initialize in-game info (Includes window size, click positions etc.)
        And save into ingame.json
        :return: None
        """

        def homepage():
            # Learn the strength-display position from the home page.
            self.log.info(
                "Please go to home page, when finished, input anything below:")
            self.log.input()
            self.log.info("Start recognizing...")
            with self.adb.screen_shot(screenshot_path):
                pic_content = self.ocr.scan(screenshot_path)
                if pic_content['flag']['is_homepage'] == 'TRUE':
                    self.ingame['location']['homepage_strength'] = pic_content[
                        'location']['strength']
                    self.log.info('Done')
                else:
                    raise AutoHelperError('Incorrect page')

        def mission_page():
            # Learn the strength display and the start-mission button.
            self.log.info(
                "Please go to mission page (anyone with '开始行动' button), "
                "when finished, input anything below:")
            self.log.input()
            self.log.info("Start recognizing...")
            with self.adb.screen_shot(screenshot_path):
                pic_content = self.ocr.scan(screenshot_path)
                if pic_content['flag']['is_mission_page'] == 'TRUE':
                    self.ingame['location']['mission_page_strength'] = pic_content[
                        'location']['strength']
                    self.ingame['location']['start_mission'] = pic_content[
                        'location']['start_mission']
                    self.log.info('Done')
                else:
                    raise AutoHelperError('Incorrect page')

        def preparation():
            # Learn the confirm button on the preparation page.
            self.log.info(
                'The program will automatically go to preparation page(without actually start the mission)\n'
                "Please don't disturbance the program")
            self.adb.click(
                self.confuse(self.ingame['location']['start_mission']))
            with self.adb.screen_shot(screenshot_path):
                pic_content = self.ocr.scan(screenshot_path)
                if pic_content['flag']['is_preparation_page'] == 'TRUE':
                    self.ingame['location']['prepare_start'] = pic_content[
                        'location']['prepare_start']
                    self.log.info('Done')
                else:
                    raise AutoHelperError('Incorrect page')

        try:
            # Detect if it's first time
            if self.ingame['FIRST_TIME'] == "TRUE":
                self.log.info(
                    'First time using AA-Helper, Initializing, please follow the instruction.'
                )
                # TODO
                # Detect window size
                self.ingame['window_size'] = self.adb.get_window_size()
                # Set screenshot save path
                screenshot_path = path.join(self.path, 'pictures',
                                            'Screenshot.png')
                # Detect homepage
                self.retry(homepage)
                # Detect mission page
                self.retry(mission_page)
                # Detect start button in preparation page
                self.retry(preparation)
                # Change the first-time status into false
                # self.ingame['FIRST_TIME'] = "FALSE" TODO: Uncomment
                # Writing into file
                with open('ingame.json', mode='w') as file:
                    file.write(json.dumps(self.ingame))
        except FileNotFoundError as e:
            self.log.warning('Cannot found ingame.json, creating...', e)
            with open('ingame.json', mode='w'
                      ) as file:  # Create the file and set FIRST_TIME to TRUE
                self.ingame = dict()
                self.ingame['FIRST_TIME'] = "TRUE"
                file.write(json.dumps(self.ingame))
        except JSONDecodeError as e:
            self.log.warning('JSON decoder error:', e)
        except Exception as e:
            self.log.warning('Unknown error during initializing:', e)

    def retry(self, func, max_time=3):
        """Call *func*, retrying up to *max_time* times before re-raising.

        :param func: zero-argument callable to run.
        :param max_time: remaining retry budget.
        :return: whatever *func* returns.
        """
        try:
            return func()
        except Exception as e:
            self.log.warning('Unknown error:', e)
            if max_time > 0:
                self.log.info(
                    f"Error while running '{func.__name__}', retrying ({max_time} time(s) left)."
                )
                return self.retry(func, max_time - 1)
            else:
                raise

    def __verify_config(self):  # TODO
        """
        To verify if the config file is compliant format.
        :return:(bool)
        """
        return True

    # Battle functions
    def battle(self):  # TODO
        """
        A overall battle module
        :return: None
        """
        try:
            self.__start_battle()
        except Exception as e:
            self.log.warning("Unknown Error:", e)

    def __start_battle(self, times=0):  # TODO
        """
        Simply click on '开始战斗' for times
        :param times: Times of clicking '开始战斗' button
        :return: None
        """
        try:
            self.adb.click(
                self.confuse(self.ingame['location']['start_mission']))
        except Exception as e:
            self.log.warning('Unknown error:', e)

    def confuse(self, loca):
        """Jitter a click inside the button's box so taps look human.

        :param loca: dict with 'left', 'top', 'width', 'height'.
        :return: (x, y) tuple randomized within the box, or None on error.
        """
        try:
            return loca['left'] + uniform(-0.49, 0.49) * loca['width'], \
                   loca['top'] + uniform(-0.49, 0.49) * loca['height'],
        except Exception as e:
            self.log.warning("Unknown error:", e)