def device_manager(context):
    """Run the device service publisher and manager until interrupted.

    Builds the worker and publisher endpoints from configuration, stores
    the created services in module globals, and blocks on the IOLoop.
    A KeyboardInterrupt stops the loop and shuts both services down.
    """
    global manager
    global publisher
    worker_address = "tcp://{0}:{1}".format(
        Config.get('services', 'worker_address'),
        Config.get('services', 'worker_port'))
    publisher_address = "tcp://{0}:{1}".format(
        Config.get('device_service', 'publisher_address'),
        Config.get('device_service', 'publisher_port'))
    service = Config.get('device_service', 'service_name')
    publisher = DeviceServicePublisher(context, publisher_address)
    manager = DeviceServiceManager(context, worker_address, service)
    try:
        IOLoop.instance().start()
    except KeyboardInterrupt:
        IOLoop.instance().stop()
        publisher.shutdown()
        manager.shutdown()
    return
def get_old_articles_for_query(query):
    """
    Fetches items for a query that are older than the oldest item that has
    already been fetched.

    Queues one beanstalkd job per API page needed, then saves and returns
    the (mutated) query object.
    """
    # NOTE(review): logged at ERROR level although this reads like a trace
    # message -- confirm the intended log level.
    logging.error("$$$$ nytArticles.get_old_articles_for_query[]")
    n_to_fetch = 10
    # Record that we intend to fetch n_to_fetch more items.
    query.doc['n_fetched'] = query.doc['n_fetched'] + n_to_fetch
    # Number of API requests needed to cover n_to_fetch items.
    # NOTE(review): under Python 2 the inner "/" is integer division, so
    # math.ceil never rounds up here -- hence the == 0 guard below.
    n_requests = int(math.ceil(n_to_fetch / int(Config.get("nyt_article")["response_size"])))
    if n_requests == 0:
        n_requests = 1
    # Items already cached determine the page offset to start from.
    prior_cached = len(query.children)
    logging.error(prior_cached)
    prior_offset = int(int(prior_cached) / int(Config.get("nyt_article")["response_size"]))
    logging.error(prior_offset)
    # Parameters shared by every queued request; 'functions' is the
    # processing pipeline applied to each API response.
    req_pars = {
        'query_id': query.id(),
        'functions': [api.make_api_request,cache_response,fetch_comments]
    }
    for i in range(0,n_requests):
        request = {}
        # Shallow-copy the shared parameters into this request
        # (Python 2 dict iteration).
        for k,v in req_pars.iteritems():
            request[k] = v
        # Build the article-search URL page by page, starting after the
        # already-cached pages.
        request['url'] = 'http://api.nytimes.com/svc/search/v1/article?query=text:'+query.doc['query_text'].replace(' ','+')+'+comments:y'
        request['url'] = request['url'] + '&offset='+str(int(prior_offset) + int(i))
        request['url'] = request['url'] + '&fields=body,byline,date,title,url,geo_facet,per_facet,des_facet,org_facet'
        request['url'] = request['url'] + '&api-key='+Config.get('nyt_article')['key']
        #logging.critical('BSM[get_old_articles_for_query] Size:'+str(sys.getsizeof(pickle.dumps(request))))
        # Queue the pickled request on the 'nytarticle' tube.
        with voxpop.VPE.beanstalkd_lock:
            voxpop.VPE.get_beanstalkd().use("nytarticle")
            voxpop.VPE.get_beanstalkd().put(pickle.dumps(request), pri=100000)
    query.save()
    return query
def __init__(self, config: Config):
    """Wire up the scheduler and e-mail notification from *config*.

    A parse job is scheduled for every configured site.
    """
    self._scheduler = Scheduler()
    self._email_notification = EmailNotification(
        config.get('smtp'), config.get('recipients'))
    for site_conf in config.get('sites'):
        self._scheduler.every_minutes(Site(site_conf), self.parse)
    logger.info(f"Will be parsing {len(config.get('sites'))} site(s)")
def __init__(self):
    """Open a TCP connection to the configured socket host and port.

    Terminates the process when the connection cannot be established.
    """
    self.tcp_socket = socket.socket()
    self.logger = Logger()
    configuration = Config()
    host = configuration.get('SOCKET_HOST')
    port = configuration.get('SOCKET_PORT')
    try:
        self.tcp_socket.connect((host, port))
    except socket.error:
        self.logger.log("Can't connect to socket " + host + ":" + str(port))
        # BUG FIX: previously exited with status 0 (success) on a
        # connection failure; exit non-zero so callers can detect it.
        sys.exit(1)
def mountOAuth2Data(self):
    """Assemble the OAuth2 payload from instance state and app config."""
    config = Config()
    return {
        "uid": self.uid,
        "auth_code": self.auth_code,
        "developer_key": config.get("developer_key"),
        "secret_key": config.get("secret_key"),
    }
def __init__(self):
    """Read delivery settings and prepare the optional socket sender."""
    cfg = Config()
    self.send_via_api = cfg.get('SEND_VIA_API')
    self.send_via_socket = cfg.get('SEND_VIA_SOCKET')
    self.device_id = cfg.get('DEVICE_ID')
    # Substitute the device id into the endpoint template.
    self.api_endpoint = cfg.get('API_ENDPOINT').replace(
        ':id', str(self.device_id))
    self.api_token = cfg.get('API_TOKEN')
    if self.send_via_socket:
        self.socket = SocketSender()
def refresh_device(device):
    """Send a 'refresh' control message for *device* to the device service."""
    endpoint = "tcp://{0}:{1}".format(
        Config.get('services', 'client_address'),
        Config.get('services', 'client_port'))
    socket = zmq.Context().socket(zmq.REQ)
    socket.setsockopt(zmq.LINGER, 0)
    socket.connect(endpoint)
    message = ['00000000', 'control', 'refresh', device]
    mdp_request(socket, 'device', message, 1)
def __init__(self):
    # Read the configured camera identifier.
    configuration = Config()
    self.input_camera = configuration.get('INPUT_CAMERA')
    # NOTE(review): INPUT_CAMERA is read but never used -- the capture
    # device is hard-coded to index 0.  Confirm whether VideoCapture
    # should use self.input_camera instead.
    self.camera = cv2.VideoCapture(0)
def fetch_comments_for_article_id(pars=None, **kwargs):
    """Queue a beanstalkd job that fetches the comments for one article.

    ``pars`` must carry 'article_id' and 'article_url'.  Returns True when
    the job was queued, False when a required parameter is missing.
    """
    # BUG FIX: the default was the mutable ``pars={}`` (shared across
    # calls); use None and create a fresh dict per call instead.
    if pars is None:
        pars = {}
    if "article_id" not in pars:
        logging.error("**** nytCommunity.fetch_comments_for_article: NO ARTICLE ID PROVIDED")
        return False
    if "article_url" not in pars:
        logging.error("**** nytCommunity.fetch_comments_for_article: NO ARTICLE URL PROVIDED")
        return False
    logging.info(
        "$$$$ nytCommunity.fetch_comments_for_article_id[id:"
        + pars["article_id"] + ",url:" + pars["article_url"] + "]"
    )
    # Exact-match comment lookup, oldest first (Python 2 urllib).
    _url = "http://api.nytimes.com/svc/community/v2/comments/url/exact-match.json?"
    _url = _url + "url=" + urllib.quote_plus(pars["article_url"])
    _url = _url + "&sort=oldest"
    _url = _url + "&api-key=" + Config.get("nyt_community")["key"]
    # 'functions' is the processing pipeline applied to the API response.
    request = {
        "url": _url,
        "article_id": pars["article_id"],
        "article_url": pars["article_url"],
        "functions": [api.make_api_request, fetch_remainder],
    }
    # logging.critical('BSM[fetch_comments_for_article_id] Size:'+str(sys.getsizeof(pickle.dumps(request))))
    # Queue the pickled request on the 'nytcommunity' tube.
    with voxpop.VPE.beanstalkd_lock:
        voxpop.VPE.get_beanstalkd().use("nytcommunity")
        voxpop.VPE.get_beanstalkd().put(pickle.dumps(request), pri=100000)
    return True
def __init__(self, oauthToken):
    """Prepare the base URL and default headers for authenticated requests."""
    cfg = Config()
    self.base_url = "https://sandbox.original.com.br"
    self.headers = {
        "Authorization": oauthToken,
        "developer-key": cfg.get("developer_key"),
    }
def _build_paths(self):
    """Derive the RRD folder and file paths and ensure the folder exists.

    Paths are built by plain concatenation, so the configured rrdtool
    location is expected to already end with a separator.
    """
    base = Config.get('rrdtool', 'location')
    self.folder_path = base + self.folder
    self.file_path = self.folder_path + self.file_name
    if not os.path.exists(self.folder_path):
        os.makedirs(self.folder_path)
def delete_device(device):
    """Remove a device from the running service, its RRD data, and the DB."""
    # 1) Tell the device service to drop the device via the broker.
    address = Config.get('services', 'client_address')
    port = Config.get('services', 'client_port')
    address = "tcp://" + address + ":" + port
    socket = zmq.Context().socket(zmq.REQ)
    socket.setsockopt(zmq.LINGER, 0)
    socket.connect(address)
    message = ['00000000', 'control', 'remove', device.id]
    mdp_request(socket, 'device', message, 1)
    # 2) Delete the device's RRD data files, including its folder.
    rrd = RRD(device.id, 'device')
    rrd.remove(remove_folder=True)
    # 3) Finally delete the database row and commit.
    db_session.query(Device).filter_by(id=device.id).delete()
    db_session.commit()
    return
def grainbin_manager(context):
    """Run the grainbin service manager until interrupted.

    Stores the manager in the module global and blocks on the IOLoop;
    a KeyboardInterrupt stops the loop and shuts the manager down.
    """
    global manager
    worker_address = "tcp://{0}:{1}".format(
        Config.get('services', 'worker_address'),
        Config.get('services', 'worker_port'))
    service = Config.get('grainbin_service', 'service_name')
    manager = GrainbinServiceManager(context, worker_address, service)
    try:
        IOLoop.instance().start()
    except KeyboardInterrupt:
        IOLoop.instance().stop()
        manager.shutdown()
    return
def farm_broker():
    """Run the farm broker until interrupted.

    Builds the worker and client endpoints from configuration, stores the
    broker in the module global, and blocks on the IOLoop; KeyboardInterrupt
    stops the loop and shuts the broker down.
    """
    global broker
    worker_address = "tcp://{0}:{1}".format(
        Config.get('broker', 'worker_address'),
        Config.get('broker', 'worker_port'))
    client_address = "tcp://{0}:{1}".format(
        Config.get('broker', 'client_address'),
        Config.get('broker', 'client_port'))
    context = zmq.Context()
    broker = FarmBroker(context, main_ep=worker_address, opt_ep=client_address)
    try:
        IOLoop.instance().start()
    except KeyboardInterrupt:
        IOLoop.instance().stop()
        broker.shutdown()
    return
def get_new_articles_for_query(query):
    """
    Fetches items for a query that are newer than the newest item that has
    already been fetched.

    Queues one beanstalkd job per API page needed, then saves and returns
    the (mutated) query object.
    """
    logging.error("$$$$ nytArticles.get_new_articles_for_query[]")
    # BUG FIX: n_requests, req_pars, prior_cached and the returned
    # ``message`` were all used without being defined (NameError at
    # runtime).  Set them up the same way get_old_articles_for_query
    # does, and return the query object instead of the undefined name.
    n_to_fetch = 10
    query.doc['n_fetched'] = query.doc['n_fetched'] + n_to_fetch
    n_requests = int(math.ceil(n_to_fetch / int(Config.get("nyt_article")["response_size"])))
    if n_requests == 0:
        n_requests = 1
    # Items already cached determine where the new pages start.
    prior_cached = len(query.children)
    # Parameters shared by every queued request; 'functions' is the
    # processing pipeline applied to each API response.
    req_pars = {
        'query_id': query.id(),
        'functions': [api.make_api_request, cache_response, fetch_comments]
    }
    for i in range(0, n_requests):
        request = {}
        # Shallow-copy the shared parameters (Python 2 dict iteration).
        for k, v in req_pars.iteritems():
            request[k] = v
        request['url'] = 'http://api.nytimes.com/svc/search/v1/article?query=text:'+query.doc['query_text'].replace(' ','+')+'+comments:y'
        request['url'] = request['url'] + '&offset='+str(prior_cached + (i * int(Config.get("nyt_article")["response_size"])))
        request['url'] = request['url'] + '&fields=body,byline,date,title,url,geo_facet,per_facet,des_facet,org_facet'
        request['url'] = request['url'] + '&api-key='+Config.get('nyt_article')['key']
        #logging.critical('BSM[get_new_articles_for_query] Size:'+str(sys.getsizeof(pickle.dumps(request))))
        # Queue the pickled request on the 'nytarticle' tube.
        with voxpop.VPE.beanstalkd_lock:
            voxpop.VPE.get_beanstalkd().use("nytarticle")
            voxpop.VPE.get_beanstalkd().put(pickle.dumps(request), pri=100000)
    query.save()
    return query
def tfidf(self, report):
    """
    Run TF-IDF embedding and then classification on outputted vector
    :param report: behavioral report
    :return prediction
    :raises ValueError: if self.classifier is not 'svc' or 'xgboost'
    """
    # initialize TF-IDF model (inference only -- no training)
    tfidf_conf = Config({
        'do_training': False,
        'model_name': self.tfidf_model_name,
        'output_dir': self.output_dir
    })
    tfidf_model = TfidfModel(tfidf_conf)
    # embedding
    vectors = tfidf_model.transform([report])
    # classification -- the classifier model shares the embedding's name
    if self.classifier == 'svc':
        svc_conf = Config({
            'do_training': False,
            'model_name': tfidf_conf.get('model_name'),
            'output_dir': self.output_dir
        })
        model = SVCModel(svc_conf)
        prediction = Predictor.classify(model, vectors)
    elif self.classifier == 'xgboost':
        xgb_conf = Config({
            'do_training': False,
            'model_name': tfidf_conf.get('model_name'),
            'n_estimators': [80],
            'output_dir': self.output_dir
        })
        model = XGBoostModel(xgb_conf)
        prediction = Predictor.classify(model, vectors)
    else:
        # BUG FIX: an unknown classifier previously fell through and
        # raised NameError on 'prediction'; fail with a clear message.
        raise ValueError("unsupported classifier: %r" % self.classifier)
    return prediction
def __init__(self):
    """Load the FER-2013 emotion classifier and its inference metadata."""
    # (removed unused local ``script_dir`` -- it was computed and never read)
    emotion_model_path = os.path.join(constants.ROOT_DIR, constants.EMOTION_MODEL)
    configuration = Config()
    self.input_camera = configuration.get('INPUT_CAMERA')
    self.emotion_classifier = load_model(emotion_model_path)
    # Spatial input size expected by the network.
    self.emotion_target_size = self.emotion_classifier.input_shape[1:3]
    self.emotion_labels = get_labels('fer2013')
    # Offsets applied around detected faces before classification --
    # TODO confirm units/ordering against the caller.
    self.emotion_offsets = (20, 40)
def __init__(self):
    """Load the WideResNet age/gender model, downloading weights if needed."""
    # (removed unused local ``script_dir`` -- it was computed and never read)
    emotion_model_path = os.path.join(constants.ROOT_DIR, constants.AGE_GENDER_MODEL)
    configuration = Config()
    self.input_camera = configuration.get('INPUT_CAMERA')
    # NOTE(review): the "//" -> "\\" replace suggests Windows-specific
    # path handling -- confirm this still works on other platforms.
    model_dir = os.path.join(os.getcwd(), "pretrained_models").replace("//", "\\")
    self.model = WideResNet(64, depth=16, k=8)()
    # Download (or reuse cached) pretrained weights.
    fpath = get_file(
        emotion_model_path,
        "https://github.com/Tony607/Keras_age_gender/releases/download/V1.0/weights.18-4.06.hdf5",
        cache_subdir=model_dir)
    self.model.load_weights(fpath)
def create(self, sources):
    """
    creates an rrd file

    sources is a list of data sources to add.  Every item in the list
    must have a unique 'name' key that identifies the item
    """
    raw_archives = Config.get('rrdtool', 'data_archives')
    data_archives = [line.strip() for line in raw_archives.splitlines()]
    # One GAUGE data source per named item; unnamed items are skipped.
    data_source = [
        "DS:" + item['name'] + ":GAUGE:7200:-40:80"
        for item in sources
        if 'name' in item
    ]
    rrdtool.create(self.file_path, "--step", '3600', data_source, data_archives)
    return
class StartpageWindow(wx.Frame):
    """Main class of sc2mafia the start page, the create game page.

    All GUI logic is here.
    """

    def __init__(self, parent, id, title, style):
        """Construct the GUI from config file, and bind GUI events to
        their handlers.
        """
        # Read sc2mafia.cfg
        self.readFromCfg()
        # Call the base-class constructor
        wx.Frame.__init__(self, parent, wx.ID_ANY, title,
                          size=self.framesize, style=style)
        # Load resources
        # self.resin = Resin(os.path.join(homepath,
        #                    'res', self.prefer.getValue("iconset")))
        self.resin = Resin()
        # Icon
        self.SetIcon(self.resin.getIcon("notalon"))
        # Tip-of-the-day provider
        self.tips = wx.CreateFileTipProvider("Tips.txt", 0)
        self.showtips = int(self.config.get("other", "showtips"))
        if self.showtips:
            wx.CallAfter(self.ShowTips)  # show the tips together with the main window
            # self.ShowTips()  # opposite of the above: show tips before the main window
        # Create the MenuBar
        self.createMenuBar()
        # Create the ToolBar
        self.createToolBar()
        # Create the StatusBar
        self.createStatusBar()
        # Create the main display area
        self.createMainWindow()
        # Bind the window-close handler
        self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
        # Create the TaskBarIcon
        self.tbIcon = tbIcon(self)

    # ----------------- helpers that build the main frame ------------------ #
    def readFromCfg(self):
        """Read the configuration file."""
        self.config = Config(os.path.join(homepath, "sc2mafia.cfg"))
        self.framesize = (int(self.config.get("frame", "width")),
                          int(self.config.get("frame", "height")))

    def menuData(self):
        """Menu item data: (menu label, (item label, status text, handler), ...)."""
        return (
            (u"程序", (u"退出", u"退出程序", self.OnCloseWindow)),
            (
                u"员工档案",
                (u"新建", u"在远程服务器新建一个员工档案", self.OnCreateStaff),
                (u"修改", u"修改一个员工档案", self.OnModifyStaff),
                (u"查看", u"查看所有员工档案", self.OnDisplayStaff),
                (u"筛选", u"筛选员工档案", self.OnFilterStaff),
            ),
            (u"工资", (u"工资单发送", "用邮件发送工资单", self.OnMailSalary)),
            (u"其他", (u"报销系统", u"在这里登录报销系统", self.OnOpenExpAccHtml)),
            (u"工具", (u"通讯录", u"快速查询通讯录的小工具", self.OnSearchAddr)),
            (
                u"帮助",
                (u"用户手册", u"用户手册", self.OnManual),
                ("", "", ""),
                (u"版权", u"本软件的版权信息", self.OnCopyRight),
                (u"关于作者", u"本软件作者的相关信息", self.OnAuthor),
                (u"关于本软件", u"本软件的相关信息", self.OnAbout),
            ),
        )

    def createMenuBar(self):
        """Create the menu bar."""
        menuBar = wx.MenuBar()
        for eachMenuData in self.menuData():
            menuLabel = eachMenuData[0]
            menuItems = eachMenuData[1:]
            menuBar.Append(self.createMenu(menuItems), menuLabel)
        self.SetMenuBar(menuBar)

    def createMenu(self, menuData):
        """Create a single menu -- factored out of createMenuBar."""
        menu = wx.Menu()
        for eachLabel, eachStatus, eachHandler in menuData:
            # An empty label marks a separator entry.
            if not eachLabel:
                menu.AppendSeparator()
                continue
            menuItem = menu.Append(-1, eachLabel, eachStatus)
            self.Bind(wx.EVT_MENU, eachHandler, menuItem)
        return menu

    def toolBarData(self):
        """Toolbar data: (label, bitmap file, help text, handler)."""
        return (
            (u"创建", "new.bmp", u"新建一个员工档案", self.OnCreateStaff),
            # (u"修改", self.OnModifyStaff),
            (u"查看", "display.bmp", u"浏览", self.OnDisplayStaff),
            (u"筛选", "search.bmp", u"筛选", self.OnFilterStaff),
            ("", "", "", ""),
            (u"通讯录", "addr.bmp", u"搜索通讯录", self.OnSearchAddr),
        )

    def createToolBar(self):
        """Create the toolbar."""
        toolBar = self.CreateToolBar()
        for each in self.toolBarData():
            self.createSimpleTool(toolBar, *each)
        toolBar.Realize()

    def createSimpleTool(self, toolbar, label, filename, help, handler):
        """Create one tool button -- factored out of createToolBar."""
        # An empty label marks a separator entry.
        if not label:
            toolbar.AddSeparator()
            return
        bmp = wx.Image(filename, wx.BITMAP_TYPE_BMP).ConvertToBitmap()
        tool = toolbar.AddSimpleTool(-1, bmp, label, help)
        self.Bind(wx.EVT_MENU, handler, tool)

    def createMainWindow(self):
        """Create the main display window."""
        pass

    # Show tip-of-the-day
    def ShowTips(self):
        """Shows the tips window on startup; returns False if the option
        to turn off tips is checked.
        """
        # constructs the tip (wx.ShowTip), which returns whether or not the
        # user checked the 'show tips' box
        if wx.ShowTip(None, self.tips, True):
            btmp = 1
            print 1
        else:
            btmp = 0
            print 0
        # Persist the user's choice back to the config.
        self.config.set("other", "showtips", btmp)
        return self.config.get("other", "showtips")

    # ---------------- event handlers of the main frame ---------------- #
    def OnCreateStaff(self, event):
        pass

    def OnModifyStaff(self, event):
        pass

    def OnDisplayStaff(self, event):
        pass

    def OnFilterStaff(self, event):
        pass

    def OnMailSalary(self, event):
        pass

    def OnOpenExpAccHtml(self, event):
        pass

    def OnSearchAddr(self, event):
        pass

    def OnManual(self, event):
        pass

    def OnCopyRight(self, event):
        pass

    def OnAuthor(self, event):
        pass

    def OnAbout(self, event):
        pass

    def createStatusBar(self):
        # Three status fields with relative widths 1:2:3.
        self.statusBar = self.CreateStatusBar()
        self.statusBar.SetFieldsCount(3)
        self.statusBar.SetStatusWidths([-1, -2, -3])

    # Splitter sash position changed
    def OnSashPosChanged(self, event):
        """Handler for when the splitter sash, who divided the `tree` and
        the `content`, is moved.
        """
        pass
        # pos = self.splitter.GetSashPosition()
        # self.config.setValue("sashposition", pos)

    def OnSelChanged(self):
        pass

    # Close the main frame and clean up resources
    # TODO:devo
    def OnCloseWindow(self, event):
        self.tbIcon.Destroy()
        self.Destroy()
def _classify_embeddings(self, model_name, train_vecs, train_labels,
                         validation_vecs, validation_labels,
                         test_vecs, test_labels):
    """Classify one set of embedded vectors with the configured classifier.

    Builds the SVC or XGBoost configuration (reusing *model_name* so the
    classifier is stored next to its embedding model) and delegates to
    self.classify.  Does nothing for an unknown classifier, matching the
    original fall-through behaviour.
    """
    if self.classifier == 'svc':
        conf = Config({
            'do_training': self.do_training_cls,
            'model_name': model_name,
            'output_dir': self.output_dir,
            # following params used only for training
            'C': 1.0,
            'loss': 'hinge',
            'gamma': 'auto',
            'kernel': 'linear',
            'random_value': 42
        })
        model = SVCModel(conf)
        self.classify(model, train_vecs, train_labels,
                      validation_vecs, validation_labels,
                      test_vecs, test_labels)
    elif self.classifier == 'xgboost':
        conf = Config({
            'do_training': self.do_training_cls,
            'model_name': model_name,
            'output_dir': self.output_dir,
            "n_estimators": [80],
            # following params used only for training
            "eta": [0.01, 0.02],
            "max_depth": [2, 3, 4],
            "learning_rate": [0.05, 0.1, 0.2],
            "min_child_weight": [3, 5],
            "gamma": [0.3],
            'colsample_bytree': [0.3],
            'n_jobs': 4,
            'scoring': 'neg_log_loss',
            'cv': 3,
        })
        model = XGBoostModel(conf)
        self.classify(model, train_vecs, train_labels,
                      validation_vecs, validation_labels,
                      test_vecs, test_labels)

def doc2vec(self, train_reports, train_labels, validation_reports,
            validation_labels, test_reports, test_labels):
    """
    Run Paragraph Vectors (Doc2Vec) embedding and classification on the
    outputted vectors.  Model PV-DM and PV-DBOW are assembled, evaluated
    separately and then concatenated and evaluated.

    (Refactor: the three identical classifier-configuration blocks were
    hoisted into _classify_embeddings; behaviour is unchanged.)
    """
    # DM model initialization
    logger.info('--- DM model classification ---')
    dm_conf = Config({
        'do_training': self.do_training_emb,
        'model_name': 'd2v_dm_model',
        'output_dir': self.output_dir,
        # following params used only for training
        'dm': 1,
        'vector_size': 1000,
        'window': 10,
        'negative': 5,
        'hs': 0,
        'min_count': 50,
        'sample': 0,
        'alpha': 0.025,
        'compute_loss': True,
        'epochs': 10,
        'start_alpha': 0.025,
        'end_alpha': -0.00025,
    })
    dm_model = Doc2VecModel(dm_conf)
    # DM model training
    if self.do_training_emb:
        dm_model.train(train_reports, train_labels, save_model=True)
    # DM model reports embedding
    dm_train_vecs = dm_model.infer_vectors(train_reports, train_labels)
    dm_validation_vecs = dm_model.infer_vectors(validation_reports,
                                                validation_labels)
    dm_test_vecs = dm_model.infer_vectors(test_reports, test_labels)
    # classification on DM model's output
    self._classify_embeddings(dm_conf.get('model_name'),
                              dm_train_vecs, train_labels,
                              dm_validation_vecs, validation_labels,
                              dm_test_vecs, test_labels)

    # DBOW model initialization
    logger.info('--- DBOW model classification ---')
    dbow_conf = Config({
        'do_training': self.do_training_emb,
        'model_name': 'd2v_dbow_model',
        'output_dir': self.output_dir,
        # following params used only for training
        'dm': 0,
        'vector_size': 1000,
        'window': 10,
        'negative': 5,
        'hs': 0,
        'min_count': 25,
        'sample': 0,
        'alpha': 0.015,
        'compute_loss': True,
        'epochs': 10,
        'start_alpha': 0.015,
        'end_alpha': 0.00015
    })
    dbow_model = Doc2VecModel(dbow_conf)
    # DBOW model training
    if self.do_training_emb:
        dbow_model.train(train_reports, train_labels, save_model=True)
    # DBOW model reports embedding
    dbow_train_vecs = dbow_model.infer_vectors(train_reports, train_labels)
    dbow_validation_vecs = dbow_model.infer_vectors(
        validation_reports, validation_labels)
    dbow_test_vecs = dbow_model.infer_vectors(test_reports, test_labels)
    # classification on DBOW model's output
    self._classify_embeddings(dbow_conf.get('model_name'),
                              dbow_train_vecs, train_labels,
                              dbow_validation_vecs, validation_labels,
                              dbow_test_vecs, test_labels)

    # Models concatenation
    logger.info('--- Combined model classification ---')
    concat_model_name = 'd2v_concat'
    combination = Doc2VecCombinator(
        os.path.join(dm_model.model_path,
                     dm_model.config.get('model_name') + '.d2v'),
        os.path.join(dbow_model.model_path,
                     dbow_model.config.get('model_name') + '.d2v'))
    # embedding
    conc_train_vecs = combination.infer_vectors(train_reports)
    conc_validation_vecs = combination.infer_vectors(validation_reports)
    conc_test_vecs = combination.infer_vectors(test_reports)
    # classification on concatenated model's output
    self._classify_embeddings(concat_model_name,
                              conc_train_vecs, train_labels,
                              conc_validation_vecs, validation_labels,
                              conc_test_vecs, test_labels)
def tfidf(self, train_reports, train_labels, validation_reports,
          validation_labels, test_reports, test_labels):
    """
    Run TF-IDF model embedding and then classification on outputted vectors
    """
    embedding_conf = Config({
        'do_training': self.do_training_emb,
        'model_name': 'tfidf_range_1_2_kbest_10000',
        'output_dir': self.output_dir,
        # following params used only for training
        'ngram_range': (1, 2),
        'max_features': 100000,
        'k_best_features': 10000,
        'smooth_idf': True,
        'use_idf': True
    })
    embedder = TfidfModel(embedding_conf)
    # train the TF-IDF model when embedding training is requested
    if self.do_training_emb:
        embedder.train(train_reports, train_labels, save_model=True)
    # embed all three report splits
    train_vectors = embedder.transform(train_reports)
    validation_vectors = embedder.transform(validation_reports)
    test_vectors = embedder.transform(test_reports)
    # classification -- the classifier shares the embedding model's name
    if self.classifier == 'svc':
        classifier_conf = Config({
            'do_training': self.do_training_cls,
            'model_name': embedding_conf.get('model_name'),
            'output_dir': self.output_dir,
            # following params used only for training
            'C': 1.0,
            'loss': 'hinge',
            'gamma': 'auto',
            'kernel': 'linear',
            'random_value': 42
        })
        classifier_model = SVCModel(classifier_conf)
        self.classify(classifier_model, train_vectors, train_labels,
                      validation_vectors, validation_labels,
                      test_vectors, test_labels)
    elif self.classifier == 'xgboost':
        classifier_conf = Config({
            "do_training": self.do_training_cls,
            'model_name': embedding_conf.get('model_name'),
            'output_dir': self.output_dir,
            "n_estimators": [80],
            # following params used only for training
            "eta": [0.01, 0.02],
            "max_depth": [2, 3, 4],
            "learning_rate": [0.05, 0.1, 0.2],
            "min_child_weight": [3, 5],
            "gamma": [0.3],
            'colsample_bytree': [0.3],
            'n_jobs': 4,
            'scoring': 'neg_log_loss',
            'cv': 3,
        })
        classifier_model = XGBoostModel(classifier_conf)
        self.classify(classifier_model, train_vectors, train_labels,
                      validation_vectors, validation_labels,
                      test_vectors, test_labels)
def test_get():
    """The master config resolves a dotted key to its stored value."""
    cfg = Config(config_file_master)
    assert cfg.get('default.city') == 'Anyang'
from flask import Flask, render_template, g
import logging
from logging.handlers import RotatingFileHandler
from datetime import datetime
# user made module imports below
from user_web import user
from device_web import device
from grainbin_web import grain_bin
from config.config import Config
from database.model import db_session, Device, Grainbin

# Flask application setup: the secret key comes from the project config.
app = Flask(__name__)
app.secret_key = Config.get('flask', 'secret_key')
# Blueprints for the individual web sections.
app.register_blueprint(user, url_prefix='/user')
app.register_blueprint(device, url_prefix='/device')
# The grain-bin section is optional and enabled via configuration.
if Config.getboolean('services', 'grainbin_service'):
    app.register_blueprint(grain_bin, url_prefix='/grain_bin')

# Rotating log file: 10 MB per file, 10 backups, WARNING and above.
logfile_path = Config.get('flask', 'logfile_path')
file_handler = RotatingFileHandler(logfile_path, maxBytes=1024 * 1024 * 10,
                                   backupCount=10)
file_handler.setLevel(logging.WARNING)
formatter = logging.Formatter(
    "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
import os
from config.config import Config

config = Config()

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Database credentials come from the HORKOS section of the project config.
# NOTE(review): datasource.url is assumed to be a URL shaped like
# "scheme://host/dbname?params" -- split("/")[2] is the host and
# split("/")[3] (before "?") the database name.  Confirm against the
# actual config format; a URL without a path or query will raise here.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': config.get('HORKOS', 'datasource.url').split("/")[3].split("?")[0],
        'USER': config.get('HORKOS', 'datasource.username'),
        'PASSWORD': config.get('HORKOS', 'datasource.password'),
        'HOST': config.get('HORKOS', 'datasource.url').split("/")[2],
    }
}

INSTALLED_APPS = (
    'data',
)

# Read from the environment; fails fast with KeyError if unset.
SECRET_KEY = os.environ['SECRET_KEY']
import logging
from config.config import Config

# Parse the application configuration once at import time.
config = Config()
config.parse()

# Application-wide logger; the level comes from the config file and is
# applied both to the logger and to its stream handler.
logger = logging.getLogger('Application')
level = config.get('log_level')
logger.setLevel(level)
formatter = logging.Formatter(
    '%(asctime)s : %(name)s - %(levelname)s : %(message)s')
sh = logging.StreamHandler()
sh.setLevel(level)
sh.setFormatter(formatter)
logger.addHandler(sh)
# fh = logging.FileHandler('logs/app.log')
# fh.setFormatter(formatter)
# logger.addHandler(fh)
def __init__(self):
    """Cache the DEBUG flag from the application configuration."""
    self._DEBUG = Config().get('DEBUG')
class BasePage(object, metaclass=Base):
    """Page base class inherited by page-object classes (object layer)."""

    def __init__(self):
        """
        self.config: load the configuration file
        self.poco: obtain the poco instance
        """
        self.config = Config().get_data
        print(f"device(): {device()}")
        # Check adb status
        adb = ADB()
        device_list = adb.devices()
        print(f"device_list: {device_list}")
        device_num = len(device_list) >= 1
        # print(device_list)
        assert_equal(device_num, True, "设备连接数至少>=1")
        # self.device_list = device_list
        # Obtain the poco instance
        self.poco = AndroidUiautomationPoco(use_airtest_input=True,
                                            screenshot_each_action=False)
        # Project root directory
        # self.root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        # if not cli_setup():
        #     auto_setup(
        #         basedir=self.root_dir,
        #         devices=self.["android_devices"],
        #         logdir=False
        #     )
        self._setting()
        self.width, self.height = device().get_current_resolution()
        print("[BasePage] init...")
        self.root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        self.main_phone = self.config.get("main_phone")
        # auto_setup(basedir=self.root_dir, logdir=True)
        # self.device_list = []
        # for i in range(len(device_list)):
        #     self.device = connect_device(self.main_phone + device_list[i][0])
        #     # print(self.main_phone + device_list[i][0])
        #     self.device_list.append(self.device)
        #     auto_setup(
        #         basedir=self.root_dir,
        #         devices=[self.main_phone + device_list[i][0]],
        #         logdir=True)

    def __getattr__(self, attr):
        """
        Extend attributes/methods.  Fall back to the methods named in the
        Actions enum only when a proper wrapper method has not been written!
        :param attr:
        :return: a method that operates on an element
        """
        # if not callable(attr):
        #     print(f"unknown attr: {attr}")
        #     # try to look the element up
        #     return
        print(self.config.get("package_name"))
        if attr == Actions.click.value:
            return self.find_click
        elif attr == Actions.wake.value:
            print("唤醒并解锁目标设备")
            return wake
        elif attr == Actions.home.value:
            print("返回HOME界面")
            return home

    def act_click(self, *args):
        print(f'{self.cls_name} @act_click: {str(args[0])}')

    # def set_permission(self):
    #     """Grant Android permissions"""
    #     permission = config.Config.get_yaml().get("permission", None)
    #     f = list(set(permission))
    #     print(f)
    #     for i in f:
    #         print(f"adb shell pm grant {self.package_name} {i}")
    #         os.popen(f"adb shell pm grant {self.package_name} {i}")

    def _setting(self):
        """Global airtest settings"""
        ST.FIND_TIMEOUT = 5  # implicit wait
        ST.FIND_TIMEOUT_TMP = 10  # implicit wait
        ST.SNAPSHOT_QUALITY = 70  # screenshot quality

    def start_app(self):
        """Start the app"""
        print(f"准备启动app[{self.package_name}]")
        start_app(self.package_name)
        sleep(2)
        return self

    def stop_app(self):
        """Stop the app"""
        stop_app(self.package_name)
        return self

    def restart_app(self):
        """Restart the app"""
        stop_app(self.package_name)
        sleep(2)
        print(f"准备启动app[{self.package_name}]")
        start_app(self.package_name)
        sleep(2)
        return self

    def find(self, *element):
        """Basic lookup; may raise when the element does not appear"""
        if len(element) == 1:
            self.poco(f"{element[0]}").wait_for_appearance(20)
            return self.poco(f"{element[0]}")
        elif len(element) == 2:
            # Two arguments: parent element plus a child selector.
            self.poco(f"{element[0]}").child(f"{element[1]}").wait_for_appearance(20)
            return self.poco(f"{element[0]}").child(f"{element[1]}")

    def find_click(self, *element):
        """Basic click; supports chaining"""
        self.find(*element).click()
        return self

    def find_long_click(self, *element):
        """Basic long click; supports chaining"""
        self.find(*element).long_click()
        return self

    def find_chirden(self, *element):
        """Find all child nodes of an element"""
        return self.find(*element).children()

    def find_text(self, *element):
        """Get the text of an element in the current UI"""
        return self.find(*element).get_text()

    @property
    def screen_size(self):
        """Current screen size"""
        return self.width, self.height

    def snap(self, msg: str = None):
        """
        Take a screenshot, with an optional description.
        :param msg: description text
        :return: supports chaining
        """
        if msg is not None:
            snapshot(msg=msg)
            return self
        snapshot()
        return self

    def back(self):
        # Hardware back key, with log entry and screenshot.
        keyevent("BACK")
        print('keyevent(BACK")')
        log('返回按键 keyevent(BACK")', timestamp=time.time(),
            desc='返回按键 keyevent(BACK")', snapshot=True)
        return self

    def go_me_page(self):
        """
        Go to the personal ("me") page
        :return: poco
        """
        print('跳转个人页go_me_page')
        self.find_click(Config().get_base_page("ID_ME_PAGE"))
        return self

    def up_swipe(self):
        """Swipe up"""
        start_pt = (self.width * 0.7, self.height * 0.7)
        end_pt = (self.width * 0.7, self.height * 0.3)
        swipe(start_pt, end_pt)
        return self

    def down_swipe(self):
        """Swipe down"""
        start_pt = (self.width * 0.7, self.height * 0.3)
        end_pt = (self.width * 0.7, self.height * 0.7)
        swipe(start_pt, end_pt)
        return self

    def left_swipe(self):
        """Swipe left"""
        start_pt = (self.width * 0.3, self.height / 2)
        end_pt = (self.width * 0.7, self.height / 2)
        swipe(start_pt, end_pt)
        return self

    def right_swipe(self):
        """Swipe right"""
        start_pt = (self.width * 0.7, self.height / 2)
        end_pt = (self.width * 0.3, self.height / 2)
        swipe(start_pt, end_pt)
        return self

    def up_swipe_tab(self):
        """Swipe the tab up (switch live room)"""
        start_pt = (self.width / 2, self.height * 0.5)
        end_pt = (self.width / 2, self.height * 0.1)
        swipe(start_pt, end_pt)
        return self

    def down_swipe_tab(self):
        """Swipe the tab down (switch live room)"""
        start_pt = (self.width / 2, self.height * 0.1)
        end_pt = (self.width / 2, self.height * 0.5)
        swipe(start_pt, end_pt)
        return self

    def left_swipe_tab(self):
        """Swipe the tab left"""
        start_pt = (self.width * 0.8, self.height / 2)
        end_pt = (self.width * 0.05, self.height / 2)
        swipe(start_pt, end_pt)
        return self

    def right_swipe_tab(self):
        """Swipe the tab right"""
        start_pt = (self.width * 0.05, self.height / 2)
        end_pt = (self.width * 0.8, self.height / 2)
        swipe(start_pt, end_pt)
        return self

    def in_current_page(self):
        pass

    def goto_this(self, from_page=None):
        pass
def fetch_remainder(message={}):
    """Queue follow-up comment-fetch requests for an article.

    ``message`` is the decoded response of a previous community-API call
    and must carry the parsed 'json' payload plus 'article_id' and
    'article_url'.  Returns the message on success (or when nothing needs
    fetching), False on a validation error.
    """
    # NOTE(review): mutable default argument; acceptable only because the
    # default dict is never mutated, but ``message=None`` would be safer.
    if "json" not in message:
        logging.error("$$$$ nytCommunity.fetch_remainder: ERROR NO JSON")
        return False
    if "results" not in message["json"]:
        logging.error("$$$$ nytCommunity.fetch_remainder: ERROR NO RESULTS IN JSON")
        return False
    if "totalCommentsFound" not in message["json"]["results"]:
        logging.warning("$$$$ nytCommunity.fetch_remainder: NO COMMENTS FOUND")
        return message
    if int(message["json"]["results"]["totalCommentsFound"]) <= 0:
        logging.warning("$$$$ nytCommunity.fetch_remainder: NO COMMENTS TO FETCH")
        return message
    if "article_id" not in message:
        logging.error("$$$$ nytCommunity.fetch_remainder: ERROR NO ARTICLE_ID PROVIDED")
        return False
    if "article_url" not in message:
        logging.error("**** nytCommunity.fetch_remainder: NO ARTICLE URL PROVIDED")
        return False
    logging.critical("$$$$ nytCommunity.fetch_remainder[" + message["article_id"].encode("utf-8") + "]")
    article = voxpop.VPE.get_items().get(_id=message["article_id"])
    if article is None:
        logging.error("$$$$ nytCommunity.fetch_remainder:ERROR Article Not Found")
        return False
    # Default the cached-comment counter for articles seen for the first time.
    if "n_comments_cached" not in article.doc:
        article.doc["n_comments_cached"] = 0
    if int(article.doc["n_comments_cached"]) == int(message["json"]["results"]["totalCommentsFound"]):
        logging.error("$$$$ nytCommunity.fetch_remainder:All Comments already cached.")
        return False
    # How many comments remain, and how many API pages that takes.
    # NOTE(review): under Python 2 the "/" inside ceil is integer division,
    # which is presumably why +1 is added as a safety margin -- confirm.
    _nCommentsToFetch = int(message["json"]["results"]["totalCommentsFound"]) - article.doc["n_comments_cached"]
    n_requests = int(math.ceil(_nCommentsToFetch / int(Config.get("nyt_community")["response_size"]))) + 1
    # Page offset corresponding to the comments already cached.
    _nRequestsOffset = int(
        math.floor(article.doc["n_comments_cached"] / int(Config.get("nyt_community")["response_size"]))
    )
    # Parameters shared by every queued request; 'functions' is the
    # processing pipeline applied to each API response.
    req_pars = {
        "article_id": message["article_id"],
        "article_url": message["article_url"],
        "functions": [api.make_api_request, cache_response],
    }
    logging.critical(
        "$$$$ nytCommunity.fetch_remainder: adding "
        + str(n_requests)
        + " more requests at offset ["
        + str(_nRequestsOffset)
        + "]"
    )
    # NOTE(review): range(_nRequestsOffset, n_requests) treats n_requests
    # as an end index although it was computed as a count, and the page
    # offset below hard-codes 25 instead of the configured response_size
    # -- confirm both against the API paging behaviour.
    for i in range(_nRequestsOffset, n_requests):
        request = {}
        # Shallow-copy the shared parameters (Python 2 dict iteration).
        for k, v in req_pars.iteritems():
            request[k] = v
        _url = "http://api.nytimes.com/svc/community/v2/comments/url/exact-match.json?"
        _url = _url + "url=" + urllib.quote_plus(message["article_url"])
        _url = _url + "&offset=" + str(i * 25)
        _url = _url + "&sort=oldest"
        _url = _url + "&api-key=" + Config.get("nyt_community")["key"]
        request["url"] = _url
        # logging.critical('BSM[fetch_remainder] Size:'+str(sys.getsizeof(pickle.dumps(request))))
        # Queue the pickled request on the 'nytcommunity' tube.
        with voxpop.VPE.beanstalkd_lock:
            voxpop.VPE.get_beanstalkd().use("nytcommunity")
            voxpop.VPE.get_beanstalkd().put(pickle.dumps(request), pri=100000)
    return message