def _deal_image_resource(image_name):
    image_path = os.path.join(config.get_config('SHAREHP_UPLOAD_DIR'), image_name)
    attach_info = {
        'name': image_name,
        'path': image_path,
        'size': image.get_image_size(image_path)
    }
    # Generate the thumbnail and upload it
    thumbnail_info = image.thumbnail_img(attach_info['name'], config.get_config('SHAREHP_UPLOAD_DIR'))
    thumbnail_url = 'resource/thumbnail/' + thumbnail_info['name']
    image.qiniu_upload(thumbnail_info['path'], thumbnail_url)
    # Upload the original image
    attach_url = 'resource/normal/' + attach_info['name']
    image.qiniu_upload(attach_info['path'], attach_url)
    content = json.dumps({
        'size': attach_info['size'],  # only used for images
        'url': attach_url
    })
    thumbnail = json.dumps({
        'size': thumbnail_info['size'],
        'url': thumbnail_url
    })
    return (thumbnail, content)

async def autocomplete(keyword: str):
    keyword_query_body = {
        "size": 10000,
        "query": {
            "wildcard": {
                "keyword": "".join(["*", keyword.lower(), "*"])
            }
        },
    }
    try:
        es_ins = ElasticSearch(
            ips=get_config("elasticsearch", "ips"),
            port=get_config("elasticsearch", "port"),
        )
        keywords = es_ins.get_data_by_body(index_name="keyword",
                                           query_body=keyword_query_body)
    except Exception:
        logger.error(traceback.format_exc())
        return {"error": 1, "msg": "内部错误,详情请看日志"}
    else:
        if keywords["hits"]["total"]["value"]:
            # Deduplicate, then return a list (a set is not JSON serializable)
            return {
                "code": 0,
                "data": sorted({
                    item["_source"]["keyword"]
                    for item in keywords["hits"]["hits"]
                }),
            }
        return {"code": 2, "data": [], "msg": "无匹配指令"}

def main():
    baseinit_ins = BaseInit()
    baseinit_ins.es_index_init()
    nginx_doc_url = get_config("common", "nginx_doc_url")
    timeout = int(get_config("common", "timeout"))
    retry = int(get_config("common", "retry"))
    interval = int(get_config("common", "interval"))
    logger.info("Start get url {}".format(nginx_doc_url))
    nginx_module_index_page_html = retry_call(get_request_text,
                                              fargs=[nginx_doc_url, timeout],
                                              tries=retry)
    nginxpage_ins = NginxPage(nginx_module_index_page_html)
    nginx_module_names_info = nginxpage_ins.get_module_names()
    # Store directive and variable name data to back the search feature
    keyword_info = []
    for item in nginx_module_names_info:
        module_name = item.split("/")[-1].split(".")[0]
        if module_name.startswith("ngx_") and "http_api" not in module_name:
            nginx_module_ins = NginxModule(item)
            nginx_module_ins.handle_module_direct_info()
            nginx_module_ins.handle_module_vars_info()
            nginx_module_ins.save_module_info_to_es()
            keyword_info.extend(nginx_module_ins.keyword_info)
            time.sleep(interval)
    baseinit_ins.es_ins.insert_mul_index_data("keyword", keyword_info)

async def monitor_uptime(self) -> None:
    """Checks the status of each server and sends up/down notifications"""
    await self.bot.wait_until_ready()
    channel = self.bot.get_channel(get_config("notification_channel"))
    for i in get_servers():
        if i["type"] == "ping":
            result = ping(i["address"])  # ping once and reuse the result
            if result is False:
                await self.notify_down(i, channel, "Host unknown")
            elif result is None:
                await self.notify_down(i, channel, "Timed out")
            else:
                await self.notify_up(i, channel)
        else:
            address = i["address"]
            timeout = get_config("http_timeout")
            if not address.startswith("http"):
                address = f"http://{address}"
            async with aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=timeout)
            ) as session:
                try:
                    async with session.get(address) as res:
                        if res.ok:
                            await self.notify_up(i, channel)
                        else:
                            await self.notify_down(i, channel, res.reason)
                except asyncio.TimeoutError:
                    await self.notify_down(i, channel, "Timed out")
                except aiohttp.ClientError:
                    await self.notify_down(i, channel, "Connection failed")

def get_nginx_module_page_content(self):
    return retry_call(
        get_request_text,
        fargs=[
            get_config("common", "nginx_doc_url") + self.item,
            int(get_config("common", "timeout")),
        ],
        tries=int(get_config("common", "retry")),
    )

def get_logger(name: str):
    logger = logging.getLogger(name)
    logger.propagate = False
    logger.addHandler(CustomConsoleHandler())
    if get_config()['environmet']['log']['file']['active'] is True:
        # CustomFileHandler is a handler, so register it with addHandler
        # (the original called addFilter)
        logger.addHandler(CustomFileHandler())
    logger.setLevel(
        logging.getLevelName(get_config()['environmet']['log']['level']))
    return logger

def compile(folder: str, file_name: str, data: dict) -> str:
    file_path = '%s/%s/%s' % (get_config()['environmet']['templateFolder'],
                              folder, file_name)
    logger.debug('Retrieving template from file [%s]' % file_path)
    compiler = pybars.Compiler()
    # Use a context manager so the template file is closed after reading
    with open(file_path, 'r', encoding="utf-8") as file:
        file_string = file.read()
    template = compiler.compile(file_string)
    final_string = template(data)
    if get_config()['environmet']['printXml']:
        logger.debug('Compiled template [%s]' % final_string)
    return final_string

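# Usage sketch for compile() above; the folder, file name, and data keys are
# assumptions for illustration only.
order_xml = compile('orders', 'create_order.xml', {'id': 42, 'quantity': 3})
logger.debug('Rendered %d characters' % len(order_xml))
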
def __init__(self): super().__init__( command_prefix=get_config("prefix"), description="Bot to monitor uptime of services", reconnect=True, intents=discord.Intents.default(), activity=discord.Activity( type=getattr(discord.ActivityType, get_config("activity_type").lower()), name="services", ), help_command=DefaultHelpCommand() if not get_config("disable_help") == "true" else None, ) self.bot = bot
def main(config_file_path):
    config_parser = get_config_parser(config_file_path)  # load the config file path
    config = get_config(config_parser)  # configuration values
    logger = get_logger(config)  # run log
    # Preprocess the data files: transactions file and dataset path come from config
    file_processor(config[TRANSACTIONS_FILE], config[DATASET_PATH])

async def initialize(app):
    loop = asyncio.get_event_loop()
    config = get_config()
    app['channels'] = {}

    app['db'] = DatabaseConnection(**config['db'])
    await app['db'].connect(loop)
    logger.info('Database pool created')

    app['redis'] = RedisConnection(**config['redis'])
    await app['redis'].connect(loop)
    logger.info('Redis pool created')

    users_dao = UsersDAO(db=app['db'])
    schedule_dao = ScheduleDAO(db=app['db'])
    user_handler = UsersHandler(users_dao=users_dao, schedule_dao=schedule_dao)
    schedule_handler = ScheduleHandler(schedule_dao=schedule_dao)
    chat_handler = ChatHandler(redis=app['redis'], users_dao=users_dao)

    app.add_routes([web.post('/user/', user_handler.create_user)])
    app.add_routes([web.get('/user/{user_id}', user_handler.get_user)])
    app.add_routes([web.get('/users/', user_handler.get_users_list)])
    app.add_routes([web.post('/user/{user_id}/schedule/', schedule_handler.add_schedule)])
    app.add_routes([web.get('/ws/{chat_id}/{user_id}/', chat_handler.websocket_server)])
    logger.info('Routes added')

def main():
    config = get_config()
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.enabled = True

    # logging to the file and stdout
    logger = get_logger(config.log_dir, config.exp_name)

    # fix random seed to reproduce results
    set_random_seed(config.random_seed)
    logger.info('Random seed: {:d}'.format(config.random_seed))
    logger.info(pprint.pformat(config))

    if config.method in ['src', 'jigsaw', 'rotate']:
        model = AuxModel(config, logger)
    elif config.method in ['cdan', 'cdan+e', 'dann']:
        model = CDANModel(config, logger)
    else:
        raise ValueError("Unknown method: %s" % config.method)

    # create data loaders
    src_loader, val_loader = get_train_val_dataloader(config.datasets.src)
    test_loader = get_test_dataloader(config.datasets.test)
    tar_loader = None
    if config.datasets.get('tar', None):
        tar_loader = get_target_dataloader(config.datasets.tar)

    # main loop
    if config.mode == 'train':
        model.train(src_loader, tar_loader, val_loader, test_loader)
    elif config.mode == 'test':
        model.test(test_loader)

def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_path = sys.argv[1]
    config = get_config(config_path)
    space = Space()
    space.build_space_from_config(config)
    space.save_space_from_config(config)

def run(dry_run):
    config = get_config()
    token = vault_client.read(
        config['github-repo-invites']['secret_path'],
        config['github-repo-invites']['secret_field'])
    g = utils.raw_github_api.RawGithubApi(token)

    gqlapi = gql.get_api()
    result = gqlapi.query(REPOS_QUERY)

    urls = []
    for app in result['apps_v1']:
        code_components = app['codeComponents']
        if code_components is None:
            continue
        for code_component in code_components:
            urls.append(code_component['url'])

    for i in g.repo_invitations():
        invitation_id = i['id']
        invitation_url = i['html_url']
        url = os.path.dirname(invitation_url)
        if url in urls:
            logging.info(['accept', url])
            if not dry_run:
                g.accept_repo_invitation(invitation_id)
        else:
            logging.debug(['skipping', url])

def new_view(request, *args, **kwargs):
    if not request.xmanuser['login']:
        return_url = urllib.urlencode(
            {'return_url': config.get_config('SHAREHP_SERVER_HOST') + request.get_full_path()})  # FIXME
        return HttpResponseRedirect('/login/?' + return_url)
    else:
        return view(request, *args, **kwargs)

def __init__(self, resource: str):
    super().__init__("sync_cbc_%s" % resource)
    self._ps = get_ps()
    self._ss = get_storage()
    self._resource = resource
    self._iso_lang = get_config()['prestashop']['mainLanguage']
    self._current_lang = self.get_language(self._iso_lang)

def submit_order(self):
    url = 'https://trade.jd.com/shopping/order/submitOrder.action?='
    payload = {'presaleStockSign': '1'}
    headers = {
        'Host': 'trade.jd.com',
        'Referer': 'https://trade.jd.com/shopping/order/getOrderInfo.action',
    }
    rsp = self.session.get(url=url, params=payload, headers=headers)
    if rsp.status_code == requests.codes.OK:
        log.info('正在提交订单...')
    else:
        log.error('订单提交失败,状态码:' + str(rsp.status_code) + ' 正在重试...')
    try:
        rsp_json = json.loads(rsp.text)
    except Exception:
        log.error('提交订单失败,请稍后重试')
        return False
    if rsp_json.get('success'):
        order_id = rsp_json.get('orderId')
        log.info('抢购成功,订单号:{}'.format(order_id))
        if config.get_config('messenger', 'enable') == 'true':
            success_message = "抢购成功,订单号:{}, 请尽快到PC端进行付款".format(order_id)
            send_wechat(success_message)
        return True
    else:
        log.info('抢购失败,返回信息:{}'.format(rsp_json))
        return False

def __init__(self):
    config = get_config()

    server = config['vault']['server']
    role_id = config['vault']['role_id']
    secret_id = config['vault']['secret_id']

    # This is a threaded world. Let's define a big
    # connections pool to live in that world
    # (this avoids the warning "Connection pool is
    # full, discarding connection: vault.devshift.net")
    session = requests.Session()
    adapter = HTTPAdapter(pool_connections=100, pool_maxsize=100)
    session.mount('https://', adapter)
    self._client = hvac.Client(url=server, session=session)
    self._cache = {}

    authenticated = False
    for _ in range(3):
        try:
            self._client.auth_approle(role_id, secret_id)
            authenticated = self._client.is_authenticated()
            break
        except requests.exceptions.ConnectionError:
            time.sleep(1)

    if not authenticated:
        raise VaultConnectionError()

class ApiHelper:
    url = get_config('base_url_api')
    headers = {'Content-Type': 'application/json'}
    payload = {}

    def send_get_request(self, endpoint, headers=None, params=None):
        if params is None:
            params = self.payload
        if headers is None:
            headers = self.headers  # the original self-assigned `headers`
        return requests.get(self.url + "/" + endpoint, headers=headers, params=params)

    def send_post_request(self, endpoint, headers=None, payload=None):
        if payload is None:
            payload = self.payload  # the original self-assigned `payload`
        if headers is None:
            headers = self.headers
        # POST the payload as a JSON body (the original issued a GET and
        # called json.load on a dict)
        return requests.post(self.url + "/" + endpoint, headers=headers,
                             data=json.dumps(payload))

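# A minimal usage sketch for ApiHelper; the 'users' endpoint and the payload
# shape are assumptions for illustration, not part of the original API.
api = ApiHelper()
get_rsp = api.send_get_request('users', params={'page': 1})
post_rsp = api.send_post_request('users', payload={'name': 'Alice'})
print(get_rsp.status_code, post_rsp.status_code)
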
def load():
    """
    Reads taxi data and writes to destination.
    :return:
    """
    conf = config.get_config()
    host = conf['dw']['postgresql']['host']
    port = conf['dw']['postgresql']['port']
    db = conf['dw']['postgresql']['db']
    user = conf['dw']['postgresql']['user']
    password = get_dw_password()
    schema = conf['dw']['postgresql']['schema']
    table = conf['dw']['postgresql']['table']
    driver = conf['dw']['postgresql']['driver']

    url = "jdbc:postgresql://{host}:{port}/{db}" \
        .format(host=host, port=port, db=db)
    db_table = "{schema}.{table}" \
        .format(schema=schema, table=table)

    spark = spark_session.get_spark_session()
    spark \
        .read \
        .csv("temp/transformed", header=True, sep=",") \
        .write \
        .mode("overwrite") \
        .format('jdbc') \
        .option("url", url) \
        .option("dbtable", db_table) \
        .option("user", user) \
        .option("password", password) \
        .option("driver", driver) \
        .save()

def reserve(self):
    self.login()
    log.info('登录成功')
    log.info('商品名称:{}'.format(get_sku_title()))
    url = 'https://yushou.jd.com/youshouinfo.action?'
    payload = {
        'callback': 'fetchJSON',
        'sku': self.sku_id,
        '_': str(int(time.time() * 1000)),
    }
    headers = {
        'Referer': 'https://item.jd.com/{}.html'.format(self.sku_id),
    }
    log.info('正在进行预约')
    rsp = self.session.get(url=url, params=payload, headers=headers)
    log.debug(rsp.text)
    rsp_json = parse_json(rsp.text)
    reserve_url = rsp_json.get('url')
    # self.timers.start()
    while True:
        try:
            self.session.get(url='https:' + reserve_url)
            log.info('预约成功,已获得抢购资格 / 您已成功预约过了,无需重复预约')
            if config.get_config('messenger', 'enable') == 'true':
                success_message = "预约成功,已获得抢购资格 / 您已成功预约过了,无需重复预约"
                send_wechat(success_message)
            break
        except Exception:
            log.error('预约失败,正在重试...')

def handle(self, *args, **kwargs):
    if len(args) == 0:
        import time
        start_time = time.time()
        cou_codes = get_cou_codes()
        for ys in [get_config('crawler', 'semester')]:
            ACIXSTORE, auth_num = get_auth_pair(
                'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE'
                '/JH/6/6.2/6.2.9/JH629001.php'
            )
            print('Crawling course for ' + ys)
            crawl_course(ACIXSTORE, auth_num, cou_codes, ys)
            ACIXSTORE, auth_num = get_auth_pair(
                'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE'
                '/JH/6/6.2/6.2.3/JH623001.php'
            )
            print('Crawling dept for ' + ys)
            crawl_dept(ACIXSTORE, auth_num, cou_codes, ys)
            print('===============================\n')
        elapsed_time = time.time() - start_time
        print('Total %.3f second used.' % elapsed_time)
    if len(args) == 1:
        if args[0] == 'clear':
            Course.objects.all().delete()
            Department.objects.all().delete()

def main(args):
    cfg = config.get_config(args.config_path)
    process_config(args, cfg)
    data.process_config(cfg)
    devices.device_setup(cfg)

    print("Loading data...")
    data_loaders = data.get_train_valid_loaders(cfg)

    classes = {}
    for k in ["model", "trainer"]:
        module_name = cfg[k].module_name
        class_name = cfg[k].class_name
        classes[k] = factory.get_class(module_name, class_name)

    print("Building model...")
    model = classes["model"](cfg)
    model.build_model()
    print(model.summary())
    if "load_checkpoint" in cfg.model:
        model.load(cfg.model.load_checkpoint)

    trainer = classes["trainer"](cfg, model, data_loaders)
    try:
        print("Training...")
        trainer.train()
    except KeyboardInterrupt:
        print("\nInterrupted by user.")
    finally:
        if "save_checkpoint" in cfg.model:
            model.save(cfg.model.save_checkpoint)

async def notify_down(self, server: dict, channel: discord.TextChannel,
                      reason: Optional[str]) -> None:
    """
    Sends an embed to indicate a service is offline

    :param server: Server object to extract data from
    :param channel: Channel to send the notification to
    :param reason: Reason why the service is down
    """
    if server["address"] not in self.currently_down:
        self.currently_down.update({server["address"]: 0})
        embed = embeds.Embed(
            title=f"**:red_circle: {get_server_name(server['address'])} is down!**",
            color=16711680,
        )
        embed.add_field(name="Address", value=server["address"], inline=False)
        embed.add_field(name="Type", value=server["type"], inline=False)
        embed.add_field(name="Reason", value=reason, inline=False)
        await channel.send(embed=embed)
        await channel.send(f"<@&{get_config('role_to_mention')}>", delete_after=3)
    else:
        self.currently_down[server["address"]] = self.currently_down.get(
            server["address"], 0) + get_config("secs_between_ping")

def main():
    config = get_config()
    print(config.log_dir)
    log_dirs = glob.glob(config.log_dir.replace(str(config.random_seed), '*'))
    best_accs = []
    best_nonsib_accs = []
    for log_dir in sorted(log_dirs):
        log_file = sorted(glob.glob(log_dir + '/run*'))[-1]
        print('Reading results from {}'.format(log_file))
        with open(log_file, 'r') as f:
            lines = f.read().splitlines()
        best_line = ''
        for line in lines:
            if 'Best testing accuracy' in line:
                best_line = line
        if best_line:
            print(best_line)
            if 'sib' in best_line:
                best_nonsib = best_line.split(' ')[-6]
                best = best_line.split(' ')[-3]
                best_nonsib_accs.append(float(best_nonsib))
            else:
                best = best_line.split(' ')[-2]
            best_accs.append(float(best))
    if best_nonsib_accs:
        accs = np.array(best_nonsib_accs, np.float32)
        print('===> non-sib mean: {:.2f}, std: {:.2f}'.format(
            np.mean(accs), np.std(accs)))
    if best_accs:
        accs = np.array(best_accs, np.float32)
        print('===> {} mean: {:.2f}, std: {:.2f}'.format(
            config.method, np.mean(accs), np.std(accs)))

def main(args):
    cfg = config.get_config(args.config_path)
    process_config(args, cfg)
    devices.device_setup(cfg)

    classes = {}
    for k in ["data_preprocessing", "model", "trainer"]:
        module_name = cfg[k].module_name
        class_name = cfg[k].class_name
        classes[k] = factory.get_class(module_name, class_name)

    preprocessing: base.data_preprocessing.BaseDataPreprocessing = classes["data_preprocessing"](cfg)
    preprocessing.preprocess_config()

    print("Loading data...")
    data_loaders = data.get_train_valid_data(cfg, preprocessing)
    print(f"Number of training batches: {data.get_train_batch_count(cfg)}")

    print("Building model...")
    model: base.model.BaseModel = classes["model"](cfg)
    model.build()
    model.compile()
    model.summary()
    model.load()

    trainer: base.trainer.BaseTrainer = classes["trainer"](cfg, model, data_loaders)
    print("Training...")
    try:
        trainer.train()
    finally:
        model.save()

def verify_configuration(logger):
    """Check that the required keys are present in config.yaml"""
    config = get_config()
    missing_key = []
    for key in key_list:
        point_count = key.count(".")
        if point_count == 0:
            if key not in config:
                missing_key.append(key)
        elif point_count == 1:
            _key_ = key.split(".", 1)
            if _key_[0] in missing_key:
                missing_key.append(key)
                continue
            if config[_key_[0]] is None or _key_[1] not in config[_key_[0]]:
                missing_key.append(key)
        elif point_count == 2:
            _key_ = key.split(".", 2)
            if _key_[0] in missing_key or _key_[0] + "." + _key_[1] in missing_key:
                missing_key.append(key)
                continue
            if config[_key_[0]][_key_[1]] is None or _key_[2] not in config[_key_[0]][_key_[1]]:
                missing_key.append(key)
    if missing_key:
        logger.info("配置文件(config.yaml)中缺少键%s,请检查配置文件" % missing_key)
        exit(1)

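# Hypothetical illustration of the dotted-key convention verify_configuration
# expects; the real key_list is defined elsewhere in the project.
key_list = [
    "selenium",              # top-level key
    "selenium.timeout",      # one level of nesting
    "sms_captcha.ocr.type",  # two levels of nesting
]
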
def test():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_fn = sys.argv[1]
    config = get_config(config_fn)
    w2v = MyWord2Vec()
    w2v.load(config)
    fn = '/Users/xingshi/Workspace/data/wordlist/antonyms.txt'
    label_list = []
    vectors = []
    for line in open(fn):
        ll = line.split()
        w1 = ll[0]
        w2 = ll[1]
        v1 = w2v.getNorm(w1)
        v2 = w2v.getNorm(w2)
        vector = v1 - v2
        label = w1 + '_' + w2
        vectors.append(vector)
        label_list.append(label)
    # v() appears to be a project-local visualization helper (writes antonyms.pdf)
    v(label_list, vectors, 'antonyms.pdf')

def __init__(self):
    INFO("欢迎执行JD全自动退会程序,如有使用问题请加TG群https://t.me/jdMemberCloseAccount进行讨论")
    # Initialize base configuration (read once and reuse)
    self.config = get_config()
    self.selenium_cfg = self.config["selenium"]
    self.shop_cfg = self.config["shop"]
    self.sms_captcha_cfg = self.config["sms_captcha"]
    self.image_captcha_cfg = self.config["image_captcha"]
    self.ocr_cfg = self.sms_captcha_cfg["ocr"]
    # Initialize selenium configuration
    self.browser = get_browser(self.config)
    self.wait = WebDriverWait(self.browser, self.selenium_cfg["timeout"])
    self.wait_check = WebDriverWait(self.browser, self.selenium_cfg["check_wait"])
    # Initialize SMS captcha configuration
    if not self.sms_captcha_cfg["is_ocr"]:
        if not self.sms_captcha_cfg["jd_wstool"]:
            from utils.listener import SmsSocket
            self.sms = SmsSocket()
    elif self.sms_captcha_cfg["is_ocr"]:
        if self.ocr_cfg["type"] == "":
            WARN("当前已开启OCR模式,但是并未选择OCR类型,请在config.yaml补充ocr.type")
            sys.exit(1)
        if self.ocr_cfg["type"] == "baidu":
            from captcha.baidu_ocr import BaiduOCR
            self.baidu_ocr = BaiduOCR(self.ocr_cfg)
        elif self.ocr_cfg["type"] == "aliyun":
            from captcha.aliyun_ocr import AliYunOCR
            self.aliyun_ocr = AliYunOCR(self.ocr_cfg)
        elif self.ocr_cfg["type"] == "easyocr":
            from captcha.easy_ocr import EasyOCR
            self.easy_ocr = EasyOCR()
    # Initialize image captcha configuration
    if self.image_captcha_cfg["type"] == "cjy":
        self.cjy = ChaoJiYing(self.image_captcha_cfg)
    elif self.image_captcha_cfg["type"] == "tj":
        self.tj = TuJian(self.image_captcha_cfg)
    elif self.image_captcha_cfg["type"] == "local":
        pass
    elif self.image_captcha_cfg["type"] == "yolov4":
        self.JDyolo = JDyolocaptcha(self.image_captcha_cfg)
    else:
        WARN("请在config.yaml中补充image_captcha.type")
        sys.exit(1)

def add_new_topic(request, group_id):
    # Get the title & content of the post
    topic_title = request.POST.get('title', '').strip()
    topic_content = request.POST.get('content', '').strip()
    # Get the attachment (optional)
    has_attach = request.POST.get('has_attach', 'false').strip()
    attach_name = request.POST.get('attach', '').strip()
    attach_type = request.POST.get('type', '').strip()  # image
    attach_path = os.path.join(config.get_config('SHAREHP_UPLOAD_DIR'), attach_name)
    attach_url = 'topic/' + attach_name  # FIXME

    # Validate parameters
    if not _check_login(request):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请登录后操作!"}))
    if not topic_title:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "话题不能为空!"}))
    if len(topic_title) >= 256:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "话题过长,只能256个字符!"}))
    if not topic_content:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "话题内容不能为空!"}))
    if len(topic_content) >= 10000:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "话题内容过长,只能10000个字符!"}))
    if has_attach == 'true' and not default_storage.exists(attach_path):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器图片丢失,请重新上传!"}))

    # Upload the image to Qiniu
    if has_attach == 'true':
        try:
            image.qiniu_upload(attach_path, attach_url)
        except QiniuUploadFileError:
            logger.error('Fail to add new topic, uploading image error!\n')
            return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器异常,请稍后再试!"}))

    attachment = _gen_attachment_info(has_attach, attach_type, attach_path, attach_url)
    current_date = datetime.now()

    # Save the new topic
    # FIXME transaction
    new_topic = Group_Topic(
        gmt_create=current_date,
        gmt_modify=current_date,
        group_id=group_id,
        user_id=_get_current_userid(request),  # won't be None
        topic_name=topic_title,
        comments=0,
        status='enabled'
    )
    new_topic.save()

    # Save the topic content as its first comment
    topic_comment = Topic_Comment(
        gmt_create=current_date,
        gmt_modify=current_date,
        topic_id=new_topic.id,
        user_id=_get_current_userid(request),  # won't be None
        content=topic_content,
        attachment=json.dumps(attachment),
        status='enabled'
    )
    topic_comment.save()

    return HttpResponse(json.dumps({'success': 0, 'data': {}}))

def main_build():
    # build for wiki
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_path = sys.argv[1]
    config = get_config(config_path)
    bf = BruteForceSearch()
    bf.build_from_config(config)
    bf.save_from_config(config)

async def monitor_uptime(self) -> None:
    """Checks the status of each server and sends up/down notifications"""
    await self.bot.wait_until_ready()
    channel = self.bot.get_channel(get_config("notification_channel"))
    timeout = get_config("timeout")
    for i in get_servers():
        if i["type"] == "ping":
            result = ping(i["address"], timeout=timeout)  # ping once and reuse
            if result is False:
                await self.notify_down(i, channel, "Host unknown")
            elif result is None:
                await self.notify_down(i, channel, "Timed out")
            else:
                await self.notify_up(i, channel)
        elif i["type"] == "tcp":
            host, port = i["address"].split(":")
            conn = asyncio.open_connection(host, port)
            try:
                reader, writer = await asyncio.wait_for(conn, timeout)
                writer.close()
                await writer.wait_closed()
            except asyncio.TimeoutError:
                await self.notify_down(i, channel, "Timed out")
            except ConnectionRefusedError:
                await self.notify_down(i, channel, "Connection failed")
            else:
                await self.notify_up(i, channel)
        else:
            address = i["address"]
            if not address.startswith("http"):
                address = f"http://{address}"
            async with aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=timeout)
            ) as session:
                try:
                    async with session.get(address) as res:
                        if res.ok:
                            await self.notify_up(i, channel)
                        else:
                            await self.notify_down(i, channel, res.reason)
                except asyncio.TimeoutError:
                    await self.notify_down(i, channel, "Timed out")
                except aiohttp.ClientError:
                    await self.notify_down(i, channel, "Connection failed")

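# The monitors above rely on two keys per entry from get_servers();
# an illustrative (assumed) shape of the list:
# [{"type": "ping", "address": "10.0.0.1"},
#  {"type": "tcp", "address": "db.example.com:5432"},
#  {"type": "http", "address": "example.com"}]
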
def add_storage(name,type="zfs",capacity=None): default_capacity = get_config('storage','default_capacity') default_pool = get_config('storage','default_pool') pool = Pizza() if capacity : capacity = capacity else : capacity = default_capacity if type == 'zfs' : attr = {'quota' : capacity } pool.create_dataset(default_pool,name,attribute=attr) elif type == "zvol" : pool.create_zvol(default_pool,name,capacity) else : pass
def __init__(self): super().__init__( command_prefix=get_config("prefix"), description="Bot to monitor uptime of services", reconnect=True, intents=Intents.default(), ) self.bot = bot
def get_gitlab_api():
    config = get_config()

    gitlab_config = config['gitlab']
    server = gitlab_config['server']
    token = gitlab_config['token']

    return GitLabApi(server, token, ssl_verify=False)

def init_from_config():
    config = get_config()

    server = config['vault']['server']
    role_id = config['vault']['role_id']
    secret_id = config['vault']['secret_id']

    return init(server, role_id, secret_id)

def __init__(self):
    super(AUFR, self).__init__()
    self.setupUi(self)
    # The original read the saved Setting inside a bare try/except and then
    # unconditionally overwrote it with defaults; fall back only on failure
    try:
        self.setting = Setting(self)
        conf = self.setting.get_config()
    except Exception:
        conf = get_config(net_mode='ir_se', use_mtcnn=True, threshold=1.25)
    self.threshold = conf.threshold
    self.model_name = 'mobile face' if conf.net_mode is None else conf.net_mode
    self.net_mode = conf.net_mode
    self.use_mtcnn = True if conf.use_mtcnn else False
    self.camera_id = conf.video_source
    self.face_recognize = face_recognize(conf)
    self.targets, self.names = self.face_recognize.get_facebank()
    self.has_targ = len(self.targets) > 0
    # Variables
    self.camera_id = 'video.mp4'  # can also be a url of Video
    self.dataset_per_subject = 50
    self.ret = False
    self.trained_model = 0
    self.image = cv2.imread("icon/default.jpg", 1)
    self.modified_image = self.image.copy()
    self.reload()
    self.display()
    # Actions
    self.generate_dataset_btn.setCheckable(True)
    # self.train_model_btn.setCheckable(True)
    self.recognize_face_btn.setCheckable(True)
    # Menu
    self.about_menu = self.menu_bar.addAction("About")
    self.help_menu = self.menu_bar.addAction("Help")
    self.about_menu.triggered.connect(self.about_info)
    self.help_menu.triggered.connect(self.help_info)
    # Algorithms
    self.generate_dataset_btn.clicked.connect(self.pressedGendataButton)
    self.recognize_face_btn.clicked.connect(self.recognize)
    self.video_recording_btn.clicked.connect(self.save_video)
    self.adv_setting.clicked.connect(self.pressedSettingsButton)
    self.capture_btn.clicked.connect(self.captureDialog_show)
    if not os.path.exists(FACE_BANK):
        os.mkdir(FACE_BANK)
    self.createMenus()

def evolution():
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg", type=str, required=True)
    args = parser.parse_args()

    macs_budget = 15
    CONFIG = get_config(args.cfg)
    nodes_num = calculate_nodes(CONFIG)

    model = Predictor(nodes_num)
    model.load_state_dict(torch.load(CONFIG.predictor_pretrained))
    model = model.cuda()

    sol_per_pop = 20
    num_parents_mating = 10
    pop_size = (sol_per_pop, 5 * 6)
    # Random binary initial population (high is exclusive, so use 2 for {0, 1};
    # the original high=1 produced all zeros)
    new_population = np.random.randint(low=0, high=2, size=pop_size)

    best_outputs = []
    num_generations = 1000
    for generation in range(num_generations):
        print("Generation : ", generation)
        fitness = predict_accuracy(new_population, model, nodes_num)
        best_outputs.append(np.max(fitness))
        print("Best result : ", np.max(fitness))

        parents = ga.select_mating_pool(new_population, fitness, num_parents_mating)
        offspring_size = (sol_per_pop - num_parents_mating, 30)
        evolution_id = 0
        offspring = np.empty(offspring_size, dtype=np.int32)
        # Keep generating offspring until each slot holds a child within budget
        while evolution_id < offspring_size[0]:
            offspring_crossover = ga.crossover(evolution_id, parents, offspring_size, macs_budget)
            offspring_mutation = ga.mutation(offspring_crossover, num_mutations=2)
            offspring_mutation_macs = cal_macs(offspring_mutation[0])
            if offspring_mutation_macs <= macs_budget:
                offspring[evolution_id] = offspring_mutation
                evolution_id += 1

        new_population[0:parents.shape[0], :] = parents
        # the original assigned only the last mutation; use the full offspring array
        new_population[parents.shape[0]:, :] = offspring

    fitness = predict_accuracy(new_population, model, nodes_num)
    best_idx = np.argmax(fitness)
    print("Best solution : ", new_population[best_idx, :])
    print("Best predict accuracy : ", fitness[best_idx])

    architecture_metric = []
    architecture_metric.append(
        decode_population(new_population[best_idx, :], nodes_num).reshape(-1))
    df_architecture = pd.DataFrame(architecture_metric)
    df_architecture.to_csv(CONFIG.path_to_evolution_architecture, index=False)

def change_avatar(request):
    if request.method == "POST":
        attach_name = request.POST.get('attach', '').strip()
        crop_x = request.POST.get('crop_x')
        crop_y = request.POST.get('crop_y')
        crop_width = request.POST.get('crop_width')
        crop_height = request.POST.get('crop_height')

        if not _check_login(request):
            return HttpResponse(json.dumps({'success': -1, 'error_msg': "请登录后操作!"}))
        if not default_storage.exists(os.path.join(config.get_config('SHAREHP_UPLOAD_DIR'), attach_name)):
            return HttpResponse(json.dumps({'success': -1, 'error_msg': "图片丢失,请重新上传!"}))

        crop_result = image.crop_user_avatar(config.get_config('SHAREHP_UPLOAD_DIR'), attach_name,
                                             (int(crop_x), int(crop_y),
                                              int(crop_x) + int(crop_width),
                                              int(crop_y) + int(crop_height)))
        try:
            avatar_big_url = 'user/avater/' + crop_result['big']['name']
            image.qiniu_upload(crop_result['big']['path'], avatar_big_url)
            avatar_mid_url = 'user/avater/' + crop_result['mid']['name']
            image.qiniu_upload(crop_result['mid']['path'], avatar_mid_url)
            avatar_small_url = 'user/avater/' + crop_result['small']['name']
            image.qiniu_upload(crop_result['small']['path'], avatar_small_url)
        except QiniuUploadFileError:
            logger.error('Fail to change avatar, uploading image error!\n')
            return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器异常,请稍后再试!"}))

        avatar_info = {
            'big': avatar_big_url,
            'mid': avatar_mid_url,
            'small': avatar_small_url
        }
        User.objects.filter(id=_get_current_userid(request)).update(gmt_modify=datetime.now(),
                                                                    avatar=json.dumps(avatar_info))
        cache.del_user_info(_get_current_userid(request))  # important!
        return HttpResponse(json.dumps({'success': 0, 'data': {'src': avatar_big_url}}))
    else:
        avatar = json.loads(User.objects.get(id=_get_current_userid(request)).avatar)
        context = RequestContext(request, {'avatar': avatar})
        return render_to_response('change_avatar.htm', context)

def _get_return_url(request):
    return_url = request.GET.get('return_url', None)
    referer = request.META.get('HTTP_REFERER', None)
    if not return_url and referer:
        return_url = referer
    if not return_url or not _safe_return_url(return_url):
        return_url = config.get_config('SHAREHP_DEFAULT_RETURN_URL')  # default return url
    return return_url

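# _safe_return_url is referenced above but not shown; a minimal sketch of one
# plausible implementation (same-host check), purely an assumption:
def _safe_return_url(url):
    from urlparse import urlparse  # Python 2, matching urllib.urlencode above
    allowed = urlparse(config.get_config('SHAREHP_SERVER_HOST')).netloc
    return urlparse(url).netloc == allowed
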
def main_query():
    # build for wiki
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_path = sys.argv[1]
    config = get_config(config_path)
    lsh = LSH()
    lsh.load_from_config(config)
    results = lsh.query2_word("high_school", 10)
    print(results)

def main_query():
    # build for wiki
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_path = sys.argv[1]
    config = get_config(config_path)
    bf = BruteForceSearch()
    bf.build_from_config(config)
    results = bf.query2_bf("high_school", 10)
    print(results)

def module_stats():
    global RESMAP
    global MI
    global MSM

    update = True

    if MI is None or update:
        cfg = get_config()
        MI = ModuleIndexer(
            cachedir=cfg.get('github_cache_dir', '/tmp/mi.cache'),
            checkoutdir=cfg.get('ansible_checkout', '/tmp/ansible.checkout')
        )
        MI.get_ansible_modules()
    if MSM is None or update:
        MSM = ModuleStatsMaker(MI)

    module_df = MSM.get_grouped_data()
    module_df.index.names = ['date']

    added_rolmean = module_df['total added'].rolling(window=3)
    module_df['total added (rolling mean)'] = added_rolmean.mean()

    # pd.rolling_std was removed from pandas; use Series.rolling(...).std()
    added_rolstd = module_df['total added'].rolling(window=3).std()
    added_rolstd = added_rolstd.to_frame()
    module_df['total added (rolling std)'] = added_rolstd['total added']

    # must be float for ARMA/ARIMA
    cumsum = pd.to_numeric(module_df['cumulative sum'], downcast='float')
    cumsum = cumsum.to_frame()
    cumsum.columns = ['cumsum']

    '''
    # don't let the unfinished month screw up the prediction
    cumsum.drop(cumsum.index[len(cumsum)-1])

    # use the log to set the stationarity
    ts_log_data = np.log(cumsum['cumsum'])

    # create the model
    model = sm.tsa.ARMA(ts_log_data, order=(1,1), freq='M').fit()

    #start_date = ts_log_data.index[-1] + Day(1)
    #end_date = ts_log_data.index[-1] + Day(60)
    #y_forecast = model.predict('2017-04-30', '2018-01-31')
    #y_vals = np.exp(y_forecast)

    #print(module_df.to_csv())
    print(np.exp(model.predict('2017-04-30', '2017-10-31')))
    '''

    print(module_df.to_csv())
    import epdb; epdb.st()

def __init__(self, print_urls=True):
    # read the parameters from config file
    info = config.get_config('info')
    oauth_token = info['token']
    base_url = info['canvas_instance_url']
    api_prefix = info['api_prefix']
    self.canvas = CanvasReader(oauth_token, base_url, api_prefix, verbose=print_urls)
    self.course_id = info['course_id']
    course_info = self.canvas.get_course_info(self.course_id)
    self.course_name = course_info['name']

def new_view(request, *args, **kwargs):
    if not request.xmanuser['login']:
        return_url = urllib.urlencode(
            {'return_url': config.get_config('SHAREHP_SERVER_HOST') + request.get_full_path()})
        return HttpResponseRedirect('/login/?' + return_url)
    else:
        if not _is_admin(request):
            return render_to_response('bops/no_privilege.htm')
        else:
            return view(request, *args, **kwargs)

def module_stats():
    log_client()
    if request.headers.get('User-Agent') == SLACK:
        return None

    global RESMAP
    global MI
    global MSM

    oformat = request.args.get('format', None)
    update = request.args.get('update', 'False')
    if update == 'False':
        update = False
    else:
        update = True

    if MI is None or update:
        print("!MI: %s" % (MI is None))
        print("Update: %s" % update)
        cfg = get_config()
        MI = ModuleIndexer(
            cachedir=cfg.get('github_cache_dir', '/tmp/mi.cache'),
            checkoutdir=cfg.get('ansible_checkout', '/tmp/ansible.checkout')
        )
        MI.get_ansible_modules()
    if MSM is None or update:
        print("!MSM: %s" % (MSM is None))
        print("Update: %s" % update)
        MSM = ModuleStatsMaker(MI)

    module_df = MSM.get_grouped_data()
    module_df.index.names = ['date']

    # show everything
    show_cols = [[x, True] for x in module_df.columns]

    if oformat == 'csv':
        return module_df.to_csv()
    else:
        (plot_script, plot_div, js_resources, css_resources) = \
            bkt.dataframe_to_linegraph(module_df, show_cols, title="module growth")
        return render_template(
            'module_stats.html',
            data={
                'username': None,
                'lines': show_cols,
            },
            js_resources=js_resources,
            css_resources=css_resources,
            plot_script=plot_script,
            plot_div=plot_div,
        )

def add_new_resource(request):
    title = request.POST.get('title', '').strip()
    type = request.POST.get('type')
    # for an image: filename; for a video: id
    attach = request.POST.get('attach', '').strip()

    # check params
    if not _check_login(request):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请登录后操作!"}))
    if len(title) <= 0:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "资源标题不能为空!"}))
    if len(title) >= 1000:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "资源标题过长,只能1000个字符!"}))
    if len(attach) <= 0:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请上传一个视频或者图片!"}))
    if type not in ('image', 'video'):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "资源类型不支持!"}))
    if type == 'image' and not default_storage.exists(os.path.join(config.get_config('SHAREHP_UPLOAD_DIR'), attach)):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器图片丢失,请重新上传!"}))
    if type == 'video':
        video_info = cache.get_video_info(attach)
        if not video_info:
            return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器视频丢失,请重新上传!"}))

    # build field data
    try:
        if type == 'image':
            thumbnail, content = _deal_image_resource(attach)
        else:
            thumbnail, content = _deal_video_resource(video_info)
    except QiniuUploadFileError:
        logger.error('Fail to add new resource, uploading image error!\n')
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器异常,请稍后再试!"}))

    # Save the resource
    current_date = datetime.now()
    resource = Resource(
        gmt_create=current_date,
        gmt_modify=current_date,
        user_id=_get_current_userid(request),
        title=title,
        type=type,
        thumbnail=thumbnail,
        content=content,
        up=0,
        down=0,
        comments=0,
        status='enabled'
    )
    resource.save()
    cache.set_last_resource_id(resource.id)  # important
    return HttpResponse(json.dumps({'success': 0, 'data': {}}))

def _deal_video_resource(video_info):
    # Save the video screenshot
    img_info = image.save_img(video_info['bimg'], config.get_config('SHAREHP_UPLOAD_DIR'))
    attach_info = {
        'name': img_info['name'],
        'path': img_info['path'],
        'size': img_info['size']
    }
    # Generate the thumbnail and upload it
    thumbnail_info = image.thumbnail_img(attach_info['name'], config.get_config('SHAREHP_UPLOAD_DIR'))
    thumbnail_url = 'resource/thumbnail/' + thumbnail_info['name']
    image.qiniu_upload(thumbnail_info['path'], thumbnail_url)
    content = json.dumps({
        'size': '',  # not used for videos
        'url': video_info['swf']
    })
    thumbnail = json.dumps({
        'size': thumbnail_info['size'],
        'url': thumbnail_url
    })
    return (thumbnail, content)

def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_fn = sys.argv[1]
    config = get_config(config_fn)
    ftext = config.get('path', 'short_abstracts_text')
    fpos = config.get('path', 'short_abstracts_pos')
    fdict = ftext + '.phrase.dict'
    fphrase = ftext + '.phrase'

    #logging.info('POS tagging...')
    #pos_tagging(ftext, fpos)

    logging.info('collecting phrases...')
    top_AN(ftext, fpos, fdict)

    logging.info('replacing phrases...')
    replace_phrase(ftext, fphrase, fdict, 50000)

def main(config_file_path):
    config_parser = get_config_parser(config_file_path)
    config = get_config(config_parser)
    logger = get_logger(config)
    with tf.Session() as sess:
        processor = Processor(config, logger)
        env = Environment(logger, config, processor.diff_blocks,
                          processor.price_blocks, processor.timestamp_blocks)
        agent = Agent(sess, logger, config, env)
        agent.train()
        agent.summary_writer.close()

def add_topic_comment(request, topic_id):
    content = request.POST.get('content', '').strip()
    # Get the attachment (optional)
    has_attach = request.POST.get('has_attach', 'false').strip()
    attach_name = request.POST.get('attach', '').strip()
    attach_type = request.POST.get('type', '').strip()
    attach_path = os.path.join(config.get_config('SHAREHP_UPLOAD_DIR'), attach_name)
    attach_url = 'topic/' + attach_name

    if not Group_Topic.objects.filter(id=topic_id).exists():
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "对不起你回复的话题已经不存在!"}))
    if not _check_login(request):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请登录后操作!"}))
    if not len(content.strip()):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "回复内容不能为空!"}))
    if len(content) >= 10000:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "回复内容过长,只能10000个字符!"}))
    if has_attach == 'true' and not default_storage.exists(attach_path):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器图片丢失,请重新上传!"}))

    # Upload the image to Qiniu
    if has_attach == 'true':
        try:
            image.qiniu_upload(attach_path, attach_url)
        except QiniuUploadFileError:
            logger.error('Fail to add topic comment, uploading image error!\n')
            return HttpResponse(json.dumps({'success': -1, 'error_msg': "服务器异常,请稍后再试!"}))

    attachment = _gen_attachment_info(has_attach, attach_type, attach_path, attach_url)
    current_date = datetime.now()

    # Insert the reply  FIXME transaction
    topic_comment = Topic_Comment(
        gmt_create=current_date,
        gmt_modify=current_date,
        topic_id=topic_id,
        user_id=_get_current_userid(request),  # won't be None
        content=content,
        attachment=json.dumps(attachment),
        status='enabled'
    )
    topic_comment.save()

    # Update the topic's metadata
    topic = Group_Topic.objects.get(id=topic_id)
    Group_Topic.objects.filter(id=topic_id).update(gmt_modify=current_date,
                                                   comments=topic.comments + 1)
    return HttpResponse(json.dumps({'success': 0, 'data': {}}))

def upload_image(request):
    if not _check_login(request):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请登录后操作!"}))
    if not request.FILES or not request.FILES['file']:
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请选择一个图片!"}))

    upload_file = request.FILES['file']
    # Check the uploaded image size
    if upload_file.size >= 1024 * 1024:  # 1M
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "请上传小于1M的图片!"}))
    # Check the uploaded file type
    if not image.get_image_type(upload_file):
        return HttpResponse(json.dumps({'success': -1, 'error_msg': "对不起,你上传的图片类型不支持!"}))

    # Save as a temporary file
    filename = common.unique_filename()
    filepath = default_storage.save(os.path.join(config.get_config('SHAREHP_UPLOAD_DIR'), filename),
                                    ContentFile(upload_file.read()))
    width, height = image.get_image_size(filepath)
    return HttpResponse(json.dumps({'success': 0, 'data': {'src': filename, 'width': width, 'height': height}}))

def group(request, group_id, order):
    is_exist = Group.objects.filter(id=group_id).exists()
    if not is_exist:
        return render_to_response('miss/group.htm', RequestContext(request))

    # 10 topics per page
    page_size = 10
    page = _get_page(request)
    offset = (int(page) - 1) * page_size

    # Ordering
    if order == 'hot':
        # FIXME hottest-first ordering logic
        gts = []
    else:
        gts = Group_Topic.objects.filter(group_id=group_id, status='enabled').order_by('-gmt_create')[
            offset: offset + page_size]

    topics = []
    for t in gts:
        topic = {
            'id': t.id,
            'topic_name': t.topic_name,
            'user_id': t.user_id,
            'nickname': cache.get_user_nickname(t.user_id),
            'avatar': cache.get_user_avatar(t.user_id),
            'comments': t.comments,
            'publish_date': prettydate.convert(t.gmt_create),
            'last_comment_date': prettydate.convert(t.gmt_modify)
        }
        topics.append(topic)

    topic_count = Group_Topic.objects.filter(group_id=group_id, status='enabled').count()
    pages = int(math.ceil(topic_count / (page_size * 1.0)))
    pageUrl = _get_page_url(config.get_config('SHAREHP_SERVER_HOST'), request.path)
    group = cache.get_group_info(group_id)
    context = RequestContext(request, {'group': group, 'topics': topics, 'curPage': page,
                                       'pages': pages, 'pageUrl': pageUrl})
    return render_to_response('group.htm', context)

def group_topic(request, topic_id):
    is_exist = Group_Topic.objects.filter(id=topic_id).exists()
    if not is_exist:
        return render_to_response('miss/topic.htm', RequestContext(request))

    # 20 floors (comments) per page
    page_size = 20
    page = _get_page(request)
    offset = (int(page) - 1) * page_size

    comment_count = Topic_Comment.objects.filter(topic_id=topic_id).count()
    pages = int(math.ceil(comment_count / (page_size * 1.0)))
    pageUrl = _get_page_url(config.get_config('SHAREHP_SERVER_HOST'), request.path)

    tcs = Topic_Comment.objects.filter(topic_id=topic_id)[offset: offset + page_size]
    topic_comments = []
    floor_base = page_size * (page - 1)
    for floor, tc in enumerate(tcs):
        topic_comment = {
            'floor': floor + 1 + floor_base,  # FIXME
            'create_date': tc.gmt_create.strftime('%Y-%m-%d %H:%M:%S'),
            'user_id': tc.user_id,
            'nickname': cache.get_user_nickname(tc.user_id),
            'avatar': cache.get_user_avatar(tc.user_id),
            'content': tc.content,
            'attachment': json.loads(tc.attachment),
        }
        topic_comments.append(topic_comment)

    topic = cache.get_topic_info(topic_id)
    group = cache.get_group_info_by_topicid(topic['id'])
    context = RequestContext(request, {'group': group, 'topic': topic, 'topicComments': topic_comments,
                                       'curPage': page, 'pages': pages, 'pageUrl': pageUrl})
    return render_to_response('group_topic.htm', context)

def main():
    config_path = sys.argv[1]
    config = get_config(config_path)
    noun_path = config.get('path', 'noun_words')
    adj_path = config.get('path', 'adj_words')
    bigram_path = config.get('path', 'bigram_words')
    stop_path = config.get('path', 'stop_words')
    new_path = config.get('path', 'bigram_adj_noun')
    fout = open(new_path, 'w')
    dnoun = fn2dict(noun_path)
    dadj = fn2dict(adj_path)
    dstop = fn2dict(stop_path)
    with open(bigram_path) as f:
        for line in f:
            words = line.strip().split()
            if (words[0] in dadj and words[1] in dnoun
                    and words[0] not in dstop and words[1] not in dstop):
                fout.write(line)
    fout.close()

def classify(request, tag):
    page_size = 10
    page = _get_page(request)
    offset = (int(page) - 1) * page_size

    rl = Resource.objects.filter(type=tag, status='enabled').order_by('-gmt_create')[offset: offset + page_size]
    res_count = Resource.objects.filter(type=tag, status='enabled').count()
    pages = int(math.ceil(res_count / (page_size * 1.0)))

    res_list = []
    for r in rl:
        res = {
            'id': r.id,
            'create_date': prettydate.convert(r.gmt_create),
            'user_id': r.user_id,
            'nickname': cache.get_user_nickname(r.user_id),
            'avatar': cache.get_user_avatar(r.user_id),
            'title': r.title,
            'type': r.type,
            'thumbnail': json.loads(r.thumbnail),  # json
            'comments': r.comments,
            'vote_type': cache.resource_vote_type(r.id, _get_current_userid(request)),
            'up': r.up,
            'down': r.down
        }
        res_list.append(res)

    # side bar
    group_list = cache.get_group_list()
    pageUrl = _get_page_url(config.get_config('SHAREHP_SERVER_HOST'), request.path)
    context = RequestContext(request, {'tag': tag, 'resList': res_list, 'groups': group_list,
                                       'curPage': page, 'pages': pages, 'pageUrl': pageUrl})
    return render_to_response('tag.htm', context)

def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_fn = sys.argv[1]
    config = get_config(config_fn)
    lsh, engine, matrix, wordlist = build_environment(config)

    # load the portmanteau word list
    port_path = config.get('portmanteau', 'path')
    ports = []
    for line in open(port_path):
        ll = line.split()
        w1 = ll[0]
        w2 = ll[1]
        ports.append((w1, w2))
    logging.info('Loaded {} portmanteaus'.format(len(ports)))

    # search according to ports
    fout = open(config.get('portmanteau', 'outpath'), 'w')
    i = 0
    for w1, w2 in ports:
        i += 1
        logging.info('search for {}/{}:({} {})'.format(i, len(ports), w1, w2))
        data, norm = analogy(w1, w2, lsh, engine, matrix, wordlist)
        if data is None:
            continue
        fout.write('\n####\n')
        fout.write('{} {} {}\n'.format(w1, w2, norm))
        for dis, w3, w4 in data:
            fout.write('{} {} {}\n'.format(w3, w4, -dis))
        if i == 2:
            break
    fout.close()

def main():
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
                        level=logging.INFO)
    config_fn = sys.argv[1]
    config = get_config(config_fn)
    ftext = config.get('path', 'short_abstracts_text')
    fpos = config.get('path', 'short_abstracts_pos')
    fn_noun = config.get('path', 'noun_words')
    fn_adj = config.get('path', 'adj_words')
    w2v = MyWord2Vec()
    w2v.load(config)

    # collect noun words
    nounList = WordList()
    nounList.load(fn_noun)
    nounList.filter(w2v)
    #nounList.collect(['NN','NNS'], ftext, fpos)
    nounList.save(fn_noun)

    # collect adjective words
    adjList = WordList()
    adjList.load(fn_adj)
    adjList.filter(w2v)
    #adjList.collect(['JJ','JJS','JJR'], ftext, fpos)
    adjList.save(fn_adj)

from operator import itemgetter
from pprint import pprint

from textblob import TextBlob

from utils.config import get_config
from utils.elastic import get_all_issues
from utils.moduletools import ModuleIndexer

NOUNS = []
NOUN_PHRASES = []
TAGMAP = {}
CMAP = {}

config = get_config()
MI = ModuleIndexer(
    cachedir=config.get('github_cache_dir', '/tmp/mi.cache'),
    checkoutdir=config.get('ansible_checkout', '/tmp/ansible.checkout')
)
MI.get_ansible_modules()

#query = "SELECT * FROM github WHERE state IS 'open'"
query = "SELECT * FROM github"
rdict = get_all_issues(query=query)

for k, v in rdict.iteritems():
    if v['github_repo'] != 'ansible':
        continue
    if v['github_type'] != 'pullrequest':

from go_processes.go_timeouts import GoTimeouts
from go_processes.go_vet import GoVet

___author___ = "Jim Hill (github.com/jimah)"
___credits___ = ["Jim Hill (github.com/jimah)",
                 "Lee Archer (github.com/lbn)",
                 "Dom Udall (github.com/domudall)",
                 "Chris Mallon (github.com/JaegerBane)"]
___license___ = "MIT"
___version___ = "1.0"
___maintainer___ = "Jim Hill"
___email___ = "*****@*****.**"
___status___ = "Development"

CONFIG = get_config()

logging.basicConfig(level="DEBUG")
logger = logging.getLogger(__name__)


def error(*objs):
    """
    Outputs to stderr
    """
    print(*objs, file=sys.stderr)


def code_coverage(package):