def init_runlist():
    """Build the task run list."""
    # Fetch all configured items
    items = config('agent')
    items.remove('global')
    # Directory holding the runnable scripts
    script_dir = 'scripts/'
    # Run list to be populated
    run_list = []
    # Initialise every configured item
    for item in items:
        item_conf = config('agent', item)
        if not item_conf['timeout']:
            item_conf['timeout'] = agent_conf['timeout']
        # Last start time, filled in below
        item_conf['last_time'] = None
        # Full script path (key name kept as-is for compatibility with callers)
        item_conf['script_ptah'] = script_dir + item_conf['name']
        script_path = item_conf['script_ptah']
        # Make sure the script actually exists
        if not os.path.exists(script_path):
            raise Exception('script %s not found' % script_path)
        run_list.append(item_conf)
    # Initialise the time state of every task
    for task in run_list:
        if not task['last_time']:
            task['last_time'] = time.time()
        task['timeout'] = int(task['timeout'])
        task['interval'] = int(task['interval'])
    return run_list

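# A minimal consumption sketch for the run list above (hypothetical scheduler
# loop, not part of the source; assumes each script is directly executable):
#
#     for task in init_runlist():
#         if time.time() - task['last_time'] >= task['interval']:
#             subprocess.call(task['script_ptah'], timeout=task['timeout'])
#             task['last_time'] = time.time()
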
class Feishu(object):
    FEISHU_APP_ID = config('FEISHU_APP_ID')
    FEISHU_APP_SECRET = config('FEISHU_APP_SECRET')
    FEISHU_CHARGE_CHAT_ID = config('FEISHU_CHARGE_CHAT_ID', default='oc_a4bc2f10dd9ec84f08f2bbcaa82e08cd')
    FEISHU_MAC_CHAT_ID = config('FEISHU_MAC_CHAT_ID', default='oc_3a7065d01efdb36d949088341aada466')
    FEISHU_SESSION_CHAT_ID = config('FEISHU_SESSION_CHAT_ID', default='oc_19b2404bb0917fc066cce1b3a58c3558')

    @classmethod
    def send_groud_msg(cls, receiver_id: str, text: str):
        # Exchange the app credentials for a tenant access token
        data = {
            'app_id': cls.FEISHU_APP_ID,
            'app_secret': cls.FEISHU_APP_SECRET,
        }
        response = requests.post(
            'https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/',
            json=data)
        assert response.ok
        body = json.loads(response.text)
        if body['code'] != 0:
            raise Exception('Failed to get Feishu access_token')
        access_token = response.json()['tenant_access_token']

        # Send the text message to the target chat
        headers = {'Authorization': f'Bearer {access_token}'}
        data = {
            'chat_id': receiver_id,
            'msg_type': 'text',
            'content': {
                'text': text,
            },
        }
        response = requests.post(
            'https://open.feishu.cn/open-apis/message/v4/send/',
            json=data, headers=headers)
        assert response.ok
        body = json.loads(response.text)
        if body['code'] != 0:
            raise Exception('Failed to send message to Feishu')

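# Usage sketch for the class above (a guess at how it is called; the chat id
# constant and the message text are illustrative, credentials come from config):
#
#     Feishu.send_groud_msg(Feishu.FEISHU_CHARGE_CHAT_ID, 'daily charge job finished')
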
def save_figs(train_returns, test_returns, train_loss_critic, train_loss_actor, prefix=""):
    tr_cycle = config().metrics.train_cycle_length
    ts_cycle = config().metrics.test_cycle_length
    filepath = os.path.abspath(os.path.join(config().sim.output.path, f"{prefix}metrics"))

    # Expected-return curves
    plt.clf()
    plt.cla()
    plt.figure(0)
    plt.plot(range(0, len(train_returns) * tr_cycle, tr_cycle), train_returns, label="Train")
    plt.plot(range(0, len(test_returns) * ts_cycle, ts_cycle), test_returns, label="Test")
    plt.legend()
    plt.xlabel("Episodes")
    plt.ylabel("Expected return")
    plt.savefig(filepath + "_returns.eps", format="eps", dpi=1000)

    # Training-loss curves
    plt.clf()
    plt.cla()
    plt.figure(1)
    if config().sim.agent.type == "dqn":
        plt.plot(range(0, len(train_loss_critic) * tr_cycle, tr_cycle), train_loss_critic)
    else:
        plt.plot(range(0, len(train_loss_critic) * tr_cycle, tr_cycle), train_loss_critic, label="Critic")
        plt.plot(range(0, len(train_loss_actor) * tr_cycle, tr_cycle), train_loss_actor, label="Actor")
        plt.legend()
    plt.xlabel("Episodes")
    plt.ylabel("Training Losses")
    plt.savefig(filepath + "_losses.eps", format="eps", dpi=1000)

def download_file(version, format, type_, extension):
    v = version.replace('.', '_')
    archive_path = '{}/{}/{}/{}.{}'.format(
        config()['data']['data_folder'],
        config()['data']['build_name'],
        format, type_, extension)
    return FileResponse(archive_path, filename='echr_{}_{}_{}.{}'.format(
        v, format, type_, extension))

def get_categories(userid):
    """Get a user's categories based on their id."""
    retrieve = """SELECT X."CategoryId", X."CategoryName"
                  FROM "Categories" X, "UserCategories" Y
                  WHERE X."CategoryId" = Y."CategoryId" AND Y."UserId" = '{}';""".format(userid)
    categories = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(retrieve)
        # store all results
        categories = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return categories

def recommend_by_recent():
    """Return recipes ordered by most recent creation date."""
    checkdb = """SELECT "RecipeId", "RecipeName", "CreationDate"
                 FROM "Recipes" ORDER BY "CreationDate" DESC;"""
    results = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(checkdb)
        # store all results
        results = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return results

def recommend_by_rating():
    """Return recipes ordered by average rating."""
    checkdb = """SELECT "RecipeId", "RecipeName", "avg"
                 FROM (SELECT "Recipes"."RecipeId", "Recipes"."RecipeName",
                              ROUND(AVG("CookedRecipes"."Rating") ,2) AS "avg"
                       FROM "Recipes"
                       INNER JOIN "CookedRecipes" ON "Recipes"."RecipeId" = "CookedRecipes"."RecipeId"
                       GROUP BY "Recipes"."RecipeId") AS "Ratings"
                 ORDER BY "avg" DESC;"""
    results = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(checkdb)
        # store all results
        results = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return results

def cook(scale, rating, userid, recipeid):
    """Record that a user cooked a recipe, with its scale and rating."""
    checkdb = """INSERT INTO "CookedRecipes"("DateCooked", "Scale", "Rating", "UserId", "RecipeId")
                 VALUES(%s, %s, %s, %s, %s)"""
    ct = datetime.datetime.utcnow()
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # insert the cooked-recipe record
        cur.execute(checkdb, (ct, scale, rating, userid, recipeid))
        # commit the changes
        conn.commit()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return 'success'

def unregister_services_conf():
    """Read the services to be decommissioned from the .ini file.

    :return: list of service attribute dicts
    """
    env_type = env_file_conf('ENV_TYPE').upper() if env_file_conf('ENV_TYPE') else "DEV"
    services_conf = config('configs/unregister_services.ini')
    services_str = services_conf.getOption(env_type, 'services', default='{}')
    service_dict = json.loads(services_str)
    all_services = []
    if not service_dict:
        return all_services
    for product, service_str in service_dict.items():
        services_list = service_str.split(',')
        for service in services_list:
            service_attr = {
                "product": product,
                "service": service,
                "env_type": env_type.lower()
            }
            # avoid duplicate entries
            if service_attr not in all_services:
                all_services.append(service_attr)
    print("Deleted services: {}".format(all_services))
    return all_services

def insert_category(recipe_id, category_id):
    """Insert a row into the RecipeCategories table."""
    check_sql = """SELECT * FROM "RecipeCategories"
                   WHERE "RecipeId" = '{}' AND "CategoryId" = '{}';""".format(recipe_id, category_id)
    insert_category_sql = """INSERT INTO "RecipeCategories"("RecipeId", "CategoryId") VALUES(%s, %s)"""
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # check whether the pairing already exists
        cur.execute(check_sql)
        category = cur.fetchone()
        if category is None:
            # execute the INSERT statement to the RecipeCategories table
            cur.execute(insert_category_sql, (recipe_id, category_id))
            # commit the changes to the database
            conn.commit()
        else:
            return 'failed'
        # close communication with the database
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        return 'failed2'
    finally:
        if conn is not None:
            conn.close()
    return 'success'

def search_ingredient(keyword):
    """Find ingredients whose name matches the search keyword."""
    results = None
    checkdb = """SELECT X."IngredientId", X."IngredientName"
                 FROM "Ingredients" X
                 WHERE X."IngredientName" LIKE '%{}%';""".format(keyword)
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(checkdb)
        # store all results
        results = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return results

def __init__(self, mode='train', generate_img=True):
    super(point_modelnet40_Dataset_cls, self).__init__()
    cfg = config.config()
    if mode == 'train':
        self.files = h5_helper.getDataFiles(
            osp.join(cfg.modelnet_cls_dir, 'train_files.txt'))
    elif mode == 'test':
        self.files = h5_helper.getDataFiles(
            osp.join(cfg.modelnet_cls_dir, 'test_file.txt'))
        self.files = [self.files[0]]
    self.data = None
    self.label = None
    if generate_img:
        self.compose = point_preprocess.get_train_test_compose()
    else:
        self.compose = point_preprocess.get_norm_points_compose()
    for f in self.files:
        cur_data, cur_label = h5_helper.loadDataFile(osp.join('../', f))
        if self.data is None:
            self.data = cur_data
            self.label = cur_label
        else:
            self.data = np.concatenate((self.data, cur_data), 0)
            self.label = np.concatenate((self.label, cur_label), 0)
    # astype returns a copy, so reassign to actually convert the dtype
    self.label = self.label.astype(np.int64)

def show_pantry(uid):
    """Get a user's pantry data."""
    checkdb = """SELECT I."IngredientName", P."CurrentQuantity", P."ExpirationDate", P."OrderId"
                 FROM "UserOrders" U, "OrderIngredients" O, "Ingredients" I, "Pantry" P
                 WHERE U."UserId" = '{}' AND U."OrderId" = O."OrderId"
                 AND O."IngredientId" = I."IngredientId" AND U."OrderId" = P."OrderId";""".format(uid)
    results = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(checkdb)
        # store all results
        results = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return results

def main():
    param_config = config()
    gpt_config = GPT2Config(vocab_size_or_config_json_file=param_config.input_dim,
                            n_positions=param_config.sequence_length,
                            n_ctx=param_config.sequence_length)
    model = GPT2LMHeadModel(gpt_config)

    # Load data (byte-level or character-level, depending on config)
    if param_config.input == 'bytes':
        _, validation_data = get_wili_data_bytes(param_config)
    else:
        _, validation_data = get_wili_data(param_config)

    validation_loader = DataLoader(validation_data,
                                   batch_size=1,
                                   shuffle=False,
                                   drop_last=False)

    if param_config.model_checkpoint is not None:
        with open(param_config.model_checkpoint, 'rb') as f:
            state_dict = torch.load(f)
            model.load_state_dict(state_dict)
            print("Model Loaded From: {}".format(param_config.model_checkpoint))

    model = model.to(device)
    predict(model, validation_loader, validation_data, param_config)

def get_ingredients_with_ids(recipeid):
    """Get ingredient ids, names and amounts for the given recipe id."""
    retrieve = """SELECT X."IngredientId", X."IngredientName", Y."Amount"
                  FROM "Ingredients" X, "IngredientsForRecipe" Y
                  WHERE X."IngredientId" = Y."IngredientId" AND Y."RecipeId" = '{}';""".format(recipeid)
    ingredients = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(retrieve)
        # store all results
        ingredients = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return ingredients

def create_tables():
    commands = (
        """
        DROP TABLE IF EXISTS personalise_app;
        """,
        """
        CREATE TABLE news (
            -- model fields here
        )
        """,
        """
        -- insert values here
        """)
    conn = None
    try:
        # read the connection parameters
        params = config()
        # connect to the PostgreSQL server
        conn = psycopg2.connect(**params)
        cur = conn.cursor()
        # execute the commands one by one
        for command in commands:
            cur.execute(command)
        # close communication with the PostgreSQL database server
        cur.close()
        # commit the changes
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()

def get_my_recipes(uid):
    """Get all of a user's recipes."""
    result = None
    conn = None
    get_recipes = """SELECT "RecipeId", "RecipeName" FROM "Recipes" WHERE "UserId" = %s """
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(get_recipes, (uid, ))
        result = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return result

def get_rating(recipeid):
    """Get the average rating for the given recipe id."""
    avgrating = """SELECT ROUND(AVG("Rating") ,2) FROM "CookedRecipes"
                   WHERE "RecipeId" = '{}';""".format(recipeid)
    rating = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(avgrating)
        # store the result
        rating = cur.fetchone()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return rating

def show_categories(user_id):
    """Get the category names a user is subscribed to."""
    checkdb = """SELECT "Categories"."CategoryName" FROM "Categories"
                 INNER JOIN "UserCategories" ON "Categories"."CategoryId" = "UserCategories"."CategoryId"
                 WHERE "UserId" = '{}'""".format(user_id)
    results = None
    conn = None
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # run the query
        cur.execute(checkdb)
        # store all results
        results = cur.fetchall()
        # close the cursor
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return results

def ini_conf(section, option, default, file_name=None):
    ini_config = config(filename=file_name)
    conf_value = ini_config.getOption(section=section, option=option, default=default)
    return conf_value

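# Usage sketch for ini_conf (the section, option and file names below are
# illustrative, not taken from the source):
#
#     timeout = ini_conf('agent', 'timeout', default='30', file_name='configs/agent.ini')
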
def main():
    args = config.config()
    if not args.train_data_path:
        logger.info("please input train dataset path")
        exit()
    # if not (args.dev_data_path or args.test_data_path):
    #     logger.info("please input dev or test dataset path")
    #     exit()
    all_ = data_preprocess.load_dataset(args.train_data_path, args.dev_data_path, args.test_data_path,
                                        args.src_embedding_path, args.tgt_embedding_path, args.train_batch_size,
                                        args.dev_batch_size, args.test_batch_size)
    src_TEXT, tgt_TEXT, src_vocab_size, tgt_vocab_size, src_word_embeddings, tgt_word_embeddings, \
        train_iter, dev_iter, test_iter = all_
    bos_id = dict(tgt_TEXT.vocab.stoi)['<bos>']
    eos_id = dict(tgt_TEXT.vocab.stoi)['<eos>']
    index2tgtword = tgt_TEXT.vocab.itos

    model = NMT_Atten(src_vocab_size, tgt_vocab_size, args.src_embedding_dim, args.tgt_embedding_dim,
                      src_word_embeddings, tgt_word_embeddings, args.hidden_size, args.tgt_max_len, bos_id)
    if torch.cuda.is_available():
        model = model.cuda()

    train_data, dev_data = data_preprocess.train_dev_split(train_iter, 0.9)
    loss_func = torch.nn.CrossEntropyLoss(reduction='none')

    if args.load_model:
        model.load_state_dict(torch.load(args.load_model, map_location='cpu'))
        # Interactive translation loop
        while True:
            test_sent = input("Input source sentence (q exit) >>>>")
            if test_sent.lower() == 'q':
                break
            # tokenize the input with jieba
            sent = ' '.join(jieba.cut(test_sent, cut_all=False))
            test_sent = src_TEXT.preprocess(sent)
            test_idx = [[src_TEXT.vocab.stoi[x] for x in test_sent]]
            inference(model, test_idx, eos_id, index2tgtword)
        return

    best_score = 0.0
    for epoch in range(args.epoch):
        train_loss, eval_loss, eval_score = train_model(model, train_data, dev_data, epoch,
                                                        args.lr, loss_func, eos_id, index2tgtword)
        logger.info('Epoch:%d, Training Loss:%.4f', epoch, train_loss)
        logger.info('Epoch:%d, Eval Loss:%.4f, Eval BLEU score:%.4f', epoch, eval_loss, eval_score)
        if eval_score > best_score:
            best_score = eval_score
            torch.save(model.state_dict(),
                       'results/%d_%s_%s.pt' % (epoch, 'Model', str(best_score)))

def main(args):
    global conn, cur
    # connect to PostgreSQL database
    db_params = config(filename=CONFIG_INI_FILE)
    conn = psycopg2.connect(**db_params)
    cur = conn.cursor()

    # load detection model
    model = detection.load_inference_resnet50()
    # fetch areas that will be analyzed
    spots = fetch_parking_spots(args.cam_ids)

    # get video data
    vcap = cv2.VideoCapture(args.video_file)
    fps = int(vcap.get(cv2.CAP_PROP_FPS))
    frame_counter = 0
    detection_interval = fps * time2seconds(args.time_interval)

    # start analyzing parking lot
    while vcap.isOpened():
        ret, frame = vcap.read()
        key = cv2.waitKey(fps) & 0xFF
        # end of video or user exit
        if not ret or key == ord("q"):
            print("Video stopped")
            break
        # check if parking spots are occupied every nth frame
        if frame_counter % detection_interval == 0:
            # set occupancy for each spot to false
            reset_occupancy(args.cam_ids)
            # detect which spots are occupied
            bboxes = detection.detect_objects(model, frame, [3, 4], threshold=0.5)
            occupied_spots = fetch_occupied_spots(spots, bboxes)
            # update occupancy in table for each spot
            update_occupancy(occupied_spots)
        # check if spot_time > time_threshold
        update_occupied_time(fps)
        update_overtime(args.limit)
        frame_counter += 1
        # display video
        # frame = display(frame, args.cam_ids)
        # cv2.imshow("parking lot", frame)

    # reset and close connections
    vcap.release()
    reset()
    cur.close()
    conn.close()

def make_stations_db_table(stop_db=False):
    """Load dataset with Russian weather stations and their characteristics
    to PostgreSQL database.
    """
    start_instance()
    conn = None
    try:
        # read connection parameters
        db_params = config(section="postgresql")
        # connect to the PostgreSQL server
        logging.info("Connecting to the PostgreSQL database...")
        conn = psycopg2.connect(**db_params)
        # conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        # create a cursor to perform database operations
        # cur = conn.cursor()
        with conn.cursor() as cur, open("../data/rus_weather_stations.csv", "r") as f:
            # Columns: ['id', 'lat', 'lon', 'elev', 'state', 'name', 'extra1', 'extra2']
            sql = (
                "DROP TABLE IF EXISTS weather_stations; "
                "CREATE TABLE weather_stations (station_id varchar(11) UNIQUE NOT NULL, "
                "latitude real NOT NULL, longitude real NOT NULL, elevation real NOT NULL, "
                "state varchar(2), station_name varchar(30), extra1 varchar(7), "
                "extra2 real)")
            for q in sql.split("; "):
                logging.debug(f"SQL query to be executed: {q}")
                cur.execute(q)
                logging.debug(f"cur.statusmessage is {cur.statusmessage}")
            next(f)  # Skip the header row
            # For the command below to work, the single null value (empty string)
            # in the last column (extra2) had to be manually replaced with a
            # real value (just copied from the row just below);
            # otherwise, this exception was raised:
            # psycopg2.errors.InvalidTextRepresentation: invalid input syntax for type real: ""
            cur.copy_from(f, "weather_stations", sep="\t")
            # Make the changes to the database persistent
            conn.commit()
        for line in conn.notices:
            logging.debug(line.strip("\n"))
    except (Exception, psycopg2.DatabaseError) as error:
        logging.exception("Exception occurred")
    finally:
        if conn is not None:
            conn.close()
            logging.info("Database connection closed.")
    if stop_db:
        stop_instance()

def get_build_info(build_name):
    current_build_info_path = os.path.join(config()['data']['data_folder'],
                                           build_name, 'build_info.yml')
    try:
        with open(current_build_info_path, 'r') as f:
            current_build_info = yaml.full_load(f)
            return current_build_info
    except Exception as e:
        return {}

def update_build():
    available, new_build = is_new_build_available()
    if not available:
        return {'message': 'Build up-to-date'}

    if os.path.isdir(TRANSITION_DIR):
        shutil.rmtree(TRANSITION_DIR)
    current_build = os.path.join(config()['data']['data_folder'], config()['data']['build_name'])
    new_build_path = os.path.join(config()['data']['data_folder'], new_build)
    shutil.copytree(current_build, TRANSITION_DIR)
    shutil.rmtree(current_build)
    shutil.copytree(new_build_path, current_build)

    # Find this process's container id from /proc/self/cgroup and restart it
    output = subprocess.run(["cat", "/proc/self/cgroup"], stdout=subprocess.PIPE)
    output = [e for e in output.stdout.decode().splitlines() if 'docker' in e]
    cid = output[-1].split('/')[-1]
    subprocess.run(["docker", "restart", cid])
    return {'message': 'Build updated'}

def main(video_file, num, loc):
    # connect to database
    logging.info("Connecting to database")
    db_params = config(filename=CONFIG_INI_FILE)
    conn = psycopg2.connect(**db_params)
    cur = conn.cursor()

    # Create necessary tables
    cur.execute("SAVEPOINT table_creation")
    try:
        logging.info("Instantiating relations")
        cur.execute(open(TABLES_SQL_FILE).read())
    except psycopg2.errors.DuplicateObject:
        logging.info("Relations are already instantiated")
        cur.execute("ROLLBACK TO SAVEPOINT table_creation")
    else:
        cur.execute("RELEASE SAVEPOINT table_creation")

    # Fetch video metadata
    cap = cv2.VideoCapture(video_file)
    res_x = int(cap.get(3))
    res_y = int(cap.get(4))
    fps = int(cap.get(cv2.CAP_PROP_FPS))

    # Populate entries for cameras table
    # If the chosen camera exists, then jump to populating spots
    query = """INSERT INTO cameras VALUES (%s, %s, %s, %s, %s);"""
    cur.execute("SELECT id from cameras;")
    ids = [r[0] for r in cur.fetchall()]
    if num not in ids:
        cur.execute(query, (num, loc, res_x, res_y, fps))
    else:
        logging.warning("Modifying existing camera")

    # Initialize parking lots that will be later analyzed
    query = """INSERT INTO spots
               VALUES (DEFAULT, %s, ST_SetSRID(%s::geometry, %s), false, '00:00:00', false)"""
    logging.info("Select areas of interest")
    ret, frame = cap.read()
    coords = events.select_area(frame)

    # Populate entries for spots table
    for pts in coords:
        polygon = events.sort2cyclic(pts)
        spot = Polygon(polygon)
        cur.execute(query, (num, spot.wkb_hex, 4326))

    # Commit changes and close connection
    logging.info("Committing changes and closing connection to database")
    conn.commit()
    cur.close()
    conn.close()

def main():
    args = config.config()
    if not args.train_data_path:
        logger.info("please input train dataset path")
        exit()
    # if not (args.dev_data_path or args.test_data_path):
    #     logger.info("please input dev or test dataset path")
    #     exit()
    all_ = dataset.load_dataset(args.train_data_path, args.dev_data_path, args.test_data_path,
                                args.txt_embedding_path, args.cpt_embedding_path, args.train_batch_size,
                                args.dev_batch_size, args.test_batch_size)
    txt_TEXT, cpt_TEXT, txt_vocab_size, cpt_vocab_size, txt_word_embeddings, cpt_word_embeddings, \
        train_iter, dev_iter, test_iter, label_size = all_

    model = STCK_Atten(txt_vocab_size, cpt_vocab_size, args.embedding_dim, txt_word_embeddings,
                       cpt_word_embeddings, args.hidden_size, label_size)
    if torch.cuda.is_available():
        model = model.cuda()

    train_data, test_data = dataset.train_test_split(train_iter, 0.8)
    train_data, dev_data = dataset.train_dev_split(train_data, 0.8)
    loss_func = torch.nn.CrossEntropyLoss()

    if args.load_model:
        model.load_state_dict(torch.load(args.load_model))
        test_loss, acc, p, r, f1 = eval_model(model, test_data, loss_func)
        logger.info(
            'Test Loss:%.4f, Test Acc:%.4f, Test P:%.4f, Test R:%.4f, Test F1:%.4f',
            test_loss, acc, p, r, f1)
        return

    best_score = 0.0
    test_loss, test_acc, test_p, test_r, test_f1 = 0, 0, 0, 0, 0
    for epoch in range(args.epoch):
        train_loss, eval_loss, acc, p, r, f1 = train_model(
            model, train_data, dev_data, epoch, args.lr, loss_func)
        logger.info('Epoch:%d, Training Loss:%.4f', epoch, train_loss)
        logger.info(
            'Epoch:%d, Eval Loss:%.4f, Eval Acc:%.4f, Eval P:%.4f, Eval R:%.4f, Eval F1:%.4f',
            epoch, eval_loss, acc, p, r, f1)
        if f1 > best_score:
            best_score = f1
            torch.save(model.state_dict(),
                       'results/%d_%s_%s.pt' % (epoch, 'Model', str(best_score)))
            test_loss, test_acc, test_p, test_r, test_f1 = eval_model(
                model, test_data, loss_func)
    logger.info(
        'Test Loss:%.4f, Test Acc:%.4f, Test P:%.4f, Test R:%.4f, Test F1:%.4f',
        test_loss, test_acc, test_p, test_r, test_f1)

def get_build_history():
    history_path = os.path.join(config()['data']['data_folder'], '.build_history')
    try:
        with open(history_path, 'r') as f:
            history = f.readlines()
            history = [e.split('::') for e in history]
    except Exception as e:
        print(e)
        history = [['never built', '']]
    return [dict(zip(['date', 'workflow'], e)) for e in history]

def stats():
    global __stats_client
    if not __stats_client:
        cfg = config()
        # Read host/port from config, falling back to the statsd defaults
        host = cfg.get('STATS_HOST', 'localhost')
        port = cfg.get('STATS_PORT', 8125)
        __stats_client = statsd.StatsClient(host, port)
    return __stats_client

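# Usage sketch for the cached statsd client (metric names are illustrative;
# incr() and timer() are standard statsd.StatsClient methods):
#
#     stats().incr('requests.handled')
#     with stats().timer('requests.latency'):
#         handle_request()
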
def remove_recipe(recipe_id):
    """Delete a recipe, unless it has already been cooked."""
    result = False
    conn = None
    get_cooked_recipes = """SELECT "RecipeId" FROM "CookedRecipes" WHERE "RecipeId" = %s """
    delete_recipe = """DELETE FROM "Recipes" WHERE "RecipeId" = %s """
    delete_ingredients = """DELETE FROM "IngredientsForRecipe" WHERE "RecipeId" = %s """
    delete_categories = """DELETE FROM "RecipeCategories" WHERE "RecipeId" = %s """
    try:
        # read database configuration
        params = config()
        # connect to the PostgreSQL database
        conn = psycopg2.connect(**params)
        # create a new cursor
        cur = conn.cursor()
        # check whether the recipe has ever been cooked
        cur.execute(get_cooked_recipes, (recipe_id, ))
        cooked = cur.fetchall()
        # do not allow cooked recipes to be deleted
        if len(cooked) > 0:
            return result
        cur.execute(delete_ingredients, (recipe_id, ))
        cur.execute(delete_categories, (recipe_id, ))
        cur.execute(delete_recipe, (recipe_id, ))
        conn.commit()
        # close the cursor
        cur.close()
        result = True
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
    return result

import os
import sys
import time
import errno
import socket
import select
import multiprocessing
from inspect import currentframe

workdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, workdir + "/../")
from utils.config import config
from utils.log import Logger

# Load the log configuration
logconf = config('nbnet', 'log')
# Initialise the logging module
logger = Logger.getLogger()
# Debug switch; when enabled, debug messages are logged
debug = logconf['debug']


class DebugLog():
    '''Debug logging helper that records the file and line number being executed.'''

    def __init__(self, file_path, debug=True):
        self.file_path = file_path
        self.debug = debug

    def get_linenumber(self):
        '''Get the line number the caller is currently running at.'''
        cf = currentframe()

'''Function Filter

Detects anomalies in the data sent by clients.
'''
import os
import sys
import json

workdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, workdir + "/../")
from utils.config import config
from simpleNet.nbNetFramework import bind_socket, nbNet

# Function-filter configuration
ff_conf = config('nbnet', 'functionfilter')
host_alarm = config('alarm', 'host_alarm')
# Dictionary recording alarm state
alarmStatus = {}


def ff(data):
    '''Compare the received monitoring data against the thresholds from host_alarm.'''
    mon_data = json.loads(data)
    alarm_list = list(host_alarm)
    for key in alarm_list:
        mon_value = mon_data[key]
        alarm_value = host_alarm[key]
        eval_function = str(mon_value) + alarm_value
        ff_result = eval(eval_function)
        if ff_result:

#!/usr/bin/env python
# coding=utf-8
import os
import sys

workdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, workdir + "/../")
from utils.config import config
from simpleNet.nbNetFramework import bind_socket, nbNet
from simpleNet.nbNetUtils import sendData_mh

# Load the configuration
trans_conf = config('nbnet', 'trans')
# List of monitoring (ff) servers
ff_l = trans_conf['ff_l'].split(';')
# List of saver servers
saver_l = trans_conf['saver_l'].split(';')
# Sockets for ff and saver, kept in lists; see sendData_mh for details
# ff sockets
ff_sock_l = [None]
# saver sockets
saver_sock_l = [None]


# Send data to the saver servers
def sendsaver(saver_l, data, sock_l):
    return sendData_mh(saver_l, data, sock_l)


# Send data to the monitoring (ff) servers
def sendff(ff_l, data, sock_l):

#!/usr/bin/env python
# coding=utf-8
import os
import sys
import json

workdir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, workdir + "/../")
from utils.config import config
from utils.crypt import encrypt, decrypt
from utils.syscmd import Command, get_iphostname
from simpleNet.nbNetFramework import bind_socket, nbNet

ctrl_conf = config('nbnet', 'controller')
# Listen address and port
addr = ctrl_conf['addr']
port = int(ctrl_conf['port'])
def_timeout = ctrl_conf['timeout']


# Request handler
def logic(data):
    dec_data = decrypt(data)
    data = json.loads(dec_data)
    if "cmd" in data:
        send_data = get_iphostname()
        cmd = data['cmd']
        timeout = data.get("timeout", def_timeout)
        command = Command(cmd)