from redis import StrictRedis
from redis_queue import pull, rm_first, clear_all, publish, pull_all, rm_at
import json

redis_cli = StrictRedis(port=6381)


def get(queue_name):
    res = pull(redis_cli, queue_name)
    if res:
        rm_first(redis_cli, queue_name)
        return json.loads(res[0])
    return None


def list(queue_name):
    res = pull_all(redis_cli, queue_name)
    if res:
        res = [json.loads(i) for i in res]
        return res
    return None


def create(queue_name, item):
    publish(redis_cli, queue_name, item)


def rm(queue_name):
    rm_first(redis_cli, queue_name)
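# A minimal usage sketch (assumed, not from the source): the wrappers above
# json.loads() whatever they pull, so items passed to create() are expected to
# already be JSON strings. The queue name 'tasks' is illustrative.
if __name__ == '__main__':
    create('tasks', json.dumps({'job': 'resize', 'id': 42}))
    print(list('tasks'))   # all pending items, decoded from JSON
    print(get('tasks'))    # first item, which is also removed from the queue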
from os import getenv

from redis import StrictRedis

redis_client = StrictRedis(host=getenv('REDIS_HOST') or '127.0.0.1',
                           port=int(getenv('REDIS_PORT') or 6379),
                           db=int(getenv('REDIS_DB') or 0))
import os
from concurrent.futures import ProcessPoolExecutor

from redis import StrictRedis
from libs.jd_analysis import JdAnalysis
from mysql.connector import pooling

cnxpool = pooling.MySQLConnectionPool(pool_name='mypool', pool_size=10,
                                      user='******', password='******',
                                      host='localhost', database='jd')
redis = StrictRedis('127.0.0.1')
process_pool = ProcessPoolExecutor(max_workers=4)

basedir = os.path.abspath(os.path.dirname(__file__))
save_path = os.path.join(basedir, 'static', 'analysis_picture')


def run_analysis(goods_id):
    while True:
        try:
            cnx = cnxpool.get_connection()
        except Exception as e:
            print(e)
            print('wait connection...')
        else:
            break
    jd_analysis = JdAnalysis(goods_id, cnx, save_path)
    jd_analysis.run_analysis_comment_count()
    jd_analysis.run_analysis_comment_wordcloud()
def __init__(self):
    self.r = StrictRedis(host='localhost', port=6379, db=0)
stats.configure(**stats_conf)

# Init Celery app
CELERY = Celery(config_source=CFG['celery'])
CELERY.register_task(Discover(CFG))

# Init Flask app
from . import web

APP = web.get_app(CFG)

# Initialize CORS support
cors = CFG.get('cors')
if cors:
    CORS(APP, origins=cors)

# Initialize Celery and Redis
APP.celery = CELERY
APP.redis = StrictRedis(
    connection_pool=BlockingConnectionPool.from_url(
        CFG['redis_uri'],
        max_connections=50,
        timeout=CFG.get('redis_timeout', 10),
        decode_responses=True
    )
)

# ensure the instance folder exists
try:
    os.makedirs(APP.instance_path)
except OSError:
    pass
def create_app(config_name):
    # Configure logging; the log level depends on the run mode
    setup_log(config_name)

    # Create the Flask object.
    # The static and templates directories sit next to this module, so neither
    # needs extra configuration.
    app = Flask(__name__)

    # Load the configuration
    app.config.from_object(config[config_name])

    # Initialise the database. The db object must be importable by manage.py,
    # so it is created at module level and only bound to the app here.
    # db = SQLAlchemy(app)
    db.init_app(app)

    # Initialise Redis. This StrictRedis instance stores the project's
    # key-value data; it is not the Redis that stores sessions.
    global redis_store
    redis_store = StrictRedis(host=config[config_name].REDIS_HOST,
                              port=config[config_name].REDIS_PORT)

    # Enable CSRF protection. The extension's source shows that a request
    # failing the CSRF check is rejected (returned) immediately.
    # AJAX POST requests require the page to set
    # <meta name="csrf-token" content="{{ csrf_token() }}">
    # and the corresponding JS to send the token, otherwise the AJAX request
    # keeps getting 400 BAD REQUEST:
    # var csrftoken = $('meta[name=csrf-token]').attr('content')
    #
    # $.ajaxSetup({
    #     beforeSend: function(xhr, settings) {
    #         if (!/^(GET|HEAD|OPTIONS|TRACE)$/i.test(settings.type) && !this.crossDomain) {
    #             xhr.setRequestHeader("X-CSRFToken", csrftoken)
    #         }
    #     }
    # })
    """
    The session cookie is actually injected by CSRFProtect(app); it can also be
    created when a value is assigned with session['xxx'] = yyy. The former
    protects every URL, the latter only parts of the app.
    1- The CSRF protection from Flask-WTF is set up before Session(app),
       because the session_id must exist before it can be stored in Redis.
    2- With CSRFProtect enabled, visiting a page generates a session_id and
       injects it as a cookie. The csrf_token() call in
       <meta name="csrf-token" content="{{ csrf_token() }}"> comes from
       CSRFProtect, i.e. from Flask-WTF.
       The token rendered into the HTML is what the JS must attach to each
       request so the session_id can be verified; otherwise the response is 400.
    CSRFProtect(app) does two things: 1- read the random value from the cookie,
    2- verify it and return the response.
    We have to: 1- set a csrf-token cookie when the page loads,
    2- send the generated token when a form is submitted,
    3- since login/registration use AJAX requests rather than forms, we do not
       add {{ ... }} to a form; we set the header in the AJAX call instead.
       The meta-tag approach above also works; pick one of the two.
    """
    CSRFProtect(app)

    # Store sessions in the configured location
    Session(app)

    # Register the custom template filter before the blueprints. Like the
    # blueprints, the filter is imported where it is registered; importing it
    # at the top would cause a circular-import error.
    # Many Flask extensions can be instantiated first and bound to the app
    # later by calling their init_app() method.
    from info.utils.common import do_index_class
    app.add_template_filter(do_index_class, "index_class")

    # Global 404 page: errorhandler catches errors with the given status code
    from info.utils.common import user_login_data

    @app.errorhandler(404)
    @user_login_data
    def page_not_found(error):
        user = g.user
        data = {"user_info": user.to_dict() if user else None}
        return render_template('news/404.html', data=data)

    # Add the csrf_token cookie to every response sent to the client
    @app.after_request
    def after_request(response):
        # Generate the CSRF token via Flask-WTF
        csrf_token = generate_csrf()
        # Set the cookie
        response.set_cookie("csrf_token", csrf_token)
        # Return the decorated response
        return response

    # Register the blueprints. If the imports below sat at the top of this
    # file, startup would fail: the blueprint's views.py imports redis_store
    # before this file has even reached the line redis_store = None, so the
    # circular import hits an undefined name. Import each blueprint only at
    # the moment it is registered, never at the top of the file.
    from info.modules.index import index_blue
    app.register_blueprint(index_blue)

    from info.modules.passport import passport_blue
    app.register_blueprint(passport_blue)

    from info.modules.news import news_blue
    app.register_blueprint(news_blue)

    from info.modules.profile import profile_blue
    app.register_blueprint(profile_blue)

    from info.modules.admin import admin_blue
    app.register_blueprint(admin_blue)

    return app
import datetime
import json

import psycopg2
from bokeh.models import ColumnDataSource, Range1d
from bokeh.models.annotations import Title, Legend, LegendItem
from bokeh.plotting import figure, save, output_file
from bokeh.embed import json_item
from bokeh.tile_providers import OSM, get_provider
from bokeh.palettes import Category20_20, Inferno256
from bokeh.transform import linear_cmap, factor_cmap
from redis import StrictRedis
from redis_cache import RedisCache

client = StrictRedis(host="cache", decode_responses=True)
cache = RedisCache(redis_client=client)


class Colours:
    colourMap = Category20_20

    def __init__(self):
        self.current = 1

    @property
    def new(self):
        self.current += 1
        # Guard before indexing: the palette only has len(colourMap) entries
        if self.current >= len(self.colourMap):
            raise Exception("No more colours left")
        return self.colourMap[self.current]
def redis_cli():
    """Create a Redis connection."""
    redis_config = ConfigParser().redis_option
    redis_pool = ConnectionPool(**redis_config)
    redis_connect = StrictRedis(connection_pool=redis_pool)
    return redis_connect
def register_redis(app):
    redis_data = StrictRedis(db=app.config['CACHE_REDIS_DB'],
                             password=app.config['CACHE_REDIS_PASSWORD'],
                             decode_responses=True)
    return redis_data
class SharedOptions:
    CUDA_MODE = os.getenv("CUDA_MODE", "False")
    APPDIR = os.getenv("APPDIR", ".")
    PROFILE = os.getenv("PROFILE", "desktop_cpu")

    if PROFILE == "windows_native":
        sys.path.append(os.path.join(APPDIR, "windows_packages"))

    from redis import RedisError, StrictRedis

    TEMP_PATH = os.getenv("TEMP_PATH", "/deeptemp/")
    SLEEP_TIME = 0.01
    SHARED_APP_DIR = os.path.join(APPDIR, "sharedfiles")
    # SHARED_APP_DIR = "/home/johnolafenwa/Documents/Source/DeepStack/sharedfiles"
    GPU_APP_DIR = os.path.join(APPDIR, "gpufiles")
    CPU_APP_DIR = os.path.join(APPDIR, "cpufiles")
    DATA_DIR = os.getenv("DATA_DIR", "/datastore")

    if CUDA_MODE == "True":
        APP_DIR = GPU_APP_DIR
        CUDA_MODE = True
    else:
        APP_DIR = CPU_APP_DIR
        CUDA_MODE = False

    MODE = "Medium"
    if "MODE" in os.environ:
        MODE = os.environ["MODE"]

    db = StrictRedis(host="localhost", db=0, decode_responses=True)
    TB_EMBEDDINGS = "TB_EMBEDDINGS"

    PROFILE_SETTINGS = {
        "desktop_cpu": Settings(
            DETECTION_HIGH=640,
            DETECTION_MEDIUM=416,
            DETECTION_LOW=256,
            DETECTION_MODEL="yolov5m.pt",
            FACE_HIGH=416,
            FACE_MEDIUM=320,
            FACE_LOW=256,
            FACE_MODEL="face.pt",
        ),
        "desktop_gpu": Settings(
            DETECTION_HIGH=640,
            DETECTION_MEDIUM=416,
            DETECTION_LOW=256,
            DETECTION_MODEL="yolov5m.pt",
            FACE_HIGH=416,
            FACE_MEDIUM=320,
            FACE_LOW=256,
            FACE_MODEL="face.pt",
        ),
        "jetson": Settings(
            DETECTION_HIGH=416,
            DETECTION_MEDIUM=320,
            DETECTION_LOW=256,
            DETECTION_MODEL="yolov5s.pt",
            FACE_HIGH=384,
            FACE_MEDIUM=256,
            FACE_LOW=192,
            FACE_MODEL="face_lite.pt",
        ),
        "windows_native": Settings(
            DETECTION_HIGH=640,
            DETECTION_MEDIUM=416,
            DETECTION_LOW=256,
            DETECTION_MODEL="yolov5m.pt",
            FACE_HIGH=416,
            FACE_MEDIUM=320,
            FACE_LOW=256,
            FACE_MODEL="face.pt",
        ),
    }

    SETTINGS = PROFILE_SETTINGS[PROFILE]
from config import Config
from redis import StrictRedis

REDIS: StrictRedis = StrictRedis(
    host=Config.REDIS_HOST,
    port=Config.REDIS_PORT,
    db=Config.REDIS_DB_IDX,
    password=Config.REDIS_PASSWORD,
)


def init_cache():
    REDIS.set("apple", f"{1}")
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from redis import StrictRedis
from helper import dict_to_md
import dateutil.parser
import time

r = StrictRedis(port=6399)

use_subset = True

names_by_events = {}
events_by_ts = {}

for key, score in r.zrevrange('timestamps', 0, -1, True):
    if score == 1:
        continue
    timestamp = time.mktime(dateutil.parser.parse(key).timetuple())
    hashes = r.smembers('{}:{}'.format('timestamp', int(timestamp)))
    for h in hashes:
        md5 = r.hget(h, 'md5')
        sha1 = r.hget(h, 'sha1')
        eids = r.smembers('{}:eids'.format(md5)).union(
            r.smembers('{}:eids'.format(sha1))).union(
            r.smembers('{}:eids'.format(h)))
        for e in eids:
            uuid = r.get('eventuuid:{}'.format(e))
            if use_subset and uuid not in r.smembers('subset'):
                continue
            if not names_by_events.get(e):
def create_app(config_name):
    """Application factory."""
    # 1. Create the app object
    app = Flask(__name__)

    # development -- DevelopmentConfig: pick the configuration class that
    # matches the argument passed in
    config_class = config_dict[config_name]

    # Set up logging
    set_log(config_class)

    # Bind the configuration class to the app:
    # DevelopmentConfig -> the app in development mode
    # ProductionConfig  -> the app in production mode
    app.config.from_object(config_class)

    # 2. Create the database object
    # Lazy initialisation: the real db object is only initialised here
    db.init_app(app)

    # 3. Create the Redis object -- same lazy-loading idea
    # decode_responses=True converts the returned bytes into strings
    global redis_store
    redis_store = StrictRedis(host=config_class.REDIS_HOST,
                              port=config_class.REDIS_PORT,
                              db=config_class.REDIS_NUM,
                              decode_responses=True)

    # 4. Add CSRF protection to the project:
    #    read csrf_token from the cookie,
    #    read csrf_token from the form (or, for AJAX requests, from the
    #    X-CSRFToken request header), then compare the two values
    CSRFProtect(app)

    # Use an after-request hook to set the csrf_token cookie once the page
    # has been rendered
    @app.after_request
    def set_csrf_token(response):
        # Called after the request finishes
        # 1. Generate a random csrf_token value
        csrf_token = generate_csrf()
        # 2. Set it on the response object as a cookie
        response.set_cookie("csrf_token", csrf_token)
        # 3. Return the response object
        return response

    # 5. Move session storage from memory into Redis
    Session(app)

    # Add the custom template filter
    app.add_template_filter(set_rank_class, "set_rank_class")

    @app.errorhandler(404)
    @user_login_data
    def handler_404(e):
        """Render the 404 page."""
        # Fetch the currently logged-in user's data
        user = g.user
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template("news/404.html", data=data)

    # 6. Register the index blueprint
    # Deferred imports avoid circular-import problems
    from info.module.index import index_bp
    app.register_blueprint(index_bp)

    # Login/registration blueprint
    from info.module.passport import passport_bp
    app.register_blueprint(passport_bp)

    # News detail blueprint
    from info.module.news import newsdetail_bp
    app.register_blueprint(newsdetail_bp)

    # User profile blueprint
    from info.module.profile import profile_bp
    app.register_blueprint(profile_bp)

    # Admin blueprint
    from info.module.admin import admin_bp
    app.register_blueprint(admin_bp)

    return app
def clear(self):
    con = StrictRedis()
    con.delete(self.queue_name)
from contextlib import contextmanager
import json
import pickle as pkl
import uuid

from backend.config import Config
from redis import StrictRedis
from redis_cache import RedisCache
from redlock import Redlock

rds = StrictRedis(Config.REDIS_HOST, decode_responses=True, charset="utf-8")
rds_cache = StrictRedis(Config.REDIS_HOST, decode_responses=False, charset="utf-8")
redis_cache = RedisCache(redis_client=rds_cache, prefix="rc",
                         serializer=pkl.dumps, deserializer=pkl.loads)
dlm = Redlock([{"host": Config.REDIS_HOST}])

DEFAULT_ASSET_EXPIRATION = 8 * 24 * 60 * 60  # by default keep cached values around for 8 days
DEFAULT_CACHE_EXPIRATION = 1 * 24 * 60 * 60  # we can keep cached values around for a shorter period of time

REMOVE_ONLY_IF_OWNER_SCRIPT = """
if redis.call("get",KEYS[1]) == ARGV[1] then
    return redis.call("del",KEYS[1])
else
    return 0
end
"""


@contextmanager
def redis_lock(lock_name, expires=60):
    # https://breadcrumbscollector.tech/what-is-celery-beat-and-how-to-use-it-part-2-patterns-and-caveats/
    random_value = str(uuid.uuid4())
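# A minimal sketch (assumed, not from the source) of how the Lua script above
# is typically used with redis-py: register it once, then call it with the
# lock key and the owner's random value so the key is deleted only if we still
# own it. The helper name release_lock is illustrative.
remove_only_if_owner = rds.register_script(REMOVE_ONLY_IF_OWNER_SCRIPT)


def release_lock(lock_name, random_value):
    # Returns 1 if the key was deleted, 0 if another owner holds the lock
    return remove_only_if_owner(keys=[lock_name], args=[random_value])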
from redis import StrictRedis

redis = StrictRedis(host='localhost', port=6379, db=0)
redis.set('name', 'mark')
print(redis.get('name'))
import sys

from redis import StrictRedis

# redis_pwd_dic (instance name -> password) is defined elsewhere in the source.


def get_size(r, key_name, key_type):
    if key_type == b'zset':
        return r.zcard(key_name)
    if key_type == b'set':
        return r.scard(key_name)
    if key_type == b'list':
        return r.llen(key_name)
    return None


if __name__ == '__main__':
    try:
        redis_host = sys.argv[1]
        redis_port = sys.argv[2]
        redis_db = sys.argv[3]
        redis_key = sys.argv[4]
        redis_instanse = redis_host.split('.')[0]
        redis_password = redis_pwd_dic.get(redis_instanse)
        if not redis_password:
            print(-1)
            sys.exit(1)
        r = StrictRedis(host=redis_host, port=redis_port,
                        password=redis_password, db=redis_db,
                        decode_responses=False)
        print(get_size(r, redis_key, r.type(redis_key)))
    except Exception as e:
        print(-1)
from redis import StrictRedis

redis = StrictRedis(host='localhost', port=6379, db=0, password='******')
redis.set('name', 'Lockin')
print(redis.get('name'))
def _connect(self):
    return StrictRedis(connection_pool=self.redis_pool, db=self.db)
import json
from typing import List

from loguru import logger
from redis import StrictRedis
import rx
from rx.concurrency import ThreadPoolScheduler
import rx.operators as op

from fx_service.config import DATABASE_URL, PULL_INTERVAL_SECONDS
from fx_service.provider import (
    Fx1Forge,
    get_currency_pairs_data,
    save_quotes_to_timescaledb,
)

redis_client = StrictRedis(decode_responses=True)


def get_fx_symbols():
    return redis_client.smembers("fx_symbols")


def get_fx_quotes() -> None:
    logger.info("Getting latest fx quotes...")
    symbols = get_fx_symbols()
    quotes = Fx1Forge.get_quotes(symbols)

    # Save data to timescaledb
    save_quotes_to_timescaledb(quotes)
    logger.info("Latest fx quotes saved to timescaledb.")

    # Save data to redis
    redis_client.set("fx_quotes", json.dumps(quotes))
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from redis import StrictRedis
from apps.config import REDIS_HOST, REDIS_POST
# from flask_wtf import CSRFProtect
from flask_session import Session
from flask_cors import CORS

app = Flask(__name__)
app.config.from_pyfile('config.py')

db = SQLAlchemy(app)

# Initialise the Redis storage object
redis_store = StrictRedis(host=REDIS_HOST, port=REDIS_POST)

# Enable CSRF protection
# CSRFProtect(app)

# Store sessions in Redis
Session(app)

CORS(app)

from apps import models, views
import time, json

from redis import StrictRedis
from redis.exceptions import RedisError

time.sleep(8)

redisDB = StrictRedis(host='redis', port=6379, db=0, password='******',
                      decode_responses=True)

payload = {
    "userid": 1234,
    "message": "You got a message. ' \" \ @ # & % 😀",
    "message_type": 2
}

while True:
    payload["ts"] = int(time.time())
    # Right push to the list. We'll pop from left.
    redisDB.rpush('gopy_message_queue', json.dumps(payload))
    time.sleep(5)
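# A minimal consumer sketch (assumed, not from the source) for the producer
# above, intended to run as a separate process: it left-pops from the same
# list the producer right-pushes to.
import json
from redis import StrictRedis

consumer = StrictRedis(host='redis', port=6379, db=0, password='******',
                       decode_responses=True)

while True:
    # blpop blocks until an item is available and returns (list_name, value)
    _, raw = consumer.blpop('gopy_message_queue')
    message = json.loads(raw)
    print(message["userid"], message["message"])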
def get_redis_connection():
    host = Config.get("redis_host", default="127.0.0.1")
    port = Config.get("redis_port", default=6379)
    db = Config.get("redis_db", default=0)
    return StrictRedis(host=host, port=port, db=db)
import uuid
import sys
from os import getenv

from dotenv import load_dotenv
from redis import StrictRedis

load_dotenv()
REDIS_HOST = getenv("REDIS_HOST")
REDIS_PASS = getenv("REDIS_PASS")
REDIS_INSTANCE = getenv("REDIS_INSTANCE")

db = StrictRedis(REDIS_HOST, db=REDIS_INSTANCE, password=REDIS_PASS)


def is_user(login):
    return db.hexists(f"user:{login}", "password")


def save_label(label, login):
    label_id = str(uuid.uuid4())
    # hmset is deprecated in recent redis-py; hset(name, mapping=label) is the
    # modern equivalent
    db.hmset(f"label:{login}:{label_id}", label)
    return True


def get_user_labels(login):
    labels = []
    keys = db.keys(pattern=f"label:{login}*")
    for key in keys:
from multiprocessing import Process
import time
from os.path import exists
from os import makedirs

import requests
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import WebDriverException
from redis import StrictRedis

db = StrictRedis(host='localhost', port=6379, db=0, password='******',
                 decode_responses=True)

urls = [
    'http://music.163.com/#/djradio?id=350234234',
    'http://music.163.com/#/djradio?id=350234234',
    'http://music.163.com/#/djradio?id=6779005',
    'http://music.163.com/#/djradio?id=350589167',
    'http://music.163.com/#/djradio?id=7375084',
    'http://music.163.com/#/djradio?id=349374451',
    'http://music.163.com/#/djradio?id=350325839',
    'http://music.163.com/#/djradio?id=337676057',
    'http://music.163.com/#/djradio?id=349629091',
    'http://music.163.com/#/djradio?id=349977940',
    'http://music.163.com/#/djradio?id=346216051',
    'http://music.163.com/#/djradio?id=333635084',
    'http://music.163.com/#/djradio?id=334635050',
# encoding: utf8
from __future__ import absolute_import

import json
from datetime import datetime
from unittest import TestCase

from redis import StrictRedis
from schematics import types, models

from redis_schematics import HashRedisMixin, SimpleRedisMixin
from redis_schematics.exceptions import NotFound

client = StrictRedis(host="localhost", port=6379, db=4)


class TestModel(models.Model):
    __redis_client__ = client
    __expire__ = 120

    pk = types.StringType()
    id = types.IntType(required=True)
    name = types.StringType()
    created = types.DateTimeType()
    good_number = types.IntType()


class BaseModelStorageTest(object):

    @property
    def raw_value(self):
from redis import StrictRedis

# Create the Redis client.
# decode_responses=True decodes the returned bytes responses into strings.
redis_client = StrictRedis(host='192.168.248.157', port=6381, decode_responses=True)

redis_client.set("name", "curry", ex=3600)
print(redis_client.get('name'))
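# A short follow-up sketch (assumed, not part of the source): ex=3600 gives
# the key a one-hour expiry, which can be inspected with TTL.
print(redis_client.ttl('name'))  # seconds remaining before 'name' expires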
# logger.add(sink="uhrs_{time}.log", rotation="00:00", retention="10 days")
logger.add(sink="uhrs.log")
logger.info("starting uhrs service...")

conf = configparser.ConfigParser()
conf.read(filenames="uhrs.cfg", encoding="utf-8")

log_num_list = conf.get("server", "log_num_list", fallback="")
for num in log_num_list.split(","):
    # skip empty entries so an unset option does not raise on int("")
    if num.strip():
        pretty.set_log_number(int(num))

# init db pool
dbpool.init_db_pool(conf)

# init redis
redis_on = conf.has_section("redis")
if redis_on:
    redis = StrictRedis(host=conf.get("redis", "host"),
                        port=conf.getint("redis", "port"),
                        db=conf.getint("redis", "db"),
                        password=conf.get("redis", "password"))
    bus.login.login_manager.set_redis(redis)

# startup http server
if redis_on:
    worker_num = conf.getint("server", "worker_num")
else:
    # multi-process can not share global dict
    worker_num = 1

routers.app.run(host=conf.get("net", "host"),
                port=conf.getint("net", "port"),
                workers=worker_num,
                access_log=False)
from lidarts import create_app
from lidarts import db, socketio
from lidarts.models import Game, User, UserStatistic
import json
from sqlalchemy import or_
from datetime import datetime
from redis import StrictRedis
import time
import logging

app = create_app()
app.app_context().push()

redis_client = StrictRedis()


def calc_stats(player_id, max_games=None, max_darts=None):
    games_query = (Game.query.filter(
        or_(Game.player1 == player_id, Game.player2 == player_id)
        & (Game.status == 'completed')
        & (Game.type == '501')
        & (Game.in_mode == 'si')
        & (Game.out_mode == 'do')).order_by(Game.id.desc()))

    if max_games:
        games_query = games_query.limit(max_games)

    games = games_query.all()

    total_score = 0
    total_score_first9 = 0
    darts_thrown = 0
def run_selected_algorithm(timeseries, metric_name):
    """
    Filter timeseries and run selected algorithm.
    """

    # @added 20180807 - Feature #2492: alert on stale metrics
    # Determine if a metric has stopped sending data and if so add to the
    # analyzer.alert_on_stale_metrics Redis set
    if ALERT_ON_STALE_METRICS:
        add_to_alert_on_stale_metrics = False
        # @modified 20180816 - Feature #2492: alert on stale metrics
        # Added try and except to prevent some errors that are encounter between
        # 00:14 and 00:17 on some days
        # Traceback (most recent call last):
        #   File "/opt/skyline/github/skyline/skyline/analyzer/analyzer.py", line 394, in spin_process
        #     anomalous, ensemble, datapoint = run_selected_algorithm(timeseries, metric_name)
        #   File "/opt/skyline/github/skyline/skyline/analyzer/algorithms.py", line 530, in run_selected_algorithm
        #     if int(time()) - int(timeseries[-1][0]) >= ALERT_ON_STALE_PERIOD:
        # IndexError: list index out of range
        try:
            if int(time()) - int(timeseries[-1][0]) >= ALERT_ON_STALE_PERIOD:
                add_to_alert_on_stale_metrics = True
        except:
            # @modified 20180816 -
            # Feature #2492: alert on stale metrics
            add_to_alert_on_stale_metrics = False
        try:
            if int(time()) - int(timeseries[-1][0]) >= STALE_PERIOD:
                add_to_alert_on_stale_metrics = False
        except:
            add_to_alert_on_stale_metrics = False

        if add_to_alert_on_stale_metrics:
            try:
                redis_conn.ping()
            except:
                from redis import StrictRedis
                if REDIS_PASSWORD:
                    redis_conn = StrictRedis(password=REDIS_PASSWORD, unix_socket_path=REDIS_SOCKET_PATH)
                else:
                    redis_conn = StrictRedis(unix_socket_path=REDIS_SOCKET_PATH)
            try:
                redis_conn.sadd('analyzer.alert_on_stale_metrics', metric_name)
            except:
                pass

    # Get rid of short series
    if len(timeseries) < MIN_TOLERABLE_LENGTH:
        raise TooShort()

    # Get rid of stale series
    if time() - timeseries[-1][0] > STALE_PERIOD:
        raise Stale()

    # Get rid of boring series
    if len(set(item[1] for item in timeseries[-MAX_TOLERABLE_BOREDOM:])) == BOREDOM_SET_SIZE:
        raise Boring()

    # RUN_OPTIMIZED_WORKFLOW - replaces the original ensemble method:
    # ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
    # which runs all timeseries through all ALGORITHMS
    final_ensemble = []
    number_of_algorithms_triggered = 0
    number_of_algorithms_run = 0
    number_of_algorithms = len(ALGORITHMS)
    maximum_false_count = number_of_algorithms - CONSENSUS + 1
    # logger.info('the maximum_false_count is %s, above which CONSENSUS cannot be achieved' % (str(maximum_false_count)))
    consensus_possible = True

    # DEVELOPMENT: this is for a development version of analyzer only
    if skyline_app == 'analyzer_dev':
        time_all_algorithms = True
    else:
        time_all_algorithms = False

    algorithm_tmp_file_prefix = '%s/%s.' % (SKYLINE_TMP_DIR, skyline_app)
    for algorithm in ALGORITHMS:
        if consensus_possible:

            if send_algorithm_run_metrics:
                algorithm_count_file = '%s%s.count' % (algorithm_tmp_file_prefix, algorithm)
                algorithm_timings_file = '%s%s.timings' % (algorithm_tmp_file_prefix, algorithm)

            run_algorithm = []
            run_algorithm.append(algorithm)
            number_of_algorithms_run += 1
            if send_algorithm_run_metrics:
                start = timer()
            try:
                algorithm_result = [globals()[test_algorithm](timeseries) for test_algorithm in run_algorithm]
            except:
                # logger.error('%s failed' % (algorithm))
                algorithm_result = [None]

            if send_algorithm_run_metrics:
                end = timer()
                with open(algorithm_count_file, 'a') as f:
                    f.write('1\n')
                with open(algorithm_timings_file, 'a') as f:
                    f.write('%.6f\n' % (end - start))
        else:
            algorithm_result = [False]
            # logger.info('CONSENSUS NOT ACHIEVABLE - skipping %s' % (str(algorithm)))

        if algorithm_result.count(True) == 1:
            result = True
            number_of_algorithms_triggered += 1
            # logger.info('algorithm %s triggerred' % (str(algorithm)))
        elif algorithm_result.count(False) == 1:
            result = False
        elif algorithm_result.count(None) == 1:
            result = None
        else:
            result = False

        final_ensemble.append(result)

        if not RUN_OPTIMIZED_WORKFLOW:
            continue

        if time_all_algorithms:
            continue

        if ENABLE_ALL_ALGORITHMS_RUN_METRICS:
            continue

        # true_count = final_ensemble.count(True)
        # false_count = final_ensemble.count(False)
        # logger.info('current false_count %s' % (str(false_count)))

        if final_ensemble.count(False) >= maximum_false_count:
            consensus_possible = False
            # logger.info('CONSENSUS cannot be reached as %s algorithms have already not been triggered' % (str(false_count)))
            # skip_algorithms_count = number_of_algorithms - number_of_algorithms_run
            # logger.info('skipping %s algorithms' % (str(skip_algorithms_count)))

    # logger.info('final_ensemble: %s' % (str(final_ensemble)))

    try:
        # ensemble = [globals()[algorithm](timeseries) for algorithm in ALGORITHMS]
        ensemble = final_ensemble

        threshold = len(ensemble) - CONSENSUS
        if ensemble.count(False) <= threshold:
            if ENABLE_SECOND_ORDER:
                if is_anomalously_anomalous(metric_name, ensemble, timeseries[-1][1]):
                    return True, ensemble, timeseries[-1][1]
            else:
                return True, ensemble, timeseries[-1][1]

        return False, ensemble, timeseries[-1][1]
    except:
        logger.error('Algorithm error: %s' % traceback.format_exc())
        return False, [], 1