Ejemplo n.º 1
0
 def getPikaConn(**kwargs):
     """Return a StrictRedis client backed by a cached pika connection pool.

     Keyword Args:
         conf_name: key into the pika config from ConfigService.get_pika_conf().
         link_type: sub-key selecting the concrete link (default 'default').

     Returns:
         redis.StrictRedis bound to the pooled connection, or None when the
         configuration cannot be found.
     """
     conf_name = kwargs.get("conf_name")
     link_type = kwargs.get("link_type", 'default')
     pool = PikaDB.pool.get(conf_name + link_type, None)
     if not pool:
         pika_conf = ConfigService.get_pika_conf()
         conf = pika_conf.get(conf_name, {}).get(link_type, None)
         if not conf:
             print("pika配置未找到")
             return
         db_url = conf.get("db_url", "")
         host = conf.get("host", "127.0.0.1")
         port = conf.get("port", 6379)
         db = conf.get("db", 0)
         max_connections = conf.get("max_connections", 50)
         if db_url:
             # BUG FIX: was redis.StrictRedis.from_url(), which returns a
             # client, not a pool — passing it as connection_pool= below is
             # wrong. Build a ConnectionPool from the URL, matching the
             # sibling getRedisConn() implementation.
             pool = redis.ConnectionPool.from_url(url=db_url)
         else:
             pool = redis.ConnectionPool(host=host,
                                         port=port,
                                         db=db,
                                         max_connections=max_connections)
         # setdefault keeps a pool another caller may have raced in first
         PikaDB.pool.setdefault(conf_name + link_type, pool)
     server = redis.StrictRedis(connection_pool=pool)
     return server
Ejemplo n.º 2
0
def logger(**kwargs):
    """Configure the 'lowercache' and 'cache_content' loggers.

    Settings come from keyword args, each falling back to the value in
    ConfigService.get_log_config().

    Keyword Args:
        log_path, level_type, date_format, log_format, suffix: see log config.
        when/interval/backupCount: TimedRotatingFileHandler rotation policy
            (defaults: 'MIDNIGHT' / 1 / 30).

    BUG FIX: previously every call attached fresh handlers to the same named
    loggers, so calling this twice duplicated every log line. Handlers are
    now attached only when the logger has none yet.
    """
    log_config = ConfigService.get_log_config()
    log_path = kwargs.get("log_path") or log_config.get("log_path")
    level_type = kwargs.get("level_type") or log_config.get("level_type")
    date_format = kwargs.get("date_format") or log_config.get("date_format")
    log_format = kwargs.get("log_format") or log_config.get("log_format")
    suffix = kwargs.get("suffix") or log_config.get("suffix")
    when = kwargs.get("when") or 'MIDNIGHT'
    interval = kwargs.get("interval") or 1
    backupCount = kwargs.get("backupCount") or 30

    cache_logger = logging.getLogger("lowercache")
    cache_logger.setLevel(level_type)
    if not cache_logger.handlers:
        # cache-hit log, rotated on schedule, consumed by the ETL pipeline
        hdlr = logging.handlers.TimedRotatingFileHandler(
            filename=log_path,
            when=when,
            interval=interval,
            backupCount=backupCount)
        hdlr.suffix = suffix
        hdlr.setLevel(level_type)
        formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
        hdlr.setFormatter(formatter)
        ch = logging.StreamHandler()
        ch.setLevel(level_type)
        ch.setFormatter(formatter)
        cache_logger.addHandler(hdlr)
        cache_logger.addHandler(ch)

    # console-only logger for degraded-cache arguments and return values
    content_logger = logging.getLogger("cache_content")
    content_logger.setLevel(level_type)
    if not content_logger.handlers:
        ch1 = logging.StreamHandler()
        ch1.setLevel(level_type)
        content_logger.addHandler(ch1)
Ejemplo n.º 3
0
 def wrapper(*args, **kwargs):
     """Serve func's result from the hash cache, or refresh/purge it.

     The hash name is <self.pre><prefix><city>.<class>.<func>; the field
     is read from the kwarg named by self.key. When self.update is set,
     the listed sibling entries are deleted and func runs uncached.
     """
     city = kwargs.get("city", "default")
     owner = list(args).pop()
     cls_name = owner.__class__.__name__
     fn_name = func.__name__
     # hash key: <project pre><config prefix><city>.<class>.<function>
     prefix = ConfigService.get_pika_conf().get(self.conf_name).get('pre')
     base = self.pre + prefix + city + "." + cls_name + "."
     name = base + fn_name
     cache_key = kwargs.get(self.key)
     cache_type = 0
     st = time.time()
     if not self.update:
         result = self.pull_cache(name=name, key=cache_key)
         if not result:
             # cache miss: run the wrapped function and store its result
             result = func(*args, **kwargs)
             if result:
                 cache_type = 1
                 self.push_cache(name=name, key=cache_key, value=result)
     else:
         # update mode: invalidate the listed entries, then call through
         cache_type = 2
         for stale in self.update:
             self.del_cache(name=base + stale, keys=cache_key)
         result = func(*args, **kwargs)
     return result
Ejemplo n.º 4
0
 def get_conn(self, *args, **kwargs):
     """Generator yielding a DictCursor on a pooled MySQL connection.

     Resolves the pool by link_type + config name (creating and caching a
     PooledDB on first use), selects the city-specific database on the raw
     connection, then yields a cursor. Commits on clean exit; on any
     exception it rolls back and logs the error (the exception is
     swallowed — callers do not see it). The cursor and connection are
     always closed.

     Keyword Args:
         city: used with self.conf_name to resolve config and db name.
         link_type: master/slave selector (default 'default').
     """
     city = kwargs.get("city")
     link_type = kwargs.get("link_type", 'default')
     conf_name = getConfigName(city=city, type=self.conf_name)
     connect_name = link_type + conf_name
     conn_pool = MysqlDB.__pool.get(connect_name)
     db_name = get_db(type=conf_name, city=city)
     if conn_pool:
         pass
     else:
         # fetch master/slave connection settings and build a new pool
         mysql_conf = ConfigService.get_mysql_conf()
         conf1 = mysql_conf.get(conf_name)
         conf = conf1.get(link_type)
         host = conf.get("host")
         port = conf.get("port")
         user = conf.get("user")
         passwd = conf.get("passwd")
         mincached = conf.get("mincached", 1)
         maxcached = conf.get("maxcached", 1)
         maxconnections = conf.get("maxconnections", 1)
         conn_pool = PooledDB(creator=pymysql,
                              blocking=True,
                              host=host,
                              port=port,
                              user=user,
                              mincached=mincached,
                              maxcached=maxcached,
                              maxconnections=maxconnections,
                              password=passwd,
                              charset="utf8")
         # setdefault tolerates a concurrent caller having stored one first
         MysqlDB.__pool.setdefault(connect_name, conn_pool)
     conn = conn_pool.connection()
     # NOTE(review): reaches through DBUtils' private wrappers to switch
     # schemas on the raw pymysql connection — TODO confirm this attribute
     # chain holds across DBUtils versions.
     conn._con._con.select_db(db_name)
     cursor = conn.cursor(cursor=pymysql.cursors.DictCursor)
     try:
         yield cursor
         conn.commit()
     except Exception as e:
         conn.rollback()
         logging.error(e)
     finally:
         cursor.close()
         conn.close()
Ejemplo n.º 5
0
 def get_conn(self, **kwargs):
     """Yield a pooled Elasticsearch client for this service (and city).

     Clients are cached in EsDB.__pool keyed by the resolved config name
     and rebuilt whenever ping() fails.

     Keyword Args:
         city: optional; combined with self.conf_name to pick the
             city-specific config, otherwise the default config is used.
     """
     city = kwargs.get("city", "")
     # resolve config by city + service; no city means the default config
     if city == "":
         conf_name = self.conf_name
     else:
         conf_name = getEsConfigName(city=city, service_type=self.conf_name)
     es = EsDB.__pool.get(conf_name)
     if es and not es.ping():
         es = None  # stale/broken client: rebuild below
     if not es:
         es_config = ConfigService.get_es_config()
         db_config = es_config.get(conf_name)
         host = db_config.get("host")
         maxsize = db_config.get('maxsize', 10)
         es = Elasticsearch(hosts=host, maxsize=maxsize)
         es.ping()
         # BUG FIX: cache under the resolved conf_name (was self.conf_name,
         # which mis-keyed city-specific clients: the lookup above never hit
         # and the default key could hold the wrong city's client).
         EsDB.__pool.setdefault(conf_name, es)
     yield es
Ejemplo n.º 6
0
 def wrapper(*args, **kwargs):
     """Purge the configured cache entries, then call through to func.

     For every function name in self.update, deletes the hash
     <self.pre><prefix><city>.<class>.<name> before delegating, and logs
     the elapsed time at debug level.
     """
     city = kwargs.get("city", "default")
     owner = list(args).pop()
     cls_name = owner.__class__.__name__
     fn_name = func.__name__
     # hash key: <project pre><config prefix><city>.<class>.<function>
     prefix = ConfigService.get_pika_conf().get(self.conf_name).get('pre')
     started = time.time()
     base = self.pre + prefix + city + "." + cls_name + "."
     for stale in self.update:
         self.del_cache(name=base + stale)
     result = func(*args, **kwargs)
     extra = {
         "fun_name": fn_name,
         "cache_type": "del",
         "fun_time": time.time() - started,
         "project": self.pre,
     }
     logging.debug("cache", extra=extra)
     return result
Ejemplo n.º 7
0
 def getRedisConn(**kwargs):
     """Return a StrictRedis client for the named redis config.

     Clients are memoized in RedisDB.pool keyed by conf_name; a new client
     is built (from db_url when present, otherwise host/port settings) only
     on the first request for a given name.
     """
     conf_name = kwargs.get("conf_name")
     server = RedisDB.pool.get(conf_name, None)
     if server:
         return server
     conf = ConfigService.get_redis_conf().get(conf_name, {})
     db_url = conf.get("db_url", "")
     if db_url:
         pool = redis.ConnectionPool.from_url(url=db_url)
     else:
         pool = redis.ConnectionPool(
             host=conf.get("host", "127.0.0.1"),
             port=conf.get("port", 6379),
             db=conf.get("db", 0),
             max_connections=conf.get("max_connections", 2))
     server = redis.StrictRedis(connection_pool=pool)
     # setdefault keeps a client another caller may have raced in first
     RedisDB.pool.setdefault(conf_name, server)
     return server
Ejemplo n.º 8
0
 def __new__(cls, *args, **kwargs):
     """Return the process-wide ThreadPoolExecutor, creating it on first use."""
     instance = getattr(cls, '_instance', None)
     if not instance:
         # worker count comes from the service configuration
         instance = ThreadPoolExecutor(
             max_workers=ConfigService.get_thread_num())
         cls._instance = instance
     return instance
Ejemplo n.º 9
0
 def __init__(self, *args, **kwargs):
     """Load the kafka config section named by the conf_name kwarg."""
     self.conf_name = kwargs.get("conf_name")
     self.kafka_config = ConfigService.get_kafka_conf().get(self.conf_name)
Ejemplo n.º 10
0
 def __init__(self, *args, **kwargs):
     """Load the mongo config section named by the conf_name kwarg."""
     self.conf_name = kwargs.get("conf_name")
     self.conf = ConfigService.get_mongo_conf().get(self.conf_name)
Ejemplo n.º 11
0
 def __init__(self, *args, **kwargs):
     """Load the rabbitmq config section named by the conf_name kwarg."""
     self.conf_name = kwargs.get("conf_name")
     self.mq_config = ConfigService.get_rabbitmq_conf().get(self.conf_name)
Ejemplo n.º 12
0
        def wrapper(*args, **kwargs):
            """Degradable cache wrapper around func.

            Behavior is steered by the 'lower_pcache' switch read from pika:
              0 (degraded) -> call func first and refresh the cache with its
                 JSON-encoded result; if func raises, fall back to the cache;
              1 (cache-first) -> serve from the cache; only a miss runs func.
            Results are stored JSON-encoded in a hash keyed by
            <self.pre><prefix><city>.<class>.<func>, field = kwargs[self.key].
            Timing/outcome is logged to 'lowercache', args/results to
            'cache_content'. Returns {} when nothing could be obtained.
            """
            st = time.time()  # start timestamp for call timing
            # build the cache hash name and field
            city = kwargs.get("city", "default")  # city segment
            class_name = list(args).pop().__class__.__name__  # class of the bound instance
            func_name = func.__name__  # wrapped function name
            prefix = ConfigService.get_pika_conf().get(self.conf_name).get(
                'pre')  # secondary prefix from the pika config
            # hash key: <project pre><prefix><city>.<class>.<function>
            name = self.pre + prefix + city + "." + class_name + "." + func_name

            cache_key = kwargs.get(self.key)  # hash field, read from the kwarg named by self.key

            # read the degraded-cache switch from pika
            pika_search = dbfactory.create_db(conf_name=self.conf_name,
                                              db_type="db_pika",
                                              link_type="slave1")
            cache_type = int(pika_search.get("lower_pcache")
                             or 0)  # 0: func first, cache on failure; 1: cache first

            # fetch the data; in the normal case store it and return
            if cache_type == 0:  # degraded mode
                result = {}
                try:
                    result = func(*args, **kwargs)  # fetch the data
                    if result:
                        self.push_cache(name=name,
                                        key=cache_key,
                                        value=json.dumps(result))
                    # degraded mode, normal path (cache_type 0 in the log)
                    logger.info("lowcache",
                                extra={
                                    "fun_name": func_name,
                                    "cache_type": 0,
                                    "fun_time": time.time() - st,
                                    "project": prefix
                                })

                    logger_content.debug({
                        "name": name,
                        "cache_key": cache_key,
                        "result": result
                    })
                    return result
                except Exception as e:
                    print("func执行异常", e)
                    # func failed: fall through to the cache lookup below

            # serve from the cache when it has an entry (cache_type 2 in the log)
            result = self.pull_cache(name=name, key=cache_key)
            if result:
                result = json.loads(result)
                logger.info("lowcache",
                            extra={
                                "fun_name": func_name,
                                "cache_type": 2,
                                "fun_time": time.time() - st,
                                "project": prefix
                            })

                logger_content.debug({
                    "name": name,
                    "cache_key": cache_key,
                    "result": result
                })
                return result

            # cache-first mode with a miss: run func to obtain the data
            if cache_type == 1:
                try:
                    result = func(*args, **kwargs)  # execute the wrapped function
                    if result:
                        self.push_cache(name=name,
                                        key=cache_key,
                                        value=json.dumps(result))
                    logger.info("lowcache",
                                extra={
                                    "fun_name": func_name,
                                    "cache_type": 1,
                                    "fun_time": time.time() - st,
                                    "project": prefix
                                })
                    logger_content.debug({
                        "name": name,
                        "cache_key": cache_key,
                        "result": result
                    })
                    return result
                except Exception as e:
                    print("缓存模式下func执行异常", e)
                    # func failed in cache-first mode (cache_type 4 in the log)
                    logger.info("lowcache",
                                extra={
                                    "fun_name": func_name,
                                    "cache_type": 4,
                                    "fun_time": time.time() - st,
                                    "project": prefix
                                })
            # nothing obtained anywhere: log what we have and return {} fallback
            logger_content.debug({
                "name": name,
                "cache_key": cache_key,
                "result": result
            })
            return result or {}