def update_rule_limit(self, update_list):
    """Refresh the cached per-product rate limits for the given keys.

    Under a distributed Redis lock: removes ``update_list`` entries from
    the pending-update set, reloads their limits from MySQL, merges them
    into the cached limit dict, drops keys no longer present in MySQL,
    and writes the dict back to Redis as zlib-compressed JSON.

    :param update_list: iterable of product keys whose limits changed.
    :return: the merged limit dict, or {} when the lock was not acquired.
    """
    limit_dict = {}
    with RedisLock('re_core_rule_limit_update_set') as lock:
        if lock:
            db = get_mysql()
            cache = get_redis()
            p = cache.pipeline()
            p.srem('re_core_rule_limit_update_set', *update_list)
            p.get('re_core_rule_limit_dict')
            res = p.execute()
            limit_dict = json.loads(zlib.decompress(res[1]))
            # NOTE: product keys are interpolated into the SQL; they come
            # from the internal Redis update set, not user input.
            sql = 'select `product_key`, `msg_limit`, `triggle_limit` from `{0}` where `product_key` in ({1})'.format(
                settings.MYSQL_TABLE['limit']['table'],
                ','.join(['"{}"'.format(x) for x in update_list]))
            db.execute(sql)
            result = db.fetchall()
            update_limit = {
                x[0]: {
                    'msg_limit': x[1],
                    'triggle_limit': x[2]
                }
                for x in result
            }
            limit_dict.update(update_limit)
            # Drop keys that were deleted from MySQL.  The original used
            # map() with a side-effecting lambda, which is a no-op on
            # Python 3 (map is lazy there); a plain loop is correct on
            # both interpreters.
            for key in update_list:
                if key not in update_limit and key in limit_dict:
                    limit_dict.pop(key)
            cache.set('re_core_rule_limit_dict',
                      zlib.compress(json.dumps(limit_dict)))
    return limit_dict
def check_device_online(did):
    """Return the cached online timestamp for device ``did``.

    Reads ``re_core_{did}_dev_online_ts`` from Redis; on a Redis error
    the failure is logged and the device is treated as offline.

    :param did: device id.
    :return: the raw value stored in Redis, or None.
    """
    cache = get_redis()
    try:
        result = cache.get('re_core_{}_dev_online_ts'.format(did))
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        result = None
    # Bug fix: the original computed ``result`` but never returned it,
    # so callers always received None regardless of the cached value.
    return result
def update_product_key_set(self):
    """Daemon loop: refresh rules, limits and product keys every 60 s.

    Only the worker bound to the 'all' queue performs the expensive
    rule-cache rebuild and limit refresh; every worker then refreshes
    its local product-key set and limit dict from Redis and flushes
    thermal data.  This method never returns.
    """
    cache = get_redis()
    p = cache.pipeline()
    while True:
        try:
            if 'all' == self.mq_queue_name:
                p.get('re_core_rule_cache_update')
                p.smembers('re_core_rule_limit_update_set')
                f_res = p.execute()
                # The marker key expiring signals a full cache rebuild.
                if not f_res[0]:
                    self.init_rules_cache()
                # Pending per-product limit updates, if any.
                if f_res[1]:
                    limit_dict = self.update_rule_limit(list(f_res[1]))
                    if limit_dict:
                        self.limit_dict = limit_dict
            p.smembers('re_core_product_key_set')
            p.get('re_core_rule_limit_dict')
            res = p.execute()
            if res[0]:
                self.product_key_set = res[0]
            if res[1]:
                self.limit_dict = json.loads(zlib.decompress(res[1]))
            self.save_thermal_data()
        except Exception as e:
            # The loop must survive transient Redis/MySQL failures, but
            # the original bare ``except: pass`` hid every error (even
            # SystemExit); log and keep going instead.
            logger.exception(e)
        finally:
            time.sleep(60)
def getset_rule_last_data(data, rule_id, did):
    """Atomically swap in the latest payload for a (rule, device) pair.

    Stores ``data`` as zlib-compressed JSON under the per-rule/per-device
    "latest" key with a 24 h TTL, and returns the previously stored
    payload (decoded), or {} when none existed.
    """
    key = 're_core_new_{}_{}_dev_latest'.format(rule_id, did)
    cache = get_redis()
    pipe = cache.pipeline()
    pipe.getset(key, zlib.compress(json.dumps(data)))
    pipe.expire(key, 86400)
    previous = pipe.execute()[0]
    if not previous:
        return {}
    return json.loads(zlib.decompress(previous))
def check_interval_locked(rule_id, did):
    """Return True if the notification-interval lock for (did, rule_id) is held.

    Any Redis failure is logged and reported as "not locked".
    """
    try:
        cache = get_redis()
        # Py2-only ``except X, e`` replaced with ``as e`` (valid on 2.6+/3).
        lock = cache.get('re_core_{0}_{1}_rule_interval'.format(did, rule_id))
        return bool(lock)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return False
def get_device_online_count(did):
    """Return the cached online counter for device ``did``.

    :return: the raw Redis value (string or None), or False when Redis
        is unreachable (the error is logged).
    """
    try:
        cache = get_redis()
        key = 're_device_{}_online_count'.format(did)
        return cache.get(key)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return False
def clean_device_offline_ts(did):
    """Delete the cached offline timestamp for device ``did``.

    :return: the Redis DELETE result (number of keys removed), or False
        when Redis is unreachable (the error is logged).
    """
    try:
        cache = get_redis()
        key = 're_device_{}_offline_ts'.format(did)
        return cache.delete(key)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return False
def get_pks_limit_cache():
    """Load the product-key rate-limit dict from Redis.

    The value is stored as zlib-compressed JSON; an absent key yields {}.
    """
    cache = get_redis()
    raw = cache.get('re_core_rule_limit_dict')
    if not raw:
        return {}
    return json.loads(zlib.decompress(raw))
def set_monitor_data(key, value, expired_seconds):
    """Store a monitoring value under ``key`` with a TTL.

    Redis failures are logged and otherwise ignored (best effort).
    """
    cache = get_redis()
    p = cache.pipeline()
    try:
        p.set(key, value)
        p.expire(key, expired_seconds)
        p.execute()
    except redis.exceptions.RedisError as e:
        logger.exception(e)
def get_rules_from_cache(product_key, did):
    """Fetch cached rules for a product key and a device id.

    Both cache entries hold zlib-compressed JSON lists; missing entries
    contribute nothing.  Returns the two lists concatenated.
    """
    cache = get_redis()
    pipe = cache.pipeline()
    pipe.get('re_core_{}_cache_rules'.format(product_key))
    pipe.get('re_core_{}_cache_rules'.format(did))
    rules = []
    for blob in pipe.execute():
        if blob:
            rules += json.loads(zlib.decompress(blob))
    return rules
def get_json(content, params, refresh, name, rule_id, now_ts):
    # Resolve a rule's "custom json" node: return the cached HTTP
    # response for (rule_id, name) or, under a Redis lock, substitute
    # ${param} placeholders into the request template, perform the
    # GET/POST and cache the raw response body for ``refresh`` + 5 s.
    # NOTE(review): as visible here the function never returns ``result``
    # and the RedisError handler neither breaks nor sleeps (tight retry
    # loop) — the trailing lines were presumably lost; confirm against
    # the original file.
    cache_key = 're_core_{0}_{1}_custom_json'.format(rule_id, name)
    cache = get_redis()
    result = {}
    while True:
        try:
            result = cache.get(cache_key)
            if result:
                result = json.loads(result)
                break
            else:
                with RedisLock(cache_key) as lock:
                    if lock:
                        # Replace "${key}" placeholders with the
                        # JSON-encoded parameter values, then re-parse.
                        _content = json.dumps(content)
                        for key, val in params.items():
                            _content = _content.replace(
                                '"${' + key + '}"', json.dumps(val))
                        content = json.loads(_content)
                        url = content['url']
                        headers = content.get('headers', {})
                        data = content.get('data', {})
                        method = content.get('method', 'get')
                        if 'get' == method:
                            response = requests.get(url,
                                                    headers=headers,
                                                    params=data)
                        elif 'post' == method:
                            response = requests.post(url,
                                                     data=json.dumps(data),
                                                     headers=headers)
                        else:
                            raise Exception(
                                u'invalid http method: {}'.format(method))
                        # Accept any 2xx; anything else (or a non-JSON
                        # body) is surfaced as an exception.
                        if response.status_code > 199 and response.status_code < 300:
                            try:
                                result = json.loads(response.content)
                            except ValueError:
                                raise Exception(
                                    u'error response: status_code - {0}, content: {1}'
                                    .format(response.status_code,
                                            response.content[:100]))
                        else:
                            raise Exception(
                                u'error response: status_code - {0}, content: {1}'
                                .format(response.status_code,
                                        response.content[:100]))
                        # Cache the raw body slightly longer than the
                        # refresh window so readers never see a gap.
                        p = cache.pipeline()
                        p.set(cache_key, response.content)
                        p.expire(cache_key, refresh + 5)
                        p.execute()
                        break
                # Another worker holds the lock: wait for it to fill the
                # cache, then retry.
                time.sleep(1)
        except redis.exceptions.RedisError, e:
            result = {'error_message': 'redis error: {}'.format(str(e))}
def update_device_online(did, ts, status=False):
    """Mark a device online (store ``ts``) or offline (delete the key).

    :param did: device id.
    :param ts: online timestamp to store when ``status`` is truthy.
    :param status: True marks the device online; False clears the key.
    Redis failures are logged and otherwise ignored.
    """
    cache = get_redis()
    key = 're_core_{}_dev_online_ts'.format(did)
    try:
        if status:
            cache.set(key, ts)
        else:
            cache.delete(key)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
def get_product_whitelist():
    """Return the PRODUCT_WHITELIST list from the constance Redis cache.

    :return: the decoded list, or [] when the key is absent or Redis
        fails (the error is logged).
    """
    try:
        cache = get_redis()
        value = cache.get('constance:gwreapi:PRODUCT_WHITELIST')
        if value:
            return loads(value)
        return []
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return []
def get_monitor_dids():
    """Return the MONITOR_DIDS list from the constance Redis cache.

    :return: the decoded list, or [] when the key is absent or Redis
        fails (the error is logged).
    """
    try:
        cache = get_redis()
        value = cache.get('constance:gwreapi:MONITOR_DIDS')
        if value:
            return loads(value)
        return []
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return []
def update_sequence(key, data, expire=settings.SEQUENCE_EXPIRE):
    """Prepend ``data`` (JSON-encoded) to the capped Redis list ``key``.

    The list is trimmed to SEQUENCE_MAX_LEN entries and its TTL reset.

    :return: None on success, or an ``error_message`` dict on Redis
        failure (also logged).
    """
    try:
        cache = get_redis()
        p = cache.pipeline()
        p.lpush(key, json.dumps(data))
        p.expire(key, expire)
        p.ltrim(key, 0, settings.SEQUENCE_MAX_LEN - 1)
        p.execute()
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return {'error_message': 'redis error: {}'.format(str(e))}
def __init__(self, mq_queue_name, product_key=None, routing_key=None):
    """Bind this worker to an MQ queue and set up its processing state.

    :param mq_queue_name: queue to consume ('all' enables cache upkeep).
    :param product_key: product filter; defaults to the '*' wildcard.
    :param routing_key: optional MQ routing key (defaults to None).
    """
    # Local caches populated later from Redis.
    self.limit_dict = {}
    self.product_key_set = set()
    self.mq_queue_name = mq_queue_name
    self.product_key = product_key if product_key else '*'
    self.routing_key = routing_key if routing_key else None
    # Connect to MQ before wiring up the processing pipeline.
    self.mq_initial()
    self.processor = MainProcessor(MainSender(self.product_key))
    # Thermal/usage accounting.
    self.thermal_map = defaultdict(int)
    self.thermal_data = {}
    self.cache = get_redis()
def get_noti_product_interval(product_key):
    """Return the cached notification interval for ``product_key``.

    :return: the raw Redis value (string or None), or 0 when Redis is
        unreachable (the error is logged).
    """
    try:
        cache = get_redis()
        key = 're_core_product_{}_interval'.format(product_key)
        return cache.get(key)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return 0
def get_device_offline_ts(did):
    """Return the cached offline timestamp for device ``did``.

    :return: the raw Redis value (string or None), or False when Redis
        is unreachable (the error is logged).
    """
    try:
        cache = get_redis()
        key = 're_device_{}_offline_ts'.format(did)
        return cache.get(key)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        return False
def set_interval_lock(rule_id, did, interval):
    """Try to acquire the notification-interval lock for (did, rule_id).

    Uses SETNX so only the first caller within an interval wins.

    :return: {} normally, or an ``error_message`` dict on Redis failure.
    """
    if not rule_id or not interval:
        return {}
    lock = False
    result = {}
    try:
        cache = get_redis()
        lock = cache.setnx(
            're_core_{0}_{1}_rule_interval'.format(did, rule_id), 1)
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        result = {'error_message': 'redis error: {}'.format(str(e))}
    # NOTE(review): ``interval`` is only validated, never used to EXPIRE
    # the lock key, and the SETNX outcome in ``lock`` is not reported to
    # the caller — confirm against the original file whether an expiry
    # or ``return lock`` was intended.  Bug fix: the original fell off
    # the end (implicit None); return ``result`` so the error dict is
    # actually delivered.
    return result
def set_device_offline_ts(did, ts, interval):
    """Record the time an offline notification was sent for ``did``.

    The key expires ``interval`` + 2 seconds later so the next offline
    event after the interval can notify again.  Redis failures are
    logged and otherwise ignored.
    """
    cache = get_redis()
    p = cache.pipeline()
    key = 're_device_{}_offline_ts'.format(did)
    try:
        p.set(key, str(ts))
        p.expire(key, int(interval) + 2)
        p.execute()
    except redis.exceptions.RedisError as e:
        logger.exception(e)
def set_noti_product_interval(product_key, delay_time):
    """Cache the notification interval for a product.

    Stores ``delay_time`` under the per-product interval key with the
    configured NOTIFICATION_INTERVAL_EXPIRE TTL.  Redis failures are
    logged and otherwise ignored.
    """
    cache = get_redis()
    p = cache.pipeline()
    key = 're_core_product_{}_interval'.format(product_key)
    try:
        p.set(key, str(delay_time))
        p.expire(key, settings.NOTIFICATION_INTERVAL_EXPIRE)
        p.execute()
    except redis.exceptions.RedisError as e:
        logger.exception(e)
def get_sequence(key, length, start=0):
    """Read ``length`` decoded entries from the Redis list ``key``.

    Entries are JSON-decoded; when fewer than ``length`` exist, the last
    available entry is repeated (deep-copied) to pad the result.

    :param key: Redis list key.
    :param length: number of entries to return.
    :param start: zero-based offset into the list.
    :return: list of decoded entries ([] when the list is empty), or an
        ``error_message`` dict on Redis failure.
    """
    try:
        cache = get_redis()
        result = cache.lrange(key, start, start + length - 1) or []
        if result:
            # List comprehension instead of map(): the result is indexed
            # and extended below, which a lazy Py3 map would break.
            result = [json.loads(x) for x in result]
            res_len = len(result)
            if res_len < length:
                # Pad with copies of the last available entry.
                result.extend([copy.deepcopy(result[-1])] *
                              (length - res_len))
            else:
                result = result[:length]
    except redis.exceptions.RedisError as e:
        logger.exception(e)
        result = {'error_message': 'redis error: {}'.format(str(e))}
    # Bug fix: the original computed ``result`` but never returned it.
    return result
def device_sequence(self, msg):
    # Process a "device_sequence" rule node: validate the triggering
    # event, make sure the referenced data point exists in task_vars
    # (otherwise re-queue a synthetic input node to fetch it), and —
    # every ``step``-th message — mark the sequence for building.
    # NOTE(review): this block appears truncated — ``flag`` is computed
    # but never consumed and this path returns nothing; confirm against
    # the original file.
    content = msg['current']['content']
    try:
        # alert/fault events only apply when the tracked attribute
        # matches the configured attr_type.
        if content['event'] in ['alert', 'fault'] and msg['task_vars'].get(
                content['attr'], '') != int(content['attr_type']):
            return []
    except ValueError:
        return []
    if content['data'] not in msg['task_vars']:
        # Data point missing: emit a device_data input node wired back
        # to this node so the value gets fetched first.
        next_node = {
            "id": "uuid",
            "params": [content['data']],
            "category": "input",
            "type": "device_data",
            "inputs": 0,
            "outputs": 1,
            "ports": [0],
            "wires": [
                [msg['current']['id']]
            ],
            "content": {
                "event": "data",
                "data_type": "device_data",
                "refresh": 3600,
                "interval": 10
            }
        }
        return [dict(copy.deepcopy(msg), current=next_node)]
    flag = True
    if content['step'] > 1:
        # Down-sample: a Redis counter per (did, data point) decides
        # whether this message is a ``step``-th occurrence.
        try:
            _key = 're_core_{0}_{1}_device_sequence_counter'.format(
                msg['task_vars']['did'], content['data'])
            cache = get_redis()
            p = cache.pipeline()
            p.incr(_key)
            p.expire(_key, settings.SEQUENCE_EXPIRE)
            res = p.execute()
        except redis.exceptions.RedisError, e:
            # Redis down: fall back to a sequence of repeated current
            # values and skip the down-sampling.
            msg['error_message'] = 'redis error: {}'.format(str(e))
            msg['task_vars'][content['alias']] = [
                copy.deepcopy(msg['task_vars'][content['data']])
            ] * content['length']
            flag = False
        else:
            # Keep the counter bounded so it never grows unchecked.
            if res[0] > content['step'] * 50:
                cache.incr(_key, -content['step'] * 50)
            if 0 != res[0] % content['step']:
                flag = False
def set_schedule_msg(key, ts, now, msg):
    """Register a message to be fired at unix timestamp ``ts``.

    Adds ``key`` to the per-second schedule set, stores the JSON message
    body under ``key``, and flips the bit for the second-of-day in the
    day's schedule bitmap.  Set and body expire 24 h after ``ts``.
    Redis failures are logged and otherwise ignored.
    """
    cache = get_redis()
    p = cache.pipeline()
    # Offset of ``ts`` within its day; the bitmap key is anchored at
    # that day's midnight (ts - offset).
    offset = ts % 86400
    bit_key = 're_core_{}_schedule_bitmap'.format(ts - offset)
    _key = 're_core_{}_schedule_set'.format(ts)
    try:
        expire_sec = int(86400 + ts - now)
        p.sadd(_key, key)
        p.expire(_key, expire_sec)
        p.set(key, json.dumps(msg))
        p.expire(key, expire_sec)
        p.setbit(bit_key, offset, 1)
        p.execute()
    except redis.exceptions.RedisError as e:
        logger.exception(e)
def cache_rules(rules, product_key=None):
    """Write compiled rules into Redis, one entry per obj/product key.

    Non-empty rule lists are stored zlib-compressed with a 24 h TTL;
    empty ones delete the stale cache entry.  Optionally registers
    ``product_key`` in the global product-key set.  Redis failures are
    logged and otherwise ignored.
    """
    if not rules:
        return
    try:
        cache = get_redis()
        p = cache.pipeline()
        if product_key:
            p.sadd('re_core_product_key_set', product_key)
        for k, v in rules.items():
            cache_key = 're_core_{}_cache_rules'.format(k)
            if v:
                p.set(cache_key, zlib.compress(json.dumps(v)))
                p.expire(cache_key, 86400)
            else:
                p.delete(cache_key)
        p.execute()
    except redis.exceptions.RedisError as e:
        logger.exception(e)
def check_rule_limit(product_key, limit, type, incr=True):
    """Check (and optionally count) a per-product daily limit.

    :param product_key: product being checked.
    :param limit: daily cap; a falsy value disables the check entirely.
    :param type: limit category used in the counter key (parameter name
        shadows the builtin but is kept for caller compatibility).
    :param incr: when True, count this event; when False, only inspect
        the current counter.
    :return: True while the day's counter is within ``limit``.
    """
    if not limit:
        return True
    cache = get_redis()
    key = 're_core_{0}_{1}_limit'.format(product_key, type)
    if incr:
        num = cache.incr(key)
        if 1 == num:
            # First hit today: expire the counter at local midnight.
            cache.expire(
                key,
                int(
                    time.mktime(
                        time.strptime(time.strftime('%Y-%m-%d'),
                                      '%Y-%m-%d')) + 86400 - time.time()))
    else:
        num = cache.get(key) or 0
    return int(num) <= limit
def scan(self, ts, log):
    """Dispatch every scheduled message stored for second ``ts``.

    Walks the per-minute task directory for ``ts``, reads each message
    file, sends the batch, then removes the directory — all under a
    Redis lock so only one scanner handles a given second.
    """
    log['running_status'] = 'scan_device'
    minute = ts % 60
    dir_name = '{0}/task/{1}/{2}'.format(
        settings.SCHEDULE_FILE_DIR.rstrip('/'), minute, ts)
    if os.path.isdir(dir_name):
        cache = get_redis()
        lock_key = 're_core_{0}_{1}'.format(self.fid, ts)
        with RedisLock(lock_key) as lock:
            # Re-check the directory: another worker may have consumed
            # it between our isdir() check and acquiring the lock.
            if lock and os.path.isdir(dir_name):
                msg_list = []
                for dir_path, _, file_names in os.walk(dir_name):
                    for file_name in file_names:
                        msg = self.read_msg(
                            '{0}/{1}'.format(dir_path, file_name))
                        if msg:
                            msg_list.append(msg)
                if msg_list:
                    self.sender.send_msgs(msg_list)
                shutil.rmtree(dir_name)
                cache.delete(lock_key)
    log['dir_name'] = dir_name
    logger.info(json.dumps(log))
def begin(self):
    """Block until the shared caches are primed, then start consuming MQ.

    Polls Redis every second until the product-key set and the limit
    dict appear, copies them locally, then enters ``mq_listen`` (which
    does not return).
    """
    cache = get_redis()
    while True:
        try:
            pk_set = cache.smembers('re_core_product_key_set')
            if pk_set:
                self.product_key_set = pk_set
                break
        except Exception as e:
            # Startup must survive transient Redis errors, but the
            # original bare ``except: pass`` hid them all; log instead.
            logger.exception(e)
        time.sleep(1)
    while True:
        try:
            limit_dict = cache.get('re_core_rule_limit_dict')
            if limit_dict:
                self.limit_dict = json.loads(zlib.decompress(limit_dict))
                break
        except Exception as e:
            logger.exception(e)
        time.sleep(1)
    self.mq_listen(self.mq_queue_name, self.product_key,
                   settings.IS_NO_ACK, self.routing_key)
def get_token(self, key, content, log={}):
    # Fetch (or refresh) a cached API token for the push channel.
    # Makes up to three passes: read the token from Redis; if absent,
    # grab a best-effort SETNX lock and request a fresh token from
    # content['token_url'], caching it until 5 s before server expiry.
    # NOTE(review): the block appears truncated — ``token`` is never
    # returned, the '_lock' key is never released or expired, and
    # ``cnt`` only advances on HTTP 429; confirm against the original
    # file.  Also note the mutable default ``log={}`` is shared across
    # calls — callers should pass their own dict.
    token = None
    cnt = 0
    lock = False
    cache = get_redis()
    p = cache.pipeline()
    while cnt < 3 and not token:
        try:
            token = cache.get(key)
            if not token:
                # Only the lock holder hits the token endpoint.
                lock = cache.setnx(key + '_lock', 'lock')
                if lock:
                    token_url = content['token_url'] + '?app_id={}&secret_key={}'.format(
                        content['app_id'], content['secret_key'])
                    resp_token = requests.get(token_url)
                    if 200 == resp_token.status_code:
                        resp_content = json.loads(resp_token.content)
                        token = resp_content['token']
                        # Parse the ISO timestamp; the fractional-second
                        # part is split off and ignored.
                        expires_at = resp_content['expires_at'].split('.')
                        expires_at = int(
                            time.mktime(
                                time.strptime(
                                    expires_at[0],
                                    '%Y-%m-%dT%H:%M:%S')))  # * 1000 + int(expires_at[1][0:-1])
                        # Cache the token, expiring 5 s before the
                        # server-side expiry.
                        p.set(key, token)
                        p.expire(key, expires_at - int(time.time()) - 5)
                        p.execute()
                    elif 429 == resp_token.status_code:
                        # Rate limited: record it and abort retries
                        # (cnt += 4 pushes past the loop bound).
                        log['status'] = resp_token.status_code
                        log['message'] = resp_token.content
                        cnt += 4
                    else:
                        time.sleep(2)
        except redis.exceptions.RedisError, e:
            log['message'] = 'redis error: {}'.format(str(e))
            raise e
        except requests.exceptions.RequestException, e:
            log['message'] = 'http request error: {}'.format(str(e))
def init_rules_cache(self):
    """Rebuild the full rule/limit cache from MySQL under a Redis lock.

    Loads every product's msg/triggle limits, then pages through all
    rules 500 at a time, grouping enabled ones by obj_id.  The grouped
    rules are written via cache_rules(), all product keys are registered
    in the global set, and the 're_core_rule_cache_update' marker key is
    armed with a 23 h TTL (its expiry schedules the next rebuild).

    :return: True when this worker held the lock and rebuilt the cache,
        otherwise None (another worker is doing it).
    """
    with RedisLock('re_core_product_key_set') as lock:
        if lock:
            db = get_mysql()
            cache = get_redis()
            p = cache.pipeline()
            cache_rule = defaultdict(list)
            pk_set = []
            # Load all per-product limits.
            sql = 'select `product_key`, `msg_limit`, `triggle_limit` from `{0}`'.format(
                settings.MYSQL_TABLE['limit']['table'])
            db.execute(sql)
            result = db.fetchall()
            limit_dict = {
                x[0]: {
                    'msg_limit': x[1],
                    'triggle_limit': x[2]
                }
                for x in result
            }
            p.set('re_core_rule_limit_dict',
                  zlib.compress(json.dumps(limit_dict)))
            # Walk all rules in id order, 500 per page.
            id_max = 0
            while True:
                sql = 'select `id`, `product_key`, `rule_tree`, `custom_vars`, `enabled`, `ver`, `type`, `interval`, `obj_id`, `params` from `{0}` where `id`>{1} order by `id` limit 500'.format(
                    settings.MYSQL_TABLE['rule']['table'], id_max)
                db.execute(sql)
                result = db.fetchall()
                if not result:
                    break
                # ``rule_type`` renamed from ``type`` to avoid shadowing
                # the builtin.
                for (rule_id, product_key, rule_tree, custom_vars, enabled,
                        ver, rule_type, interval, obj_id, params) in result:
                    pk_set.append(product_key)
                    self.product_key_set.add(product_key)
                    # Disabled rules are registered in the key set but
                    # not cached.
                    if 1 != enabled:
                        continue
                    rule_tree = json.loads(rule_tree) if rule_tree else []
                    custom_vars = json.loads(
                        custom_vars) if custom_vars else {}
                    cache_rule[obj_id].append({
                        'ver': ver,
                        'rule_id': rule_id,
                        'rule_tree': rule_tree,
                        'custom_vars': custom_vars,
                        'params': json.loads(params) if params else [],
                        'type': rule_type,
                        'interval': interval
                    })
                id_max = result[-1][0]
            if pk_set:
                p.sadd('re_core_product_key_set', *pk_set)
            cache_rules(cache_rule)
            # Marker key whose 23 h expiry triggers the next rebuild.
            p.setnx('re_core_rule_cache_update', 1)
            p.expire('re_core_rule_cache_update', 82800)
            p.execute()
            return True