def cache_running_info(self, func_id, script_publish_version, exec_mode=None, is_failed=False, cost=None):
    timestamp = int(time.time())

    # Global counter
    data = {
        'funcId': func_id,
        'scriptPublishVersion': script_publish_version,
        'execMode': exec_mode,
        'isFailed': is_failed,
        'cost': cost,
        'timestamp': timestamp,
    }
    data = toolkit.json_dumps(data, indent=0)

    cache_key = toolkit.get_cache_key('syncCache', 'scriptRunningInfo')
    self.cache_db.lpush(cache_key, data)

    # Function call counter
    data = {
        'funcId': func_id,
        'timestamp': timestamp,
    }
    data = toolkit.json_dumps(data, indent=0)

    cache_key = toolkit.get_cache_key('syncCache', 'funcCallInfo')
    self.cache_db.lpush(cache_key, data)
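# A minimal consumer-side sketch for the two lists populated above, assuming a
# plain redis-py client and that the producer pushed with LPUSH as shown (the
# literal key below is illustrative; the real key comes from toolkit.get_cache_key):
import json

import redis

def drain_func_call_info(client, cache_key, batch_size=100):
    # RPOP drains from the tail, so records come out in the order they were pushed
    records = []
    for _ in range(batch_size):
        raw = client.rpop(cache_key)
        if raw is None:
            break

        records.append(json.loads(raw))

    return records

# client = redis.Redis()
# records = drain_func_call_info(client, 'syncCache.funcCallInfo')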
def _cache_scripts(self):
    scripts = sorted(SCRIPT_MAP.values(), key=lambda x: x['seq'])
    scripts_dump = toolkit.json_dumps(scripts, sort_keys=True)

    cache_key = toolkit.get_cache_key('fixedCache', 'scriptsMD5')
    self.cache_db.set(cache_key, toolkit.get_md5(scripts_dump))

    cache_key = toolkit.get_cache_key('fixedCache', 'scriptsDump')
    self.cache_db.set(cache_key, scripts_dump)
def run(self, *args, **kwargs):
    command = args[0]
    command_args = args[1:]

    args_dumps = ', '.join([toolkit.json_dumps(x) for x in command_args])
    if len(args_dumps) > LIMIT_ARGS_DUMP:
        args_dumps = args_dumps[0:LIMIT_ARGS_DUMP - 3] + '...'

    self.logger.debug('[MEMCACHED] Run `{}` <- `{}`'.format(command.upper(), args_dumps))

    return getattr(self.client, command.lower())(*command_args, **kwargs)
def format(self, record):
    message = record.msg['message']
    meta = record.msg['meta']

    output_content = None

    if self.options.get('json'):
        log_content_json = {}

        for field, k in LOG_JSON_FIELD_MAP.items():
            log_content_json[k] = meta[field]

        log_content_json['message'] = message

        output_content = toolkit.json_dumps(log_content_json)

    else:
        log_content_arr = []
        for field in LOG_TEXT_FIELDS:
            # Detect field color
            field_color = LOG_TEXT_COLOR_MAP[field]
            if field_color is True:
                field_color = LOG_LEVELS['colors'][meta['level']]

            # Pretty field
            field_value = meta.get(field) or ''
            if field == 'upTime':
                field_value = 'UP {}s'.format(field_value or '0')
            elif field == 'costTime':
                field_value = '{}ms'.format(field_value or '0')
            elif field == 'diffTime':
                field_value = '+{}ms'.format(field_value or '0')
            elif field == 'userId' or field == 'userIdShort':
                field_value = field_value or 'NON_USER_ID'
            elif field == 'username':
                field_value = '@{}'.format(field_value or 'NON_USERNAME')

            field_value = '[{}]'.format(field_value)

            # Add color
            if self.options.get('color') and hasattr(colors, field_color):
                field_value = str(colors.__getattr__(field_color)(field_value))

            log_content_arr.append(field_value)

        log_content_arr.append(message)

        output_content = ' '.join(log_content_arr)

    return output_content
def run(self, *args, **kwargs):
    command = args[0]
    command_args = args[1:]

    if not self.skip_log:
        key = ''
        if len(command_args) > 1:
            key = command_args[0] + ' ...'
        elif len(command_args) > 0:
            key = command_args[0]

        kwargs_dump = ''
        if kwargs:
            kwargs_dump = 'kwargs=' + toolkit.json_dumps(kwargs)

        self.logger.debug('[REDIS] Run `{} {}` {}'.format(command.upper(), key, kwargs_dump))

    return getattr(self.client, command)(*command_args, **kwargs)
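# A minimal usage sketch for a command wrapper like the one above (the connection
# below is illustrative). `run('setex', ...)` resolves to the matching redis-py
# method via getattr, so the two calls below are equivalent:
import redis

client = redis.Redis(host='localhost', port=6379, db=0)

client.setex('demo-key', 300, 'demo-value')              # direct redis-py call
getattr(client, 'setex')('demo-key', 300, 'demo-value')  # what run('setex', ...) does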
def call(self, method, path=None, query=None, body=None, timeout=None):
    if path is None:
        method, path = method.split(' ', 1)

    url = '{protocol}://{host}:{port}'.format(**self.config) + path
    timeout = timeout or self.config['timeout']

    if not isinstance(body, str):
        body = toolkit.json_dumps(body)

    method = method.upper()

    # Signature
    nonce = uuid.uuid4().hex
    timestamp = str(int(time.time()))

    headers = {
        'Content-Type': 'application/json',
        'X-Auth-Nonce': nonce,
        'X-Auth-Timestamp': timestamp,
    }

    secret_key = bytes(self.config.get('secretKey') or '', 'utf-8')
    str_to_sign = bytes('\n'.join([method, path, nonce, timestamp, body]), 'utf-8')
    sign = hmac.new(secret_key, str_to_sign, hashlib.sha1).hexdigest().upper()

    headers['X-Auth-Signature'] = sign

    r = self.client.request(method=method, url=url, params=query, data=body, headers=headers, timeout=timeout)

    parsed_resp = parse_response(r)
    if r.status_code >= 400:
        e = Exception(r.status_code, parsed_resp)
        raise e

    return r.status_code, parsed_resp
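# A standalone sketch of the signing scheme above, e.g. for verifying the request
# on the receiving side (the function names here are illustrative): the string to
# sign is METHOD, path, nonce, timestamp and raw body joined by '\n', HMAC-SHA1
# signed and upper-cased.
import hashlib
import hmac

def compute_signature(secret_key, method, path, nonce, timestamp, body):
    str_to_sign = '\n'.join([method.upper(), path, nonce, timestamp, body])
    digest = hmac.new(secret_key.encode('utf-8'), str_to_sign.encode('utf-8'), hashlib.sha1)
    return digest.hexdigest().upper()

def verify_signature(secret_key, method, path, nonce, timestamp, body, received_sign):
    expected = compute_signature(secret_key, method, path, nonce, timestamp, body)
    # Constant-time comparison to avoid timing side channels
    return hmac.compare_digest(expected, received_sign)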
def query(self, *args, **options):
    command = args[0]
    command_args = args[1:]

    if not self.skip_log:
        key = ''
        if len(command_args) > 1:
            key = command_args[0] + ' ...'
        elif len(command_args) > 0:
            key = command_args[0]

        options_dump = ''
        if options:
            options_dump = 'options=' + toolkit.json_dumps(options)

        self.logger.debug('[REDIS] Query `{} {}` {}'.format(command.upper(), key, options_dump))

    return self.client.execute_command(*args, **options)
def cache_func_result(self, func_id, script_code_md5, script_publish_version, func_call_kwargs_md5, result, cache_result_expires):
    if not all([func_id, script_code_md5, script_publish_version, func_call_kwargs_md5, cache_result_expires]):
        return

    cache_key = toolkit.get_cache_key('cache', 'funcResult', tags=[
        'funcId', func_id,
        'scriptCodeMD5', script_code_md5,
        'scriptPublishVersion', script_publish_version,
        'funcCallKwargsMD5', func_call_kwargs_md5])

    result_dumps = toolkit.json_dumps(result)
    self.cache_db.setex(cache_key, cache_result_expires, result_dumps)
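# A read-side sketch matching the write above. It assumes (not shown in the
# original) that funcCallKwargsMD5 is the MD5 of a deterministic JSON dump of the
# call kwargs; the helper names are illustrative:
import hashlib
import json

def make_func_call_kwargs_md5(func_call_kwargs):
    dump = json.dumps(func_call_kwargs, sort_keys=True, separators=(',', ':'))
    return hashlib.md5(dump.encode('utf-8')).hexdigest()

def load_cached_func_result(cache_db, cache_key):
    cached = cache_db.get(cache_key)
    if cached is None:
        return None  # cache miss: run the function for real

    return json.loads(cached)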
def _set_task_status(self, status, **next_context):
    '''
    Set task result for WAT's monitor.
    '''
    # Fixed in Celery for saving/publishing task result.
    # See [https://github.com/celery/celery/blob/v4.1.0/celery/backends/base.py#L518]
    if self.request.called_directly:
        return

    self.request.update(**next_context)

    if status not in (celery_status.SUCCESS, celery_status.FAILURE):
        return

    # Publish result via Redis
    key = 'celery-task-meta-' + self.request.id
    content = {
        'task': self.name,
        'id': self.request.id,
        'args': self.request.args,
        'kwargs': self.request.kwargs,
        # NOTE: Celery's Task.request.origin is *not* the same thing as the Origin in Func business logic
        'origin': self.request.origin,
        'queue': self.worker_queue,
        'status': status,
        'startTime': self.request.x_start_time,
        'endTime': self.request.x_end_time,
        'retval': self.request.x_retval,
        'einfoTEXT': self.request.x_einfo_text,
        'exceptionMessage': self.request.x_exception_message,
        'exceptionDump': self.request.x_exception_dump,
    }
    if hasattr(self.request, 'extra'):
        content['extra'] = self.request.extra

    content = toolkit.json_dumps(content, indent=None)

    self.backend.client.publish(key, content)
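# A sketch of how a monitor could receive the published result, assuming the
# backend client above is redis-py and the result is PUBLISHed on a channel named
# after the Celery task-meta key:
import json

import redis

def wait_for_task_result(client, task_id, timeout=10.0):
    pubsub = client.pubsub(ignore_subscribe_messages=True)
    pubsub.subscribe('celery-task-meta-' + task_id)

    message = pubsub.get_message(timeout=timeout)  # None on timeout
    pubsub.close()

    if message and message['type'] == 'message':
        return json.loads(message['data'])

    return None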
def publish(self, topic, message, timeout=None):
    if time.time() - self.producers_update_timestamp > self.PRODUCERS_UPDATE_INTERVAL:
        self.update_producers()

    nsq_node = random.choice(self.nsq_nodes)
    url = '{}://{}/pub'.format(self.config['protocol'], nsq_node)
    query = {'topic': topic}

    if isinstance(message, (dict, list, tuple)):
        message = toolkit.json_dumps(message)
    message = six.ensure_binary(message)

    timeout = timeout or self.config['timeout']

    self.logger.debug('[NSQLOOKUP] Pub -> `{}`'.format(topic))

    r = requests.post(url, params=query, data=message, timeout=timeout)
    r.raise_for_status()
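# A standalone sketch of the nsqd HTTP publish used above (nsqd exposes
# POST /pub?topic=<topic>; the address below is illustrative):
import json

import requests

def nsq_pub(nsqd_http_addr, topic, message, timeout=3):
    if isinstance(message, (dict, list, tuple)):
        message = json.dumps(message)

    r = requests.post('http://{}/pub'.format(nsqd_http_addr),
                      params={'topic': topic},
                      data=message.encode('utf-8'),
                      timeout=timeout)
    r.raise_for_status()

# nsq_pub('127.0.0.1:4151', 'demo-topic', {'hello': 'world'})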
def ts_add(self, key, value, timestamp=None, mode=None):
    mode = mode or 'update'

    if not self.skip_log:
        self.logger.debug('[REDIS] TS Add `{}`'.format(key))

    if key not in self.checked_keys:
        cache_res = self.client.type(key)
        if six.ensure_str(cache_res) != 'zset':
            self.client.delete(key)

        self.checked_keys.add(key)

    timestamp = timestamp or int(time.time())

    # Align the timestamp to the configured minimum interval
    timestamp = int(timestamp / self.config['tsMinInterval']) * self.config['tsMinInterval']

    if mode.lower() == 'addup':
        prev_points = self.client.zrangebyscore(key, timestamp, timestamp)
        if prev_points:
            _, prev_value = self.ts_parse_point(prev_points[0])
            value += float(prev_value)

    self.client.zremrangebyscore(key, timestamp, timestamp)

    value = toolkit.json_dumps(value)
    data = ','.join([str(timestamp), value])

    self.client.zadd(key, {data: timestamp})

    self.client.expire(key, self.config['tsMaxAge'])

    if self.config['tsMaxPeriod']:
        min_timestamp = int(time.time()) - self.config['tsMaxPeriod']
        self.client.zremrangebyscore(key, '-inf', min_timestamp)
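# A small sketch of the bucket alignment used above: timestamps are floored to
# tsMinInterval so that points within the same window share one score, and 'addup'
# mode then accumulates values within that window (the interval is illustrative):
ts_min_interval = 15  # seconds

for ts in (123, 134, 135):
    aligned = int(ts / ts_min_interval) * ts_min_interval
    print(ts, '->', aligned)  # 123 -> 120, 134 -> 120, 135 -> 135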
def client(self, data, timeout=3):
    self.times += 1
    if self.times % 20 == 0:
        if time.time() - self.start_time < 60:
            time.sleep(60)

        self.start_time = time.time()

    headers = {'Content-Type': 'application/json; charset=utf-8'}

    data = toolkit.json_dumps(data)

    resp = requests.post(self.webhook, headers=headers, data=data, timeout=timeout)
    parsed_resp = parse_response(resp)

    if not isinstance(parsed_resp, dict) or parsed_resp.get('errcode') != 0:
        if isinstance(parsed_resp, (six.string_types, six.text_type)):
            parsed_resp = 'Error occurred, response is an HTML page'

        raise Exception(parsed_resp)

    return resp.status_code, parsed_resp
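# The throttling above matches a "20 messages per minute" webhook limit (common
# for DingTalk-style bots; whether that is the exact limit here is an assumption).
# A standalone sketch of the same window logic:
import time

class WindowThrottle:
    def __init__(self, max_calls=20, window=60):
        self.max_calls = max_calls
        self.window = window
        self.times = 0
        self.start_time = time.time()

    def wait(self):
        self.times += 1
        if self.times % self.max_calls == 0:
            if time.time() - self.start_time < self.window:
                time.sleep(self.window)

            self.start_time = time.time()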
def func_debugger(self, *args, **kwargs):
    # Target function and arguments
    func_id = kwargs.get('funcId')
    func_call_kwargs = kwargs.get('funcCallKwargs') or {}

    script_set_id = func_id.split('__')[0]
    script_id = func_id.split('.')[0]
    func_name = func_id[len(script_id) + 1:]

    self.logger.info('Main.FuncDebugger Task launched: `{}`'.format(func_id))

    # Origin
    origin = kwargs.get('origin')
    origin_id = kwargs.get('originId')

    # Task IDs
    task_id = self.request.id
    root_task_id = kwargs.get('rootTaskId') or task_id

    # Function call chain
    func_chain = kwargs.get('funcChain') or []
    func_chain.append(func_id)

    # Execution mode: running from the UI only supports sync mode
    exec_mode = 'sync'

    # Start time
    start_time = int(time.time())
    start_time_ms = int(time.time() * 1000)

    # Trigger time
    trigger_time = kwargs.get('triggerTime') or start_time
    trigger_time_ms = kwargs.get('triggerTimeMs') or start_time_ms

    # HTTP request
    http_request = kwargs.get('httpRequest') or {}
    if 'headers' in http_request:
        http_request['headers'] = toolkit.IgnoreCaseDict(http_request['headers'])

    # Function response, scope, log messages, trace info and error stack
    func_resp = None
    script_scope = None
    log_messages = None
    trace_info = None
    einfo_text = None

    # When the task is forcibly killed the except branch is never entered,
    # so the status defaults to "failure"
    end_status = 'failure'

    try:
        # Fetch the code object
        script_dict = self.get_script_dict_for_debugger(script_id)

        target_script = script_dict.get(script_id)
        if not target_script:
            e = NotFoundException('Script `{}` not found'.format(script_id))
            raise e

        extra_vars = {
            '_DFF_DEBUG': True,
            '_DFF_TASK_ID': task_id,
            '_DFF_ROOT_TASK_ID': root_task_id,
            '_DFF_SCRIPT_SET_ID': script_set_id,
            '_DFF_SCRIPT_ID': script_id,
            '_DFF_FUNC_ID': func_id,
            '_DFF_FUNC_NAME': func_name,
            '_DFF_FUNC_CHAIN': func_chain,
            '_DFF_ORIGIN': origin,
            '_DFF_ORIGIN_ID': origin_id,
            '_DFF_EXEC_MODE': exec_mode,
            '_DFF_START_TIME': start_time,
            '_DFF_START_TIME_MS': start_time_ms,
            '_DFF_TRIGGER_TIME': trigger_time,
            '_DFF_TRIGGER_TIME_MS': trigger_time_ms,
            '_DFF_CRONTAB': kwargs.get('crontab'),
            '_DFF_CRONTAB_DELAY': kwargs.get('crontabDelay'),
            '_DFF_QUEUE': self.queue,
            '_DFF_WORKER_QUEUE': self.worker_queue,
            '_DFF_HTTP_REQUEST': http_request,
        }
        self.logger.info('[CREATE SAFE SCOPE] `{}`'.format(script_id))
        script_scope = self.create_safe_scope(script_name=script_id, script_dict=script_dict, extra_vars=extra_vars)

        # Load the code
        self.logger.info('[LOAD SCRIPT] `{}`'.format(script_id))
        script_scope = self.safe_exec(target_script['codeObj'], globals=script_scope)

        # Run the script
        if func_name:
            entry_func = script_scope.get(func_name)
            if not entry_func:
                e = NotFoundException('Function `{}` not found in `{}`'.format(func_name, script_id))
                raise e

            # Call the function
            self.logger.info('[RUN FUNC] `{}`'.format(func_id))
            func_resp = entry_func(**func_call_kwargs)

            if not isinstance(func_resp, BaseFuncResponse):
                func_resp = FuncResponse(func_resp)

            if isinstance(func_resp.data, Exception):
                raise func_resp.data

    except Exception as e:
        for line in traceback.format_exc().splitlines():
            self.logger.warning(line)

        end_status = 'failure'

        # A pre-check task must return both the check result and the error to
        # the caller, so the task itself never fails;
        # the API side decides whether the pre-check passed, re-wraps the error
        # and returns it to the caller
        self.logger.error('Error occurred in script. `{}`'.format(script_id or func_id))

        trace_info = self.get_trace_info()
        einfo_text = self.get_formated_einfo(trace_info, only_in_script=True)

    else:
        end_status = 'success'

    finally:
        result = {}

        if script_scope:
            # Script parsing result
            exported_api_func = script_scope['DFF'].exported_api_funcs or []
            result['exportedAPIFuncs'] = exported_api_func

            # Script log output
            log_messages = script_scope['DFF'].log_messages or []
            result['logMessages'] = log_messages

        if func_name and func_resp:
            # Prepare the function result
            func_result_raw = None
            func_result_repr = None
            func_result_json_dumps = None

            if func_resp.data:
                try:
                    func_result_raw = func_resp.data
                except Exception as e:
                    for line in traceback.format_exc().splitlines():
                        self.logger.error(line)

                try:
                    func_result_repr = pprint.saferepr(func_resp.data)
                except Exception as e:
                    for line in traceback.format_exc().splitlines():
                        self.logger.error(line)

                try:
                    func_result_json_dumps = toolkit.json_dumps(func_resp.data, indent=None)
                except Exception as e:
                    for line in traceback.format_exc().splitlines():
                        self.logger.error(line)

            result['funcResult'] = {
                'raw': func_result_raw,
                'repr': func_result_repr,
                'jsonDumps': func_result_json_dumps,
                '_responseControl': func_resp._create_response_control(),
            }

        if end_status == 'failure':
            trace_info = trace_info or self.get_trace_info()
            einfo_text = einfo_text or self.get_formated_einfo(trace_info, only_in_script=True)

        # Prepare the return value
        retval = {
            'result': result,
            'traceInfo': trace_info,
            'einfoTEXT': einfo_text,
            'cost': time.time() - start_time,
        }

        # Clean up resources
        self.clean_up()

        # Return the result
        return retval
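# A small sketch of the funcId convention parsed at the top of func_debugger (and
# of func_runner below): '<scriptSetId>__<scriptName>.<funcName>'. The script id
# is everything before the first '.', the script set id everything before '__'
# (the value below is illustrative):
func_id = 'demo_script_set__demo_script.hello_world'

script_set_id = func_id.split('__')[0]    # 'demo_script_set'
script_id = func_id.split('.')[0]         # 'demo_script_set__demo_script'
func_name = func_id[len(script_id) + 1:]  # 'hello_world'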
def func_runner(self, *args, **kwargs):
    # Target function and arguments
    func_id = kwargs.get('funcId')
    func_call_kwargs = kwargs.get('funcCallKwargs') or {}
    func_call_kwargs_md5 = kwargs.get('funcCallKwargsMD5')

    script_set_id = func_id.split('__')[0]
    script_id = func_id.split('.')[0]
    func_name = func_id[len(script_id) + 1:]

    self.logger.info('Main.FuncRunner Task launched: `{}`'.format(func_id))

    # Task origin
    origin = kwargs.get('origin')
    origin_id = kwargs.get('originId')

    # Task info record limit
    task_info_limit = kwargs.get('taskInfoLimit')

    # Task IDs
    task_id = self.request.id
    root_task_id = kwargs.get('rootTaskId') or 'ROOT'

    # Function call chain
    func_chain = kwargs.get('funcChain') or []
    func_chain.append(func_id)

    # Execution mode
    exec_mode = kwargs.get('execMode') or 'sync'

    # Start time
    start_time = int(time.time())
    start_time_ms = int(time.time() * 1000)

    # Trigger time
    trigger_time = kwargs.get('triggerTime') or start_time
    trigger_time_ms = kwargs.get('triggerTimeMs') or start_time_ms

    # HTTP request
    http_request = kwargs.get('httpRequest') or {}
    if 'headers' in http_request:
        http_request['headers'] = toolkit.IgnoreCaseDict(http_request['headers'])

    # Whether to cache the function result
    cache_result_expires = None

    # Whether to save the result
    save_result = kwargs.get('saveResult') or False

    # Function response, scope, log messages, trace info and error stack
    func_resp = None
    script_scope = None
    log_messages = None
    trace_info = None
    einfo_text = None
    edump_text = None

    # When the task is forcibly killed the except branch is never entered,
    # so the status defaults to "failure"
    end_status = 'failure'

    # Trim the task info cache (only before the root task)
    if root_task_id == 'ROOT':
        self.trim_task_info(origin, origin_id, exec_mode, task_info_limit)

    ### Task starts
    target_script = None
    try:
        global SCRIPT_DICT_CACHE

        # Update the script cache
        self.update_script_dict_cache()

        target_script = SCRIPT_DICT_CACHE.get(script_id)
        if not target_script:
            e = NotFoundException('Script `{}` not found'.format(script_id))
            raise e

        extra_vars = {
            '_DFF_DEBUG': False,
            '_DFF_TASK_ID': task_id,
            '_DFF_ROOT_TASK_ID': root_task_id,
            '_DFF_SCRIPT_SET_ID': script_set_id,
            '_DFF_SCRIPT_ID': script_id,
            '_DFF_FUNC_ID': func_id,
            '_DFF_FUNC_NAME': func_name,
            '_DFF_FUNC_CHAIN': func_chain,
            '_DFF_ORIGIN': origin,
            '_DFF_ORIGIN_ID': origin_id,
            '_DFF_EXEC_MODE': exec_mode,
            '_DFF_START_TIME': start_time,
            '_DFF_START_TIME_MS': start_time_ms,
            '_DFF_TRIGGER_TIME': trigger_time,
            '_DFF_TRIGGER_TIME_MS': trigger_time_ms,
            '_DFF_CRONTAB': kwargs.get('crontab'),
            '_DFF_CRONTAB_DELAY': kwargs.get('crontabDelay'),
            '_DFF_QUEUE': self.queue,
            '_DFF_WORKER_QUEUE': self.worker_queue,
            '_DFF_HTTP_REQUEST': http_request,
        }
        self.logger.info('[CREATE SAFE SCOPE] `{}`'.format(script_id))
        script_scope = self.create_safe_scope(script_name=script_id, script_dict=SCRIPT_DICT_CACHE, extra_vars=extra_vars)

        # Load the code
        self.logger.info('[LOAD SCRIPT] `{}`'.format(script_id))
        script_scope = self.safe_exec(target_script['codeObj'], globals=script_scope)

        # Run the script
        entry_func = script_scope.get(func_name)
        if not entry_func:
            e = NotFoundException('Function `{}` not found in `{}`'.format(func_name, script_id))
            raise e

        # Call the function
        self.logger.info('[RUN FUNC] `{}`'.format(func_id))
        func_resp = entry_func(**func_call_kwargs)

        if not isinstance(func_resp, BaseFuncResponse):
            func_resp = FuncResponse(func_resp)

        if isinstance(func_resp.data, Exception):
            raise func_resp.data

        # Fetch the function result cache config
        try:
            cache_result_expires = target_script['funcExtraConfig'][func_id]['cacheResult']
        except (KeyError, TypeError) as e:
            pass

        # Large-data responses are handled differently depending on whether
        # result caching is enabled
        if isinstance(func_resp, FuncResponseLargeData):
            if cache_result_expires is None:
                # Caching disabled: cache to a file the default way (auto-delete)
                func_resp.cache_to_file(auto_delete=True)
            else:
                # Caching enabled: cache to a file with the configured expiry
                func_resp.cache_to_file(auto_delete=False, cache_expires=cache_result_expires)

    except Exception as e:
        for line in traceback.format_exc().splitlines():
            self.logger.error(line)

        end_status = 'failure'

        self.logger.error('Error occurred in script. `{}`'.format(func_id))

        trace_info = self.get_trace_info()
        einfo_text = self.get_formated_einfo(trace_info, only_in_script=True)
        edump_text = trace_info.get('exceptionDump')

        raise

    else:
        end_status = 'success'

        # Prepare the function result
        func_result_raw = None
        func_result_repr = None
        func_result_json_dumps = None

        if func_resp.data:
            try:
                func_result_raw = func_resp.data
            except Exception as e:
                for line in traceback.format_exc().splitlines():
                    self.logger.error(line)

            try:
                func_result_repr = pprint.saferepr(func_resp.data)
            except Exception as e:
                for line in traceback.format_exc().splitlines():
                    self.logger.error(line)

            try:
                func_result_json_dumps = toolkit.json_dumps(func_resp.data)
            except Exception as e:
                for line in traceback.format_exc().splitlines():
                    self.logger.error(line)

        result = {
            'raw': func_result_raw,
            'repr': func_result_repr,
            'jsonDumps': func_result_json_dumps,
            '_responseControl': func_resp._create_response_control(),
        }

        # Save the function result
        if save_result:
            args = (
                self.request.id,
                self.name,
                self.request.origin,
                self.request.x_start_time,
                int(time.time()),
                self.request.args,
                self.request.kwargs,
                result,
                celery_status.SUCCESS,
                None)
            result_task_id = '{}-RESULT'.format(self.request.id)
            result_saving_task.apply_async(task_id=result_task_id, args=args)

        # Cache the function result
        if cache_result_expires:
            self.cache_func_result(
                func_id=func_id,
                script_code_md5=target_script['codeMD5'],
                script_publish_version=target_script['publishVersion'],
                func_call_kwargs_md5=func_call_kwargs_md5,
                result=result,
                cache_result_expires=cache_result_expires)

        # Return the result
        return result

    finally:
        # Release the Crontab lock
        lock_key = kwargs.get('lockKey')
        lock_value = kwargs.get('lockValue')
        if lock_key and lock_value:
            self.cache_db.unlock(lock_key, lock_value)

        # Record the script log output
        if script_scope:
            log_messages = script_scope['DFF'].log_messages or None

        # Record the failure info
        if end_status == 'failure':
            trace_info = trace_info or self.get_trace_info()
            einfo_text = einfo_text or self.get_formated_einfo(trace_info, only_in_script=True)

        # Record function running info
        self.cache_running_info(
            func_id=func_id,
            script_publish_version=target_script['publishVersion'],
            exec_mode=exec_mode,
            is_failed=(end_status == 'failure'),
            cost=int(time.time() * 1000) - start_time_ms)

        # Cache the task status
        self.cache_task_info(
            origin=origin,
            origin_id=origin_id,
            exec_mode=exec_mode,
            status=end_status,
            trigger_time_ms=trigger_time_ms,
            start_time_ms=start_time_ms,
            root_task_id=root_task_id,
            func_id=func_id,
            log_messages=log_messages,
            einfo_text=einfo_text,
            edump_text=edump_text)

        # Clean up resources
        self.clean_up()
def update_script_dict_cache(self):
    '''
    Update the local script dict cache

    Works together with ReloadScriptsTask to provide fast script loading:
    1. Check the cache timestamp in local memory; if not expired, return immediately
    2. Check the current script cache MD5 in Redis
      2.1. If unchanged, extend the cache lifetime and return
      2.2. If changed, fetch the script cache dump from Redis
    3. If Redis holds no script cache dump, load the data directly from the database
       (normally this never happens, since ReloadScriptsTask refreshes the Redis cache periodically)
    '''
    global SCRIPTS_CACHE_MD5
    global SCRIPT_DICT_CACHE

    cache_key_script_md5 = toolkit.get_cache_key('fixedCache', 'scriptsMD5')

    # 1. Check the Redis cache
    scripts_md5 = self.cache_db.get(cache_key_script_md5)
    if scripts_md5:
        scripts_md5 = six.ensure_str(scripts_md5)

    if scripts_md5 and scripts_md5 == SCRIPTS_CACHE_MD5:
        # Cache exists and the MD5 is unchanged: keep the local cache
        self.logger.debug('[SCRIPT CACHE] Not Modified, extend local cache')
        return

    # 2. No cache, or the cache MD5 changed: read the dump from Redis
    scripts = None

    cache_key_script_dump = toolkit.get_cache_key('fixedCache', 'scriptsDump')
    scripts_dump = self.cache_db.get(cache_key_script_dump)
    if scripts_dump:
        self.logger.debug('[SCRIPT CACHE] Modified, Use Redis cache')

        scripts_dump = six.ensure_str(scripts_dump)
        try:
            scripts = toolkit.json_loads(scripts_dump)
        except Exception as e:
            pass

        if not scripts_md5:
            # No cached MD5: compute it ourselves (rare)
            scripts_md5 = toolkit.get_md5(scripts_dump)

        # Record the cache MD5
        SCRIPTS_CACHE_MD5 = scripts_md5

    # 3. Failed to read the dump from Redis: load the full user scripts from the database
    if not scripts or not scripts_dump:
        self.logger.warning('[SCRIPT CACHE] Cache failed! Use DB data')

        scripts = self.get_scripts()

        # Compute and record the cache MD5 ourselves
        scripts_dump = toolkit.json_dumps(scripts, sort_keys=True)
        SCRIPTS_CACHE_MD5 = toolkit.get_md5(scripts_dump)

    # Store in the local cache
    SCRIPT_DICT_CACHE = self.create_script_dict(scripts)
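# A standalone sketch of the MD5 handshake between _cache_scripts (writer) and
# update_script_dict_cache (reader): the reader only pays for the full dump when
# the cheap MD5 probe changes. The literal keys and the plain redis-py client are
# assumptions; the real keys come from toolkit.get_cache_key:
import hashlib
import json

local_md5 = None  # module-level cache, like SCRIPTS_CACHE_MD5

def fetch_scripts_if_modified(client):
    global local_md5

    remote_md5 = client.get('fixedCache.scriptsMD5')
    if remote_md5 is not None:
        remote_md5 = remote_md5.decode('utf-8')

    if remote_md5 and remote_md5 == local_md5:
        return None  # not modified: keep the local cache

    dump = client.get('fixedCache.scriptsDump')
    if dump is None:
        return None  # fall back to the database in the real code

    local_md5 = remote_md5 or hashlib.md5(dump).hexdigest()
    return json.loads(dump)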
def dump_for_json(self, val):
    '''
    Dump value to a JSON string
    '''
    return toolkit.json_dumps(val)
def cache_task_info(self, origin, origin_id, exec_mode, status, trigger_time_ms, start_time_ms,
                    root_task_id=None, func_id=None, log_messages=None, einfo_text=None, edump_text=None):
    if not self.is_support_task_info(origin, origin_id, exec_mode):
        return

    # Truncate and compress the log / error text
    data = {
        'id': self.request.id,
        'rootTaskId': root_task_id,
        'funcId': func_id,
        'execMode': exec_mode,
        'status': status,
        'triggerTimeMs': trigger_time_ms,
        'startTimeMs': start_time_ms,
        'endTimeMs': int(time.time() * 1000),
    }

    if log_messages:
        log_messages_reduced = [
            toolkit.limit_text(l,
                               CONFIG['_TASK_INFO_LOG_MESSAGE_LINE_LIMIT'],
                               show_length=True,
                               length_title='Line length')
            for l in log_messages
        ]
        data['logMessageTEXT'] = '\n'.join(log_messages_reduced).strip()

        log_message_text_len = len(data['logMessageTEXT'])
        if log_message_text_len > CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_HEAD'] + CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_TAIL']:
            data['logMessageTEXT'] = '\n'.join([
                f"!!! Log content too long, only FIRST {CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_HEAD']} chars and LAST {CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_TAIL']} are saved !!!",
                '',
                data['logMessageTEXT'][:CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_HEAD']] + '...',
                '',
                f"<skipped {log_message_text_len - CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_HEAD'] - CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_TAIL']} chars>",
                '',
                '...' + data['logMessageTEXT'][-CONFIG['_TASK_INFO_LOG_MESSAGE_TOTAL_LIMIT_TAIL']:],
            ])

    if einfo_text:
        data['einfoTEXT'] = einfo_text
    if edump_text:
        data['edumpTEXT'] = edump_text

    data = toolkit.json_dumps(data, indent=0)

    log_bin = io.BytesIO()
    log_zip = zipfile.ZipFile(log_bin, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9)
    log_zip.writestr('task-info.log', data)
    log_zip.close()

    log_b64 = toolkit.get_base64(log_bin.getvalue())

    cache_key = toolkit.get_cache_key('syncCache', 'taskInfo', tags=['originId', origin_id])
    self.cache_db.run('lpush', cache_key, log_b64)
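# A standalone round-trip sketch of the encoding above: deflate-zip the JSON
# blob, base64 it for transport, then reverse both steps on the reader side
# (function names are illustrative):
import base64
import io
import zipfile

def pack_task_info(data):
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, 'w', compression=zipfile.ZIP_DEFLATED, compresslevel=9) as z:
        z.writestr('task-info.log', data)

    return base64.b64encode(buf.getvalue()).decode('ascii')

def unpack_task_info(b64):
    buf = io.BytesIO(base64.b64decode(b64))
    with zipfile.ZipFile(buf, 'r') as z:
        return z.read('task-info.log').decode('utf-8')

assert unpack_task_info(pack_task_info('{"id": "demo"}')) == '{"id": "demo"}'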