# plugin_file: reports existence, size, and modification status for each
# configured file path. Python 2 source (uses long). CacheProxy, ConfigClient
# and time_util are local agent helpers; a minimal sketch of the assumed
# CacheProxy interface follows this function.
import json
import os
import sys


def main(argv):
    if len(argv) < 4:
        print("error: parameters missing")
        return
    cache = CacheProxy('plugin_file')
    last_config_version = cache.get('version')
    config_version = int(argv[2])
    host_id = argv[3]
    if last_config_version is None or config_version != last_config_version:
        # Config version changed (or first run): fetch the file list anew.
        config_client = ConfigClient()
        current_version, file_list = config_client.get_user_config(
            'plugin_file', host_id)
        if file_list is not None:
            cache.set('file_list', file_list)
            cache.set('version', current_version)
    else:
        file_list = cache.get('file_list')
    if file_list:
        ntp_checked, timestamp = time_util.get_ntp_time()
        out_list = []
        for path in file_list:
            dimensions = {'path': path}
            metrics = {}
            if os.path.isfile(path):
                metrics['file_exist'] = 1
                last_modify_time = long(os.stat(path).st_mtime * 1000)
                metrics['last_modify_time'] = last_modify_time
                # Compare against the mtime recorded on the previous run.
                last_modify_time_record = cache.get('lmt_' + path)
                if last_modify_time_record is None:
                    metrics['file_modified'] = 0
                    cache.set('lmt_' + path, last_modify_time)
                elif last_modify_time != last_modify_time_record:
                    metrics['file_modified'] = 1
                    cache.set('lmt_' + path, last_modify_time)
                else:
                    metrics['file_modified'] = 0
                metrics['file_size'] = os.path.getsize(path)
            else:
                metrics['file_exist'] = 0
                # metrics['last_modify_time'] = 0
                # metrics['size'] = 0
            out = {
                'dimensions': dimensions,
                'metrics': metrics,
                'timestamp': timestamp,
                'ntp_checked': ntp_checked
            }
            out_list.append(out)
        print(json.dumps(out_list))
        sys.stdout.flush()
    cache.close()
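# --- Illustration (not part of the original source) -------------------------
# These plugins assume a CacheProxy with get/set/delete/close and a
# counter_to_gauge helper. This is a minimal in-memory sketch of that assumed
# interface; the real class (not shown) must persist between plugin runs,
# e.g. on disk, for the version and offset bookkeeping above to work.

_STORE = {}


class CacheProxy(object):
    def __init__(self, namespace):
        self.namespace = namespace

    def _key(self, key):
        return '%s:%s' % (self.namespace, key)

    def get(self, key):
        return _STORE.get(self._key(key))

    def set(self, key, value):
        _STORE[self._key(key)] = value

    def delete(self, key):
        _STORE.pop(self._key(key), None)

    def counter_to_gauge(self, key, value):
        # Turn a monotonically increasing counter into a per-interval delta:
        # store the raw reading and return the difference from the previous
        # one, or None on the first observation or after a counter reset
        # (callers above explicitly handle None).
        previous = self.get(key)
        self.set(key, value)
        if previous is None or value < previous:
            return None
        return value - previous

    def close(self):
        pass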
# Refresh-only variant: updates the cached process list when the pushed
# config version changes; metric collection happens in the full plugin below.
def main(argv):
    if len(argv) < 4:
        print("error: parameters missing")
        return
    cache = CacheProxy('plugin_process')
    last_config_version = cache.get('version')
    config_version = int(argv[2])
    host_id = argv[3]
    if last_config_version is None or config_version != last_config_version:
        config_client = ConfigClient()
        current_version, config_process_list = config_client.get_user_config(
            'plugin_process', host_id)
        if config_process_list is not None:
            cache.set('process_list', config_process_list)
            cache.set('version', current_version)
    cache.close()
# Records and reports agent-side errors via the cache. READER_ERR_KEY,
# SENDER_ERR_KEY and OTHER_ERR_KEY are module-level cache-key constants
# defined elsewhere in this module.
import json
import time
import uuid


class ErrorReport(object):
    def __init__(self):
        self.cache = CacheProxy('err_cache')

    def __del__(self):
        self.cache.close()

    def get_report_id(self):
        # Lazily create a stable, unique report id for this host.
        report_id = self.cache.get('report_id')
        if report_id is None:
            report_id = str(uuid.uuid1())
            self.cache.set('report_id', report_id)
        return report_id

    def record_err_info(self, err_name, err):
        err_info = {err_name: err, 'timestamp': int(time.time())}
        self.cache.set(err_name, json.dumps(err_info))

    def pop_err_info(self, err_name):
        # Read-and-delete: each recorded error is reported at most once.
        err_info_str = self.cache.get(err_name)
        self.cache.delete(err_name)
        if err_info_str is None:
            return None
        return json.loads(err_info_str)

    def set_report_enabled(self, enable):
        self.cache.set('report_enabled', 1 if enable else 0)

    def get_report_enabled(self):
        enabled = self.cache.get('report_enabled')
        if enabled is None:
            return False
        return bool(enabled)

    def record_reader_err(self, err):
        self.record_err_info(READER_ERR_KEY, err)

    def record_sender_err(self, err):
        self.record_err_info(SENDER_ERR_KEY, err)

    def record_other_err(self, err):
        self.record_err_info(OTHER_ERR_KEY, err)

    def pop_reader_err(self):
        return self.pop_err_info(READER_ERR_KEY)

    def pop_sender_err(self):
        return self.pop_err_info(SENDER_ERR_KEY)

    def pop_other_err(self):
        return self.pop_err_info(OTHER_ERR_KEY)
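# --- Illustration (not part of the original source) -------------------------
# Typical round trip with the class above; 'connection refused' is only a
# placeholder payload. pop_* returns the recorded dict once, then None.
reporter = ErrorReport()
reporter.set_report_enabled(True)
reporter.record_sender_err('connection refused')
if reporter.get_report_enabled():
    err_info = reporter.pop_sender_err()
    # err_info == {SENDER_ERR_KEY: 'connection refused', 'timestamp': ...}
    assert reporter.pop_sender_err() is None  # consumed on the first pop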
# custom: tails rotated log files under each configured directory, resuming
# from a cached byte offset per file. get_configs, check_valid, get_log_list,
# get_latest_log and parse_line are local helpers (not shown here).
import json
import os
import sys
import time


def main(argv):
    config = get_configs()
    section = 'custom_path'
    if not config.has_section(section):
        return
    cache = CacheProxy('custom')
    options = config.options(section)
    # Maximum accepted lag in milliseconds; the config key is spelled
    # 'dely_limit' in the config schema.
    delay_limit = config.getint('custom_config', 'dely_limit')
    out_list = []
    ntp_checked, timestamp = time_util.get_ntp_time()
    for key in options:
        dir_path = config.get(section, key)
        if not check_valid(dir_path):
            continue
        key_out = {'data': [], 'source_key': key}
        log_list = get_log_list(dir_path)
        log_record = cache.get(key)
        for log in log_list:
            log_path = '%s/%s' % (dir_path, log)
            if log_record and log < log_record:
                # Older than the last file we recorded: already consumed.
                os.remove(log_path)
                continue
            cache.set(key, log)
            if os.path.isfile(log_path) and os.access(log_path, os.R_OK):
                delete = False
                with open(log_path) as f:
                    offset_key = '%s-%s' % (key, log)
                    offset = cache.get(offset_key)
                    if offset:
                        f.seek(offset)
                    else:
                        offset = 0
                    while True:
                        line = f.readline()
                        if line:
                            # Advance by the raw byte length of the line.
                            offset += len(line)
                            cache.set(offset_key, offset)
                            line_dict = parse_line(line)
                            if line_dict:
                                if ('timestamp' in line_dict and
                                        line_dict['timestamp'] <
                                        long(time.time() * 1000) - delay_limit):
                                    pass  # stale beyond delay_limit: drop it
                                else:
                                    data = {
                                        'dimensions': {},
                                        'metrics': line_dict,
                                        'timestamp': timestamp,
                                        'ntp_checked': ntp_checked
                                    }
                                    key_out['data'].append(data)
                        else:
                            # EOF: drop every fully read log except the newest.
                            if log_path != get_latest_log(dir_path):
                                cache.delete(offset_key)
                                delete = True
                            break
                if delete:
                    os.remove(log_path)
        if key_out['data']:
            out_list.append(key_out)
    cache.close()
    if out_list:
        print(json.dumps(out_list))
        sys.stdout.flush()
# plugin_process (Linux): matches /proc processes against configured name
# patterns and reports per-pattern CPU, memory and thread aggregates.
# get_cpu_total_jiffies, get_total_mem, get_pids and time_util are local
# helpers (not shown here).
import json
import os
import resource
import sys
from fnmatch import fnmatch


def main(argv):
    if len(argv) < 4:
        print("error: parameters missing")
        return
    cache = CacheProxy('plugin_process')
    last_config_version = cache.get('version')
    config_version = int(argv[2])
    host_id = argv[3]
    if last_config_version is None or config_version != last_config_version:
        config_client = ConfigClient()
        current_version, config_process_list = config_client.get_user_config(
            'plugin_process', host_id)
        if config_process_list is not None:
            cache.set('process_list', config_process_list)
            cache.set('version', current_version)
    else:
        config_process_list = cache.get('process_list')
    if not config_process_list:
        cache.close()
        return
    ntp_checked, timestamp = time_util.get_ntp_time()
    cpu_total_jiffies = cache.counter_to_gauge('cpu_total_jiffies',
                                               get_cpu_total_jiffies())
    total_mem = get_total_mem()
    pids = get_pids()
    process_info_list = []
    page_size = resource.getpagesize()
    for pid in pids:
        stat_path = '/proc/%d/stat' % pid
        if not os.path.isfile(stat_path):
            continue
        try:
            with open(stat_path, 'r') as f_stat:
                line = f_stat.readline()
            values = line.split()
            if len(values) < 24:
                continue
            name = values[1][1:-1]  # comm field is wrapped in parentheses
            # Prefer the full command line as the process name when readable.
            cmdline_path = '/proc/%d/cmdline' % pid
            if os.path.isfile(cmdline_path) and os.access(cmdline_path,
                                                          os.R_OK):
                with open(cmdline_path, 'r') as f_cmd:
                    cmdline = f_cmd.readline().replace('\0', ' ').strip()
                if cmdline:
                    name = cmdline
            for p in config_process_list:
                if fnmatch(name, p):
                    process_info = {'pid': pid, 'name': name, 'match': p}
                    status = values[2]
                    ppid = values[3]
                    process_info['parent_pid'] = ppid
                    process_info['proc_stat_cd'] = status
                    # utime + stime, as a delta since the previous run.
                    used_cpu_jiff = cache.counter_to_gauge(
                        'used_cpu_jiff_%d' % pid,
                        long(values[13]) + long(values[14]))
                    if used_cpu_jiff is None or cpu_total_jiffies is None:
                        cpu_usert = 0.0
                    else:
                        cpu_usert = used_cpu_jiff * 100.0 / cpu_total_jiffies
                    mem = long(values[23]) * page_size  # rss pages -> bytes
                    if total_mem is None:
                        mem_usert = 0.0
                    else:
                        mem_usert = mem * 100.0 / total_mem / 1024
                    vir_mem = float(values[22]) / 1024.0  # vsize -> KB
                    thread_num = int(values[19])
                    process_info['cpu_usert'] = cpu_usert
                    process_info['mem_usert'] = mem_usert
                    process_info['mem_byt_cnt'] = vir_mem
                    process_info['thd_cnt'] = thread_num
                    process_info_list.append(process_info)
        except Exception:
            pass
    out_list = []
    for p in config_process_list:
        pid_list = []
        process_count = 0
        tot_cpu_usert = 0.0
        tot_mem_usert = 0.0
        tot_mem_byt_cnt = 0.0
        tot_thd_cnt = 0
        for process_info in process_info_list:
            if process_info['match'] == p:
                process_count += 1
                tot_cpu_usert += process_info['cpu_usert']
                tot_mem_usert += process_info['mem_usert']
                tot_mem_byt_cnt += process_info['mem_byt_cnt']
                tot_thd_cnt += process_info['thd_cnt']
                pid_list.append(process_info['pid'])
        dimensions = {'proc_name': p}
        # Liveness check: any previously recorded pid that disappeared marks
        # the process group as down (likely restarted).
        pid_list_record = cache.get('pip_list_record_' + p)
        cache.set('pip_list_record_' + p, pid_list)
        if not pid_list_record:
            is_process_up = 1 if pid_list else 0
        else:
            is_process_up = 1
            for pid in pid_list_record:
                if pid not in pid_list:
                    is_process_up = 0
                    break
        if process_count == 0:
            metrics = {
                'is_process_up': is_process_up,
                'process_count': process_count,
                'avg_cpu_usert': 0.0,
                'avg_mem_usert': 0.0,
                'avg_mem_byt_cnt': 0.0,
                'avg_thd_cnt': 0,
                'tot_cpu_usert': tot_cpu_usert,
                'tot_mem_usert': tot_mem_usert,
                'tot_mem_byt_cnt': tot_mem_byt_cnt,
                'tot_thd_cnt': tot_thd_cnt
            }
        else:
            metrics = {
                'is_process_up': is_process_up,
                'process_count': process_count,
                'avg_cpu_usert': tot_cpu_usert / process_count,
                'avg_mem_usert': tot_mem_usert / process_count,
                'avg_mem_byt_cnt': tot_mem_byt_cnt / process_count,
                'avg_thd_cnt': tot_thd_cnt / float(process_count),
                'tot_cpu_usert': tot_cpu_usert,
                'tot_mem_usert': tot_mem_usert,
                'tot_mem_byt_cnt': tot_mem_byt_cnt,
                'tot_thd_cnt': tot_thd_cnt
            }
        out = {
            'dimensions': dimensions,
            'metrics': metrics,
            'timestamp': timestamp,
            'ntp_checked': ntp_checked
        }
        out_list.append(out)
    print(json.dumps(out_list))
    sys.stdout.flush()
    cache.close()
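# --- Illustration (not part of the original source) -------------------------
# Hypothetical launch convention, inferred from the argument checks above:
# the agent passes sys.argv with the pushed config version at argv[2] and the
# host id at argv[3]; argv[1] is unused by the plugins shown here.
if __name__ == '__main__':
    main(sys.argv)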
# plugin_process (Windows): reads per-process PDH counters collected into the
# global query handle hq and counter-handle map counter_dict (built
# elsewhere), then emits a top-10 list, a host summary, and the same
# per-pattern aggregates as the Linux plugin. get_cpu_core_count, get_ntp_time
# and top10_check_insert are local helpers (not shown here).
import json
import sys
from fnmatch import fnmatch

import psutil
import win32pdh


def query():
    global hq
    global counter_dict
    win32pdh.CollectQueryData(hq)
    proc_cnt = 0
    cpu_cnt = get_cpu_core_count()
    out_list = []
    top10 = []
    proc_mem_usert = 0.0
    cache = CacheProxy('plugin_process')
    config_process_list = cache.get('process_list')
    process_info_list = []
    ntp_checked, timestamp = get_ntp_time()
    for instance_with_id in counter_dict:
        instance = instance_with_id.split('#')[0]
        dimensions = {'proc_nm': instance}
        metrics = {}
        try:
            _, proc_id = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['proc_id'],
                win32pdh.PDH_FMT_LONG)
            metrics['proc_id'] = proc_id
            p = psutil.Process(pid=proc_id)
            dimensions['proc_nm'] = p.name()
            metrics['proc_stat_cd'] = p.status()
        except Exception:
            pass
        process_info = None
        if config_process_list:
            for p in config_process_list:
                if fnmatch(dimensions['proc_nm'], p):
                    # proc_id may be absent if the counter read above failed.
                    process_info = {'pid': metrics.get('proc_id'),
                                    'name': dimensions['proc_nm'],
                                    'match': p}
        try:
            _, cpu_usert = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['cpu_usert'],
                win32pdh.PDH_FMT_DOUBLE)
            # The raw counter spans all cores; normalize to a 0-100 range.
            if cpu_cnt is not None and cpu_cnt > 0:
                metrics['cpu_usert'] = cpu_usert / cpu_cnt
            else:
                metrics['cpu_usert'] = cpu_usert
        except Exception:
            pass
        try:
            _, cpu_tm_ss = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['cpu_tm_ss'],
                win32pdh.PDH_FMT_LONG)
            metrics['cpu_tm_ss'] = cpu_tm_ss
        except Exception:
            pass
        try:
            _, pf = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['pf'],
                win32pdh.PDH_FMT_DOUBLE)
            metrics['pf'] = pf
        except Exception:
            pass
        try:
            _, prit_rnk = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['prit_rnk'],
                win32pdh.PDH_FMT_LONG)
            metrics['prit_rnk'] = prit_rnk
        except Exception:
            pass
        try:
            _, thd_cnt = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['thd_cnt'],
                win32pdh.PDH_FMT_LONG)
            metrics['thd_cnt'] = thd_cnt
        except Exception:
            pass
        try:
            _, vir_mem_byt_cnt = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['vir_mem_byt_cnt'],
                win32pdh.PDH_FMT_LONG)
            metrics['vir_mem_byt_cnt'] = vir_mem_byt_cnt
            metrics['p_proc_mem_usert'] = (
                vir_mem_byt_cnt * 100.0 / psutil.virtual_memory().total)
            proc_mem_usert += metrics['p_proc_mem_usert']
        except Exception:
            pass
        try:
            _, parent_pid = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['parent_pid'],
                win32pdh.PDH_FMT_LONG)
            metrics['parent_pid'] = parent_pid
        except Exception:
            pass
        try:
            _, svc_tm = win32pdh.GetFormattedCounterValue(
                counter_dict[instance_with_id]['svc_tm'],
                win32pdh.PDH_FMT_DOUBLE)
            metrics['svc_tm'] = svc_tm * 1000  # convert to milliseconds
        except Exception:
            pass
        proc_cnt += 1
        if metrics:
            metrics['proc_cpu_cnt'] = cpu_cnt
            out = {'dimensions': dimensions, 'metrics': metrics,
                   'timestamp': timestamp, 'ntp_checked': ntp_checked}
            top10_check_insert(top10, out)
            if process_info is not None:
                process_info['cpu_usert'] = metrics.get('cpu_usert', 0.0)
                process_info['mem_usert'] = metrics.get('p_proc_mem_usert',
                                                        0.0)
                process_info['mem_byt_cnt'] = metrics.get('vir_mem_byt_cnt',
                                                          0.0)
                process_info['thd_cnt'] = metrics.get('thd_cnt', 0)
                process_info_list.append(process_info)
    out_list.extend(top10)
    # Host-level summary record.
    out = {'dimensions': {'schema_type': 'svr'},
           'metrics': {'proc_cnt': proc_cnt,
                       'proc_mem_usert': proc_mem_usert},
           'timestamp': timestamp,
           'ntp_checked': ntp_checked}
    out_list.append(out)
    if config_process_list:
        for p in config_process_list:
            pid_list = []
            process_count = 0
            tot_cpu_usert = 0.0
            tot_mem_usert = 0.0
            tot_mem_byt_cnt = 0.0
            tot_thd_cnt = 0
            for process_info in process_info_list:
                if process_info['match'] == p:
                    process_count += 1
                    tot_cpu_usert += process_info['cpu_usert']
                    tot_mem_usert += process_info['mem_usert']
                    tot_mem_byt_cnt += process_info['mem_byt_cnt']
                    tot_thd_cnt += process_info['thd_cnt']
                    pid_list.append(process_info['pid'])
            dimensions = {
                'proc_name': p,
                'schema_type': 'plugin_process'
            }
            # Same liveness check as the Linux plugin: any previously seen
            # pid that disappeared marks the process group as down.
            pid_list_record = cache.get('pip_list_record_' + p)
            cache.set('pip_list_record_' + p, pid_list)
            if not pid_list_record:
                is_process_up = 1 if pid_list else 0
            else:
                is_process_up = 1
                for pid in pid_list_record:
                    if pid not in pid_list:
                        is_process_up = 0
                        break
            if process_count == 0:
                metrics = {
                    'is_process_up': is_process_up,
                    'process_count': process_count,
                    'avg_cpu_usert': 0.0,
                    'avg_mem_usert': 0.0,
                    'avg_mem_byt_cnt': 0.0,
                    'avg_thd_cnt': 0,
                    'tot_cpu_usert': tot_cpu_usert,
                    'tot_mem_usert': tot_mem_usert,
                    'tot_mem_byt_cnt': tot_mem_byt_cnt,
                    'tot_thd_cnt': tot_thd_cnt
                }
            else:
                metrics = {
                    'is_process_up': is_process_up,
                    'process_count': process_count,
                    'avg_cpu_usert': tot_cpu_usert / process_count,
                    'avg_mem_usert': tot_mem_usert / process_count,
                    'avg_mem_byt_cnt': tot_mem_byt_cnt / process_count,
                    'avg_thd_cnt': tot_thd_cnt / float(process_count),
                    'tot_cpu_usert': tot_cpu_usert,
                    'tot_mem_usert': tot_mem_usert,
                    'tot_mem_byt_cnt': tot_mem_byt_cnt,
                    'tot_thd_cnt': tot_thd_cnt
                }
            plugin_process_out = {
                'dimensions': dimensions,
                'metrics': metrics,
                'timestamp': timestamp,
                'ntp_checked': ntp_checked
            }
            out_list.append(plugin_process_out)
    print(json.dumps(out_list))
    sys.stdout.flush()
    cache.close()