def get_policies(sql_filter, sql_order, sql_limit):
    """Query audit policies with optional filtering and paging.

    @param sql_filter: dict; supported keys: 'search' (fuzzy match on
                       name/desc) and 'state'.
    @param sql_order: unused; results are always ordered by rank.
    @param sql_limit: dict with 'page_index' and 'per_page', or empty.
    @return: (error-code, total-count, page-index, records)
    """
    s = SQL(get_db())
    s.select_from('audit_policy', ['id', 'rank', 'name', 'desc', 'state'], alt_name='p')

    _where = list()

    if len(sql_filter) > 0:
        for k in sql_filter:
            if k == 'search':
                # NOTE(review): filter value is interpolated into the SQL text;
                # if it can come from user input this is injectable — consider
                # parameterized queries.
                _where.append('(p.name LIKE "%{filter}%" OR p.desc LIKE "%{filter}%")'.format(filter=sql_filter[k]))
            elif k == 'state':
                # fix: was a second `if`, so a 'search' key fell through to the
                # unknown-field branch below and aborted with TPE_PARAM
                _where.append('p.state={}'.format(sql_filter[k]))
            else:
                log.e('unknown filter field: {}\n'.format(k))
                return TPE_PARAM, s.total_count, 0, s.recorder

    if len(_where) > 0:
        s.where('( {} )'.format(' AND '.join(_where)))

    s.order_by('p.rank', True)

    if len(sql_limit) > 0:
        s.limit(sql_limit['page_index'], sql_limit['per_page'])

    err = s.query()
    return err, s.total_count, s.page_index, s.recorder
def init(self):
    """Initialize host-alive (PING) checking.

    Creates a raw ICMP socket (requires root), registers every known host
    (or its router IP) for PING checks, starts the thread that receives
    ping replies, and schedules the periodic alive-check job.

    @return: True on success (or when checking is disabled), False if the
             raw socket cannot be created.
    """
    # Host-alive checking can be disabled entirely via configuration.
    if not tp_cfg().common.check_host_alive:
        return True

    icmp_protocol = socket.getprotobyname('icmp')
    try:
        # A raw ICMP socket needs root privilege.
        self._socket_ping = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp_protocol)
    except PermissionError:
        print('To use PING to check host state, must run as root.')
        log.e('To use PING to check host state, must run as root.\n')
        return False

    # Load all host IPs.
    hosts = host.get_all_hosts_for_check_state()
    for h in hosts:
        # When a host sits behind a router, ping the router address instead.
        if h['router_ip'] != '':
            self.add_host(h['router_ip'], HostAlive.METHOD_PING)
        else:
            self.add_host(h['ip'], HostAlive.METHOD_PING)

    self._thread_recv_ping_result = threading.Thread(target=self._thread_func_recv_ping_result)
    self._thread_recv_ping_result.start()

    tp_cron().add_job('host_check_alive', self._check_alive, first_interval_seconds=10, interval_seconds=HostAlive.PING_INTERVAL)

    # for test:
    # tp_cron().add_job('host_show_alive', self._show_alive, first_interval_seconds=20, interval_seconds=HostAlive.PING_INTERVAL)

    return True
def do_create_and_init(self, sysadmin, email, password):
    """Create all database tables and write the built-in data.

    @param sysadmin: string, login name of the initial system administrator.
    @param email: string, email of the initial administrator.
    @param password: string, password of the initial administrator.
    @return: True on success, False on any failure.
    """
    try:
        self._create_config()
        self._create_core_server()
        self._create_role()
        self._create_user()
        self._create_user_rpt()
        self._create_host()
        self._create_acc()
        self._create_acc_auth()
        self._create_group()
        self._create_group_map()
        self._create_ops_policy()
        self._create_ops_auz()
        self._create_ops_map()
        self._create_audit_policy()
        self._create_audit_auz()
        self._create_audit_map()
        self._create_syslog()
        self._create_record()
        self._create_record_audit()
        self._make_builtin_data(sysadmin, email, password)
    except Exception as e:
        # fix: was a bare `except:` that also caught SystemExit/KeyboardInterrupt
        # and discarded the actual failure reason
        log.e('[db] can not create and initialize database: {}\n'.format(e.__str__()))
        return False
    return True
def post(self):
    """Handle a batch action (lock / unlock / remove) on ops policies."""
    if self.check_privilege(TP_PRIVILEGE_OPS_AUZ) != TPE_OK:
        return

    args = self.get_argument('args', None)
    if args is None:
        return self.write_json(TPE_PARAM)
    try:
        args = json.loads(args)
    except:
        return self.write_json(TPE_JSON_FORMAT)

    try:
        action = args['action']
        p_ids = args['policy_ids']
    except:
        log.e('\n')
        return self.write_json(TPE_PARAM)

    if action == 'lock':
        return self.write_json(ops.update_policies_state(self, p_ids, TP_STATE_DISABLED))
    if action == 'unlock':
        return self.write_json(ops.update_policies_state(self, p_ids, TP_STATE_NORMAL))
    if action == 'remove':
        return self.write_json(ops.remove_policies(self, p_ids))
    return self.write_json(TPE_PARAM)
def post(self):
    """Move an ops policy to a new rank within a given rank range."""
    if self.check_privilege(TP_PRIVILEGE_OPS_AUZ) != TPE_OK:
        return

    args = self.get_argument('args', None)
    if args is None:
        return self.write_json(TPE_PARAM)
    try:
        args = json.loads(args)
    except:
        return self.write_json(TPE_JSON_FORMAT)

    try:
        pid = int(args['pid'])
        new_rank = int(args['new_rank'])
        start_rank = int(args['start_rank'])
        end_rank = int(args['end_rank'])
        direct = int(args['direct'])
    except:
        log.e('\n')
        return self.write_json(TPE_PARAM)

    # translate the numeric direction into the SQL offset expression
    if direct == -1:
        direction = '-1'
    elif direct == 1:
        direction = '+1'
    else:
        return self.write_json(TPE_PARAM)

    self.write_json(ops.rank_reorder(self, pid, new_rank, start_rank, end_rank, direction))
def is_table_exists(self, table_name):
    """Check whether the given table exists.

    @param table_name: string
    @return: None on error, otherwise True/False
    """
    if self.db_type == self.DB_TYPE_SQLITE:
        rows = self.query('SELECT COUNT(*) FROM `sqlite_master` WHERE `type`="table" AND `name`="{}";'.format(table_name))
        if rows is None:
            return None
        return bool(rows) and rows[0][0] != 0
    elif self.db_type == self.DB_TYPE_MYSQL:
        rows = self.query('SELECT TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA="{}" and TABLE_NAME="{}";'.format(self.mysql_db, table_name))
        if rows is None:
            return None
        return len(rows) > 0
    else:
        log.e('Unknown database type.\n')
        return None
def add_host(self, host_ip, method=0, param=None, check_now=False):
    """Register (or update) a host for alive-state checking.

    Only the PING method is supported for now.  When check_now is set the
    host is pinged immediately instead of waiting for the next cycle.
    """
    if not tp_cfg().common.check_host_alive:
        return True

    if param is None:
        param = {}

    # now we support PING only
    if method != HostAlive.METHOD_PING:
        log.e('Unknown method for check host state: {}\n'.format(method))
        return False

    with self._lock:
        state = self._states.get(host_ip)
        if state is None:
            self._states[host_ip] = {
                'last_online': 0,
                'last_check': 0,
                'method': method,
                'param': param
            }
        else:
            state['method'] = method
            state['param'] = param

    if check_now:
        if method == HostAlive.METHOD_PING:
            self._ping(host_ip)
        else:
            log.w('Warning: check alive method not implement.\n')
def get_policies(sql_filter, sql_order, sql_limit):
    """Query audit policies with optional filtering and paging.

    @param sql_filter: dict; supported keys: 'search' (fuzzy match on
                       name/desc) and 'state'.
    @param sql_order: unused; results are always ordered by rank.
    @param sql_limit: dict with 'page_index' and 'per_page', or empty.
    @return: (error-code, total-count, page-index, records)
    """
    s = SQL(get_db())
    s.select_from('audit_policy', ['id', 'rank', 'name', 'desc', 'state'], alt_name='p')

    _where = list()

    if len(sql_filter) > 0:
        for k in sql_filter:
            if k == 'search':
                # NOTE(review): filter value is interpolated into the SQL text;
                # if it can come from user input this is injectable — consider
                # parameterized queries.
                _where.append('(p.name LIKE "%{filter}%" OR p.desc LIKE "%{filter}%")'.format(filter=sql_filter[k]))
            elif k == 'state':
                # fix: was a second `if`, so a 'search' key fell through to the
                # unknown-field branch below and aborted with TPE_PARAM
                _where.append('p.state={}'.format(sql_filter[k]))
            else:
                log.e('unknown filter field: {}\n'.format(k))
                return TPE_PARAM, s.total_count, 0, s.recorder

    if len(_where) > 0:
        s.where('( {} )'.format(' AND '.join(_where)))

    s.order_by('p.rank', True)

    if len(sql_limit) > 0:
        s.limit(sql_limit['page_index'], sql_limit['per_page'])

    err = s.query()
    return err, s.total_count, s.page_index, s.recorder
def post(self):
    """Create a new ops policy (id == -1) or update an existing one."""
    if self.check_privilege(TP_PRIVILEGE_OPS_AUZ) != TPE_OK:
        return

    args = self.get_argument('args', None)
    if args is None:
        return self.write_json(TPE_PARAM)
    try:
        args = json.loads(args)
    except:
        return self.write_json(TPE_JSON_FORMAT)

    try:
        args['id'] = int(args['id'])
        args['name'] = args['name'].strip()
        args['desc'] = args['desc'].strip()
    except:
        log.e('\n')
        return self.write_json(TPE_PARAM)

    if not args['name']:
        return self.write_json(TPE_PARAM)

    if args['id'] == -1:
        err, info = ops.create_policy(self, args)
    else:
        err = ops.update_policy(self, args)
        info = {}
    self.write_json(err, data=info)
def is_table_exists(self, table_name):
    """Check whether the given table exists.

    @param table_name: string
    @return: None on error, otherwise True/False
    """
    if self.db_type == self.DB_TYPE_SQLITE:
        rows = self.query('SELECT COUNT(*) FROM `sqlite_master` WHERE `type`="table" AND `name`="{}";'.format(table_name))
        if rows is None:
            return None
        return bool(rows) and rows[0][0] != 0
    elif self.db_type == self.DB_TYPE_MYSQL:
        rows = self.query('SELECT TABLE_NAME from INFORMATION_SCHEMA.TABLES where TABLE_SCHEMA="{}" and TABLE_NAME="{}";'.format(self.mysql_db, table_name))
        if rows is None:
            return None
        return len(rows) > 0
    else:
        log.e('Unknown database type.\n')
        return None
def alter_table(self, table_names, field_names=None): """ 修改表名称及字段名称 table_name: 如果是string,则指定要操作的表,如果是list,则第一个元素是要操作的表,第二个元素是此表改名的目标名称 fields_names: 如果为None,则不修改字段名,否则应该是一个list,其中每个元素是包含两个str的list,表示将此list第一个指定的字段改名为第二个指定的名称 @return: None or Boolean """ # TODO: 此函数尚未完成 if self.db_type == self.DB_TYPE_SQLITE: if not isinstance(table_names, list) and field_names is None: log.w('nothing to do.\n') return False if isinstance(table_names, str): old_table_name = table_names new_table_name = table_names elif isinstance(table_names, list) and len(table_names) == 2: old_table_name = table_names[0] new_table_name = table_names[1] else: log.w('invalid param.\n') return False if isinstance(field_names, list): for i in field_names: if not isinstance(i, list) or 2 != len(i): log.w('invalid param.\n') return False if field_names is None: # 仅数据表改名 return self.exec('ALTER TABLE `{}` RENAME TO `{}`;'.format( old_table_name, new_table_name)) else: # sqlite不支持字段改名,所以需要通过临时表中转一下 # 先获取数据表的字段名列表 ret = self.query( 'SELECT * FROM `sqlite_master` WHERE `type`="table" AND `name`="{}";' .format(old_table_name)) log.w('-----\n') log.w(ret[0][4]) log.w('\n') # 先将数据表改名,成为一个临时表 # tmp_table_name = '{}_sqlite_tmp'.format(old_table_name) # ret = self.exec('ALTER TABLE `{}` RENAME TO `{}`;'.format(old_table_name, tmp_table_name)) # if ret is None or not ret: # return ret pass elif self.db_type == self.DB_TYPE_MYSQL: log.e('mysql not supported yet.\n') return False else: log.e('Unknown database type.\n') return False
def _do_exec(self, conn, sql, args): try: with conn: conn.execute(sql, args) return True except Exception as e: log.e('[sqlite] _do_exec() failed: {}\n'.format(e.__str__())) log.e('[sqlite] SQL={}'.format(sql)) return False
def upgrade_database(self, step_begin, step_end):
    """Run the database upgrade procedure.

    @return: True if the upgrade succeeded, False otherwise.
    """
    log.v('start database upgrade process.\n')
    if not DatabaseUpgrade(self, step_begin, step_end).do_upgrade():
        log.e('database upgrade failed.\n')
        return False

    log.v('database upgraded.\n')
    self.need_upgrade = False
    return True
def _do_transaction(self, conn, sql_list): try: # 使用context manager,发生异常时会自动rollback,正常执行完毕后会自动commit with conn: for sql in sql_list: conn.execute(sql) return True except Exception as e: log.e('[sqlite] _do_transaction() failed: {}\n'.format(e.__str__())) return False
def _do_connect(self): # if not os.path.exists(self._db_file): # log.e('[sqlite] can not connect, database file not exists.\n') # return None try: return sqlite3.connect(self._db_file) except: log.e('[sqlite] can not connect, does the database file correct?\n') return None
def stop(self):
    """Ask the local web service to exit by requesting its random exit URI."""
    if self._need_stop:
        return
    self._need_stop = True

    cfg = tp_cfg()
    url = 'http://127.0.0.1:{}{}'.format(cfg.common.port, cfg.random_exit_uri)
    try:
        urllib.request.urlopen(url).read()
    except:
        log.e('\n')
def alter_table(self, table_names, field_names=None): """ 修改表名称及字段名称 table_name: 如果是string,则指定要操作的表,如果是list,则第一个元素是要操作的表,第二个元素是此表改名的目标名称 fields_names: 如果为None,则不修改字段名,否则应该是一个list,其中每个元素是包含两个str的list,表示将此list第一个指定的字段改名为第二个指定的名称 @return: None or Boolean """ # TODO: 此函数尚未完成 if self.db_type == self.DB_TYPE_SQLITE: if not isinstance(table_names, list) and field_names is None: log.w('nothing to do.\n') return False if isinstance(table_names, str): old_table_name = table_names new_table_name = table_names elif isinstance(table_names, list) and len(table_names) == 2: old_table_name = table_names[0] new_table_name = table_names[1] else: log.w('invalid param.\n') return False if isinstance(field_names, list): for i in field_names: if not isinstance(i, list) or 2 != len(i): log.w('invalid param.\n') return False if field_names is None: # 仅数据表改名 return self.exec('ALTER TABLE `{}` RENAME TO `{}`;'.format(old_table_name, new_table_name)) else: # sqlite不支持字段改名,所以需要通过临时表中转一下 # 先获取数据表的字段名列表 ret = self.query('SELECT * FROM `sqlite_master` WHERE `type`="table" AND `name`="{}";'.format(old_table_name)) log.w('-----\n') log.w(ret[0][4]) log.w('\n') # 先将数据表改名,成为一个临时表 # tmp_table_name = '{}_sqlite_tmp'.format(old_table_name) # ret = self.exec('ALTER TABLE `{}` RENAME TO `{}`;'.format(old_table_name, tmp_table_name)) # if ret is None or not ret: # return ret pass elif self.db_type == self.DB_TYPE_MYSQL: log.e('mysql not supported yet.\n') return False else: log.e('Unknown database type.\n') return False
def _last_insert_id(self, conn): cursor = conn.cursor() try: cursor.execute('SELECT last_insert_rowid();') db_ret = cursor.fetchall() return db_ret[0][0] except Exception as e: log.e('[sqlite] _last_insert_id() failed: {}\n'.format(e.__str__())) return -1 finally: cursor.close()
def _do_query(self, conn, sql, args): cursor = conn.cursor() try: cursor.execute(sql, args) db_ret = cursor.fetchall() return db_ret except Exception as e: log.e('[sqlite] _do_query() failed: {}\n'.format(e.__str__())) log.e('[sqlite] SQL={}'.format(sql)) finally: cursor.close()
def _last_insert_id(self, conn): cursor = conn.cursor() try: cursor.execute('SELECT LAST_INSERT_ID();') db_ret = cursor.fetchall() conn.commit() return db_ret[0][0] except Exception as e: log.e('[mysql] _last_insert_id() failed: {}\n'.format(e.__str__())) return -1 finally: cursor.close()
def run(self):
    """Job-scheduler main loop.

    Wakes up once per second; any registered job whose interval has elapsed
    is run synchronously (under the lock) and its last-run timestamp is
    updated.  Exceptions raised by a job are logged and do not stop the loop.
    """
    while not self._stop_flag:
        time.sleep(1)

        with self._lock:
            _now = int(datetime.datetime.now().timestamp())
            for j in self._jobs:
                # log.v('--now: {}, job-name: {}, job-t: {}, job-i: {}\n'.format(_now, j, self._jobs[j]['t'], self._jobs[j]['i']))
                if _now - self._jobs[j]['t'] >= self._jobs[j]['i']:
                    # timestamp is updated before running, so a slow job does
                    # not immediately fire again on the next tick
                    self._jobs[j]['t'] = _now
                    try:
                        self._jobs[j]['f']()
                    except:
                        log.e('got exception when exec job: {}\n'.format(j))
def _do_exec(self, conn, sql, args):
    """Execute one statement on the mysql connection and commit.

    Retries once after transparently reconnecting when the server has gone
    away (errno 2006) or the connection was lost (errno 2013), or when the
    connection interface is broken.

    @return: True on success, None on failure.
    """
    for retry in range(2):
        cursor = conn.cursor()
        try:
            cursor.execute(sql, args)
            conn.commit()
            return True
        except pymysql.err.OperationalError as e:
            errno, _ = e.args
            # 2006: MySQL server has gone away; 2013: lost connection during
            # query.  Anything else, or a second failure, is fatal.
            if retry == 1 or errno not in [2006, 2013]:
                log.v('[mysql] SQL={}\n'.format(sql))
                log.e('[mysql] _do_exec() failed: {}\n'.format(e.__str__()))
                return None
            conn = self._reconnect()
            if conn is None:
                return None
        except pymysql.err.InterfaceError as e:
            if retry == 1:
                log.v('[mysql] SQL={}\n'.format(sql))
                log.e('[mysql] _do_exec() failed: {}\n'.format(e.__str__()))
                return None
            conn = self._reconnect()
            if conn is None:
                return None
        except Exception as e:
            log.e('[mysql] _do_exec() failed: {}\n'.format(e.__str__()))
            log.e('[mysql] SQL={}'.format(sql))
            return None
        finally:
            cursor.close()
def _reconnect(self):
    """Re-open the current thread's mysql connection after it was lost.

    @return: the new connection, or None if reconnecting failed.
    """
    log.w('[mysql] lost connection, reconnect.\n')
    with self._locker:
        thread_id = threading.get_ident()
        if thread_id not in self._connections:
            log.e('[mysql] database pool internal error.\n')
            return None

        conn = self._do_connect()
        if conn is None:
            # drop the dead entry so a later call can create a fresh one
            del self._connections[thread_id]
            return None

        self._connections[thread_id] = conn
        return conn
def _do_exec(self, conn, sql, args):
    """Execute one statement on the mysql connection and commit.

    Retries once after transparently reconnecting when the server has gone
    away (errno 2006) or the connection was lost (errno 2013), or when the
    connection interface is broken.

    @return: True on success, None on failure.
    """
    for retry in range(2):
        cursor = conn.cursor()
        try:
            cursor.execute(sql, args)
            conn.commit()
            return True
        except pymysql.err.OperationalError as e:
            errno, _ = e.args
            # 2006: MySQL server has gone away; 2013: lost connection during
            # query.  Anything else, or a second failure, is fatal.
            if retry == 1 or errno not in [2006, 2013]:
                log.v('[mysql] SQL={}\n'.format(sql))
                log.e('[mysql] _do_exec() failed: {}\n'.format(e.__str__()))
                return None
            conn = self._reconnect()
            if conn is None:
                return None
        except pymysql.err.InterfaceError as e:
            if retry == 1:
                log.v('[mysql] SQL={}\n'.format(sql))
                log.e('[mysql] _do_exec() failed: {}\n'.format(e.__str__()))
                return None
            conn = self._reconnect()
            if conn is None:
                return None
        except Exception as e:
            log.e('[mysql] _do_exec() failed: {}\n'.format(e.__str__()))
            log.e('[mysql] SQL={}'.format(sql))
            return None
        finally:
            cursor.close()
def init(self):
    """Load the external-service (TP-API) access configuration.

    Reads extsrv.json from the configuration directory.  A missing file is
    not an error: it simply means third-party services may not call TP-API.

    @return: True on success (or when the file is absent), False for any
             malformed configuration.
    """
    cfg = tp_cfg()
    cfg_file = os.path.join(cfg.cfg_path, 'extsrv.json')

    # Without the config file, third-party calls to TP-API are disabled.
    if not os.path.exists(cfg_file):
        return True

    log.i('Loading external server configuration...\n')

    with open(cfg_file, encoding='utf_8') as f:
        c = f.read()

    try:
        sc = json.loads(c)
    except Exception:
        return False

    if 'version' not in sc:
        return False
    if 'ext_srv' not in sc:
        return False

    try:
        # iterate the entries directly instead of indexing by range(len(...))
        for srv in sc['ext_srv']:
            srv_name = srv['name']
            srv_desc = srv['desc']
            for access in srv['access']:
                key = access['key']
                if key in self._cfg:
                    log.e('Invalid extsrv.json, duplicated key: {}\n'.format(key))
                    return False
                self._cfg[key] = {
                    'name': srv_name,
                    'desc': srv_desc,
                    'secret': access['secret'],
                    'privilege': int(access['privilege'])
                }
    except Exception:
        log.e('Invalid extsrv.json\n')
        return False

    return True
def _get_core_server_config(self):
    """Fetch runtime configuration from the core server over its RPC port."""
    cfg = tp_cfg()
    try:
        req_data = json.dumps({'method': 'get_config', 'param': []})
        data = urllib.parse.quote(req_data).encode('utf-8')

        request = urllib.request.Request(url=cfg.common.core_server_rpc, data=data)
        rep = urllib.request.urlopen(request, timeout=3)
        x = json.loads(rep.read().decode())

        if 'code' not in x or x['code'] != 0:
            log.e('connect core-server for get config info failed.\n')
        else:
            cfg.update_core(x['data'])
            log.d('get config info of core-server succeeded.\n')
    except:
        log.w('can not connect to core-server to get config, maybe it not start yet, ignore.\n')
def sys_log(operator, client_ip, code, message=""):
    """Write one entry into the syslog table (parameterized statement).

    @return: TPE_OK, or TPE_DATABASE on any failure.
    """
    try:
        db = get_db()
        sql_s = 'INSERT INTO `{tp}syslog` (`user_name`,`user_surname`,`client_ip`,`code`,`log_time`,`message`) ' \
                'VALUES ({ph},{ph},{ph},{ph},{ph},{ph})' \
                ';'.format(tp=db.table_prefix, ph=db.place_holder)
        sql_v = (operator['username'], operator['surname'], client_ip, code, tp_timestamp_sec(), message)
        if not db.exec(sql_s, sql_v):
            return TPE_DATABASE
        return TPE_OK
    except:
        log.e('\n')
        return TPE_DATABASE
def get_logs(sql_filter, sql_order, sql_limit):
    """Query system log entries with optional filter, ordering and paging.

    @return: (error-code, total-count, records)
    """
    s = SQL(get_db())
    s.select_from('syslog', ['id', 'user_name', 'user_surname', 'client_ip', 'code', 'log_time', 'message'], alt_name='l')

    _where = list()
    for k in sql_filter:
        if k == 'log_user_name':
            _where.append('l.user_name="{}"'.format(sql_filter[k]))

    if len(_where) > 0:
        s.where('( {} )'.format(' AND '.join(_where)))

    if sql_order is not None:
        _sort = True if sql_order['asc'] else False
        if 'log_time' == sql_order['name']:
            s.order_by('l.log_time', _sort)
        else:
            log.e('unknown order field: {}\n'.format(sql_order['name']))
            return TPE_PARAM, s.total_count, s.recorder

    if len(sql_limit) > 0:
        s.limit(sql_limit['page_index'], sql_limit['per_page'])

    err = s.query()
    return err, s.total_count, s.recorder
def sys_log(operator, client_ip, code, message=""):
    """Write one entry into the syslog table.

    Uses a parameterized statement (consistent with the place_holder based
    variant of this function): the previous version interpolated the user
    name, client IP and message directly into the SQL text, which broke on
    embedded quotes and allowed SQL injection.

    @return: TPE_OK, or TPE_DATABASE on any failure.
    """
    try:
        db = get_db()
        sql_s = 'INSERT INTO `{tp}syslog` (`user_name`,`user_surname`,`client_ip`,`code`,`log_time`,`message`) ' \
                'VALUES ({ph},{ph},{ph},{ph},{ph},{ph})' \
                ';'.format(tp=db.table_prefix, ph=db.place_holder)
        sql_v = (operator['username'], operator['surname'], client_ip, code, tp_timestamp_utc_now(), message)
        ret = db.exec(sql_s, sql_v)
        if not ret:
            return TPE_DATABASE
        return TPE_OK
    except:
        log.e('\n')
        return TPE_DATABASE
def init(self):
    """Initialize the database layer according to the configuration.

    Supports sqlite and mysql; for sqlite a default database-file location
    is filled in when none is configured.

    @return: True on success, False otherwise.
    """
    cfg = tp_cfg()
    db_type = cfg.database.type

    if 'sqlite' == db_type:
        if cfg.database.sqlite_file is None:
            cfg.set_default('database::sqlite-file', os.path.join(cfg.data_path, 'db', 'teleport.db'))
        if not self._init_sqlite(cfg.database.sqlite_file):
            return False
        if self.need_create:
            return True
    elif 'mysql' == db_type:
        if not self._init_mysql(cfg.database.mysql_host, cfg.database.mysql_port,
                                cfg.database.mysql_db, cfg.database.mysql_prefix,
                                cfg.database.mysql_user, cfg.database.mysql_password):
            return False
    else:
        log.e('unknown database type `{}`, support sqlite/mysql now.\n'.format(db_type))
        return False

    return True
def _do_transaction(self, conn, sql_list): # s = '' # v = None try: # 使用context manager,发生异常时会自动rollback,正常执行完毕后会自动commit with conn: for item in sql_list: # s = item['s'] # v = item['v'] if item['v'] is None: conn.execute(item['s']) else: conn.execute(item['s'], item['v']) return True except Exception as e: # log.d('|||', s, '|||', v, '|||', '\n') # log.d('///', sql_list, '///', '\n') log.e('[sqlite] _do_transaction() failed: {}\n'.format(e.__str__())) return False
def core_service_async_post_http(post_data):
    """POST a JSON request to the core service (tornado coroutine).

    @return: (error-code, response-data-or-None)
    """
    try:
        encoded = urllib.parse.quote(json.dumps(post_data)).encode('utf-8')
        client = tornado.httpclient.AsyncHTTPClient()
        r = yield client.fetch(tp_cfg().common.core_server_rpc, body=encoded, method='POST')

        ret = json.loads(r.body.decode())
        if 'code' not in ret:
            return TPE_FAILED, None
        if 'data' not in ret:
            return ret['code'], None
        return ret['code'], ret['data']
    except:
        log.e('core_service_async_post_http() failed.\n')
        return TPE_NO_CORE_SERVER, None
def create_and_init(self, step_begin, step_end, sysadmin, email, password):
    """Create the database and fill in the initial data.

    For sqlite, the containing folder and an empty database file are created
    first so a connection can be opened.

    @return: True on success, False otherwise.
    """
    log.v('start database create and initialization process.\n')

    if self.db_type == self.DB_TYPE_SQLITE:
        db_path = os.path.dirname(self.sqlite_file)
        if not os.path.exists(db_path):
            tp_make_dir(db_path)
            if not os.path.exists(db_path):
                log.e('can not create folder `{}` to store database file.\n'.format(db_path))
                return False

        # An empty database file must exist before sqlite can connect to it.
        if not os.path.exists(self.sqlite_file):
            try:
                open(self.sqlite_file, 'w').close()
            except:
                log.e('can not create db file `{}`.\n'.format(self.sqlite_file))
                return False

    if not DatabaseInit(self, step_begin, step_end).do_create_and_init(sysadmin, email, password):
        log.e('database create and initialize failed.\n')
        return False

    log.v('database created.\n')
    self.need_create = False
    self.load_system_config()
    return True
def create_and_init(self, step_begin, step_end, sysadmin, email, password):
    """Create the database and fill in the initial data.

    For sqlite, the containing folder and an empty database file are created
    first so a connection can be opened.

    @return: True on success, False otherwise.
    """
    log.v('start database create and initialization process.\n')

    if self.db_type == self.DB_TYPE_SQLITE:
        db_path = os.path.dirname(self.sqlite_file)
        if not os.path.exists(db_path):
            tp_make_dir(db_path)
            if not os.path.exists(db_path):
                log.e('can not create folder `{}` to store database file.\n'.format(db_path))
                return False

        # An empty database file must exist before sqlite can connect to it.
        if not os.path.exists(self.sqlite_file):
            try:
                open(self.sqlite_file, 'w').close()
            except:
                log.e('can not create db file `{}`.\n'.format(self.sqlite_file))
                return False

    if not DatabaseInit(self, step_begin, step_end).do_create_and_init(sysadmin, email, password):
        log.e('database create and initialize failed.\n')
        return False

    log.v('database created.\n')
    self.need_create = False
    self.load_system_config()
    return True
def get_logs(sql_filter, sql_order, sql_limit):
    """Query system log entries with optional filter, ordering and paging.

    @return: (error-code, total-count, records)
    """
    s = SQL(get_db())
    s.select_from('syslog', ['id', 'user_name', 'user_surname', 'client_ip', 'code', 'log_time', 'message'], alt_name='l')

    _where = list()
    for k in sql_filter:
        if k == 'log_user_name':
            _where.append('l.user_name="{}"'.format(sql_filter[k]))

    if len(_where) > 0:
        s.where('( {} )'.format(' AND '.join(_where)))

    if sql_order is not None:
        _sort = True if sql_order['asc'] else False
        if 'log_time' == sql_order['name']:
            s.order_by('l.log_time', _sort)
        else:
            log.e('unknown order field: {}\n'.format(sql_order['name']))
            return TPE_PARAM, s.total_count, s.recorder

    if len(sql_limit) > 0:
        s.limit(sql_limit['page_index'], sql_limit['per_page'])

    err = s.query()
    return err, s.total_count, s.recorder
def _do_transaction(self, conn, sql_list):
    """Run a list of SQL statements as a single mysql transaction.

    Retries once after transparently reconnecting when the server has gone
    away (errno 2006) or the connection was lost (errno 2013), or when the
    connection interface is broken.

    @return: True on success; False or None on failure.
             NOTE(review): the failure paths are inconsistent — some return
             False, others None.  Callers should treat any non-True result as
             failure; confirm call sites before unifying.
    """
    for retry in range(2):
        cursor = conn.cursor()
        try:
            conn.begin()
            for sql in sql_list:
                cursor.execute(sql)
            conn.commit()
            return True
        except pymysql.err.OperationalError as e:
            errno, _ = e.args
            # 2006: MySQL server has gone away; 2013: lost connection.
            if retry == 1 or errno not in [2006, 2013]:
                log.e('[mysql] _do_transaction() failed: {}\n'.format(
                    e.__str__()))
                return False
            conn = self._reconnect()
            if conn is None:
                return None
        except pymysql.err.InterfaceError as e:
            if retry == 1:
                log.e('[mysql] _do_transaction() failed: {}\n'.format(
                    e.__str__()))
                return None
            conn = self._reconnect()
            if conn is None:
                return None
        except Exception as e:
            # unexpected error: undo whatever part of the transaction ran
            conn.rollback()
            log.e('[mysql] _do_transaction() failed: {}\n'.format(
                e.__str__()))
            return False
        finally:
            cursor.close()
def _do_transaction(self, conn, sql_list):
    """Run a list of SQL statements as a single mysql transaction.

    Retries once after transparently reconnecting when the server has gone
    away (errno 2006) or the connection was lost (errno 2013), or when the
    connection interface is broken.

    @return: True on success; False or None on failure.
             NOTE(review): the failure paths are inconsistent — some return
             False, others None.  Callers should treat any non-True result as
             failure; confirm call sites before unifying.
    """
    for retry in range(2):
        cursor = conn.cursor()
        try:
            conn.begin()
            for sql in sql_list:
                cursor.execute(sql)
            conn.commit()
            return True
        except pymysql.err.OperationalError as e:
            errno, _ = e.args
            # 2006: MySQL server has gone away; 2013: lost connection.
            if retry == 1 or errno not in [2006, 2013]:
                log.e('[mysql] _do_transaction() failed: {}\n'.format(e.__str__()))
                return False
            conn = self._reconnect()
            if conn is None:
                return None
        except pymysql.err.InterfaceError as e:
            if retry == 1:
                log.e('[mysql] _do_transaction() failed: {}\n'.format(e.__str__()))
                return None
            conn = self._reconnect()
            if conn is None:
                return None
        except Exception as e:
            # unexpected error: undo whatever part of the transaction ran
            conn.rollback()
            log.e('[mysql] _do_transaction() failed: {}\n'.format(e.__str__()))
            return False
        finally:
            cursor.close()
def _do_connect(self):
    """Create a new mysql connection and adjust its session sql_mode.

    @return: the connection on success, None on failure.
    """
    try:
        conn = pymysql.connect(host=self._host,
                               user=self._user,
                               passwd=self._password,
                               db=self._db_name,
                               port=self._port,
                               autocommit=False,
                               connect_timeout=3.0,
                               charset='utf8')

        # fix: the result of this call was never assigned, so the
        # `if err is None` check below raised NameError whenever it ran
        err = self._do_exec(conn, 'SET SESSION sql_mode=(SELECT CONCAT(@@sql_mode,",NO_ZERO_DATE,NO_ZERO_IN_DATE,ERROR_FOR_DIVISION_BY_ZERO"));', args=())
        if err is None:
            # fix: the old message referred to ONLY_FULL_GROUP_BY, an
            # operation that had been commented out
            log.e('[mysql] can not set session sql_mode.\n')

        return conn
    except pymysql.err.OperationalError as e:
        errno, _ = e.args
        if 2003 == errno:
            log.e('[mysql] connect [{}:{}] failed: {}\n'.format(self._host, self._port, e.__str__()))
        return None
    except Exception as e:
        log.e('[mysql] connect [{}:{}] failed: {}\n'.format(self._host, self._port, e.__str__()))
        return None
def get_asset(sql_filter, sql_order, sql_limit):
    """Query asset entries (type=1) of ops authorization policies.

    @return: (error-code, total-count, page-index, records)
    """
    ss = SQL(get_db())
    ss.select_from('ops_auz', ['id', 'policy_id', 'rtype', 'rid', 'name'], alt_name='p')

    _where = ['p.type=1']
    for k in sql_filter:
        if k == 'policy_id':
            _where.append('p.policy_id={}'.format(sql_filter[k]))
        elif k == 'search':
            _where.append('(p.name LIKE "%{filter}%")'.format(filter=sql_filter[k]))
        else:
            log.e('unknown filter field: {}\n'.format(k))
            return TPE_PARAM, 0, 0, {}

    # _where always holds at least the type constraint
    ss.where('( {} )'.format(' AND '.join(_where)))

    if sql_order is not None:
        _sort = True if sql_order['asc'] else False
        if 'name' == sql_order['name']:
            ss.order_by('p.name', _sort)
        elif 'rtype' == sql_order['name']:
            ss.order_by('p.rtype', _sort)
        else:
            log.e('unknown order field: {}\n'.format(sql_order['name']))
            return TPE_PARAM, ss.total_count, 0, ss.recorder

    if len(sql_limit) > 0:
        ss.limit(sql_limit['page_index'], sql_limit['per_page'])

    err = ss.query()
    if err != TPE_OK:
        return err, 0, 0, {}
    return TPE_OK, ss.total_count, ss.page_index, ss.recorder
def post(self):
    """Remove members (by id) from an ops policy."""
    if self.check_privilege(TP_PRIVILEGE_OPS_AUZ) != TPE_OK:
        return

    args = self.get_argument('args', None)
    if args is None:
        return self.write_json(TPE_PARAM)
    try:
        args = json.loads(args)
    except:
        return self.write_json(TPE_JSON_FORMAT)

    try:
        policy_id = int(args['policy_id'])
        policy_type = int(args['policy_type'])
        ids = args['ids']
    except:
        log.e('\n')
        return self.write_json(TPE_PARAM)

    self.write_json(ops.remove_members(self, policy_id, policy_type, ids))
def is_field_exists(self, table_name, field_name):
    """Check whether a table contains the given field.

    @return: None on error, otherwise True/False
    """
    if self.db_type == self.DB_TYPE_SQLITE:
        rows = self.query('PRAGMA table_info(`{}`);'.format(table_name))
        if rows is None:
            return None
        # column 1 of each PRAGMA table_info row is the field name
        return any(f[1] == field_name for f in rows)
    elif self.db_type == self.DB_TYPE_MYSQL:
        rows = self.query('DESC `{}` `{}`;'.format(table_name, field_name))
        if rows is None:
            return None
        return len(rows) > 0
    else:
        log.e('Unknown database type.\n')
        return None
def post(self):
    """Update the record/RDP/SSH flags of an ops policy."""
    if self.check_privilege(TP_PRIVILEGE_OPS_AUZ) != TPE_OK:
        return

    args = self.get_argument('args', None)
    if args is None:
        return self.write_json(TPE_PARAM)
    try:
        args = json.loads(args)
    except:
        return self.write_json(TPE_JSON_FORMAT)

    try:
        policy_id = int(args['policy_id'])
        flag_record = int(args['flag_record'])
        flag_rdp = int(args['flag_rdp'])
        flag_ssh = int(args['flag_ssh'])
    except:
        log.e('\n')
        return self.write_json(TPE_PARAM)

    self.write_json(ops.set_flags(self, policy_id, flag_record, flag_rdp, flag_ssh))
def read_telnet_record_data(record_id, offset):
    """Read up to 1000 packages from a telnet session record file.

    @param record_id: id of the record (names the record folder).
    @param offset: int, byte offset into the data file to start reading at.
    @return: (data-list-or-None, bytes-consumed, error-code)
    """
    if not tp_cfg().core.detected:
        # fix: this path returned a 2-tuple while every other path returns a
        # 3-tuple, breaking tuple-unpacking at call sites
        return None, 0, TPE_NO_CORE_SERVER

    record_path = os.path.join(tp_cfg().core.replay_path, 'telnet', '{:09d}'.format(int(record_id)))
    file_data = os.path.join(record_path, 'tp-telnet.dat')

    if not os.path.exists(file_data):
        return None, 0, TPE_NOT_EXISTS

    data_list = list()
    data_size = 0
    file = None
    try:
        file_size = os.path.getsize(file_data)
        if offset >= file_size:
            return None, 0, TPE_FAILED

        file = open(file_data, 'rb')
        if offset > 0:
            file.seek(offset, io.SEEK_SET)

        # read 1000 packages one time from offset.
        for i in range(1000):
            # Package header layout (12 bytes, little-endian, packed):
            #   ex_u8  type;         // data type of this package
            #   ex_u32 size;         // total size of the payload (header excluded)
            #   ex_u32 time_ms;      // milliseconds since session start
            #                        // (so one session cannot exceed ~49 days)
            #   ex_u8  _reserve[3];  // reserved
            _data = file.read(12)
            data_size += 12
            _action, _size, _time, = struct.unpack_from('=BII', _data)

            if offset + data_size + _size > file_size:
                return None, 0, TPE_FAILED

            _data = file.read(_size)
            data_size += _size

            temp = dict()
            temp['a'] = _action
            temp['t'] = _time
            if _action == 1:
                # this is window size changed.
                w, h = struct.unpack_from('HH', _data)
                temp['w'] = w
                temp['h'] = h
            elif _action == 2:
                try:
                    temp['d'] = _data.decode()
                except:
                    # payload is not valid text: mark as type 3 and
                    # fall back to base64
                    temp['a'] = 3
                    temp['d'] = base64.b64encode(_data).decode()
            else:
                return None, 0, TPE_FAILED

            data_list.append(temp)

            if offset + data_size == file_size:
                break
    except Exception:
        log.e('failed to read record file: {}\n'.format(file_data))
        return None, 0, TPE_FAILED
    finally:
        if file is not None:
            file.close()

    return data_list, data_size, TPE_OK
def read_record_head(protocol_type, record_id):
    """Parse the .tpr header file of a session record.

    Returns (header_dict, err); header_dict carries start time, package
    count, elapsed time, terminal size, user/account names and the
    connection endpoints.
    """
    if not tp_cfg().core.detected:
        return None, TPE_NO_CORE_SERVER

    if protocol_type == TP_PROTOCOL_TYPE_RDP:
        path_name = 'rdp'
    elif protocol_type == TP_PROTOCOL_TYPE_SSH:
        path_name = 'ssh'
    elif protocol_type == TP_PROTOCOL_TYPE_TELNET:
        path_name = 'telnet'
    else:
        # BUG FIX: original fell through with path_name undefined and
        # raised NameError for an unknown protocol type.
        log.e('unknown protocol type: {}\n'.format(protocol_type))
        return None, TPE_PARAM

    record_path = os.path.join(tp_cfg().core.replay_path, path_name, '{:09d}'.format(int(record_id)))
    header_file_path = os.path.join(record_path, 'tp-{}.tpr'.format(path_name))
    if not os.path.exists(header_file_path):
        return None, TPE_NOT_EXISTS

    try:
        with open(header_file_path, 'rb') as f:
            data = f.read()

        offset = 0
        magic, = struct.unpack_from('I', data, offset)
        offset += 4
        # the header's own comment requires this value ('TPPR'); enforce it.
        if magic != 1381126228:
            log.e('invalid record header file: {}\n'.format(header_file_path))
            return None, TPE_FAILED
        ver, = struct.unpack_from('H', data, offset)  # read but unused
        offset += 2
        pkg_count, = struct.unpack_from('I', data, offset)
        offset += 4
        time_used, = struct.unpack_from('I', data, offset)
        offset += 4
        protocol_type, = struct.unpack_from('H', data, offset)  # read but unused
        offset += 2
        protocol_sub_type, = struct.unpack_from('H', data, offset)  # read but unused
        offset += 2
        time_start, = struct.unpack_from('Q', data, offset)
        offset += 8
        width, = struct.unpack_from('H', data, offset)
        offset += 2
        height, = struct.unpack_from('H', data, offset)
        offset += 2
        # NOTE: the format reserves file_count ('H') and total_size ('I')
        # here, but this parser (like the original) does not read them.
        user_name, = struct.unpack_from('64s', data, offset)
        user_name = _remove_padding_space(user_name).decode()
        offset += 64
        account, = struct.unpack_from('64s', data, offset)
        account = _remove_padding_space(account).decode()
        offset += 64
        host_ip, = struct.unpack_from('40s', data, offset)
        host_ip = _remove_padding_space(host_ip).decode()
        offset += 40
        conn_ip, = struct.unpack_from('40s', data, offset)
        conn_ip = _remove_padding_space(conn_ip).decode()
        offset += 40
        conn_port, = struct.unpack_from('H', data, offset)
        offset += 2
        client_ip, = struct.unpack_from('40s', data, offset)
        client_ip = _remove_padding_space(client_ip).decode()
        offset += 40
    except Exception as e:
        # BUG FIX: original passed the exception object to log.e(); the
        # file's convention is a formatted string ending in '\n'.
        log.e('failed to parse record header: {}\n'.format(e))
        return None, TPE_FAILED

    header = dict()
    header['start'] = time_start
    header['pkg_count'] = pkg_count
    header['time_used'] = time_used
    header['width'] = width
    header['height'] = height
    header['account'] = account
    header['user_name'] = user_name
    header['host_ip'] = host_ip
    header['conn_ip'] = conn_ip
    header['conn_port'] = conn_port
    header['client_ip'] = client_ip

    return header, TPE_OK
def get_records(handler, sql_filter, sql_order, sql_limit, sql_restrict, sql_exclude):
    """Query the session-record list.

    Visibility policy ("audit" here means marking a session, keeping it,
    writing remarks, etc.):
      1. ops privilege: may see own sessions, but not audit them;
      2. ops-authorization privilege: may see all sessions, but not audit;
      3. audit privilege: may see sessions of hosts it is authorized for,
         and may audit them;
      4. audit-authorization privilege: may see all sessions and audit them.

    Returns (err, total_count, records).
    """
    allow_uid = 0
    allow_hids = list()
    allow_all = False

    user = handler.get_current_user()

    if (user['privilege'] & TP_PRIVILEGE_OPS_AUZ) != 0 or (user['privilege'] & TP_PRIVILEGE_AUDIT_AUZ) != 0:
        allow_all = True

    if not allow_all:
        if (user['privilege'] & TP_PRIVILEGE_OPS) != 0:
            # BUG FIX: `user` is subscripted as a dict above
            # (user['privilege']); original used attribute access `user.id`.
            allow_uid = user['id']

        if (user['privilege'] & TP_PRIVILEGE_AUDIT) != 0:
            # collect the host ids this auditor is authorized for.
            s = SQL(get_db())
            s.select_from('audit_map', ['u_id', 'h_id', 'p_state', 'policy_auth_type', 'u_state', 'gu_state'], alt_name='a')
            s.where(
                'a.u_id={user_id} AND '
                'a.p_state={enable_state} AND'
                '('
                '((a.policy_auth_type={U2H} OR a.policy_auth_type={U2HG}) AND a.u_state={enable_state}) OR '
                '((a.policy_auth_type={UG2H} OR a.policy_auth_type={UG2HG}) AND a.u_state={enable_state} AND a.gu_state={enable_state})'
                ')'.format(enable_state=TP_STATE_NORMAL, user_id=user['id'],
                           U2H=TP_POLICY_AUTH_USER_HOST, U2HG=TP_POLICY_AUTH_USER_gHOST,
                           UG2H=TP_POLICY_AUTH_gUSER_HOST, UG2HG=TP_POLICY_AUTH_gUSER_gHOST))
            err = s.query()
            if err != TPE_OK:
                return err, 0, []

            for h in s.recorder:
                if h.h_id not in allow_hids:
                    allow_hids.append(h.h_id)

            if len(allow_hids) == 0:
                return TPE_OK, 0, []

        if allow_uid == 0 and len(allow_hids) == 0:
            # neither ops nor audit privilege: nothing is visible.
            return TPE_FAILED, 0, []

    s = SQL(get_db())
    s.select_from('record', ['id', 'sid', 'user_id', 'host_id', 'acc_id', 'state', 'user_username', 'user_surname', 'host_ip', 'conn_ip', 'conn_port', 'client_ip', 'acc_username', 'protocol_type', 'protocol_sub_type', 'time_begin', 'time_end'], alt_name='r')

    str_where = ''
    _where = list()

    if len(sql_restrict) > 0:
        for k in sql_restrict:
            if k == 'state':
                _where.append('r.state IN ({})'.format(','.join([str(state) for state in sql_restrict[k]])))
            else:
                log.w('unknown restrict field: {}\n'.format(k))

    if len(sql_exclude) > 0:
        for k in sql_exclude:
            if k == 'state':
                _where.append('r.state NOT IN ({})'.format(','.join([str(state) for state in sql_exclude[k]])))
            else:
                log.w('unknown exclude field: {}\n'.format(k))

    if len(sql_filter) > 0:
        for k in sql_filter:
            if k == 'state':
                _where.append('r.state={}'.format(sql_filter[k]))

    if not allow_all:
        if allow_uid != 0:
            _where.append('r.user_id={uid}'.format(uid=allow_uid))
        if len(allow_hids) > 0:
            hids = [str(h) for h in allow_hids]
            _where.append('r.host_id IN ({hids})'.format(hids=','.join(hids)))

    if len(_where) > 0:
        str_where = '( {} )'.format(' AND '.join(_where))
        s.where(str_where)

    if sql_order is not None:
        _sort = bool(sql_order['asc'])
        if 'id' == sql_order['name']:
            s.order_by('r.id', _sort)
        elif 'time_begin' == sql_order['name']:
            s.order_by('r.time_begin', _sort)
        elif 'sid' == sql_order['name']:
            s.order_by('r.sid', _sort)
        else:
            log.e('unknown order field: {}\n'.format(sql_order['name']))
            return TPE_PARAM, s.total_count, s.recorder

    if len(sql_limit) > 0:
        s.limit(sql_limit['page_index'], sql_limit['per_page'])

    err = s.query()
    return err, s.total_count, s.recorder