def start(self): """ Start the daemon """ # Check for a pidfile to see if the daemon already runs try: pf = file(self.pidfile, 'r') pid = pf.read().strip() pf.close() except IOError: pid = None try: if pid: message = self.serviceName + " already running!" print message sys_log.SysLog(self.logfile, self.instance).writeLog("error", message) sys.exit(1) print self.serviceName + " is starting ..." # Start the daemon self.daemonize() self.run() except Exception, e: sys_log.SysLog(self.logfile, self.instance).writeLog("error", str(traceback.format_exc()))
def is_stats_data(self, cDate, hourNum=None): """ 判断该时间是否有统计数据 """ bis = True cur = self._getCursor() if cur == None: return bis if hourNum == None: sql = "select logscnt, flowscnt from t_stats_day where statsdate = %s;" args = (cDate, ) else: sql = "select logscnt, flowscnt from t_stats_hour where statsdate = %s and hournum = %s;" args = (cDate, hourNum) try: cur.execute(sql, args) lst = cur.fetchall() bis = (len(lst) > 0) except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def new_stats_day(self, statsDate, logsCnt, logsDatasize, flowsCnt, flowsDatasize, dataSize): """ 向表t_stats_day中插入一条数据; 如果statsDate日期的数据已存在,则修改 """ cur = self._getCursor() if cur == None: return ins = "insert into t_stats_day(statsdate, logscnt, logsdatasize, flowscnt, flowsdatasize, datasize) values(%s, %s, %s, %s, %s, %s);" upd = "update t_stats_day set logscnt = %s, logsdatasize = %s, flowscnt = %s, flowsdatasize = %s, datasize = %s where statsdate = %s;" sel = "select logscnt, logsdatasize, flowscnt, flowsdatasize, datasize from t_stats_day where statsdate = %s;" try: cur.execute(sel, (statsDate, )) rst = cur.fetchall() if len(rst) > 0: cur.execute(upd, (logsCnt, logsDatasize, flowsCnt, flowsDatasize, dataSize, statsDate)) else: cur.execute(ins, (statsDate, logsCnt, logsDatasize, flowsCnt, flowsDatasize, dataSize)) cur.connection.commit() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def get_dn_list_day(self, cDate): """ 获取所有设置中dn列表, 包括上传文件的 """ lst = [] cur = self._getCursor() if cur == None: return lst nextDate = cDate + datetime.timedelta(days=1) sql = "select distinct(dn) as dn from (" sql = sql + "select distinct(configpath) as dn from t_syslog union " sql = sql + "select distinct(configpath) as dn from t_pullfile union " sql = sql + "select distinct('biz:' || COALESCE(compname,'') || ':' || COALESCE(sysname,'')) " sql = sql + "as dn from t_database union " sql = sql + "select distinct(configpath) as dn from t_filehash " sql = sql + "where firstime >= %s and firstime < %s) as t_dn;" try: cur.execute(sql, (cDate, nextDate)) lst = cur.fetchall() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def get_group_list_day(self, cDate): """ 获取所有设置中的组名列表, 包括上传文件的组upload """ lst = [] cur = self._getCursor() if cur == None: return lst nextDate = cDate + datetime.timedelta(days=1) sql = "select distinct(groupname) as groupname from t_confgroup " sql = sql + "where groupid in (" sql = sql + "select distinct(groupid) as groupid from t_syslog union " sql = sql + "select distinct(groupid) as groupid from t_pullfile union " sql = sql + "select distinct(groupid) as groupid from t_database) union " sql = sql + "select 'upload' as groupname where exists (select logsource " sql = sql + "from t_filehash where firstime >= %s and firstime < %s limit 1);" try: cur.execute(sql, (cDate, nextDate)) lst = cur.fetchall() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def get_host_list_day(self, cDate): """ 获取所有设置中的主机列表, 包括上传文件的客户机 """ lst = [] cur = self._getCursor() if cur == None: return lst nextDate = cDate + datetime.timedelta(days=1) sql = "select distinct(host) as host from (" sql = sql + "select distinct(logsource) as host from t_syslog union " sql = sql + "select distinct(logsource) as host from t_pullfile union " sql = sql + "select distinct(hostname) as host from t_connection " sql = sql + "where conname in (select conname from t_database) union " sql = sql + "select distinct(logsource) as host from t_filehash " sql = sql + "where firstime >= %s and firstime < %s) as t_host;" try: cur.execute(sql, (cDate, nextDate)) lst = cur.fetchall() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def new_statsbynode_day(self, nodeName, statsDate, flowsCnt, flowsBytes, flowsPackets): """ 向表t_statsbynode_day中插入一条数据; 如果数据已存在,则修改 """ cur = self._getCursor() if cur == None: return ins = "insert into t_statsbynode_day(nodename, statsdate, flowscnt, flowsbytes, flowspackets) values(%s, %s, %s, %s, %s);" upd = "update t_statsbynode_day set flowscnt = %s, flowsbytes = %s, flowspackets = %s where nodename = %s and statsdate = %s;" sel = "select flowscnt, flowsbytes, flowspackets from t_statsbynode_day where nodename = %s and statsdate = %s;" try: cur.execute(sel, (nodeName, statsDate)) rst = cur.fetchall() if len(rst) > 0: cur.execute( upd, (flowsCnt, flowsBytes, flowsPackets, nodeName, statsDate)) else: cur.execute( ins, (nodeName, statsDate, flowsCnt, flowsBytes, flowsPackets)) cur.connection.commit() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def new_statsbyhost_hour(self, host, statsDate, hourNum, cnt): """ 向表t_statsbyhost_hour中插入一条数据; 如果数据已存在,则修改 """ cur = self._getCursor() if cur == None: return ins = "insert into t_statsbyhost_hour(host, statsdate, hournum, statscnt) values(%s, %s, %s, %s);" upd = "update t_statsbyhost_hour set statscnt = %s where host = %s and statsdate = %s and hournum = %s;" sel = "select statscnt from t_statsbyhost_hour where host = %s and statsdate = %s and hournum = %s;" try: cur.execute(sel, (host, statsDate, hourNum)) rst = cur.fetchall() if len(rst) > 0: cur.execute(upd, (cnt, host, statsDate, hourNum)) else: cur.execute(ins, (host, statsDate, hourNum, cnt)) cur.connection.commit() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def taskFunc(schedid, searchcond, startime, endtime, warnornot, warncondop, warncondval, warnlevel, saveresult):
    """Body of a scheduled search task.

    Counts ES documents matching searchcond in the resolved time window,
    records the result in t_schedresult, and raises a warning row in
    t_warn when the configured threshold condition is met. Only runs on
    the master node.
    """
    # Skip on non-master nodes (postdata is a module-level handle).
    if postdata != None and postdata.is_master() == False:
        return
    execTime = datetime.datetime.now()
    #print 'Task start time: %s' % execTime.strftime( '%Y-%m-%d %H:%M:%S' )
    # startime/endtime are relative-time strings; resolve them to
    # absolute datetimes anchored at the task execution time.
    searchstart = scheduletime.fmtRelTime(startime, execTime)
    #print 'Condition start time: %s' % searchstart.strftime( '%Y-%m-%d %H:%M:%S' )
    searchend = scheduletime.fmtRelTime(endtime, execTime)
    #print 'Conditon end time: %s' % searchend.strftime( '%Y-%m-%d %H:%M:%S' )
    try:
        es = esdata.ESData(configFile)
        # Count the documents matching the search condition.
        cnt = es.Count(searchcond, searchstart, searchend)
        #print 'Count of search: %s' % cnt
        if saveresult > 0:
            #lst = es.Search( searchcond, searchstart, searchend, fields = DEFAULTFIELDS, size = saveresult )
            lst = es.Search(searchcond, searchstart, searchend, fields=None, size=saveresult)
            #print 'Length of search list: %s' % len( lst )
        # Insert one row into t_schedresult and get the new resultid.
        resultid = postdata.newSchedResult(schedid, execTime, searchcond, searchstart, searchend, cnt, saveresult)
        warnid = 0
        if warnornot.upper() != 'N':
            # Fire a warning when the count satisfies the configured
            # comparison (warncondop warncondval).
            if ( warncondop == '>' and cnt > warncondval ) or \
               ( warncondop == '>=' and cnt >= warncondval ) or \
               ( warncondop == '<=' and cnt <= warncondval ) or \
               ( warncondop == '<' and cnt < warncondval ) or \
               ( warncondop == '=' and cnt == warncondval ):
                # Insert warning data into t_warn and get the new warnid.
                warnid = postdata.newWarn(schedid, execTime, warnlevel, searchcond, searchstart, searchend, cnt)
        # Persist the sampled documents into t_resultdetail.
        if saveresult > 0 and len(lst) > 0:
            postdata.newResultDetail(warnid, resultid, lst)
    except Exception, e:
        conf = sys_config.SysConfig(configFile)
        _logFile = conf.getConfig("tasksched", "logFile")
        _instance = conf.getConfig("tasksched", "instanceName")
        sys_log.SysLog(_logFile, _instance).writeLog("error", str(traceback.format_exc()))
def newResultDetail( self, warnID, resultID, lstDetail ): """ 向t_resultdetail中插入数据 """ cur = self._getCursor() if cur == None: return try: sql = "INSERT INTO t_resultdetail(warnid, resultid, idxname, typename, msgid, message) " sql += "VALUES(%s, %s, %s, %s, %s, %s);" for rst in lstDetail: idxName = rst["_index"] typeName = rst["_type"] msgID = rst["_id"] #msg = rst["fields"][FIELDMSG] msg = rst["_source"][FIELDMSG] cur.execute( sql, (warnID, resultID, idxName, typeName, msgID, msg) ) cur.connection.commit() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def new_stats_hour(self, statsDate, hourNum, logsCnt, flowsCnt, flowsBytes, flowsPackets): """ 向表t_stats_hour中插入一条数据; 如果statsDate日期对应小时的数据已存在,则修改 """ cur = self._getCursor() if cur == None: return ins = "insert into t_stats_hour(statsdate, hournum, logscnt, flowscnt, flowsbytes, flowspackets) values(%s, %s, %s, %s, %s, %s);" upd = "update t_stats_hour set logscnt = %s, flowscnt = %s, flowsbytes = %s, flowspackets = %s where statsdate = %s and hournum = %s;" sel = "select logscnt, flowscnt, flowsbytes, flowspackets from t_stats_hour where statsdate = %s and hournum = %s;" try: cur.execute(sel, (statsDate, hourNum)) rst = cur.fetchall() if len(rst) > 0: cur.execute(upd, (logsCnt, flowsCnt, flowsBytes, flowsPackets, statsDate, hourNum)) else: cur.execute(ins, (statsDate, hourNum, logsCnt, flowsCnt, flowsBytes, flowsPackets)) cur.connection.commit() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def funcHour(pd):
    """Task executed at the 5th minute after every hour.

    Computes the statistics for the previous hour, then walks backwards
    hour by hour to backfill any hours whose indices exist but whose
    statistics are missing. Only runs on the master node.
    """
    try:
        # Skip on non-master nodes.
        if pd != None and pd.is_master() == False:
            return
        execTime = datetime.datetime.now()
        # Statistics target is the previous hour.
        cDateTime = execTime + datetime.timedelta(hours = -1)
        cDate = cDateTime.date()
        hourNum = cDateTime.hour
        lstHost = postdata.get_host_list_day(cDate)
        lstGroup = postdata.get_group_list_day(cDate)
        lstDn = postdata.get_dn_list_day(cDate)
        es = estats.ESData(configFile)
        # Get the list of flow-collection nodes for the day.
        lstNode = es.Get_node_list_day(cDate)
        stats_hour(es, cDate, hourNum)
        if es.Exists(cDate, idxType = "logs"):  # does the logs index exist?
            statsbyhost_hour(es, lstHost, cDate, hourNum)
            statsbygroup_hour(es, lstGroup, cDate, hourNum)
            statsbydn_hour(es, lstDn, cDate, hourNum)
        if es.Exists(cDate, idxType = "flows"):  # does the flows index exist?
            # Per-flow-collection-node statistics.
            statsbynode_hour(es, lstNode, cDate, hourNum)
        # Walk backwards one hour at a time, checking whether earlier
        # hours were already counted; stop when the index no longer
        # exists or the statistics are already present.
        dDateTime = cDateTime + datetime.timedelta(hours = -1)
        dDate = dDateTime.date()
        hNum = dDateTime.hour
        while (es.Exists(dDate, idxType = "logs") or es.Exists(dDate, idxType = "flows")) \
            and postdata.is_stats_data(dDate, hNum + 1) == False:
            # Refresh the per-day lists only when the day changes.
            if dDate != cDate:
                lstHost = postdata.get_host_list_day(dDate)
                lstGroup = postdata.get_group_list_day(dDate)
                lstDn = postdata.get_dn_list_day(dDate)
                lstNode = es.Get_node_list_day(dDate)
                cDate = dDate
            stats_hour(es, dDate, hNum)
            statsbyhost_hour(es, lstHost, dDate, hNum)
            statsbygroup_hour(es, lstGroup, dDate, hNum)
            statsbydn_hour(es, lstDn, dDate, hNum)
            statsbynode_hour(es, lstNode, dDate, hNum)
            dDateTime = dDateTime + datetime.timedelta(hours = -1)
            dDate = dDateTime.date()
            hNum = dDateTime.hour
    except Exception, e:
        sys_log.SysLog(postdata._logFile, postdata._instance).writeLog("error", str(traceback.format_exc()))
def _getCursor( self): """ 获取postgresql数据库的游标 """ cursor = None try: cursor = db_manager.PostgreDBManager( self._dbuser, self._dbpwd, \ self._dbhost, self._dbport, self._dbname ).getCursor() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def Sum(self, startTime, endTime, sumfield, searchCond=None, idxType="logs"): """ 在时间范围[startTime, endTime]内,统计某个数字型字段的和 注意:此函数用作统计,起始时间和截止时间应该在同一天 sumfield:必须为数字型字段 """ if startTime == endTime: endTime = endTime + datetime.timedelta(seconds=1) ret = 0 try: fldTime = FIELDTIME if idxType == "flows": fldTime = FLOWS_FIELDTIME lstIndex = self._indexList(startTime, endTime, idxType=idxType) if lstIndex == None or len(lstIndex) == 0: return ret # StringQuery查询 if searchCond == None or len(searchCond) == 0: searchCond = "*" qry = pyes.query.QueryStringQuery(searchCond, default_field="_all", default_operator="AND") # RangeFilter过滤 ftr = pyes.filters.RangeFilter(pyes.utils.ESRange(fldTime, from_value=sys_timezone.toLocalDatetime(startTime), \ to_value=sys_timezone.toLocalDatetime(endTime), include_upper=False)) # FilteredQuery查询 query = pyes.query.FilteredQuery(qry, ftr) # Search search = query.search() # Sum aggregation sumagg = pyes.aggs.SumAgg(name="tmpagg", field=sumfield) search.agg.add(sumagg) # 连接ES es = pyes.ES(self._url) # 计数 for index in lstIndex: if es.indices.exists_index(index): result = es.search(search, size=0, indices=index) ret += int(result.aggs.tmpagg.value) except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def clearDataExtraction( self ): """ 清空表t_dbnew、t_dbupd、t_dbdel """ cur = self._getCursor() if cur == None: return try: cur.execute( "truncate table t_dbnew;" ) cur.execute( "truncate table t_dbupd;" ) cur.execute( "truncate table t_dbdel;" ) cur.connection.commit() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def expireschedule( self, schedid ): """ 将计划任务设置为到期 """ cur = self._getCursor() if cur == None: return try: sql = "update t_schedule set schedstatus = 2 where schedid = %s;" cur.execute( sql, (schedid,) ) cur.connection.commit() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def getDelDataExtraction( self ): """ 从表t_dbdel中读取删除的数据库数据提取任务 """ listDelTask = None cur = self._getCursor() if cur == None: return listDelTask try: sql = "select dbid from t_dbdel;" cur.execute( sql ) listDelTask = cur.fetchall() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def getGroupList( self ): """ 获取组名列表 """ listGroup = None cur = self._getCursor() if cur == None: return listGroup try: sql = "select groupname from t_confgroup;" cur.execute( sql ) listGroup = cur.fetchall() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def getDbFieldList( self, dbid ): """ 获取从数据库提取数据的字段列表 """ listField = None cur = self._getCursor() if cur == None: return listField try: sql = "select fldid, fldsrc, fldout from t_dbfields where dbid = %s order by fldid;" cur.execute( sql, (dbid, ) ) listField = cur.fetchall() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def setschedulestatus(self, pfid, status): """ 设置提取文件任务的状态 status: 1 完成 2 失效 到期 """ cur = self._getCursor() if cur == None: return try: sql = "update t_pullfile set pfstatus = %s, lastmoditime = now() where pfid = %s;" cur.execute(sql, (status, pfid)) cur.connection.commit() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def getConnInfo( self, conname ): """ 获取数据库连接信息 """ info = None cur = self._getCursor() if cur == None: return info try: sql = "select conname, conntype, hostname, port, dbname, username, userpass, usepooling" sql += " from t_connection where conname = %s;" cur.execute( sql, (conname, ) ) info = cur.fetchone() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def getGroupName( self, groupid ): """ 获取组名 """ groupName = groupid cur = self._getCursor() if cur == None: return groupName try: sql = "select groupname from t_confgroup where groupid = %s;" cur.execute( sql, (groupid,) ) row = cur.fetchone() groupName = row["groupname"] except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def updCurPosition( self, dbid, curpos, curpos_stime, curpos_etime ): """ 设置已提取数据的位置 """ cur = self._getCursor() if cur == None: return try: sql = "update t_database set curpos = %s, curpos_stime = %s, curpos_etime = %s where dbid = %s;" if curpos_stime is None: cur.execute(sql, (str(curpos), None, None, dbid)) else: cur.execute(sql, (str(curpos), curpos_stime, curpos_etime, dbid)) cur.connection.commit() except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog("error", str(traceback.format_exc()))
def bulkInsData(self, lstData, incType, timeFld, lstFlds, incrFld, tbName, idxName):
    """Bulk-index extracted database rows into ES.

    lstFlds rows carry the keys dbid, fldsrc, fldout. Every document
    written to ES must include the field: tbname (the source table name).
    lstData is a list of tuples aligned positionally with lstFlds.
    """
    fldLen = len(lstFlds)
    try:
        # Connect to ES with buffered bulk indexing.
        es = pyes.ES(self._url, bulk_size=BULKSIZE)
        idxName = LOGPREFIX + idxName
        # Doc type defaults to the table name.
        typeName = tbName
        for dt in lstData:
            # dt is a tuple of column values (not a dict like lstFlds).
            docid = None
            data = {
                "tbname": tbName,
                "lastmoditime___": datetime.datetime.now()
            }
            for i in xrange(fldLen):
                # Null values are not imported.
                if dt[i] is not None:
                    if isinstance(dt[i], datetime.time):
                        # time has no direct ES mapping; store as text.
                        data[lstFlds[i]["fldsrc"]] = ( dt[i]).strftime("%H:%M:%S")
                    else:
                        data[lstFlds[i]["fldsrc"]] = dt[i]
                    # The incremental column doubles as the document id.
                    if (lstFlds[i]["fldsrc"]).lower() == incrFld.lower():
                        docid = dt[i]
                    # incType 2: take the modification time from the row.
                    if incType == 2 and (lstFlds[i]["fldsrc"]
                                         ).lower() == timeFld.lower():
                        data["lastmoditime___"] = dt[i]
            #print data
            #print idxName, '=======', data
            es.index(data, idxName, typeName, docid, bulk=True)
        #es.refresh()
        # Flush any buffered bulk operations.
        es.force_bulk()
    except Exception, e:
        #print '--------',e
        sys_log.SysLog(self._logFile, self._instance).writeLog(
            "error", str(traceback.format_exc()))
def getTask( self ): """ 从表t_schedule中读取活动的计划任务列表(schedstatus=0) """ listTask = None cur = self._getCursor() if cur == None: return listTask try: sql = "select schedid, searchcond, searchstart, searchend, schedstart, schedend," sql += " schedtime, schedcron, warnornot, warncondop, warncondval, warnlevel, saveresult" sql += " from t_schedule where schedstatus = 0 order by schedid;" cur.execute( sql ) listTask = cur.fetchall() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def Count(self, startTime, endTime, searchCond=None, idxType="logs"): """ 在时间范围[startTime, endTime]内,获取满足条件searchCond的文档数 注意:此函数用作统计,起始时间和截止时间应该在同一天 """ if startTime == endTime: endTime = endTime + datetime.timedelta(seconds=1) cnt = 0 try: fldTime = FIELDTIME if idxType == "flows": fldTime = FLOWS_FIELDTIME lstIndex = self._indexList(startTime, endTime, idxType=idxType) if lstIndex == None or len(lstIndex) == 0: return cnt # StringQuery查询 if searchCond == None or len(searchCond) == 0: searchCond = "*" #qry = pyes.query.StringQuery(searchCond, default_field="_all", default_operator="AND") qry = pyes.query.QueryStringQuery(searchCond, default_field="_all", default_operator="AND") # RangeFilter过滤 ftr = pyes.filters.RangeFilter(pyes.utils.ESRange(fldTime, from_value=sys_timezone.toLocalDatetime(startTime), \ to_value=sys_timezone.toLocalDatetime(endTime), include_upper=False)) # FilteredQuery查询 query = pyes.query.FilteredQuery(qry, ftr) search = pyes.query.Search(query=query, start=0, size=0, fields=[]) # 连接ES es = pyes.ES(self._url) # 计数 for index in lstIndex: if es.indices.exists_index(index): #cnt += es.count(query=query, indices=index)['count'] cnt += es.search(search, indices=index).total except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def newSchedResult( self, schedID, schedExecTime, searchCond, searchStart, searchEnd, resultCnt, savedCnt ): """ 向t_schedresult中插入一条数据,并返回resultid,没插入成功返回0 """ rid = 0 cur = self._getCursor() if cur == None: return rid try: sql = "INSERT INTO t_schedresult(schedid, schedexectime, searchcond, searchstart, searchend, resultcnt, savedcnt) " sql += "VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING resultid;" cur.execute( sql, (schedID, schedExecTime, searchCond, searchStart, searchEnd, resultCnt, savedCnt) ) itemSchedResult = cur.fetchone() rid = int( itemSchedResult['resultid'] ) cur.connection.commit() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def get_retain(self, idxType="logs"): """ 获取索引数据保留期限 """ ret = 0 cur = self._getCursor() if cur == None: return ret sql = "select cast(svalue as integer) retention from t_settings where skey = '" sql = sql + idxType + "_retain' limit 1;" try: cur.execute(sql) lst = cur.fetchone() if lst != None: ret = lst["retention"] except Exception, e: sys_log.SysLog(self._logFile, self._instance).writeLog( "error", str(traceback.format_exc()))
def saveDbFieldList(self, dbid, lstFlds): """ 存储字段列表 """ cur = self._getCursor() try: sql = "select * from t_dbfields where dbid = %s" cur.execute(sql, (dbid, )) rs = cur.fetchall() # 只导入一次(视情况修改) if len(rs) == 0: for fld in lstFlds: sql = "insert into t_dbfields(dbid, fldsrc, fldout) values(%s, %s, %s)" cur.execute(sql, (dbid, fld["fldsrc"], fld["fldout"])) cur.connection.commit() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )
def newWarn( self, schedID, schedExecTime, warnLevel, searchCond, searchStart, searchEnd, resultCnt ): """ 向t_warn中插入一条数据,并返回warnid,没插入成功返回0 """ wid = 0 cur = self._getCursor() if cur == None: return wid try: sql = "INSERT INTO t_warn(schedid, schedexectime, warnlevel, searchcond, searchstart, searchend, resultcnt) " sql += "VALUES(%s, %s, %s, %s, %s, %s, %s) RETURNING warnid;" cur.execute( sql, (schedID, schedExecTime, warnLevel, searchCond, searchStart, searchEnd, resultCnt) ) itemWarn = cur.fetchone() wid = int( itemWarn['warnid'] ) cur.connection.commit() except Exception, e: sys_log.SysLog( self._logFile, self._instance ).writeLog( "error", str(traceback.format_exc()) )