def mainProc():
    """Scan Conalog passive collectors and import their log configuration.

    Connects to the Conalog MongoDB, iterates every collector document in
    the 'passive' category, looks up the matching cert document (the
    collector's 'host' field holds the cert ObjectId), and hands each
    (collector, cert) pair to gatherLogInfo() for import.
    """
    conn = cAPI.mongoConn(cfg.conalogMongo)
    try:
        dbConalog = conn[cfg.conalogMongo['dbName']]
        collectorColl = dbConalog[cfg.conalogMongo['collectorCollName']]
        certColl = dbConalog[cfg.conalogMongo['certCollName']]

        # BUGFIX: a pymongo cursor is always truthy, so the original
        # "if collectors:" could never reach its else-branch and the
        # "no collector" message was never logged.  Count processed
        # documents instead so the empty case is actually reported.
        processedCnt = 0
        for collector in collectorColl.find({'category': 'passive'}):
            # The collector's 'host' field stores the ObjectId of its cert.
            cert = certColl.find_one({'_id': ObjectId(collector['host'])})
            if cert:
                gatherLogInfo(collector, dbConalog, cert)
            processedCnt += 1
        if not processedCnt:
            log.info('No collector config found in Conalog DB.')
    finally:
        # Close the connection even if gatherLogInfo raises mid-loop
        # (the original leaked the connection on error).
        conn.close()
result = updateJobRunningSetting(jobRunningSettingColl, -1) # TODO: check result, if success, result.matched_count=1, result.modified_count=1 return 0 if __name__=="__main__" : rtnCode = 0 rltCode = 0 # newJob = cfg.job1 homeSys = cfg.logManPy # contact DB, get new job conn = cAPI.mongoConn(cfg.logManPyMongo) tmpDBName = cfg.logManPyMongo['dbName'] logManPyDB = conn[tmpDBName] tmpCollName = cfg.logManPyMongo['logJobInfoCollName'] jobColl = logManPyDB[tmpCollName] tmpCollName = cfg.logManPyMongo['logJobSumCollName'] jobSumColl = logManPyDB[tmpCollName] tmpCollName = cfg.logManPyMongo['logJobRunningSettingCollName'] jobRunningSettingColl = logManPyDB[tmpCollName] date4BkpStr = cAPI.getDate4BackupStr() newJob = jobColl.find_one({'logDate4BackupInStr': date4BkpStr,
def mainProc(homeSys):
    """Generate one log-backup job per available logInfo for yesterday's logs.

    For every logInfo document in state 'available', joins in its sysInfo and
    cert documents, composes a backup-job document (target directory derived
    from the backup date, system abbreviation, host and log abbreviation) and
    inserts it, then records a per-day summary document.  Skips everything if
    today's summary already exists.

    Args:
        homeSys: dict with at least 'backupRootDir' and 'backupDirName',
            used to build each job's backup target directory.

    Returns:
        0 on completion.
    """
    conn = cAPI.mongoConn(cfg.logManPyMongo)
    logManPyDB = conn[cfg.logManPyMongo['dbName']]
    sysInfoColl = logManPyDB[cfg.logManPyMongo['sysInfoCollName']]
    logInfoColl = logManPyDB[cfg.logManPyMongo['logInfoCollName']]
    logJobColl = logManPyDB[cfg.logManPyMongo['logJobInfoCollName']]
    certColl = logManPyDB[cfg.logManPyMongo['certCollName']]
    jobSumColl = logManPyDB[cfg.logManPyMongo['logJobSumCollName']]

    # Which day's logs to back up: today we back up yesterday's logs.
    date4Bkp = cAPI.getDate4Backup()
    date4BkpStr = cAPI.getDate4BackupStr()

    # Skip everything if today's jobs were already generated.
    if jobSumColl.find_one({'logDate4BackupInStr': date4BkpStr}):
        log.info('[InfoDesc:today backup jobs have already been generated]')
        conn.close()
        return 0

    jobCnt = 0
    for logInfo in logInfoColl.find({'state': 'available'}):
        sysInfo = sysInfoColl.find_one({'_id': ObjectId(logInfo['sysID'])})
        certInfo = certColl.find_one({'_id': ObjectId(logInfo['certID'])})
        logInfo['cert'] = certInfo

        # BUGFIX: build a fresh job dict per iteration (the original reused
        # one dict, so insert_one() stamped an _id on it and every later
        # insert collided), and pre-create the nested dicts (the original
        # assigned logJob['logBackupSaveInfo'][...] and
        # logJob['jobStatus'][...] without ever creating those sub-dicts,
        # which raises KeyError on first use).
        logJob = {
            'sysInfo': sysInfo,
            'logInfo': logInfo,
            'logDate4Backup': date4Bkp,
            'logDate4BackupInStr': date4BkpStr,
            'logBackupSaveInfo': {
                'logSaveType': 'normal',
                # Backup path layout:
                # backupRootDir/backupDirName/abbr/YYYY/MM/DD/hostIP/logAbbr
                # NOTE(review): os.sep assumes both the logManPy host and the
                # target system are Linux; checking sysInfo['sysOS'] would be
                # safer -- TODO confirm.
                'logSaveBaseDir': os.sep.join([
                    homeSys['backupRootDir'],
                    homeSys['backupDirName'],
                    sysInfo['sysAbbr'],
                    str(date4Bkp.year),
                    str(date4Bkp.month).zfill(2),
                    str(date4Bkp.day).zfill(2),
                    certInfo['host'],
                    logInfo['logAbbr'],
                ]),
            },
            'jobStatus': {
                'createTime': datetime.now(),
                'state': 'ready',
            },
        }
        logJobColl.insert_one(logJob)
        jobCnt += 1

    if jobCnt:
        # BUGFIX: Cursor.count() is deprecated/removed in modern pymongo and
        # the original also called it only after exhausting the cursor;
        # count the jobs as they are generated instead.  The statInfo
        # sub-dict is created explicitly (the original assigned into
        # jobSum['statInfo'] without initializing it -> KeyError).
        jobSum = {
            'logDate4BackupInStr': date4BkpStr,
            'allJobGenerated': True,
            'allJobDone': False,
            'statInfo': {
                'jobTotalCnt': jobCnt,
                'jobInReadyCnt': jobCnt,
                'jobInStartedCnt': 0,
                'jobInFinishCnt': 0,
                'jobInErrorCnt': 0,
            },
        }
        jobSumColl.insert_one(jobSum)
    else:
        # No available logInfo item for generating jobs today.
        log.info('[InfoDesc:no available logInfo item for generating job]')

    conn.close()
    return 0
def mainProc():
    """Launch beeBumble.py once per ready backup job for today's backup date.

    Confirms today's jobs were generated (summary doc with allJobGenerated),
    then walks every job in state 'ready'.  Before each launch it re-reads
    the single jobRunningSetting document and only starts a new worker while
    jobRunningCnt is below jobThreadCntLmt; otherwise it sleeps 60s and moves
    on to the next job.

    Exits the process with code -3001 if no jobRunningSetting document exists.
    """
    conn = cAPI.mongoConn(cfg.logManPyMongo)
    logManPyDB = conn[cfg.logManPyMongo['dbName']]
    logJobSumColl = logManPyDB[cfg.logManPyMongo['logJobSumCollName']]
    logJobColl = logManPyDB[cfg.logManPyMongo['logJobInfoCollName']]
    logJobRunningSettingColl = logManPyDB[
        cfg.logManPyMongo['logJobRunningSettingCollName']]

    date4Bkp = cAPI.getDate4Backup()
    date4BkpStr = cAPI.getDate4BackupStr()

    # Bail out early if today's backup jobs were never generated.
    logJobSum = logJobSumColl.find_one({'logDate4BackupInStr': date4BkpStr,
                                        'allJobGenerated': True})
    if not logJobSum:
        log.info('[InfoDesc:log backup job has not been generated]')
        conn.close()
        return

    # BUGFIX: a pymongo cursor is always truthy, so the original
    # "if jobList:" could never report the no-ready-jobs case; count the
    # jobs actually seen instead.
    jobCnt = 0
    for job in logJobColl.find({'logDate4BackupInStr': date4BkpStr,
                                'jobStatus.state': 'ready'}):
        jobCnt += 1
        # Re-read the running setting before every launch so the limit
        # reflects workers started meanwhile.
        jobRunningSetting = logJobRunningSettingColl.find_one()
        if not jobRunningSetting:
            log.error('[ErrorDesc:no jobRunningSetting in MongoDB]')
            conn.close()
            sys.exit(-3001)
        if jobRunningSetting['jobRunningCnt'] < jobRunningSetting['jobThreadCntLmt']:
            # TODO: later chg to subprocess.Popen or use PM2 to start
            rltCode = subprocess.call(['python', 'beeBumble.py'])
            # BUGFIX: subprocess.call() returns 0 on success; the original
            # tested "if not rltCode" and therefore logged this error on
            # every SUCCESSFUL run and stayed silent on real failures.
            if rltCode:
                log.error('[ErrorDesc:beeBumble job exec met error]')
        else:
            # Enough jobs already running -- wait before the next attempt.
            time.sleep(60)

    if not jobCnt:
        log.info('[InfoDesc:No backup job ready for processing]')
    conn.close()
def gatherLogInfo(collector, dbConalog, cert):
    """Convert one Conalog collector into sysInfo/cert/logInfo documents.

    Derives the system abbreviation from the leading ASCII letters of the
    collector name, guesses the OS from the collector type/command, builds a
    skeleton logInfo record (state 'maintain', to be completed manually), and
    inserts sysInfo, cert and logInfo into the logManPy MongoDB, wiring the
    generated _ids back into logInfo.

    Args:
        collector: Conalog collector document; reads 'name', 'type', 'param'
            and (optionally) 'cmd'.
        dbConalog: Conalog database handle (currently unused here).
        cert: cert document copied into the logManPy cert collection.

    Returns:
        0 on success, -2011 if no abbreviation can be parsed from the name.
    """
    collectorName = collector['name']
    # The system abbreviation is the leading run of ASCII letters of the
    # collector name.
    sysAbbr = re.match(r'(^[A-Za-z]+)', collectorName)
    if not sysAbbr:
        log.error(
            '[ErrorDesc:Cannot parse out system Abbr from collector name, which the name is %s]'
            % (collectorName))
        return -2011

    sysInfo = {}
    sysInfo['sysAbbr'] = sysAbbr.group()
    sysInfo['sysName'] = sysAbbr.group()
    # Heuristic OS guess: FileTail collectors / 'tail -F' commands are taken
    # as Linux, everything else as AIX.
    # BUGFIX: use .get('cmd', '') so collectors without a 'cmd' field do not
    # raise KeyError when the type is not 'FileTail'.
    if (collector['type'] == 'FileTail'
            or collector.get('cmd', '').replace(' ', '') == 'tail-F'):
        sysInfo['sysOS'] = 'Linux'
    else:
        sysInfo['sysOS'] = 'AIX'

    logInfo = {
        'logName': collectorName,
        'logAbbr': collectorName,
        'hostName': collectorName,
        'logFormatType': 'text',
        # Temporarily marked as imported from Conalog; needs manual
        # configuration later.
        'logDescOfProduceMethod': 'Get from Conalog',
        'logTypeOfProduceMethod': 'getFromConalog',
        # Just created -- record stays in 'maintain' until updated manually.
        'state': 'maintain',
        'logSaveZipPassword': '******',
    }
    # collector['param'] holds the watched path; split into dir + file filter.
    (logInfo['logDir'],
     logInfo['logFileFilterStr']) = os.path.split(collector['param'])

    # Persist sysInfo, cert and logInfo, wiring the generated ids together.
    logManPyConn = cAPI.mongoConn(cfg.logManPyMongo)
    try:
        dbLogManPy = logManPyConn[cfg.logManPyMongo['dbName']]
        sysInfoColl = dbLogManPy[cfg.logManPyMongo['sysInfoCollName']]
        logInfo['sysID'] = sysInfoColl.insert_one(sysInfo).inserted_id
        certColl = dbLogManPy[cfg.logManPyMongo['certCollName']]
        # NOTE(review): cert keeps its original _id, so importing the same
        # collector twice raises a duplicate-key error here -- TODO confirm
        # whether an upsert (replace_one with upsert=True) is intended.
        logInfo['certID'] = certColl.insert_one(cert).inserted_id
        logInfoColl = dbLogManPy[cfg.logManPyMongo['logInfoCollName']]
        logInfoColl.insert_one(logInfo)
    finally:
        # The original leaked the connection if any insert raised.
        logManPyConn.close()
    return 0