def __init__(self, baseurl, ssl_ctx, retries=1, connection_debug=False):
    """
    baseurl - root URL for all pilot requests
    ssl_ctx - an initialized SSL.Context with all the certificates, etc.
    retries - number of times to retry a request in case of failure
    """
    self.baseurl = baseurl
    self.retries = retries
    self._cli_version = None
    self.ssl_ctx = ssl_ctx
    # FIXME: timeout must be configurable
    self.http = http.HTTP(baseurl, ssl_context=ssl_ctx, retries=retries,
                          timeout=30, connection_debug=connection_debug)
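# A minimal usage sketch for the constructor above (hedged: the enclosing
# class name is not shown in this excerpt, so `PilotClient` is a
# hypothetical stand-in, and the SSL.Context setup assumes an
# M2Crypto-style API; the URL and file paths are made up).
from M2Crypto import SSL

ctx = SSL.Context()
ctx.load_cert(certfile='usercert.pem', keyfile='userkey.pem')  # hypothetical paths
client = PilotClient('https://cms-conddb.example/pilot', ctx, retries=3,
                     connection_debug=True)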
def testRun(self):
    tstConfig = config.test()

    # Override baseUrl to use the private VM
    tstConfig.baseUrl = doUpload.frontendUrlTemplate % doUpload.frontendHost

    (username, account, password) = netrc.netrc().authenticators('newOffDb')

    frontendHttp = http.HTTP()
    frontendHttp.setBaseUrl(tstConfig.baseUrl)

    folders = os.listdir('testFiles')
    logging.info('Testing %s bunches...', len(folders))

    i = 0
    for folder in folders:
        i += 1
        loggingPrefix = ' [%s/%s] %s:' % (i, len(folders), folder)
        logging.info('%s Testing bunch...', loggingPrefix)

        logging.info('%s Signing in the frontend...', loggingPrefix)
        frontendHttp.query('signIn', {
            'username': username,
            'password': password,
        })

        # First ask also to hold the files until we have uploaded all
        # the folder to prevent the backend from taking them in-between.
        logging.info('%s Asking the frontend to hold files...', loggingPrefix)
        frontendHttp.query('holdFiles')

        # Wait until the dropBox has nothing to do
        logging.info('%s Waiting for backend to be idle...', loggingPrefix)
        while dataAccess.getLatestRunLogStatusCode() != Constants.NOTHING_TO_DO:
            time.sleep(2)

        # When we reach this point, the server will always report an empty
        # list of files, so even if it starts a new run right now, we can
        # safely manipulate the list of files. Therefore, ask the frontend
        # to do a clean up to delete previous files and database entries
        logging.info('%s Asking the frontend to clean up files and database...', loggingPrefix)
        frontendHttp.query('cleanUp')

        # Upload all the test files in the folder
        logging.info('%s Uploading files...', loggingPrefix)
        self.upload(folder, loggingPrefix=loggingPrefix)

        # And finally release the files so that the backend can take them
        logging.info('%s Asking the frontend to release files...', loggingPrefix)
        frontendHttp.query('releaseFiles')

        logging.info('%s Signing out the frontend...', loggingPrefix)
        frontendHttp.query('signOut')

        # The backend will process the files eventually, so wait for
        # a finished status code
        logging.info('%s Waiting for backend to process files...', loggingPrefix)
        while True:
            statusCode = dataAccess.getLatestRunLogStatusCode()
            if statusCode in frozenset([Constants.DONE_WITH_ERRORS, Constants.DONE_ALL_OK]):
                break
            time.sleep(2)

        # First compare the runLog's statusCode
        logging.info('%s Comparing runLog results...', loggingPrefix)
        with open(os.path.join('testFiles', folder, 'statusCode'), 'rb') as f:
            self.assertEqual(statusCode, getattr(Constants, f.read().strip()))

        # Then compare the runLog's logs
        (creationTimestamp, downloadLog, globalLog) = dataAccess.getLatestRunLogInfo()
        downloadLog = logPack.unpack(downloadLog)
        globalLog = logPack.unpack(globalLog)
        logging.debug('downloadLog = %s', downloadLog)
        logging.debug('globalLog = %s', globalLog)

        with open(os.path.join('testFiles', folder, 'downloadLog'), 'rb') as f:
            templateMatch.match(f.read(), downloadLog)

        with open(os.path.join('testFiles', folder, 'globalLog'), 'rb') as f:
            templateMatch.match(f.read(), globalLog)

        tests = [x.partition('.txt')[0] for x in glob.glob(os.path.join('testFiles', folder, '*.txt'))]
        logging.info('%s Comparing %s fileLogs results...', loggingPrefix, len(tests))

        # Then for each file in the test, compare the fileLog's foreign key,
        # statusCode and log
        j = 0
        for test in tests:
            j += 1
            logging.info('%s [%s/%s] %s: Comparing file...', loggingPrefix, j, len(tests), os.path.basename(test))

            # Get the expected file hash
            with open('%s.fileHash' % test, 'rb') as f:
                fileHash = f.read().strip()

            (fileStatusCode, fileLog, runLogCreationTimestamp) = dataAccess.getFileLogInfo(fileHash)

            # Compare the foreign key
            self.assertEqual(creationTimestamp, runLogCreationTimestamp)

            # Compare the statusCode
            with open('%s.statusCode' % test, 'rb') as f:
                self.assertEqual(fileStatusCode, getattr(Constants, f.read().strip()))

            fileLog = logPack.unpack(fileLog)

            # Compare the fileLog
            with open('%s.fileLog' % test, 'rb') as f:
                templateMatch.match(f.read(), fileLog)
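# The two polling loops in testRun() spin indefinitely if the backend never
# reaches the expected status. A small helper like the sketch below
# (hypothetical, not part of the test suite) would bound the wait with a
# timeout while keeping the same poll-and-sleep pattern:
import time

def waitForStatus(getStatus, expectedCodes, timeoutSeconds=600, pollSeconds=2):
    '''Polls getStatus() until it returns one of expectedCodes;
    raises an Exception if timeoutSeconds elapse first.'''
    deadline = time.time() + timeoutSeconds
    while True:
        statusCode = getStatus()
        if statusCode in expectedCodes:
            return statusCode
        if time.time() >= deadline:
            raise Exception('Timed out waiting for one of %s' % repr(expectedCodes))
        time.sleep(pollSeconds)

# e.g.:
# statusCode = waitForStatus(dataAccess.getLatestRunLogStatusCode,
#                            frozenset([Constants.DONE_WITH_ERRORS,
#                                       Constants.DONE_ALL_OK]))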
def get_http_storage(location):
    import http
    return http.HTTP(location)
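# Usage sketch: the returned object is the same http.HTTP wrapper used
# throughout this codebase; the URL below is hypothetical. Note that the
# local `import http` defers the dependency until an HTTP storage is
# actually requested.
storage = get_http_storage('https://example.cern.ch/storage')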
def main():
    dropBoxRuns = calculateOldDropBoxRuns()

    with open('/afs/cern.ch/cms/DB/conddb/test/dropbox/replay/runInfo.json', 'rb') as f:
        runInfo = json.load(f)

    # Ask the frontend to clean up the files and database
    (username, account, password) = netrc.netrc().authenticators('newOffDb')

    frontendHttp = http.HTTP()
    frontendHttp.setBaseUrl(doUpload.frontendUrlTemplate % doUpload.frontendHost)

    logging.info('Signing in the frontend...')
    frontendHttp.query('signIn', {
        'username': username,
        'password': password,
    })

    logging.info('Asking the frontend to clean up files and database...')
    frontendHttp.query('cleanUp')

    logging.info('Signing out the frontend...')
    frontendHttp.query('signOut')

    logging.info('Removing files in the backend...')
    execute('rm -rf ../NewOfflineDropBoxBaseDir/TestDropBox/*/*')

    conf = config.replay()

    logging.info('Cleaning up backend database...')
    execute('cmscond_schema_manager -c %s -P %s --dropAll' % (conf.destinationDB, conf.authpath))

    logging.info('Setting up backend database...')
    execute('cmscond_export_database -s sqlite_file:%s -d %s -P %s' % (replayMasterDB, conf.destinationDB, conf.authpath), 'Y\n')

    dropBoxBE = Dropbox.Dropbox(conf)

    # Replay all the runs
    _fwLoad = conditionDatabase.condDB.FWIncantation()

    i = 0
    for runTimestamp in sorted(dropBoxRuns):
        i += 1
        (hltRun, fcsRun) = runInfo[runTimestamp.strftime('%Y-%m-%d %H:%M:%S,%f')[:-3]]
        logging.info('[%s/%s] %s: Replaying run with hltRun %s and fcsRun %s...', i, len(dropBoxRuns), runTimestamp, hltRun, fcsRun)

        j = 0
        for fileName in dropBoxRuns[runTimestamp]:
            j += 1
            logging.info(' [%s/%s] %s: Converting...', j, len(dropBoxRuns[runTimestamp]), fileName)

            tarFile = tarfile.open(os.path.join(dropBoxReplayFilesFolder, fileName))

            names = tarFile.getnames()
            if len(names) != 2:
                raise Exception('%s: Invalid number of files in tar file.' % fileName)

            baseFileName = names[0].rsplit('.', 1)[0]
            dbFileName = '%s.db' % baseFileName
            txtFileName = '%s.txt' % baseFileName
            if set([dbFileName, txtFileName]) != set(names):
                raise Exception('%s: Invalid file names in tar file.' % fileName)

            with open('/tmp/replayRequest.txt', 'wb') as f:
                f.write(metadata.port(tarFile.extractfile(txtFileName).read(), fileName))

            with open('/tmp/replayRequest.db', 'wb') as f:
                f.write(tarFile.extractfile(dbFileName).read())

            tarFile.close()

            logging.info(' [%s/%s] %s: Uploading...', j, len(dropBoxRuns[runTimestamp]), fileName)
            try:
                doUpload.upload('/tmp/replayRequest', 'private')
            except doUpload.UploadError as e:
                # If it is an error from the server (i.e. UploadError),
                # we can continue with the next files.
                # If it is another kind, we do not catch it since in that case
                # it is a real problem with the upload.py script.
                logging.info(' [%s/%s] %s: Upload error: %s', j, len(dropBoxRuns[runTimestamp]), fileName, str(e))

        dropBoxBE.reprocess(runTimestamp, hltRun, fcsRun)

        if runTimestamp in truncates:
            for runNumber in truncates[runTimestamp]:
                for tag in truncates[runTimestamp][runNumber]:
                    logging.info('[%s/%s] %s: Truncating up to %s tag %s...', i, len(dropBoxRuns), runTimestamp, runNumber, tag)

                    while True:
                        # FIXME: Why can't we instantiate the RDBMS once?
                        db = conditionDatabase.condDB.RDBMS(conf.authpath).getReadOnlyDB(conf.destinationDB)
                        iov = conditionDatabase.IOVChecker(db)
                        iov.load(tag)

                        lastSince = iov.lastSince()
                        if iov.timetype() == 'lumiid':
                            lastSince >>= 32
                        db.closeSession()

                        logging.info('[%s/%s] %s: lastSince now is %s...', i, len(dropBoxRuns), runTimestamp, lastSince)

                        if lastSince < runNumber:
                            break

                        execute('cmscond_truncate_iov -c %s -P %s -t %s' % (conf.destinationDB, conf.authpath, tag))
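# The `lastSince >>= 32` above relies on 'lumiid' IOVs packing the run
# number into the upper 32 bits of the 64-bit since value and the lumi
# section into the lower 32. A quick illustration of that assumption,
# with made-up numbers:
def packLumiid(run, lumi):
    return (run << 32) | lumi

since = packLumiid(190456, 17)
assert since >> 32 == 190456        # the shift recovers the run number
assert since & 0xFFFFFFFF == 17     # the lower 32 bits hold the lumi section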
def getCheckMKStatus():
    '''Returns the status of Check_MK as a dictionary.

    The keys are the check (service) names, i.e. those used in the global
    tree; and the values are tuples (status, message, state age, check age).

    e.g. output = {
        'OfflineDB_CMSR1_cpuLoad': (
            'OK',
            'OK - ioWait: 0.19 (OK), idle: 91.4 (OK), user: 7.53 (OK), system: 0.49 (OK),',
            '2013-03-01 15:05:48',
            '17 sec'
        ),
        'OfflineDB_CMSR2_cpuLoad': ...
        ...
    }

    Note:
      * Status is in ['OK', 'CRIT', 'WARN', 'UNKNOWN']. UNKNOWN is used for
        any case that is not OK, CRIT or WARN (this would include the
        PENDING and UNAVAILABLE Check_MK states).
      * The state age shows how long the service's state has not changed.
      * The check age shows the last time the service was checked.

    Could be cached for some seconds if the load proves too high for Check_MK.
    '''
    h = http.HTTP()
    h.setTimeout(5)
    h.setRetries([1])
    h.setUsernamePassword(service.secrets['username'], service.secrets['password'])
    data = h.query(dataUrl)

    # Bug in Check_MK: it does not return valid JSON in the header line:
    # single quotes must be converted to double ones
    lines = data.splitlines()
    lines[1] = lines[1].replace("'", '"')
    data = json.loads('\n'.join(lines))

    headers = data[0]
    nameIndex = headers.index('service_description')
    statusIndex = headers.index('service_state')
    statusMessageIndex = headers.index('svc_plugin_output')
    stateAgeIndex = headers.index('svc_state_age')
    checkAgeIndex = headers.index('svc_check_age')

    output = {}
    for row in data[1:]:
        status = row[statusIndex]
        if status not in ['OK', 'CRIT', 'WARN']:
            status = 'UNKNOWN'

        output[row[nameIndex]] = (
            status,
            row[statusMessageIndex],
            row[stateAgeIndex],
            row[checkAgeIndex],
        )

    return output
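# Usage sketch: listing the services that are not OK from the dictionary
# returned above (the output format is illustrative).
for name, (status, message, stateAge, checkAge) in sorted(getCheckMKStatus().items()):
    if status != 'OK':
        print('%s: %s since %s -- %s' % (name, status, stateAge, message))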
def getImage(self, name):
    if name not in zip(*images)[0]:
        raise cherrypy.NotFound()

    cherrypy.response.headers['Content-Type'] = 'image/png'
    return http.HTTP().query('http://popcon2vm:8081/snapshot/%s' % name)
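# The membership check above assumes a module-level `images` sequence of
# tuples whose first element is the image name; zip(*images)[0] collects
# those first elements (a Python 2 idiom; Python 3 would need
# list(zip(*images))[0]). A hypothetical example of the expected shape:
images = [
    ('popconActivity.png', 'PopCon activity'),
    ('dropBoxStatus.png', 'DropBox status'),
]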
def __init__(self, config):
    self.curl = http.HTTP()
    if config.proxy:
        self.curl.setProxy(config.proxy)
    self.curl.setRetries(config.retriesPyCurler)
    self.config = config
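# A minimal sketch of the config object this constructor expects: only the
# two attributes read above are required. The enclosing class name is not
# shown in this excerpt, so `PyCurler` is a hypothetical stand-in (guessed
# from the `retriesPyCurler` option name), and the proxy URL is made up.
class ReplayConfig(object):
    proxy = 'http://cmsproxy.example:3128'  # a falsy value disables the proxy
    retriesPyCurler = [1, 2, 4]             # same list form as setRetries([1]) above

curler = PyCurler(ReplayConfig())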