def copyFromContainerWithLibs(self, filePath, outFolderPath):
    """Copy a file out of the running container, then extract its library deps.

    Tries the path as given first; if docker-cp fails, retries with the file
    name prefixed by each directory on the container's $PATH.  Returns False
    when the file cannot be located, True when the copy produced a directory
    (which is deleted and skipped), otherwise delegates to
    extractLibsFromBinary for the copied binary.
    """
    if (not self.containerId):
        self.logger.error("Trying to copy file from non-running container! self.containerId: %s", self.containerId)
        return False
    originalCmd = "sudo docker {} cp -L {}:{} {}"
    cmd = originalCmd.format(self.remote, self.containerId, filePath, outFolderPath)
    returncode, out, err = util.runCommand(cmd)
    tempFilePath = filePath
    if (returncode != 0):
        self.logger.debug("Error copying from docker. Starting to check for file in environment paths. dockerId: %s, filePath: %s, outputFolderPath: %s Error message: %s", self.containerId, filePath, outFolderPath, err)
        # BUG FIX: the remote placeholder was missing here ("sudo docker exec"),
        # unlike every other method in this class, so the command ignored
        # self.remote and targeted the local daemon.
        cmd = "sudo docker {} exec -it {} echo $PATH"
        cmd = cmd.format(self.remote, self.containerId)
        returncode, envPaths, err = util.runCommand(cmd)
        if (returncode != 0):
            self.logger.debug("Error running echo PATH command on docker: %s, forfeiting file: %s", err, filePath)
            return False
        envPaths = envPaths.split(":")
        for envPath in envPaths:
            envPath = envPath.strip()
            filePath = envPath + "/" + tempFilePath
            cmd = originalCmd.format(self.remote, self.containerId, filePath, outFolderPath)
            returncode, out, err = util.runCommand(cmd)
            if (returncode == 0):
                tempFilePath = filePath
                break
        if (filePath != tempFilePath):  #Use it as an identifier of having been able to find file in one of the env. paths or not
            return False
    if (tempFilePath.strip() != "" and util.isFolder(outFolderPath + "/" + util.getNameWithExtFromPath(tempFilePath))):
        # The copy produced a directory rather than a file: discard it and
        # report success without attempting library extraction.
        folderPath = outFolderPath + "/" + util.getNameWithExtFromPath(tempFilePath)
        util.deleteFolder(folderPath, self.logger)
        return True
    return self.extractLibsFromBinary(filePath, outFolderPath)
def extractCronJobs(self, tempOutputFolder):
    """Collect processes launched from cron scripts under /etc/cron*.

    Copies every shell script found in the cron folders out of the container
    and extracts the process list from each.  Returns the aggregated list,
    or None when the container isn't running or $PATH cannot be read.
    """
    if (not self.containerId):
        self.logger.error("Trying to extract list of cron jobs from non-running container! self.containerId: %s", self.containerId)
        return None
    processList = []
    cmd = "sudo docker {} exec -it {} echo $PATH"
    cmd = cmd.format(self.remote, self.containerId)
    returncode, envPaths, err = util.runCommand(cmd)
    if (returncode != 0):
        self.logger.error("Error running echo PATH command on docker: %s", err)
        return None
    cronFolderPath = "/etc/cron*"
    cmd = util.getCmdRetrieveAllShellScripts(cronFolderPath)
    # BUG FIX: format the docker prefix BEFORE concatenating the retrieval
    # command -- if that command itself contains "{}" (e.g. find -exec ... {} \;)
    # calling .format() on the concatenation consumes it or raises IndexError.
    cmd = "sudo docker {} exec -it {} ".format(self.remote, self.containerId) + cmd
    self.logger.debug("Running command: %s", cmd)
    returncode, out, err = util.runCommand(cmd)
    splittedOut = out.splitlines()
    for scriptFilePath in splittedOut:
        # Each output line is "path:matched-text"; keep only the path part.
        scriptFilePath = scriptFilePath[:scriptFilePath.index(":")]
        self.logger.debug("Found script file: %s", scriptFilePath)
        self.copyFromContainer(scriptFilePath, tempOutputFolder)
        scriptFilePath = tempOutputFolder + "/" + util.getNameWithExtFromPath(scriptFilePath)
        processList.extend(self.extractProcessListFromShellScript(envPaths, scriptFilePath, tempOutputFolder))
    return processList
def extractListOfRunningProcesses(self, tempOutputFolder):
    """Snapshot the processes running in the container and resolve each to its executable path.

    Runs `ps axo ...` inside the container, then resolves every PID's
    /proc/<pid>/exe symlink as the owning user.  Also appends processes
    discovered through service supervisors (extractRunningServices).
    Returns a list of executable paths, or None on failure.
    """
    if ( not self.containerId ):
        self.logger.error("Trying to extract list of running processes on non-running container! self.containerId: %s", self.containerId)
        return None
    processList = []
    self.logger.info("Running process snapshot")
    cmd = "sudo docker {} exec -it {} ps axo user:20,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,comm:50"
    cmd = cmd.format(self.remote, self.containerId)
    returncode, out, err = util.runCommand(cmd)
    if ( returncode != 0 ):
        self.logger.error("Error running process snapshot on docker: %s", err)
        return None
    outLines = out.splitlines()
    # Column positions of the `ps axo` format string requested above.
    userIndex = 0
    pidIndex = 1
    for line in outLines[1:]:  # skip the ps header row
        splittedLine = line.split()
        if ( userIndex < len(splittedLine) and pidIndex < len(splittedLine)):
            # Resolve the executable via /proc/<pid>/exe, running as the
            # process owner so permission checks match.
            cmd = "sudo docker {} exec -it {} runuser -l {} -c 'ls -l /proc/{}/exe'"
            cmd = cmd.format(self.remote, self.containerId, splittedLine[userIndex].strip(), splittedLine[pidIndex].strip())
            self.logger.debug("Running command: %s", cmd)
            returncode, out, err = util.runCommand(cmd)
            if ( returncode != 0 ):
                self.logger.error("Error running cmd: %s on docker: %s", cmd, err)
            else:
                # `ls -l` on a symlink ends with the link target: the exe path.
                splittedOut = out.split()
                processList.append(splittedOut[-1])
        else:
            self.logger.warning("ps output header has PID at index: %d, but current line: %s doesn't have that index!", pidIndex, line)
    # Include processes started through service supervisors as well.
    serviceProcessList = self.extractRunningServices(tempOutputFolder)
    if ( serviceProcessList ):
        processList.extend(serviceProcessList)
    return processList
def extractRunningServices(self, tempOutputFolder):
    """Collect processes launched by service supervisors (runsvdir/runsv).

    Scans `ps auxww` output for runsvdir, extracts its service folder (-P),
    copies every shell script in that folder out of the container and
    extracts its process list.  Returns the aggregated list, or None on
    failure.  runsv entries are currently skipped (TODO).
    """
    if ( not self.containerId ):
        self.logger.error("Trying to extract list of running services on non-running container! self.containerId: %s", self.containerId)
        return None
    processList = []
    cmd = "sudo docker {} exec -it {} echo $PATH"
    cmd = cmd.format(self.remote, self.containerId)
    returncode, envPaths, err = util.runCommand(cmd)
    if ( returncode != 0 ):
        self.logger.error("Error running echo PATH command on docker: %s", err)
        return None
    self.logger.info("Running service snapshot")
    cmd = "sudo docker {} exec -it {} ps auxww"
    cmd = cmd.format(self.remote, self.containerId)
    returncode, out, err = util.runCommand(cmd)
    if ( returncode != 0 ):
        self.logger.error("Error running service snapshot on docker: %s", err)
        return None
    outLines = out.splitlines()
    cmdIndex = 10  # COMMAND column of `ps auxww`
    for line in outLines[1:]:
        splittedLine = line.split()
        if ( cmdIndex < len(splittedLine) ):
            if ( splittedLine[cmdIndex].strip().startswith("runsvdir") ):
                #TODO Handle runsvdir
                cmdComplete = ' '.join(splittedLine[cmdIndex:])
                serviceFolderPath = util.extractCommandArgument(cmdComplete, "-P")
                self.logger.debug("Handling runsvdir process special case, serviceFolder: %s", serviceFolderPath)
                if ( serviceFolderPath ):
                    cmd = util.getCmdRetrieveAllShellScripts(serviceFolderPath)
                    # BUG FIX: format the docker prefix BEFORE concatenating the
                    # retrieval command -- if that command contains "{}" (e.g.
                    # find -exec ... {} \;) .format() on the concatenation
                    # consumes it or raises IndexError.
                    cmd = "sudo docker {} exec -it {} ".format(self.remote, self.containerId) + cmd
                    self.logger.debug("Running command: %s", cmd)
                    returncode, out, err = util.runCommand(cmd)
                    splittedOut = out.splitlines()
                    for scriptFilePath in splittedOut:
                        # Lines are "path:matched-text"; keep only the path.
                        scriptFilePath = scriptFilePath[:scriptFilePath.index(":")]
                        self.logger.debug("Found script file: %s", scriptFilePath)
                        self.copyFromContainer(scriptFilePath, tempOutputFolder)
                        scriptFilePath = tempOutputFolder + "/" + util.getNameWithExtFromPath(scriptFilePath)
                        processList.extend(self.extractProcessListFromShellScript(envPaths, scriptFilePath, tempOutputFolder))
            elif ( splittedLine[cmdIndex].strip().startswith("runsv") ):
                #TODO Handle runsv
                continue
        else:
            self.logger.warning("ps output header has Command at index: %d, but current line: %s doesn't have that index!", cmdIndex, line)
    return processList
def processResults(self, moose_dir, retcode, options, output):
    """Run exodiff against the gold files after the base RunApp checks.

    Returns a (reason, output) tuple; reason is '' on success, or one of
    'MISSING GOLD FILE' / 'EXODIFF' (or whatever RunApp already reported).
    """
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)
    if reason != '' or self.specs['skip_checks']:
        return (reason, output)
    # Don't Run Exodiff on Scaled Tests
    if options.scaling and self.specs['scale_refine']:
        return (reason, output)
    # Make sure that all of the Exodiff files are actually available
    for file in self.specs['exodiff']:
        if not os.path.exists(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)):
            output += "File Not Found: " + os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)
            reason = 'MISSING GOLD FILE'
            break
    if reason == '':
        # Retrieve the commands
        commands = self.processResultsCommand(moose_dir, options)
        for command in commands:
            exo_output = runCommand(command)
            output += 'Running exodiff: ' + command + '\n' + exo_output + ' ' + ' '.join(self.specs['exodiff_opts'])
            # exodiff reports differences via 'different'/'ERROR' in its output.
            if ('different' in exo_output or 'ERROR' in exo_output) and not "Files are the same" in exo_output:
                reason = 'EXODIFF'
                break
    return (reason, output)
def extractLibsFromBinary(self, filePath, outFolderPath):
    """Copy all dynamic-library dependencies of a container binary via `ldd`.

    Parses ldd lines of the form "libfoo.so => /path/libfoo.so (0x...)" and
    copies each resolved path out of the container.  Returns False only when
    ldd fails with no output at all; copy failures are logged but tolerated.
    """
    if (not self.containerId):
        self.logger.error("Trying to extract binary libs from non-running container! self.containerId: %s", self.containerId)
        return False
    cmd = "sudo docker " + self.remote + " exec -it {} ldd {}"
    cmd = cmd.format(self.containerId, filePath)
    self.logger.debug("Running command: %s", cmd)
    returncode, out, err = util.runCommand(cmd)
    if ( returncode != 0 and out.strip() == "" ):  #In cases the binary doesn't have a dynamic section and so the return code isn't 0 but we don't have an error either
        self.logger.error("Error extracting library dependencies from docker: %s", err)
        return False
    splittedOut = out.splitlines()
    for outItem in splittedOut:
        if ("=>" in outItem):
            splittedItem = outItem.split("=>")
            if (len(splittedItem) > 1):
                # The token right after "=>" is the resolved library path.
                splittedItem = splittedItem[1].split()
                if (not self.copyFromContainer(splittedItem[0].strip(), outFolderPath)):
                    self.logger.warning("Wasn't able to copy library: %s dependent of: %s", splittedItem[0].strip(), filePath)
            else:
                self.logger.warning("This should not happen! ldd output line has => but can't be split into two parts by that substring.")
    return True
def extractDirectSyscalls(self):
    """Disassemble self.binaryPath with objdump and parse direct syscall usage.

    Returns (syscallSet, successCount, failedCount) on success.  On failure
    the historical return values are preserved for compatibility: None when
    objdump fails, (None, -1) when the binary path doesn't exist.
    TODO(review): unify the failure returns into one shape.
    """
    #Dump binary to tmp file
    dumpFileName = self.binaryPath + ".dump"
    if ("/" in dumpFileName):
        dumpFileName = dumpFileName[dumpFileName.rindex("/") + 1:]
    dumpFilePath = "/tmp/" + dumpFileName
    cmd = "objdump -d {} > " + dumpFilePath
    if (os.path.isfile(self.binaryPath)):
        cmd = cmd.format(self.binaryPath)
        returncode, out, err = util.runCommand(cmd)
        if (returncode != 0):
            # BUG FIX: the message promised the objdump error but passed
            # dumpFilePath instead of err; log both, correctly.
            self.logger.error("Couldn't create dump file %s for: %s with err: %s", dumpFilePath, self.binaryPath, err)
            return None
        #Find direct syscalls and arguments
        #Specify how many were found successfully and how many were not
        syscallSet, successCount, failedCount = self.parseObjdump(dumpFilePath)
        #Return syscall list along with number of not found syscalls
        self.logger.debug("Finished extracting direct syscalls for %s, deleting temp file: %s", self.binaryPath, dumpFilePath)
        os.unlink(dumpFilePath)
        return (syscallSet, successCount, failedCount)
    else:
        self.logger.error("binary path doesn't exist: %s", self.binaryPath)
        return (None, -1)
def extractBinariesFromAuditLog(self, auditLogOutput):
    """Pull the executed binary (a0=...) out of every EXECVE record in an audit log."""
    if not self.containerId:
        self.logger.error("Trying to extract binaries from non-running container! self.containerId: %s", self.containerId)
        return None
    pathCmd = "sudo docker {} exec -it {} echo $PATH".format(self.remote, self.containerId)
    returncode, envPaths, err = util.runCommand(pathCmd)
    if returncode != 0:
        self.logger.error("Error running echo PATH command on docker: %s", err)
        return None
    binaries = []
    #type=EXECVE msg=audit(05/09/2019 14:49:01.489:29065511) : argc=3 a0=/usr/sbin/sshd a1=-D a2=-R
    for logLine in auditLogOutput.splitlines():
        for token in logLine.split():
            if token.startswith("a0="):
                # a0 is the program being exec'd; one entry per record.
                binaries.append(token[3:])
                break
    return binaries
def runLatex(code, res, charheightpx, latexTemplate):
    """Write `code` into latexTemplate and run latex on it in workingDir.

    Raises LatexSyntaxError (with the extracted latex error message when one
    can be found) if the latex run fails.
    """
    def ensureWorkingDirectory(path):
        """Ensure this directory exists and is writable."""
        if not os.access(path, os.F_OK):
            os.mkdir(path)
        if not os.access(path, os.W_OK):
            os.system('chmod u+rwx %s' % path)
    texfileName = fileNameFor(code, charheightpx, '.tex')
    dvifileName = fileNameFor(code, charheightpx, '.dvi')
    psfileName = fileNameFor(code, charheightpx, '.ps')
    cmdLine = '%s %s' % (latexpath, texfileName)
    ensureWorkingDirectory(workingDir)
    # FIX: use a context manager (and stop shadowing the `file` builtin) so
    # the handle is closed even if the template substitution raises.
    with open(os.path.join(workingDir, texfileName), 'w') as texFile:
        texFile.write(latexTemplate % (code,))
    err, stdout, stderr = runCommand(cmdLine)
    if err:
        out = stderr + '\n' + stdout
        # Latex errors start with '!' and end at the next '?' prompt.
        err = re.search('!.*\?', out, re.MULTILINE + re.DOTALL)
        if err:
            out = err.group(0)
        # FIXME translate latex line numbers to source line numbers
        raise LatexSyntaxError(out)
def processResults(self, moose_dir, retcode, options, output):
    """Run exodiff on each gold/produced file pair after the base RunApp checks.

    Returns (reason, output); reason is '' on success, 'MISSING GOLD FILE'
    or 'EXODIFF' on failure (or whatever RunApp already reported).
    """
    (reason, output) = RunApp.processResults(self, moose_dir, retcode, options, output)
    specs = self.specs
    if reason != '' or specs['skip_checks']:
        return (reason, output)
    # Don't Run Exodiff on Scaled Tests
    if options.scaling and specs['scale_refine']:
        return (reason, output)
    for file in specs['exodiff']:
        # Per-file exodiff options: optional custom comparison file and
        # optional -use_old_floor flag.
        custom_cmp = ''
        old_floor = ''
        if specs.isValid('custom_cmp'):
            custom_cmp = ' -f ' + os.path.join(specs['test_dir'], specs['custom_cmp'])
        if specs['use_old_floor']:
            old_floor = ' -use_old_floor'
        if not os.path.exists(os.path.join(specs['test_dir'], specs['gold_dir'], file)):
            output += "File Not Found: " + os.path.join(specs['test_dir'], specs['gold_dir'], file)
            reason = 'MISSING GOLD FILE'
            break
        else:
            # Build the exodiff invocation: abs-zero floor (-F), relative
            # tolerance (-t), extra opts, then gold file vs produced file.
            command = os.path.join(moose_dir, 'framework', 'contrib', 'exodiff', 'exodiff') + ' -m' + custom_cmp + ' -F' + ' ' + str(specs['abs_zero']) + old_floor + ' -t ' + str(specs['rel_err']) \
                + ' ' + ' '.join(specs['exodiff_opts']) + ' ' + os.path.join(specs['test_dir'], specs['gold_dir'], file) + ' ' + os.path.join(specs['test_dir'], file)
            exo_output = runCommand(command)
            output += 'Running exodiff: ' + command + '\n' + exo_output + ' ' + ' '.join(specs['exodiff_opts'])
            if ('different' in exo_output or 'ERROR' in exo_output) and not "Files are the same" in exo_output:
                reason = 'EXODIFF'
                break
    return (reason, output)
def killToolContainers(logger):
    """Kill every docker container labeled with the tool name; True on success.

    Best-effort: failures (e.g. no matching containers) are reported only via
    the False return value.
    """
    killCmd = "sudo docker kill $(sudo docker ps -aq --filter label={})".format(C.TOOLNAME)
    returncode, out, err = util.runCommand(killCmd)
    if returncode != 0:
        #logger.error("Error running prune on docker with label: %s", err)
        return False
    return True
def pruneVolumes(self):
    """Remove unused docker volumes labeled with the tool name; True on success."""
    # BUG FIX: docker's --filter syntax is label=<name>; the extra '=' in
    # "label==..." makes docker parse the filter key/value incorrectly so the
    # tool's volumes were never matched (cf. killToolContainers, which uses a
    # single '=').
    cmd = "sudo docker volume prune -f --filter \"label={}\""
    cmd = cmd.format(C.TOOLNAME)
    returncode, out, err = util.runCommand(cmd)
    if ( returncode != 0 ):
        self.logger.error("Error running prune on docker with label: %s", err)
        return False
    return True
def checkoutFile(self, commitId, fileName):
    """Check out a single file at the given commit inside the repo; True on success."""
    checkoutCmd = "cd {}; git checkout {} {}".format(self.repopath, commitId, fileName)
    returncode, out, err = util.runCommand(checkoutCmd)
    if returncode != 0:
        self.logger.error("Error running checkout command: %s", err)
        return False
    return True
def getCurrent(self, jobs):
    """Return raw long-form condor_q output for `jobs` on this schedd."""
    # Get results from condor_q
    queryCommand = 'condor_q -name %s -long %s' % (self.scheddName, jobs)
    logging.info("condor_q command: %s" % queryCommand)
    stdout, stderr = util.runCommand(queryCommand)
    # We really should be checking the return code but that's not available
    if stderr != '':
        raise Exception("Executing condor_q command:\n%s" % stderr)
    return stdout
def getCurrent(self, jobs):
    """Query condor_q on this schedd and return its raw long-form output."""
    # Get results from condor_q
    condorQuery = 'condor_q -name %s -long %s' % (self.scheddName, jobs)
    logging.info("condor_q command: %s" % condorQuery)
    outData, errOutput = util.runCommand(condorQuery)
    # We really should be checking the return code but that's not available
    if errOutput != '':
        raise Exception("Executing condor_q command:\n%s" % errOutput)
    return outData
def getFunctionDefsInFile(self, fileName, commitId):
    """Map line numbers to function names defined in fileName at commitId, via cscope.

    Runs cscope over the repo, filters its symbol listing down to fileName,
    and parses each definition line into (line number -> function name).
    SYSCALL_DEFINE* macros are rewritten to their __x64_sys_<name> form.
    Results are memoized in self.fileFuncDefDict keyed by (fileName, commitId).
    """
    fileNameCommitTuple = (fileName, commitId)
    self.logger.debug("fileNameCommit tuple: %s", str(fileNameCommitTuple))
    functionDefDict = dict()
    tmpDict = self.fileFuncDefDict.get(fileNameCommitTuple, None)
    if (tmpDict):
        self.logger.debug("fileNameCommit tuple has been already extracted: %s", str(fileNameCommitTuple))
        return tmpDict
    # cscope -L -1 lists symbol definitions; sed strips leading whitespace and
    # grep -v drops struct member lines.
    firstCmd = "cd {}; cscope -R -L -1 \".*\" 2>/dev/null | grep \"{}\" | sed -e 's/^[ \t]*//' | grep -v \"^struct\""
    firstCmd = firstCmd.format(self.repopath, fileName)
    returncode, firstOut, err = util.runCommand(firstCmd)
    if (returncode != 0):
        self.logger.error("Error running first cscope command: %s error: %s", firstCmd, err)
        return functionDefDict
    # secondCmd = "cd {}; cscope -R -L -1 \".*\" 2>/dev/null | grep \"{}\" | grep \", struct \""
    # secondCmd = secondCmd.format(self.repopath, fileName)
    # returncode, secondOut, err = util.runCommand(secondCmd)
    # if ( returncode != 0 ):
    #     self.logger.error("Error running cscope command: %s error: %s", secondCmd, err)
    #     return functionDefDict
    '''
    Sample cscope output being parsed below:
    mm/gup.c .* 29 unsigned int page_mask;
    mm/gup.c .* 32 typedef int (*set_dirty_func_t)(struct page *page);
    mm/gup.c .* 34 static void __put_user_pages_dirty(struct page **pages,
    mm/gup.c .* 87 void put_user_pages_dirty(struct page **pages, unsigned long npages)
    mm/gup.c .* 108 void put_user_pages_dirty_lock(struct page **pages, unsigned long npages)
    mm/gup.c .* 123 void put_user_pages(struct page **pages, unsigned long npages)
    mm/gup.c .* 138 static struct page *no_page_table(struct vm_area_struct *vma,
    mm/gup.c .* 154 static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
    '''
    #TODO check if we're correctly extracting function definitions from the cscope output
    out = firstOut # + "\n" + secondOut
    outLines = out.splitlines()
    for outLine in outLines:
        if ("(" in outLine):
            splittedOut = outLine.split("(")
            # A space right before '(' means a function pointer/typedef, not a
            # definition -- skip those.
            if (not splittedOut[0].endswith(" ")):
                funcArgs = splittedOut[1]
                splittedOut = splittedOut[0].split()
                funcName = splittedOut[-1]
                if (funcName.startswith("SYSCALL_DEFINE")):
                    # For SYSCALL_DEFINEn(name, ...) the real symbol is derived
                    # from the first macro argument.
                    if ("," in funcArgs):
                        funcArgs = funcArgs[:funcArgs.index(",")]
                    funcName = "__x64_sys_" + funcArgs
                cscopeFileName = splittedOut[0].strip()
                lineNumber = int(splittedOut[2])
                if (cscopeFileName == fileName):
                    functionDefDict[lineNumber] = funcName
    self.fileFuncDefDict[fileNameCommitTuple] = functionDefDict
    return functionDefDict
def runCommand(self, cmd):
    """Run an arbitrary command inside the running container; True on success.

    NOTE(review): returns True when no container is running -- preserved for
    backward compatibility, but callers may want to confirm that is intended.
    """
    if ( self.containerId ):
        self.logger.info("Running cmd: %s on container: %s", cmd, self.containerId)
        # BUG FIX: format the docker prefix BEFORE appending the caller's
        # command; `cmd` is arbitrary caller input, and any '{' or '}' in it
        # would make .format() on the concatenation mangle it or raise.
        cmd = "sudo docker {} exec -it {} ".format(self.remote, self.containerId) + cmd
        returncode, out, err = util.runCommand(cmd)
        if ( returncode != 0 ):
            self.logger.error("Error running cmd: %s", cmd)
            return False
    return True
def getIp(self):
    """Return the container's IP address(es) from docker inspect, or "" on failure."""
    if not self.containerId:
        self.logger.error("Trying to get IP from non-running container! self.containerId: %s", self.containerId)
        return ""
    inspectCmd = ("docker " + self.remote +
                  " inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' " +
                  self.containerId)
    returncode, out, err = util.runCommand(inspectCmd)
    if returncode != 0:
        self.logger.error("Error trying to extract IP for container: %s errmsg: %s", self.containerId, err)
        return ""
    return out.strip()
def getItemizedHistoryFromFile(self, completed_since, jobs, history_file):
    '''Note: we could modify the above method to take a constraint on jobs, and process the list of X.Y Z into a filter. Then we would not need to run condor_history at all.'''
    historyCommand = 'condor_history -l -f %s %s' % (history_file, jobs)
    historyData, errData = util.runCommand(historyCommand)
    if errData != '':
        raise Exception("Executing condor_history command:\n%s" % errData)
    return historyData
def getItemizedHistoryFromFile(self, completed_since, jobs, history_file):
    '''Note: we could modify the above method to take a constraint on jobs, and process the list of X.Y Z into a filter. Then we would not need to run condor_history at all.'''
    condorCmd = 'condor_history -l -f %s %s' % (history_file, jobs)
    stdout, stderr = util.runCommand(condorCmd)
    if stderr != '':
        raise Exception("Executing condor_history command:\n%s" % stderr)
    return stdout
def runWithSeccompProfile(self, seccompPath):
    """Start the container with the given seccomp profile; True on success."""
    self.logger.debug("Running container %s", self.imageName)
    cmd = "sudo docker {} run -l {} --name {} {} --security-opt seccomp={} -td {} {}"
    cmd = cmd.format(self.remote, C.TOOLNAME, self.containerName, self.options, seccompPath, self.imageName, self.args)
    returncode, out, err = util.runCommand(cmd)
    if ( returncode != 0 ):
        self.logger.error("Error running docker: %s", err)
        return False
    # BUG FIX: record the container id only after a successful start; the old
    # order stored docker's (non-id) output as the id even on failure
    # (cf. runWithRuntime, which already checks first).
    self.containerId = out.strip()
    return True
def installPackage(self, packageName):
    """Install packageName inside the container, via apt (debian-family) or yum.

    Runs the package index update first; returns False on any failure.
    """
    if (not self.containerId):
        self.logger.error("Trying to install package on non-running container! self.containerId: %s", self.containerId)
        return False
    if (self.checkOs() == "debian"):
        self.logger.debug("Running apt-get update on container")
        cmd = "sudo docker {} exec -it {} apt-get update"
        cmd = cmd.format(self.remote, self.containerId)
        returncode, out, err = util.runCommand(cmd)
        if (returncode != 0):
            self.logger.debug("Error running apt-get update on docker: %s", err)
            return False
        self.logger.debug("Running apt install -y %s", packageName)
        cmd = "sudo docker {} exec -it {} apt install -y {}"
        cmd = cmd.format(self.remote, self.containerId, packageName)
        returncode, out, err = util.runCommand(cmd)
        if (returncode != 0):
            # BUG FIX: the message hard-coded "procps"; report the package
            # actually being installed.
            self.logger.debug("Error installing %s on docker: %s", packageName, err)
            return False
        self.logger.debug("Finished running apt install -y %s", packageName)
    else:
        self.logger.debug("Running yum -y update on container")
        cmd = "sudo docker {} exec -it {} yum -y update"
        cmd = cmd.format(self.remote, self.containerId)
        returncode, out, err = util.runCommand(cmd)
        if (returncode != 0):
            self.logger.debug("Error running yum -y update on docker: %s", err)
            return False
        self.logger.debug("Running yum install -y %s", packageName)
        cmd = "sudo docker {} exec -it {} yum install -y {}"
        cmd = cmd.format(self.remote, self.containerId, packageName)
        returncode, out, err = util.runCommand(cmd)
        if (returncode != 0):
            # BUG FIX: same copy-paste "procps" message corrected here.
            self.logger.debug("Error installing %s on docker: %s", packageName, err)
            return False
        self.logger.debug("Finished running yum install -y %s", packageName)
    return True
def extractPsNames(self):
    """Parse a recorded sysdig trace file and return the set of executable names seen.

    Retries the sysdig read up to three times (transient failures on trace
    files); returns None when all attempts fail or on IOError.
    """
    psNames = set()
    try:
        cmd = "sysdig -r {}"
        cmd = cmd.format(self.tmpFile)
        returncode, out, err = util.runCommand(cmd)
        # Retry up to 3 times on failure before giving up.
        if (returncode != 0):
            self.logger.error("Couldn't open file: %s with err: %s trying again.", self.tmpFile, err)
            returncode, out, err = util.runCommand(cmd)
            if (returncode != 0):
                self.logger.error("Couldn't open file: %s with err: %s trying again.", self.tmpFile, err)
                returncode, out, err = util.runCommand(cmd)
                if (returncode != 0):
                    self.logger.error("Couldn't open file: %s after 3 attempts with error: %s", self.tmpFile, err)
                    #sys.exit(-1)
                    return None
        self.logger.debug("sysdig output: %s", out)
        splittedOut = out.splitlines()
        for line in splittedOut:
            splittedLine = line.split()
            # Event lines carry either "exe=<name>" in column 9 or
            # "filename=<name>" in column 8; /proc/ pseudo-paths are skipped.
            if (len(splittedLine) >= 9 and splittedLine[8].startswith("exe=")):
                psName = splittedLine[8].strip()[4:]
                psName = psName.replace("[", "")
                if (not psName.strip().startswith("/proc/")):
                    psNames.add(psName)
            elif (len(splittedLine) == 8 and splittedLine[7].startswith("filename=")):
                psName = splittedLine[7].strip()[9:]
                psName = psName.replace("[", "")
                if (not psName.strip().startswith("/proc/")):
                    psNames.add(psName)
    except IOError as e:
        self.logger.error("Couldn't open file: %s", self.tmpFile)
        return None
    return psNames
def find(self, folder, fileName):
    """Run `find folder -name fileName` inside the container.

    Returns find's stdout on success, "" on command failure.
    NOTE(review): falls through (implicitly returning None) when no container
    is running -- preserved as-is.
    """
    if self.containerId:
        self.logger.info("Finding on container %s", self.imageName)
        findCmd = "sudo docker {} exec -it {} find {} -name {}".format(self.remote, self.containerId, folder, fileName)
        self.logger.debug("Find command: %s", findCmd)
        returncode, out, err = util.runCommand(findCmd)
        if returncode != 0:
            self.logger.error("Error running find docker: %s", err)
            return ""
        return out
def runWithRuntime(self, runtime):
    """Start the container under the given OCI runtime; True on success (stores the container id)."""
    self.logger.debug("Running container %s with runtime: %s", self.imageName, runtime)
    #TODO extract log in both scenarios, mariadb logs is the same with unconfined percona the other
    runCmd = "sudo docker {} run -l {} --runtime={} --name {} {} -td {} {}".format(
        self.remote, C.TOOLNAME, runtime, self.containerName, self.options, self.imageName, self.args)
    returncode, out, err = util.runCommand(runCmd)
    if returncode != 0:
        self.logger.error("Error running docker: %s", err)
        return False
    self.containerId = out.strip()
    return True
def checkLogs(self):
    """Return the container's `docker logs` output, or None on failure."""
    if ( not self.containerId ):
        self.logger.error("Trying to check logs on non-running container! self.containerId: %s", self.containerId)
        # FIX: was `return False` while the other failure path returned None;
        # both failure paths now return None (still falsy, so callers using a
        # truthiness check are unaffected).
        return None
    cmd = "sudo docker {} logs {}"
    cmd = cmd.format(self.remote, self.containerId)
    returncode, out, err = util.runCommand(cmd)
    if ( returncode != 0 ):
        self.logger.error("Error running logs command on docker: %s", err)
        return None
    return out
def runGhostscript(fName, res, device):
    """Render fName.ps to per-page PNGs with ghostscript.

    Raises GhostscriptError on a nonzero exit; returns gs's stderr output.
    """
    input, output = fName+'.ps', fName+'-%03d.png'
    # Antialiased, batch-mode gs invocation; resolution/device/output slots
    # are filled in by the second % below.
    cmdLine = '%s -dDOINTERPOLATE -dTextAlphaBits=4 '%gspath + \
              '-dGraphicsAlphaBits=4 -r%f -sDEVICE=%s ' + \
              '-dBATCH -dNOPAUSE -dQUIT -sOutputFile=%s %s '
    cmdLine = cmdLine %(res, device, output, input)
    err, stdout, stderr = runCommand(cmdLine)
    if err:
        log('%s\n%s\n%s\n'%(err, stdout, stderr), 'GhostscriptError')
        raise GhostscriptError(stderr+'\n'+stdout)
    return stderr # when using bbox, BoundingBox is on stderr
def getCurrent(self, jobs):
    """Return raw long-form condor_q output; adds -allusers on condor >= 8.5."""
    # Get results from condor_q
    # FIX: dict.has_key() is Python-2-only (removed in Python 3); the `in`
    # operator behaves identically on both.
    if "CONDOR_MAJOR_VERSION" in os.environ and float(os.environ["CONDOR_MAJOR_VERSION"]) >= 8.5:
        q_cmd = 'condor_q -allusers -name %s -long %s' % (self.scheddName, jobs)
    else:
        q_cmd = 'condor_q -name %s -long %s' % (self.scheddName, jobs)
    logging.info("condor_q command: %s" % q_cmd)
    q_data, err_data = util.runCommand(q_cmd)
    if err_data != '':
        # We really should be checking the return code but that's not available
        raise Exception("Executing condor_q command:\n%s" % err_data)
    return q_data
def checkOs(self):
    """Classify the container OS as "debian" or "redhat" from /etc/*release.

    Falls back to "debian" when the release files cannot be read.
    """
    releaseCmd = "sudo docker {} exec -it {} cat /etc/*release".format(self.remote, self.containerId)
    returncode, out, err = util.runCommand(releaseCmd)
    if out.strip() == "":
        self.logger.error("Can't check release for os type, falling back to ubuntu")
        return "debian"
    lowered = out.lower()
    return "debian" if ("ubuntu" in lowered or "debian" in lowered) else "redhat"
def copyFromContainer(self, filePath, outFolderPath):
    """docker-cp a single path (following symlinks) out of the container; True on success."""
    if not self.containerId:
        self.logger.error("Trying to copy file from non-running container! self.containerId: %s", self.containerId)
        return False
    copyCmd = "sudo docker {} cp -L {}:{} {}".format(self.remote, self.containerId, filePath, outFolderPath)
    self.logger.debug("Running command: %s", copyCmd)
    returncode, out, err = util.runCommand(copyCmd)
    if returncode != 0:
        self.logger.error("Error copying from docker. dockerId: %s, filePath: %s, outputFolderPath: %s Error message: %s", self.containerId, filePath, outFolderPath, err)
        return False
    return True
def getHistoryFromFile(self, completed_since, jobs, history_file):
    """Return condor_history output for `jobs`, or for everything finished since `completed_since`."""
    if jobs != "":
        historyCmd = 'condor_history -l -f %s %s' % (history_file, jobs)
    else:
        # note: we use EnteredCurrentStatus because some jobs may have been removed,
        # so they have no CompletionDate
        historyCmd = 'condor_history -l -f %s -constraint "EnteredCurrentStatus >= %s"' % (history_file, completed_since)
    historyData, errData = util.runCommand(historyCmd)
    if errData != '':
        raise Exception("Executing condor_history command:\n%s" % errData)
    return historyData
def checkStatus(self):
    """Return True iff docker inspect reports the container as running."""
    if not self.containerId:
        self.logger.error("Trying to check status of non-running container! self.containerId: %s", self.containerId)
        return False
    statusCmd = "docker " + self.remote + " inspect -f '{{.State.Running}}' " + self.containerName
    returncode, out, err = util.runCommand(statusCmd)
    self.logger.debug("docker inspect status returned: %s", out)
    if returncode != 0:
        self.logger.error("Problem checking container status, error: %s", err)
        return False
    # inspect prints "true"/"false"; anything but "false" counts as running.
    return out.strip() != "false"
def extractAllUsersFromPasswd(self):
    """Return the user names listed in the container's /etc/passwd (empty list on failure)."""
    catCmd = "sudo docker " + self.remote + " exec -it {} cat /etc/passwd"
    catCmd = catCmd.format(self.containerId)
    returncode, out, err = util.runCommand(catCmd)
    if returncode != 0:
        self.logger.error("Can't cat /etc/passwd: %s", err)
        # sys.exit(-1)
    # passwd lines are name:passwd:uid:gid:...; the first field is the user name.
    return [entry.split(":")[0].strip() for entry in out.splitlines()]
def delete(self):
    """Remove the container via `docker rm`; True on success."""
    if not self.containerId:
        self.logger.debug("Trying to delete non-existent container")
        return False
    self.logger.debug("Deleting container %s", self.imageName)
    rmCmd = "sudo docker {} rm {}".format(self.remote, self.containerId)
    returncode, out, err = util.runCommand(rmCmd)
    if returncode != 0:
        self.logger.debug("Error deleting docker: %s", err)
        return False
    return True
def getCurrent(self, jobs):
    """Query condor_q (long form) for `jobs`; uses -allusers when condor >= 8.5."""
    # Get results from condor_q
    # FIX: dict.has_key() was removed in Python 3; `in` works on 2 and 3.
    if "CONDOR_MAJOR_VERSION" in os.environ and float(
            os.environ["CONDOR_MAJOR_VERSION"]) >= 8.5:
        q_cmd = 'condor_q -allusers -name %s -long %s' % (self.scheddName, jobs)
    else:
        q_cmd = 'condor_q -name %s -long %s' % (self.scheddName, jobs)
    logging.info("condor_q command: %s" % q_cmd)
    q_data, err_data = util.runCommand(q_cmd)
    if err_data != '':
        # We really should be checking the return code but that's not available
        raise Exception("Executing condor_q command:\n%s" % err_data)
    return q_data
def extractAllBinaries(self):
    """Export the container filesystem to ./tarball and list every executable binary in it.

    Returns the list of binary paths (relative to the container root), or
    False when no container is running.  Individual step failures are logged
    but the extraction proceeds best-effort, as before.
    """
    if ( not self.containerId ):
        self.logger.error("Trying to extract binaries from non-running container! self.containerId: %s", self.containerId)
        return False
    processList = []
    # BUG FIX: util.runCommand returns (returncode, out, err); the old code
    # bound the whole tuple to `returncode` and never inspected any step.
    returncode, out, err = util.runCommand("rm -rf tarball; mkdir tarball")
    cmd = "docker export {} -o tarball/test.tar".format(self.containerId)
    self.logger.debug("Running command: %s", cmd)
    returncode, out, err = util.runCommand(cmd)
    if (returncode != 0):
        self.logger.error("Error exporting container filesystem: %s", err)
    returncode, out, err = util.runCommand("tar -xf tarball/test.tar -C tarball/")
    if (returncode != 0):
        self.logger.error("Error extracting container tarball: %s", err)
    # `file -i` tags each executable; keep only real application binaries.
    fileCmd = "find tarball/ -type f -executable -exec file -i '{}' \; | grep 'application'"
    returncode, out, err = util.runCommand(fileCmd)
    for binaryFilePath in out.splitlines():
        # Lines are "path: mime-type"; strip the description and the local
        # tarball prefix so paths are container-relative.
        binaryFilePath = binaryFilePath[:binaryFilePath.index(":")]
        binaryFilePath = binaryFilePath.replace("tarball", "")
        self.logger.debug("Binary path of found exec: " + str(binaryFilePath))
        processList.append(binaryFilePath)
    returncode, out, err = util.runCommand("rm -rf tarball")
    return processList
def dviPng(fName, res):
    """Convert fName.dvi into per-page PNGs.

    Uses dvipng when available; pages dvipng cannot render (postscript
    specials) fall through to dvips + ghostscript, whose output pages are
    renamed into the dvipng naming scheme.
    """
    input, output = fName+'.dvi', fName+'-%03d.png'
    gspngfname = fName+'-gs-%03d.png'
    psfname = fName+'-gs'
    i = 1
    # '--truecolor -bg Transparent' generates RGB images with transparent pixel
    # (not alpha channel) but it's close...
    if dvipngpath is not None:
        cmdLine = '%s --truecolor -bg Transparent -picky -D %f -Ttight -o %s %s'%\
            (dvipngpath, res, output, input)
        err, stdout, stderr = runCommand(cmdLine)
        ppredo = []
        if not err:
            return
        # dvipng -picky will give the following message on pages it cannot render
        # (usually due to the use of postscript specials). For that we fall
        # through to ghostscript
        # BUG FIX: the old loop did `ppredo += ','` on the *list*, appending a
        # ',' element; the join below already inserts commas, so the -pp option
        # came out malformed ("1,,,2") and int(',') crashed the rename loop.
        # Collect only the page numbers.
        for m in re.finditer(r'\[(\d+) not rendered\]', stdout):
            ppredo.append(m.group(1))
        ppopt = '-pp ' + ','.join(ppredo)
    else:
        ppopt = ''
    cmdLine = '%s %s -R -D %f -o %s %s'%(dvipspath, ppopt, res, psfname+'.ps', input)
    err, stdout, stderr = runCommand(cmdLine)
    if err:
        log('%s\n%s\n%s\n'%(err, stdout, stderr), 'DVIPSError')
        raise DVIPSError(stderr+'\n'+stdout)
    if not ppopt:
        # No dvipng at all: every page dvips emitted goes through ghostscript.
        ppredo = range(1,len(re.findall(r'\[\d+\]', stderr))+1)
    err = runGhostscript(psfname, res, 'pngalpha')
    center(psfname, res)
    for page in ppredo:
        # Rename ghostscript's sequentially-numbered pages to the real page numbers.
        oldfname = os.path.join(workingDir, gspngfname%i)
        newfname = os.path.join(workingDir, output%int(page))
        os.rename(oldfname, newfname)
        i += 1
def extractAllBinaries(self):
    """List all binary executables inside the container by scanning its filesystem in-place."""
    if not self.containerId:
        self.logger.error("Trying to extract binaries from non-running container! self.containerId: %s", self.containerId)
        return False
    dockerPrefix = "sudo docker {} exec -it {} ".format(self.remote, self.containerId)
    listCmd = dockerPrefix + util.getCmdRetrieveAllBinaries("/")
    self.logger.debug("Running command: %s", listCmd)
    returncode, out, err = util.runCommand(listCmd)
    # Output lines look like "path: description"; keep only the path part.
    return [line[:line.index(":")] for line in out.splitlines()]
def queryBluecoat(self, url):
    """Look up `url`'s Bluecoat categories via the phantomjs helper script.

    Returns {'source': 'Bluecoat', 'category': [...]} when at least one
    category line is parsed, otherwise {}.
    """
    phantomjs_bin = "~/Lab_TargetedAds/phantomjs/phantomjs--linux-x86_64/bin/phantomjs"
    phantomjs_script = "~/Lab_TargetedAds/src/bin/bluecoat_category.js"
    command = phantomjs_bin + ' ' + phantomjs_script + ' "' + url + '"'
    results = runCommand(command)
    ret = {}
    try:
        ret['source'] = 'Bluecoat'
        ret['category'] = []
        for line in results:
            # Script output lines are tab-separated: "<CATEGORY>\t<name>".
            fields = line.split('\t')
            if len(fields) >= 2 and fields[0] == '<CATEGORY>':
                ret['category'].append(fields[1])
        if len(ret['category']) > 0:
            return ret
        else:
            return {}
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so interrupts still propagate.
        return {}
def detectRedirection(self, url):
    """Follow an ad URL with phantomjs and return its redirect destination, or "NONE".

    Results are cached in self.redirection_db; cache hits/misses and clicks
    are counted in self.stats.
    """
    # return "NONE";  # If we don't want to detect redirection
    if url[:4] != "http":
        return 'NONE'
    # TODO: Handle flashvars, may need decoding.
    if url in self.redirection_db:
        self.stats.increment('Redirection DB hit', 1)
        return self.redirection_db[url]
    self.stats.increment('Redirection DB miss', 1)
    command = ("~/Lab_TargetedAds/phantomjs/phantomjs--linux-x86_64/bin/phantomjs "
               "~/Lab_TargetedAds/src/bin/detect_redirection.js '" + url + "'")
    results = runCommand(command)
    self.stats.increment('Ad link clicked', 1)
    for line in results:
        fields = line.split('\t')
        if len(fields) >= 3 and fields[0] == '<MSG><RESULT>' and fields[1] == "Destination":
            destination = fields[2]
            if not self.isAdProvider(destination):
                if not url in self.redirection_db:
                    self.redirection_db[url] = destination
                return fields[2]
    return "NONE"
def processResults(self, moose_dir, retcode, options, output):
    """Run exodiff comparisons after the FileTester checks, setting the test status.

    Returns the accumulated output string; the pass/fail verdict is recorded
    via setStatus (bucket_success / bucket_fail / bucket_diff).
    """
    output = FileTester.processResults(self, moose_dir, retcode, options, output)
    if self.getStatus() == self.bucket_fail or self.specs['skip_checks']:
        return output
    # Don't Run Exodiff on Scaled Tests
    if options.scaling and self.specs['scale_refine']:
        self.success_message = "SCALED"
        self.setStatus(self.getSuccessMessage(), self.bucket_success)
        return output
    # Make sure that all of the Exodiff files are actually available
    for file in self.specs['exodiff']:
        if not os.path.exists(os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)):
            output += "File Not Found: " + os.path.join(self.specs['test_dir'], self.specs['gold_dir'], file)
            self.setStatus('MISSING GOLD FILE', self.bucket_fail)
            break
    if self.getStatus() != self.bucket_fail:
        # Retrieve the commands
        commands = self.processResultsCommand(moose_dir, options)
        for command in commands:
            exo_output = util.runCommand(command)
            output += 'Running exodiff: ' + command + '\n' + exo_output + ' ' + ' '.join(self.specs['exodiff_opts'])
            # exodiff signals a mismatch via 'different'/'ERROR' in its output.
            if ('different' in exo_output or 'ERROR' in exo_output) and not "Files are the same" in exo_output:
                self.setStatus('EXODIFF', self.bucket_diff)
                break
    # If status is still pending, then it is a passing test
    if self.getStatus() == self.bucket_pending:
        self.setStatus(self.success_message, self.bucket_success)
    return output
def getGoogleAdPref(cookies_file):
    """Print the Google ad-preference lines scraped by the phantomjs helper.

    `cookies_file` is the cookie jar phantomjs loads so the scrape runs in
    the corresponding profile's session.
    """
    command = '~/Lab_TargetedAds/phantomjs/phantomjs--linux-x86_64/bin/phantomjs --cookies-file=' + cookies_file + ' ~/Lab_TargetedAds/src/bin/google_ad_pref.js'
    results = runCommand(command)
    for line in results:
        if '<AD_PREF>' in line:
            # FIX: was the py2-only `print x` statement; the call form works
            # on both Python 2 and 3 for a single argument.
            print(line)