def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing input into jobs and tracking
	                   the job thread through the life of its runtime.
	"""
	try:
		## Resolve the mapping file named by the job parameters
		mappingFile = runtime.parameters.get('confFileWithMapping')
		entries = getMappingFile(runtime, mappingFile)
		## Build the SNMP client and hand it to the worker function
		snmpClient = getClient(runtime)
		processResults(runtime, snmpClient, entries)
		## Flag success when nothing above flagged a failure
		if runtime.getStatus() == 'UNKNOWN':
			runtime.status(1)
	except:
		runtime.setError(__name__)

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing input into jobs and tracking
	                   the job thread through the life of its runtime.
	"""
	client = None
	try:
		## Read/initialize the job parameters
		trackEveryIpInDNS = runtime.parameters.get('trackEveryIpInDNS', False)
		retainCacheAfterJobCompletion = runtime.parameters.get('retainCacheAfterJobCompletion', False)
		## Maps IP address -> object id; filled from the DB when we are not
		## tracking every IP found in DNS
		ipAddrToIdDict = {}
		## Working directory for this job's cached runtime data
		jobRuntimePath = verifyJobRuntimePath(__file__)
		runtime.logger.report('path jobRuntimePath: {jobRuntimePath!r}', jobRuntimePath=jobRuntimePath)

		## See if we need to query the API for current IPs
		if not trackEveryIpInDNS:
			## Pull current IPs from DB so doWork can limit itself to known IPs
			getCurrentIps(runtime, ipAddrToIdDict)
			runtime.logger.report('IP count found from database: {ipCount!r}', ipCount=len(ipAddrToIdDict))

		## Create a realm utility to check if IPs are within the boundary
		runtime.logger.report('realm: {runtime!r}', runtime=runtime.jobMetaData.get('realm'))
		runtime.logger.report('protocols: {runtime!r}', runtime=runtime.jobMetaData)
		realmUtil = RealmUtility(runtime)

		## Get the WMI client scoped to the Microsoft DNS namespace
		client = getClient(runtime, namespace='root/microsoftdns')
		if client is not None:
			## Open client session before starting the work
			client.open()
			## Do the work
			doWork(runtime, client.connection, ipAddrToIdDict, jobRuntimePath, realmUtil)
			## Good house keeping; though I force this after the exception below
			client.close()
			## Update the runtime status to success
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)

		## Remove the cached runtime directory unless the job asked to keep it
		if not retainCacheAfterJobCompletion:
			cleanupJobRuntimePath(__file__)

	except:
		runtime.setError(__name__)
		## Best-effort cleanup of the client; errors here are ignored
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing I/O for jobs and tracking
	                   the job thread through its runtime
	"""
	client = None
	try:
		## Configure shell client
		client = getClient(runtime, commandTimeout=30)
		if client is not None:
			## Get a handle on our Node in order to link objects in this job
			nodeId = runtime.endpoint.get('data').get('container')

			## Get all related IPs (address -> object id)
			ips = {}
			for ip in runtime.endpoint.get('children', {}).get('IpAddress', []):
				ipObjectId = ip.get('identifier')
				ipAddress = ip.get('data').get('address')
				ips[ipAddress] = ipObjectId

			## Open client session before starting the work
			client.open()

			## First query for SQL instance name and IDs
			instances = {}
			findInstances(runtime, client, instances)
			## Only continue if we found SQL instances
			if len(instances) <= 0:
				## Job executed fine, but didn't find what it was looking for
				runtime.setInfo('SQL Server not found')
			else:
				addObject(runtime, 'Node', uniqueId=nodeId)
				for instanceName, instanceData in instances.items():
					## Raw string: the registry path contains backslashes, and
					## '\S' / '\M' are invalid escape sequences in a regular
					## string literal (DeprecationWarning in modern Python)
					regInstancePath = r'HKLM:\SOFTWARE\Microsoft\Microsoft SQL Server\{}'.format(instanceData.get('id'))
					dbContextId = qualifyInstance(runtime, client, instanceName, instanceData, regInstancePath, nodeId)
					getConnectionParameters(runtime, client, instanceName, instanceData, regInstancePath, dbContextId)

			## Close the session on the success path as well; previously the
			## client was only closed by the exception handler (leak)
			client.close()
			## Update the runtime status to success
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing I/O for jobs and tracking
	                   the job thread through its runtime.
	"""
	client = None
	try:
		## Issue API query for the node, and get details for creating this entry
		nodeDetails = getNodeDetails(runtime)
		if nodeDetails is None:
			raise EnvironmentError('Unable to pull node details from endpoint.')

		## Get parameters controlling the job
		commandsToTest = runtime.parameters.get('commandList', [])
		commandTimeout = runtime.parameters.get('commandTimeout', 10)

		## Configure shell client
		client = getClient(runtime)
		if client is not None:
			## Open client session before starting the work
			client.open()

			## Run commands; logged at warn level so the output is visible
			## without raising the logger verbosity
			for command in commandsToTest:
				runtime.logger.warn('Running command: {command!r}', command=command)
				(stdOut, stdError, hitProblem) = client.run(command, commandTimeout)
				if hitProblem:
					runtime.logger.warn(' command encountered a problem')
				else:
					runtime.logger.warn(' command successful')
				runtime.logger.warn(' stdOut: {stdOut!r}', stdOut=stdOut)
				runtime.logger.warn(' stdError: {stdError!r}', stdError=stdError)

			## Close the session on the success path; previously the client was
			## only closed by the exception handler (resource leak on success)
			client.close()

			## Update the runtime status to success
			runtime.status(1)

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing input into jobs and tracking
	                   the job thread through the life of its runtime.
	"""
	client = None
	try:
		## Echo every input parameter into the log for troubleshooting
		for parameterName in runtime.parameters:
			runtime.logger.report(' --> input parameter {name!r}: {value!r}', name=parameterName, value=runtime.parameters.get(parameterName))

		## Configure shell client; the connection itself is deferred until open()
		client = getClient(runtime)
		if client is not None:
			## Handle on our Node, used for attaching files
			nodeId = runtime.endpoint.get('data').get('container')
			## Establish the session and run the actual work
			client.open()
			checkFiles(runtime, client, nodeId)
			## Close on the success path (the except clause below also forces it)
			client.close()
			## Promote UNKNOWN to success so job tracking/statistics stay honest;
			## a failure recorded earlier is left untouched
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing input into jobs and tracking
	                   the job thread through the life of its runtime.
	"""
	client = None
	try:
		## Configure shell client
		client = getClient(runtime)
		if client is not None:
			## Track the Node so discovered objects can link back to it
			nodeId = runtime.endpoint.get('data').get('container')
			runtime.results.addObject('Node', uniqueId=nodeId)

			## Open the session, gather the start tasks, then close
			client.open()
			taskResults = getStartTasks(runtime, client, nodeId, trackResults=True)
			client.close()

			## Mark success unless an earlier step already set a status
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)

			## Debug output
			runtime.logger.report('resultDictionary:', resultDictionary=taskResults)
			for result in taskResults:
				runtime.logger.report(' {result!r}', result=result)
			runtime.logger.report('results of shell_OS_start_tasks: {results!r}\n\n', results=runtime.results.getJson())

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Runs a single 'hostname' command through a shell client and logs the
	output; any failure is recorded on the runtime object.
	"""
	client = None
	try:
		client = getClient(runtime)
		if client is not None:
			client.open()
			command = 'hostname'
			(stdout, stderr, hitProblem) = client.run(command)
			if hitProblem:
				## Surface the shell failure through the standard error path
				raise OSError('Command failed {}. Error returned {}.'.format(command, stderr))
			runtime.logger.report(' --> {command!r}: {output!r}', command=command, output=stdout)
	except:
		runtime.setError(__name__)

	## Best-effort close, executed on both the success and failure paths
	if client is not None:
		with suppress(Exception):
			client.close()
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing input into jobs and tracking
	                   the job thread through the life of its runtime.
	"""
	client = None
	try:
		## Only the node (container) id is consumed by checkFiles; the former
		## 'node_type' and 'connectViaFQDN' lookups were unused locals (and the
		## chained .get('parameters').get(...) could raise on a missing key)
		nodeId = runtime.endpoint.get('data').get('container')

		## Configure shell client and do the work
		client = getClient(runtime)
		if client is not None:
			client.open()
			checkFiles(runtime, client, nodeId)
			## Good house keeping; the except clause below also forces this
			client.close()

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing I/O for jobs and tracking
	                   the job thread through its runtime
	"""
	client = None
	try:
		## Configure shell client
		client = getClient(runtime, commandTimeout=30)
		if client is not None:
			## Track the Node so discovered objects can link back to it
			nodeId = runtime.endpoint.get('data').get('container')
			runtime.results.addObject('Node', uniqueId=nodeId)

			## Shared container passing I/O between the worker functions
			jobData = shellAppComponentsUtils.dataContainer(runtime, nodeId, client)

			## Open the session, run the work, then close
			client.open()
			vocalAppComponents(runtime, jobData)
			client.close()

			## Mark success unless an earlier step already set a status
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Runs a single 'hostname' command through a shell client. Previously this
	had no error handling, no None-check on the client, and leaked the client
	session if run() raised; it now follows the error/cleanup convention used
	by the other startJob variants in this file.
	"""
	client = None
	try:
		client = getClient(runtime)
		if client is not None:
			client.open()
			(stdout, stderr, hitProblem) = client.run('hostname')
			client.close()
			## Mark success unless an earlier step already set a status
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)
	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict) : object used for providing input into jobs and tracking
	                   the job thread through the life of its runtime.
	"""
	client = None
	try:
		## Configure shell client
		client = getClient(runtime)
		if client is not None:
			## Get a handle on our IP in order to link objects in this job
			ipAddress = runtime.endpoint.get('data').get('ipaddress')
			ipList = []
			runtime.logger.report(' ENDPOINT: {endpoint!r}', endpoint=runtime.endpoint)
			## This pulls the first Node related to the shell (and there will
			## only be one since the max/min on the endpoint query is 1), and
			## pulls the list of IPs related to the Node for our local IPs:
			thisNode = runtime.endpoint.get('children').get('Node')[0]
			for ipObject in thisNode.get('children').get('IpAddress'):
				thisAddress = ipObject.get('data').get('address')
				is_ipv4 = ipObject.get('data').get('is_ipv4')
				if thisAddress is not None:
					ipList.append((thisAddress, is_ipv4))
			runtime.logger.report(' ipList: {ipList!r}', ipList=ipList)
			hostname = thisNode.get('data', {}).get('hostname')
			domain = thisNode.get('data', {}).get('domain')

			## Open client session before starting the work
			client.open()
			## Do the work
			(udpListenerList, tcpListenerList, tcpEstablishedList) = getNetworkStack(runtime, client, ipAddress, localIpList=ipList, trackResults=True, hostname=hostname, domain=domain)
			## Good house keeping; though I force this after the exception below
			client.close()

			## Update the runtime status to success
			if runtime.getStatus() == 'UNKNOWN':
				runtime.status(1)

			## Debug output. The TCP report calls below previously passed
			## udpListenerList by copy/paste; they now pass the matching lists.
			runtime.logger.report('udpListenerList:', udpListenerList=udpListenerList)
			for listener in udpListenerList:
				runtime.logger.report(' {listener!r}', listener=listener)
			runtime.logger.report('tcpListenerList:', tcpListenerList=tcpListenerList)
			for listener in tcpListenerList:
				runtime.logger.report(' {listener!r}', listener=listener)
			runtime.logger.report('tcpEstablishedList:', tcpEstablishedList=tcpEstablishedList)
			for established in tcpEstablishedList:
				runtime.logger.report(' {established!r}', established=established)
			runtime.logger.report('results of shell_OS_network_stack: {results!r}', results=runtime.results.getJson())

	except:
		runtime.setError(__name__)
		with suppress(Exception):
			if client is not None:
				client.close()

	## end startJob
	return