Example #1
def getFullDataSet(runtime, jobPath, query):
    """Pull requested dataset from ITDM."""
    ## Load the file containing the query
    queryFile = os.path.join(jobPath, 'input', query + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'JSON query file does not exist: {}'.format(queryFile))
    queryContent = None
    with open(queryFile) as fp:
        queryContent = json.load(fp)

    ## Two retrieval options: the chunked version is best when the data set
    ## can be quite large, while the full version (used here) gathers
    ## everything in one call and supports non-Flat formats:
    #queryResults = getApiQueryResultsInChunks(runtime, queryContent)
    queryResults = getApiQueryResultsFull(
        runtime,
        queryContent,
        resultsFormat='Nested-Simple',
        headers={'removeEmptyAttributes': False})
    if queryResults is None or len(queryResults) <= 0:
        raise EnvironmentError(
            'No results found from database; nothing to send.')

    ## No layout conversion is needed here; that step only applies when
    ## using the chunked/Flat result format:
    #transformResultsFromApi(runtime, queryResults)

    ## end getFullDataSet
    return queryResults
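
A minimal sketch of the chunked alternative referenced in the comments above, assuming the helper names shown there (getApiQueryResultsInChunks and transformResultsFromApi) keep the calling conventions from the commented-out lines:

def getFullDataSetChunked(runtime, jobPath, query):
    """Variant of getFullDataSet that pulls results in chunks (sketch)."""
    queryFile = os.path.join(jobPath, 'input', query + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'JSON query file does not exist: {}'.format(queryFile))
    with open(queryFile) as fp:
        queryContent = json.load(fp)
    ## Chunked retrieval returns Flat-format results...
    queryResults = getApiQueryResultsInChunks(runtime, queryContent)
    if queryResults is None or len(queryResults) <= 0:
        raise EnvironmentError(
            'No results found from database; nothing to send.')
    ## ...so the layout conversion step is required before use
    transformResultsFromApi(runtime, queryResults)
    return queryResults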
Example #2
def getCurrentURLs(runtime, urlList):
    """Pull URLs from the database."""
    targetQuery = runtime.parameters.get('targetQuery')
    queryFile = os.path.join(runtime.env.contentGatheringPkgPath,
                             'checkUrlResponse', 'input',
                             targetQuery + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'JSON query file does not exist: {}'.format(queryFile))

    ## Load the file containing the query for URLs
    queryContent = None
    with open(queryFile) as fp:
        queryContent = json.load(fp)

    ## Get the results from the query
    queryResults = utilities.getApiQueryResultsFull(runtime,
                                                    queryContent,
                                                    resultsFormat='Flat')
    if queryResults is None or len(queryResults) <= 0:
        raise EnvironmentError('No URLs found from database; nothing to do.')

    ## Convert the API results into a desired layout for our needs here
    for entry in queryResults.get('objects', []):
        url = entry.get('data', {}).get('name')
        if url is not None:
            urlList.append(url)
    queryResults = None

    ## end getCurrentURLs
    return
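
A typical call pattern for this helper; it fills in the caller's list rather than returning one (a sketch, with hypothetical follow-up logging):

urlList = []
getCurrentURLs(runtime, urlList)
for url in urlList:
    runtime.logger.report('checking URL: {url!r}', url=url)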
Example #3
def startJob(runtime):
	"""Standard job entry point.

	Arguments:
	  runtime (dict)   : object used for providing input into jobs and tracking
	                     the job thread through the life of its runtime.
	"""
	try:
		## Set the corresponding directories for input
		jobScriptPath = os.path.dirname(os.path.realpath(__file__))
		basePath = os.path.abspath(os.path.join(jobScriptPath, '..'))
		orderFile = os.path.join(basePath, 'conf', 'orderedList.json')
		inputPath = os.path.join(basePath, 'input')
		transformPath = os.path.join(basePath, 'transform')

		## Read in the conf/orderedList.json file
		with open(orderFile) as fp:
			orderedList = json.load(fp)

		for task in orderedList:
			try:
				inputName = task.get('inputName')
				transformName = task.get('transformName')
				runtime.logger.report('Working on input query {inputName!r}', inputName=inputName)

				## Read the input query and transformation descriptor files
				inputFile = loadJsonFile(os.path.join(inputPath, inputName + '.json'), 'input')
				transformFile = loadJsonFile(os.path.join(transformPath, transformName + '.json'), 'transform')
				## Query API for the query result
				queryResults = getApiQueryResultsFull(runtime, inputFile, resultsFormat='Nested-Simple', headers={'removeEmptyAttributes': False}, verify=runtime.ocpCertFile)
				if queryResults is None or len(queryResults) <= 0:
					runtime.logger.report('No results found for input query {inputName!r}; skipping.', inputName=inputName)
					continue
				runtime.logger.report(' -- queryResults: {queryResults!r}', queryResults=queryResults)
				## Special case since we need Nested format; if query was only
				## looking for a single object, the nested format drops down to
				## a Flat format, with a list of objects.
				if 'objects' in queryResults:
					queryResults = queryResults['objects']

				## Start the work
				processThisEntry(runtime, queryResults, transformFile, transformName)

			except:
				runtime.logger.error('Failure with query {inputName!r}: {stacktrace!r}', inputName=inputName, stacktrace=str(sys.exc_info()[1]))

		## Update the runtime status to success
		if runtime.getStatus() == 'UNKNOWN':
			runtime.status(1)

	except:
		runtime.setError(__name__)

	## end startJob
	return
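
The conf/orderedList.json file read above is a list of task descriptors; judging from the keys the loop accesses, a minimal example would look like this (file names are hypothetical):

[
  {"inputName": "queryNodes", "transformName": "nodesToModel"},
  {"inputName": "queryIps", "transformName": "ipsToModel"}
]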
Example #4
def getQueryResults(runtime, queryName, resultsFormat):
    """Run a query to get all matching objects."""
    queryFile = os.path.join(runtime.env.universalJobPkgPath, 'logicalModels',
                             'input', queryName + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'Missing query file specified in the parameters: {!r}'.format(queryFile))
    queryContent = None
    with open(queryFile) as fp:
        queryContent = json.load(fp)
    queryResults = getApiQueryResultsFull(runtime,
                                          queryContent,
                                          resultsFormat=resultsFormat,
                                          verify=runtime.ocpCertFile)

    ## end getQueryResults
    return queryResults
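
Callers would use it along these lines (a sketch; the query name is hypothetical, and the 'objects' key matches the Flat-format results seen in the other examples):

queryResults = getQueryResults(runtime, 'logicalModelQuery', 'Flat')
for entry in queryResults.get('objects', []):
    runtime.logger.report('entry: {entry!r}', entry=entry)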
Example #5
def getItdmDataSet(runtime, jobPath):
	"""Pull requested dataset from ITDM."""
	targetQuery = runtime.parameters.get('targetQuery')
	queryFile = os.path.join(jobPath, 'input', targetQuery + '.json')
	if not os.path.isfile(queryFile):
		raise EnvironmentError('JSON query file does not exist: {}'.format(queryFile))

	## Load the file containing the query
	queryContent = None
	with open(queryFile) as fp:
		queryContent = json.load(fp)

	## Two retrieval options: the chunked version is best when the data set
	## can be quite large, while the full version (used here) gathers
	## everything in one call and supports non-Flat formats:
	#queryResults = getApiQueryResultsInChunks(runtime, queryContent)
	queryResults = getApiQueryResultsFull(runtime, queryContent, resultsFormat='Nested', headers={'removeEmptyAttributes': False})
	if queryResults is None or len(queryResults) <= 0:
		raise EnvironmentError('No results found from ITDM; nothing to update.')

	## end getItdmDataSet
	return queryResults
Example #6
def getDataSet(runtime, jobPath, queryName):
    """Pull requested dataset from ITDM."""
    queryFile = os.path.join(jobPath, 'input', queryName + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'JSON query file does not exist: {}'.format(queryFile))

    ## Load the file containing the query
    queryContent = None
    with open(queryFile) as fp:
        queryContent = json.load(fp)
    queryResults = getApiQueryResultsFull(
        runtime,
        queryContent,
        resultsFormat='Nested-Simple',
        headers={'removeEmptyAttributes': False})
    if queryResults is None or len(queryResults) <= 0:
        raise EnvironmentError(
            'No IPs found from database; nothing to update.')

    ## end getDataSet
    return queryResults
Example #7
def startJob(runtime):
    """Standard job entry point.

    Arguments:
      runtime (dict)   : object used for providing input into jobs and tracking
                         the job thread through the life of its runtime.
    """
    try:
        ## Establish our runtime working directory
        jobRuntimePath = verifyJobRuntimePath(__file__)
        runtime.logger.report('path jobRuntimePath: {jobRuntimePath!r}',
                              jobRuntimePath=jobRuntimePath)

        ## Initialize a workbook
        wb = Workbook()
        worksheets = runtime.parameters.get('worksheets')
        firstSheet = True
        ## Go through each worksheet definition set in the job
        for definition in worksheets:
            try:
                queryName = definition.get('queryName')
                sheetName = definition.get('sheetName')
                worksheet = wb.active
                if firstSheet:
                    worksheet.title = sheetName
                    firstSheet = False
                else:
                    ## create_sheet returns the new sheet, so the deprecated
                    ## get_sheet_by_name lookup is not needed
                    worksheet = wb.create_sheet(sheetName)

                ## Read the query definition
                queryFile = os.path.join(runtime.env.universalJobPkgPath,
                                         'reportWorkbook', 'input',
                                         queryName + '.json')
                if not os.path.isfile(queryFile):
                    raise EnvironmentError(
                        'JSON query file does not exist: {}'.format(queryFile))
                queryContent = None
                with open(queryFile) as fp:
                    queryContent = json.load(fp)

                runtime.logger.report(' Requesting queryName {queryName!r}',
                                      queryName=queryName)
                queryResults = getApiQueryResultsFull(
                    runtime,
                    queryContent,
                    resultsFormat='Nested-Simple',
                    headers={'removeEmptyAttributes': False},
                    verify=runtime.ocpCertFile)
                if queryResults is None or len(queryResults) <= 0:
                    runtime.logger.debug(
                        'No results found for queryName {queryName!r}',
                        queryName=queryName)
                    continue

                ## Special case since we need Nested format; if query was only
                ## looking for a single object, the nested format drops down to
                ## a Flat format, with a list of objects.
                if 'objects' in queryResults:
                    queryResults = queryResults['objects']

                ## Convert the API results into a desired layout for our needs here
                transformResults(runtime, queryResults, worksheet)

            except:
                stacktrace = traceback.format_exception(
                    sys.exc_info()[0],
                    sys.exc_info()[1],
                    sys.exc_info()[2])
                runtime.logger.error(
                    'Failure parsing queryName {queryName!r}: {stacktrace!r}',
                    queryName=queryName,
                    stacktrace=stacktrace)

        ## Save the workbook as a file
        reportName = '{}.xlsx'.format(runtime.parameters.get('reportFile'))
        reportFile = os.path.join(jobRuntimePath, reportName)
        wb.save(reportFile)

        ## Update the runtime status to success
        if runtime.getStatus() == 'UNKNOWN':
            runtime.status(1)

    except:
        runtime.setError(__name__)

    ## end startJob
    return
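
The 'worksheets' job parameter consumed above pairs a query with a sheet name; based on the keys read in the loop, a minimal value would be (names are hypothetical):

[
  {"queryName": "allNodes", "sheetName": "Nodes"},
  {"queryName": "allIps", "sheetName": "IPs"}
]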
Example #8
def processResults(runtime, mappingEntries, queryContent):
    """Get all nodes with snmp_oid values; update any that match a mapping."""
    try:
        queryResults = getApiQueryResultsFull(runtime,
                                              queryContent,
                                              resultsFormat='Flat',
                                              verify=runtime.ocpCertFile)
        nodeResults = queryResults.get('objects', [])
        runtime.logger.info('Found {} base nodes with SNMP OID values'.format(
            len(nodeResults)))

        ## Loop through the target nodes
        for result in nodeResults:
            ## Get node attributes
            identifier = result.get('identifier')
            nodeData = result.get('data', {})
            nodeName = nodeData.get('hostname')
            deviceOID = nodeData.get('snmp_oid')
            runtime.logger.report(
                ' node: {name!r} with OID {oid!r} has ID: {identifier!r}',
                name=nodeName,
                oid=deviceOID,
                identifier=identifier)

            ## Loop through the mapping entries
            foundMatch = False
            for entry in mappingEntries:
                ref = entry.get('deviceTypeForReferenceOnly')
                matchingSection = entry.get('matchingSection', {})

                ## Matching section
                snmpOID = matchingSection.get('snmpOID')
                compareType = matchingSection.get('compareType')
                compareValue = matchingSection.get('compareValue')
                if compareType == '==':
                    if deviceOID == snmpOID:
                        foundMatch = True
                elif compareType.lower() == 'regex':
                    escapedOID = snmpOID.replace('.', '[.]')
                    ## str.replace takes an occurrence count as its third
                    ## argument, not regex flags; use re.sub for the intended
                    ## case-insensitive substitution
                    value = re.sub('snmpOID', escapedOID, compareValue, flags=re.I)
                    if re.search(value, deviceOID):
                        foundMatch = True
                else:
                    runtime.logger.info(
                        'Unknown compare type for mapping {}. Received {} and expected either "==" or "regEx".'
                        .format(ref, compareType))

                ## If no match, continue to the next mapping definition
                if not foundMatch:
                    continue

                ## Mapping section
                mappingSection = entry.get('mappingSection', {})
                nodeType = mappingSection.get('type')
                attributeValueOverrides = mappingSection.get(
                    'attributeValueOverrides', {})
                attributeValuesToUseWhenEmpty = mappingSection.get(
                    'attributeValuesToUseWhenEmpty', {})

                attrs = {}
                ## Explicitly set domain to null if it was null before
                nodeDomain = nodeData.get('domain')
                attrs['hostname'] = nodeName
                attrs['domain'] = nodeDomain
                ## Update static attribute values
                for key, value in attributeValuesToUseWhenEmpty.items():
                    ## The data section by default will not remove null values
                    if key not in nodeData:
                        attrs[key] = value
                for key, value in attributeValueOverrides.items():
                    attrs[key] = value

                runtime.logger.report(
                    '  updating subtype for {}'.format(nodeName))
                runtime.logger.report('    ==================')
                runtime.logger.report('    previous data:')
                for key, value in nodeData.items():
                    runtime.logger.report('      {}: {}'.format(key, value))
                runtime.logger.report('    attribute updates:')
                for key, value in attrs.items():
                    runtime.logger.report('      {}: {}'.format(key, value))
                runtime.logger.report('    ==================')

                ## Update the object and subtype accordingly
                runtime.results.addObject(nodeType,
                                          uniqueId=identifier,
                                          **attrs)
                break

    except:
        runtime.setError(__name__)

    ## end processResults
    return
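
Reconstructed from the keys this function reads, a single mapping entry might look like the following (all values are hypothetical illustrations):

{
  "deviceTypeForReferenceOnly": "Cisco switch",
  "matchingSection": {
    "snmpOID": "1.3.6.1.4.1.9",
    "compareType": "regEx",
    "compareValue": "^snmpOID"
  },
  "mappingSection": {
    "type": "HardwareNode",
    "attributeValueOverrides": {"vendor": "Cisco"},
    "attributeValuesToUseWhenEmpty": {"is_virtual": false}
  }
}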
Example #9
def getDeltaDataSet(runtime, jobPath, query, fullQuery,
                    previousRuntimeStatistics, deltaSyncExpirationInDays):
    """Pull partial/delta dataset from ITDM, based on last successful run."""
    ## First check the previous runtime stats to direct the type of job. The job
    ## params asked for a delta sync, but if there was no successful run in the
    ## past, this needs to redirect to a full sync instead.
    runtime.logger.report('SNOW JOB Stats: {stats!r}',
                          stats=previousRuntimeStatistics)
    lastSuccess = previousRuntimeStatistics.get('date_last_success')
    lastSuccessDate = None
    redirectToFull = False
    if lastSuccess is None:
        redirectToFull = True
        runtime.logger.info(
            'Redirecting this delta-sync to full-sync; previous runtime statistics do not hold a value for date_last_success.'
        )
    else:
        ## The deltaSyncExpirationInDays variable holds the number of days since
        ## the last successful delta sync, before the job redirects to full sync
        #lastSuccessDate = datetime.datetime.strptime(lastSuccess, '%Y-%m-%d %H:%M:%S')
        lastSuccessDate = arrow.get(lastSuccess, 'YYYY-MM-DD HH:mm:ss')
        expirationDate = arrow.utcnow().shift(
            days=-(deltaSyncExpirationInDays)).datetime
        if expirationDate > lastSuccessDate:
            redirectToFull = True
            runtime.logger.info(
                'Redirecting this delta-sync to full-sync; value for \'date_last_success\'={lastSuccess!r} is past the number of days allowed in the user provided parameter \'deltaSyncExpirationInDays\'={deltaSyncExpirationInDays!r}.',
                lastSuccess=lastSuccess,
                deltaSyncExpirationInDays=deltaSyncExpirationInDays)

    if redirectToFull or lastSuccessDate is None:
        runtime.logger.info('Redirecting Delta sync to Full sync')
        return (getFullDataSet(runtime, jobPath, fullQuery))

    ## Load the file containing the query
    queryFile = os.path.join(jobPath, 'input', query + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'JSON query file does not exist: {}'.format(queryFile))
    queryContent = None
    with open(queryFile, 'r') as fp:
        queryContent = fp.read()
    ## Replace the <VALUE1> and <VALUE2> placeholders in the 'betweendate'
    ## operation with the proper formats:
    ##   VALUE1 = the last successful runtime of the job, which should be in UTC
    ##   VALUE2 = current UTC time sent in the expected string format
    utcNow = arrow.utcnow().format('YYYY-MM-DD HH:mm:ss')
    runtime.logger.info(
        'Using this time comparison for the delta-sync query: time_created is between {lastSuccess!r} and {utcNow!r}...',
        lastSuccess=lastSuccess,
        utcNow=utcNow)
    # e.g. lastSuccess = "2019-07-10 09:39:00"
    #      utcNow      = "2019-07-26 09:39:00"
    queryContent = queryContent.replace('<VALUE1>',
                                        '"{} UTC"'.format(lastSuccess))
    queryContent = queryContent.replace('<VALUE2>', '"{} UTC"'.format(utcNow))

    queryResults = getApiQueryResultsFull(
        runtime,
        queryContent,
        resultsFormat='Nested-Simple',
        headers={'removeEmptyAttributes': False})
    if queryResults is None or len(queryResults) <= 0:
        raise EnvironmentError(
            'No results found from database; nothing to send.')

    ## end getDeltaDataSet
    return queryResults
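
For illustration, the expiration check above reduces to a simple comparison; this standalone sketch uses the same arrow calls, with example dates and an example 30-day threshold:

import arrow

lastSuccessDate = arrow.get('2019-07-10 09:39:00', 'YYYY-MM-DD HH:mm:ss')
expirationDate = arrow.utcnow().shift(days=-30).datetime
## True once the last success is older than the allowed window,
## which is what redirects the delta sync to a full sync
redirectToFull = expirationDate > lastSuccessDate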
Example #10
def issueApiCall(runtime):
    endpoint = None
    ## Get the parameters
    inputQuery = runtime.parameters.get('inputQuery')
    targetIp = runtime.parameters.get('endpointIp')

    ## Corresponding directory for our input query
    jobScriptPath = os.path.dirname(os.path.realpath(__file__))
    jobPath = os.path.abspath(os.path.join(jobScriptPath, '..'))

    ## Load the file containing the query
    queryFile = os.path.join(jobPath, 'input', inputQuery + '.json')
    if not os.path.isfile(queryFile):
        raise EnvironmentError(
            'JSON query file does not exist: {}'.format(queryFile))
    queryContent = None
    with open(queryFile, 'r') as fp:
        queryContent = fp.read()

    ## We do not know the shell type, and in the future there may be more. So
    ## we will loop through each one, trying until we find a match.

    ## Note: previously this was sent in by the endpoint query, but when we
    ## switched over to make the IP a parameter instead, the shell details were
    ## no longer sent into the job. Hence this step to request them from the API.
    ## The idea was to simplify user experience by adding more code.
    queryResults = None
    for shellType in validShellTypes:
        thisQuery = queryContent
        ## Replace <VALUE1> and <VALUE2> placeholders:
        ##   VALUE1 = the shell type: PowerShell or SSH
        ##   VALUE2 = the IP address of the target endpoint you wish to template
        thisQuery = thisQuery.replace('<VALUE1>', '"{}"'.format(shellType))
        thisQuery = thisQuery.replace('<VALUE2>', '"{}"'.format(targetIp))

        queryResults = getApiQueryResultsFull(
            runtime,
            thisQuery,
            resultsFormat='Nested',
            headers={'removeEmptyAttributes': False},
            verify=runtime.ocpCertFile)
        ## Guard with .get() in case this shell type is absent from the results
        if queryResults is not None and len(queryResults.get(shellType, [])) > 0:
            break

    if queryResults is None or len(queryResults.get(shellType, [])) <= 0:
        raise EnvironmentError(
            'Could not find endpoint {} with a corresponding shell. Please make sure to run the corresponding "Find" job first.'
            .format(targetIp))

    runtime.logger.report('queryResults: {queryResults!r}...',
                          queryResults=queryResults)
    ## Set the protocol data on runtime, as though it was sent in regularly.
    ## And don't use 'get'; allow exceptions here if endpoint values are missing
    endpoint = queryResults[shellType][0]
    runtime.endpoint = {
        "class_name": endpoint["class_name"],
        "identifier": endpoint["identifier"],
        "data": endpoint["data"]
    }
    runtime.setParameters()

    return endpoint
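
The module-level validShellTypes list referenced in the loop is not part of this excerpt; judging from the placeholder comments (PowerShell or SSH), it is presumably something like:

validShellTypes = ['PowerShell', 'SSH']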