def wait_for_scans(joblist):
    # were all jobs completed on return
    all_jobs_complete = True
    # number of high sev issues in completed jobs
    high_issue_count = 0
    med_issue_count = 0
    dash = python_utils.find_service_dashboard(DYNAMIC_ANALYSIS_SERVICE)
    for jobid in joblist:
        try:
            while True:
                state = appscan_status(jobid)
                python_utils.LOGGER.info("Job " + str(jobid) + " in state " + get_state_name(state))
                if get_state_completed(state):
                    results = appscan_info(jobid)
                    if get_state_successful(state):
                        high_issue_count += results["NHighIssues"]
                        med_issue_count += results["NMediumIssues"]
                        python_utils.LOGGER.info("Analysis successful (" + results["Name"] + ")")
                        #print "\tOther Message : " + msg
                        #appscan_get_result(jobid)
                        print python_utils.LABEL_GREEN + python_utils.STARS
                        print "Analysis successful for job \"" + results["Name"] + "\""
                        print "\tHigh Severity Issues : " + str(results["NHighIssues"])
                        print "\tMedium Severity Issues : " + str(results["NMediumIssues"])
                        print "\tLow Severity Issues : " + str(results["NLowIssues"])
                        print "\tInfo Severity Issues : " + str(results["NInfoIssues"])
                        if dash != None:
                            print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
                        print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR
                    else:
                        python_utils.LOGGER.info("Analysis unsuccessful (" + results["Name"] + ") with message \"" + results["UserMessage"] + "\"")
                    break
                else:
                    time_left = get_remaining_wait_time()
                    if (time_left > SLEEP_TIME):
                        time.sleep(SLEEP_TIME)
                    else:
                        # ran out of time, flag that at least one job didn't complete
                        all_jobs_complete = False
                        # get what info we can on this job
                        results = appscan_info(jobid)
                        # notify the user
                        print python_utils.LABEL_RED + python_utils.STARS
                        print "Analysis incomplete for job \"" + results["Name"] + "\""
                        print "\t" + str(results["Progress"]) + "% complete"
                        if dash != None:
                            print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
                        print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
                        print python_utils.STARS + python_utils.LABEL_NO_COLOR
                        # and continue to get state for other jobs
                        break
        except Exception, e:
            # bad id, skip it
            if python_utils.DEBUG:
                python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))

    # report overall completion and issue counts back to the caller
    return all_jobs_complete, high_issue_count, med_issue_count
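# A minimal sketch of the state helpers wait_for_scans() relies on. The helper
# names come from the calls above; the state codes, their display names, and
# which states count as terminal or successful are assumptions for
# illustration, not confirmed by this file.
STATE_NAMES = {0: "Pending", 1: "Starting", 2: "Running",
               3: "FinishedRunning", 4: "FinishedRunningWithErrors"}  # hypothetical values
COMPLETED_STATES = ["FinishedRunning", "FinishedRunningWithErrors"]   # assumed terminal states
SUCCESSFUL_STATES = ["FinishedRunning"]                               # assumed success states

def get_state_name(state):
    # translate a state code into a readable name for logging
    return STATE_NAMES.get(state, "Unknown(" + str(state) + ")")

def get_state_completed(state):
    # a job is complete once it reaches any terminal state
    return get_state_name(state) in COMPLETED_STATES

def get_state_successful(state):
    # a completed job only counts as successful if the run finished cleanly
    return get_state_name(state) in SUCCESSFUL_STATES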
def wait_for_scans(joblist):
    # create array of the job results in json format
    jobResults = []
    # were all jobs completed on return
    all_jobs_complete = True
    # number of high sev issues in completed jobs
    high_issue_count = 0
    med_issue_count = 0
    python_utils.LOGGER.debug("Waiting for joblist: " + str(joblist))
    dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
    for jobid in joblist:
        try:
            while True:
                state = appscan_status(jobid)
                python_utils.LOGGER.info("Job " + str(jobid) + " in state " + state)
                if get_state_completed(state):
                    results = appscan_info(jobid)
                    if get_state_successful(state):
                        high_issue_count += results["NHighIssues"]
                        med_issue_count += results["NMediumIssues"]
                        python_utils.LOGGER.info("Analysis successful (" + results["Name"] + ")")
                        #print "\tOther Message : " + msg
                        job_result = {'job_name': results["Name"],
                                      'job_id': jobid,
                                      'status': "successful",
                                      'high_severity_issues': int(str(results["NHighIssues"])),
                                      'medium_severity_issues': int(str(results["NMediumIssues"])),
                                      'low_severity_issues': int(str(results["NLowIssues"])),
                                      'info_severity_issues': int(str(results["NInfoIssues"])),
                                      'url': dash}
                        # Search for file name results["Name"] + "*.zip"
                        if os.environ.get('DRA_IS_PRESENT') == "1":
                            appscan_get_result(jobid, results["Name"])
                            save_job_result(results["Name"], job_result)
                        #appscan_get_result(jobid)
                        print python_utils.LABEL_GREEN + python_utils.STARS
                        print "Analysis successful for job \"" + results["Name"] + "\""
                        print "\tHigh Severity Issues : " + str(results["NHighIssues"])
                        print "\tMedium Severity Issues : " + str(results["NMediumIssues"])
                        print "\tLow Severity Issues : " + str(results["NLowIssues"])
                        print "\tInfo Severity Issues : " + str(results["NInfoIssues"])
                        if dash != None:
                            print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
                            f = open("result_url", "w")
                            f.write(dash)
                            f.close()
                        print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR
                        # append results to the jobResults for the json format
                        jobResults.append(job_result)
                    else:
                        python_utils.LOGGER.info("Analysis unsuccessful (" + results["Name"] + ") with message \"" + results["UserMessage"] + "\"")
                        # append results to the jobResults for the json format
                        jobResults.append({'job_name': results["Name"],
                                           'job_id': jobid,
                                           'status': "unsuccessful"})
                    break
                else:
                    time_left = python_utils.get_remaining_wait_time()
                    if (time_left > SLEEP_TIME):
                        time.sleep(SLEEP_TIME)
                    else:
                        # ran out of time, flag that at least one job didn't complete
                        all_jobs_complete = False
                        # get what info we can on this job
                        results = appscan_info(jobid)
                        # notify the user
                        print python_utils.LABEL_RED + python_utils.STARS
                        print "Analysis incomplete for job \"" + results["Name"] + "\""
                        print "\t" + str(results["Progress"]) + "% complete"
                        if dash != None:
                            print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
                            f = open("result_url", "w")
                            f.write(dash)
                            f.close()
                        print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
                        print python_utils.STARS + python_utils.LABEL_NO_COLOR
                        # append results to the jobResults for the json format
                        jobResults.append({'job_name': results["Name"],
                                           'job_id': jobid,
                                           'status': "incomplete",
                                           'percentage_complete': int(str(results["Progress"]))})
                        # and continue to get state for other jobs
                        break
        except Exception, e:
            # bad id, skip it
            if python_utils.DEBUG:
                python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))

    # report overall completion and issue counts back to the caller
    return all_jobs_complete, high_issue_count, med_issue_count
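# save_job_result() is called above when DRA_IS_PRESENT is set, but its
# implementation is not shown in this file. A plausible sketch, assuming it
# simply persists the job_result dict as JSON for the DRA integration; the
# file naming convention here is an assumption.
import json

def save_job_result(job_name, job_result):
    # one <job_name>_result.json file per completed scan (assumed naming)
    with open(job_name + "_result.json", "w") as f:
        json.dump(job_result, f, indent=4)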
    joblist = appscan_list()
else:
    # if the job we would run is already up (and either pending or complete),
    # we just want to get state (and wait for it if needed), not create a whole
    # new submission
    joblist = check_for_existing_job()
    if joblist == None:
        python_utils.LOGGER.info("Scanning for code submission")
        files_to_submit = appscan_prepare()
        python_utils.LOGGER.info("Submitting scans for analysis")
        joblist, errMsg = appscan_submit(files_to_submit)
        if (not joblist) or len(joblist) < len(files_to_submit):
            if not errMsg:
                errMsg = "Check status of existing scans."
            # error, we didn't return as many jobs as we should have
            dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
            if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
                command = '{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> could not successfully submit scan. {errMsg}\"'.format(path=python_utils.EXT_DIR, url=dash, errMsg=errMsg)
                proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
                out, err = proc.communicate()
                python_utils.LOGGER.debug(out)
            python_utils.LOGGER.error('ERROR: could not successfully submit scan. {errMsg} {url}'.format(url=dash, errMsg=errMsg))
            endtime = timeit.default_timer()
            print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
            sys.exit(4)
        python_utils.LOGGER.info("Waiting for analysis to complete")
    else:
        python_utils.LOGGER.info("Existing job found, connecting")

# check on pending jobs, waiting if appropriate
all_jobs_complete, high_issue_count, med_issue_count = wait_for_scans(joblist)
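# check_for_existing_job() returns None when no matching scan exists (which
# forces a new submission above), otherwise a list of job ids to reconnect to.
# A rough sketch under those assumptions; the name-matching rule and the
# get_job_name() helper are hypothetical, not from this file.
def check_for_existing_job():
    existing = []
    for jobid in appscan_list():
        if appscan_info(jobid)["Name"] == get_job_name():  # hypothetical helper
            existing.append(jobid)
    return existing if existing else None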
        appscan_cancel(job)
    # and cleanup the submitted irx files
    for file in files_to_submit:
        if os.path.isfile(file):
            os.remove(file)
        if os.path.isfile(file + ".log"):
            os.remove(file + ".log")
else:
    # cleanup old copies of this job
    cleanup_old_jobs()

# if we didn't successfully complete jobs, return that we timed out
if not all_jobs_complete:
    # send slack notification
    if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
        dash = python_utils.find_service_dashboard(STATIC_ANALYSIS_SERVICE)
        command = '{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> did not complete within {wait} minutes. Stage will need to be re-run after the scan completes.\"'.format(path=python_utils.EXT_DIR, url=dash, wait=python_utils.FULL_WAIT_TIME)
        proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        python_utils.LOGGER.debug(out)
    endtime = timeit.default_timer()
    print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
    sys.exit(2)
else:
    if high_issue_count > 0:
        # send slack notification
        if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
            dash = python_utils.find_service_dashboard(STATIC_ANALYSIS_SERVICE)
            command = '{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Static security scan> completed with {issues} high issues detected in the application.\"'.format(path=python_utils.EXT_DIR, url=dash, issues=high_issue_count)
            proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
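# The sendMessage.sh notification pattern above repeats for every outcome; a
# small helper like this could factor it out. This is a suggested refactor
# under the same Popen/PIPE assumptions, not code from the source.
def send_slack_message(level, message):
    script = "%s/utilities/sendMessage.sh" % python_utils.EXT_DIR
    if os.path.isfile(script):
        # same invocation shape as the inline calls above
        command = '{script} -l {level} -m \"{message}\"'.format(script=script, level=level, message=message)
        proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        python_utils.LOGGER.debug(out)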
# if the job we would run is already up (and either pending or complete),
# we just want to get state (and wait for it if needed), not create a whole
# new submission (check current version only at this point)
joblist = check_for_existing_job(ignore_older_jobs=True)
if joblist == None:
    python_utils.LOGGER.info("Submitting URL for analysis")
    joblist = appscan_submit(AD_BASE_URL, baseuser=AD_USER, basepwd=AD_PWD, oldjobs=old_joblist)
    python_utils.LOGGER.info("Waiting for analysis to complete")
else:
    python_utils.LOGGER.info("Existing job found, connecting")

# check on pending jobs, waiting if appropriate
all_jobs_complete, high_issue_count, med_issue_count = wait_for_scans(joblist)

# prebuild common substrings
dash = python_utils.find_service_dashboard(DYNAMIC_ANALYSIS_SERVICE)

# if we didn't successfully complete jobs, return that we timed out
if not all_jobs_complete:
    # send slack notification
    if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
        command = '{path}/utilities/sendMessage.sh -l bad -m \"<{url}|Dynamic security scan> did not complete within {wait} minutes. Stage will need to be re-run after the scan completes.\"'.format(path=python_utils.EXT_DIR, url=dash, wait=FULL_WAIT_TIME)
        proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        python_utils.LOGGER.debug(out)
    endtime = timeit.default_timer()
    print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
    sys.exit(2)
else:
    if high_issue_count > 0:
        # send slack notification
Logger.info("Getting credentials for Globalization service") credentials = python_utils.get_credentials_for_non_binding_service( service=GLOBALIZATION_SERVICE) if not (credentials): raise Exception( "Unable to get credentials for access to the Globalization Pipeline service." ) url = credentials['url'] instanceId = credentials['instanceId'] userId = credentials['userId'] password = credentials['password'] if not (url) or not (instanceId) or not (userId) or not (password): raise Exception( "Unable to get credentials for access to the Globalization Pipeline service." ) dashboard = python_utils.find_service_dashboard(GLOBALIZATION_SERVICE) Logger.info("Target url for Globalization Service is " + url) Logger.info("Writing credentials to setenv_globalization.sh") setenvvariable('GAAS_ENDPOINT', url) setenvvariable('GAAS_INSTANCE_ID', instanceId) setenvvariable('GAAS_USER_ID', userId) setenvvariable('GAAS_PASSWORD', password) setenvvariable('GAAS_DASHBOARD', dashboard) # allow testing connection without full job scan and submission if parsedArgs['loginonly']: Logger.info("LoginOnly set, login complete, exiting") sys.exit(0) except Exception, e: Logger.warning("Exception received", exc_info=e)
try:
    python_utils.LOGGER = python_utils.setup_logging()
    Logger = python_utils.LOGGER
    parsedArgs = parseArgs()
    Logger.info("Getting credentials for Globalization service")
    credentials = python_utils.get_credentials_for_non_binding_service(service=GLOBALIZATION_SERVICE)
    if not credentials:
        raise Exception("Unable to get credentials for access to the Globalization Pipeline service.")
    url = credentials['url']
    instanceId = credentials['instanceId']
    userId = credentials['userId']
    password = credentials['password']
    if not url or not instanceId or not userId or not password:
        raise Exception("Unable to get credentials for access to the Globalization Pipeline service.")
    dashboard = python_utils.find_service_dashboard(GLOBALIZATION_SERVICE)
    Logger.info("Target url for Globalization Service is " + url)
    Logger.info("Writing credentials to setenv_globalization.sh")
    setenvvariable('GAAS_ENDPOINT', url)
    setenvvariable('GAAS_INSTANCE_ID', instanceId)
    setenvvariable('GAAS_USER_ID', userId)
    setenvvariable('GAAS_PASSWORD', password)
    setenvvariable('GAAS_DASHBOARD', dashboard)

    # allow testing connection without full job scan and submission
    if parsedArgs['loginonly']:
        Logger.info("LoginOnly set, login complete, exiting")
        sys.exit(0)

except Exception, e:
    Logger.warning("Exception received", exc_info=e)
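# A minimal sketch of setenvvariable(), assuming it appends export lines to
# the setenv_globalization.sh file named in the log message above; the real
# implementation is not shown here, and the default filename is an assumption.
def setenvvariable(key, value, filename="setenv_globalization.sh"):
    # append one export per credential so later pipeline stages can source the file
    with open(filename, "a") as f:
        f.write("export %s=%s\n" % (key, value))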
def wait_for_scans(joblist):
    # were all jobs completed on return
    all_jobs_complete = True
    # number of high sev issues in completed jobs
    high_issue_count = 0
    med_issue_count = 0
    dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
    for jobid in joblist:
        try:
            while True:
                scan = refresh_appscan_info(jobid)
                state = parse_status(scan)
                python_utils.LOGGER.info("Job " + scan["Id"] + " in state " + state)
                if get_state_completed(state):
                    if get_state_successful(state):
                        high_issue_count += scan["LatestExecution"]["NHighIssues"]
                        med_issue_count += scan["LatestExecution"]["NMediumIssues"]
                        python_utils.LOGGER.info("Analysis successful (" + scan["Name"] + ")")
                        #print "\tOther Message : " + msg
                        #appscan_get_result(jobid)
                        print python_utils.LABEL_GREEN + python_utils.STARS
                        print "Analysis successful for job \"" + scan["Name"] + "\""
                        print "\tHigh Severity Issues : " + str(scan["LatestExecution"]["NHighIssues"])
                        print "\tMedium Severity Issues : " + str(scan["LatestExecution"]["NMediumIssues"])
                        print "\tLow Severity Issues : " + str(scan["LatestExecution"]["NLowIssues"])
                        print "\tInfo Severity Issues : " + str(scan["LatestExecution"]["NInfoIssues"])
                        if os.environ.get('DRA_IS_PRESENT') == "1":
                            job_result = {'job_name': scan["Name"],
                                          'job_id': scan["Id"],
                                          'status': "successful",
                                          'high_severity_issues': int(str(scan["LatestExecution"]["NHighIssues"])),
                                          'medium_severity_issues': int(str(scan["LatestExecution"]["NMediumIssues"])),
                                          'low_severity_issues': int(str(scan["LatestExecution"]["NLowIssues"])),
                                          'info_severity_issues': int(str(scan["LatestExecution"]["NInfoIssues"])),
                                          'url': dash}
                            get_appscan_xml_report(scan)
                            save_job_result(scan["Name"], job_result)
                        if dash != None:
                            print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
                        print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR
                    else:
                        python_utils.LOGGER.info("Analysis unsuccessful (" + scan["Name"] + ") with message \"" + scan["UserMessage"] + "\"")
                    break
                else:
                    time_left = python_utils.get_remaining_wait_time()
                    if (time_left > SLEEP_TIME):
                        time.sleep(SLEEP_TIME)
                    else:
                        # ran out of time, flag that at least one job didn't complete
                        all_jobs_complete = False
                        # get what info we can on this job
                        scan = refresh_appscan_info(jobid)
                        # notify the user
                        print python_utils.LABEL_RED + python_utils.STARS
                        print "Analysis incomplete for job \"" + scan["Name"] + "\""
                        print "\t" + str(scan["LatestExecution"]["Progress"]) + "% complete"
                        if dash != None:
                            print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
                        print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
                        print python_utils.STARS + python_utils.LABEL_NO_COLOR
                        # and continue to get state for other jobs
                        break
        except Exception, e:
            # bad id, skip it
            if python_utils.DEBUG:
                python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))

    # report overall completion and issue counts back to the caller
    return all_jobs_complete, high_issue_count, med_issue_count
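# parse_status() above extracts a status string from the scan record returned
# by refresh_appscan_info(). A hedged sketch: the "LatestExecution" shape
# matches the fields used above, but the exact status key is an assumption.
def parse_status(scan):
    # fall back to "Unknown" if the record has no execution data yet
    return scan.get("LatestExecution", {}).get("Status", "Unknown")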