Example #1
0
    python_utils.LOGGER = python_utils.setup_logging()
    # send slack notification
    if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
        command = '{path}/utilities/sendMessage.sh -l info -m \"Starting static security scan\"'.format(
            path=python_utils.EXT_DIR)
        if python_utils.DEBUG:
            print "running command " + command
        proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        python_utils.LOGGER.debug(out)
    else:
        if python_utils.DEBUG:
            print "sendMessage.sh not found, notifications not attempted"

    python_utils.WAIT_TIME = python_utils.get_remaining_wait_time(first=True)
    python_utils.LOGGER.info("Getting credentials for Static Analysis service")
    creds = python_utils.get_credentials_for_non_binding_service(
        service=APP_SECURITY_SERVICE)
    python_utils.LOGGER.info("Connecting to Static Analysis service")
    appscan_login(creds['bindingid'], creds['password'])

    # allow testing connection without full job scan and submission
    if parsed_args['loginonly']:
        python_utils.LOGGER.info("LoginOnly set, login complete, exiting")
        endtime = timeit.default_timer()
        print "Script completed in " + str(
            endtime - python_utils.SCRIPT_START_TIME) + " seconds"
        sys.exit(0)

    # if checkstate, don't really do a scan, just check state of current outstanding ones
Example #2
0
def wait_for_scans(joblist):
    """Poll the AppScan service until every job in joblist completes or
    the remaining wait time runs out.

    For each job id, loops on appscan_status() and sleeps SLEEP_TIME
    between polls.  Successful jobs have their per-severity issue counts
    printed and recorded in jobResults; unsuccessful jobs are recorded
    with status "unsuccessful"; jobs that cannot finish in the remaining
    time are reported and recorded as "incomplete".  Exceptions for a
    bad job id are swallowed (logged only in DEBUG mode).

    NOTE(review): jobResults, all_jobs_complete, high_issue_count and
    med_issue_count are accumulated but no return statement is visible
    in this excerpt -- presumably the function returns them further
    down; confirm against the full file.
    """
    # create array of the job results in json format
    jobResults = []
    # were all jobs completed on return
    all_jobs_complete = True
    # number of high sev issues in completed jobs
    high_issue_count = 0
    med_issue_count = 0
    python_utils.LOGGER.debug("Waiting for joblist: " + str(joblist))
    # dashboard URL for the security service (may be None)
    dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
    for jobid in joblist:
        try:
            while True:
                state = appscan_status(jobid)
                python_utils.LOGGER.info("Job " + str(jobid) + " in state " +
                                         state)
                if get_state_completed(state):
                    results = appscan_info(jobid)
                    if get_state_successful(state):
                        high_issue_count += results["NHighIssues"]
                        med_issue_count += results["NMediumIssues"]
                        python_utils.LOGGER.info("Analysis successful (" +
                                                 results["Name"] + ")")
                        #print "\tOther Message : " + msg

                        job_result = {
                            'job_name':
                            results["Name"],
                            'job_id':
                            jobid,
                            'status':
                            "successful",
                            'high_severity_issues':
                            int(str(results["NHighIssues"])),
                            'medium_severity_issues':
                            int(str(results["NMediumIssues"])),
                            'low_severity_issues':
                            int(str(results["NLowIssues"])),
                            'info_severity_issues':
                            int(str(results["NInfoIssues"])),
                            'url':
                            dash
                        }

                        # Search for file name results["Name"] + "*.zip"
                        # only fetch/save results when DevOps Insights is present
                        if os.environ.get('DRA_IS_PRESENT') == "1":
                            appscan_get_result(jobid, results["Name"])
                            save_job_result(results["Name"], job_result)

                        #appscan_get_result(jobid)
                        print python_utils.LABEL_GREEN + python_utils.STARS
                        print "Analysis successful for job \"" + results[
                            "Name"] + "\""
                        print "\tHigh Severity Issues   : " + str(
                            results["NHighIssues"])
                        print "\tMedium Severity Issues : " + str(
                            results["NMediumIssues"])
                        print "\tLow Severity Issues    : " + str(
                            results["NLowIssues"])
                        print "\tInfo Severity Issues   : " + str(
                            results["NInfoIssues"])
                        if dash != None:
                            print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
                            # persist the dashboard url for later pipeline stages
                            f = open("result_url", "w")
                            f.write(dash)
                            f.close()
                        print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR

                        # append results to the jobResults for the json format
                        jobResults.append(job_result)
                    else:
                        python_utils.LOGGER.info("Analysis unsuccessful (" +
                                                 results["Name"] +
                                                 ") with message \"" +
                                                 results["UserMessage"] + "\"")

                        # append results to the jobResults for the json format
                        jobResults.append({
                            'job_name': results["Name"],
                            'job_id': jobid,
                            'status': "unsuccessful"
                        })

                    break
                else:
                    # job not done yet: sleep and re-poll if time permits
                    time_left = python_utils.get_remaining_wait_time()
                    if (time_left > SLEEP_TIME):
                        time.sleep(SLEEP_TIME)
                    else:
                        # ran out of time, flag that at least one job didn't complete
                        all_jobs_complete = False
                        # get what info we can on this job
                        results = appscan_info(jobid)
                        # notify the user
                        print python_utils.LABEL_RED + python_utils.STARS
                        print "Analysis incomplete for job \"" + results[
                            "Name"] + "\""
                        print "\t" + str(results["Progress"]) + "% complete"
                        if dash != None:
                            print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
                            f = open("result_url", "w")
                            f.write(dash)
                            f.close()
                        print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
                        print python_utils.STARS + python_utils.LABEL_NO_COLOR

                        # append results to the jobResults for the json format
                        jobResults.append({
                            'job_name':
                            results["Name"],
                            'job_id':
                            jobid,
                            'status':
                            "incomplete",
                            'percentage_complete':
                            int(str(results["Progress"]))
                        })

                        # and continue to get state for other jobs
                        break
        except Exception, e:
            # bad id, skip it
            if python_utils.DEBUG:
                python_utils.LOGGER.debug("exception in wait_for_scans: " +
                                          str(e))
def wait_for_scans (joblist):
    """Poll the AppScan service until every job in joblist completes or
    the remaining wait time runs out.

    NOTE(review): this is a near-duplicate of the wait_for_scans
    definition above (same behavior, different formatting), and this
    copy appears to be fused with fragments from a different snippet:
    after the per-job loop, code from what looks like a script main
    (sys.exit, slack notification, login, loginonly handling) continues
    at function indentation and ends in a truncated `if` with no body,
    which is a syntax error.  Scrape artifact -- do not use as-is.
    """
    # create array of the job results in json format
    jobResults = []
    # were all jobs completed on return
    all_jobs_complete = True
    # number of high sev issues in completed jobs
    high_issue_count = 0
    med_issue_count=0
    python_utils.LOGGER.debug("Waiting for joblist: "+str(joblist))
    dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
    for jobid in joblist:
        try:
            while True:
                state = appscan_status(jobid)
                python_utils.LOGGER.info("Job " + str(jobid) + " in state " + state)
                if get_state_completed(state):
                    results = appscan_info(jobid)
                    if get_state_successful(state):
                        high_issue_count += results["NHighIssues"]
                        med_issue_count += results["NMediumIssues"]
                        python_utils.LOGGER.info("Analysis successful (" + results["Name"] + ")")
                        #print "\tOther Message : " + msg

                        job_result = {  'job_name': results["Name"],
                                        'job_id': jobid,
                                        'status': "successful",
                                        'high_severity_issues': int(str(results["NHighIssues"])),
                                        'medium_severity_issues': int(str(results["NMediumIssues"])),
                                        'low_severity_issues': int(str(results["NLowIssues"])),
                                        'info_severity_issues': int(str(results["NInfoIssues"])),
                                        'url': dash}

                        # Search for file name results["Name"] + "*.zip"
                        if os.environ.get('DRA_IS_PRESENT') == "1":
                            appscan_get_result(jobid, results["Name"]);
                            save_job_result(results["Name"], job_result);

                        #appscan_get_result(jobid)
                        print python_utils.LABEL_GREEN + python_utils.STARS
                        print "Analysis successful for job \"" + results["Name"] + "\""
                        print "\tHigh Severity Issues   : " + str(results["NHighIssues"])
                        print "\tMedium Severity Issues : " + str(results["NMediumIssues"])
                        print "\tLow Severity Issues    : " + str(results["NLowIssues"])
                        print "\tInfo Severity Issues   : " + str(results["NInfoIssues"])
                        if dash != None:
                            print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
                            # persist the dashboard url for later pipeline stages
                            f = open("result_url","w")
                            f.write(dash)
                            f.close()
                        print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR

                        # append results to the jobResults for the json format
                        jobResults.append(job_result)
                    else: 
                        python_utils.LOGGER.info("Analysis unsuccessful (" + results["Name"] + ") with message \"" + results["UserMessage"] + "\"")

                        # append results to the jobResults for the json format
                        jobResults.append({'job_name': results["Name"], 
                                           'job_id': jobid, 
                                           'status': "unsuccessful"})

                    break
                else:
                    # job not done yet: sleep and re-poll if time permits
                    time_left = python_utils.get_remaining_wait_time()
                    if (time_left > SLEEP_TIME):
                        time.sleep(SLEEP_TIME)
                    else:
                        # ran out of time, flag that at least one job didn't complete
                        all_jobs_complete = False
                        # get what info we can on this job
                        results = appscan_info(jobid)
                        # notify the user
                        print python_utils.LABEL_RED + python_utils.STARS
                        print "Analysis incomplete for job \"" + results["Name"] + "\""
                        print "\t" + str(results["Progress"]) + "% complete"
                        if dash != None:
                            print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
                            f = open("result_url","w")
                            f.write(dash)
                            f.close()
                        print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
                        print python_utils.STARS + python_utils.LABEL_NO_COLOR

                        # append results to the jobResults for the json format
                        jobResults.append({'job_name': results["Name"], 
                                           'job_id': jobid, 
                                           'status': "incomplete",
                                           'percentage_complete': int(str(results["Progress"]))})

                        # and continue to get state for other jobs
                        break
        except Exception, e:
            # bad id, skip it
            if python_utils.DEBUG:
                python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))
        # NOTE(review): everything from here down looks like fused content
        # from a different example (a script entry point), not part of this
        # function's loop.
        sys.exit(0)

    python_utils.LOGGER = python_utils.setup_logging()
    # send slack notification 
    if os.path.isfile("%s/utilities/sendMessage.sh" % python_utils.EXT_DIR):
        command='{path}/utilities/sendMessage.sh -l info -m \"Starting static security scan\"'.format(path=python_utils.EXT_DIR)
        if python_utils.DEBUG:
            print "running command " + command 
        proc = Popen([command], shell=True, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate();
        python_utils.LOGGER.debug(out)
    else:
        if python_utils.DEBUG:
            print "sendMessage.sh not found, notifications not attempted"
    
    python_utils.WAIT_TIME = python_utils.get_remaining_wait_time(first = True)
    python_utils.LOGGER.info("Getting credentials for Static Analysis service")
    creds = python_utils.get_credentials_for_non_binding_service(service=APP_SECURITY_SERVICE)
    python_utils.LOGGER.info("Connecting to Static Analysis service")
    appscan_login(creds['bindingid'],creds['password'])

    # allow testing connection without full job scan and submission
    if parsed_args['loginonly']:
        python_utils.LOGGER.info("LoginOnly set, login complete, exiting")
        endtime = timeit.default_timer()
        print "Script completed in " + str(endtime - python_utils.SCRIPT_START_TIME) + " seconds"
        sys.exit(0)

    # if checkstate, don't really do a scan, just check state of current outstanding ones
    # NOTE(review): truncated here -- the `if` below has no body in this excerpt.
    if parsed_args['checkstate']:
        # for checkstate, don't wait, just check current
def parse_args ():
    """Parse command-line flags and related environment variables.

    Recognized flags: --nocompcheck, --novulncheck, --calldirect,
    --hidepass, --debug, --help; any non-flag argument is treated as an
    image name to check.  IMAGE_NAME and CC_CALLDIRECT env vars are also
    honored.  Sets up logging, resolves the api/crawler server globals,
    and loads CF auth info.

    Returns:
        dict of parsed options (keys: nocompcheck, novulncheck,
        calldirect, hidepass, images, debug, help).

    Raises:
        Exception: when no usable api or crawler server can be resolved.
    """
    global VULN_BASE_URL, COMP_BASE_URL, API_SERVER, CRAWLER_SERVER, CALL_VIA_API
    global BEARER_TOKEN, SPACE_GUID
    global CF_API_SERVER, API_SERVER

    # defaults for every recognized option
    parsed_args = {
        'nocompcheck': False,
        'novulncheck': False,
        'calldirect': False,
        'hidepass': False,
        'images': [],
        'debug': False,
        'help': False,
    }

    # walk the command line, skipping the program name itself
    for arg in sys.argv[1:]:
        if arg == "--nocompcheck":
            # only check vulnerabilities
            parsed_args['nocompcheck'] = True
        elif arg == "--novulncheck":
            # only check compliance
            parsed_args['novulncheck'] = True
        elif arg == "--calldirect":
            # call direct mode - bypass the api server and go straight to the crawler server
            parsed_args['calldirect'] = True
            CALL_VIA_API = False
        elif arg == "--hidepass":
            # don't print checks that passed
            parsed_args['hidepass'] = True
        elif arg == "--debug":
            # enable debug mode, can also be done with python_utils.DEBUG env var
            parsed_args['debug'] = True
            python_utils.DEBUG = "1"
        elif arg == "--help":
            # just print help and return
            parsed_args['help'] = True
        elif not arg.startswith("--"):
            # anything that isn't a flag is an image to be checked
            parsed_args['images'].append(arg)

    # environment variables can supply the same settings
    image_name = os.environ.get('IMAGE_NAME')
    if image_name:
        parsed_args['images'].append(image_name)
    if os.environ.get('CC_CALLDIRECT'):
        # call direct mode - bypass the api server and go straight to the crawler server
        parsed_args['calldirect'] = True
        CALL_VIA_API = False

    python_utils.LOGGER = python_utils.setup_logging()

    # resolve the server urls for whichever mode we're in
    if CALL_VIA_API:
        CF_API_SERVER, API_SERVER = python_utils.find_api_servers()
        if not API_SERVER:
            msg = "Cannot determine correct api server, unable to place queries"
            python_utils.LOGGER.error( msg )
            raise Exception( msg )
    else:
        CRAWLER_SERVER = os.environ.get('CRAWLER_SERVER')
        if not CRAWLER_SERVER:
            msg = "CRAWLER_SERVER is not set, unable to place queries"
            python_utils.LOGGER.error( msg )
            raise Exception( msg )
        VULN_BASE_URL = VULN_BASE_TEMPLATE % CRAWLER_SERVER
        COMP_BASE_URL = COMP_BASE_TEMPLATE % CRAWLER_SERVER

    # load creds
    BEARER_TOKEN, SPACE_GUID = python_utils.load_cf_auth_info()

    # see how much time we have left after completing init
    python_utils.WAIT_TIME = python_utils.get_remaining_wait_time(first = True)

    return parsed_args
def wait_for_image_results (images):
    """Wait for compliance and vulnerability check results for each image.

    For every image, repeatedly calls check_compliance() and
    check_vulnerabilities() (skipping whichever has already completed)
    until both finish or the remaining wait time drops below SLEEP_TIME.
    Exceptions during a check are retried; if the final attempt still
    failed with an exception, that exception is re-raised.

    NOTE(review): all_passed/any_passed are accumulated but no return
    statement is visible in this excerpt -- presumably returned further
    down; confirm against the full file.
    """
    global last_image

    all_passed = True
    any_passed = False
    # holds the most recent check exception so it can be re-raised if
    # we run out of retries
    failed_exception = None
    time_left = python_utils.WAIT_TIME
    # check all images
    for image in images:
        python_utils.LOGGER.info("Running checks on image %s" % str(image))
        comp_complete = False
        vuln_complete = False
        last_image = None
        while ((not comp_complete) or (not vuln_complete)) and (time_left >= SLEEP_TIME):
            try:
                # only check comp if not already complete
                if not comp_complete:
                    comp_complete, passed_check = check_compliance(image)
                    # if no exception, make sure it's clear
                    failed_exception = None
                    # if this check completed, and it didn't pass, mark that not all passed
                    if comp_complete and (not passed_check):
                        all_passed = False
                # only check vulnerabilities if not already complete
                if not vuln_complete:
                    vuln_complete, passed_check = check_vulnerabilities(image)
                    # if no exception, make sure it's clear
                    failed_exception = None
                    # if this check completed, and it didn't pass, mark that not all passed
                    if vuln_complete and (not passed_check):
                        all_passed = False
            except Exception, e:
                python_utils.LOGGER.debug( "non-fatal failure during check for image %s" % str(image), exc_info=e )
                # we'll retry, but save the exception for if this was the last try
                failed_exception = e
            time_left = python_utils.get_remaining_wait_time()
            if ((not comp_complete) or (not vuln_complete)) and (time_left >= SLEEP_TIME):
                python_utils.LOGGER.info( "waiting for results for image %s" % str(image) )
                time.sleep(SLEEP_TIME)

        # if we failed because of an exception, even after retries, reraise it now
        if (failed_exception != None):
            raise failed_exception

        # if no results found for a given image, display that
        # NOTE(review): the else branches set any_passed=True whenever the
        # check was skipped via --nocompcheck/--novulncheck OR completed,
        # regardless of pass/fail -- presumably intentional ("results
        # exist"), but worth confirming.
        if (not parsed_args['nocompcheck']) and (not comp_complete):
            all_passed = False
            python_utils.LOGGER.warning( python_utils.LABEL_COLOR + "no compliance results found for image %s" % str(image) + python_utils.LABEL_NO_COLOR       )
        else:
            any_passed = True
        if (not parsed_args['novulncheck']) and (not vuln_complete):
            all_passed = False
            python_utils.LOGGER.warning( python_utils.LABEL_COLOR + "no vulnerability results found for image %s" % str(image) + python_utils.LABEL_NO_COLOR       )
        else:
            any_passed = True

        # if any of the scans passed, link to the results page
        if API_SERVER and any_passed:
            if not last_image:
                # get the image id
                last_image = image
            if last_image:
                results_cmd = "bx cr va %s" % (last_image)
                python_utils.LOGGER.info("For a more in-depth review of these results, run this command: %s" % results_cmd)
            else:
                python_utils.LOGGER.debug("Unable to get image id, no command presented")
Example #7
0
def wait_for_scans (joblist):
    # were all jobs completed on return
    
    all_jobs_complete = True
    # number of high sev issues in completed jobs
    high_issue_count = 0
    med_issue_count = 0
    dash = python_utils.find_service_dashboard(APP_SECURITY_SERVICE)
    for jobid in joblist:
        try:
            while True:
                scan = refresh_appscan_info(jobid)
                state = parse_status(scan)
                python_utils.LOGGER.info("Job " + scan["Id"] + " in state " + state)
                if get_state_completed(state):
                    if get_state_successful(state):
                        high_issue_count += scan["LatestExecution"]["NHighIssues"]
                        med_issue_count += scan["LatestExecution"]["NMediumIssues"]
                        python_utils.LOGGER.info("Analysis successful (" + scan["Name"] + ")")
                        #print "\tOther Message : " + msg
                        #appscan_get_result(jobid)
                        print python_utils.LABEL_GREEN + python_utils.STARS
                        print "Analysis successful for job \"" + scan["Name"] + "\""
                        print "\tHigh Severity Issues   : " + str(scan["LatestExecution"]["NHighIssues"])
                        print "\tMedium Severity Issues : " + str(scan["LatestExecution"]["NMediumIssues"])
                        print "\tLow Severity Issues    : " + str(scan["LatestExecution"]["NLowIssues"])
                        print "\tInfo Severity Issues   : " + str(scan["LatestExecution"]["NInfoIssues"])
                        
                        if os.environ.get('DRA_IS_PRESENT') == "1":
                            job_result = {  'job_name': scan["Name"],
                                            'job_id': scan["Id"],
                                            'status': "successful",
                                            'high_severity_issues': int(str(scan["LatestExecution"]["NHighIssues"])),
                                            'medium_severity_issues': int(str(scan["LatestExecution"]["NMediumIssues"])),
                                            'low_severity_issues': int(str(scan["LatestExecution"]["NLowIssues"])),
                                            'info_severity_issues': int(str(scan["LatestExecution"]["NInfoIssues"])),
                                            'url': dash}
                            get_appscan_xml_report(scan)
                            save_job_result(scan["Name"], job_result)
                        
                        if dash != None:
                            print "See detailed results at: " + python_utils.LABEL_COLOR + " " + dash
                        print python_utils.LABEL_GREEN + python_utils.STARS + python_utils.LABEL_NO_COLOR
                    else: 
                        python_utils.LOGGER.info("Analysis unsuccessful (" + results["Name"] + ") with message \"" + results["UserMessage"] + "\"")

                    break
                else:
                    time_left = python_utils.get_remaining_wait_time()
                    if (time_left > SLEEP_TIME):
                        time.sleep(SLEEP_TIME)
                    else:
                        # ran out of time, flag that at least one job didn't complete
                        all_jobs_complete = False
                        # get what info we can on this job
                        scan = refresh_appscan_info(jobid)
                        # notify the user
                        print python_utils.LABEL_RED + python_utils.STARS
                        print "Analysis incomplete for job \"" + scan["Name"] + "\""
                        print "\t" + str(scan["LatestExecution"]["Progress"]) + "% complete"
                        if dash != None:
                            print "Track current state and results at: " + python_utils.LABEL_COLOR + " " + dash
                        print python_utils.LABEL_RED + "Increase the time to wait and rerun this job. The existing analysis will continue and be found and tracked."
                        print python_utils.STARS + python_utils.LABEL_NO_COLOR

                        # and continue to get state for other jobs
                        break
        except Exception, e:
            # bad id, skip it
            if python_utils.DEBUG:
                python_utils.LOGGER.debug("exception in wait_for_scans: " + str(e))