# The snippets below assume these imports; the smc_monitoring paths follow
# the fp-NGFW-SMC-python (smc-python) package layout.
import asyncio
import itertools
import threading
from collections import deque

from botocore.exceptions import ClientError, EndpointConnectionError
from smc import session
from smc_monitoring.monitors.logs import LogQuery
from smc_monitoring.wsocket import FetchAborted

def setup_sec_hub():
    if not cfg.get('aws-integration', False):
        return
    try:
        client.enable_security_hub()
    except client.exceptions.ResourceConflictException as exception:
        write_to_log(exception)
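# Every snippet in this listing relies on a module-level `cfg` dict, a
# Security Hub `client`, and a `write_to_log` helper, none of which are shown
# in the source. A minimal sketch, assuming the config lives in a cfg.json
# file and logging goes through the standard logging module (file names here
# are illustrative):
import json
import logging

import boto3

with open('cfg.json') as cfg_file:  # assumed config location
    cfg = json.load(cfg_file)

client = boto3.client('securityhub', region_name=cfg['region_name'])

logging.basicConfig(filename='connector.log', level=logging.INFO)

def write_to_log(message):
    # Responses and exceptions alike are stringified into the log file
    logging.info(str(message))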
def create_default_insights():
    try:
        insight_arns = open_insights_arn_file()
        aws_insight_arns = retrieve_insight_arns_as_list()

        if insight_arns and set(insight_arns).issubset(set(aws_insight_arns)):
            # The insights already exist on AWS
            return
        # Create the default insights from the local insights file
        response_list = []
        for insight in open_insights_file():
            response = client.create_insight(
                Name=insight['Name'],
                Filters=insight['Filters'],
                GroupByAttribute=insight['GroupByAttribute'])

            response_list.append(response['InsightArn'])

        write_to_insights_arn_file(response_list)
    except client.exceptions.LimitExceededException as exc:
        write_to_log(exc)
    except EndpointConnectionError as exception:
        write_to_log(exception)
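# The file and API helpers used by create_default_insights() are not part of
# the listing. A plausible sketch, assuming the insight definitions and the
# ARNs of insights created so far are kept in local JSON files
# (insights.json / insight_arns.json are invented names):
def open_insights_file():
    with open('insights.json') as f:
        return json.load(f)

def open_insights_arn_file():
    try:
        with open('insight_arns.json') as f:
            return json.load(f)
    except FileNotFoundError:
        return []

def write_to_insights_arn_file(arn_list):
    with open('insight_arns.json', 'w') as f:
        json.dump(arn_list, f)

def retrieve_insight_arns_as_list():
    # Page through every insight in the account and collect the ARNs
    paginator = client.get_paginator('get_insights')
    return [insight['InsightArn']
            for page in paginator.paginate()
            for insight in page['Insights']]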
def enable_batch_import_findings():
    if not cfg.get('aws-integration', False):
        return
    product_arn = f"arn:aws:securityhub:{cfg['region_name']}:365761988620:product/forcepoint/forcepoint-ngfw"
    try:
        client.enable_import_findings_for_product(ProductArn=product_arn)
    except client.exceptions.ResourceConflictException as exception:
        write_to_log(exception)
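# enable_import_findings_for_product() subscribes the account to the
# Forcepoint NGFW product integration so its findings are accepted. Should
# the hard-coded product ARN ever need verifying, Security Hub can enumerate
# the available integrations; a quick lookup might look like this
# (find_forcepoint_product_arn is an invented helper):
def find_forcepoint_product_arn():
    paginator = client.get_paginator('describe_products')
    for page in paginator.paginate():
        for product in page['Products']:
            if 'forcepoint-ngfw' in product['ProductArn']:
                return product['ProductArn']
    return None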
async def amazon_security_hub_batch_upload(asff_findings):
    try:
        # BatchImportFindings accepts at most 100 findings per request,
        # so split the findings into chunks of that size
        chunked_values = chunker(asff_findings, 100)

        queue = deque(__sanitise_list_input(chunked_values))

        # Upload to AWS Security Hub and write the response to a log file
        while queue:
            write_to_log(
                client.batch_import_findings(Findings=queue.popleft()))

    except EndpointConnectionError as exception:
        write_to_log(exception)
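# `chunker` and `__sanitise_list_input` are referenced above but never shown.
# A plausible sketch: chunker yields fixed-size slices of the findings list,
# and the sanitiser drops empty chunks so batch_import_findings is never
# called with an empty Findings list:
def chunker(seq, size):
    # Yield successive size-length slices of seq
    return (seq[pos:pos + size] for pos in range(0, len(seq), size))

def __sanitise_list_input(chunks):
    # Keep only the non-empty chunks
    return [chunk for chunk in chunks if chunk]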
async def amazon_security_hub_batch_upload(asff_findings, max_finding_date):
    if not cfg.get('aws-integration', False):
        return
    try:
        # BatchImportFindings accepts at most 100 findings per request
        chunked_values = chunker(asff_findings, 100)
        queue = deque(__sanitise_list_input(chunked_values))

        # Upload to AWS Security Hub and write the response to a log file
        while queue:
            try:
                resp = client.batch_import_findings(Findings=queue.popleft())
                if resp['FailedCount'] > 0:
                    # Stop on the first failed batch so 'latest-date' is not
                    # advanced past findings that never reached Security Hub
                    write_to_log(resp['FailedFindings'])
                    return
            except ClientError as e:
                write_to_log(e.response)
                return

        cfg['latest-date'] = max_finding_date
        write_config_file(cfg)

    except EndpointConnectionError as exception:
        write_to_log(exception)
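# `write_config_file` persists the updated 'latest-date' so the next run can
# filter out events that were already uploaded. A minimal sketch, assuming it
# writes back to the same cfg.json the configuration was read from:
def write_config_file(config):
    with open('cfg.json', 'w') as cfg_file:  # assumed config location
        json.dump(config, cfg_file, indent=4)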
def run_query_and_upload():
    aws_integration = cfg.get('aws-integration', False)
    azure_integration = cfg.get('azure-integration', False)

    # Enable Security Hub, subscribe to the product integration, and create
    # the default insights if they don't already exist
    if aws_integration:
        threading.Thread(target=create_default_insights).start()
        setup_sec_hub()
        enable_batch_import_findings()

    default_filter = __setup_smc_query_filter(cfg['default-filter'])

    smc_url = cfg['host-ip'] + ':' + cfg['host-port']

    api_version = __get_latest_api_version(smc_url)

    session.login(url=smc_url, api_key=cfg['client-api-key'], api_version=api_version)

    try:
        query = LogQuery(fetch_size=int(cfg['fetch-size']))

        translated_filter = query.add_translated_filter()

        # Apply the default filter specified in the config file
        translated_filter.update_filter(default_filter)

        # Query the SMC for events matching the filter and flatten the list of result-lists into a single list
        record_list = list(itertools.chain(*query.fetch_raw()))

        extra_filters_enabled = cfg['extra-filters-enabled']

        # If extra filters are enabled and any are present, request matching
        # events from the SMC for each one and append them to the results list
        if extra_filters_enabled:
            extra_filters = cfg['extra-filters']
            if extra_filters:
                for log_filter in extra_filters:
                    translated_filter.update_filter(__setup_smc_query_filter(log_filter))
                    record_list.extend(list(itertools.chain(*query.fetch_raw())))

        if record_list:
            # Find the max date in the record list and store this to add to the filter for subsequent queries
            # to avoid uploading duplicates/wasting bandwidth. This value is written to the cfg.json file
            max_finding_date = format_date_smc_filter(max(item['Creation Time'] for item in record_list))

            loop = asyncio.get_event_loop()

            # Map to appropriate format and upload if integration is active
            if aws_integration:
                aws_task = loop.create_task(
                    amazon_security_hub_batch_upload(list(map(create_asff_object, record_list)), max_finding_date))
                loop.run_until_complete(aws_task)

            if azure_integration:
                send_sentinel_data(list(map(format_smc_logs_to_cef, record_list)), max_finding_date)

    # This catches any issues related to requesting events with a malformed filter
    except FetchAborted as exception:
        print(exception)
        write_to_log(exception)

    session.logout()
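# `create_asff_object` maps one raw SMC record to the AWS Security Finding
# Format (ASFF) expected by batch_import_findings. The real field mapping is
# not shown in the listing; the sketch below fills only the fields ASFF
# requires and invents the SMC field names ('Situation', 'Node Id') it reads:
import uuid
from datetime import datetime, timezone

def create_asff_object(record):
    now = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
    return {
        'SchemaVersion': '2018-10-08',
        'Id': str(uuid.uuid4()),
        'ProductArn': f"arn:aws:securityhub:{cfg['region_name']}:365761988620:"
                      "product/forcepoint/forcepoint-ngfw",
        'GeneratorId': 'forcepoint-ngfw-smc',
        'AwsAccountId': cfg['AwsAccountId'],
        'Types': ['Unusual Behaviors/Network Flow'],
        'CreatedAt': now,
        'UpdatedAt': now,
        'Severity': {'Label': 'INFORMATIONAL'},
        'Title': record.get('Situation', 'Forcepoint NGFW event'),
        'Description': str(record),
        'Resources': [{'Type': 'Other', 'Id': record.get('Node Id', 'unknown')}],
    }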
def setup_sec_hub():
    try:
        client.enable_security_hub()
    except client.exceptions.ResourceConflictException as exception:
        write_to_log(exception)
def enable_batch_import_findings():
    product_arn = f"arn:aws:securityhub:{cfg['region_name']}:{cfg['AwsAccountId']}:product/forcepoint/forcepoint-ngfw"
    try:
        client.enable_import_findings_for_product(ProductArn=product_arn)
    except client.exceptions.ResourceConflictException as exception:
        write_to_log(exception)
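# A possible entry point when the connector is run on a schedule (cron or
# similar); the guard itself is an assumption, not part of the original:
if __name__ == '__main__':
    run_query_and_upload()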