Example #1
def on_finish(email, summary):
    import pprint  # needed for pprint.pformat below
    setupkey = 'setup_data' + str(email['current_rule_run_id'])
    collectkey = 'collect_data' + str(email['current_rule_run_id'])
    email_to, email_from, smtp_asset = phantom.get_data(setupkey, clear_data=True)
    container_url = phantom.get_base_url() + 'container/' + str(email['id'])
    # calling get_summary to find out if we actually had anything we acted on
    getsummary = phantom.get_summary()
    #phantom.debug('Get summary: {}'.format(getsummary))
    #
    if len(getsummary['result']) > 0: # we have processed at least one item in on_start
        collected_results, collected_vault_items, container_owner = phantom.get_data(collectkey, clear_data=True)
        # finalize the vault item info and add to email
        for vaultid in collected_vault_items.keys():
            vaultinfo = phantom.get_vault_item_info(vaultid)
            for app_run_id, datavalues in collected_results.items():
                #phantom.debug('iterate collected results: \napprunid: {}\n\ndatavals: {}'.format(app_run_id, datavalues))
                if datavalues['detonate_summary']['target'] == vaultid:
                    collected_results[app_run_id]['vault_info'] = vaultinfo
        if len(collected_results) < (len(getsummary['result'])-2): # subtracting actions that aren't counted as detonations
            collected_results['message'] = "Unexpected: Collected Results: {} is less than actions run: {}".format(len(collected_results), (len(getsummary['result'])-2))
        # send summary email
        email_subject = "Results: Ingest file detonatation"
        email_body = '\nPhantom Container ID: {} - Owner: {}\nURL: {}\nReturned results by app_run_id:\n{}'.format(email['id'], container_owner, container_url, pprint.pformat(collected_results, indent=4))
        phantom.act('send email', parameters=[{ "from" : email_from,  "to" : email_to,  "subject" : email_subject,  "body" : email_body }], assets=[smtp_asset], callback=send_email_cb)
        phantom.debug("Summary: " + pprint.pformat(summary, indent=4))
    else: # no artifacts run on
        phantom.debug('No artifacts, sending abort email.')
        email_subject = "Results: No artifacts to run, aborting"
        email_body = '\nPhantom Container ID: {}\nURL: {} \nSummary:\n{}'.format(email['id'],container_url,summary)
        phantom.act('send email', parameters=[{ "from" : email_from,  "to" : email_to,  "subject" : email_subject,  "body" : email_body }], assets=[smtp_asset], callback=send_email_cb)
    return
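# The 'send email' actions above name a send_email_cb callback that is not shown in
# this listing. A minimal sketch of what it might look like, assuming the legacy
# (action, success, container, results, handle) callback signature used in these
# examples; the body is illustrative, not the original author's code.
def send_email_cb(action, success, container, results, handle):
    if not success:
        # surface delivery failures in the debug log so they are not silently lost
        phantom.debug('send email failed: {}'.format(results))
    return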
Example #2
def add_to_blocklist(action=None,
                     success=None,
                     container=None,
                     results=None,
                     handle=None,
                     filtered_artifacts=None,
                     filtered_results=None):
    phantom.debug('add_to_blocklist() called')

    #phantom.debug('Action: {0} {1}'.format(action['name'], ('SUCCEEDED' if success else 'FAILED')))
    filtered_artifacts_data_1 = phantom.collect2(
        container=container,
        datapath=[
            'filtered-data:filter_3:condition_1:artifact:*.cef.sourceAddress',
            'filtered-data:filter_3:condition_1:artifact:*.id'
        ])

    phantom_url = phantom.get_base_url()
    container_url = "{}/mission/{}".format(phantom_url, container['id'])

    for filtered_artifacts_item_1 in filtered_artifacts_data_1:
        if filtered_artifacts_item_1[0]:
            phantom.datastore_add(
                'blocked_ips',
                [filtered_artifacts_item_1[0], 'yes', container_url])

    block_ip_1(action, success, container, results, handle, filtered_artifacts,
               filtered_results)
    return
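# block_ip_1 is invoked above but not included in this listing. A hypothetical stub,
# following the phantom.act(...) pattern used throughout these examples; the action
# name ('block ip') and asset ('firewall') are assumptions for illustration.
def block_ip_1(action, success, container, results, handle, filtered_artifacts, filtered_results):
    phantom.debug('block_ip_1() called')
    filtered_data = phantom.collect2(
        container=container,
        datapath=['filtered-data:filter_3:condition_1:artifact:*.cef.sourceAddress'])
    # build one parameter set per non-empty source address
    parameters = [{'ip': item[0]} for item in filtered_data if item[0]]
    if parameters:
        phantom.act('block ip', parameters=parameters, assets=['firewall'])
    return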
Example #3
def on_start(email):
    import pprint  # needed for pprint.pformat below
    #
    #phantom.debug('Email container data:\n {}\n\n'.format(email))
    email_to = "email_to@my_enterprise.com"
    email_from = "*****@*****.**"
    smtp_asset = "smtp"
    # these keys are used to save persistent data across the playbook,
    # they must be unique per rule run ID, otherwise it's possible the data
    # could be clobbered by another playbook running at the same time
    setupkey = 'setup_data' + str(email['current_rule_run_id'])
    collectkey = 'collect_data' + str(email['current_rule_run_id'])
    phantom.save_data([email_to, email_from, smtp_asset], key=setupkey)
    #
    collected_results = dict()
    collected_vault_items = dict()
    container_owner = "None"
    container_url = phantom.get_base_url() + 'container/' + str(email['id'])
    ##
    # we need the vault_id of the email attachment to pass to the detonate action,
    # so we use phantom.collect to grab the cef field (cs6) where the vault_id is placed on the artifact
    vaultid = phantom.collect(email, 'artifact:*.cef.cs6', scope='new')
    #
    if len(vaultid) > 0:  # we have at least one item to process
        # lets grab the owner of the container and make it something useful if blank
        if email['owner'] == '':
            container_owner = 'None'
        else:
            container_owner = email['owner']
        phantom.debug('url: {}'.format(phantom.get_base_url()))
        email_body = "\nStarted file detonations on container_id: {} - Owner: {}\nURL: {}\nvault_item_info:\n".format(email['id'], container_owner, container_url)
        for vault_item in vaultid:
            vaultinfo = phantom.get_vault_item_info(vault_item)
            for vault_item_info in vaultinfo:
                collected_vault_items[vault_item] = vault_item_info
                email_body = email_body + pprint.pformat(vault_item_info, indent=4) + '\n'
            phantom.act('detonate file', parameters=[{'vault_id':vault_item}], assets=["threatgrid"], callback=detonate_file_cb)
        email_subject = "Running: Detonating files from ingest"
        # save modified data
        phantom.save_data([collected_results, collected_vault_items, container_owner], key=collectkey)
        # send email
        phantom.act('send email', parameters=[{ "from" : email_from,  "to" : email_to,  "subject" : email_subject,  "body" : email_body }], assets=[smtp_asset], callback=send_email_cb)
    else: # no artifacts run on
        phantom.debug('No artifacts to process, ending on_start without running any actions. \n{}'.format(email))

    return
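# detonate_file_cb is referenced by on_start above but not shown. A minimal sketch of
# how such a callback might fold action results into the saved collect_data blob that
# on_finish (Example #1) later reads; the exact shape of the recorded summary is an
# assumption, not the original author's code.
def detonate_file_cb(action, success, email, results, handle):
    collectkey = 'collect_data' + str(email['current_rule_run_id'])
    collected_results, collected_vault_items, container_owner = phantom.get_data(collectkey, clear_data=True)
    # hypothetical: key each detonation result by app_run_id, as on_finish expects
    for result in results:
        app_run_id = result.get('app_run_id')
        if app_run_id:
            collected_results[app_run_id] = {'detonate_summary': result.get('summary', {})}
    phantom.save_data([collected_results, collected_vault_items, container_owner], key=collectkey)
    return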
Example #4
def add_endpoint_to_patched_list(container):
    # collect data for 'add_to_remediated_list_1' call
    infected_endpoints = phantom.collect2(
        container=container,
        datapath=['artifact:*.cef.sourceAddress', 'artifact:*.id'])

    phantom_url = phantom.get_base_url()
    container_url = "{}/mission/{}".format(phantom_url, container['id'])

    for infected_endpoint in infected_endpoints:
        if infected_endpoint[0]:
            phantom.datastore_add('wannacry_patched_endpoints',
                                  [infected_endpoint[0], 'yes', container_url])

    return
Example #5
def get_case_note_count(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None):
    import requests  # needed for the direct REST call below
    phantom.debug('get_case_note_count() called')
    
    # get the container id and phantom url to format for the request
    container_id = container.get('id', None)
    phantom_url = phantom.get_base_url()
    request_url = "{}/rest/container/{}/phases".format(str(phantom_url), str(container_id))
    
    # make the request (the credentials here are placeholders)
    r = requests.get(request_url, auth=("admin", "password"), verify=False).json()
    
    # check to see if all notes fields are filled out
    notes_counter = 0
    for i in r["data"][0]["tasks"]:
        phantom.debug(i)
        if i["notes"]:
            notes_counter = notes_counter + 1

    # if all the fields are filled out prompt before emailing, if not sleep and check again
    if notes_counter != 3:
        no_op_2(container=container, handle=notes_counter)
    else:
        for i in r["data"][0]["tasks"]:
            raw = {}
            cef = {}
            cef['container_note'] = i["notes"][0]["content"]
    
            success, message, artifact_id = phantom.add_artifact(
                container=container, raw_data=raw, cef_data=cef, label='note',
                name='container note', severity='low',
                identifier=None,
                artifact_type='note')
        
        prompt_1(container=container)

    return
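# Note: the block above authenticates with literal credentials. A sketch of the same
# request using the built-in REST helpers that the later examples rely on, assuming
# phantom.build_phantom_rest_url and phantom.requests are available on this release:
def get_phases(container):
    phases_url = phantom.build_phantom_rest_url('container', container['id'], 'phases')
    # phantom.requests authenticates as the automation user, so no hardcoded password
    return phantom.requests.get(phases_url, verify=False).json()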
Example #6
def find_related_containers(value_list=None, minimum_match_count=None, container=None, earliest_time=None, filter_status=None, filter_label=None, filter_severity=None, filter_in_case=None, **kwargs):
    """
    Takes a provided list of indicator values to search for and finds all related containers. It will produce a list of the related container details.
    
    Args:
        value_list (CEF type: *): An indicator value to search on, such as a file hash or IP address. To search on all indicator values in the container, use "*".
        minimum_match_count (CEF type: *): The minimum number of similar indicator records that a container must have to be considered "related." If no match count is provided, this defaults to 1.
        container (CEF type: phantom container id): The container to run indicator analysis against. Supports container object or container_id. This container will also be excluded from the results for related_containers.
        earliest_time: Optional modifier to only consider related containers within a time window. Default is -30d. Supports year (y), month (mon), week (w), day (d), hour (h), or minute (m). The custom function will always set the earliest container window based on the input container "create_time".
        filter_status: Optional comma-separated list of statuses to filter on. Only containers that have statuses matching an item in this list will be included.
        filter_label: Optional comma-separated list of labels to filter on. Only containers that have labels matching an item in this list will be included.
        filter_severity: Optional comma-separated list of severities to filter on. Only containers that have severities matching an item in this list will be included.
        filter_in_case: Optional parameter to filter containers that are in a case or not. Defaults to True (drop containers that are already in a case).
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.container_id (CEF type: *): The unique id of the related container
        *.container_indicator_match_count: The number of indicators matched to the related container
        *.container_status: The status of the related container e.g. new, open, closed
        *.container_type: The type of the related container, e.g. default or case
        *.container_name: The name of the related container
        *.in_case: True or False if the related container is already included in a case
        *.indicator_ids: The indicator IDs that matched
        *.container_url (CEF type: url): Link to container
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    import re
    from datetime import datetime, timedelta
    from urllib import parse
    
    outputs = []
    related_containers = []
    indicator_id_dictionary = {}
    container_dictionary = {}
    
    base_url = phantom.get_base_url()
    indicator_by_value_url = phantom.build_phantom_rest_url('indicator_by_value')
    indicator_common_container_url = phantom.build_phantom_rest_url('indicator_common_container')
    container_url = phantom.build_phantom_rest_url('container')

    # Get indicator ids based on value_list
    def format_offset_time(seconds):
        datetime_obj = datetime.now() - timedelta(seconds=seconds)
        formatted_time = datetime_obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')  
        return formatted_time
    
    def fetch_indicator_ids(value_list):
        indicator_id_list = []
        for value in value_list:
            params = {'indicator_value': f'{value}', 'timerange': 'all'}
            indicator_id = phantom.requests.get(indicator_by_value_url, params=params, verify=False).json().get('id')
            if indicator_id:
                indicator_id_list.append(indicator_id)
        return indicator_id_list
    
    # Ensure valid time modifier
    if earliest_time:
        # convert user-provided input to seconds
        char_lookup = {'y': 31557600, 'mon': 2592000, 'w': 604800, 'd': 86400, 'h': 3600, 'm': 60}
        pattern = re.compile(r'-(\d+)([mM][oO][nN]|[yYwWdDhHmM]{1})$')
        if re.search(pattern, earliest_time):
            integer, char = (re.findall(pattern, earliest_time)[0])
            time_in_seconds = int(integer) * char_lookup[char.lower()]
        else:
            raise RuntimeError(f'earliest_time string "{earliest_time}" is incorrectly formatted. Format is -<int><time> where <int> is an integer and <time> is y, mon, w, d, h, or m. Example: "-1h"')
    else:
        # default 30 days in seconds
        time_in_seconds = 2592000

    # Ensure valid container input
    if isinstance(container, dict) and container.get('id'):
        current_container = container['id']
    elif isinstance(container, int):
        current_container = container
    else:
        raise TypeError("The input 'container' is neither a container dictionary nor an int, so it cannot be used")
    
    if minimum_match_count and not isinstance(minimum_match_count, int):
        raise TypeError(f"Invalid type for 'minimum_match_count', {type(minimum_match_count)}, must be 'int'")
    elif not minimum_match_count:
        minimum_match_count = 1
    
    # Ensure valid filter inputs
    status_list, label_list, severity_list = [], [], []
    if isinstance(filter_status, str):
        status_list = [item.strip().lower() for item in filter_status.split(',')]
    if isinstance(filter_label, str):
        label_list = [item.strip().lower() for item in filter_label.split(',')]
    if isinstance(filter_severity, str):
        severity_list = [item.strip().lower() for item in filter_severity.split(',')]
    if isinstance(filter_in_case, str) and filter_in_case.lower() == 'false':
        filter_in_case = False
    else:
        filter_in_case = True
    
    # If value list is equal to * then proceed to grab all indicator records for the current container
    if isinstance(value_list, list) and value_list[0] == "*":
        new_value_list = []
        url = phantom.build_phantom_rest_url('container', current_container, 'artifacts') + '?page_size=0'
        response_data = phantom.requests.get(uri=url, verify=False).json().get('data')
        if response_data:
            for data in response_data:
                for k,v in data['cef'].items():
                    if isinstance(v, list):
                        for item in v:
                            new_value_list.append(item)
                    else:
                        new_value_list.append(v)
        new_value_list = list(set(new_value_list))
        indicator_id_list = fetch_indicator_ids(new_value_list)
    elif isinstance(value_list, list):
        # dedup value_list
        value_list = list(set(value_list))
        indicator_id_list = fetch_indicator_ids(value_list)
    else:
        raise TypeError(f"Invalid input for value_list: '{value_list}'")

    # Quit early if no indicator_ids were found
    if not indicator_id_list:
        phantom.debug(f"No indicators IDs found for provided values: '{value_list}'")
        assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
        return outputs
    
    # Get list of related containers
    for indicator_id in list(set(indicator_id_list)):
        params = {'indicator_ids': indicator_id}
        response_data = phantom.requests.get(indicator_common_container_url, params=params, verify=False).json()
        # Populate an indicator dictionary where the original ids are the dictionary keys and the
        # associated containers are the values
        if response_data:
            # Quit early if no related containers were found
            if len(response_data) == 1 and response_data[0].get('container_id') == current_container:
                phantom.debug(f"No related containers found for provided values: '{value_list}'")
                assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
                return outputs
            
            indicator_id_dictionary[str(indicator_id)] = []
            for item in response_data:
                # Append all related containers except for current container
                if item['container_id'] != current_container:
                    indicator_id_dictionary[str(indicator_id)].append(item['container_id'])

    # Iterate through the newly created indicator id dictionary and create a dictionary where 
    # the keys are related containers and the values are the associated indicator ids
    for k,v in indicator_id_dictionary.items():
        for item in v:
            if str(item) not in container_dictionary.keys():
                container_dictionary[str(item)] = [str(k)]
            else:
                container_dictionary[str(item)].append(str(k))
        
    # Iterate through the newly created container dictionary                
    if container_dictionary:
        
        container_number = 0
        # Dedupe the number of indicators
        for k,v in container_dictionary.items():
            container_dictionary[str(k)] = list(set(v))
            # Count how many containers are actually going to be queried based on minimum_match_count
            if len(container_dictionary[str(k)]) >= minimum_match_count:
                container_number += 1
                
        # If the container number is 600 or more, it's faster to grab all containers at once
        if container_number >= 600:

            # Gather container data
            params = {'page_size': 0}
            # Restrict the query to the computed time window
            params['_filter_create_time__gt'] = f'"{format_offset_time(time_in_seconds)}"'
            containers_response = phantom.requests.get(uri=container_url, params=params, verify=False).json()
            all_container_dictionary = {}
            if containers_response['count'] > 0:
                
                # Build repository of available container data
                for data in containers_response['data']:
                    all_container_dictionary[str(data['id'])] = data

                for k,v in container_dictionary.items():

                    # Omit any containers that have less than the minimum match count
                    if len(container_dictionary[str(k)]) >= minimum_match_count:
                        valid_container = True
                        # Grab container details if it's a valid container based on previous filtering.
                        if str(k) in all_container_dictionary.keys():
                            container_data = all_container_dictionary[str(k)]
                            
                            # Omit any containers that don't meet the specified criteria
                            if container_data['create_time'] < format_offset_time(time_in_seconds): 
                                valid_container = False
                            if status_list and container_data['status'].lower() not in status_list:
                                valid_container = False
                            if label_list and container_data['label'].lower() not in label_list:
                                valid_container = False
                            if severity_list and container_data['severity'].lower() not in severity_list:
                                valid_container = False
                            if container_data['in_case'] and filter_in_case:
                                valid_container = False
                                
                            # Build outputs if checks are passed
                            if valid_container:
                                outputs.append({
                                    'container_id': str(k),
                                    'container_indicator_match_count': len(container_dictionary[str(k)]),
                                    'container_status': container_data['status'],
                                    'container_severity': container_data['severity'],
                                    'container_type': container_data['container_type'],
                                    'container_name': container_data['name'],
                                    'container_url': base_url.rstrip('/') + '/mission/{}'.format(str(k)),
                                    'in_case': container_data['in_case'],
                                    'indicator_ids': container_dictionary[str(k)]
                                })

            else:
                raise RuntimeError(f"'Unable to find any valid containers at url: '{url}'")
                
        elif container_number < 600 and container_number > 0:
            # If the container number is smaller than 600, it's faster to grab each container individually
            for k,v in container_dictionary.items():
                # Dedupe the number of indicators
                container_dictionary[str(k)] = list(set(v))

                # If any of the containers contain more than the minimum match count request that container detail.
                if len(container_dictionary[str(k)]) >= minimum_match_count:
                    
                    valid_container = True
                    
                    # Grab container details
                    url = phantom.build_phantom_rest_url('container', k)
                    response_data = phantom.requests.get(url, verify=False).json()
                            
                    # Omit any containers that don't meet the specified criteria
                    if response_data['create_time'] < format_offset_time(time_in_seconds): 
                        valid_container = False
                    if status_list and response_data['status'].lower() not in status_list:
                        valid_container = False
                    if label_list and response_data['label'].lower() not in label_list:
                        valid_container = False
                    if severity_list and response_data['severity'].lower() not in severity_list:
                        valid_container = False
                    if response_data['in_case'] and filter_in_case:
                        valid_container = False
                    
                    # Build outputs if checks are passed and valid_container is still true
                    if valid_container: 
                        outputs.append({
                            'container_id': str(k),
                            'container_indicator_match_count': len(container_dictionary[str(k)]),
                            'container_status': response_data['status'],
                            'container_severity': response_data['severity'],
                            'container_type':  response_data['container_type'],
                            'container_name':  response_data['name'],
                            'container_url': base_url.rstrip('/') + '/mission/{}'.format(str(k)),
                            'in_case': response_data['in_case'],
                            'indicator_ids': container_dictionary[str(k)]
                        })


    else:
        raise RuntimeError('Unable to create container_dictionary')               
    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
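# A hedged usage sketch for find_related_containers: searching on every indicator in
# the current container and keeping only reasonably strong matches. All parameter
# values here are illustrative assumptions.
def example_find_related(container):
    related = find_related_containers(
        value_list=['*'],           # '*' searches on all indicator values in the container
        minimum_match_count=2,      # require at least two shared indicators
        container=container,
        earliest_time='-30d',
        filter_status='new,open',
        filter_in_case='false')     # keep containers even if they are already in a case
    for item in related:
        phantom.debug('related container {}: {} shared indicators'.format(
            item['container_id'], item['container_indicator_match_count']))
    return related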
Example #7
def container_merge(target_container=None,
                    container_list=None,
                    workbook=None,
                    close_containers=None,
                    **kwargs):
    """
    An alternative to the add-to-case API call. This function will copy all artifacts, automation, notes and comments over from every container within the container_list into the target_container. The target_container will be upgraded to a case.
    
    The notes will be copied over with references to the child containers from where they came. A note will be left in the child containers with a link to the target container. The child containers will be marked as evidence within the target container. 
    
    Any notes left as a consequence of the merge process will be skipped in subsequent merges.
    
    Args:
        target_container (CEF type: phantom container id): The target container to copy the information over. Supports container dictionary or container id.
        container_list: A list of container IDs to copy into the target container.
        workbook: Name or ID of the workbook to add if the container does not have a workbook yet. If no workbook is provided, the system default workbook will be added.
        close_containers: True or False to close the child containers in the container_list after merge. Defaults to False.
    
    Returns a JSON-serializable object that implements the configured data paths:
        
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {}

    # Check if valid target_container input was provided
    if isinstance(target_container, int):
        container = phantom.get_container(target_container)
    elif isinstance(target_container, dict):
        container = target_container
    else:
        raise TypeError(
            f"target_container '{target_container}' is neither an int nor a dictionary"
        )

    container_url = phantom.build_phantom_rest_url('container',
                                                   container['id'])

    # Check if container_list input is a list of IDs (ints or numeric strings)
    if not (isinstance(container_list, list)
            and (all(isinstance(x, int) for x in container_list)
                 or all(isinstance(x, str) and x.isnumeric()
                        for x in container_list))):
        raise TypeError(
            f"container_list '{container_list}' is not a list of integers")

    ## Prep parent container as case with workbook ##
    workbook_name = phantom.requests.get(
        container_url, verify=False).json().get('workflow_name')
    # If workbook already exists, proceed to promote to case
    if workbook_name:
        phantom.debug(
            "workbook already exists. adding [Parent] to container name and promoting to case"
        )
        update_data = {'container_type': 'case'}
        if '[Parent]' not in container['name']:
            update_data['name'] = "[Parent] {}".format(container['name'])
        phantom.update(container, update_data)
    # If no workbook exists, add one
    else:
        phantom.debug(
            "no workbook in container. adding one by name or using the default"
        )
        # If workbook ID was provided, add it
        if isinstance(workbook, int):
            workbook_id = workbook
            phantom.add_workbook(container=container['id'],
                                 workbook_id=workbook_id)
        # elif workbook name was provided, attempt to translate it to an id
        elif isinstance(workbook, str):
            workbook_url = phantom.build_phantom_rest_url(
                'workbook_template') + '?_filter_name="{}"'.format(workbook)
            response = phantom.requests.get(workbook_url, verify=False).json()
            if response['count'] > 1:
                raise RuntimeError(
                    'Unable to add workbook - more than one ID matches workbook name'
                )
            elif response['count'] == 1:
                workbook_id = response['data'][0]['id']
                phantom.add_workbook(container=container['id'],
                                     workbook_id=workbook_id)
        else:
            # Adding default workbook
            phantom.promote(container=container['id'])
        # Check again to see if a workbook now exists
        workbook_name = phantom.requests.get(
            container_url, verify=False).json().get('workflow_name')
        # If workbook is now present, promote to case
        if workbook_name:
            update_data = {'container_type': 'case'}
            if '[Parent]' not in container['name']:
                update_data['name'] = "[Parent] {}".format(container['name'])
            phantom.update(container, update_data)
        else:
            raise RuntimeError(
                f"Error occurred during workbook add for workbook '{workbook_name}'"
            )

    ## Check if current phase is set. If not, set the current phase to the first available phase to avoid artifact merge error ##
    if not container.get('current_phase_id'):
        phantom.debug(
            "no current phase, so setting first available phase to current")
        workbook_phase_url = phantom.build_phantom_rest_url(
            'workbook_phase') + "?_filter_container={}".format(container['id'])
        request_json = phantom.requests.get(workbook_phase_url,
                                            verify=False).json()
        update_data = {'current_phase_id': request_json['data'][0]['id']}
        phantom.update(container, update_data)

    child_container_list = []
    child_container_name_list = []
    # Iterate through child containers
    for child_container_id in container_list:

        ### Begin child container processing ###
        phantom.debug(
            "Processing Child Container ID: {}".format(child_container_id))

        child_container = phantom.get_container(child_container_id)
        child_container_list.append(child_container_id)
        child_container_name_list.append(child_container['name'])
        child_container_url = phantom.build_phantom_rest_url(
            'container', child_container_id)

        ## Update container name with parent relationship
        if "[Parent:" not in child_container['name']:
            update_data = {
                'name':
                "[Parent: {0}] {1}".format(container['id'],
                                           child_container['name'])
            }
            phantom.update(child_container, update_data)

        ## Gather and add notes ##
        for note in phantom.get_notes(container=child_container_id):
            # Avoid copying any notes related to the merge process.
            if note['success'] and not note['data']['title'] in (
                    '[Auto-Generated] Related Containers',
                    '[Auto-Generated] Parent Container',
                    '[Auto-Generated] Child Containers'):
                phantom.add_note(container=container['id'],
                                 note_type='general',
                                 note_format=note['data']['note_format'],
                                 title="[From Event {0}] {1}".format(
                                     note['data']['container'],
                                     note['data']['title']),
                                 content=note['data']['content'])

        ## Copy information and add to case
        data = {
            'add_to_case': True,
            'container_id': child_container_id,
            'copy_artifacts': True,
            'copy_automation': True,
            'copy_files': True,
            'copy_comments': True
        }
        phantom.requests.post(container_url, json=data, verify=False)

        ## Leave a note with a link to the parent container
        phantom.debug(
            "Adding parent relationship note to child container '{}'".format(
                child_container_id))
        data_row = "{0} | [{1}]({2}/mission/{0}) |".format(
            container['id'], container['name'], phantom.get_base_url())
        phantom.add_note(
            container=child_container_id,
            note_type="general",
            note_format="markdown",
            title="[Auto-Generated] Parent Container",
            content="| Container_ID | Container_Name |\n| --- | --- |\n| {}".
            format(data_row))

        ## Mark child container as evidence in target_container
        data = {
            "container_id": container['id'],
            "object_id": child_container_id,
            "content_type": "container"
        }
        evidence_url = phantom.build_phantom_rest_url('evidence')
        response = phantom.requests.post(evidence_url, json=data,
                                         verify=False).json()

        ## Close child container
        if isinstance(close_containers,
                      str) and close_containers.lower() == 'true':
            phantom.set_status(container=child_container_id, status="closed")

        ### End child container processing ###

    ## Format and add note for link back to child_containers in parent_container
    note_title = "[Auto-Generated] Child Containers"
    note_format = "markdown"
    format_list = []
    # Build new note
    for child_container_id, child_container_name in zip(
            child_container_list, child_container_name_list):
        format_list.append("| {0} | [{1}]({2}/mission/{0}) |\n".format(
            child_container_id, child_container_name, phantom.get_base_url()))
    # Fetch any previous merge note
    params = {
        '_filter_container': '"{}"'.format(container['id']),
        '_filter_title': '"[Auto-Generated] Child Containers"'
    }
    note_url = phantom.build_phantom_rest_url('note')
    response_data = phantom.requests.get(note_url, params=params,
                                         verify=False).json()
    # If an old note was found, proceed to overwrite it
    if response_data['count'] > 0:
        note_item = response_data['data'][0]
        note_content = note_item['content']
        # Append new information to existing note
        for c_note in format_list:
            note_content += c_note
        data = {
            "note_type": "general",
            "title": note_title,
            "content": note_content,
            "note_format": note_format
        }
        # Overwrite note
        response_data = phantom.requests.post(note_url +
                                              "/{}".format(note_item['id']),
                                              json=data,
                                              verify=False).json()
    # If no old note was found, add new with header
    else:
        template = "| Container ID | Container Name |\n| --- | --- |\n"
        for c_note in format_list:
            template += c_note
        success, message, process_container_merge__note_id = phantom.add_note(
            container=container,
            note_type="general",
            title=note_title,
            content=template,
            note_format=note_format)

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
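# A hedged usage sketch for container_merge: merging two child events into the current
# container and closing them afterwards. The container IDs and workbook name are
# illustrative assumptions.
def example_merge(container):
    container_merge(
        target_container=container,   # container dict or id of the parent-to-be
        container_list=[101, 102],    # hypothetical child container ids
        workbook='Default',           # only added if the parent has no workbook yet
        close_containers='true')      # close each child once its data is copied
    return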
Example #8
def on_start(incident):

    phantom.act('send email', parameters=[{ "body" : "This is a test mail, Executed from a playbook",  "to" : "*****@*****.**",  "subject" : "Test Email from playbook" }], assets=["smtp"], callback=send_email_cb)
    html_body = '<html>'
    html_body += 'This is a test mail,<br>'
    html_body += 'Executed from a playbook for '
    html_body += '<a href="{base_url}/container/{container_id}"><b>this container</b></a>.<br>'.format(base_url=phantom.get_base_url(), container_id=incident['id'])
    html_body += '</html>'
    
    phantom.act('send email', parameters=[{ "body" : html_body,  "to" : "*****@*****.**",  "subject" : "Test HTML Email from playbook" }], assets=["smtp"], callback=send_email_cb)


    return
Example #9
def custom_format(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    phantom.debug("custom_format() called")

    ################################################################################
    # Produce a custom format that calculates how many related indicators there are 
    # per container. This is used to truncate the output if it's over the specified 
    # amount.
    ################################################################################

    find_related_events_data = phantom.collect2(container=container, datapath=["find_related_events:custom_function_result.data.*.container_id","find_related_events:custom_function_result.data.*.indicator_ids","find_related_events:custom_function_result.data.*.container_name"])

    find_related_events_data___container_id = [item[0] for item in find_related_events_data]
    find_related_events_data___indicator_ids = [item[1] for item in find_related_events_data]
    find_related_events_data___container_name = [item[2] for item in find_related_events_data]

    custom_format__output = None

    ################################################################################
    ## Custom Code Start
    ################################################################################

    import json  # used by json.dumps when saving run data below

    # Define base format - customize as needed
    custom_format__output = """Please review the following events and their associated indicators. Consider merging the related events into the current investigation.
    
The merge process will:
 - Mark the current event as the parent case. If no workbook has been added, it will use the default workbook.
 - Copy events, artifacts, and notes to the parent case.
 - Close the related events with a link to the parent case.
 
 """
    
    # Build phantom url for use later 
    base_url = phantom.get_base_url()
    url = phantom.build_phantom_rest_url('indicator')
    
    # Iterate through all inputs and append to base format
    for item1, item2, item3 in zip(find_related_events_data___container_id, find_related_events_data___indicator_ids, find_related_events_data___container_name):
        custom_format__output += "#### [Event {0}: {1}]({2}/mission/{0}/summary/evidence)\n\n".format(item1, item3, base_url)
        custom_format__output += "| Field Names | Values |\n"
        custom_format__output += "| --- | --- |\n"
        
        indicator_dict = {}

        # find_related_containers only returns indicator ids, so convert each indicator id to its actual value
        # Only iterate through 10 indicators for easier readability
        for indicator in item2[0:10]:
            response = phantom.requests.get(uri=url + "/{}?_special_fields=true".format(indicator), verify=False).json()
            value = response['value']
            fields = response.get('_special_fields')
            
            # Remove null items and join
            if isinstance(fields, list):
                fields = [item for item in fields if item]
                fields = sorted(fields)
                fields = ", ".join(fields)
                
            indicator_dict[value] = fields
            
        # sort the dictionary alphabetically by field names, then by indicator value; treat missing fields as empty
        for k, v in sorted(indicator_dict.items(), key=lambda kv: (kv[1] or '', kv[0])):
            if len(k) > 250:
                custom_format__output += "| {0} | ```{1}``` ***...truncated...*** | \n".format(v, k[:250])
            else:
                custom_format__output += "| {0} | ```{1}``` | \n".format(v, k)
            
        # If there were more than 10 indicators, add a note at the end letting the analyst know they can find more by following the event link    
        if len(item2) > 10:
            custom_format__output += "- ***+{0} additional related artifacts***".format(len(item2) - 10)
            
        custom_format__output += "\n---\n\n"

    ################################################################################
    ## Custom Code End
    ################################################################################

    phantom.save_run_data(key="custom_format:output", value=json.dumps(custom_format__output))

    event_details(container=container)

    return
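# The formatted markdown saved above with phantom.save_run_data is retrieved in a
# downstream block via phantom.get_run_data. A minimal sketch of that consumption,
# assuming the downstream block just needs the raw text back:
def read_custom_format():
    custom_format__output = json.loads(phantom.get_run_data(key='custom_format:output'))
    phantom.debug(custom_format__output)
    return custom_format__output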