コード例 #1
0
def update_workbook_tasks(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Complete the workbook task that references this playbook and set the
    next task (by order) within the same workbook to "In Progress".
    """
    phantom.debug("update_workbook_tasks() called")

    ################################################################################
    # Custom code to determine which task this playbook occurs in, complete that task, 
    # and set the status of the next task in the workbook (within the same phase) 
    # to "In Progress".
    ################################################################################

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Get current repo and playbook name (fetch the playbook info only once)
    playbook_info = phantom.get_playbook_info()[0]
    current_scm = playbook_info['repo_name']
    current_playbook = playbook_info['name']
    task_order = None

    # Iterate through tasks on the current container
    for task in phantom.get_tasks(container=container):
        # Default to empty dicts so tasks without suggestions/playbooks do not
        # raise AttributeError on the chained lookups
        playbooks = task.get('data', {}).get('suggestions', {}).get('playbooks')
        if playbooks:
            for playbook in playbooks:
                # Check if the current container tasks contain a reference to this playbook.
                # If so, this is the task phase you want to mark as current
                if playbook['playbook'] == current_playbook and playbook['scm'] == current_scm:
                    task_order = task['data']['order']
                    status = task['data']['status']
                    url = phantom.build_phantom_rest_url('workflow_task') + '/{}'.format(task['data']['id'])
                    # If status is not started (status id 0), move to in progress (status id 2) before moving to complete (status id 1)
                    if status == 0:
                        data = {'status': 2}
                        phantom.requests.post(url, data=json.dumps(data), verify=False)
                    data = {'status': 1}
                    phantom.set_phase(container=container, phase=task['data']['phase'])
                    phantom.requests.post(url, data=json.dumps(data), verify=False)

    # Iterate through the other tasks on the current container if a task was updated.
    # "is not None" (rather than truthiness) so that a task order of 0 also counts.
    if task_order is not None:
        for task in phantom.get_tasks(container=container):
            # If another task matches the updated task's order + 1, then update it as well
            if task['data']['order'] == task_order + 1:
                data = {'status': 2}
                url = phantom.build_phantom_rest_url('workflow_task') + '/{}'.format(task['data']['id'])
                phantom.requests.post(url, data=json.dumps(data), verify=False)


    ################################################################################
    ## Custom Code End
    ################################################################################

    return
コード例 #2
0
def get_effective_user(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    phantom.debug("get_effective_user() called")

    ################################################################################
    # Find the user and user type that launched this playbook.
    ################################################################################

    get_effective_user__user_id = None
    get_effective_user__user_type = None

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Resolve the numeric ID of the user running this playbook, then look up
    # that user's record via the REST API to obtain its type.
    user_id = phantom.get_effective_user()
    user_url = phantom.build_phantom_rest_url('ph_user', user_id)
    user_record = phantom.requests.get(user_url, verify=False).json()

    get_effective_user__user_id = user_id
    get_effective_user__user_type = user_record['type']

    ################################################################################
    ## Custom Code End
    ################################################################################

    phantom.save_run_data(key="get_effective_user:user_id", value=json.dumps(get_effective_user__user_id))
    phantom.save_run_data(key="get_effective_user:user_type", value=json.dumps(get_effective_user__user_type))

    user_decision(container=container)

    return
コード例 #3
0
def asset_get_attributes(asset=None, **kwargs):
    """
    Allows the retrieval of an attribute from an asset configuration for access in a playbook. This can be valuable in instances such as a dynamic note that references the Asset hostname. Must provide asset name or id.
    
    Args:
        asset: Asset numeric ID or asset name.
    
    Returns a JSON-serializable object that implements the configured data paths:
        id: Unique asset id
        name: Unique asset name
        configuration: Access individual configuration attributes by appending ".<keyname>"
            Example: configuration.device
        tags: Asset tags
        description: Asset description
        product_name: Asset product_name
        product_vendor: Asset product_vendor
        product_version: Asset product_version
        type: Asset type
        version: Asset version
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    base_url = phantom.build_phantom_rest_url('asset')

    # Determine the numeric asset id from the provided id or name
    if isinstance(asset, int):
        asset_id = asset
    elif isinstance(asset, str):
        # Translate the asset name to its id via a filtered REST query
        lookup = phantom.requests.get(
            uri=base_url,
            params={'_filter_name': '"{}"'.format(asset)},
            verify=False).json()
        if lookup['count'] != 1:
            raise RuntimeError(
                "No valid asset id found for provided asset name: {}".format(
                    asset))
        asset_id = lookup['data'][0]['id']
    else:
        raise TypeError("No valid asset id or name provided.")

    # Fetch the full asset record
    record = phantom.requests.get(
        uri='{}/{}'.format(base_url, asset_id), verify=False).json()
    if not record.get('id'):
        raise RuntimeError("No valid asset id found.")
    outputs = record

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #4
0
def add_artifact_with_tags(cef=None,
                           tags=None,
                           severity=None,
                           container_id=None,
                           label=None,
                           name=None,
                           run_automation=None,
                           field_mapping=None,
                           **kwargs):
    """
    Adds an artifact and updates that artifact with provided tags
    
    Args:
        cef (CEF type: *)
        tags (CEF type: *)
        severity (CEF type: *)
        container_id (CEF type: phantom container id)
        label (CEF type: *)
        name (CEF type: *)
        run_automation (CEF type: *): Defaults to False
        field_mapping (CEF type: *): valid field_mapping json
    
    Returns a JSON-serializable object that implements the configured data paths:
        id
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {}
    # Normalize run_automation to a bool. Accept "true"/"false" strings (any
    # case) or an actual boolean; anything falsy or unrecognized becomes False.
    # (Previously a bool input crashed on .lower().)
    if isinstance(run_automation, str):
        run_automation = run_automation.lower() == 'true'
    else:
        run_automation = bool(run_automation)

    success, message, artifact_id = phantom.add_artifact(
        container=container_id,
        raw_data={},
        cef_data=cef,
        label=label,
        field_mapping=field_mapping,
        name=name,
        severity=severity,
        run_automation=run_automation)

    # Fail loudly instead of posting tags to an invalid artifact URL
    if not success:
        raise RuntimeError("add_artifact failed: {}".format(message))

    artifact_url = phantom.build_phantom_rest_url('artifact', artifact_id)
    data = {'tags': tags}
    phantom.requests.post(artifact_url, data=json.dumps(data), verify=False)

    # Populate the documented "id" data path with the new artifact's id
    outputs['id'] = artifact_id

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #5
0
def select_indicators(action=None,
                      success=None,
                      container=None,
                      results=None,
                      handle=None,
                      filtered_artifacts=None,
                      filtered_results=None,
                      custom_function=None,
                      **kwargs):
    phantom.debug("select_indicators() called")

    # Retrieve the formatted indicator summary saved earlier in the run
    custom_format__output = json.loads(
        phantom.get_run_data(key="custom_format:output"))

    # Resolve the username of the effective user who will receive the prompt
    user_id = phantom.get_effective_user()
    user_url = phantom.build_phantom_rest_url('ph_user', user_id)
    user = phantom.requests.get(user_url, verify=False).json()['username']
    message = """Please review the list of suspect indicators and select an action.\n\n{0}""".format(
        custom_format__output)

    # Collect the suspect indicator values gathered upstream
    indicator_records = phantom.collect2(
        container=container,
        datapath=[
            "get_suspect_indicators:custom_function_result.data.*.indicator_value"
        ],
        action_results=results)

    # Build one list-choice response per suspect indicator
    parameters = None
    response_types = [
        {
            "prompt": "{0}".format(record[0]),
            "options": {
                "type": "list",
                "choices": ["Block", "Tag as Safe", "Do Nothing"]
            }
        }
        for record in indicator_records
    ]

    phantom.prompt2(container=container,
                    user=user,
                    message=message,
                    respond_in_mins=30,
                    name="select_indicators",
                    parameters=parameters,
                    response_types=response_types,
                    callback=process_responses)

    return
コード例 #6
0
def custom_list_enumerate(custom_list=None, **kwargs):
    """
    Fetch a custom list and iterate through the rows, producing a dictionary output for each row with the row number and the value for each column.
    
    Args:
        custom_list: the name or ID of a custom list
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.row_num
        *.column_0
        *.column_1
        *.column_2
        *.column_3
        *.column_4
        *.column_5
        *.column_6
        *.column_7
        *.column_8
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    if not custom_list:
        raise ValueError('list_name_or_num parameter is required')

    # Fetch the custom list over REST; abort on any HTTP error status
    list_url = phantom.build_phantom_rest_url('decided_list', custom_list)
    list_response = phantom.requests.get(list_url, verify=False)
    list_response.raise_for_status()

    # The list rows live under the "content" key of the response
    rows = list_response.json().get('content', [])

    # Produce one dict per row: 'row_num' plus a 'column_<n>' key per column
    outputs = []
    for row_num, row in enumerate(rows):
        entry = {'row_num': row_num}
        for col, val in enumerate(row):
            entry['column_{}'.format(col)] = val
        outputs.append(entry)

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #7
0
def workbook_list(**kwargs):
    """
    Return a list of all the workbooks on this Phantom instance. This might be useful to display possible options for workbooks to add to this event.
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.id: Unique workbook ID
        *.name: Workbook name
        *.description: Workbook description
        *.status: Status of the workbook, e.g. published
        *.is_default: True or False if it is the default workbook
        *.is_note_required: True or False if a note is required to finish each task in the workbook
        *.creator: Unique ID of the user that created the workbook
        *.create_time: Timestamp when the workbook was created
        *.modified_time: Timestamp when the workbook was last modified
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    # page_size=0 asks the REST API for all records in a single page
    url = phantom.build_phantom_rest_url('workbook_template') + '?page_size=0'
    phantom.debug(f"Querying for workbooks using URL: '{url}'")

    response = phantom.requests.get(uri=url, verify=False).json()
    # Guard clause: anything other than a populated response is an error
    if not response or response['count'] <= 0:
        raise RuntimeError(f"Error getting workbook data: {response}")

    # Project each workbook record down to the documented output fields
    fields = ("id", "name", "description", "status", "is_default",
              "is_note_required", "creator", "create_time", "modified_time")
    outputs = [{field: data[field] for field in fields}
               for data in response['data']]

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #8
0
def event_details(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    phantom.debug("event_details() called")

    ################################################################################
    # A dynamic prompt to list out details for each container so that the user can 
    # decide which to merge.
    ################################################################################

    # Resolve the username of the user running this playbook for the prompt call
    effective_user_id = phantom.get_effective_user()
    user_url = phantom.build_phantom_rest_url('ph_user', effective_user_id)
    user = phantom.requests.get(user_url, verify=False).json().get('username')
    message = """{0}"""

    # parameter list for template variable replacement
    parameters = [
        "custom_format:custom_function:output",
    ]

    # Fetch the related event ids and names discovered earlier in the playbook
    container_data = phantom.collect2(
        container=container,
        datapath=[
            'find_related_events:custom_function_result.data.*.container_id',
            'find_related_events:custom_function_result.data.*.container_name',
        ],
        action_results=results)

    # Build one list-choice response per related event
    container_id_list = []
    response_types = []
    for container_id, container_name in container_data:
        container_id_list.append(container_id)
        response_types.append({
                "prompt": "Event {0}: {1}".format(container_id, container_name),
                "options": {
                    "type": "list",
                    "choices": [
                        "Merge Into Case",
                        "Ignore",
                    ]
                },
            })

    # Persist the prompted container ids so the callback can map responses back
    phantom.save_run_data(value=json.dumps(container_id_list), key="container_list", auto=True)

    phantom.prompt2(container=container, user=user, message=message, respond_in_mins=30, name="event_details", parameters=parameters, response_types=response_types, callback=process_responses)

    return
コード例 #9
0
def assign_workbook_by_name(container_id=None, workbook_name=None, **kwargs):
    """
    Assigns workbooks as actions to give granular control on the tasks responders have to do on a case by case basis.
    
    Args:
        container_id
        workbook_name
    
    Returns a JSON-serializable object that implements the configured data paths:
        status: True only if a workbook matching workbook_name was found and added
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    
    outputs = {"status":False}
    
    container = phantom.get_container(container_id) 
    
    # List all workbook templates (page_size=0 returns every record)
    workbook_list = phantom.build_phantom_rest_url('workbook_template')
    response = phantom.requests.get(
        "{}?page_size=0".format(workbook_list),
        verify=False,
    )
    
    try:
        response = json.loads(response.text)
        # Case- and whitespace-insensitive name match against each template
        target = workbook_name.lower().strip()
        for item in response.get('data', []):
            if item['name'].lower().strip() == target:
                phantom.add_workbook(container, item['id'])
                # Only report success when a matching workbook was actually
                # added (previously status was True whenever the template
                # list loaded, even with no match)
                outputs['status'] = True
                break
    
    except Exception as e:
        phantom.debug("error in assign_workbook_by_name: {}".format(e))
    
    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #10
0
def container_merge(target_container=None,
                    container_list=None,
                    workbook=None,
                    close_containers=None,
                    **kwargs):
    """
    An alternative to the add-to-case API call. This function will copy all artifacts, automation, notes and comments over from every container within the container_list into the target_container. The target_container will be upgraded to a case.
    
    The notes will be copied over with references to the child containers from where they came. A note will be left in the child containers with a link to the target container. The child containers will be marked as evidence within the target container. 
    
    Any notes left as a consequence of the merge process will be skipped in subsequent merges.
    
    Args:
        target_container (CEF type: phantom container id): The target container to copy the information over. Supports container dictionary or container id.
        container_list: A list of container IDs to copy into the target container.
        workbook: Name or ID of the workbook to add if the container does not have a workbook yet. If no workbook is provided, the system default workbook will be added.
        close_containers: True or False to close the child containers in the container_list after merge. Defaults to False.
    
    Returns a JSON-serializable object that implements the configured data paths:
        
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {}

    # Check if valid target_container input was provided
    if isinstance(target_container, int):
        container = phantom.get_container(target_container)
    elif isinstance(target_container, dict):
        container = target_container
    else:
        raise TypeError(
            f"target_container '{target_container}' is neither a int or a dictionary"
        )

    container_url = phantom.build_phantom_rest_url('container',
                                                   container['id'])

    # Check if container_list input is a list of IDs (ints or numeric strings)
    if isinstance(container_list, list) and (all(
            isinstance(x, int)
            for x in container_list) or all(x.isnumeric()
                                            for x in container_list)):
        pass
    else:
        raise TypeError(
            f"container_list '{container_list}' is not a list of integers")

    ## Prep parent container as case with workbook ##
    workbook_name = phantom.requests.get(
        container_url, verify=False).json().get('workflow_name')
    # If workbook already exists, proceed to promote to case
    if workbook_name:
        phantom.debug(
            "workbook already exists. adding [Parent] to container name and promoting to case"
        )
        update_data = {'container_type': 'case'}
        # Only prefix the name once, even across repeated merges
        if '[Parent]' not in container['name']:
            update_data['name'] = "[Parent] {}".format(container['name'])
        phantom.update(container, update_data)
    # If no workbook exists, add one
    else:
        phantom.debug(
            "no workbook in container. adding one by name or using the default"
        )
        # If workbook ID was provided, add it
        if isinstance(workbook, int):
            workbook_id = workbook
            phantom.add_workbook(container=container['id'],
                                 workbook_id=workbook_id)
        # elif workbook name was provided, attempt to translate it to an id
        elif isinstance(workbook, str):
            workbook_url = phantom.build_phantom_rest_url(
                'workbook_template') + '?_filter_name="{}"'.format(workbook)
            response = phantom.requests.get(workbook_url, verify=False).json()
            if response['count'] > 1:
                raise RuntimeError(
                    'Unable to add workbook - more than one ID matches workbook name'
                )
            # Guard on count == 1 to avoid an IndexError when no workbook
            # matches; a failed lookup is caught by the re-check below
            elif response['count'] == 1 and response['data'][0]['id']:
                workbook_id = response['data'][0]['id']
                phantom.add_workbook(container=container['id'],
                                     workbook_id=workbook_id)
        else:
            # Adding default workbook
            phantom.promote(container=container['id'])
        # Check again to see if a workbook now exists
        workbook_name = phantom.requests.get(
            container_url, verify=False).json().get('workflow_name')
        # If workbook is now present, promote to case
        if workbook_name:
            update_data = {'container_type': 'case'}
            if '[Parent]' not in container['name']:
                update_data['name'] = "[Parent] {}".format(container['name'])
            phantom.update(container, update_data)
        else:
            raise RuntimeError(
                f"Error occurred during workbook add for workbook '{workbook_name}'"
            )

    ## Check if current phase is set. If not, set the current phase to the first available phase to avoid artifact merge error ##
    if not container.get('current_phase_id'):
        phantom.debug(
            "no current phase, so setting first available phase to current")
        workbook_phase_url = phantom.build_phantom_rest_url(
            'workbook_phase') + "?_filter_container={}".format(container['id'])
        request_json = phantom.requests.get(workbook_phase_url,
                                            verify=False).json()
        update_data = {'current_phase_id': request_json['data'][0]['id']}
        phantom.update(container, update_data)

    child_container_list = []
    child_container_name_list = []
    # Iterate through child containers
    for child_container_id in container_list:

        ### Begin child container processing ###
        phantom.debug(
            "Processing Child Container ID: {}".format(child_container_id))

        child_container = phantom.get_container(child_container_id)
        child_container_list.append(child_container_id)
        child_container_name_list.append(child_container['name'])
        child_container_url = phantom.build_phantom_rest_url(
            'container', child_container_id)

        ## Update container name with parent relationship
        if not "[Parent:" in child_container['name']:
            update_data = {
                'name':
                "[Parent: {0}] {1}".format(container['id'],
                                           child_container['name'])
            }
            phantom.update(child_container, update_data)

        ## Gather and add notes ##
        for note in phantom.get_notes(container=child_container_id):
            # Avoid copying any notes related to the merge process.
            if note['success'] and not note['data']['title'] in (
                    '[Auto-Generated] Related Containers',
                    '[Auto-Generated] Parent Container',
                    '[Auto-Generated] Child Containers'):
                phantom.add_note(container=container['id'],
                                 note_type='general',
                                 note_format=note['data']['note_format'],
                                 title="[From Event {0}] {1}".format(
                                     note['data']['container'],
                                     note['data']['title']),
                                 content=note['data']['content'])

        ## Copy information and add to case
        data = {
            'add_to_case': True,
            'container_id': child_container_id,
            'copy_artifacts': True,
            'copy_automation': True,
            'copy_files': True,
            'copy_comments': True
        }
        phantom.requests.post(container_url, json=data, verify=False)

        ## Leave a note with a link to the parent container
        phantom.debug(
            "Adding parent relationship note to child container '{}'".format(
                child_container_id))
        data_row = "{0} | [{1}]({2}/mission/{0}) |".format(
            container['id'], container['name'], phantom.get_base_url())
        phantom.add_note(
            container=child_container_id,
            note_type="general",
            note_format="markdown",
            title="[Auto-Generated] Parent Container",
            content="| Container_ID | Container_Name |\n| --- | --- |\n| {}".
            format(data_row))

        ## Mark child container as evidence in target_container
        data = {
            "container_id": container['id'],
            "object_id": child_container_id,
            "content_type": "container"
        }
        evidence_url = phantom.build_phantom_rest_url('evidence')
        response = phantom.requests.post(evidence_url, json=data,
                                         verify=False).json()

        ## Close child container
        if isinstance(close_containers,
                      str) and close_containers.lower() == 'true':
            phantom.set_status(container=child_container_id, status="closed")

        ### End child container processing ###

    ## Format and add note for link back to child_containers in parent_container
    note_title = "[Auto-Generated] Child Containers"
    note_format = "markdown"
    format_list = []
    # Build new note
    for child_container_id, child_container_name in zip(
            child_container_list, child_container_name_list):
        format_list.append("| {0} | [{1}]({2}/mission/{0}) |\n".format(
            child_container_id, child_container_name, phantom.get_base_url()))
    # Fetch any previous merge note, filtered by container and title
    params = {
        '_filter_container': '"{}"'.format(container['id']),
        '_filter_title': '"[Auto-Generated] Child Containers"'
    }
    note_url = phantom.build_phantom_rest_url('note')
    # Bug fix: the filter params were previously built but never passed, so
    # the query matched arbitrary notes and could overwrite the wrong one
    response_data = phantom.requests.get(note_url, params=params,
                                         verify=False).json()
    # If an old note was found, proceed to overwrite it
    if response_data['count'] > 0:
        note_item = response_data['data'][0]
        note_content = note_item['content']
        # Append new information to existing note
        for c_note in format_list:
            note_content += c_note
        data = {
            "note_type": "general",
            "title": note_title,
            "content": note_content,
            "note_format": note_format
        }
        # Overwrite note
        response_data = phantom.requests.post(note_url +
                                              "/{}".format(note_item['id']),
                                              json=data,
                                              verify=False).json()
    # If no old note was found, add new with header
    else:
        template = "| Container ID | Container Name |\n| --- | --- |\n"
        for c_note in format_list:
            template += c_note
        success, message, process_container_merge__note_id = phantom.add_note(
            container=container,
            note_type="general",
            title=note_title,
            content=template,
            note_format=note_format)

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #11
0
def container_update(container_input=None,
                     name=None,
                     description=None,
                     label=None,
                     owner=None,
                     sensitivity=None,
                     severity=None,
                     status=None,
                     tags=None,
                     input_json=None,
                     **kwargs):
    """
    Allows updating various attributes of a container in a single custom function. Any attributes of a container not listed can be updated via the input_json parameter. 
    
    Args:
        container_input (CEF type: phantom container id): Supports a container id or container dictionary
        name: Optional parameter to change container name
        description: Optional parameter to change the container description
        label (CEF type: phantom container label): Optional parameter to change the container label
        owner: Optional parameter to change the container owner. Accepts a username or role name or keyword "current" to set the currently running playbook user as the owner.
        sensitivity: Optional parameter to change the container sensitivity. 
        severity: Optional parameter to change the container severity.
        status: Optional parameter to change the container status.
        tags: Optional parameter to change the container tags. Must be in the format of a comma separated list.
        input_json: Optional parameter to modify any extra attributes of a container. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.
    
    Returns a JSON-serializable object that implements the configured data paths:
        
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {}
    update_dict = {}

    if isinstance(container_input, int):
        container = phantom.get_container(container_input)
    elif isinstance(container_input, dict):
        container = container_input
    else:
        raise TypeError("container_input is neither a int or a dictionary")

    if name:
        update_dict['name'] = name
    if description:
        update_dict['description'] = description
    if label:
        update_dict['label'] = label
    if owner:
        # If keyword 'current' entered then translate effective_user id to a username
        if owner.lower() == 'current':
            update_dict['owner_id'] = phantom.get_effective_user()
        else:
            # Attempt to translate name to owner_id.
            # Bug fix: the username filter previously contained a garbled
            # literal instead of interpolating the owner value.
            url = phantom.build_phantom_rest_url(
                'ph_user') + f'?_filter_username="{owner}"'
            data = phantom.requests.get(url, verify=False).json().get('data')
            if data and len(data) == 1:
                update_dict['owner_id'] = data[0]['id']
            elif data and len(data) > 1:
                phantom.error(f'Multiple matches for owner "{owner}"')
            else:
                # Attempt to translate name to role_id
                url = phantom.build_phantom_rest_url(
                    'role') + f'?_filter_name="{owner}"'
                data = phantom.requests.get(url,
                                            verify=False).json().get('data')
                if data and len(data) == 1:
                    update_dict['role_id'] = data[0]['id']
                elif data and len(data) > 1:
                    phantom.error(f'Multiple matches for role "{owner}"')
                else:
                    phantom.error(f'"{owner}" is not a valid username or role')
    if sensitivity:
        update_dict['sensitivity'] = sensitivity
    if severity:
        update_dict['severity'] = severity
    if status:
        update_dict['status'] = status
    if tags:
        # Normalize a comma separated string into a list of tag strings
        tags = tags.replace(" ", "").split(",")
        update_dict['tags'] = tags
    if input_json:
        json_dict = json.loads(input_json)
        # Merge dictionaries together. The second argument, "**json_dict" will take precedence and overwrite any duplicate parameters.
        update_dict = {**update_dict, **json_dict}

    if update_dict:
        phantom.debug(
            'Updating container {0} with the following information: "{1}"'.
            format(container['id'], update_dict))
        phantom.update(container, update_dict)
    else:
        phantom.debug(
            "Valid container entered but no valid container changes provided.")

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #12
0
def indicator_tag(indicator=None, tags=None, overwrite=None, **kwargs):
    """
    Apply tags to an existing indicator record, either appending to or
    replacing the tags already present.

    Args:
        indicator (CEF type: *): Indicator to tag; accepts either a string
            indicator value or an integer indicator id.
        tags (CEF type: *): Comma separated list of tags. Tags should only
            contain characters Aa-Zz, 0-9, '-', and '_'.
        overwrite: Optional "true"/"false" string (default "false"). When
            "true", the provided tags replace the existing ones; when
            "false", they are appended.

    Returns a JSON-serializable object that implements the configured data paths:
        indicator_id: The indicator id that was tagged.
        indicator_tags: The new tags for the indicator
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    import string

    outputs = {}

    # Normalize the comma-separated input into a whitespace-free tag list,
    # then reject any tag containing a disallowed character.
    tags = tags.replace(' ', '').split(',')
    permitted = set(string.ascii_lowercase + string.ascii_uppercase +
                    string.digits + '_' + '-')
    for candidate in tags:
        if not set(candidate).issubset(permitted):
            raise ValueError(
                "Tags should only contain characters Aa-Zz, 0-9, '-', and '_'")

    # Coerce the overwrite flag: blank/falsy means False, otherwise it must
    # be the string "true" or "false" (case-insensitive).
    if not overwrite:
        overwrite = False
    else:
        if not isinstance(overwrite, str):
            raise TypeError("overwrite must be a string")
        lowered = overwrite.lower()
        if lowered == 'true':
            overwrite = True
        elif lowered == 'false':
            overwrite = False
        else:
            raise ValueError("overwrite must be either 'true' or 'false'")

    url = phantom.build_phantom_rest_url('indicator')

    # Resolve the input to an indicator id plus its current tags.
    if isinstance(indicator, int):
        # integer input is treated directly as an indicator id
        indicator_id = indicator
        url += f'/{indicator_id}'
        record = phantom.requests.get(url, verify=False).json()
        if not record.get('id'):
            raise RuntimeError(
                f"No indicator record found for indicator with id: {indicator}"
            )
        existing_tags = record['tags']
    elif isinstance(indicator, str):
        # string input is looked up by (case-insensitive) indicator value
        query = {'_filter_value__iexact': f'"{indicator}"'}
        record = phantom.requests.get(url, params=query, verify=False).json()
        if record['count'] == 1:
            indicator_id = record['data'][0]['id']
            url += f'/{indicator_id}'
            existing_tags = record['data'][0]['tags']
        elif record['count'] > 1:
            raise RuntimeError("Located more than 1 indicator record")
        else:
            raise RuntimeError(
                f"Unable to locate any indicator record for value: {indicator}"
            )
    else:
        raise ValueError("Indicator must be a string or integer")

    # Append mode keeps the indicator's current tags alongside the new ones.
    if not overwrite:
        tags = existing_tags + tags

    # deduplicate before POSTing
    tags = list(set(tags))

    data = {"tags": tags}
    result = phantom.requests.post(url, json=data, verify=False).json()
    if not result.get('success'):
        raise RuntimeError(
            f"Failed to update tags for indicator with id: {indicator_id}")
    outputs = {'indicator_id': indicator_id, 'indicator_tags': tags}

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #13
0
def workbook_add(container=None,
                 workbook=None,
                 check_for_existing_workbook=None,
                 start_workbook=None,
                 **kwargs):
    """
    Add a workbook to a container. Provide a container id and a workbook name or id
    
    Args:
        container (CEF type: phantom container id): A phantom container id
        workbook (CEF type: *): A workbook name or id
        check_for_existing_workbook: Defaults to True. Check to see if workbook already exists in container before adding.
        start_workbook: Defaults to True. Sets the added workbook to the current phase.
    
    Returns a JSON-serializable object that implements the configured data paths:
        workbook_id: ID of the workbook that was added
        current_phase_id: ID of the current phase if start_workbook set to True.
    
    Raises:
        TypeError: If container or workbook is not of a supported type.
        RuntimeError: If the workbook name matches zero or multiple templates,
            or if the first phase cannot be set as current.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {}
    existing_templates = set()
    container_id = None

    # Ensure valid container input
    if isinstance(container, dict) and container.get('id'):
        container_id = container['id']
    elif isinstance(container, int):
        container_id = container
    else:
        raise TypeError(
            "The input 'container' is neither a container dictionary nor an int, so it cannot be used"
        )

    # Determine if check_for_existing_workbook should be overwritten by function input
    if isinstance(check_for_existing_workbook,
                  str) and check_for_existing_workbook.lower() == 'false':
        check_for_existing_workbook = False
    else:
        check_for_existing_workbook = True

    # Determine if start_workbook should be overwritten by function input
    if isinstance(start_workbook, str) and start_workbook.lower() == 'false':
        start_workbook = False
    else:
        start_workbook = True

    if check_for_existing_workbook:
        # Collect the template ids of workbooks already on the container by
        # resolving each existing phase name back to its phase template.
        url = phantom.build_phantom_rest_url('container', container_id,
                                             'phases')
        container_data = phantom.requests.get(url, verify=False).json()
        if container_data['count'] > 0:
            phase_names = set(
                [phase_id['name'] for phase_id in container_data['data']])
            for name in phase_names:
                url = phantom.build_phantom_rest_url(
                    'workbook_phase_template') + '?_filter_name="{}"'.format(
                        name)
                phase_template_response = phantom.requests.get(
                    url, verify=False).json()
                if phase_template_response['count'] > 0:
                    for phase in phase_template_response['data']:
                        existing_templates.add(phase['template'])

    # Resolve the workbook input to a workbook template id
    if isinstance(workbook, int):
        workbook_id = workbook
    elif isinstance(workbook, str):
        url = phantom.build_phantom_rest_url(
            'workbook_template') + '?_filter_name="{}"'.format(workbook)
        response = phantom.requests.get(url, verify=False).json()
        if response['count'] > 1:
            raise RuntimeError(
                'Unable to add workbook - more than one ID matches workbook name'
            )
        if response['count'] == 0:
            # Bug fix: this case previously fell through to an IndexError on
            # response['data'][0]; fail with a clear message instead.
            raise RuntimeError(
                'Unable to add workbook - no workbook matches the provided name'
            )
        workbook_id = response['data'][0]['id']
    else:
        # Bug fix: an unsupported type previously caused a NameError on
        # workbook_id below; raise a descriptive error instead.
        raise TypeError(
            "The input 'workbook' is neither a workbook name nor an int, so it cannot be used"
        )

    if workbook_id in existing_templates:
        phantom.debug("Workbook already added to container. Skipping")
    else:
        phantom.add_workbook(container=container_id, workbook_id=workbook_id)

    outputs['workbook_id'] = workbook_id

    if start_workbook:
        # Look up the first phase of the added workbook and set it as current
        url = phantom.build_phantom_rest_url(
            'workbook_phase_template') + '?_filter_template="{}"'.format(
                workbook_id)
        first_phase = phantom.requests.get(
            url, verify=False).json()['data'][0]['name']
        url = phantom.build_phantom_rest_url(
            'container', container_id,
            'phases') + '?_filter_name="{}"'.format(first_phase)
        existing_phases = phantom.requests.get(url, verify=False).json()
        if existing_phases['count'] > 1:
            raise RuntimeError(
                'Cannot set current phase - duplicate phase names exist in container'
            )
        if existing_phases['count'] == 0:
            # Bug fix: previously an IndexError on existing_phases['data'][0]
            raise RuntimeError(
                'Cannot set current phase - no matching phase found in container'
            )
        phantom.set_phase(container=container_id,
                          phase=existing_phases['data'][0]['id'],
                          trace=False)
        outputs['current_phase_id'] = existing_phases['data'][0]['id']

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #14
0
def indicator_collect(container=None, **kwargs):
    """
    Collect all indicators in a container and separate them by data type. Additional output data paths are created for each data type. Artifact scope is ignored. 
    
    Args:
        container (CEF type: phantom container id): The current container
    
    Returns a JSON-serializable object that implements the configured data paths:
        all_indicators.*.cef_key
        all_indicators.*.cef_value
        all_indicators.*.data_types
        all_indicators.*.artifact_id
        domain.*.cef_key
        domain.*.cef_value (CEF type: domain)
        domain.*.artifact_id
        file_name.*.cef_key (CEF type: file name)
        file_name.*.cef_value (CEF type: file name)
        file_name.*.artifact_id
    
    Raises:
        ValueError: If no container exists for the provided id.
        TypeError: If container is neither a dict nor an int.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {'all_indicators': []}
    # Known data types; each gets its own output list keyed with underscores
    # in place of spaces (e.g. "file name" -> outputs['file_name']).
    known_data_types = [
        "domain", "file name", "file path", "hash", "host name", "ip",
        "mac address", "md5", "port", "process name", "sha1", "sha256",
        "sha512", "url", "user name", "vault id"
    ]
    for data_type in known_data_types:
        outputs[data_type.replace(' ', '_')] = []

    # validate container and get ID
    if isinstance(container, dict) and container['id']:
        container_id = container['id']
    elif isinstance(container, int):
        rest_container = phantom.requests.get(
            uri=phantom.build_phantom_rest_url('container', container),
            verify=False).json()
        if 'id' not in rest_container:
            # Bug fix: the original message lacked the f-prefix, so the
            # container id was never interpolated into the error text.
            raise ValueError(f'Failed to find container with id {container}')
        container_id = container
    else:
        raise TypeError(
            "The input 'container' is neither a container dictionary nor an int, so it cannot be used"
        )

    # fetch all artifacts in the container (page_size=0 disables paging)
    artifacts = phantom.requests.get(uri=phantom.build_phantom_rest_url(
        'container', container_id, 'artifacts'),
                                     params={
                                         'page_size': 0
                                     },
                                     verify=False).json()['data']

    for artifact in artifacts:
        artifact_id = artifact['id']
        for cef_key, cef_value in artifact['cef'].items():
            params = {
                'indicator_value': cef_value,
                "_special_contains": True,
                'page_size': 1
            }
            indicator_data = phantom.requests.get(
                uri=phantom.build_phantom_rest_url('indicator_by_value'),
                params=params,
                verify=False)
            # Bug fix: indicator_json was unbound on the first non-200
            # response (NameError) and stale on later ones; default to {}.
            indicator_json = indicator_data.json(
            ) if indicator_data.status_code == 200 else {}
            data_types = []
            if indicator_json.get('id'):
                # drop None entries reported by _special_contains
                data_types = [
                    item for item in indicator_json['_special_contains']
                    if item
                ]

            # store the value in all_indicators and a list of values for each data type
            outputs['all_indicators'].append({
                'cef_key': cef_key,
                'cef_value': cef_value,
                'artifact_id': artifact_id,
                'data_types': data_types
            })
            for data_type in data_types:
                # outputs will have underscores instead of spaces
                data_type_escaped = data_type.replace(' ', '_')
                if data_type_escaped not in outputs:
                    outputs[data_type_escaped] = []
                outputs[data_type_escaped].append({
                    'cef_key': cef_key,
                    'cef_value': cef_value,
                    'artifact_id': artifact_id
                })

    # sort the all_indicators outputs to make them more consistent
    outputs['all_indicators'].sort(
        key=lambda indicator: str(indicator['cef_value']))

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #15
0
def playbooks_list(name=None, category=None, tags=None, repo=None, playbook_type=None, **kwargs):
    """
    List all playbooks matching the provided name, category, and tags. If no filters are provided, list all playbooks.
    
    Args:
        name: Only return playbooks with the provided name.
        category: Only returns playbooks that match the provided category.
        tags: Only return playbooks that contain ALL the provided tags. Multiple tags must be a comma-separated list.
        repo: Only return playbooks that exist in this repo.
        playbook_type: Only return playbooks that match the provided type. Accepts 'automation', 'input' or 'data.'
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.id: Playbook ID, e.g. 1234
        *.full_name: Playbook full name with repo, e.g. local/playbook_name
        *.name: Playbook Name, e.g. My Playbook
        *.category: Playbook category, e.g. Uncategorized
        *.tags: List of tags, e.g. [ tag1, tag2, tag3 ]
        *.active: Playbook automation status, True or False
        *.disabled: Playbook enabled / disabled status, True or False
        *.playbook_type: Playbook type: 'automation' or 'data'
        *.input_spec: If the playbook type is 'data,' this will be a list of dictionaries for the accepted inputs.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = []

    # Build the REST query string from whichever filters were supplied.
    params = {'pretty': True, 'page_size': 0}
    if name:
        params['_filter_name'] = f'"{name}"'
    if category:
        params['_filter_category'] = f'"{category}"'
    if tags:
        # strip whitespace from each tag before filtering
        tag_list = [chunk.replace(' ', '') for chunk in tags.split(',')]
        params['_filter_tags__contains'] = f'{json.dumps(tag_list)}'

    # Repo may be an id (used directly) or a name (translated via /scm).
    if isinstance(repo, int):
        params['_filter_scm'] = f'{repo}'
    elif isinstance(repo, str):
        scm_lookup = phantom.requests.get(
            uri=phantom.build_phantom_rest_url('scm'),
            params={'_filter_name': f'"{repo}"'},
            verify=False).json()
        if scm_lookup['count'] != 1:
            raise RuntimeError(f"Invalid repo specified: '{repo}'")
        params['_filter_scm'] = '{}'.format(scm_lookup['data'][0]['id'])

    # Normalize the requested playbook type; 'input' is an alias for 'data'.
    if isinstance(playbook_type, str) and playbook_type.lower() in ('automation', 'input', 'data'):
        playbook_type = playbook_type.lower()
        if playbook_type == 'input':
            playbook_type = 'data'
    elif playbook_type:
        raise TypeError(f"Invalid playbook type specified - '{playbook_type}' - must be one of: 'automation', 'input', 'data'")

    # Fetch playbook data and build the output records.
    listing = phantom.requests.get(
        uri=phantom.build_phantom_rest_url('playbook'),
        params=params,
        verify=False).json()
    if listing['count'] > 0:
        for entry in listing['data']:
            # SOAR < 5.0 has no playbook_type field, so a type filter cannot work there
            if not entry.get('playbook_type') and playbook_type:
                raise TypeError("playbook_type filter not valid on SOAR prior to 5.0")
            # Skip entries that don't match a requested type; no type filter means keep all.
            if playbook_type and entry.get('playbook_type') != playbook_type:
                continue
            outputs.append({
                'id': entry['id'],
                'full_name': f"{entry['_pretty_scm']}/{entry['name']}",
                'name': entry['name'],
                'category': entry['category'],
                'tags': entry['tags'],
                'active': entry['active'],
                'disabled': entry['disabled'],
                'playbook_type': entry.get('playbook_type'),
                'input_spec': entry.get('input_spec')
            })
    else:
        phantom.debug("No playbook found for supplied filter")

    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #16
0
def protect_prompt(action=None,
                   success=None,
                   container=None,
                   results=None,
                   handle=None,
                   filtered_artifacts=None,
                   filtered_results=None,
                   custom_function=None,
                   **kwargs):
    phantom.debug("protect_prompt() called")

    # Resolve the effective user's username for the phantom.prompt2 call.
    user_id = phantom.get_effective_user()
    user_record = phantom.requests.get(
        phantom.build_phantom_rest_url('ph_user', user_id),
        verify=False).json()
    user = user_record['username']
    message = """{0}"""

    # parameter list for template variable replacement
    parameters = ["format_prompt:formatted_data"]

    # Gather candidate devices and users from earlier query actions.
    devices = [
        row[0] for row in phantom.collect2(
            container=container,
            datapath=['run_asset_query:action_result.data.*.nt_host'],
            action_results=results)
    ]
    identities = [
        row[0] for row in phantom.collect2(
            container=container,
            datapath=['run_identity_query:action_result.data.*.email'],
            action_results=results)
    ]

    # Gather available protect playbooks for assets and identities.
    asset_playbooks = [
        row[0] for row in phantom.collect2(
            container=container,
            datapath=[
                "list_asset_playbooks:custom_function_result.data.*.full_name"
            ]) if row[0]
    ]
    identity_playbooks = [
        row[0] for row in phantom.collect2(
            container=container,
            datapath=[
                "list_identity_playbooks:custom_function_result.data.*.full_name"
            ]) if row[0]
    ]

    all_entity_list = []
    response_types = []

    def _add_prompts(entity_type, noun, entities):
        # Queue one Yes/No prompt per non-empty entity and remember the entity.
        for entity in entities:
            if entity:
                response_types.append({
                    "prompt":
                    f"Launch protect {noun} playbooks on '{entity}'?",
                    "options": {
                        "type": "list",
                        "choices": ["Yes", "No"]
                    },
                })
                all_entity_list.append({'type': entity_type, 'name': entity})

    # Only prompt for devices/users when a matching protect playbook exists.
    if devices and asset_playbooks:
        _add_prompts('device', 'asset', devices)
    if identities and identity_playbooks:
        _add_prompts('user', 'identity', identities)

    # Persist the prompted entities so the callback can pair answers to them.
    phantom.save_run_data(key='all_entities',
                          value=json.dumps(all_entity_list))
    phantom.prompt2(container=container,
                    user=user,
                    message=message,
                    respond_in_mins=30,
                    name="protect_prompt",
                    parameters=parameters,
                    response_types=response_types,
                    callback=decide_and_launch_playbooks)

    return
コード例 #17
0
def update_artifact(artifact_id=None, data=None, overwrite=None, **kwargs):
    """
    Update artifact with a valid json dictionary. See Phantom Artifact REST API for valid dictionary.
    
    Args:
        artifact_id (CEF type: *): ID of artifact to update
        data (CEF type: *): JSON formatted data. See artifact REST api
        overwrite (CEF type: *): Optional: Leave blank for False
    
    Returns a JSON-serializable object that implements the configured data paths:
        
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    def field_updater(data, update_data, overwrite):
        # Recursively merge update_data into data:
        # - lists: with overwrite the new list replaces the old one; without,
        #   the result is the deduplicated union of old and new (set-based,
        #   so element order is not preserved)
        # - dicts: recurse per key so nested fields are merged individually;
        #   mutates `data` in place and returns it
        # - anything else (scalar): replace only when overwrite is set or the
        #   existing value is falsy; otherwise keep the existing value
        if type(update_data) == list:
            if not (overwrite):
                return (list(set((data or []) + update_data)))
            else:
                return (update_data)
        elif type(update_data) == dict:
            for keya in update_data.keys():
                # data.get(keya, {}) supplies an empty dict for missing keys
                # so the recursion can keep descending into new subtrees.
                # NOTE(review): if the existing value is a non-dict scalar
                # while update_data has a dict here, data[keya] assignment on
                # a scalar would fail — confirm inputs always line up.
                data[keya] = field_updater(data.get(keya, {}),
                                           update_data[keya], overwrite)
        else:
            # scalar case: overwrite a present value only when requested;
            # always fill in a missing/falsy value
            if (overwrite and data) or not (data):
                return update_data

        return data

    outputs = {}
    try:
        # `data` arrives as a JSON string; parse it into a dict
        data = json.loads(data)
    except Exception as err:
        return phantom.error('Unable to parse "data" field: {}'.format(err))

    # Any falsy overwrite input (None, '', 0) is treated as False.
    # NOTE(review): any non-empty value — including the string "false" — is
    # truthy here and enables overwrite; confirm that is intended.
    if not overwrite:
        overwrite = False

    artifact_url = phantom.build_phantom_rest_url(
        'artifact/{}'.format(artifact_id))

    # Fetch the current artifact so existing fields can be merged with the update
    response = phantom.requests.get(artifact_url, verify=False)
    if response.status_code != 200:
        return phantom.error(
            'Unable to find artifact id: {}. Response: {}'.format(
                artifact_id, response.text))

    artifact_data = response.json()
    # Build the merged payload: only keys present in the update are POSTed,
    # each merged against the artifact's existing value for that key.
    update_data = {}
    for key in data.keys():
        update_data[key] = field_updater(artifact_data.get(key, {}), data[key],
                                         overwrite)

    post_response = phantom.requests.post(artifact_url,
                                          data=json.dumps(update_data),
                                          verify=False)

    if post_response.status_code != 200:
        return phantom.error('Unable to save artifact data: {}'.format(
            post_response.text))

    phantom.debug('Successfully updated artifact ID: {}'.format(artifact_id))

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #18
0
def find_related_containers(value_list=None, minimum_match_count=None, container=None, earliest_time=None, filter_status=None, filter_label=None, filter_severity=None, filter_in_case=None, **kwargs):
    """
    Takes a provided list of indicator values to search for and finds all related containers. It will produce a list of the related container details.
    
    Args:
        value_list (CEF type: *): An indicator value to search on, such as a file hash or IP address. To search on all indicator values in the container, use "*".
        minimum_match_count (CEF type: *): The minimum number of similar indicator records that a container must have to be considered "related."  If no match count provided, this will default to 1.
        container (CEF type: phantom container id): The container to run indicator analysis against. Supports container object or container_id. This container will also be excluded from the results for related_containers.
        earliest_time: Optional modifier to only consider related containers within a time window. Default is -30d.  Supports year (y), month (mon), week (w), day (d), hour (h), or minute (m).
        filter_status: Optional comma-separated list of statuses to filter on. Only containers that have statuses matching an item in this list will be included.
        filter_label: Optional comma-separated list of labels to filter on. Only containers that have labels matching an item in this list will be included.
        filter_severity: Optional comma-separated list of severities to filter on. Only containers that have severities matching an item in this list will be included.
        filter_in_case: Optional parameter to filter containers that are in a case or not. Defaults to True (drop containers that are already in a case).
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.container_id (CEF type: *): The unique id of the related container
        *.container_indicator_match_count: The number of indicators matched to the related container
        *.container_status: The status of the related container e.g. new, open, closed
        *.container_severity: The severity of the related container
        *.container_type: The type of the related container, e.g. default or case
        *.container_name: The name of the related container
        *.in_case: True or False if the related container is already included in a case
        *.indicator_ids: Indicator IDs that matched
        *.container_url (CEF type: url): Link to container
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    import re
    from datetime import datetime, timedelta

    outputs = []
    indicator_id_dictionary = {}
    container_dictionary = {}

    base_url = phantom.get_base_url()
    indicator_by_value_url = phantom.build_phantom_rest_url('indicator_by_value')
    indicator_common_container_url = phantom.build_phantom_rest_url('indicator_common_container')
    container_url = phantom.build_phantom_rest_url('container')

    def format_offset_time(seconds):
        # Render "now minus <seconds>" in the same string layout as the
        # container 'create_time' field so the two can be compared lexically.
        # NOTE(review): uses local time via datetime.now() while container
        # create_time is presumably UTC — TODO confirm against the platform.
        datetime_obj = datetime.now() - timedelta(seconds=seconds)
        return datetime_obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')

    def fetch_indicator_ids(values):
        # Translate indicator values into indicator record ids, skipping
        # values that have no indicator record on this instance.
        indicator_id_list = []
        for value in values:
            params = {'indicator_value': f'{value}', 'timerange': 'all'}
            indicator_id = phantom.requests.get(indicator_by_value_url, params=params, verify=False).json().get('id')
            if indicator_id:
                indicator_id_list.append(indicator_id)
        return indicator_id_list

    # Translate the user-provided earliest_time (e.g. "-1h", "-2mon") to seconds.
    if earliest_time:
        char_lookup = {'y': 31557600, 'mon': 2592000, 'w': 604800, 'd': 86400, 'h': 3600, 'm': 60}
        pattern = re.compile(r'-(\d+)([mM][oO][nN]|[yYwWdDhHmM]{1})$')
        match = pattern.search(earliest_time)
        if not match:
            raise RuntimeError(f'earliest_time string "{earliest_time}" is incorrectly formatted. Format is -<int><time> where <int> is an integer and <time> is y, mon, w, d, h, or m. Example: "-1h"')
        integer, char = match.groups()
        time_in_seconds = int(integer) * char_lookup[char.lower()]
    else:
        # Default window: 30 days.
        time_in_seconds = 2592000
    # Compute the cutoff once; every candidate container is compared to it.
    earliest_create_time = format_offset_time(time_in_seconds)

    # Ensure valid container input.
    if isinstance(container, dict) and container.get('id'):
        current_container = container['id']
    elif isinstance(container, int):
        current_container = container
    else:
        raise TypeError("The input 'container' is neither a container dictionary nor an int, so it cannot be used")

    if minimum_match_count and not isinstance(minimum_match_count, int):
        raise TypeError(f"Invalid type for 'minimum_match_count', {type(minimum_match_count)}, must be 'int'")
    elif not minimum_match_count:
        minimum_match_count = 1

    # Normalize the optional comma-separated filter inputs.
    status_list, label_list, severity_list = [], [], []
    if isinstance(filter_status, str):
        status_list = [item.strip().lower() for item in filter_status.split(',')]
    if isinstance(filter_label, str):
        label_list = [item.strip().lower() for item in filter_label.split(',')]
    if isinstance(filter_severity, str):
        severity_list = [item.strip().lower() for item in filter_severity.split(',')]
    # Only the literal string 'false' (any case) disables the in-case filter.
    filter_in_case = not (isinstance(filter_in_case, str) and filter_in_case.lower() == 'false')

    # "*" means: search on every CEF value found on the current container's
    # artifacts. The empty-list guard prevents an IndexError on value_list[0].
    if isinstance(value_list, list) and value_list and value_list[0] == "*":
        new_value_list = []
        artifacts_url = phantom.build_phantom_rest_url('container', current_container, 'artifacts') + '?page_size=0'
        response_data = phantom.requests.get(uri=artifacts_url, verify=False).json().get('data')
        if response_data:
            for data in response_data:
                for cef_value in data['cef'].values():
                    if isinstance(cef_value, list):
                        new_value_list.extend(cef_value)
                    else:
                        new_value_list.append(cef_value)
        indicator_id_list = fetch_indicator_ids(list(set(new_value_list)))
    elif isinstance(value_list, list):
        # Dedupe the caller-supplied values before the REST lookups.
        indicator_id_list = fetch_indicator_ids(list(set(value_list)))
    else:
        raise TypeError(f"Invalid input for value_list: '{value_list}'")

    # Quit early if no indicator ids were found.
    if not indicator_id_list:
        phantom.debug(f"No indicators IDs found for provided values: '{value_list}'")
        assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
        return outputs

    # Map indicator id -> other containers that share that indicator.
    for indicator_id in set(indicator_id_list):
        params = {'indicator_ids': indicator_id}
        response_data = phantom.requests.get(indicator_common_container_url, params=params, verify=False).json()
        if response_data:
            # Quit early if the only container sharing this indicator is the
            # current one (matches the original behavior).
            if len(response_data) == 1 and response_data[0].get('container_id') == current_container:
                phantom.debug(f"No related containers found for provided values: '{value_list}'")
                assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
                return outputs
            # Keep every related container except the current container itself.
            indicator_id_dictionary[str(indicator_id)] = [
                item['container_id'] for item in response_data
                if item['container_id'] != current_container
            ]

    # Invert the map: container id -> deduped list of matching indicator ids.
    for indicator_id, container_ids in indicator_id_dictionary.items():
        for related_container_id in container_ids:
            container_dictionary.setdefault(str(related_container_id), []).append(str(indicator_id))

    if not container_dictionary:
        raise RuntimeError('Unable to create container_dictionary')

    for related_container_id in container_dictionary:
        container_dictionary[related_container_id] = list(set(container_dictionary[related_container_id]))

    # Containers that meet the minimum match count; the size of this list
    # decides which fetch strategy is cheaper.
    matching_ids = [cid for cid, indicators in container_dictionary.items()
                    if len(indicators) >= minimum_match_count]

    def build_output(related_container_id, container_data):
        """Apply the user filters; return an output record or None if dropped."""
        if container_data['create_time'] < earliest_create_time:
            return None
        if status_list and container_data['status'].lower() not in status_list:
            return None
        if label_list and container_data['label'].lower() not in label_list:
            return None
        if severity_list and container_data['severity'].lower() not in severity_list:
            return None
        # Bug fix: the original >=600 branch read 'in_case' off the wrong
        # variable (a list response); the container record is authoritative.
        if container_data['in_case'] and filter_in_case:
            return None
        return {
            'container_id': related_container_id,
            'container_indicator_match_count': len(container_dictionary[related_container_id]),
            'container_status': container_data['status'],
            'container_severity': container_data['severity'],
            'container_type': container_data['container_type'],
            'container_name': container_data['name'],
            'container_url': base_url.rstrip('/') + '/mission/{}'.format(related_container_id),
            'in_case': container_data['in_case'],
            # Key is 'indicator_ids' in both branches now, matching the
            # documented data path (the original emitted 'indicator_id' here).
            'indicator_ids': container_dictionary[related_container_id],
        }

    if len(matching_ids) >= 600:
        # With many candidates it is faster to fetch all containers at once.
        # The create_time filter is applied server-side as well (the original
        # guarded this with an always-None offset_time, so it never ran).
        params = {'page_size': 0, '_filter_create_time__gt': f'"{earliest_create_time}"'}
        containers_response = phantom.requests.get(uri=container_url, params=params, verify=False).json()
        if not containers_response['count'] > 0:
            # Bug fix: the original referenced an undefined 'url' here.
            raise RuntimeError(f"Unable to find any valid containers at url: '{container_url}'")
        all_container_dictionary = {str(data['id']): data for data in containers_response['data']}
        for related_container_id in matching_ids:
            container_data = all_container_dictionary.get(related_container_id)
            if container_data:
                record = build_output(related_container_id, container_data)
                if record:
                    outputs.append(record)
    else:
        # With few candidates it is faster to fetch each container individually.
        for related_container_id in matching_ids:
            single_url = phantom.build_phantom_rest_url('container', related_container_id)
            container_data = phantom.requests.get(single_url, verify=False).json()
            record = build_output(related_container_id, container_data)
            if record:
                outputs.append(record)

    # Return a JSON-serializable object
    assert json.dumps(outputs)  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #19
0
def artifact_update(artifact_id=None,
                    name=None,
                    label=None,
                    severity=None,
                    cef_field=None,
                    cef_value=None,
                    cef_data_type=None,
                    tags=None,
                    input_json=None,
                    **kwargs):
    """
    Update an artifact with the specified attributes. All parameters are optional, except that cef_field and cef_value must both be provided if one is provided.
    
    Args:
        artifact_id (CEF type: phantom artifact id): ID of the artifact to update, which is required.
        name: Change the name of the artifact.
        label: Change the label of the artifact.
        severity: Change the severity of the artifact. Typically this is either "High", "Medium", or "Low".
        cef_field: The name of the CEF field to populate in the artifact, such as "destinationAddress" or "sourceDnsDomain". Required only if cef_value is provided.
        cef_value (CEF type: *): The value of the CEF field to populate in the artifact, such as the IP address, domain name, or file hash. Required only if cef_field is provided.
        cef_data_type: The CEF data type of the data in cef_value. For example, this could be "ip", "hash", or "domain". Optional, but only operational if cef_field is provided.
        tags: A comma-separated list of tags to apply to the artifact, which is optional.
        input_json: Optional parameter to modify any extra attributes of the artifact. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.
    
    Returns a JSON-serializable object that implements the configured data paths:
        
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    updated_artifact = {}

    if not isinstance(artifact_id, int):
        raise TypeError("artifact_id is required and must be an int")

    # Only include the fields the caller actually asked to change.
    if name:
        updated_artifact['name'] = name
    if label:
        updated_artifact['label'] = label
    if severity:
        updated_artifact['severity'] = severity

    # cef_field and cef_value must be provided together.
    if (cef_field and not cef_value) or (cef_value and not cef_field):
        raise ValueError("only one of cef_field and cef_value was provided")

    # cef data is formatted {cef_field: cef_value}; an optional data type is
    # attached under cef_types as {cef_field: [cef_data_type]}.
    if cef_field:
        updated_artifact['cef'] = {cef_field: cef_value}
        if cef_data_type and isinstance(cef_data_type, str):
            updated_artifact['cef_types'] = {cef_field: [cef_data_type]}

    # Split tags on commas, trimming surrounding whitespace per tag.
    # Bug fix: the original stripped *all* spaces, mangling multi-word tags.
    if tags:
        updated_artifact['tags'] = [tag.strip() for tag in tags.split(",")]

    # input_json wins on any conflicting keys.
    if input_json:
        json_dict = json.loads(input_json)
        for json_key in json_dict:
            updated_artifact[json_key] = json_dict[json_key]

    # POST the merged attribute set to /rest/artifact/<id>.
    phantom.debug(
        'updating artifact {} with the following attributes:\n{}'.format(
            artifact_id, updated_artifact))
    url = phantom.build_phantom_rest_url('artifact', artifact_id)
    response = phantom.requests.post(url, json=updated_artifact,
                                     verify=False).json()

    phantom.debug(
        'POST /rest/artifact returned the following response:\n{}'.format(
            response))
    # Same success-check idiom as the other custom functions in this file.
    if not response.get('success'):
        raise RuntimeError("POST /rest/artifact failed")

    return
コード例 #20
0
def custom_format(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    """Render a markdown report of related events and their indicators.

    Collects the 'find_related_events' custom-function results from the
    container, builds one markdown table per related event (at most 10
    indicators each, resolved by id via the /rest/indicator endpoint),
    saves the text in run data under 'custom_format:output', and then
    invokes the downstream event_details block.

    NOTE(review): relies on module-level `phantom` and `json` imports and on
    the sibling `event_details` function defined elsewhere in this playbook.
    """
    phantom.debug("custom_format() called")

    ################################################################################
    # Produce a custom format that calculates how many related indicators there are 
    # per container. This is used to truncate the output if it's over the specified 
    # amount.
    ################################################################################

    # Each collect2 row is (container_id, indicator_ids, container_name).
    find_related_events_data = phantom.collect2(container=container, datapath=["find_related_events:custom_function_result.data.*.container_id","find_related_events:custom_function_result.data.*.indicator_ids","find_related_events:custom_function_result.data.*.container_name"])

    find_related_events_data___container_id = [item[0] for item in find_related_events_data]
    find_related_events_data___indicator_ids = [item[1] for item in find_related_events_data]
    find_related_events_data___container_name = [item[2] for item in find_related_events_data]

    custom_format__output = None

    ################################################################################
    ## Custom Code Start
    ################################################################################

    # Define base format (markdown preamble shown to the analyst) - customize as needed
    custom_format__output = """Please review the following events and their associated indicators. Consider merging the related events into the current investigation.
    
The merge process will:
 - Mark the current event as the parent case. If no workbook has been added, it will use the default workbook.
 - Copy events, artifacts, and notes to the parent case.
 - Close the related events with a link to the parent case.
 
 """
    
    # Build phantom urls for use later (event links and indicator lookups)
    base_url = phantom.get_base_url()
    url = phantom.build_phantom_rest_url('indicator')
    
    # One markdown section per related event: a linked heading plus a
    # two-column table of indicator field names and values.
    for item1,item2,item3 in zip(find_related_events_data___container_id,find_related_events_data___indicator_ids,find_related_events_data___container_name):
        custom_format__output += "#### [Event {0}: {1}]({2}/mission/{0}/summary/evidence)\n\n".format(item1, item3, base_url)
        custom_format__output += "| Field Names | Values |\n"
        custom_format__output += "| --- | --- |\n"
        
        # Maps indicator value -> comma-joined CEF field name(s).
        indicator_dict = {}

        # find_related_containers only returns an indicator id; this resolves each
        # id to its value and CEF field names via the REST API.
        # Only iterate through 10 indicators for easier readability.
        for indicator in item2[0:10]:
            response = phantom.requests.get(uri = url + "/{}?_special_fields=true".format(indicator), verify=False).json()              
            value = response['value']
            fields = response.get('_special_fields')
            
            # Remove null items, sort, and join the field names.
            # NOTE(review): if '_special_fields' is absent, fields stays None and
            # the sort below would compare None with str — confirm upstream data.
            if isinstance(fields, list):
                fields = [item for item in fields if item]
                fields = sorted(fields)
                fields = ", ".join(fields)
                
            indicator_dict[value] = fields
            
        # Sort rows by field name(s) first, then by indicator value
        # (kv[1] is the joined field-name string, kv[0] is the value).
        for k,v in sorted(indicator_dict.items(), key = lambda kv:(kv[1], kv[0])):
            # Truncate very long indicator values to keep the table readable.
            if len(k) > 250:
                custom_format__output += "| {0} | ```{1}``` ***...truncated...*** | \n".format(v, k[:250])
            else:
                custom_format__output += "| {0} | ```{1}``` | \n".format(v, k)
            
        # If there were more than 10 indicators, add a note at the end letting the
        # analyst know they can find more by following the event link.
        if len(item2) > 10:
            custom_format__output += "- ***+{0} additional related artifacts***".format(len(item2) - 10)
            
        custom_format__output += "\n---\n\n"

    ################################################################################
    ## Custom Code End
    ################################################################################

    phantom.save_run_data(key="custom_format:output", value=json.dumps(custom_format__output))

    event_details(container=container)

    return
コード例 #21
0
def workbook_task_update(task_name=None,
                         note_title=None,
                         note_content=None,
                         status=None,
                         owner=None,
                         container=None,
                         **kwargs):
    """
    Update a workbook task by task name.
    
    Args:
        task_name (CEF type: *): Name of a workbook task (Required)
        note_title (CEF type: *): Note title goes here (Optional)
        note_content (CEF type: *): Body of note goes here (Optional)
        status (CEF type: *): One of: incomplete, in_progress, complete (Optional)
        owner (CEF type: *): Assigns task to provided owner. Accepts keyword 'current' to assign task to currently running playbook user. (Optional)
        container (CEF type: phantom container id): ID of Phantom Container (Required)
    
    Returns a JSON-serializable object that implements the configured data paths:
        note_id: Returns note_id if a note was added
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = {}

    # Workbook task status ids used by the REST API:
    # 0 = incomplete, 1 = complete, 2 = in progress.
    STATUS_INCOMPLETE, STATUS_COMPLETE, STATUS_IN_PROGRESS = 0, 1, 2

    # Resolve the container id from either a container dict or a plain int.
    if isinstance(container, dict) and container.get('id'):
        container_id = container['id']
    elif isinstance(container, int):
        container_id = container
    else:
        raise TypeError(
            "The input 'container' is neither a container dictionary nor an int, so it cannot be used"
        )

    # Locate the task by name. Initialize first so a missing or empty
    # task_name raises the 'no match' error below instead of a NameError
    # (bug fix), and count each match exactly once (the original
    # incremented task_count twice per match).
    task_count = 0
    task_id = task_is_note_required = task_status = None
    if task_name:
        for task in phantom.get_tasks(container_id):
            if task_name == task['data']['name']:
                task_count += 1
                if task_count > 1:
                    raise RuntimeError(
                        f'Unable to update workbook task - multiple tasks match criteria: {task_count}'
                    )
                task_id = task['data']['id']
                task_is_note_required = task['data']['is_note_required']
                task_status = task['data']['status']

    if task_count == 0:
        raise RuntimeError(
            f"No task name matches input task_name: '{task_name}'")

    task_url = phantom.build_phantom_rest_url(
        'workbook_task') + '/{}'.format(task_id)

    def _post_status(payload):
        # POST a partial task update and fail loudly if SOAR rejects it.
        response = phantom.requests.post(task_url,
                                         data=json.dumps(payload),
                                         verify=False).json()
        if not response.get('success'):
            raise RuntimeError(f'Error setting status "{status}" - {response}')

    # A task flagged 'is_note_required' cannot be completed without both a
    # closing note and a title (unless it is already complete).
    if task_is_note_required and (
            not note_content
            or not note_title) and status == 'complete' and task_status != STATUS_COMPLETE:
        raise RuntimeError(
            'Unable to update workbook task - The task requires a closing note and a closing title'
        )

    # Add the task note, if any, and surface its id in the outputs.
    if note_content:
        success, message, note_id = phantom.add_note(
            container=container_id,
            note_type='task',
            task_id=task_id,
            title=note_title,
            content=note_content,
            note_format='markdown')
        outputs['note_id'] = str(note_id)

    # Assign an owner: 'current' maps to the effective playbook user;
    # otherwise the string is resolved first as a username, then as a role.
    if owner:
        owner_dict = {}
        if owner.lower() == 'current':
            owner_dict['owner_id'] = phantom.get_effective_user()
        else:
            url = phantom.build_phantom_rest_url(
                'ph_user') + f'?_filter_username="{owner}"'
            data = phantom.requests.get(url,
                                        verify=False).json().get('data')
            if data and len(data) == 1:
                owner_dict['owner_id'] = data[0]['id']
            elif data and len(data) > 1:
                raise RuntimeError(f'Multiple matches for owner "{owner}"')
            else:
                url = phantom.build_phantom_rest_url(
                    'role') + f'?_filter_name="{owner}"'
                data = phantom.requests.get(
                    url, verify=False).json().get('data')
                if data and len(data) == 1:
                    owner_dict['role_id'] = data[0]['id']
                elif data and len(data) > 1:
                    raise RuntimeError(
                        f'Multiple matches for owner "{owner}"')
                else:
                    raise RuntimeError(
                        f'"{owner}" is not a valid username or role')

        response = phantom.requests.post(task_url,
                                         data=json.dumps(owner_dict),
                                         verify=False).json()
        if not response.get('success'):
            raise RuntimeError(f'Error setting "{owner}" - {response}')

    # Transition the task status. Completing a not-started task requires
    # passing through 'in progress' first; closing a note-required task
    # attaches the closing note fields to the final POST.
    if isinstance(status, str):
        status = status.lower()
        closing_fields = {}
        if task_is_note_required and note_content:
            closing_fields = {'note': note_content,
                              'title': note_title,
                              'note_format': 'markdown'}
        if status == 'complete' and task_status == STATUS_INCOMPLETE:
            _post_status({'status': STATUS_IN_PROGRESS})
            _post_status(dict({'status': STATUS_COMPLETE}, **closing_fields))
        elif status in ('in progress', 'in_progress') and task_status != STATUS_IN_PROGRESS:
            _post_status({'status': STATUS_IN_PROGRESS})
        elif status == 'incomplete' and task_status != STATUS_INCOMPLETE:
            _post_status({'status': STATUS_INCOMPLETE})
        elif status == 'complete' and task_status != STATUS_COMPLETE:
            _post_status(dict({'status': STATUS_COMPLETE}, **closing_fields))

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
コード例 #22
0
def artifact_create(container=None,
                    name=None,
                    label=None,
                    severity=None,
                    cef_field=None,
                    cef_value=None,
                    cef_data_type=None,
                    tags=None,
                    run_automation=None,
                    input_json=None,
                    **kwargs):
    """
    Create a new artifact with the specified attributes.
    
    Args:
        container (CEF type: phantom container id): Container which the artifact will be added to.
        name: The name of the new artifact, which is optional and defaults to "artifact".
        label: The label of the new artifact, which is optional and defaults to "events"
        severity: The severity of the new artifact, which is optional and defaults to "Medium". Typically this is either "High", "Medium", or "Low".
        cef_field: The name of the CEF field to populate in the artifact, such as "destinationAddress" or "sourceDnsDomain". Required only if cef_value is provided.
        cef_value (CEF type: *): The value of the CEF field to populate in the artifact, such as the IP address, domain name, or file hash. Required only if cef_field is provided.
        cef_data_type: The CEF data type of the data in cef_value. For example, this could be "ip", "hash", or "domain". Optional.
        tags: A comma-separated list of tags to apply to the created artifact, which is optional.
        run_automation: Either "true" or "false", depending on whether or not the new artifact should trigger the execution of any playbooks that are set to active on the label of the container the artifact will be added to. Optional and defaults to "false".
        input_json: Optional parameter to modify any extra attributes of the artifact. Input_json will be merged with other inputs. In the event of a conflict, input_json will take precedence.
    
    Returns a JSON-serializable object that implements the configured data paths:
        artifact_id (CEF type: phantom artifact id): The ID of the created artifact.
    
    Raises:
        TypeError: If container is neither an int nor a dict, or run_automation is not a string.
        ValueError: If only one of cef_field/cef_value is given, run_automation is not
            'true'/'false', or input_json is neither a dict nor a JSON string.
        RuntimeError: If phantom.add_artifact() reports failure.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    new_artifact = {}
    json_dict = None

    # Accept either a bare container ID or a full container object
    if isinstance(container, int):
        container_id = container
    elif isinstance(container, dict):
        container_id = container['id']
    else:
        raise TypeError("container is neither an int nor a dictionary")

    # FIX: container_id was previously computed but never used, so the artifact
    # was created without a target container. add_artifact() needs it.
    new_artifact['container'] = container_id

    # Optional attributes with their documented defaults
    new_artifact['name'] = name if name else 'artifact'
    new_artifact['label'] = label if label else 'events'
    new_artifact['severity'] = severity if severity else 'Medium'

    # validate that if cef_field or cef_value is provided, the other is also provided
    if (cef_field and not cef_value) or (cef_value and not cef_field):
        raise ValueError("only one of cef_field and cef_value was provided")

    # cef_data should be formatted {cef_field: cef_value}
    if cef_field:
        new_artifact['cef_data'] = {cef_field: cef_value}
        if cef_data_type and isinstance(cef_data_type, str):
            new_artifact['field_mapping'] = {cef_field: [cef_data_type]}

    # run_automation must be "true" or "false" and defaults to "false"
    if run_automation:
        if not isinstance(run_automation, str):
            raise TypeError("run automation must be a string")
        if run_automation.lower() == 'true':
            new_artifact['run_automation'] = True
        elif run_automation.lower() == 'false':
            new_artifact['run_automation'] = False
        else:
            raise ValueError("run_automation must be either 'true' or 'false'")
    else:
        new_artifact['run_automation'] = False

    if input_json:
        # ensure valid input_json
        if isinstance(input_json, dict):
            json_dict = input_json
        elif isinstance(input_json, str):
            json_dict = json.loads(input_json)
        else:
            raise ValueError(
                "input_json must be either 'dict' or valid json 'string'")

    if json_dict:
        # Merge dictionaries; json_dict wins on conflicting keys.
        # 'tags' is pulled out because it is not a valid add_artifact() parameter.
        for json_key, json_value in json_dict.items():
            if json_key == 'tags':
                tags = json_value
            else:
                new_artifact[json_key] = json_value

    # now actually create the artifact
    phantom.debug(
        'creating a new artifact with the following attributes:\n{}'.format(
            new_artifact))
    success, message, artifact_id = phantom.add_artifact(**new_artifact)

    phantom.debug(
        'add_artifact() returned the following:\nsuccess: {}\nmessage: {}\nartifact_id: {}'
        .format(success, message, artifact_id))
    if not success:
        raise RuntimeError("add_artifact() failed")

    # add the tags in a separate REST call because there is no tags parameter in add_artifact()
    if tags:
        # FIX: tags may arrive as a comma-separated string, or already as a
        # list when supplied via input_json; .replace() only applies to strings
        if isinstance(tags, str):
            tags = tags.replace(" ", "").split(",")
        url = phantom.build_phantom_rest_url('artifact', artifact_id)
        response = phantom.requests.post(uri=url,
                                         json={
                                             'tags': tags
                                         },
                                         verify=False).json()
        phantom.debug(
            'response from POST request to add tags:\n{}'.format(response))

    # Return the id of the created artifact
    return {'artifact_id': artifact_id}
# ---- Code example #23 ----
def collect_by_cef_type(container=None,
                        data_types=None,
                        tags=None,
                        scope=None,
                        **kwargs):
    """
    Collect all artifact values that match the desired CEF data types, such as "ip", "url", "sha1", or "all". Optionally also filter for artifacts that have the specified tags.
    
    Args:
        container (CEF type: phantom container id): Container ID or container object.
        data_types: The CEF data type to collect values for. This could be a single string or a comma separated list such as "hash,filehash,file_hash". The special value "all" can also be used to collect all field values from all artifacts.
        tags: If tags are provided, only return fields from artifacts that have all of the provided tags. This could be an individual tag or a comma separated list.
        scope: Defaults to 'new'. Define custom scope. Advanced Settings Scope is not passed to a custom function. Options are 'all' or 'new'.
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.artifact_value (CEF type: *): The value of the field with the matching CEF data type.
        *.artifact_id (CEF type: phantom artifact id): ID of the artifact that contains the value.
    
    Raises:
        TypeError: If container is neither a container dict nor an int.
        ValueError: If data_types is missing/not a string, scope is invalid,
            or the container ID cannot be found via REST.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    import traceback

    # validate container and get ID
    # (.get() so a dict without 'id' falls through to the TypeError instead of KeyError)
    if isinstance(container, dict) and container.get('id'):
        container_dict = container
        container_id = container['id']
    elif isinstance(container, int):
        rest_container = phantom.requests.get(
            uri=phantom.build_phantom_rest_url('container', container),
            verify=False).json()
        if 'id' not in rest_container:
            # FIX: the 'f' prefix was missing, so the literal "{container}"
            # appeared in the error message instead of the actual ID
            raise ValueError(f'Failed to find container with id {container}')
        container_dict = rest_container
        container_id = container
    else:
        raise TypeError(
            "The input 'container' is neither a container dictionary nor an int, so it cannot be used"
        )

    # validate the data_types input
    if not data_types or not isinstance(data_types, str):
        raise ValueError(
            "The input 'data_types' must exist and must be a string")
    # if data_types has a comma, split it and treat it as a list
    elif "," in data_types:
        data_types = [item.strip() for item in data_types.split(",")]
    # else it must be a single data type
    else:
        data_types = [data_types]

    # validate scope input
    if isinstance(scope, str) and scope.lower() in ['new', 'all']:
        scope = scope.lower()
    elif not scope:
        scope = None
    else:
        raise ValueError("The input 'scope' is not one of 'new' or 'all'")

    # normalize tags into a list (empty, comma separated, or single tag)
    if not tags:
        tags = []
    elif tags and "," in tags:
        tags = [item.strip() for item in tags.split(",")]
    else:
        tags = [tags]

    # collect all values matching the cef type (which was previously called "contains")
    collected_field_values = phantom.collect_from_contains(
        container=container_dict,
        action_results=None,
        contains=data_types,
        scope=scope)
    phantom.debug(
        f'found the following field values: {collected_field_values}')

    # collect all the artifacts in the container to get the artifact IDs
    artifacts = phantom.requests.get(uri=phantom.build_phantom_rest_url(
        'container', container_id, 'artifacts'),
                                     params={
                                         'page_size': 0
                                     },
                                     verify=False).json()['data']

    # build the output list from artifacts with the collected field values
    outputs = []

    def _append_unique(value, artifact_id):
        # Helper: append a (value, artifact) pair, skipping exact duplicates
        new_output = {'artifact_value': value, 'artifact_id': artifact_id}
        if new_output not in outputs:
            outputs.append(new_output)

    # "all" is a special value to collect every value from every artifact
    collect_all = data_types == ['all']
    for artifact in artifacts:
        # if any tags are provided, every provided tag must be on the artifact
        if tags and not set(tags).issubset(set(artifact['tags'])):
            continue
        for cef_key, cef_value in artifact['cef'].items():
            if collect_all or cef_value in collected_field_values:
                _append_unique(cef_value, artifact['id'])

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
# ---- Code example #24 ----
def indicator_get_by_tag(tags_or=None,
                         tags_and=None,
                         indicator_timerange=None,
                         container=None,
                         tags_exclude=None,
                         **kwargs):
    """
    Get indicator(s) by tags.
    
    Args:
        tags_or: Comma separated list of tags. Tags will be OR'd together: e.g. tag1 OR tag2 OR tag3. Tags do not support whitespace and whitespace will be automatically removed.
        tags_and: Comma separated list of tags. Tags will be AND'd together: e.g. tag1 AND tag2 AND tag3. Tags do not support whitespace and whitespace will be automatically removed.
        indicator_timerange: Defaults to last_30_days (which applies no extra filter,
            since that is the REST endpoint's default window).
            options:
            today
            yesterday
            this_week
            this_month
            this_year
            last_7_days
            last_30_days
            last_week
            last_month
            last_year
        container: Optional parameter to ensure the fetched indicator exists in the supplied container.
        tags_exclude: Comma separated list of tags to filter out. If the indicator's tags contain any of the values in this list, they will be omitted from the output.
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.indicator_id (CEF type: *): A matching indicator id record
        *.indicator_value (CEF type: *): A matching indicator value
        *.indicator_tags (CEF type: *): List of tags associated with the indicator record
        *.indicator_cef_type: List of cef types associated with the indicator record
    
    Raises:
        ValueError: If indicator_timerange is not one of the allowed values.
        TypeError: If container is neither an int nor a dict.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom
    from datetime import datetime, timedelta

    outputs = []
    # Keyed by indicator id so an indicator matched by multiple tags is recorded once
    indicator_record = {}
    container_id = None
    allowed_timeranges = [
        'today', 'yesterday', 'this_week', 'this_month', 'this_year',
        'last_7_days', 'last_30_days', 'last_week', 'last_month', 'last_year'
    ]

    # Helper function to translate timeranges to relative datetime.
    # Uses filter_earliest / filter_later for anything 30 days and under as it is quicker.
    # Uses summary timeranges for items greater than 30 days.
    def translate_relative_input(relative_time):
        now = datetime.utcnow()
        relative_time = relative_time.lower()
        time_format = "%Y-%m-%dT%H:%M:%S.%fZ"
        if relative_time == 'today':
            # from midnight UTC today onward
            earliest = now.replace(hour=0, minute=0, second=0, microsecond=0)
            params = {
                "_filter_earliest_time__gt":
                '"{}"'.format(earliest.strftime(time_format))
            }
        elif relative_time == 'yesterday':
            # midnight-to-midnight window of the previous UTC day
            earliest = now.replace(hour=0, minute=0, second=0,
                                   microsecond=0) - timedelta(days=1)
            latest = earliest.replace(hour=23,
                                      minute=59,
                                      second=59,
                                      microsecond=0)
            params = {
                "_filter_earliest_time__gt":
                '"{}"'.format(earliest.strftime(time_format)),
                "_filter_latest_time__lt":
                '"{}"'.format(latest.strftime(time_format))
            }
        elif relative_time == 'this_week':
            # since midnight on Monday of the current week (weekday() is 0 on Monday)
            earliest = now.replace(
                hour=0, minute=0, second=0,
                microsecond=0) - timedelta(days=now.weekday())
            params = {
                "_filter_earliest_time__gt":
                '"{}"'.format(earliest.strftime(time_format))
            }
        elif relative_time == 'this_month':
            # since midnight on the 1st of the current month
            earliest = now.replace(day=1,
                                   hour=0,
                                   minute=0,
                                   second=0,
                                   microsecond=0)
            params = {
                "_filter_earliest_time__gt":
                '"{}"'.format(earliest.strftime(time_format))
            }
        elif relative_time == 'last_7_days':
            earliest = now - timedelta(days=7)
            params = {
                "_filter_earliest_time__gt":
                '"{}"'.format(earliest.strftime(time_format))
            }
        elif relative_time == 'last_30_days':
            # no filter: 30 days is the endpoint's default window
            params = {}
        elif relative_time == 'last_week':
            # NOTE(review): window runs from latest (end of last Sunday) back 8 days;
            # confirm these boundaries are the intended Mon-Sun span
            latest = now.replace(
                hour=23, minute=59, second=59,
                microsecond=0) - timedelta(days=now.weekday() + 1)
            earliest = latest.replace(hour=0, minute=0,
                                      second=0) - timedelta(days=8)
            params = {
                "_filter_earliest_time__gt":
                '"{}"'.format(earliest.strftime(time_format)),
                "_filter_latest_time__lt":
                '"{}"'.format(latest.strftime(time_format))
            }
        else:
            # remaining options (last_month, this_year, last_year) are passed
            # through as a summary 'timerange' parameter for the REST endpoint
            params = {'timerange': relative_time}

        return params

    if indicator_timerange and isinstance(
            indicator_timerange,
            str) and indicator_timerange.lower() in allowed_timeranges:
        time_params = translate_relative_input(indicator_timerange)
    elif not indicator_timerange:
        time_params = {}
    else:
        raise ValueError(
            f"invalid indicator_timerange: '{indicator_timerange}'")

    # Resolve the optional container input to a bare ID
    if isinstance(container, int):
        container_id = container
    elif isinstance(container, dict):
        container_id = container['id']
    elif container:
        raise TypeError("container_input is neither a int or a dictionary")

    url = phantom.build_phantom_rest_url('indicator')
    # OR semantics: one REST query per tag; results are merged into indicator_record
    if tags_or:
        tags_or = tags_or.replace(' ', '')
        for tag in tags_or.split(','):
            params = {
                '_filter_tags__contains': f'"{tag}"',
                "_special_contains": True,
                'page_size': 0,
                **time_params
            }
            response = phantom.requests.get(url, params=params,
                                            verify=False).json()
            if response['count'] > 0:
                for data in response['data']:
                    indicator_record[data['id']] = {
                        'indicator_value': data['value'],
                        'indicator_tags': data['tags'],
                        'indicator_cef_type': data['_special_contains']
                    }
    # AND semantics: one query with the whole tag list in a single contains filter
    if tags_and:
        tags = tags_and.replace(' ', '').split(',')
        params = {
            '_filter_tags__contains': f'{json.dumps(tags)}',
            "_special_contains": True,
            'page_size': 0,
            **time_params
        }
        response = phantom.requests.get(url, params=params,
                                        verify=False).json()
        if response['count'] > 0:
            for data in response['data']:
                indicator_record[data['id']] = {
                    'indicator_value': data['value'],
                    'indicator_tags': data['tags'],
                    'indicator_cef_type': data['_special_contains']
                }

    if tags_exclude:
        tags_exclude = [item.strip() for item in tags_exclude.split(',')]

    if indicator_record:
        for i_id, i_data in indicator_record.items():
            skip_indicator = False

            # Skip indicators that contain an excluded tag
            if tags_exclude:
                for item in tags_exclude:
                    if item in i_data['indicator_tags']:
                        skip_indicator = True

            if container_id and not skip_indicator:
                # Cross-check membership: only emit the indicator when the
                # supplied container appears in its common-container list
                url = phantom.build_phantom_rest_url(
                    'indicator_common_container')
                params = {'indicator_ids': i_id}
                response = phantom.requests.get(url,
                                                params=params,
                                                verify=False).json()
                if response:
                    for container_item in response:
                        # Only add to outputs if the supplied container_id shows in the common_container results
                        if container_item['container_id'] == container_id:
                            outputs.append({
                                'indicator_id': i_id,
                                **indicator_record[i_id]
                            })
                else:
                    phantom.debug(
                        "No indicators found for provided tags and container")

            elif not skip_indicator:

                outputs.append({
                    'indicator_id': i_id,
                    **indicator_record[i_id]
                })
    else:
        phantom.debug("No indicators found for provided tags")

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs
# ---- Code example #25 ----
def mark_evidence(container=None,
                  input_object=None,
                  content_type=None,
                  **kwargs):
    """
    Mark an object as Evidence in a container
    
    Args:
        container (CEF type: phantom container id): Container ID or Container Object
        input_object (CEF type: *): The object to mark as evidence. This could be a vault_id, artifact_id, note_id, container_id, or action_run_id. If the previous playbook block is an action then "keyword_argument:results" can be used for the action_run_id with the content_type "action_run_id". Vault_id can be an ID or a vault hash.
        content_type (CEF type: *): The content type of the object to add as evidence which must be one of the following:
                        
                        vault_id
                        artifact_id
                        container_id
                        note_id
                        action_run_id
    
    Returns a JSON-serializable object that implements the configured data paths:
        *.id (CEF type: *): ID of the evidence item
    
    Raises:
        TypeError: If content_type, container, or input_object is invalid.
        RuntimeError: If a vault hash cannot be resolved or the evidence POST fails.
    """
    ############################ Custom Code Goes Below This Line #################################
    import json
    import phantom.rules as phantom

    outputs = []
    container_id = None
    data = []
    valid_types = [
        'vault_id', 'artifact_id', 'container_id', 'note_id', 'action_run_id'
    ]
    # The REST 'evidence' endpoint uses different content_type names than this
    # function's inputs ('container_id' cannot be entered literally as
    # 'container'), so plain-ID inputs are translated through this map.
    simple_type_map = {
        'action_run_id': 'actionrun',
        'container_id': 'container',
        'artifact_id': 'artifact',
        'note_id': 'note',
    }

    def _is_id(value):
        # True for an int or a digits-only string, e.g. 12 or "12"
        return isinstance(value, int) or (isinstance(value, str)
                                          and value.isdigit())

    # Ensure valid content_type:
    if content_type.lower() not in valid_types:
        raise TypeError(
            f"The content_type '{content_type}' is not a valid content_type")
    normalized_type = content_type.lower()

    # Ensure valid container input
    if isinstance(container, dict) and container.get('id'):
        container_id = container['id']
    elif _is_id(container):
        container_id = container
    else:
        raise TypeError(
            "The input 'container' is neither a container dictionary nor an int, so it cannot be used"
        )

    if normalized_type == 'action_run_id' and isinstance(input_object, list):
        # input_object looks like an action results list: collect every
        # action_run_id it contains
        for action_result in input_object:
            if action_result.get('action_run_id'):
                data.append({
                    "container_id": container_id,
                    "object_id": action_result['action_run_id'],
                    "content_type": 'actionrun',
                })
        # If data is still empty after the loop, the input_object was not a
        # valid results object
        if not data:
            raise TypeError(
                "The input for 'input_object' is not a valid integer or supported object."
            )
    elif normalized_type == 'vault_id' and input_object:
        # A numeric vault id is used directly; a vault hash must first be
        # translated to its numeric id via vault_info()
        if not _is_id(input_object):
            success, message, info = phantom.vault_info(vault_id=input_object)
            if not success:
                raise RuntimeError(f"Invalid vault_id: {message}")
            input_object = info[0]['id']
        content_type = "containerattachment"
        data = [{
            "container_id": container_id,
            "object_id": input_object,
            "content_type": content_type,
        }]
    elif normalized_type in simple_type_map and _is_id(input_object):
        # Plain ID for an action run, container, artifact, or note
        data = [{
            "container_id": container_id,
            "object_id": input_object,
            "content_type": simple_type_map[normalized_type],
        }]
    else:
        raise TypeError(
            f"The input_object is not a valid integer or supported object. Type '{type(input_object)}'"
        )

    # Build url for evidence endpoint
    url = phantom.build_phantom_rest_url('evidence')

    # Post data to evidence endpoint
    for item in data:
        response = phantom.requests.post(uri=url, json=item,
                                         verify=False).json()

        # If successful add evidence id to outputs
        # elif evidence already exists print to debug
        # else error out
        if response.get('success'):
            outputs.append({'id': response['id']})
        elif response.get('failed') and response.get(
                'message') == 'Already added to Evidence.':
            phantom.debug(
                f"{content_type} \'{container_id}\' {response['message']}")
        else:
            raise RuntimeError(f"Unable to add evidence: {response}")

    # Return a JSON-serializable object
    assert json.dumps(
        outputs
    )  # Will raise an exception if the :outputs: object is not JSON-serializable
    return outputs