def call_response_parser(parser, response, request=None):
    """ Calls a specified parser on a response

    @param parser: The parser function to call
    @type  parser: Func
    @param response: The response to parse
    @type  response: HttpResponse
    @param request: The request whose parser is being called
    @type  request: Request (None ok)

    @return: False if there was a parser exception, True otherwise
    @rtype : Boolean

    """
    from utils.logger import write_to_main
    # parse response and set dependent variables (for garbage collector)
    try:
        if parser:
            parser(response.json_body)
            # Check request's producers to verify dynamic objects were set
            if request:
                for producer in request.produces:
                    if dependencies.get_variable(producer) == 'None':
                        err_str = f'Failed to parse {producer}; it is now set to None.'
                        write_to_main(err_str)
                        _RAW_LOGGING(err_str)
    except (ResponseParsingException, AttributeError) as error:
        _RAW_LOGGING(str(error))
        return False
    return True
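
# A minimal usage sketch (hypothetical stubs, not part of the module above; a
# real parser and HttpResponse come from the RESTler grammar and transport
# layers). It only illustrates the contract: a truthy return value means the
# parser ran without raising a parsing error.
class _ExampleResponse(object):
    """ Stand-in exposing only the json_body attribute used above. """
    def __init__(self, json_body):
        self.json_body = json_body

def _example_parser(json_body):
    # A generated parser would extract values from json_body and register them
    # via dependencies.set_variable(...); this stub simply accepts the body.
    pass

# call_response_parser(_example_parser, _ExampleResponse('{"id": "42"}'))
# -> True, since the stub parser raises neither ResponseParsingException
#    nor AttributeError.
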
    def _namespace_rule(self):
        """ Try to hijack objects of @param target_types and use them via
        a secondary attacker user.

        @param target_types: The types of the target object to attemp hijack.
        @type  target_types: Set

        @return: None
        @rtype : None

        """
        # For the target types (target dynamic objects), get the latest
        # values which we know will exist due to the previous rendering.
        # We will later on use these old values atop a new rendering.
        hijacked_values = {}
        consumed_types = self._sequence.consumes
        consumed_types = set(itertools.chain(*consumed_types))

        # Exit the checker and do not re-render if nothing is consumed, since
        # the checker will have nothing to work on anyway.
        if not consumed_types:
            return

        # Render only last request if not in exhaustive (expensive) mode.
        # If that last request does not consume anything, stop here.
        if self._mode != 'exhaustive' and not self._sequence.last_request.consumes:
            return

        self._render_original_sequence_start(self._sequence)

        for consumed_type in consumed_types:
            hijacked_values[consumed_type] = dependencies.get_variable(consumed_type)

        self._checker_log.checker_print(f"Hijacked values: {hijacked_values}")
        RAW_LOGGING(f"Hijacked values: {hijacked_values}")


        for i, req in enumerate(self._sequence):
            # Render only last request if not in exhaustive (expensive) mode.
            if self._mode != 'exhaustive' and i != self._sequence.length - 1:
                continue
            # Skip requests that are not consumers.
            if not req.consumes:
                continue
            dependencies.reset_tlb()
            self._render_attacker_subsequence(req)

            # Feed hijacked values.
            for consumed_type in hijacked_values:
                dependencies.set_variable(consumed_type, hijacked_values[consumed_type])
            self._render_hijack_request(req)
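    # Order of operations above (illustrative summary, not executable here):
    #   1. save the latest values of every consumed dynamic-object type,
    #   2. reset the tlb and re-render the attacker subsequence, which creates
    #      fresh objects,
    #   3. re-inject the saved values via dependencies.set_variable, and
    #   4. send the hijack request, which now references objects the secondary
    #      (attacker) user should not be able to reach.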
    def resolve_dependencies(self, data):
        """ Renders dependent variables.

        @param data: The rendered payload with dependency placeholders.
        @type data: String

        @return: The rendered payload with dependency placeholders substituted
                    by the respective values parsed from the appropriate
                    responses.
        @rtype : String

        """
        data = str(data).split(dependencies.RDELIM)
        for i in range(1, len(data), 2):
            var_name = data[i]
            data[i] = dependencies.get_variable(var_name)
            if data[i] == 'None':
                RAW_LOGGING(f'Dynamic object {var_name} is set to None!')
        return "".join(data)
def apply_create_once_resources(fuzzing_requests):
    """ Attempts to create all of the resources in the 'create_once' endpoints.

    @param fuzzing_requests: The collection of requests to be fuzzed
    @type  fuzzing_requests: FuzzingRequestCollection

    @return: A list of destructors to use to clean up the create_once resources,
             or None if there are no create_once endpoints
    @rtype : list(Request)

    """
    def exclude_requests(pre_reqs, post_reqs):
        # Exclude any requests that produce or destroy the create_once endpoint
        for req_i in pre_reqs:
            fuzzing_requests.exclude_preprocessing_request(req_i)
        for req_i in post_reqs:
            fuzzing_requests.exclude_postprocessing_request(req_i)

    create_once_endpoints = Settings().create_once_endpoints

    if not create_once_endpoints:
        return

    logger.create_network_log(logger.LOG_TYPE_PREPROCESSING)
    destructors = set()
    exclude_reqs = set()
    request_count = 0

    logger.write_to_main("Rendering for create-once resources:\n")
    # Iterate through each 'create_once' endpoint
    for endpoint in create_once_endpoints:
        # Verify that the endpoint exists in the request collection
        if endpoint in GrammarRequestCollection().request_id_collection:
            # The create_once resource generator
            resource_gen_req = None
            # Iterate through each of the requests that contain the create_once endpoint
            for req in GrammarRequestCollection(
            ).request_id_collection[endpoint]:
                if req not in fuzzing_requests:
                    logger.write_to_main(
                        "Warning: Create-once endpoint is not a request in the fuzzing list\n",
                        True)
                    break
                if not resource_gen_req and req.is_resource_generator():
                    resource_gen_req = req
                    # Compute the sequence necessary to create the create_once resource
                    req_list = driver.compute_request_goal_seq(
                        resource_gen_req, fuzzing_requests)
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Endpoint - {resource_gen_req.endpoint_no_dynamic_objects}"
                    )
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Hex Def - {resource_gen_req.method_endpoint_hex_definition}"
                    )
                    create_once_seq = sequences.Sequence(req_list)
                    renderings = create_once_seq.render(
                        GrammarRequestCollection().candidate_values_pool,
                        None,
                        preprocessing=True)

                    # Make sure we were able to successfully create the create_once resource
                    if not renderings.valid:
                        logger.write_to_main(
                            f"{formatting.timestamp()}: Rendering INVALID")
                        exclude_requests(exclude_reqs, destructors)
                        raise FailedToCreateResource(destructors)

                    logger.write_to_main(
                        f"{formatting.timestamp()}: Rendering VALID")
                    logger.format_rendering_stats_definition(
                        resource_gen_req,
                        GrammarRequestCollection().candidate_values_pool)
                    if Settings().in_smoke_test_mode():
                        resource_gen_req.stats.request_order = 'Preprocessing'
                        resource_gen_req.stats.valid = 1
                        resource_gen_req.stats.status_code = renderings.final_request_response.status_code
                        resource_gen_req.stats.status_text = renderings.final_request_response.status_text
                        resource_gen_req.stats.sample_request.set_request_stats(
                            renderings.sequence.sent_request_data_list[-1].
                            rendered_data)
                        resource_gen_req.stats.sample_request.set_response_stats(
                            renderings.final_request_response,
                            renderings.final_response_datetime)

                if req.is_destructor():
                    # Add destructors to the destructor list that will be returned
                    destructors.add(req)

            # Only continue processing if a resource generator was actually found for this endpoint
            if not resource_gen_req:
                continue
            request_count += len(req_list)
            # Get the set of all dynamic object names in the endpoint
            var_names = resource_gen_req.consumes.union(
                resource_gen_req.produces)
            # This dictionary will map dynamic object names to the values created during
            # this preprocessing create-once step.
            dynamic_object_values = {}
            for name in var_names:
                dynamic_object_values[name] = dependencies.get_variable(name)

            # Iterate through the entire request collection, searching for requests that include
            # the create_once resource. We want to "lock" the resources in these requests with
            # the dynamic object values that were created during this preprocessing step.
            for req_i in fuzzing_requests:
                # Set the variables in any requests whose consumers were produced
                # by the create_once resource generator
                if resource_gen_req.produces & req_i.consumes:
                    req_i.set_id_values_for_create_once_dynamic_objects(
                        dynamic_object_values, renderings)
                # Exclude any requests that produce the create_once object(s)
                if resource_gen_req.produces & req_i.produces:
                    exclude_reqs.add(req_i)
        else:
            exclude_requests(exclude_reqs, destructors)
            raise InvalidCreateOnce(destructors)

    exclude_requests(exclude_reqs, destructors)

    # Reset all of the dynamic object values that were just created
    dependencies.reset_tlb()
    # Reset the garbage collector, so it doesn't delete any of the resources that were just created
    dependencies.set_saved_dynamic_objects()

    logger.print_request_rendering_stats(
        GrammarRequestCollection().candidate_values_pool, fuzzing_requests,
        Monitor(), request_count, logger.PREPROCESSING_GENERATION, None)

    # Return the list of destructors that were removed from the request collection.
    # These will be used to cleanup the create_once resources created during preprocessing.
    return list(destructors)
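
# Hypothetical caller-side sketch (illustration only; the real driver wiring is
# not shown here): run the create-once step during preprocessing and keep the
# returned destructors so that a later cleanup stage can delete the resources.
#
#     create_once_destructors = apply_create_once_resources(fuzzing_requests)
#     ...
#     # apply_create_once_resources returns None when no create_once endpoints
#     # are configured, hence the "or []".
#     for destructor in (create_once_destructors or []):
#         ...  # execute the destructor to delete the create_once resource
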
    def apply(self, rendered_sequence, lock):
        """ Applies check for resource hierarchy rule violations.

        @param rendered_sequence: Object containing the rendered sequence information
        @type  rendered_sequence: RenderedSequence
        @param lock: Lock object used to sync more than one fuzzing job
        @type  lock: thread.Lock

        @return: None
        @rtype : None

        """
        if not rendered_sequence.valid:
            return
        self._sequence = rendered_sequence.sequence

        # We skip any sequence that contains DELETE methods so that this
        # checker stays isolated from the use-after-free checker.
        if self._sequence.has_destructor():
            return

        consumes = self._sequence.consumes
        predecessors_types = consumes[:-1]
        # Last request is the victim -- our target!
        target_types = consumes[-1]
        # In the "consumes" constraints, each request of the sequence
        # instance has its own set of the dynamic variable types it
        # consumes. We need to flatten this structure.
        predecessors_types = set(itertools.chain(*predecessors_types))

        # Skip sequence if there are no predecessor dependencies or no
        # target objects to swap.
        if not predecessors_types.intersection(target_types)\
                or not target_types - predecessors_types:
            return

        # For the victim types (target dynamic objects), get the latest
        # values which we know will exist due to the previous rendering.
        # We will later on use these old values atop a new rendering.
        old_values = {}
        for target_type in target_types - predecessors_types:
            old_values[target_type] = dependencies.get_variable(target_type)

        # Reset the tlb of all values and re-render all predecessors up to
        # the parent's parent. This will propagate new values for all
        # dynamic objects except for those with the target type. That's what
        # we want, and that's why we render up to the parent's parent (i.e.,
        # up to length(seq) - 2).
        dependencies.reset_tlb()

        # Render the sequence up to, but not including, the first predecessor
        # that produces the target type. That is, if any of the types produced
        # by the request is in the target types, do not render this
        # predecessor and stop here.
        n_predecessors = 0
        for req in self._sequence:
            if req.produces.intersection(target_types - predecessors_types):
                break
            n_predecessors += 1
        new_seq = self._render_n_predecessor_requests(n_predecessors)

        # Log some helpful info.
        self._checker_log.checker_print(
            f"\nTarget types: {target_types - predecessors_types}")
        self._checker_log.checker_print(
            f"Predecessor types: {predecessors_types}")
        self._checker_log.checker_print(
            f"Clean tlb: {dependencies.tlb}")

        # Before rendering the last request, substitute all target types
        # (target dynamic object) with a value that does NOT belong to
        # the current rendering and should not (?) be accessible through
        # the new predecessors' rendering.
        for target_type in old_values:
            dependencies.set_variable(target_type, old_values[target_type])

        self._checker_log.checker_print("Poluted tlb: {}".\
                            format(dependencies.tlb))
        self._render_last_request(new_seq)
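
# Self-contained sketch of the set arithmetic that gates the checker above (toy
# type names; real values are dynamic-object types from the grammar). The
# sequence is only interesting when the predecessors share at least one
# consumed type with the target request AND the target consumes at least one
# type that no predecessor consumes -- that extra type is the object the
# checker swaps in from the previous rendering.
import itertools

_predecessor_consumes = [{'_root_id'}, {'_root_id', '_parent_id'}]
_target_consumes = {'_parent_id', '_child_id'}

_predecessors_types = set(itertools.chain(*_predecessor_consumes))
# Both conditions hold, so apply() would not return early for this sequence:
assert _predecessors_types.intersection(_target_consumes)        # shared hierarchy
assert _target_consumes - _predecessors_types == {'_child_id'}   # type to swap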