def _render_hijack_request(self, req):
        """ Render the last request of the sequence and inspect the status
        code of the response. If it's any of 20x, we have probably hit a bug.

        @param req: The hijack request.
        @type  req: Request Class object.

        @return: None
        @rtype : None

        """
        self._checker_log.checker_print("Hijack request rendering")
        RAW_LOGGING("Hijack request rendering")
        rendered_data, parser = req.render_current(
            self._req_collection.candidate_values_pool
        )
        rendered_data = self._sequence.resolve_dependencies(rendered_data)
        rendered_data = self._change_user_identity(rendered_data)

        response = self._send_request(parser, rendered_data)
        request_utilities.call_response_parser(parser, response)
        if response and self._rule_violation(self._sequence, response):
            self._print_suspect_sequence(self._sequence, response)
            BugBuckets.Instance().update_bug_buckets(
                self._sequence, response.status_code, origin=self.__class__.__name__, reproduce=False
            )
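
# A minimal standalone sketch of the identity-hijack pattern used above: re-send
# a rendered request under a second user's credentials and treat any 2xx response
# as a suspected authorization violation. The send_fn callable, Response type, and
# header handling below are illustrative assumptions, not RESTler APIs.
from typing import Callable, NamedTuple

class Response(NamedTuple):
    status_code: int
    body: str = ""

def change_user_identity(rendered_data: str, attacker_token: str) -> str:
    """Swap the Authorization header in the raw request payload (assumed format)."""
    lines = []
    for line in rendered_data.split("\r\n"):
        if line.lower().startswith("authorization:"):
            line = f"Authorization: {attacker_token}"
        lines.append(line)
    return "\r\n".join(lines)

def check_hijack(rendered_data: str, attacker_token: str,
                 send_fn: Callable[[str], Response]) -> bool:
    """Return True if the hijacked request unexpectedly succeeds (2xx)."""
    hijacked = change_user_identity(rendered_data, attacker_token)
    response = send_fn(hijacked)
    return response is not None and 200 <= response.status_code < 300
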
    def _execute_start_of_sequence(self):
        """ Send all requests in the sequence up until the last request

        @return: None
        @rtype : None

        """
        # Copied from InvalidDynamicObjectChecker
        RAW_LOGGING("Re-rendering and sending start of sequence")
        new_seq = sequences.Sequence([])
        for request in self._sequence.requests[:-1]:
            new_seq = new_seq + sequences.Sequence(request)
            initial_response, response_to_parse = self._render_and_send_data(
                new_seq, request)

            # Check to make sure a bug wasn't uncovered while executing the
            # sequence
            if initial_response:
                if initial_response.has_bug_code():
                    self._print_suspect_sequence(new_seq, initial_response)
                    BugBuckets.Instance().update_bug_buckets(
                        new_seq,
                        initial_response.status_code,
                        origin=self.__class__.__name__)

                if self._acc_response:
                    hints = self._map_response_to_current_body_schema(
                        response_to_parse)
                    for tag in hints:
                        self._response_values[tag] = hints[tag]
        return new_seq
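
# The loop above grows a fresh sequence one request at a time via
# "new_seq = new_seq + sequences.Sequence(request)". A simplified sketch of a
# sequence container supporting that idiom follows; it is an assumption about the
# shape of such a class, not RESTler's sequences.Sequence.
class MiniSequence:
    def __init__(self, requests=None):
        # Keep a private copy so concatenation never mutates an existing sequence.
        self.requests = list(requests or [])

    def __add__(self, other):
        # Return a new sequence; both operands stay unchanged.
        return MiniSequence(self.requests + other.requests)

    @property
    def last_request(self):
        return self.requests[-1]

# Usage: build up a prefix of all but the last request.
prefix = MiniSequence()
for req in ["GET /users", "POST /users", "DELETE /users/1"][:-1]:
    prefix = prefix + MiniSequence([req])
assert prefix.requests == ["GET /users", "POST /users"]
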
    def _render_consumer_request(self, seq):
        """ Render the last request of the sequence and inspect the status
        code of the response. If it's not 40x, we have probably hit a bug.

        @param seq: The sequence whose last request we will try to render.
        @type  seq: Sequence Class object.

        @return: None
        @rtype : None

        """
        request = seq.last_request
        response, _ = self._render_and_send_data(seq, request)
        if response and self._rule_violation(seq, response):
            self._print_suspect_sequence(seq, response)
            BugBuckets.Instance().update_bug_buckets(seq, response.status_code, origin=self.__class__.__name__)
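
# _rule_violation itself is not shown in these snippets. A standalone sketch of
# such a predicate is given below: a checker declares which status-code classes it
# expects, and anything outside that set counts as a violation. This is an
# illustrative assumption about the rule, not RESTler's implementation.
def rule_violation(status_code: int, expected_classes=("4",)) -> bool:
    """Return True if the response's status-code class is not in the expected set.

    expected_classes holds leading digits, e.g. ("4",) means any 4xx is expected
    and everything else (2xx, 5xx, ...) is treated as a suspected bug.
    """
    return str(status_code)[0] not in expected_classes

# For _render_consumer_request above ("if it's not 40x, we have probably hit a bug"):
assert rule_violation(200, expected_classes=("4",)) is True
assert rule_violation(404, expected_classes=("4",)) is False
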
    def _render_last_request(self, new_seq):
        """ Render the last request of the sequence and inspect the status
        code of the response. If it's any of 20x, we have probably hit a bug.

        @param new_seq: The new sequence that was rendered with this checker
        @type  new_seq: Sequence

        @return: None
        @rtype : None

        """
        new_seq = new_seq + sequences.Sequence(self._sequence.last_request)
        response, _ = self._render_and_send_data(new_seq,
                                                 self._sequence.last_request)
        if response and self._rule_violation(new_seq, response):
            self._print_suspect_sequence(new_seq, response)
            BugBuckets.Instance().update_bug_buckets(
                new_seq, response.status_code, origin=self.__class__.__name__)
    def _render_last_request(self, seq):
        """ Render the last request of the sequence and inspect the status
        code of the response. If it's any of 20x, we have probably hit a bug.

        @param seq: The sequence whose last request we will try to render.
        @type  seq: Sequence Class object.

        @return: None
        @rtype : None

        """
        request = seq.last_request
        for rendered_data, parser in\
            request.render_iter(self._req_collection.candidate_values_pool,
                                skip=request._current_combination_id):
            # Hold the lock (because other workers may be rendering the same
            # request) and check whether the current rendering is known from the
            # past to lead to invalid status codes. If so, skip the current
            # rendering.
            if self._lock is not None:
                self._lock.acquire()
            should_skip = Monitor().is_invalid_rendering(request)
            if self._lock is not None:
                self._lock.release()

            # Skip this rendering and don't forget to increase the counter.
            if should_skip:
                RAW_LOGGING("Skipping rendering: {}".\
                            format(request._current_combination_id))
                request._current_combination_id += 1
                continue

            rendered_data = seq.resolve_dependencies(rendered_data)

            response = self._send_request(parser, rendered_data)
            request_utilities.call_response_parser(parser, response)
            # Append the rendered data to the sent list as we will not be rendering
            # with the sequence's render function
            seq.append_data_to_sent_list(rendered_data, parser, response)
            if response and self._rule_violation(seq, response):
                self._print_suspect_sequence(seq, response)
                BugBuckets.Instance().update_bug_buckets(
                    seq, response.status_code, origin=self.__class__.__name__)
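
# The loop above guards a shared monitor with an optional lock before deciding
# whether to skip a rendering. A self-contained sketch of that skip pattern,
# using a plain set of known-bad combination ids in place of RESTler's Monitor,
# is shown below (the set and renderings list are illustrative assumptions).
import threading

def render_combinations(renderings, known_invalid_ids, lock=None):
    """Yield only renderings whose combination id is not already known to be invalid."""
    for combination_id, rendered_data in enumerate(renderings):
        if lock is not None:
            lock.acquire()
        try:
            should_skip = combination_id in known_invalid_ids
        finally:
            if lock is not None:
                lock.release()
        if should_skip:
            continue  # skip renderings that previously led to invalid status codes
        yield combination_id, rendered_data

lock = threading.Lock()
kept = list(render_combinations(["a", "b", "c"], known_invalid_ids={1}, lock=lock))
assert kept == [(0, "a"), (2, "c")]
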
        def _send_request(request_to_send):
            self._log(
                "Sending example request: \n"
                f"{request_to_send.definition}",
                print_to_network_log=False)
            seq = self._sequence + Sequence(request_to_send)
            response, _ = self._render_and_send_data(seq, request_to_send)

            code = response.status_code
            self._log(f"Status Code: {code}", print_to_network_log=False)
            if code not in status_codes:
                status_codes[code] = 0
            status_codes[code] += 1

            # Check to make sure a bug wasn't uncovered while executing the sequence
            if response and response.has_bug_code():
                self._print_suspect_sequence(seq, response)
                BugBuckets.Instance().update_bug_buckets(
                    seq,
                    code,
                    origin=self.__class__.__name__,
                    hash_full_request=True)
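
# The nested _send_request above tallies response status codes into a plain dict.
# An equivalent standalone tally using collections.Counter is sketched below; the
# sample codes are made up for illustration.
from collections import Counter

def tally_status_codes(codes):
    """Count how many times each status code was observed."""
    status_codes = Counter()
    for code in codes:
        status_codes[code] += 1
    return status_codes

assert tally_status_codes([200, 201, 200, 500]) == Counter({200: 2, 201: 1, 500: 1})
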
    def _render_n_predecessor_requests(self, n_predecessors):
        """ Render up to the parent's parent predecessor request.

        @param n_predecessors: The number of predecessors to render.
        @type  n_predecessors: Int

        @return: Sequence of n predecessor requests sent to server
        @rtype : Sequence

        """
        new_seq = sequences.Sequence([])
        for i in range(n_predecessors):
            request = self._sequence.requests[i]
            new_seq = new_seq + sequences.Sequence(request)
            response, _ = self._render_and_send_data(new_seq, request)
            # Check to make sure a bug wasn't uncovered while executing the sequence
            if response and response.has_bug_code():
                self._print_suspect_sequence(new_seq, response)
                BugBuckets.Instance().update_bug_buckets(
                    new_seq,
                    response.status_code,
                    origin=self.__class__.__name__)

        return new_seq
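
# BugBuckets.Instance().update_bug_buckets(...) is called throughout these checkers.
# Below is a minimal, self-contained sketch of what bucketing suspected bugs could
# look like: group reports by a hash of (origin, status code, request definitions),
# so repeated hits of the same bug collapse into one bucket. This is an illustrative
# assumption, not RESTler's BugBuckets implementation.
import hashlib

class MiniBugBuckets:
    def __init__(self):
        self._buckets = {}

    def update(self, request_definitions, status_code, origin):
        key_material = f"{origin}|{status_code}|" + "|".join(request_definitions)
        bucket_hash = hashlib.sha256(key_material.encode()).hexdigest()[:16]
        self._buckets.setdefault(bucket_hash, []).append(
            {"origin": origin, "status_code": status_code})
        return bucket_hash

    def num_bug_buckets(self):
        return len(self._buckets)

buckets = MiniBugBuckets()
buckets.update(["GET /a", "DELETE /a"], 500, origin="UseAfterFreeChecker")
buckets.update(["GET /a", "DELETE /a"], 500, origin="UseAfterFreeChecker")
assert buckets.num_bug_buckets() == 1
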
def print_generation_stats(req_collection,
                           fuzzing_monitor,
                           global_lock,
                           final=False):
    """ Prints global generation's statistics.

    @param req_collection: The requests collection.
    @type  req_collection: RequestCollection class object.
    @param fuzzing_monitor: The global fuzzing monitor
    @type  fuzzing_monitor: FuzzingMonitor
    @param global_lock: Lock object used to synchronize multiple concurrent fuzzing jobs.
    @type  global_lock: thread.Lock object
    @param final: If set to True, this is the end of the run generation stats
    @type  final: Bool

    @return: None
    @rtype : None

    """
    from engine.bug_bucketing import BugBuckets
    from engine.transport_layer.response import VALID_CODES
    from engine.transport_layer.response import RESTLER_INVALID_CODE
    timestamp = formatting.timestamp()

    successful_requests = []
    num_fully_valid = 0
    num_sequence_failures = 0
    for r in req_collection:
        query_result = fuzzing_monitor.query_status_codes_monitor(
            r, VALID_CODES, [RESTLER_INVALID_CODE], global_lock)
        successful_requests.append(query_result.valid_code)
        if (query_result.fully_valid):
            num_fully_valid += 1
        if (query_result.sequence_failure):
            num_sequence_failures += 1

    sum_successful_requests = sum(successful_requests)
    num_rendered_requests = fuzzing_monitor.num_fully_rendered_requests(
        req_collection, global_lock)

    final_spec_coverage = f"{num_fully_valid} / {req_collection.size}"
    rendered_requests = f"{num_rendered_requests} / {req_collection.size}"
    rendered_requests_valid_status = f"{sum_successful_requests} / {num_rendered_requests}"
    num_invalid_by_failed_resource_creations = sum_successful_requests - num_fully_valid
    total_object_creations = dependencies.object_creations
    total_requests_sent = fuzzing_monitor.num_requests_sent()
    bug_buckets = BugBuckets.Instance().num_bug_buckets()

    write_to_main(
        f"{timestamp}: Final Swagger spec coverage: {final_spec_coverage}\n"
        f"{timestamp}: Rendered requests: {rendered_requests}\n"
        f"{timestamp}: Rendered requests with \"valid\" status codes: {rendered_requests_valid_status}\n"
        f"{timestamp}: Num fully valid requests (no resource creation failures): {num_fully_valid}\n"
        f"{timestamp}: Num requests not rendered due to invalid sequence re-renders: {num_sequence_failures}\n"
        f"{timestamp}: Num invalid requests caused by failed resource creations: {num_invalid_by_failed_resource_creations}\n"
        f"{timestamp}: Total Creations of Dyn Objects: {total_object_creations}\n"
        f"{timestamp}: Total Requests Sent: {total_requests_sent}\n"
        f"{timestamp}: Bug Buckets: {BugBuckets.Instance().num_bug_buckets()}")

    if final:
        testing_summary = OrderedDict()
        testing_summary['final_spec_coverage'] = final_spec_coverage
        testing_summary['rendered_requests'] = rendered_requests
        testing_summary[
            'rendered_requests_valid_status'] = rendered_requests_valid_status
        testing_summary['num_fully_valid'] = num_fully_valid
        testing_summary['num_sequence_failures'] = num_sequence_failures
        testing_summary[
            'num_invalid_by_failed_resource_creations'] = num_invalid_by_failed_resource_creations
        testing_summary['total_object_creations'] = total_object_creations
        testing_summary['total_requests_sent'] = total_requests_sent
        testing_summary['bug_buckets'] = bug_buckets

        with open(os.path.join(LOGS_DIR, "testing_summary.json"),
                  "w+") as summary_json:
            json.dump(testing_summary, summary_json, indent=4)
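
# print_generation_stats above assembles its counters into an OrderedDict and writes
# them to testing_summary.json. A toy, self-contained sketch of that final step is
# shown below; the numbers are made up, and only the key names mirror the code above.
import json
from collections import OrderedDict

def write_testing_summary(path, num_fully_valid, num_rendered, collection_size, bug_buckets):
    summary = OrderedDict()
    summary['final_spec_coverage'] = f"{num_fully_valid} / {collection_size}"
    summary['rendered_requests'] = f"{num_rendered} / {collection_size}"
    summary['bug_buckets'] = bug_buckets
    with open(path, "w+") as summary_json:
        json.dump(summary, summary_json, indent=4)

# Example (not executed here to avoid creating files):
# write_testing_summary("testing_summary.json", num_fully_valid=7,
#                       num_rendered=15, collection_size=20, bug_buckets=2)
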
    def _exec_request_with_new_body(self,
                                    request,
                                    body_blocks,
                                    tracker,
                                    valid_is_violation=False):
        """ Render and send the new request and analyze the response

        @param request: Seed request
        @type  request: Request
        @param body_blocks: Definition (request blocks) of the new body
        @type  body_blocks: List
        @param tracker: Response tracker for this run
        @type  tracker: ResponseTracker
        @param valid_is_violation: If valid response is violation
        @type  valid_is_violation: Bool

        @return: None
        @rtype:  None

        """
        # substitute the new body into the original request
        new_request = substitute_body(request, body_blocks)

        seq = copy(self._sequence)
        cnt = 0

        # iterate through different value combinations
        for rendered_data, parser in new_request.render_iter(
                self._req_collection.candidate_values_pool):
            # check time budget
            if Monitor().remaining_time_budget <= 0:
                raise TimeOutException('Exceed Timeout')

            # stop fuzzing when reaching the bound
            if cnt > int(Settings().max_combinations):
                break
            cnt += 1

            # stop fuzzing when reaching the global bound
            if self._global_bound > 0 and self._global_count > self._global_bound:
                break
            self._global_count += 1

            # refresh the sequence to make sure the resource is not garbage collected
            if self._refresh_req:
                seq = self._refresh(request)

            # render the data
            rendered_data = seq.resolve_dependencies(rendered_data)

            # substitute if there is a UUID suffix
            original_rendered_data = rendered_data
            uuid4_suffix_dict = self._get_custom_payload_uuid4_suffix()
            for uuid4_suffix in uuid4_suffix_dict:
                suffix = uuid4_suffix_dict[uuid4_suffix]
                len_suffix = len(suffix)
                # need the query to partition path and body
                try:
                    partition = rendered_data.index('?')
                    if suffix in rendered_data[:partition]:
                        new_val_start = rendered_data[:partition].index(suffix)
                        if new_val_start + len_suffix + 10 > partition:
                            self._log('unexpected uuid')
                            continue
                        new_val = rendered_data[new_val_start:new_val_start +
                                                len_suffix + 10]

                        # find all occurrences in the body
                        suffix_in_body = [
                            m.start()
                            for m in re.finditer(suffix, rendered_data)
                        ][1:]
                        for si in suffix_in_body:
                            old_val = rendered_data[si:si + len_suffix + 10]
                            rendered_data = rendered_data.replace(
                                old_val, new_val)
                except Exception:
                    rendered_data = original_rendered_data

            # send out the request
            response = self._send_request(parser, rendered_data)
            request_utilities.call_response_parser(parser, response)
            self._set_refresh_req(request, response)

            if not response or not response.status_code:
                self._log('ERROR: no response received')
                continue

            # analyze response -- coverage
            tracker.process_response(response)

            if self._acc_response:
                hints = self._map_response_to_current_body_schema(response)
                for tag in hints:
                    self._response_values[tag] = hints[tag]

            # analyze response -- error
            if self._rule_violation(seq, response, valid_is_violation):
                # Append the new request to the sequence before filing the bug
                seq.replace_last_sent_request_data(rendered_data, parser,
                                                   response)
                err_seq = sequences.Sequence(seq.requests[:-1] + [new_request])
                err_seq.set_sent_requests_for_replay(
                    seq.sent_request_data_list)
                self._print_suspect_sequence(err_seq, response)

                bug_info = self._buckets.add_bug(request, rendered_data)
                if bug_info is not None:
                    error_str = bug_info[0]
                    new_body = bug_info[1]
                    log_str = f'{error_str}\n{new_body}'
                    BugBuckets.Instance().update_bug_buckets(
                        err_seq,
                        response.status_code,
                        origin=self.__class__.__name__,
                        checker_str=error_str,
                        additional_log_str=log_str)
                self._refresh_req = True
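
# The uuid4-suffix loop above copies the concrete value generated in the request
# path (a custom payload suffix plus 10 generated characters) over every later
# occurrence of the same suffix in the body, so path and body refer to the same
# resource name. A self-contained sketch of that propagation follows; the
# suffix-plus-10-characters format is taken from the slicing logic above, and the
# sample request text is made up.
import re

def propagate_uuid_suffix(rendered_data: str, suffix: str, value_len: int = 10) -> str:
    """Copy the first path occurrence of suffix+value over later body occurrences."""
    partition = rendered_data.index('?')  # the query separates path from body here
    path = rendered_data[:partition]
    if suffix not in path:
        return rendered_data
    start = path.index(suffix)
    canonical = rendered_data[start:start + len(suffix) + value_len]
    # Replace every occurrence after the first with the canonical path value.
    later = [m.start() for m in re.finditer(re.escape(suffix), rendered_data)][1:]
    for si in later:
        old_val = rendered_data[si:si + len(suffix) + value_len]
        rendered_data = rendered_data.replace(old_val, canonical)
    return rendered_data

sample = 'PUT /groups/grp-abcdefghij?api=1 {"name": "grp-0123456789"}'
assert propagate_uuid_suffix(sample, 'grp-').count('grp-abcdefghij') == 2
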
    def render(self,
               candidate_values_pool,
               lock,
               preprocessing=False,
               postprocessing=False):
        """ Core routine that performs the rendering of restler sequences. In
        principle, all requests of a sequence are constantly rendered with
        a specific value combination, @param request._current_combination_id,
        which we know in the past led to a valid rendering and only the last
        request of the sequence is being rendered iteratively with all feasible
        value combinations. Each time a "valid rendering" is found for the last
        request of the sequence (where "valid rendering" is defined according
        to "VALID_CODES"), the routine returns a new sequence which has an
        end-to-end (i.e., all requests) "valid rendering" and can be added in
        the sequences collection in order to be used in the future as a building
        block for longer sequences.


        @param candidate_values_pool: The pool of values for primitive types.
        @type candidate_values_pool: Dict
        @param lock: Lock object used to synchronize multiple concurrent fuzzing jobs.
        @type  lock: thread.Lock object
        @param preprocessing: Set to true if rendering during preprocessing
        @type  preprocessing: Bool
        @param postprocessing: Set to true if rendering during postprocessing
        @type  postprocessing: Bool

        @return: A RenderedSequence object containing the sequence, the final
                 request's response, whether or not the final request received
                 a valid status code, and a FailureInformation enum if there was
                 a failure or bug detected during rendering.
        @rtype : RenderedSequence
        """
        # Try rendering all primitive type value combinations for the last request
        request = self.last_request

        # for clarity reasons, don't log requests whose render iterator is over
        if request._current_combination_id <\
                request.num_combinations(candidate_values_pool):
            CUSTOM_LOGGING(self, candidate_values_pool)

        self._sent_request_data_list = []
        for rendered_data, parser in\
                request.render_iter(candidate_values_pool,
                                    skip=request._current_combination_id,
                                    preprocessing=preprocessing):
            # Hold the lock (because other workers may be rendering the same
            # request) and check whether the current rendering is known from the
            # past to lead to invalid status codes. If so, skip the current
            # rendering.
            if lock is not None:
                lock.acquire()
            should_skip = Monitor().is_invalid_rendering(request)
            if lock is not None:
                lock.release()

            # Skip this rendering and don't forget to increase the counter.
            if should_skip:
                RAW_LOGGING("Skipping rendering: {}".\
                            format(request._current_combination_id))
                request._current_combination_id += 1
                continue

            # Clean up internal state
            self.status_codes = []
            dependencies.reset_tlb()

            sequence_failed = False
            # Step A: Static template rendering
            # Render last known valid combination of primitive type values
            # for every request until the last
            for i in range(len(self.requests) - 1):
                prev_request = self.requests[i]
                prev_rendered_data, prev_parser =\
                    prev_request.render_current(candidate_values_pool,
                    preprocessing=preprocessing)

                # substitute reference placeholders with resolved values
                if not Settings().ignore_dependencies:
                    prev_rendered_data =\
                        self.resolve_dependencies(prev_rendered_data)

                prev_req_async_wait = Settings(
                ).get_max_async_resource_creation_time(prev_request.request_id)
                prev_producer_timing_delay = Settings(
                ).get_producer_timing_delay(prev_request.request_id)

                prev_response = request_utilities.send_request_data(
                    prev_rendered_data)
                prev_response_to_parse, resource_error, async_waited = async_request_utilities.try_async_poll(
                    prev_rendered_data, prev_response, prev_req_async_wait)
                prev_parser_threw_exception = False
                # Response may not exist if there was an error sending the request or a timeout
                if prev_parser and prev_response_to_parse:
                    prev_parser_threw_exception = not request_utilities.call_response_parser(
                        prev_parser, prev_response_to_parse, prev_request)
                prev_status_code = prev_response.status_code

                # If the async logic waited for the resource, this wait already included the required
                # producer timing delay. Here, set the producer timing delay to zero, so this wait is
                # skipped both below for this request and during replay
                if async_waited:
                    prev_producer_timing_delay = 0
                else:
                    prev_req_async_wait = 0

                self.append_data_to_sent_list(prev_rendered_data, prev_parser,
                                              prev_response,
                                              prev_producer_timing_delay,
                                              prev_req_async_wait)

                if not prev_status_code:
                    logger.write_to_main(
                        f"Error: Failed to get status code during valid sequence re-rendering.\n"
                    )
                    sequence_failed = True
                    break

                if prev_response.has_bug_code():
                    BugBuckets.Instance().update_bug_buckets(self,
                                                             prev_status_code,
                                                             reproduce=False,
                                                             lock=lock)
                    sequence_failed = True
                    break

                if prev_parser_threw_exception:
                    logger.write_to_main(
                        "Error: Parser exception occurred during valid sequence re-rendering.\n"
                    )
                    sequence_failed = True
                    break

                if resource_error:
                    logger.write_to_main(
                        "Error: The resource was left in a Failed state after creation during valid sequence re-rendering.\n"
                    )
                    sequence_failed = True
                    break

                # If the previous request is a resource generator and we did not perform an async resource
                # creation wait, then wait for the specified duration in order for the backend to have a
                # chance to create the resource.
                if prev_producer_timing_delay > 0 and prev_request.is_resource_generator(
                ):
                    print(
                        f"Pausing for {prev_producer_timing_delay} seconds, request is a generator..."
                    )
                    time.sleep(prev_producer_timing_delay)

                # register latest client/server interaction
                timestamp_micro = int(time.time() * 10**6)
                self.status_codes.append(
                    status_codes_monitor.RequestExecutionStatus(
                        timestamp_micro,
                        prev_request.hex_definition, prev_status_code,
                        prev_response.has_valid_code(), False))

            if sequence_failed:
                self.status_codes.append(
                    status_codes_monitor.RequestExecutionStatus(
                        int(time.time() * 10**6), request.hex_definition,
                        RESTLER_INVALID_CODE, False, True))
                Monitor().update_status_codes_monitor(self, self.status_codes,
                                                      lock)
                return RenderedSequence(
                    failure_info=FailureInformation.SEQUENCE)

            # Step B: Dynamic template rendering
            # substitute reference placeholders with resolved values
            # for the last request
            if not Settings().ignore_dependencies:
                rendered_data = self.resolve_dependencies(rendered_data)

            # Render candidate value combinations, seeking valid status codes
            request._current_combination_id += 1

            req_async_wait = Settings().get_max_async_resource_creation_time(
                request.request_id)

            response = request_utilities.send_request_data(rendered_data)
            response_to_parse, resource_error, _ = async_request_utilities.try_async_poll(
                rendered_data, response, req_async_wait)
            parser_exception_occurred = False
            # Response may not exist if there was an error sending the request or a timeout
            if parser and response_to_parse:
                parser_exception_occurred = not request_utilities.call_response_parser(
                    parser, response_to_parse, request)
            status_code = response.status_code
            if not status_code:
                return RenderedSequence(None)

            self.append_data_to_sent_list(rendered_data,
                                          parser,
                                          response,
                                          max_async_wait_time=req_async_wait)

            rendering_is_valid = not parser_exception_occurred\
                and not resource_error\
                and response.has_valid_code()
            # register latest client/server interaction and add to the status codes list
            response_datetime = datetime.datetime.now(datetime.timezone.utc)
            timestamp_micro = int(response_datetime.timestamp() * 10**6)

            self.status_codes.append(
                status_codes_monitor.RequestExecutionStatus(
                    timestamp_micro, request.hex_definition, status_code,
                    rendering_is_valid, False))

            # add sequence's error codes to bug buckets.
            if response.has_bug_code():
                BugBuckets.Instance().update_bug_buckets(self,
                                                         status_code,
                                                         lock=lock)

            Monitor().update_status_codes_monitor(self, self.status_codes,
                                                  lock)

            # Register current rendering's status.
            if lock is not None:
                lock.acquire()
            Monitor().update_renderings_monitor(request, rendering_is_valid)
            if lock is not None:
                lock.release()

            if Monitor().remaining_time_budget <= 0 and not postprocessing:
                raise TimeOutException("Exceeded Timeout")

            if lock is not None:
                lock.acquire()
            # Deep copying here will try copying anything the class has access
            # to, including the shared client monitor, which we update in the
            # code block above while holding the lock; but then we release the
            # lock and one thread can be updating while another is copying.
            # This is a typical nasty read-after-write synchronization bug.
            duplicate = copy.deepcopy(self)
            if lock is not None:
                lock.release()

            datetime_format = "%Y-%m-%d %H:%M:%S"
            # return a rendered clone if response indicates a valid status code
            if rendering_is_valid or Settings().ignore_feedback:
                return RenderedSequence(
                    duplicate,
                    valid=True,
                    final_request_response=response,
                    response_datetime=response_datetime.strftime(
                        datetime_format))
            else:
                information = None
                if response.has_valid_code():
                    if parser_exception_occurred:
                        information = FailureInformation.PARSER
                    elif resource_error:
                        information = FailureInformation.RESOURCE_CREATION
                elif response.has_bug_code():
                    information = FailureInformation.BUG
                return RenderedSequence(
                    duplicate,
                    valid=False,
                    failure_info=information,
                    final_request_response=response,
                    response_datetime=response_datetime.strftime(
                        datetime_format))

        return RenderedSequence(None)
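
# The comment near the end of render() explains why the deep copy of the sequence is
# taken while the lock is held: another worker may be mutating shared monitor state
# at the same time. A tiny standalone sketch of that "snapshot under the lock"
# pattern follows; SharedState and its fields are illustrative assumptions.
import copy
import threading

class SharedState:
    def __init__(self):
        self.status_codes = []

def snapshot(state: SharedState, lock: threading.Lock) -> SharedState:
    """Deep-copy shared state while holding the lock, so the copy is internally consistent."""
    with lock:
        return copy.deepcopy(state)

state, lock = SharedState(), threading.Lock()
state.status_codes.append(201)
duplicate = snapshot(state, lock)
assert duplicate.status_codes == [201] and duplicate is not state
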