Example #1
0
def print_memory_consumption(req_collection, fuzzing_monitor, fuzzing_mode,
                             generation):
    """ Prints global generation's memory consumption statistics.

    @param req_collection: The requests collection.
    @type  req_collection: RequestCollection class object.
    @param fuzzing_monitor: The global fuzzing monitor
    @type  fuzzing_monitor: FuzzingMonitor
    @param fuzzing_mode: The current fuzzing mode
    @type  fuzzing_mode: Str
    @param generation: The current sequence generation
    @type  generation: Int

    @return: None
    @rtype : None

    """
    from engine.bug_bucketing import BugBuckets
    timestamp = formatting.timestamp()
    print_memory_consumption.invocations += 1

    # Line coverage is not collected here; kept as a placeholder column so the
    # MARKER record format stays stable for downstream parsers.
    lcov = 0

    # Average number of value combinations across all requests in the grammar.
    avg_val = statistics.mean([r.num_combinations(req_collection.candidate_values_pool)\
                   for r in req_collection])
    write_to_main(
        f"MARKER, {print_memory_consumption.invocations}, "
        f"{os.path.basename(req_collection._grammar_name)[:-3]}, "
        f"{req_collection.size}, {avg_val}, {fuzzing_mode}, "
        f"{timestamp}, {lcov}, {generation}, "
        # Bug fix: terminate the MARKER record with a newline so the first
        # stats line below is not appended onto the request count.
        f"{fuzzing_monitor.num_requests_sent()['main_driver']}\n"
        f"{timestamp}: Total Creations of Dyn Objects: {dependencies.object_creations}\n"
        f"{timestamp}: Total Accesses of Dyn Objects: {dependencies.object_accesses}\n"
        f"{timestamp}: Total Requests Sent: {fuzzing_monitor.num_requests_sent()}\n"
        f"{timestamp}: Bug Buckets: {BugBuckets.Instance().num_bug_buckets()}")
Example #2
0
def print_req_collection_stats(req_collection, candidate_values_pool):
    """  Prints request collection evolution stats.

    @param req_collection: A collection of requests.
    @type  req_collection: FuzzingRequestCollection class object.
    @param candidate_values_pool: The shared global pool of candidate values
    @type  candidate_values_pool: CandidateValuesPool

    @return: None
    @rtype : None

    """
    timestamp = formatting.timestamp()
    data = f"{timestamp}: Going to fuzz a set with {req_collection.size} requests\n"

    # Compute the combination count once per request; the original code
    # recomputed num_combinations() five times per request (per-request line
    # plus mean/median/min/max), which is expensive for large grammars.
    combinations = [r.num_combinations(candidate_values_pool)
                    for r in req_collection]

    for i, val in enumerate(combinations):
        data += f"{timestamp}: Request-{i}: Value Combinations: {val}\n"

    val = statistics.mean(combinations)
    data += f"{timestamp}: Avg. Value Combinations per Request: {val}\n"

    val = statistics.median(combinations)
    data += f"{timestamp}: Median Value Combinations per Request: {val}\n"

    val = min(combinations)
    data += f"{timestamp}: Min Value Combinations per Request: {val}\n"

    val = max(combinations)
    data += f"{timestamp}: Max Value Combinations per Request: {val}\n"

    # Total producer/consumer dependency edges across the collection.
    val = sum(len(r.produces) + len(r.consumes) for r in req_collection)
    data += f"{timestamp}: Total dependencies: {val}"

    write_to_main(data)
Example #3
0
def print_request_rendering_stats(candidate_values_pool, fuzzing_requests,
                                  fuzzing_monitor, num_rendered_requests,
                                  generation, global_lock):
    """ Prints to file statistics for request renderings.

    @param candidate_values_pool: The global pool of candidate values
    @type  candidate_values_pool: CandidateValuesPool
    @param fuzzing_requests: The collection of requests to be fuzzed.
    @type  fuzzing_requests: FuzzingRequestCollection class object.
    @param fuzzing_monitor: The global monitor of the fuzzing run
    @type  fuzzing_monitor: FuzzingMonitor
    @param num_rendered_requests: Number of requests that have been rendered
                                    at least once.
    @type  num_rendered_requests: Int
    @param generation: Current generation.
    @type  generation: Int
    @param global_lock: Lock object used for sync of more than one fuzzing jobs.
    @type  global_lock: thread.Lock object

    @return: None
    @rtype : None

    """
    from engine.transport_layer.response import VALID_CODES
    from engine.transport_layer.response import RESTLER_INVALID_CODE

    # One flag per request: did it render with a "valid" status code?
    # Also count the requests that were fully valid (no resource creation
    # failures along the sequence).
    successful_requests = []
    fully_valid_count = 0
    for req in fuzzing_requests.all_requests:
        result = fuzzing_monitor.query_status_codes_monitor(
            req, VALID_CODES, [RESTLER_INVALID_CODE], global_lock)
        successful_requests.append(result.valid_code)
        if result.fully_valid:
            fully_valid_count += 1

    timestamp = formatting.timestamp()

    # Human-readable label for the special pre/post-processing generations.
    if generation == PREPROCESSING_GENERATION:
        generation_name = "Preprocessing"
    elif generation == POSTPROCESSING_GENERATION:
        generation_name = "Postprocessing"
    else:
        generation_name = f"Generation-{generation}"

    num_valid = sum(successful_requests)

    with open(REQUEST_RENDERING_LOGS, "a+") as log_file:
        # Leading empty string produces the blank separator line.
        summary = [
            "",
            f"{timestamp}: {generation_name}",
            f"{timestamp}: \tRendered requests: {num_rendered_requests} / {fuzzing_requests.size_all_requests}",
            f"{timestamp}: \tRendered requests with \"valid\" status codes: {num_valid} / {num_rendered_requests}",
            f"{timestamp}: \tRendered requests determined to be fully valid (no resource creation failures): {fully_valid_count} / {num_rendered_requests}",
        ]
        print("\n".join(summary), file=log_file)

        # If every request has already succeeded there are no failures to list.
        if num_valid == len(successful_requests):
            return

        print(f"{timestamp}: List of failing requests:", file=log_file)

        for idx, request in enumerate(fuzzing_requests):
            if successful_requests[idx]:
                continue
            if not fuzzing_monitor.is_fully_rendered_request(
                    request, global_lock):
                continue

            # NOTE(review): an empty definition aborts the whole listing
            # (original behavior preserved) rather than skipping this entry.
            if len(request.definition) == 0:
                return

            print(f"\tRequest: {idx}", file=log_file)
            format_rendering_stats_definition(request, candidate_values_pool,
                                              log_file)

        print("-------------------------\n", file=log_file)
        log_file.flush()
Example #4
0
def print_generation_stats(req_collection,
                           fuzzing_monitor,
                           global_lock,
                           final=False):
    """ Prints global generation's statistics.

    @param req_collection: The requests collection.
    @type  req_collection: RequestCollection class object.
    @param fuzzing_monitor: The global fuzzing monitor
    @type  fuzzing_monitor: FuzzingMonitor
    @param global_lock: Lock object used for sync of more than one fuzzing jobs.
    @type  global_lock: thread.Lock object
    @param final: If set to True, this is the end of the run generation stats
    @type  final: Bool

    @return: None
    @rtype : None

    """
    from engine.bug_bucketing import BugBuckets
    from engine.transport_layer.response import VALID_CODES
    from engine.transport_layer.response import RESTLER_INVALID_CODE
    timestamp = formatting.timestamp()

    # Query the status-code monitor once per request and tally:
    #   - valid_code: rendered with a "valid" status code
    #   - fully_valid: valid with no resource creation failures
    #   - sequence_failure: not rendered because a sequence re-render failed
    successful_requests = []
    num_fully_valid = 0
    num_sequence_failures = 0
    for r in req_collection:
        query_result = fuzzing_monitor.query_status_codes_monitor(
            r, VALID_CODES, [RESTLER_INVALID_CODE], global_lock)
        successful_requests.append(query_result.valid_code)
        if (query_result.fully_valid):
            num_fully_valid += 1
        if (query_result.sequence_failure):
            num_sequence_failures += 1

    sum_successful_requests = sum(successful_requests)
    num_rendered_requests = fuzzing_monitor.num_fully_rendered_requests(
        req_collection, global_lock)

    final_spec_coverage = f"{num_fully_valid} / {req_collection.size}"
    rendered_requests = f"{num_rendered_requests} / {req_collection.size}"
    rendered_requests_valid_status = f"{sum_successful_requests} / {num_rendered_requests}"
    num_invalid_by_failed_resource_creations = sum_successful_requests - num_fully_valid
    total_object_creations = dependencies.object_creations
    total_requests_sent = fuzzing_monitor.num_requests_sent()
    bug_buckets = BugBuckets.Instance().num_bug_buckets()

    write_to_main(
        f"{timestamp}: Final Swagger spec coverage: {final_spec_coverage}\n"
        f"{timestamp}: Rendered requests: {rendered_requests}\n"
        f"{timestamp}: Rendered requests with \"valid\" status codes: {rendered_requests_valid_status}\n"
        f"{timestamp}: Num fully valid requests (no resource creation failures): {num_fully_valid}\n"
        f"{timestamp}: Num requests not rendered due to invalid sequence re-renders: {num_sequence_failures}\n"
        f"{timestamp}: Num invalid requests caused by failed resource creations: {num_invalid_by_failed_resource_creations}\n"
        f"{timestamp}: Total Creations of Dyn Objects: {total_object_creations}\n"
        f"{timestamp}: Total Requests Sent: {total_requests_sent}\n"
        # Consistency fix: reuse the bug_buckets snapshot taken above instead
        # of re-querying BugBuckets, so the log and the JSON summary agree.
        f"{timestamp}: Bug Buckets: {bug_buckets}")

    if final:
        # Persist a machine-readable summary of the run; OrderedDict keeps
        # the key order stable in the emitted JSON.
        testing_summary = OrderedDict()
        testing_summary['final_spec_coverage'] = final_spec_coverage
        testing_summary['rendered_requests'] = rendered_requests
        testing_summary[
            'rendered_requests_valid_status'] = rendered_requests_valid_status
        testing_summary['num_fully_valid'] = num_fully_valid
        testing_summary['num_sequence_failures'] = num_sequence_failures
        testing_summary[
            'num_invalid_by_failed_resource_creations'] = num_invalid_by_failed_resource_creations
        testing_summary['total_object_creations'] = total_object_creations
        testing_summary['total_requests_sent'] = total_requests_sent
        testing_summary['bug_buckets'] = bug_buckets

        with open(os.path.join(LOGS_DIR, "testing_summary.json"),
                  "w+") as summary_json:
            json.dump(testing_summary, summary_json, indent=4)
Example #5
0
    def apply_destructors(self, destructors, max_aged_objects=100):
        """ Background task trying to delete evicted objects that are in
            the @self.overflowing dictionary.

        @param destructors: The Request class objects required to delete,
                                keyed by dynamic object type.
        @type  destructors: Dict
        @param max_aged_objects: Maximum number of objects we allow to age
                                    and will retry to delete (since delete
                                    is idempotent this is fine).
        @type  max_aged_objects: Int

        @return: None
        @rtype : None

        NOTE: This function is invoked without any lock since overflowing
        objects are already dead (not referenced by anything) and are just
        aging here.

        """
        if not self.overflowing:
            return

        from engine.errors import TransportLayerException
        from engine.transport_layer import messaging
        from utils.logger import raw_network_logging as RAW_LOGGING
        from utils.logger import garbage_collector_logging as CUSTOM_LOGGING
        # For each object in the overflowing area, whose destructor is
        # available, render the corresponding request, send the request,
        # and then check the status code. If the resource has been determined
        # to be removed, delete the object from the overflow area.
        # At the end keep track of only up to @param max_aged_objects
        # remaining objects.
        # (Renamed the loop variable from 'type', which shadowed the builtin.)
        for obj_type in destructors:
            destructor = destructors[obj_type]
            deleted_list = []

            if self.overflowing[obj_type]:
                CUSTOM_LOGGING("{}: Trying garbage collection of * {} * objects".\
                format(formatting.timestamp(), len(self.overflowing[obj_type])))
                CUSTOM_LOGGING(f"{obj_type}: {self.overflowing[obj_type]}")

            # Iterate in reverse to give priority to newest resources
            for value in reversed(self.overflowing[obj_type]):
                rendered_data, _ = destructor.\
                    render_current(self.req_collection.candidate_values_pool)

                # replace dynamic parameters
                fully_rendered_data = str(rendered_data)
                fully_rendered_data = fully_rendered_data.replace(
                    RDELIM + obj_type + RDELIM, value)

                if fully_rendered_data:
                    try:
                        # Establish connection to the server
                        sock = messaging.HttpSock(
                            Settings().connection_settings)
                    except TransportLayerException as error:
                        RAW_LOGGING(f"{error!s}")
                        # Abort this GC round entirely; remaining objects keep
                        # aging and will be retried on the next invocation.
                        return

                    # Send the request and receive the response
                    success, response = sock.sendRecv(fully_rendered_data)
                    if success:
                        self.monitor.increment_requests_count('gc')
                    else:
                        RAW_LOGGING(response.to_str)

                    # Check to see if the DELETE operation is complete
                    try:
                        if response.status_code in DELETED_CODES:
                            deleted_list.append(value)
                    except Exception:
                        # Best-effort: a malformed/absent status code just
                        # leaves the object to age and be retried later.
                        pass

            # Remove deleted items from the to-delete cache
            for value in deleted_list:
                self.overflowing[obj_type].remove(value)
            # Cap the retained (still-aging) objects, keeping the newest.
            self.overflowing[obj_type] = self.overflowing[obj_type][-max_aged_objects:]