def run(self):
    """ Thread entrance - periodically do garbage collection.

    @return: None
    @rtype : None

    """
    def _should_stop():
        if self._finishing:
            elapsed_time = time.time() - self._cleanup_start_time
            return elapsed_time > self._max_cleanup_time or self._cache_empty()
        else:
            return False

    from utils.logger import create_network_log
    from utils.logger import LOG_TYPE_GC
    create_network_log(LOG_TYPE_GC)

    while not _should_stop():
        # Sleep here for _interval unless the cleanup event has been set
        self._cleanup_event.wait(self._interval)
        try:
            self.do_garbage_collection()
        except Exception as error:
            error_str = f"Exception during garbage collection: {error!s}"
            print(error_str)
            from utils.logger import garbage_collector_logging as CUSTOM_LOGGING
            CUSTOM_LOGGING(error_str)
            sys.exit(-1)
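
# Illustration only: run() above follows the common "periodic worker" pattern
# built on threading.Event.wait(timeout) - sleep up to _interval seconds, wake
# early when the cleanup event is set, and stop once a finishing flag plus a
# time budget say so. The self-contained sketch below (hypothetical class, not
# part of RESTler) shows the same loop shape with only the standard library.
import threading
import time

class PeriodicWorker(threading.Thread):
    def __init__(self, interval, max_cleanup_time):
        super().__init__(daemon=True)
        self._interval = interval
        self._max_cleanup_time = max_cleanup_time
        self._cleanup_event = threading.Event()
        self._finishing = False
        self._cleanup_start_time = None

    def finish(self):
        # Ask the worker to wind down and wake it immediately.
        self._finishing = True
        self._cleanup_start_time = time.time()
        self._cleanup_event.set()

    def run(self):
        while True:
            if self._finishing and \
               time.time() - self._cleanup_start_time > self._max_cleanup_time:
                break
            # Sleep for _interval unless finish() has already set the event.
            self._cleanup_event.wait(self._interval)
            # ... do one round of periodic work here ...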
def apply_create_once_resources(fuzzing_requests):
    """ Attempts to create all of the resources in the 'create_once' endpoints.

    @param fuzzing_requests: The collection of requests to be fuzzed
    @type  fuzzing_requests: FuzzingRequestCollection

    @return: A list of destructors to use to cleanup the create_once resources
    @rtype : list(Request)

    """
    def exclude_requests(pre_reqs, post_reqs):
        # Exclude any requests that produce or destroy the create_once endpoint
        for req_i in pre_reqs:
            fuzzing_requests.exclude_preprocessing_request(req_i)
        for req_i in post_reqs:
            fuzzing_requests.exclude_postprocessing_request(req_i)

    create_once_endpoints = Settings().create_once_endpoints

    if not create_once_endpoints:
        return

    logger.create_network_log(logger.LOG_TYPE_PREPROCESSING)
    destructors = set()
    exclude_reqs = set()
    request_count = 0

    logger.write_to_main("Rendering for create-once resources:\n")
    # Iterate through each 'create_once' endpoint
    for endpoint in create_once_endpoints:
        # Verify that the endpoint exists in the request collection
        if endpoint in GrammarRequestCollection().request_id_collection:
            # The create_once resource generator
            resource_gen_req = None
            # Iterate through each of the requests that contain the create_once endpoint
            for req in GrammarRequestCollection().request_id_collection[endpoint]:
                if req not in fuzzing_requests:
                    logger.write_to_main(
                        "Warning: Create-once endpoint is not a request in the fuzzing list\n",
                        True)
                    break
                if not resource_gen_req and req.is_resource_generator():
                    resource_gen_req = req
                    # Compute the sequence necessary to create the create_once resource
                    req_list = driver.compute_request_goal_seq(
                        resource_gen_req, fuzzing_requests)
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Endpoint - {resource_gen_req.endpoint_no_dynamic_objects}")
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Hex Def - {resource_gen_req.method_endpoint_hex_definition}")
                    create_once_seq = sequences.Sequence(req_list)
                    renderings = create_once_seq.render(
                        GrammarRequestCollection().candidate_values_pool,
                        None,
                        preprocessing=True)

                    # Make sure we were able to successfully create the create_once resource
                    if not renderings.valid:
                        logger.write_to_main(
                            f"{formatting.timestamp()}: Rendering INVALID")
                        exclude_requests(exclude_reqs, destructors)
                        raise FailedToCreateResource(destructors)

                    logger.write_to_main(
                        f"{formatting.timestamp()}: Rendering VALID")
                    logger.format_rendering_stats_definition(
                        resource_gen_req,
                        GrammarRequestCollection().candidate_values_pool)
                    if Settings().in_smoke_test_mode():
                        resource_gen_req.stats.request_order = 'Preprocessing'
                        resource_gen_req.stats.valid = 1
                        resource_gen_req.stats.status_code = renderings.final_request_response.status_code
                        resource_gen_req.stats.status_text = renderings.final_request_response.status_text
                        resource_gen_req.stats.sample_request.set_request_stats(
                            renderings.sequence.sent_request_data_list[-1].rendered_data)
                        resource_gen_req.stats.sample_request.set_response_stats(
                            renderings.final_request_response,
                            renderings.final_response_datetime)

                if req.is_destructor():
                    # Add destructors to the destructor list that will be returned
                    destructors.add(req)

            # Only continue processing if a resource generator was actually found for this endpoint
            if not resource_gen_req:
                continue
            request_count += len(req_list)
            # Get the set of all dynamic object names in the endpoint
            var_names = resource_gen_req.consumes.union(resource_gen_req.produces)
            # This dictionary will map dynamic object names to the values created during
            # this preprocessing create-once step.
            dynamic_object_values = {}
            for name in var_names:
                dynamic_object_values[name] = dependencies.get_variable(name)

            # Iterate through the entire request collection, searching for requests that include
            # the create_once resource. We want to "lock" the resources in these requests with
            # the dynamic object values that were created during this preprocessing step.
            for req_i in fuzzing_requests:
                # Set the variables in any requests whose consumers were produced
                # by the create_once resource generator
                if resource_gen_req.produces & req_i.consumes:
                    req_i.set_id_values_for_create_once_dynamic_objects(
                        dynamic_object_values, renderings)
                # Exclude any requests that produce the create_once object(s)
                if resource_gen_req.produces & req_i.produces:
                    exclude_reqs.add(req_i)
        else:
            exclude_requests(exclude_reqs, destructors)
            raise InvalidCreateOnce(destructors)

    exclude_requests(exclude_reqs, destructors)

    # Reset all of the dynamic object values that were just created
    dependencies.reset_tlb()
    # Reset the garbage collector, so it doesn't delete any of the resources that were just created
    dependencies.set_saved_dynamic_objects()

    logger.print_request_rendering_stats(
        GrammarRequestCollection().candidate_values_pool,
        fuzzing_requests,
        Monitor(),
        request_count,
        logger.PREPROCESSING_GENERATION,
        None)

    # Return the list of destructors that were removed from the request collection.
    # These will be used to cleanup the create_once resources created during preprocessing.
    return list(destructors)
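
# Illustration only: the "lock" and "exclude" decisions above reduce to set
# intersections over dynamic object names. The hypothetical sketch below
# (names invented for the example, not RESTler APIs) mirrors that logic with
# plain sets: a request whose consumed objects overlap what the create-once
# generator produces gets its values pinned; a request that produces any of
# the same objects is excluded from fuzzing.
create_once_produces = {"accountId"}

requests_by_name = {
    "GET /accounts/{accountId}": {"consumes": {"accountId"}, "produces": set()},
    "PUT /accounts/{accountId}": {"consumes": set(), "produces": {"accountId"}},
    "GET /health":               {"consumes": set(), "produces": set()},
}

locked, excluded = [], []
for name, deps in requests_by_name.items():
    if create_once_produces & deps["consumes"]:
        locked.append(name)      # reuse the pre-created dynamic object values
    if create_once_produces & deps["produces"]:
        excluded.append(name)    # never fuzz producers of create-once objects

print(locked)    # ['GET /accounts/{accountId}']
print(excluded)  # ['PUT /accounts/{accountId}']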
def generate_sequences(fuzzing_requests, checkers, fuzzing_jobs=1):
    """ Implements core restler algorithm.

    @param fuzzing_requests: The collection of requests that will be fuzzed
    @type  fuzzing_requests: FuzzingRequestCollection
    @param checkers: The list of checkers to apply
    @type  checkers: list[Checker]
    @param fuzzing_jobs: Optional number of fuzzing jobs for parallel fuzzing.
                         Default value passed is one (sequential fuzzing).
    @type  fuzzing_jobs: Int

    @return: None
    @rtype : None

    """
    if not fuzzing_requests.size:
        return

    logger.create_network_log(logger.LOG_TYPE_TESTING)

    fuzzing_mode = Settings().fuzzing_mode
    max_len = Settings().max_sequence_length
    if fuzzing_mode == 'directed-smoke-test':
        return generate_sequences_directed_smoketest(fuzzing_requests, checkers)

    if fuzzing_jobs > 1:
        render = render_parallel
        global_lock = multiprocessing.Lock()
        fuzzing_pool = ThreadPool(fuzzing_jobs)
    else:
        global_lock = None
        fuzzing_pool = None
        render = render_sequential

    should_stop = False
    timeout_reached = False
    seq_collection_exhausted = False
    num_total_sequences = 0
    while not should_stop:

        seq_collection = [sequences.Sequence()]
        # Only for bfs: If any checkpoint file is available, load state of
        # latest generation. Note that it only makes sense to use checkpoints
        # for the bfs exploration method, since it is the only systemic and
        # exhaustive method.
        min_len = 0
        if fuzzing_mode == 'bfs':
            req_collection = GrammarRequestCollection()
            monitor = Monitor()
            req_collection, seq_collection, fuzzing_requests, monitor, min_len =\
                saver.load(req_collection, seq_collection, fuzzing_requests, monitor)
            requests.GlobalRequestCollection.Instance()._req_collection = req_collection
            fuzzing_monitor.FuzzingMonitor.__instance = monitor
        # Repeat external loop only for random walk
        if fuzzing_mode != 'random-walk':
            should_stop = True

        # Initialize fuzzing schedule
        fuzzing_schedule = {}
        logger.write_to_main(f"Setting fuzzing schemes: {fuzzing_mode}")
        for length in range(min_len, max_len):
            fuzzing_schedule[length] = fuzzing_mode
            # print(" - {}: {}".format(length + 1, fuzzing_schedule[length]))

        # print general request-related stats
        logger.print_req_collection_stats(
            fuzzing_requests, GrammarRequestCollection().candidate_values_pool)

        generation = 0
        for length in range(min_len, max_len):
            # we can set this without locking, since noone else writes (main
            # driver is single-threaded) and every potential worker will just
            # read-access this value.
            generation = length + 1
            fuzzing_mode = fuzzing_schedule[length]

            # extend sequences with new request templates
            seq_collection = extend(seq_collection, fuzzing_requests, global_lock)
            print(f"{formatting.timestamp()}: Generation: {generation} ")

            logger.write_to_main(
                f"{formatting.timestamp()}: Generation: {generation} / "
                f"Sequences Collection Size: {len(seq_collection)} "
                f"(After {fuzzing_schedule[length]} Extend)")

            # render templates
            try:
                seq_collection_exhausted = False
                seq_collection = render(seq_collection, fuzzing_pool, checkers, generation, global_lock)

            except TimeOutException:
                logger.write_to_main("Timed out...")
                timeout_reached = True
                seq_collection_exhausted = True
                # Increase fuzzing generation after timeout because the code
                # that does it would have never been reached. This is done so
                # the previous generation's test summary is logged correctly.
                Monitor().current_fuzzing_generation += 1

            except ExhaustSeqCollectionException:
                logger.write_to_main("Exhausted collection...")
                seq_collection = []
                seq_collection_exhausted = True

            logger.write_to_main(
                f"{formatting.timestamp()}: Generation: {generation} / "
                f"Sequences Collection Size: {len(seq_collection)} "
                f"(After {fuzzing_schedule[length]} Render)")

            # saving latest state
            saver.save(GrammarRequestCollection(), seq_collection,
                       fuzzing_requests, Monitor(), generation)

            # Print stats for iteration of the current generation
            logger.print_generation_stats(GrammarRequestCollection(), Monitor(), global_lock)

            num_total_sequences += len(seq_collection)

            logger.print_request_rendering_stats(
                GrammarRequestCollection().candidate_values_pool,
                fuzzing_requests,
                Monitor(),
                Monitor().num_fully_rendered_requests(fuzzing_requests.all_requests),
                generation,
                global_lock)

            if timeout_reached or seq_collection_exhausted:
                if timeout_reached:
                    should_stop = True
                break
        logger.write_to_main("--\n")

    if fuzzing_pool is not None:
        fuzzing_pool.close()
        fuzzing_pool.join()

    return num_total_sequences
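
# Illustration only: stripped of logging, checkpointing, checkers and
# parallelism, the generation loop above has roughly this shape (hypothetical
# extend/render helpers, ignoring the dependency constraints the real extend
# step applies): at each generation every surviving sequence is extended with
# every request template, then each extended sequence is rendered and kept
# only if it rendered validly.
def bfs_fuzz(requests, max_len, render_one):
    seq_collection = [[]]                      # start from the empty sequence
    for generation in range(1, max_len + 1):
        # Extend: append every request template to every current sequence.
        extended = [seq + [req] for seq in seq_collection for req in requests]
        # Render: keep the sequences whose final request rendered validly.
        seq_collection = [seq for seq in extended if render_one(seq)]
        if not seq_collection:
            break                              # collection exhausted
    return seq_collection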
print(f"Cannot import custom mutations: {error!s}") sys.exit(-1) # Create the directory where all the results will be saved try: logger.create_experiment_dir() except Exception as err: print(f"Failed to create logs directory: {err!s}") sys.exit(-1) if settings.no_tokens_in_logs: logger.no_tokens_in_logs() if args.replay_log: try: logger.create_network_log(logger.LOG_TYPE_REPLAY) driver.replay_sequence_from_log(args.replay_log, settings.token_refresh_cmd) print("Done playing sequence from log") sys.exit(0) except NoTokenSpecifiedException: logger.write_to_main( "Failed to play sequence from log:\n" "A valid authorization token was expected.\n" "Retry with a token refresh script in the settings file or " "update the request in the replay log with a valid authorization token.", print_to_console=True) sys.exit(-1) except Exception as error: print(f"Failed to play sequence from log:\n{error!s}") sys.exit(-1)