def _execute_start_of_sequence(self):
    """ Re-renders and sends every request of the sequence except the last one.

    @return: The sequence of predecessor requests that were sent
    @rtype : Sequence

    """
    # Copied from InvalidDynamicObjectChecker
    RAW_LOGGING("Re-rendering and sending start of sequence")
    prefix_seq = sequences.Sequence([])
    for req in self._sequence.requests[:-1]:
        prefix_seq = prefix_seq + sequences.Sequence(req)
        initial_response, response_to_parse = self._render_and_send_data(
            prefix_seq, req)

        # No response means nothing to inspect for this request.
        if not initial_response:
            continue

        # A bug may be uncovered while executing the prefix; report it.
        if initial_response.has_bug_code():
            self._print_suspect_sequence(prefix_seq, initial_response)
            BugBuckets.Instance().update_bug_buckets(
                prefix_seq,
                initial_response.status_code,
                origin=self.__class__.__name__)

        # When response accumulation is enabled, fold the hints extracted
        # from the response into the tracked response values.
        if self._acc_response:
            extracted = self._map_response_to_current_body_schema(
                response_to_parse)
            for tag, value in extracted.items():
                self._response_values[tag] = value
    return prefix_seq
def replay_sequence_from_log(replay_log_filename, token_refresh_cmd):
    """ Replays a sequence of requests from a properly formed log file

    @param replay_log_filename: The log's filename
    @type  replay_log_filename: Str
    @param token_refresh_cmd: The command to create an authorization token
    @type  token_refresh_cmd: Str

    @return: None
    @rtype : None

    """
    # Fix: open the log with a context manager so the handle is closed
    # even if parsing raises (the previous code leaked the file object).
    with open(replay_log_filename, "r") as log_file:
        file_lines = log_file.readlines()

    send_data = []
    for line in file_lines:
        line = line.strip()
        # Skip comments and empty lines
        if not line:
            continue
        if line.startswith(logger.REPLAY_REQUEST_INDICATOR):
            # Clean up the request string before continuing.
            # Fix: use exact-prefix slicing instead of str.lstrip().
            # lstrip() strips a *character set*, so it could also remove
            # leading characters belonging to the request itself.
            line = line[len(logger.REPLAY_REQUEST_INDICATOR):]
            line = line.replace('\\r', '\r')
            line = line.replace('\\n', '\n')
            if not Settings().host:
                # Extract hostname from request
                hostname = get_hostname_from_line(line)
                if hostname is None:
                    raise Exception("Host not found in request. The replay log may be corrupted.")
                Settings().set_hostname(hostname)

            # Append the request data to the list
            # None is for the parser, which does not currently run during replays.
            send_data.append(sequences.SentRequestData(line, None))
        elif line.startswith(logger.BUG_LOG_NOTIFICATION_ICON):
            # Same exact-prefix fix as above for the notification marker.
            line = line[len(logger.BUG_LOG_NOTIFICATION_ICON):]
            if line.startswith('producer_timing_delay') and send_data:
                # Add the producer timing delay to the most recent request data.
                # int() tolerates the leading whitespace left after the keyword.
                send_data[-1].producer_timing_delay = int(line[len('producer_timing_delay'):])
            if line.startswith('max_async_wait_time') and send_data:
                # Add the max async wait time to the most recent request data
                send_data[-1].max_async_wait_time = int(line[len('max_async_wait_time'):])

    sequence = sequences.Sequence()
    sequence.set_sent_requests_for_replay(send_data)
    if token_refresh_cmd:
        # Set the authorization tokens in the data
        execute_token_refresh_cmd(token_refresh_cmd)
    # Send the requests
    sequence.replay_sequence()
def _render_n_predecessor_requests(self, n_predecessors):
    """ Renders and sends the first @n_predecessors requests of the
    current sequence, growing the sent prefix one request at a time.

    @param n_predecessors: The number of predecessors to render.
    @type  n_predecessors: Int

    @return: Sequence of n predecessor requests sent to server
    @rtype : Sequence

    """
    sent_seq = sequences.Sequence([])
    for idx in range(n_predecessors):
        req = self._sequence.requests[idx]
        sent_seq = sent_seq + sequences.Sequence(req)
        response, _ = self._render_and_send_data(sent_seq, req)

        # A bug may have been uncovered while executing the prefix; report it.
        if response and response.has_bug_code():
            self._print_suspect_sequence(sent_seq, response)
            BugBuckets.Instance().update_bug_buckets(
                sent_seq,
                response.status_code,
                origin=self.__class__.__name__)
    return sent_seq
def apply(self, rendered_sequence, lock): """ Applies check for leakage rule violations. @param rendered_sequence: Object containing the rendered sequence information @type rendered_sequence: RenderedSequence @param lock: Lock object used to sync more than one fuzzing job @type lock: thread.Lock @return: None @rtype : None """ # Note that, unlike other checkers, the precondition here is failed renderings. if rendered_sequence.valid: return # Return if the sequence was never fully rendered if rendered_sequence.sequence is None: return self._sequence = rendered_sequence.sequence # We skip any sequence that contains DELETE methods so that we # keep in isolation this checker and the use-after-free checker. if self._sequence.has_destructor(): return # Type produced by the sequence. Necessary for type-checking produces = self._sequence.produces seq_produced_types = set(itertools.chain(*produces)) # Target the types produced by the last request of the sequence. target_types = produces[-1] for target_type in target_types: self._checker_log.checker_print(f"\nTarget type: {target_type}") # Iterate through each request that has a matching request ID to the the final # request in the sequence, which is the request that will be checked for leakage. for req in self._req_collection.request_id_collection[self._sequence.last_request.request_id]: # Skip requests that are not consumers or don't type-check. if not req.consumes\ or req.consumes - seq_produced_types\ or target_type not in req.consumes: continue self._set_dynamic_variables(self._sequence.sent_request_data_list[-1].rendered_data, req) self._render_consumer_request(self._sequence + sequences.Sequence(req)) if self._mode != 'exhaustive': break
def _render_last_request(self, new_seq):
    """ Renders the last request of the sequence and inspects the status
    code of the response. Any 20x response here probably indicates a bug.

    @param new_seq: The new sequence that was rendered with this checker
    @type  new_seq: Sequence

    @return: None
    @rtype : None

    """
    last_req = self._sequence.last_request
    new_seq = new_seq + sequences.Sequence(last_req)
    response, _ = self._render_and_send_data(new_seq, last_req)

    # Nothing to report without a response that violates the rule.
    if not response:
        return
    if self._rule_violation(new_seq, response):
        self._print_suspect_sequence(new_seq, response)
        BugBuckets.Instance().update_bug_buckets(
            new_seq,
            response.status_code,
            origin=self.__class__.__name__)
def _use_after_free(self, destructed_types):
    """ Tries to access a deleted dynamic object. "Accessing" means applying
    any request, defined in the request collection, that consumes an object
    of the deleted type hierarchy.

    @param destructed_types: Ordered set of the hierarchy of dynamic object
            types the current request will need in order to destruct
            (probably) an object of the last object type.
    @type  destructed_types: Set

    @return: None
    @rtype : None

    """
    destructor = self._sequence.last_request

    # Collect every consumer request -- except the destructor itself --
    # whose consumed hierarchy matches the one that was just deleted.
    consumers = [
        copy.copy(req)
        for req in self._fuzzing_requests
        if req.hex_definition != destructor.hex_definition
        and req.consumes == destructed_types
    ]

    self._checker_log.checker_print("Found * {} * consumers.".\
    format(len(consumers)))

    # Try any consumer of the deleted types.
    for consumer in consumers:
        # Try accessing deleted objects.
        self._render_last_request(self._sequence + sequences.Sequence(consumer))
        # One consumer is OK -- to save us some time
        if self._mode != 'exhaustive':
            break
def apply_create_once_resources(fuzzing_requests):
    """ Attempts to create all of the resources in the 'create_once' endpoints.

    @param fuzzing_requests: The collection of requests to be fuzzed
    @type  fuzzing_requests: FuzzingRequestCollection

    @return: A list of destructors to use to cleanup the create_once resources
    @rtype : list(Request)

    @raises FailedToCreateResource: If a create_once sequence fails to render.
    @raises InvalidCreateOnce: If a create_once endpoint is not in the
            request collection. Both exceptions carry the destructors
            collected so far so the caller can clean up.

    """
    def exclude_requests(pre_reqs, post_reqs):
        # Exclude any requests that produce or destroy the create_once endpoint
        for req_i in pre_reqs:
            fuzzing_requests.exclude_preprocessing_request(req_i)
        for req_i in post_reqs:
            fuzzing_requests.exclude_postprocessing_request(req_i)

    create_once_endpoints = Settings().create_once_endpoints

    if not create_once_endpoints:
        return

    logger.create_network_log(logger.LOG_TYPE_PREPROCESSING)
    destructors = set()
    exclude_reqs = set()
    request_count = 0

    logger.write_to_main("Rendering for create-once resources:\n")
    # Iterate through each 'create_once' endpoint
    for endpoint in create_once_endpoints:
        # Verify that the endpoint exists in the request collection
        if endpoint in GrammarRequestCollection().request_id_collection:
            # The create_once resource generator
            resource_gen_req = None
            # Iterate through each of the requests that contain the create_once endpoint
            for req in GrammarRequestCollection(
            ).request_id_collection[endpoint]:
                if req not in fuzzing_requests:
                    logger.write_to_main(
                        "Warning: Create-once endpoint is not a request in the fuzzing list\n",
                        True)
                    break
                # Only the first resource generator found is rendered here.
                if not resource_gen_req and req.is_resource_generator():
                    resource_gen_req = req
                    # Compute the sequence necessary to create the create_once resource
                    req_list = driver.compute_request_goal_seq(
                        resource_gen_req, fuzzing_requests)
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Endpoint - {resource_gen_req.endpoint_no_dynamic_objects}"
                    )
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Hex Def - {resource_gen_req.method_endpoint_hex_definition}"
                    )
                    create_once_seq = sequences.Sequence(req_list)
                    renderings = create_once_seq.render(
                        GrammarRequestCollection().candidate_values_pool,
                        None,
                        preprocessing=True)

                    # Make sure we were able to successfully create the create_once resource
                    if not renderings.valid:
                        logger.write_to_main(
                            f"{formatting.timestamp()}: Rendering INVALID")
                        exclude_requests(exclude_reqs, destructors)
                        raise FailedToCreateResource(destructors)

                    logger.write_to_main(
                        f"{formatting.timestamp()}: Rendering VALID")
                    logger.format_rendering_stats_definition(
                        resource_gen_req,
                        GrammarRequestCollection().candidate_values_pool)
                    if Settings().in_smoke_test_mode():
                        # Record the stats of the successful creation request.
                        resource_gen_req.stats.request_order = 'Preprocessing'
                        resource_gen_req.stats.valid = 1
                        resource_gen_req.stats.status_code = renderings.final_request_response.status_code
                        resource_gen_req.stats.status_text = renderings.final_request_response.status_text
                        resource_gen_req.stats.sample_request.set_request_stats(
                            renderings.sequence.sent_request_data_list[-1].
                            rendered_data)
                        resource_gen_req.stats.sample_request.set_response_stats(
                            renderings.final_request_response,
                            renderings.final_response_datetime)

                if req.is_destructor():
                    # Add destructors to the destructor list that will be returned
                    destructors.add(req)

            # Only continue processing if a resource generator was actually found for this endpoint
            if not resource_gen_req:
                continue
            request_count += len(req_list)
            # Get the set of all dynamic object names in the endpoint
            var_names = resource_gen_req.consumes.union(
                resource_gen_req.produces)
            # This dictionary will map dynamic object names to the values created during
            # this preprocessing create-once step.
            dynamic_object_values = {}
            for name in var_names:
                dynamic_object_values[name] = dependencies.get_variable(name)

            # Iterate through the entire request collection, searching for requests that include
            # the create_once resource. We want to "lock" the resources in these requests with
            # the dynamic object values that were created during this preprocessing step.
            for req_i in fuzzing_requests:
                # Set the variables in any requests whose consumers were produced
                # by the create_once resource generator
                if resource_gen_req.produces & req_i.consumes:
                    req_i.set_id_values_for_create_once_dynamic_objects(
                        dynamic_object_values, renderings)
                # Exclude any requests that produce the create_once object(s)
                if resource_gen_req.produces & req_i.produces:
                    exclude_reqs.add(req_i)
        else:
            # The endpoint is unknown -- abort, handing back the destructors
            # collected so far for cleanup.
            exclude_requests(exclude_reqs, destructors)
            raise InvalidCreateOnce(destructors)

    exclude_requests(exclude_reqs, destructors)

    # Reset all of the dynamic object values that were just created
    dependencies.reset_tlb()
    # Reset the garbage collector, so it doesn't delete any of the resources that were just created
    dependencies.set_saved_dynamic_objects()

    logger.print_request_rendering_stats(
        GrammarRequestCollection().candidate_values_pool,
        fuzzing_requests,
        Monitor(),
        request_count,
        logger.PREPROCESSING_GENERATION,
        None)

    # Return the list of destructors that were removed from the request collection.
    # These will be used to cleanup the create_once resources created during preprocessing.
    return list(destructors)
def generate_sequences(fuzzing_requests, checkers, fuzzing_jobs=1):
    """ Implements core restler algorithm.

    @param fuzzing_requests: The collection of requests that will be fuzzed
    @type  fuzzing_requests: FuzzingRequestCollection
    @param checkers: The list of checkers to apply
    @type  checkers: list[Checker]
    @param fuzzing_jobs: Optional number of fuzzing jobs for parallel fuzzing.
                         Default value passed is one (sequential fuzzing).
    @type  fuzzing_jobs: Int

    @return: The total number of sequences generated (None if there was
             nothing to fuzz)
    @rtype : Int

    """
    if not fuzzing_requests.size:
        return

    logger.create_network_log(logger.LOG_TYPE_TESTING)

    fuzzing_mode = Settings().fuzzing_mode
    max_len = Settings().max_sequence_length
    # The directed smoke test has its own dedicated driver.
    if fuzzing_mode == 'directed-smoke-test':
        return generate_sequences_directed_smoketest(fuzzing_requests, checkers)

    if fuzzing_jobs > 1:
        render = render_parallel
        global_lock = multiprocessing.Lock()
        fuzzing_pool = ThreadPool(fuzzing_jobs)
    else:
        global_lock = None
        fuzzing_pool = None
        render = render_sequential

    should_stop = False
    timeout_reached = False
    seq_collection_exhausted = False
    num_total_sequences = 0
    # Outer loop: executes once for most modes; repeats only for random-walk.
    while not should_stop:

        seq_collection = [sequences.Sequence()]
        # Only for bfs: If any checkpoint file is available, load state of
        # latest generation. Note that it only makes sense to use checkpoints
        # for the bfs exploration method, since it is the only systemic and
        # exhaustive method.
        min_len = 0
        if fuzzing_mode == 'bfs':
            req_collection = GrammarRequestCollection()
            monitor = Monitor()
            req_collection, seq_collection, fuzzing_requests, monitor, min_len =\
                saver.load(req_collection, seq_collection, fuzzing_requests, monitor)
            requests.GlobalRequestCollection.Instance(
            )._req_collection = req_collection
            fuzzing_monitor.FuzzingMonitor.__instance = monitor
        # Repeat external loop only for random walk
        if fuzzing_mode != 'random-walk':
            should_stop = True

        # Initialize fuzzing schedule
        fuzzing_schedule = {}
        logger.write_to_main(f"Setting fuzzing schemes: {fuzzing_mode}")
        for length in range(min_len, max_len):
            fuzzing_schedule[length] = fuzzing_mode
            # print(" - {}: {}".format(length + 1, fuzzing_schedule[length]))

        # print general request-related stats
        logger.print_req_collection_stats(
            fuzzing_requests, GrammarRequestCollection().candidate_values_pool)

        generation = 0
        # Inner loop: one iteration per sequence length (= one generation).
        for length in range(min_len, max_len):
            # we can set this without locking, since noone else writes (main
            # driver is single-threaded) and every potential worker will just
            # read-access this value.
            generation = length + 1
            fuzzing_mode = fuzzing_schedule[length]

            # extend sequences with new request templates
            seq_collection = extend(seq_collection, fuzzing_requests,
                                    global_lock)
            print(f"{formatting.timestamp()}: Generation: {generation} ")
            logger.write_to_main(
                f"{formatting.timestamp()}: Generation: {generation} / "
                f"Sequences Collection Size: {len(seq_collection)} "
                f"(After {fuzzing_schedule[length]} Extend)")

            # render templates
            try:
                seq_collection_exhausted = False
                seq_collection = render(seq_collection, fuzzing_pool, checkers,
                                        generation, global_lock)

            except TimeOutException:
                logger.write_to_main("Timed out...")
                timeout_reached = True
                seq_collection_exhausted = True
                # Increase fuzzing generation after timeout because the code
                # that does it would have never been reached. This is done so
                # the previous generation's test summary is logged correctly.
                Monitor().current_fuzzing_generation += 1

            except ExhaustSeqCollectionException:
                logger.write_to_main("Exhausted collection...")
                seq_collection = []
                seq_collection_exhausted = True

            logger.write_to_main(
                f"{formatting.timestamp()}: Generation: {generation} / "
                f"Sequences Collection Size: {len(seq_collection)} "
                f"(After {fuzzing_schedule[length]} Render)")

            # saving latest state
            saver.save(GrammarRequestCollection(), seq_collection,
                       fuzzing_requests, Monitor(), generation)

            # Print stats for iteration of the current generation
            logger.print_generation_stats(GrammarRequestCollection(),
                                          Monitor(), global_lock)

            num_total_sequences += len(seq_collection)

            logger.print_request_rendering_stats(
                GrammarRequestCollection().candidate_values_pool,
                fuzzing_requests, Monitor(),
                Monitor().num_fully_rendered_requests(
                    fuzzing_requests.all_requests), generation, global_lock)

            if timeout_reached or seq_collection_exhausted:
                # A timeout terminates the whole run; an exhausted
                # collection only ends the current generation loop.
                if timeout_reached:
                    should_stop = True
                break
        logger.write_to_main("--\n")

    if fuzzing_pool is not None:
        fuzzing_pool.close()
        fuzzing_pool.join()

    return num_total_sequences
def extend(seq_collection, fuzzing_requests, lock):
    """ Extends each sequence currently present in collection by any request
    from request collection whose dependencies can be resolved if appended at
    the end of the target sequence.

    @param seq_collection: List of sequences in sequence collection.
    @type  seq_collection: List
    @param fuzzing_requests: The collection of requests to fuzz.
    @type  fuzzing_requests: FuzzingRequestCollection.
    @param lock: Lock object used for sync of more than one fuzzing jobs.
    @type  lock: thread.Lock object

    @return: The list of newly extended sequences.
    @rtype : List

    """
    prev_len = len(seq_collection)

    # The functions that access the monitor of renderings (e.g.,
    # "is_fully_rendered_request" and "num_fully_rendered_requests") answer
    # based on the latest _completed_ generation and the internal
    # counter that tracks the latest completed fuzzing generation is increased
    # after the end of @function render. However, inside the driver main-loop we
    # first run @function extend (since initially we start by an empty
    # sequence) and then run @function render, and thus, we need to temporarily
    # increase the generation counter in order to get a proper behaviour
    # when invoking "is_fully_rendered_request" in here after the first iteration
    # of the main-loop.
    Monitor().current_fuzzing_generation += 1

    for req in fuzzing_requests:
        # Only extend sequences from the previous generation (the first
        # prev_len entries); new sequences are appended past that point.
        for i in range(prev_len):
            seq = seq_collection[i]

            # Extend sequence collection by adding requests that have
            # valid dependencies and skip the rest
            if not validate_dependencies(req, seq)\
                    and not Settings().ignore_dependencies:
                continue

            # Copy the request so each sequence owns its own combination state.
            req_copy = copy.copy(req)
            req_copy._current_combination_id = 0
            if seq.is_empty_sequence():
                new_seq = sequences.Sequence(req_copy)
            else:
                new_seq = seq + sequences.Sequence(req_copy)

            seq_collection.append(new_seq)

            # Append each request to exactly one sequence
            if Settings().fuzzing_mode in ['bfs-fast', 'bfs-minimal']:
                break

    # See comment above... (restore the temporarily increased counter)
    Monitor().current_fuzzing_generation -= 1

    # In case of random walk, truncate sequence collection to
    # one randomly selected sequence
    if Settings().fuzzing_mode == 'random-walk':
        if len(seq_collection) > 0:
            rand_int = random.randint(prev_len, len(seq_collection) - 1)
            return seq_collection[rand_int:rand_int + 1]
        else:
            return []

    # Drop previous generation and keep current extended generation
    return seq_collection[prev_len:]
def generate_sequences_directed_smoketest(fuzzing_requests, checkers):
    """ Checks whether each request can be successfully rendered.
    For each request:
    - Constructs a sequence that satisfies all dependencies by backtracking.
    - Renders this sequence.

    This allows debugging rendering on a per-request basis
    to resolve configuration or spec issues.

    @return: The number of requests processed (one memoized sequence per request)
    @rtype : Int
    """
    def render_request(request, seq):
        """ Helper function that attempts to find a valid rendering for the request.

        The do-while loop will render each combination of the request until either
        a valid rendering is detected or all combinations have been exhausted.

        Side effects: request.stats.status_code updated
                      request.stats.status_text updated
                      request.stats updated with concrete response and request text
                      (valid request or last combination)

        @return: Tuple containing rendered sequence object, response body, and
                 failure information enum.
        @rtype : Tuple(RenderedSequence, str, FailureInformation)

        """
        response_body = None
        rendering_information = None
        while True:
            renderings = seq.render(candidate_values_pool, global_lock)
            if renderings.failure_info:
                # Even though we will be returning renderings from this function,
                # the renderings object that is returned may be from an unrendered
                # sequence. We want to save the most recent info.
                rendering_information = renderings.failure_info

            # Perform this check/save here in case the last call to seq.render
            # returns an empty 'renderings' object. An empty renderings object
            # will be returned from seq.render if all request combinations are
            # exhausted prior to getting a valid status code.
            if renderings.final_request_response:
                request.stats.status_code = renderings.final_request_response.status_code
                request.stats.status_text = renderings.final_request_response.status_text
                # Get the last rendered request.  The corresponding response should be
                # the last received response.
                request.stats.sample_request.set_request_stats(
                    renderings.sequence.sent_request_data_list[-1].
                    rendered_data)
                request.stats.sample_request.set_response_stats(
                    renderings.final_request_response,
                    renderings.final_response_datetime)
                response_body = renderings.final_request_response.body

            apply_checkers(checkers, renderings, global_lock)
            # If a valid rendering was found or the combinations have been
            # exhausted (empty rendering), exit the loop.
            if renderings.valid or renderings.sequence is None:
                return renderings, response_body, rendering_information

    global_lock = None
    candidate_values_pool = GrammarRequestCollection().candidate_values_pool

    # print general request-related stats
    logger.print_req_collection_stats(
        fuzzing_requests, GrammarRequestCollection().candidate_values_pool)

    logger.write_to_main(
        f"\n{formatting.timestamp()}: Starting directed-smoke-test\n")

    # Sort the request list prior to computing the request sequences,
    # so the prefixes are always in the same order for the algorithm
    fuzzing_request_list = list(fuzzing_requests._requests)
    fuzzing_request_list.sort(key=lambda x: x.method_endpoint_hex_definition)

    # sort the requests in fuzzing_requests by depth
    sorted_fuzzing_req_list = []
    for request in fuzzing_request_list:
        req_list = compute_request_goal_seq(request, fuzzing_request_list)
        if len(req_list) > 0:
            sorted_fuzzing_req_list.append([len(req_list), request, req_list])
        # Else an error message was printed and we skip this request

    # now sort by length (secondary sort by a hash of the request definition text)
    sorted_fuzzing_req_list.sort(
        key=lambda x: (x[0], x[1].method_endpoint_hex_definition))

    logger.write_to_main(f"{formatting.timestamp()}: Will attempt to render "
                         f"{len(sorted_fuzzing_req_list)} requests found\n")

    # the two following lists are indexed by request number and are of the same size.
    # memoize valid rendered sequences for each request and re-use those when going deeper
    valid_rendered_sequences_list = []
    # memoize the first invalid prefix for each request
    first_invalid_prefix_list = []

    # try to render all requests starting with the shallow ones
    for idx, request_triple in enumerate(sorted_fuzzing_req_list):
        req_list_length = request_triple[0]
        request = request_triple[1]
        req_list = request_triple[2]
        valid = False
        first_invalid_prefix = -1  # -1 denotes no invalid prefix by default
        request.stats.request_order = idx
        Found = False
        if (req_list_length > 1):
            # search for a valid matching prefix we can re-use;
            # unless path_regex is used we should always find a match
            # because we start with shallow sequences
            req_list_prefix = req_list[:-1]
            i = 0
            while (not Found) and (i < idx):
                if sorted_fuzzing_req_list[i][2] == req_list_prefix:
                    # we found a match
                    Found = True
                    logger.write_to_main(
                        f"Found a matching prefix for request {idx} with previous request {i}"
                    )
                    request.stats.matching_prefix[
                        "id"] = sorted_fuzzing_req_list[i][
                            1].method_endpoint_hex_definition
                else:
                    # continue searching
                    i = i + 1

        rendering_information = None
        response_body = None
        if Found:
            if valid_rendered_sequences_list[i].is_empty_sequence():
                # then the current sequence will also be INVALID.
                # propagate the root-cause explaining why the prefix was invalid
                first_invalid_prefix = first_invalid_prefix_list[i]
                logger.write_to_main(
                    f"\tbut that prefix was INVALID (root = {first_invalid_prefix})\n"
                )
                request.stats.matching_prefix["valid"] = 0
                # since valid = False by default, nothing else to do here
            else:
                # re-use the previous VALID prefix
                logger.write_to_main("\tand re-using that VALID prefix\n")
                request.stats.matching_prefix["valid"] = 1
                new_seq = valid_rendered_sequences_list[i]
                req_copy = copy.copy(request)
                req_copy._current_combination_id = 0
                new_seq = new_seq + sequences.Sequence(req_copy)
                new_seq.seq_i = 0
                renderings, response_body, rendering_information = render_request(
                    request, new_seq)
                valid = renderings.valid
        else:
            logger.write_to_main(f"Rendering request {idx} from scratch\n")
            # render the sequence.
            new_seq = sequences.Sequence()
            for req in req_list:
                req_copy = copy.copy(req)
                req_copy._current_combination_id = 0
                if new_seq.is_empty_sequence():
                    new_seq = sequences.Sequence(req_copy)
                else:
                    new_seq = new_seq + sequences.Sequence(req_copy)
            new_seq.seq_i = 0
            # NOTE(review): 'req' here is the last element of req_list (the
            # loop variable), which presumably equals 'request' — confirm this
            # is intentional and not meant to be 'request'.
            renderings, response_body, rendering_information = render_request(
                req, new_seq)
            valid = renderings.valid

        logger.write_to_main(
            f"{formatting.timestamp()}: Request {idx}\n"
            f"{formatting.timestamp()}: Endpoint - {request.endpoint_no_dynamic_objects}\n"
            f"{formatting.timestamp()}: Hex Def - {request.method_endpoint_hex_definition}\n"
            f"{formatting.timestamp()}: Sequence length that satisfies dependencies: {req_list_length}"
        )

        if valid:
            logger.write_to_main(f"{formatting.timestamp()}: Rendering VALID")
            request.stats.valid = 1
            # remember this valid sequence
            valid_rendered_sequences_list.append(new_seq)
            first_invalid_prefix_list.append(first_invalid_prefix)
        else:
            logger.write_to_main(
                f"{formatting.timestamp()}: Rendering INVALID")
            request.stats.valid = 0
            request.stats.error_msg = response_body
            # remember RESTler didn't find any valid sequence with an empty request sequence
            valid_rendered_sequences_list.append(sequences.Sequence())
            if (first_invalid_prefix == -1):
                first_invalid_prefix = idx
            first_invalid_prefix_list.append(first_invalid_prefix)

        if rendering_information:
            # Translate the failure enum into a human-readable explanation.
            if rendering_information == FailureInformation.PARSER:
                msg = (
                    "This request received a VALID status code, but the parser failed.\n"
                    "Because of this, the request was set to INVALID.\n")
            elif rendering_information == FailureInformation.RESOURCE_CREATION:
                msg = (
                    "This request received a VALID status code, but the server "
                    "indicated that there was a failure when creating the resource.\n"
                )
            elif rendering_information == FailureInformation.SEQUENCE:
                msg = (
                    "This request was never rendered because the sequence failed to re-render.\n"
                    "Because of this, the request was set to INVALID.\n")
            elif rendering_information == FailureInformation.BUG:
                msg = "A bug code was received after rendering this request."
            else:
                msg = "An unknown error occurred when processing this request."
            logger.write_to_main(f"{formatting.timestamp()}: {msg}")
            request.stats.failure = rendering_information
            rendering_information = None

        logger.format_rendering_stats_definition(
            request, GrammarRequestCollection().candidate_values_pool)

    logger.print_request_rendering_stats(
        GrammarRequestCollection().candidate_values_pool, fuzzing_requests,
        Monitor(), fuzzing_requests.size_all_requests,
        Monitor().current_fuzzing_generation, global_lock)
    Monitor().current_fuzzing_generation += 1

    return len(valid_rendered_sequences_list)
def _exec_request_with_new_body(self, request, body_blocks, tracker, valid_is_violation=False):
    """ Render and send the new request and analyze the response

    @param request: Seed request
    @type  request: Request
    @param body_blocks: Definition (request blocks) of the new body
    @type  body_blocks: List
    @param tracker: Response tracker for this run
    @type  tracker: ResponseTracker
    @param valid_is_violation: If valid response is violation
    @type  valid_is_violation: Bool

    @return: None
    @rtype:  None

    """
    # substitute to the original request
    new_request = substitute_body(request, body_blocks)

    seq = copy(self._sequence)
    cnt = 0

    # iterate through different value combinations
    for rendered_data, parser in new_request.render_iter(
            self._req_collection.candidate_values_pool):
        # check time budget
        if Monitor().remaining_time_budget <= 0:
            raise TimeOutException('Exceed Timeout')

        # stop fuzzing when reaching the bound
        if cnt > int(Settings().max_combinations):
            break
        cnt += 1

        # stop fuzzing when reaching the global bound
        if self._global_bound > 0 and self._global_count > self._global_bound:
            break
        self._global_count += 1

        # refresh the sequence to make sure the resource is not garbage collected
        if self._refresh_req:
            seq = self._refresh(request)

        # render the data
        rendered_data = seq.resolve_dependencies(rendered_data)

        # substitute if there is UUID suffix
        original_rendered_data = rendered_data
        uuid4_suffix_dict = self._get_custom_payload_uuid4_suffix()
        for uuid4_suffix in uuid4_suffix_dict:
            suffix = uuid4_suffix_dict[uuid4_suffix]
            len_suffix = len(suffix)

            # need the query to partition path and body
            try:
                partition = rendered_data.index('?')
                if suffix in rendered_data[:partition]:
                    new_val_start = rendered_data[:partition].index(suffix)
                    # The 10 extra characters are the generated uuid4 value
                    # appended after the suffix; bail out if it would cross
                    # the path/query boundary.
                    if new_val_start + len_suffix + 10 > partition:
                        self._log('unexpected uuid')
                        continue

                    new_val = rendered_data[new_val_start:new_val_start +
                                            len_suffix + 10]

                    # find all occurence in the body
                    suffix_in_body = [
                        m.start()
                        for m in re.finditer(suffix, rendered_data)
                    ][1:]
                    for si in suffix_in_body:
                        old_val = rendered_data[si:si + len_suffix + 10]
                        rendered_data = rendered_data.replace(
                            old_val, new_val)
            except Exception:
                # Best-effort substitution: on any failure, fall back to the
                # unmodified rendered data.
                rendered_data = original_rendered_data

        # send out the request
        response = self._send_request(parser, rendered_data)
        request_utilities.call_response_parser(parser, response)
        self._set_refresh_req(request, response)

        if not response or not response.status_code:
            self._log('ERROR: no response received')
            continue

        # analyze response -- coverage
        tracker.process_response(response)

        if self._acc_response:
            hints = self._map_response_to_current_body_schema(response)
            for tag in hints:
                self._response_values[tag] = hints[tag]

        # analyze response -- error
        if self._rule_violation(seq, response, valid_is_violation):
            # Append the new request to the sequence before filing the bug
            seq.replace_last_sent_request_data(rendered_data, parser, response)
            err_seq = sequences.Sequence(seq.requests[:-1] + [new_request])
            err_seq.set_sent_requests_for_replay(
                seq.sent_request_data_list)
            self._print_suspect_sequence(err_seq, response)

            bug_info = self._buckets.add_bug(request, rendered_data)
            if bug_info is not None:
                error_str = bug_info[0]
                new_body = bug_info[1]
                log_str = f'{error_str}\n{new_body}'
                BugBuckets.Instance().update_bug_buckets(
                    err_seq,
                    response.status_code,
                    origin=self.__class__.__name__,
                    checker_str=error_str,
                    additional_log_str=log_str)

            # Force a sequence refresh on the next iteration, since the
            # resource state is suspect after a violation.
            self._refresh_req = True
def delete_create_once_resources(destructors, fuzzing_requests):
    """ Iterates through each destructor request and sends it to the server

    @param destructors: A list of destructor requests to send
    @type  destructors: list(Request)
    @param fuzzing_requests: The global collection of requests to fuzz
    @type  fuzzing_requests: FuzzingRequestCollection

    @return: None
    @rtype : None

    """
    if not destructors:
        return

    candidate_values_pool = GrammarRequestCollection().candidate_values_pool

    logger.write_to_main("\nRendering for create-once resource destructors:\n")

    for destructor in destructors:
        # NOTE(review): status_codes is never used in this function — confirm
        # whether it is dead code.
        status_codes = []
        try:
            logger.write_to_main(
                f"{formatting.timestamp()}: Endpoint - {destructor.endpoint_no_dynamic_objects}"
            )
            logger.write_to_main(
                f"{formatting.timestamp()}: Hex Def - {destructor.method_endpoint_hex_definition}"
            )
            seq = sequences.Sequence([destructor])
            renderings = seq.render(
                GrammarRequestCollection().candidate_values_pool,
                None,
                postprocessing=True)
            if not renderings.valid:
                logger.write_to_main(
                    f"{formatting.timestamp()}: Rendering INVALID")
            else:
                logger.write_to_main(
                    f"{formatting.timestamp()}: Rendering VALID")
            logger.format_rendering_stats_definition(
                destructor, GrammarRequestCollection().candidate_values_pool)
            if Settings().in_smoke_test_mode():
                # Record the stats of the destructor request. Accessing
                # final_request_response may raise for invalid renderings;
                # that is handled by the except block below.
                destructor.stats.request_order = 'Postprocessing'
                destructor.stats.valid = 1
                destructor.stats.status_code = renderings.final_request_response.status_code
                destructor.stats.status_text = renderings.final_request_response.status_text
                destructor.stats.sample_request.set_request_stats(
                    renderings.sequence.sent_request_data_list[-1].
                    rendered_data)
                destructor.stats.sample_request.set_response_stats(
                    renderings.final_request_response,
                    renderings.final_response_datetime)

        except Exception as error:
            # Deleting a create_once resource is best-effort: log the failure
            # and continue with the remaining destructors.
            msg = f"Failed to delete create_once resource: {error!s}"
            logger.raw_network_logging(msg)
            logger.write_to_main(msg, print_to_console=True)
            if Settings().in_smoke_test_mode():
                destructor.stats.request_order = 'Postprocessing'
                destructor.stats.valid = 0
                # NOTE(review): 'renderings' is unbound here if the exception
                # was raised before seq.render() returned — confirm this path
                # cannot raise NameError in practice.
                if renderings and renderings.final_request_response:
                    destructor.stats.status_code = renderings.final_request_response.status_code
                    destructor.stats.status_text = renderings.final_request_response.status_text
                    destructor.stats.error_msg = renderings.final_request_response.body
                    destructor.stats.sample_request.set_request_stats(
                        renderings.sequence.sent_request_data_list[-1].
                        rendered_data)
                    destructor.stats.sample_request.set_response_stats(
                        renderings.final_request_response,
                        renderings.final_response_datetime)
            pass

    Monitor().current_fuzzing_generation += 1

    logger.print_request_rendering_stats(candidate_values_pool,
                                         fuzzing_requests, Monitor(),
                                         fuzzing_requests.size_all_requests,
                                         logger.POSTPROCESSING_GENERATION,
                                         None)