def _namespace_rule(self):
    """ Try to hijack the dynamic objects consumed by the sequence and use
    them via a secondary attacker user.

    @return: None
    @rtype : None

    """
    # For the target types (target dynamic objects), get the latest
    # values which we know will exist due to the previous rendering.
    # We will later on use these old values atop a new rendering.
    hijacked_values = {}
    consumed_types = self._sequence.consumes
    consumed_types = set(itertools.chain(*consumed_types))

    # Exit the checker and do not re-render if nothing is consumed, since
    # the checker will have nothing to work on anyway.
    if not consumed_types:
        return

    # Render only the last request if not in exhaustive (expensive) mode.
    # If that last request does not consume anything, stop here.
    if self._mode != 'exhaustive' and not self._sequence.last_request.consumes:
        return

    self._render_original_sequence_start(self._sequence)

    for type in consumed_types:
        hijacked_values[type] = dependencies.get_variable(type)

    self._checker_log.checker_print(f"Hijacked values: {hijacked_values}")
    RAW_LOGGING(f"Hijacked values: {hijacked_values}")

    for i, req in enumerate(self._sequence):
        # Render only the last request if not in exhaustive (expensive) mode.
        if self._mode != 'exhaustive' and i != self._sequence.length - 1:
            continue
        # Skip requests that are not consumers.
        if not req.consumes:
            continue
        dependencies.reset_tlb()
        self._render_attacker_subsequence(req)

        # Feed hijacked values.
        for type in hijacked_values:
            dependencies.set_variable(type, hijacked_values[type])
        self._render_hijack_request(req)
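# --- Illustrative sketch (not part of the checker above) ---
# The namespace rule saves the latest values of the consumed dynamic objects,
# resets the dependency table, and re-injects the saved ("hijacked") values
# before re-rendering as the attacker. The standalone toy below mimics that
# flow with a plain dict standing in for the dependencies module; the names
# toy_tlb, save_values, and replay_values are illustrative only and do not
# exist in the codebase.

toy_tlb = {}

def save_values(consumed_types):
    """Snapshot the latest value of every consumed dynamic object type."""
    return {t: toy_tlb[t] for t in consumed_types if t in toy_tlb}

def replay_values(hijacked_values):
    """Re-inject previously captured values atop a fresh rendering."""
    toy_tlb.clear()                   # analogous to dependencies.reset_tlb()
    toy_tlb.update(hijacked_values)   # analogous to dependencies.set_variable(...)

# Example: a victim-created object id is replayed into the attacker's rendering.
toy_tlb['_order_id'] = 'order-123'
hijacked = save_values({'_order_id'})
replay_values(hijacked)
assert toy_tlb['_order_id'] == 'order-123'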
def apply_create_once_resources(fuzzing_requests):
    """ Attempts to create all of the resources in the 'create_once' endpoints.

    @param fuzzing_requests: The collection of requests to be fuzzed
    @type  fuzzing_requests: FuzzingRequestCollection

    @return: A list of destructors to use to clean up the create_once resources
    @rtype : list(Request)

    """
    def exclude_requests(pre_reqs, post_reqs):
        # Exclude any requests that produce or destroy the create_once endpoint
        for req_i in pre_reqs:
            fuzzing_requests.exclude_preprocessing_request(req_i)
        for req_i in post_reqs:
            fuzzing_requests.exclude_postprocessing_request(req_i)

    create_once_endpoints = Settings().create_once_endpoints

    if not create_once_endpoints:
        return

    logger.create_network_log(logger.LOG_TYPE_PREPROCESSING)
    destructors = set()
    exclude_reqs = set()
    request_count = 0

    logger.write_to_main("Rendering for create-once resources:\n")
    # Iterate through each 'create_once' endpoint
    for endpoint in create_once_endpoints:
        # Verify that the endpoint exists in the request collection
        if endpoint in GrammarRequestCollection().request_id_collection:
            # The create_once resource generator
            resource_gen_req = None
            # Iterate through each of the requests that contain the create_once endpoint
            for req in GrammarRequestCollection().request_id_collection[endpoint]:
                if req not in fuzzing_requests:
                    logger.write_to_main(
                        "Warning: Create-once endpoint is not a request in the fuzzing list\n",
                        True)
                    break
                if not resource_gen_req and req.is_resource_generator():
                    resource_gen_req = req
                    # Compute the sequence necessary to create the create_once resource
                    req_list = driver.compute_request_goal_seq(
                        resource_gen_req, fuzzing_requests)
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Endpoint - {resource_gen_req.endpoint_no_dynamic_objects}")
                    logger.write_to_main(
                        f"{formatting.timestamp()}: Hex Def - {resource_gen_req.method_endpoint_hex_definition}")
                    create_once_seq = sequences.Sequence(req_list)
                    renderings = create_once_seq.render(
                        GrammarRequestCollection().candidate_values_pool,
                        None,
                        preprocessing=True)

                    # Make sure we were able to successfully create the create_once resource
                    if not renderings.valid:
                        logger.write_to_main(
                            f"{formatting.timestamp()}: Rendering INVALID")
                        exclude_requests(exclude_reqs, destructors)
                        raise FailedToCreateResource(destructors)

                    logger.write_to_main(
                        f"{formatting.timestamp()}: Rendering VALID")
                    logger.format_rendering_stats_definition(
                        resource_gen_req,
                        GrammarRequestCollection().candidate_values_pool)

                    if Settings().in_smoke_test_mode():
                        resource_gen_req.stats.request_order = 'Preprocessing'
                        resource_gen_req.stats.valid = 1
                        resource_gen_req.stats.status_code = renderings.final_request_response.status_code
                        resource_gen_req.stats.status_text = renderings.final_request_response.status_text
                        resource_gen_req.stats.sample_request.set_request_stats(
                            renderings.sequence.sent_request_data_list[-1].rendered_data)
                        resource_gen_req.stats.sample_request.set_response_stats(
                            renderings.final_request_response,
                            renderings.final_response_datetime)

                if req.is_destructor():
                    # Add destructors to the destructor list that will be returned
                    destructors.add(req)

            # Only continue processing if a resource generator was actually found for this endpoint
            if not resource_gen_req:
                continue

            request_count += len(req_list)

            # Get the set of all dynamic object names in the endpoint
            var_names = resource_gen_req.consumes.union(resource_gen_req.produces)
            # This dictionary will map dynamic object names to the values created during
            # this preprocessing create-once step.
            dynamic_object_values = {}
            for name in var_names:
                dynamic_object_values[name] = dependencies.get_variable(name)

            # Iterate through the entire request collection, searching for requests that include
            # the create_once resource. We want to "lock" the resources in these requests with
            # the dynamic object values that were created during this preprocessing step.
            for req_i in fuzzing_requests:
                # Set the variables in any requests whose consumers were produced
                # by the create_once resource generator
                if resource_gen_req.produces & req_i.consumes:
                    req_i.set_id_values_for_create_once_dynamic_objects(
                        dynamic_object_values, renderings)
                # Exclude any requests that produce the create_once object(s)
                if resource_gen_req.produces & req_i.produces:
                    exclude_reqs.add(req_i)
        else:
            exclude_requests(exclude_reqs, destructors)
            raise InvalidCreateOnce(destructors)

    exclude_requests(exclude_reqs, destructors)

    # Reset all of the dynamic object values that were just created
    dependencies.reset_tlb()
    # Reset the garbage collector, so it doesn't delete any of the resources that were just created
    dependencies.set_saved_dynamic_objects()

    logger.print_request_rendering_stats(
        GrammarRequestCollection().candidate_values_pool,
        fuzzing_requests,
        Monitor(),
        request_count,
        logger.PREPROCESSING_GENERATION,
        None)

    # Return the list of destructors that were removed from the request collection.
    # These will be used to clean up the create_once resources created during preprocessing.
    return list(destructors)
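# --- Illustrative sketch (not part of preprocessing) ---
# A hedged sketch of how a caller might drive the create-once step and keep
# the returned destructors for later cleanup. The wrapper name
# run_create_once_step is hypothetical; the only assumption about the
# exceptions is what the code above shows, namely that both are constructed
# with the destructors collected so far.

def run_create_once_step(fuzzing_requests):
    try:
        # Returns the destructors for the create-once resources (or None when
        # no create_once endpoints are configured).
        return apply_create_once_resources(fuzzing_requests) or []
    except (FailedToCreateResource, InvalidCreateOnce) as error:
        # Surface the partially collected destructors so any resources that
        # were already created can still be torn down by the caller.
        partial_destructors = error.args[0] if error.args else set()
        return list(partial_destructors)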
def render(self, candidate_values_pool, lock, preprocessing=False, postprocessing=False):
    """ Core routine that performs the rendering of RESTler sequences. In
    principle, all requests of a sequence are constantly rendered with a
    specific values combination, @param request._current_combination_id,
    which we know from the past led to a valid rendering, and only the last
    request of the sequence is rendered iteratively with all feasible value
    combinations. Each time a "valid rendering" is found for the last request
    of the sequence (where "valid rendering" is defined according to
    "VALID_CODES"), the routine returns a new sequence which has an
    end-to-end (i.e., all requests) "valid rendering" and can be added to the
    sequences collection in order to be used in the future as a building
    block for longer sequences.

    @param candidate_values_pool: The pool of values for primitive types.
    @type  candidate_values_pool: Dict
    @param lock: Lock object used for synchronization when more than one fuzzing job is running.
    @type  lock: thread.Lock object
    @param preprocessing: Set to true if rendering during preprocessing
    @type  preprocessing: Bool
    @param postprocessing: Set to true if rendering during postprocessing
    @type  postprocessing: Bool

    @return: A RenderedSequence object containing the sequence, the final
             request's response, whether or not the final request received a
             valid status code, and a FailureInformation enum if there was a
             failure or bug detected during rendering.
    @rtype : RenderedSequence

    """
    # Try rendering all primitive type value combinations for the last request.
    request = self.last_request

    # For clarity reasons, don't log requests whose render iterator is over.
    if request._current_combination_id < \
            request.num_combinations(candidate_values_pool):
        CUSTOM_LOGGING(self, candidate_values_pool)

    self._sent_request_data_list = []

    for rendered_data, parser in \
            request.render_iter(candidate_values_pool,
                                skip=request._current_combination_id,
                                preprocessing=preprocessing):
        # Hold the lock (because other workers may be rendering the same
        # request) and check whether the current rendering is known from the
        # past to lead to invalid status codes. If so, skip the current
        # rendering.
        if lock is not None:
            lock.acquire()
        should_skip = Monitor().is_invalid_rendering(request)
        if lock is not None:
            lock.release()

        # Skip the loop and don't forget to increase the counter.
        if should_skip:
            RAW_LOGGING("Skipping rendering: {}".\
                        format(request._current_combination_id))
            request._current_combination_id += 1
            continue

        # Clean up internal state
        self.status_codes = []
        dependencies.reset_tlb()

        sequence_failed = False
        # Step A: Static template rendering
        # Render the last known valid combination of primitive type values
        # for every request until the last.
        for i in range(len(self.requests) - 1):
            prev_request = self.requests[i]
            prev_rendered_data, prev_parser = \
                prev_request.render_current(candidate_values_pool,
                                            preprocessing=preprocessing)

            # Substitute reference placeholders with resolved values.
            if not Settings().ignore_dependencies:
                prev_rendered_data = \
                    self.resolve_dependencies(prev_rendered_data)

            prev_req_async_wait = Settings().get_max_async_resource_creation_time(prev_request.request_id)
            prev_producer_timing_delay = Settings().get_producer_timing_delay(prev_request.request_id)

            prev_response = request_utilities.send_request_data(prev_rendered_data)
            prev_response_to_parse, resource_error, async_waited = async_request_utilities.try_async_poll(
                prev_rendered_data, prev_response, prev_req_async_wait)
            prev_parser_threw_exception = False
            # Response may not exist if there was an error sending the request or a timeout.
            if prev_parser and prev_response_to_parse:
                prev_parser_threw_exception = not request_utilities.call_response_parser(
                    prev_parser, prev_response_to_parse, prev_request)
            prev_status_code = prev_response.status_code

            # If the async logic waited for the resource, this wait already included the required
            # producer timing delay. Here, set the producer timing delay to zero, so this wait is
            # skipped both below for this request and during replay.
            if async_waited:
                prev_producer_timing_delay = 0
            else:
                prev_req_async_wait = 0

            self.append_data_to_sent_list(prev_rendered_data, prev_parser, prev_response,
                                          prev_producer_timing_delay, prev_req_async_wait)

            if not prev_status_code:
                logger.write_to_main(
                    "Error: Failed to get status code during valid sequence re-rendering.\n")
                sequence_failed = True
                break

            if prev_response.has_bug_code():
                BugBuckets.Instance().update_bug_buckets(
                    self, prev_status_code, reproduce=False, lock=lock)
                sequence_failed = True
                break

            if prev_parser_threw_exception:
                logger.write_to_main(
                    "Error: Parser exception occurred during valid sequence re-rendering.\n")
                sequence_failed = True
                break

            if resource_error:
                logger.write_to_main(
                    "Error: The resource was left in a Failed state after creation during valid sequence re-rendering.\n")
                sequence_failed = True
                break

            # If the previous request is a resource generator and we did not perform an async resource
            # creation wait, then wait for the specified duration in order for the backend to have a
            # chance to create the resource.
            if prev_producer_timing_delay > 0 and prev_request.is_resource_generator():
                print(f"Pausing for {prev_producer_timing_delay} seconds, request is a generator...")
                time.sleep(prev_producer_timing_delay)

            # Register the latest client/server interaction.
            timestamp_micro = int(time.time() * 10**6)
            self.status_codes.append(
                status_codes_monitor.RequestExecutionStatus(
                    timestamp_micro, prev_request.hex_definition,
                    prev_status_code, prev_response.has_valid_code(), False))

        if sequence_failed:
            self.status_codes.append(
                status_codes_monitor.RequestExecutionStatus(
                    int(time.time() * 10**6), request.hex_definition,
                    RESTLER_INVALID_CODE, False, True))
            Monitor().update_status_codes_monitor(self, self.status_codes, lock)
            return RenderedSequence(failure_info=FailureInformation.SEQUENCE)

        # Step B: Dynamic template rendering
        # Substitute reference placeholders with resolved values
        # for the last request.
        if not Settings().ignore_dependencies:
            rendered_data = self.resolve_dependencies(rendered_data)

        # Render candidate value combinations, seeking a valid status code.
        request._current_combination_id += 1

        req_async_wait = Settings().get_max_async_resource_creation_time(request.request_id)

        response = request_utilities.send_request_data(rendered_data)
        response_to_parse, resource_error, _ = async_request_utilities.try_async_poll(
            rendered_data, response, req_async_wait)
        parser_exception_occurred = False
        # Response may not exist if there was an error sending the request or a timeout.
        if parser and response_to_parse:
            parser_exception_occurred = not request_utilities.call_response_parser(
                parser, response_to_parse, request)
        status_code = response.status_code
        if not status_code:
            return RenderedSequence(None)

        self.append_data_to_sent_list(rendered_data, parser, response,
                                      max_async_wait_time=req_async_wait)

        rendering_is_valid = not parser_exception_occurred \
            and not resource_error \
            and response.has_valid_code()

        # Register the latest client/server interaction and add it to the status codes list.
        response_datetime = datetime.datetime.now(datetime.timezone.utc)
        timestamp_micro = int(response_datetime.timestamp() * 10**6)
        self.status_codes.append(
            status_codes_monitor.RequestExecutionStatus(
                timestamp_micro, request.hex_definition, status_code,
                rendering_is_valid, False))

        # Add the sequence's error codes to bug buckets.
        if response.has_bug_code():
            BugBuckets.Instance().update_bug_buckets(self, status_code, lock=lock)

        Monitor().update_status_codes_monitor(self, self.status_codes, lock)

        # Register the current rendering's status.
        if lock is not None:
            lock.acquire()
        Monitor().update_renderings_monitor(request, rendering_is_valid)
        if lock is not None:
            lock.release()

        if Monitor().remaining_time_budget <= 0 and not postprocessing:
            raise TimeOutException("Exceeded Timeout")

        if lock is not None:
            lock.acquire()
        # Deep copying here will try copying anything the class has access
        # to, including the shared client monitor, which we update in the
        # above code block while holding the lock, but then we release the
        # lock and one thread can be updating while another is copying.
        # This is a typical nasty read-after-write synchronization bug.
        duplicate = copy.deepcopy(self)
        if lock is not None:
            lock.release()

        datetime_format = "%Y-%m-%d %H:%M:%S"

        # Return a rendered clone if the response indicates a valid status code.
        if rendering_is_valid or Settings().ignore_feedback:
            return RenderedSequence(duplicate, valid=True,
                                    final_request_response=response,
                                    response_datetime=response_datetime.strftime(datetime_format))
        else:
            information = None
            if response.has_valid_code():
                if parser_exception_occurred:
                    information = FailureInformation.PARSER
                elif resource_error:
                    information = FailureInformation.RESOURCE_CREATION
            elif response.has_bug_code():
                information = FailureInformation.BUG
            return RenderedSequence(duplicate, valid=False, failure_info=information,
                                    final_request_response=response,
                                    response_datetime=response_datetime.strftime(datetime_format))

    return RenderedSequence(None)
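# --- Illustrative sketch (not part of Sequence.render) ---
# A minimal sketch of how a driver loop might consume render()'s return value,
# assuming seq, pool, and lock are already available. handle_valid_seq is a
# hypothetical callback; only the RenderedSequence fields used above
# (valid, failure_info, sequence) are relied upon.

def render_once_and_dispatch(seq, pool, lock, handle_valid_seq):
    renderings = seq.render(pool, lock)
    if renderings.valid:
        # The returned sequence is an end-to-end valid rendering and can be
        # used as a building block for longer sequences.
        handle_valid_seq(renderings.sequence)
    elif renderings.failure_info is not None:
        # One of PARSER / RESOURCE_CREATION / BUG / SEQUENCE, as set above.
        RAW_LOGGING(f"Rendering failed: {renderings.failure_info}")
    return renderings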
def apply(self, rendered_sequence, lock):
    """ Applies the check for resource hierarchy rule violations.

    @param rendered_sequence: Object containing the rendered sequence information
    @type  rendered_sequence: RenderedSequence
    @param lock: Lock object used to sync more than one fuzzing job
    @type  lock: thread.Lock

    @return: None
    @rtype : None

    """
    if not rendered_sequence.valid:
        return
    self._sequence = rendered_sequence.sequence

    # We skip any sequence that contains DELETE methods so that we keep
    # this checker and the use-after-free checker isolated from each other.
    if self._sequence.has_destructor():
        return

    consumes = self._sequence.consumes
    predecessors_types = consumes[:-1]
    # The last request is the victim -- our target!
    target_types = consumes[-1]
    # In the dictionary of "consumes" constraints, each request of the
    # sequence instance has its own dictionary of the dynamic variable
    # types produced by each request. We need to flatten this structure.
    predecessors_types = set(itertools.chain(*predecessors_types))

    # Skip the sequence if there are no predecessor dependencies or no
    # target objects to swap.
    if not predecessors_types.intersection(target_types) \
            or not target_types - predecessors_types:
        return

    # For the victim types (target dynamic objects), get the latest
    # values which we know will exist due to the previous rendering.
    # We will later on use these old values atop a new rendering.
    old_values = {}
    for target_type in target_types - predecessors_types:
        old_values[target_type] = dependencies.get_variable(target_type)

    # Reset the tlb of all values and re-render all predecessors up to
    # the parent's parent. This will propagate new values for all
    # dynamic objects except for those with target type. That's what we
    # want and that's why we render up to the parent's parent (i.e.,
    # up to length(seq) - 2).
    dependencies.reset_tlb()

    # Render the sequence up to before the first predecessor that produces
    # the target type. That is, if any of the types produced by the
    # request is in the target types, then do not render this
    # predecessor and stop here.
    n_predecessors = 0
    for req in self._sequence:
        if req.produces.intersection(target_types - predecessors_types):
            break
        n_predecessors += 1
    new_seq = self._render_n_predecessor_requests(n_predecessors)

    # Log some helpful info.
    self._checker_log.checker_print("\nTarget types: {}".\
                        format(target_types - predecessors_types))
    self._checker_log.checker_print(
        f"Predecessor types: {predecessors_types}")
    self._checker_log.checker_print("Clean tlb: {}".\
                        format(dependencies.tlb))

    # Before rendering the last request, substitute all target types
    # (target dynamic objects) with a value that does NOT belong to
    # the current rendering and should not (?) be accessible through
    # the new predecessors' rendering.
    for target_type in old_values:
        dependencies.set_variable(target_type, old_values[target_type])
    self._checker_log.checker_print("Polluted tlb: {}".\
                        format(dependencies.tlb))

    self._render_last_request(new_seq)
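# --- Illustrative sketch (not part of the checker above) ---
# The hierarchy check re-renders fresh predecessors (new parent objects) and
# then replays the *old* child identifiers against them. The toy example below
# only illustrates how the predecessor/target type split is computed; the
# request "consumes" sets are stand-ins, not real Request objects.

import itertools

# consumes[i] is the set of dynamic object types consumed by request i.
consumes = [set(), {'_resource_id'}, {'_resource_id', '_item_id'}]

predecessors_types = set(itertools.chain(*consumes[:-1]))   # {'_resource_id'}
target_types = consumes[-1]                                  # {'_resource_id', '_item_id'}

# Only the types consumed solely by the last request are candidates for
# being swapped with stale values from the previous rendering.
swap_candidates = target_types - predecessors_types          # {'_item_id'}
assert swap_candidates == {'_item_id'}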