def generate_selection(spec_json, selection):
    '''
    Returns a scenario object (with a top-level source_context_list entry,
    which will be removed in generate_test_file() later).

    Mutates `selection` in place: the three delivery_* entries are consumed,
    and source_context_list / subresource_policy_deliveries /
    test_description are parsed or generated.

    Raises:
        util.ShouldSkip: when an unsupported combination of source contexts
            and policy deliveries is selected.
    '''
    # Fold the three delivery_* entries into one PolicyDelivery describing
    # the policy delivery under test. `pop` reads and removes in one step
    # (the original read each key and then issued a separate `del`).
    target_policy_delivery = util.PolicyDelivery(
        selection.pop('delivery_type'), selection.pop('delivery_key'),
        selection.pop('delivery_value'))

    # Parse source context list and policy deliveries of source contexts.
    # `util.ShouldSkip()` exceptions are raised if e.g. unsuppported
    # combinations of source contexts and policy deliveries are used.
    source_context_list_scheme = spec_json['source_context_list_schema'][
        selection['source_context_list']]
    selection['source_context_list'] = [
        util.SourceContext.from_json(source_context, target_policy_delivery,
                                     spec_json['source_context_schema'])
        for source_context in source_context_list_scheme['sourceContextList']
    ]

    # Check if the subresource is supported by the innermost source context.
    innermost_source_context = selection['source_context_list'][-1]
    supported_subresource = spec_json['source_context_schema'][
        'supported_subresource'][innermost_source_context.source_context_type]
    if supported_subresource != '*':
        if selection['subresource'] not in supported_subresource:
            raise util.ShouldSkip()

    # Parse subresource policy deliveries.
    selection[
        'subresource_policy_deliveries'] = util.PolicyDelivery.list_from_json(
            source_context_list_scheme['subresourcePolicyDeliveries'],
            target_policy_delivery, spec_json['subresource_schema']
            ['supported_delivery_type'][selection['subresource']])

    # Generate per-scenario test description.
    selection['test_description'] = spec_json[
        'test_description_template'] % selection

    return selection
def generate_selection(spec_json, config, selection, spec,
                       test_html_template_basename):
    '''
    Generates one test HTML file (and, when response headers are needed, a
    sidecar `.headers` file) for a single `selection` of test parameters,
    instantiated from `test_html_template_basename`.

    Mutates `selection` in place while assembling the template substitution
    dict, then writes the generated file(s) to disk as a side effect.

    Raises:
        util.ShouldSkip: when an unsupported combination of source contexts
            and policy deliveries is selected.
    '''
    test_filename = get_test_filename(config, selection)

    # Fold the three delivery_* entries into one PolicyDelivery describing
    # the policy delivery under test.
    target_policy_delivery = util.PolicyDelivery(selection['delivery_type'],
                                                 selection['delivery_key'],
                                                 selection['delivery_value'])
    del selection['delivery_type']
    del selection['delivery_key']
    del selection['delivery_value']

    # Parse source context list and policy deliveries of source contexts.
    # `util.ShouldSkip()` exceptions are raised if e.g. unsuppported
    # combinations of source contexts and policy deliveries are used.
    source_context_list_scheme = spec_json['source_context_list_schema'][
        selection['source_context_list']]
    selection['source_context_list'] = [
        util.SourceContext.from_json(source_context, target_policy_delivery,
                                     spec_json['source_context_schema'])
        for source_context in source_context_list_scheme['sourceContextList']
    ]

    # Check if the subresource is supported by the innermost source context.
    innermost_source_context = selection['source_context_list'][-1]
    supported_subresource = spec_json['source_context_schema'][
        'supported_subresource'][innermost_source_context.source_context_type]
    if supported_subresource != '*':
        if selection['subresource'] not in supported_subresource:
            raise util.ShouldSkip()

    # Parse subresource policy deliveries.
    selection[
        'subresource_policy_deliveries'] = util.PolicyDelivery.list_from_json(
            source_context_list_scheme['subresourcePolicyDeliveries'],
            target_policy_delivery, spec_json['subresource_schema']
            ['supported_delivery_type'][selection['subresource']])

    # We process the top source context below, and do not include it in
    # `scenario` field in JavaScript.
    top_source_context = selection['source_context_list'].pop(0)
    assert (top_source_context.source_context_type == 'top')

    # Adjust the template for the test invoking JS. Indent it to look nice.
    indent = "\n" + " " * 8
    selection['scenario'] = dump_test_parameters(selection).replace(
        "\n", indent)

    selection['spec_name'] = spec['name']
    selection[
        'test_page_title'] = config.test_page_title_template % spec['title']
    selection['spec_description'] = spec['description']
    selection['spec_specification_url'] = spec['specification_url']
    selection['helper_js'] = config.helper_js
    selection['sanity_checker_js'] = config.sanity_checker_js
    selection['spec_json_js'] = config.spec_json_js

    test_headers_filename = test_filename + ".headers"
    test_directory = os.path.dirname(test_filename)

    test_html_template = util.get_template(test_html_template_basename)
    disclaimer_template = util.get_template('disclaimer.template')

    html_template_filename = os.path.join(util.template_directory,
                                          test_html_template_basename)
    generated_disclaimer = disclaimer_template \
        % {'generating_script_filename': os.path.relpath(
               sys.argv[0], util.test_root_directory),
           'html_template_filename': os.path.relpath(
               html_template_filename, util.test_root_directory)}

    # Adjust the template for the test invoking JS. Indent it to look nice.
    selection['generated_disclaimer'] = generated_disclaimer.rstrip()
    selection[
        'test_description'] = config.test_description_template % selection
    selection['test_description'] = \
        selection['test_description'].rstrip().replace("\n", "\n" + " " * 33)

    # Directory for the test files. exist_ok=True tolerates an already
    # existing directory but still reports real failures (the previous
    # `try: os.makedirs(...) except: pass` silently swallowed every error,
    # e.g. permission denied).
    os.makedirs(test_directory, exist_ok=True)

    delivery = handle_deliveries(top_source_context.policy_deliveries)

    if len(delivery['headers']) > 0:
        with open(test_headers_filename, "w") as f:
            for header in delivery['headers']:
                f.write('%s: %s\n' % (header, delivery['headers'][header]))

    selection['meta_delivery_method'] = delivery['meta']
    # Obey the lint and pretty format.
    if len(selection['meta_delivery_method']) > 0:
        selection['meta_delivery_method'] = "\n " + \
            selection['meta_delivery_method']

    # Write out the generated HTML file.
    util.write_file(test_filename, test_html_template % selection)