def __init__(self, test_instance=None, procdef=None, exporter=None,
             helasModel=None, testedFiles=None, outputPath=None):
    """ Can be overloaded to add more options if necessary. The format above
    is typically useful because we don't aim at testing all processes for
    all exporters and all models, but we choose certain combinations which
    span most possibilities. Notice that the process and model can anyway
    be recovered from the LoopAmplitude object, so they do not have to be
    specified here."""

    # Both the tested-file list and the output location are mandatory.
    if testedFiles is None:
        raise MadGraph5Error("TestedFiles must be specified in IOTest.")
    if outputPath is None:
        raise MadGraph5Error("outputPath must be specified in IOTest.")

    self.testedFiles = testedFiles
    self.test_instance = test_instance
    self.procdef = procdef
    self.helasModel = helasModel
    self.exporter_name = exporter

    # Security measure: only accept an output path inside the MG tree or
    # under /tmp/, since this location may later be wiped.
    out_str = str(outputPath)
    is_safe = (str(path.dirname(_file_path)) in out_str
               or out_str.startswith('/tmp/'))
    if not is_safe:
        raise MadGraph5Error("OutputPath must be within MG directory or in /tmp/")
    self.outputPath = outputPath
def __new__(cls, walker=None, **opts):
    """Factory class to make plugin easy.

    When instantiated via the base class VirtualWalker, dispatch to the
    concrete walker class registered under 'walker' in walker_classes_map;
    concrete subclasses are instantiated directly.
    """
    if cls is VirtualWalker:
        if walker is None:
            raise MadGraph5Error(
                "VirtualWalker called without a walker name.")
        # Fix: 'dict.has_key' was removed in Python 3; the 'in' operator
        # is equivalent and works on both Python 2 and 3.
        if walker not in walker_classes_map:
            raise MadGraph5Error("Unknown mapping walker of type '%s'." % walker)
        target_class = walker_classes_map[walker]
        return super(VirtualWalker, cls).__new__(target_class, **opts)
    else:
        return super(VirtualWalker, cls).__new__(cls, **opts)
def get_exporter_withName(self, exporter_name):
    """ Returns on demand the exporter of given nickname """
    # Both supported exporters take the exact same options; keep a single
    # dict so the two variants cannot silently drift apart (the original
    # duplicated this literal for each branch).
    common_options = {'clean': False, 'complex_mass': False,
                      'export_format': 'madloop', 'mp': True,
                      'loop_dir': _loop_file_path,
                      'cuttools_dir': _cuttools_file_path,
                      'fortran_compiler': 'gfortran',
                      'output_dependencies': 'external',
                      'SubProc_prefix': 'P',
                      'compute_color_flows': False}
    if exporter_name == 'default':
        exporter_class = loop_exporters.LoopProcessExporterFortranSA
    elif exporter_name == 'optimized':
        exporter_class = loop_exporters.LoopProcessOptimizedExporterFortranSA
    else:
        raise MadGraph5Error('Exporter with nickname '+\
                             '%s not implemented'%exporter_name)
    self.loop_exporters[exporter_name] = exporter_class(
        _proc_file_path, common_options)
    return self.loop_exporters[exporter_name]
def __init__(self, matrix_elements, python_helas_call_writer):
    """Initiate with matrix elements, helas call writer.
    Generate the process matrix element functions as strings.

    matrix_elements may be a HelasMultiProcess, a SubProcessGroup (in which
    case the diagram maps are also recorded), a HelasMatrixElementList, or
    a single HelasMatrixElement.
    """
    self.config_maps = {}
    # Normalize every accepted input type to a HelasMatrixElementList.
    # Initialize to None so an unsupported argument type reaches the
    # explicit error below instead of raising AttributeError (the original
    # left the attribute unset on fall-through).
    self.matrix_elements = None
    if isinstance(matrix_elements, helas_objects.HelasMultiProcess):
        self.matrix_elements = matrix_elements.get('matrix_elements')
    elif isinstance(matrix_elements, group_subprocs.SubProcessGroup):
        self.config_maps = matrix_elements.get('diagram_maps')
        self.matrix_elements = matrix_elements.get('matrix_elements')
    elif isinstance(matrix_elements, helas_objects.HelasMatrixElementList):
        self.matrix_elements = matrix_elements
    elif isinstance(matrix_elements, helas_objects.HelasMatrixElement):
        self.matrix_elements = helas_objects.HelasMatrixElementList(
            [matrix_elements])

    if not self.matrix_elements:
        raise MadGraph5Error("No matrix elements to export")

    self.model = self.matrix_elements[0].get('processes')[0].get('model')
    self.helas_call_writer = python_helas_call_writer

    if not isinstance(self.helas_call_writer,
                      helas_call_writers.PythonUFOHelasCallWriter):
        # Fix: 'raise Exception, msg' is Python-2-only syntax; the call
        # form below is valid on both Python 2 and 3.
        raise Exception("helas_call_writer not PythonUFOHelasCallWriter")

    self.matrix_methods = {}
def change_principal_cmd(self, name, allow_switch=True):
    """Switch the principal command interface to 'name'.

    Raises InvalidCmd when switching is disallowed, and MadGraph5Error
    when the requested interface is unknown.
    """
    old_cmd = self.current_interface
    # Nothing to do if we are already on the requested interface.
    if old_cmd == name:
        return
    if not allow_switch:
        raise InvalidCmd(
            "Command not compatible with previous command: Can not combine LO/NLO feature."
        )

    if name not in self.interface_names:
        raise MadGraph5Error('Type of interface not valid: %s' % name)
    prompt_name, cmd_class = self.interface_names[name]
    self.prompt = prompt_name + '>'
    self.cmd = cmd_class
    self.current_interface = name

    if self.interface_names[old_cmd][0] != prompt_name:
        logger.info("Switching from interface %s to %s"\
                    %(self.interface_names[old_cmd][0], prompt_name))

    # Setup the interface
    self.cmd.setup(self)
    if __debug__:
        self.debug_link_to_command()
def create_loop_pickle(self, my_proc_list, model, pickle_file, energy, \
                       chosen_runner):
    """ Create the pickle file for reference for the arguments here."""
    allowed_chosen_runners = ['ML4', 'ML5_opt', 'ML5_default']
    if chosen_runner not in allowed_chosen_runners:
        raise MadGraph5Error('The reference runner can only be in %s.'%\
                             allowed_chosen_runners)
    # Instantiate the runner matching the requested reference code.
    if chosen_runner == 'ML4':
        runner = loop_me_comparator.LoopMG4Runner()
        # Replace here the path of your ML4 installation
        runner.setup(
            '/Users/valentin/Documents/Work/aMC@NLO_v4/ML4ParrallelTest/NLOComp'
        )
    else:
        # Both ML5 variants use the same runner, differing only in the
        # optimized_output flag.
        runner = loop_me_comparator.LoopMG5Runner()
        runner.setup(_mg5_path,
                     optimized_output=(chosen_runner == 'ML5_opt'))
    self.create_pickle(my_proc_list, pickle_file, runner, ref_runner=None,
                       model=model, energy=energy)
    # Clean up the runner only if it is not ML4
    if chosen_runner != 'ML4':
        runner.cleanup()
def approach_limit(self, PS_point, structure, scaling_parameter, process): """Produce a higher multiplicity phase-space point from PS_point, according to kinematic_variables that approach the limit of structure parametrically with scaling_parameter. """ # Decompose the counterterm decomposed = structure.decompose() # The rescaling of the convolution variables is done independently of the mapping # and should therefore not be considered decomposed = [step for step in decomposed if step.name() != "F"] # Always approach the limit at the same speed base = scaling_parameter**(1. / max(len(decomposed), 1)) #base = scaling_parameter # Prepare a momentum dictionary for each mapping mom_dict = sub.bidict() for leg in process['legs']: mom_dict[leg['number']] = frozenset([ leg['number'], ]) parent_index = max(leg['number'] for leg in process['legs']) + 1 fake_ct = sub.Counterterm(process=process, momenta_dict=mom_dict) closer_PS_point = PS_point.get_copy() # Walk the hike up and down for step in decomposed: mapping = self.determine_mapping(step) all_children = frozenset([leg.n for leg in step.get_all_legs()]) recoilers = self.get_recoilers(fake_ct, exclude=all_children) # Below is a hack to recoil against the Higgs for C(1,3),C(2,4) of g g > d d~ h. 
#recoilers = [sub.SubtractionLeg(5,25,sub.SubtractionLeg.FINAL),] new_ss = sub.SingularStructure(substructures=[ step, ], legs=recoilers) if step.name() == "C": mom_dict[parent_index] = all_children elif step.name() == "S": pass else: raise MadGraph5Error("Unrecognized structure of type " + step.name()) kin_variables = {} #misc.sprint('Now doing step: %s'%str(step)) #misc.sprint('Starting PS point:\n',str(closer_PS_point)) low_PS_point, _ = mapping.map_to_lower_multiplicity( closer_PS_point, new_ss, mom_dict, None, kin_variables) #misc.sprint('Mapped down PS point:\n',str(low_PS_point)) #misc.sprint('kin_variables=',kin_variables) mapping.rescale_kinematic_variables(new_ss, mom_dict, kin_variables, base) #misc.sprint('rescaled kin_variables=',base,kin_variables) closer_PS_point, _ = mapping.map_to_higher_multiplicity( low_PS_point, new_ss, mom_dict, kin_variables) #misc.sprint('Mapped up PS point:\n',str(closer_PS_point)) #misc.sprint('kin_variables=',kin_variables) if parent_index in mom_dict.keys(): del mom_dict[parent_index] return closer_PS_point
def test_mssm_equivalence(self): """Test the UFO and MG4 MSSM model correspond to the same model """ # import UFO model mssm_path = import_ufo.find_ufo_path('MSSM_SLHA2') ufo_model = import_ufo.import_model(mssm_path) #converter = import_ufo.UFOMG5Converter(model) #ufo_model = converter.load_model() ufo_model.pass_particles_name_in_mg_default() # import MG4 model model = base_objects.Model() if not MG4DIR: raise MadGraph5Error("Please provide a valid MG/ME path with -d") v4_path = os.path.join(MG4DIR, 'models', 'mssm_v4') if not os.path.isdir(v4_path): import_ufo.import_model_from_db('mssm_v4', local_dir=True) model.set('particles', files.read_from_file( os.path.join(v4_path,'particles.dat'), import_v4.read_particles_v4)) model.set('interactions', files.read_from_file( os.path.join(v4_path,'interactions.dat'), import_v4.read_interactions_v4, model['particles'])) #model.pass_particles_name_in_mg_default() # Checking the particles for particle in model['particles']: ufo_particle = ufo_model.get("particle_dict")[particle['pdg_code']] self.check_particles(particle, ufo_particle) # Skip test below until equivalence has been created by Benj and Claude return # Checking the interactions nb_vertex = 0 ufo_vertices = [] for ufo_vertex in ufo_model['interactions']: pdg_code_ufo = [abs(part['pdg_code']) for part in ufo_vertex['particles']] int_name = [part['name'] for part in ufo_vertex['particles']] rep = (pdg_code_ufo, int_name) pdg_code_ufo.sort() ufo_vertices.append(pdg_code_ufo) mg4_vertices = [] for vertex in model['interactions']: pdg_code_mg4 = [abs(part['pdg_code']) for part in vertex['particles']] pdg_code_mg4.sort() try: ufo_vertices.remove(pdg_code_mg4) except ValueError: mg4_vertices.append(pdg_code_mg4) self.assertEqual(ufo_vertices, []) self.assertEqual(mg4_vertices, [])
def clean_output(self, IOTestManagerInstance=None):
    """ Remove the output_path if existing. Careful!"""
    out_str = str(self.outputPath)
    # Only paths inside the MG tree or under /tmp/ may be deleted.
    within_mg = str(path.dirname(_file_path)) in out_str
    in_tmp = out_str.startswith('/tmp/')
    if not (within_mg or in_tmp):
        raise MadGraph5Error("Cannot safely remove %s." % out_str)
    if path.isdir(self.outputPath):
        shutil.rmtree(self.outputPath)
def change_principal_cmd(self, name):
    """Switch the web command class according to the interface name."""
    web_interfaces = {'MadGraph': MGcmd.MadGraphCmdWeb,
                      'Loop': LoopCmd.LoopInterfaceWeb}
    try:
        self.cmd = web_interfaces[name]
    except KeyError:
        raise MadGraph5Error('Type of interface not valid')
    if __debug__:
        self.debug_link_to_command()
def do_add(self, line, *args, **opt): args = self.split_arg(line) # Check the validity of the arguments self.check_add(args) if args[0] == 'model': return self.add_model(args[1:]) elif args[0] != 'process': raise self.InvalidCmd( "The add command can only be used with process or model") else: line = ' '.join(args[1:]) proc_type = self.extract_process_type(line) if proc_type[1] not in ['real', 'LOonly']: run_interface.check_compiler(self.options, block=False) self.validate_model(proc_type[1]) #now generate the amplitudes as usual #self.options['group_subprocesses'] = 'False' collect_mirror_procs = False ignore_six_quark_processes = self.options['ignore_six_quark_processes'] if ',' in line: myprocdef, line = mg_interface.MadGraphCmd.extract_decay_chain_process( self, line) if myprocdef.are_decays_perturbed(): raise MadGraph5Error("Decay processes cannot be perturbed") else: myprocdef = mg_interface.MadGraphCmd.extract_process(self, line) self.proc_validity(myprocdef, 'aMCatNLO_%s' % proc_type[1]) # if myprocdef['perturbation_couplings']!=['QCD']: # message = ""FKS for reals only available in QCD for now, you asked %s" \ # % ', '.join(myprocdef['perturbation_couplings'])" # logger.info("%s. Checking for loop induced") # new_line = ln # # # raise self.InvalidCmd("FKS for reals only available in QCD for now, you asked %s" \ # % ', '.join(myprocdef['perturbation_couplings'])) try: self._fks_multi_proc.add( fks_base.FKSMultiProcess(myprocdef, collect_mirror_procs, ignore_six_quark_processes, OLP=self.options['OLP'])) except AttributeError: self._fks_multi_proc = fks_base.FKSMultiProcess( myprocdef, collect_mirror_procs, ignore_six_quark_processes, OLP=self.options['OLP'])
def __init__(self, main='MadGraph', *args, **opt):
    """Initialize the command interface, selecting the principal
    interface class according to 'main'."""
    # define the interface
    if main not in self.interface_names:
        raise MadGraph5Error('Type of interface not valid: %s' % main)
    prompt_name, cmd_class = self.interface_names[main]
    self.prompt = prompt_name + '>'
    self.cmd = cmd_class
    self.current_interface = main
    self.cmd.__init__(self, *args, **opt)
    # Re-assert the chosen interface after cmd.__init__ (which presumably
    # may reset this attribute).
    self.current_interface = main
def determine_mapping(cls, structure):
    """Return the mapping that handles the given singular structure:
    soft, collinear, or soft-collinear."""
    kind = structure.name()
    if kind == 'S':
        return cls.soft_map
    if kind == 'C':
        if not structure.substructures:
            return cls.collinear_map
        # Soft-collinear: exactly one bare soft substructure and one leg.
        subs = structure.substructures
        if (len(subs) == 1 and subs[0].name() == 'S'
                and not subs[0].substructures
                and len(structure.legs) == 1):
            return cls.soft_collinear_map
    raise MadGraph5Error(cls.cannot_handle_msg(structure))
def test_sm_equivalence(self): """Test the UFO and MG4 SM model correspond to the same model """ # import UFO model sm_path = import_ufo.find_ufo_path('sm') ufo_model = import_ufo.import_model(sm_path) ufo_model.pass_particles_name_in_mg_default() # import MG4 model model = base_objects.Model() v4_path = os.path.join(MG4DIR, 'models', 'sm_v4') if not os.path.isdir(v4_path): v4_path = os.path.join(MG4DIR, 'Models', 'sm') if not os.path.isdir(v4_path): raise MadGraph5Error("Please provide a valid MG/ME path with -d") model.set('particles', files.read_from_file( os.path.join(v4_path,'particles.dat'), import_v4.read_particles_v4)) model.set('interactions', files.read_from_file( os.path.join(v4_path,'interactions.dat'), import_v4.read_interactions_v4, model['particles'])) model.pass_particles_name_in_mg_default() # Checking the particles for particle in model['particles']: ufo_particle = ufo_model.get("particle_dict")[particle['pdg_code']] self.check_particles(particle, ufo_particle) # Checking the interactions nb_vertex = 0 ufo_vertices = [] for ufo_vertex in ufo_model['interactions']: pdg_code_ufo = [abs(part['pdg_code']) for part in ufo_vertex['particles']] int_name = [part['name'] for part in ufo_vertex['particles']] rep = (pdg_code_ufo, int_name) pdg_code_ufo.sort() ufo_vertices.append(pdg_code_ufo) mg4_vertices = [] for vertex in model['interactions']: pdg_code_mg4 = [abs(part['pdg_code']) for part in vertex['particles']] pdg_code_mg4.sort() try: ufo_vertices.remove(pdg_code_mg4) except ValueError: mg4_vertices.append(pdg_code_mg4) self.assertEqual(ufo_vertices, [[25,25,25,25]]) self.assertEqual(mg4_vertices, [])
def generate_subprocess_directory(self, matrix_element, dummy_helas_model,
                                  me_number):
    """Generate the Python matrix-element method for one subprocess.

    Raises MadGraph5Error when the underlying helas-call writer fails.
    """
    logger.info("Now generating Python output for %s" %
                (matrix_element.get('processes')[0].nice_string().replace(
                    'Process', 'process')))
    exporter = self.MEExporter(matrix_element, self.helas_call_writers)
    try:
        matrix_methods = exporter.get_python_matrix_methods(
            gauge_check=False)
        # Exactly one method is expected per matrix element here.
        assert (len(matrix_methods) == 1)
    # Fix: 'except E, err' is Python-2-only syntax; 'as' works on both.
    except helas_call_writers.HelasWriterError as error:
        logger.critical(error)
        # Fix: typo in original message ("when generation").
        raise MadGraph5Error(
            "Error when generating python matrix_element_methods.")
def read_template_file(cls, filename, classpath=False):
    """Open a template file and return the contents.

    'filename' is either a (directory, basename) tuple giving the location
    explicitly, or a plain basename resolved against the class template
    path (the class-private one when 'classpath' is True).
    Raises MadGraph5Error for any other argument type.
    """
    if isinstance(filename, tuple):
        file_path = filename[0]
        filename = filename[1]
    elif isinstance(filename, str):
        if classpath:
            file_path = cls.__template_path
        else:
            file_path = cls.template_path
    else:
        raise MadGraph5Error('Argument should be string or tuple.')
    # Fix: the original leaked the file handle; 'with' guarantees close.
    with open(os.path.join(file_path, filename)) as template:
        return template.read()
def prepare_parameters(self):
    """Extract the parameters from the model, and store them in the two
    lists params_indep and params_dep"""

    # Keep only dependences on alphaS, to save time in execution.
    # Fix: dict.keys() returns a view in Python 3 which has no .sort();
    # sorted() works on both Python 2 and 3.
    keys = sorted(self.model['parameters'].keys(), key=len)
    params_ext = []
    for key in keys:
        if key == ('external', ):
            params_ext += [
                p for p in self.model['parameters'][key] if p.name
            ]
        elif 'aS' in key:
            # alphaS-dependent parameters go in params_dep.
            for p in self.model['parameters'][key]:
                self.params_dep.append(
                    base_objects.ModelVariable(p.name,
                                               p.name + " = " + p.expr,
                                               p.type, p.depend))
        else:
            for p in self.model['parameters'][key]:
                if p.name == 'ZERO':
                    continue
                self.params_indep.append(
                    base_objects.ModelVariable(p.name,
                                               p.name + " = " + p.expr,
                                               p.type, p.depend))

    # For external parameters, want to read off the SLHA block code.
    # (Plain iteration replaces the original while/pop(0), which was
    # quadratic in the number of external parameters.)
    for param in params_ext:
        # Read value from the slha variable
        expression = ""
        # Complex external parameters are not supported here.
        assert param.value.imag == 0
        if len(param.lhacode) == 1:
            expression = "%s = slha.get_block_entry(\"%s\", %d, %e);" % \
                (param.name, param.lhablock.lower(), param.lhacode[0],
                 param.value.real)
        elif len(param.lhacode) == 2:
            expression = "indices[0] = %d;\nindices[1] = %d;\n" % \
                (param.lhacode[0], param.lhacode[1])
            expression += "%s = slha.get_block_entry(\"%s\", indices, %e);" \
                % (param.name, param.lhablock.lower(), param.value.real)
        else:
            raise MadGraph5Error(
                "Only support for SLHA blocks with 1 or 2 indices")
        self.params_indep.insert(
            0, base_objects.ModelVariable(param.name, expression, 'real'))
def export(self, *args, **opts):
    """Overwrite this so as to force a pythia8 type of output if the
    output mode is PY8MEs."""
    if self._export_format == 'plugin':
        # Also pass on the aloha model to the exporter (if it has been
        # computed already) so that it will be used when generating the model
        selected = self.plugin_output_format_selected
        if selected == 'Python':
            call_writer = helas_call_writers.PythonUFOHelasCallWriter(
                self._curr_model)
            self._curr_exporter = PluginExporters.PluginProcessExporterPython(
                self._export_dir, call_writer)
        elif selected == 'TF':
            call_writer = PluginExporters.UFOHelasCallWriterTF(
                self._curr_model)
            self._curr_exporter = PluginExporters.PluginProcessExporterTF(
                self._export_dir, call_writer)
        else:
            raise MadGraph5Error("A plugin output format must have been specified at this stage.")
    super(MG5aMC_PythonMEsInterface, self).export(*args, **opts)
def addIOTest(self, folderName, testName, IOtest):
    """ Add the test (folderName, testName) to class attribute all_tests. """
    if not self.need(testName=testName, folderName=folderName):
        return

    key = (folderName, testName)
    # Add this to the instance test_list
    if key not in self.instance_tests:
        self.instance_tests.append(key)

    # Add this to the global test_list, refusing any redefinition that
    # would point to a different (IOtest, manager) pair.
    if key in self.all_tests and self.all_tests[key] != (IOtest, self):
        raise MadGraph5Error("Test (%s,%s) already defined." % key)
    # We store the manager with self here too because it might have
    # variables related to its IOTests stored in it so that we will
    # give this instance back when calling IOtest.run(self).
    self.all_tests[key] = (IOtest, self)
def determine_mapping(cls, structure):
    """Select the mapping for the given singular structure, distinguishing
    initial-state from final-state (soft-)collinear configurations."""
    kind = structure.name()
    if kind == 'S':
        return cls.soft_map
    if kind == 'C' and not structure.substructures:
        # Pure collinear: pick the initial- or final-state variant.
        if structure.legs.has_initial_state_leg():
            return cls.i_collinear_map
        return cls.f_collinear_map
    if kind == 'C':
        subs = structure.substructures
        # Soft-collinear: one bare soft substructure plus a single leg.
        if (len(subs) == 1 and subs[0].name() == 'S'
                and not subs[0].substructures
                and len(structure.legs) == 1):
            if structure.legs.has_initial_state_leg():
                return cls.i_soft_collinear_map
            return cls.f_soft_collinear_map
    logger.critical("Error while processing %s" % structure)
    raise MadGraph5Error(cls.cannot_handle_msg("SingularStructure"))
def create_loop_pickle(my_proc_list, model, pickle_file, energy, \
                       chosen_runner):
    """ Create the pickle file for reference for the arguments here."""
    allowed_chosen_runners = ['ML5_opt', 'ML5_default']
    if chosen_runner not in allowed_chosen_runners:
        raise MadGraph5Error('The reference runner can only be in %s.'%\
                             allowed_chosen_runners)
    # Both supported runners are MG5 runners; they differ only in whether
    # the optimized output mode is used.
    runner = loop_me_comparator.LoopMG5Runner()
    runner.setup(_mg5_path,
                 optimized_output=(chosen_runner == 'ML5_opt'))
    create_pickle(my_proc_list, pickle_file, runner, ref_runner=None,
                  model=model, energy=energy)
    runner.cleanup()
def decompose_counterterm(cls, counterterm, counterterms):
    """Determine the sequence of elementary mappings that must be applied
    in order to approach the limit.
    """
    complete_ss = counterterm.reconstruct_complete_singular_structure()
    # Sort deterministically by the verbose string representation.
    decomposed_sss = sorted(complete_ss.decompose(),
                            key=lambda x: x.__str__(True, True, True))
    decomposed_cts = []
    for ss in decomposed_sss:
        # Look for the elementary counterterm (single, leaf node) whose
        # singular structure matches this step.
        match = None
        for ct in counterterms:
            if len(ct.nodes) != 1 or ct.nodes[0].nodes:
                continue
            if ct.nodes[0].current['singular_structure'] == ss:
                match = ct
                break
        if match is None:
            raise MadGraph5Error('Counterterm not found')
        decomposed_cts.append(match)
    return decomposed_cts
def procToFolderName(proc, sqso=None):
    """ Transform a string proc like 'u u~ > e+ e-' to a string for a folder
    name which would be uux_epem. Also adds a suffix to this name according
    to the squared split orders sqso if specified.

    Raises MadGraph5Error when a squared-order key cannot be parsed.
    """
    # Fix: the original used a mutable default argument (sqso={}); use
    # None as sentinel instead. Passing {} explicitly still works.
    if sqso is None:
        sqso = {}
    res = ''.join(proc.split(' '))
    # Replace characters that are unsafe in folder names.
    equiv_strings = [('+', 'p'), ('-', 'm'), ('~', 'x'), ('>', '_')]
    for old, new in equiv_strings:
        res = res.replace(old, new)
    if not sqso:
        return res
    # Parse keys like 'QED^2<=' into a coupling name and an operator.
    sq_order_re = re.compile(
        r"^\s*(?P<coup_name>\w*)\s*\^2\s*(?P<logical_operator>(==)|(<=)|=|>)")
    sqso_strings = {'==': '_eq_', '<=': '_le_', '=': '_le_', '>': '_gt_'}
    for coup, value in sqso.items():
        parsed = sq_order_re.match(coup)
        if parsed is None:
            raise MadGraph5Error(" Could not parse squared orders %s" % coup)
        res = res + "_%ssq%s%i" % (parsed.group('coup_name'), sqso_strings[
            parsed.group('logical_operator')], value)
    return res
def get_MadLoop_Banner(cls, style='classic', color='blue',
                       top_frame_char = '=', bottom_frame_char = '=',
                       left_frame_char = '{', right_frame_char = '}',
                       print_frame=True, side_margin = 7, up_margin = 1):
    """ Writes out MadLoop banner.

    Returns the banner as a sequence of Fortran 'write(*,*)' statements,
    framed, colorized with ANSI escape codes, and padded with margins.
    'color' may be a color name from the table below or an integer ANSI
    color code.
    """
    # ANSI foreground color codes; 'none' disables colorization.
    colors = {'black':30,'red':31,'green':32,'yellow':33,
        'blue':34,'magenta':35,'cyan':36,'lightred':91,'lightgreen':92,
        'lightyellow':93,'lightblue':94,'lightmagenta':95,'lightcyan':96,
        'white':97,'none':-1}

    if style.lower()=='random':
        color = random.choice(['blue','green','red'])

    reference = "Ref: arXiv:1103.0621v2, arXiv:1405.0301"
    version = "v%(version)s (%(date)s)"%misc.get_pkg_info()
    versionref = "%s, %s"%(version,reference)

    if style.lower() not in cls.get_style_keys()+['random']:
        raise MadGraph5Error('Incorrect style in MadLoopBanner. Must be'+\
            ' one of the following: %s'%str(cls.get_style_keys()+['random']))

    if isinstance(color, int):
        # Fix: the original formatted the builtin type 'int' ("%im"%int),
        # which raises a TypeError; the intended value is 'color' itself.
        color_start = "char(27)//'[%im" % color
        color_end = "char(27)//'[0m"
    elif color.lower() in colors:
        if color.lower() == 'none':
            color_start = ""
            color_end = ""
        else:
            color_start = "char(27)//'[%im" % colors[color.lower()]
            color_end = "char(27)//'[0m"
    else:
        # Fix: corrected "be and intenger" typo in the error message.
        raise MadGraph5Error('Incorrect color in MadLoopBanner. Must be an'+\
            ' integer or one of the following: %s'%str(list(colors.keys())))

    def format_banner(banner):
        """ Format the raw banner text to give it a frame, colors and a margin."""
        def fw(*args):
            """Fortran write line"""
            elems = []
            for arg in args:
                if arg.startswith('char('):
                    elems.append("%s'"%arg)
                    continue
                # Hard-set the single and double quotes in the text to
                # make sure it is not processed by the FileWriter.
                arg = arg.replace("'","'//char(39)//'")
                arg = arg.replace('"',"'//char(34)//'")
                if len(arg)>0:
                    elems.append("'%s'"%arg)
            return "write(*,*) %s"%("//".join(elems))

        banner_lines = banner.split('\n')
        formatted_lines = []
        # Determine the target width
        width = side_margin*2 + max(len(line) for line in banner_lines)
        if print_frame:
            width += 2
        # Print the upper frame
        if print_frame:
            formatted_lines.append(fw(" %s "%(top_frame_char*(width-2))))
        # Print the upper margin
        for i in range(up_margin):
            formatted_lines.append(fw("%(lside)s%(width)s%(rside)s"%
                {'lside':left_frame_char if print_frame else '',
                 'rside':right_frame_char if print_frame else '',
                 'width':' '*(width-2)}))
        # Now print the banner
        for line in banner_lines:
            line_elements = []
            line_elements.append((left_frame_char if print_frame else '')+
                                 ' '*side_margin)
            # Colorize the logo
            line_elements.append(color_start)
            # Make sure to write the reference in black
            found = False
            for tag in [versionref, reference, version]:
                if tag in line:
                    line_elements.extend([line[:line.index(tag)],
                        color_end, tag, color_start,
                        line[line.index(tag)+len(tag):]+
                        ' '*(width-2*(side_margin+1)-len(line))])
                    found = True
                    break
            if not found:
                line_elements.append(line+
                    ' '*(width-2*(side_margin+1)-len(line)))
            line_elements.append(color_end)
            line_elements.append(' '*side_margin+
                (right_frame_char if print_frame else ''))
            formatted_lines.append(fw(*line_elements))
        # Print the lower margin (of height equal to up margin)
        for i in range(up_margin):
            formatted_lines.append(fw("%(lside)s%(width)s%(rside)s"%
                {'lside':left_frame_char if print_frame else '',
                 'rside':right_frame_char if print_frame else '',
                 'width':' '*(width-2)}))
        # Print the lower frame
        if print_frame:
            formatted_lines.append(fw(" %s "%(bottom_frame_char*(width-2))))
        return '\n'.join(formatted_lines)

    # Now we define the raw banner text for each style:
    return format_banner(cls.get_raw_banner(style.lower())
        %{'versionref':versionref, 'ref':reference, 'version':version})
def runIOTests(self, update = False, force = 0, verbose=False, \ testKeys='instanceList'): """ Run the IOTests for this instance (defined in self.instance_tests) and compare the files of the chosen tests against the hardcoded ones stored in tests/input_files/IOTestsComparison. If you see an error in the comparison and you are sure that the newest output is correct (i.e. you understand that this modification is meant to be so). Then feel free to automatically regenerate this file with the newest version by doing ./test_manager -i U folderName/testName/fileName If update is True (meant to be used by __main__ only) then it will create/update/remove the files instead of testing them. The argument tests can be a list of tuple-keys describing the tests to cover. Otherwise it is the instance_test list. The force argument must be 10 if you do not want to monitor the modifications on the updated files. If it is 0 you will monitor all modified file and if 1 you will monitor each modified file of a given name only once. """ # First make sure that the tarball need not be untarred # Extract the tarball for hardcoded in all cases to make sure the # IOTestComparison folder is synchronized with it. if IOTestManager._compress_ref_fodler: if path.isdir(_hc_comparison_files): try: shutil.rmtree(_hc_comparison_files) except IOError: pass if path.isfile(_hc_comparison_tarball): tar = tarfile.open(_hc_comparison_tarball, mode='r:bz2') tar.extractall(path.dirname(_hc_comparison_files)) tar.close() else: raise MadGraph5Error( "Could not find the comparison tarball %s." % _hc_comparison_tarball) else: if not path.isdir(_hc_comparison_files): raise MadGraph5Error( "Could not find the comparison tarball %s." 
% _hc_comparison_tarball) # In update = True mode, we keep track of the modification to # provide summary information modifications = { 'updated': [], 'created': [], 'removed': [], 'missing': [] } # List all the names of the files for which modifications have been # reviewed at least once.The approach taken here is different than # with the list refusedFolder and refusedTest. # The key of the dictionary are the filenames and the value are string # determining the user answer for that file. reviewed_file_names = {} # Chose what test to cover if testKeys == 'instanceList': testKeys = self.instance_tests if verbose: print("\n== "+colored%(32,"Operational mode")+\ " : file %s ==\n"%(colored%(34,('UPDATE' if update else 'TESTING')))) for (folder_name, test_name) in testKeys: try: (iotest, iotestManager) = self.all_tests[(folder_name, test_name)] except KeyError: raise MadGraph5Error('Test (%s,%s) could not be found.'\ %(folder_name, test_name)) if verbose: print("Processing %s in %s" % (colored % (32, test_name), colored % (34, folder_name))) files_path = iotest.run(iotestManager) try: pass # files_path = iotest.run(iotestManager) except Exception as e: iotest.clean_output() if not verbose: raise e else: print(colored%(31," Test %s "%test_name+\ "crashed with the following error:\n %s."%str(e))) continue # First create the list of files to check as the user might be using # regular expressions. 
filesToCheck = [] # Store here the files reckognized as veto rules (with filename # starting with '-') veto_rules = [] for fname in iotest.testedFiles: # Disregard the veto rules regexp_finder = re.compile( r'^(?P<veto>-)?(?P<root_folder>.*)(\/)?\[(?P<regexp>.*)\]$' ) found = regexp_finder.search(fname) if not found is None: # folder without the final / base_path = pjoin(files_path, found.group('root_folder')) regexp = re.compile(found.group('regexp')) # In filesToCheck, we must remove the files_path/ prepended for root, dirnames, filenames in os.walk(base_path): for file in filenames: if not regexp.search(str(os.path.relpath( pjoin(root,file),base_path))) is None and \ not path.islink(pjoin(root,file)): new_target = os.path.relpath( pjoin(root, file), files_path) if found.group('veto') == '-': veto_rules.append(new_target) else: filesToCheck.append(new_target) else: fn = fname[1:] if fname.startswith('-') else fname if (not path.exists(pjoin(files_path, fn))) or path.islink( pjoin(files_path, fn)): if force in [0, 1]: answer = Cmd.timed_input(question= """The IOTest %s does not create file '%s'. Fix it! [type 'enter'] >"""\ %(test_name,fn),default="y") modifications['missing'].append( "%s/%s/%s" % (folder_name, test_name, path.basename(fname))) if verbose: print(" > [ %s ] "%(colored%(31,"MISSING"))+\ "%s/%s/%s"%(folder_name,test_name,path.basename(fname))) else: if fname.startswith('-'): veto_rules.append(fn) else: filesToCheck.append(fn) # Apply the trimming of the veto rules filesToCheck = [f for f in filesToCheck if f not in veto_rules] if update: # Remove files which are no longer used for comparison activeFiles = [self.toFileName(f) for f in filesToCheck] for file in glob.glob(pjoin(_hc_comparison_files,folder_name,\ test_name,'*')): # Ignore the .BackUp files and directories. Also ignore # a file which was previously flagged missing because it # was explicitly specified in the list of files that the # test *must* provide. 
if path.basename(file).endswith('.BackUp') or \ path.isdir(file) or \ pjoin(folder_name,test_name,path.basename(file)) in \ modifications['missing']: continue if path.basename(file) not in activeFiles: if force==0 or (force==1 and \ path.basename(file) not in list(reviewed_file_names.keys())): answer = Cmd.timed_input(question= """Obsolete ref. file %s in %s/%s detected, delete it? [y/n] >"""\ %(path.basename(file),folder_name,test_name) ,default="y") reviewed_file_names[path.basename(file)] = answer elif (force==1 and \ path.basename(file) in list(reviewed_file_names.keys())): answer = reviewed_file_names[path.basename(file)] else: answer = 'Y' if answer not in ['Y', 'y', '']: if verbose: print(" > [ %s ] "%(colored%(31,"IGNORED"))+\ "file deletion %s/%s/%s"%(folder_name,test_name, path.basename(file))) continue os.remove(file) if verbose: print(" > [ %s ] "%(colored%(31,"REMOVED"))+\ "%s/%s/%s"%(folder_name,test_name,path.basename(file))) modifications['removed'].append('/'.join( str(file).split('/')[-3:])) # Make sure it is not filtered out by the user-filter if self.filesChecked_filter != ['ALL']: new_filesToCheck = [] for file in filesToCheck: # Try if it matches any filter for filter in self.filesChecked_filter: # A regular expression if filter.endswith(']'): split = filter[:-1].split('[') # folder without the final / folder = split[0][:-1] if folder != path.dirname(pjoin(file)): continue search = re.compile('['.join(split[1:])) if not search.match(path.basename(file)) is None: new_filesToCheck.append(file) break # Just the exact filename elif filter == file: new_filesToCheck.append(file) break filesToCheck = new_filesToCheck # Now we can scan them and process them one at a time # Keep track of the folders and testNames the user did not want to # create refused_Folders = [] refused_testNames = [] for fname in filesToCheck: file_path = path.abspath(pjoin(files_path, fname)) self.assertTrue(path.isfile(file_path), 'File %s not found.' 
% str(file_path)) comparison_path = pjoin(_hc_comparison_files,\ folder_name,test_name,self.toFileName(fname)) if not update: if not os.path.isfile(comparison_path): iotest.clean_output() if not verbose: raise MadGraph5Error("Missing ref. files for test %s\n"%test_name+\ "Create them with './test_manager.py -U %s'"%test_name) continue else: print(colored % (31, 'The ref. file %s' % str( '/'.join(comparison_path.split('/')[-3:])) + ' does not exist.')) print(colored % (34, 'Consider creating it with ' + './test_manager.py -U %s' % test_name)) exit(0) goal = open(comparison_path).read() % misc.get_pkg_info() if not verbose: self.assertFileContains(open(file_path), goal) else: try: self.assertFileContains(open(file_path), goal) except AssertionError: if verbose: print(" > %s differs from the reference." % fname) else: if not path.isdir(pjoin(_hc_comparison_files, folder_name)): if force == 0: if folder_name in refused_Folders: continue answer = Cmd.timed_input( question= """New folder %s detected, create it? [y/n] >""" % folder_name, default="y") if answer not in ['Y', 'y', '']: refused_Folders.append(folder_name) if verbose: print(" > [ %s ] folder %s"\ %(colored%(31,"IGNORED"),folder_name)) continue if verbose: print(" > [ %s ] folder %s"%\ (colored%(32,"CREATED"),folder_name)) os.makedirs(pjoin(_hc_comparison_files, folder_name)) if not path.isdir( pjoin(_hc_comparison_files, folder_name, test_name)): if force == 0: if (folder_name, test_name) in refused_testNames: continue answer = Cmd.timed_input( question= """New test %s/%s detected, create it? 
[y/n] >""" % (folder_name, test_name), default="y") if answer not in ['Y', 'y', '']: refused_testNames.append( (folder_name, test_name)) if verbose: print(" > [ %s ] test %s/%s"\ %(colored%(31,"IGNORED"),folder_name,test_name)) continue if verbose: print(" > [ %s ] test %s/%s"\ %(colored%(32,"CREATED"),folder_name,test_name)) os.makedirs( pjoin(_hc_comparison_files, folder_name, test_name)) # Transform the package information to make it a template file = open(file_path, 'r') target = file.read() # So that if % appear, we cast them to %% which are not formatted. target = target.replace('%', '%%') # So that the version and date is automatically updated target = target.replace('MadGraph5_aMC@NLO v. %(version)s, %(date)s'\ %misc.get_pkg_info(), 'MadGraph5_aMC@NLO v. %(version)s, %(date)s') target = target.replace('v%(version)s (%(date)s)'\ %misc.get_pkg_info(), 'v%(version)s (%(date)s)') file.close() if os.path.isfile(comparison_path): file = open(comparison_path, 'r') existing = file.read() file.close() if existing == target: continue else: # Copying the existing reference as a backup tmp_path = pjoin(_hc_comparison_files,folder_name,\ test_name,self.toFileName(fname)+'.BackUp') if os.path.isfile(tmp_path): os.remove(tmp_path) file = open(tmp_path, 'w') file.write(target) file.close() if force==0 or (force==1 and path.basename(\ comparison_path) not in list(reviewed_file_names.keys())): text = \ """File %s in test %s/%s differs by the following (reference file first): """%(fname,folder_name,test_name) text += misc.Popen(['diff',str(comparison_path), str(tmp_path)],stdout=subprocess.PIPE).\ communicate()[0].decode('utf-8') # Remove the last newline if text[-1] == '\n': text = text[:-1] if (len(text.split('\n')) < 15): print(text) else: pydoc.pager(text) print("Difference displayed in editor.") answer = '' while answer not in ['y', 'n']: answer = Cmd.timed_input( question= """Ref. file %s differs from the new one (see diff. before), update it? 
[y/n/h/r] >""" % fname, default="y") if answer not in ['y', 'n']: if answer == 'r': pydoc.pager(text) else: print("reference path: %s" % comparison_path) print("code returns: %s" % tmp_path) os.remove(tmp_path) reviewed_file_names[path.basename(\ comparison_path)] = answer elif (force==1 and path.basename(\ comparison_path) in list(reviewed_file_names.keys())): answer = reviewed_file_names[path.basename(\ comparison_path)] else: answer = 'Y' if answer not in ['Y', 'y', '']: if verbose: print(" > [ %s ] %s"%\ (colored%(31,"IGNORED"),fname)) continue # Copying the existing reference as a backup back_up_path = pjoin(_hc_comparison_files,folder_name,\ test_name,self.toFileName(fname)+'.BackUp') if os.path.isfile(back_up_path): os.remove(back_up_path) cp(comparison_path, back_up_path) if verbose: print(" > [ %s ] %s"\ %(colored%(32,"UPDATED"),fname)) modifications['updated'].append('/'.join( comparison_path.split('/')[-3:])) else: if force==0 or (force==1 and path.basename(\ comparison_path) not in list(reviewed_file_names.keys())): answer = Cmd.timed_input( question= """New file %s detected, create it? [y/n] >""" % fname, default="y") reviewed_file_names[path.basename(\ comparison_path)] = answer elif (force==1 and path.basename(\ comparison_path) in list(reviewed_file_names.keys())): answer = reviewed_file_names[\ path.basename(comparison_path)] else: answer = 'Y' if answer not in ['Y', 'y', '']: if verbose: print(" > [ %s ] %s"%\ (colored%(31,"IGNORED"),fname)) continue if verbose: print(" > [ %s ] %s"%\ (colored%(32,"CREATED"),fname)) modifications['created'].append('/'.join( comparison_path.split('/')[-3:])) file = open(comparison_path, 'w') file.write(target) file.close() # Clean the iotest output iotest.clean_output() # Monitor the modifications when in creation files mode by returning the # modifications dictionary. if update: return modifications else: return 'test_over'
def runTest(self, *args, **opts):
    """Dummy runTest hook so that this class can be instantiated directly.

    IOTestManager is a driver, not a regular test case: invoking it as
    one is always an error, so this method unconditionally raises.
    """
    message = 'runTest in IOTestManager not supposed to be called.'
    raise MadGraph5Error(message)
def generate_matrix_elements(self, group=False):
    """Helper function to generate the matrix elements before exporting.

    Builds self._curr_matrix_elements (an fks_helas.FKSHelasMultiProcess)
    from self._fks_multi_proc when it is not already populated, and, in
    the standard generation mode, records the list of possible
    initial-state PDG combinations for later PDF handling.

    group: grouping of subprocesses is not supported at NLO; a True
    value raises MadGraph5Error.
    Returns the tuple (ndiags, elapsed_seconds).
    """
    # Sort amplitudes according to number of diagrams,
    # to get most efficient multichannel output
    self._curr_amps.sort(key=lambda a: a.get_number_of_diagrams(),
                         reverse=True)

    cpu_time1 = time.time()
    ndiags = 0
    if not self._curr_matrix_elements.get_matrix_elements():
        if group:
            raise MadGraph5Error("Cannot group subprocesses when "+\
                                 "exporting to NLO")
        else:
            self._curr_matrix_elements = \
                fks_helas.FKSHelasMultiProcess(\
                    self._fks_multi_proc,
                    loop_optimized= self.options['loop_optimized_output'])

            if not self.options['low_mem_multicore_nlo_generation']:
                # generate the code the old way
                ndiags = sum([len(me.get('diagrams')) for \
                              me in self._curr_matrix_elements.\
                              get_matrix_elements()])
                # assign a unique id number to all process and
                # generate a list of possible PDF combinations
                uid = 0
                initial_states = []
                for me in self._curr_matrix_elements.get_matrix_elements():
                    uid += 1 # update the identification number
                    me.get('processes')[0].set('uid', uid)
                    # Born initial states; the IndexError fallback covers
                    # processes with a single initial-state particle
                    # (get_initial_pdg(2) out of range).
                    try:
                        initial_states.append(sorted(list(set((p.get_initial_pdg(1),p.get_initial_pdg(2)) for \
                            p in me.born_matrix_element.get('processes')))))
                    except IndexError:
                        initial_states.append(sorted(list(set((p.get_initial_pdg(1)) for \
                            p in me.born_matrix_element.get('processes')))))

                    for fksreal in me.real_processes:
                        # Pick out all initial state particles for the two beams
                        try:
                            initial_states.append(sorted(list(set((p.get_initial_pdg(1),p.get_initial_pdg(2)) for \
                                p in fksreal.matrix_element.get('processes')))))
                        except IndexError:
                            initial_states.append(sorted(list(set((p.get_initial_pdg(1)) for \
                                p in fksreal.matrix_element.get('processes')))))

                # remove doubles from the list
                # (entries are sorted lists, hence unhashable: a plain
                # membership scan is used instead of a set)
                checked = []
                for e in initial_states:
                    if e not in checked:
                        checked.append(e)
                initial_states = checked

                self._curr_matrix_elements.set('initial_states',
                                               initial_states)
            else:
                #new NLO generation
                if self._curr_matrix_elements['has_loops']:
                    self._curr_exporter.opt['mp'] = True
                self._curr_exporter.model = self._curr_model
                # diagrams are counted later per-directory in this mode
                ndiags = 0

    cpu_time2 = time.time()
    return ndiags, cpu_time2 - cpu_time1
def export(self, nojpeg=False, main_file_name="", group_processes=False):
    """Export a generated amplitude to file.

    Generates the FKS helas matrix elements (via the local helper
    below), then, for the 'NLO' export format, writes one directory per
    matrix element — either sequentially or through a multiprocessing
    pool when 'low_mem_multicore_nlo_generation' is enabled — and
    finally writes the procdef_mg5.dat and initial_states_map.dat
    auxiliary files.
    """
    self._curr_helas_model = helas_call_writers.FortranUFOHelasCallWriter(
        self._curr_model)

    # Local helper; duplicates the class-level generate_matrix_elements
    # and shadows it inside this method.
    def generate_matrix_elements(self, group=False):
        """Helper function to generate the matrix elements before exporting"""
        # Sort amplitudes according to number of diagrams,
        # to get most efficient multichannel output
        self._curr_amps.sort(key=lambda a: a.get_number_of_diagrams(),
                             reverse=True)

        cpu_time1 = time.time()
        ndiags = 0
        if not self._curr_matrix_elements.get_matrix_elements():
            if group:
                raise MadGraph5Error("Cannot group subprocesses when "+\
                                     "exporting to NLO")
            else:
                self._curr_matrix_elements = \
                    fks_helas.FKSHelasMultiProcess(\
                        self._fks_multi_proc,
                        loop_optimized= self.options['loop_optimized_output'])

                if not self.options['low_mem_multicore_nlo_generation']:
                    # generate the code the old way
                    ndiags = sum([len(me.get('diagrams')) for \
                                  me in self._curr_matrix_elements.\
                                  get_matrix_elements()])
                    # assign a unique id number to all process and
                    # generate a list of possible PDF combinations
                    uid = 0
                    initial_states = []
                    for me in self._curr_matrix_elements.get_matrix_elements():
                        uid += 1 # update the identification number
                        me.get('processes')[0].set('uid', uid)
                        # IndexError fallback: single initial-state particle
                        try:
                            initial_states.append(sorted(list(set((p.get_initial_pdg(1),p.get_initial_pdg(2)) for \
                                p in me.born_matrix_element.get('processes')))))
                        except IndexError:
                            initial_states.append(sorted(list(set((p.get_initial_pdg(1)) for \
                                p in me.born_matrix_element.get('processes')))))

                        for fksreal in me.real_processes:
                            # Pick out all initial state particles for the two beams
                            try:
                                initial_states.append(sorted(list(set((p.get_initial_pdg(1),p.get_initial_pdg(2)) for \
                                    p in fksreal.matrix_element.get('processes')))))
                            except IndexError:
                                initial_states.append(sorted(list(set((p.get_initial_pdg(1)) for \
                                    p in fksreal.matrix_element.get('processes')))))

                    # remove doubles from the list (entries are lists,
                    # unhashable, so no set() shortcut)
                    checked = []
                    for e in initial_states:
                        if e not in checked:
                            checked.append(e)
                    initial_states = checked

                    self._curr_matrix_elements.set('initial_states',
                                                   initial_states)
                else:
                    #new NLO generation
                    if self._curr_matrix_elements['has_loops']:
                        self._curr_exporter.opt['mp'] = True
                    self._curr_exporter.model = self._curr_model
                    ndiags = 0

        cpu_time2 = time.time()
        return ndiags, cpu_time2 - cpu_time1

    # Start of the actual routine
    ndiags, cpu_time = generate_matrix_elements(self, group=group_processes)
    calls = 0

    path = self._export_dir

    if self._export_format in ['NLO']:
        path = os.path.join(path, 'SubProcesses')
        #_curr_matrix_element is a FKSHelasMultiProcess Object
        self._fks_directories = []
        proc_charac = self._curr_exporter.proc_characteristic
        for charac in ['has_isr', 'has_fsr', 'has_loops']:
            proc_charac[charac] = self._curr_matrix_elements[charac]

        # prepare for the generation
        # glob_directories_map is for the new NLO generation: it must be
        # module-global so the pool workers can read it.
        global glob_directories_map
        glob_directories_map = []

        # Save processes instances generated
        self.born_processes_for_olp = []
        self.born_processes = []
        for ime, me in \
            enumerate(self._curr_matrix_elements.get('matrix_elements')):
            if not self.options['low_mem_multicore_nlo_generation']:
                #me is a FKSHelasProcessFromReals
                calls = calls + \
                    self._curr_exporter.generate_directories_fks(me,
                        self._curr_helas_model, ime,
                        len(self._curr_matrix_elements.get('matrix_elements')),
                        path, self.options['OLP'])
                self._fks_directories.extend(self._curr_exporter.fksdirs)
                self.born_processes_for_olp.append(
                    me.born_matrix_element.get('processes')[0])
                self.born_processes.append(
                    me.born_matrix_element.get('processes'))
            else:
                # defer the work: stash the arguments for the pool workers
                glob_directories_map.append(\
                    [self._curr_exporter, me, self._curr_helas_model, ime,
                     len(self._curr_matrix_elements.get('matrix_elements')),
                     path, self.options['OLP']])

        if self.options['low_mem_multicore_nlo_generation']:
            # start the pool instance with a signal instance to catch ctr+c
            logger.info('Writing directories...')
            # Ignore SIGINT while forking so workers inherit the ignore;
            # the parent handler is restored right after.
            original_sigint_handler = signal.signal(
                signal.SIGINT, signal.SIG_IGN)
            if self.ncores_for_proc_gen < 0: # use all cores
                pool = multiprocessing.Pool(maxtasksperchild=1)
            else:
                pool = multiprocessing.Pool(
                    processes=self.ncores_for_proc_gen,
                    maxtasksperchild=1)
            signal.signal(signal.SIGINT, original_sigint_handler)
            try:
                # the very large timeout passed to get is to be able to catch
                # KeyboardInterrupts
                diroutputmap = pool.map_async(
                    generate_directories_fks_async,
                    list(range(len(glob_directories_map)))).get(9999999)
            except KeyboardInterrupt:
                pool.terminate()
                raise KeyboardInterrupt

            pool.close()
            pool.join()

            #clean up tmp files containing final matrix elements
            for mefile in self._curr_matrix_elements.get('matrix_elements'):
                os.remove(mefile)

            for charac in ['nexternal', 'ninitial']:
                proc_charac[charac] = \
                    self._curr_exporter.proc_characteristic[charac]
            # ninitial and nexternal
            proc_charac['nexternal'] = max(
                [diroutput[4] for diroutput in diroutputmap])
            ninitial_set = set(
                [diroutput[3] for diroutput in diroutputmap])
            if len(ninitial_set) != 1:
                # NOTE(review): joining what looks like a set of ints with
                # str.join would itself raise TypeError, and ' ,' looks like
                # a transposed ', ' — confirm before relying on this message.
                raise MadGraph5Error("Invalid ninitial values: %s" %
                                     ' ,'.join(list(ninitial_set)))
            proc_charac['ninitial'] = list(ninitial_set)[0]
            # max_n_matched_jets
            njet_set = set(
                [int(diroutput[6]) for diroutput in diroutputmap])
            proc_charac['max_n_matched_jets'] = max(njet_set)

            # Aggregate the per-directory results returned by the workers.
            self.born_processes = []
            self.born_processes_for_olp = []
            max_loop_vertex_ranks = []
            for diroutput in diroutputmap:
                calls = calls + diroutput[0]
                self._fks_directories.extend(diroutput[1])
                max_loop_vertex_ranks.append(diroutput[2])
                if six.PY2:
                    # Born process objects are only returned by the workers
                    # under Python 2; under Python 3 these lists stay empty.
                    self.born_processes.extend(diroutput[5])
                    self.born_processes_for_olp.append(diroutput[5][0])
        else:
            max_loop_vertex_ranks = [me.get_max_loop_vertex_rank() for \
                me in self._curr_matrix_elements.get_virt_matrix_elements()]

        card_path = os.path.join(path, os.path.pardir, 'SubProcesses', \
                                 'procdef_mg5.dat')

        if self.options['loop_optimized_output'] and \
           len(max_loop_vertex_ranks) > 0:
            self._curr_exporter.write_coef_specs_file(max_loop_vertex_ranks)

        if self._generate_info:
            self._curr_exporter.write_procdef_mg5(
                card_path,
                # self._curr_model['name'],
                self._generate_info)
            # best-effort: record the command history; failure is non-fatal
            try:
                cmd.Cmd.onecmd(self, 'history .')
            except Exception:
                logger.debug('fail to run command \"history cmd\"')
                pass

        subproc_path = os.path.join(path, os.path.pardir, 'SubProcesses', \
                                    'initial_states_map.dat')
        self._curr_exporter.write_init_map(
            subproc_path,
            self._curr_matrix_elements.get('initial_states'))

    cpu_time1 = time.time()
def do_add(self, line, *args, **opt):
    """Add a model or an NLO process to the current session.

    'add model ...' is delegated to self.add_model; 'add process ...'
    parses the process definition, validates the model for the requested
    perturbation type, and accumulates the result into
    self._fks_multi_proc (creating it on the first call).
    """
    args = self.split_arg(line)

    # Check the validity of the arguments
    self.check_add(args)

    if args[0] == 'model':
        return self.add_model(args[1:])
    elif args[0] != 'process':
        raise self.InvalidCmd(
            "The add command can only be used with process or model")
    else:
        # strip the leading 'process' keyword; the rest is the definition
        line = ' '.join(args[1:])

    proc_type = self.extract_process_type(line)
    # 'real' and 'LOonly' modes do not need a working loop compiler
    if proc_type[1] not in ['real', 'LOonly']:
        run_interface.check_compiler(self.options, block=False)

    #validate_model will reset self._generate_info; to avoid
    #this store it
    geninfo = self._generate_info
    self.validate_model(proc_type[1], coupling_type=proc_type[2])
    self._generate_info = geninfo

    #now generate the amplitudes as usual
    #self.options['group_subprocesses'] = 'False'
    collect_mirror_procs = False  # NOTE(review): assigned but unused below
    ignore_six_quark_processes = self.options['ignore_six_quark_processes']

    # A comma marks a decay chain; decays may not carry perturbations.
    if ',' in line:
        myprocdef, line = mg_interface.MadGraphCmd.extract_decay_chain_process(
            self, line)
        if myprocdef.are_decays_perturbed():
            raise MadGraph5Error("Decay processes cannot be perturbed")
    else:
        myprocdef = mg_interface.MadGraphCmd.extract_process(self, line)

    self.proc_validity(myprocdef, 'aMCatNLO_%s' % proc_type[1])

    self._curr_proc_defs.append(myprocdef)

    # (commented-out historical check restricting FKS reals to QCD,
    #  kept for reference)
    # if myprocdef['perturbation_couplings']!=['QCD']:
    #     message = ""FKS for reals only available in QCD for now, you asked %s" \
    #                % ', '.join(myprocdef['perturbation_couplings'])"
    #     logger.info("%s. Checking for loop induced")
    #     new_line = ln
    #     raise self.InvalidCmd("FKS for reals only available in QCD for now, you asked %s" \
    #                           % ', '.join(myprocdef['perturbation_couplings']))
    ##

    # if the new nlo process generation mode is enabled, the number of cores to be
    # used has to be passed
    # ncores_for_proc_gen has the following meaning
    #    0 : do things the old way
    #  > 0 : use ncores_for_proc_gen
    #   -1 : use all cores
    if self.options['low_mem_multicore_nlo_generation']:
        if self.options['nb_core']:
            self.ncores_for_proc_gen = int(self.options['nb_core'])
        else:
            self.ncores_for_proc_gen = -1
    else:
        self.ncores_for_proc_gen = 0

    # this is the options dictionary to pass to the FKSMultiProcess
    fks_options = {
        'OLP': self.options['OLP'],
        'ignore_six_quark_processes':
            self.options['ignore_six_quark_processes'],
        'ncores_for_proc_gen': self.ncores_for_proc_gen
    }
    # EAFP: extend the existing multiprocess; AttributeError means this
    # is the first 'add process' and the container must be created.
    try:
        self._fks_multi_proc.add(
            fks_base.FKSMultiProcess(myprocdef, fks_options))
    except AttributeError:
        self._fks_multi_proc = fks_base.FKSMultiProcess(
            myprocdef, fks_options)
def determine_mapping(cls, structure):
    """Pick the elementary mapping suited to `structure`.

    Only a bare collinear structure is supported: a soft structure
    (name 'S') or one carrying substructures raises MadGraph5Error.
    """
    unsupported = structure.name() == 'S' or structure.substructures
    if not unsupported:
        return cls.collinear_map
    raise MadGraph5Error(cls.cannot_handle_msg(structure))