def compare_cross_section_to_values(self, values, my_proc_list=None,
                                    orders=None, model='sm', filename="",
                                    print_result=False, tolerance=1e-02):
    """Run MadEvent for my_proc_list and compare the cross sections
    against the pre-computed reference *values*.

    Parameters:
        values: reference results appended to the comparator's results
            and attributed to a FakeRunner, so assert_processes checks
            the fresh run against them.
        my_proc_list: processes to generate (default: empty list).
        orders: coupling-order restrictions (default: empty dict).
        model: model name; 'v4' models are not supported here.
        filename, print_result: accepted for signature symmetry with
            compare_cross_section but currently unused in this method.
        tolerance: maximum relative deviation allowed by assert_processes.

    Raises:
        Exception: if a 'v4' model is requested (not implemented).
    """
    # Avoid mutable default arguments shared across calls.
    if my_proc_list is None:
        my_proc_list = []
    if orders is None:
        orders = {}

    if 'v4' in model:
        # v4 models would need a dedicated runner; not ported here.
        raise Exception('Not implemented')

    current_mg5 = madevent_comparator.MG5Runner()
    current_mg5.setup(MG5DIR)
    current_mg5.store_proc_card = True

    # Create and setup a comparator driving only the current MG5.
    my_comp = madevent_comparator.MadEventComparator()
    my_comp.set_me_runners(current_mg5)

    # Run the actual comparison.
    my_comp.run_comparison(my_proc_list, [model], orders)

    # Register the reference values as a second result set, owned by a
    # FakeRunner, so assert_processes compares run output against them.
    my_comp.results.append(values)
    my_comp.me_runners = (my_comp.me_runners[0],
                          madevent_comparator.FakeRunner())

    # Assert that all process comparisons passed the tolerance cut.
    my_comp.assert_processes(self, tolerance)

    # Do some cleanup.
    my_comp.cleanup()
def compare_cross_section(self, my_proc_list=None, orders=None, model='sm',
                          filename="", print_result=False, tolerance=1e-02):
    """Compare cross sections between the current MG5 and a reference
    (old) MG5 installation for the given processes.

    The reference model is copied into the current installation under the
    name 'paralel_test_model_<model>' so both runners generate with the
    same model content.

    Parameters:
        my_proc_list: processes to generate (default: empty list).
        orders: coupling-order restrictions (default: empty dict).
        model: model name; 'v4' models are not supported here.
        filename: if non-empty, write the comparison table to this file.
        print_result: if True, print the first result set to stdout.
        tolerance: maximum relative deviation allowed by assert_processes.

    Returns:
        The comparator's list of result sets.

    Raises:
        Exception: if a 'v4' model is requested (not implemented).
    """
    # Avoid mutable default arguments shared across calls.
    if my_proc_list is None:
        my_proc_list = []
    if orders is None:
        orders = {}

    mg5_path = self.build_old_mg5()

    if 'v4' in model:
        # v4 models would need a dedicated runner; not ported here.
        raise Exception('Not implemented')

    old_mg5 = madevent_comparator.MG5OldRunner()
    old_mg5.setup(mg5_path)
    current_mg5 = madevent_comparator.MG5Runner()
    current_mg5.setup(MG5DIR)
    current_mg5.store_proc_card = True

    self.nb_test += 1

    # Mirror the reference model into the current installation so both
    # runners use identical model content.
    target_model = pjoin(MG5DIR, 'models', 'paralel_test_model_%s' % model)
    if os.path.exists(target_model):
        shutil.rmtree(target_model)
    os.system('cp -rf %s %s' % (pjoin(mg5_path, 'models', model),
                                target_model))

    # Create and setup a comparator with both runners.
    my_comp = madevent_comparator.MadEventComparator()
    my_comp.set_me_runners(old_mg5, current_mg5)

    # Run the actual comparison: old MG5 uses the mirrored model copy,
    # current MG5 uses its own copy of the model.
    my_comp.run_comparison(my_proc_list,
                           ['paralel_test_model_%s' % model, model],
                           orders)

    # Print the output.
    if filename:
        my_comp.output_result(filename=filename)

    if print_result:
        print(my_comp.results[0])

    # Assert that all process comparisons passed the tolerance cut.
    my_comp.assert_processes(self, tolerance)

    # Do some cleanup.
    my_comp.cleanup()

    return my_comp.results