def test_madspin_gridpack(self):

        self.out_dir = self.run_dir
        self.generate('g g > t t~', 'sm')

        #put the MadSpin card
        ff = open(pjoin(self.out_dir, 'Cards/madspin_card.dat'), 'w')
        orig_card = open(pjoin(self.out_dir, 'Cards/madspin_card_default.dat')).read()
        ff.write('set ms_dir %s\n' % pjoin(self.out_dir, 'MSDIR1'))
        ff.write(orig_card)
        ff.close()
        
        #reduce the number of events
        files.cp(pjoin(_file_path, 'input_files', 'run_card_matching.dat'),
                 pjoin(self.out_dir, 'Cards/run_card.dat'))

        #create the gridpack        
        self.do('launch -f')
        
        #move the MS gridpack
        self.assertTrue(os.path.exists(pjoin(self.out_dir, 'MSDIR1')))
        files.mv(pjoin(self.out_dir, 'MSDIR1'), pjoin(self.out_dir, 'MSDIR2'))
        
        #put the MadSpin card
        ff = open(pjoin(self.out_dir, 'Cards/madspin_card.dat'), 'w')
        ff.write('set ms_dir %s\n' % pjoin(self.out_dir, 'MSDIR2'))
        ff.write(orig_card)
        ff.close()
               
        #create the gridpack        
        self.do('launch -f')
        
        self.check_parton_output('run_02_decayed_1', 100)           
    def finalize(self, matrix_elements, history, mg5options, flaglist):
        """call the mother class, and do a couple of other things relevant 
        to OS subtraction
        """
        super(MadSTRExporter, self).finalize(matrix_elements, history, mg5options, flaglist)

        os_ids = self.get_os_ids_from_file(pjoin(self.dir_path, 'SubProcesses', 'os_ids.mg'))

        # add the widths corresponding to the os_ids to coupl.inc
        particle_dict = self.model.get('particle_dict') 
        # list(set(...)) is needed since a particle and its antiparticle can share the same width
        keep_widths = list(set([particle_dict[idd].get('width') for idd in os_ids]))
        self.update_couplinc(keep_widths, pjoin(self.dir_path, 'Source', 'coupl.inc'))

        # also, add a function that returns these widths to MODEL 
        filename=pjoin(self.dir_path, 'Source', 'MODEL', 'get_mass_width_fcts.f')
        width_particles = [particle_dict[idd] for idd in os_ids]
        self.update_get_mass_width(width_particles, filename)

        # replace the common_run_interface with the one from madstr_plugin
        internal = pjoin(self.dir_path, 'bin', 'internal')
        files.mv(pjoin(internal, 'common_run_interface.py'), \
                 pjoin(internal, 'common_run_interface_MG.py'))
        files.cp(pjoin(plugin_path, 'common_run_interface.py'), internal)

        # finally patch fks_singular so that it won't complain about negative 
        # weights for the real emission
        subprocess.call('patch -p3 < %s' % pjoin(self.template_path, 'fks_singular_patch.txt'), cwd=self.dir_path, shell=True)
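# Stand-alone sketch (not part of the class above): a minimal illustration of the
# width-collection step done in finalize, using a plain dict in place of the
# model's particle_dict. The ids, names and width parameters below are purely
# illustrative assumptions, not values taken from any model file.
def collect_os_widths(particle_dict, os_ids):
    """Return the unique width parameters of the particles listed in os_ids."""
    # list(set(...)) removes duplicates, e.g. a particle and its antiparticle
    # sharing the same width parameter.
    return list(set(particle_dict[idd]['width'] for idd in os_ids))

toy_particle_dict = {6: {'name': 't', 'width': 'WT'},
                     -6: {'name': 't~', 'width': 'WT'},
                     23: {'name': 'z', 'width': 'WZ'}}
print(collect_os_widths(toy_particle_dict, [6, -6, 23]))  # e.g. ['WT', 'WZ']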
Example #3
    def copy_model_resources(self):
        """Make the copy/symbolic links"""
        model_path = pjoin(self.export_dir, 'Source', 'MODEL')
        if os.path.exists(pjoin(model_path, 'ident_card.dat')):
            mv(pjoin(model_path, 'ident_card.dat'),
               pjoin(self.export_dir, 'Cards'))
        cp(pjoin(model_path, 'param_card.dat'),
           pjoin(self.export_dir, 'Cards'))
        mv(pjoin(model_path, 'param_card.dat'),
           pjoin(self.export_dir, 'Cards', 'param_card_default.dat'))
    def test_madspin_gridpack(self):

        self.out_dir = self.run_dir
        self.generate('g g > t t~', 'sm')

        #put the MadSpin card
        ff = open(pjoin(self.out_dir, 'Cards/madspin_card.dat'), 'w')
        orig_card = open(pjoin(self.out_dir,
                               'Cards/madspin_card_default.dat')).read()
        ff.write('set ms_dir %s\n' % pjoin(self.out_dir, 'MSDIR1'))
        ff.write(orig_card)
        ff.close()

        run_card = banner.RunCardLO(
            pjoin(self.run_dir, 'Cards', 'run_card.dat'))
        self.assertFalse('ptj' in run_card.user_set)
        self.assertFalse('drjj' in run_card.user_set)
        self.assertFalse('ptj2min' in run_card.user_set)
        self.assertFalse('ptj3min' in run_card.user_set)
        self.assertFalse('mmjj' in run_card.user_set)
        self.assertTrue('ptheavy' in run_card.user_set)
        self.assertFalse('el' in run_card.user_set)
        self.assertFalse('ej' in run_card.user_set)
        self.assertFalse('polbeam1' in run_card.user_set)
        self.assertFalse('ptl' in run_card.user_set)

        #reduce the number of events
        files.cp(pjoin(_file_path, 'input_files', 'run_card_matching.dat'),
                 pjoin(self.out_dir, 'Cards/run_card.dat'))

        #create the gridpack
        self.do('launch -f')
        self.check_parton_output('run_01', 100)
        self.check_parton_output('run_01_decayed_1', 100)
        #move the MS gridpack
        self.assertTrue(os.path.exists(pjoin(self.out_dir, 'MSDIR1')))
        files.mv(pjoin(self.out_dir, 'MSDIR1'), pjoin(self.out_dir, 'MSDIR2'))

        #put the MadSpin card
        ff = open(pjoin(self.out_dir, 'Cards/madspin_card.dat'), 'w')
        ff.write('set ms_dir %s\n' % pjoin(self.out_dir, 'MSDIR2'))
        ff.write(orig_card)
        ff.close()

        #create the gridpack
        self.do('launch -f')

        self.check_parton_output('run_02_decayed_1', 100)

        self.assertEqual(self.debugging, False)
Example #5
    def add_to_file(self, path, seed=None):
        """Add the banner to a file and change the associate seed in the banner"""

        if seed is not None:
            self.set("run_card", "iseed", seed)
            
        ff = self.write("%s.tmp" % path, close_tag=False,
                        exclude=['MGGenerationInfo', '/header', 'init'])
        ff.write("## END BANNER##\n")
        if self.lhe_version >= 3:
            # add the original content, updating the <generator> tag on the fly
            for line in open(path):
                if line.startswith("<generator name='MadGraph5_aMC@NLO'"):
                    ff.write("<generator name='MadGraph5_aMC@NLO' version='%s'>"
                             % self['mgversion'][:-1])
                else:
                    ff.write(line)
        else:
            for line in open(path):
                ff.write(line)
        ff.write("</LesHouchesEvents>\n")
        ff.close()
        files.mv("%s.tmp" % path, path)
Example #7
    def do_collect(self, line):
        """MadWeight Function: making the collect of the results"""

        self.configure()
        args = self.split_arg(line)
        self.check_collect(args)
        xml_reader = MWParserXML()

        name = self.MWparam.name
        # 1. Concatenate the files. ############################################
        out_dir = pjoin(self.me_dir, 'Events', name)
        if '-refine' in args:
            out_path = pjoin(out_dir, 'refine.xml')
        else:
            out_path = pjoin(out_dir, 'output.xml')
            if os.path.exists(out_path):
                logger.warning(
                    'Output file already exists. Current one will be tagged with _old suffix'
                )
                logger.warning(
                    'Run "collect -refine" to instead update your current results.'
                )
                files.mv(pjoin(out_dir, 'output.xml'),
                         pjoin(out_dir, 'output_old.xml'))
                files.mv(pjoin(out_dir, 'weights.out'),
                         pjoin(out_dir, 'weights_old.out'))
                for MWdir in self.MWparam.MW_listdir:
                    out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
                    files.mv(pjoin(out_dir, 'output.xml'),
                             pjoin(out_dir, 'output_old.xml'))
                out_dir = pjoin(self.me_dir, 'Events', name)

        fsock = open(out_path, 'w')
        fsock.write('<madweight>\n<banner>\n')
        # BANNER
        for card in [
                'proc_card_mg5.dat', 'MadWeight_card.dat', 'transfer_card.dat',
                'param_card.dat', 'run_card.dat'
        ]:
            cname = card[:-4]
            fsock.write('<%s>\n' % cname)
            fsock.write(
                open(pjoin(self.me_dir, 'Cards',
                           card)).read().replace('<', '!>'))
            fsock.write('</%s>\n' % cname)
        fsock.write('</banner>\n')
        at_least_one = False
        for MWdir in self.MWparam.MW_listdir:
            out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
            input_dir = pjoin(self.me_dir, 'SubProcesses', MWdir, name)
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            if '-refine' in args:
                out_path = pjoin(out_dir, 'refine.xml')
            else:
                out_path = pjoin(out_dir, 'output.xml')
            fsock2 = open(out_path, 'w')
            fsock.write('<subprocess id=\'%s\'>\n' % MWdir)
            fsock2.write('<subprocess id=\'%s\'>\n' % MWdir)
            for output in glob.glob(pjoin(input_dir, 'output_*_*.xml')):
                at_least_one = True
                text = open(output).read()
                fsock2.write(text)
                fsock.write(text)
                os.remove(output)
            fsock.write('</subprocess>\n')
            fsock2.write('</subprocess>\n')
            fsock2.close()
        fsock.write('\n</madweight>\n')
        fsock.close()
        # 2. Special treatment for refine mode
        if '-refine' in args:
            xml_reader2 = MWParserXML(self.MWparam['mw_run']['log_level'])
            for MWdir in self.MWparam.MW_listdir:
                out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
                ref_output = xml_reader2.read_file(pjoin(
                    out_dir, 'refine.xml'))
                xml_reader2 = MWParserXML(self.MWparam['mw_run']['log_level'])
                base_output = xml_reader2.read_file(
                    pjoin(out_dir, 'output.xml'))

                base_output.refine(ref_output)
                files.mv(pjoin(out_dir, 'output.xml'),
                         pjoin(out_dir, 'output_old.xml'))
                base_output.write(pjoin(out_dir, 'output.xml'), MWdir)
        elif not at_least_one:
            logger.warning("Nothing to collect restore _old file as current.")
            out_dir = pjoin(self.me_dir, 'Events', name)
            files.mv(pjoin(out_dir, 'output_old.xml'),
                     pjoin(out_dir, 'output.xml'))
            files.mv(pjoin(out_dir, 'weights_old.out'),
                     pjoin(out_dir, 'weights.out'))
            for MWdir in self.MWparam.MW_listdir:
                out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
                files.mv(pjoin(out_dir, 'output_old.xml'),
                         pjoin(out_dir, 'output.xml'))

        # 3. Read the (final) log file for extracting data
        total = {}
        likelihood = {}
        err_likelihood = {}
        cards = set()
        events = set()
        tf_sets = set()
        for MW_dir in self.MWparam.MW_listdir:
            out_dir = pjoin(self.me_dir, 'Events', name, MW_dir)
            xml_reader = MWParserXML()
            data = xml_reader.read_file(pjoin(out_dir, 'output.xml'))
            #
            log_level = self.MWparam['mw_run']['log_level']
            generator = ((int(i), j, int(k), data[i][j][k]) for i in data
                         for j in data[i] for k in data[i][j])
            for card, event, tf_set, obj in generator:
                # update the full list of events/cards
                cards.add(card)
                events.add(event)
                tf_sets.add(tf_set)
                # now accumulate the associated value and the squared error
                if (card, event, tf_set) in total:
                    value, error = total[(card, event, tf_set)]
                else:
                    value, error = 0, 0
                obj.calculate_total()
                value, error = (value + obj.value, error + obj.error**2)
                total[(card, event, tf_set)] = (value, error)
                if tf_set == 1:
                    if value:
                        if card not in likelihood:
                            likelihood[card], err_likelihood[card] = 0, 0
                        likelihood[card] -= math.log(value)
                        err_likelihood[card] += error / value
                    else:
                        likelihood[card] = float('Inf')
                        err_likelihood[card] = float('nan')

        # write the weights file:
        fsock = open(pjoin(self.me_dir, 'Events', name, 'weights.out'), 'w')
        logger.info('Write output file with weight information: %s' %
                    fsock.name)
        fsock.write(
            '# Weight (un-normalized) for each card/event/set of transfer fct\n'
        )
        fsock.write(
            '# format: LHCO_event_number card_id tf_set_id value integration_error\n'
        )
        events = list(events)
        events.sort()
        cards = list(cards)
        cards.sort()
        tf_sets = list(tf_sets)
        tf_sets.sort()
        for event in events:
            for card in cards:
                for tf_set in tf_sets:
                    try:
                        value, error = total[(card, event, tf_set)]
                    except KeyError:
                        continue
                    error = math.sqrt(error)
                    fsock.write(
                        '%s %s %s %s %s \n' %
                        (event.replace('@', ' '), card, tf_set, value, error))

        # write the likelihood file:
        fsock = open(
            pjoin(self.me_dir, 'Events', name, 'un-normalized_likelihood.out'),
            'w')
        fsock.write(
            '# Warning: this likelihood needs a bin-by-bin normalization!\n')
        fsock.write(
            '# If more than one set of transfer functions is defined, ONLY the first one is '
        )
        fsock.write('included in this file.\n')
        fsock.write('# format: card_id value integration_error\n')
        for card in cards:
            value, error = likelihood[card], err_likelihood[card]
            error = math.sqrt(error)
            fsock.write('%s %s %s \n' % (card, value, error))
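# Stand-alone sketch: a small reader for the weights.out file written above. The
# column layout follows the "# format:" header emitted by do_collect; the helper
# itself is an assumption of this sketch, not part of MadWeight.
def read_weights(path):
    """Return a list of (event_id, card_id, tf_set_id, value, error) tuples."""
    results = []
    with open(path) as fsock:
        for line in fsock:
            if not line.strip() or line.startswith('#'):
                continue  # skip blank lines and header comments
            tokens = line.split()
            # the last four columns are card_id, tf_set_id, value and error;
            # anything before them belongs to the (possibly multi-token) event id
            event_id = ' '.join(tokens[:-4])
            card_id, tf_set_id = int(tokens[-4]), int(tokens[-3])
            value, error = float(tokens[-2]), float(tokens[-1])
            results.append((event_id, card_id, tf_set_id, value, error))
    return results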
Example #8
    def do_launch(self, line):
        """end of the configuration launched the code"""
        
        args = self.split_arg(line)
        self.check_launch(args)

        model_line = self.banner.get('proc_card', 'full_model_line')

        if not self.has_standalone_dir:
            self.create_standalone_directory()        
            
        ff = open(pjoin(self.me_dir, 'rw_me','Cards', 'param_card.dat'), 'w')
        ff.write(self.banner['slha'])
        ff.close()
        ff = open(pjoin(self.me_dir, 'rw_me','Cards', 'param_card_orig.dat'), 'w')
        ff.write(self.banner['slha'])
        ff.close()        
        cmd = common_run_interface.CommonRunCmd.ask_edit_card_static(cards=['param_card.dat'],
                                   ask=self.ask, pwd=pjoin(self.me_dir,'rw_me'))
        new_card = open(pjoin(self.me_dir, 'rw_me', 'Cards', 'param_card.dat')).read()        

        

        # Find new tag in the banner and add information if needed
        if 'initrwgt' in self.banner:
            if 'type=\'mg_reweighting\'' in self.banner['initrwgt']:
                blockpat = re.compile(r'''<weightgroup type=\'mg_reweighting\'\s*>(?P<text>.*?)</weightgroup>''', re.I+re.M+re.S)
                before, content, after = blockpat.split(self.banner['initrwgt'])
                header_rwgt_other = before + after
                pattern = re.compile('<weight id=\'mg_reweight_(?P<id>\d+)\'>(?P<info>[^<>]*)</weight>', re.S+re.I+re.M)
                mg_rwgt_info = pattern.findall(content)
                maxid = 0
                for i, diff in mg_rwgt_info:
                    if int(i) > maxid:
                        maxid = int(i)
                maxid += 1
                rewgtid = maxid
            else:
                header_rwgt_other = self.banner['initrwgt'] 
                mg_rwgt_info = []
                rewgtid = 1
        else:
            self.banner['initrwgt']  = ''
            header_rwgt_other = ''
            mg_rwgt_info = []
            rewgtid = 1
        
        # add the reweighting in the banner information:
        #starts by computing the difference in the cards.
        s_orig = self.banner['slha']
        s_new = new_card
        old_param = check_param_card.ParamCard(s_orig.splitlines())
        new_param =  check_param_card.ParamCard(s_new.splitlines())
        card_diff = old_param.create_diff(new_param)
        if card_diff == '':
            raise self.InvalidCmd('original card and new card are identical')
        mg_rwgt_info.append((str(rewgtid), card_diff))
        
        # re-create the banner.
        self.banner['initrwgt'] = header_rwgt_other
        self.banner['initrwgt'] += '\n<weightgroup type=\'mg_reweighting\'>\n'
        for tag, diff in mg_rwgt_info:
            self.banner['initrwgt'] += '<weight id=\'mg_reweight_%s\'>%s</weight>\n' % \
                                       (tag, diff)
        self.banner['initrwgt'] += '\n</weightgroup>\n'
        self.banner['initrwgt'] = self.banner['initrwgt'].replace('\n\n', '\n')
            
        output = open( self.lhe_input.name +'rw', 'w')
        
        
        logger.info('start computing weights for events with the following modification to the param_card:')
        logger.info(card_diff)
        
        #write the banner to the output file
        self.banner.write(output, close_tag=False)
        # prepare the output file for the weight plot
        if self.mother:
            out_path = pjoin(self.mother.me_dir, 'Events', 'reweight.lhe')
            output2 = open(out_path, 'w')
            self.banner.write(output2, close_tag=False)
            new_banner = banner.Banner(self.banner)
            if not hasattr(self, 'run_card'):
                self.run_card = new_banner.charge_card('run_card')
            self.run_card['run_tag'] = 'reweight_%s' % rewgtid
            new_banner['slha'] = s_new   
            del new_banner['initrwgt']
            #ensure that original banner is kept untouched
            assert new_banner['slha'] != self.banner['slha']
            assert 'initrwgt' in self.banner 
            ff = open(pjoin(self.mother.me_dir,'Events',self.mother.run_name, '%s_%s_banner.txt' % \
                          (self.mother.run_name, self.run_card['run_tag'])),'w') 
            new_banner.write(ff)
            ff.close()
        
        # Loop over all events
        tag_name = 'mg_reweight_%s' % rewgtid
        start = time.time()
        cross = 0
        
        os.environ['GFORTRAN_UNBUFFERED_ALL'] = 'y'
        if self.lhe_input.closed:
            self.lhe_input = lhe_parser.EventFile(self.lhe_input.name)

        for event_nb, event in enumerate(self.lhe_input):
            # control logger
            if event_nb % max(int(10**int(math.log10(float(event_nb)+1))), 1000) == 0:
                running_time = misc.format_timer(time.time()-start)
                logger.info('Event nb %s %s' % (event_nb, running_time))
            if event_nb == 10001:
                logger.info('reducing number of print status. Next status update in 10000 events')


            weight = self.calculate_weight(event)
            cross += weight
            event.reweight_data[tag_name] = weight
            #write this event with weight
            output.write(str(event))
            if self.mother:
                event.wgt = weight
                event.reweight_data = {}
                output2.write(str(event))

        running_time = misc.format_timer(time.time()-start)
        logger.info('All events done (nb_event: %s) %s' % (event_nb+1, running_time))
        
        output.write('</LesHouchesEvents>\n')
        output.close()
        os.environ['GFORTRAN_UNBUFFERED_ALL'] = 'n'
        if self.mother:
            output2.write('</LesHouchesEvents>\n')
            output2.close()        
            # add output information
            if hasattr(self.mother, 'results'):
                run_name = self.mother.run_name
                results = self.mother.results
                results.add_run(run_name, self.run_card, current=True)
                results.add_detail('nb_event', event_nb+1)
                results.add_detail('cross', cross)
                results.add_detail('error', 'nan')
                self.mother.create_plot(mode='reweight', event_path=output2.name,
                                        tag=self.run_card['run_tag'])
                #modify the html output to add the original run
                if 'plot' in results.current.reweight:
                    html_dir = pjoin(self.mother.me_dir, 'HTML', run_name)
                    td = pjoin(self.mother.options['td_path'], 'td') 
                    MA = pjoin(self.mother.options['madanalysis_path'])
                    path1 = pjoin(html_dir, 'plots_parton')
                    path2 = pjoin(html_dir, 'plots_%s' % self.run_card['run_tag'])
                    outputplot = path2
                    combine_plots.merge_all_plots(path2, path1, outputplot, td, MA)
                #results.update_status(level='reweight')
                #results.update(status, level, makehtml=True, error=False)
                
                #old_name = self.mother.results.current['run_name']
                #new_run = '%s_rw_%s' % (old_name, rewgtid)
                #self.mother.results.add_run( new_run, self.run_card)
                #self.mother.results.add_detail('nb_event', event_nb+1)
                #self.mother.results.add_detail('cross', cross)
                #self.mother.results.add_detail('error', 'nan')
                #self.mother.do_plot('%s -f' % new_run)
                #self.mother.update_status('Reweight %s done' % rewgtid, 'madspin')
                #self.mother.results.def_current(old_name)
                #self.run_card['run_tag'] = self.run_card['run_tag'][9:]
                #self.mother.run_name = old_name
        self.lhe_input.close()
        files.mv(output.name, self.lhe_input.name)
        logger.info('Events in %s now have the additional weight' % self.lhe_input.name)
        logger.info('new cross-section is : %g pb' % cross)
        self.terminate_fortran_executables(new_card_only=True)
        #store result
        self.all_cross_section[rewgtid] = cross
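# Stand-alone sketch: how an existing <initrwgt> block (as written above) can be
# scanned for mg_reweighting entries, reusing the same regular expressions as
# do_launch. The sample banner text is a hypothetical illustration.
import re

def existing_mg_reweight_ids(initrwgt_text):
    """Return the integer ids of the mg_reweight_* weights already present."""
    group_pat = re.compile(r"<weightgroup type='mg_reweighting'\s*>(?P<text>.*?)</weightgroup>",
                           re.I + re.M + re.S)
    weight_pat = re.compile(r"<weight id='mg_reweight_(?P<id>\d+)'>(?P<info>[^<>]*)</weight>",
                            re.I + re.M + re.S)
    ids = []
    for group in group_pat.finditer(initrwgt_text):
        ids.extend(int(i) for i, _ in weight_pat.findall(group.group('text')))
    return ids

sample = ("<weightgroup type='mg_reweighting'>\n"
          "<weight id='mg_reweight_1'>set param_card mass 6 174.0</weight>\n"
          "</weightgroup>\n")
print(existing_mg_reweight_ids(sample))  # [1]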
    def do_collect(self, line):
        """MadWeight Function: making the collect of the results"""
        
        self.configure()
        args = self.split_arg(line)
        self.check_collect(args)
        xml_reader = MWParserXML()
        
        name = self.MWparam.name
        # 1. Concatenate the files. #############################################
        out_dir = pjoin(self.me_dir, 'Events', name)
        if '-refine' in args:
            out_path = pjoin(out_dir, 'refine.xml') 
        else:
            out_path = pjoin(out_dir, 'output.xml')
            if os.path.exists(out_path):
                logger.warning('Output file already exists. Current one will be tagged with _old suffix')
                logger.warning('Run "collect -refine" to instead update your current results.')
                files.mv(pjoin(out_dir, 'output.xml'), pjoin(out_dir, 'output_old.xml'))
                files.mv(pjoin(out_dir, 'weights.out'), pjoin(out_dir, 'weights_old.out'))
                for MWdir in self.MWparam.MW_listdir:
                    out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
                    files.mv(pjoin(out_dir, 'output.xml'), pjoin(out_dir, 'output_old.xml'))
                out_dir = pjoin(self.me_dir, 'Events', name)
                    
        fsock = open(out_path, 'w')
        fsock.write('<madweight>\n<banner>\n')
        # BANNER
        for card in ['proc_card_mg5.dat','MadWeight_card.dat','transfer_card.dat','param_card.dat','run_card.dat']:
            cname = card[:-4]
            fsock.write('<%s>\n' % cname)
            fsock.write(open(pjoin(self.me_dir,'Cards',card)).read().replace('<','!>'))
            fsock.write('</%s>\n' % cname)
        fsock.write('</banner>\n')
        at_least_one = False
        for MWdir in self.MWparam.MW_listdir:
            out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
            input_dir = pjoin(self.me_dir, 'SubProcesses', MWdir, name)
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            if '-refine' in args:
                out_path = pjoin(out_dir, 'refine.xml') 
            else:
                out_path = pjoin(out_dir, 'output.xml')  
            fsock2 = open(out_path,'w')
            fsock.write('<subprocess id=\'%s\'>\n' % MWdir)
            fsock2.write('<subprocess id=\'%s\'>\n' % MWdir)
            for output in glob.glob(pjoin(input_dir, 'output_*_*.xml')):
                at_least_one = True
                text = open(output).read()
                fsock2.write(text)
                fsock.write(text)
                os.remove(output)
            fsock.write('</subprocess>\n')
            fsock2.write('</subprocess>\n')
            fsock2.close()
        fsock.write('\n</madweight>\n')          
        fsock.close()
        # 2. Special treatment for refine mode
        if '-refine' in args:
            xml_reader2 = MWParserXML(self.MWparam['mw_run']['log_level'])
            for MWdir in self.MWparam.MW_listdir:
                out_dir = pjoin(self.me_dir, 'Events',name, MWdir)
                ref_output = xml_reader2.read_file(pjoin(out_dir, 'refine.xml'))
                xml_reader2 = MWParserXML(self.MWparam['mw_run']['log_level'])
                base_output = xml_reader2.read_file(pjoin(out_dir, 'output.xml'))

                base_output.refine(ref_output)
                files.mv(pjoin(out_dir, 'output.xml'), pjoin(out_dir, 'output_old.xml'))
                base_output.write(pjoin(out_dir, 'output.xml'), MWdir)
        elif not at_least_one:
            logger.warning("Nothing to collect restore _old file as current.")
            out_dir = pjoin(self.me_dir, 'Events', name)
            files.mv(pjoin(out_dir, 'output_old.xml'), pjoin(out_dir, 'output.xml'))
            files.mv(pjoin(out_dir, 'weights_old.out'), pjoin(out_dir, 'weights.out'))
            for MWdir in self.MWparam.MW_listdir:
                out_dir = pjoin(self.me_dir, 'Events', name, MWdir)
                files.mv(pjoin(out_dir, 'output_old.xml'), pjoin(out_dir, 'output.xml'))
            
            
            
        # 3. Read the (final) log file for extracting data
        total = {}
        likelihood = {}
        err_likelihood = {}
        cards = set()
        events = set()
        tf_sets = set()
        for MW_dir in self.MWparam.MW_listdir:
            out_dir = pjoin(self.me_dir, 'Events', name, MW_dir)
            xml_reader = MWParserXML()
            data = xml_reader.read_file(pjoin(out_dir, 'output.xml'))
            #
            log_level = self.MWparam['mw_run']['log_level']            
            generator =  ((int(i),j,int(k),data[i][j][k]) for i in data 
                                                               for j in data[i] 
                                                               for k in data[i][j])
            for card, event, tf_set, obj in generator:
                # update the full list of events/cards
                cards.add(card)
                events.add(event)
                tf_sets.add(tf_set)
                # now accumulate the associated value and the squared error
                if (card,event, tf_set) in total:
                    value, error = total[(card, event, tf_set)]                    
                else:
                    value, error = 0, 0
                obj.calculate_total()
                value, error = (value + obj.value, error + obj.error**2) 
                total[(card, event, tf_set)] = (value, error)
                if tf_set == 1:
                    if value:
                        if card not in likelihood:
                            likelihood[card], err_likelihood[card] = 0, 0
                        likelihood[card] -= math.log(value)
                        err_likelihood[card] += error / value
                    else:
                        likelihood[card] = float('Inf')
                        err_likelihood[card] = float('nan')

                
        # write the weights file:
        fsock = open(pjoin(self.me_dir, 'Events', name, 'weights.out'), 'w')
        logger.info('Write output file with weight information: %s' % fsock.name)
        fsock.write('# Weight (un-normalized) for each card/event/set of transfer fct\n')
        fsock.write('# format: LHCO_event_number card_id tf_set_id value integration_error\n')
        events = list(events)
        events.sort()
        cards = list(cards)
        cards.sort()
        tf_sets = list(tf_sets)
        tf_sets.sort()
        for event in events:
            for card in cards:
                for tf_set in tf_sets:
                    try:
                        value, error = total[(card, event,tf_set)]
                    except KeyError:
                        continue
                    error = math.sqrt(error)
                    fsock.write('%s %s %s %s %s \n' % (event.replace('@', ' '), card, tf_set, value, error))
    
        # write the likelihood file:
        fsock = open(pjoin(self.me_dir, 'Events', name, 'un-normalized_likelihood.out'), 'w')
        fsock.write('# Warning: this likelihood needs a bin-by-bin normalization!\n')
        fsock.write('# If more than one set of transfer functions is defined, ONLY the first one is ')
        fsock.write('included in this file.\n')
        fsock.write('# format: card_id value integration_error\n')
        for card in cards:
            value, error = likelihood[card], err_likelihood[card]
            error = math.sqrt(error)
            fsock.write('%s %s %s \n' % (card, value, error))
Example #10
def runIOTests(arg=[''],update=True,force=0,synchronize=False):
    """ running the IOtests associated to expression. By default, this launch all 
    the tests created in classes inheriting IOTests.     
    """
    
    # Update the tarball, while removing the .backups.
    def noBackUps(tarinfo):
        if tarinfo.name.endswith('.BackUp'):
            return None
        else:
            return tarinfo
    
    if synchronize:
        print "Please, prefer updating the reference file automatically "+\
                                                          "rather than by hand."
        tar = tarfile.open(_hc_comparison_tarball, "w:bz2")
        tar.add(_hc_comparison_files, \
                  arcname=path.basename(_hc_comparison_files), filter=noBackUps)
        tar.close()
        # I am too lazy to work out the difference with the existing tarball and
        # put it in the log. So this is why one should refrain from editing the
        # reference files by hand.
        text = " \nModifications performed by hand on %s at %s in"%(\
                         str(datetime.date.today()),misc.format_timer(0.0)[14:])
        text += '\n   MadGraph5_aMC@NLO v. %(version)s, %(date)s\n'%misc.get_pkg_info()
        log = open(_hc_comparison_modif_log,mode='a')
        log.write(text)
        log.close()
        print "INFO:: Ref. tarball %s updated"%str(_hc_comparison_tarball)
            
        return
    
    if len(arg)!=1 or not isinstance(arg[0],str):
        print "Exactly one argument, and in must be a string, not %s."%arg
        return
    arg=arg[0]

    # Extract the tarball for hardcoded comparison if necessary
    
    if not path.isdir(_hc_comparison_files):
        if path.isfile(_hc_comparison_tarball):
            tar = tarfile.open(_hc_comparison_tarball,mode='r:bz2')
            tar.extractall(path.dirname(_hc_comparison_files))
            tar.close()
        else:
            os.makedirs(_hc_comparison_files)
    
    # Make a backup of the comparison file directory in order to revert it if
    # the user wants to ignore the changes detected (only when updating the refs)
    hc_comparison_files_BackUp = _hc_comparison_files+'_BackUp'
    if update and path.isdir(_hc_comparison_files):
        if path.isdir(hc_comparison_files_BackUp):        
            shutil.rmtree(hc_comparison_files_BackUp)
        shutil.copytree(_hc_comparison_files,hc_comparison_files_BackUp)

    IOTestManager.testFolders_filter = arg.split('/')[0].split('&')
    IOTestManager.testNames_filter = arg.split('/')[1].split('&')
    IOTestManager.filesChecked_filter = '/'.join(arg.split('/')[2:]).split('&')
    #print "INFO:: Using folders %s"%str(IOTestManager.testFolders_filter)    
    #print "INFO:: Using test names %s"%str(IOTestManager.testNames_filter)         
    #print "INFO:: Using file paths %s"%str(IOTestManager.filesChecked_filter)
    
    # Initiate all the IOTests from all the setUp()
    IOTestsInstances = []
    start = time.time()
    for IOTestsClass in IOTestFinder():
        # Instantiate the class
        IOTestsInstances.append(IOTestsClass())
        # Run the setUp
        IOTestsInstances[-1].setUp()
        # Find the testIO defined and use them in load mode only, we will run
        # them later here.
        IOTestsFunctions = IOTestFinder()
        IOTestsFunctions.collect_function(IOTestsClass,prefix='testIO')
        for IOTestFunction in IOTestsFunctions:
            start = time.time()
            eval('IOTestsInstances[-1].'+IOTestFunction.split('.')[-1]+\
                                                             '(load_only=True)')
            setUp_time = time.time() - start
            if setUp_time > 0.5:                
                print colored%(34,"Loading IOtest %s is slow (%s)"%
                        (colored%(32,'.'.join(IOTestFunction.split('.')[-3:])),
                                             colored%(34,'%.2fs'%setUp_time)))
    
    if len(IOTestsInstances)==0:
        print "No IOTest found."
        return
    
    # runIOTests cannot be made a classmethod, so I use an instance, but it does 
    # not matter which one as no instance attribute will be used.
    try:
        modifications = IOTestsInstances[-1].runIOTests( update = update, 
           force = force, verbose=True, testKeys=IOTestManager.all_tests.keys())
    except KeyboardInterrupt:
        if update:
            # Remove the BackUp of the reference files.
            if not path.isdir(hc_comparison_files_BackUp):
                print "\nWARNING:: Update interrupted and modifications already "+\
                                              "performed could not be reverted."
            else:
                shutil.rmtree(_hc_comparison_files)
                mv(hc_comparison_files_BackUp,_hc_comparison_files)
                print colored%(34,
                "\nINFO:: Update interrupted, existing modifications reverted.")
            sys.exit(0)
        else:
            print "\nINFO:: IOTest runs interrupted."
            sys.exit(0)
 
    tot_time = time.time() - start
    
    if modifications == 'test_over':
        print colored%(32,"\n%d IOTests "%len(IOTestManager.all_tests.keys()))+\
                    "successfully tested in %s."%(colored%(34,'%.2fs'%tot_time))
        sys.exit(0)
    elif not isinstance(modifications,dict):
        print "Error during the files update."
        sys.exit(0)

    if sum(len(v) for v in modifications.values())>0:
        # Display the modifications
        text = colored%(34, " \nModifications performed on %s at %s in"%(\
                        str(datetime.date.today()),misc.format_timer(0.0)[14:]))
        text += colored%(34, 
        '\n   MadGraph5_aMC@NLO v. %(version)s, %(date)s\n'%misc.get_pkg_info())
        for key in modifications.keys():
            if len(modifications[key])==0:
                continue
            text += colored%(32,"The following reference files have been %s :"%key)
            text += '\n'+'\n'.join(["   %s"%mod for mod in modifications[key]])
            text += '\n'
        print text
        try:
            answer = Cmd.timed_input(question=
          "Do you want to apply the modifications listed above? [y/n] >",default="y")
        except KeyboardInterrupt:
            answer = 'n'

        if answer == 'y':
            log = open(_hc_comparison_modif_log,mode='a')
            log.write(text)
            log.close()
            if IOTestManager._compress_ref_fodler:
                tar = tarfile.open(_hc_comparison_tarball, "w:bz2")
                tar.add(_hc_comparison_files, \
                      arcname=path.basename(_hc_comparison_files), filter=noBackUps)
                tar.close()
                print colored%(32,"INFO:: tarball %s updated"%str(_hc_comparison_tarball))
            else:
                print colored%(32,"INFO:: Reference %s updated"%\
                                    str(os.path.basename(_hc_comparison_files)))
                if len(modifications['created'])>0:
                    print colored%(31,"Some ref. files have been created; add "+\
                      "them to the revision with\n  "+
                      "bzr add tests/input_files/IOTestsComparison")
           
                # Make sure to remove the BackUp files
                filelist = glob.glob(os.path.join(_hc_comparison_files,
                                                            '*','*','*.BackUp'))
                for f in filelist:
                    os.remove(f)
        else:
            if path.isdir(hc_comparison_files_BackUp):
                shutil.rmtree(_hc_comparison_files)
                shutil.copytree(hc_comparison_files_BackUp,_hc_comparison_files)
                print colored%(32,"INFO:: No modifications applied.")
            else:
                print colored%(31,
                 "ERROR:: Could not revert the modifications. No backup found.")
    else:
        print colored%(32,"\nNo modifications performed. No update necessary.")
    
    # Remove the BackUp of the reference files.
    if path.isdir(hc_comparison_files_BackUp):
        shutil.rmtree(hc_comparison_files_BackUp)
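# Stand-alone sketch: the three IOTestManager filters above are decoded from a
# single "folders/testnames/filepaths" string, with '&' separating alternatives
# at each level. A quick illustration with a made-up filter value:
filter_arg = 'long_ML/ALL/jamp.f&born_matrix.f'
folders = filter_arg.split('/')[0].split('&')                # ['long_ML']
names = filter_arg.split('/')[1].split('&')                  # ['ALL']
file_paths = '/'.join(filter_arg.split('/')[2:]).split('&')  # ['jamp.f', 'born_matrix.f']
print((folders, names, file_paths))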
Example #11
def runIOTests(arg=[''], update=True, force=0, synchronize=False):
    """ running the IOtests associated to expression. By default, this launch all 
    the tests created in classes inheriting IOTests.     
    """

    # Update the tarball, while removing the .backups.
    def noBackUps(tarinfo):
        if tarinfo.name.endswith('.BackUp'):
            return None
        else:
            return tarinfo

    if synchronize:
        print "Please, prefer updating the reference file automatically "+\
                                                          "rather than by hand."
        tar = tarfile.open(_hc_comparison_tarball, "w:bz2")
        tar.add(_hc_comparison_files, \
                  arcname=path.basename(_hc_comparison_files), filter=noBackUps)
        tar.close()
        # I am too lazy to work out the difference with the existing tarball and
        # put it in the log. So this is why one should refrain from editing the
        # reference files by hand.
        text = " \nModifications performed by hand on %s at %s in"%(\
                         str(datetime.date.today()),misc.format_timer(0.0)[14:])
        text += '\n   MadGraph5_aMC@NLO v. %(version)s, %(date)s\n' % misc.get_pkg_info(
        )
        log = open(_hc_comparison_modif_log, mode='a')
        log.write(text)
        log.close()
        print "INFO:: Ref. tarball %s updated" % str(_hc_comparison_tarball)

        return

    if len(arg) != 1 or not isinstance(arg[0], str):
        print "Exactly one argument, and in must be a string, not %s." % arg
        return
    arg = arg[0]

    # Extract the tarball for hardcoded comparison if necessary

    if not path.isdir(_hc_comparison_files):
        if path.isfile(_hc_comparison_tarball):
            tar = tarfile.open(_hc_comparison_tarball, mode='r:bz2')
            tar.extractall(path.dirname(_hc_comparison_files))
            tar.close()
        else:
            os.makedirs(_hc_comparison_files)

    # Make a backup of the comparison file directory in order to revert it if
    # the user wants to ignore the changes detected (only when updating the refs)
    hc_comparison_files_BackUp = _hc_comparison_files + '_BackUp'
    if update and path.isdir(_hc_comparison_files):
        if path.isdir(hc_comparison_files_BackUp):
            shutil.rmtree(hc_comparison_files_BackUp)
        shutil.copytree(_hc_comparison_files, hc_comparison_files_BackUp)

    IOTestManager.testFolders_filter = arg.split('/')[0].split('&')
    IOTestManager.testNames_filter = arg.split('/')[1].split('&')
    IOTestManager.filesChecked_filter = '/'.join(arg.split('/')[2:]).split('&')
    #print "INFO:: Using folders %s"%str(IOTestManager.testFolders_filter)
    #print "INFO:: Using test names %s"%str(IOTestManager.testNames_filter)
    #print "INFO:: Using file paths %s"%str(IOTestManager.filesChecked_filter)

    # Initiate all the IOTests from all the setUp()
    IOTestsInstances = []
    start = time.time()
    for IOTestsClass in IOTestFinder():
        # Instantiate the class
        IOTestsInstances.append(IOTestsClass())
        # Run the setUp
        IOTestsInstances[-1].setUp()
        # Find the testIO defined and use them in load mode only, we will run
        # them later here.
        IOTestsFunctions = IOTestFinder()
        IOTestsFunctions.collect_function(IOTestsClass, prefix='testIO')
        for IOTestFunction in IOTestsFunctions:
            start = time.time()
            # Add all the tests automatically (i.e. bypass filters) if the
            # specified test is the name of the IOtest. the [7:] is to
            # skip the testIO prefix
            name_filer_bu = None
            if IOTestFunction.split('.')[-1][7:] in \
                                                 IOTestManager.testNames_filter:
                name_filer_bu = IOTestManager.testNames_filter
                IOTestManager.testNames_filter = ['ALL']
                existing_tests = IOTestManager.all_tests.keys()

            eval('IOTestsInstances[-1].'+IOTestFunction.split('.')[-1]+\
                                                             '(load_only=True)')
            if name_filer_bu:
                new_tests = [test[0] for test in IOTestManager.all_tests.keys() \
                                                  if test not in existing_tests]
                IOTestManager.testNames_filter = name_filer_bu + new_tests
                name_filer_bu = None

            setUp_time = time.time() - start
            if setUp_time > 0.5:
                print colored % (
                    34, "Loading IOtest %s is slow (%s)" %
                    (colored %
                     (32, '.'.join(IOTestFunction.split('.')[-3:])), colored %
                     (34, '%.2fs' % setUp_time)))

    if len(IOTestsInstances) == 0:
        print "No IOTest found."
        return

    # runIOTests cannot be made a classmethod, so I use an instance, but it does
    # not matter which one as no instance attribute will be used.
    try:
        modifications = IOTestsInstances[-1].runIOTests(
            update=update,
            force=force,
            verbose=True,
            testKeys=IOTestManager.all_tests.keys())
    except KeyboardInterrupt:
        if update:
            # Remove the BackUp of the reference files.
            if not path.isdir(hc_comparison_files_BackUp):
                print "\nWARNING:: Update interrupted and modifications already "+\
                                              "performed could not be reverted."
            else:
                shutil.rmtree(_hc_comparison_files)
                mv(hc_comparison_files_BackUp, _hc_comparison_files)
                print colored % (
                    34,
                    "\nINFO:: Update interrupted, existing modifications reverted."
                )
            sys.exit(0)
        else:
            print "\nINFO:: IOTest runs interrupted."
            sys.exit(0)

    tot_time = time.time() - start

    if modifications == 'test_over':
        print colored%(32,"\n%d IOTests "%len(IOTestManager.all_tests.keys()))+\
                    "successfully tested in %s."%(colored%(34,'%.2fs'%tot_time))
        sys.exit(0)
    elif not isinstance(modifications, dict):
        print "Error during the files update."
        sys.exit(0)

    if len(modifications['missing']) > 0:
        text = '\n'
        text += colored % (
            31,
            "The following files were not generated by the tests, fix this!")
        text += '\n' + '\n'.join(
            ["   %s" % mod for mod in modifications['missing']])
        print text
        modifications['missing'] = []

    if sum(len(v) for v in modifications.values()) > 0:
        # Display the modifications
        text = colored%(34, " \nModifications performed on %s at %s in"%(\
                        str(datetime.date.today()),misc.format_timer(0.0)[14:]))
        text += colored % (
            34, '\n   MadGraph5_aMC@NLO v. %(version)s, %(date)s\n' %
            misc.get_pkg_info())
        for key in modifications.keys():
            if len(modifications[key]) == 0:
                continue
            text += colored % (
                32, "The following reference files have been %s :" % key)
            text += '\n' + '\n'.join(
                ["   %s" % mod for mod in modifications[key]])
            text += '\n'
        print text
        try:
            answer = Cmd.timed_input(
                question=
                "Do you want to apply the modifications listed above? [y/n] >",
                default="y")
        except KeyboardInterrupt:
            answer = 'n'

        if answer == 'y':
            log = open(_hc_comparison_modif_log, mode='a')
            log.write(text)
            log.close()
            if IOTestManager._compress_ref_fodler:
                tar = tarfile.open(_hc_comparison_tarball, "w:bz2")
                tar.add(_hc_comparison_files, \
                      arcname=path.basename(_hc_comparison_files), filter=noBackUps)
                tar.close()
                print colored % (32, "INFO:: tarball %s updated" %
                                 str(_hc_comparison_tarball))
            else:
                print colored%(32,"INFO:: Reference %s updated"%\
                                    str(os.path.basename(_hc_comparison_files)))
                if len(modifications['created']) > 0:
                    print colored%(31,"Some ref. files have been created; add "+\
                      "them to the revision with\n  "+
                      "bzr add tests/input_files/IOTestsComparison")

                # Make sure to remove the BackUp files
                filelist = glob.glob(
                    os.path.join(_hc_comparison_files, '*', '*', '*.BackUp'))
                for f in filelist:
                    os.remove(f)
        else:
            if path.isdir(hc_comparison_files_BackUp):
                shutil.rmtree(_hc_comparison_files)
                shutil.copytree(hc_comparison_files_BackUp,
                                _hc_comparison_files)
                print colored % (32, "INFO:: No modifications applied.")
            else:
                print colored % (
                    31,
                    "ERROR:: Could not revert the modifications. No backup found."
                )
    else:
        print colored % (32,
                         "\nNo modifications performed. No update necessary.")

    # Remove the BackUp of the reference files.
    if path.isdir(hc_comparison_files_BackUp):
        shutil.rmtree(hc_comparison_files_BackUp)