Example No. 1
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None):
    """ Creates a node in the workflow for writing the veto_definer
    table. Returns a File instance for the output file.
    """
    if vetodef_file is None:
        vetodef_file = workflow.cp.get_opt_tags("workflow-segments",
                                                "segments-veto-definer-file",
                                                [])
        file_url = urlparse.urljoin('file:', urllib.pathname2url(vetodef_file))
        vdf_file = File(workflow.ifos,
                        'VETO_DEFINER',
                        workflow.analysis_time,
                        file_url=file_url)
        vdf_file.PFN(file_url, site='local')
    else:
        vdf_file = vetodef_file

    if tags is None: tags = []
    makedir(out_dir)
    node = PlotExecutable(workflow.cp,
                          'page_vetotable',
                          ifos=workflow.ifos,
                          out_dir=out_dir,
                          tags=tags).create_node()
    node.add_input_opt('--veto-definer-file', vdf_file)
    node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
    workflow += node
    return node.output_files[0]
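
A minimal usage sketch for the function above; workflow is assumed to be an already-constructed pycbc.workflow.core.Workflow whose [workflow-segments] section defines segments-veto-definer-file, and the output directory is a placeholder.

veto_table_html = make_veto_table(workflow, 'results/veto_table')
# The returned File points at the HTML page written by the page_vetotable job.
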
Example No. 2
    def create_node(self, trig_files, inj_files, seg_dir, tags=[]):
        node = Node(self)

        # Set input / output options
        node.add_input_list_opt('--input-files', trig_files)
        node.add_input_list_opt('--inj-files', inj_files)

        node.add_opt('--ifo-tag', self.ifos)
        node.add_opt('--exclude-segments', '%s/bufferSeg.txt' % seg_dir)
        node.add_opt('--output-dir', self.out_dir)

        # Create output files as File objects
        name_string = inj_files[0].description
        seg = trig_files[0].segment

        f_file = File(self.ifos,
                      name_string,
                      seg,
                      extension="xml",
                      directory=self.out_dir,
                      store_file=self.retain_files,
                      tags=[inj_files[0].tag_str.replace("split0", "FOUND")])

        m_file = File(self.ifos,
                      name_string,
                      seg,
                      extension="xml",
                      directory=self.out_dir,
                      store_file=self.retain_files,
                      tags=[inj_files[0].tag_str.replace("split0", "MISSED")])

        return node, FileList([f_file, m_file])
Example No. 3
    def create_node(self,
                    trig_files=None,
                    segment_dir=None,
                    out_tags=[],
                    tags=[]):
        node = Node(self)

        if not trig_files:
            raise ValueError("%s must be supplied with trigger files" %
                             self.name)

        # Data options
        pad_data = self.cp.get('inspiral', 'pad-data')
        if pad_data is None:
            raise ValueError("The option pad-data is a required option of "
                             "%s. Please check the ini file." % self.name)

        num_trials = int(self.cp.get("trig_combiner", "num-trials"))
        trig_name = self.cp.get('workflow', 'trigger-name')
        node.add_opt('--grb-name', trig_name)

        node.add_opt('--pad-data', pad_data)
        node.add_opt('--segment-length',
                     self.cp.get('inspiral', 'segment-duration'))
        node.add_opt('--ifo-tag', self.ifos)
        node.add_opt('--user-tag', 'INSPIRAL')

        # Set input / output options
        node.add_input_list_opt('--input-files', trig_files)

        node.add_opt('--segment-dir', segment_dir)
        node.add_opt('--output-dir', self.out_dir)

        out_files = FileList([])
        for out_tag in out_tags:
            out_file = File(self.ifos,
                            'INSPIRAL',
                            trig_files[0].segment,
                            directory=self.out_dir,
                            extension='xml.gz',
                            tags=["GRB%s" % trig_name, out_tag],
                            store_file=self.retain_files)
            #out_file.PFN(out_file.cache_entry.path, site="local")
            out_files.append(out_file)

        for trial in range(1, num_trials + 1):
            out_file = File(self.ifos,
                            'INSPIRAL',
                            trig_files[0].segment,
                            directory=self.out_dir,
                            extension='xml.gz',
                            tags=["GRB%s" % trig_name,
                                  "OFFTRIAL_%d" % trial],
                            store_file=self.retain_files)
            #out_file.PFN(out_file.cache_entry.path, site="local")
            out_files.append(out_file)

        node.add_profile('condor', 'request_cpus', self.num_threads)

        return node, out_files
Example No. 4
    def create_node(self, parent, inj_trigs, inj_string, max_inc, segment):
        node = Node(self)

        trig_name = self.cp.get('workflow', 'trigger-name')
        node.add_opt('--inj-string', inj_string)
        node.add_opt('--max-inclination', max_inc)
        node.add_opt('--inj-cache', '%s' % parent.storage_path)

        out_files = FileList([])
        for inj_trig in inj_trigs:
            out_file_tag = [
                inj_string, "FILTERED", max_inc,
                inj_trig.tag_str.rsplit('_', 1)[-1]
            ]
            out_file = File(self.ifos,
                            inj_trig.description,
                            inj_trig.segment,
                            extension="xml",
                            directory=self.out_dir,
                            tags=out_file_tag)
            out_file.PFN(out_file.cache_entry.path, site="local")
            out_files.append(out_file)

        node.add_opt('--output-dir', self.out_dir)

        return node, out_files
Example No. 5
def get_ipn_sky_files(workflow, file_url, tags=None):
    '''
    Retrieve the sky point files for searching over the IPN error box and
    populating it with injections.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    file_url : string
        The URL of the IPN sky points file.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    sky_points_file : pycbc.workflow.core.File
        File object representing the IPN sky points file.
    '''
    tags = tags or []
    ipn_sky_points = resolve_url(file_url)
    sky_points_url = urlparse.urljoin("file:",
                                      urllib.pathname2url(ipn_sky_points))
    sky_points_file = File(workflow.ifos,
                           "IPN_SKY_POINTS",
                           workflow.analysis_time,
                           file_url=sky_points_url,
                           tags=tags)
    sky_points_file.PFN(sky_points_url, site="local")

    return sky_points_file
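
A minimal usage sketch; workflow is assumed to exist already, and the sky-points URL and tag are placeholders.

ipn_sky_points_file = get_ipn_sky_files(
    workflow, 'file:///path/to/IPN_SKY_POINTS.txt', tags=['GRB070201'])
# The returned File can be given to search and injection jobs as an input.
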
Example No. 6
    def create_node(self, parent, tags=None):
        import Pegasus.DAX3 as dax
        if tags is None:
            tags = []
        node = Node(self)

        # Set input / output options
        node.add_opt('--trig-file', '%s' % parent.storage_path)
        #node._dax_node.uses(parent, link=dax.Link.INPUT, register=False,
        #                    transfer=False)
        #node._inputs += [parent]

        node.add_opt('--output-dir', self.out_dir)

        node.add_profile('condor', 'request_cpus', self.num_threads)

        # Adding output files as pycbc.workflow.core.File objects
        out_file = File(self.ifos, 'INSPIRAL', parent.segment,
                        directory=self.out_dir, extension='xml.gz',
                        tags=[parent.tag_str, 'CLUSTERED'],
                        store_file=self.retain_files)
        out_file.PFN(out_file.cache_entry.path, site="local")
        #node._dax_node.uses(out_file, link=dax.Link.OUTPUT, register=False,
        #                    transfer=False)
        #node._outputs += [out_file]
        out_file.node = node
        #node._add_output(out_file)

        return node, FileList([out_file])
Example No. 7
    def create_node(self,
                    trig_files=None,
                    segment_dir=None,
                    analysis_seg=None,
                    out_tags=[],
                    tags=[]):
        node = Node(self)

        if not trig_files:
            raise ValueError("%s must be supplied with trigger files" %
                             self.name)

        # Data options
        num_trials = int(self.cp.get("trig_combiner", "num-trials"))
        trig_name = self.cp.get('workflow', 'trigger-name')
        if all("COHERENT_NO_INJECTIONS" in t.name for t in trig_files) and \
                self.cp.has_option_tag('inspiral', 'do-short-slides',
                                       'coherent_no_injections'):
            node.add_opt('--short-slides')

        node.add_opt('--grb-name', trig_name)

        node.add_opt('--trig-start-time', analysis_seg[0])
        node.add_opt('--ifo-tag', self.ifos)
        node.add_opt('--user-tag', 'INSPIRAL')

        # Set input / output options
        node.add_input_list_opt('--input-files', trig_files)

        node.add_opt('--segment-dir', segment_dir)
        node.add_opt('--output-dir', self.out_dir)

        out_files = FileList([])
        for out_tag in out_tags:
            out_file = File(self.ifos,
                            'INSPIRAL',
                            trig_files[0].segment,
                            directory=self.out_dir,
                            extension='xml.gz',
                            tags=["GRB%s" % trig_name, out_tag],
                            store_file=self.retain_files)
            out_files.append(out_file)

        for trial in range(1, num_trials + 1):
            out_file = File(self.ifos,
                            'INSPIRAL',
                            trig_files[0].segment,
                            directory=self.out_dir,
                            extension='xml.gz',
                            tags=["GRB%s" % trig_name,
                                  "OFFTRIAL_%d" % trial],
                            store_file=self.retain_files)
            out_files.append(out_file)

        node.add_profile('condor', 'request_cpus', self.num_threads)

        return node, out_files
Example No. 8
def setup_gate_pregenerated(workflow, output_dir=None, tags=None):
    '''
    Setup CBC workflow to use pregenerated gating files.
    The file given in cp.get('workflow-gating', 'gating-file-(ifo)') will
    be used as the --gating-file for all jobs for that ifo.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    output_dir : path string
       The directory where data products will be placed.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    gate_files : pycbc.workflow.core.FileList
        The FileList holding the gating files
    '''
    if tags is None:
        tags = []
    gate_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_GATE"

    for ifo in workflow.ifos:
        try:
            pre_gen_file = cp.get_opt_tags('workflow-gating',
                                           'gating-file-%s' % ifo.lower(),
                                           tags)
            pre_gen_file = resolve_url(pre_gen_file,
                                       os.path.join(os.getcwd(), output_dir))
            file_url = urlparse.urljoin('file:',
                                        urllib.pathname2url(pre_gen_file))
            curr_file = File(ifo, user_tag, global_seg, file_url, tags=tags)
            curr_file.PFN(file_url, site='local')
            gate_files.append(curr_file)

            logging.info("Using gating file %s for %s", file_url, ifo)

        except ConfigParser.Error:
            logging.info("No gating file specified for %s", ifo)

    return gate_files
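
A minimal usage sketch; workflow is assumed to exist and the [workflow-gating] section of its ini file is expected to provide gating-file-h1, gating-file-l1, and so on. The output directory is a placeholder.

gate_files = setup_gate_pregenerated(workflow, output_dir='gating')
print("Registered %d pregenerated gating file(s)" % len(gate_files))
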
Example No. 9
    def create_node(self, data_seg, valid_seg, parent=None, dfParents=None, tags=[]):
        node = LegacyAnalysisNode(self)

        if not dfParents:
            raise ValueError("%s must be supplied with frame files"
                             % self.name)

        # Check pad-data before casting, otherwise int(None) would raise a
        # TypeError and hide the real configuration problem.
        pad_data = self.get_opt('pad-data')
        if pad_data is None:
            raise ValueError("The option pad-data is a required option of "
                             "%s. Please check the ini file." % self.name)
        pad_data = int(pad_data)

        # Hide the import here to avoid a circular import
        from pycbc.workflow import int_gps_time_to_str
        node.add_opt('--gps-start-time',
                     int_gps_time_to_str(data_seg[0] + pad_data))
        node.add_opt('--gps-end-time',
                     int_gps_time_to_str(data_seg[1] - pad_data))

        cache_file = dfParents[0]

        # Check the extension
        extension = '.xml'
        gzipped = self.has_opt('write-compress')
        if gzipped is not None:
            extension += '.gz'

        # Create the output file for this job
        out_file = File(self.ifo, self.name, valid_seg,
                        extension=extension,
                        directory=self.out_dir,
                        tags=self.tags + tags,
                        store_file=self.retain_files)

        node.add_output_opt('--output-file', out_file)
        node.add_input_list_opt('--frame-files', dfParents)
        return node
Example No. 10
def setup_gate_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use pregenerated gating files.
    The file given in cp.get('workflow-gating', 'gating-pregenerated-file-(ifo)')
    will be used as the --gating-file for all matched-filtering jobs for that
    ifo.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    gate_files : pycbc.workflow.core.FileList
        The FileList holding the gating files
    '''
    if tags is None:
        tags = []
    gate_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_GATE"

    for ifo in workflow.ifos:
        try:
            pre_gen_file = cp.get_opt_tags(
                'workflow-gating', 'gating-pregenerated-file-%s' % ifo.lower(),
                tags)
            pre_gen_file = resolve_url(pre_gen_file)
            file_url = urlparse.urljoin('file:',
                                        urllib.pathname2url(pre_gen_file))
            curr_file = File(ifo, user_tag, global_seg, file_url, tags=tags)
            curr_file.PFN(file_url, site='local')
            gate_files.append(curr_file)

        except ConfigParser.Error:
            # It's unlikely, but not impossible, that only some ifos
            # will be gated
            logging.warn("No gating file specified for IFO %s." % (ifo, ))
            pass

    return gate_files
Example No. 11
def convert_cachelist_to_filelist(datafindcache_list):
    """
    Take as input a list of glue.lal.Cache objects and return a pycbc FileList
    containing all frames within those caches.

    Parameters
    -----------
    datafindcache_list : list of glue.lal.Cache objects
        The list of cache files to convert.

    Returns
    --------
    datafind_filelist : FileList of frame File objects
        The list of frame files.
    """
    datafind_filelist = FileList([])
    prev_file = None
    for cache in datafindcache_list:
        curr_ifo = cache.ifo
        for frame in cache:
            # Don't add a new workflow file entry for this frame if
            # it is a duplicate. These are assumed to be returned in time
            # order
            if prev_file:
                prev_name = prev_file.cache_entry.url.split('/')[-1]
                this_name = frame.url.split('/')[-1]
                if prev_name == this_name:
                    continue

            # Pegasus doesn't like "localhost" in URLs.
            frame.url = frame.url.replace('file://localhost','file://')

            currFile = File(curr_ifo, frame.description,
                    frame.segment, file_url=frame.url, use_tmp_subdirs=True)
            if frame.url.startswith('file://'):
                currFile.PFN(frame.url, site='local')
                if frame.url.startswith(
                    'file:///cvmfs/oasis.opensciencegrid.org/'):
                    # Datafind returned a URL valid on the osg as well
                    # so add the additional PFNs to allow OSG access.
                    currFile.PFN(frame.url, site='osg')
                    currFile.PFN(frame.url.replace(
                        'file:///cvmfs/oasis.opensciencegrid.org/',
                        'root://xrootd-local.unl.edu/user/'), site='osg')
                    currFile.PFN(frame.url.replace(
                        'file:///cvmfs/oasis.opensciencegrid.org/',
                        'gsiftp://red-gridftp.unl.edu/user/'), site='osg')
                    currFile.PFN(frame.url.replace(
                        'file:///cvmfs/oasis.opensciencegrid.org/',
                        'gsiftp://ldas-grid.ligo.caltech.edu/hdfs/'), site='osg')
            else:
                currFile.PFN(frame.url, site='notlocal')
            datafind_filelist.append(currFile)
            prev_file = currFile
    return datafind_filelist
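
A minimal usage sketch, assuming a LAL-format cache file on disk; the ifo attribute is attached by hand here, mirroring what the datafind module does before calling this function. The cache filename is a placeholder.

from glue import lal as gluelal

with open('H1-DATAFIND.lcf') as cache_fp:
    frame_cache = gluelal.Cache.fromfile(cache_fp)
frame_cache.ifo = 'H1'  # the converter reads cache.ifo for each cache
frame_files = convert_cachelist_to_filelist([frame_cache])
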
Example No. 12
def convert_cachelist_to_filelist(datafindcache_list):
    """
    Take as input a list of glue.lal.Cache objects and return a pycbc FileList
    containing all frames within those caches.
   
    Parameters
    -----------
    datafindcache_list : list of glue.lal.Cache objects
        The list of cache files to convert.
  
    Returns
    --------
    datafind_filelist : FileList of frame File objects
        The list of frame files.
    """
    datafind_filelist = FileList([])
    prev_file = None
    for cache in datafindcache_list:
        curr_ifo = cache.ifo
        for frame in cache:
            # Don't add a new workflow file entry for this frame if
            # it is a duplicate. These are assumed to be returned in time
            # order
            if prev_file:
                prev_name = prev_file.cache_entry.url.split('/')[-1]
                this_name = frame.url.split('/')[-1]
                if prev_name == this_name:
                    continue

            # Pegasus doesn't like "localhost" in URLs.
            frame.url = frame.url.replace('file://localhost', 'file://')

            currFile = File(curr_ifo,
                            frame.description,
                            frame.segment,
                            file_url=frame.url,
                            use_tmp_subdirs=True)
            if frame.url.startswith('file://'):
                currFile.PFN(frame.url, site='local')
            else:
                currFile.PFN(frame.url, site='notlocal')
            datafind_filelist.append(currFile)
            prev_file = currFile
    return datafind_filelist
Example No. 13
def multi_segments_to_file(seg_list, filename, names, ifos):
    """ Save segments to an xml file
    
    Parameters
    ----------
    seg_list: glue.segments.segmentlist
        List of segment lists to write to disk
    filename : str
        name of the output file
    names : 
        name of each segment list
    ifos :
        list of ifos
        
    Returns
    -------
    File : Return a pycbc.core.File reference to the file
    """
    from pycbc.workflow.core import File

    # create XML doc and add process table
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())
    process = ligolw_utils.process.register_to_xmldoc(outdoc, argv[0], {})

    for segs, ifo, name in zip(seg_list, ifos, names):
        fsegs = [(lal.LIGOTimeGPS(seg[0]), lal.LIGOTimeGPS(seg[1])) \
            for seg in segs]

        # add segments, segments summary, and segment definer tables using glue library
        with ligolw_segments.LigolwSegments(outdoc, process) as xmlsegs:
            xmlsegs.insert_from_segmentlistdict({ifo: fsegs}, name)

    # write file
    ligolw_utils.write_filename(outdoc, filename)

    # return a File instance
    url = urlparse.urlunparse(
        ['file', 'localhost', filename, None, None, None])
    f = File(ifo, name, segs, file_url=url, tags=[name])
    f.PFN(os.path.abspath(filename), site='local')
    return f
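
A minimal usage sketch using glue.segments; the GPS times, segment names and output filename are placeholders.

from glue import segments

science = segments.segmentlist([segments.segment(1126051217, 1126055313)])
science_ok = segments.segmentlist([segments.segment(1126051217, 1126053265)])
segs_file = multi_segments_to_file([science, science_ok],
                                   'H1-ANALYSIS_SEGMENTS.xml',
                                   ['SCIENCE', 'SCIENCE_OK'],
                                   ['H1', 'H1'])
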
Example No. 14
    def create_node(self, bank):
        """
        Set up a CondorDagmanNode class to run lalapps_splitbank code

        Parameters
        ----------
        bank : pycbc.workflow.core.File 
            The OutFile containing the template bank to be split

        Returns
        --------
        node : pycbc.workflow.core.Node
            The node to run the job
        """
        node = Node(self)
        # FIXME: This is a hack because SplitBank fails if given an input file
        # whose path contains the character '-' or if the input file is not in
        # the same directory as the output. Therefore we just set the path to
        # be the local path
        fullPath = bank.cache_entry.path
        bank.cache_entry.path = os.path.basename(fullPath)
        node.add_input_opt('--bank-file', bank)
        # FIXME: Set the path back to what it was. This is part of the hack
        #        above and should be removed if possible.
        bank.cache_entry.path = fullPath

        # Get the output (taken from inspiral.py)
        url_list = []
        x = bank.filename.split('-')
        if len(x) != 4:
            errMsg = "Input file name is not compatible with splitbank. Name "
            errMsg += "must follow the lal cache standard, for example "
            errMsg += "H1-TMPLTBANK-900000000-1000.xml. "
            errMsg += "Got %s." % (bank.filename, )
            raise ValueError(errMsg)
        for i in range(0, self.num_banks):
            out_file = "%s-%s_%2.2d-%s-%s" % (x[0], x[1], i, x[2], x[3])
            out_url = urlparse.urlunparse([
                'file', 'localhost',
                os.path.join(self.out_dir, out_file), None, None, None
            ])
            url_list.append(out_url)

            job_tag = bank.description + "_" + self.name.upper()
            out_file = File(bank.ifo,
                            job_tag,
                            bank.segment,
                            file_url=out_url,
                            tags=bank.tags,
                            store_file=self.retain_files)
            node._add_output(out_file)
        return node
Example No. 15
def setup_psd_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use pregenerated psd files.
    The file given in cp.get('workflow-psd', 'psd-pregenerated-file-(ifo)') will
    be used as the --psd-file argument to geom_nonspinbank, geom_aligned_bank
    and pycbc_plot_psd_file.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    psd_files : pycbc.workflow.core.FileList
        The FileList holding the PSD files
    '''
    if tags is None:
        tags = []
    psd_files = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_PSD"

    # Check for one psd for all ifos
    try:
        pre_gen_file = cp.get_opt_tags('workflow-psd',
                        'psd-pregenerated-file', tags)
        pre_gen_file = resolve_url(pre_gen_file)
        file_url = urljoin('file:', pathname2url(pre_gen_file))
        curr_file = File(workflow.ifos, user_tag, global_seg, file_url,
                                                    tags=tags)
        curr_file.PFN(file_url, site='local')
        psd_files.append(curr_file)
    except ConfigParser.Error:
        # Check for one psd per ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_file = cp.get_opt_tags('workflow-psd',
                                'psd-pregenerated-file-%s' % ifo.lower(),
                                tags)
                pre_gen_file = resolve_url(pre_gen_file)
                file_url = urljoin('file:', pathname2url(pre_gen_file))
                curr_file = File(ifo, user_tag, global_seg, file_url,
                                                            tags=tags)
                curr_file.PFN(file_url, site='local')
                psd_files.append(curr_file)

            except ConfigParser.Error:
                # It's unlikely, but not impossible, that only some ifos
                # will have pregenerated PSDs
                logging.warn("No psd file specified for IFO %s." % (ifo,))
                pass

    return psd_files
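
A minimal usage sketch; workflow is assumed to exist and the paths in the ini fragment below are placeholders.

# [workflow-psd]
# psd-pregenerated-file-h1 = /path/to/H1-PSD.txt
# psd-pregenerated-file-l1 = /path/to/L1-PSD.txt
psd_files = setup_psd_pregenerated(workflow)
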
Example No. 16
    def create_node(self, parent, tags=[]):
        node = Node(self)

        # Set input / output options
        node.add_opt('--trig-file', '%s' % parent.storage_path)
        node.add_opt('--output-dir', self.out_dir)

        node.add_profile('condor', 'request_cpus', self.num_threads)

        # Adding output files as pycbc.workflow.core.File objects
        out_file = File(self.ifos, 'INSPIRAL', parent.segment,
                        directory=self.out_dir, extension='xml.gz',
                        tags=[parent.tag_str, 'CLUSTERED'],
                        store_file=self.retain_files)
        #out_file.PFN(out_file.cache_entry.path, site="local")

        return node, FileList([out_file])
Example No. 17
    def create_node(self, coinc_files, bank_file, background_bins, tags=[]):
        node = Node(self)
        node.add_input_list_opt('--coinc-files', coinc_files)
        node.add_input_opt('--bank-file', bank_file)
        node.add_opt('--background-bins', ' '.join(background_bins))

        names = [b.split(':')[0] for b in background_bins]

        output_files = [
            File(coinc_files[0].ifo_list,
                 self.name,
                 coinc_files[0].segment,
                 directory=self.out_dir,
                 tags=tags + ['mbin-%s' % i],
                 extension='.hdf') for i in range(len(background_bins))
        ]
        node.add_output_list_opt('--output-files', output_files)
        node.names = names
        return node
Example No. 18
def get_ipn_sky_files(workflow, tags=None):
    '''
    Retrieve the sky point files for searching over the IPN error box and
    populating it with injections.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    search_points_file : pycbc.workflow.core.File
        File object representing the IPN search sky points file.
    sim_points_file : pycbc.workflow.core.File
        File object representing the IPN simulation sky points file.
    '''
    if tags is None:
        tags = []
    cp = workflow.cp
    ipn_search_points = cp.get("workflow-inspiral", "ipn-search-points")
    ipn_search_points = resolve_url(ipn_search_points)
    search_points_url = urlparse.urljoin(
        "file:", urllib.pathname2url(ipn_search_points))
    search_points_file = File(workflow.ifos,
                              "SEARCH_POINTS",
                              workflow.analysis_time,
                              file_url=search_points_url,
                              tags=tags)
    search_points_file.PFN(search_points_url, site="local")

    ipn_sim_points = cp.get("workflow-injections", "ipn-sim-points")
    ipn_sim_points = resolve_url(ipn_sim_points)
    sim_points_url = urlparse.urljoin("file:",
                                      urllib.pathname2url(ipn_sim_points))
    sim_points_file = File(workflow.ifos,
                           "SIM_POINTS",
                           workflow.analysis_time,
                           file_url=sim_points_url,
                           tags=tags)
    sim_points_file.PFN(sim_points_url, site="local")

    return search_points_file, sim_points_file
Example No. 19
def setup_analysislogging(workflow, segs_list, insps, args, output_dir,
                          program_name="workflow", tags=[]):
    """
    This module sets up the analysis logging xml file that contains the
    following information:

    * Command line arguments that the code was run with
    * Segment list of times marked as SCIENCE
    * Segment list of times marked as SCIENCE and "OK", i.e. not CAT_1 vetoed
    * Segment list of times marked as SCIENCE_OK and present on the cluster
    * The times that will be analysed by the matched-filter jobs

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance.
    segs_list : pycbc.workflow.core.FileList
        A list of Files containing the information needed to generate the
        segments above. For segments generated at run time the associated
        segmentlist is a property of this object.
    insps : pycbc.workflow.core.FileList
        The output files from the matched-filtering module. Used to identify
        what times have been analysed in this workflow.
    args : argparse.Namespace
        The parsed command-line options, recorded in the process_params table.
    output_dir : path
        Directory to output any files to.
    program_name : string (optional, default = "workflow")
        The program name to stick in the process/process_params tables.
    tags : list (optional, default = [])
        If given restrict to considering inspiral and segment files that
        are tagged with all tags in this list.
    """
    logging.info("Entering analysis logging module.")
    make_analysis_dir(output_dir)

    # Construct the summary XML file
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())

    # Add process and process_params tables
    proc_id = process.register_to_xmldoc(outdoc, program_name,
                                            vars(args) ).process_id

    # Now add the various segment lists to this file
    summ_segs = segmentlist([workflow.analysis_time])
    
    # If tags is given filter by tags
    if tags:
        for tag in tags:
            segs_list = segs_list.find_output_with_tag(tag)
            insps = insps.find_output_with_tag(tag)

    for ifo in workflow.ifos:
        # Let's get the segment lists we need
        seg_ifo_files = segs_list.find_output_with_ifo(ifo)
        # SCIENCE
        sci_seg_file = seg_ifo_files.find_output_with_tag('SCIENCE')
        if len(sci_seg_file) == 1:
            sci_seg_file = sci_seg_file[0]
            sci_segs = sci_seg_file.segmentList
            sci_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id,
                                                   ifo, "CBC_WORKFLOW_SCIENCE", 0)
            segmentdb_utils.add_to_segment(outdoc, proc_id, sci_def_id,
                                                                      sci_segs)
            segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sci_def_id,
                                                         summ_segs, comment='')
        elif sci_seg_file:
            # FIXME: While the segment module is still fractured (#127) this
            #        may not work. Please update when #127 is resolved
            pass
            #err_msg = "Got %d files matching %s and %s. Expected 1 or 0." \
            #          %(len(sci_seg_file), ifo, 'SCIENCE')
            #raise ValueError(err_msg)

        # SCIENCE_OK
        sci_ok_seg_file = seg_ifo_files.find_output_with_tag('SCIENCE_OK')
        if len(sci_ok_seg_file) == 1:
            sci_ok_seg_file = sci_ok_seg_file[0]
            sci_ok_segs = sci_ok_seg_file.segmentList
            sci_ok_def_id = segmentdb_utils.add_to_segment_definer(outdoc,
                                       proc_id, ifo, "CBC_WORKFLOW_SCIENCE_OK", 0)
            segmentdb_utils.add_to_segment(outdoc, proc_id, sci_ok_def_id,
                                                                   sci_ok_segs)
            segmentdb_utils.add_to_segment_summary(outdoc, proc_id,
                                          sci_ok_def_id, summ_segs, comment='')
        elif sci_ok_seg_file:
            # FIXME: While the segment module is still fractured (#127) this
            #        may not work. Please update when #127 is resolved
            pass
            #err_msg = "Got %d files matching %s and %s. Expected 1 or 0." \
            #          %(len(sci_ok_seg_file), ifo, 'SCIENCE_OK')
            #raise ValueError(err_msg)


        # SCIENCE_AVAILABLE
        sci_available_seg_file = seg_ifo_files.find_output_with_tag(\
                                                           'SCIENCE_AVAILABLE')
        if len(sci_available_seg_file) == 1:
            sci_available_seg_file = sci_available_seg_file[0]
            sci_available_segs = sci_available_seg_file.segmentList
            sci_available_def_id = segmentdb_utils.add_to_segment_definer(\
                        outdoc, proc_id, ifo, "CBC_WORKFLOW_SCIENCE_AVAILABLE", 0)
            segmentdb_utils.add_to_segment(outdoc, proc_id,
                                      sci_available_def_id, sci_available_segs)
            segmentdb_utils.add_to_segment_summary(outdoc, proc_id,
                                   sci_available_def_id, summ_segs, comment='')
        elif sci_available_seg_file:
            # FIXME: While the segment module is still fractured (#127) this
            #        may not work. Please update when #127 is resolved
            pass
            #err_msg = "Got %d files matching %s and %s. Expected 1 or 0." \
            #          %(len(sci_available_seg_file), ifo, 'SCIENCE_AVAILABLE')
            #raise ValueError(err_msg)

        # ANALYSABLE - This one needs to come from inspiral outs
        ifo_insps = insps.find_output_with_ifo(ifo)
        analysable_segs = ifo_insps.get_times_covered_by_files()

        analysable_def_id = segmentdb_utils.add_to_segment_definer(outdoc,
                                     proc_id, ifo, "CBC_WORKFLOW_ANALYSABLE", 0)
        segmentdb_utils.add_to_segment(outdoc, proc_id, analysable_def_id,
                                                               analysable_segs)
        segmentdb_utils.add_to_segment_summary(outdoc, proc_id,
                                      analysable_def_id, summ_segs, comment='')

    summ_file = File(workflow.ifos, "WORKFLOW_SUMMARY",
                     workflow.analysis_time, extension=".xml",
                     directory=output_dir)
    summ_file.PFN(summ_file.storage_path, site='local')
    utils.write_filename(outdoc, summ_file.storage_path)

    return FileList([summ_file])
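
A minimal usage sketch; segment_files, inspiral_files and args are assumed to come from earlier stages of the workflow script, and the output directory is a placeholder.

summary_files = setup_analysislogging(workflow, segment_files, inspiral_files,
                                      args, 'analysis_logging',
                                      program_name='my_search_workflow')
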
Example No. 20
def convert_cachelist_to_filelist(datafindcache_list):
    """
    Take as input a list of glue.lal.Cache objects and return a pycbc FileList
    containing all frames within those caches.

    Parameters
    -----------
    datafindcache_list : list of glue.lal.Cache objects
        The list of cache files to convert.

    Returns
    --------
    datafind_filelist : FileList of frame File objects
        The list of frame files.
    """
    prev_file = None
    prev_name = None
    this_name = None

    datafind_filelist = FileList([])

    for cache in datafindcache_list:
        # sort the cache into time sequential order
        cache.sort()
        curr_ifo = cache.ifo
        for frame in cache:
            # Pegasus doesn't like "localhost" in URLs.
            frame.url = frame.url.replace('file://localhost','file://')

            # Create one File() object for each unique frame file that we
            # get back in the cache.
            if prev_file:
                prev_name = os.path.basename(prev_file.cache_entry.url)
                this_name = os.path.basename(frame.url)

            if (prev_file is None) or (prev_name != this_name):
                currFile = File(curr_ifo, frame.description,
                    frame.segment, file_url=frame.url, use_tmp_subdirs=True)
                datafind_filelist.append(currFile)
                prev_file = currFile

            # Populate the PFNs for the File() we just created
            if frame.url.startswith('file://'):
                currFile.add_pfn(frame.url, site='local')
                if frame.url.startswith(
                    'file:///cvmfs/oasis.opensciencegrid.org/ligo/frames'):
                    # Datafind returned a URL valid on the osg as well
                    # so add the additional PFNs to allow OSG access.
                    currFile.add_pfn(frame.url, site='osg')
                    currFile.add_pfn(frame.url.replace(
                        'file:///cvmfs/oasis.opensciencegrid.org/',
                        'root://xrootd-local.unl.edu/user/'), site='osg')
                    currFile.add_pfn(frame.url.replace(
                        'file:///cvmfs/oasis.opensciencegrid.org/',
                        'gsiftp://red-gridftp.unl.edu/user/'), site='osg')
                    currFile.add_pfn(frame.url.replace(
                        'file:///cvmfs/oasis.opensciencegrid.org/',
                        'gsiftp://ldas-grid.ligo.caltech.edu/hdfs/'), site='osg')
                elif frame.url.startswith(
                    'file:///cvmfs/gwosc.osgstorage.org/'):
                    # Datafind returned a URL valid on the osg as well
                    # so add the additional PFNs to allow OSG access.
                    for s in ['osg', 'orangegrid', 'osgconnect']:
                        currFile.add_pfn(frame.url, site=s)
                        currFile.add_pfn(frame.url, site="{}-scratch".format(s))
            else:
                currFile.add_pfn(frame.url, site='notlocal')

    return datafind_filelist
Example No. 21
    def create_node(self, trig_files=None, segment_dir=None, analysis_seg=None,
                    slide_tag=None, out_tags=None, tags=None):
        import Pegasus.DAX3 as dax
        if out_tags is None:
            out_tags = []
        if tags is None:
            tags = []
        node = Node(self)

        if not trig_files:
            raise ValueError("%s must be supplied with trigger files"
                              % self.name)

        # Data options
        num_trials = int(self.cp.get("trig_combiner", "num-trials"))
        trig_name = self.cp.get('workflow', 'trigger-name')
        if all("COHERENT_NO_INJECTIONS" in t.name for t in trig_files) and \
                self.cp.has_option_tag('inspiral', 'do-short-slides',
                                       'coherent_no_injections'):
            node.add_opt('--short-slides')
        
        node.add_opt('--grb-name', trig_name)
        
        node.add_opt('--trig-start-time', analysis_seg[0])
        node.add_opt('--ifo-tag', self.ifos)
        node.add_opt('--user-tag', 'INSPIRAL')
        if tags:
            node.add_opt('--job-tag', '_'.join(tags))

        if slide_tag is not None:
            node.add_opt('--slide-tag', slide_tag)
            node.add_opt('--long-slides')
            tag_start = ["TIMESLIDES_GRB%s_%s" % (trig_name, slide_tag)] + tags
        else:
            tag_start = ["GRB%s" % trig_name] + tags

        # Set input / output options
        if all(hasattr(t.node, "executable") for t in trig_files):
            if all(t.node.executable.name == "trig_cluster"
                   for t in trig_files):
                node.add_opt('--input-files',
                             " ".join([t.storage_path for t in trig_files]))
                if self.cp.has_option_tag('inspiral', 'do-short-slides',
                                          'coherent_no_injections'):
                    node.add_opt('--short-slides')
            else:
                node.add_input_list_opt('--input-files', trig_files)
        else:
            node.add_opt('--input-files',
                         " ".join([t.storage_path for t in trig_files]))

        node.add_opt('--segment-dir', segment_dir)
        node.add_opt('--output-dir', self.out_dir)

        out_files = FileList([])
        for out_tag in out_tags:
            out_file = File(self.ifos, 'INSPIRAL', trig_files[0].segment,
                            directory=self.out_dir, extension='xml.gz',
                            tags=tag_start+[out_tag],
                            store_file=self.retain_files)
            out_files.append(out_file)
            #node._dax_node.uses(out_file, link=dax.Link.OUTPUT, register=False,
            #                    transfer=False)
            #node._outputs += [out_file]
            #out_file.node = node
            #node._add_output(out_file)

        for trial in range(1, num_trials + 1):
            out_file = File(self.ifos, 'INSPIRAL', trig_files[0].segment,
                            directory=self.out_dir, extension='xml.gz',
                            tags=tag_start+["OFFTRIAL_%d" % trial],
                            store_file=self.retain_files)
            out_files.append(out_file)
            #node._dax_node.uses(out_file, link=dax.Link.OUTPUT, register=False,
            #                    transfer=False)
            #node._outputs += [out_file]
            #out_file.node = node
            #node._add_output(out_file)

        node.add_profile('condor', 'request_cpus', self.num_threads)

        return node, out_files
Example No. 22
def setup_hardware_injection_page(workflow,
                                  input_files,
                                  cache_filename,
                                  inspiral_cachepattern,
                                  output_dir,
                                  tags=[],
                                  **kwargs):
    """
    This function sets up the nodes that will create the hardware injection page.

    Parameters
    -----------
    workflow : ahope.Workflow
        The ahope workflow instance that the hardware injection page jobs
        will be added to.
    input_files : ahope.FileList
        A FileList of files that are used as input at this stage.
    cache_filename : str
        Filename of the ihope cache.
    inspiral_cachepattern : str
        The pattern that will be used to find inspiral filenames in the cache.
    output_dir : path
        The directory in which output files will be stored.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. An example might be ['full_data'].
        This will be used to search the cache.

    Returns
    --------
    plot_files : ahope.FileList
        A list of the output files from this stage.
    """

    logging.info("Entering hardware injection page setup.")

    out_files = FileList([])

    # check if hardware injection section exists
    # if not then do not do add hardware injection job to the workflow
    if not workflow.cp.has_section('workflow-hardware-injections'):
        msg = "There is no workflow-hardware-injections section. "
        msg += "The hardware injection page will not be added to the workflow."
        logging.info(msg)
        logging.info("Leaving hardware injection page setup.")
        return out_files

    # make the output dir
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # create executable
    hwinjpage_job = Executable(workflow.cp, 'hardware_injection_page',
                               'vanilla', workflow.ifos, output_dir, tags)

    # retrieve hardware injection file
    hwinjDefUrl = workflow.cp.get_opt_tags('workflow-hardware-injections',
                                           'hwinj-definer-url', tags)
    hwinjDefBaseName = os.path.basename(hwinjDefUrl)
    hwinjDefNewPath = os.path.join(output_dir, hwinjDefBaseName)
    urllib.urlretrieve(hwinjDefUrl, hwinjDefNewPath)

    # update hwinj definer file location
    workflow.cp.set("workflow-hardware-injections", "hwinj-definer-file",
                    hwinjDefNewPath)

    # query for the hardware injection segments
    get_hardware_injection_segment_files(workflow, output_dir, hwinjDefNewPath)

    # create node
    node = Node(hwinjpage_job)
    node.add_opt('--gps-start-time', workflow.analysis_time[0])
    node.add_opt('--gps-end-time', workflow.analysis_time[1])
    node.add_opt('--source-xml', hwinjDefNewPath)
    node.add_opt('--segment-dir', output_dir)
    node.add_opt('--cache-file', cache_filename)
    node.add_opt('--cache-pattern', inspiral_cachepattern)
    node.add_opt('--analyze-injections', '')
    for ifo in workflow.ifos:
        node.add_opt('--%s-injections' % ifo.lower(), '')
    outfile = File(node.executable.ifo_string,
                   'HWINJ_SUMMARY',
                   workflow.analysis_time,
                   extension='html',
                   directory=output_dir)
    node.add_opt('--outfile', outfile.storage_path)

    # add node to workflow
    workflow.add_node(node)

    # make all input_files parents
    #for f in input_files:
    #    dep = dax.Dependency(parent=f.node._dax_node, child=node._dax_node)
    #    workflow._adag.addDependency(dep)

    out_files += node.output_files

    logging.info("Leaving hardware injection page setup.")

    return out_files
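
A minimal usage sketch; the cache filename, cache pattern, tag and output directory are placeholders, and inspiral_files is assumed to come from the matched-filter stage.

hwinj_page_files = setup_hardware_injection_page(
    workflow, inspiral_files, 'ihope.cache', 'INSPIRAL_FULL_DATA',
    'hwinj_summary', tags=['full_data'])
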
Example No. 23
def setup_injection_workflow(workflow,
                             output_dir=None,
                             inj_section_name='injections',
                             exttrig_file=None,
                             tags=None):
    """
    This function is the gateway for setting up injection-generation jobs in a
    workflow. It could in principle support a number of different codes for
    doing this; however, as this will presumably remain a single call to a
    single executable (which need not be inspinj), there are currently no
    subfunctions in this module.

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the coincidence jobs will be added to.
    output_dir : path
        The directory in which injection files will be stored.
    inj_section_name : string (optional, default='injections')
        The string that corresponds to the option describing the exe location
        in the [executables] section of the .ini file and that corresponds to
        the section (and sub-sections) giving the options that will be given to
        the code at run time.
    exttrig_file : pycbc.workflow.core.File (optional, default=None)
        If given, this file is passed to the injection job when one of the
        coh_PTF injection methods is used.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. This will be used in output names.

    Returns
    --------
    inj_files : pycbc.workflow.core.FileList
        The list of injection files created by this call.
    inj_tags : list of strings
        The tag corresponding to each injection file and used to uniquely
        identify them. The FileList class contains functions to search
        based on tags.
    """
    if tags is None:
        tags = []
    logging.info("Entering injection module.")
    make_analysis_dir(output_dir)

    # Get full analysis segment for output file naming
    full_segment = workflow.analysis_time
    ifos = workflow.ifos

    # Identify which injections to do by presence of sub-sections in
    # the configuration file
    inj_tags = []
    inj_files = FileList([])

    for section in workflow.cp.get_subsections(inj_section_name):
        inj_tag = section.upper()
        curr_tags = tags + [inj_tag]

        # FIXME: Remove once fixed in pipedown
        # TEMPORARILY we require inj tags to end in "INJ"
        if not inj_tag.endswith("INJ"):
            err_msg = "Currently workflow requires injection names to end with "
            err_msg += "a inj suffix. Ie. bnslininj or bbhinj. "
            err_msg += "%s is not good." % (inj_tag.lower())
            raise ValueError(err_msg)

        # Parse for options in ini file
        injection_method = workflow.cp.get_opt_tags("workflow-injections",
                                                    "injections-method",
                                                    curr_tags)

        if injection_method in ["IN_WORKFLOW", "AT_RUNTIME"]:
            # FIXME: Add ability to specify different exes
            inj_job = LalappsInspinjExecutable(workflow.cp,
                                               inj_section_name,
                                               out_dir=output_dir,
                                               ifos='HL',
                                               tags=curr_tags)
            node = inj_job.create_node(full_segment)
            if injection_method == "AT_RUNTIME":
                workflow.execute_node(node)
            else:
                workflow.add_node(node)
            inj_file = node.output_files[0]
            inj_files.append(inj_file)
        elif injection_method == "PREGENERATED":
            injectionFilePath = workflow.cp.get_opt_tags(
                "workflow-injections", "injections-pregenerated-file",
                curr_tags)
            injectionFilePath = resolve_url(injectionFilePath)
            file_url = urlparse.urljoin('file:',
                                        urllib.pathname2url(injectionFilePath))
            inj_file = File('HL',
                            'PREGEN_inj_file',
                            full_segment,
                            file_url,
                            tags=curr_tags)
            inj_file.PFN(injectionFilePath, site='local')
            inj_files.append(inj_file)
        elif injection_method in ["IN_COH_PTF_WORKFLOW", "AT_COH_PTF_RUNTIME"]:
            inj_job = LalappsInspinjExecutable(workflow.cp,
                                               inj_section_name,
                                               out_dir=output_dir,
                                               ifos=ifos,
                                               tags=curr_tags)
            node = inj_job.create_node(full_segment, exttrig_file)
            if injection_method == "AT_COH_PTF_RUNTIME":
                workflow.execute_node(node)
            else:
                workflow.add_node(node)
            inj_file = node.output_files[0]

            if workflow.cp.has_option("workflow-injections", "em-bright-only"):
                em_filter_job = PycbcDarkVsBrightInjectionsExecutable(
                    workflow.cp,
                    'em_bright_filter',
                    tags=curr_tags,
                    out_dir=output_dir,
                    ifos=ifos)
                node = em_filter_job.create_node(inj_file, full_segment,
                                                 curr_tags)
                if injection_method == "AT_COH_PTF_RUNTIME":
                    workflow.execute_node(node)
                else:
                    workflow.add_node(node)
                inj_file = node.output_files[0]

            if workflow.cp.has_option("workflow-injections",
                                      "do-jitter-skyloc"):
                jitter_job = LigolwCBCJitterSkylocExecutable(
                    workflow.cp,
                    'jitter_skyloc',
                    tags=curr_tags,
                    out_dir=output_dir,
                    ifos=ifos)
                node = jitter_job.create_node(inj_file, full_segment,
                                              curr_tags)
                if injection_method == "AT_COH_PTF_RUNTIME":
                    workflow.execute_node(node)
                else:
                    workflow.add_node(node)
                inj_file = node.output_files[0]

            if workflow.cp.has_option("workflow-injections",
                                      "do-align-total-spin"):
                align_job = LigolwCBCAlignTotalSpinExecutable(
                    workflow.cp,
                    'align_total_spin',
                    tags=curr_tags,
                    out_dir=output_dir,
                    ifos=ifos)
                node = align_job.create_node(inj_file, full_segment, curr_tags)

                if injection_method == "AT_COH_PTF_RUNTIME":
                    workflow.execute_node(node)
                else:
                    workflow.add_node(node)
                inj_file = node.output_files[0]

            inj_files.append(inj_file)
        else:
            err = "Injection method must be one of IN_WORKFLOW, "
            err += "AT_RUNTIME or PREGENERATED. Got %s." % (injection_method)
            raise ValueError(err)

        inj_tags.append(inj_tag)

    logging.info("Leaving injection module.")
    return inj_files, inj_tags
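
A minimal usage sketch; workflow is assumed to exist and to define one or more [injections-...] subsections in its configuration, and the output directory is a placeholder.

inj_files, inj_tags = setup_injection_workflow(workflow,
                                               output_dir='injection_files')
print("Created %d injection set(s): %s" % (len(inj_files), ', '.join(inj_tags)))
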
Example No. 24
def setup_postproc_coh_PTF_online_workflow(workflow, trig_files, trig_cache,
        inj_trig_files, inj_files, inj_trig_caches, inj_caches, config_file,
        output_dir, html_dir, segment_dir, segs_plot, ifos, inj_tags=None,
        tags=None):
    """
    This module sets up a stripped down post-processing stage for the online
    workflow, using a coh_PTF style set up. This consists of running
    trig_combiner to find coherent triggers, and trig_cluster to cluster them.
    This process may be done in two stages to reduce memory requirements. It
    also runs injfinder to look for injections, and injcombiner to calculate
    injection statistics. Finally, efficiency and sbv_plotter jobs calculate
    efficiency and signal based veto statistics and make plots.
    
    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the coincidence jobs will be added to.
    trig_files : pycbc.workflow.core.FileList
        A FileList of the trigger files from the on/off source analysis jobs.
    trig_cache : pycbc.workflow.core.File
        A cache file pointing to the trigger files.
    inj_trig_files : pycbc.workflow.core.FileList
        A FileList of the trigger files produced by injection jobs.
    inj_files : pycbc.workflow.core.FileList
        A FileList of the injection set files.
    inj_trig_caches : pycbc.workflow.core.FileList
        A FileList containing the cache files that point to the injection
        trigger files.
    inj_caches : pycbc.workflow.core.FileList
        A FileList containing cache files that point to the injection files.
    config_file : pycbc.workflow.core.File
        The parsed configuration file.
    output_dir : path
        The directory in which output files will be stored.
    html_dir : path
        The directory where the result webpage will be placed.
    segment_dir : path
        The directory in which data segment information is stored.
    segs_plot : pycbc.workflow.core.File
        The plot showing the analysis segments for each IFO around the GRB time.
        This is produced at the time of workflow generation.
    ifos : list
        A list containing the analysis interferometers.
    inj_tags : list
        List containing the strings used to uniquely identify the injection
        sets included in the analysis.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. An example might be ['POSTPROC1'] or
        ['DENTYSNEWPOSTPROC']. This will be used in output names.

    Returns
    --------
    pp_outs : pycbc.workflow.core.FileList
        A list of the output from this stage.
    """
    if inj_tags is None:
        inj_tags = []
    if tags is None:
        tags = []
    cp = workflow.cp
    full_segment = trig_files[0].segment
    trig_name = cp.get("workflow", "trigger-name")
    grb_string = "GRB" + trig_name
    num_trials = int(cp.get("trig_combiner", "num-trials"))

    pp_outs = FileList([])
    pp_nodes = []

    # Set up needed exe classes
    trig_combiner_class = select_generic_executable(workflow, "trig_combiner")

    trig_cluster_class = select_generic_executable(workflow, "trig_cluster")

    sbv_plotter_class = select_generic_executable(workflow, "sbv_plotter")
    
    efficiency_class = select_generic_executable(workflow, "efficiency")

    #horizon_dist_class = select_generic_executable(workflow, "horizon_dist")

    html_summary_class = select_generic_executable(workflow, "html_summary")

    # Set up trig_combiner job
    trig_combiner_out_tags = ["OFFSOURCE", "ONSOURCE", "ALL_TIMES"]
    if all("COHERENT_NO_INJECTIONS" in t.name for t in trig_files) and \
            cp.has_option_tag("inspiral", "do-short-slides",
                              "coherent_no_injections"):
        trig_combiner_out_tags.extend(["ZEROLAG_OFF", "ZEROLAG_ALL"])

    trig_combiner_jobs = trig_combiner_class(cp, "trig_combiner", ifo=ifos, 
                                             out_dir=output_dir, tags=tags)

    # Do first stage of trig_combiner and trig_cluster jobs if desired
    if workflow.cp.has_option("workflow-postproc", "do-two-stage-clustering"):
        logging.info("Doing two-stage clustering.")
        trig_combiner_s1_jobs = trig_combiner_class(cp, "trig_combiner",
                ifo=ifos, out_dir=output_dir, tags=tags+["INTERMEDIATE"])

        num_stage_one_jobs = int(workflow.cp.get("workflow-postproc",
            "num-stage-one-cluster-jobs"))
        num_inputs_per_job = -(-len(trig_files) // num_stage_one_jobs)
        split_trig_files = (trig_files[p:p + num_inputs_per_job] for p in \
                            xrange(0, len(trig_files), num_inputs_per_job))
        trig_cluster_s1_jobs = trig_cluster_class(cp, "trig_cluster", ifo=ifos,
                out_dir=output_dir, tags=tags+["INTERMEDIATE"])
        trig_cluster_s1_nodes = []
        trig_cluster_s1_outs = FileList([])
        for j, s1_inputs in zip(range(num_stage_one_jobs), split_trig_files):
            trig_combiner_s1_node, trig_combiner_s1_outs = \
                    trig_combiner_s1_jobs.create_node(s1_inputs,
                            segment_dir, workflow.analysis_time,
                            out_tags=trig_combiner_out_tags, tags=tags+[str(j)])
            pp_nodes.append(trig_combiner_s1_node)
            workflow.add_node(trig_combiner_s1_node)

            unclust_file = [f for f in trig_combiner_s1_outs \
                            if "ALL_TIMES" in f.tag_str][0]
            trig_cluster_s1_node, curr_outs = trig_cluster_s1_jobs.create_node(\
                    unclust_file)
            trig_cluster_s1_outs.extend(curr_outs)
            clust_file = curr_outs[0]
            trig_cluster_s1_node.set_memory(1300)
            trig_cluster_s1_nodes.append(trig_cluster_s1_node)
            pp_nodes.append(trig_cluster_s1_node)
            workflow.add_node(trig_cluster_s1_node)
            dep = dax.Dependency(parent=trig_combiner_s1_node._dax_node,
                                 child=trig_cluster_s1_node._dax_node)
            workflow._adag.addDependency(dep)

        trig_combiner_node, trig_combiner_outs = \
                trig_combiner_jobs.create_node(trig_cluster_s1_outs,
                        segment_dir, workflow.analysis_time,
                        out_tags=trig_combiner_out_tags, tags=tags)
        pp_nodes.append(trig_combiner_node)
        workflow.add_node(trig_combiner_node)
        pp_outs.extend(trig_combiner_outs)
        for trig_cluster_s1_node in trig_cluster_s1_nodes:
            dep = dax.Dependency(parent=trig_cluster_s1_node._dax_node,
                                 child=trig_combiner_node._dax_node)
            workflow._adag.addDependency(dep)

    else:
        trig_combiner_node, trig_combiner_outs = \
                trig_combiner_jobs.create_node(trig_files, segment_dir,
                        workflow.analysis_time, out_tags=trig_combiner_out_tags,
                        tags=tags)
        pp_nodes.append(trig_combiner_node)
        workflow.add_node(trig_combiner_node)
        pp_outs.extend(trig_combiner_outs)

    # Initialise trig_cluster class
    trig_cluster_outs = FileList([])
    trig_cluster_jobs = trig_cluster_class(cp, "trig_cluster", ifo=ifos,
                                           out_dir=output_dir, tags=tags)

    # Set up injfinder jobs
    if cp.has_section("workflow-injections"):
        injfinder_nodes = []
        injcombiner_parent_nodes = []
        inj_sbv_plotter_parent_nodes = []

        injfinder_exe = os.path.basename(cp.get("executables", "injfinder"))
        injfinder_class = select_generic_executable(workflow, "injfinder")
        injfinder_jobs = injfinder_class(cp, "injfinder", ifo=ifos,
                                         out_dir=output_dir, tags=tags)

        injcombiner_exe = os.path.basename(cp.get("executables",
                                                  "injcombiner"))
        injcombiner_class = select_generic_executable(workflow, "injcombiner")
        injcombiner_jobs = injcombiner_class(cp, "injcombiner", ifo=ifos,
                                             out_dir=output_dir, tags=tags)

        injfinder_outs = FileList([])
        for inj_tag in inj_tags:
            triggers = FileList([file for file in inj_trig_files \
                                 if inj_tag in file.tag_str])
            injections = FileList([file for file in inj_files \
                                   if inj_tag in file.tag_str])
            trig_cache = [file for file in inj_trig_caches \
                          if inj_tag in file.tag_str][0]
            inj_cache = [file for file in inj_caches \
                         if inj_tag in file.tag_str][0]
            injfinder_node, curr_outs = injfinder_jobs.create_node(\
                    triggers, injections, segment_dir, tags=[inj_tag])
            injfinder_nodes.append(injfinder_node)
            pp_nodes.append(injfinder_node)
            workflow.add_node(injfinder_node)
            injfinder_outs.extend(curr_outs)
            if "DETECTION" not in curr_outs[0].tagged_description:
                injcombiner_parent_nodes.append(injfinder_node)
            else:
                inj_sbv_plotter_parent_nodes.append(injfinder_node)

        pp_outs.extend(injfinder_outs)

        # Make injfinder output cache
        fm_cache = File(ifos, "foundmissed", full_segment,
                        extension="lcf", directory=output_dir)
        fm_cache.PFN(fm_cache.cache_entry.path, site="local")
        injfinder_outs.convert_to_lal_cache().tofile(\
                open(fm_cache.storage_path, "w"))
        pp_outs.extend(FileList([fm_cache]))

        # Set up injcombiner jobs
        injcombiner_outs = FileList([f for f in injfinder_outs \
                                     if "DETECTION" in f.tag_str])
        injcombiner_tags = [inj_tag for inj_tag in inj_tags \
                            if "DETECTION" not in inj_tag]
        injcombiner_out_tags = [i.tag_str.rsplit('_', 1)[0] for i in \
                                injcombiner_outs if "FOUND" in i.tag_str]
        injcombiner_nodes = []

        for injcombiner_tag in injcombiner_tags:
            max_inc = cp.get_opt_tags("injections", "max-inc",
                                      [injcombiner_tag])
            inj_str = injcombiner_tag.replace("INJ", "")
            inputs = FileList([f for f in injfinder_outs \
                               if injcombiner_tag in f.tagged_description])
            injcombiner_node, curr_outs = injcombiner_jobs.create_node(\
                    fm_cache, inputs, inj_str, max_inc, workflow.analysis_time)
            injcombiner_nodes.append(injcombiner_node)
            injcombiner_out_tags.append("%s_FILTERED_%s"
                                        % (inj_str.split(max_inc)[0], max_inc))
            injcombiner_outs.extend(curr_outs)
            pp_outs.extend(curr_outs)
            pp_nodes.append(injcombiner_node)
            workflow.add_node(injcombiner_node)
            for parent_node in injcombiner_parent_nodes:
                dep = dax.Dependency(parent=parent_node._dax_node,
                                     child=injcombiner_node._dax_node)
                workflow._adag.addDependency(dep)

        # Initialise injection_efficiency class
        inj_efficiency_jobs = efficiency_class(cp, "inj_efficiency", ifo=ifos,
                                               out_dir=output_dir, tags=tags)

    # Initialise sbv_plotter class
    sbv_plotter_outs = FileList([])
    sbv_plotter_jobs = sbv_plotter_class(cp, "sbv_plotter", ifo=ifos,
                                         out_dir=output_dir, tags=tags)

    # Initialise efficiency class
    efficiency_outs = FileList([])
    efficiency_jobs = efficiency_class(cp, "efficiency", ifo=ifos,
                                       out_dir=output_dir, tags=tags)

    # Add trig_cluster jobs and their corresponding plotting jobs
    for out_tag in trig_combiner_out_tags:
        unclust_file = [f for f in trig_combiner_outs \
                        if out_tag in f.tag_str][0]
        trig_cluster_node, curr_outs = trig_cluster_jobs.create_node(\
                unclust_file)
        trig_cluster_outs.extend(curr_outs)
        clust_file = curr_outs[0]
        if out_tag != "ONSOURCE":
            # Add memory requirement for jobs with potentially large files
            trig_cluster_node.set_memory(1300)
            pp_nodes.append(trig_cluster_node)
            workflow.add_node(trig_cluster_node)
            dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                                 child=trig_cluster_node._dax_node)
            workflow._adag.addDependency(dep)

            # Add sbv_plotter job
            sbv_out_tags = [out_tag, "_clustered"]
            sbv_plotter_node = sbv_plotter_jobs.create_node(clust_file,
                                                            segment_dir,
                                                            tags=sbv_out_tags)
            pp_nodes.append(sbv_plotter_node)
            workflow.add_node(sbv_plotter_node)
            dep = dax.Dependency(parent=trig_cluster_node._dax_node,
                                 child=sbv_plotter_node._dax_node)
            workflow._adag.addDependency(dep)

            # Add injection sbv_plotter nodes if appropriate
            if out_tag == "OFFSOURCE":
                offsource_clustered = clust_file
                off_node = sbv_plotter_node

            if out_tag == "OFFSOURCE" and \
                    cp.has_section("workflow-injections"):
                found_inj_files = FileList([file for file in injcombiner_outs \
                                            if "FOUND" in file.tag_str])
                for curr_injs in found_inj_files:
                    curr_tags = [tag for tag in injcombiner_out_tags \
                                 if tag in curr_injs.name]
                    curr_tags.append("_clustered")
                    sbv_plotter_node = sbv_plotter_jobs.create_node(clust_file,
                            segment_dir, inj_file=curr_injs, tags=curr_tags)
                    pp_nodes.append(sbv_plotter_node)
                    workflow.add_node(sbv_plotter_node)
                    dep = dax.Dependency(parent=trig_cluster_node._dax_node,
                                         child=sbv_plotter_node._dax_node)
                    workflow._adag.addDependency(dep)
                    if "DETECTION" in curr_injs.tagged_description:
                        for parent_node in inj_sbv_plotter_parent_nodes:
                            dep = dax.Dependency(parent=parent_node._dax_node,
                                    child=sbv_plotter_node._dax_node)
                            workflow._adag.addDependency(dep)
                    else:
                        for parent_node in injcombiner_nodes:
                            dep = dax.Dependency(parent=parent_node._dax_node,
                                    child=sbv_plotter_node._dax_node)
                            workflow._adag.addDependency(dep)

        else:
            pp_nodes.append(trig_cluster_node)
            workflow.add_node(trig_cluster_node)
            dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                                 child=trig_cluster_node._dax_node)
            workflow._adag.addDependency(dep)

            # Add efficiency job for on/off
            efficiency_node = efficiency_jobs.create_node(clust_file,
                    offsource_clustered, segment_dir, tags=[out_tag])
            pp_nodes.append(efficiency_node)
            workflow.add_node(efficiency_node)
            dep = dax.Dependency(parent=off_node._dax_node,
                                 child=efficiency_node._dax_node)
            workflow._adag.addDependency(dep)

            if cp.has_section("workflow-injections"):
                for tag in injcombiner_out_tags:
                    if "_FILTERED_" in tag:
                        inj_set_tag = [t for t in inj_tags if \
                                       str(tag).replace("_FILTERED_", "") \
                                       in t][0]
                    else:
                        inj_set_tag = str(tag)
                    
                    found_file = [file for file in injcombiner_outs \
                                  if tag + "_FOUND" in file.tag_str][0]
                    missed_file = [file for file in injcombiner_outs \
                                   if tag + "_MISSED" in file.tag_str][0]
                    inj_efficiency_node = inj_efficiency_jobs.create_node(\
                            clust_file, offsource_clustered, segment_dir,
                            found_file, missed_file, tags=[out_tag, tag,
                                                           inj_set_tag])
                    pp_nodes.append(inj_efficiency_node)
                    workflow.add_node(inj_efficiency_node)
                    dep = dax.Dependency(parent=off_node._dax_node,
                                         child=inj_efficiency_node._dax_node)
                    workflow._adag.addDependency(dep)
                    for injcombiner_node in injcombiner_nodes:
                        dep = dax.Dependency(parent=injcombiner_node._dax_node,
                                child=inj_efficiency_node._dax_node)
                        workflow._adag.addDependency(dep)
                    for injfinder_node in injfinder_nodes:
                        dep = dax.Dependency(parent=injfinder_node._dax_node,
                                child=inj_efficiency_node._dax_node)
                        workflow._adag.addDependency(dep)

    # Add further trig_cluster jobs for trials
    trial = 1

    while trial <= num_trials:
        trial_tag = "OFFTRIAL_%d" % trial
        unclust_file = [f for f in trig_combiner_outs \
                        if trial_tag in f.tag_str][0]
        trig_cluster_node, clust_outs = trig_cluster_jobs.create_node(\
                unclust_file)
        clust_file = clust_outs[0]
        trig_cluster_outs.extend(clust_outs)
        pp_nodes.append(trig_cluster_node)
        workflow.add_node(trig_cluster_node)
        dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                             child=trig_cluster_node._dax_node)
        workflow._adag.addDependency(dep)

        # Add efficiency job
        efficiency_node = efficiency_jobs.create_node(clust_file,
                offsource_clustered, segment_dir, tags=[trial_tag])
        pp_nodes.append(efficiency_node)
        workflow.add_node(efficiency_node)
        dep = dax.Dependency(parent=off_node._dax_node,
                             child=efficiency_node._dax_node)
        workflow._adag.addDependency(dep)
        dep = dax.Dependency(parent=trig_cluster_node._dax_node,
                             child=efficiency_node._dax_node)
        workflow._adag.addDependency(dep)

        # Adding inj_efficiency job
        if cp.has_section("workflow-injections"):
            for tag in injcombiner_out_tags:
                if "_FILTERED_" in tag:
                    inj_set_tag = [t for t in inj_tags if \
                                   str(tag).replace("_FILTERED_", "") in t][0]
                else:
                    inj_set_tag = str(tag)

                found_file = [file for file in injcombiner_outs \
                              if tag + "_FOUND" in file.tag_str][0]
                missed_file = [file for file in injcombiner_outs \
                               if tag + "_MISSED" in file.tag_str][0]
                inj_efficiency_node = inj_efficiency_jobs.create_node(\
                        clust_file, offsource_clustered, segment_dir,
                        found_file, missed_file, tags=[trial_tag, tag,
                                                       inj_set_tag])
                pp_nodes.append(inj_efficiency_node)
                workflow.add_node(inj_efficiency_node)
                dep = dax.Dependency(parent=off_node._dax_node,
                                     child=inj_efficiency_node._dax_node)
                workflow._adag.addDependency(dep)
                for injcombiner_node in injcombiner_nodes:
                    dep = dax.Dependency(parent=injcombiner_node._dax_node,
                                         child=inj_efficiency_node._dax_node)
                    workflow._adag.addDependency(dep)
                for injfinder_node in injfinder_nodes:
                    dep = dax.Dependency(parent=injfinder_node._dax_node,
                                         child=inj_efficiency_node._dax_node)
                    workflow._adag.addDependency(dep)

        trial += 1

    # Initialise html_summary class and set up job
    #FIXME: We may want this job to run even if some jobs fail
    html_summary_jobs = html_summary_class(cp, "html_summary", ifo=ifos,
                                           out_dir=output_dir, tags=tags)
    if cp.has_section("workflow-injections"):
        tuning_tags = [inj_tag for inj_tag in injcombiner_out_tags \
                       if "DETECTION" in inj_tag]
        exclusion_tags = [inj_tag for inj_tag in injcombiner_out_tags \
                          if "DETECTION" not in inj_tag]
        html_summary_node = html_summary_jobs.create_node(c_file=config_file,
                tuning_tags=tuning_tags, exclusion_tags=exclusion_tags,
                seg_plot=segs_plot, html_dir=html_dir)
    else:
        html_summary_node = html_summary_jobs.create_node(c_file=config_file,
                seg_plot=segs_plot, html_dir=html_dir)
    workflow.add_node(html_summary_node)
    for pp_node in pp_nodes:
        dep = dax.Dependency(parent=pp_node._dax_node,
                             child=html_summary_node._dax_node)
        workflow._adag.addDependency(dep)

    # Make the open box shell script
    try:
        open_box_cmd = html_summary_node.executable.get_pfn() + " "
    except:
        exe_path = html_summary_node.executable.get_pfn('nonlocal').replace(\
                "https", "http")
        exe_name = exe_path.rsplit('/', 1)[-1]
        open_box_cmd = "wget %s\n" % exe_path
        open_box_cmd += "chmod 500 ./%s\n./%s " % (exe_name, exe_name)
    open_box_cmd += ' '.join(html_summary_node._args + \
                             html_summary_node._options)
    open_box_cmd += " --open-box"
    open_box_path = "%s/open_the_box.sh" % output_dir
    f = open(open_box_path, "w")
    f.write("#!/bin/sh\n%s" % open_box_cmd)
    f.close()
    os.chmod(open_box_path, 0500)

    pp_outs.extend(trig_cluster_outs)

    return pp_outs
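
A minimal usage sketch for the function above, not a fixed PyCBC recipe: it assumes a PyGRB-style generation script has already built the workflow, run the inspiral and injection stages, and collected their outputs (trig_files, trig_cache, inj_trig_files, inj_files, inj_trig_caches, inj_caches, config_file, segment_dir, segs_plot, inj_tags); run_dir and the tag string are illustrative placeholders.

import os

# Sketch only: every input here is assumed to come from earlier workflow
# stages; run_dir and the tag strings are placeholders, not fixed names.
post_proc_dir = os.path.join(run_dir, "post_processing")
html_dir = os.path.join(run_dir, "html")

pp_files = setup_postproc_coh_PTF_online_workflow(
        workflow,
        trig_files,        # FileList from the on/off source inspiral jobs
        trig_cache,        # cache File pointing at those trigger files
        inj_trig_files,    # triggers produced by the injection jobs
        inj_files,         # the injection set files themselves
        inj_trig_caches,   # caches for the injection triggers
        inj_caches,        # caches for the injection sets
        config_file,       # the parsed configuration File
        post_proc_dir,
        html_dir,
        segment_dir,       # directory holding the segment information
        segs_plot,         # segment plot File made at generation time
        workflow.ifos,
        inj_tags=inj_tags,
        tags=["POSTPROC"])
# pp_files is a FileList of everything produced by this stage.
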
Example #25
0
def run_datafind_instance(cp, outputDir, connection, observatory, frameType,
                          startTime, endTime, ifo, tags=None):
    """
    This function will query the datafind server once to find frames between
    the specified times for the specified frame type and observatory.

    Parameters
    ----------
    cp : ConfigParser instance
        Source for any kwargs that should be sent to the datafind module
    outputDir : path
        Output cache files will be written here. We also write the commands
        for reproducing what is done in this function to this directory.
    connection : datafind connection object
        Initialized through the glue.datafind module, this is the open
        connection to the datafind server.
    observatory : string
        The observatory to query frames for. Ex. 'H', 'L' or 'V'.  NB: not
        'H1', 'L1', 'V1' which denote interferometers.
    frameType : string
        The frame type to query for.
    startTime : int
        Integer start time to query the datafind server for frames.
    endTime : int
        Integer end time to query the datafind server for frames.
    ifo : string
        The interferometer to use for naming output. Ex. 'H1', 'L1', 'V1'.
        Maybe this could be merged with the observatory string, but this
        could cause issues if running on old 'H2' and 'H1' data.
    tags : list of string, optional (default=None)
        Use this to specify tags. This can be used if this module is being
        called more than once to give call specific configuration (by setting
        options in [workflow-datafind-${TAG}] rather than [workflow-datafind]).
        This is also used to tag the Files returned by the class to uniquify
        the Files and the actual filenames.
        FIXME: Filenames may not be unique with current codes!

    Returns
    --------
    dfCache : glue.lal.Cache instance
       The glue.lal.Cache representation of the call to the datafind
       server and the returned frame files.
    cacheFile : pycbc.workflow.core.File
        Cache file listing all of the datafind output files for use later in the pipeline.

    """
    if tags is None:
        tags = []

    seg = segments.segment([startTime, endTime])
    # Take the datafind kwargs from config (usually urltype=file is
    # given).
    dfKwargs = {}
    # By default ignore missing frames, this case is dealt with outside of here
    dfKwargs['on_gaps'] = 'ignore'
    if cp.has_section("datafind"):
        for item, value in cp.items("datafind"):
            dfKwargs[item] = value
    for tag in tags:
        if cp.has_section('datafind-%s' %(tag)):
            for item, value in cp.items("datafind-%s" %(tag)):
                dfKwargs[item] = value

    # It is useful to print the corresponding command to the logs
    # directory to check if this was expected.
    log_datafind_command(observatory, frameType, startTime, endTime,
                         os.path.join(outputDir,'logs'), **dfKwargs)
    logging.debug("Asking datafind server for frames.")
    dfCache = connection.find_frame_urls(observatory, frameType,
                                        startTime, endTime, **dfKwargs)
    logging.debug("Frames returned")
    # workflow format output file
    cache_file = File(ifo, 'DATAFIND', seg, extension='lcf',
                      directory=outputDir, tags=tags)
    cache_file.PFN(cache_file.cache_entry.path, site='local')

    dfCache.ifo = ifo
    # Dump output to file
    fP = open(cache_file.storage_path, "w")
    # FIXME: CANNOT use dfCache.tofile because it will print 815901601.00000
    #        as a gps time which is incompatible with the lal cache format
    #        (and the C codes) which demand an integer.
    #dfCache.tofile(fP)
    for entry in dfCache:
        start = str(int(entry.segment[0]))
        duration = str(int(abs(entry.segment)))
        print >> fP, "%s %s %s %s %s" \
            %(entry.observatory, entry.description, start, duration, entry.url)
        entry.segment = segments.segment(int(entry.segment[0]), int(entry.segment[1]))

    fP.close()
    return dfCache, cache_file
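
A brief call sketch for the datafind helper above, assuming cp is a parsed workflow configuration and connection is an already-open datafind connection obtained through glue.datafind, as the docstring describes; the frame type, GPS times, output directory and tag are illustrative values only.

# Sketch only: cp and connection are assumed to exist already; the frame
# type, GPS interval and directory below are example values.
df_cache, cache_file = run_datafind_instance(
        cp, "datafind", connection,
        observatory='H',             # single-letter site, not 'H1'
        frameType='H1_HOFT_C00',     # example frame type
        startTime=1126051217,
        endTime=1126055317,
        ifo='H1',                    # full ifo name used for output naming
        tags=['FULL_DATA'])
# df_cache is the glue.lal.Cache of frame URLs; cache_file is the workflow
# File wrapping the .lcf written into the output directory.
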
Example #26
0
def setup_postproc_coh_PTF_workflow(workflow,
                                    trig_files,
                                    trig_cache,
                                    inj_trig_files,
                                    inj_files,
                                    inj_trig_caches,
                                    inj_caches,
                                    config_file,
                                    output_dir,
                                    html_dir,
                                    segment_dir,
                                    ifos,
                                    inj_tags=[],
                                    tags=[]):
    """
    This module sets up the post-processing stage in the workflow, using a
    coh_PTF style set up. This consists of running trig_combiner to find
    coherent triggers, and injfinder to look for injections. It then runs
    a horizon_dist job, trig_cluster to cluster triggers, and injcombiner to
    calculate injection statistics. Finally, efficiency and sbv_plotter jobs
    calculate efficiency and signal based veto statistics and make plots.
    
    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the jobs will be added to.
    trig_files : pycbc.workflow.core.FileList
        A FileList of the trigger files from the on/off source analysis jobs.

    Returns
    --------
    pp_outs : pycbc.workflow.core.FileList
        A list of the output from this stage.
    """
    cp = workflow.cp
    full_segment = trig_files[0].segment
    trig_name = cp.get("workflow", "trigger-name")
    grb_string = "GRB" + trig_name
    num_trials = int(cp.get("trig_combiner", "num-trials"))

    pp_outs = FileList([])
    pp_nodes = []

    # Set up needed exe classes
    trig_combiner_exe = os.path.basename(cp.get("executables",
                                                "trig_combiner"))
    trig_combiner_class = select_generic_executable(workflow, "trig_combiner")

    trig_cluster_exe = os.path.basename(cp.get("executables", "trig_cluster"))
    trig_cluster_class = select_generic_executable(workflow, "trig_cluster")

    sbv_plotter_exe = os.path.basename(cp.get("executables", "sbv_plotter"))
    sbv_plotter_class = select_generic_executable(workflow, "sbv_plotter")

    efficiency_exe = os.path.basename(cp.get("executables", "efficiency"))
    efficiency_class = select_generic_executable(workflow, "efficiency")
    """
    horizon_dist_exe = os.path.basename(cp.get("executables",
                                               "horizon_dist"))
    horizon_dist_class = select_generic_executable(workflow,
                                                   "horizon_dist")
    """
    html_summary_exe = os.path.basename(cp.get("executables", "html_summary"))
    html_summary_class = select_generic_executable(workflow, "html_summary")

    # Set up trig_combiner job
    trig_combiner_out_tags = ["OFFSOURCE", "ONSOURCE", "ALL_TIMES"]
    trig_combiner_jobs = trig_combiner_class(cp,
                                             "trig_combiner",
                                             ifo=ifos,
                                             out_dir=output_dir,
                                             tags=tags)
    trig_combiner_node, trig_combiner_outs = trig_combiner_jobs.create_node(\
            trig_files, segment_dir, out_tags=trig_combiner_out_tags,
            tags=tags)
    pp_nodes.append(trig_combiner_node)
    workflow.add_node(trig_combiner_node)
    pp_outs.extend(trig_combiner_outs)

    # Initialise trig_cluster class
    trig_cluster_outs = FileList([])
    trig_cluster_jobs = trig_cluster_class(cp,
                                           "trig_cluster",
                                           ifo=ifos,
                                           out_dir=output_dir,
                                           tags=tags)

    # Set up injfinder jobs
    if cp.has_section("workflow-injections"):
        injfinder_nodes = []
        injcombiner_parent_nodes = []
        inj_sbv_plotter_parent_nodes = []

        injfinder_exe = os.path.basename(cp.get("executables", "injfinder"))
        injfinder_class = select_generic_executable(workflow, "injfinder")
        injfinder_jobs = injfinder_class(cp,
                                         "injfinder",
                                         ifo=ifos,
                                         out_dir=output_dir,
                                         tags=tags)

        injcombiner_exe = os.path.basename(cp.get("executables",
                                                  "injcombiner"))
        injcombiner_class = select_generic_executable(workflow, "injcombiner")
        injcombiner_jobs = injcombiner_class(cp,
                                             "injcombiner",
                                             ifo=ifos,
                                             out_dir=output_dir,
                                             tags=tags)

        injfinder_outs = FileList([])
        for inj_tag in inj_tags:
            triggers = FileList([file for file in inj_trig_files \
                                 if inj_tag in file.tag_str])
            injections = FileList([file for file in inj_files \
                                   if inj_tag in file.tag_str])
            trig_cache = [file for file in inj_trig_caches \
                          if inj_tag in file.tag_str][0]
            inj_cache = [file for file in inj_caches \
                         if inj_tag in file.tag_str][0]
            injfinder_node, curr_outs = injfinder_jobs.create_node(\
                    triggers, injections, segment_dir, tags=[inj_tag])
            injfinder_nodes.append(injfinder_node)
            pp_nodes.append(injfinder_node)
            workflow.add_node(injfinder_node)
            injfinder_outs.extend(curr_outs)
            if "DETECTION" not in curr_outs[0].tagged_description:
                injcombiner_parent_nodes.append(injfinder_node)
            else:
                inj_sbv_plotter_parent_nodes.append(injfinder_node)

        pp_outs.extend(injfinder_outs)

        # Make injfinder output cache
        fm_cache = File(ifos,
                        "foundmissed",
                        full_segment,
                        extension="lcf",
                        directory=output_dir)
        fm_cache.PFN(fm_cache.cache_entry.path, site="local")
        injfinder_outs.convert_to_lal_cache().tofile(\
                open(fm_cache.storage_path, "w"))
        pp_outs.extend(FileList([fm_cache]))

        # Set up injcombiner jobs
        injcombiner_outs = FileList([file for file in injfinder_outs \
                                     if "DETECTION" in file.tag_str])
        injcombiner_tags = [inj_tag for inj_tag in inj_tags \
                            if "DETECTION" not in inj_tag]
        injcombiner_out_tags = [injcombiner_outs[0].tag_str.rsplit('_', 1)[0]]
        injcombiner_nodes = []

        for injcombiner_tag in injcombiner_tags:
            max_inc = cp.get_opt_tags("injections", "max-inc",
                                      [injcombiner_tag])
            inj_str = injcombiner_tag[:4]
            inputs = FileList([file for file in injfinder_outs \
                               if injcombiner_tag in file.tagged_description])
            #                   if any(tag in file.tagged_description \
            #                          for tag in injcombiner_tags)])
            injcombiner_node, curr_outs = injcombiner_jobs.create_node(\
                    fm_cache, inputs, inj_str, max_inc, workflow.analysis_time)
            injcombiner_nodes.append(injcombiner_node)
            injcombiner_out_tags.append("%s_FILTERED_%s" % (inj_str, max_inc))
            injcombiner_outs.extend(curr_outs)
            pp_outs.extend(curr_outs)
            pp_nodes.append(injcombiner_node)
            workflow.add_node(injcombiner_node)
            for parent_node in injcombiner_parent_nodes:
                dep = dax.Dependency(parent=parent_node._dax_node,
                                     child=injcombiner_node._dax_node)
                workflow._adag.addDependency(dep)

        # Initialise injection_efficiency class
        inj_efficiency_jobs = efficiency_class(cp,
                                               "inj_efficiency",
                                               ifo=ifos,
                                               out_dir=output_dir,
                                               tags=tags)

    # Initialise sbv_plotter class
    sbv_plotter_outs = FileList([])
    sbv_plotter_jobs = sbv_plotter_class(cp,
                                         "sbv_plotter",
                                         ifo=ifos,
                                         out_dir=output_dir,
                                         tags=tags)

    # Initialise efficiency class
    efficiency_outs = FileList([])
    efficiency_jobs = efficiency_class(cp,
                                       "efficiency",
                                       ifo=ifos,
                                       out_dir=output_dir,
                                       tags=tags)

    # Add trig_cluster jobs and their corresponding plotting jobs
    for out_tag in trig_combiner_out_tags:
        unclust_file = [file for file in trig_combiner_outs \
                        if out_tag in file.tag_str][0]
        trig_cluster_node, curr_outs = trig_cluster_jobs.create_node(\
                unclust_file)
        trig_cluster_outs.extend(curr_outs)
        clust_file = curr_outs[0]
        if out_tag != "ONSOURCE":
            # Add memory requirement for jobs with potentially large files
            trig_cluster_node.set_memory(1300)
            pp_nodes.append(trig_cluster_node)
            workflow.add_node(trig_cluster_node)
            dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                                 child=trig_cluster_node._dax_node)
            workflow._adag.addDependency(dep)

            # Add sbv_plotter job
            sbv_out_tags = [out_tag, "_clustered"]
            sbv_plotter_node = sbv_plotter_jobs.create_node(clust_file,
                                                            segment_dir,
                                                            tags=sbv_out_tags)
            pp_nodes.append(sbv_plotter_node)
            workflow.add_node(sbv_plotter_node)
            dep = dax.Dependency(parent=trig_cluster_node._dax_node,
                                 child=sbv_plotter_node._dax_node)
            workflow._adag.addDependency(dep)

            # Add injection sbv_plotter nodes if appropriate
            if out_tag == "OFFSOURCE" and \
                    cp.has_section("workflow-injections"):
                offsource_clustered = clust_file
                off_node = sbv_plotter_node

                found_inj_files = FileList([file for file in injcombiner_outs \
                                            if "FOUND" in file.tag_str])
                for curr_injs in found_inj_files:
                    curr_tags = [tag for tag in injcombiner_out_tags \
                                 if tag in curr_injs.name]
                    curr_tags.append("_clustered")
                    sbv_plotter_node = sbv_plotter_jobs.create_node(
                        clust_file,
                        segment_dir,
                        inj_file=curr_injs,
                        tags=curr_tags)
                    pp_nodes.append(sbv_plotter_node)
                    workflow.add_node(sbv_plotter_node)
                    dep = dax.Dependency(parent=trig_cluster_node._dax_node,
                                         child=sbv_plotter_node._dax_node)
                    workflow._adag.addDependency(dep)
                    if "DETECTION" in curr_injs.tagged_description:
                        for parent_node in inj_sbv_plotter_parent_nodes:
                            dep = dax.Dependency(
                                parent=parent_node._dax_node,
                                child=sbv_plotter_node._dax_node)
                            workflow._adag.addDependency(dep)
                    else:
                        for parent_node in injcombiner_nodes:
                            dep = dax.Dependency(
                                parent=parent_node._dax_node,
                                child=sbv_plotter_node._dax_node)
                            workflow._adag.addDependency(dep)

            # Also add sbv_plotter job for unclustered triggers
            sbv_plotter_node = sbv_plotter_jobs.create_node(
                unclust_file, segment_dir, tags=[out_tag, "_unclustered"])
            sbv_plotter_node.set_memory(1300)
            pp_nodes.append(sbv_plotter_node)
            workflow.add_node(sbv_plotter_node)
            dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                                 child=sbv_plotter_node._dax_node)
            workflow._adag.addDependency(dep)
        else:
            pp_nodes.append(trig_cluster_node)
            workflow.add_node(trig_cluster_node)
            dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                                 child=trig_cluster_node._dax_node)
            workflow._adag.addDependency(dep)

            # Add efficiency job for on/off
            efficiency_node = efficiency_jobs.create_node(clust_file,
                                                          offsource_clustered,
                                                          segment_dir,
                                                          tags=[out_tag])
            pp_nodes.append(efficiency_node)
            workflow.add_node(efficiency_node)
            dep = dax.Dependency(parent=off_node._dax_node,
                                 child=efficiency_node._dax_node)
            workflow._adag.addDependency(dep)

            if cp.has_section("workflow-injections"):
                for tag in injcombiner_out_tags:
                    if "_FILTERED_" in tag:
                        inj_set_tag = [t for t in inj_tags if \
                                       str(tag).replace("_FILTERED_", "") \
                                       in t][0]
                    else:
                        inj_set_tag = str(tag)

                    found_file = [file for file in injcombiner_outs \
                                  if tag + "_FOUND" in file.tag_str][0]
                    missed_file = [file for file in injcombiner_outs \
                                   if tag + "_MISSED" in file.tag_str][0]
                    inj_efficiency_node = inj_efficiency_jobs.create_node(\
                            clust_file, offsource_clustered, segment_dir,
                            found_file, missed_file, tags=[out_tag, tag,
                                                           inj_set_tag])
                    pp_nodes.append(inj_efficiency_node)
                    workflow.add_node(inj_efficiency_node)
                    dep = dax.Dependency(parent=off_node._dax_node,
                                         child=inj_efficiency_node._dax_node)
                    workflow._adag.addDependency(dep)
                    for injcombiner_node in injcombiner_nodes:
                        dep = dax.Dependency(
                            parent=injcombiner_node._dax_node,
                            child=inj_efficiency_node._dax_node)
                        workflow._adag.addDependency(dep)
                    for injfinder_node in injfinder_nodes:
                        dep = dax.Dependency(
                            parent=injfinder_node._dax_node,
                            child=inj_efficiency_node._dax_node)
                        workflow._adag.addDependency(dep)

    # Add further trig_cluster jobs for trials
    trial = 1

    while trial <= num_trials:
        trial_tag = "OFFTRIAL_%d" % trial
        unclust_file = [file for file in trig_combiner_outs \
                        if trial_tag in file.tag_str][0]
        trig_cluster_node, clust_outs = trig_cluster_jobs.create_node(\
                unclust_file)
        clust_file = clust_outs[0]
        trig_cluster_outs.extend(clust_outs)
        pp_nodes.append(trig_cluster_node)
        workflow.add_node(trig_cluster_node)
        dep = dax.Dependency(parent=trig_combiner_node._dax_node,
                             child=trig_cluster_node._dax_node)
        workflow._adag.addDependency(dep)

        # Add efficiency job
        efficiency_node = efficiency_jobs.create_node(clust_file,
                                                      offsource_clustered,
                                                      segment_dir,
                                                      tags=[trial_tag])
        pp_nodes.append(efficiency_node)
        workflow.add_node(efficiency_node)
        dep = dax.Dependency(parent=off_node._dax_node,
                             child=efficiency_node._dax_node)
        workflow._adag.addDependency(dep)
        dep = dax.Dependency(parent=trig_cluster_node._dax_node,
                             child=efficiency_node._dax_node)
        workflow._adag.addDependency(dep)

        # Adding inj_efficiency job
        if cp.has_section("workflow-injections"):
            for tag in injcombiner_out_tags:
                if "_FILTERED_" in tag:
                    inj_set_tag = [t for t in inj_tags if \
                                   str(tag).replace("_FILTERED_", "") in t][0]
                else:
                    inj_set_tag = str(tag)

                found_file = [file for file in injcombiner_outs \
                              if tag + "_FOUND" in file.tag_str][0]
                missed_file = [file for file in injcombiner_outs \
                               if tag + "_MISSED" in file.tag_str][0]
                inj_efficiency_node = inj_efficiency_jobs.create_node(\
                        clust_file, offsource_clustered, segment_dir,
                        found_file, missed_file, tags=[trial_tag, tag,
                                                       inj_set_tag])
                pp_nodes.append(inj_efficiency_node)
                workflow.add_node(inj_efficiency_node)
                dep = dax.Dependency(parent=off_node._dax_node,
                                     child=inj_efficiency_node._dax_node)
                workflow._adag.addDependency(dep)
                for injcombiner_node in injcombiner_nodes:
                    dep = dax.Dependency(parent=injcombiner_node._dax_node,
                                         child=inj_efficiency_node._dax_node)
                    workflow._adag.addDependency(dep)
                for injfinder_node in injfinder_nodes:
                    dep = dax.Dependency(parent=injfinder_node._dax_node,
                                         child=inj_efficiency_node._dax_node)
                    workflow._adag.addDependency(dep)

        trial += 1

    # Initialise html_summary class and set up job
    #FIXME: We may want this job to run even if some jobs fail
    html_summary_jobs = html_summary_class(cp,
                                           "html_summary",
                                           ifo=ifos,
                                           out_dir=output_dir,
                                           tags=tags)
    if cp.has_section("workflow-injections"):
        tuning_tags = [inj_tag for inj_tag in injcombiner_out_tags \
                       if "DETECTION" in inj_tag]
        exclusion_tags = [inj_tag for inj_tag in injcombiner_out_tags \
                          if "DETECTION" not in inj_tag]
        html_summary_node = html_summary_jobs.create_node(
            c_file=config_file,
            tuning_tags=tuning_tags,
            exclusion_tags=exclusion_tags,
            html_dir=html_dir)
    else:
        html_summary_node = html_summary_jobs.create_node(c_file=config_file,
                                                          html_dir=html_dir)
    workflow.add_node(html_summary_node)
    for pp_node in pp_nodes:
        dep = dax.Dependency(parent=pp_node._dax_node,
                             child=html_summary_node._dax_node)
        workflow._adag.addDependency(dep)

    # Make the open box shell script
    open_box_cmd = html_summary_node.executable.get_pfn() + " "
    open_box_cmd += ' '.join(html_summary_node._args + \
                             html_summary_node._options)
    open_box_cmd += " --open-box"
    open_box_path = "%s/open_the_box.sh" % output_dir
    f = open(open_box_path, "w")
    f.write("#!/bin/sh\n%s" % open_box_cmd)
    f.close()
    os.chmod(open_box_path, 0500)

    pp_outs.extend(trig_cluster_outs)

    return pp_outs
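
For completeness, a hedged call sketch for this offline variant as well; the interface matches the online version above except that no segment plot is passed. All inputs are again assumed to be the outputs of earlier workflow stages, and the directory names are placeholders.

import os

# Sketch only: inputs come from earlier stages of a PyGRB generation script.
pp_files = setup_postproc_coh_PTF_workflow(
        workflow, trig_files, trig_cache,
        inj_trig_files, inj_files, inj_trig_caches, inj_caches,
        config_file,
        os.path.join(run_dir, "post_processing"),   # output_dir (placeholder)
        os.path.join(run_dir, "html"),              # html_dir (placeholder)
        segment_dir,
        workflow.ifos,
        inj_tags=inj_tags,
        tags=[])
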
Example #27
0
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
    '''
    Make an ExtTrig xml file containing information on the external trigger

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.
    ifos : str
        String containing the analysis interferometer IDs.
    sci_seg : glue.segments.segment
        The science segment for the analysis run.
    out_dir : str
        The output directory, destination for xml file.

    Returns
    -------
    xml_file : pycbc.workflow.File object
        The xml file with external trigger information.
    '''
    # Initialise objects
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(lsctables.ExtTriggersTable)
    cols = tbl.validcolumns
    xmldoc.childNodes[-1].appendChild(tbl)
    row = tbl.appendRow()

    # Add known attributes for this GRB
    setattr(row, "event_ra", float(cp.get("workflow", "ra")))
    setattr(row, "event_dec", float(cp.get("workflow", "dec")))
    setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
    setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))

    # Fill in all remaining columns with default values
    for entry in cols.keys():
        if not hasattr(row, entry):
            if cols[entry] in ['real_4', 'real_8']:
                setattr(row, entry, 0.)
            elif cols[entry] == 'int_4s':
                setattr(row, entry, 0)
            elif cols[entry] == 'lstring':
                setattr(row, entry, '')
            elif entry == 'process_id':
                row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
            elif entry == 'event_id':
                row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
            else:
                print >> sys.stderr, "Column %s not recognized" % (entry)
                raise ValueError

    # Save file
    xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow",
                                                    "trigger-name"))
    xml_file_path = os.path.join(out_dir, xml_file_name)
    utils.write_filename(xmldoc, xml_file_path)
    xml_file_url = urlparse.urljoin("file:",
                                    urllib.pathname2url(xml_file_path))
    xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
    xml_file.PFN(xml_file_url, site="local")

    return xml_file
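
A short sketch of calling the helper above, assuming cp already carries the [workflow] options it reads (ra, dec, trigger-time, trigger-name); the GPS segment boundaries and the output directory are placeholders.

from glue import segments

# Sketch only: cp must provide the [workflow] options read above; the GPS
# times and output directory are example values.
sci_seg = segments.segment(1126051217, 1126055317)
xml_file = make_exttrig_file(cp, ifos='H1L1', sci_seg=sci_seg,
                             out_dir='trigger_files')
# xml_file wraps triggerGRB<name>.xml and can be handed to any job that
# needs the external-trigger metadata.
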
Example #28
0
def setup_timeslides_workflow(workflow, output_dir=None, tags=[],
                              timeSlideSectionName='ligolw_tisi'):
    '''
    Setup generation of time_slide input files in the workflow. Currently used
    only with ligolw_tisi to generate files containing the list of slides to
    be performed in each time slide job.

    Parameters
    -----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the coincidence jobs will be added to.
    output_dir : path
        The directory in which output files will be stored.
    tags : list of strings (optional, default = [])
        A list of the tagging strings that will be used for all jobs created
        by this call to the workflow. This will be used in output names.
    timeSlideSectionName : string (optional, default='ligolw_tisi')
        The string that corresponds to the option describing the exe location
        in the [executables] section of the .ini file and that corresponds to
        the section (and sub-sections) giving the options that will be given to
        the code at run time.

    Returns
    --------
    timeSlideOuts : pycbc.workflow.core.FileList
        The list of time slide files created by this call.
    '''
    logging.info("Entering time slides setup module.")
    make_analysis_dir(output_dir)
    # Get ifo list and full analysis segment for output file naming
    ifoList = workflow.ifos
    ifo_string = workflow.ifo_string
    fullSegment = workflow.analysis_time

    # Identify which time-slides to do by presence of sub-sections in the
    # configuration file
    all_sec = workflow.cp.sections()
    timeSlideSections = [sec for sec in all_sec if sec.startswith('tisi-')]
    timeSlideTags = [(sec.split('-')[-1]).upper() for sec in timeSlideSections]

    timeSlideOuts = FileList([])

    # FIXME: Add ability to specify different exes

    # Make the timeSlideFiles
    for timeSlideTag in timeSlideTags:
        currTags = tags + [timeSlideTag]

        timeSlideMethod = workflow.cp.get_opt_tags("workflow-timeslides",
                                                 "timeslides-method", currTags)

        if timeSlideMethod in ["IN_WORKFLOW", "AT_RUNTIME"]:
            timeSlideExeTag = workflow.cp.get_opt_tags("workflow-timeslides",
                                                    "timeslides-exe", currTags)
            timeSlideExe = select_generic_executable(workflow, timeSlideExeTag)
            timeSlideJob = timeSlideExe(workflow.cp, timeSlideExeTag, ifos=ifo_string,
                                             tags=currTags, out_dir=output_dir)
            timeSlideNode = timeSlideJob.create_node(fullSegment)
            if timeSlideMethod == "AT_RUNTIME":
                workflow.execute_node(timeSlideNode)
            else:
                workflow.add_node(timeSlideNode)
            tisiOutFile = timeSlideNode.output_files[0]
        elif timeSlideMethod == "PREGENERATED":
            timeSlideFilePath = workflow.cp.get_opt_tags("workflow-timeslides",
                                      "timeslides-pregenerated-file", currTags)
            file_url = urlparse.urljoin('file:', urllib.pathname2url(\
                                                  timeSlideFilePath))
            tisiOutFile = File(ifo_string, 'PREGEN_TIMESLIDES',
                               fullSegment, file_url, tags=currTags)

        timeSlideOuts.append(tisiOutFile)

    return timeSlideOuts
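
A usage sketch for the time-slide setup, assuming the configuration file defines at least one [tisi-...] sub-section together with the [workflow-timeslides] options the code above looks up (timeslides-method, plus timeslides-exe or timeslides-pregenerated-file); the option values shown in the comment are examples, not required names.

# Sketch only: the .ini file is assumed to contain something like
#   [tisi-zerolag]
#   [workflow-timeslides]
#   timeslides-method = IN_WORKFLOW
#   timeslides-exe = tisi
# with the 'tisi' executable also listed in [executables].
time_slide_files = setup_timeslides_workflow(workflow,
                                             output_dir='timeslide_files',
                                             tags=[])
# One time-slide file is returned per [tisi-...] sub-section found.
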
Example #29
0
def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):
    """
    Retrieve files needed to run coh_PTF jobs within a PyGRB workflow

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.
    ifos : str
        String containing the analysis interferometer IDs.
    run_dir : str
        The run directory, destination for retrieved files.
    bank_veto : Boolean
        If true, will retrieve the bank_veto_bank.xml file.
    summary_files : Boolean
        If true, will retrieve the summary page style files.

    Returns
    -------
    file_list : pycbc.workflow.FileList object
        A FileList containing the retrieved files.
    """
    if os.getenv("LAL_SRC") is None:
        raise ValueError("The environment variable LAL_SRC must be set to a "
                         "location containing the file lalsuite.git")
    else:
        lalDir = os.getenv("LAL_SRC")
        sci_seg = segments.segment(int(cp.get("workflow", "start-time")),
                                   int(cp.get("workflow", "end-time")))
        file_list = FileList([])

        # Bank veto
        if bank_veto:
            shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                        "bank_veto_bank.xml" % lalDir, "%s" % run_dir)
            bank_veto_url = "file://localhost%s/bank_veto_bank.xml" % run_dir
            bank_veto = File(ifos,
                             "bank_veto_bank",
                             sci_seg,
                             file_url=bank_veto_url)
            bank_veto.PFN(bank_veto.cache_entry.path, site="local")
            file_list.extend(FileList([bank_veto]))

        if summary_files:
            # summary.js file
            shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                        "coh_PTF_html_summary.js" % lalDir, "%s" % run_dir)
            summary_js_url = "file://localhost%s/coh_PTF_html_summary.js" \
                             % run_dir
            summary_js = File(ifos,
                              "coh_PTF_html_summary_js",
                              sci_seg,
                              file_url=summary_js_url)
            summary_js.PFN(summary_js.cache_entry.path, site="local")
            file_list.extend(FileList([summary_js]))

            # summary.css file
            shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                        "coh_PTF_html_summary.css" % lalDir, "%s" % run_dir)
            summary_css_url = "file://localhost%s/coh_PTF_html_summary.css" \
                              % run_dir
            summary_css = File(ifos,
                               "coh_PTF_html_summary_css",
                               sci_seg,
                               file_url=summary_css_url)
            summary_css.PFN(summary_css.cache_entry.path, site="local")
            file_list.extend(FileList([summary_css]))

        return file_list
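
A small sketch of calling the helper above, assuming the LAL_SRC environment variable points at a lalsuite checkout containing lalapps/src/ring/coh_PTF_config_files; the paths and ifo string are placeholders.

import os

# Sketch only: LAL_SRC must point at a lalsuite source tree as required above.
os.environ["LAL_SRC"] = "/path/to/lalsuite"
support_files = get_coh_PTF_files(cp, ifos='H1L1', run_dir=run_dir,
                                  bank_veto=True, summary_files=True)
# support_files holds File objects for bank_veto_bank.xml and the summary
# page .js/.css files, each registered with a local PFN.
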
Example #30
0
def setup_tmpltbank_pregenerated(workflow, tags=None):
    '''
    Setup CBC workflow to use a pregenerated template bank.
    The bank given by the [workflow-tmpltbank] option
    tmpltbank-pregenerated-bank will be used as the input file for all
    matched-filtering jobs. If this option is present, the workflow will
    assume that it should be used and not generate template banks within the
    workflow.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instance of the Workflow class that manages the constructed workflow.
    tags : list of strings
        If given these tags are used to uniquely name and identify output files
        that would be produced in multiple calls to this function.

    Returns
    --------
    tmplt_banks : pycbc.workflow.core.FileList
        The FileList holding the details of the template bank.
    '''
    if tags is None:
        tags = []
    # Currently this uses the *same* fixed bank for all ifos.
    # Maybe we want to add capability to analyse separate banks in all ifos?

    # Set up class for holding the banks
    tmplt_banks = FileList([])

    cp = workflow.cp
    global_seg = workflow.analysis_time
    user_tag = "PREGEN_TMPLTBANK"
    try:
        # First check if we have a bank for all ifos
        pre_gen_bank = cp.get_opt_tags('workflow-tmpltbank',
                                       'tmpltbank-pregenerated-bank', tags)
        pre_gen_bank = resolve_url(pre_gen_bank)
        file_url = urlparse.urljoin('file:', urllib.pathname2url(pre_gen_bank))
        curr_file = File(workflow.ifos,
                         user_tag,
                         global_seg,
                         file_url,
                         tags=tags)
        curr_file.PFN(file_url, site='local')
        tmplt_banks.append(curr_file)
    except ConfigParser.Error:
        # Okay then I must have banks for each ifo
        for ifo in workflow.ifos:
            try:
                pre_gen_bank = cp.get_opt_tags(
                    'workflow-tmpltbank',
                    'tmpltbank-pregenerated-bank-%s' % ifo.lower(), tags)
                pre_gen_bank = resolve_url(pre_gen_bank)
                file_url = urlparse.urljoin('file:',
                                            urllib.pathname2url(pre_gen_bank))
                curr_file = File(ifo,
                                 user_tag,
                                 global_seg,
                                 file_url,
                                 tags=tags)
                curr_file.PFN(file_url, site='local')
                tmplt_banks.append(curr_file)

            except ConfigParser.Error:
                err_msg = "Cannot find pregerated template bank in section "
                err_msg += "[workflow-tmpltbank] or any tagged sections. "
                if tags:
                    tagged_secs = " ".join("[workflow-tmpltbank-%s]" \
                                           %(ifo,) for ifo in workflow.ifos)
                    err_msg += "Tagged sections are %s. " % (tagged_secs, )
                err_msg += "I looked for 'tmpltbank-pregenerated-bank' option "
                err_msg += "and 'tmpltbank-pregenerated-bank-%s'." % (ifo, )
                raise ConfigParser.Error(err_msg)

    return tmplt_banks
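
Finally, a sketch of how a pregenerated bank would be wired in through configuration, assuming the option read above is present; the bank path is a placeholder.

# Sketch only: the configuration is assumed to contain
#   [workflow-tmpltbank]
#   tmpltbank-pregenerated-bank = /path/to/H1L1-TMPLTBANK.xml.gz
# or per-ifo variants such as tmpltbank-pregenerated-bank-h1.
tmplt_banks = setup_tmpltbank_pregenerated(workflow)
# tmplt_banks holds one File per ifo (or a single shared File) that can be
# passed to the matched-filter jobs.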