Example #1
    def upload(self, fname, psds, low_frequency_cutoff, testing=True):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the psds.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        """
        from ligo.gracedb.rest import GraceDb
        import lal
        import lal.series

        self.save(fname)
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        # Convert our psds to the xml psd format.
        # FIXME: we should not use lal.series!!!
        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries

        psd_xmldoc = lal.series.make_psd_xmldoc(psds_lal)
        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz",
                         open("tmp_psd.xml.gz", "rb").read(), "psd").json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])
Example #3
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (not fap_filenames):  # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s" % (opts.classifier, ifo)
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id,
                         message="No iDQ timeseries for %s at %s" %
                         (opts.classifier, ifo),
                         tagname=idq.tagnames)
    sys.exit(0)

#===================================================================================================
### process FAP files

if opts.verbose:
    print "reading fap timeseries from:"
    for filename in fap_filenames:
        print '\t' + filename

#fig = isp.plt.figure(figsize=isp.rank_timeseries_figsize)
#rnk_ax = fig.add_axes(isp.rank_timeseries_axpos)
#fap_ax = rnk_ax.twinx()
fig = isp.plt.figure(figsize=isp.rank_splittimeseries_figsize)
Example #4
    def upload(self, fname, gracedb_server=None, testing=True,
               extra_strings=None):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional strings to be written to the GraceDB event log,
            tagged as analyst comments.
        """
        from ligo.gracedb.rest import GraceDb
        import matplotlib
        matplotlib.use('Agg')
        import pylab

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            snr_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_snr.png')
            psd_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_psd.png')
            pylab.figure()
            for ifo in sorted(self.snr_series):
                curr_snrs = self.snr_series[ifo]
                curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
                pylab.plot(curr_snrs.sample_times, abs(curr_snrs),
                           c=ifo_color(ifo), label=ifo)
                if ifo in self.ifos:
                    snr = self.coinc_results['foreground/%s/%s' %
                                             (ifo, 'snr')]
                    endt = self.coinc_results['foreground/%s/%s' %
                                              (ifo, 'end_time')]
                    pylab.plot([endt], [snr], c=ifo_color(ifo), marker='x')

            pylab.legend()
            pylab.xlabel('GPS time (s)')
            pylab.ylabel('SNR')
            pylab.savefig(snr_series_plot_fname)
            pylab.close()

            pylab.figure()
            for ifo in sorted(self.snr_series):
                # Undo dynamic range factor
                curr_psd = self.psds[ifo].astype(numpy.float64)
                curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0
                curr_psd.save(snr_series_fname, group='%s/psd' % ifo)
                # Can't plot log(0) so start from point 1
                pylab.loglog(curr_psd.sample_frequencies[1:],
                             curr_psd[1:]**0.5, c=ifo_color(ifo), label=ifo)
            pylab.legend()
            pylab.xlim([10, 1300])
            pylab.ylim([3E-24, 1E-20])
            pylab.xlabel('Frequency (Hz)')
            pylab.ylabel('ASD')
            pylab.savefig(psd_series_plot_fname)
            pylab.close()

        if self.probabilities is not None:
            prob_fname = fname.replace('.xml.gz', '_probs.json')
            prob_plot_fname = prob_fname.replace('.json', '.png')

            prob_plot = {k: v for (k, v) in self.probabilities.items()
                         if v != 0.0}
            labels, sizes = zip(*prob_plot.items())
            colors = [source_color(label) for label in labels]
            fig, ax = pylab.subplots()
            ax.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%',
                   textprops={'fontsize': 15})
            ax.axis('equal')
            fig.savefig(prob_plot_fname)
            pylab.close()

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # upload PSDs. Note that the PSDs are already stored in the
            # original event file and we just upload a copy of that same file
            # here. This keeps things as they were in O2 and can be removed
            # after updating the follow-up infrastructure
            psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
            gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
                             psd_fname, open(fname, "rb").read(), "psd")
            logging.info("Uploaded PSDs for event %s", gid)

            # add info for tracking code version
            version_str = 'Using PyCBC version {}{} at {}'
            version_str = version_str.format(
                    pycbc_version.version,
                    ' (release)' if pycbc_version.release else '',
                    os.path.dirname(pycbc.__file__))
            gracedb.writeLog(gid, version_str)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text, tag_name=['analyst_comments'])

            # upload SNR series in HDF format and plots
            if self.snr_series is not None:
                gracedb.writeLog(gid, 'SNR timeseries HDF file upload',
                                 filename=snr_series_fname)
                gracedb.writeLog(gid, 'SNR timeseries plot upload',
                                 filename=snr_series_plot_fname,
                                 tag_name=['background'],
                                 displayName=['SNR timeseries'])
                gracedb.writeLog(gid, 'PSD plot upload',
                                 filename=psd_series_plot_fname,
                                 tag_name=['psd'], displayName=['PSDs'])

            # upload source probabilities in json format and plot
            if self.probabilities is not None:
                gracedb.writeLog(gid, 'source probabilities JSON file upload',
                                 filename=prob_fname, tag_name=['em_follow'])
                logging.info('Uploaded source probabilities for event %s', gid)
                gracedb.writeLog(gid, 'source probabilities plot upload',
                                 filename=prob_plot_fname,
                                 tag_name=['em_follow'])
                logging.info('Uploaded source probabilities pie chart for '
                             'event %s', gid)

        except Exception as exc:
            logging.error('Something failed during the upload/annotation of '
                          'event %s on GraceDB. The event may not have been '
                          'uploaded!', fname)
            logging.error(str(exc))

        return gid
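A hedged sketch of calling this newer variant. The playground server URL, file name and comment strings below are placeholders assumed for illustration only.

# Hypothetical usage sketch: candidate is again assumed to be an instance of
# the class defining upload() above; all literal values are placeholders.
gid = candidate.upload(
    'candidate.xml.gz',
    gracedb_server='https://gracedb-playground.ligo.org/api/',  # assumed test endpoint
    testing=True,
    extra_strings=['Automated follow-up comment'])  # written to the event log as analyst comments
if gid is None:
    # upload() catches GraceDB exceptions internally; gid stays None if the
    # initial createEvent call failed, though the event XML is still on disk.
    logging.error('GraceDB upload failed; candidate.xml.gz was saved locally')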
Example #5
def parseAlert(queue, queueByGraceID, alert, t0, config):
    '''
    the way approval_processorMP digests lvalerts

    --> check if this alert is a command and delegate to parseCommand

    1) instantiates GraceDB client
    2) pulls childConfig settings
    3) makes sure we have the logger
    4) get lvalert specifics
    5) ensure we have the event_dict for the graceid = lvalert['uid']
    6) take proper action depending on the lvalert info coming in and the current state of the event_dict
    '''

    #-------------------------------------------------------------------
    # process commands sent via lvalert_commandMP
    #-------------------------------------------------------------------

    if alert['uid'] == 'command':  ### this is a command message!
        return parseCommand(queue, queueByGraceID, alert,
                            t0)  ### delegate to parseCommand and return

    #-------------------------------------------------------------------
    # extract relevant config parameters and set up necessary data structures
    #-------------------------------------------------------------------

    # instantiate GraceDB client from the childConfig
    client = config.get('general', 'client')
    g = GraceDb(client)

    # get other childConfig settings; save in configdict
    voeventerror_email = config.get('general', 'voeventerror_email')
    force_all_internal = config.get('general', 'force_all_internal')
    preliminary_internal = config.get('general', 'preliminary_internal')
    forgetmenow_timeout = config.getfloat('general', 'forgetmenow_timeout')
    approval_processorMPfiles = config.get('general',
                                           'approval_processorMPfiles')
    hardware_inj = config.get('labelCheck', 'hardware_inj')
    wait_for_hardware_inj = config.getfloat('labelCheck',
                                            'wait_for_hardware_inj')
    default_farthresh = config.getfloat('farCheck', 'default_farthresh')
    time_duration = config.getfloat('injectionCheck', 'time_duration')
    humanscimons = config.get('operator_signoffCheck', 'humanscimons')

    ### extract options about advocates
    advocates = config.get('advocate_signoffCheck', 'advocates')
    advocate_text = config.get('advocate_signoffCheck', 'advocate_text')
    advocate_email = config.get('advocate_signoffCheck', 'advocate_email')

    ### extract options for GRB alerts
    em_coinc_text = config.get('GRB_alerts', 'em_coinc_text')
    coinc_text = config.get('GRB_alerts', 'coinc_text')
    grb_email = config.get('GRB_alerts', 'grb_email')
    notification_text = config.get('GRB_alerts', 'notification_text')

    ### extract options about idq
    ignore_idq = config.get('idq_joint_fapCheck', 'ignore_idq')
    default_idqthresh = config.getfloat('idq_joint_fapCheck',
                                        'default_idqthresh')
    idq_pipelines = config.get('idq_joint_fapCheck', 'idq_pipelines')
    idq_pipelines = idq_pipelines.replace(' ', '')
    idq_pipelines = idq_pipelines.split(',')

    skymap_ignore_list = config.get('have_lvem_skymapCheck',
                                    'skymap_ignore_list')

    ### set up configdict (passed to local data structure: eventDicts)
    configdict = makeConfigDict(config)

    # set up logging
    ### FIXME: why not open the logger each time parseAlert is called?
    ###        that would allow you to better control which loggers are necessary and minimize the number of open files.
    ###        it also minimizes the possibility of something accidentally being written to loggers because they were left open.
    ###        what's more, this is a natural place to set up multiple loggers, one for all data and one for data pertaining only to this graceid

    global logger
    if globals().has_key('logger'):  # check to see if we have logger
        logger = globals()['logger']
    else:  # if not, set one up
        logger = loadLogger(config)
        logger.info(
            '\n{0} ************ approval_processorMP.log RESTARTED ************\n'
            .format(convertTime()))

    #-------------------------------------------------------------------
    # extract relevant info about this alert
    #-------------------------------------------------------------------

    # get alert specifics and event_dict information
    graceid = alert['uid']
    alert_type = alert['alert_type']
    description = alert['description']
    filename = alert['file']

    #-------------------------------------------------------------------
    # ensure we have an event_dict and ForgetMeNow tracking this graceid
    #-------------------------------------------------------------------

    if alert_type == 'new':  ### new event -> we must first create event_dict and set up ForgetMeNow queue item for G events

        ### create event_dict
        event_dict = EventDict()  # create a new instance of EventDict class which is a blank event_dict
        if is_external_trigger(alert) == True:  # this is an external GRB trigger
            # populate this event_dict with grb trigger info from lvalert
            event_dict.grb_trigger_setup(alert['object'], graceid, g, config, logger)
        else:
            # populate this event_dict with information from lvalert
            event_dict.setup(alert['object'], graceid, configdict, g, config, logger)
        eventDicts[graceid] = event_dict  # add the instance to the global eventDicts
        eventDictionaries[graceid] = event_dict.data  # add the dictionary to the global eventDictionaries

        ### ForgetMeNow queue item
        item = ForgetMeNow(t0, forgetmenow_timeout, graceid, eventDicts, queue,
                           queueByGraceID, logger)
        queue.insert(item)  # add queue item to the overall queue

        ### set up queueByGraceID
        newSortedQueue = utils.SortedQueue()  # create sorted queue for event candidate
        newSortedQueue.insert(item)  # put ForgetMeNow queue item into the sorted queue
        queueByGraceID[item.graceid] = newSortedQueue  # add queue item to the queueByGraceID
        saveEventDicts(approval_processorMPfiles)  # trying to see if expirationtime is updated from None

        message = '{0} -- {1} -- Created event dictionary for {1}.'.format(
            convertTime(), graceid)
        if loggerCheck(event_dict.data, message) == False:
            logger.info(message)
            g.writeLog(graceid,
                       'AP: Created event dictionary.',
                       tagname='em_follow')
        else:
            pass

    else:  ### not a new alert -> we may already be tracking this graceid

        if eventDicts.has_key(graceid):  ### we're already tracking it

            # get event_dict with expirationtime key updated for the rest of parseAlert
            event_dict = eventDicts[graceid]

            # find ForgetMeNow corresponding to this graceid and update expiration time
            for item in queueByGraceID[graceid]:
                if item.name == ForgetMeNow.name:  # selects the queue item that is a ForgetMeNow instance
                    item.setExpiration(t0)  # updates the expirationtime key
                    queue.resort()  ### may be expensive, but is needed to guarantee that queue remains sorted
                    queueByGraceID[graceid].resort()
                    break
            else:  ### we couldn't find a ForgetMeNow for this event! Something is wrong!
                os.system(
                    'echo \'ForgetMeNow KeyError\' | mail -s \'ForgetMeNow KeyError {0}\' {1}'
                    .format(graceid, advocate_email))
                raise KeyError(
                    'could not find ForgetMeNow for %s' % graceid
                )  ### Reed thinks this is necessary as a safety net.
                ### we want the process to terminate if things are not set up correctly to force us to fix it

        else:  # event_dict for event candidate does not exist. we need to create it with up-to-date information
            event_dict = EventDict()  # create a new instance of the EventDict class which is a blank event_dict
            if is_external_trigger(alert) == True:
                event_dict.grb_trigger_setup(
                    g.events(graceid).next(), graceid, g, config, logger)
            else:
                # fill in event_dict using queried event candidate dictionary
                event_dict.setup(
                    g.events(graceid).next(), graceid, configdict, g, config, logger)
                event_dict.update()  # update the event_dict with signoffs and iDQ info
            eventDicts[graceid] = event_dict  # add this instance to the global eventDicts
            eventDictionaries[graceid] = event_dict.data  # add the dictionary to the global eventDictionaries

            # create ForgetMeNow queue item and add to overall queue and queueByGraceID
            item = ForgetMeNow(t0, forgetmenow_timeout, graceid, eventDicts,
                               queue, queueByGraceID, logger)
            queue.insert(item)  # add queue item to the overall queue

            ### set up queueByGraceID
            newSortedQueue = utils.SortedQueue()  # create sorted queue for new event candidate
            newSortedQueue.insert(item)  # put ForgetMeNow queue item into the sorted queue
            queueByGraceID[item.graceid] = newSortedQueue  # add queue item to the queueByGraceID

            message = '{0} -- {1} -- Created event dictionary for {1}.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Created event dictionary.',
                           tagname='em_follow')
            else:
                pass

    #--------------------
    # ignore alerts that are not relevant, like simulation or MDC events
    #--------------------

    # if the graceid starts with 'M' for MDCs or 'S' for Simulation, ignore
    if re.match('M', graceid) or re.match('S', graceid):  ### FIXME: we want to make this a config-file option!
        message = '{0} -- {1} -- Mock data challenge or simulation. Ignoring.'.format(
            convertTime(), graceid)
        if loggerCheck(event_dict.data, message) == False:
            logger.info(message)
            g.writeLog(graceid,
                       'AP: Mock data challenge or simulation. Ignoring.',
                       tagname='em_follow')
        else:
            pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    #--------------------
    # take care of external GRB triggers
    #--------------------
    if is_external_trigger(alert) == True:  # for now, grouped everything related to external triggers together below
        # if it's not a log message updating us about possible coincidence with gravitational-waves OR labels OR json file uploads we are not interested
        if alert_type == 'label':
            record_label(event_dict.data, description)
        if alert_type == 'update':
            # is this a comment containing coinc info that needs to be parsed?
            if 'comment' in alert['object'].keys():
                comment = alert['object']['comment']
                if 'Significant event in on-source' in comment:  # got comment structure from Dipongkar
                    coinc_pipeline, coinc_fap = record_coinc_info(
                        event_dict.data, comment, alert, logger)
                    # begin creating the dictionary that will turn into json file
                    message_dict = {}
                    # populate text field for the GCN circular-to-be
                    message_dict['message'] = coinc_text.format(
                        graceid, coinc_fap)
                    message_dict['loaded_to_gracedb'] = 0
                    # make json string and file
                    message_dict = json.dumps(message_dict)
                    tmpfile = open('/tmp/coinc_{0}.json'.format(graceid), 'w')
                    tmpfile.write(message_dict)
                    tmpfile.close()
                    # make sure to load with a comment that we look for to check off that it's been loaded into gracedb
                    # was it an online or offline pipeline?
                    if 'Online' in coinc_pipeline:
                        event_dict.data['grb_online_json'] = message_dict
                        g.writeLog(
                            graceid,
                            'GRB-GW Coincidence JSON file: grb_online_json',
                            '/tmp/coinc_{0}.json'.format(graceid),
                            tagname='em_follow')
                    elif 'Offline' in coinc_pipeline:
                        event_dict.data['grb_offline_json'] = message_dict
                        g.writeLog(
                            graceid,
                            'GRB-GW Coincidence JSON file: grb_offline_json',
                            '/tmp/coinc_{0}.json'.format(graceid),
                            tagname='em_follow')
                    os.remove('/tmp/coinc_{0}.json'.format(graceid))
                    ### alert via email
                    os.system(
                        'echo \'{0}\' | mail -s \'Coincidence JSON created for {1}\' {2}'
                        .format(notification_text, graceid, grb_email))
                # is this the json file loaded into GraceDb?
                if 'GRB-GW Coincidence JSON file' in comment:
                    # if it is, find out which type of json it was and then message_dict['loaded_to_gracedb'] = 1
                    json_type = re.findall('file: (.*)', comment)[0]
                    message_dict = event_dict.data[json_type]
                    message_dict = json.loads(
                        message_dict)  # converts string to dictionary
                    message_dict['loaded_to_gracedb'] = 1
                    # when we send to observers, message_dict['sent_to_observers'] = 1
            else:
                pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    #--------------------
    # Appending which checks must be satisfied in preliminary_to_initial state before moving on
    #--------------------

    if humanscimons == 'yes':
        preliminary_to_initial.append('operator_signoffCheck')
    if advocates == 'yes':
        preliminary_to_initial.append('advocate_signoffCheck')

    #--------------------
    # update information based on the alert_type
    # includes extracting information from the alert
    # may also include generating VOEvents and issuing them
    #--------------------

    # actions for each alert_type
    currentstate = event_dict.data['currentstate']  ### actions depend on the current state

    ### NOTE: we handle alert_type=="new" above as well and this conditional is slightly redundant...
    if alert_type == 'new':

        #----------------
        ### pass event through PipelineThrottle
        #----------------

        ### check if a PipelineThrottle exists for this node
        group = event_dict.data['group']
        pipeline = event_dict.data['pipeline']
        search = event_dict.data['search']
        key = generate_ThrottleKey(group, pipeline, search=search)
        if queueByGraceID.has_key(key):  ### a throttle already exists
            if len(queueByGraceID[key]) > 1:
                raise ValueError(
                    'too many QueueItems in SortedQueue for pipelineThrottle key=%s'
                    % key)
            item = queueByGraceID[key][0]  ### we expect there to be only one item in this SortedQueue

        else:  ### we need to make a throttle!
            # pull PipelineThrottle parameters from the config
            if config.has_section(key):
                throttleWin = config.getfloat(key, 'throttleWin')
                targetRate = config.getfloat(key, 'targetRate')
                requireManualReset = config.get(key, 'requireManualReset')
                conf = config.getfloat(key, 'conf')

            else:
                throttleWin = config.getfloat('default_PipelineThrottle',
                                              'throttleWin')
                targetRate = config.getfloat('default_PipelineThrottle',
                                             'targetRate')
                requireManualReset = config.get('default_PipelineThrottle',
                                                'requireManualReset')
                conf = config.getfloat('default_PipelineThrottle', 'conf')
            item = PipelineThrottle(t0,
                                    throttleWin,
                                    targetRate,
                                    group,
                                    pipeline,
                                    search=search,
                                    requireManualReset=False,
                                    conf=0.9,
                                    graceDB_url=client)

            queue.insert(item)  ### add to overall queue

            newSortedQueue = utils.SortedQueue()  # create sorted queue for this throttle
            newSortedQueue.insert(item)  # put the PipelineThrottle queue item into the sorted queue
            queueByGraceID[item.graceid] = newSortedQueue  # add queue item to the queueByGraceID

        item.addEvent(graceid, t0)  ### add new event to throttle
        ### this takes care of labeling in gracedb as necessary

        if item.isThrottled():
            ### send some warning message?
            return 0  ### we're done here because we're ignoring this event -> exit from parseAlert

#        #----------------
#        ### pass data to Grouper
#        #----------------
#        raise Warning("Grouper is not implemented yet! we're currently using a temporate groupTag and prototype code")

#        '''
#        need to extract groupTag from group_pipeline[_search] mapping.
#            These associations should be specified in the config file, so we'll have to specify this somehow.
#            probably just a "Grouper" section, with (option = value) pairs that look like (groupTag = nodeA nodeB nodeC ...)
#        '''
#        groupTag = 'TEMPORARY'

#        ### check to see if Grouper exists for this groupTag
#        if queueByGraceID.has_key(groupTag): ### at least one Grouper already exists

#            ### determine if any of the existing Groupers are still accepting new triggers
#            for item in queueByGraceID[groupTag]:
#                if item.isOpen():
#                    break ### this Grouper is still open, so we'll just use it
#            else: ### no Groupers are open, so we need to create one
#                item = Grouper(t0, grouperWin, groupTag, eventDicts, graceDB_url=client) ### create the actual QueueItem

#                queue.insert( item ) ### insert it in the overall queue

#                newSortedQueue = utils.SortedQueue() ### set up the SortedQueue for queueByGraceID
#                newSortedQueue.insert(item)
#                queueByGraceID[groupTag] = newSortedQueue

#        else: ### we need to make a Grouper
#            grouperWin = config.getfloat('grouper', 'grouperWin')
#            item = Grouper(t0, grouperWin, groupTag, eventDicts, graceDB_url=client) ### create the actual QueueItem

#            queue.insert( item ) ### insert it in the overall queue

#            newSortedQueue = utils.SortedQueue() ### set up the SortedQueue for queueByGraceID
#            newSortedQueue.insert(item)
#            queueByGraceID[groupTag] = newSortedQueue

#        item.addEvent( graceid ) ### add this graceid to the item

        return 0  ### we're done here. When Grouper makes a decision, we'll tick through the rest of the processes with a "selected" label

    elif alert_type == 'label':
        record_label(event_dict.data, description)

        if description == 'PE_READY':  ### PE_READY label was just applied. We may need to send an update alert

            message = '{0} -- {1} -- Sending update VOEvent.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(
                    graceid,
                    'AP: Received PE_READY label. Sending update VOEvent.',
                    tagname='em_follow')
                process_alert(event_dict.data, 'update', g, config, logger)

            else:
                pass

            message = '{0} -- {1} -- State: {2} --> complete.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: State: {0} --> complete.'.format(currentstate),
                           tagname='em_follow')
                event_dict.data['currentstate'] = 'complete'

            else:
                pass

        elif description == 'EM_READY':  ### EM_READY label was just applied. We may need to send an initial alert
            message = '{0} -- {1} -- Sending initial VOEvent.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(
                    graceid,
                    'AP: Received EM_READY label. Sending initial VOEvent.',
                    tagname='em_follow')
                process_alert(event_dict.data, 'initial', g, config, logger)

            else:
                pass

            message = '{0} -- {1} -- State: {2} --> initial_to_update.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: State: {0} --> initial_to_update.'.format(
                               currentstate),
                           tagname='em_follow')
                event_dict.data['currentstate'] = 'initial_to_update'

            else:
                pass

        elif description == "EM_Throttled":  ### the event is throttled and we need to turn off all processing for it

            event_dict.data['currentstate'] = 'throttled'  ### update current state

            ### check if we need to send retractions
            voevents = event_dict.data['voevents']
            if len(voevents) > 0:
                if 'retraction' not in sorted(voevents)[-1]:
                    # there are existing VOEvents we've sent, but no retraction alert
                    process_alert(event_dict.data, 'retraction', g, config,
                                  logger)

            ### update ForgetMeNow expiration to handle all the clean-up?
            ### we probably do NOT want to change the clean-up schedule because we'll still likely receive a lot of alerts about this guy
            ### therefore, we just retain the local data and ignore him, rather than erasing the local data and having to query to reconstruct it repeatedly as new alerts come in
#            for item in queueByGraceID[graceid]: ### update expiration of the ForgetMeNow so it is immediately processed next.
#                if item.name == ForgetMeNow.name:
#                    time.setExpiration(-np.infty )
#                                                                ### FIXME: this can break the order in SortedQueue's. We need to pop and reinsert or call a manual resort
#                    queue.resort() ### may be expensive but is needed to guarantee that queue remains sorted
#                    queueByGraceID[graceid].resort()
#                    break
#            else:
#                raise ValueError('could not find ForgetMeNow QueueItem for graceid=%s'%graceid)

        elif description == "EM_Selected":  ### this event was selected by a Grouper
            raise NotImplementedError(
                'write logic to handle \"Selected\" labels')

        elif description == "EM_Superseded":  ### this event was superceded by another event within Grouper
            raise NotImplementedError(
                'write logic to handle \"Superseded" labels')

        elif checkLabels(description.split(), config) > 0:  ### some other label was applied. We may need to issue a retraction notice.
            event_dict.data['currentstate'] = 'rejected'

            ### check to see if we need to send a retraction
            voevents = event_dict.data['voevents']
            if len(voevents) > 0:
                if 'retraction' not in sorted(voevents)[-1]:
                    # there are existing VOEvents we've sent, but no retraction alert
                    process_alert(event_dict.data, 'retraction', g, config,
                                  logger)

        saveEventDicts(approval_processorMPfiles)  ### save the updated eventDict to disk
        return 0

    ### FIXME: Reed left off commenting here...

    elif alert_type == 'update':
        # first the case that we have a new lvem skymap
        if (filename.endswith('.fits.gz') or filename.endswith('.fits')):
            if 'lvem' in alert['object']['tag_names']:  # we only care about skymaps tagged lvem for sharing with MOU partners
                submitter = alert['object']['issuer']['display_name']  # in the past, we used to care who submitted skymaps; keeping this functionality just in case
                record_skymap(event_dict.data, filename, submitter, logger)
            else:
                pass
        # interested in iDQ information or other updates
        else:
            if 'comment' in alert['object'].keys():
                comment = alert['object']['comment']
                if re.match('minimum glitch-FAP', comment):  # looking to see if it's iDQ glitch-FAP information
                    record_idqvalues(event_dict.data, comment, logger)
                elif re.match('resent VOEvent', comment):  # looking to see if another running instance of approval_processorMP sent a VOEvent
                    response = re.findall(r'resent VOEvent (.*) in (.*)',
                                          comment)  # extracting which VOEvent was re-sent
                    event_dict.data[response[0][1]].append(response[0][0])
                    saveEventDicts(approval_processorMPfiles)
                elif 'EM-Bright probabilities computed from detection pipeline' in comment:  # got comment structure from Shaon G.
                    record_em_bright(event_dict.data, comment, logger)
                elif 'Temporal coincidence with external trigger' in comment:  # got comment structure from Alex U.
                    exttrig, coinc_far = record_coinc_info(
                        event_dict.data, comment, alert, logger)
                    # create dictionary that will become json file
                    message_dict = {}
                    grb_instrument = eventDictionaries[exttrig]['pipeline']
                    message_dict['message'] = em_coinc_text.format(
                        exttrig, grb_instrument, graceid, coinc_far)
                    message_dict['loaded_to_gracedb'] = 0
                    message_dict = json.dumps(message_dict)
                    # update event dictionaries for both the gw and external trigger
                    eventDictionaries[exttrig]['em_coinc_json'] = message_dict  # this updates the external trigger event_dict.data
                    event_dict.data['em_coinc_json'] = message_dict  # this updates the gw trigger event_dict.data
                    # load json file to the gw gracedb page
                    tmpfile = open('/tmp/coinc_{0}.json'.format(graceid), 'w')
                    tmpfile.write(message_dict)
                    tmpfile.close()
                    g.writeLog(graceid,
                               'GRB-GW Coincidence JSON file: em_coinc_json',
                               '/tmp/coinc_{0}.json'.format(graceid),
                               tagname='em_follow')
                    os.remove('/tmp/coinc_{0}.json'.format(graceid))
                    # load json file to the external trigger page
                    tmpfile = open('/tmp/coinc_{0}.json'.format(exttrig), 'w')
                    tmpfile.write(message_dict)
                    tmpfile.close()
                    g.writeLog(exttrig,
                               'GRB-GW Coincidence JSON file: em_coinc_json',
                               '/tmp/coinc_{0}.json'.format(exttrig),
                               tagname='em_follow')
                    os.remove('/tmp/coinc_{0}.json'.format(exttrig))
                    ### alert via email
                    os.system(
                        'echo \'{0}\' | mail -s \'Coincidence JSON created for {1}\' {2}'
                        .format(notification_text, exttrig, grb_email))
                    saveEventDicts(approval_processorMPfiles)
                elif 'GRB-GW Coincidence JSON file' in comment:  # this is the comment that accompanies a loaded coinc json file
                    message_dict = event_dict.data['em_coinc_json']
                    message_dict = json.loads(
                        message_dict)  # converts string to dictionary
                    message_dict['loaded_to_gracedb'] = 1
                    saveEventDicts(approval_processorMPfiles)
                else:
                    pass

    elif alert_type == 'signoff':
        signoff_object = alert['object']
        record_signoff(event_dict.data, signoff_object)

    #---------------------------------------------
    # run checks specific to currentstate of the event candidate
    #---------------------------------------------

    passedcheckcount = 0

    if currentstate == 'new_to_preliminary':
        time.sleep(wait_for_hardware_inj)  # this is for those cases where we don't have the INJ label right away
        queried_dict = g.events(graceid).next()  # query gracedb for the graceid
        event_dict.data['labels'] = queried_dict['labels'].keys()  # get the latest labels before running checks
        for Check in new_to_preliminary:
            eval('event_dict.{0}()'.format(Check))
            checkresult = event_dict.data[Check + 'result']
            if checkresult == None:
                pass
            elif checkresult == False:
                # because in 'new_to_preliminary' state, no need to apply DQV label
                message = '{0} -- {1} -- Failed {2} in currentstate: {3}.'.format(
                    convertTime(), graceid, Check, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Failed {0} in currentstate: {1}.'.format(
                                   Check, currentstate),
                               tagname='em_follow')
                else:
                    pass
                message = '{0} -- {1} -- State: {2} --> rejected.'.format(
                    convertTime(), graceid, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(
                        graceid,
                        'AP: State: {0} --> rejected.'.format(currentstate),
                        tagname='em_follow')
                    event_dict.data['currentstate'] = 'rejected'
                else:
                    pass
                saveEventDicts(approval_processorMPfiles)
                return 0
            elif checkresult == True:
                passedcheckcount += 1
        if passedcheckcount == len(new_to_preliminary):
            message = '{0} -- {1} -- Passed all {2} checks.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Passed all {0} checks.'.format(currentstate),
                           tagname='em_follow')
            else:
                pass
            message = '{0} -- {1} -- Sending preliminary VOEvent.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Sending preliminary VOEvent.',
                           tagname='em_follow')
                process_alert(event_dict.data, 'preliminary', g, config,
                              logger)
            else:
                pass
            message = '{0} -- {1} -- State: {2} --> preliminary_to_initial.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: State: {0} --> preliminary_to_initial.'.format(
                               currentstate),
                           tagname='em_follow')
                event_dict.data['currentstate'] = 'preliminary_to_initial'
            else:
                pass
            labels = event_dict.data['labels']
            # notify the operators if we haven't previously processed this event
            instruments = event_dict.data['instruments']
            for instrument in instruments:
                if instrument in str(labels):
                    pass
                else:
                    message = '{0} -- {1} -- Labeling {2}OPS.'.format(
                        convertTime(), graceid, instrument)
                    if loggerCheck(event_dict.data, message) == False:
                        logger.info(message)
                        g.writeLog(graceid,
                                   'AP: Labeling {0}OPS.'.format(instrument),
                                   tagname='em_follow')
                        g.writeLabel(graceid, '{0}OPS'.format(instrument))
                    else:
                        pass
            # notify the advocates if we haven't previously processed this event
            if 'ADV' in str(labels):
                pass
            else:
                message = '{0} -- {1} -- Labeling ADVREQ.'.format(
                    convertTime(), graceid)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Labeling ADVREQ.',
                               tagname='em_follow')
                    g.writeLabel(graceid, 'ADVREQ')
                    os.system(
                        'echo \'{0}\' | mail -s \'{1} passed criteria for follow-up\' {2}'
                        .format(advocate_text, graceid, advocate_email))
                    # expose event to LV-EM
                    url_perm_base = g.service_url + urllib.quote(
                        'events/{0}/perms/gw-astronomy:LV-EM:Observers/'.format(graceid))
                    for perm in ['view', 'change']:
                        url = url_perm_base + perm
                        #g.put(url)
                else:
                    pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    elif currentstate == 'preliminary_to_initial':
        for Check in preliminary_to_initial:
            eval('event_dict.{0}()'.format(Check))
            checkresult = event_dict.data[Check + 'result']
            if checkresult == None:
                pass
            elif checkresult == False:
                message = '{0} -- {1} -- Failed {2} in currentstate: {3}.'.format(
                    convertTime(), graceid, Check, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Failed {0} in currentstate: {1}.'.format(
                                   Check, currentstate),
                               tagname='em_follow')
                else:
                    pass
                message = '{0} -- {1} -- State: {2} --> rejected.'.format(
                    convertTime(), graceid, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(
                        graceid,
                        'AP: State: {0} --> rejected.'.format(currentstate),
                        tagname='em_follow')
                    event_dict.data['currentstate'] = 'rejected'
                else:
                    pass
                # need to set DQV label so long as it isn't the operator_signoffCheck or advocate_signoffCheck
                if 'signoffCheck' in Check:
                    message = '{0} -- {1} -- Not labeling DQV because signoffCheck is separate from explicit data quality checks.'.format(
                        convertTime(), graceid)
                    if loggerCheck(event_dict.data, message) == False:
                        logger.info(message)
                        g.writeLog(
                            graceid,
                            'AP: Not labeling DQV because signoffCheck is separate from explicit data quality checks.',
                            tagname='em_follow')
                    else:
                        pass
                else:
                    message = '{0} -- {1} -- Labeling DQV.'.format(
                        convertTime(), graceid)
                    if loggerCheck(event_dict.data, message) == False:
                        logger.info(message)
                        g.writeLog(graceid,
                                   'AP: Labeling DQV.',
                                   tagname='em_follow')
                        g.writeLabel(graceid, 'DQV')
                    else:
                        pass
                saveEventDicts(approval_processorMPfiles)
                return 0
            elif checkresult == True:
                passedcheckcount += 1
                if Check == 'have_lvem_skymapCheck':  # we want to send skymaps out as quickly as possible, even if humans have not vetted the event
                    # if it turns out we've sent this alert with this skymap before, the process_alert function will just not send this repeat
                    process_alert(event_dict.data, 'preliminary', g, config, logger)
        if passedcheckcount == len(preliminary_to_initial):
            message = '{0} -- {1} -- Passed all {2} checks.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Passed all {0} checks.'.format(currentstate),
                           tagname='em_follow')
            else:
                pass
            message = '{0} -- {1} -- Labeling EM_READY.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Labeling EM_READY.',
                           tagname='em_follow')
                g.writeLabel(graceid, 'EM_READY')
            else:
                pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    elif currentstate == 'initial_to_update':
        for Check in initial_to_update:
            eval('event_dict.{0}()'.format(Check))
            checkresult = event_dict.data[Check + 'result']
            if checkresult == None:
                pass
            elif checkresult == False:
                # need to set DQV label
                message = '{0} -- {1} -- Failed {2} in currentstate: {3}.'.format(
                    convertTime(), graceid, Check, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Failed {0} in currentstate: {1}.'.format(
                                   Check, currentstate),
                               tagname='em_follow')
                else:
                    pass
                message = '{0} -- {1} -- State: {2} --> rejected.'.format(
                    convertTime(), graceid, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(
                        graceid,
                        'AP: State: {0} --> rejected.'.format(currentstate),
                        tagname='em_follow')
                    event_dict.data['currentstate'] = 'rejected'
                else:
                    pass
                message = '{0} -- {1} -- Labeling DQV.'.format(
                    convertTime(), graceid)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Labeling DQV.',
                               tagname='em_follow')
                    g.writeLabel(graceid, 'DQV')
                else:
                    pass
                saveEventDicts(approval_processorMPfiles)
                return 0
            elif checkresult == True:
                passedcheckcount += 1
        if passedcheckcount == len(initial_to_update):
            message = '{0} -- {1} -- Passed all {2} checks.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Passed all {0} checks.'.format(currentstate),
                           tagname='em_follow')
            else:
                pass
            message = '{0} -- {1} -- Labeling PE_READY.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Labeling PE_READY.',
                           tagname='em_follow')
                g.writeLabel(graceid, 'PE_READY')
            else:
                pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    else:
        saveEventDicts(approval_processorMPfiles)
        return 0
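For orientation, a minimal, hypothetical sketch of the payload shape parseAlert expects, based only on the dictionary keys it reads above ('uid', 'alert_type', 'description', 'file', 'object'). The queue setup, config file name, graceid and alert contents are illustrative assumptions.

# Hypothetical driver sketch (Python 2, matching the code above). utils is the
# same module parseAlert uses for SortedQueue; a real 'object' payload would
# carry the full event dictionary delivered by lvalert.
import time
from ConfigParser import SafeConfigParser

config = SafeConfigParser()
config.read('childConfig-approval_processorMP.ini')  # assumed config file name

queue = utils.SortedQueue()   # overall queue of QueueItems
queueByGraceID = {}           # graceid -> SortedQueue, as used above

alert = {
    'uid': 'G123456',         # graceid (or 'command' to delegate to parseCommand)
    'alert_type': 'new',
    'description': '',
    'file': '',
    'object': {},             # full event dictionary from the lvalert message
}
parseAlert(queue, queueByGraceID, alert, time.time(), config)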
Example #6
	if dictionary[event]['FAR'] <= FAR_thresh:			
		#if the FAR is below threshold, write trigtimes and timeslides to files
		e_new += 1
		rr_trigtimes.write('%s\n'%dictionary[event]['gpstime'])
		rr_timeslides.write('%s\n'%" ".join([dictionary[event]['timeslides'][ifo] for ifo in ifos]))
		
		#If event is 0-lag, write to GraceDb if enabled
		if lag == '0lag' and gdb_flag:
			#Save dictionary as json file
			dic_path = rundir+'/GDB/%s.json'%('%s-%s'%(dictionary[event]['gpstime'],event))
			with open(dic_path, 'wt') as fp:
				json.dump(dictionary[event], fp)
			
			#Upload dictionary to GraceDb
			response = gdb.createEvent('Burst','LIB',dic_path, search='AllSky', filecontents=None)
			
			#Parse GraceDb ID
			response = json.loads(response.read())
			gid = str(response["graceid"])
			
			#Update GraceDb log with post-proc pages
#			gdb.writeLog(gid, message="Preliminary results: f_0 = %s, Q = %s"%(dictionary[event]['frequency'],dictionary[event]['quality']))
			gdb.writeLog(gid, message="Preliminary results: BSN = %s, BCI = %s, oSNR = %s"%(dictionary[event]['BSN'],dictionary[event]['BCI'],dictionary[event]['Omicron SNR']))
			gdb.writeLog(gid, message="Follow-up results will be written: https://ldas-jobs.ligo.caltech.edu/~ryan.lynch/%s/%s/followup/%s/%s/%s/posplots.html"%(lib_label,lag,'%s_%s_%s'%("".join(ifos),actual_start,stride-overlap),'%s-%s'%(dictionary[event]['gpstime'],e_new),"".join(ifos)))

rr_trigtimes.close()
rr_timeslides.close()

#run pipeline to make dag for reruns
os.system('%s/lalinference_2ndpipe_beta.py %s/runfiles/LIB_%s_reruns_beta.ini -r %s/LIB_%s_rr/ -p /usr1/ryan.lynch/logs/ -g %s/PostProc/LIB_trigs/LIB_%s_times_rr_%s.txt'%(infodir, rundir, lag, rundir, lag, rundir, lag, "".join(ifos)))
for filename in all_files:
    if opts.classifier == idq.extract_fap_name(filename): # and ifo in filename: ### this last bit not needed?
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (not fap_filenames): # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s"%(opts.classifier, ifo)
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ timeseries for %s at %s"%(opts.classifier, ifo))
    sys.exit(0)

#===================================================================================================
### process FAP files

if opts.verbose:
    print "reading fap timeseries from:"
    for filename in fap_filenames:
        print '\t' + filename

#fig = isp.plt.figure(figsize=isp.rank_timeseries_figsize)
#rnk_ax = fig.add_axes(isp.rank_timeseries_axpos)
#fap_ax = rnk_ax.twinx()
fig = isp.plt.figure(figsize=isp.rank_splittimeseries_figsize)
rnk_ax = fig.add_axes(isp.rank_splittimeseries_axpos)
# ## Find relevant files
# #########################################

if opts.verbose:
    print 'Finding relevant *glitch*.xml files'
gchxml_filenames = sorted([filename for filename in
                           idq.get_all_files_in_range(opts.input_dir,
                                                      opts.start, opts.end,
                                                      pad=0, suffix='.xml.gz')
                           if opts.classifier in filename.split('/')[-1]
                           and 'glitch' in filename
                           and opts.ifo in filename])
                        
if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ glitch-table data from "+opts.classifier+" available for the candidate at "+opts.ifo)
    print "No glitch files found, exiting."
    sys.exit(0)

if opts.verbose:
    print "Found:"
    for filename in gchxml_filenames:
        print '\t' + filename


# ##############################################################
# ## Load and Merge xml files using in-memory sqlite database
# ##############################################################
# 
# load files into database
connection, cursor = idq_tables_dbutils.load_xml_files_into_database(\
# get start and end time for our look-up routines
#========================
event_gps_time = float(gdb_entry['gpstime'])

gps_start = event_gps_time - time_before
gps_end = event_gps_time + time_after

plotting_gps_start = event_gps_time - plotting_time_before
plotting_gps_end = event_gps_time + plotting_time_after

performance_gps_start = event_gps_time - performance_time_before
performance_gps_end = event_gps_time + performance_time_after

logger.info("Started searching for iDQ information within [%.3f, %.3f] at %s"%(gps_start, gps_end, ifo))
if not options.skip_gracedb_upload:
    gracedb.writeLog(gdb_id, message="Started searching for iDQ information within [%.3f, %.3f] at %s"%(gps_start, gps_end, ifo))

#=================================================
# LOGIC for waiting for idq data 
#=================================================
### figure out if we need to wait for time to pass
wait = gps_end - (idq.nowgps()+delay)
if wait > 0:
    logger.info("waiting %.2f seconds until we pass gps=%.3f"%(wait, gps_end))
    time.sleep(wait)
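# hedged worked example (numbers invented): if gps_end lies 100 s in the
# future and delay is 30 s, then wait = 100 - 30 = 70 s, i.e. we sleep until
# idq.nowgps() + delay has reached gps_end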

if options.realtime_log:
    ### now we need to parse the realtime log to figure out where the realtime job is
    logger.info("parsing %s to extract idq-realtime state"%options.realtime_log)

    realtime_log = open(options.realtime_log, "r") ### open realtime log for reading
#=================================================

### read in the config file
if opts.verbose:
    print "reading config from : %s\nand setting up schedule of checks"%(configfile)
config = ConfigParser.SafeConfigParser()
config.read(configfile)

### set up the schedule of checks
schedule = checks.config_to_schedule( config, event_type, verbose=opts.verbose )

### annotate gracedb with list of scheduled checks
if opts.annotate_gracedb:
    log = "event_supervisor scheduled to check: %s"%(", ".join([description for dt, foo, kwargs, email, description in schedule]))
    gracedb.writeLog( gdb_id, log )

### perform the scheduled checks
if opts.verbose:
    print "performing schedule"
#to = time.time() ### start time of our checking proceedures
to = time.mktime(time.strptime(gdb_entry['created'], '%Y-%m-%d %H:%M:%S %Z')) ### parse creation time from GraceDB

for dt, foo, kwargs, email, description in schedule:
    ### check current time stamp
    wait = dt - (time.time()-to)
    if wait > 0:
        if opts.verbose:
            print "\nwaiting %.3f seconds before performing : %s\n"%(wait, description)
        time.sleep( wait )
예제 #11
0
    def upload(
        self,
        fname,
        psds,
        low_frequency_cutoff,
        testing=True,
        extra_strings=None,
    ):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the PSDs.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to post to the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb

        # first of all, make sure the event and PSDs are saved on disk
        # as GraceDB operations can fail later

        self.save(fname)

        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries
        psd_xmldoc = make_psd_xmldoc(psds_lal)
        psd_xml_path = os.path.splitext(fname)[0] + '-psd.xml.gz'
        ligolw_utils.write_filename(psd_xmldoc, psd_xml_path, gz=True)

        if self.upload_snr_series:
            snr_series_fname = os.path.splitext(fname)[0] + '.hdf'
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.snr_series_psd[ifo].save(snr_series_fname,
                                              group='%s/psd' % ifo)

        # try connecting to GraceDB
        try:
            gracedb = GraceDb(self.gracedb_server) \
                    if self.gracedb_server is not None else GraceDb()
        except Exception as exc:
            logging.error('Cannot connect to GraceDB')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!',
                          fname)
            return None

        # create GraceDB event
        group = 'Test' if testing else 'CBC'
        try:
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        except Exception as exc:
            logging.error('Cannot create GraceDB event')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!',
                          fname)
            return None
        logging.info("Uploaded event %s", r["graceid"])

        if self.is_hardware_injection:
            try:
                gracedb.writeLabel(r['graceid'], 'INJ')
            except Exception as exc:
                logging.error("Cannot tag event %s as an injection",
                              r["graceid"])
                logging.error(str(exc))
            logging.info("Tagging event %s as an injection", r["graceid"])

        # upload PSDs
        try:
            gracedb.writeLog(r["graceid"],
                             "PyCBC PSD estimate from the time of event",
                             "psd.xml.gz",
                             open(psd_xml_path, "rb").read(), "psd").json()
        except Exception as exc:
            logging.error("Cannot upload PSDs for event %s", r["graceid"])
            logging.error(str(exc))
        logging.info("Uploaded PSDs for event %s", r["graceid"])

        # add other tags and comments
        try:
            gracedb.writeLog(
                r["graceid"],
                "Using PyCBC code hash %s" % pycbc_version.git_hash).json()
            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(r["graceid"], text).json()
        except Exception as exc:
            logging.error("Cannot write comments for event %s", r["graceid"])
            logging.error(str(exc))

        # upload SNR series in HDF format
        if self.upload_snr_series:
            try:
                gracedb.writeFile(r['graceid'], snr_series_fname)
            except Exception as exc:
                logging.error("Cannot upload HDF SNR series for event %s",
                              r["graceid"])
                logging.error(str(exc))

        return r['graceid']
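
# A minimal usage sketch for the upload() method above, assuming a
# hypothetical candidate object `event` that exposes it; the PSD values and
# file name below are placeholders, not taken from the source.
import numpy
from pycbc.types import FrequencySeries

example_psds = {
    ifo: FrequencySeries(numpy.ones(2049), delta_f=0.25)  # flat dummy PSD
    for ifo in ('H1', 'L1')
}
# event.upload('candidate.xml.gz', example_psds, low_frequency_cutoff=20.0,
#              testing=True, extra_strings=['example annotation'])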
예제 #12
0
snglfits.make_confidence_regions(
    verbose=opts.verbose)  ### make confidence region stuff

snglfits.make_antenna_patterns(
    verbose=opts.verbose)  ### compute antenna pattern statistics

#-----------

### generate final html document
htmlname = snglfits.write(verbose=opts.verbose)

### upload to GraceDb
if not opts.skip_gracedb_upload:
    if opts.verbose:
        print "uploading %s to GraceDb(%s)" % (htmlname, opts.graceDbURL)

    gdb = GraceDb(opts.graceDbURL)
    gdbdir = os.path.join(opts.graceDbURL, '..', 'events', opts.graceid,
                          'files')
    fitsbase = os.path.basename(fits)
    gdb.writeLog(
        opts.graceid,
        message=
        'skymap summary for <a href="%s">%s</a> can be found <a href=\"%s\">here</a>'
        % (os.path.join(gdbdir, fitsbase), fitsbase,
           os.path.join(gdbdir, os.path.basename(htmlname))),
        filename=htmlname,
        tagname=fits2html.standard_tagname + opts.graceDb_tagname +
        opts.graceDb_html_tagname)
#========================
# get start and end time for our look-up routines
#========================
event_gps_time = float(gdb_entry['gpstime'])

gps_start = event_gps_time - time_before
gps_end = event_gps_time + time_after

plotting_gps_start = event_gps_time - plotting_time_before
plotting_gps_end = event_gps_time + plotting_time_after

logger.info("Started searching for iDQ information within [%.3f, %.3f] at %s" %
            (gps_start, gps_end, ifo))
gracedb.writeLog(
    gdb_id,
    message="Started searching for iDQ information within [%.3f, %.3f] at %s" %
    (gps_start, gps_end, ifo))

#=================================================
# LOGIC for waiting for idq data
#=================================================
### figure out if we need to wait for time to pass
wait = gps_end - (idq.nowgps() + delay)
if wait > 0:
    logger.info("waiting %.2f seconds until we pass gps=%.3f" %
                (wait, gps_end))
    time.sleep(wait)

### now we need to parse the realtime log to figure out where the realtime job is
realtime_logname = config.get("general", "realtime_log")
logger.info("parsing %s to extract idq-realtime state" % realtime_logname)
예제 #14
0
    def upload(self, fname, psds, low_frequency_cutoff,
               testing=True,
               extra_strings=None,
               ):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the PSDs.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to post to the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb

        # first of all, make sure the event and PSDs are saved on disk
        # as GraceDB operations can fail later

        self.save(fname)

        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
            psds_lal[ifo] = fseries
        psd_xmldoc = make_psd_xmldoc(psds_lal)
        psd_xml_path = os.path.splitext(fname)[0] + '-psd.xml.gz'
        ligolw_utils.write_filename(psd_xmldoc, psd_xml_path, gz=True)

        if self.upload_snr_series:
            snr_series_fname = os.path.splitext(fname)[0] + '.hdf'
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.snr_series_psd[ifo].save(snr_series_fname,
                                              group='%s/psd' % ifo)

        # try connecting to GraceDB
        try:
            gracedb = GraceDb(self.gracedb_server) \
                    if self.gracedb_server is not None else GraceDb()
        except Exception as exc:
            logging.error('Cannot connect to GraceDB')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!', fname)
            return None

        # create GraceDB event
        group = 'Test' if testing else 'CBC'
        try:
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        except Exception as exc:
            logging.error('Cannot create GraceDB event')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!', fname)
            return None
        logging.info("Uploaded event %s", r["graceid"])

        if self.is_hardware_injection:
            try:
                gracedb.writeLabel(r['graceid'], 'INJ')
            except Exception as exc:
                logging.error("Cannot tag event %s as an injection", r["graceid"])
                logging.error(str(exc))
            logging.info("Tagging event %s as an injection", r["graceid"])

        # upload PSDs
        try:
            gracedb.writeLog(r["graceid"],
                             "PyCBC PSD estimate from the time of event",
                             "psd.xml.gz", open(psd_xml_path, "rb").read(),
                             "psd").json()
        except Exception as exc:
            logging.error("Cannot upload PSDs for event %s", r["graceid"])
            logging.error(str(exc))
        logging.info("Uploaded PSDs for event %s", r["graceid"])

        # add other tags and comments
        try:
            gracedb.writeLog(r["graceid"],
                "Using PyCBC code hash %s" % pycbc_version.git_hash).json()
            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(r["graceid"], text).json()
        except Exception as exc:
            logging.error("Cannot write comments for event %s", r["graceid"])
            logging.error(str(exc))

        # upload SNR series in HDF format
        if self.upload_snr_series:
            try:
                gracedb.writeFile(r['graceid'], snr_series_fname)
            except Exception as exc:
                logging.error("Cannot upload HDF SNR series for event %s",
                              r["graceid"])
                logging.error(str(exc))

        return r['graceid']
예제 #15
0
<tr><td rowspan=2>bandwidth (Hz)</td><td>H1</td><td align=right>{bwH}</td><td align=right>{bwHlow}</td><td align=right>{bwHhigh}</td></tr> \
<tr><td>L1</td><td align=right>{bwL}</td><td align=right>{bwLlow}</td><td align=right>{bwLhigh}</td></tr> \
<tr><td rowspan=2>duration (s)</td><td>H1</td><td align=right>{durH}</td><td align=right>{durHlow}</td><td align=right>{durHhigh}</td></tr> \
<tr><td>L1</td><td align=right>{durL}</td><td align=right>{durLlow}</td><td align=right>{durLhigh}</td></tr></table> \
'.format(freqH=freq[0][0],freqHlow=freq[0][1],freqHhigh=freq[0][2],freqL=freq[1][0],freqLlow=freq[1][1],freqLhigh=freq[1][2],bwH=bandwidth[0][0],bwHlow=bandwidth[0][1],bwHhigh=bandwidth[0][2],bwL=bandwidth[1][0],bwLlow=bandwidth[1][1],bwLhigh=bandwidth[1][2],durH=dur_c[0],durL=dur_c[1],durHlow=dur_low[0],durLlow=dur_low[1],durHhigh=dur_high[0],durLhigh=dur_high[1])

BFtable = '<table> \
<tr><th colspan=2>BWB Bayes Factors</th></tr> \
<tr><td>lnBSG</td><td align=right>{BSG}+/-{errBSG}</td></tr> \
<tr><td>lnBSN</td><td align=right>{BSN}+/-{errBSN}</td></tr> \
</table>'.format(BSG=BSG,BSN=BSN,errBSG=err_SG,errBSN=err_SN)

# Sky map
skyname = glob.glob('skymap*.fits')[0]

os.system('cp {sky} BW_skymap.fits'.format(sky=skyname)) # (change name so it's clear on GraceDB which skymap is ours)

#skytag = ["sky_loc","lvem"]
skytag = ["sky_loc"]

# Actually send info to gracedb
gracedb = GraceDb()
#gracedb.writeLog(graceid, "BayesWave Skymap image", filename='plots/skymap.png', tagname='sky_loc')
gracedb.writeLog(graceid, "BayesWave skymap FITS", filename='BW_skymap.fits', tagname=skytag)
gracedb.writeLog(graceid, "<a href='https://ldas-jobs.ligo.caltech.edu/~meg.millhouse/O1/zero_lag/job_{0}'>BWB Follow-up results</a>".format(graceid), tagname='pe')
gracedb.writeLog(graceid,paramtable,tagname='pe')
gracedb.writeLog(graceid,BFtable,tagname='pe')

os.chdir('..')
os.system('cp -r '+dirname+' /home/meg.millhouse/public_html/O1/zero_lag/job_'+graceid)
for filename in all_files:
    if opts.classifier == idq.extract_fap_name(filename): # and opts.ifo in filename: ### this last bit not needed?
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (not fap_filenames): # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s"%(opts.classifier, opts.ifo)
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ timeseries for %s at %s"%(opts.classifier, opts.ifo))
    sys.exit(0)

#=================================================

# define plot
fig = plt.figure()
r_ax = plt.subplot(1, 1, 1)
f_ax = r_ax.twinx()
f_ax.set_yscale('log') # this may be fragile if fap=0 for all points in the plot. That's super rare, so maybe we don't have to worry about it?

r_ax.set_title(opts.ifo)

#=================================================
# RANK
#=================================================
예제 #17
0
# #########################################
# ## Find relevant files
# #########################################

if opts.verbose:
    print 'Finding relevant *glitch*.xml files'
gchxml_filenames = sorted([filename for filename in 
                          idq.get_all_files_in_range(opts.input_dir, opts.start, opts.end, pad=0, suffix='.xml.gz') 
                          if opts.classifier == idq.extract_xml_name(filename) 
                          and 'glitch' in filename
                          and opts.ifo in filename])
                        
if not gchxml_filenames:
    # no files found, print the message, and exit
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id, message="No iDQ glitch-table data from "+opts.classifier+" available for the candidate at "+opts.ifo)
    print "No glitch files found, exiting."
    sys.exit(0)

if opts.verbose:
    print "Found:"
    for filename in gchxml_filenames:
        print '\t' + filename


# ##############################################################
# ## Load and Merge xml files using in-memory sqlite database
# ##############################################################
# 
# load files into database
connection, cursor = idq_tables_dbutils.load_xml_files_into_database(\
dt = [event.livetime(segs[fapThr][0])/T for fapThr in opts.FAPthr]
maxFAP = [segs[fapThr][1] for fapThr in opts.FAPthr]

### write json for calibration check
jsonfilename = idq.gdb_calib_json( gdbdir, ifo, opts.classifier, filetag, opts.start, opts.end-opts.start )
if opts.verbose:
    print "  %s"%jsonfilename
file_obj = open(jsonfilename, "w")
file_obj.write( json.dumps( {opts.classifier:{'nominal FAP':opts.FAPthr, 'maximum reported FAP':maxFAP, 'observed deadtime':dt, 'duration':T} } ) )
file_obj.close()

if not opts.skip_gracedb_upload:
    message = "iDQ calibration sanity check for %s at %s within [%.3f, %.3f]"%(opts.classifier, ifo, opts.start, opts.end)
    if opts.verbose:
        print "    "+message
    gracedb.writeLog( opts.gracedb_id, message=message, filename=jsonfilename )

### plot calibration check
fig = isp.plt.figure()
ax = fig.add_axes( isp.default_axpos )

if np.any(np.array(maxFAP)>0) and np.any(np.array(dt)>0): 
    ax.loglog(maxFAP, dt, marker='o', linestyle='none')
else:
    ax.plot(maxFAP, dt, marker='o', linestyle='none')
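# log axes are used above only when at least one FAP value and one deadtime
# value are positive; the linear fallback keeps all-zero results visible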

ax.set_xlabel('Nominal FAP')
ax.set_ylabel('Observed deadtime')

ax.grid(True)
xmin = 1e-4
예제 #19
0
<tr><td>lnBSN</td><td align=right>{BSN}+/-{errBSN}</td></tr> \
</table>'.format(BSG=BSG, BSN=BSN, errBSG=err_SG, errBSN=err_SN)

# Sky map
skyname = glob.glob('skymap*.fits')[0]

os.system('cp {sky} BW_skymap.fits'.format(sky=skyname)
          )  # (change name so it's clear on GraceDB which skymap is ours)

#skytag = ["sky_loc","lvem"]
skytag = ["sky_loc"]

# Actually send info to gracedb
gracedb = GraceDb()
#gracedb.writeLog(graceid, "BayesWave Skymap image", filename='plots/skymap.png', tagname='sky_loc')
gracedb.writeLog(graceid,
                 "BayesWave skymap FITS",
                 filename='BW_skymap.fits',
                 tagname=skytag)
gracedb.writeLog(
    graceid,
    "<a href='https://ldas-jobs.ligo.caltech.edu/~meg.millhouse/O1/zero_lag/job_{0}'>BWB Follow-up results</a>"
    .format(graceid),
    tagname='pe')
gracedb.writeLog(graceid, paramtable, tagname='pe')
gracedb.writeLog(graceid, BFtable, tagname='pe')

os.chdir('..')
os.system('cp -r ' + dirname +
          ' /home/meg.millhouse/public_html/O1/zero_lag/job_' + graceid)
예제 #20
0
        if 'rank' in filename:
            rank_filenames.append(filename)
        if 'fap' in filename:
            fap_filenames.append(filename)

rank_filenames.sort()
fap_filenames.sort()

if (not rank_filenames) or (
        not fap_filenames):  # we couldn't find either rank or fap files
    # exit gracefully
    if opts.verbose:
        print "no iDQ timeseries for %s at %s" % (opts.classifier, opts.ifo)
    if not opts.skip_gracedb_upload:
        gracedb.writeLog(opts.gracedb_id,
                         message="No iDQ timeseries for %s at %s" %
                         (opts.classifier, opts.ifo))
    sys.exit(0)

#=================================================

# define plot
fig = plt.figure()
r_ax = plt.subplot(1, 1, 1)
f_ax = r_ax.twinx()
f_ax.set_yscale(
    'log'
)  # this may be fragile if fap=0 for all points in the plot. That's super rare, so maybe we don't have to worry about it?

r_ax.set_title(opts.ifo)
예제 #21
0
if event['pipeline'].lower() not in allowed_pipelines:
    print "  not allowed to label this pipeline"
    sys.exit(1)

logs = gdb.logs(gid).json()['log']
result = dict((ifo, 1) for ifo in ifos)
for log in logs:
    comment = log['comment']
    if "minimum glitch-FAP for" in comment:
        gFAP = float(comment.split()[-1])
        for ifo in ifos:
            if ifo in comment:
                result[ifo] = gFAP
                break

jFAP = np.prod(result.values())  ### take product of gFAPs for 2 IFOs

if jFAP <= jFAP_thr:
    message = "iDQ veto generator computed joint glitch-FAP : %.3e <= %.3e; <b>This is probably a glitch</b> and I am applying a DQV label" % (
        jFAP, jFAP_thr)
    if annotate_gracedb:
        gdb.writeLabel(gid, 'DQV')
        gdb.writeLog(gid, message=message, tagname=['data_quality'])
    print message
else:
    message = "iDQ veto generator computed joint glitch-FAP : %.3e > %.3e; <b>This is probably not a glitch</b> and I am not applying a label" % (
        jFAP, jFAP_thr)
    if annotate_gracedb:
        gdb.writeLog(gid, message=message, tagname=['data_quality'])
    print message
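# hedged worked example (numbers invented): per-IFO glitch-FAPs of 0.02 (H1)
# and 0.01 (L1) give jFAP = 0.02 * 0.01 = 2e-4, which is then compared
# against jFAP_thr to decide whether the DQV label is applied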
예제 #22
0
    def upload(
        self,
        fname,
        psds,
        low_frequency_cutoff,
        testing=True,
        extra_strings=None,
    ):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the PSDs.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to post to the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb

        self.save(fname)
        extra_strings = [] if extra_strings is None else extra_strings
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries
        psd_xmldoc = make_psd_xmldoc(psds_lal)

        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz",
                         open("tmp_psd.xml.gz", "rb").read(), "psd").json()
        gracedb.writeLog(r["graceid"], "using pycbc code hash %s" %
                         pycbc_version.git_hash).json()
        for text in extra_strings:
            gracedb.writeLog(r["graceid"], text).json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])

        if self.upload_snr_series:
            snr_series_fname = fname + '.hdf'
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.snr_series_psd[ifo].save(snr_series_fname,
                                              group='%s/psd' % ifo)
            GraceDb().writeFile(r['graceid'], snr_series_fname)

        return r['graceid']
예제 #23
0
graceid = alert['graceid']
if opts.verbose:
    print "found graceid=%s"%graceid

### figure out if this is a line-of-sight (_los) json update file we care about
if (alert['alert_type']=='update') and alert['file'] and alert['file'].endswith('js') and ("_los" in alert['file']): ### this could be fragile...

    ### download file
    los = json.loads( gracedb.files( graceid, alert['file'] ).read() )

    ### iterate through IFO pairs
    for ifo_pair, data in los.items():
        mi = data['MI'] # mutual information
        hj = data['Hj'] # joint entropy
        mid = mi / hj   # mutual information distance
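        # normalising MI by the joint entropy gives a dimensionless statistic
        # that, for discrete estimates, lies between 0 and 1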

        if config.has_section(ifo_pair): ### assume ifo_pair is alphabetically ordered...
            thr = config.getfloat(ifo_pair, 'thr')
            if mid > thr:
                labels = config.get(ifo_pair, 'labels').split()
                gracedb.writeLog( graceid, message='Mutual Information Distance for %s in %s line-of-sight frame is above %.3f! Applying labels: %s'%(alert['file'].split('_los')[0], ifo_pair, thr, ", ".join(labels)))
                for label in labels:
                    gracedb.writeLabel( graceid, label )

            else:
                gracedb.writeLog( graceid, message="Mutual Information Distance for %s in %s line-of-sight frame is below %.3f."%(alert['file'].split('_los')[0], ifo_pair, thr))

else:
    if opts.verbose:
        print "ignoring..."    
예제 #24
0
    def upload(self, fname, gracedb_server=None, testing=True,
               extra_strings=None):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        """
        from ligo.gracedb.rest import GraceDb

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.psds[ifo].save(snr_series_fname,
                                    group='%s/psd' % ifo)

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # upload PSDs. Note that the PSDs are already stored in the
            # original event file and we just upload a copy of that same file
            # here. This keeps things as they were in O2 and can be removed
            # after updating the follow-up infrastructure
            psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
            gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
                             psd_fname, open(fname, "rb").read(), "psd")
            logging.info("Uploaded PSDs for event %s", gid)

            # add other tags and comments
            gracedb.writeLog(
                    gid, "Using PyCBC code hash %s" % pycbc_version.git_hash)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text)

            # upload SNR series in HDF format
            if self.snr_series is not None:
                gracedb.writeFile(gid, snr_series_fname)
        except Exception as exc:
            logging.error('Something failed during the upload/annotation of '
                          'event %s on GraceDB. The event may not have been '
                          'uploaded!', fname)
            logging.error(str(exc))

        return gid
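# A minimal usage sketch for this newer signature, assuming a hypothetical
# candidate object `event`; the server URL below is a placeholder.
# event.upload('candidate.xml.gz',
#              gracedb_server='https://gracedb.example.org/api/',
#              testing=True,
#              extra_strings=['example annotation'])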
예제 #25
0
    def upload(self,
               fname,
               gracedb_server=None,
               testing=True,
               extra_strings=None,
               search='AllSky'):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to post to the GraceDB event.
        search: str
            String going into the "search" field of the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb
        import matplotlib
        matplotlib.use('Agg')
        import pylab as pl

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, search).json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # add info for tracking code version
            gracedb_tag_with_version(gracedb, gid)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text, tag_name=['analyst_comments'])
        except Exception as exc:
            logging.error(
                'Something failed during the upload/annotation of '
                'event %s on GraceDB. The event may not have been '
                'uploaded!', fname)
            logging.error(str(exc))

        # plot the SNR timeseries and noise PSDs
        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            snr_series_plot_fname = snr_series_fname.replace(
                '.hdf', '_snr.png')
            asd_series_plot_fname = snr_series_fname.replace(
                '.hdf', '_asd.png')
            pl.figure()
            ref_time = int(self.merger_time)
            for ifo in sorted(self.snr_series):
                curr_snrs = self.snr_series[ifo]
                curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
                pl.plot(curr_snrs.sample_times - ref_time,
                        abs(curr_snrs),
                        c=ifo_color(ifo),
                        label=ifo)
                if ifo in self.ifos:
                    base = 'foreground/{}/'.format(ifo)
                    snr = self.coinc_results[base + 'snr']
                    mt = (self.coinc_results[base + 'end_time'] +
                          self.time_offset)
                    pl.plot([mt - ref_time], [snr],
                            c=ifo_color(ifo),
                            marker='x')
            pl.legend()
            pl.xlabel('GPS time from {:d} (s)'.format(ref_time))
            pl.ylabel('SNR')
            pl.savefig(snr_series_plot_fname)
            pl.close()

            generate_asd_plot(self.psds, asd_series_plot_fname)

            # Additionally save the PSDs into the snr_series file
            for ifo in sorted(self.psds):
                # Undo dynamic range factor
                curr_psd = self.psds[ifo].astype(numpy.float64)
                curr_psd /= pycbc.DYN_RANGE_FAC**2.0
                curr_psd.save(snr_series_fname, group='%s/psd' % ifo)

        if self.probabilities is not None:
            prob_fname = fname.replace('.xml.gz', '_probs.json')
            prob_plot_fname = prob_fname.replace('.json', '.png')

            prob_plot = {
                k: v
                for (k, v) in self.probabilities.items() if v != 0.0
            }
            labels, sizes = zip(*prob_plot.items())
            colors = [source_color(label) for label in labels]
            fig, ax = pl.subplots()
            ax.pie(sizes,
                   labels=labels,
                   colors=colors,
                   autopct='%1.1f%%',
                   textprops={'fontsize': 15})
            ax.axis('equal')
            fig.savefig(prob_plot_fname)
            pl.close()

        # upload SNR series in HDF format and plots
        if gid is not None and self.snr_series is not None:
            try:
                gracedb.writeLog(gid,
                                 'SNR timeseries HDF file upload',
                                 filename=snr_series_fname)
                gracedb.writeLog(gid,
                                 'SNR timeseries plot upload',
                                 filename=snr_series_plot_fname,
                                 tag_name=['background'],
                                 displayName=['SNR timeseries'])
                gracedb.writeLog(gid,
                                 'ASD plot upload',
                                 filename=asd_series_plot_fname,
                                 tag_name=['psd'],
                                 displayName=['ASDs'])
            except Exception as exc:
                logging.error('Failed to upload plots for %s', gid)
                logging.error(str(exc))

        # upload source probabilities in JSON format and plot
        if gid is not None and self.probabilities is not None:
            try:
                gracedb.writeLog(gid,
                                 'Source probabilities JSON file upload',
                                 filename=prob_fname,
                                 tag_name=['em_follow'])
                logging.info('Uploaded source probabilities for event %s', gid)
                gracedb.writeLog(gid,
                                 'Source probabilities plot upload',
                                 filename=prob_plot_fname,
                                 tag_name=['em_follow'])
                logging.info(
                    'Uploaded source probabilities pie chart for '
                    'event %s', gid)
            except Exception as exc:
                logging.error(
                    'Failed to upload source probability results for %s', gid)
                logging.error(str(exc))

        return gid
    json.dumps({
        opts.classifier: {
            'nominal FAP': opts.FAPthr,
            'maximum reported FAP': maxFAP,
            'observed deadtime': dt,
            'duration': T
        }
    }))
file_obj.close()

if not opts.skip_gracedb_upload:
    message = "iDQ calibration sanity check for %s at %s within [%.3f, %.3f]" % (
        opts.classifier, ifo, opts.start, opts.end)
    if opts.verbose:
        print "    " + message
    gracedb.writeLog(opts.gracedb_id, message=message, filename=jsonfilename)

### plot calibration check
fig = isp.plt.figure()
ax = fig.add_axes(isp.default_axpos)

if np.any(np.array(maxFAP) > 0) and np.any(np.array(dt) > 0):
    ax.loglog(maxFAP, dt, marker='o', linestyle='none')
else:
    ax.plot(maxFAP, dt, marker='o', linestyle='none')

ax.set_xlabel('Nominal FAP')
ax.set_ylabel('Observed deadtime')

ax.grid(True)
xmin = 1e-4
예제 #27
0
multfits.make_dT(verbose=opts.verbose)  ### make time-delay histograms, etc.

multfits.make_los(verbose=opts.verbose)  ### make line-of-sight projections

multfits.make_confidence_regions(
    verbose=opts.verbose)  ### make confidence region stuff

multfits.make_comparison(
    verbose=opts.verbose)  ### compute comparison statistics

#-----------

### generate final html document
htmlname = multfits.write(verbose=opts.verbose)

### upload to GraceDb
if not opts.skip_gracedb_upload:
    if opts.verbose:
        print "uploading %s to GraceDb(%s)" % (htmlname, opts.graceDbURL)

    gdb = GraceDb(opts.graceDbURL)
    gdbdir = os.path.join(opts.graceDbURL, '..', 'events', opts.graceid,
                          'files')
    gdb.writeLog(
        opts.graceid,
        message='comparison of skymaps can be found <a href="%s">here</a>' %
        os.path.join(gdbdir, os.path.basename(htmlname)),
        filename=htmlname,
        tagname=fits2html.standard_tagname + opts.graceDb_tagname +
        opts.graceDb_html_tagname)
gps_start = event_gps_time - time_before
gps_end = event_gps_time + time_after

plotting_gps_start = event_gps_time - plotting_time_before
plotting_gps_end = event_gps_time + plotting_time_after

performance_gps_start = event_gps_time - performance_time_before
performance_gps_end = event_gps_time + performance_time_after

logger.info("Started searching for iDQ information within [%.3f, %.3f] at %s" %
            (gps_start, gps_end, ifo))
if not options.skip_gracedb_upload:
    gracedb.writeLog(
        gdb_id,
        message=
        "Started searching for iDQ information within [%.3f, %.3f] at %s" %
        (gps_start, gps_end, ifo),
        tagname=idq.tagnames)

#=================================================
# LOGIC for waiting for idq data
#=================================================
### figure out if we need to wait for time to pass
wait = gps_end - (idq.nowgps() + delay)
if wait > 0:
    logger.info("waiting %.2f seconds until we pass gps=%.3f" %
                (wait, gps_end))
    time.sleep(wait)

if options.realtime_log:
    ### now we need to parse the realtime log to figure out where the realtime job is
if opts.verbose:
    print "generating OmegaScans for : %s\n    gps : %.6f"%(opts.graceid, gps)

#-------------------------------------------------

### report to GraceDB that we've started follow-up
if upload_or_verbose:
    message = "automatic OmegaScans begun for: %s."%(", ".join(chansets))
    if not persist:
        message = message + " WARNING: we will not track the individual OmegaScan processes to ensure completion"

    if opts.verbose:
        print message
    if opts.upload:
        gdb.writeLog( opts.graceid, message=message, tagname=tagname )

### set up frame directory
frmdir = os.path.join(outdir, opts.graceid, "frames")
if os.path.exists(frmdir):
    if not opts.force: ### ignore existing data if --force was supplied
        raise ValueError( "directory=%s already exists!"%(frmdir) )
else:
    os.makedirs( frmdir )

### set up variables that are common for all chansets
username = getuser()
hostname = gethostname()

### iterate through chansets, processing each one separately
if persist:
예제 #30
0
    def upload(self, fname, gracedb_server=None, testing=True,
               extra_strings=None):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        """
        from ligo.gracedb.rest import GraceDb
        import matplotlib
        matplotlib.use('Agg')
        import pylab

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            snr_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_snr.png')
            psd_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_psd.png')
            pylab.figure()
            for ifo in self.snr_series:
                curr_snrs = self.snr_series[ifo]
                curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
                pylab.plot(curr_snrs.sample_times, abs(curr_snrs),
                           c=ifo_color(ifo), label=ifo)
                if ifo in self.ifos:
                    snr = self.coinc_results['foreground/%s/%s' %
                                             (ifo, 'snr')]
                    endt = self.coinc_results['foreground/%s/%s' %
                                              (ifo, 'end_time')]
                    pylab.plot([endt], [snr], c=ifo_color(ifo), marker='x')

            pylab.legend()
            pylab.xlabel('GPS time (s)')
            pylab.ylabel('SNR')
            pylab.savefig(snr_series_plot_fname)
            pylab.close()

            pylab.figure()
            for ifo in self.snr_series:
                # Undo dynamic range factor
                curr_psd = self.psds[ifo].astype(numpy.float64)
                curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0
                curr_psd.save(snr_series_fname, group='%s/psd' % ifo)
                # Can't plot log(0) so start from point 1
                pylab.loglog(curr_psd.sample_frequencies[1:],
                             curr_psd[1:]**0.5, c=ifo_color(ifo), label=ifo)
            pylab.legend()
            pylab.xlim([20, 2000])
            pylab.ylim([1E-24, 1E-21])
            pylab.xlabel('Frequency (Hz)')
            pylab.ylabel('ASD')
            pylab.savefig(psd_series_plot_fname)

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # upload PSDs. Note that the PSDs are already stored in the
            # original event file and we just upload a copy of that same file
            # here. This keeps things as they were in O2 and can be removed
            # after updating the follow-up infrastructure
            psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
            gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
                             psd_fname, open(fname, "rb").read(), "psd")
            logging.info("Uploaded PSDs for event %s", gid)

            # add other tags and comments
            gracedb.writeLog(
                    gid, "Using PyCBC code hash %s" % pycbc_version.git_hash)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text)

            # upload SNR series in HDF format and plots
            if self.snr_series is not None:
                gracedb.writeLog(gid, 'SNR timeseries HDF file upload',
                                 filename=snr_series_fname)
                gracedb.writeLog(gid, 'SNR timeseries plot upload',
                                 filename=snr_series_plot_fname,
                                 tag_name=['background'],
                                 displayName=['SNR timeseries'])
                gracedb.writeLog(gid, 'PSD plot upload',
                                 filename=psd_series_plot_fname,
                                 tag_name=['psd'], displayName=['PSDs'])

        except Exception as exc:
            logging.error('Something failed during the upload/annotation of '
                          'event %s on GraceDB. The event may not have been '
                          'uploaded!', fname)
            logging.error(str(exc))

        return gid
예제 #31
0
    print "found %d existing FITS files" % len(localnames)
    for filename in localnames:
        print "    " + filename

#-------------------------------------------------
# BEGIN THE ANALYSIS
#-------------------------------------------------

### iterate over filenames, spawning a snglFITS job for each
for filename in filenames:

    if not opts.skip_gracedb_upload:
        gracedb.writeLog(
            graceid,
            message='started skymap summary for <a href="%s">%s</a>' %
            (os.path.join(graceDbURL, '..', 'events', graceid, 'files',
                          filename), filename),
            tagname=fits2html.standard_tagname)

    #--------------------

    ### download FITS file
    localname = os.path.join(outdir, filename)
    if opts.verbose:
        print('downloading from %s : %s -> %s' %
              (graceDbURL, filename, localname))

    file_obj = open(localname, 'w')
    file_obj.write(gracedb.files(graceid, filename).read())
    file_obj.close()