Example #1
    def upload(self, fname, psds, low_frequency_cutoff, testing=True):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        pds: dict of pybc.types.FrequencySeries
            A ifo keyed dictionary of psds to be uploaded in association
        with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the psds.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
        test trigger (True) or a production trigger (False)
        """
        from ligo.gracedb.rest import GraceDb
        import lal
        import lal.series

        self.save(fname)
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        # Convert our psds to the xml psd format.
        # FIXME: we should not use lal.series!!!
        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries

        psd_xmldoc = lal.series.make_psd_xmldoc(psds_lal)
        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz",
                         open("tmp_psd.xml.gz", "rb").read(), "psd").json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])
Example #2
File: live.py Project: prayush/pycbc
    def upload(self, fname, psds, low_frequency_cutoff, testing=True):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        pds: dict of pybc.types.FrequencySeries
            A ifo keyed dictionary of psds to be uploaded in association
        with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the psds.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
        test trigger (True) or a production trigger (False)
        """
        from ligo.gracedb.rest import GraceDb
        import lal
        import lal.series

        self.save(fname)
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        # Convert our psds to the xml psd format.
        # FIXME: we should not use lal.series!!!
        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
            psds_lal[ifo] = fseries

        psd_xmldoc = lal.series.make_psd_xmldoc(psds_lal)
        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz", open("tmp_psd.xml.gz", "rb").read(),
                         "psd").json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])
Example #3
    def upload(self, fname, gracedb_server=None, testing=True,
               extra_strings=None):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        """
        from ligo.gracedb.rest import GraceDb
        import matplotlib
        matplotlib.use('Agg')
        import pylab

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            snr_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_snr.png')
            psd_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_psd.png')
            pylab.figure()
            for ifo in self.snr_series:
                curr_snrs = self.snr_series[ifo]
                curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
                pylab.plot(curr_snrs.sample_times, abs(curr_snrs),
                           c=ifo_color(ifo), label=ifo)
                if ifo in self.ifos:
                    snr = self.coinc_results['foreground/%s/%s' %
                                             (ifo, 'snr')]
                    endt = self.coinc_results['foreground/%s/%s' %
                                              (ifo, 'end_time')]
                    pylab.plot([endt], [snr], c=ifo_color(ifo), marker='x')

            pylab.legend()
            pylab.xlabel('GPS time (s)')
            pylab.ylabel('SNR')
            pylab.savefig(snr_series_plot_fname)
            pylab.close()

            pylab.figure()
            for ifo in self.snr_series:
                # Undo dynamic range factor
                curr_psd = self.psds[ifo].astype(numpy.float64)
                curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0
                curr_psd.save(snr_series_fname, group='%s/psd' % ifo)
                # Can't plot log(0) so start from point 1
                pylab.loglog(curr_psd.sample_frequencies[1:],
                             curr_psd[1:]**0.5, c=ifo_color(ifo), label=ifo)
            pylab.legend()
            pylab.xlim([20, 2000])
            pylab.ylim([1E-24, 1E-21])
            pylab.xlabel('Frequency (Hz)')
            pylab.ylabel('ASD')
            pylab.savefig(psd_series_plot_fname)
            pylab.close()

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # upload PSDs. Note that the PSDs are already stored in the
            # original event file and we just upload a copy of that same file
            # here. This keeps things as they were in O2 and can be removed
            # after updating the follow-up infrastructure
            psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
            gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
                             psd_fname, open(fname, "rb").read(), "psd")
            logging.info("Uploaded PSDs for event %s", gid)

            # add other tags and comments
            gracedb.writeLog(
                    gid, "Using PyCBC code hash %s" % pycbc_version.git_hash)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text)

            # upload SNR series in HDF format and plots
            if self.snr_series is not None:
                gracedb.writeLog(gid, 'SNR timeseries HDF file upload',
                                 filename=snr_series_fname)
                gracedb.writeLog(gid, 'SNR timeseries plot upload',
                                 filename=snr_series_plot_fname,
                                 tag_name=['background'],
                                 displayName=['SNR timeseries'])
                gracedb.writeLog(gid, 'PSD plot upload',
                                 filename=psd_series_plot_fname,
                                 tag_name=['psd'], displayName=['PSDs'])

        except Exception as exc:
            logging.error('Something failed during the upload/annotation of '
                          'event %s on GraceDB. The event may not have been '
                          'uploaded!', fname)
            logging.error(str(exc))

        return gid
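A sketch of how this variant might be called; per the code above, it returns the GraceDB event id, or None if the upload or annotation failed (the names and server URL below are illustrative, not from the source):

# Hypothetical call; gracedb_server and extra_strings are optional
gid = trigger.upload('coinc.xml.gz',
                     gracedb_server='https://gracedb.ligo.org/api/',
                     testing=True,
                     extra_strings=['example analyst comment'])
if gid is None:
    print('upload failed; details were logged via logging.error')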
Example #4
    def upload(self, fname, gracedb_server=None, testing=True,
               extra_strings=None):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        """
        from ligo.gracedb.rest import GraceDb
        import matplotlib
        matplotlib.use('Agg')
        import pylab

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            snr_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_snr.png')
            psd_series_plot_fname = snr_series_fname.replace('.hdf',
                                                             '_psd.png')
            pylab.figure()
            for ifo in sorted(self.snr_series):
                curr_snrs = self.snr_series[ifo]
                curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
                pylab.plot(curr_snrs.sample_times, abs(curr_snrs),
                           c=ifo_color(ifo), label=ifo)
                if ifo in self.ifos:
                    snr = self.coinc_results['foreground/%s/%s' %
                                             (ifo, 'snr')]
                    endt = self.coinc_results['foreground/%s/%s' %
                                              (ifo, 'end_time')]
                    pylab.plot([endt], [snr], c=ifo_color(ifo), marker='x')

            pylab.legend()
            pylab.xlabel('GPS time (s)')
            pylab.ylabel('SNR')
            pylab.savefig(snr_series_plot_fname)
            pylab.close()

            pylab.figure()
            for ifo in sorted(self.snr_series):
                # Undo dynamic range factor
                curr_psd = self.psds[ifo].astype(numpy.float64)
                curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0
                curr_psd.save(snr_series_fname, group='%s/psd' % ifo)
                # Can't plot log(0) so start from point 1
                pylab.loglog(curr_psd.sample_frequencies[1:],
                             curr_psd[1:]**0.5, c=ifo_color(ifo), label=ifo)
            pylab.legend()
            pylab.xlim([10, 1300])
            pylab.ylim([3E-24, 1E-20])
            pylab.xlabel('Frequency (Hz)')
            pylab.ylabel('ASD')
            pylab.savefig(psd_series_plot_fname)
            pylab.close()

        if self.probabilities is not None:
            prob_fname = fname.replace('.xml.gz', '_probs.json')
            prob_plot_fname = prob_fname.replace('.json', '.png')

            prob_plot = {k: v for (k, v) in self.probabilities.items()
                         if v != 0.0}
            labels, sizes = zip(*prob_plot.items())
            colors = [source_color(label) for label in labels]
            fig, ax = pylab.subplots()
            ax.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%',
                   textprops={'fontsize': 15})
            ax.axis('equal')
            fig.savefig(prob_plot_fname)
            pylab.close()

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # upload PSDs. Note that the PSDs are already stored in the
            # original event file and we just upload a copy of that same file
            # here. This keeps things as they were in O2 and can be removed
            # after updating the follow-up infrastructure
            psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
            gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
                             psd_fname, open(fname, "rb").read(), "psd")
            logging.info("Uploaded PSDs for event %s", gid)

            # add info for tracking code version
            version_str = 'Using PyCBC version {}{} at {}'
            version_str = version_str.format(
                    pycbc_version.version,
                    ' (release)' if pycbc_version.release else '',
                    os.path.dirname(pycbc.__file__))
            gracedb.writeLog(gid, version_str)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text, tag_name=['analyst_comments'])

            # upload SNR series in HDF format and plots
            if self.snr_series is not None:
                gracedb.writeLog(gid, 'SNR timeseries HDF file upload',
                                 filename=snr_series_fname)
                gracedb.writeLog(gid, 'SNR timeseries plot upload',
                                 filename=snr_series_plot_fname,
                                 tag_name=['background'],
                                 displayName=['SNR timeseries'])
                gracedb.writeLog(gid, 'PSD plot upload',
                                 filename=psd_series_plot_fname,
                                 tag_name=['psd'], displayName=['PSDs'])

            # upload source probabilities in json format and plot
            if self.probabilities is not None:
                gracedb.writeLog(gid, 'source probabilities JSON file upload',
                                 filename=prob_fname, tag_name=['em_follow'])
                logging.info('Uploaded source probabilities for event %s', gid)
                gracedb.writeLog(gid, 'source probabilities plot upload',
                                 filename=prob_plot_fname,
                                 tag_name=['em_follow'])
                logging.info('Uploaded source probabilities pie chart for '
                             'event %s', gid)

        except Exception as exc:
            logging.error('Something failed during the upload/annotation of '
                          'event %s on GraceDB. The event may not have been '
                          'uploaded!', fname)
            logging.error(str(exc))

        return gid
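For the source-probability branch above, a sketch of the input shape it assumes: `self.probabilities` is a label-keyed dict of floats, and entries equal to 0.0 are filtered out before the pie chart is drawn (the labels below are illustrative):

# Illustrative probabilities dict; the `v != 0.0` filter drops zero entries
probabilities = {'BNS': 0.7, 'NSBH': 0.2, 'BBH': 0.1, 'Terrestrial': 0.0}
prob_plot = {k: v for (k, v) in probabilities.items() if v != 0.0}
# prob_plot -> {'BNS': 0.7, 'NSBH': 0.2, 'BBH': 0.1}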
Example #5
def parseAlert(queue, queueByGraceID, alert, t0, config):
    '''
    the way approval_processorMP digests lvalerts

    --> check if this alert is a command and delegate to parseCommand

    1) instantiate a GraceDB client
    2) pull childConfig settings
    3) make sure we have the logger
    4) get lvalert specifics
    5) ensure we have the event_dict for the graceid = lvalert['uid']
    6) take the proper action depending on the incoming lvalert info and the current state of the event_dict
    '''

    #-------------------------------------------------------------------
    # process commands sent via lvalert_commandMP
    #-------------------------------------------------------------------

    if alert['uid'] == 'command':  ### this is a command message!
        return parseCommand(queue, queueByGraceID, alert,
                            t0)  ### delegate to parseCommand and return

    #-------------------------------------------------------------------
    # extract relevant config parameters and set up necessary data structures
    #-------------------------------------------------------------------

    # instantiate GraceDB client from the childConfig
    client = config.get('general', 'client')
    g = GraceDb(client)

    # get other childConfig settings; save in configdict
    voeventerror_email = config.get('general', 'voeventerror_email')
    force_all_internal = config.get('general', 'force_all_internal')
    preliminary_internal = config.get('general', 'preliminary_internal')
    forgetmenow_timeout = config.getfloat('general', 'forgetmenow_timeout')
    approval_processorMPfiles = config.get('general',
                                           'approval_processorMPfiles')
    hardware_inj = config.get('labelCheck', 'hardware_inj')
    wait_for_hardware_inj = config.getfloat('labelCheck',
                                            'wait_for_hardware_inj')
    default_farthresh = config.getfloat('farCheck', 'default_farthresh')
    time_duration = config.getfloat('injectionCheck', 'time_duration')
    humanscimons = config.get('operator_signoffCheck', 'humanscimons')

    ### extract options about advocates
    advocates = config.get('advocate_signoffCheck', 'advocates')
    advocate_text = config.get('advocate_signoffCheck', 'advocate_text')
    advocate_email = config.get('advocate_signoffCheck', 'advocate_email')

    ### extract options for GRB alerts
    em_coinc_text = config.get('GRB_alerts', 'em_coinc_text')
    coinc_text = config.get('GRB_alerts', 'coinc_text')
    grb_email = config.get('GRB_alerts', 'grb_email')
    notification_text = config.get('GRB_alerts', 'notification_text')

    ### extract options about idq
    ignore_idq = config.get('idq_joint_fapCheck', 'ignore_idq')
    default_idqthresh = config.getfloat('idq_joint_fapCheck',
                                        'default_idqthresh')
    idq_pipelines = config.get('idq_joint_fapCheck', 'idq_pipelines')
    idq_pipelines = idq_pipelines.replace(' ', '')
    idq_pipelines = idq_pipelines.split(',')

    skymap_ignore_list = config.get('have_lvem_skymapCheck',
                                    'skymap_ignore_list')

    ### set up configdict (passed to local data structure: eventDicts)
    configdict = makeConfigDict(config)

    # set up logging
    ### FIXME: why not open the logger each time parseAlert is called?
    ###        that would allow you to better control which loggers are necessary and minimize the number of open files.
    ###        it also minimizes the possibility of something accidentally being written to loggers because they were left open.
    ###        what's more, this is a natural place to set up multiple loggers, one for all data and one for data pertaining only to this graceid

    global logger
    if 'logger' in globals():  # check to see if we already have a logger
        logger = globals()['logger']
    else:  # if not, set one up
        logger = loadLogger(config)
        logger.info(
            '\n{0} ************ approval_processorMP.log RESTARTED ************\n'
            .format(convertTime()))

    #-------------------------------------------------------------------
    # extract relevant info about this alert
    #-------------------------------------------------------------------

    # get alert specifics and event_dict information
    graceid = alert['uid']
    alert_type = alert['alert_type']
    description = alert['description']
    filename = alert['file']

    #-------------------------------------------------------------------
    # ensure we have an event_dict and ForgetMeNow tracking this graceid
    #-------------------------------------------------------------------

    if alert_type == 'new':  ### new event -> we must first create event_dict and set up ForgetMeNow queue item for G events

        ### create event_dict
        event_dict = EventDict()  # create a new, blank instance of the EventDict class
        if is_external_trigger(alert):  # this is an external GRB trigger
            # populate this event_dict with GRB trigger info from the lvalert
            event_dict.grb_trigger_setup(alert['object'], graceid, g, config, logger)
        else:
            # populate this event_dict with information from the lvalert
            event_dict.setup(alert['object'], graceid, configdict, g, config, logger)
        eventDicts[graceid] = event_dict  # add the instance to the global eventDicts
        eventDictionaries[graceid] = event_dict.data  # add the dictionary to the global eventDictionaries

        ### ForgetMeNow queue item
        item = ForgetMeNow(t0, forgetmenow_timeout, graceid, eventDicts, queue,
                           queueByGraceID, logger)
        queue.insert(item)  # add queue item to the overall queue

        ### set up queueByGraceID
        newSortedQueue = utils.SortedQueue()  # create sorted queue for this event candidate
        newSortedQueue.insert(item)  # put the ForgetMeNow queue item into the sorted queue
        queueByGraceID[item.graceid] = newSortedQueue  # add the sorted queue to queueByGraceID
        saveEventDicts(approval_processorMPfiles)  # save so that expirationtime is updated from None

        message = '{0} -- {1} -- Created event dictionary for {1}.'.format(
            convertTime(), graceid)
        if loggerCheck(event_dict.data, message) == False:
            logger.info(message)
            g.writeLog(graceid,
                       'AP: Created event dictionary.',
                       tagname='em_follow')
        else:
            pass

    else:  ### not a new alert -> we may already be tracking this graceid

        if graceid in eventDicts:  ### we're already tracking it

            # get event_dict with expirationtime key updated for the rest of parseAlert
            event_dict = eventDicts[graceid]

            # find ForgetMeNow corresponding to this graceid and update expiration time
            for item in queueByGraceID[graceid]:
                if item.name == ForgetMeNow.name:  # selects the queue item that is a ForgetMeNow instance
                    item.setExpiration(t0)  # updates the expirationtime key
                    queue.resort()  ### may be expensive, but needed to guarantee the queue remains sorted
                    queueByGraceID[graceid].resort()
                    break
            else:  ### we couldn't find a ForgetMeNow for this event! Something is wrong!
                os.system(
                    'echo \'ForgetMeNow KeyError\' | mail -s \'ForgetMeNow KeyError {0}\' {1}'
                    .format(graceid, advocate_email))
                ### Reed thinks this is necessary as a safety net: we want the process to
                ### terminate if things are not set up correctly, to force us to fix it
                raise KeyError('could not find ForgetMeNow for %s' % graceid)

        else:  # event_dict for this event candidate does not exist; create it with up-to-date information
            event_dict = EventDict()  # create a new, blank instance of the EventDict class
            if is_external_trigger(alert):
                event_dict.grb_trigger_setup(
                    g.events(graceid).next(), graceid, g, config, logger)
            else:
                # fill in event_dict using the queried event candidate dictionary
                event_dict.setup(
                    g.events(graceid).next(), graceid, configdict, g, config,
                    logger)
                event_dict.update()  # update the event_dict with signoffs and iDQ info
            eventDicts[graceid] = event_dict  # add this instance to the global eventDicts
            eventDictionaries[graceid] = event_dict.data  # add the dictionary to the global eventDictionaries

            # create ForgetMeNow queue item and add to overall queue and queueByGraceID
            item = ForgetMeNow(t0, forgetmenow_timeout, graceid, eventDicts,
                               queue, queueByGraceID, logger)
            queue.insert(item)  # add queue item to the overall queue

            ### set up queueByGraceID
            newSortedQueue = utils.SortedQueue()  # create sorted queue for the new event candidate
            newSortedQueue.insert(item)  # put the ForgetMeNow queue item into the sorted queue
            queueByGraceID[item.graceid] = newSortedQueue  # add the sorted queue to queueByGraceID

            message = '{0} -- {1} -- Created event dictionary for {1}.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Created event dictionary.',
                           tagname='em_follow')
            else:
                pass

    #--------------------
    # ignore alerts that are not relevant, like simulation or MDC events
    #--------------------

    # if the graceid starts with 'M' for MDCs or 'S' for Simulation, ignore
    if re.match('M', graceid) or re.match('S', graceid):
        ### FIXME: we want to make this a config-file option!
        message = '{0} -- {1} -- Mock data challenge or simulation. Ignoring.'.format(
            convertTime(), graceid)
        if loggerCheck(event_dict.data, message) == False:
            logger.info(message)
            g.writeLog(graceid,
                       'AP: Mock data challenge or simulation. Ignoring.',
                       tagname='em_follow')
        else:
            pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    #--------------------
    # take care of external GRB triggers
    #--------------------
    if is_external_trigger(alert):
        # for now, everything related to external triggers is grouped together below;
        # we only care about log messages about possible coincidence with
        # gravitational waves, labels, and json file uploads
        if alert_type == 'label':
            record_label(event_dict.data, description)
        if alert_type == 'update':
            # is this a comment containing coinc info that needs to be parsed?
            if 'comment' in alert['object'].keys():
                comment = alert['object']['comment']
                if 'Significant event in on-source' in comment:  # got comment structure from Dipongkar
                    coinc_pipeline, coinc_fap = record_coinc_info(
                        event_dict.data, comment, alert, logger)
                    # begin creating the dictionary that will turn into json file
                    message_dict = {}
                    # populate text field for the GCN circular-to-be
                    message_dict['message'] = coinc_text.format(
                        graceid, coinc_fap)
                    message_dict['loaded_to_gracedb'] = 0
                    # make json string and file
                    message_dict = json.dumps(message_dict)
                    tmpfile = open('/tmp/coinc_{0}.json'.format(graceid), 'w')
                    tmpfile.write(message_dict)
                    tmpfile.close()
                    # make sure to load with a comment that we look for to check off that it's been loaded into gracedb
                    # was it an online or offline pipeline?
                    if 'Online' in coinc_pipeline:
                        event_dict.data['grb_online_json'] = message_dict
                        g.writeLog(
                            graceid,
                            'GRB-GW Coincidence JSON file: grb_online_json',
                            '/tmp/coinc_{0}.json'.format(graceid),
                            tagname='em_follow')
                    elif 'Offline' in coinc_pipeline:
                        event_dict.data['grb_offline_json'] = message_dict
                        g.writeLog(
                            graceid,
                            'GRB-GW Coincidence JSON file: grb_offline_json',
                            '/tmp/coinc_{0}.json'.format(graceid),
                            tagname='em_follow')
                    os.remove('/tmp/coinc_{0}.json'.format(graceid))
                    ### alert via email
                    os.system(
                        'echo \'{0}\' | mail -s \'Coincidence JSON created for {1}\' {2}'
                        .format(notification_text, graceid, grb_email))
                # is this the json file loaded into GraceDb?
                if 'GRB-GW Coincidence JSON file' in comment:
                    # if it is, find out which type of json it was and then message_dict['loaded_to_gracedb'] = 1
                    json_type = re.findall('file: (.*)', comment)[0]
                    message_dict = event_dict.data[json_type]
                    message_dict = json.loads(message_dict)  # converts the string to a dictionary
                    message_dict['loaded_to_gracedb'] = 1
                    # when we send to observers, message_dict['sent_to_observers'] = 1
            else:
                pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    #--------------------
    # Appending which checks must be satisfied in preliminary_to_initial state before moving on
    #--------------------

    if humanscimons == 'yes':
        preliminary_to_initial.append('operator_signoffCheck')
    if advocates == 'yes':
        preliminary_to_initial.append('advocate_signoffCheck')

    #--------------------
    # update information based on the alert_type
    # includes extracting information from the alert
    # may also include generating VOEvents and issuing them
    #--------------------

    # actions for each alert_type
    currentstate = event_dict.data['currentstate']  ### actions depend on the current state

    ### NOTE: we handle alert_type=="new" above as well and this conditional is slightly redundant...
    if alert_type == 'new':

        #----------------
        ### pass event through PipelineThrottle
        #----------------

        ### check if a PipelineThrottle exists for this node
        group = event_dict.data['group']
        pipeline = event_dict.data['pipeline']
        search = event_dict.data['search']
        key = generate_ThrottleKey(group, pipeline, search=search)
        if key in queueByGraceID:  ### a throttle already exists
            if len(queueByGraceID[key]) > 1:
                raise ValueError(
                    'too many QueueItems in SortedQueue for pipelineThrottle key=%s'
                    % key)
            item = queueByGraceID[key][0]  ### we expect only one item in this SortedQueue

        else:  ### we need to make a throttle!
            # pull PipelineThrottle parameters from the config
            if config.has_section(key):
                throttleWin = config.getfloat(key, 'throttleWin')
                targetRate = config.getfloat(key, 'targetRate')
                requireManualReset = config.get(key, 'requireManualReset')
                conf = config.getfloat(key, 'conf')

            else:
                throttleWin = config.getfloat('default_PipelineThrottle',
                                              'throttleWin')
                targetRate = config.getfloat('default_PipelineThrottle',
                                             'targetRate')
                requireManualReset = config.get('default_PipelineThrottle',
                                                'requireManualReset')
                conf = config.getfloat('default_PipelineThrottle', 'conf')
            item = PipelineThrottle(t0,
                                    throttleWin,
                                    targetRate,
                                    group,
                                    pipeline,
                                    search=search,
                                    requireManualReset=requireManualReset,
                                    conf=conf,
                                    graceDB_url=client)

            queue.insert(item)  ### add to overall queue

            newSortedQueue = utils.SortedQueue()  # create sorted queue for this event candidate
            newSortedQueue.insert(item)  # put the PipelineThrottle queue item into the sorted queue
            queueByGraceID[item.graceid] = newSortedQueue  # add the sorted queue to queueByGraceID

        item.addEvent(graceid, t0)  ### add new event to throttle
        ### this takes care of labeling in gracedb as necessary

        if item.isThrottled():
            ### send some warning message?
            return 0  ### we're done here because we're ignoring this event -> exit from parseAlert

#        #----------------
#        ### pass data to Grouper
#        #----------------
#        raise Warning("Grouper is not implemented yet! we're currently using a temporary groupTag and prototype code")

#        '''
#        need to extract groupTag from group_pipeline[_search] mapping.
#            These associations should be specified in the config file, so we'll have to specify this somehow.
#            probably just a "Grouper" section, with (option = value) pairs that look like (groupTag = nodeA nodeB nodeC ...)
#        '''
#        groupTag = 'TEMPORARY'

#        ### check to see if Grouper exists for this groupTag
#        if queueByGraceID.has_key(groupTag): ### at least one Grouper already exists

#            ### determine if any of the existing Groupers are still accepting new triggers
#            for item in queueByGraceID[groupTag]:
#                if item.isOpen():
#                    break ### this Grouper is still open, so we'll just use it
#            else: ### no Groupers are open, so we need to create one
#                item = Grouper(t0, grouperWin, groupTag, eventDicts, graceDB_url=client) ### create the actual QueueItem

#                queue.insert( item ) ### insert it in the overall queue

#                newSortedQueue = utils.SortedQueue() ### set up the SortedQueue for queueByGraceID
#                newSortedQueue.insert(item)
#                queueByGraceID[groupTag] = newSortedQueue

#        else: ### we need to make a Grouper
#            grouperWin = config.getfloat('grouper', 'grouperWin')
#            item = Grouper(t0, grouperWin, groupTag, eventDicts, graceDB_url=client) ### create the actual QueueItem

#            queue.insert( item ) ### insert it in the overall queue

#            newSortedQueue = utils.SortedQueue() ### set up the SortedQueue for queueByGraceID
#            newSortedQueue.insert(item)
#            queueByGraceID[groupTag] = newSortedQueue

#        item.addEvent( graceid ) ### add this graceid to the item

        return 0  ### we're done here. When Grouper makes a decision, we'll tick through the rest of the processes with a "selected" label

    elif alert_type == 'label':
        record_label(event_dict.data, description)

        if description == 'PE_READY':  ### PE_READY label was just applied. We may need to send an update alert

            message = '{0} -- {1} -- Sending update VOEvent.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(
                    graceid,
                    'AP: Received PE_READY label. Sending update VOEvent.',
                    tagname='em_follow')
                process_alert(event_dict.data, 'update', g, config, logger)

            else:
                pass

            message = '{0} -- {1} -- State: {2} --> complete.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: State: {0} --> complete.'.format(currentstate),
                           tagname='em_follow')
                event_dict.data['currentstate'] = 'complete'

            else:
                pass

        elif description == 'EM_READY':  ### EM_READY label was just applied. We may need to send an initial alert
            message = '{0} -- {1} -- Sending initial VOEvent.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(
                    graceid,
                    'AP: Received EM_READY label. Sending initial VOEvent.',
                    tagname='em_follow')
                process_alert(event_dict.data, 'initial', g, config, logger)

            else:
                pass

            message = '{0} -- {1} -- State: {2} --> initial_to_update.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: State: {0} --> initial_to_update.'.format(
                               currentstate),
                           tagname='em_follow')
                event_dict.data['currentstate'] = 'initial_to_update'

            else:
                pass

        elif description == "EM_Throttled":  ### the event is throttled and we need to turn off all processing for it

            event_dict.data[
                'currentstate'] = 'throttled'  ### update current state

            ### check if we need to send retractions
            voevents = event_dict.data['voevents']
            if len(voevents) > 0:
                if 'retraction' not in sorted(voevents)[-1]:
                    # there are existing VOEvents we've sent, but no retraction alert
                    process_alert(event_dict.data, 'retraction', g, config,
                                  logger)

            ### update ForgetMeNow expiration to handle all the clean-up?
            ### we probably do NOT want to change the clean-up schedule because we'll still likely receive a lot of alerts about this guy
            ### therefore, we just retain the local data and ignore him, rather than erasing the local data and having to query to reconstruct it repeatedly as new alerts come in
#            for item in queueByGraceID[graceid]: ### update expiration of the ForgetMeNow so it is immediately processed next.
#                if item.name == ForgetMeNow.name:
#                    time.setExpiration(-np.infty )
#                                                                ### FIXME: this can break the order in SortedQueue's. We need to pop and reinsert or call a manual resort
#                    queue.resort() ### may be expensive but is needed to guarantee that queue remains sorted
#                    queueByGraceID[graceid].resort()
#                    break
#            else:
#                raise ValueError('could not find ForgetMeNow QueueItem for graceid=%s'%graceid)

        elif description == "EM_Selected":  ### this event was selected by a Grouper
            raise NotImplementedError(
                'write logic to handle \"Selected\" labels')

        elif description == "EM_Superseded":  ### this event was superceded by another event within Grouper
            raise NotImplementedError(
                'write logic to handle \"Superseded" labels')

        elif checkLabels(description.split(), config) > 0:
            ### some other label was applied; we may need to issue a retraction notice
            event_dict.data['currentstate'] = 'rejected'

            ### check to see if we need to send a retraction
            voevents = event_dict.data['voevents']
            if len(voevents) > 0:
                if 'retraction' not in sorted(voevents)[-1]:
                    # there are existing VOEvents we've sent, but no retraction alert
                    process_alert(event_dict.data, 'retraction', g, config,
                                  logger)

        saveEventDicts(approval_processorMPfiles)  ### save the updated eventDict to disk
        return 0

    ### FIXME: Reed left off commenting here...

    elif alert_type == 'update':
        # first the case that we have a new lvem skymap
        if filename.endswith('.fits.gz') or filename.endswith('.fits'):
            # we only care about skymaps tagged lvem for sharing with MOU partners
            if 'lvem' in alert['object']['tag_names']:
                # in the past, we cared who submitted skymaps; keeping this functionality just in case
                submitter = alert['object']['issuer']['display_name']
                record_skymap(event_dict.data, filename, submitter, logger)
        # interested in iDQ information or other updates
        else:
            if 'comment' in alert['object']:
                comment = alert['object']['comment']
                if re.match('minimum glitch-FAP', comment):
                    # this is iDQ glitch-FAP information
                    record_idqvalues(event_dict.data, comment, logger)
                elif re.match('resent VOEvent', comment):
                    # another running instance of approval_processorMP re-sent a VOEvent
                    response = re.findall(r'resent VOEvent (.*) in (.*)',
                                          comment)  # extract which VOEvent was re-sent
                    event_dict.data[response[0][1]].append(response[0][0])
                    saveEventDicts(approval_processorMPfiles)
                elif 'EM-Bright probabilities computed from detection pipeline' in comment:  # got comment structure from Shaon G.
                    record_em_bright(event_dict.data, comment, logger)
                elif 'Temporal coincidence with external trigger' in comment:  # got comment structure from Alex U.
                    exttrig, coinc_far = record_coinc_info(
                        event_dict.data, comment, alert, logger)
                    # create dictionary that will become json file
                    message_dict = {}
                    grb_instrument = eventDictionaries[exttrig]['pipeline']
                    message_dict['message'] = em_coinc_text.format(
                        exttrig, grb_instrument, graceid, coinc_far)
                    message_dict['loaded_to_gracedb'] = 0
                    message_dict = json.dumps(message_dict)
                    # update the event dictionaries for both the gw and external triggers
                    eventDictionaries[exttrig]['em_coinc_json'] = message_dict  # updates the external trigger event_dict.data
                    event_dict.data['em_coinc_json'] = message_dict  # updates the gw trigger event_dict.data
                    # load json file to the gw gracedb page
                    tmpfile = open('/tmp/coinc_{0}.json'.format(graceid), 'w')
                    tmpfile.write(message_dict)
                    tmpfile.close()
                    g.writeLog(graceid,
                               'GRB-GW Coincidence JSON file: em_coinc_json',
                               '/tmp/coinc_{0}.json'.format(graceid),
                               tagname='em_follow')
                    os.remove('/tmp/coinc_{0}.json'.format(graceid))
                    # load json file to the external trigger page
                    tmpfile = open('/tmp/coinc_{0}.json'.format(exttrig), 'w')
                    tmpfile.write(message_dict)
                    tmpfile.close()
                    g.writeLog(exttrig,
                               'GRB-GW Coincidence JSON file: em_coinc_json',
                               '/tmp/coinc_{0}.json'.format(exttrig),
                               tagname='em_follow')
                    os.remove('/tmp/coinc_{0}.json'.format(exttrig))
                    ### alert via email
                    os.system(
                        'echo \'{0}\' | mail -s \'Coincidence JSON created for {1}\' {2}'
                        .format(notification_text, exttrig, grb_email))
                    saveEventDicts(approval_processorMPfiles)
                elif 'GRB-GW Coincidence JSON file' in comment:  # this is the comment that accompanies a loaded coinc json file
                    message_dict = event_dict.data['em_coinc_json']
                    message_dict = json.loads(message_dict)  # converts the string to a dictionary
                    message_dict['loaded_to_gracedb'] = 1
                    saveEventDicts(approval_processorMPfiles)
                else:
                    pass

    elif alert_type == 'signoff':
        signoff_object = alert['object']
        record_signoff(event_dict.data, signoff_object)

    #---------------------------------------------
    # run checks specific to currentstate of the event candidate
    #---------------------------------------------

    passedcheckcount = 0

    if currentstate == 'new_to_preliminary':
        # sleep for those cases where we don't have the INJ label right away
        time.sleep(wait_for_hardware_inj)
        queried_dict = g.events(graceid).next()  # query gracedb for the graceid
        # get the latest labels before running checks
        event_dict.data['labels'] = queried_dict['labels'].keys()
        for Check in new_to_preliminary:
            getattr(event_dict, Check)()  # run this check on the event candidate
            checkresult = event_dict.data[Check + 'result']
            if checkresult is None:
                pass
            elif checkresult is False:
                # because in 'new_to_preliminary' state, no need to apply DQV label
                message = '{0} -- {1} -- Failed {2} in currentstate: {3}.'.format(
                    convertTime(), graceid, Check, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Failed {0} in currentstate: {1}.'.format(
                                   Check, currentstate),
                               tagname='em_follow')
                else:
                    pass
                message = '{0} -- {1} -- State: {2} --> rejected.'.format(
                    convertTime(), graceid, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(
                        graceid,
                        'AP: State: {0} --> rejected.'.format(currentstate),
                        tagname='em_follow')
                    event_dict.data['currentstate'] = 'rejected'
                else:
                    pass
                saveEventDicts(approval_processorMPfiles)
                return 0
            elif checkresult is True:
                passedcheckcount += 1
        if passedcheckcount == len(new_to_preliminary):
            message = '{0} -- {1} -- Passed all {2} checks.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Passed all {0} checks.'.format(currentstate),
                           tagname='em_follow')
            else:
                pass
            message = '{0} -- {1} -- Sending preliminary VOEvent.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Sending preliminary VOEvent.',
                           tagname='em_follow')
                process_alert(event_dict.data, 'preliminary', g, config,
                              logger)
            else:
                pass
            message = '{0} -- {1} -- State: {2} --> preliminary_to_initial.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: State: {0} --> preliminary_to_initial.'.format(
                               currentstate),
                           tagname='em_follow')
                event_dict.data['currentstate'] = 'preliminary_to_initial'
            else:
                pass
            labels = event_dict.data['labels']
            # notify the operators if we haven't previously processed this event
            instruments = event_dict.data['instruments']
            for instrument in instruments:
                if instrument in str(labels):
                    pass
                else:
                    message = '{0} -- {1} -- Labeling {2}OPS.'.format(
                        convertTime(), graceid, instrument)
                    if loggerCheck(event_dict.data, message) == False:
                        logger.info(message)
                        g.writeLog(graceid,
                                   'AP: Labeling {0}OPS.'.format(instrument),
                                   tagname='em_follow')
                        g.writeLabel(graceid, '{0}OPS'.format(instrument))
                    else:
                        pass
            # notify the advocates if we haven't previously processed this event
            if 'ADV' in str(labels):
                pass
            else:
                message = '{0} -- {1} -- Labeling ADVREQ.'.format(
                    convertTime(), graceid)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Labeling ADVREQ.',
                               tagname='em_follow')
                    g.writeLabel(graceid, 'ADVREQ')
                    os.system(
                        'echo \'{0}\' | mail -s \'{1} passed criteria for follow-up\' {2}'
                        .format(advocate_text, graceid, advocate_email))
                    # expose event to LV-EM
                    url_perm_base = g.service_url + urllib.quote(
                        'events/{0}/perms/gw-astronomy:LV-EM:Observers/'.
                        format(graceid))
                    for perm in ['view', 'change']:
                        url = url_perm_base + perm
                        #g.put(url)
                else:
                    pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    elif currentstate == 'preliminary_to_initial':
        for Check in preliminary_to_initial:
            getattr(event_dict, Check)()  # run this check on the event candidate
            checkresult = event_dict.data[Check + 'result']
            if checkresult is None:
                pass
            elif checkresult is False:
                message = '{0} -- {1} -- Failed {2} in currentstate: {3}.'.format(
                    convertTime(), graceid, Check, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Failed {0} in currentstate: {1}.'.format(
                                   Check, currentstate),
                               tagname='em_follow')
                else:
                    pass
                message = '{0} -- {1} -- State: {2} --> rejected.'.format(
                    convertTime(), graceid, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(
                        graceid,
                        'AP: State: {0} --> rejected.'.format(currentstate),
                        tagname='em_follow')
                    event_dict.data['currentstate'] = 'rejected'
                else:
                    pass
                # need to set DQV label so long as it isn't the operator_signoffCheck or advocate_signoffCheck
                if 'signoffCheck' in Check:
                    message = '{0} -- {1} -- Not labeling DQV because signoffCheck is separate from explicit data quality checks.'.format(
                        convertTime(), graceid)
                    if loggerCheck(event_dict.data, message) == False:
                        logger.info(message)
                        g.writeLog(
                            graceid,
                            'AP: Not labeling DQV because signoffCheck is separate from explicit data quality checks.',
                            tagname='em_follow')
                    else:
                        pass
                else:
                    message = '{0} -- {1} -- Labeling DQV.'.format(
                        convertTime(), graceid)
                    if loggerCheck(event_dict.data, message) == False:
                        logger.info(message)
                        g.writeLog(graceid,
                                   'AP: Labeling DQV.',
                                   tagname='em_follow')
                        g.writeLabel(graceid, 'DQV')
                    else:
                        pass
                saveEventDicts(approval_processorMPfiles)
                return 0
            elif checkresult is True:
                passedcheckcount += 1
                if Check == 'have_lvem_skymapCheck':
                    # we want to send skymaps out as quickly as possible, even if humans
                    # have not vetted the event; if we have already sent this alert with
                    # this skymap, process_alert will simply not send the repeat
                    process_alert(event_dict.data, 'preliminary', g, config, logger)
        if passedcheckcount == len(preliminary_to_initial):
            message = '{0} -- {1} -- Passed all {2} checks.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Passed all {0} checks.'.format(currentstate),
                           tagname='em_follow')
            else:
                pass
            message = '{0} -- {1} -- Labeling EM_READY.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Labeling EM_READY.',
                           tagname='em_follow')
                g.writeLabel(graceid, 'EM_READY')
            else:
                pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    elif currentstate == 'initial_to_update':
        for Check in initial_to_update:
            getattr(event_dict, Check)()
            checkresult = event_dict.data[Check + 'result']
            if checkresult == None:
                pass
            elif checkresult == False:
                # need to set DQV label
                message = '{0} -- {1} -- Failed {2} in currentstate: {3}.'.format(
                    convertTime(), graceid, Check, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Failed {0} in currentstate: {1}.'.format(
                                   Check, currentstate),
                               tagname='em_follow')
                else:
                    pass
                message = '{0} -- {1} -- State: {2} --> rejected.'.format(
                    convertTime(), graceid, currentstate)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(
                        graceid,
                        'AP: State: {0} --> rejected.'.format(currentstate),
                        tagname='em_follow')
                    event_dict.data['currentstate'] = 'rejected'
                else:
                    pass
                message = '{0} -- {1} -- Labeling DQV.'.format(
                    convertTime(), graceid)
                if loggerCheck(event_dict.data, message) == False:
                    logger.info(message)
                    g.writeLog(graceid,
                               'AP: Labeling DQV.',
                               tagname='em_follow')
                    g.writeLabel(graceid, 'DQV')
                else:
                    pass
                saveEventDicts(approval_processorMPfiles)
                return 0
            elif checkresult == True:
                passedcheckcount += 1
        if passedcheckcount == len(initial_to_update):
            message = '{0} -- {1} -- Passed all {2} checks.'.format(
                convertTime(), graceid, currentstate)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Passed all {0} checks.'.format(currentstate),
                           tagname='em_follow')
            else:
                pass
            message = '{0} -- {1} -- Labeling PE_READY.'.format(
                convertTime(), graceid)
            if loggerCheck(event_dict.data, message) == False:
                logger.info(message)
                g.writeLog(graceid,
                           'AP: Labeling PE_READY.',
                           tagname='em_follow')
                g.writeLabel(graceid, 'PE_READY')
            else:
                pass
        saveEventDicts(approval_processorMPfiles)
        return 0

    else:
        saveEventDicts(approval_processorMPfiles)
        return 0
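
The same guarded pattern (run a check, log once via loggerCheck, write a GraceDB log, then reject or count a pass) repeats for every state above. A minimal sketch of the underlying idea, with hypothetical names and getattr in place of string-built eval; this is not the approval_processorMP API itself:

def run_checks(event_dict, checks, g, graceid, logger):
    """Run named checks on an event; returns 'rejected', 'passed' or 'pending'."""
    passed = 0
    for check in checks:
        getattr(event_dict, check)()               # run the check method by name
        result = event_dict.data[check + 'result']
        if result is None:
            continue                               # check could not be evaluated yet
        if result is False:
            logger.info('Failed %s', check)
            g.writeLog(graceid, 'AP: Failed {0}.'.format(check),
                       tagname='em_follow')
            return 'rejected'
        passed += 1
    return 'passed' if passed == len(checks) else 'pending'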
Example #6
0
    def upload(self, fname, gracedb_server=None, testing=True,
               extra_strings=None):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        """
        from ligo.gracedb.rest import GraceDb

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.psds[ifo].save(snr_series_fname,
                                    group='%s/psd' % ifo)

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # upload PSDs. Note that the PSDs are already stored in the
            # original event file and we just upload a copy of that same file
            # here. This keeps things as they were in O2 and can be removed
            # after updating the follow-up infrastructure
            psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
            gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
                             psd_fname, open(fname, "rb").read(), "psd")
            logging.info("Uploaded PSDs for event %s", gid)

            # add other tags and comments
            gracedb.writeLog(
                    gid, "Using PyCBC code hash %s" % pycbc_version.git_hash)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text)

            # upload SNR series in HDF format
            if self.snr_series is not None:
                gracedb.writeFile(gid, snr_series_fname)
        except Exception as exc:
            logging.error('Something failed during the upload/annotation of '
                          'event %s on GraceDB. The event may not have been '
                          'uploaded!', fname)
            logging.error(str(exc))

        return gid
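
A hedged usage sketch for the method above; the enclosing class name, the object construction and the argument values are assumptions, not part of this excerpt:

import logging

trigger = LiveCandidate(...)   # hypothetical object providing .save() etc.
gid = trigger.upload('H1L1-CANDIDATE.xml.gz',
                     gracedb_server='https://gracedb.ligo.org/api/',
                     testing=True,
                     extra_strings=['Low-latency PyCBC trigger'])
if gid is None:
    logging.error('Upload failed; the event is still saved locally')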
Example #7
0
    def upload(
        self,
        fname,
        psds,
        low_frequency_cutoff,
        testing=True,
        extra_strings=None,
    ):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the PSDs.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to attach to the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb

        self.save(fname)
        extra_strings = [] if extra_strings is None else extra_strings
        if testing:
            group = 'Test'
        else:
            group = 'CBC'

        gracedb = GraceDb()
        r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        logging.info("Uploaded event %s.", r["graceid"])

        if self.is_hardware_injection:
            gracedb.writeLabel(r['graceid'], 'INJ')
            logging.info("Tagging event %s as an injection", r["graceid"])

        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries
        psd_xmldoc = make_psd_xmldoc(psds_lal)

        ligolw_utils.write_filename(psd_xmldoc, "tmp_psd.xml.gz", gz=True)
        gracedb.writeLog(r["graceid"],
                         "PyCBC PSD estimate from the time of event",
                         "psd.xml.gz",
                         open("tmp_psd.xml.gz", "rb").read(), "psd").json()
        gracedb.writeLog(r["graceid"], "using pycbc code hash %s" %
                         pycbc_version.git_hash).json()
        for text in extra_strings:
            gracedb.writeLog(r["graceid"], text).json()
        logging.info("Uploaded file psd.xml.gz to event %s.", r["graceid"])

        if self.upload_snr_series:
            snr_series_fname = fname + '.hdf'
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.snr_series_psd[ifo].save(snr_series_fname,
                                              group='%s/psd' % ifo)
            gracedb.writeFile(r['graceid'], snr_series_fname)

        return r['graceid']
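
The PSD loop above does two things to each series: it drops the bins below the cutoff and undoes PyCBC's internal dynamic range scaling. A minimal sketch of just that step, assuming pycbc is installed and `psd` is one pycbc.types.FrequencySeries from the psds dict:

import numpy as np
from pycbc import DYN_RANGE_FAC

def physical_psd(psd, low_frequency_cutoff):
    # index of the first bin at or above the cutoff
    kmin = int(low_frequency_cutoff / psd.delta_f)
    # undo the scaling applied to strain data inside PyCBC
    return psd.numpy()[kmin:] / DYN_RANGE_FAC**2.0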
Example #8
0
    def upload(self,
               fname,
               gracedb_server=None,
               testing=True,
               extra_strings=None,
               search='AllSky'):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        gracedb_server: string, optional
            URL to the GraceDB web API service for uploading the event.
            If omitted, the default will be used.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to attach to the event, tagged
            'analyst_comments'.
        search: str
            String going into the "search" field of the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb
        import matplotlib
        matplotlib.use('Agg')
        import pylab as pl

        # first of all, make sure the event is saved on disk
        # as GraceDB operations can fail later
        self.save(fname)

        gid = None
        try:
            # try connecting to GraceDB
            gracedb = GraceDb(gracedb_server) \
                    if gracedb_server is not None else GraceDb()

            # create GraceDB event
            group = 'Test' if testing else 'CBC'
            r = gracedb.createEvent(group, "pycbc", fname, search).json()
            gid = r["graceid"]
            logging.info("Uploaded event %s", gid)

            if self.is_hardware_injection:
                gracedb.writeLabel(gid, 'INJ')
                logging.info("Tagging event %s as an injection", gid)

            # add info for tracking code version
            gracedb_tag_with_version(gracedb, gid)

            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(gid, text, tag_name=['analyst_comments'])
        except Exception as exc:
            logging.error(
                'Something failed during the upload/annotation of '
                'event %s on GraceDB. The event may not have been '
                'uploaded!', fname)
            logging.error(str(exc))

        # plot the SNR timeseries and noise PSDs
        if self.snr_series is not None:
            if fname.endswith('.xml.gz'):
                snr_series_fname = fname.replace('.xml.gz', '.hdf')
            else:
                snr_series_fname = fname.replace('.xml', '.hdf')
            snr_series_plot_fname = snr_series_fname.replace(
                '.hdf', '_snr.png')
            asd_series_plot_fname = snr_series_fname.replace(
                '.hdf', '_asd.png')
            pl.figure()
            ref_time = int(self.merger_time)
            for ifo in sorted(self.snr_series):
                curr_snrs = self.snr_series[ifo]
                curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
                pl.plot(curr_snrs.sample_times - ref_time,
                        abs(curr_snrs),
                        c=ifo_color(ifo),
                        label=ifo)
                if ifo in self.ifos:
                    base = 'foreground/{}/'.format(ifo)
                    snr = self.coinc_results[base + 'snr']
                    mt = (self.coinc_results[base + 'end_time'] +
                          self.time_offset)
                    pl.plot([mt - ref_time], [snr],
                            c=ifo_color(ifo),
                            marker='x')
            pl.legend()
            pl.xlabel('GPS time from {:d} (s)'.format(ref_time))
            pl.ylabel('SNR')
            pl.savefig(snr_series_plot_fname)
            pl.close()

            generate_asd_plot(self.psds, asd_series_plot_fname)

            # Additionally save the PSDs into the snr_series file
            for ifo in sorted(self.psds):
                # Undo dynamic range factor
                curr_psd = self.psds[ifo].astype(numpy.float64)
                curr_psd /= pycbc.DYN_RANGE_FAC**2.0
                curr_psd.save(snr_series_fname, group='%s/psd' % ifo)

        if self.probabilities is not None:
            prob_fname = fname.replace('.xml.gz', '_probs.json')
            prob_plot_fname = prob_fname.replace('.json', '.png')

            prob_plot = {
                k: v
                for (k, v) in self.probabilities.items() if v != 0.0
            }
            labels, sizes = zip(*prob_plot.items())
            colors = [source_color(label) for label in labels]
            fig, ax = pl.subplots()
            ax.pie(sizes,
                   labels=labels,
                   colors=colors,
                   autopct='%1.1f%%',
                   textprops={'fontsize': 15})
            ax.axis('equal')
            fig.savefig(prob_plot_fname)
            pl.close()

        # upload SNR series in HDF format and plots
        if gid is not None and self.snr_series is not None:
            try:
                gracedb.writeLog(gid,
                                 'SNR timeseries HDF file upload',
                                 filename=snr_series_fname)
                gracedb.writeLog(gid,
                                 'SNR timeseries plot upload',
                                 filename=snr_series_plot_fname,
                                 tag_name=['background'],
                                 displayName=['SNR timeseries'])
                gracedb.writeLog(gid,
                                 'ASD plot upload',
                                 filename=asd_series_plot_fname,
                                 tag_name=['psd'],
                                 displayName=['ASDs'])
            except Exception as exc:
                logging.error('Failed to upload plots for %s', gid)
                logging.error(str(exc))

        # upload source probabilities in JSON format and plot
        if gid is not None and self.probabilities is not None:
            try:
                gracedb.writeLog(gid,
                                 'Source probabilities JSON file upload',
                                 filename=prob_fname,
                                 tag_name=['em_follow'])
                logging.info('Uploaded source probabilities for event %s', gid)
                gracedb.writeLog(gid,
                                 'Source probabilities plot upload',
                                 filename=prob_plot_fname,
                                 tag_name=['em_follow'])
                logging.info(
                    'Uploaded source probabilities pie chart for '
                    'event %s', gid)
            except Exception as exc:
                logging.error(
                    'Failed to upload source probability results for %s', gid)
                logging.error(str(exc))

        return gid
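
For context, the source-probability pie chart above in self-contained form, with made-up probabilities and default colors standing in for the source_color mapping:

import matplotlib
matplotlib.use('Agg')
import pylab as pl

probabilities = {'BNS': 0.7, 'NSBH': 0.2, 'BBH': 0.1, 'Terrestrial': 0.0}  # hypothetical
prob_plot = {k: v for k, v in probabilities.items() if v != 0.0}
labels, sizes = zip(*prob_plot.items())
fig, ax = pl.subplots()
ax.pie(sizes, labels=labels, autopct='%1.1f%%', textprops={'fontsize': 15})
ax.axis('equal')
fig.savefig('probabilities.png')
pl.close()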
Example #9
0
graceid = alert['graceid']
if opts.verbose:
    print "found graceid=%s"%graceid

### figure out if this is an update alert carrying a line-of-sight (_los) file
if (alert['alert_type']=='update') and alert['file'] and alert['file'].endswith('js') and ("_los" in alert['file']): ### this could be fragile...

    ### download file
    los = json.loads( gracedb.files( graceid, alert['file'] ).read() )

    ### iterate through IFO pairs
    for ifo_pair, data in los.items():
        mi = data['MI'] # mutual information
        hj = data['Hj'] # joint entropy
        mid = mi / hj   # mutual information distance

        if config.has_section(ifo_pair): ### assume ifo_pair is alphabetically ordered...
            thr = config.getfloat(ifo_pair, 'thr')
            if mid > thr:
                labels = config.get(ifo_pair, 'labels').split()
                gracedb.writeLog( graceid, message='Mutual Information Distance for %s in %s line-of-sight frame is above %.3f! Applying labels: %s'%(alert['file'].split('_los')[0], ifo_pair, thr, ", ".join(labels)))
                for label in labels:
                    gracedb.writeLabel( graceid, label )

            else:
                gracedb.writeLog( graceid, message="Mutual Information Distnace for %s in %s line-of-sight frame is below %.3f."%(alert['file'].split('_los')[0], ifo_pair, thr, ", ".join(labels)))

else:
    if opts.verbose:
        print "ignoring..."    
Example #10
0
if event['pipeline'].lower() not in allowed_pipelines:
    print "  not allowed to label this pipeline"
    sys.exit(1)

logs = gdb.logs(gid).json()['log']
result = dict((ifo, 1) for ifo in ifos)
for log in logs:
    comment = log['comment']
    if "minimum glitch-FAP for" in comment:
        gFAP = float(comment.split()[-1])
        for ifo in ifos:
            if ifo in comment:
                result[ifo] = gFAP
                break

jFAP = np.prod(list(result.values()))  ### take product of gFAPs for 2 IFOs

if jFAP <= jFAP_thr:
    message = "iDQ veto generator computed joint glitch-FAP : %.3e <= %.3e; <b>This is probably a glitch</b> and I am applying a DQV label" % (
        jFAP, jFAP_thr)
    if annotate_gracedb:
        gdb.writeLabel(gid, 'DQV')
        gdb.writeLog(gid, message=message, tagname=['data_quality'])
    print(message)
else:
    message = "iDQ veto generator computed joint glitch-FAP : %.3e > %.3e; <b>This is probably not a glitch</b> and I am not applying a label" % (
        jFAP, jFAP_thr)
    if annotate_gracedb:
        gdb.writeLog(gid, message=message, tagname=['data_quality'])
    print(message)
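
The decision rule above in isolation, with hypothetical numbers; note that the product treats the per-IFO glitch-FAPs as independent:

import numpy as np

gFAPs = {'H1': 1.3e-2, 'L1': 4.0e-1}   # hypothetical per-IFO minimum glitch-FAPs
jFAP = np.prod(list(gFAPs.values()))   # joint FAP under independence
jFAP_thr = 1e-3                        # example threshold
apply_dqv = jFAP <= jFAP_thr           # True -> probably a glitch, label DQV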
Example #11
0
class PipelineThrottle(utils.QueueItem):
    '''
    A throttle that determines which events approval processor will actually track.
    This is implemented so that pipelines which are behaving badly do not trigger alerts.
    We delegate the task of updating the list of events upon expiration to this class's single task: Throttle(lvalertMPutils.Task).
    The decision making and applying of labels, etc. are handled within *this* class.
    In particular, .addEvent() checks for state changes and applies labels as needed.

    assigns group_pipeline[_search] to self.graceid for easy lookup and management within queueByGraceID

    WARNING: all windowing is based off the time at which the alert is received by lvalert_listenMP rather than the gpstime or creation time. We may want to change this.
    '''
    name = 'pipeline throttle'

    def __init__(self,
                 t0,
                 eventDicts,
                 grouperWin,
                 win,
                 targetRate,
                 group,
                 pipeline,
                 search=None,
                 requireManualReset=False,
                 conf=0.9,
                 graceDB_url='https://gracedb.ligo.org/api/'):
        self.eventDicts = eventDicts  ### pointer to the dictionary of event dicts, needed for determining number of triggers with different gpstimes
        ### record data about the pipeline (equivalently, the lvalert node)
        self.group = group
        self.pipeline = pipeline
        self.search = search

        ### set self.graceid for easy lookup and automatic management
        self.graceid = generate_ThrottleKey(group, pipeline, search)

        self.description = "a throttle on the events approval processor will react to from %s" % (
            self.graceid)

        self.events = []  ### list managed by Throttle task

        self.win = win  ### the window over which we track events
        self.targetRate = targetRate  ### the target rate at which we expect events
        self.conf = conf  ### determines the upper limit on the acceptable number of events in win via a poisson one-sided confidence interval

        self.computeNthr()  ### sets self.Nthr

        self.graceDB = GraceDb(graceDB_url)

        tasks = [
            Throttle(self.events,
                     eventDicts,
                     grouperWin,
                     win,
                     self.Nthr,
                     requireManualReset=requireManualReset
                     )  ### there is only one task!
        ]
        super(PipelineThrottle, self).__init__(t0,
                                               tasks)  ### delegate to parent

    def computeNthr(self):
        '''
        determines the upper limit on the acceptable number of events within win based on targetRate and conf
        assumes triggers are poisson distributed in time

        finds the minimum Nthr such that
            \sum_{n=0}^{Nthr} p(n|targetRate*win) >= conf
        via direct iteration.

        WARNING: this could be slow for large targetRate*win or large conf!
        '''
        ### handle special cases where algorithm won't converge
        if (self.conf > 1) or (self.conf < 0):
            raise ValueError('unphysical confidence level!')
        elif self.conf == 1:
            self.Nthr = np.inf
            return  ### the direct iteration below would never terminate in this case

        ### set up direct iteration
        k = self.targetRate * self.win
        self.Nthr = 0
        logProb = self.__logProb__(self.Nthr, k)
        logConf = np.log(self.conf)

        ### integrate
        while logProb < logConf:
            self.Nthr += 1
            logProb = self.__sumLogs__(logProb, self.__logProb__(self.Nthr, k))
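
        # Cross-check (a sketch, not part of the original class): scipy
        # computes the same one-sided Poisson upper limit directly,
        #     from scipy.stats import poisson
        #     Nthr = int(poisson.ppf(self.conf, self.targetRate * self.win))
        # matching the iteration above up to edge-case conventions.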

    def __sumLogs__(self, *logs):
        '''
        take the sum of logarithms to high precision
        '''
        logs = np.array(logs)
        maxLogs = np.max(logs)
        return np.log(np.sum(np.exp(logs - maxLogs))) + maxLogs

    def __logProb__(self, n, k):
        '''
        return the logarithm of the poisson probability
        '''
        return n * np.log(k) - k - self.__logFactorial__(n)

    def __logFactorial__(self, n):
        '''
        return the log of a factorial, using Stirling's approximation if n > 20
        '''
        if n > 20:  ### use stirling's approximation because np.math.factorial returns a long float when n>20, which breaks np.log
            ### this should be accurate to ~1 part in 1e4, if not more so
            return 0.5 * np.log(np.pi * 2 * n) + n * np.log(n) - n
        else:
            return np.log(np.math.factorial(n))
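
        # Note (a sketch): math.lgamma(n + 1) equals log(n!) to float
        # precision for any n, so both branches above could be replaced by
        #     import math
        #     return math.lgamma(n + 1)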

    def isThrottled(self):
        '''
        determines if this pipeline is throttled
        delegates to the task
        '''
        return self.tasks[0].isThrottled()

    def addEvent(self, graceid, t0):
        '''
        adds a graceid to self.events and keeps them ordered
        Checks for state changes of self.throttled and applies labels in GraceDb as necessary
        '''
        wasThrottled = self.isThrottled()  ### figure out if we're already throttled before adding event
        for i, (_, t1) in enumerate(self.events):  ### insert in order
            if t0 < t1:
                self.events.insert(i, (graceid, t0))
                break
        else:
            self.events.append((graceid, t0))
        ### NOTE: we do not update expiration because it should be handled within a call to execute()
        ### either the expiration is too early, in which case execute() is smart enough to handle this
        ### (note, we expect events to come in in order, so we shouldn't ever have to set the expiration to earlier than it was before...)
        ### or expiration is already infty, in which case we require a manual reset anyway

        if wasThrottled:  ### we are already throttled, so we just label the new graceid
            self.labelAsThrottled(graceid)

        elif self.isThrottled():  ### we were not throttled, but now we are, so we label everything as throttled.
            for graceid, _ in self.events:
                self.labelAsThrottled(graceid)

        self.complete = False  ### there is now at least one item being tracked
        ### FIXME: need pointer to queue and queueByGraceID to update complete attribute

    def labelAsThrottled(self, graceid):
        """
        attempts to label the graceid as "EM_Throttled"
        """
        try:
            self.graceDB.writeLabel(graceid, "EM_Throttled")
        except:
            pass  ### FIXME: print some intelligent error message here!

    def reset(self):
        '''
        Resets the throttle (empties self.events)
        After emptying self.events, we force self.tasks[0] to expire and then delegate to self.execute.
        This in turn marks the item as complete (as well as moving self.tasks[0] from self.tasks to self.completedTasks)

        The main use case is from within ResetThrottleTask, which uses reset() to mark the item complete and then removes it from queueByGraceID.
        '''
        while self.events:  ### modify this list in place instead of creating a new object. This way, the reference within self.tasks[0] is updated too
            self.events.pop(0)
        self.tasks[0].expiration = -np.inf  ### need to set this so that the call to self.execute is guaranteed to actually delegate to the task
        self.execute(verbose=False)
Example #12
0
							dic_path = segdir+'/GDB/%s/%s.json'%(coin_group,'%s-%s'%(dictionary[event]['gpstime'],event))
							if not os.path.exists("%s/GDB/%s/"%(segdir,coin_group)):
								os.makedirs("%s/GDB/%s/"%(segdir,coin_group))
							with open(dic_path, 'wt') as fp:
								json.dump(gdb_dic, fp)
							
							#Upload dictionary to GraceDb
							response = gdb.createEvent('Burst','LIB',dic_path, search='AllSky', filecontents=None) #gdb.createEvent('Test','LIB',dic_path, search='AllSky', filecontents=None)
							
							#Parse GraceDb ID so that labels can be applied
							response = json.loads(response.read())
							gid = response["graceid"]
							
							#Mark GraceDb event as hardware injection if need be
							if any([run_dic['data']['inj flags'][ifo] for ifo in ifos]):
								gdb.writeLabel(gid, 'INJ')
			
							#Mark GraceDb event as data-quality veto if need be
							if any([run_dic['data']['DQV flags'][ifo] for ifo in ifos]):
								gdb.writeLabel(gid, 'DQV')

							#Mark that event has been uploaded to GDB
							dictionary[event]['GDB upload'] = True
		
		#If in signal training mode, match the LIB event with its injection 
		if coin_mode == 'sig_train':
			#find the times of the training injections
			inj_times = commands.getstatusoutput('%s/ligolw_print %s/training_injections/raw/*.xml -c "time_geocent_gps" -c "time_geocent_gps_ns" -d "."'%(bindir,segdir))[1].split()
			print "Inj times: ", inj_times

			#First mark all events as non-detections
Example #13
0
    def upload(
        self,
        fname,
        psds,
        low_frequency_cutoff,
        testing=True,
        extra_strings=None,
    ):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the PSDs.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to attach to the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb

        # first of all, make sure the event and PSDs are saved on disk
        # as GraceDB operations can fail later

        self.save(fname)

        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit,
                len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC**2.0
            psds_lal[ifo] = fseries
        psd_xmldoc = make_psd_xmldoc(psds_lal)
        psd_xml_path = os.path.splitext(fname)[0] + '-psd.xml.gz'
        ligolw_utils.write_filename(psd_xmldoc, psd_xml_path, gz=True)

        if self.upload_snr_series:
            snr_series_fname = os.path.splitext(fname)[0] + '.hdf'
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.snr_series_psd[ifo].save(snr_series_fname,
                                              group='%s/psd' % ifo)

        # try connecting to GraceDB
        try:
            gracedb = GraceDb(self.gracedb_server) \
                    if self.gracedb_server is not None else GraceDb()
        except Exception as exc:
            logging.error('Cannot connect to GraceDB')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!',
                          fname)
            return None

        # create GraceDB event
        group = 'Test' if testing else 'CBC'
        try:
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        except Exception as exc:
            logging.error('Cannot create GraceDB event')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!',
                          fname)
            return None
        logging.info("Uploaded event %s", r["graceid"])

        if self.is_hardware_injection:
            try:
                gracedb.writeLabel(r['graceid'], 'INJ')
            except Exception as exc:
                logging.error("Cannot tag event %s as an injection",
                              r["graceid"])
                logging.error(str(exc))
            logging.info("Tagging event %s as an injection", r["graceid"])

        # upload PSDs
        try:
            gracedb.writeLog(r["graceid"],
                             "PyCBC PSD estimate from the time of event",
                             "psd.xml.gz",
                             open(psd_xml_path, "rb").read(), "psd").json()
        except Exception as exc:
            logging.error("Cannot upload PSDs for event %s", r["graceid"])
            logging.error(str(exc))
        logging.info("Uploaded PSDs for event %s", r["graceid"])

        # add other tags and comments
        try:
            gracedb.writeLog(
                r["graceid"],
                "Using PyCBC code hash %s" % pycbc_version.git_hash).json()
            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(r["graceid"], text).json()
        except Exception as exc:
            logging.error("Cannot write comments for event %s", r["graceid"])
            logging.error(str(exc))

        # upload SNR series in HDF format
        if self.upload_snr_series:
            try:
                gracedb.writeFile(r['graceid'], snr_series_fname)
            except Exception as exc:
                logging.error("Cannot upload HDF SNR series for event %s",
                              r["graceid"])
                logging.error(str(exc))

        return r['graceid']
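
The fail-safe structure above (save everything to disk first, then wrap each GraceDB call separately so one failure cannot abort the rest) can be distilled into a small helper; a sketch with a hypothetical name:

import logging

def guarded(action, errmsg, *args, **kwargs):
    """Run one GraceDB call; log the error and carry on if it fails."""
    try:
        return action(*args, **kwargs)
    except Exception as exc:
        logging.error(errmsg)
        logging.error(str(exc))
        return None

# e.g.: guarded(gracedb.writeLabel, 'Cannot tag event as injection', gid, 'INJ')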
Example #14
0
    def upload(self, fname, psds, low_frequency_cutoff,
               testing=True,
               extra_strings=None,
               ):
        """Upload this trigger to gracedb

        Parameters
        ----------
        fname: str
            The name to give the xml file associated with this trigger
        psds: dict of pycbc.types.FrequencySeries
            An ifo-keyed dictionary of PSDs to be uploaded in association
            with this trigger.
        low_frequency_cutoff: float
            The low frequency cutoff of the PSDs.
        testing: bool
            Switch to determine if the upload should be sent to gracedb as a
            test trigger (True) or a production trigger (False).
        extra_strings: list of str, optional
            Additional log messages to attach to the GraceDB event.
        """
        from ligo.gracedb.rest import GraceDb

        # first of all, make sure the event and PSDs are saved on disk
        # as GraceDB operations can fail later

        self.save(fname)

        psds_lal = {}
        for ifo in psds:
            psd = psds[ifo]
            kmin = int(low_frequency_cutoff / psd.delta_f)
            fseries = lal.CreateREAL8FrequencySeries(
                "psd", psd.epoch, low_frequency_cutoff, psd.delta_f,
                lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
            fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
            psds_lal[ifo] = fseries
        psd_xmldoc = make_psd_xmldoc(psds_lal)
        psd_xml_path = os.path.splitext(fname)[0] + '-psd.xml.gz'
        ligolw_utils.write_filename(psd_xmldoc, psd_xml_path, gz=True)

        if self.upload_snr_series:
            snr_series_fname = os.path.splitext(fname)[0] + '.hdf'
            for ifo in self.snr_series:
                self.snr_series[ifo].save(snr_series_fname,
                                          group='%s/snr' % ifo)
                self.snr_series_psd[ifo].save(snr_series_fname,
                                              group='%s/psd' % ifo)

        # try connecting to GraceDB
        try:
            gracedb = GraceDb(self.gracedb_server) \
                    if self.gracedb_server is not None else GraceDb()
        except Exception as exc:
            logging.error('Cannot connect to GraceDB')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!', fname)
            return None

        # create GraceDB event
        group = 'Test' if testing else 'CBC'
        try:
            r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
        except Exception as exc:
            logging.error('Cannot create GraceDB event')
            logging.error(str(exc))
            logging.error('Carrying on, but event %s will NOT be uploaded!', fname)
            return None
        logging.info("Uploaded event %s", r["graceid"])

        if self.is_hardware_injection:
            try:
                gracedb.writeLabel(r['graceid'], 'INJ')
            except Exception as exc:
                logging.error("Cannot tag event %s as an injection", r["graceid"])
                logging.error(str(exc))
            logging.info("Tagging event %s as an injection", r["graceid"])

        # upload PSDs
        try:
            gracedb.writeLog(r["graceid"],
                             "PyCBC PSD estimate from the time of event",
                             "psd.xml.gz", open(psd_xml_path, "rb").read(),
                             "psd").json()
        except Exception as exc:
            logging.error("Cannot upload PSDs for event %s", r["graceid"])
            logging.error(str(exc))
        logging.info("Uploaded PSDs for event %s", r["graceid"])

        # add other tags and comments
        try:
            gracedb.writeLog(r["graceid"],
                "Using PyCBC code hash %s" % pycbc_version.git_hash).json()
            extra_strings = [] if extra_strings is None else extra_strings
            for text in extra_strings:
                gracedb.writeLog(r["graceid"], text).json()
        except Exception as exc:
            logging.error("Cannot write comments for event %s", r["graceid"])
            logging.error(str(exc))

        # upload SNR series in HDF format
        if self.upload_snr_series:
            try:
                gracedb.writeFile(r['graceid'], snr_series_fname)
            except Exception as exc:
                logging.error("Cannot upload HDF SNR series for event %s",
                              r["graceid"])
                logging.error(str(exc))

        return r['graceid']
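
Finally, a hedged usage sketch for this variant of upload; the candidate object and the PSD dictionary are assumptions, not shown in the excerpt:

import logging

psds = {'H1': h1_psd, 'L1': l1_psd}   # pycbc.types.FrequencySeries per ifo
gid = candidate.upload('H1L1-CANDIDATE.xml.gz', psds,
                       low_frequency_cutoff=20.0,
                       testing=True,
                       extra_strings=['Low-latency PyCBC Live trigger'])
if gid is None:
    logging.error('Upload failed; the event and PSDs remain on disk')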