def process_row(self, channel, rate, bin_idx, buftime, row):
    """
    Given a channel, rate, and the current buffer time, will process a row
    from a gstreamer buffer.
    """
    # if segments provided, ensure that trigger falls within these segments
    if self.frame_segments[self.instrument]:
        trigger_seg = segments.segment(
            LIGOTimeGPS(row.end_time, row.end_time_ns),
            LIGOTimeGPS(row.end_time, row.end_time_ns))

    if not self.frame_segments[self.instrument] or self.frame_segments[
            self.instrument].intersects_segment(trigger_seg):
        waveform = self.waveforms[channel].index_to_waveform(
            rate, bin_idx, row.channel_index)
        trigger_time = row.end_time + row.end_time_ns * 1e-9

        # append row for data transfer/saving
        channel_name = self.bin_to_channel(channel, bin_idx)
        feature_row = {
            'timestamp': utils.floor_div(buftime, 1. / self.sample_rate),
            'channel': channel_name,
            'snr': row.snr,
            'phase': row.phase,
            'time': trigger_time,
            'frequency': waveform['frequency'],
            'q': waveform['q'],
            'duration': waveform['duration'],
        }
        timestamp = utils.floor_div(buftime, self.buffer_size)
        self.feature_queue.append(timestamp, channel_name, feature_row)
def tofrequencyseries(bursttable, fcol='peak_frequency', pcol=None,
                      name="", epoch=LIGOTimeGPS(), deltaF=0, f0=0,
                      unit=lalStrainUnit):
    """
    Returns a numpy.array and REAL8FrequencySeries built from these
    OmegaSpectrum triggers. The array holds the discrete frequencies at
    which the spectrum was calculated and the series holds the data and
    associated metadata.

    If pcol is not given, the series data is the square of the SNR of
    each 'trigger'.
    """
    freq = bursttable.getColumnByName(fcol)
    if pcol:
        data = bursttable.getColumnByName(pcol)
    else:
        data = bursttable.getColumnByName('snr')**2
    freq, data = list(map(numpy.asarray, zip(*sorted(zip(freq, data)))))
    if int(epoch) == 0 and len(bursttable) != 0:
        epoch = LIGOTimeGPS(float(bursttable[0].get_time()))
    if deltaF == 0 and len(bursttable) > 1:
        deltaF = freq[1] - freq[0]
    if f0 == 0 and len(bursttable) != 0:
        f0 = freq[0]
    # seriesutils.fromarray re-uses the deltaT keyword for the frequency
    # spacing when frequencyseries=True
    series = seriesutils.fromarray(data, name=name, epoch=epoch,
                                   deltaT=deltaF, f0=f0, unit=unit,
                                   frequencyseries=True)
    return freq, series
def _ligotimegps(s, ns=0):
    """Catch TypeError and cast `s` and `ns` to `int`
    """
    from lal import LIGOTimeGPS
    try:
        return LIGOTimeGPS(s, ns)
    except TypeError:
        return LIGOTimeGPS(int(s), int(ns))
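# Usage sketch for _ligotimegps above. Assumption: some lal builds raise
# TypeError when LIGOTimeGPS is handed numpy integer types, which is the
# case this helper exists to catch.
import numpy

print(_ligotimegps(1000000000, 500000000))    # plain ints pass straight through
print(_ligotimegps(numpy.int64(1000000000)))  # cast to plain int by the fallback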
def create_FIR_whitener_kernel(length, duration, sample_rate, psd):
    assert psd
    #
    # Add another COMPLEX16TimeSeries and COMPLEX16FrequencySeries for
    # kernel's FFT (Leo)
    #

    # Add another FFT plan for kernel FFT (Leo)
    fwdplan_kernel = lal.CreateForwardCOMPLEX16FFTPlan(length, 1)
    kernel_tseries = lal.CreateCOMPLEX16TimeSeries(
        name="timeseries of whitening kernel",
        epoch=LIGOTimeGPS(0.),
        f0=0.,
        deltaT=1.0 / sample_rate,
        length=length,
        sampleUnits=lal.Unit("strain"))
    kernel_fseries = lal.CreateCOMPLEX16FrequencySeries(
        name="freqseries of whitening kernel",
        epoch=LIGOTimeGPS(0),
        f0=0.0,
        deltaF=1.0 / duration,
        length=length,
        sampleUnits=lal.Unit("strain s"))

    #
    # Obtain a kernel of zero-latency whitening filter and
    # adjust its length (Leo)
    #
    psd_fir_kernel = reference_psd.PSDFirKernel()
    (kernel, latency,
     fir_rate) = psd_fir_kernel.psd_to_linear_phase_whitening_fir_kernel(
         psd, nyquist=sample_rate / 2.0)
    (kernel, theta
     ) = psd_fir_kernel.linear_phase_fir_kernel_to_minimum_phase_whitening_fir_kernel(
         kernel, fir_rate)
    kernel = kernel[-1::-1]
    # FIXME this is off by one sample, but shouldn't be. Look at the
    # minimum phase function
    # assert len(kernel) == length
    if len(kernel) < length:
        kernel = numpy.append(kernel, numpy.zeros(length - len(kernel)))
    else:
        kernel = kernel[:length]

    kernel_tseries.data.data = kernel

    #
    # FFT of the kernel
    #
    lal.COMPLEX16TimeFreqFFT(kernel_fseries, kernel_tseries,
                             fwdplan_kernel)  # FIXME

    return kernel_fseries
def root_trigger(root_event, columns=OMICRON_COLUMNS):
    """Parse a `ROOT` tree entry into a `SnglBurst` object.

    @param root_event
        `ROOT` `TChain` object
    @param columns
        a list of valid `LIGO_LW` column names to load (defaults to all)

    @returns a `SnglBurst` built from the `ROOT` data
    """
    t = lsctables.SnglBurst()
    t.search = u"omicron"

    flow = root_event.fstart
    fhigh = root_event.fend
    if 'flow' in columns:
        t.flow = flow
    if 'fhigh' in columns:
        t.fhigh = fhigh
    if 'bandwidth' in columns:
        t.bandwidth = fhigh - flow
    if 'central_freq' in columns:
        t.central_freq = root_event.frequency
    if 'peak_frequency' in columns:
        t.peak_frequency = root_event.frequency

    peak_time = LIGOTimeGPS(root_event.time)
    if 'time' in columns or 'peak_time' in columns:
        t.peak_time = peak_time.gpsSeconds
    if 'time' in columns or 'peak_time_ns' in columns:
        t.peak_time_ns = peak_time.gpsNanoSeconds
    start_time = LIGOTimeGPS(root_event.tstart)
    if 'start_time' in columns:
        t.start_time = start_time.gpsSeconds
    if 'start_time_ns' in columns:
        t.start_time_ns = start_time.gpsNanoSeconds
    stop_time = LIGOTimeGPS(root_event.tend)
    if 'stop_time' in columns:
        t.stop_time = stop_time.gpsSeconds
    if 'stop_time_ns' in columns:
        t.stop_time_ns = stop_time.gpsNanoSeconds
    if 'duration' in columns:
        t.duration = float(stop_time - start_time)

    if 'snr' in columns:
        t.snr = root_event.snr
    if 'amplitude' in columns:
        t.amplitude = root_event.snr**2 / 2.
    if 'confidence' in columns:
        t.confidence = root_event.snr
    return t
def get_output_cache(self):
    """
    Returns a LAL cache of the output file name. Calling this method also
    induces the output name to get set, so it must be called at least
    once.
    """
    if not self.output_cache:
        self.output_cache = [
            CacheEntry(
                self.get_ifo(), self.__usertag,
                segments.segment(LIGOTimeGPS(self.get_start()),
                                 LIGOTimeGPS(self.get_end())),
                "file://localhost" + os.path.abspath(self.get_output()))
        ]
    return self.output_cache
def root_multi_trigger(root_event, columns=CWB_MULTI_COLUMNS):
    """Parse a multi-detector Coherent WaveBurst `ROOT` tree entry into a
    `MultiBurst` object.

    @param root_event
        `ROOT` `TChain` object
    @param columns
        a list of valid `LIGO_LW` column names to load (defaults to all)

    @returns a `MultiBurst` built from the `ROOT` data
    """
    ifos = get_ifos(root_event)
    first_ifo_idx = CWB_DETECTOR_INDEX.index(list(ifos)[0])

    mb = lsctables.MultiBurst()
    if 'process_id' in columns:
        mb.process_id = lsctables.ProcessID(root_event.run)
    if 'event_id' in columns:
        mb.event_id = lsctables.MultiBurstTable.get_next_id()
    if 'ifos' in columns:
        mb.set_ifos(ifos)

    peak_time = LIGOTimeGPS(list(root_event.time)[first_ifo_idx])
    if 'peak_time' in columns:
        mb.peak_time = peak_time.gpsSeconds
    if 'peak_time_ns' in columns:
        mb.peak_time_ns = peak_time.gpsNanoSeconds
    start_time = LIGOTimeGPS(list(root_event.start)[first_ifo_idx])
    if 'start_time' in columns:
        mb.start_time = start_time.gpsSeconds
    if 'start_time_ns' in columns:
        mb.start_time_ns = start_time.gpsNanoSeconds
    if 'duration' in columns:
        mb.duration = float(list(root_event.duration)[first_ifo_idx])

    # the band spans the lowest low edge to the highest high edge
    fmin = min(root_event.low)
    fmax = max(root_event.high)
    if 'central_freq' in columns:
        mb.central_freq = list(root_event.frequency)[0]
    if 'bandwidth' in columns:
        mb.bandwidth = fmax - fmin

    if 'snr' in columns:
        mb.snr = min(root_event.rho)
    if 'confidence' in columns:
        mb.confidence = root_event.likelihood

    return mb
def rate_per_bin(table, stride, column, bins, start=None, end=None):
    """@returns a list of TimeSeries representing the rate of events in
    each bin for the given LIGO_LW table
    """
    # get time
    tarray = triggers.get_time_column(table).astype(float)
    tarray.sort()
    # get limits
    if not start:
        start = tarray[0]
    if not end:
        end = tarray[-1]
    start = float(start)
    end = float(end)
    duration = end - start
    # construct time bins (kept separate from the `bins` argument, which
    # holds the (low, high) pairs for the column)
    stride = float(stride)
    duration = stride * round(duration / stride)
    timebins = numpy.linspace(start, start + duration,
                              num=int(duration // stride) + 1)
    # calculate rate per column bin
    carray = triggers.get_column(table, str(column))
    out = []
    for bin_l, bin_r in bins:
        in_bin = (bin_l <= carray) & (carray < bin_r)
        rate = CreateREAL8TimeSeries(
            "Rate (Hz) [%s <= %s < %s]" % (bin_l, column, bin_r),
            LIGOTimeGPS(start), 0, stride, lalHertzUnit,
            timebins.size - 1)
        hist, _ = numpy.histogram(tarray[in_bin], bins=timebins)
        rate.data.data = (hist / stride).astype(numpy.float64)
        out.append(rate)
    return out
def rate(table, stride, start=None, end=None):
    """@returns a TimeSeries of rate over time for all triggers in the
    given LIGO_LW table.
    """
    # get time
    tarray = triggers.get_time_column(table).astype(float)
    tarray.sort()
    # get limits
    if not start:
        start = tarray[0]
    if not end:
        end = tarray[-1]
    start = float(start)
    end = float(end)
    duration = end - start
    # construct time bins
    stride = float(stride)
    duration = stride * round(duration / stride)
    bins = numpy.linspace(start, start + duration,
                          num=int(duration // stride) + 1)
    # calculate rate
    rate = CreateREAL8TimeSeries("Rate (Hz)", LIGOTimeGPS(start), 0,
                                 stride, lalHertzUnit, bins.size - 1)
    hist, _ = numpy.histogram(tarray, bins=bins)
    rate.data.data = hist.astype(numpy.float64) / stride
    return rate
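# The core of rate() is just a histogram of event times; a self-contained
# numpy sketch of the same binning logic (without the LAL series wrapper):
import numpy

times = numpy.sort(numpy.random.uniform(1000000000, 1000000100, size=500))
stride = 10.0
edges = numpy.arange(times[0], times[0] + 100 + stride, stride)
counts, _ = numpy.histogram(times, bins=edges)
print(counts / stride)  # event rate in Hz per 10 s bin, ~5 Hz on average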
def ascii_trigger(line, columns=OMICRON_COLUMNS):
    """Parse a line of `ASCII` text into a `SnglBurst` object

    @param line
        single line of `ASCII` text from an Omicron file
    @param columns
        a list of valid `LIGO_LW` column names to load (defaults to all)

    @returns a `SnglBurst` built from the `ASCII` data
    """
    if isinstance(line, str):
        dat = list(map(float, _re_delim.split(line.rstrip())))
    else:
        dat = list(map(float, line))

    if len(dat) == 5:
        (peak, freq, duration, band, snr) = dat
        peak = LIGOTimeGPS(peak)
        start = peak - duration / 2.
        stop = peak + duration / 2.
    else:
        raise ValueError("Wrong number of columns in ASCII line. "
                         "Cannot read.")

    t = lsctables.SnglBurst()
    t.search = u"omicron"

    if 'start_time' in columns:
        t.start_time = start.gpsSeconds
    if 'start_time_ns' in columns:
        t.start_time_ns = start.gpsNanoSeconds
    if 'time' in columns or 'peak_time' in columns:
        t.peak_time = peak.gpsSeconds
    if 'time' in columns or 'peak_time_ns' in columns:
        t.peak_time_ns = peak.gpsNanoSeconds
    if 'stop_time' in columns:
        t.stop_time = stop.gpsSeconds
    if 'stop_time_ns' in columns:
        t.stop_time_ns = stop.gpsNanoSeconds
    if 'duration' in columns:
        t.duration = duration
    if 'central_freq' in columns:
        t.central_freq = freq
    if 'peak_frequency' in columns:
        t.peak_frequency = freq
    if 'bandwidth' in columns:
        t.bandwidth = band
    if 'flow' in columns:
        t.flow = freq - band / 2.
    if 'fhigh' in columns:
        t.fhigh = freq + band / 2.
    if 'snr' in columns:
        t.snr = snr
    if 'amplitude' in columns:
        t.amplitude = snr**2 / 2.
    if 'confidence' in columns:
        t.confidence = snr
    return t
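# Usage sketch for ascii_trigger above, assuming the module-level _re_delim
# splits on whitespace and glue.ligolw.lsctables is importable. The five
# hypothetical columns are: peak time, frequency, duration, bandwidth, SNR.
line = "1000000000.250 100.0 0.5 20.0 8.0"
t = ascii_trigger(line)
print(t.peak_time, t.peak_time_ns)  # 1000000000 250000000
print(t.flow, t.fhigh, t.snr)       # 90.0 110.0 8.0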
def integrate_net_pat(gpstime, network, npts=100):
    """
    Calculate the squared network antenna pattern integrated over the
    solid angle of the whole sky.

    gpstime -- time of event
    network -- detector configuration
    npts -- number of points at a given r.a.

    Returns: Double for integral of network antenna pattern squared over
    solid angle
    """
    network_factor = 0
    # Find ra and dec points over the grid
    gps = LIGOTimeGPS(gpstime)
    gmst_rad = GreenwichMeanSiderealTime(gps)
    ra_grid, dec_grid = _sph_grid(npts)
    # grid spacings; the grid is regular, so any interior pair will do
    # (assumes npts is large enough for these indices to exist)
    delta_ra = ra_grid[10][1] - ra_grid[10][0]
    delta_dec = dec_grid[31][10] - dec_grid[30][10]
    # Iterate over all points
    for ra_rad in ra_grid[0]:
        for dec_rad in dec_grid[:, 0]:
            net_pat = net_antenna_pattern_point(gpstime, network, ra_rad,
                                                dec_rad)[0]
            network_factor += (net_pat**2) * np.sin(dec_rad + np.pi / 2) \
                * delta_ra * delta_dec
    return network_factor / (4 * np.pi)
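# Sanity check of the quadrature used above: sin(dec + pi/2) == cos(dec) is
# the solid-angle Jacobian, so summing a constant 1 over the sphere should
# recover 4*pi. A self-contained sketch with a hypothetical uniform grid
# standing in for _sph_grid:
import numpy as np

ra = np.linspace(0, 2 * np.pi, 100, endpoint=False)
dec = np.linspace(-np.pi / 2, np.pi / 2, 100, endpoint=False)
dra, ddec = ra[1] - ra[0], dec[1] - dec[0]
total = sum(np.sin(d + np.pi / 2) * dra * ddec for r in ra for d in dec)
print(total, 4 * np.pi)  # both ~ 12.566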
def endElement(self):
    if self.Type == u"ISO-8601":
        import dateutil.parser
        self.pcdata = dateutil.parser.parse(self.pcdata)
    elif self.Type == u"GPS":
        from lal import LIGOTimeGPS
        # FIXME: remove cast to string when lal swig
        # can cast from unicode
        self.pcdata = LIGOTimeGPS(str(self.pcdata))
    elif self.Type == u"Unix":
        self.pcdata = float(self.pcdata)
    else:
        # unsupported time type. not impossible that
        # calling code has overridden TimeTypes set in
        # glue.ligolw.types; just accept it as a string
        pass
def gps_to_str(gps_time, form=None):
    """
    Convert a LIGOTimeGPS time object into a string. The output format
    can be given explicitly, but will default as shown in the example.

    Example:

    \code
    >>> gps_to_str(1000000000)
    'September 14 2011, 01:46:25 UTC'
    \endcode

    @returns a string with the given format.
    """
    if not isinstance(gps_time, LIGOTimeGPS):
        gps_time = LIGOTimeGPS(float(gps_time))
    nano = gps_time.gpsNanoSeconds
    utc = _datetime.datetime(*_gps_to_utc(int(gps_time))[:6])
    utc += _datetime.timedelta(microseconds=nano / 1000.0)
    if nano and not form:
        form = "%B %d %Y, %H:%M:%S.%f UTC"
    elif not form:
        form = "%B %d %Y, %H:%M:%S UTC"
    utc_str = utc.strftime(form)
    return utc_str
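# The same conversion can be sketched with lal alone, assuming only
# lal.GPSToUTC (which returns a struct tm-style tuple):
import datetime
import lal
from lal import LIGOTimeGPS

gps = LIGOTimeGPS(1000000000, 250000000)
utc = datetime.datetime(*lal.GPSToUTC(gps.gpsSeconds)[:6])
utc += datetime.timedelta(microseconds=gps.gpsNanoSeconds / 1000.0)
print(utc.strftime("%B %d %Y, %H:%M:%S.%f UTC"))
# September 14 2011, 01:46:25.250000 UTC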
def test_tconvert(self):
    # from GPS
    date = time.tconvert(GPS)
    self.assertEqual(date, DATE)
    # from GPS using LAL LIGOTimeGPS
    try:
        from lal import LIGOTimeGPS
    except ImportError:
        pass
    else:
        d = time.tconvert(LIGOTimeGPS(GPS))
        self.assertEqual(d, DATE)
    # to GPS
    gps = time.tconvert(date)
    self.assertEqual(gps, GPS)
    # special cases
    now = time.tconvert()
    now2 = time.tconvert('now')
    self.assertEqual(now, now2)
    today = time.tconvert('today')
    yesterday = time.tconvert('yesterday')
    self.assertAlmostEqual(today - yesterday, 86400)
    self.assertTrue(now >= today)
    tomorrow = time.tconvert('tomorrow')
    self.assertAlmostEqual(tomorrow - today, 86400)
def timeDelay(gpsTime, rightAscension, declination, unit, det1, det2):
    """
    timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 )

    Calculates the time delay in seconds between the detectors 'det1' and
    'det2' (e.g. 'H1') for a sky location at (rightAscension,
    declination), which must be given in certain units ('radians' or
    'degree'). The time is passed as GPS time. A positive time delay
    means the GW arrives first at 'det2', then at 'det1'.

    Example:
    antenna.timeDelay( 877320548.000, 355.084, 31.757, 'degree', 'H1', 'L1' )
    0.0011604683260994519

    Given these values, the signal arrives first at detector L1, and
    1.16 ms later at H1.
    """
    # check the input arguments
    if unit == 'radians':
        ra_rad = rightAscension
        de_rad = declination
    elif unit == 'degree':
        ra_rad = rightAscension / 180.0 * pi
        de_rad = declination / 180.0 * pi
    else:
        raise ValueError("Unknown unit %s" % unit)

    # check input values
    if ra_rad < 0.0 or ra_rad > 2 * pi:
        raise ValueError("ERROR. right ascension=%f "
                         "not within reasonable range." % (rightAscension))
    if de_rad < -pi or de_rad > pi:
        raise ValueError("ERROR. declination=%f not within reasonable "
                         "range." % (declination))
    if det1 == det2:
        return 0.0

    gps = LIGOTimeGPS(gpsTime)

    # create detector-name map
    detMap = {
        'H1': 'LHO_4k',
        'H2': 'LHO_2k',
        'L1': 'LLO_4k',
        'G1': 'GEO_600',
        'V1': 'VIRGO',
        'T1': 'TAMA_300'
    }

    x1 = inject.cached_detector[detMap[det1]].location
    x2 = inject.cached_detector[detMap[det2]].location
    timedelay = ArrivalTimeDiff(list(x1), list(x2), ra_rad, de_rad, gps)

    return timedelay
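# A rough modern equivalent of timeDelay, assuming the lal SWIG bindings,
# whose cached_detector_by_prefix dict and ArrivalTimeDiff function stand
# in for the old pylal.inject helpers:
import lal
from lal import LIGOTimeGPS

def time_delay_lal(gps_time, ra_rad, de_rad, det1, det2):
    """Arrival-time difference between det1 and det2 in seconds."""
    gps = LIGOTimeGPS(gps_time)
    x1 = lal.cached_detector_by_prefix[det1].location
    x2 = lal.cached_detector_by_prefix[det2].location
    return lal.ArrivalTimeDiff(x1, x2, ra_rad, de_rad, gps)

# e.g. time_delay_lal(877320548, 6.198, 0.554, 'H1', 'L1')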
def utc_to_gps(utc_time):
    """Convert the given `datetime.datetime` into a GPS time

    @returns a LIGOTimeGPS
    """
    if not isinstance(utc_time, _datetime.datetime):
        utc_time = _datetime.datetime.combine(utc_time, _datetime.time())
    _check_utc(utc_time)
    return LIGOTimeGPS(_utc_to_gps(utc_time.utctimetuple()))
def get_output(self):
    """
    Returns the file name of output from the ring code. This must be kept
    synchronized with the name of the output file in ring.c.
    """
    if self._AnalysisNode__output is None:
        if None in (self.get_start(), self.get_end(), self.get_ifo(),
                    self.__usertag):
            raise ValueError(
                "start time, end time, ifo, or user tag has not been set")
        seg = segments.segment(LIGOTimeGPS(self.get_start()),
                               LIGOTimeGPS(self.get_end()))
        self.set_output(
            os.path.join(
                self.output_dir, "%s-STRINGSEARCH_%s-%d-%d.xml.gz" %
                (self.get_ifo(), self.__usertag, int(self.get_start()),
                 int(self.get_end()) - int(self.get_start()))))
    return self._AnalysisNode__output
def get_strain_from_gwf_files(
    gwf_files: Dict[str, List[Union[str, bytes, os.PathLike]]],
    gps_start: int,
    window: int,
    original_sampling_rate: int = 4096,
    target_sampling_rate: int = 4096,
    as_pycbc_timeseries: bool = True,
    channel: str = 'GDS-CALIB_STRAIN',
    check_integrity: bool = True,
):
    assert isinstance(gps_start, int), 'time is not an int'
    assert isinstance(window, int), 'interval_width is not an int'
    assert isinstance(original_sampling_rate, int), \
        'original_sampling_rate is not an int'
    assert isinstance(target_sampling_rate, int), \
        'target_sampling_rate is not an int'
    assert (original_sampling_rate % target_sampling_rate) == 0, (
        'Invalid target_sampling_rate: Not a divisor of '
        'original_sampling_rate!')

    sampling_factor = int(original_sampling_rate / target_sampling_rate)
    samples = defaultdict(list)

    for ifo in gwf_files:
        detector_channel = f'{ifo}:{channel}'
        for file_path in gwf_files[ifo]:
            strain = read_frame(
                str(file_path),
                detector_channel,
                start_time=gps_start,
                end_time=gps_start + window,
                check_integrity=check_integrity,
            )
            samples[ifo].append(strain[::sampling_factor])
        samples[ifo] = np.ascontiguousarray(np.concatenate(samples[ifo]))

    if not as_pycbc_timeseries:
        return samples

    # Convert strain of both detectors to a TimeSeries object
    timeseries = {
        ifo: TimeSeries(initial_array=samples[ifo],
                        delta_t=1.0 / target_sampling_rate,
                        epoch=LIGOTimeGPS(gps_start))
        for ifo in samples
    }
    return timeseries
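# Usage sketch for get_strain_from_gwf_files, with hypothetical local frame
# files; read_frame is assumed to be pycbc.frame.read_frame:
gwf_files = {
    'H1': ['H-H1_GWOSC_4KHZ-1126259446-32.gwf'],
    'L1': ['L-L1_GWOSC_4KHZ-1126259446-32.gwf'],
}
strain = get_strain_from_gwf_files(gwf_files,
                                   gps_start=1126259446,
                                   window=32,
                                   target_sampling_rate=2048,
                                   channel='GWOSC-4KHZ_R1_STRAIN')
print(strain['H1'].delta_t, float(strain['H1'].start_time))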
def net_antenna_pattern(gpstime, network, psi=0, npts=100, norm=False):
    # FIXME: need to check on this
    gps = LIGOTimeGPS(gpstime)
    gmst_rad = GreenwichMeanSiderealTime(gps)

    ra_grid, dec_grid = _sph_grid(npts)

    net_pat = np.zeros(ra_grid.shape[0] * ra_grid.shape[1])
    net_align = np.zeros(ra_grid.shape[0] * ra_grid.shape[1])
    net_dpf = np.zeros(ra_grid.shape[0] * ra_grid.shape[1])

    psi_rad = 0
    i = 0
    for ra_rad, de_rad in zip(ra_grid.flat, dec_grid.flat):
        # one response call per detector; each returns (fplus, fcross)
        resp = [
            ComputeDetAMResponse(detectors[ifo].response, ra_rad, de_rad,
                                 psi_rad, gmst_rad) for ifo in network
        ]
        fp = np.asarray([r[0] for r in resp])
        fx = np.asarray([r[1] for r in resp])

        fp2, fx2 = np.dot(fp, fp), np.dot(fx, fx)
        net_dpf[i] = psi_dpf = 0.5 * np.arctan2(2 * np.dot(fp, fx),
                                                (fp2 - fx2))

        # rotate into the dominant polarization frame
        fp, fx = fp * np.cos(psi_dpf) + fx * np.sin(psi_dpf), \
            -fp * np.sin(psi_dpf) + fx * np.cos(psi_dpf)
        fp2, fx2 = np.dot(fp, fp), np.dot(fx, fx)

        net_pat[i] = np.sqrt(fp2 + fx2)
        net_align[i] = np.sqrt(fx2 / fp2)
        i += 1

    ra_grid *= 180 / np.pi
    dec_grid *= 180 / np.pi
    net_pat = net_pat.reshape(ra_grid.shape)
    net_align = net_align.reshape(ra_grid.shape)
    net_dpf = net_dpf.reshape(ra_grid.shape) / 2  # we multiplied by two above

    if norm:
        net_pat /= len(network)

    return ra_grid, dec_grid, net_pat, net_align, net_dpf
def net_antenna_pattern_point(gpstime, network, ra_rad, de_rad, psi=0,
                              norm=False):
    """
    Only get the network antenna pattern at a given ra and dec of
    interest
    """
    # FIXME: need to check on this
    gps = LIGOTimeGPS(gpstime)
    gmst_rad = GreenwichMeanSiderealTime(gps)

    psi_rad = 0

    fp = np.asarray([
        ComputeDetAMResponse(detectors[ifo].response, ra_rad, de_rad,
                             psi_rad, gmst_rad)[0] for ifo in network
    ])
    fx = np.asarray([
        ComputeDetAMResponse(detectors[ifo].response, ra_rad, de_rad,
                             psi_rad, gmst_rad)[1] for ifo in network
    ])

    fp2, fx2 = np.dot(fp, fp), np.dot(fx, fx)
    net_dpf = psi_dpf = 0.5 * np.arctan2(2 * np.dot(fp, fx), (fp2 - fx2))

    # rotate into the dominant polarization frame
    fp, fx = fp * np.cos(psi_dpf) + fx * np.sin(psi_dpf), \
        -fp * np.sin(psi_dpf) + fx * np.cos(psi_dpf)
    fp2, fx2 = np.dot(fp, fp), np.dot(fx, fx)

    net_pat = np.sqrt(fp2 + fx2)
    net_align = np.sqrt(fx2 / fp2)

    if norm:
        net_pat /= len(network)

    return net_pat, net_align, net_dpf
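# Self-contained sketch of the same network antenna-pattern numbers using
# only lal; lal.cached_detector_by_prefix stands in for the module-level
# detectors dict (an assumption):
import numpy as np
import lal
from lal import LIGOTimeGPS

gmst = lal.GreenwichMeanSiderealTime(LIGOTimeGPS(1000000000))
ra, dec, psi = 1.2, -0.3, 0.0
resp = [lal.ComputeDetAMResponse(lal.cached_detector_by_prefix[ifo].response,
                                 ra, dec, psi, gmst)
        for ifo in ('H1', 'L1', 'V1')]
fp = np.array([r[0] for r in resp])
fx = np.array([r[1] for r in resp])
print(np.sqrt(np.dot(fp, fp) + np.dot(fx, fx)))  # network pattern magnitude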
def to_pycbc_timeseries(self):
    """
    Output the time series strain data as a
    :class:`pycbc.types.timeseries.TimeSeries`.
    """
    try:
        from pycbc.types.timeseries import TimeSeries
        from lal import LIGOTimeGPS
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "Cannot output strain data as PyCBC TimeSeries")

    return TimeSeries(self.time_domain_strain,
                      delta_t=(1. / self.sampling_frequency),
                      epoch=LIGOTimeGPS(self.start_time))
def to_pycbc_frequencyseries(self):
    """
    Output the frequency series strain data as a
    :class:`pycbc.types.frequencyseries.FrequencySeries`.
    """
    try:
        from pycbc.types.frequencyseries import FrequencySeries
        from lal import LIGOTimeGPS
    except ImportError:
        raise ImportError(
            "Cannot output strain data as PyCBC FrequencySeries")

    return FrequencySeries(self.frequency_domain_strain,
                           delta_f=1 / self.duration,
                           epoch=LIGOTimeGPS(self.start_time))
def to_lal_frequencyseries(self):
    """
    Output the frequency series strain data as a LAL FrequencySeries
    object.
    """
    try:
        from lal import CreateCOMPLEX16FrequencySeries, LIGOTimeGPS, \
            SecondUnit
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "Cannot output strain data as LAL FrequencySeries")

    lal_data = CreateCOMPLEX16FrequencySeries(
        "", LIGOTimeGPS(self.start_time), self.frequency_array[0],
        1 / self.duration, SecondUnit, len(self.frequency_domain_strain))
    lal_data.data.data[:] = self.frequency_domain_strain

    return lal_data
def to_lal_timeseries(self):
    """
    Output the time series strain data as a LAL TimeSeries object.
    """
    try:
        from lal import CreateREAL8TimeSeries, LIGOTimeGPS, SecondUnit
    except ModuleNotFoundError:
        raise ModuleNotFoundError(
            "Cannot output strain data as LAL TimeSeries")

    lal_data = CreateREAL8TimeSeries("", LIGOTimeGPS(self.start_time), 0,
                                     1 / self.sampling_frequency,
                                     SecondUnit,
                                     len(self.time_domain_strain))
    lal_data.data.data[:] = self.time_domain_strain

    return lal_data
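# For reference, a minimal stand-alone sketch of the lal call these methods
# wrap (values are illustrative):
import numpy as np
from lal import CreateREAL8TimeSeries, LIGOTimeGPS, SecondUnit

srate = 4096
data = np.zeros(4 * srate)  # four seconds of zeros
ts = CreateREAL8TimeSeries("strain", LIGOTimeGPS(1126259446), 0,
                           1.0 / srate, SecondUnit, len(data))
ts.data.data[:] = data
print(ts.deltaT, float(ts.epoch))  # 0.000244140625 1126259446.0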
def appsink_new_snr_buffer(self, elem):
    """Callback function for SNR appsink."""
    with self.lock:
        # Note: be sure to set property="%s_%d" % (instrument, index) for
        # appsink element
        instrument = elem.name.split("_")[0]
        index = int(elem.name.split("_")[1])
        cur_bank = self.snr_document.bank_snrs_dict[instrument][index]

        sample = elem.emit("pull-sample")
        if sample is None:
            return Gst.FlowReturn.OK

        success, rate = sample.get_caps().get_structure(0).get_int("rate")
        assert success
        if cur_bank.deltaT is None:
            cur_bank.deltaT = 1. / rate
        else:
            # sampling rate should not be changing
            assert cur_bank.deltaT == 1. / rate, \
                "Data has different sampling rate."

        buf = sample.get_buffer()
        if buf.mini_object.flags & Gst.BufferFlags.GAP \
                or buf.n_memory() == 0:
            return Gst.FlowReturn.OK

        # add the time offset of template end time here; this offset
        # should be the same for each template
        cur_time_stamp = LIGOTimeGPS(
            0, sample.get_buffer().pts) \
            + cur_bank.sngl_inspiral_table[0].end

        if cur_bank.s >= cur_time_stamp and cur_bank.e > cur_time_stamp:
            # record the first timestamp closest to start time
            cur_bank.epoch = cur_time_stamp
            cur_bank.data = [pipeio.array_from_audio_sample(sample)]
        elif cur_bank.s <= cur_time_stamp < cur_bank.e:
            cur_bank.data.append(pipeio.array_from_audio_sample(sample))

    return Gst.FlowReturn.OK
def appsink_statevector_new_buffer(self, elem, ifo, bitmaskdict):
    if self.kafka_server is not None:
        with self.lock:
            # retrieve data from appsink buffer
            buf = elem.emit("pull-sample").get_buffer()
            result, mapinfo = buf.map(Gst.MapFlags.READ)
            buf_timestamp = LIGOTimeGPS(0, buf.pts)
            if mapinfo.data:
                # the buffer payload is a "time state" text record
                time, state = bytes(mapinfo.data).split(b'\n')[0].split()
                state = int(state)
            buf.unmap(mapinfo)

            monitor_dict = {}
            monitor_dict['time'] = float(buf_timestamp)
            for key, bitmask in bitmaskdict.items():
                # the monitor is on only if every bit in the mask is set
                if state & bitmask == bitmask:
                    monitor_dict[key] = 1
                else:
                    monitor_dict[key] = 0

            # Check if kafka server is now available if it's supposed
            # to be used
            if self.producer is None:
                from kafka import KafkaProducer
                from kafka import errors
                try:
                    self.producer = KafkaProducer(
                        bootstrap_servers=[self.kafka_server],
                        key_serializer=lambda m:
                            json.dumps(m).encode('utf-8'),
                        value_serializer=lambda m:
                            json.dumps(m).encode('utf-8'),
                    )
                except errors.NoBrokersAvailable:
                    self.producer = None
                    if self.verbose:
                        print("No brokers available for kafka. "
                              "Defaulting to not pushing to kafka.")
            else:
                self.producer.send(
                    "%s_statevector_bit_check_%s" % (ifo, self.machine),
                    value=monitor_dict)
    return Gst.FlowReturn.OK
utcd = list(utc)
for i in range(0, 10):
    utcd[2] = utc[2] + i
    utcd = list(lal.GPSToUTC(lal.UTCToGPS(utcd)))
    dt = datetime.datetime(*utcd[0:6])
    assert utcd[6] == dt.weekday()
lal.CheckMemoryLeaks()
print("PASSED 'tm' struct conversions")

# check LIGOTimeGPS operations
print("checking LIGOTimeGPS operations ...")
from lal import LIGOTimeGPS
t0 = LIGOTimeGPS()
assert t0 == 0 and isinstance(t0, LIGOTimeGPS)
assert t0 != None and t0 is not None
t1 = LIGOTimeGPS(10.5)
t2 = LIGOTimeGPS(10, 500000000)
assert not t0 and t1 and t2
assert t1 == t2 and isinstance(t1, LIGOTimeGPS)
t3 = +t1
t3 = -t2
assert t1 == t2 and t1 >= t2 and t2 >= t1
assert abs(-t1) == t1
assert float(t1) == 10.5
assert t1 + 3.5 == 14 and isinstance(t1 + 3.5, LIGOTimeGPS)
t2 -= 5.5
assert t2 == 5 and isinstance(t2, LIGOTimeGPS)
assert t2 + 5.5 >= t1 and t2 + 3 != t2
assert t2 - 5 == t0 and isinstance(t2 - 5, LIGOTimeGPS)
assert t1 * 3 == 31.5 and isinstance(t1 * 3, LIGOTimeGPS)
assert t2 / 2.5 == 2 and isinstance(t2 / 2.5, LIGOTimeGPS)
utc[2] = utc[2] + i
utc[6] = (utc[6] + i) % 7
utc[7] = utc[7] + i
utc[8] = -1 + (i % 3)
assert lal.GPSToUTC(gps)[0:8] == tuple(utc[0:8])
assert lal.UTCToGPS(utc) == gps
utc = list(lal.GPSToUTC(lal.UTCToGPS(utc)))
dt = datetime.datetime(*utc[0:6])
assert utc[6] == dt.weekday()
lal.CheckMemoryLeaks()
print("PASSED 'tm' struct conversions")

# check LIGOTimeGPS operations
print("checking LIGOTimeGPS operations ...")
from lal import LIGOTimeGPS
t0 = LIGOTimeGPS()
assert type(LIGOTimeGPS(t0)) is LIGOTimeGPS
assert is_value_and_type(t0, 0, LIGOTimeGPS)
assert t0 != None and t0 is not None
t1 = LIGOTimeGPS(10.5)
t2 = LIGOTimeGPS(10, 500000000)
assert not t0 and t1 and t2
assert is_value_and_type(t1, t2, LIGOTimeGPS)
t3 = +t1
t3 = -t2
assert t1 == t2 and t1 >= t2 and t2 >= t1
assert abs(-t1) == t1
assert float(t1) == 10.5
assert is_value_and_type(t1 + 3.5, 14, LIGOTimeGPS)
assert is_value_and_type(3.5 + t1, 14, LIGOTimeGPS)
t2 -= 5.5
def split_bins(cafepacker, extentlimit, verbose=False):
    """
    Split bins in CafePacker so that each bin has an extent no longer
    than extentlimit.
    """

    #
    # loop over all bins in cafepacker.bins. loop is backwards because
    # list grows in size as bins are split
    #

    for idx in range(len(cafepacker.bins) - 1, -1, -1):
        #
        # retrieve bin
        #

        origbin = cafepacker.bins[idx]

        #
        # how many pieces?  if bin doesn't need splitting move to next
        #

        n = int(math.ceil(float(abs(origbin.extent)) / extentlimit))
        if n <= 1:
            continue

        #
        # calculate the times of the splits, and then build
        # segmentlistdicts for clipping.
        #

        extents = [origbin.extent[0]] + [
            LIGOTimeGPS(origbin.extent[0] +
                        i * float(abs(origbin.extent)) / n)
            for i in range(1, n)
        ] + [origbin.extent[1]]
        if verbose:
            print("\tsplitting cache spanning %s at %s" %
                  (str(origbin.extent), ", ".join(
                      str(extent) for extent in extents[1:-1])),
                  file=sys.stderr)
        extents = [
            segments.segment(*bounds)
            for bounds in zip(extents[:-1], extents[1:])
        ]

        #
        # build new bins, pack objects from origbin into new bins
        #

        newbins = []
        for extent in extents:
            #
            # append new bin
            #

            newbins.append(LALCacheBin())

            #
            # test each cache entry in original bin
            #

            extent_plus_max_gap = extent.protract(cafepacker.max_gap)
            for cache_entry in origbin.objects:
                #
                # quick check of gap
                #

                if cache_entry.segment.disjoint(extent_plus_max_gap):
                    continue

                #
                # apply each offset vector
                #

                cache_entry_segs = cache_entry.segmentlistdict
                for offset_vector in cafepacker.offset_vectors:
                    cache_entry_segs.offsets.update(offset_vector)

                    #
                    # test against bin
                    #

                    if cache_entry_segs.intersects_segment(extent):
                        #
                        # object is coincident with bin
                        #

                        newbins[-1].add(cache_entry)
                        break

            #
            # override the bin's extent
            #

            newbins[-1].extent = extent

        #
        # replace original bin with split bins.
        #

        cafepacker.bins[idx:idx + 1] = newbins
def to_coinc_xml_object(self, file_name):
    outdoc = ligolw.Document()
    outdoc.appendChild(ligolw.LIGO_LW())

    ifos = list(self.sngl_files.keys())
    proc_id = ligolw_process.register_to_xmldoc(
        outdoc,
        'pycbc', {},
        ifos=ifos,
        comment='',
        version=pycbc_version.git_hash,
        cvs_repository='pycbc/' + pycbc_version.git_branch,
        cvs_entry_time=pycbc_version.date).process_id

    search_summ_table = lsctables.New(lsctables.SearchSummaryTable)
    coinc_h5file = self.coinc_file.h5file
    try:
        start_time = coinc_h5file['segments']['coinc']['start'][:].min()
        end_time = coinc_h5file['segments']['coinc']['end'][:].max()
    except KeyError:
        start_times = []
        end_times = []
        for ifo_comb in coinc_h5file['segments']:
            if ifo_comb == 'foreground_veto':
                continue
            seg_group = coinc_h5file['segments'][ifo_comb]
            start_times.append(seg_group['start'][:].min())
            end_times.append(seg_group['end'][:].max())
        start_time = min(start_times)
        end_time = max(end_times)
    num_trigs = len(self.sort_arr)
    search_summary = return_search_summary(start_time, end_time,
                                           num_trigs, ifos)
    search_summ_table.append(search_summary)
    outdoc.childNodes[0].appendChild(search_summ_table)

    sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
    coinc_def_table = lsctables.New(lsctables.CoincDefTable)
    coinc_event_table = lsctables.New(lsctables.CoincTable)
    coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
    coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
    time_slide_table = lsctables.New(lsctables.TimeSlideTable)

    # Set up time_slide table
    time_slide_id = lsctables.TimeSlideID(0)
    for ifo in ifos:
        time_slide_row = lsctables.TimeSlide()
        time_slide_row.instrument = ifo
        time_slide_row.time_slide_id = time_slide_id
        time_slide_row.offset = 0
        time_slide_row.process_id = proc_id
        time_slide_table.append(time_slide_row)

    # Set up coinc_definer table
    coinc_def_id = lsctables.CoincDefID(0)
    coinc_def_row = lsctables.CoincDef()
    coinc_def_row.search = "inspiral"
    coinc_def_row.description = \
        "sngl_inspiral<-->sngl_inspiral coincidences"
    coinc_def_row.coinc_def_id = coinc_def_id
    coinc_def_row.search_coinc_type = 0
    coinc_def_table.append(coinc_def_row)

    bank_col_names = ['mass1', 'mass2', 'spin1z', 'spin2z']
    bank_col_vals = {}
    for name in bank_col_names:
        bank_col_vals[name] = self.get_bankfile_array(name)

    coinc_event_names = ['ifar', 'time', 'fap', 'stat']
    coinc_event_vals = {}
    for name in coinc_event_names:
        if name == 'time':
            coinc_event_vals[name] = self.get_end_time()
        else:
            coinc_event_vals[name] = self.get_coincfile_array(name)

    sngl_col_names = [
        'snr', 'chisq', 'chisq_dof', 'bank_chisq', 'bank_chisq_dof',
        'cont_chisq', 'cont_chisq_dof', 'end_time', 'template_duration',
        'coa_phase', 'sigmasq'
    ]
    sngl_col_vals = {}
    for name in sngl_col_names:
        sngl_col_vals[name] = self.get_snglfile_array_dict(name)

    sngl_event_count = 0
    for idx in range(len(self.sort_arr)):
        # Set up IDs and mapping values
        coinc_id = lsctables.CoincID(idx)

        # Set up sngls
        # FIXME: As two-ifo is hardcoded loop over all ifos
        sngl_combined_mchirp = 0
        sngl_combined_mtot = 0
        net_snrsq = 0
        for ifo in ifos:
            # If this ifo is not participating in this coincidence then
            # ignore it and move on.
            if not sngl_col_vals['snr'][ifo][1][idx]:
                continue
            event_id = lsctables.SnglInspiralID(sngl_event_count)
            sngl_event_count += 1
            sngl = return_empty_sngl()
            sngl.event_id = event_id
            sngl.ifo = ifo
            net_snrsq += sngl_col_vals['snr'][ifo][0][idx]**2
            for name in sngl_col_names:
                val = sngl_col_vals[name][ifo][0][idx]
                if name == 'end_time':
                    sngl.set_end(LIGOTimeGPS(val))
                else:
                    setattr(sngl, name, val)
            for name in bank_col_names:
                val = bank_col_vals[name][idx]
                setattr(sngl, name, val)
            sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
                sngl.mass1, sngl.mass2)
            sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
                sngl.mass1, sngl.mass2)
            sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
            sngl_combined_mchirp += sngl.mchirp
            sngl_combined_mtot += sngl.mtotal
            sngl_inspiral_table.append(sngl)

            # Set up coinc_map entry
            coinc_map_row = lsctables.CoincMap()
            coinc_map_row.table_name = 'sngl_inspiral'
            coinc_map_row.coinc_event_id = coinc_id
            coinc_map_row.event_id = event_id
            coinc_event_map_table.append(coinc_map_row)

        sngl_combined_mchirp = sngl_combined_mchirp / len(ifos)
        sngl_combined_mtot = sngl_combined_mtot / len(ifos)

        # Set up coinc inspiral and coinc event tables
        coinc_event_row = lsctables.Coinc()
        coinc_inspiral_row = lsctables.CoincInspiral()
        coinc_event_row.coinc_def_id = coinc_def_id
        coinc_event_row.nevents = len(ifos)
        coinc_event_row.instruments = ','.join(ifos)
        coinc_inspiral_row.set_ifos(ifos)
        coinc_event_row.time_slide_id = time_slide_id
        coinc_event_row.process_id = proc_id
        coinc_event_row.coinc_event_id = coinc_id
        coinc_inspiral_row.coinc_event_id = coinc_id
        coinc_inspiral_row.mchirp = sngl_combined_mchirp
        coinc_inspiral_row.mass = sngl_combined_mtot
        coinc_inspiral_row.set_end(
            LIGOTimeGPS(coinc_event_vals['time'][idx]))
        coinc_inspiral_row.snr = net_snrsq**0.5
        coinc_inspiral_row.false_alarm_rate = coinc_event_vals['fap'][idx]
        coinc_inspiral_row.combined_far = \
            1. / coinc_event_vals['ifar'][idx]
        # Transform to Hz
        coinc_inspiral_row.combined_far = \
            coinc_inspiral_row.combined_far / YRJUL_SI
        coinc_event_row.likelihood = coinc_event_vals['stat'][idx]
        coinc_inspiral_row.minimum_duration = 0.
        coinc_event_table.append(coinc_event_row)
        coinc_inspiral_table.append(coinc_inspiral_row)

    outdoc.childNodes[0].appendChild(coinc_def_table)
    outdoc.childNodes[0].appendChild(coinc_event_table)
    outdoc.childNodes[0].appendChild(coinc_event_map_table)
    outdoc.childNodes[0].appendChild(time_slide_table)
    outdoc.childNodes[0].appendChild(coinc_inspiral_table)
    outdoc.childNodes[0].appendChild(sngl_inspiral_table)
    ligolw_utils.write_filename(outdoc, file_name)
def generate_sample(static_arguments, event_tuple, waveform_params=None):
    """
    Generate a single sample (or example) by taking a piece of LIGO
    background noise (real or synthetic, depending on `event_tuple`),
    optionally injecting a simulated waveform (depending on
    `waveform_params`) and post-processing the result (whitening,
    band-passing).

    Args:
        static_arguments (dict): A dictionary containing global technical
            parameters for the sample generation, for example the
            target_sampling_rate of the output.
        event_tuple (tuple): A tuple `(event_time, file_path)`, which
            specifies the GPS time at which to make an injection and the
            path of the HDF file which contains said GPS time. If
            `file_path` is `None`, synthetic noise will be used instead
            and the `event_time` only serves as a seed for the
            corresponding (random) noise generator.
        waveform_params (dict): A dictionary containing the randomly
            sampled parameters that are passed as inputs to the waveform
            model (e.g., the masses, spins, position, ...).

    Returns:
        A tuple `(sample, injection_parameters)`, which contains the
        generated `sample` itself (a dict with keys `{'event_time',
        'h1_strain', 'l1_strain'}`), and the `injection_parameters`,
        which are either `None` (in case no injection was made), or a
        dict containing the `waveform_params` and some additional
        parameters (e.g., single detector SNRs).
    """

    # -------------------------------------------------------------------
    # Define shortcuts for some elements of self.static_arguments
    # -------------------------------------------------------------------

    # Read out frequency-related arguments
    original_sampling_rate = static_arguments['original_sampling_rate']
    target_sampling_rate = static_arguments['target_sampling_rate']
    f_lower = static_arguments['f_lower']
    delta_f = static_arguments['delta_f']
    fd_length = static_arguments['fd_length']

    # Get the width of the noise sample that we either select from the
    # raw HDF files, or generate synthetically
    noise_interval_width = static_arguments['noise_interval_width']

    # Get how many seconds before and after the event time to use
    seconds_before_event = static_arguments['seconds_before_event']
    seconds_after_event = static_arguments['seconds_after_event']

    # Get the event time and the dict containing the HDF file path
    event_time, hdf_file_paths = event_tuple

    # -------------------------------------------------------------------
    # Get the background noise (either from data or synthetically)
    # -------------------------------------------------------------------

    # If the event_time is None, we generate synthetic noise
    if hdf_file_paths is None:

        # Create an artificial PSD for the noise
        # TODO: Is this the best choice for this task?
        psd = aLIGOZeroDetHighPower(length=fd_length,
                                    delta_f=delta_f,
                                    low_freq_cutoff=f_lower)

        # Actually generate the noise using the PSD and LALSimulation
        noise = dict()
        for i, det in enumerate(('H1', 'L1')):

            # Compute the length of the noise sample in time steps
            noise_length = noise_interval_width * target_sampling_rate

            # Generate the noise for this detector
            noise[det] = noise_from_psd(
                length=noise_length,
                delta_t=(1.0 / target_sampling_rate),
                psd=psd,
                seed=(2 * event_time + i))

            # Manually fix the noise start time to match the fake event
            # time. However, for some reason, the correct setter method
            # seems broken?
            start_time = event_time - noise_interval_width / 2
            # noinspection PyProtectedMember
            noise[det]._epoch = LIGOTimeGPS(start_time)

    # Otherwise we select the noise from the corresponding HDF file
    else:
        kwargs = dict(hdf_file_paths=hdf_file_paths,
                      gps_time=event_time,
                      interval_width=noise_interval_width,
                      original_sampling_rate=original_sampling_rate,
                      target_sampling_rate=target_sampling_rate,
                      as_pycbc_timeseries=True)
        noise = get_strain_from_hdf_file(**kwargs)

    # -------------------------------------------------------------------
    # If applicable, make an injection
    # -------------------------------------------------------------------

    # If no waveform parameters are given, we are not making an
    # injection. In this case, there are no detector signals and no
    # injection parameters, and the strain is simply equal to the noise
    if waveform_params is None:
        detector_signals = None
        output_signals = None
        injection_parameters = None
        strain = noise

    # Otherwise, we need to simulate a waveform for the given
    # waveform_params and add it into the noise to create the strain
    else:

        # ---------------------------------------------------------------
        # Simulate the waveform with the given injection parameters
        # ---------------------------------------------------------------

        # Actually simulate the waveform with these parameters
        waveform = get_waveform(static_arguments=static_arguments,
                                waveform_params=waveform_params)

        # Get the detector signals by projecting on the antenna patterns
        detector_signals = \
            get_detector_signals(static_arguments=static_arguments,
                                 waveform_params=waveform_params,
                                 event_time=event_time,
                                 waveform=waveform)

        # Store the output_signal
        output_signals = detector_signals.copy()

        # ---------------------------------------------------------------
        # Add the waveform into the noise as is to calculate the NOMF-SNR
        # ---------------------------------------------------------------

        # Store the dummy strain, the PSDs and the SNRs for the two
        # detectors
        strain_ = {}
        psds = {}
        snrs = {}

        # Calculate these quantities for both detectors
        for det in ('H1', 'L1'):

            # Add the simulated waveform into the noise to get the dummy
            # strain
            strain_[det] = noise[det].add_into(detector_signals[det])

            # Estimate the Power Spectral Density from the dummy strain;
            # the 1 here is the PSD segment duration of the strain
            psds[det] = strain_[det].psd(1)
            psds[det] = interpolate(psds[det], delta_f=delta_f)

            # Use the PSD estimate to calculate the optimal matched
            # filtering SNR for this injection and this detector
            snrs[det] = sigma(htilde=detector_signals[det],
                              psd=psds[det],
                              low_frequency_cutoff=f_lower)

        # Calculate the network optimal matched filtering SNR for this
        # injection (which we need for scaling to the chosen injection
        # SNR)
        nomf_snr = np.sqrt(snrs['H1']**2 + snrs['L1']**2)

        # ---------------------------------------------------------------
        # Add the waveform into the noise with the chosen injection SNR
        # ---------------------------------------------------------------

        # Compute the rescaling factor
        #injection_snr = waveform_params['injection_snr']
        injection_snr = static_arguments['injection_snr']
        scale_factor = 1.0 * injection_snr / nomf_snr

        strain = {}
        for det in ('H1', 'L1'):
            # Add the simulated waveform into the noise, using a scaling
            # factor to ensure that the resulting NOMF-SNR equals the
            # chosen injection SNR
            strain[det] = noise[det].add_into(
                scale_factor * detector_signals[det])
            output_signals[det] = scale_factor * output_signals[det]
        # ---------------------------------------------------------------
        # Store some information about the injection we just made
        # ---------------------------------------------------------------

        # Store the information we have computed ourselves
        injection_parameters = {'scale_factor': scale_factor,
                                'h1_snr': snrs['H1'],
                                'l1_snr': snrs['L1']}

        # Also add the waveform parameters we have sampled
        for key, value in waveform_params.items():
            injection_parameters[key] = value

    # -------------------------------------------------------------------
    # Whiten and bandpass the strain (also for noise-only samples)
    # -------------------------------------------------------------------

    for det in ('H1', 'L1'):

        # Get the whitening parameters
        segment_duration = static_arguments['whitening_segment_duration']
        max_filter_duration = \
            static_arguments['whitening_max_filter_duration']

        # Whiten the strain (using the built-in whitening of PyCBC).
        # We don't need to remove the corrupted samples here, because we
        # crop the strain later on
        strain[det] = \
            strain[det].whiten(segment_duration=segment_duration,
                               max_filter_duration=max_filter_duration,
                               remove_corrupted=False)
        if waveform_params is not None:
            output_signals[det] = \
                signal_whiten(psd=psds[det],
                              signal=output_signals[det],
                              segment_duration=segment_duration,
                              max_filter_duration=max_filter_duration)

        # Get the limits for the bandpass
        bandpass_lower = static_arguments['bandpass_lower']
        bandpass_upper = static_arguments['bandpass_upper']

        # Apply a high-pass to remove everything below `bandpass_lower`;
        # if bandpass_lower == 0, do not apply any high-pass filter
        if bandpass_lower != 0:
            strain[det] = \
                strain[det].highpass_fir(frequency=bandpass_lower,
                                         remove_corrupted=False,
                                         order=512)
            if waveform_params is not None:
                output_signals[det] = output_signals[det].highpass_fir(
                    frequency=bandpass_lower,
                    remove_corrupted=False,
                    order=512)

        # Apply a low-pass filter to remove everything above
        # `bandpass_upper`; if bandpass_upper == sampling rate, do not
        # apply any low-pass filter
        if bandpass_upper != target_sampling_rate:
            strain[det] = \
                strain[det].lowpass_fir(frequency=bandpass_upper,
                                        remove_corrupted=False,
                                        order=512)
            if waveform_params is not None:
                output_signals[det] = output_signals[det].lowpass_fir(
                    frequency=bandpass_upper,
                    remove_corrupted=False,
                    order=512)

    # -------------------------------------------------------------------
    # Cut strain (and signal) time series to the pre-specified length
    # -------------------------------------------------------------------

    for det in ('H1', 'L1'):

        # Define some shortcuts for slicing
        a = event_time - seconds_before_event
        b = event_time + seconds_after_event

        # Cut the strain to the desired length
        strain[det] = strain[det].time_slice(a, b)

        # If we've made an injection, also cut the simulated signal
        if waveform_params is not None:

            # Cut the detector signals to the specified length
            detector_signals[det] = detector_signals[det].time_slice(a, b)
            output_signals[det] = output_signals[det].time_slice(a, b)

            # Also add the detector signals to the injection parameters
            injection_parameters['h1_signal'] = \
                np.array(detector_signals['H1'])
            injection_parameters['l1_signal'] = \
                np.array(detector_signals['L1'])
            injection_parameters['h1_output_signal'] = \
                np.array(output_signals['H1'])
            injection_parameters['l1_output_signal'] = \
                np.array(output_signals['L1'])

    # -------------------------------------------------------------------
    # Collect all available information about this sample and return
    # results
    # -------------------------------------------------------------------

    # The whitened strain is numerically on the order of O(1), so we can
    # save it as a 32-bit float (unlike the original signal, which is
    # down to O(10^-30) and thus requires 64-bit floats).
    sample = {'event_time': event_time,
              'h1_strain': np.array(strain['H1']).astype(np.float32),
              'l1_strain': np.array(strain['L1']).astype(np.float32)}

    return sample, injection_parameters
class Time(Element):
    """
    Time element.
    """
    tagName = u"Time"

    Name = attributeproxy(u"Name")
    Type = attributeproxy(u"Type", default=u"ISO-8601")

    def __init__(self, *args):
        super(Time, self).__init__(*args)
        if self.Type not in ligolwtypes.TimeTypes:
            raise ElementError("invalid Type for Time: '%s'" % self.Type)

    def endElement(self):
        if self.Type == u"ISO-8601":
            import dateutil.parser
            self.pcdata = dateutil.parser.parse(self.pcdata)
        elif self.Type == u"GPS":
            from lal import LIGOTimeGPS
            # FIXME: remove cast to string when lal swig
            # can cast from unicode
            self.pcdata = LIGOTimeGPS(str(self.pcdata))
        elif self.Type == u"Unix":
            self.pcdata = float(self.pcdata)
        else:
            # unsupported time type. not impossible that
            # calling code has overridden TimeTypes set in
            # glue.ligolw.types; just accept it as a string
            pass

    def write(self, fileobj=sys.stdout, indent=u""):
        fileobj.write(self.start_tag(indent))
        if self.pcdata is not None:
            if self.Type == u"ISO-8601":
                fileobj.write(xmlescape(str(self.pcdata.isoformat())))
            elif self.Type == u"GPS":
                fileobj.write(xmlescape(str(self.pcdata)))
            elif self.Type == u"Unix":
                fileobj.write(xmlescape(u"%.16g" % self.pcdata))
            else:
                # unsupported time type. not impossible.
                # assume correct thing to do is cast to
                # str and let calling code figure out
                # how to ensure that does the correct
                # thing.
                fileobj.write(xmlescape(str(self.pcdata)))
        fileobj.write(self.end_tag(u""))
        fileobj.write(u"\n")

    @classmethod
    def now(cls, Name=None):
        """
        Instantiate a Time element initialized to the current UTC time
        in the default format (ISO-8601). The Name attribute will be set
        to the value of the Name parameter if given.
        """
        import datetime
        self = cls()
        if Name is not None:
            self.Name = Name
        self.pcdata = datetime.datetime.utcnow()
        return self

    @classmethod
    def from_gps(cls, gps, Name=None):
        """
        Instantiate a Time element initialized to the value of the given
        GPS time. The Name attribute will be set to the value of the
        Name parameter if given.

        Note: the new Time element holds a reference to the GPS time,
        not a copy of it. Subsequent modification of the GPS time object
        will be reflected in what gets written to disk.
        """
        self = cls(AttributesImpl({u"Type": u"GPS"}))
        if Name is not None:
            self.Name = Name
        self.pcdata = gps
        return self
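# Usage sketch for the Time element above, assuming the surrounding
# glue.ligolw module provides Element, attributeproxy and friends:
from lal import LIGOTimeGPS

t1 = Time.now(Name=u"run_start")  # ISO-8601 stamp of the current UTC time
t1.write()

t2 = Time.from_gps(LIGOTimeGPS(1000000000), Name=u"trigger_time")
t2.write()  # holds a reference to the LIGOTimeGPS, not a copy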