def toframefile(filename, channel, data, start, dx, **frargs):

  """
    Write numpy array data to GWF frame file using the given arguments.

    Arguments:

      filename : string
        name of file to write
      channel : string
        name of channel to write
      data : numpy.array
        array of data to write
      start : float
        GPS start time (s) or minimum frequency (Hz)
      dx : float
        GPS time step (s) or frequency step (Hz)

    Additional keyword arguments are collected in frargs and passed through to
    Fr.frputvect as part of the channel dictionary. For usage, see the
    documentation for pylal.Fr.frputvect.
  """

  datadict = frargs
  datadict['name']  = channel
  datadict['data']  = data
  datadict['start'] = start
  datadict['dx']    = dx

  Fr.frputvect(filename, [datadict], verbose=False)
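
# A minimal usage sketch for toframefile (the file and channel names below are
# hypothetical, and this block is illustrative only); any extra keyword
# arguments, such as 'kind', are forwarded to Fr.frputvect through frargs.
import numpy as np
strain = np.zeros(16384)  # one second of zeroed 16384 Hz data
toframefile('H-TEST-1000000000-1.gwf', 'H1:TEST-STRAIN', strain,
            start=1000000000, dx=1.0/16384, kind='SIM')
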
def add_noise_frames_to_signal_frames(noise_frame_files, noise_frame_channel, inj_frame_file, inj_frame_channel, ifo, outfile):
	"""
	Take noise frame and time-sorted injection frames, and write a single output frame containing both channels between start and stop of noise frame
	"""		
	#Load in injection frame and create value and time arrays
	inj_value_array, inj_start_time, __, inj_dt, __, __ = Fr.frgetvect1d(filename=inj_frame_file,channel='%s:%s'%(ifo,inj_frame_channel))
	len_inj_array = len(inj_value_array)
	inj_time_array = inj_start_time + inj_dt * np.arange(len_inj_array)  # index-based, guarantees the same length as inj_value_array
	
	#Initialize the accumulator for the noise values that overlap the injection
	noise_final_array = np.zeros(len(inj_value_array))

	#Load in noise frames and create value and time arrays
	for i,noise_frame_file in enumerate(noise_frame_files):
		#Load in noise information for this frame
		noise_value_array, noise_start_time, __, noise_dt, __, __ = Fr.frgetvect1d(filename=noise_frame_file,channel='%s:%s'%(ifo,noise_frame_channel))
		len_noise_array = len(noise_value_array)
		noise_time_array = noise_start_time + noise_dt * np.arange(len_noise_array)  # index-based, guarantees the same length as noise_value_array
		
		#Keep only the noise samples that fall within the injection frame's time span
		tmp_truth_array = (noise_time_array >= inj_time_array[0]) * (noise_time_array <= inj_time_array[-1])
		tmp_value_array = noise_value_array[tmp_truth_array]
		tmp_time_array = noise_time_array[tmp_truth_array]
		
		noise_final_array[ (inj_time_array >= tmp_time_array[0]) * (inj_time_array <= tmp_time_array[-1]) ] += tmp_value_array
	
	#Save the final noise + inj array
	frames_dic = {}
	frames_dic['noise'] = dict(name='%s:%s'%(ifo,noise_frame_channel), data=noise_final_array, start=inj_start_time, dx=inj_dt, type=1)
	frames_dic['inj'] = dict(name='%s:%s'%(ifo,inj_frame_channel), data=inj_value_array, start=inj_start_time, dx=inj_dt, type=1)

	Fr.frputvect(outfile, frames_dic.values())
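
# A hedged usage sketch for add_noise_frames_to_signal_frames; every file path
# and channel name below is hypothetical, and the noise frames are assumed to
# tile the time span covered by the injection frame.
noise_files = ['H-NOISE-1000000000-64.gwf', 'H-NOISE-1000000064-64.gwf']
add_noise_frames_to_signal_frames(noise_files, 'GAUSSIAN-NOISE',
                                  'H-INJ-1000000032-64.gwf', 'BURST-INJ',
                                  ifo='H1',
                                  outfile='H-NOISE_PLUS_INJ-1000000032-64.gwf')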
Example #3
    def test_1d_two_channels_roundtrip(self):
        """ roundtrip test call with two channels in a frame """
        a = Fr.frgetvect1d("./test.dat", "Adc1")
        Fr.frputvect('writetest.gwf', [{
            'name': 'Adc1',
            'data': a[0],
            'start': a[1],
            'dx': a[3],
            'kind': 'ADC',
            'x_unit': a[4],
            'y_unit': a[5]
        }, {
            'name': 'reverse',
            'data': a[0][::-1],
            'start': a[1],
            'dx': a[3],
            'kind': 'ADC',
            'x_unit': a[4],
            'y_unit': a[5]
        }])
        b = Fr.frgetvect1d("writetest.gwf", "Adc1")
        self.assert_(numpy.alltrue(a[0] == b[0]))
        self.assert_(numpy.alltrue(a[1:] == b[1:]))

        c = Fr.frgetvect1d("writetest.gwf", "reverse")
        self.assert_(numpy.alltrue(a[0][::-1] == c[0]))
        self.assert_(numpy.alltrue(a[1:] == c[1:]))
        os.remove("writetest.gwf")
Example #4
 def test_1d_default_roundtrip(self):
     """ roundtrip test call with default values """
     a = Fr.frgetvect1d("./test.dat","Adc1")
     Fr.frputvect('writetest.gwf', [{'name':'Adc1', 'data':a[0],
         'start':a[1], 'dx':a[3], 'kind':'ADC', 'x_unit':a[4],
         'y_unit':a[5]}])
     b = Fr.frgetvect1d("writetest.gwf", "Adc1")
     self.assert_(numpy.alltrue(a[0] == b[0]))
     self.assert_(numpy.alltrue(a[1:] == b[1:]))
     os.remove("writetest.gwf")
Example #5
    def test_1d_two_channels_roundtrip(self):
        """ roundtrip test call with two channels in a frame """
        a = Fr.frgetvect1d("./test.dat","Adc1")
        Fr.frputvect('writetest.gwf', [{'name':'Adc1', 'data':a[0],
        'start':a[1], 'dx':a[3], 'kind':'ADC', 'x_unit':a[4],
        'y_unit':a[5]},{'name':'reverse', 'data':a[0][::-1], 'start':a[1],
        'dx':a[3], 'kind':'ADC', 'x_unit':a[4], 'y_unit': a[5]}])
        b = Fr.frgetvect1d("writetest.gwf", "Adc1")
        self.assert_(numpy.alltrue(a[0] == b[0]))
        self.assert_(numpy.alltrue(a[1:] == b[1:]))

        c = Fr.frgetvect1d("writetest.gwf", "reverse")
        self.assert_(numpy.alltrue(a[0][::-1] == c[0]))
        self.assert_(numpy.alltrue(a[1:] == c[1:]))
        os.remove("writetest.gwf")
Example #6
 def test_1d_keywords_roundtrip(self):
     """ roundtrip test call with keyword arguments """
     a = Fr.frgetvect1d("./test.dat", "Adc1", span=1)
     Fr.frputvect('writetest.gwf', [{
         'name': 'Adc1',
         'data': a[0],
         'start': a[1],
         'dx': a[3],
         'kind': 'ADC',
         'x_unit': a[4],
         'y_unit': a[5]
     }])
     b = Fr.frgetvect1d("writetest.gwf", "Adc1")
     self.assert_(numpy.alltrue(a[0] == b[0]))
     self.assert_(numpy.alltrue(a[1:] == b[1:]))
     os.remove("writetest.gwf")
Example #7
def write_frame(TimeSeries, ifo, usertag, outdir):
    """
    Write the TimeSeries to a GWF frame file and an accompanying LAL cache file.
    """

    # Construct name
    site=ifo.strip('1')

    frame_name = '{site}-{ifo}_{usertag}-{epoch}-{datalen}.gwf'.format(
            site=site, ifo=ifo, usertag=usertag,
            epoch=str(int(TimeSeries.epoch)),
            datalen=str(int(TimeSeries.data.length * TimeSeries.deltaT)))

    channel_list = [
            {'name':'%s:STRAIN'%ifo, 
                'data':np.array(TimeSeries.data.data),
                'start':TimeSeries.epoch,
                'dx':TimeSeries.deltaT,
                'kind':'SIM'}, 
            ]


    print 'writing frame %s...'%frame_name

    frame_out_path = '%s/%s'%(os.path.abspath(outdir), frame_name)
    Fr.frputvect(frame_out_path, channel_list)

    #
    # Generate a cache file
    #

    # setup url
    path, filename = os.path.split(frame_out_path.strip())
    url = "file://localhost%s" % os.path.abspath(os.path.join(path, filename))

    # create cache entry
    c=gluelal.CacheEntry.from_T050017(url)

    # write to file
    cache_file = frame_out_path.replace('gwf','lcf')
    with open(cache_file, 'w') as f:
        f.write('%s\n' % str(c))

    return frame_out_path,cache_file
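
# A hedged round-trip sketch for write_frame; `ts` is assumed to be a LAL
# REAL8TimeSeries (anything with .epoch, .deltaT and .data), and the ifo,
# user tag and output directory are hypothetical.
frame_path, cache_path = write_frame(ts, 'H1', 'TESTRUN', './frames')
strain, t0, _, dt, _, _ = Fr.frgetvect1d(frame_path, 'H1:STRAIN')
assert len(strain) == ts.data.length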
Example #8
 def test_cache(self):
     # Knowing the middle of our array will be helpful, because we will put half on one frame,
     # and half on the other. We will also need this to read in a segment of the cache that
     # crosses this seam.
     half = int((self.size/2)*self.delta_t)
     # These need to be named so that lalapps_path2cache can turn them into a single cache file.
     frmfile1 = tempfile.NamedTemporaryFile(prefix='H-frame-'+str(int(self.epoch))+'-'+str(half)+'.')
     frmfile2 = tempfile.NamedTemporaryFile(prefix='H-frame-'+str(int(self.epoch+half))+'-'+str(half)+'.')
     frmfile3 = tempfile.NamedTemporaryFile(prefix='H-frame-'+str(int(self.epoch+half+16))+'-'+str(half-16)+'.')
     # We will need access to the actual filenames.
     frmname1 = frmfile1.name
     frmname2 = frmfile2.name
     frmname3 = frmfile3.name
     
     firsthalf1 = self.data1[0:(self.size/2)]
     secondhalf1 = self.data1[(self.size/2):]
     # This third piece will be paired up with the first one to create a cache file with a gap
     # of 16 seconds after the half-way point
     gaphalf1 = self.data1[(self.size/2 + 16/self.delta_t):]
     
     # The same is done to the second dataset
     firsthalf2 = self.data2[0:(self.size/2)]
     secondhalf2 = self.data2[(self.size/2):]
     gaphalf2 = self.data2[(self.size/2 + 16/self.delta_t):]
     
     # Now we will create a frame file, this will hold the first half of our data
     Fr.frputvect(frmname1,[{'name':'channel1', 'data':firsthalf1, 'start':int(self.epoch), 'dx':self.delta_t,'type':1},
                             {'name':'channel2', 'data':firsthalf2, 'start':int(self.epoch), 'dx':self.delta_t,'type':1}])
     # This will hold the second half
     Fr.frputvect(frmname2,[{'name':'channel1', 'data':secondhalf1, 'start':int(self.epoch+half), 'dx':self.delta_t,'type':1},
                             {'name':'channel2', 'data':secondhalf2, 'start':int(self.epoch+half), 'dx':self.delta_t,'type':1}])
                             
     # This third one will hold the second half, but without the first 16 seconds, so we can check importing from a cache with holes.
     Fr.frputvect(frmname3,[{'name':'channel1', 'data':gaphalf1, 'start':int(self.epoch + half + 16), 'dx':self.delta_t,'type':1},
                             {'name':'channel2', 'data':gaphalf2, 'start':int(self.epoch + half + 16), 'dx':self.delta_t,'type':1}])
     
     # These files are what path2cache will actually read; they will hold a list of frames
     frmlist1 = tempfile.NamedTemporaryFile()
     frmlist2 = tempfile.NamedTemporaryFile()
     
     listname1 = frmlist1.name
     listname2 = frmlist2.name
     
     # The first one will contain the complete set, split over two frames
     with open(listname1, 'w') as f1:
         f1.write(frmname1+'\n')
         f1.write(frmname2)
         
     # This second one will use the gap frame for the second half, so there
     # will be a 16 second gap in the middle of this cache
     with open(listname2, 'w') as f2:
         f2.write(frmname1+'\n')
         f2.write(frmname3)
         
     cache1 = tempfile.NamedTemporaryFile()
     cache2 = tempfile.NamedTemporaryFile()
     
     cachename1 = cache1.name
     cachename2 = cache2.name
     
     # Now we can actually make the caches from the list of frames
     subprocess.call(['lalapps_path2cache','-i',listname1,'-o',cachename1])
     subprocess.call(['lalapps_path2cache','-i',listname2,'-o',cachename2])
        
     with self.context:
         if _options['scheme'] == 'cpu':
             # Reading just one channel first
             ts = pycbc.frame.read_cache(cachename1,'channel1',self.epoch,self.epoch+self.size*self.delta_t)
             self.checkCurrentState([ts],[self.data1],self.places)
             self.assertTrue(ts.start_time == self.epoch)
             self.assertTrue(ts.end_time-ts.start_time == self.size*self.delta_t)
             
             # Now reading multiple channels
             ts = pycbc.frame.read_cache(cachename1,['channel1','channel2'],self.epoch,self.epoch+self.size*self.delta_t)
             self.assertTrue(type(ts) is list)
             self.checkCurrentState(ts, [self.data1,self.data2], self.places)
             self.assertTrue(ts[0].start_time == self.epoch)
             self.assertTrue(ts[1].start_time == self.epoch)
             self.assertTrue(ts[0].end_time-ts[0].start_time == self.size*self.delta_t)
             self.assertTrue(ts[1].end_time-ts[1].start_time == self.size*self.delta_t)
             
             # Now reading in a specific segment with an integer
             start = self.epoch + 10
             end = self.epoch + half + 50
             startind = int(10/self.delta_t)
             endind = int((half + 50) / self.delta_t)
             ts = pycbc.frame.read_cache(cachename1, 'channel1', start=int(start), end=int(end))
             
             # Now we'll check all the values
             self.checkCurrentState((ts,), (self.data1[startind:endind],), self.places)
             # The duration
             self.assertTrue((40+half) - (float(ts.end_time)-float(ts.start_time)) < self.delta_t)
             # And the start
             self.assertTrue(ts.start_time == self.epoch+10)
             
             # The same, but with a LIGOTimeGPS for the start and end times
             ts = pycbc.frame.read_cache(cachename1, 'channel1', start=start, end=end)
             # Now we'll check all the values
             self.checkCurrentState((ts,), (self.data1[startind:endind],), self.places)
             # The duration
             self.assertTrue((40+half) - (float(ts.end_time)-float(ts.start_time)) < self.delta_t)
             # And the start
             self.assertTrue(ts.start_time == self.epoch+10)
             
             # And now some cases that should raise errors
             
             # There should be an error if there are gaps in the data requested
             self.assertRaises(ValueError, pycbc.frame.read_cache,cachename2,'channel1',
                                 self.epoch,self.epoch+self.size*self.delta_t)
             
             # There must be a span greater than 0
             self.assertRaises(ValueError, pycbc.frame.read_cache,cachename1,'channel1',
                                 start=self.epoch,end=self.epoch)
             # The start must be before the end
             self.assertRaises(ValueError, pycbc.frame.read_cache,cachename1,'channel1',
                                 start=self.epoch+1,end=self.epoch)
             # Non integer times should also raise an error
             badtime = lal.LIGOTimeGPS(int(self.epoch)+5,1000)
             
             self.assertRaises(ValueError, pycbc.frame.read_cache,cachename1,'channel1',
                                 start=self.epoch,end=badtime)
             self.assertRaises(ValueError, pycbc.frame.read_cache,cachename1,'channel1',
                                 start=float(self.epoch),end=float(badtime))
Example #9
    def test_frame(self):
        
        # This is a file in the temp directory that will be deleted when it is garbage collected
        frmfile = tempfile.NamedTemporaryFile()  
        filename = frmfile.name
        
        # Now we will create a frame file, specifying that it is a timeseries
        Fr.frputvect(filename,[{'name':'channel1', 'data':self.data1, 'start':int(self.epoch), 'dx':self.delta_t,'type':1},
                                {'name':'channel2', 'data':self.data2, 'start':int(self.epoch), 'dx':self.delta_t,'type':1}])
        
        with self.context:
            if _options['scheme'] == 'cpu':
                # Reading just one channel first
                ts1 = pycbc.frame.read_frame(filename,'channel1')
                # Checking all values
                self.checkCurrentState((ts1,),(self.data1,),self.places)
                # Now checking the start time
                self.assertTrue(ts1.start_time == self.epoch)
                # And the duration
                self.assertTrue(ts1.end_time-ts1.start_time == self.size*self.delta_t)
                
                # Now reading multiple channels
                ts2 = pycbc.frame.read_frame(filename,['channel1','channel2'])
                # We should get back a list
                self.assertTrue(type(ts2) is list)
                self.checkCurrentState(ts2, (self.data1,self.data2), self.places)
                self.assertTrue(ts2[0].start_time == self.epoch)
                self.assertTrue(ts2[1].start_time == self.epoch)
                self.assertTrue(ts2[0].end_time-ts2[0].start_time == self.size*self.delta_t)
                self.assertTrue(ts2[1].end_time-ts2[1].start_time == self.size*self.delta_t)
                
                # These are the times and indices for the segment we will try to read
                start = self.epoch+10
                end = self.epoch+50
                startind = int(10/self.delta_t)
                endind = int(50/self.delta_t)
                
                # Now reading in a specific segment with an integer
                ts3 = pycbc.frame.read_frame(filename, 'channel1', start=int(start), end=int(end))
                
                # The same, but with a LIGOTimeGPS for the start and end times
                ts4 = pycbc.frame.read_frame(filename, 'channel1', start=start, end=end)

                # Now we will check those two TimeSeries
                self.checkCurrentState((ts3,ts4), (self.data1[startind:endind],self.data1[startind:endind]), self.places)
                self.assertTrue(40 - (float(ts3.end_time)-float(ts3.start_time)) < self.delta_t)
                self.assertTrue(ts3.start_time == start)
                
                self.assertTrue(40 - (float(ts4.end_time)-float(ts4.start_time)) < self.delta_t)
                self.assertTrue(ts4.start_time == start)

                # And now some cases that should raise errors

                # There must be a span greater than 0
                self.assertRaises(ValueError, pycbc.frame.read_frame,filename,'channel1',
                                    start=self.epoch,end=self.epoch)
                # The start must be before the end
                self.assertRaises(ValueError, pycbc.frame.read_frame,filename,'channel1',
                                    start=self.epoch+1,end=self.epoch)
                # Non integer times should also raise an error
                badtime = lal.LIGOTimeGPS(int(self.epoch)+5,1000)
                
                self.assertRaises(ValueError, pycbc.frame.read_frame,filename,'channel1',
                                    start=self.epoch,end=badtime)
                self.assertRaises(ValueError, pycbc.frame.read_frame,filename,'channel1',
                                    start=float(self.epoch),end=float(badtime))
Example #10
def write_frame(det_data, ifo, seed, epoch, datalen, outdir):
    """
    Write the detector data to a GWF frame file (STRAIN, SIGNAL and NOISE channels) and an accompanying LAL cache file.
    """

    # Construct name
    site = ifo.strip('1')

    frame_name = '{site}-{ifo}_{wf_name}_{seed}-{epoch}-{datalen}.gwf'.format(
        site=site,
        ifo=ifo,
        wf_name=det_data.waveform_name,
        seed=seed,
        epoch=str(int(epoch)),
        datalen=str(int(datalen)))

    channel_list = [
        {
            'name': '%s:STRAIN' % ifo,
            'data': np.array(det_data.td_response.data),
            'start': epoch,
            'dx': 1.0 / 16384,
            'kind': 'SIM'
        },
        {
            'name': '%s:SIGNAL' % ifo,
            'data': np.array(det_data.td_signal.data),
            'start': epoch,
            'dx': 1.0 / 16384,
            'kind': 'SIM'
        },
        {
            'name': '%s:NOISE' % ifo,
            'data': np.array(det_data.td_noise.data),
            'start': epoch,
            'dx': 1.0 / 16384,
            'kind': 'SIM'
        },
    ]

    print 'writing frame %s...' % frame_name

    frame_out_path = '%s/%s' % (os.path.abspath(outdir), frame_name)
    Fr.frputvect(frame_out_path, channel_list)

    #
    # Generate a cache file
    #

    # setup url
    path, filename = os.path.split(frame_out_path.strip())
    url = "file://localhost%s" % os.path.abspath(os.path.join(path, filename))

    # create cache entry
    c = gluelal.CacheEntry.from_T050017(url)

    # write to file
    cache_file = frame_out_path.replace('gwf', 'lcf')
    with open(cache_file, 'w') as f:
        f.write('%s\n' % str(c))

    return frame_out_path, cache_file
Example #11
    def generate_gwf(self, mdc, directory, channel="SCIENCE", force=False):
        """
        Produce the gwf file which corresponds to the MDC set over the period of this frame.

        Parameters
        ----------
        mdc : MDCSet object
           The MDC set which should be used to produce this frame.
        directory : str
           The root directory where all of the frames are to be stored; for example,
           "/home/albert.einstein/data/mdc/frames/"
           would cause the SineGaussian injections to be written in directories under
           "/home/albert.einstein/data/mdc/frames/sg".
        channel : str
           The name of the channel which the injections should be made into. The channel
           name is prefixed with each interferometer's identifier, so there will be one
           channel per interferometer in the gwf.
        force : bool
           If True, force the recreation of the GWF file even if it already exists.

        Outputs
        -------
        gwf
           The GWF file for this frame.
        """
        ifosstr = "".join(set(ifo[0] for ifo in self.ifos))
        family = mdc.waveforms[0].waveform
        filename = "{}-{}-{}-{}.gwf".format(ifosstr, family, self.start, self.duration)

        head_date = str(self.start)[:5]
        frameloc = directory+"/"+mdc.directory_path()+"/"+head_date+"/"
        #print frameloc, filename
        if not os.path.isfile(frameloc + filename) or force:
            data = []
            # Define the start point of the time series to be generated for the injection
            epoch = lal.LIGOTimeGPS(self.start)
            # Loop through each interferometer
            for ifo in self.ifos:
                # Calculate the number of samples in the timeseries
                nsamp = (self.end-self.start)*16384
                # Make the timeseries
                h_resp = lal.CreateREAL8TimeSeries("inj time series", epoch, 0, 1.0/16384, lal.StrainUnit, nsamp)
                # Loop over all of the injections corresponding to this frame
                rowlist = self.get_rowlist(mdc)
                if len(rowlist)==0: return
                for row in rowlist:
                    sim_burst = mdc.waveforms[row]
                    # Produce the time domain waveform for this injection
                    hp, hx = lalburst.GenerateSimBurst(sim_burst, 1.0/16384)
                    # Apply detector response
                    det = lalsimulation.DetectorPrefixToLALDetector(ifo)
                    # Produce the total strains
                    h_tot = lalsimulation.SimDetectorStrainREAL8TimeSeries(hp, hx,
                                                                           sim_burst.ra, sim_burst.dec, sim_burst.psi, det)
                    # Inject the waveform into the overall timeseries
                    lalsimulation.SimAddInjectionREAL8TimeSeries(h_resp, h_tot, None)

                # Write out the data to the list which will eventually become our frame
                data.append({"name": "%s:%s" % (ifo, channel),
                             "data": h_resp.data.data,
                             "start": float(epoch),
                             "dx": h_resp.deltaT,
                             "kind": "SIM"})

            # Make the directory in which to store the files
            # if it doesn't exist already
            mkdir(frameloc)
            # Write out the frame file
            Fr.frputvect(frameloc+filename, data)
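
# A hypothetical call, assuming `frame` is an instance of the class this method
# belongs to and `mdc` is an MDCSet that has already been populated with
# injections; the directory matches the docstring example above.
frame.generate_gwf(mdc, "/home/albert.einstein/data/mdc/frames",
                   channel="SCIENCE", force=True)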