Example #1
 def _main(self):
     """Sets core, and calls main"""
     affinity.set_core(-1)
     for ring_name in self.ring_names:
         if ring_name not in self.header:
             self.header[ring_name] = {}
     self.main()
Example #2
 def run(self):
     #affinity.set_openmp_cores(cpus) # TODO
     core = self.core
     if core is not None:
         affinity.set_core(core if isinstance(core, int) else core[0])
     self.bind_proclog.update({'ncore': 1, 'core0': affinity.get_core()})
     if self.gpu is not None:
         device.set_device(self.gpu)
     self.cache_scope_hierarchy()
     with ExitStack() as oring_stack:
         active_orings = self.begin_writing(oring_stack, self.orings)
         try:
             self.main(active_orings)
         except Exception:
             self.pipeline.block_init_queue.put((self, False))
             sys.stderr.write("From block instantiated here:\n")
             sys.stderr.write(self.init_trace)
             raise
Example #3
 def main(self):
     global QUEUE
     
     if self.core is not None:
         cpu_affinity.set_core(self.core)
     self.bind_proclog.update({'ncore': 1, 
                               'core0': cpu_affinity.get_core(),})
     
     for iseq in self.iring.read(guarantee=self.guarantee):
         ihdr = json.loads(iseq.header.tostring())
         
         self.sequence_proclog.update(ihdr)
         
         self.log.info("Writer: Start of new sequence: %s", str(ihdr))
         
         # Setup the ring metadata and gulp sizes
         time_tag = ihdr['time_tag']
         navg     = ihdr['navg']
         nbl      = ihdr['nbl']
         chan0    = ihdr['chan0']
         nchan    = ihdr['nchan']
         chan_bw  = ihdr['bw'] / nchan
         npol     = ihdr['npol']
         pols     = ['XX','XY','YX','YY']
         
         igulp_size = self.ntime_gulp*nbl*nchan*npol*8        # ci32
         ishape = (self.ntime_gulp,nbl,nchan,npol)
         self.iring.resize(igulp_size, 10*igulp_size*(4 if self.fast else 1))
         
         norm_factor = navg // (2*NCHAN)
         
         first_gulp = True
         was_active = False
         prev_time = time.time()
         iseq_spans = iseq.read(igulp_size)
         for ispan in iseq_spans:
             if ispan.size < igulp_size:
                 continue # Ignore final gulp
             curr_time = time.time()
             acquire_time = curr_time - prev_time
             prev_time = curr_time
             
             ## On our first span, update the pipeline lag for the queue
             ## so that we start recording at the right times
             if first_gulp:
                 QUEUE.update_lag(LWATime(time_tag, format='timetag').datetime)
                 self.log.info("Current pipeline lag is %s", QUEUE.lag)
                 first_gulp = False
                 
             ## Setup and load
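             ## 'ci32' packs each sample as two interleaved int32 values (real, imag);
             ## view as int32, split off the pair, and rebuild complex64 before scaling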
             idata = ispan.data_view('ci32').reshape(ishape)
             idata = idata.view(numpy.int32)
             idata = idata.reshape(ishape+(2,))
             idata = idata[...,0] + 1j*idata[...,1]
             idata /= norm_factor
             idata = idata.astype(numpy.complex64)
             
             ## Determine what to do
             if QUEUE.active is not None:
                 ### Recording active - write
                 if not QUEUE.active.is_started:
                     self.log.info("Started operation - %s", QUEUE.active)
                     QUEUE.active.start(self.station, chan0, navg, nchan, chan_bw, npol, pols)
                     was_active = True
                 QUEUE.active.write(time_tag, idata)
             elif was_active:
                 ### Recording just finished
                 #### Clean
                 was_active = False
                 QUEUE.clean()
                 
                 #### Close
                 self.log.info("Ended operation - %s", QUEUE.previous)
                 QUEUE.previous.stop()
                 
             time_tag += navg
             
             curr_time = time.time()
             process_time = curr_time - prev_time
             prev_time = curr_time
             self.perf_proclog.update({'acquire_time': acquire_time, 
                                       'reserve_time': -1, 
                                       'process_time': process_time,})
             
     self.log.info("WriterOp - Done")
Example #4
 def main(self):
     if self.core is not None:
         cpu_affinity.set_core(self.core)
     self.bind_proclog.update({'ncore': 1, 
                               'core0': cpu_affinity.get_core(),})
     
     for iseq in self.iring.read(guarantee=self.guarantee):
         ihdr = json.loads(iseq.header.tostring())
         
         self.sequence_proclog.update(ihdr)
         
         self.log.info("Statistics: Start of new sequence: %s", str(ihdr))
         
         # Setup the ring metadata and gulp sizes
         time_tag = ihdr['time_tag']
         navg     = ihdr['navg']
         nbl      = ihdr['nbl']
         nstand   = ihdr['nstand']
         chan0    = ihdr['chan0']
         nchan    = ihdr['nchan']
         chan_bw  = ihdr['bw'] / nchan
         npol     = ihdr['npol']
         
         igulp_size = self.ntime_gulp*nbl*nchan*npol*8        # ci32
         ishape = (self.ntime_gulp,nbl,nchan,npol)
         
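         # Indices of the auto-correlations, assuming the baselines are packed
         # in upper-triangular (i <= j) order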
         autos = [i*(2*(nstand-1)+1-i)//2 + i for i in range(nstand)]
         data_pols = ['XX', 'YY']
         last_save = 0.0
         
         prev_time = time.time()
         iseq_spans = iseq.read(igulp_size)
         for ispan in iseq_spans:
             if ispan.size < igulp_size:
                 continue # Ignore final gulp
             curr_time = time.time()
             acquire_time = curr_time - prev_time
             prev_time = curr_time
             
             ## Setup and load
             idata = ispan.data_view('ci32').reshape(ishape)
             
             if time.time() - last_save > 60:
                 ## Timestamp
                 tt = LWATime(time_tag, format='timetag')
                 ts = tt.unix
                 
                 ## Pull out the auto-correlations
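                 ##  (real parts only; indices 0 and 3 select the XX and YY products)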
                 adata = idata.view(numpy.int32)
                 adata = adata.reshape(ishape+(2,))
                 adata = adata[0,autos,:,:,0]
                 adata = adata[:,:,[0,3]]
                 
                 ## Run the statistics over all times/channels
                 ##  * only really works for ntime_gulp=1
                 data_min = numpy.min(adata, axis=1)
                 data_max = numpy.max(adata, axis=1)
                 data_avg = numpy.mean(adata, axis=1)
                 
                 ## Save
                 for data,name in zip((data_min,data_avg,data_max), ('min','avg','max')):
                     value = MultiMonitorPoint([data[:,i].tolist() for i in range(data.shape[1])],
                                               timestamp=ts, field=data_pols)
                     self.client.write_monitor_point('statistics/%s' % name, value)
                     
                 last_save = time.time()
                 
             time_tag += navg * self.ntime_gulp
             
             curr_time = time.time()
             process_time = curr_time - prev_time
             prev_time = curr_time
             self.perf_proclog.update({'acquire_time': acquire_time, 
                                       'reserve_time': -1, 
                                       'process_time': process_time,})
             
     self.log.info("StatisticsOp - Done")
Example #5
 def main(self):
     cpu_affinity.set_core(self.core)
     self.bind_proclog.update({'ncore': 1, 
                               'core0': cpu_affinity.get_core(),})
     
     for iseq in self.iring.read(guarantee=self.guarantee):
         ihdr = json.loads(iseq.header.tostring())
         
         self.sequence_proclog.update(ihdr)
         
         self.log.info("Baseline: Start of new sequence: %s", str(ihdr))
         
         # Setup the ring metadata and gulp sizes
         time_tag = ihdr['time_tag']
         navg     = ihdr['navg']
         nbl      = ihdr['nbl']
         nstand   = ihdr['nstand']
         chan0    = ihdr['chan0']
         nchan    = ihdr['nchan']
         chan_bw  = ihdr['bw'] / nchan
         npol     = ihdr['npol']
         
         igulp_size = self.ntime_gulp*nbl*nchan*npol*8
         ishape = (self.ntime_gulp,nbl,nchan,npol)
         self.iring.resize(igulp_size)
         
         # Setup the arrays for the frequencies and baseline lengths
         freq = chan0*chan_bw + numpy.arange(nchan)*chan_bw
         uvw = get_zenith_uvw(self.station, LWATime(time_tag, format='timetag'))
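         # Project the baselines onto the UV plane (zero the w term); 'valid' keeps
         # only baselines with a non-negligible projected length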
         uvw[:,2] = 0
         dist = numpy.sqrt((uvw**2).sum(axis=1))
         valid = numpy.where(dist > 0.1)[0]
         last_save = 0.0
         
         prev_time = time.time()
         for ispan in iseq.read(igulp_size):
             if ispan.size < igulp_size:
                 continue # Ignore final gulp
             curr_time = time.time()
             acquire_time = curr_time - prev_time
             prev_time = curr_time
             
             ## Setup and load
             idata = ispan.data_view('ci32').reshape(ishape)
             
             if time.time() - last_save > 60:
                 ## Timestamp
                 tt = LWATime(time_tag, format='timetag')
                 ts = tt.unix
                 
                 ## Plot
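                 ### Unpack the 'ci32' samples (interleaved int32 real/imag) into complex64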
                 bdata = idata[0,...]
                 bdata = bdata.view(numpy.int32)
                 bdata = bdata.reshape(ishape+(2,))
                 bdata = bdata[0,:,:,:,0] + 1j*bdata[0,:,:,:,1]
                 bdata = bdata.astype(numpy.complex64)
                 im = self._plot_baselines(time_tag, freq, dist, bdata, valid)
                 
                 ## Save
                 mp = ImageMonitorPoint.from_image(im)
                 self.client.write_monitor_point('diagnostics/baselines',
                                                 mp, timestamp=ts)
                 
                 last_save = time.time()
                 
             time_tag += navg * self.ntime_gulp
             
             curr_time = time.time()
             process_time = curr_time - prev_time
             prev_time = curr_time
             self.perf_proclog.update({'acquire_time': acquire_time, 
                                       'reserve_time': 0.0, 
                                       'process_time': process_time,})
             
     self.log.info("BaselineOp - Done")
Example #6
 def main(self):
     cpu_affinity.set_core(self.core)
     self.bind_proclog.update({'ncore': 1, 
                               'core0': cpu_affinity.get_core(),})
     
     for iseq in self.iring.read(guarantee=self.guarantee):
         ihdr = json.loads(iseq.header.tostring())
         
         self.sequence_proclog.update(ihdr)
         
         self.log.info("Spectra: Start of new sequence: %s", str(ihdr))
         
         # Setup the ring metadata and gulp sizes
         time_tag = ihdr['time_tag']
         navg     = ihdr['navg']
         nbl      = ihdr['nbl']
         nstand   = ihdr['nstand']
         chan0    = ihdr['chan0']
         nchan    = ihdr['nchan']
         chan_bw  = ihdr['bw'] / nchan
         npol     = ihdr['npol']
         
         igulp_size = self.ntime_gulp*nbl*nchan*npol*8   # ci32
         ishape = (self.ntime_gulp,nbl,nchan,npol)
         
         # Setup the arrays for the frequencies and auto-correlations
         freq = chan0*chan_bw + numpy.arange(nchan)*chan_bw
         autos = [i*(2*(nstand-1)+1-i)//2 + i for i in range(nstand)]
         last_save = 0.0
         
         prev_time = time.time()
         for ispan in iseq.read(igulp_size):
             if ispan.size < igulp_size:
                 continue # Ignore final gulp
             curr_time = time.time()
             acquire_time = curr_time - prev_time
             prev_time = curr_time
             
             ## Setup and load
             idata = ispan.data_view('ci32').reshape(ishape)
             
             if time.time() - last_save > 60:
                 ## Timestamp
                 tt = LWATime(time_tag, format='timetag')
                 ts = tt.unix
                 
                 ## Pull out the auto-correlations
                 adata = idata.view(numpy.int32)
                 adata = adata.reshape(ishape+(2,))
                 adata = adata[0,autos,:,:,0]
                 adata = adata[:,:,[0,3]]
                 
                 ## Plot
                 im = self._plot_spectra(time_tag, freq, 10*numpy.log10(adata))
                 
                 ## Save
                 mp = ImageMonitorPoint.from_image(im)
                 self.client.write_monitor_point('diagnostics/spectra',
                                                 mp, timestamp=ts)
                 
                 last_save = time.time()
                 
             time_tag += navg * self.ntime_gulp
             
             curr_time = time.time()
             process_time = curr_time - prev_time
             prev_time = curr_time
             self.perf_proclog.update({'acquire_time': acquire_time, 
                                       'reserve_time': 0.0, 
                                       'process_time': process_time,})
             
     self.log.info("SpectraOp - Done")
Example #7
 def main(self):
     """Initiate the block's processing"""
     affinity.set_core(self.core)
     waterfall_matrix = self.generate_waterfall_matrix()
     self.save_waterfall_plot(waterfall_matrix)
Example #8
 def main(self):
     """Initiate the block's processing"""
     affinity.set_core(self.core)
     self.dedisperse()
Example #9
    def main(self):
        global QUEUE

        if self.core is not None:
            cpu_affinity.set_core(self.core)
        self.bind_proclog.update({
            'ncore': 1,
            'core0': cpu_affinity.get_core(),
        })

        for iseq in self.iring.read(guarantee=self.guarantee):
            ihdr = json.loads(iseq.header.tostring())

            self.sequence_proclog.update(ihdr)

            self.log.info("Writer: Start of new sequence: %s", str(ihdr))

            # Setup the ring metadata and gulp sizes
            time_tag = ihdr['time_tag']
            navg = ihdr['navg']
            nbeam = ihdr['nbeam']
            chan0 = ihdr['chan0']
            nchan = ihdr['nchan']
            chan_bw = ihdr['bw'] / nchan
            npol = ihdr['npol']
            pols = ihdr['pols']
            pols = pols.replace('CR', 'XY_real')
            pols = pols.replace('CI', 'XY_imag')

            igulp_size = self.ntime_gulp * nbeam * nchan * npol * 4  # float32
            ishape = (self.ntime_gulp, nbeam, nchan, npol)
            self.iring.resize(igulp_size, 10 * igulp_size)

            first_gulp = True
            was_active = False
            prev_time = time.time()
            iseq_spans = iseq.read(igulp_size)
            for ispan in iseq_spans:
                if ispan.size < igulp_size:
                    continue  # Ignore final gulp
                curr_time = time.time()
                acquire_time = curr_time - prev_time
                prev_time = curr_time

                ## On our first span, update the pipeline lag for the queue
                ## so that we start recording at the right times
                if first_gulp:
                    QUEUE.update_lag(
                        LWATime(time_tag, format='timetag').datetime)
                    self.log.info("Current pipeline lag is %s", QUEUE.lag)
                    first_gulp = False

                ## Setup and load
                idata = ispan.data_view(numpy.float32).reshape(ishape)

                ## Determine what to do
                if QUEUE.active is not None:
                    ### Recording active - write
                    if not QUEUE.active.is_started:
                        self.log.info("Started operation - %s", QUEUE.active)
                        QUEUE.active.start(self.beam, chan0, navg, nchan,
                                           chan_bw, npol, pols)
                        was_active = True
                    QUEUE.active.write(time_tag, idata)
                elif was_active:
                    ### Recording just finished - clean
                    #### Clean
                    was_active = False
                    QUEUE.clean()

                    #### Close
                    self.log.info("Ended operation - %s", QUEUE.previous)
                    QUEUE.previous.stop()

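                ## Advance by one gulp: ntime_gulp output samples, each averaging navg
                ## spectra of FS/CHAN_BW ticks (assumes navg counts spectra, not raw samples)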
                time_tag += navg * self.ntime_gulp * (int(FS) // int(CHAN_BW))

                curr_time = time.time()
                process_time = curr_time - prev_time
                prev_time = curr_time
                self.perf_proclog.update({
                    'acquire_time': acquire_time,
                    'reserve_time': -1,
                    'process_time': process_time,
                })

        self.log.info("WriterOp - Done")
Example #10
    def main(self):
        if self.core is not None:
            cpu_affinity.set_core(self.core)
        self.bind_proclog.update({
            'ncore': 1,
            'core0': cpu_affinity.get_core(),
        })

        for iseq in self.iring.read(guarantee=self.guarantee):
            ihdr = json.loads(iseq.header.tostring())

            self.sequence_proclog.update(ihdr)

            self.log.info("Statistics: Start of new sequence: %s", str(ihdr))

            # Setup the ring metadata and gulp sizes
            time_tag = ihdr['time_tag']
            navg = ihdr['navg']
            nbeam = ihdr['nbeam']
            chan0 = ihdr['chan0']
            nchan = ihdr['nchan']
            chan_bw = ihdr['bw'] / nchan
            npol = ihdr['npol']
            pols = ihdr['pols']

            igulp_size = self.ntime_gulp * nbeam * nchan * npol * 4  # float32
            ishape = (self.ntime_gulp, nbeam, nchan, npol)

            data_pols = pols.split(',')
            last_save = 0.0

            prev_time = time.time()
            iseq_spans = iseq.read(igulp_size)
            for ispan in iseq_spans:
                if ispan.size < igulp_size:
                    continue  # Ignore final gulp
                curr_time = time.time()
                acquire_time = curr_time - prev_time
                prev_time = curr_time

                ## Setup and load
                idata = ispan.data_view(numpy.float32).reshape(ishape)
                idata = idata.reshape(-1, npol)

                if time.time() - last_save > 60:
                    ## Timestamp
                    tt = LWATime(time_tag, format='timetag')
                    ts = tt.unix

                    ## Run the statistics over all times/channels
                    ##  * only really works for nbeam=1
                    data_min = numpy.min(idata, axis=0)
                    data_max = numpy.max(idata, axis=0)
                    data_avg = numpy.mean(idata, axis=0)

                    ## Save
                    for data, name in zip((data_min, data_avg, data_max),
                                          ('min', 'avg', 'max')):
                        value = MultiMonitorPoint(data.tolist(),
                                                  timestamp=ts,
                                                  field=data_pols)
                        self.client.write_monitor_point(
                            'statistics/%s' % name, value)

                    last_save = time.time()

                time_tag += navg * self.ntime_gulp * (int(FS) // int(CHAN_BW))

                curr_time = time.time()
                process_time = curr_time - prev_time
                prev_time = curr_time
                self.perf_proclog.update({
                    'acquire_time': acquire_time,
                    'reserve_time': -1,
                    'process_time': process_time,
                })

        self.log.info("StatisticsOp - Done")
Example #11
    def main(self):
        if self.core is not None:
            cpu_affinity.set_core(self.core)
        self.bind_proclog.update({
            'ncore': 1,
            'core0': cpu_affinity.get_core(),
        })

        # Setup the figure
        ## Import
        import matplotlib
        matplotlib.use('Agg')
        from matplotlib import pyplot as plt
        from matplotlib.ticker import MultipleLocator

        ## Create
        fig = plt.Figure(figsize=(6, 6))
        ax = fig.gca()

        for iseq in self.iring.read(guarantee=self.guarantee):
            ihdr = json.loads(iseq.header.tostring())

            self.sequence_proclog.update(ihdr)

            self.log.info("Spectra: Start of new sequence: %s", str(ihdr))

            # Setup the ring metadata and gulp sizes
            time_tag = ihdr['time_tag']
            navg = ihdr['navg']
            nbeam = ihdr['nbeam']
            chan0 = ihdr['chan0']
            nchan = ihdr['nchan']
            chan_bw = ihdr['bw'] / nchan
            npol = ihdr['npol']
            pols = ihdr['pols']

            igulp_size = self.ntime_gulp * nbeam * nchan * npol * 4  # float32
            ishape = (self.ntime_gulp, nbeam, nchan, npol)

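            # Each of the NPIPELINE upstream pipelines contributes nchan_pipeline channels;
            # their boundaries are drawn as vertical lines in the plot below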
            nchan_pipeline = nchan // NPIPELINE
            frange = (numpy.arange(nchan) + chan0) * CHAN_BW
            last_save = 0.0

            prev_time = time.time()
            iseq_spans = iseq.read(igulp_size)
            for ispan in iseq_spans:
                if ispan.size < igulp_size:
                    continue  # Ignore final gulp
                curr_time = time.time()
                acquire_time = curr_time - prev_time
                prev_time = curr_time

                ## Setup and load
                idata = ispan.data_view(numpy.float32).reshape(ishape)

                if time.time() - last_save > 60:
                    ## Timestamp
                    tt = LWATime(time_tag, format='timetag')
                    ts = tt.datetime.strftime('%y%m%d %H:%M:%S')

                    ## Average over time
                    sdata = idata.mean(axis=0)

                    ## Plot the time-averaged spectra for the first two polarizations
                    ax.cla()
                    ax.plot(frange / 1e6,
                            numpy.log10(sdata[0, :, 0]) * 10,
                            color='#1F77B4')
                    ax.plot(frange / 1e6,
                            numpy.log10(sdata[0, :, 1]) * 10,
                            color='#FF7F0E')
                    ylim = ax.get_ylim()
                    for b in range(1, NPIPELINE):
                        linestyle = ':'
                        if b % 4 == 0:
                            linestyle = '--'
                        ax.vlines(frange[b * nchan_pipeline] / 1e6,
                                  *ylim,
                                  linestyle=linestyle,
                                  color='black',
                                  alpha=0.2)
                    ax.set_ylim(ylim)
                    ax.set_xlim((frange[0] / 1e6, frange[-1] / 1e6))
                    ax.set_xlabel('Frequency [MHz]')
                    ax.set_ylabel('Power [arb. dB]')
                    ax.xaxis.set_major_locator(MultipleLocator(base=10.0))
                    fig.tight_layout()

                    ## Save
                    tt = LWATime(time_tag, format='timetag')
                    mp = ImageMonitorPoint.from_figure(fig)
                    self.client.write_monitor_point('diagnostics/spectra',
                                                    mp,
                                                    timestamp=tt.unix)

                    last_save = time.time()

                time_tag += navg * self.ntime_gulp * (int(FS) // int(CHAN_BW))

                curr_time = time.time()
                process_time = curr_time - prev_time
                prev_time = curr_time
                self.perf_proclog.update({
                    'acquire_time': acquire_time,
                    'reserve_time': -1,
                    'process_time': process_time,
                })

        self.log.info("SpectraOp - Done")