def test_block_chainer(self):
    """Smoke-test BlockChainer: read a sigproc file, transpose, split the
    time axis as a view, append a custom pass-through block, and run."""
    with bf.Pipeline() as pipeline:
        chainer = bf.BlockChainer()
        chainer.blocks.read_sigproc([self.fil_file], gulp_nframe=100)
        chainer.blocks.transpose(['freq', 'time', 'pol'])
        # Views reinterpret the ring without copying data.
        chainer.views.split_axis('time', 1)
        # custom() wraps an arbitrary block constructor into the chain.
        chainer.custom(identity_block)()
        pipeline.run()
def run_benchmark(self):
    """Build and time a GPU pipeline of NUMBER_FFT forward/inverse FFT
    pairs over binary complex-float input; wall-clock time for the run
    is stored in ``self.total_clock_time``."""
    with bf.Pipeline() as pipeline:
        input_file = "numpy_data0.bin"
        chain = bf.BlockChainer()
        chain.blocks.binary_read(
            [input_file], gulp_size=GULP_SIZE,
            gulp_nframe=GULP_FRAME, dtype='cf32')
        # Move data to the GPU before the FFT stages.
        chain.blocks.copy('cuda', gulp_nframe=GULP_FRAME)
        for _ in range(NUMBER_FFT):
            # Forward FFT, then its inverse, restoring the axis label.
            chain.blocks.fft(['gulped'], axis_labels=['ft_gulped'],
                             gulp_nframe=GULP_FRAME_FFT)
            chain.blocks.fft(['ft_gulped'], axis_labels=['gulped'],
                             inverse=True, gulp_nframe=GULP_FRAME_FFT)
        # Only the pipeline execution itself is timed, not construction.
        t_start = timer()
        pipeline.run()
        t_stop = timer()
        self.total_clock_time = t_stop - t_start
import bifrost as bf
from argparse import ArgumentParser

if __name__ == "__main__":
    # Fix: the two adjacent literals are implicitly concatenated, so the
    # first needs a trailing space ("creating spectra", not "creatingspectra").
    parser = ArgumentParser(description="Command line utility for creating "
                                        "spectra from GuppiRaw files.")
    parser.add_argument('filenames', nargs='+', type=str,
                        help='Names of files to read')
    parser.add_argument('-f', default=1, dest='f_avg', type=int,
                        help='Number of channels to average together after FFT')
    parser.add_argument('-N', default=1, dest='n_int', type=int,
                        help='number of integrations per dump')
    args = parser.parse_args()

    print("Building pipeline")
    bc = bf.BlockChainer()
    bc.blocks.read_guppi_raw(args.filenames, core=0)
    bc.blocks.copy(space='cuda', core=1)
    # GPU-side processing, fused into one scope on device 0.
    # NOTE(review): the flattened source does not show where this with-block
    # ends; the host-side copy/write below are placed outside it per the
    # standard bifrost gpuspec example — confirm against the original layout.
    with bf.block_scope(fuse=True, gpu=0):
        bc.blocks.transpose(['time', 'pol', 'freq', 'fine_time'])
        bc.blocks.fft(axes='fine_time', axis_labels='fine_freq',
                      apply_fftshift=True)
        bc.blocks.detect('stokes')
        bc.views.merge_axes('freq', 'fine_freq')
        bc.blocks.reduce('freq', args.f_avg)
        bc.blocks.accumulate(args.n_int)
    bc.blocks.copy(space='cuda_host', core=2)
    bc.blocks.write_sigproc(core=3)

    print("Running pipeline")
    # Install signal handlers so Ctrl-C shuts the pipeline down cleanly.
    bf.get_default_pipeline().shutdown_on_signals()
    bf.get_default_pipeline().run()
    print("All done")