Example #1
def dada_rficlean_dedisperse_fold_pipeline():
    """Create the example pipeline and execute it, printing a status
    message when the execution has finished."""
    data_ring = Ring()
    clean_ring = Ring()
    histogram = np.zeros(100).astype(np.float64)
    datafilename = '/data1/mcranmer/data/real/2016_xaa.dada'
    imagename = '/data1/mcranmer/data/fake/test_picture.png'
    blocks = []
    blocks.append(DadaReadBlock(datafilename, data_ring, gulp_nframe=128))
    # blocks.append(KurtosisBlock(data_ring, clean_ring))
    blocks.append(WaterfallBlock(data_ring, imagename, gulp_nframe=128))
    threads = [threading.Thread(target=block.main) for block in blocks]
    print("Loaded threads")
    for thread in threads:
        thread.daemon = True
        print("Starting thread", thread)
        thread.start()
    for thread in threads:
        # Wait for each thread to terminate.
        thread.join()
    print("Done waterfall.")
Example #2
def test_add_block(self):
    """Try some syntax on an addition block."""
    my_ring = Ring()
    blocks = []
    blocks.append([TestingBlock([1, 2]), [], [0]])
    blocks.append([TestingBlock([1, 6]), [], [1]])
    blocks.append([TestingBlock([9, 2]), [], [2]])
    blocks.append([TestingBlock([6, 2]), [], [3]])
    blocks.append([TestingBlock([1, 2]), [], [4]])
    blocks.append([
        MultiAddBlock(),
        {'in_1': 0, 'in_2': 1, 'out_sum': 'first_sum'}])
    blocks.append([
        MultiAddBlock(),
        {'in_1': 2, 'in_2': 3, 'out_sum': 'second_sum'}])
    blocks.append([
        MultiAddBlock(),
        {'in_1': 'first_sum', 'in_2': 'second_sum', 'out_sum': 'third_sum'}])
    blocks.append([
        MultiAddBlock(),
        {'in_1': 'third_sum', 'in_2': 4, 'out_sum': my_ring}])

    def assert_result_of_addition(array):
        """Make sure that the above arrays add up to what we expect."""
        np.testing.assert_almost_equal(array, [18, 14])

    blocks.append((NumpyBlock(assert_result_of_addition, outputs=0),
                   {'in_1': my_ring}))
    Pipeline(blocks).main()
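As a sanity check on the expected value [18, 14], the adder tree above can be traced by hand with plain numpy, outside the pipeline:

import numpy as np

first_sum = np.array([1, 2]) + np.array([1, 6])    # [2, 8]
second_sum = np.array([9, 2]) + np.array([6, 2])   # [15, 4]
third_sum = first_sum + second_sum                 # [17, 12]
print(third_sum + np.array([1, 2]))                # [18 14]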
Example #3
def read_dedisperse_waterfall_pipeline():
    """Create the example pipeline and execute it, printing the folded
    histogram when the execution has finished."""
    data_ring = Ring()
    histogram = np.zeros(100).astype(np.float64)
    datafilename = ['/data1/mcranmer/data/fake/pulsar_DM1_256chan.fil']
    imagename = '/data1/mcranmer/data/fake/test_picture.png'
    blocks = []
    blocks.append(SigprocReadBlock(datafilename, data_ring, gulp_nframe=128))
    blocks.append(WaterfallBlock(data_ring, imagename, gulp_nframe=128))
    blocks.append(
        FoldBlock(data_ring,
                  histogram,
                  gulp_size=4096 * 100,
                  dispersion_measure=1))
    threads = [threading.Thread(target=block.main) for block in blocks]
    print("Loaded threads")
    for thread in threads:
        thread.daemon = True
        print("Starting thread", thread)
        thread.start()
    for thread in threads:
        # Wait for each thread to terminate.
        thread.join()
    # The test file has a large signal-to-noise ratio.
    print("Done waterfall.")
    print(histogram)
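Dedispersion and folding both rely on the frequency-dependent arrival delay of a dispersed pulse. A minimal sketch of the standard cold-plasma delay used to align channels; the function name, channel range, and reference-frequency convention here are illustrative, not taken from the pipeline above:

import numpy as np

def dispersion_delays(freqs_mhz, dm, ref_mhz=None):
    """Per-channel delay in seconds for a given dispersion measure.

    Uses the standard approximation delay ~ 4.149e3 s * DM * (nu/MHz)**-2,
    referenced to the highest frequency by default.
    """
    freqs_mhz = np.asarray(freqs_mhz, dtype=np.float64)
    if ref_mhz is None:
        ref_mhz = freqs_mhz.max()
    return 4.149e3 * dm * (freqs_mhz**-2 - ref_mhz**-2)

# Example: 256 channels spanning 1400-1500 MHz at DM = 1 pc cm^-3.
delays = dispersion_delays(np.linspace(1400, 1500, 256), dm=1)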
Example #4
def read_dedisperse_and_fold_pipeline():
    """Create the example pipeline and execute it, asserting that the
    folded histogram contains a strong pulse when the execution has
    finished."""
    data_ring = Ring()
    histogram = np.zeros(100).astype(np.float64)
    filename = ['/data1/mcranmer/data/fake/simple_pulsar_DM10_128ch.fil']
    blocks = []
    blocks.append(SigprocReadBlock(filename, data_ring))
    blocks.append(DedisperseBlock(data_ring))
    blocks.append(
        FoldBlock(data_ring,
                  histogram,
                  gulp_size=4096 * 128 * 100,
                  dispersion_measure=10))
    threads = [threading.Thread(target=block.main) for block in blocks]

    for thread in threads:
        thread.daemon = True
        thread.start()
    for thread in threads:
        # Wait for each thread to terminate.
        thread.join()
    # The test file has a large signal-to-noise ratio.
    assert np.max(histogram) / np.min(histogram) > 3
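FoldBlock accumulates power into pulse-phase bins, which is why a strong pulsar yields a peaked rather than flat histogram. A minimal numpy sketch of the folding step itself, with made-up names, period, and sample rate, and no dedispersion applied:

import numpy as np

def fold(timeseries, period_s, sample_rate_hz, nbins=100):
    """Accumulate samples into pulse-phase bins for a known period."""
    times = np.arange(len(timeseries)) / sample_rate_hz
    phases = (times / period_s) % 1.0
    bins = (phases * nbins).astype(int)
    profile = np.zeros(nbins)
    np.add.at(profile, bins, timeseries)  # unbuffered in-place accumulation
    return profile

# A pulse repeating at the folding period produces a peaked profile.
profile = fold(np.random.rand(100000), period_s=0.1, sample_rate_hz=10000.0)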
Example #5
def __init__(self, blocks):
    """Store the block list and create a Ring for every unique ring name."""
    self.blocks = blocks
    self.rings = {}
    for index in self.unique_ring_names():
        if isinstance(index, Ring):
            # A Ring object was passed directly; key it by its string form.
            self.rings[str(index)] = index
        else:
            # Otherwise the name refers to a ring we must create ourselves.
            self.rings[index] = Ring()
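unique_ring_names is not shown above. A plausible minimal sketch of such a helper, assuming each entry in blocks is either a (block, inputs, outputs) sequence or a (block, connections-dict) pair as in the earlier examples; this is an illustration, not the library's implementation:

def unique_ring_names(self):
    """Yield each distinct ring name or Ring object used by any block."""
    seen = set()
    for entry in self.blocks:
        connections = entry[1:]
        if isinstance(connections[0], dict):
            # (block, {'in_1': name, ...}) style: ring names are the values.
            names = connections[0].values()
        else:
            # (block, [inputs], [outputs]) style: flatten both lists.
            names = [name for group in connections for name in group]
        for name in names:
            key = str(name)
            if key not in seen:
                seen.add(key)
                yield name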
Example #6
def test_pass_rings(self):
    """Pass rings directly instead of naming or numbering them."""
    block_set_one = []
    block_set_two = []
    ring1 = Ring()
    block_set_one.append((TestingBlock([1, 2, 3]), [], [ring1]))
    block_set_two.append((WriteAsciiBlock('.log.txt', gulp_size=3 * 4), [ring1], []))
    open('.log.txt', 'w').close()
    # The shared ring carries the data from the first pipeline to the second.
    Pipeline(block_set_one).main()
    Pipeline(block_set_two).main()
    result = np.loadtxt('.log.txt').astype(np.float32)
    np.testing.assert_almost_equal(result, [1, 2, 3])
Example #7
def read_and_fold_pipeline_128chan():
    """Create a pipeline that reads in a 128-channel sigproc file and
    folds it, asserting that the resulting histogram is not flat."""
    raw_data_ring = Ring()
    histogram = np.zeros(100).astype(np.float64)
    filenames = ['/data1/mcranmer/data/fake/simple_pulsar_DM0_128ch.fil']
    blocks = []
    blocks.append(SigprocReadBlock(filenames, raw_data_ring))
    blocks.append(FoldBlock(raw_data_ring, histogram, gulp_size=4096 * 100))
    threads = [threading.Thread(target=block.main) for block in blocks]

    for thread in threads:
        thread.daemon = True
        thread.start()
    for thread in threads:
        # Wait for each thread to terminate.
        thread.join()
    # Make sure that the histogram is not flat.
    assert np.max(histogram) / np.min(histogram) > 3
Example #8
def main(argv):
    global QUEUE
    
    parser = argparse.ArgumentParser(
                 description="Data recorder for slow/fast visibility data"
                 )
    parser.add_argument('-a', '--address', type=str, default='127.0.0.1',
                        help='IP address to listen to')
    parser.add_argument('-p', '--port', type=int, default=10000,
                        help='UDP port to receive data on')
    parser.add_argument('-o', '--offline', action='store_true',
                        help='run in offline mode using the specified file to read from')
    parser.add_argument('-c', '--cores', type=str, default='0,1,2,3,4,5',
                        help='comma separated list of cores to bind to')
    parser.add_argument('-g', '--gulp-size', type=int, default=1,
                        help='gulp size for ring buffers')
    parser.add_argument('-l', '--logfile', type=str,
                        help='file to write logging to')
    parser.add_argument('-r', '--record-directory', type=str, default=os.path.abspath('.'),
                        help='directory to save recorded files to')
    parser.add_argument('-t', '--record-directory-quota', type=quota_size, default=0,
                        help='quota for the recording directory, 0 disables the quota')
    parser.add_argument('-q', '--quick', action='store_true',
                        help='run in fast visibility mode')
    parser.add_argument('-i', '--nint-per-file', type=int, default=1,
                        help='number of integrations to write per measurement set')
    parser.add_argument('-n', '--no-tar', action='store_true',
                        help='do not store the measurement sets inside a tar file')
    parser.add_argument('-f', '--fork', action='store_true',
                        help='fork and run in the background')
    args = parser.parse_args()
    
    # Process the -q/--quick option
    station = ovro
    if args.quick:
        args.nint_per_file = max([10, args.nint_per_file])
        station = ovro.select_subset(list(range(1, 48+1)))
        
    # Fork, if requested
    if args.fork:
        stderr = '/tmp/%s_%i.stderr' % (os.path.splitext(os.path.basename(__file__))[0], args.port)
        daemonize(stdin='/dev/null', stdout='/dev/null', stderr=stderr)
        
    # Setup logging
    log = logging.getLogger(__name__)
    logFormat = logging.Formatter('%(asctime)s [%(levelname)-8s] %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    logFormat.converter = time.gmtime
    if args.logfile is None:
        logHandler = logging.StreamHandler(sys.stdout)
    else:
        logHandler = LogFileHandler(args.logfile)
    logHandler.setFormatter(logFormat)
    log.addHandler(logHandler)
    log.setLevel(logging.DEBUG)
    
    log.info("Starting %s with PID %i", os.path.basename(__file__), os.getpid())
    log.info("Cmdline args:")
    for arg in vars(args):
        log.info("  %s: %s", arg, getattr(args, arg))
        
    # Setup the subsystem ID
    mcs_id = 'drv'
    if args.quick:
        mcs_id += 'f'
    else:
        mcs_id += 's'
    base_ip = int(args.address.split('.')[-1], 10)
    base_port = args.port % 100
    mcs_id += str(base_ip*100 + base_port)
    
    # Setup the cores and GPUs to use
    cores = [int(v, 10) for v in args.cores.split(',')]
    log.info("CPUs:         %s", ' '.join([str(v) for v in cores]))
    
    # Setup the socket, if needed
    isock = None
    if not args.offline:
        iaddr = Address(args.address, args.port)
        isock = UDPSocket()
        isock.bind(iaddr)
        
    # Setup the rings
    capture_ring = Ring(name="capture")
    
    # Setup antennas
    nant = len(station.antennas)
    nbl = nant*(nant+1)//2
    
    # Setup the recording directory, if needed
    if not os.path.exists(args.record_directory):
        status = os.system('mkdir -p %s' % args.record_directory)
        if status != 0:
            raise RuntimeError("Unable to create directory: %s" % args.record_directory)
    else:
        if not os.path.isdir(os.path.realpath(args.record_directory)):
            raise RuntimeError("Cannot record to a non-directory: %s" % args.record_directory)
            
    # Setup the blocks
    ops = []
    if args.offline:
        ops.append(DummyOp(log, isock, capture_ring, (NPIPELINE//16)*nbl,
                           ntime_gulp=args.gulp_size, slot_ntime=(10 if args.quick else 6),
                           fast=args.quick, core=cores.pop(0)))
    else:
        ops.append(CaptureOp(log, isock, capture_ring, (NPIPELINE//16)*nbl,   # two pipelines/recorder
                             ntime_gulp=args.gulp_size, slot_ntime=(10 if args.quick else 6),
                             fast=args.quick, core=cores.pop(0)))
    if not args.quick:
        ops.append(SpectraOp(log, mcs_id, capture_ring,
                             ntime_gulp=args.gulp_size, core=cores.pop(0)))
        ops.append(BaselineOp(log, mcs_id, station, capture_ring,
                              ntime_gulp=args.gulp_size, core=cores.pop(0)))
    ops.append(StatisticsOp(log, mcs_id, capture_ring,
                            ntime_gulp=args.gulp_size, core=cores.pop(0)))
    ops.append(WriterOp(log, station, capture_ring,
                        ntime_gulp=args.gulp_size, fast=args.quick, core=cores.pop(0)))
    ops.append(GlobalLogger(log, mcs_id, args, QUEUE, quota=args.record_directory_quota))
    ops.append(VisibilityCommandProcessor(log, mcs_id, args.record_directory, QUEUE,
                                          nint_per_file=args.nint_per_file,
                                          is_tarred=not args.no_tar))
    
    # Setup the threads
    threads = [threading.Thread(target=op.main) for op in ops]
    
    # Setup signal handling
    shutdown_event = setup_signal_handling(ops)
    ops[0].shutdown_event = shutdown_event
    ops[-2].shutdown_event = shutdown_event
    ops[-1].shutdown_event = shutdown_event
    
    # Launch!
    log.info("Launching %i thread(s)", len(threads))
    for thread in threads:
        #thread.daemon = True
        thread.start()
        
    while not shutdown_event.is_set():
        signal.pause()
    log.info("Shutdown, waiting for threads to join")
    for thread in threads:
        thread.join()
    log.info("All done")
    return 0
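Both recorders pass type=quota_size to argparse for the quota option, but that converter is not shown. A plausible minimal sketch, assuming it accepts a plain byte count or a value with a size suffix; this is a hypothetical implementation, not the project's own:

def quota_size(value):
    """Parse a quota such as '0', '1024', '10GB', or '1TB' into bytes."""
    value = value.strip().upper()
    suffixes = {'KB': 1024, 'MB': 1024**2, 'GB': 1024**3, 'TB': 1024**4}
    for suffix, scale in suffixes.items():
        if value.endswith(suffix):
            return int(float(value[:-len(suffix)]) * scale)
    return int(value)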
Example #9
def main(argv):
    parser = argparse.ArgumentParser(
        description="Data recorder for power beams")
    parser.add_argument('-a',
                        '--address',
                        type=str,
                        default='127.0.0.1',
                        help='IP address to listen to')
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=10000,
                        help='UDP port to receive data on')
    parser.add_argument(
        '-o',
        '--offline',
        action='store_true',
        help='run in offline mode using the specified file to read from')
    parser.add_argument('-b',
                        '--beam',
                        type=int,
                        default=1,
                        help='beam to receive data for')
    parser.add_argument('-c',
                        '--cores',
                        type=str,
                        default='0,1,2,3',
                        help='comma separated list of cores to bind to')
    parser.add_argument('-g',
                        '--gulp-size',
                        type=int,
                        default=1000,
                        help='gulp size for ring buffers')
    parser.add_argument('-l',
                        '--logfile',
                        type=str,
                        help='file to write logging to')
    parser.add_argument('-r',
                        '--record-directory',
                        type=str,
                        default=os.path.abspath('.'),
                        help='directory to save recorded files to')
    parser.add_argument(
        '-q',
        '--record-directory-quota',
        type=quota_size,
        default=0,
        help='quota for the recording directory, 0 disables the quota')
    parser.add_argument('-f',
                        '--fork',
                        action='store_true',
                        help='fork and run in the background')
    args = parser.parse_args()

    # Fork, if requested
    if args.fork:
        # Key the stderr file on the beam number so concurrent instances
        # do not collide.
        stderr = '/tmp/%s_%i.stderr' % (os.path.splitext(
            os.path.basename(__file__))[0], args.beam)
        daemonize(stdin='/dev/null', stdout='/dev/null', stderr=stderr)

    # Setup logging
    log = logging.getLogger(__name__)
    logFormat = logging.Formatter('%(asctime)s [%(levelname)-8s] %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    logFormat.converter = time.gmtime
    if args.logfile is None:
        logHandler = logging.StreamHandler(sys.stdout)
    else:
        logHandler = LogFileHandler(args.logfile)
    logHandler.setFormatter(logFormat)
    log.addHandler(logHandler)
    log.setLevel(logging.DEBUG)

    log.info("Starting %s with PID %i", os.path.basename(__file__),
             os.getpid())
    log.info("Cmdline args:")
    for arg in vars(args):
        log.info("  %s: %s", arg, getattr(args, arg))

    # Setup the subsystem ID
    mcs_id = 'dr%i' % args.beam

    # Setup the cores and GPUs to use
    cores = [int(v, 10) for v in args.cores.split(',')]
    log.info("CPUs:         %s", ' '.join([str(v) for v in cores]))

    # Setup the socket, if needed
    isock = None
    if not args.offline:
        iaddr = Address(args.address, args.port)
        isock = UDPSocket()
        isock.bind(iaddr)

    # Setup the rings
    capture_ring = Ring(name="capture")
    write_ring = Ring(name="write")

    # Setup the recording directory, if needed
    if not os.path.exists(args.record_directory):
        status = os.system('mkdir -p %s' % args.record_directory)
        if status != 0:
            raise RuntimeError("Unable to create directory: %s" %
                               args.record_directory)
    else:
        if not os.path.isdir(os.path.realpath(args.record_directory)):
            raise RuntimeError("Cannot record to a non-directory: %s" %
                               args.record_directory)

    # Setup the blocks
    ops = []
    if args.offline:
        ops.append(
            DummyOp(log,
                    isock,
                    capture_ring,
                    NPIPELINE,
                    ntime_gulp=args.gulp_size,
                    slot_ntime=1000,
                    core=cores.pop(0)))
    else:
        ops.append(
            CaptureOp(log,
                      isock,
                      capture_ring,
                      NPIPELINE,
                      ntime_gulp=args.gulp_size,
                      slot_ntime=1000,
                      core=cores.pop(0)))
    ops.append(
        SpectraOp(log,
                  mcs_id,
                  capture_ring,
                  ntime_gulp=args.gulp_size,
                  core=cores.pop(0)))
    ops.append(
        StatisticsOp(log,
                     mcs_id,
                     capture_ring,
                     ntime_gulp=args.gulp_size,
                     core=cores.pop(0)))
    ops.append(
        WriterOp(log,
                 capture_ring,
                 beam=args.beam,
                 ntime_gulp=args.gulp_size,
                 core=cores.pop(0)))
    ops.append(
        GlobalLogger(log,
                     mcs_id,
                     args,
                     QUEUE,
                     quota=args.record_directory_quota))
    ops.append(
        PowerBeamCommandProcessor(log, mcs_id, args.record_directory, QUEUE))

    # Setup the threads
    threads = [threading.Thread(target=op.main) for op in ops]

    # Setup signal handling
    shutdown_event = setup_signal_handling(ops)
    ops[0].shutdown_event = shutdown_event
    ops[-2].shutdown_event = shutdown_event
    ops[-1].shutdown_event = shutdown_event

    # Launch!
    log.info("Launching %i thread(s)", len(threads))
    for thread in threads:
        #thread.daemon = True
        thread.start()

    while not shutdown_event.is_set():
        signal.pause()
    log.info("Shutdown, waiting for threads to join")
    for thread in threads:
        thread.join()
    log.info("All done")
    return 0
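setup_signal_handling is shared by both recorders but not shown here. A minimal sketch of what it might do, assuming it installs handlers that trip a shared threading.Event which the main loop watches; this is a hypothetical implementation, not the project's own:

import signal
import threading

def setup_signal_handling(ops):
    """Install SIGINT/SIGTERM handlers that set a shared shutdown event."""
    shutdown_event = threading.Event()

    def handler(signum, frame):
        shutdown_event.set()

    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, handler)
    # 'ops' could be used for per-op cleanup; it is unused in this sketch.
    return shutdown_event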