Code example #1
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_copy_stream(self, stream_name, tmpdir):
        stream = getattr(self, stream_name)
        filename = str(tmpdir.join('copy.hdf5'))
        with hdf5.open(filename, 'w', template=stream) as f5w:
            if stream_name == 'fh':
                assert f5w.bps == 2
            else:
                assert not hasattr(f5w, 'bps')
            self.check(stream, f5w)
            header0 = f5w.header0
            self.check(stream, header0)
            f5w.write(self.data)
            # Check repr works, though ignore the contents for now.
            repr(f5w)

        with h5py.File(filename, 'r') as h5:
            assert set(h5.keys()) == {'header', 'payload'}
            header = hdf5.HDF5Header.fromfile(h5)
            self.check(stream, header)
            assert header == header0
            payload = hdf5.HDF5Payload.fromfile(h5, header)
            assert_array_equal(payload.data, self.data)
            assert_array_equal(payload[:], self.data)

        with hdf5.open(filename, 'r') as f5r:
            self.check(stream, f5r)
            assert f5r.header0 == header0
            data = f5r.read()
            assert_array_equal(data, self.data)
            # Check repr works, though ignore the contents for now.
            repr(f5r)

        # Should also work when closed.
        repr(f5w)
        repr(f5r)
Code example #2
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_stream_as_output(self, stream_name, tmpdir):
        stream = getattr(self, stream_name)
        filename = str(tmpdir.join('copy.hdf5'))
        with hdf5.open(filename, 'w', template=stream) as f5w:
            stream.seek(0)
            stream.read(out=f5w)

        with hdf5.open(filename, 'r') as f5r:
            self.check(stream, f5r)
            data = f5r.read()
            assert_array_equal(data, self.data)
Code example #3
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_stream_as_input(self, tmpdir):
        # This is not perfect, since unless one gives a template, a writer
        # tries to unsqueeze data by default, which the wrapper cannot handle.
        # But as a proof of principle it works.  TODO: improve!
        stream = self.wrapped
        filename = str(tmpdir.join('copy.hdf5'))
        with hdf5.open(filename, 'w', template=stream) as f5w:
            f5w.write(stream)

        with hdf5.open(filename, 'r') as f5r:
            self.check(stream, f5r)
            data = f5r.read()
            assert_array_equal(data, self.data)
Code example #4
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_stream_shape_override(self, tmpdir):
        filename = str(tmpdir.join('doubled.hdf5'))
        shape = (self.wrapped.shape[0] * 2, ) + self.wrapped.shape[1:]
        with hdf5.open(filename, 'w', template=self.wrapped,
                       shape=shape) as f5w:
            assert f5w.shape == shape
            assert f5w.samples_per_frame == shape[0]
            self.check(self.wrapped, f5w, exclude=('shape', ))
            f5w.write(self.data)
            f5w.write(self.data)

        with hdf5.open(filename, 'r') as f5r:
            assert f5r.shape == shape
            for i in range(2):
                data = f5r.read(self.wrapped.shape[0])
                assert_array_equal(data, self.data)
Code example #5
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_complex_baseband(self, tmpdir):
        filename = str(tmpdir.join('copy.hdf5'))
        with baseband.vdif.open(baseband.data.SAMPLE_AROCHIME_VDIF,
                                'rs',
                                sample_rate=800 * u.MHz / 2048) as fh:
            data = fh.read()
            fh.seek(0)
            with hdf5.open(filename, 'w', template=fh) as f5w:
                assert f5w.complex_data
                fh.read(out=f5w)

        with hdf5.open(filename, 'r') as f5r:
            self.check(fh, f5r)
            recovered = f5r.read()

        assert_array_equal(recovered, data)
Code example #6
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_copy_stream_copy(self, stream_name, tmpdir):
        # Check that we can copy ourselves and not mess up depending
        # on raw vs encoded data.
        stream = getattr(self, stream_name)
        filename = str(tmpdir.join('copy.hdf5'))
        with hdf5.open(filename, 'w', template=stream) as f5w:
            f5w.write(self.data)

        copyname = str(tmpdir.join('copycopy.hdf5'))
        with hdf5.open(filename, 'r') as f5r:
            with hdf5.open(copyname, 'w', template=f5r) as f5w:
                if stream_name == 'fh':
                    assert f5w.bps == 2
                else:
                    assert not hasattr(f5w, 'bps')
                self.check(stream, f5w)
                header0 = f5w.header0
                self.check(stream, header0)
                f5w.write(self.data)
Code example #7
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_complex_stream_as_i1(self, tmpdir):
        # Not a particularly good idea, but just to show it is possible.
        filename = str(tmpdir.join('copy.hdf5'))
        with hdf5.open(filename,
                       'w',
                       template=self.wrapped,
                       encoded_dtype='i1') as f5w:
            assert not f5w.complex_data
            assert f5w.header0.encoded_dtype == 'i1'
            self.wrapped.read(out=f5w)

        with hdf5.open(filename, 'r') as f5r:
            self.check(self.wrapped, f5r,
                       ('sample_shape', 'dtype', 'sample_rate', 'time'))
            assert f5r.header0.encoded_dtype == 'i1'
            recovered = f5r.read()

        # Will not recover correctly, given the use of int, but should be
        # within tolerance.
        assert np.allclose(recovered, self.data, atol=0.5)
Code example #8
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_stream_as_f2(self, tmpdir):
        stream = self.wrapped
        filename = str(tmpdir.join('copy.hdf5'))
        with hdf5.open(filename, 'w', template=stream,
                       encoded_dtype='<f2') as f5w:
            assert f5w.header0.encoded_dtype == '<f2'
            assert f5w.header0.dtype == '=f4'
            assert not f5w.complex_data
            stream.seek(0)
            stream.read(out=f5w)

        with hdf5.open(filename, 'r') as f5r:
            self.check(f5w, f5r)
            assert f5r.header0.encoded_dtype == '<f2'
            assert f5r.dtype == '=f4'
            data = f5r.read()
            assert np.allclose(data,
                               self.data,
                               atol=0,
                               rtol=np.finfo('f2').eps)
Code example #9
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_complex_stream(self, tmpdir):
        filename = str(tmpdir.join('copy.hdf5'))
        with baseband.vdif.open(baseband.data.SAMPLE_AROCHIME_VDIF,
                                'rs',
                                sample_rate=800 * u.MHz / 2048) as fh:
            wrapped = SetAttribute(fh)
            data = wrapped.read()
            wrapped.seek(0)
            with hdf5.open(filename, 'w', template=wrapped) as f5w:
                assert f5w.complex_data
                assert f5w.header0.encoded_dtype == 'c8'
                wrapped.read(out=f5w)

        with hdf5.open(filename, 'r') as f5r:
            self.check(wrapped, f5r,
                       ('sample_shape', 'dtype', 'sample_rate', 'time'))
            assert f5r.header0.encoded_dtype == 'c8'
            recovered = f5r.read()

        # Data is stored at full complex64 precision ('c8'), so it should
        # be recovered exactly.
        assert_array_equal(recovered, data)
Code example #10
File: test_hdf5.py Project: rikvl/baseband-tasks
    def test_complex_stream_as_c4(self, tmpdir):
        filename = str(tmpdir.join('copy.hdf5'))
        with baseband.vdif.open(baseband.data.SAMPLE_AROCHIME_VDIF,
                                'rs',
                                sample_rate=800 * u.MHz / 2048) as fh:
            wrapped = SetAttribute(fh)
            data = wrapped.read()
            wrapped.seek(0)
            with hdf5.open(filename, 'w', template=wrapped,
                           encoded_dtype='c4') as f5w:
                assert f5w.complex_data
                assert f5w.header0.encoded_dtype == hdf5.payload.DTYPE_C4
                wrapped.read(out=f5w)

        with hdf5.open(filename, 'r') as f5r:
            self.check(wrapped, f5r,
                       ('sample_shape', 'dtype', 'sample_rate', 'time'))
            assert f5r.header0.encoded_dtype == hdf5.payload.DTYPE_C4
            recovered = f5r.read()

        # Cannot recover exactly, given scaling, but should be within
        # tolerance for float16.
        assert np.allclose(recovered, data, atol=0, rtol=np.finfo('f2').eps)
Code example #11
# Round the stream start time to whole seconds via an ISO string
# round trip, then offset it by dt.
start = Time(rh.time)
start_time_str = start.iso  # .iso already returns a string
new_time = Time(start_time_str, precision=0)  # precision must be 0-9; 0 drops sub-second digits
new_time_str = new_time.iso
start_time = Time(new_time_str) + dt
print("Opened stream reader with sample shape:", rh.sample_shape)
print("Starting at time:", start_time)

# Initialize the waterfall integrator (folding task).
WF = sr.Fold(rh, dispersion_measure, frequency, sideband, polyco_file,
             polarization, fullpol, start=start_time, nthreads=1)
print("Initialized waterfall integrator with shape:", WF.integrator.shape)

# EXPERIMENTAL: Create stream writer, using the integrator as the template.
out_name = ("/mnt/scratch-lustre/fsyed/B1133+16/Analysis2020/gk049e/hdf5_files/yy/"
            + fname[:-5] + ".hdf5")
h5w = hdf5.open(out_name, 'w', template=WF.integrator)
print("Output file name:", out_name)

# Determine how many samples to output at a time. I recommend 1.
nsamples = WF.integrator.shape[0]
nsamples_per_output = 1
times = []

# Start the timer
print("Starting timer")
runtime_start = time.time()

# Loop through integrator, creating one time bin at a time
try:

    while WF.integrator.tell() < nsamples - nsamples_per_output: