Example #1
    def findDatafiles(self, path, startDate=None, endDate=None):
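        """Return the list of dates covered by the Digital RF data under
        path, optionally clipped to the [startDate, endDate] range."""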

        if not os.path.isdir(path):
            return []

        try:
            digitalReadObj = digital_rf_hdf5.read_hdf5(path, load_all_metadata=True)
        except:
            digitalReadObj = digital_rf_hdf5.read_hdf5(path)

        channelNameList = digitalReadObj.get_channels()

        if not channelNameList:
            return []

        metadata_dict = digitalReadObj.get_rf_file_metadata(channelNameList[0])

        sample_rate = metadata_dict['sample_rate'][0]

        this_metadata_file = digitalReadObj.get_metadata(channelNameList[0])

        try:
            timezone = this_metadata_file['timezone'].value
        except:
            timezone = 0

        # get_bounds() returns a (start, end) pair of sample indexes; convert
        # each bound to UTC seconds before applying the timezone offset
        start_bound, end_bound = digitalReadObj.get_bounds(channelNameList[0])
        startUTCSecond = start_bound / sample_rate - timezone
        endUTCSecond = end_bound / sample_rate - timezone

        startDatetime = datetime.datetime.utcfromtimestamp(startUTCSecond)
        endDatetime = datetime.datetime.utcfromtimestamp(endUTCSecond)

        if not startDate:
            startDate = startDatetime.date()

        if not endDate:
            endDate = endDatetime.date()

        dateList = []

        thisDatetime = startDatetime

        while thisDatetime <= endDatetime:

            thisDate = thisDatetime.date()

            if thisDate < startDate:
                # advance before continuing, otherwise the loop never terminates
                thisDatetime += datetime.timedelta(1)
                continue

            if thisDate > endDate:
                break

            dateList.append(thisDate)
            thisDatetime += datetime.timedelta(1)

        return dateList
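
A minimal usage sketch (the wrapper class name and data path are assumptions for illustration, not part of the source):

    reader = USRPReader()  # hypothetical class that defines findDatafiles()
    dates = reader.findDatafiles('/data/usrp_ch0',
                                 startDate=datetime.date(2014, 3, 9),
                                 endDate=datetime.date(2014, 3, 10))
    print(dates)  # e.g. [datetime.date(2014, 3, 9), datetime.date(2014, 3, 10)]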
Example #2
def analyze_prc(dirn="",channel="hfrx",idx0=0,an_len=1000000,clen=10000,station=0,Nranges=1000,cache=True):
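    # Decode a pseudorandom-coded recording: range-compress an_len samples in
    # blocks of clen using the precomputed estimation matrix B, then FFT along
    # the block axis (with a Blackman-Harris window) to get a range-Doppler map.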
    g = []
    if type(dirn) is str:
        g = drf.read_hdf5(dirn)
    else:
        g = dirn

    code = stuffr.create_pseudo_random_code(len=clen,seed=station)
    N = an_len // clen  # integer number of code-length blocks
    res = numpy.zeros([N,Nranges],dtype=numpy.complex64)
    r = stuffr.create_estimation_matrix(code=code,cache=cache,rmax=Nranges)
    B = r['B']
    spec = numpy.zeros([N,Nranges],dtype=numpy.complex64)
    
    zspec = numpy.zeros(clen,dtype=numpy.float64)

    for i in numpy.arange(N):
        z = g.read_vector_c81d(idx0+i*clen,clen,channel)
        z = z-numpy.median(z) # remove dc
        res[i,:] = numpy.dot(B,z)
    for i in numpy.arange(Nranges):
        spec[:,i] = numpy.fft.fftshift(numpy.fft.fft(scipy.signal.blackmanharris(N)*res[:,i]))

    ret = {}
    ret['res'] = res
    ret['spec'] = spec
    return ret
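
A hedged usage sketch (the directory path is hypothetical; inspecting the returned dict is one plausible follow-up):

    out = analyze_prc('/data/prc_rx', channel='hfrx', idx0=0,
                      an_len=1000000, clen=10000, station=0)
    # out['res'] is the range-compressed matrix, out['spec'] the range-Doppler spectrum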
    
    next_sample = start_index
    t2 = time.time()
    count = 0
    for i in range(1000):
        arr = test_read_obj.read_vector_raw(next_sample, read_size, channel_name)
        next_sample += read_size
        count += 1
        if count % 100 == 0:
            print('%i out of 1000' % (count))
    seconds = time.time() - t2
    speedMB = (read_size*1000*4)/(1.0E6*seconds)
    print('Total read time %i seconds, speed %1.2f MB/s' % (int(seconds), speedMB))

t = time.time()
test_read_obj = digital_rf_hdf5.read_hdf5('/tmp/benchmark', load_all_metadata=True)
print('metadata analysis took %f seconds' % (time.time() - t))

print("\nTest 0 - read Hdf5 files with no compress, no checksum - channel name = junk0")
test_read('junk0', test_read_obj)

print("\nTest 0.1 - read Hdf5 files with no compress, no checksum, small read size - channel name = junk0")
test_read('junk0', test_read_obj, 1000)

print("\nTest call to reload to update metadata")
t = time.time()
test_read_obj.reload()
print('reload took %f seconds' % (time.time() - t))


print("\nTest 1 -read Hdf5 files with no compress, but with level 9 checksum - channel name = junk1")
Example #4
data = numpy.array(base_data, numpy.int64)
for i in range(100):
    data_object.rf_write_blocks(data, global_sample_arr, block_sample_arr)
    global_sample_arr += 205
data_object.close()
print("done test 4.1")

# sleep for 4 seconds to make sure system knows all files closed
time.sleep(4)

# read
t = time.time()
# test relative paths
pwd = os.getcwd()
os.chdir('/tmp')
testReadObj = digital_rf_hdf5.read_hdf5(['hdf5', 'hdf52'])
print('init took %f' % (time.time() - t))
time.sleep(1)
t = time.time()
testReadObj.reload()
print('init took %f' % (time.time() - t))
channels = testReadObj.get_channels()
print(channels)

print('working on channel4.1')
start_index, end_index = testReadObj.get_bounds('junk4.1')
print((start_index, end_index))
print('calling get_continuous_blocks')
cont_data_arr = testReadObj.get_continuous_blocks(139436843434, 139436843538, 'junk4.1')
print(cont_data_arr)
# normal read
Example #5
parser.add_option("-a",
                  "--ascii_out",
                  dest="ascii_out",
                  action="store_true",
                  help="output delays in ascii")

parser.add_option("-n",
                  "--latest",
                  dest="latest",
                  action="store_true",
                  help="Latest recorded delay")

(op, args) = parser.parse_args()

d = drf.read_hdf5(op.dir)
b0 = d.get_bounds("000")
b1 = d.get_bounds("001")
sample_rate = 100.0
#print(b0)
#print(b1)

t_now = time.time()
if op.baseline_time < 0.0:
    op.baseline_time = t_now - 5 * 60.0

if op.t0 < 0.0:
    op.t0 = t_now - 5 * 60.0

if op.t1 < 0.0:
    op.t1 = b0[1] / sample_rate - 2
# set up fake realtime data by copying files
os.system('rm -rf /tmp/hdf52')
os.system('mkdir /tmp/hdf52')
os.system('mkdir /tmp/hdf52/junk1')
os.system('cp -r /tmp/hdf5/junk0/2014-03-09T12-30-30 /tmp/hdf52/junk1/')
os.system('mkdir /tmp/hdf52/junk1/2014-03-09T12-30-34')
files = glob.glob('/tmp/hdf5/junk0/2014-03-09T12-30-34/*')
files.sort()
for thisFile in files[:5]:
    shutil.copy(thisFile, '/tmp/hdf52/junk1/2014-03-09T12-30-34/')

# sleep for 4 seconds to make sure system knows all files closed
time.sleep(4)

# read
testReadObj = digital_rf_hdf5.read_hdf5(['/tmp/hdf52'], load_all_metadata=False)
channels = testReadObj.get_channels()
print(channels)

print('working on junk1')
start_index, end_index = testReadObj.get_bounds('junk1')
print(('bounds are: ', start_index, end_index))
if update:
    cont_data_arr = testReadObj.get_continuous_blocks(start_index, end_index, 'junk1')
    print(('continuous data is ', cont_data_arr))
result = testReadObj.read_vector_raw(start_index, end_index-start_index, 'junk1')
print('got %i samples' % (len(result)))


# simulate realtime update
time.sleep(5)
def detect_meteors(rf_dir, id_dir, noise_dir, output_dir,
                   t0=None, t1=None, rxch='zenith-l', txch='tx-h'):
    """Function to detect and summarize meteor head echoes.


    Arguments
    ---------

    rf_dir : string or list
        RF data directory or directories.

    id_dir : string
        ID code metadata directory.

    noise_dir : string
        RX noise metadata directory.

    output_dir : string
        Meteor data output directory.

    t0 : float, optional
        Start time, seconds since epoch. If None, start at beginning of data.

    t1 : float, optional
        End time, seconds since epoch. If None, end at end of data.

    rxch : string, optional
        Receiver channel to process.

    txch : string, optional
        Transmitter channel.

    """
    rfo = drf.read_hdf5(rf_dir)
    ido = dmd.read_digital_metadata(id_dir)
    no = dmd.read_digital_metadata(noise_dir)

    # the sample rate is needed on every path to convert t0/t1 to sample indexes
    fs = rfo.get_metadata(rxch)['sample_rate'].value

    if t0 is None or t1 is None:
        bounds = []
        bounds.append(rfo.get_bounds(rxch))
        bounds.append(rfo.get_bounds(txch))
        bounds.append(ido.get_bounds())
        bounds.append(no.get_bounds())
        bounds = np.asarray(bounds)

        ss = np.max(bounds[:, 0])
        se = np.min(bounds[:, 1])

    # s0 and s1 must be defined even when both t0 and t1 are given
    if t0 is None:
        s0 = ss
    else:
        s0 = int(np.round(t0*fs))

    if t1 is None:
        s1 = se
    else:
        s1 = int(np.round(t1*fs))

    tmm = TimingModeManager.TimingModeManager()
    if os.path.exists('/tmp/tmm.hdf5'):
        tmm.loadFromHdf5('/tmp/tmm.hdf5', skip_lowlevel=True)
    else:
        tmm.loadFromHdf5(skip_lowlevel=True)

    for k, (tx, rx) in enumerate(data_generator(rfo, ido, no, tmm, s0, s1, rxch, txch)):
        #FIXME call processing functions here
        pass
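
A hedged usage sketch (all directory paths are hypothetical):

    detect_meteors(rf_dir='/data/rf',
                   id_dir='/data/metadata/id',
                   noise_dir='/data/metadata/noise',
                   output_dir='/data/meteors',
                   t0=None, t1=None,
                   rxch='zenith-l', txch='tx-h')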
Example #8
# set up fake realtime data by copying files
os.system('rm -rf /tmp/hdf52')
os.system('mkdir /tmp/hdf52')
os.system('mkdir /tmp/hdf52/junk1')
os.system('cp -r /tmp/hdf5/junk0/2014-03-09T12-30-30 /tmp/hdf52/junk1/')
os.system('mkdir /tmp/hdf52/junk1/2014-03-09T12-30-34')
files = glob.glob('/tmp/hdf5/junk0/2014-03-09T12-30-34/*')
files.sort()
for thisFile in files[:5]:
    shutil.copy(thisFile, '/tmp/hdf52/junk1/2014-03-09T12-30-34/')

# sleep for 4 seconds to make sure system knows all files closed
time.sleep(4)

# read
testReadObj = digital_rf_hdf5.read_hdf5(['/tmp/hdf52'],
                                        load_all_metadata=update)
channels = testReadObj.get_channels()
print(channels)

print('working on junk1')
start_index, end_index = testReadObj.get_bounds('junk1')
print(('bounds are: ', start_index, end_index))
cont_data_arr = testReadObj.get_continuous_blocks(start_index, end_index,
                                                  'junk1')
print(('continuous data is ', cont_data_arr))
result = testReadObj.read_vector_raw(cont_data_arr[0][0], cont_data_arr[0][1],
                                     'junk1')
print('got %i samples' % (len(result)))

# simulate realtime update
time.sleep(5)
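
The realtime pattern these snippets exercise, reduced to a minimal sketch (channel name as above; the sleep length is the same guess the tests use):

    time.sleep(4)         # give the writer time to close new files
    testReadObj.reload()  # pick up files written since the object was created
    start_index, end_index = testReadObj.get_bounds('junk1')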
Example #9
                      default=6000000,
                      type="int",
                      help="An length (%default)")
    parser.add_option("-c", "--code_length",
                      dest="codelen",
                      action="store",
                      default=10000,
                      type="int",
                      help="Code length (%default)")

    matplotlib.use('Agg')

    (op, args) = parser.parse_args()
    os.system("mkdir -p %s/hfradar"%(op.datadir))
    
    d = drf.read_hdf5(op.datadir)
    sr = 100e3#d.get_metadata("hfrx")["sample_rate"].value
    b = d.get_bounds(op.channel)
    print(b)
    idx = numpy.array(b[0])
    if os.path.isfile("%s/hfradar/last.dat"%(op.datadir)):
        idx = numpy.fromfile("%s/hfradar/last.dat" % (op.datadir), dtype=numpy.int64)
    while True:
        d = drf.read_hdf5(op.datadir)
        b = d.get_bounds(op.channel)
        if b[0] > idx:
            idx = numpy.array(b[0])

        while idx+op.anlen > b[1]:
            d = drf.read_hdf5(op.datadir)
            b = d.get_bounds(op.channel)
Example #11
0
    # parse the command line arguments
    try:
        opts, args = parse_command_line()
    except:
        traceback.print_exc(file=sys.stdout)

        sys.exit()

    for idx,p in enumerate(opts.input):
        print("drf path %s" % p)

        try:
            print "loading data"

            dio = digital_rf_hdf5.read_hdf5(p)

            chans = dio.get_channels()

            if opts.channel == []:
                channel = chans[0]
                sub_chan= 0
            else:
                chstr = opts.channel[idx].split(':')
                channel = chstr[0]
                sub_chan = int(chstr[1])

            ustart, ustop = dio.get_bounds(channel)
            print(ustart, ustop)

            cfreq = dio.get_metadata(channel)['center_frequencies'].value[0]
"""example_digital_rf_hdf5.py is an example script using the digital_rf_hdf5 module

Assumes one of the example Digital RF scripts has already been run (C: example_rf_write_hdf5, or
Python: example_digital_rf_hdf5.py)

$Id: example_read_digital_rf.py 814 2015-09-10 15:52:10Z brideout $
"""
# Millstone imports
import digital_rf_hdf5

testReadObj = digital_rf_hdf5.read_hdf5(['/tmp/hdf5'])
channels = testReadObj.get_channels()
if len(channels) == 0:
    raise IOError, """Please run one of the example write scripts 
        C: example_rf_write_hdf5, or Python: example_digital_rf_hdf5.py
        before running this example"""
print('found channels: %s' % (str(channels)))

print('working on channel junk0')
start_index, end_index = testReadObj.get_bounds('junk0')
print('get_bounds returned %i - %i' % (start_index, end_index))
cont_data_arr = testReadObj.get_continuous_blocks(start_index, end_index,
                                                  'junk0')
print(
    'The following is a list of all continuous block of data in (start_sample, length) format: %s'
    % (str(cont_data_arr)))

# read data - the first 3 reads of four should succeed, the fourth read will be beyond the available data
start_sample = cont_data_arr[0][0]
for i in range(4):
    try:
Example #14
        elif opt in ('-l'):
            log_scale = True
        elif opt in ('-d'):
            detrend = True
        elif opt in ('-m'):
            cl, bl = val.split(':')
            msl_code_length = int(cl)
            msl_baud_length = int(bl)

    for f in input_files:
        print("file %s" % f)

        try:
            print "loading data"

            drf = digital_rf_hdf5.read_hdf5(f)

            chans = drf.get_channels()
            if channel == '':
                chidx = 0
            else:
                chidx = chans.index(channel)

            ustart, ustop = drf.get_bounds(chans[chidx])
            print(ustart, ustop)

            md = drf.get_rf_file_metadata(chans[chidx])
            print(md)

            sfreq = md['sample_rate']
Example #15
parser.add_option(
    "-1", "--t1", dest="t1", action="store", default=-1.0, type="float", help="End time in unix seconds, default: now"
)

parser.add_option("-p", "--plot", dest="plot", action="store_true", help="plot relative time delay")

parser.add_option("-o", "--overview_plot", dest="overview_plot", action="store_true", help="plot sparse overview plot")

parser.add_option("-a", "--ascii_out", dest="ascii_out", action="store_true", help="output delays in ascii")

parser.add_option("-n", "--latest", dest="latest", action="store_true", help="Latest recorded delay")

(op, args) = parser.parse_args()

d = drf.read_hdf5(op.dir)
b0 = d.get_bounds("000")
b1 = d.get_bounds("001")
sample_rate = 100.0
# print(b0)
# print(b1)

t_now = time.time()
if op.baseline_time < 0.0:
    op.baseline_time = t_now - 5 * 60.0

if op.t0 < 0.0:
    op.t0 = t_now - 5 * 60.0

if op.t1 < 0.0:
    op.t1 = b0[1] / sample_rate - 2
os.system("rm -rf /tmp/hdf52/junk4.1 ; mkdir /tmp/hdf52/junk4.1");
data_object = digital_rf_hdf5.write_hdf5_channel("/tmp/hdf52/junk4.1", 'i8', 40, files_per_directory, start_global_index,
                                                 sample_rate, "FAKE_UUID_2", 1, False, True);
data = numpy.array(base_data, numpy.int64)
for i in range(100):
    data_object.rf_write_blocks(data, global_sample_arr, block_sample_arr)
    global_sample_arr += 205
data_object.close()
print("done test 4.1")

# sleep for 4 seconds to make sure system knows all files closed
time.sleep(4)

# read
t = time.time()
testReadObj = digital_rf_hdf5.read_hdf5(['/tmp/hdf5', '/tmp/hdf52'])
print('init took %f' % (time.time() - t))
channels = testReadObj.get_channels()
print('existing channels are <%s>' % (str(channels)))

chan_name = 'junk4.1'
print('\nworking on channel %s' % (chan_name))
start_index, end_index = testReadObj.get_bounds(chan_name)
print('Bounds are %i to %i' % (start_index, end_index))

# test one of get_continuous_blocks - end points in gaps
start_sample = 139436843434
end_sample = 139436843538
print('\ncalling get_continuous_blocks between %i and %i (edges in data gaps)' % (start_sample, end_sample))
cont_data_arr = testReadObj.get_continuous_blocks(start_sample, end_sample, chan_name)
corr_result = numpy.array([[139436843436, 10],
Example #17
def detect_meteors(rf_dir,
                   id_dir,
                   noise_dir,
                   output_dir,
                   t0=None,
                   t1=None,
                   rxch='zenith-l',
                   txch='tx-h'):
    """Function to detect and summarize meteor head echoes.


    Arguments
    ---------

    rf_dir : string or list
        RF data directory or directories.

    id_dir : string
        ID code metadata directory.

    noise_dir : string
        RX noise metadata directory.

    output_dir : string
        Meteor data output directory.

    t0 : float, optional
        Start time, seconds since epoch. If None, start at beginning of data.

    t1 : float, optional
        End time, seconds since epoch. If None, end at end of data.

    rxch : string, optional
        Receiver channel to process.

    txch : string, optional
        Transmitter channel.

    """
    rfo = drf.read_hdf5(rf_dir)
    ido = dmd.read_digital_metadata(id_dir)
    no = dmd.read_digital_metadata(noise_dir)

    # the sample rate is needed on every path to convert t0/t1 to sample indexes
    fs = rfo.get_metadata(rxch)['sample_rate'].value

    if t0 is None or t1 is None:
        bounds = []
        bounds.append(rfo.get_bounds(rxch))
        bounds.append(rfo.get_bounds(txch))
        bounds.append(ido.get_bounds())
        bounds.append(no.get_bounds())
        bounds = np.asarray(bounds)

        ss = np.max(bounds[:, 0])
        se = np.min(bounds[:, 1])

    # s0 and s1 must be defined even when both t0 and t1 are given
    if t0 is None:
        s0 = ss
    else:
        s0 = int(np.round(t0 * fs))

    if t1 is None:
        s1 = se
    else:
        s1 = int(np.round(t1 * fs))

    tmm = TimingModeManager.TimingModeManager()
    if os.path.exists('/tmp/tmm.hdf5'):
        tmm.loadFromHdf5('/tmp/tmm.hdf5', skip_lowlevel=True)
    else:
        tmm.loadFromHdf5(skip_lowlevel=True)

    for k, (tx, rx) in enumerate(
            data_generator(rfo, ido, no, tmm, s0, s1, rxch, txch)):
        #FIXME call processing functions here
        pass
    
    next_sample = start_index
    t2 = time.time()
    count = 0
    while next_sample < end_index:
        arr = test_read_obj.read_vector(next_sample, FILE_SAMPLES, channel_name)
        next_sample += FILE_SAMPLES
        count += 1
        if count % 100 == 0:
            print('%i out of 1000' % (count))
    seconds = time.time() - t2
    speedMB = (N_WRITES*4*WRITE_BLOCK_SIZE)/(1.0E6*seconds)
    print('Total read time %i seconds, speed %1.2f MB/s' % (int(seconds), speedMB))

t = time.time()
test_read_obj = digital_rf_hdf5.read_hdf5('/tmp/benchmark')
print('metadata analysis took %f seconds' % (time.time() - t))

print("\nTest 0 - read Hdf5 files with no compress, no checksum - channel name = junk0")
test_read('junk0', test_read_obj)

print("\nTest call to reload to update metadata")
t = time.time()
test_read_obj.reload()
print('reload took %f seconds' % (time.time() - t))


print("\nTest 1 -read Hdf5 files with no compress, but with level 9 checksum - channel name = junk1")
test_read('junk1', test_read_obj)

Example #19
    def setup(self, path = None,
                    startDate = None,
                    endDate = None,
                    startTime = datetime.time(0,0,0),
                    endTime = datetime.time(23,59,59),
                    channelList = None,
                    nSamples = None,
                    ippKm = 60,
                    online = False,
                    delay = 60,
                    buffer_size = 1024,
                    **kwargs):
        '''
        In this method we should set all initial parameters.

        Inputs:
            path
            startDate
            endDate
            startTime
            endTime
            set
            expLabel
            ext
            online
            delay
        '''

        if not os.path.isdir(path):
            raise ValueError("[Reading] Directory %s does not exist" %path)

        try:
            self.digitalReadObj = digital_rf_hdf5.read_hdf5(path, load_all_metadata=True)
        except:
            self.digitalReadObj = digital_rf_hdf5.read_hdf5(path)

        channelNameList = self.digitalReadObj.get_channels()

        if not channelNameList:
            raise ValueError("[Reading] Directory %s does not have any files" %path)

        if not channelList:
            channelList = list(range(len(channelNameList)))

        ##########  Reading metadata ######################

        metadata_dict = self.digitalReadObj.get_rf_file_metadata(channelNameList[channelList[0]])

        self.__sample_rate = metadata_dict['sample_rate'][0]
#         self.__samples_per_file = metadata_dict['samples_per_file'][0]
        self.__deltaHeigth = 1e6*0.15/self.__sample_rate

        this_metadata_file = self.digitalReadObj.get_metadata(channelNameList[channelList[0]])

        self.__frequency = None
        try:
            self.__frequency = this_metadata_file['center_frequencies'].value
        except:
            self.__frequency = this_metadata_file['fc'].value

        if not self.__frequency:
            raise ValueError("Center Frequency is not defined in metadata file")

        try:
            self.__timezone = this_metadata_file['timezone'].value
        except:
            self.__timezone = 0

        self.__firstHeigth = 0

        try:
            codeType = this_metadata_file['codeType'].value
        except:
            codeType = 0

        nCode = 1
        nBaud = 1
        code = numpy.ones((nCode, nBaud), dtype=numpy.int64)  # numpy.int was removed in modern NumPy

        if codeType:
            nCode = this_metadata_file['nCode'].value
            nBaud = this_metadata_file['nBaud'].value
            code = this_metadata_file['code'].value

        if not ippKm:
            try:
                #seconds to km
                ippKm = 1e6*0.15*this_metadata_file['ipp'].value
            except:
                ippKm = None

        ####################################################
        startUTCSecond = None
        endUTCSecond = None

        if startDate:
            startDatetime = datetime.datetime.combine(startDate, startTime)
            startUTCSecond = (startDatetime-datetime.datetime(1970,1,1)).total_seconds() + self.__timezone

        if endDate:
            endDatetime = datetime.datetime.combine(endDate, endTime)
            endUTCSecond = (endDatetime-datetime.datetime(1970,1,1)).total_seconds() + self.__timezone

        start_index, end_index = self.digitalReadObj.get_bounds(channelNameList[channelList[0]])

        if not startUTCSecond:
            startUTCSecond = start_index/self.__sample_rate

        if start_index > startUTCSecond*self.__sample_rate:
            startUTCSecond = start_index/self.__sample_rate

        if not endUTCSecond:
            endUTCSecond = end_index/self.__sample_rate

        if end_index < endUTCSecond*self.__sample_rate:
            endUTCSecond = end_index/self.__sample_rate

        if not nSamples:
            if not ippKm:
                raise ValueError("[Reading] nSamples or ippKm should be defined")

            nSamples = int(ippKm / (1e6*0.15/self.__sample_rate))

        channelBoundList = []
        channelNameListFiltered = []

        for thisIndexChannel in channelList:
            thisChannelName = channelNameList[thisIndexChannel]
            start_index, end_index = self.digitalReadObj.get_bounds(thisChannelName)
            channelBoundList.append((start_index, end_index))
            channelNameListFiltered.append(thisChannelName)

        self.profileIndex = 0

        self.__delay = delay
        self.__ippKm = ippKm
        self.__codeType = codeType
        self.__nCode = nCode
        self.__nBaud = nBaud
        self.__code = code

        self.__datapath = path
        self.__online = online
        self.__channelList = channelList
        self.__channelNameList = channelNameListFiltered
        self.__channelBoundList = channelBoundList
        self.__nSamples = nSamples
        self.__samples_to_read = int(buffer_size*nSamples)
        self.__nChannels = len(self.__channelList)

        self.__startUTCSecond = startUTCSecond
        self.__endUTCSecond = endUTCSecond

        self.__timeInterval = 1.0 * self.__samples_to_read/self.__sample_rate #Time interval

        if online:
#             self.__thisUnixSample = int(endUTCSecond*self.__sample_rate - 4*self.__samples_to_read)
            startUTCSecond = numpy.floor(endUTCSecond)

        self.__thisUnixSample = int(startUTCSecond*self.__sample_rate) - self.__samples_to_read

        self.__data_buffer = numpy.zeros((self.__nChannels, self.__samples_to_read), dtype=numpy.complex128)

        self.__setFileHeader()
        self.isConfig = True

        print("[Reading] USRP Data was found from %s to %s " %(
                                                      datetime.datetime.utcfromtimestamp(self.__startUTCSecond - self.__timezone),
                                                      datetime.datetime.utcfromtimestamp(self.__endUTCSecond - self.__timezone)
                                                      ))

        print("[Reading] Starting process from %s to %s" %(datetime.datetime.utcfromtimestamp(startUTCSecond - self.__timezone),
                                                           datetime.datetime.utcfromtimestamp(endUTCSecond - self.__timezone)
                                                           ))
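
A minimal sketch of driving this setup method (the wrapper class name and path are assumptions, not from the source):

    reader = USRPReader()  # hypothetical class that defines setup()
    reader.setup(path='/data/usrp',
                 startDate=datetime.date(2014, 3, 9),
                 endDate=datetime.date(2014, 3, 10),
                 channelList=[0],
                 nSamples=1000,
                 online=False)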
"""test_millstone_data_access.py is a test of using digital_rf_hdf5.read_hdf5 for 
reading small amounts of data from a large data set.

$Id: test_millstone_data_access.py 410 2014-05-23 20:33:46Z brideout $
"""

# constants - modify as needed
top_level_dir = '/data0/ringbuffer_h5'
test_chan = 'misa-l'

import digital_rf_hdf5
import time
t = time.time()
o = digital_rf_hdf5.read_hdf5(top_level_dir)
print('overall init took %f' % (time.time() - t))
print('The channels are <%s>' % (str(o.get_channels())))
b = o.get_bounds(test_chan)
print('Bounds of channel %s are <%s>' % (test_chan, str(b)))

print('Test getting small subsets of get_continuous_blocks')
for i in range(5):

    start_index = b[0] + int(0.2*i*(b[1]-b[0]))
    end_index = start_index + 10000000
    t = time.time()
    print('continuous blocks between %i and %i are <%s>' % (start_index, end_index, str(o.get_continuous_blocks(start_index,end_index,test_chan))))
    print('took %f' % (time.time() - t))

print('Test getting small subsets of read_vector at different places')
for i in range(5):
# set up fake realtime data by copying files
os.system("rm -rf /tmp/hdf52")
os.system("mkdir /tmp/hdf52")
os.system("mkdir /tmp/hdf52/junk1")
os.system("cp -r /tmp/hdf5/junk0/2014-03-09T12-30-30 /tmp/hdf52/junk1/")
os.system("mkdir /tmp/hdf52/junk1/2014-03-09T12-30-34")
files = glob.glob("/tmp/hdf5/junk0/2014-03-09T12-30-34/*")
files.sort()
for thisFile in files[:5]:
    shutil.copy(thisFile, "/tmp/hdf52/junk1/2014-03-09T12-30-34/")

# sleep for 4 seconds to make sure system knows all files closed
time.sleep(4)

# read
testReadObj = digital_rf_hdf5.read_hdf5(["/tmp/hdf52"], load_all_metadata=update)
channels = testReadObj.get_channels()
print(channels)

print("working on junk1")
start_index, end_index = testReadObj.get_bounds("junk1")
print(("bounds are: ", start_index, end_index))
cont_data_arr = testReadObj.get_continuous_blocks(start_index, end_index, "junk1")
print(("continuous data is ", cont_data_arr))
result = testReadObj.read_vector_raw(cont_data_arr[0][0], cont_data_arr[0][1], "junk1")
print("got %i samples" % (len(result)))


# simulate realtime update
time.sleep(5)
for thisFile in files[5:]:
    count = 0
    for i in range(1000):
        arr = test_read_obj.read_vector_raw(next_sample, read_size,
                                            channel_name)
        next_sample += read_size
        count += 1
        if count % 100 == 0:
            print('%i out of 1000' % (count))
    seconds = time.time() - t2
    speedMB = (read_size * 1000 * 4) / (1.0E6 * seconds)
    print('Total read time %i seconds, speed %1.2f MB/s' %
          (int(seconds), speedMB))


t = time.time()
test_read_obj = digital_rf_hdf5.read_hdf5('/tmp/benchmark',
                                          load_all_metadata=True)
print('metadata analysis took %f seconds' % (time.time() - t))

print(
    "\nTest 0 - read Hdf5 files with no compress, no checksum - channel name = junk0"
)
test_read('junk0', test_read_obj)

print(
    "\nTest 0.1 - read Hdf5 files with no compress, no checksum, small read size - channel name = junk0"
)
test_read('junk0', test_read_obj, 1000)

print("\nTest call to reload to update metadata")
t = time.time()
test_read_obj.reload()
"""test_millstone_data_access.py is a test of using digital_rf_hdf5.read_hdf5 for 
reading small amounts of data from a large data set.

$Id: test_millstone_data_access.py 410 2014-05-23 20:33:46Z brideout $
"""

# constants - modify as needed
top_level_dir = '/data0/ringbuffer_h5'
test_chan = 'misa-l'

import digital_rf_hdf5
import time
t = time.time()
o = digital_rf_hdf5.read_hdf5(top_level_dir)
print('overall init took %f' % (time.time() - t))
print('The channels are <%s>' % (str(o.get_channels())))
b = o.get_bounds(test_chan)
print('Bounds of channel %s are <%s>' % (test_chan, str(b)))

print('Test getting small subsets of get_continuous_blocks')
for i in range(5):

    start_index = b[0] + int(0.2 * i * (b[1] - b[0]))
    end_index = start_index + 10000000
    t = time.time()
    print('continuous blocks between %i and %i are <%s>' %
          (start_index, end_index,
           str(o.get_continuous_blocks(start_index, end_index, test_chan))))
    print('took %f' % (time.time() - t))

print('Test getting small subsets of read_vector at different places')
    t2 = time.time()
    count = 0
    while next_sample < end_index:
        arr = test_read_obj.read_vector_raw(next_sample, FILE_SAMPLES, channel_name)
        if len(arr) != FILE_SAMPLES:
            raise IOError('%i != %i' % (len(arr), FILE_SAMPLES))
        next_sample += FILE_SAMPLES
        count += 1
        if count % 100 == 0:
            print('%i out of 1000' % (count))
    seconds = time.time() - t2
    speedMB = (N_WRITES*4*WRITE_BLOCK_SIZE)/(1.0E6*seconds)
    print('Total read time %i seconds, speed %1.2f MB/s' % (int(seconds), speedMB))


data_object = digital_rf_hdf5.write_hdf5_channel(
    "/tmp/hdf52/junk4.1", "i8", 40, files_per_directory, start_global_index, sample_rate, "FAKE_UUID_2", 1, False, True
)
data = numpy.array(base_data, numpy.int64)
for i in range(100):
    data_object.rf_write_blocks(data, global_sample_arr, block_sample_arr)
    global_sample_arr += 205
data_object.close()
print("done test 4.1")

# sleep for 4 seconds to make sure system knows all files closed
time.sleep(4)

# read
t = time.time()
testReadObj = digital_rf_hdf5.read_hdf5(["/tmp/hdf5", "/tmp/hdf52"])
print("init took %f" % (time.time() - t))
channels = testReadObj.get_channels()
print("existing channels are <%s>" % (str(channels)))

chan_name = "junk4.1"
print("\nworking on channel %s" % (chan_name))
start_index, end_index = testReadObj.get_bounds(chan_name)
print("Bounds are %i to %i" % (start_index, end_index))

# test one of get_continuous_blocks - end points in gaps
start_sample = 139436843434
end_sample = 139436843538
print("\ncalling get_continuous_blocks between %i and %i (edges in data gaps)" % (start_sample, end_sample))
cont_data_arr = testReadObj.get_continuous_blocks(start_sample, end_sample, chan_name)
corr_result = numpy.array(