    def download_data(self):
        start, end = self.datetimerange

        # Fetch data for every station whose group is not yet in the datafile.
        for station, group_path in zip(self.stations, self.station_groups):
            if group_path not in self.data:
                print "Downloading data for station", station
                download_data(self.data, group_path, station,
                              start, end, get_blobs=True)
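    # A hedged usage sketch: this method assumes the surrounding class (not
    # shown here) provides self.stations, self.station_groups,
    # self.datetimerange and an open self.data PyTables file.  With a
    # hypothetical DataManager class it might be driven like this:
    #
    #     manager = DataManager(stations=[501, 502],
    #                           station_groups=['/s501', '/s502'],
    #                           datetimerange=(start, end),
    #                           data=datafile)
    #     manager.download_data()
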
def first_download(station):  # not used in the current script; could be integrated later
    """ Download data into the HDF5 datafile

    Also downloads the data for the reference date if it falls (partly)
    outside the [START, STOP] range.
    """
    group = '/s%s' % station

    # filepath, START, STOP and REFDATE are constants defined elsewhere in
    # the script.
    with openFile(filepath, 'a') as datafile:
        if group not in datafile:
            download_data(datafile, group, station, START, STOP)

            nextday = REFDATE + timedelta(days=1)
            if REFDATE < START or nextday > STOP:
                download_data(datafile, group, station, REFDATE, nextday)
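
# A hedged usage sketch: first_download() relies on the module-level
# constants (filepath, START, STOP, REFDATE) and the download_data helper,
# so a caller would simply loop over the station numbers of interest, e.g.
#
#     for station in (501, 502, 503):
#         first_download(station)
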
def download_part(station_id, start, stop):
    """ Download HiSPARC data from start until stop """
    hisp_station = int(station_id)
    tree = '/s' + station_id
    filename = 'data_s%d_%s_%s.h5' % (hisp_station,
                                      start.strftime('%Y%m%d'),
                                      stop.strftime('%Y%m%d'))
    with tables.openFile(filename, 'w') as data:
        download_data(data, tree, hisp_station, start, stop)
        # Node of this station's group in the datafile.
        station_group = data.getNode(tree)
        remove_duplicate_events(data, station_group)
        weather_data_in_file = check_if_weather(data, filename)
        if weather_data_in_file:
            remove_duplicate_events(data, station_group, kind='weather')
    print ''
    print "'%s' has been downloaded." % filename
    print 'You can find it in: ' + os.getcwd()
    print ''
    return filename
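
# A minimal usage sketch, assuming this function lives in a script where
# tables, os, download_data, remove_duplicate_events and check_if_weather
# are imported or defined at module level.  The station id is passed as a
# string and the dates as datetime objects:
if __name__ == '__main__':
    import datetime

    # Hypothetical station number and date range, just to illustrate the call.
    downloaded_file = download_part('501', datetime.datetime(2012, 1, 1),
                                    datetime.datetime(2012, 1, 2))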
Example #5
    This script is a small demonstration of how to download data from one
    particular station, in this case station 4001 at KVI, and make a simple
    pulse height histogram.
"""

import tables
import datetime
from hisparc.publicdb import download_data
import pylab

if __name__ == '__main__':
    # Open data file and download data
    data = tables.openFile('kvi.h5', 'w')
    download_data(data, '/hisparc/kvi', station_id=4001,
                  start=datetime.datetime(2009, 11, 23),
                  end=datetime.datetime(2009, 11, 24))

    # Fetch pulseheights...
    ph = [x['pulseheights'] for x in data.root.hisparc.kvi.events]
    # ...and rearrange
    ph = zip(*ph)
    # ...and drop detectors 3 and 4 (slave not attached)
    ph = ph[:2]

    # Convert the pulseheights from raw ADC counts to mV
    ph = [[x * 0.57 for x in det] for det in ph]

    # Bin pulseheights of detector 1 and 2
    pylab.hist(ph[0], bins=100, range=[0, 2000], histtype='step')
    pylab.hist(ph[1], bins=100, range=[0, 2000], histtype='step')
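
    # A possible follow-up, assuming an interactive matplotlib backend: label
    # the axes, display the histograms and close the datafile when done.
    pylab.xlabel('pulse height (mV)')
    pylab.ylabel('count')
    pylab.show()
    data.close()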
Example #6
def make_hist(data, bins, title, xlim=None):
    # Plot a stepped, log-scale pulse height histogram for each detector.
    pylab.figure()
    for x in data:
        pylab.hist(x, bins=bins, log=True, histtype='step')
    pylab.title(title)
    pylab.xlabel('pulse height (raw ADC values)')
    pylab.ylabel('count')
    if xlim:
        pylab.xlim(xlim)

if __name__ == '__main__':
    if os.path.exists('jos.h5'):
        data = tables.openFile('jos.h5', 'r')
    else:
        data = tables.openFile('jos.h5', 'w')
        download_data(data, '/hisparc/s501', station_id=501,
                      start=datetime.datetime(2009, 1, 3),
                      end=datetime.datetime(2009, 1, 11))
        download_data(data, '/hisparc/s505', station_id=505,
                      start=datetime.datetime(2009, 1, 3),
                      end=datetime.datetime(2009, 1, 11))

    ph = [x['pulseheights'] for x in data.root.hisparc.s501.events]
    twohigh = zip(*[x for x in ph if (x >= 123).tolist().count(True) >= 2])
    #threelow = zip(*[x for x in ph if (x >= 53).tolist().count(True) >= 3])
    #twoscint = zip(*[x[:2] for x in ph if (x[:2] >= 123).tolist().count(True) >= 2])

    make_hist(twohigh, bins=range(0, 2000, 20),
              title="Trigger: at least two high (20 ADC values per bin)")
    make_hist(twohigh, bins=range(0, 2000, 1),
              title="Trigger: at least two high (1 ADC value per bin)")
    make_hist(twohigh, bins=range(0, 2000, 1),
Example #7
    and you're set.
"""

import tables
import datetime
from hisparc.publicdb import download_data
from hisparc.analysis import coincidences
import time

if __name__ == '__main__':
    data = tables.openFile('test.h5', 'w')

    t0 = time.time()
    for station in range(501, 506):
        download_data(
            data, '/hisparc/station' + str(station), station_id=station,
            start=datetime.datetime(2009, 2, 24, 12),
            end=datetime.datetime(2009, 2, 24, 13))

    t1 = time.time()
    coinc, timestamps = coincidences.search_coincidences(
                                    data,
                                    ['/hisparc/station501',
                                     '/hisparc/station502',
                                     '/hisparc/station503',
                                     '/hisparc/station504',
                                     '/hisparc/station505'],
                                    shifts=[None, None, -15, None, None])
    t2 = time.time()
    print 'Download: %f (%.1f%%)' % (t1 - t0, 100 * (t1 - t0) / (t2 - t0))
    print 'Processing: %f (%.1f%%)' % (t2 - t1,
                                       100 * (t2 - t1) / (t2 - t0))
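
    # A possible follow-up sketch: summarize the search results and close the
    # datafile (search_coincidences returns the coincidences found and the
    # flattened timestamp list, as unpacked above).
    print 'Found %d coincidences' % len(coinc)
    data.close()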