Example #1
    def test_pbar_hide_output(self):
        """Empty output when not showing progressbar"""

        pb = utils.pbar(self.iterable, show=False, fd=self.output)
        self.assertEqual(list(pb), self.iterable)
        self.assertEqual(self.output.getvalue(), '')

        pb = utils.pbar(self.iterable, show=True, fd=self.output)
        self.assertEqual(list(pb), self.iterable)
        self.assertNotEqual(self.output.getvalue(), '')
Example #2
def update_additional_local_tsv():
    """Get location tsv data for all stations"""

    station_numbers = Network().station_numbers()

    for type in ['eventtime', 'detector_timing_offsets']:
        try:
            mkdir(path.join(LOCAL_BASE, type))
        except OSError:
            pass
        for number in pbar(station_numbers):
            url = API.src_urls[type].format(station_number=number,
                                            year='', month='', day='', hour='')
            try:
                data = API._retrieve_url(url.strip('/'), base=SRC_BASE)
            except:
                print 'Failed to get %s data for station %d' % (type, number)
                continue
            data = '\n'.join(d for d in data.split('\n')
                             if len(d) and d[0] != '#')
            if data:
                tsv_path = path.join(LOCAL_BASE,
                                     url.strip('/') + extsep + 'tsv')
                with open(tsv_path, 'w') as tsvfile:
                    tsvfile.write(data)

    type = 'station_timing_offsets'
    network = HiSPARCNetwork()

    try:
        mkdir(path.join(LOCAL_BASE, type))
    except OSError:
        pass
    for number1, number2 in pbar(combinations(station_numbers, 2)):
        distance = network.calc_distance_between_stations(number1, number2)
        if distance is None or distance > 1e3:
            continue
        try:
            mkdir(path.join(LOCAL_BASE, type, str(number1)))
        except OSError:
            pass
        url = API.src_urls[type].format(station_1=number1, station_2=number2)
        try:
            data = API._retrieve_url(url.strip('/'), base=SRC_BASE)
        except:
            print ('Failed to get %s data for station pair %d-%d' %
                   (type, number1, number2))
            continue
        data = '\n'.join(d for d in data.split('\n') if len(d) and d[0] != '#')
        if data:
            tsv_path = path.join(LOCAL_BASE, url.strip('/') + extsep + 'tsv')
            with open(tsv_path, 'w') as tsvfile:
                tsvfile.write(data)
Example #3
    def store_coincidences(self):
        print "Storing coincidences..."
        if '/coincidences' not in self.data:
            group = self.data.create_group('/', 'coincidences')
            group._v_attrs.cluster = self.cluster

            self.c_index = []
            self.coincidences = self.data.create_table(group, 'coincidences',
                                                       storage.Coincidence)
            self.observables = self.data.create_table(group, 'observables',
                                                      storage.EventObservables)

            for coincidence in pbar(self.data.root.c_index):
                self.store_coincidence(coincidence)

            c_index = self.data.create_vlarray(group, 'c_index',
                                               tables.UInt32Col())
            for coincidence in self.c_index:
                c_index.append(coincidence)
            c_index.flush()
            self.c_index = c_index
        else:
            # Force new cluster geometry
            group = self.data.get_node('/', 'coincidences')
            group._v_attrs.cluster = self.cluster
Example #4
def write_jsons(data):
    if not os.path.exists(os.path.join(EVENT_DISPLAY_DIR, 'data/')):
        os.mkdir(os.path.join(EVENT_DISPLAY_DIR, 'data'))

    # Create subcluster station location and coincidences JSONs
    for subcluster in Network().subclusters():
        subcluster_name = subcluster['name'].lower().replace(' ', '_')

        stations = build_station_json(data, subcluster['number'])
        with open('data/stations_' + subcluster_name + '.json', 'w') as f:
            json.dump(stations, f)

        coincidences = build_coincidence_json(data, subcluster_name)
        with open('data/coincidences_' + subcluster_name + '.json', 'w') as f:
            json.dump(coincidences, f)

    # Create network station location and coincidences JSONs
    stations = build_station_json(data)
    with open('data/stations_network.json', 'w') as f:
        json.dump(stations, f)

    coincidences = build_coincidence_json(data)
    with open('data/coincidences_network.json', 'w') as f:
        json.dump(coincidences, f)

    # Create station events JSONs
    for station in pbar(STATIONS):
        events = build_events_json(data, station)
        with open('data/events_s%d.json' % station, 'w') as f:
            json.dump(events, f)
Example #5
def write_jsons(data):
    if not os.path.exists(os.path.join(EVENT_DISPLAY_DIR, 'data/')):
        os.mkdir(os.path.join(EVENT_DISPLAY_DIR, 'data'))

    # Create subcluster station location and coincidences JSONs
    for subcluster in Network().subclusters():
        subcluster_name = subcluster['name'].lower().replace(' ', '_')

        stations = build_station_json(data, subcluster['number'])
        with open('data/stations_' + subcluster_name + '.json', 'w') as f:
            json.dump(stations, f)

        coincidences = build_coincidence_json(data, subcluster_name)
        with open('data/coincidences_' + subcluster_name + '.json', 'w') as f:
            json.dump(coincidences, f)

    # Create network station location and coincidences JSONs
    stations = build_station_json(data)
    with open('data/stations_network.json', 'w') as f:
        json.dump(stations, f)

    coincidences = build_coincidence_json(data)
    with open('data/coincidences_network.json', 'w') as f:
        json.dump(coincidences, f)

    # Create station events JSONs
    for station in pbar(STATIONS):
        events = build_events_json(data, station)
        with open('data/events_s%d.json' % station, 'w') as f:
            json.dump(events, f)
Example #6
    def store_coincidences(self):
        print "Storing coincidences..."
        if '/coincidences' not in self.data:
            group = self.data.create_group('/', 'coincidences')
            group._v_attrs.cluster = self.cluster

            self.c_index = []
            self.coincidences = self.data.create_table(group,
                                                      'coincidences',
                                                      storage.Coincidence)
            self.observables = self.data.create_table(group, 'observables',
                                            storage.EventObservables)

            for coincidence in pbar(self.data.root.c_index):
                self.store_coincidence(coincidence)

            c_index = self.data.create_vlarray(group, 'c_index',
                                              tables.UInt32Col())
            for coincidence in self.c_index:
                c_index.append(coincidence)
            c_index.flush()
            self.c_index = c_index
        else:
            # Force new cluster geometry
            group = self.data.get_node('/', 'coincidences')
            group._v_attrs.cluster = self.cluster
Example #7
def write_jsons(data):
    if not os.path.exists('data/'):
        os.mkdir('data')

    for subcluster in Network().subclusters():
        subcluster_name = subcluster['name'].lower().replace(' ', '_')

        stations = build_station_json(data, subcluster['number'])
        with open('data/stations_' + subcluster_name + '.json', 'w') as f:
            json.dump(stations, f)

        coincidences = build_coincidence_json(data, subcluster_name)
        with open('data/coincidences_' + subcluster_name + '.json', 'w') as f:
            json.dump(coincidences, f)

    stations = build_station_json(data)
    with open('data/stations_network.json', 'w') as f:
        json.dump(stations, f)

    coincidences = build_coincidence_json(data)
    with open('data/coincidences_network.json', 'w') as f:
        json.dump(coincidences, f)

    for station in pbar(STATIONS):
        events = build_events_json(data, station)
        with open('data/events_s%d.json' % station, 'w') as f:
            json.dump(events, f)
Example #8
def migrate():
    """
    Find unmigrated tables in datastore
    migrate tables
    check datastore again for unmigrated tables
    """

    logging.info('******************')
    logging.info('Starting migration')
    logging.info('******************')

    queue = get_queue(DATASTORE_PATH)
    print('migrating:')
    for path in pbar(queue.keys()):
        logging.info('Migrating: %s' % path)
        with tables.open_file(path, 'a') as data:
            migration = MigrateSingles(data)
            for table in queue[path]:
                logging.debug('Processing table: %s' % table)
                migration.migrate_table(table)

    queue = get_queue(DATASTORE_PATH)
    if queue:
        logging.error('Found unprocessed tables after migration')
        for path in queue.keys():
            logging.error('Unprocessed tables in: %s' % path)
            for table in queue[path]:
                logging.error('%s' % table)
    else:
        logging.info('********************')
        logging.info('Migration successful!')
        logging.info('********************')
Example #9
def get_queue(datastore_path):
    queue = {}
    logging.info('Searching for unmigrated singles tables')

    print('Looking for singles tables in datastore.')

    # Singles tables were added in Feb, 2016.
    for fn in pbar(glob.glob(datastore_path + '/201[6,7]/*/*h5')):

        singles_tables = []
        with tables.open_file(fn, 'r') as data:
            for node in data.walk_nodes('/', 'Table'):
                table_path = node._v_pathname
                if '/singles' in table_path:
                    if 'singles_old' in table_path:
                        continue
                    type_ = type(node.description.mas_ch1_low)
                    if type_ == tables.UInt16Col:
                        logging.debug('Found: %s' % table_path)
                        singles_tables.append(table_path)
                    elif type_ == tables.Int32Col:
                        logging.debug('Skipping migrated: %s' % table_path)
                        continue
                    else:
                        logging.error('%s in unknown format!' % table_path)

        if singles_tables:
            queue[fn] = singles_tables
            logging.info('Found %d tables in %s' % (len(singles_tables), fn))

    n = sum(len(v) for v in queue.itervalues())
    logging.info('Found %d unmigrated tables '
                 'in %d datastore files.' % (n, len(queue)))
    return queue
Example #10
def make_datasets_failed_reconstructions_scatter(data):
    global dt1, dt2, phis_sim, phis_rec
    global gdt1, gdt2, gphis_sim, gphis_rec

    group = data.root.simulations.E_1PeV.zenith_22_5.shower_0
    observables = group.observables
    coincidences = group.coincidences

    dt1, dt2, phis_sim, phis_rec = [], [], [], []
    gdt1, gdt2, gphis_sim, gphis_rec = [], [], [], []

    for event, coincidence in pbar(izip(observables, coincidences),
                                   len(observables)):
        assert event['id'] == coincidence['id']
        if min(event['n1'], event['n3'], event['n4']) >= 1.:
            theta, phi = reconstruct_angle(event, 10.)
            assert not isnan(phi)

            if isnan(theta):
                dt1.append(event['t1'] - event['t3'])
                dt2.append(event['t1'] - event['t4'])
                phis_sim.append(coincidence['phi'])
                phis_rec.append(phi)
            else:
                gdt1.append(event['t1'] - event['t3'])
                gdt2.append(event['t1'] - event['t4'])
                gphis_sim.append(coincidence['phi'])
                gphis_rec.append(phi)
Example #11
def perform_simulations():
    cq = CorsikaQuery(OVERVIEW)
    s = set()
    for energy in pbar(cq.available_parameters('energy', particle='proton')):
        for zenith in cq.available_parameters('zenith',
                                              particle='proton',
                                              energy=energy):
            sims = cq.simulations(particle='proton',
                                  zenith=zenith,
                                  energy=energy,
                                  iterator=False)
            selected_seeds = cq.seeds(sims)
            n = min(len(selected_seeds), N)
            for seeds in choice(selected_seeds, n, replace=False):
                if seeds in s:
                    continue
                if not os.path.exists(
                        '/data/hisparc/corsika/data/{seeds}/corsika.h5'.format(
                            seeds=seeds)):
                    continue
                s.add(seeds)
                if energy >= 16:
                    perform_job(seeds, 'long')
                elif energy >= 14.5:
                    perform_job(seeds, 'generic')
                else:
                    perform_job(seeds, 'short')
    cq.finish()
Example #12
def binned_stat_idx(events, idx_ranges):

    stats = {}
    for field_name in FIELD_NAMES:
        if field_name == 'event_rate':
            stats[field_name] = get_event_rate(idx_ranges)
        else:
            stats[field_name] = []

    for start, stop in pbar(zip(idx_ranges[:-1], idx_ranges[1:])):
        slice = events.read(start, stop)
        for field, field_name in zip(FIELDS, FIELD_NAMES):
            if field_name == 'event_rate':
                # Event time determined above
                continue
            elif field_name == 'mpv':
                # Simply the MPV value, not fraction of bad MPV
                mpvs = reconstruct_mpv(slice)
                stats[field_name].append(mpvs)
            elif isinstance(field, tuple):
                data = column_stack(slice[f] for f in field)
                stats[field_name].append(frac_bad(data))
            else:
                data = slice[field]
                stats[field_name].append(frac_bad(data))

    for field_name in FIELD_NAMES:
        stats[field_name] = array(stats[field_name]).T

    return stats
Example #13
def make_datasets_failed_reconstructions_scatter(data):
    global dt1, dt2, phis_sim, phis_rec
    global gdt1, gdt2, gphis_sim, gphis_rec

    group = data.root.simulations.E_1PeV.zenith_22_5.shower_0
    observables = group.observables
    coincidences = group.coincidences

    dt1, dt2, phis_sim, phis_rec = [], [], [], []
    gdt1, gdt2, gphis_sim, gphis_rec = [], [], [], []

    for event, coincidence in pbar(izip(observables, coincidences),
                                   len(observables)):
        assert event['id'] == coincidence['id']
        if min(event['n1'], event['n3'], event['n4']) >= 1.:
            theta, phi = reconstruct_angle(event, 10.)
            assert not isnan(phi)

            if isnan(theta):
                dt1.append(event['t1'] - event['t3'])
                dt2.append(event['t1'] - event['t4'])
                phis_sim.append(coincidence['phi'])
                phis_rec.append(phi)
            else:
                gdt1.append(event['t1'] - event['t3'])
                gdt2.append(event['t1'] - event['t4'])
                gphis_sim.append(coincidence['phi'])
                gphis_rec.append(phi)
Example #14
    def test_pbar_generator(self):
        """Return original generator, not a progressbar"""

        generator = (x for x in self.iterable)
        pb = utils.pbar(generator)
        self.assertIsInstance(pb, types.GeneratorType)
        self.assertEqual(list(pb), self.iterable)
Example #15
    def test_pbar_generator_known_length(self):
        """Return progressbar for generator with known length"""

        generator = (y for y in self.iterable)
        pb = utils.pbar(generator, length=len(self.iterable), fd=self.output)
        self.assertIsInstance(pb, progressbar.ProgressBar)
        self.assertEqual(list(pb), self.iterable)
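
The pbar tests above exercise three behaviours of sapphire.utils.pbar: with show=False nothing is written to fd, bare generators are passed through unchanged, and generators with an explicit length are wrapped in a progressbar.ProgressBar. Below is a minimal sketch of a wrapper with that behaviour, assuming the progressbar2 package; pbar_sketch is a hypothetical name, not the actual sapphire implementation.

    import sys

    import progressbar


    def pbar_sketch(iterable, length=None, show=True, fd=sys.stderr):
        """Wrap an iterable in a progress bar when its length is known."""
        if not show:
            # Progress display disabled: return the iterable untouched,
            # so nothing is written to fd.
            return iterable
        if length is None:
            try:
                length = len(iterable)
            except TypeError:
                # Generators of unknown length cannot drive a progress bar,
                # so they are returned as-is.
                return iterable
        # A too-small length makes the bar raise an error while iterating,
        # matching the wrong-length test further below.
        return progressbar.ProgressBar(max_value=length, fd=fd)(iterable)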
Example #16
def foreignkey_to_m2m(apps, schema_editor):
    """Backwards migrations"""
    fk_model = apps.get_model('coincidences', 'Event')
    if fk_model.objects.all().count():
        print('')
    for event in pbar(fk_model.objects.all().iterator(), length=fk_model.objects.all().count()):
        event.coincidence.events.add(event)
Example #17
    def sort(self):
        """Sort the table"""

        chunk = self.nrows_in_chunk
        nrows = self.nrows
        parts = int(nrows / chunk) + 1
        if parts == 1:
            if self.progress:
                print("Sorting table in memory and writing to disk.")
            self._sort_chunk(self.outtable, 0, nrows)
        else:
            if self.progress:
                print("Sorting in %d chunks of %d rows:" % (parts, chunk))

            for idx, start in pbar(enumerate(range(0, nrows, chunk)),
                                   length=parts,
                                   show=self.progress):
                table_name = 'temp_table_%d' % idx
                table = self.hdf5_temp.create_table('/',
                                                    table_name,
                                                    self.description,
                                                    expectedrows=chunk)
                iterator = self._sort_chunk(table, start, start + chunk)
                self._iterators.append(iterator)

            rowbuf = self.outtable._get_container(self._BUFSIZE)
            idx = 0

            if self.progress:
                print("Merging:")

            for keyedrow in pbar(merge(*self._iterators),
                                 length=nrows,
                                 show=self.progress):
                x, row = keyedrow

                if idx == self._BUFSIZE:
                    self.outtable.append(rowbuf)
                    self.outtable.flush()
                    idx = 0

                rowbuf[idx] = row.fetch_all_fields()
                idx += 1

            # store last lines in buffer
            self.outtable.append(rowbuf[0:idx])
            self.outtable.flush()
Example #18
def download_events_data(data):
    """Download event data for each station into a separate table"""

    for station in pbar(STATIONS):
        group = '/s%d' % station
        if group not in data:
            download_data(data, group, station, start=START, end=END,
                          progress=False)
Example #19
def get_combined_results():
    zenith = []
    zenith_in = []
    azimuth = []
    azimuth_in = []
    energy_in = []
    size_in = []
    r_in = []

    zenith_init = []
    azimuth_init = []
    energy_init = []
    size_init = []
    r_init = []

    for path in pbar(glob.glob(PATHS)):
        with tables.open_file(path, 'r') as data:
            recs = data.root.coincidences.reconstructions
            filtered_recs = recs.read_where(
                's501 & s502 & s503 & s504 & s505 & s506')
            zenith.extend(degrees(filtered_recs['zenith']))
            zenith_in.extend(degrees(filtered_recs['reference_zenith']))
            azimuth.extend(degrees(filtered_recs['azimuth']))
            azimuth_in.extend(degrees(filtered_recs['reference_azimuth']))
            energy_in.extend(log10(filtered_recs['reference_energy']))
            size_in.extend(log10(filtered_recs['reference_size']))
            r_in.extend(
                sqrt(filtered_recs['reference_x']**2 +
                     filtered_recs['reference_y']**2))

            zenith_init.extend(degrees(recs.col('reference_zenith')))
            azimuth_init.extend(degrees(recs.col('reference_azimuth')))
            energy_init.extend(log10(recs.col('reference_energy')))
            size_init.extend(log10(recs.col('reference_size')))
            r_init.extend(
                sqrt(recs.col('reference_x')**2 + recs.col('reference_y')**2))

    zenith = array(zenith)
    filter = ~isnan(zenith)

    zenith = zenith.compress(filter)
    zenith_in = array(zenith_in).compress(filter)
    azimuth = array(azimuth).compress(filter)
    azimuth_in = array(azimuth_in).compress(filter)
    energy_in = array(energy_in).compress(filter)
    size_in = array(size_in).compress(filter)
    r_in = array(r_in).compress(filter)

    zenith_init = array(zenith_init)
    azimuth_init = array(azimuth_init)
    energy_init = array(energy_init)
    size_init = array(size_init)
    r_init = array(r_init)

    print sum(filter), len(filter)

    return (zenith, zenith_in, azimuth, azimuth_in, energy_in, size_in, r_in,
            zenith_init, azimuth_init, energy_init, size_init, r_init)
Example #20
def get_pair_distance_energy_array(distances, energies, n=8):
    results = []
    for distance in pbar(distances):
        sens = DistancePairAreaEnergySensitivity(distance=distance, n=n)
        sens.main()
        areas = sens.get_area_energy(energies)
        results.append(areas)
    results = np.array(results)
    return results
Example #21
def update_local_json():
    for type in pbar(['stations', 'subclusters', 'clusters', 'countries']):
        update_toplevel_json(type)

    for arg_type, type in [('stations', 'station_info'),
                           ('subclusters', 'stations_in_subcluster'),
                           ('clusters', 'subclusters_in_cluster'),
                           ('countries', 'clusters_in_country')]:
        update_sublevel_json(arg_type, type)
Example #22
    def sort(self):
        """Sort the table"""

        chunk = self.nrows_in_chunk
        nrows = self.nrows
        parts = int(nrows / chunk) + 1
        if parts == 1:
            if self.progress:
                print("Sorting table in memory and writing to disk.")
            self._sort_chunk(self.outtable, 0, nrows)
        else:
            if self.progress:
                print("Sorting in %d chunks of %d rows:" % (parts, chunk))

            for idx, start in pbar(enumerate(range(0, nrows, chunk)),
                                   length=parts, show=self.progress):
                table_name = 'temp_table_%d' % idx
                table = self.hdf5_temp.create_table('/', table_name,
                                                    self.description,
                                                    expectedrows=chunk)
                iterator = self._sort_chunk(table, start, start + chunk)
                self._iterators.append(iterator)

            rowbuf = self.outtable._get_container(self._BUFSIZE)
            idx = 0

            if self.progress:
                print("Merging:")

            for keyedrow in pbar(merge(*self._iterators), length=nrows,
                                 show=self.progress):
                x, row = keyedrow

                if idx == self._BUFSIZE:
                    self.outtable.append(rowbuf)
                    self.outtable.flush()
                    idx = 0

                rowbuf[idx] = row.fetch_all_fields()
                idx += 1

            # store last lines in buffer
            self.outtable.append(rowbuf[0:idx])
            self.outtable.flush()
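
The sort() methods in Examples #17 and #22 implement an external sort: the table is sorted in fixed-size chunks written to temporary tables, and the per-chunk iterators are then combined with a k-way merge. Below is a self-contained sketch of that sort-then-merge pattern, using plain lists and Python's heapq.merge in place of the PyTables machinery; it illustrates the idea only, not the code above.

    from heapq import merge

    data = [5, 3, 8, 1, 9, 2, 7, 6, 4, 0]
    chunk_size = 4

    # Sort each chunk independently (the examples write these to temp tables).
    sorted_chunks = [sorted(data[i:i + chunk_size])
                     for i in range(0, len(data), chunk_size)]

    # A k-way merge of the sorted chunks yields globally sorted output,
    # reading only one element per chunk at a time.
    merged = list(merge(*sorted_chunks))
    assert merged == sorted(data)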
Example #23
def m2m_to_foreignkey(apps, schema_editor):
    """Forwards migrations"""
    m2m_model = apps.get_model('coincidences', 'Coincidence')
    coincidences = m2m_model.objects.all().annotate(n_events=Count('events')).exclude(n_events=0)
    if coincidences.count():
        print('')
    for coincidence in pbar(coincidences.iterator(), length=coincidences.count()):
        for event in coincidence.events.all():
            event.coinc = coincidence
            event.save()
Example #24
def serialiseddatafield_to_arrayfield(apps, schema_editor):
    """Forwards migrations"""
    model = apps.get_model('histograms', 'NetworkHistogram')
    if model.objects.all().count():
        print('')
    for histogram in pbar(model.objects.all().iterator(),
                          length=model.objects.all().count()):
        histogram.bins = histogram.old_bins
        histogram.values = histogram.old_values
        histogram.save()
Example #25
def download_events_data(data):
    """Download event data for each station into a separate table"""

    for station in pbar(STATIONS):
        group = '/s%d' % station
        if group not in data:
            download_data(data,
                          group,
                          station,
                          start=START,
                          end=END,
                          progress=False)
Example #26
def serialiseddatafield_to_arrayfield(apps, schema_editor):
    """Forwards migrations"""
    model = apps.get_model('coincidences', 'Event')
    if model.objects.all().count():
        print('')
    for event in pbar(model.objects.all().iterator(),
                      length=model.objects.all().count()):
        event.pulseheights = mv_to_adc(event.old_pulseheights)
        event.integrals = mvns_to_adcsample(event.old_integrals)
        event.traces = traces_mv_to_adc(event.old_traces)
        if not (len(event.traces[0]) == len(event.traces[-1])):
            event.traces = event.traces[:2]
        event.save()
Example #27
def generate_json():
    """Get the API info data for each station"""

    station_numbers = Network().station_numbers()
    station_info = {}

    for number in pbar(station_numbers):
        try:
            station = Station(number)
            station_info[number] = station.info
        except:
            continue

    return station_info
Example #28
    def reconstruct_angles(self, coincidences_group):
        coincidences_group = self.data.get_node(coincidences_group)
        self.data_group = coincidences_group
        coincidences = coincidences_group.coincidences

        self.cluster = coincidences_group._v_attrs.cluster
        self.results_group._v_attrs.cluster = self.cluster

        sel_coincidences = coincidences.read_where('N >= 1')
        for coincidence in pbar(sel_coincidences):
            self.reconstruct_individual_stations(coincidence)
            self.reconstruct_cluster_stations(coincidence)

        self.data.flush()
Example #29
def generate_json():
    """Get the API info data for each station"""

    station_numbers = Network().station_numbers()
    station_info = {}

    for number in pbar(station_numbers):
        try:
            station = Station(number)
            station_info[number] = station.info
        except:
            continue

    return station_info
Example #30
    def reconstruct_angles(self, coincidences_group):
        coincidences_group = self.data.get_node(coincidences_group)
        self.data_group = coincidences_group
        coincidences = coincidences_group.coincidences

        self.cluster = coincidences_group._v_attrs.cluster
        self.results_group._v_attrs.cluster = self.cluster

        sel_coincidences = coincidences.read_where('N >= 3')
        for coincidence in pbar(sel_coincidences):
            self.reconstruct_individual_stations(coincidence)
            self.reconstruct_cluster_stations(coincidence)

        self.data.flush()
Example #31
def multiple_jobs(n, energy, particle, zenith, azimuth, queue, corsika):
    """Use this to sumbit multiple jobs to Stoomboot

    :param n: Number of jobs to submit
    :param energy: log10(E[eV]) energy of primary particle
    :param particle: Particle kind (as string, see
                     :mod:`~sapphire.corsika.particles` for possibilities)
    :param zenith: Zenith angle in degrees of the primary particle
    :param azimuth: Azimuth angle in degrees of the primary particle
    :param queue: Stoomboot queue to submit to
    :param corsika: Name of the CORSIKA executable to use

    """
    print textwrap.dedent("""\
        Batch submitting jobs to Stoomboot:
        Number of jobs      {n}
        Particle energy     10^{e} eV
        Primary particle    {p}
        Zenith angle        {z} degrees
        Azimuth angle       {a} degrees
        Stoomboot queue     {q}
        CORSIKA executable  {c}
        """.format(n=n,
                   e=energy,
                   p=particle,
                   z=zenith,
                   a=azimuth,
                   q=queue,
                   c=corsika))

    available_slots = check_queue(queue)
    if available_slots <= 0:
        n = 0
        print 'Submitting no jobs because queue is full.'
        return
    elif available_slots < n:
        n = available_slots
        print 'Submitting {n} jobs because queue is almost full.'.format(n=n)

    for _ in pbar(xrange(n)):
        batch = CorsikaBatch(energy=energy,
                             particle=particle,
                             zenith=zenith,
                             azimuth=azimuth,
                             queue=queue,
                             corsika=corsika)
        batch.run()
    print 'Done.'
Example #32
def serialiseddatafield_to_arrayfield(apps, schema_editor):
    """Forwards migrations"""
    model = apps.get_model('histograms', 'DailyDataset')
    multi_model = apps.get_model('histograms', 'MultiDailyDataset')
    if model.objects.all().count():
        print('')
    for dataset in pbar(model.objects.all().iterator(),
                        length=model.objects.all().count()):
        if dataset.type.slug in ['barometer', 'temperature']:
            dataset.x = dataset.old_x
            dataset.y = dataset.old_y
            dataset.save()
        else:
            multi_model.objects.create(source=dataset.source,
                                       type=dataset.type,
                                       x=dataset.old_x,
                                       y=dataset.old_y)
            dataset.delete()
Example #33
def get_tcc_values(data, force_new=False):
    if 'tcc' in data.root and not force_new:
        return data.root.tcc.read()
    else:
        h_events = data.root.hisparc.cluster_kascade.station_601.events
        c_index = data.root.kascade.c_index

        tcc = []
        for idx in pbar(c_index):
            event = h_events[idx['h_idx']]
            value = calculate_tcc(event)
            tcc.append(value)

        tcc = array(tcc)
        if 'tcc' in data.root:
            data.remove_node('/tcc')
        data.create_array('/', 'tcc', tcc)
        return tcc
Example #34
def get_integrals(data):
    blobs = data.get_node(GROUP, 'blobs')
    events = data.get_node(GROUP, 'events')

    raw_integrals = []
    filtered_integrals = []
    source_integrals = []

    for event in pbar(events[:2000]):
        # traces = get_traces_from_api(API_STATION, event)
        traces = get_traces_from_blobs(event, blobs)
        raw, filtered = determine_integrals(traces, event)
        source = event['integrals']
        raw_integrals.append(raw)
        filtered_integrals.append(filtered)
        source_integrals.append(source)
    return (array(raw_integrals), array(filtered_integrals),
            array(source_integrals))
Example #35
def serialiseddatafield_to_arrayfield(apps, schema_editor):
    """Forwards migrations"""
    model = apps.get_model('histograms', 'DailyDataset')
    multi_model = apps.get_model('histograms', 'MultiDailyDataset')
    if model.objects.all().count():
        print('')
    for dataset in pbar(model.objects.all().iterator(), length=model.objects.all().count()):
        if dataset.type.slug in ['barometer', 'temperature']:
            dataset.x = dataset.old_x
            dataset.y = dataset.old_y
            dataset.save()
        else:
            multi_model.objects.create(
                source=dataset.source,
                type=dataset.type,
                x=dataset.old_x,
                y=dataset.old_y)
            dataset.delete()
Example #36
def get_tcc_values(data, force_new=False):
    if 'tcc' in data.root and not force_new:
        return data.root.tcc.read()
    else:
        h_events = data.root.hisparc.cluster_kascade.station_601.events
        c_index = data.root.kascade.c_index

        tcc = []
        for idx in pbar(c_index):
            event = h_events[idx['h_idx']]
            value = calculate_tcc(event)
            tcc.append(value)

        tcc = array(tcc)
        if 'tcc' in data.root:
            data.remove_node('/tcc')
        data.create_array('/', 'tcc', tcc)
        return tcc
Example #37
    def reconstruct_core_positions(self, hisparc_group, kascade_group, tcc):
        hisparc_group = self.data.get_node(hisparc_group)

        hisparc_table = self.data.get_node(hisparc_group, 'events')
        c_index = self.data.get_node(kascade_group, 'c_index')
        kascade_table = self.data.get_node(kascade_group, 'events')

        self.cluster = hisparc_group._v_attrs.cluster
        self._store_cluster_with_results()

        for idx, tcc_value in pbar(zip(c_index[:self.N], tcc)):
            hisparc_event = hisparc_table[idx['h_idx']]
            kascade_event = kascade_table[idx['k_idx']]

            if tcc_value >= 10:
                x, y, N = self.reconstruct_core_position(hisparc_event)
                self.store_reconstructed_event(hisparc_event,
                                               kascade_event, x, y, N)

        self.results_table.flush()
Example #38
    def reconstruct_core_positions(self, hisparc_group, kascade_group, tcc):
        hisparc_group = self.data.get_node(hisparc_group)

        hisparc_table = self.data.get_node(hisparc_group, 'events')
        c_index = self.data.get_node(kascade_group, 'c_index')
        kascade_table = self.data.get_node(kascade_group, 'events')

        self.cluster = hisparc_group._v_attrs.cluster
        self._store_cluster_with_results()

        for idx, tcc_value in pbar(zip(c_index[:self.N], tcc)):
            hisparc_event = hisparc_table[idx['h_idx']]
            kascade_event = kascade_table[idx['k_idx']]

            if tcc_value >= 10:
                x, y, N = self.reconstruct_core_position(hisparc_event)
                self.store_reconstructed_event(hisparc_event, kascade_event, x,
                                               y, N)

        self.results_table.flush()
Example #39
def update_local_tsv():
    """Get configuration tsv data for all stations"""

    station_numbers = Network().station_numbers()

    for type in ['gps', 'trigger', 'layout', 'voltage', 'current',
                 'electronics']:
        subdir = API.src_urls[type].split('/')[0]
        try:
            mkdir(path.join(LOCAL_BASE, subdir))
        except OSError:
            pass

        for number in pbar(station_numbers):
            url = API.src_urls[type].format(station_number=number)
            try:
                get_and_store_tsv(url)
            except:
                if type != 'layout':
                    print 'Failed to get %s for station %d' % (type, number)
                continue
Example #40
def multiple_jobs(n, energy, particle, zenith, azimuth, queue, corsika):
    """Use this to sumbit multiple jobs to Stoomboot

    :param n: Number of jobs to submit
    :param energy: log10(E[eV]) energy of primary particle
    :param particle: Particle kind (as string, see
                     :mod:`~sapphire.corsika.particles` for possibilities)
    :param zenith: Zenith angle in degrees of the primary particle
    :param azimuth: Azimuth angle in degrees of the primary particle
    :param queue: Stoomboot queue to submit to
    :param corsika: Name of the CORSIKA executable to use

    """
    print textwrap.dedent("""\
        Batch submitting jobs to Stoomboot:
        Number of jobs      {n}
        Particle energy     10^{e} eV
        Primary particle    {p}
        Zenith angle        {z} degrees
        Azimuth angle       {a} degrees
        Stoomboot queue     {q}
        CORSIKA executable  {c}
        """.format(n=n, e=energy, p=particle, z=zenith, a=azimuth, q=queue,
                   c=corsika))

    available_slots = check_queue(queue)
    if available_slots <= 0:
        n = 0
        print 'Submitting no jobs because queue is full.'
        return
    elif available_slots < n:
        n = available_slots
        print 'Submitting {n} jobs because queue is almost full.'.format(n=n)

    for _ in pbar(xrange(n)):
        batch = CorsikaBatch(energy=energy, particle=particle, zenith=zenith,
                             azimuth=azimuth, queue=queue, corsika=corsika)
        batch.run()
    print 'Done.'
Example #41
def update_sublevel_json(arg_type, type):
    subdir = API.urls[type].split('/')[0]
    try:
        mkdir(path.join(LOCAL_BASE, subdir))
    except OSError:
        pass

    url = API.urls[arg_type]
    try:
        numbers = [x['number'] for x in loads(API._retrieve_url(url))]
    except:
        print 'Failed to get %s data' % type
        return

    kwarg = API.urls[type].split('/')[1].strip('{}')
    for number in pbar(numbers):
        url = API.urls[type].format(**{kwarg: number,
                                       'year': '', 'month': '', 'day': ''})
        try:
            get_and_store_json(url.strip('/'))
        except:
            print 'Failed to get %s data for %s %d' % (type, arg_type, number)
            return
Example #42
def serialiseddatafield_to_arrayfield(apps, schema_editor):
    """Forwards migrations"""
    model = apps.get_model('histograms', 'DailyHistogram')
    multi_model = apps.get_model('histograms', 'MultiDailyHistogram')
    if model.objects.all().count():
        print('')
    for histogram in pbar(model.objects.all().iterator(), length=model.objects.all().count()):
        if not histogram.type.has_multiple_datasets:
            histogram.bins = histogram.old_bins
            histogram.values = histogram.old_values
            histogram.save()
        else:
            if histogram.type.slug == 'pulseheight':
                new_bins = mv_to_adc(histogram.old_bins)
            elif histogram.type.slug == 'pulseintegral':
                new_bins = mvns_to_adcsample(histogram.old_bins)
            else:
                new_bins = histogram.old_bins
            multi_model.objects.create(
                source=histogram.source,
                type=histogram.type,
                bins=new_bins,
                values=histogram.old_values)
            histogram.delete()
Example #43
def serialiseddatafield_to_arrayfield(apps, schema_editor):
    """Forwards migrations"""
    model = apps.get_model('histograms', 'DailyHistogram')
    multi_model = apps.get_model('histograms', 'MultiDailyHistogram')
    if model.objects.all().count():
        print('')
    for histogram in pbar(model.objects.all().iterator(),
                          length=model.objects.all().count()):
        if not histogram.type.has_multiple_datasets:
            histogram.bins = histogram.old_bins
            histogram.values = histogram.old_values
            histogram.save()
        else:
            if histogram.type.slug == 'pulseheight':
                new_bins = mv_to_adc(histogram.old_bins)
            elif histogram.type.slug == 'pulseintegral':
                new_bins = mvns_to_adcsample(histogram.old_bins)
            else:
                new_bins = histogram.old_bins
            multi_model.objects.create(source=histogram.source,
                                       type=histogram.type,
                                       bins=new_bins,
                                       values=histogram.old_values)
            histogram.delete()
Example #44
    x = range(len(d1))
    graph = Plot()
    graph.plot(x, d1, markstyle='mark size=.5pt')
    graph.plot(x, d2, markstyle='mark size=.5pt', linestyle='red')
    graph.plot(x, d3, markstyle='mark size=.5pt', linestyle='green')
    graph.plot(x, d4, markstyle='mark size=.5pt', linestyle='blue')
    graph.set_ylabel(r'$\Delta t$ [ns]')
    graph.set_xlabel('Date')
    graph.set_xlimits(0, max(x))
    graph.set_ylimits(-LIMITS, LIMITS)
    graph.save_as_pdf('detector_offset_drift_%s_%d' % (type, station))


if __name__ == '__main__':

    for station in pbar(STATIONS):
        # Determine offsets for first day of each month
        output = open('offsets_%d.tsv' % station, 'wb')
        csvwriter = csv.writer(output, delimiter='\t')
        offsets = []
        timestamps = []
        for y in range(2010, 2016):
            for m in range(1, 13):
                if y == 2015 and m >= 4:
                    continue
                timestamps.append(datetime_to_gps(date(y, m, 1)))
                path = os.path.join(DATA_PATH, str(y), str(m),
                                    '%d_%d_1.h5' % (y, m))
                with tables.open_file(path, 'r') as data:
                    offsets.append(determine_offset(data, station))
                csvwriter.writerow([timestamps[-1]] + offsets[-1])
Example #45
def download_events_data(data):
    for station in pbar(STATIONS):
        group = '/s%d' % station
        if group not in data:
            download_data(data, group, station, start=START, end=END,
                          progress=False)
Example #46
    def test_pbar_generator_wrong_length(self):
        """Raise exception for generator with wrong length"""

        generator = (y for y in self.iterable)
        pb = utils.pbar(generator, length=len(self.iterable) - 5, fd=self.output)
        self.assertRaises(ValueError, list, pb)
Example #47
File: upload.py Project: HiSPARC/datastore
        self.n = len(self.files)

    def __len__(self):
        return self.n

    def __iter__(self):
        for fn in self.files:
            with open(fn, 'rb') as f:
                data = pickle.load(f)
                yield data['station_id'], data['event_list']


if __name__ == '__main__':
    datastore = Uploader(DATASTORE_VM, STATION_LIST)

    folder_to_upload = sys.argv[1]
    pickles = YieldPickles(folder_to_upload)

    print('Uploading %d pickles from folder: %s' % (len(pickles),
                                                    folder_to_upload))
    for sn, event_list in pbar(pickles):
        assert sn in [98, 99]
        try:
            r = datastore.upload(sn, event_list)
        except Exception as exc:
            print('Connection failed: ', str(exc))
            break
        if r != '100':
            print('Datastore responded with error code: ', r, type(r))
            break
Example #48
Note: I normalized the event_ids in the events table. The event_ids were
1-based, and were modified to be 0-based. The following code was used:

    events.modify_column(column=range(events.nrows), colname='event_id')

"""
import tables

from sapphire.utils import pbar

DATA_PATH = '/Users/arne/Datastore/kascade/kascade-20080912.h5'


if __name__ == "__main__":
    ids = [1, 2, 3, 4]
    with tables.open_file(DATA_PATH, 'r') as data:
        event_ids = []
        recs = data.root.reconstructions.iterrows()
        events = data.root.hisparc.cluster_kascade.station_601.events.iterrows()
        for rec in pbar(recs, length=data.root.reconstructions.nrows):
            for event in events:
                if all(rec['n%d' % id] == event['n%d' % id] for id in ids):
                    event_ids.append(event['event_id'])
                    break
    with tables.open_file(DATA_PATH, 'a') as data:
        data.create_array('/', 'c_index', event_ids)
        events = data.root.hisparc.cluster_kascade.station_601.events
        integrals = events.read_coordinates(event_ids, 'integrals')
        data.create_array('/', 'reconstructions_integrals', integrals)
        data.create_array('/', 'reconstructions_integrals_n', integrals / 5000.)
Example #49
            station_group = '/cluster_simulations/station_%d' % station.number
            rec_events = ReconstructESDEvents(data,
                                              station_group,
                                              station,
                                              overwrite=True,
                                              progress=False)
            rec_events.prepare_output()
            rec_events.offsets = [d.offset for d in station.detectors]
            rec_events.store_offsets()
            rec_events.reconstruct_directions()
            rec_events.store_reconstructions()

        # Reconstruct coincidences
        rec_coins = ReconstructESDCoincidences(data,
                                               '/coincidences',
                                               overwrite=True,
                                               progress=False)
        rec_coins.prepare_output()
        rec_coins.offsets = {
            station.number:
            [d.offset + station.gps_offset for d in station.detectors]
            for station in cluster.stations
        }
        rec_coins.reconstruct_directions()
        rec_coins.store_reconstructions()


if __name__ == "__main__":
    for path in pbar(glob.glob(PATHS)):
        reconstruct_simulations(path)
Example #50
    def test_pbar_iterable(self):
        pb = utils.pbar(self.iterable, fd=self.output)
        self.assertIsInstance(pb, progressbar.ProgressBar)
        self.assertEqual(list(pb), self.iterable)
Example #51
def perform_simulations():
    for id in pbar(range(10)):
        script = SCRIPT.format(job_id=id)
        submit_job(script, 'spa_sim_%d' % id, 'long')