コード例 #1
0
def download_coincidences_pair(pair):
    """Download coincidences for one station pair into its own file.

    Writes to a temporary file first and renames it to the final path
    only once the download has completed, so a partially written file is
    never mistaken for a finished dataset on a later run.

    :param pair: two-tuple of station numbers.

    """
    path = DATAPATH % tuple(pair)
    tmp_path = path + '_tmp'
    # Final file already exists from a previous run; nothing to do.
    if os.path.exists(path):
        print 'Skipping', pair
        return
    print 'Starting', pair, datetime.datetime.now()
    distance = distance_between_stations(*pair)
    timestamp_ranges = get_timestamp_ranges(pair)
    total_exposure = get_total_exposure(timestamp_ranges)
    with tables.open_file(tmp_path, 'w') as data:
        # Store metadata on the file root for later analysis.
        data.set_node_attr('/', 'total_exposure', total_exposure)
        data.set_node_attr('/', 'distance', distance)
        # Download each timestamp range separately into the same file.
        for ts_start, ts_end in timestamp_ranges:
            download_coincidences(data, stations=list(pair),
                                  start=gps_to_datetime(ts_start),
                                  end=gps_to_datetime(ts_end),
                                  progress=False)
        try:
            coin = data.get_node('/coincidences')
        except tables.NoSuchNodeError:
            # No coincidences found: still rename the file into place so
            # this pair is skipped on subsequent runs.
            print 'No coincidences for', pair
            os.rename(tmp_path, path)
            return
        # Coincidence rate over the pair's combined exposure time.
        rate = coin.coincidences.nrows / total_exposure
        data.set_node_attr('/', 'n_rate', rate)
        data.set_node_attr('/', 'n_coincidences', coin.coincidences.nrows)
    os.rename(tmp_path, path)
    determine_rate(path)
    print 'Finished', pair, datetime.datetime.now()
コード例 #2
0
def download_coincidences_data(data):
    """Download coincidence data for each subcluster and for all stations

    :param data: open PyTables file to store the coincidences in.

    """
    # Construct the API object once instead of once per lookup.
    network = Network()
    for subcluster in network.subclusters():
        group = ('/coincidences_%s' %
                 subcluster['name'].lower().replace(' ', '_'))
        # Skip subclusters already downloaded in an earlier run.
        if group in data:
            continue
        stations = network.station_numbers(subcluster=subcluster['number'])
        # A coincidence requires at least two stations.
        if len(stations) < 2:
            continue
        download_coincidences(data, group=group, stations=stations,
                              start=START, end=END)

    # Entire network
    if '/coincidences' not in data:
        download_coincidences(data, start=START, end=END)
コード例 #3
0
def download_sciencepark_coincidences():
    """Download a dataset for analysis

    Downloads coincidence data from the Science Park stations.
    Coincidences with at least 2 events in a coincidence are included,
    which allows determination of station offsets. After this,
    coincidences with many events (6+ or 7+) will be reconstructed.
    Note: Station 510 'overlaps' with 501. Station 507 is excluded.

    """
    # Download month by month to keep the individual requests small.
    with tables.open_file(COIN_PATH, 'a') as data:
        for month_start, month_end in monthrange(START, END):
            download_coincidences(data, stations=STATIONS, n=2,
                                  start=month_start, end=month_end)
コード例 #4
0
def download_sciencepark_dataset_n3():
    """Download a dataset for analysis

    To be used to check correctness of handling station timing offsets

    """
    station_numbers = range(501, 512)
    begin = datetime(2016, 1, 10)
    until = datetime(2016, 2, 1)
    # Fresh file each run ('w' truncates any existing dataset).
    with tables.open_file(PATH, 'w') as data:
        download_coincidences(data, stations=station_numbers,
                              start=begin, end=until, n=3)
コード例 #5
0
ファイル: download_dataset.py プロジェクト: 153957/topaz
def download_501_510_dataset():
    """Download a dataset for analysis

    """
    print "Downloading 501-510 dataset."
    stations = [501, 510]

    start = datetime(2015, 1, 20)
    end = datetime(2015, 2, 1)

    with tables.open_file(PATH + 'c_501_510_150120_150201.h5', 'a') as data:
        download_coincidences(data,
                              stations=stations,
                              start=start,
                              end=end,
                              n=2)
コード例 #6
0
def download_sciencepark_dataset_n11():
    """Download a dataset for analysis

    This script downloads coincidence data from the Science Park stations.
    Station 507 is excluded because its detector positions are not well known.
    Coincidences with at least 11 events in a coincidence are included.

    """
    stations = range(501, 512)
    start = (2015, 6)
    end = (2015, 12)
    path = os.path.join(DATASTORE, 'sciencepark_n11_150701_151105.h5')
    print "Downloading n11 Science Park dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=11)
コード例 #7
0
def download_aarhus_dataset():
    """Download a dataset for analysis

    This script downloads coincidence data from the Aarhus stations.
    Coincidences with at least 2 events in a coincidence are included.
    This allows for determination of detector and station offsets.
    After this coincidences that include each station will be reconstructed.

    """
    stations = [20001, 20002, 20003]
    start = (2012, 1)
    end = (2015, 4)
    path = os.path.join(DATASTORE, 'aarhus_n2_120101_140801.h5')
    print "Downloading Aarhus dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=2)
コード例 #8
0
def download_sciencepark_dataset_n7():
    """Download a dataset for analysis

    This script downloads coincidence data from the Science Park stations.
    Station 507 is excluded because its detector positions are not well known.
    Coincidences with at least 7 events in a coincidence are included.
    Note: Station 510 'overlaps' with 501.

    """
    stations = [501, 502, 503, 504, 505, 506, 508, 509, 510]
    start = (2012, 1)
    end = (2015, 4)
    path = os.path.join(DATASTORE, 'sciencepark_n7_120101_150401.h5')
    print "Downloading n7 Science Park dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=7)
コード例 #9
0
def download_sciencepark_dataset():
    """Download a dataset for analysis

    This script downloads coincidence data from the Science Park stations.
    Station 507 is excluded because its detector positions are not well known.
    Coincidences with at least 2 events in a coincidence are included.
    This allows for determination of detector and station offsets.
    After this coincidences with many events (6+ or 7+) will be reconstructed.
    Note: Station 510 'overlaps' with 501.

    """
    stations = [501, 502, 503, 504, 505, 506, 508, 509, 510]
    path = os.path.join(DATASTORE, 'sciencepark_n2_100101_150401.h5')
    print "Downloading Science Park dataset."
    with tables.open_file(path, 'a') as data:
        download_coincidences(data, stations=stations,
                              start=datetime(2010, 1, 1),
                              end=datetime(2015, 4, 1), n=2)
コード例 #10
0
def download_zaanlands_dataset():
    """Download a dataset for analysis

    This script downloads coincidence data from the Zaanland stations.
    Three 2-detector stations, all on the roof of the Zaanlands Lyceum.
    Coincidences with at least 2 events in a coincidence are included.
    This allows for determination of detector and station offsets.
    After this coincidences that include each station will be reconstructed.

    """
    stations = [102, 104, 105]
    start = (2012, 6)
    end = (2015, 4)
    path = os.path.join(DATASTORE, 'zaanlands_n2_120601_140801.h5')
    print "Downloading Zaanlands dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=2)
コード例 #11
0
def download_twente_dataset():
    """Download a dataset for analysis

    This script downloads coincidence data from the Twente stations.
    Three 2-detector stations, all on the roof of the TU Carre building.
    Coincidences with at least 2 events in a coincidence are included.
    This allows for determination of detector and station offsets.
    After this coincidences that include each station will be reconstructed.

    """
    stations = [7001, 7002, 7003]
    start = (2011, 8)
    end = (2015, 4)
    path = os.path.join(DATASTORE, 'twente_n2_110801_140801.h5')
    print "Downloading Twente dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=2)
コード例 #12
0
def download_501_510_dataset():
    """Download a dataset for analysis

    """
    print "Downloading 501-510 dataset."
    stations = [501, 510]

    start = datetime(2014, 10, 1)
    end = datetime(2014, 10, 10)

    with tables.open_file(
            '/Users/arne/Datastore/501_510/c_501_510_141001_141011.h5',
            'a') as data:
        download_coincidences(data,
                              stations=stations,
                              start=start,
                              end=end,
                              n=2)

    with tables.open_file(
            '/Users/arne/Datastore/501_510/e_501_510_141001_141011.h5',
            'a') as data:
        download_data(data, '/s501', 501, start=start, end=end)
        download_data(data, '/s510', 510, start=start, end=end)

    start = datetime(2014, 11, 1)
    end = datetime(2014, 11, 10)

    with tables.open_file(
            '/Users/arne/Datastore/501_510/c_501_510_141101_141111.h5',
            'a') as data:
        download_coincidences(data,
                              stations=stations,
                              start=start,
                              end=end,
                              n=2)

    with tables.open_file(
            '/Users/arne/Datastore/501_510/e_501_510_141101_141111.h5',
            'a') as data:
        download_data(data, '/s501', 501, start=start, end=end)
        download_data(data, '/s510', 510, start=start, end=end)
コード例 #13
0
def download_alphen_dataset():
    """Download a dataset for analysis

    This script downloads coincidence data from the Alphen ad Rijn stations.
    Three 2-detector stations, on high schools forming a triangle.
    Coincidences with at least 2 events in a coincidence are included.
    This allows for determination of detector and station offsets.
    These stations are far appart, so few coincidences are found that
    include all three stations. Using a larger coincidence window may help.

    """
    stations = [3301, 3302, 3303]
    start = (2010, 12)
    end = (2015, 4)
    path = os.path.join(DATASTORE, 'alphen_n2_101201_140801.h5')
    print "Downloading Alphen ad Rijn dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=2)
コード例 #14
0
def download_eindhoven_dataset():
    """Download a dataset for analysis

    This script downloads coincidence data from the Eindhoven stations.
    Four 2-detector stations, on the roofs of Universiteit Eindhoven.
    Coincidences with at least 2 events in a coincidence are included.
    This allows for determination of detector and station offsets.
    After this coincidences that include at least three station will be
    reconstructed.

    """
    stations = [8001, 8004, 8008, 8009]
    start = (2011, 10)
    end = (2015, 4)
    path = os.path.join(DATASTORE, 'eindhoven_n2_111001_140801.h5')
    print "Downloading Eindhoven dataset."
    with tables.open_file(path, 'a') as data:
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data, stations=stations, start=startdt,
                                  end=enddt, n=2)
コード例 #15
0
def download_coincidences_data(data):
    """Download coincidence data for each subcluster and for all stations"""

    for subcluster in Network().subclusters():
        name_slug = subcluster['name'].lower().replace(' ', '_')
        group = '/coincidences_%s' % name_slug
        if group in data:
            # Already downloaded in an earlier run.
            continue
        stations = Network().station_numbers(subcluster=subcluster['number'])
        if len(stations) < 2:
            # A coincidence requires at least two stations.
            continue
        download_coincidences(data, group=group, stations=stations,
                              start=START, end=END)

    # Entire network
    if '/coincidences' not in data:
        download_coincidences(data, start=START, end=END)
コード例 #16
0
ファイル: download_dataset.py プロジェクト: 153957/topaz
def download_sciencepark_coincidences():
    """Download a dataset for analysis

    This script downloads coincidence data from the Science Park stations.
    Station 507 is excluded because its detector positions are not well known.
    Coincidences with at least 10 events in a coincidence are included
    (the original docstring said "at least 2", contradicting the ``n=10``
    below and the ``n10`` in the output filename).
    Note: Station 510 'overlaps' with 501.

    """
    start = (2015, 11)
    end = (2016, 2)
    # Stations 501-511, with 507 left out.
    stations = [501, 502, 503, 504, 505, 506, 508, 509, 510, 511]
    path = os.path.join(DATASTORE, 'dataset_sciencepark_n10_151101_160201.h5')
    with tables.open_file(path, 'a') as data:
        # Download month by month to keep each request small.
        for startdt, enddt in monthrange(start, end):
            download_coincidences(data,
                                  stations=stations,
                                  start=startdt,
                                  end=enddt,
                                  n=10)
コード例 #17
0
def download_data(data):
    """Download coincidences for the configured stations and period."""
    query = dict(stations=STATIONS, start=START, end=END)
    download_coincidences(data, **query)