def main():
    file_handeler = H5pyHandeler(
        os.path.join(get_store_path(), 'final_network_retrain',
                     'templates_to_download.hf5'))
    multi_handeler = H5pyMultiHandeler()
    multi_handeler.add_file_handeler(file_handeler)
    with multi_handeler as mh:
        generator = fg.FileGenerator(mh, [(0, 0), (-1, 0), (-1, 1), (2, 3)],
                                     batch_size=2,
                                     shuffle=True)
        print(generator[0])
    return
def getPercentage():
    resultPath = os.path.join(get_store_path(), 'long_data_2', 'results')
    collectedResultsPath = os.path.join(resultPath, 'collected_results.hf5')

    #22 low-level parts for each of the 100 high-level indices 3, 5, ..., 201
    target = float(22 * len(np.arange(3, 202, 2)))

    current = 0.0

    with h5py.File(collectedResultsPath, 'r') as f:
        for k in f.keys():
            current += len(f[k].keys())

    return current / target
def main():
    resultPath = os.path.join(get_store_path(), 'long_data_2', 'results')
    collectedResultsPath = os.path.join(resultPath, 'collected_results.hf5')

    with h5py.File(collectedResultsPath, 'a') as f:
        highLevelIndices = list(np.arange(3, 202, 2, dtype=int))

        checkIndices = {}

        existingKeys = f.keys()

        existingKeys = [int(pt) for pt in existingKeys]

        #Check which files have not been put into the final one
        for idx in highLevelIndices:
            if idx in existingKeys:
                currIdx = f[str(idx)].keys()
                currIdx = [int(pt) for pt in currIdx]
                if len(currIdx) < 22:
                    checkIndices[idx] = []
                    for i in range(22):
                        if i not in currIdx:
                            checkIndices[idx].append(i)
            else:
                checkIndices[idx] = list(range(22))

        if len(checkIndices.keys()) == 0:
            print("File already complete.")
        else:
            for highLevel in sorted(checkIndices.keys()):
                print("Checking highLevel: {}".format(highLevel))
                lowLevelList = checkIndices[highLevel]
                if newFileExistsQ(highLevel, lowLevelList):
                    if str(highLevel) not in f.keys():
                        f.create_group(str(highLevel))
                    for lowLevel in lowLevelList:
                        print("Got something new: ({}, {})".format(
                            highLevel, lowLevel))
                        writeToFile(f[str(highLevel)], highLevel, lowLevel)

    print("The file is to {0:.2f}% complete.".format(100 * getPercentage()))

    return
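The helpers newFileExistsQ and writeToFile are only called above, not defined in this snippet. Below is a minimal, hypothetical sketch of what they might look like, assuming the partial result files follow the partialPath naming scheme shown further down and are themselves HDF5 files whose top-level entries are copied into the collected file; the actual layout of the partial files is not shown here, so every detail is an assumption.

import os
import h5py
def newFileExistsQ(highLevel, lowLevelList):
    #Hypothetical sketch: report whether at least one of the still missing
    #partial files is present on disk. Assumes the files live at
    #partialPath(highLevel, lowLevel) with no further extension.
    return any(
        os.path.isfile(partialPath(highLevel, lowLevel))
        for lowLevel in lowLevelList)
def writeToFile(group, highLevel, lowLevel):
    #Hypothetical sketch: copy the contents of one partial HDF5 file into the
    #collected file under a sub-group named after the low-level index.
    partPath = partialPath(highLevel, lowLevel)
    if not os.path.isfile(partPath):
        return
    with h5py.File(partPath, 'r') as src:
        dst = group.require_group(str(lowLevel))
        for key in src.keys():
            src.copy(key, dst)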
def get_results_path():
    return os.path.join(get_store_path(), 'long_data_2', 'results')
def getRunStatsPath(highLevel):
    return os.path.join(get_store_path(), 'long_data_2',
                        'run_stats_' + str(highLevel) + '.hdf')
def evaluate_dual_output_form(net_name,
                              temp_name,
                              format_name,
                              show=True,
                              screen_name=None):
    """Evaluates the performance of a network with two outputs.
    
    This function expects the network to have two outputs. The first should be
    the predicted SNR while the second is a representation of a boolean value
    that indicates whether there is a GW in the data or not. This boolean value
    should be an array with two entries (p, 1-p), where p is the "probabilty"
    of a GW being present. Thus the structure of a single output needs to be:
    [SNR, [p, 1-p]]
    This function also creates a few plots.
    
    Arguments
    ---------
    net_name : str
        The name of the networks '.hf5' file (file extension NOT included).
    
    temp_name : str
        The name of the datas '.hf5' file (file extension NOT included).
    
    Returns
    -------
    list
        A list with five values. The first entry represents how many signals
        the network correctly predicted to have a GW in them. The second
        represents how many signals the network correctly predicted to have no
        GW in them. The third how many it falsly predicted to have no GW in the
        data, the fourth how many it falsly predicted to have a GW in the data,
        the fifth represents the number of samples where the network had a bias
        of less then 60% towards one or the other output. (i.e. the output for
        the bool value was something like [0.55, 0.45])
    """
    saves_path = get_store_path()
    net_path = os.path.join(saves_path, net_name + '.hf5')
    temp_path = os.path.join(saves_path, temp_name + '.hf5')
    format_path = os.path.join(saves_path, format_name + '.py')

    net = keras.models.load_model(net_path)

    d_form = imp.load_source('d_form', format_path)

    te_d = d_form.format_data_segment(load_testing_data(temp_path))
    te_l = d_form.format_label_segment(load_testing_labels(temp_path))
    te_c = load_testing_calculated_snr(temp_path)

    res = net.predict(te_d, verbose=1)

    predicted_snr = [pt[0] for pt in res[0]]

    predicted_bool = [pt[0] > pt[1] for pt in res[1]]

    #[correct GW, correct no GW, missed GW, false alarm, undecided]
    l = [0, 0, 0, 0, 0]

    #print('Len predicted bool: {}'.format(len(predicted_bool)))
    #print('Len res: {}'.format(len(res)))

    for i in range(len(predicted_bool)):
        if predicted_bool[i] == bool(te_l[1][i][0]):
            #Correctly predicted
            if predicted_bool[i]:
                #GW is in the signal
                l[0] += 1
            else:
                #GW is not in the signal
                l[1] += 1
        else:
            #Falsly predicted
            if predicted_bool[i]:
                #Network predicts signal but there is none in the data
                l[3] += 1
            else:
                #Network predicts no signal but there is one in the data
                l[2] += 1

        if abs(res[1][i][0] - 0.5) < 0.1:
            l[4] += 1

    plot_path = os.path.join(saves_path, net_name + '_removed_negatives.png')

    #Do the necessary plots
    x_pt_1 = [pt[0] for i, pt in enumerate(te_l[0]) if predicted_bool[i]]
    x_pt_2 = [pt for i, pt in enumerate(te_c) if predicted_bool[i]]
    y_pt = [pt[0] for i, pt in enumerate(res[0]) if predicted_bool[i]]

    #print(x_pt_1)
    #print(x_pt_2)
    #print(y_pt)

    _do_plot(net_name if screen_name is None else str(screen_name),
             np.array(x_pt_1),
             np.array(x_pt_2),
             np.array(y_pt),
             plot_path,
             show=show)

    return l
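A minimal usage sketch for evaluate_dual_output_form; the network, template and format names below are placeholders rather than files that necessarily exist under get_store_path(), and the unpacking of the return value follows the docstring above.

if __name__ == "__main__":
    #Placeholder names; replace with an actual network, data and format file.
    counts = evaluate_dual_output_form('some_net', 'some_templates',
                                       'some_format', show=False)
    correct_signal, correct_noise, missed, false_alarm, undecided = counts
    total = correct_signal + correct_noise + missed + false_alarm
    if total > 0:
        print("Accuracy: {:.2f}%".format(
            100.0 * (correct_signal + correct_noise) / total))
    print("Samples with a bias below 60%: {}".format(undecided))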
def get_data_dir():
    #return os.path.join(get_store_path(), 'long_data_2')
    return os.path.join(get_store_path(), 'bbhTest')
def get_parent_dir_path():
    return os.path.join(get_store_path(), PARENT_DIR_NAME)
import os
import sys
from aux_functions import get_store_path
from detection_pipeline import evaluate_ts_from_generator
from pycbc.types import TimeSeries
from progress_bar import progress_tracker

#Command line input, should be integer
data_num = int(sys.argv[1])
#Name of directory
dir_name = 'evaluation_results'
#Name of data file including extension
data_name = 'data-1.hdf'
#Name of file that stores time series results
ts_name = 'resulting_ts_' + str(data_num) + '.hf5'
#Name of file containing triggers
trigger_name = 'triggers_' + str(data_num) + '.hf5'
#The path to the directory, where results are stored
dir_path = os.path.join(get_store_path(), dir_name)
#Path to the data that should be evaluated
#data_path = os.path.join('/home/ahnitz/projects/ml_mock_data', data_name)
data_path = os.path.join(get_store_path(), data_name)
#Path where resulting time series are stored
ts_path = os.path.join(dir_path, ts_name)
#Path to the triggers of the data
trigger_path = os.path.join(dir_path, trigger_name)
#Path to the network
#net_path = '/home/marlin.schaefer/master_project/bns_net/saves/tcn_collect_inception_res_net_rev_6_248201905643/tcn_collect_inception_res_net_rev_6_epoch_21.hf5'
net_path = os.path.join(get_store_path(),
                        'tcn_collect_inception_res_net_rev_6_248201905643',
                        'tcn_collect_inception_res_net_rev_6_epoch_21.hf5')

#Thresholds for the network
snr_threshold = 6.7010087966918945
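For context, a minimal sketch of how a resulting time series could be written to ts_path and read back; the dataset name and attribute layout here are illustrative assumptions, not necessarily the format produced by evaluate_ts_from_generator.

import h5py
def store_time_series(path, ts, name='ts'):
    #Store the raw samples plus the metadata needed to rebuild a TimeSeries.
    with h5py.File(path, 'a') as f:
        dset = f.create_dataset(name, data=ts.numpy())
        dset.attrs['delta_t'] = float(ts.delta_t)
        dset.attrs['epoch'] = float(ts.start_time)
def load_time_series(path, name='ts'):
    #Rebuild a pycbc TimeSeries from the stored samples and attributes.
    with h5py.File(path, 'r') as f:
        dset = f[name]
        return TimeSeries(dset[()], delta_t=float(dset.attrs['delta_t']),
                          epoch=float(dset.attrs['epoch']))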
def partialPath(highLevel, lowLevel):
    resultPath = os.path.join(get_store_path(), 'long_data_2', 'results')
    return (os.path.join(resultPath,
                         'data_' + str(highLevel) + '_part_' + str(lowLevel)))
def get_dir_path():
    return os.path.join(get_store_path(), 'final_network_retrain_dev21_3')
from run_net import run_net
from aux_functions import get_store_path
import numpy as np
import os
from generate_split_data import generate_template

ep = 150
wiki_e = True
file_name = 'templates'
num_signals = 20000
num_noise = 100000

generate_template(os.path.join(get_store_path(), file_name + '.hf5'),
                  num_signals,
                  num_noise,
                  snr=[8.0, 15.0])

if __name__ == "__main__":
    run_net('combination_net',
            file_name,
            ini_file='testing_net.ini',
            create_wiki_entry=False,
            overwrite_template_file=False,
            epochs=1,
            use_data_object=True,
            show_snr_plot=False,
            overwrite_net_file=True,
            evaluate_on_large_testing_set=False)