Example 1
]
field_names = copy.copy(field_names_for_indexing)

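# Per-file accumulators: ROC false/true positive rates, p-values, AUC values,
# and the stress-field names used for each testing earthquake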
fprdict = defaultdict(list)
tprdict = defaultdict(list)
pvalues = defaultdict(list)
aucdict = defaultdict(list)
testingfields = defaultdict(list)

# Loop over each of the csv files in the testing data set
for (ff, file_name) in enumerate(file_names_testing):
    print('Reading in ' + file_name)

    # Read in .csv file to dictionary
    file = '../Data/AllCSV/' + str(file_name)
    data = modelfunctions.read_file_to_dict(file)

    # Binarize aftershocks
    grid_aftershock_count = np.double(data['aftershocksyn'])
    aftershock_count = preprocessing.binarize(
        grid_aftershock_count.reshape(1, -1))[0]

    # Skip this file if it contains no aftershocks
    if len(np.unique(aftershock_count)) < 2:
        continue

    # find best-performing sign convention for Coulomb failure stress change for this slip distribution
    num = modelfunctions.FindWhichCFS(data, aftershock_count)
    if num == 1:
        field_names[0] = 'stresses_full_cfs_1'
    elif num == 2:
        field_names[0] = 'stresses_full_cfs_2'
    elif num == 3:
        field_names[0] = 'stresses_full_cfs_3'
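The accumulator dictionaries above suggest that, still inside the per-file loop, a previously trained classifier is evaluated and its ROC curve and AUC are stored for each testing earthquake. A minimal sketch of that step, assuming a fitted scikit-learn-style classifier `clf` and an illustrative feature matrix built from the selected field names (neither `clf` nor this exact feature construction appears in the truncated snippet):

    # Continuing the per-file loop; the import would normally sit at the top of the script.
    from sklearn.metrics import roc_curve, auc

    # Assemble per-grid-cell features for this file (illustrative construction).
    X = np.column_stack([np.double(data[name]) for name in field_names])

    # Score every grid cell with the assumed pre-trained classifier and
    # record this file's ROC curve and AUC under its file name.
    scores = clf.predict_proba(X)[:, 1]
    fpr, tpr, _ = roc_curve(aftershock_count, scores)
    fprdict[file_name] = fpr
    tprdict[file_name] = tpr
    aucdict[file_name] = auc(fpr, tpr)
    testingfields[file_name] = list(field_names)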
Example 2
                                             'file_names_training')
file_names_testing = modelfunctions.readHDF('../Data/Testing_FileNames.h5',
                                            'file_names_testing')

training = defaultdict(list)
testing = defaultdict(list)

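# The six independent components of the (symmetric) coseismic stress-change tensor read from each file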
field_names = [
    'stresses_full_xx', 'stresses_full_yy', 'stresses_full_xy',
    'stresses_full_xz', 'stresses_full_yz', 'stresses_full_zz'
]

for i, file_name in enumerate(file_names_testing):
    print(i, file_name)
    print('testing eq')
    data = modelfunctions.read_file_to_dict(pathtofiles + str(file_name))
    grid_aftershock_count = np.double(data['aftershocksyn'])
    if len(np.unique(grid_aftershock_count)) < 2:
        continue
    testing['aftershocksyn'].extend(grid_aftershock_count.tolist())
    for field in field_names:
        testing[field].extend(np.double(data[field]))
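
# Illustrative only (not part of the original example): after the testing loop,
# the accumulated per-field lists can be stacked into a feature matrix and
# label vector for later evaluation of a classifier.
X_testing = np.column_stack([testing[name] for name in field_names])
y_testing = np.array(testing['aftershocksyn'])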

for i, file_name in enumerate(file_names_training):
    print(i, file_name)
    print('training')
    data = modelfunctions.read_file_to_dict(pathtofiles + str(file_name))
    grid_aftershock_count = np.double(data['aftershocksyn'])
    if len(np.unique(grid_aftershock_count)) < 2:
        continue