# ---- Example 1 ----
import os
import copy
import gc
import numpy as np
from sklearn import metrics
from sklearn import preprocessing
import pdb as check
from collections import defaultdict
import h5py
import modelfunctions
import dill
import pickle

# Location of the held-out testing file-name list.
testFileNames = '../Data/Testing_FileNames.h5'
file_names_testing = modelfunctions.readHDF(testFileNames,
                                            'file_names_testing')

# Pickle file that will receive the ROC results.
filenamePickle = './AllROC_OtherQuantities.pkl'

# Stress-derived quantities evaluated for ROC comparison.
field_names_for_indexing = [
    'stresses_full_cfs_1', 'stresses_full_max_shear', 'von_mises'
]
field_names = list(field_names_for_indexing)

# Per-field accumulators (FPR, TPR, p-values, AUC, raw field values),
# each keyed by field name.
fprdict, tprdict, pvalues, aucdict, testingfields = (
    defaultdict(list) for _ in range(5))
# ---- Example 2 ----
from __future__ import unicode_literals
from __future__ import division
import csv
import os
import copy
import gc
import numpy as np
import pdb as check
import h5py
import modelfunctions

# Pre-trained network weights, shared by every orientation.
filenameWeight = '../Data/TheBestWeights.h5'

# Build the model and load its weights ONCE: they are loop-invariant, so
# recreating/reloading inside the loop (as before) was redundant work.
model = modelfunctions.create_model()
model.load_weights(filenameWeight)

# Run the network on idealized Okada inputs rotated 0-350 deg in 10-deg steps.
degvec = np.arange(0, 360, 10)
for deg in degvec:
    IN = modelfunctions.readHDF(
        './IN_IdealizedOkadaCase_' + str(int(deg)) + '.h5', 'IN')
    shp = np.shape(IN)
    # Number of stress components per sample (6 in the original data).
    # Using shp[1] instead of a hard-coded 6 keeps the split consistent
    # with the [shp[0], shp[1] * 2] allocation for any input width.
    ncomp = shp[1]
    # The network consumes |component| and -|component| stacked side by side.
    INFinal = np.zeros([shp[0], ncomp * 2])
    INFinal[:, :ncomp] = np.abs(IN)
    INFinal[:, ncomp:] = -1. * np.abs(IN)
    fieldvalsEQ = model.predict(INFinal)
    modelfunctions.writeHDF(
        './NN_Outputs_IdealizedOkadaCase_' + str(int(deg)) + '.h5', IN,
        fieldvalsEQ)
                       width_ratios=[1, .2, 1, .2, 1, .2, 1, .001],
                       height_ratios=[30, 1, 30, 1])
rowscale = 2
# Keep only the lower 3/4 of 'Reds' so the near-white shades are dropped.
cmap = plt.get_cmap('Reds')
new_cmap = truncate_colormap(cmap, 0.0, 0.75)

# The weights are identical for every slip distribution, so build the model
# and load the weights ONCE instead of once per file (loop-invariant hoist).
model = modelfunctions.create_model()
model.load_weights(filenameWeight)

# Corner coordinates (UTM) of each rectangular fault patch; the list is
# constant, so it is also hoisted out of the loop.
fn = [
    'x1Utm', 'y1Utm', 'x2Utm', 'y2Utm', 'x3Utm', 'y3Utm', 'x4Utm', 'y4Utm'
]

#loop over slip distributions
for filenum, filename in enumerate(files):

    # Load fault geometry; the fault file shares the data file's stem
    # (filename minus its 9-character suffix).
    fault = defaultdict()
    for field in fn:
        fault[field] = modelfunctions.readHDF(filename[:-9] + '.h5', field)

    #read in data
    file = str(pathtofiles + str(filename))
    data = modelfunctions.read_file_to_dict(file)
    grid_aftershock_count = np.double(data['aftershocksyn'])

    #prepare inputs to NN
    IN = modelfunctions.LoadInputsDict(data, field_names_in)

    #run NN prediction for this slip distribution
    data['ANN'] = model.predict(IN)
# ---- Example 4 ----
#import csv
import os
import copy
import gc
import numpy as np
import pdb as check
from collections import defaultdict
import modelfunctions

# Directory holding the per-earthquake CSV exports.
pathtofiles = '../Data/AllCSV/'

# Train/test file-name lists live next to each other under ../Data/.
datadir = '../Data/'
file_names_training = modelfunctions.readHDF(datadir + 'Training_FileNames.h5',
                                             'file_names_training')
file_names_testing = modelfunctions.readHDF(datadir + 'Testing_FileNames.h5',
                                            'file_names_testing')

# Accumulators for per-quantity results, keyed by quantity name.
training = defaultdict(list)
testing = defaultdict(list)

# The six independent components of the full stress tensor.
field_names = ['stresses_full_' + comp
               for comp in ('xx', 'yy', 'xy', 'xz', 'yz', 'zz')]

# Iterate over the held-out testing earthquakes.  Grids whose aftershock
# labels contain fewer than two distinct values are skipped — presumably
# because downstream ROC-style metrics need both classes present (TODO
# confirm against the rest of this script, which is not visible here).
for i, file_name in enumerate(file_names_testing):
    print(i, file_name)
    print('testing eq')
    # Parse this earthquake's CSV export into a dict of columns.
    data = modelfunctions.read_file_to_dict(pathtofiles + str(file_name))
    grid_aftershock_count = np.double(data['aftershocksyn'])
    # Skip single-class label grids (all zeros or all ones).
    if len(np.unique(grid_aftershock_count)) < 2:
        continue