def load_topology_realisations(path, **args):
    """Load all model topology realisations found under a directory tree.

    **Arguments**:
     - *path* = The root directory that models should be loaded from. All
       models with the same base_name as this class will be loaded
       (including subdirectories).

    **Optional Arguments**:
     - *load_attributes* = True if nodes and edges in the topology network
       should be attributed with properties such as volume and surface area
       and lithology colour. Default is True.
     - *verbose* = True if this function should write debug information to
       the print buffer. Default is True.

    **Returns**:
     - a list of NoddyTopology objects
    """
    vb = args.get('verbose', True)
    attr = args.get('load_attributes', True)

    # use print() calls (not py2 print statements) so this also runs on Python 3
    if vb:
        print("Loading models in %s" % path)

    # array of topology objects
    from pynoddy.output import NoddyTopology
    topologies = []

    for root, dirnames, filenames in os.walk(path):  # walk the directory tree
        for f in filenames:
            if '.g23' in f:  # find all topology files
                # strip the extension to recover the model base name
                base = os.path.join(root, f.split('.')[0])
                if vb:
                    print('Loading %s' % base)

                # load & store topology
                topologies.append(NoddyTopology(base, load_attributes=attr))

    return topologies
def test_resolution(self, numTrials, **kwds):
    """Tests the sensitivity of a model to block size by generating models
    of different resolutions and comparing them.

    **Arguments**:
     - *numTrials* = the number of different model resolutions to test

    **Optional Keywords**:
     - *cleanup* = True if this function should delete any models it creates.
       Otherwise models of different resolutions are left in the same
       directory as the .his file they derive from. Default is True.
     - *verbose* = If true, this function sends information to the print
       buffer. Otherwise it runs silently. Default is True.

    **Returns**:
     - The function returns a list containing the cumulative number of model
       topologies observed, starting from the highest resolution (smallest
       block size) to the lowest block size (largest block size)
    """
    # get args
    outFile = kwds.get("output", "")
    cleanup = kwds.get("cleanup", True)
    verbose = kwds.get("verbose", True)

    # import pynoddy bindings
    import pynoddy

    # store null volume threshold and then set to zero
    old_threshold = pynoddy.null_volume_threshold
    pynoddy.null_volume_threshold = 0

    # place to keep topologies
    self.topo_list = []
    self.res_list = []

    self.nUnique = 0  # number of unique topologies
    self.count = []   # number of different topologies observed at each step
    self.size = []    # number of edges (relationships) observed at each step

    # run test
    # NB: integer (floor) division — range() requires an int step, and under
    # Python 3 the old "/" would produce a float here
    step = (self.maxSize - self.minSize) // numTrials  # step change between resolutions
    for res in range(self.minSize, self.maxSize, step):
        if verbose:
            print("Computing model with %d block size" % res)

        # change cube size
        self.change_cube_size(res, type="Geophysics")
        self.change_cube_size(res, type="Geology")
        print("Cube size: %d:" % self.get_cube_size())

        # store cube size
        self.res_list.append(res)

        # save history file
        basename = self.path + "_cube_size_%d" % res
        self.write_history(basename + ".his")

        # run saved history file
        if verbose:
            print("Running resolution %d... " % res)
            print(pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY"))
            print("Complete.\n")
        else:
            pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY")

        # calculate topology
        if verbose:
            print("Computing model topologies...")
            print(pynoddy.compute_topology(basename))
            print("Finished.\n")
        else:
            pynoddy.compute_topology(basename, 1)

        # load and store topology output
        topo = NoddyTopology(basename + "_0001")

        # cull small nodes
        # topo.filter_node_volumes(self.min_node_volume)

        # see if this is on the list
        if topo.is_unique(self.topo_list):
            self.nUnique += 1  # increment unique topologies

        # store cumulative sequence
        self.count.append(self.nUnique)

        # add to list of observed topologies
        self.topo_list.append(topo)

        # append number of edges to edges list
        self.size.append(topo.graph.number_of_edges())

        # cleanup
        if cleanup:
            import os, glob
            # remove noddy files
            for f in glob.glob(basename + "*"):
                os.remove(f)

    print("Complete. A total of %d topologies were observed" % self.nUnique)
    print("The size of the network at each step was:")
    print(self.size)
    print("The cumulative observation sequence was:")
    print(self.count)

    # restore
    pynoddy.null_volume_threshold = old_threshold

    return self.count
def test_resolution(self, numTrials, **kwds):
    '''Tests the sensitivity of a model to block size by generating models
    of different resolutions and comparing them.

    **Arguments**:
     - *numTrials* = the number of different model resolutions to test

    **Optional Keywords**:
     - *cleanup* = True if this function should delete any models it creates.
       Otherwise models of different resolutions are left in the same
       directory as the .his file they derive from. Default is True.
     - *verbose* = If true, this function sends information to the print
       buffer. Otherwise it runs silently. Default is False.

    **Returns**:
     - The function returns a list containing the cumulative number of model
       topologies observed, starting from the highest resolution (smallest
       block size) to the lowest block size (largest block size)
    '''
    # get args
    outFile = kwds.get("output", "")
    cleanup = kwds.get("cleanup", True)
    verbose = kwds.get("verbose", False)

    # import pynoddy bindings
    import pynoddy

    # store null volume threshold and then set to zero
    old_threshold = pynoddy.null_volume_threshold
    pynoddy.null_volume_threshold = 0

    # place to keep topologies
    self.topo_list = []
    self.res_list = []

    self.nUnique = 0  # number of unique topologies
    self.count = []   # number of different topologies observed at each step
    self.size = []    # number of edges (relationships) observed at each step

    # run test
    # BUG FIX: "/" is float division on Python 3 and range() rejects a float
    # step — use floor division instead
    step = (self.maxSize - self.minSize) // numTrials  # step change between resolutions
    for res in range(self.minSize, self.maxSize, step):
        if verbose:
            print("Computing model with %d block size" % res)

        # change cube size
        # NOTE(review): the py2 variant passed type="Geophysics" and
        # type="Geology" to these two calls; the kwargs appear to have been
        # lost in the 2to3 conversion. Both calls are kept as-is here —
        # confirm against change_cube_size's signature.
        self.change_cube_size(res)
        self.change_cube_size(res)
        print("Cube size: %d:" % self.get_cube_size())

        # store cube size
        self.res_list.append(res)

        # save history file
        basename = self.path + "_cube_size_%d" % res
        self.write_history(basename + ".his")

        # run saved history file
        if verbose:
            print("Running resolution %d... " % res)
            print(pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY"))
            print("Complete.\n")
        else:
            pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY")

        # calculate topology
        if verbose:
            print('Computing model topologies...')
            print(pynoddy.compute_topology(basename))
            print('Finished.\n')
        else:
            pynoddy.compute_topology(basename, 1)

        # load and store topology output
        topo = NoddyTopology(basename + "_0001")

        # cull small nodes
        # topo.filter_node_volumes(self.min_node_volume)

        # see if this is on the list
        if topo.is_unique(self.topo_list):
            self.nUnique += 1  # increment unique topologies

        # store cumulative sequence
        self.count.append(self.nUnique)

        # add to list of observed topologies
        self.topo_list.append(topo)

        # append number of edges to edges list
        self.size.append(topo.graph.number_of_edges())

        # cleanup
        if cleanup:
            import os, glob
            # remove noddy files
            for f in glob.glob(basename + "*"):
                os.remove(f)

    print("Complete. A total of %d topologies were observed" % self.nUnique)
    print("The size of the network at each step was:")
    print(self.size)
    print("The cumulative observation sequence was:")
    print(self.count)

    # restore
    pynoddy.null_volume_threshold = old_threshold

    return self.count
def __init__(self, path, params=None, n=None, **kwds):
    '''Performs a topological uncertainty analysis.

    If a directory is given, all the history files within the directory are
    loaded and the analyses performed on them. If a history file is given,
    n perturbations are performed on it using the params file.

    **Arguments**:
     - *path* = The directory or history file to perform this analysis on.

    **Optional Arguments**:
     - *params* = The params file to use for MonteCarlo perturbation (if a
       history file is provided)
     - *n* = The number of model perturbations to generate (if a history
       file is provided)

    **Optional Keywords**:
     - *verbose* = True if this experiment should write to the print buffer.
       Default is True
     - *threads* = The number of threads this experiment should utilise.
       The default is 4.
     - *force* = True if all noddy models should be recalculated.
       Default is False.
    '''
    # init variables
    self.base_history_path = None
    self.base_path = path  # if a history file has been given, this will be changed
    vb = kwds.get("verbose", True)
    n_threads = kwds.get("threads", 4)
    force = kwds.get("force", False)

    # a history file has been given (path contains an extension):
    # generate model realisations from it
    if '.' in path:
        if '.his' not in path:  # foobar
            print("Error: please provide a valid history file (*.his)")
            return
        if params is None or n is None:  # need this info
            print("Error: please provide valid arguments [params,n]")
            # BUG FIX: bail out here — the original fell through and crashed
            # in MonteCarlo(path, None) below
            return

        self.base_history_path = path
        self.base_path = path.split('.')[0]  # trim file extension
        self.num_trials = n

        # ensure output path exists
        if not os.path.exists(self.base_path):
            os.makedirs(self.base_path)

        # do monte carlo simulations
        MC = MonteCarlo(path, params)
        MC.generate_model_instances(self.base_path, n, sim_type='TOPOLOGY',
                                    verbose=vb, threads=n_threads,
                                    write_changes=None)
    else:
        # a directory was given: ensure that the models in it have been run
        MonteCarlo.generate_models_from_existing_histories(
            self.base_path, sim_type='TOPOLOGY', force_recalculate=force,
            verbose=vb, threads=n_threads)

    # load models from base directory
    self.models = TopologyAnalysis.ModelRealisation.loadModels(self.base_path, verbose=vb)

    ###########################################
    # GENERATE TOPOLOGY LISTS
    ###########################################
    # declare lists
    self.all_litho_topologies = []
    self.all_struct_topologies = []

    # generate lists
    for m in self.models:
        self.all_litho_topologies.append(m.topology)
        self.all_struct_topologies.append(m.topology.collapse_stratigraphy())

    ############################################
    # FIND UNIQUE TOPOLOGIES
    ############################################
    self.accumulate_litho_topologies = []
    self.accumulate_struct_topologies = []

    self.unique_litho_topologies = NoddyTopology.calculate_unique_topologies(
        self.all_litho_topologies, output=self.accumulate_litho_topologies)
    self.unique_struct_topologies = NoddyTopology.calculate_unique_topologies(
        self.all_struct_topologies, output=self.accumulate_struct_topologies)

    ############################################
    # GENERATE SUPER TOPOLOGY
    ############################################
    self.super_litho_topology = NoddyTopology.combine_topologies(self.all_litho_topologies)
    self.super_struct_topology = NoddyTopology.combine_topologies(self.all_struct_topologies)
# generate random perturbations using 4 separate threads (in TOPOLOGY mode)
output_name = "mc_out"

# NOTE(review): an unresolved git merge conflict was here. Resolved in favour
# of the HEAD branch (n=10, sim_type="TOPOLOGY") because TOPOLOGY mode matches
# the comment above and the topology post-processing below — confirm against
# upstream history (the other branch had n=4 without sim_type).
n = 10
mc.generate_model_instances(output_name, n, sim_type="TOPOLOGY", threads=4)

# load output
topologies = MonteCarlo.load_topology_realisations(output_name, verbose=True)

# calculate unique topologies
from pynoddy.output import NoddyTopology
uTopo = NoddyTopology.calculate_unique_topologies(topologies, output="accumulate.csv")

print("%d unique topologies found in %d simulations" % (len(uTopo), len(topologies)))

# cleanup
# mc.cleanup()

# ###################################################
# # run existing .his files example (in TOPOLOGY mode)
# ###################################################
#
# # setup working environment
# os.chdir(r'C:\Users\Sam\SkyDrive\Documents\Masters\Models\Primitive\monte carlo test')
# path = 'multi_his_test'
# basename='GBasin123_random_draw'
#
# # run noddy in 'TOPOLOGY' mode (multithreaded)
def test_resolution(self, numTrials, **kwds):
    '''Tests the sensitivity of a model to block size by generating models
    of different resolutions and comparing them.

    **Arguments**:
     - *numTrials* = the number of different model resolutions to test

    **Optional Keywords**:
     - *output* = a csv file to write the output to
     - *cleanup* = True if this function should delete any models it creates.
       Otherwise models of different resolutions are left in the same
       directory as the .his file they derive from. Default is True.
     - *verbose* = If true, this function sends information to the print
       buffer. Otherwise it runs silently. Default is False.

    **Returns**:
     - The function returns a list containing the cumulative number of model
       topologies observed, starting from the highest resolution (smallest
       block size) to the lowest block size (largest block size)
    '''
    # get args
    outFile = kwds.get("output", "")
    cleanup = kwds.get("cleanup", True)
    verbose = kwds.get("verbose", False)

    # import pynoddy bindings
    import pynoddy

    # place to keep topologies
    topo_list = []
    res_list = []
    nUnique = 0  # number of unique topologies
    count = []   # cumulative count of unique topologies at each step

    # run test
    # NB: floor division — range() needs an integer step (plain "/" would
    # give a float on Python 3)
    step = (self.maxSize - self.minSize) // numTrials  # step change between resolutions
    for res in range(self.minSize, self.maxSize, step):
        if verbose:
            print("Computing model with %d block size" % res)

        # change cube size
        self.change_cube_size(res, type="Geophysics")
        self.change_cube_size(res, type="Geology")
        print("Cube size: %d:" % self.get_cube_size())

        # store cube size
        res_list.append(res)

        # save history file
        basename = self.path + "_cube_size_%d" % res
        self.write_history(basename + ".his")

        # run saved history file
        if verbose:
            print("Running resolution %d... " % res)
            print(pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY"))
            print("Complete.\n")
        else:
            pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY")

        # calculate topology
        if verbose:
            print('Computing model topologies...')
            print(pynoddy.compute_topology(basename, 1))
            print('Finished.\n')
        else:
            pynoddy.compute_topology(basename, 1)

        # load and store topology output
        topo = NoddyTopology(basename + "_0001")

        # cull small nodes
        # topo.filter_node_volumes(self.min_node_volume)

        # see if this is on the list
        if topo.is_unique(topo_list):
            nUnique += 1  # increment unique topologies

        # store cumulative sequence
        count.append(nUnique)

        # add to list of observed topologies
        topo_list.append(topo)

        # cleanup
        if cleanup:
            import os, glob
            # remove noddy files
            for f in glob.glob(basename + "*"):
                os.remove(f)

    print("Complete. A total of %d topologies were observed" % nUnique)
    print("The cumulative observation sequence was:")
    print(count)

    # write output
    if outFile != "":
        # use a context manager so the file is closed even if a write fails
        with open(outFile, 'w') as f:
            f.write("trial_resolution,cumulative_topologies\n")
            for i in range(len(res_list)):
                f.write("%d,%d\n" % (res_list[i], count[i]))

    return count