Example #1
    def generate_models_from_existing_histories(path,**kwds):
        '''
        Processes all existing his files in the given directory
        
        **Arguments**:
         - *path* = The directory that will be searched for .his files
        **Optional Keywords**:
         - *threads* = The number of separate threads to run when generating noddy models. For optimum
                       performance this should equal the number of logical cores - 1, unless RAM is a
                       limiting factor (at this point every thread requires at least 2 GB of RAM).
         - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                       'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
         - *force_recalculate* = Forces the recalculation of existing noddy files. Default is False, hence this
                       function will not run history files that are already associated with Noddy data files.
         - *verbose* = True if this function sends output to the print buffer. Default is True.
        '''
        
        #get keywords
        vb = kwds.get("verbose",True)
        stype = kwds.get("sim_type","BLOCK")
        threads = kwds.get("threads",1)
        force = kwds.get("force_recalculate",False)
        
        if threads >= 1: #spawn threads
            
            #gather list of his files
            his_files = []
            for root, dirnames, filenames in os.walk(path): #walk the directory
                for f in filenames:
                    if ('.his' in f): #find all history files
                        his_files.append(os.path.join(root,f))
                        
            #spawn threads until finished
            from threading import Thread
            thread_list = []            

            while len(his_files) > 0:
                for n in range(0,threads):
                    if len(his_files) > 0:
                        #get path
                        p = his_files[0]
                        his_files.remove(p)
                        
                        #initialise thread
                        t = Thread(target=MonteCarlo.generate_models_from_existing_histories,args=(p,),kwargs={'threads' : 0, 'sim_type' : stype, 'verbose' : vb,'force_recalculate' : force})
                        thread_list.append(t)
                        
                        #start thread
                        t.start()
                                
                #now wait for threads to finish
                for t in thread_list:
                    t.join()
            
        else: #run given file
            output = path.split('.')[0]
            
            #call noddy
            if force or not os.path.exists(output+".g01"): #if noddy files don't exist, or force is true
                if vb:
                    print("Running %s... " % output)
                    print(pynoddy.compute_model(path, output, sim_type = stype))
                    print ("Complete.")
                else:
                    pynoddy.compute_model(path,output, sim_type = stype) 
                    
            #call topology if in TOPOLOGY mode
            if 'TOPOLOGY' in stype:
                if force or not os.path.exists(output+".g23"): #if topology files don't exist, or force is true
                    if vb:
                        print("Running topology on %s... " % output)
                        print(pynoddy.compute_topology(output))
                        print ("Complete.")
                    else:
                        pynoddy.compute_topology(output)
                elif vb:
                    print "Topology files alread exist for %s. Skipping." % path
            
            #flush print buffer
            sys.stdout.flush()   
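
A minimal usage sketch for the function above. The directory name and the import path are assumptions for illustration (MonteCarlo is taken to live in pynoddy's experiment module; adjust the import to your install):

    # hedged usage sketch - 'output/scenario_1' and the import path are illustrative
    from pynoddy.experiment.monte_carlo import MonteCarlo

    MonteCarlo.generate_models_from_existing_histories(
        'output/scenario_1',        # directory searched recursively for .his files
        threads=3,                  # e.g. number of logical cores - 1
        sim_type='TOPOLOGY',        # also produces the files compute_topology needs
        force_recalculate=False,    # skip histories that already have Noddy output
        verbose=True)
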
Example #2
    def test_resolution(self, numTrials, **kwds):
        """Tests the sensitivity of a model to block size by generating models of different
        resolutions and comparing them.
        **Arguments**:
            - *numTrials* = the number of different model resolutions to test
        **Optional Keywords**:
            - *cleanup* = True if this function should delete any models it creates. Otherwise models of different resolutions
            are left in the same directory as the .his file they derive from. Default is True.
            - *verbose* = If true, this function sends information to the print buffer. Otherwise it runs silently. Default is True.
        **Returns**:
            - The function returns a list containing the cumulative number of model topologies
            observed, starting from the highest resolution (smallest block size) to the lowest
            resolution (largest block size)
        
        """
        # get args
        outFile = kwds.get("output", "")
        cleanup = kwds.get("cleanup", True)
        verbose = kwds.get("verbose", True)

        # import pynoddy bindings
        import pynoddy

        # store null volume threshold and then set to zero
        old_threshold = pynoddy.null_volume_threshold
        pynoddy.null_volume_threshold = 0

        # place to keep topologies
        self.topo_list = []
        self.res_list = []

        self.nUnique = 0  # number of unique topologies
        self.count = []  # number of different topologies observed at each step
        self.size = []  # number of edges (relationships) observed at each step

        # run test
        step = (self.maxSize - self.minSize) // numTrials  # integer step change between resolutions (range() needs an int)
        for res in range(self.minSize, self.maxSize, step):
            if verbose:
                print ("Computing model with %d block size" % res)

            # change cube size
            self.change_cube_size(res, type="Geophysics")
            self.change_cube_size(res, type="Geology")
            print "Cube size: %d:" % self.get_cube_size()

            # store cube size
            self.res_list.append(res)

            # save history file
            basename = self.path + "_cube_size_%d" % res

            self.write_history(basename + ".his")

            # run saved history file
            if verbose:
                print ("Running resolution %d... " % res)
                print (pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY"))
                print ("Complete.\n")
            else:
                pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY")

            # calculate topology
            if verbose:
                print ("Computing model topologies...")
                print (pynoddy.compute_topology(basename))
                print ("Finished.\n")
            else:
                pynoddy.compute_topology(basename, 1)

            # load and store topology output
            topo = NoddyTopology(basename + "_0001")

            # cull small nodes
            # topo.filter_node_volumes(self.min_node_volume)

            # see if this is on the list
            if topo.is_unique(self.topo_list):
                self.nUnique += 1  # increment unique topologies

            # store cumulative sequence
            self.count.append(self.nUnique)

            # add to list of observed topologies
            self.topo_list.append(topo)

            # append number of edges to edges list
            self.size.append(topo.graph.number_of_edges())

            # cleanup
            if cleanup:
                import os, glob

                # remove noddy files
                for f in glob.glob(basename + "*"):
                    os.remove(f)

        print "Complete. A total of %d topologies were observed" % self.nUnique
        print "The size of the network at each step was:"
        print self.size

        print "The cumulative observation sequence was:"
        print self.count

        # restore
        pynoddy.null_volume_threshold = old_threshold

        return self.count
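
A hedged usage sketch for test_resolution. Here 'experiment' stands for whatever instance defines this method (it must expose path, minSize and maxSize); the attribute values are illustrative only:

    # hedged usage sketch - attribute values are illustrative
    experiment.minSize = 50      # finest cube size to test (model units)
    experiment.maxSize = 250     # coarsest cube size to test
    counts = experiment.test_resolution(10, verbose=True, cleanup=True)
    print(counts)                # cumulative number of unique topologies per step
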
Example #3
    def test_resolution(self, numTrials, **kwds):
        '''Tests the sensitivity of a model to block size by generating models of different
        resolutions and comparing them.
        **Arguments**:
            - *numTrials* = the number of different model resolutions to test
        **Optional Keywords**:
            - *cleanup* = True if this function should delete any models it creates. Otherwise models of different resolutions
            are left in the same directory as the .his file they derive from. Default is True.
            - *verbose* = If true, this function sends information to the print buffer. Otherwise it runs silently. Default is False.
        **Returns**:
            - The function returns a list containing the cumulative number of model topologies
            observed, starting from the highest resolution (smallest block size) to the lowest
            resolution (largest block size)
        
        '''
        #get args
        outFile = kwds.get("output", "")
        cleanup = kwds.get("cleanup", True)
        verbose = kwds.get("verbose", False)

        #import pynoddy bindings
        import pynoddy

        #store null volume threshold and then set to zero
        old_threshold = pynoddy.null_volume_threshold
        pynoddy.null_volume_threshold = 0

        #place to keep topologies
        self.topo_list = []
        self.res_list = []

        self.nUnique = 0  #number of unique topologies
        self.count = []  #number of different topologies observed at each step
        self.size = []  #number of edges (relationships) observed at each step

        #run test
        step = (self.maxSize - self.minSize) // numTrials  #integer step change between resolutions (range() needs an int)
        for res in range(self.minSize, self.maxSize, step):
            if verbose:
                print(("Computing model with %d block size" % res))

            #change cube size
            self.change_cube_size(res)
            print("Cube size: %d" % self.get_cube_size())

            #store cube size
            self.res_list.append(res)

            #save history file
            basename = self.path + "_cube_size_%d" % res

            self.write_history(basename + ".his")

            #run saved history file
            if verbose:
                print(("Running resolution %d... " % res))
                print((pynoddy.compute_model(basename + ".his",
                                             basename + "_0001",
                                             sim_type="TOPOLOGY")))
                print("Complete.\n")
            else:
                pynoddy.compute_model(basename + ".his",
                                      basename + "_0001",
                                      sim_type="TOPOLOGY")

            #calculate topology
            if verbose:
                print('Computing model topologies...')
                print((pynoddy.compute_topology(basename)))
                print('Finished.\n')
            else:
                pynoddy.compute_topology(basename, 1)

            #load and store topology output
            topo = NoddyTopology(basename + "_0001")

            #cull small nodes
            #topo.filter_node_volumes(self.min_node_volume)

            #see if this is on the list
            if topo.is_unique(self.topo_list):
                self.nUnique += 1  #increment unique topologies

            #store cumulative sequence
            self.count.append(self.nUnique)

            #add to list of observed topologies
            self.topo_list.append(topo)

            #append number of edges to edges list
            self.size.append(topo.graph.number_of_edges())

            #cleanup
            if cleanup:
                import os, glob
                #remove noddy files
                for f in glob.glob(basename + "*"):
                    os.remove(f)

        print("Complete. A total of %d topologies were observed" %
              self.nUnique)
        print("The size of the network at each step was:")
        print(self.size)

        print("The cumulative observation sequence was:")
        print(self.count)

        #restore
        pynoddy.null_volume_threshold = old_threshold

        return self.count
Example #4
 def generate_model_instances(self, path, count, **kwds):
     '''
     Generates the specified number of randomly varied Noddy models.
     
     **Arguments**:
      - *path* = The directory that Noddy models should be generated in
      - *count* = The number of random variations to generate
     **Optional Keywords**:
      - *threads* = The number of separate threads to run when generating noddy models. Note that RAM is
                    often a limiting factor (at this point every thread requires at least ~1 GB of RAM).
     - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES', 
                    'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
     - *write_changes* = A file (path) to write the parameters used in each model realisation to (minus the extension). 
                    The default is a file called 'parameters.csv'. Set as None to disable writing.
     - *verbose* = True if this function sends output to the print buffer. Default is True.
     '''
     
     #get args
     vb = kwds.get("verbose",True)
     stype = kwds.get("sim_type","BLOCK")
     threads = kwds.get("threads",1)
     changes = kwds.get("write_changes","parameters")
     
     #store path for later
     self.instance_path = path       
     
     #get start time (for timing runs)
     import time as time
     if vb:
         start_time = time.time()
     
     #get variables for seed
     seed_base = os.getpid() * int(time.time() / 1000000)
     nodeID = 1 #this will be changed later if running on a linux box
     
     #ensure directory exists
     if not os.path.isdir(path):
         os.makedirs(path)
             
     if threads > 1: #multithreaded - spawn required number of threads
     
         #calculate & create node directory (for multi-node instances)
         import platform
         if (platform.system() == 'Linux'): #running linux - might be a cluster, so get node name
             nodename = os.uname()[1] #the name of the node it is running on (linux only)
             
             #move into node subdirectory
             path = os.path.join(path,nodename)
         
             #append node name to output
             if not changes is None:
                 changes = "%s_%s" % (changes,nodename) #append node name to output
             
             #change nodeID for seed
             nodeID = hash(nodename)
             
             
         #import thread stuff
         from threading import Thread
         import platform
         
         thread_list = []
         for t in range(0,threads):
             
             #create subdirectory for this thread
             threadpath=os.path.join(path,"thread_%d" % t)
             if not os.path.isdir(threadpath):
                 os.makedirs(threadpath)
                 
             #make copy of this object 
             import copy
             t_his = copy.deepcopy(self)
                 
             #calculate number of models to run in this thread
             n = count // threads #integer division - each thread runs a whole number of models
             if (t == 0): #first thread gets remainder
                 n = n + count % threads
             
             #calculate changes path
             change_path = None
             if not changes is None:
                 change_path = "%s_thread%d" % (changes,t)
             
             #set random seed (nodeID + PID/time-based base + threadID, so thread 0 is not always seeded with 0)
             t_his.set_random_seed(nodeID + seed_base + t)
             
             #initialise thread
             t = Thread(target=t_his.generate_model_instances,args=(threadpath,n),kwargs={'sim_type' : stype, 'verbose' : vb, 'write_changes' : change_path})
             
             thread_list.append(t)
             
             #start thread
             t.start()
             
             #thread.start_new_thread(t_his.generate_model_instances,(threadpath,n),{'sim_type' : stype, 'verbose' : vb})
             
         #now wait for threads to finish
         for t in thread_list:
             t.join()
             
         #now everything is finished!
         if vb:
             print "Finito!"
             
             elapsed = time.time() - start_time
             print "Generated %d models in %d seconds\n\n" % (count,elapsed)
             
     else: #only 1 thread (or instance of a thread), so run noddy
         for n in range(1,count+1): #numbering needs to start at 1 for topology
             #calculate filename & output path
             outputfile = "%s_%04d" % (self.basename,n)
             outputpath = os.path.join(path,outputfile)
             
             if vb:
                 print "Constructing %s... " % outputfile
                 
             #do random perturbation
             self.random_perturbation(verbose=vb)
             
             #save history
             self.write_history(outputpath + ".his")
             
             #run noddy
             if vb:
                 print("Complete.\nRunning %s... " % outputfile)
                 print(pynoddy.compute_model(outputpath + ".his",outputpath, sim_type = stype))
                 print ("Complete.")
             else:
                 pynoddy.compute_model(outputpath + ".his",outputpath, sim_type = stype)
             
             #run topology if necessary
             if "TOPOLOGY" in stype:
                 if vb:
                     print("Complete. Calculating Topology... ")
                     print(pynoddy.compute_topology(outputpath))
                     print ("Complete.")
                 else:
                     pynoddy.compute_topology(outputpath)
                     
             #flush print buffer
             sys.stdout.flush()
                
         #write changes
         if changes is not None:
             print("Writing parameter changes to %s..." % (changes + ".csv"))
             self.write_parameter_changes(changes+".csv")
             print("Complete.")
Example #5
    def generate_models_from_existing_histories(path,
                                                verbose=True,
                                                sim_type="BLOCK",
                                                threads=1,
                                                force_recalculate=False,
                                                **kwds):
        """
        Processes all existing his files in the given directory

        **Arguments**:
         - *path* = The directory that will be searched for .his files
        **Optional Arguments**:
         - *threads* = The number of separate threads to run when generating noddy models. For optimum
                       performance this should equal the number of logical cores - 1, unless RAM is a
                       limiting factor (at this point every thread requires at least 2 GB of RAM).
        - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                       'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
        - *force_recalculate* = Forces the recalculation of existing noddy files. Default is False, hence this
                       function will not run history files that are already associated with Noddy data files.
        - *verbose* = True if this function sends output to the print buffer. Default is True.
        """

        # get argument values
        vb = verbose
        stype = sim_type
        # threads = threads # Note to Sam: not required
        force = force_recalculate

        if threads >= 1:  # spawn threads

            # gather list of his files
            his_files = []
            for root, dirnames, filenames in os.walk(
                    path):  # walk the directory
                for f in filenames:
                    if ('.his' in f):  # find all history files
                        his_files.append(os.path.join(root, f))

            # spawn threads until finished
            from threading import Thread
            thread_list = []

            while len(his_files) > 0:
                for n in range(0, threads):
                    if len(his_files) > 0:
                        # get path
                        p = his_files[0]
                        his_files.remove(p)

                        # initialise thread
                        t = Thread(target=MonteCarlo.
                                   generate_models_from_existing_histories,
                                   args=(p, ),
                                   kwargs={
                                       'threads': 0,
                                       'sim_type': stype,
                                       'verbose': vb,
                                       'force_recalculate': force
                                   })
                        thread_list.append(t)

                        # start thread
                        t.start()

                # now wait for threads to finish
                for t in thread_list:
                    t.join()

        else:  # run given file
            output = path.split('.')[0]

            # call noddy
            if force or not os.path.exists(
                    output +
                    ".g12"):  # if noddy files don't exist, or force is true
                if vb:
                    print("Running %s... " % output)
                    print(pynoddy.compute_model(path, output, sim_type=stype))
                    print("Complete.")
                else:
                    pynoddy.compute_model(path, output, sim_type=stype)

            # call topology if in TOPOLOGY mode
            if 'TOPOLOGY' in stype:
                if force or not os.path.exists(
                        output + ".g23"
                ):  # if topology files don't exist, or force is true
                    if vb:
                        print("Running topology on %s... " % output)
                        print(pynoddy.compute_topology(output))
                        print("Complete.")
                    else:
                        pynoddy.compute_topology(output)
                elif vb:
                    print "Topology files alread exist for %s. Skipping." % path

            # flush print buffer
            sys.stdout.flush()
Example #6
    def generate_model_instances(self, path, count, **kwds):
        """
        Generates the specified number of randomly varied Noddy models.

        **Arguments**:
         - *path* = The directory that Noddy models should be generated in
         - *count* = The number of random variations to generate
        **Optional Keywords**:
         - *threads* = The number of separate threads to run when generating noddy models. Note that RAM is
                       often a limiting factor (at this point every thread requires at least ~1 GB of RAM).
        - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                       'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
        - *write_changes* = A file (path) to write the parameters used in each model realisation to
                        (minus the extension).
                       The default is None (no file written).
        - *verbose* = True if this function sends output to the print buffer. Default is False.
        - *seed* = The random seed to use in this experiment. If not specified, threads are seeded
                    with a PID/time-derived base value plus the thread ID (and the node ID on Linux clusters).
        """

        # get args
        vb = kwds.get("verbose", False)
        stype = kwds.get("sim_type", "BLOCK")
        threads = kwds.get("threads", 1)
        changes = kwds.get("write_changes", None)

        # store path for later
        self.instance_path = path

        # get start time (for timing runs)
        import time as time
        if vb:
            start_time = time.time()

        # get variables for seed
        seed_base = os.getpid() * int(time.time() / 1000000)
        nodeID = 1  # this will be changed later if running on a linux box

        # ensure directory exists
        if not os.path.isdir(path):
            os.makedirs(path)

        if threads > 1:  # multithreaded - spawn required number of threads

            # calculate & create node directory (for multi-node instances)
            import platform
            if (platform.system() == 'Linux'
                ):  # running linux - might be a cluster, so get node name
                nodename = os.uname()[
                    1]  # the name of the node it is running on (linux only)

                # move into node subdirectory
                path = os.path.join(path, nodename)

                # append node name to output
                if not changes is None:
                    changes = "%s_%s" % (changes, nodename
                                         )  # append node name to output

                # change nodeID for seed
                nodeID = hash(nodename)

            # import thread stuff
            from threading import Thread

            thread_list = []
            for t in range(0, threads):

                # create subdirectory for this thread
                threadpath = os.path.join(path, "thread_%d" % t)
                if not os.path.isdir(threadpath):
                    os.makedirs(threadpath)

                # make copy of this object
                import copy
                t_his = copy.deepcopy(self)

                # calculate number of models to run in this thread
                n = count // threads  # integer division - each thread runs a whole number of models
                if (t == 0):  # first thread gets remainder
                    n = n + count % threads

                # calculate changes path
                change_path = None
                if not changes is None:
                    change_path = "%s_thread%d" % (changes, t)

                # set random seed (nodeID * process ID * threadID * time in seconds)
                t_his.set_random_seed(nodeID + seed_base + t)

                if 'seed' in kwds:  # override default seed, for reproducible results
                    t_his.set_random_seed(kwds['seed'] + t)  # specified seed + threadID

                # initialise thread
                t = Thread(target=t_his.generate_model_instances,
                           args=(threadpath, n),
                           kwargs={
                               'sim_type': stype,
                               'verbose': vb,
                               'write_changes': change_path
                           })

                thread_list.append(t)

                # start thread
                t.start()

                # thread.start_new_thread(t_his.generate_model_instances,(threadpath,n),
                # {'sim_type' : stype, 'verbose' : vb})

            # now wait for threads to finish
            for t in thread_list:
                t.join()

            # now everything is finished!
            if vb:
                print "Finito!"

                elapsed = time.time() - start_time
                print "Generated %d models in %d seconds\n\n" % (count,
                                                                 elapsed)

        else:  # only 1 thread (or instance of a thread), so run noddy
            for n in range(1, count +
                           1):  # numbering needs to start at 1 for topology
                # calculate filename & output path
                outputfile = "%s_%04d" % (self.basename, n)
                outputpath = os.path.join(path, outputfile)

                if vb:
                    print "Constructing %s... " % outputfile

                # do random perturbation
                self.random_perturbation(verbose=vb)

                # save history
                self.write_history(outputpath + ".his")

                # run noddy
                if vb:
                    print("Complete.\nRunning %s... " % outputfile)
                    print(
                        pynoddy.compute_model(outputpath + ".his",
                                              outputpath,
                                              sim_type=stype))
                    print("Complete.")
                else:
                    pynoddy.compute_model(outputpath + ".his",
                                          outputpath,
                                          sim_type=stype)

                # run topology if necessary
                if "TOPOLOGY" in stype:
                    if vb:
                        print("Complete. Calculating Topology... ")
                        print(pynoddy.compute_topology(outputpath))
                        print("Complete.")
                    else:
                        pynoddy.compute_topology(outputpath)

                # flush print buffer
                sys.stdout.flush()

            # write changes
            if changes is not None:
                if vb:
                    print("Writing parameter changes to %s..." % (changes + ".csv"))
                self.write_parameter_changes(changes + ".csv")
                if vb:
                    print("Complete.")
Example #7
    
err = False  # assumed to be set by earlier sections of the full test script
txt = ''     # ensure the Noddy log variable exists even if compute_model raises first
try:
    txt = pynoddy.compute_model(history_path, output_name, sim_type = 'TOPOLOGY') 
except Exception as e:
    sys.stderr.write("Error - could not call Noddy executable... %s\n" % e)
    sys.stderr.write("Noddy log: %s\n" % txt)
    sys.exit(1)

if not err:
    print("Succesfully called Noddy executable in TOPOLOGY mode.")
    
#####################
##Test Topology
#####################
try:
    txt = pynoddy.compute_topology(output_name)
except Exception as e:
    sys.stderr.write("Error - could not call Topology executable... %s\n" % e)
    sys.stderr.write("Topology log: %s\n" % txt)
    err = True
    
if not err:
    print("Succesfully called Topology executable")
    
#####################
#Test NoddyOutput
#####################
try:
    nout = pynoddy.output.NoddyOutput(output_name)
except Exception as e:
    sys.stderr.write("Error - could not call load Noddy output as a NoddyOutput object... %s\n" % e)
Example #8
err = False  # assumed to be set by earlier sections of the full test script
txt = ''     # ensure the Noddy log variable exists even if compute_model raises first
try:
    txt = pynoddy.compute_model(history_path, output_name, sim_type='TOPOLOGY')
except Exception as e:
    sys.stderr.write("Error - could not call Noddy executable... %s\n" % e)
    sys.stderr.write("Noddy log: %s\n" % txt)
    sys.exit(1)

if not err:
    print "Succesfully called Noddy executable in TOPOLOGY mode."

#####################
##Test Topology
#####################
try:
    txt = pynoddy.compute_topology(output_name)
except Exception as e:
    sys.stderr.write("Error - could not call Topology executable... %s\n" % e)
    sys.stderr.write("Topology log: %s\n" % txt)
    err = True

if not err:
    print "Succesfully called Topology executable"

#####################
#Test NoddyOutput
#####################
try:
    nout = pynoddy.output.NoddyOutput(output_name)
except Exception as e:
    sys.stderr.write("Error - could not load Noddy output as a NoddyOutput object... %s\n" % e)
Example #9
 def test_resolution(self,numTrials, **kwds):
     '''Tests the sensitivity of a model to block size by generating models of different
     resolutions and comparing them.
     **Arguments**:
         - *numTrials* = the number of different model resolutions to test
     **Optional Keywords**:
         - *output* = a csv file to write the output to
         - *cleanup* = True if this function should delete any models it creates. Otherwise models of different resolutions
         are left in the same directory as the .his file they derive from. Default is True.
         - *verbose* = If true, this function sends information to the print buffer. Otherwise it runs silently. Default is False.
     **Returns**:
         - The function returns a list containing the cumulative number of model topologies
         observed, starting from the highest resolution (smallest block size) to the lowest
         resolution (largest block size)
     
     '''
     #get args
     outFile = kwds.get("output", "")
     cleanup = kwds.get("cleanup",True)
     verbose = kwds.get("verbose",False)
     
     #import pynoddy bindings
     import pynoddy
     
     #place to keep topologies
     topo_list = []
     res_list = []
     
     nUnique = 0 #number of unique topologies
     count = []
     
     #run test
      step = (self.maxSize - self.minSize) // numTrials #integer step change between resolutions (range() needs an int)
     for res in range(self.minSize,self.maxSize,step):
         if verbose:
             print("Computing model with %d block size" % res)    
        
         #change cube size
         self.change_cube_size(res,type="Geophysics")
         self.change_cube_size(res,type="Geology")
         print"Cube size: %d:" % self.get_cube_size()
        
         #store cube size
         res_list.append(res)
         
         #save history file
         basename = self.path + "_cube_size_%d" % res
         
         self.write_history(basename + ".his")
         
         #run saved history file
         if verbose:
             print("Running resolution %d... " % res)
             print(pynoddy.compute_model(basename+".his", basename+"_0001", sim_type = "TOPOLOGY"))
             print ("Complete.\n")
         else:
             pynoddy.compute_model(basename+".his", basename+"_0001", sim_type = "TOPOLOGY")
         
         #calculate topology
         if verbose:
             print('Computing model topologies...')
             print(pynoddy.compute_topology(basename,1))
             print('Finished.\n')
         else:
            pynoddy.compute_topology(basename,1)
            
         #load and store topology output
         topo = NoddyTopology(basename+"_0001")
         
         #cull small nodes
         #topo.filter_node_volumes(self.min_node_volume)
         
         #see if this is on the list
         if topo.is_unique(topo_list):
             nUnique+=1 #increment unique topologies
         
         #store cumulative sequence
         count.append(nUnique)
         
         #add to list of observed topologies
         topo_list.append(topo)
         
         #cleanup
         if cleanup:
             import os, glob
             #remove noddy files
             for f in glob.glob(basename+"*"):
                 os.remove(f)
         
     print "Complete. A total of %d topologies were observed" % nUnique
     print "The cumulative observation sequence was:"
     print count
     
     #write output
     if outFile != "":
         f = open(outFile,'w')
         f.write("trial_resolution,cumulative_topologies\n")
         
         for i in range(0,len(res_list)):
             f.write("%d,%d\n" % (res_list[i],count[i]))
        
         f.close()
     
     return count
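
When the *output* keyword is given, the results can be read back from the CSV written above; a short sketch, with the file name assumed:

    # hedged sketch - reads the two columns that test_resolution(output=...) writes
    import csv
    with open('resolution_test.csv') as f:   # hypothetical output file name
        for row in csv.DictReader(f):
            print(row['trial_resolution'], row['cumulative_topologies'])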