コード例 #1
0
    def get_model_lines(self, **kwds):
        """Get base model along the defined sampling lines

        **Optional keywords**:
            - *model_type* = 'base', 'current' : model type (select base to get freezed model)
            - *resolution* = float : model resolution to calculate distance at sampling lines

        **Returns**: np.ndarray with geology values of all sampling lines,
        concatenated in iteration order of self.sampling_lines

        **Raises**: AttributeError for an unknown model_type
        """
        resolution = kwds.get("resolution", 1)
        model_type = kwds.get("model_type", 'current')

        # validate up front: originally an invalid model_type only raised inside
        # the loop, so it passed silently when no sampling lines were defined
        if model_type not in ('base', 'current'):
            raise AttributeError("Model type %s not known, please check!" %
                                 model_type)

        import copy
        import pynoddy
        import pynoddy.output

        tmp_his = copy.deepcopy(self)
        if model_type == 'base':
            # use "freezed" base events instead of the current ones
            # (hoisted out of the loop: the assignment is loop-invariant)
            tmp_his.events = self.base_events.copy()

        current_lines = np.array([])
        # get model for all sampling lines
        for sl in list(self.sampling_lines.values()):
            # 2. set values: a one-cell-wide column at the sampling position
            tmp_his.set_origin(sl['x'], sl['y'], sl['z_min'])
            tmp_his.set_extent(resolution, resolution, sl['z_max'])
            tmp_his.change_cube_size(resolution)

            # 3. save temporary file
            tmp_his_file = "tmp_1D_drillhole.his"
            tmp_his.write_history(tmp_his_file)
            tmp_out_file = "tmp_1d_out"
            # 4. run noddy
            pynoddy.compute_model(tmp_his_file, tmp_out_file)
            # 5. open output
            tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
            # 6. append the 1-D column to the combined result
            current_lines = np.append(current_lines, tmp_out.block[0, 0, :])

        # if base model: store as class variable for later comparison
        if model_type == 'base':
            self.base_model_lines = current_lines

        return current_lines
コード例 #2
0
ファイル: __init__.py プロジェクト: Leguark/pynoddy
    def get_model_lines(self, **kwds):
        """Sample the model geology along every defined sampling line.

        **Optional keywords**:
            - *model_type* = 'base', 'current' : model type (select base to get freezed model)
            - *resolution* = float : model resolution to calculate distance at sampling lines
        """
        import copy

        resolution = kwds.get("resolution", 1)
        model_type = kwds.get("model_type", 'current')

        his_copy = copy.deepcopy(self)

        lines = np.array([])
        # compute a 1-D model for each sampling line and collect the results
        for line_def in self.sampling_lines.values():
            # configure a one-cell-wide column at the sampling position
            his_copy.set_origin(line_def['x'], line_def['y'], line_def['z_min'])
            his_copy.set_extent(resolution, resolution, line_def['z_max'])
            his_copy.change_cube_size(resolution)

            if model_type == 'current':
                pass  # keep the current events as they are
            elif model_type == 'base':
                # switch to the "freezed" base events
                his_copy.events = self.base_events.copy()
            else:
                raise AttributeError("Model type %s not known, please check!" % model_type)

            # write the temporary history, compute it, and read the output back
            his_file = "tmp_1D_drillhole.his"
            his_copy.write_history(his_file)
            out_file = "tmp_1d_out"

            import pynoddy
            import pynoddy.output

            pynoddy.compute_model(his_file, out_file)
            noddy_out = pynoddy.output.NoddyOutput(out_file)
            lines = np.append(lines, noddy_out.block[0, 0, :])

        if model_type == 'base':
            # remember the base model result for later comparison
            self.base_model_lines = lines

        return lines
コード例 #3
0
    def perform_sampling(self, **kwds):
        """Perform sampling steps and save all history files.

        For each of the self.n realisations:
        1. draw continuous perturbations for UNCONFORMITY, FAULT and FOLD events,
        2. resample the order of the fault events (event ids 6, 7, 8),
        3. write the history file to ./tmp and, if self.compute is set,
           directly compute the model.

        The values drawn in each step are appended to self.all_samples.
        """
        np.random.seed(12345)  # fixed seed for reproducible sampling
        self.all_samples = []
        for i in range(self.n):
            N_tmp = copy.deepcopy(self.NH)
            if i % 100 == 0:
                print("Sampling step %d" % i)

            step_samples = []
            #===================================================================
            # First step: continuous changes of parameters
            #===================================================================
            # iterate values only (the original iterated .items() but never
            # used the key, shadowing the builtin 'id' in the process)
            for event in N_tmp.events.values():
                if event.event_type == 'UNCONFORMITY':
                    height = np.random.randn() * self.unconf_stdev
                    event.change_height(height)
                    step_samples.append(height)
                elif event.event_type == 'FAULT':
                    fault_dip = np.random.randn() * self.dip_stdev
                    event.properties['Dip'] += fault_dip
                    step_samples.append(fault_dip)
                elif event.event_type == 'FOLD':
                    fold_wavelength = np.random.randn() * self.fold_wavelength
                    fold_amplitude = np.random.randn() * self.fold_amplitude
                    fold_position = np.random.randn() * self.fold_position
                    event.properties['Wavelength'] += fold_wavelength
                    event.properties['Amplitude'] += fold_amplitude
                    event.properties['X'] += fold_position
                    step_samples.append(fold_wavelength)
                    step_samples.append(fold_amplitude)
                    step_samples.append(fold_position)
            #===================================================================
            # Second step: Resample fault event order
            #===================================================================

            # generate new random order for resampling
            new_order = np.random.choice((6, 7, 8), size=3, replace=False)
            N_tmp.reorder_events({
                6: new_order[0],
                7: new_order[1],
                8: new_order[2]
            })

            #===================================================================
            # Create history file
            #===================================================================
            tmp_his = os.path.join("tmp", "GBasin123_random_draw_%04d.his" % i)
            tmp_out = os.path.join("tmp", "GBasin123_random_draw_%04d" % i)
            N_tmp.write_history(tmp_his)
            if self.compute:
                # directly compute model
                pynoddy.compute_model(tmp_his, tmp_out)
            self.all_samples.append(step_samples)
コード例 #4
0
 def perform_sampling(self, **kwds):
     """perform sampling step and save all history files"""
     np.random.seed(12345)
     self.all_samples = []
     for step in range(self.n):
         history = copy.deepcopy(self.NH)
         if step % 100 == 0:
             print("Sampling step %d" % step)

         draws = []
         # ------------------------------------------------------------------
         # First step: continuous changes of parameters
         # ------------------------------------------------------------------
         for eid, event in history.events.items():
             etype = event.event_type
             if etype == 'UNCONFORMITY':
                 height = np.random.randn() * self.unconf_stdev
                 event.change_height(height)
                 draws.append(height)
             elif etype == 'FAULT':
                 dip_change = np.random.randn() * self.dip_stdev
                 event.properties['Dip'] += dip_change
                 draws.append(dip_change)
             elif etype == 'FOLD':
                 d_wavelength = np.random.randn() * self.fold_wavelength
                 d_amplitude = np.random.randn() * self.fold_amplitude
                 d_position = np.random.randn() * self.fold_position
                 event.properties['Wavelength'] += d_wavelength
                 event.properties['Amplitude'] += d_amplitude
                 event.properties['X'] += d_position
                 draws.append(d_wavelength)
                 draws.append(d_amplitude)
                 draws.append(d_position)

         # ------------------------------------------------------------------
         # Second step: resample the fault event order (events 6, 7, 8)
         # ------------------------------------------------------------------
         shuffled = np.random.choice((6, 7, 8), size=3, replace=False)
         history.reorder_events(dict(zip((6, 7, 8), shuffled)))

         # ------------------------------------------------------------------
         # Create the history file (and optionally compute the model)
         # ------------------------------------------------------------------
         his_path = os.path.join("tmp", "GBasin123_random_draw_%04d.his" % step)
         out_path = os.path.join("tmp", "GBasin123_random_draw_%04d" % step)
         history.write_history(his_path)
         if self.compute:
             pynoddy.compute_model(his_path, out_path)
         self.all_samples.append(draws)
コード例 #5
0
ファイル: history.py プロジェクト: samthiele/pynoddy
 def get_drillhole_data(self, x, y, **kwds):
     """Get geology values along 1-D profile at position x,y with a 1 m resolution
     
     The following steps are performed:
     1. creates a copy of the entire object,
     2. sets values of origin, extent and geology cube size, 
     3. saves model to a temporary file, 
     4. runs Noddy on that file
     5. opens and analyses output
     
     Note: this method only works if write access to current directory
     is enabled and noddy can be executed!
     NOTE(review): the temporary files are *not* removed here — confirm
     whether cleanup is expected by callers.
     
     **Arguments**:
         - *x* = float: x-position of drillhole
         - *y* = float: y-position of drillhole
     
     **Optional Arguments**:
         - *z_min* = float : minimum depth of drillhole (default: model range)
         - *z_max* = float : maximum depth of drillhole (default: model range)
         - *resolution* = float : resolution along profile (default: 1 m)
     """
     # resolve keywords
     resolution = kwds.get("resolution", 1)
     self.get_extent()
     self.get_origin()
     z_min = kwds.get("z_min", self.origin_z)
     z_max = kwds.get("z_max", self.extent_z)
     # 1. create copy
     import copy
     tmp_his = copy.deepcopy(self)
     # (removed leftover debug call that wrote a stray "test.his" file
     # before origin/extent were even set)
     # 2. set values
     tmp_his.set_origin(x, y, z_min)
     tmp_his.set_extent(resolution, resolution, z_max)
     tmp_his.change_cube_size(resolution)
     # 3. save temporary file
     tmp_his_file = "tmp_1D_drillhole.his"
     tmp_his.write_history(tmp_his_file)
     tmp_out_file = "tmp_1d_out"
     # 4. run noddy
     import pynoddy
     import pynoddy.output
     
     pynoddy.compute_model(tmp_his_file, tmp_out_file)
     # 5. open output and return the single 1-D column
     tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
     return tmp_out.block[0, 0, :]
コード例 #6
0
ファイル: Copy of history.py プロジェクト: wangcug/pynoddy
 def get_drillhole_data(self, x, y, **kwds):
     """Get geology values along 1-D profile at position x,y with a 1 m resolution
     
     The following steps are performed:
     1. creates a copy of the entire object,
     2. sets values of origin, extent and geology cube size, 
     3. saves model to a temporary file, 
     4. runs Noddy on that file
     5. opens and analyses output
     
     Note: this method only works if write access to current directory
     is enabled and noddy can be executed!
     NOTE(review): step 6 ("deletes temporary files") is claimed by the
     original docstring but not performed by the code below — confirm.
     
     **Arguments**:
         - *x* = float: x-position of drillhole
         - *y* = float: y-position of drillhole
     
     **Optional Arguments**:
         - *z_min* = float : minimum depth of drillhole (default: model range)
         - *z_max* = float : maximum depth of drillhole (default: model range)
         - *resolution* = float : resolution along profile (default: 1 m)
     """
     # resolve keywords; z-range defaults come from the model itself
     resolution = kwds.get("resolution", 1)
     self.get_extent()
     self.get_origin()
     z_min = kwds.get("z_min", self.origin_z)
     z_max = kwds.get("z_max", self.extent_z)
     # 1. create copy (deep copy so the original model is left untouched)
     import copy
     tmp_his = copy.deepcopy(self)
     # 2. set values: a one-cell-wide column at the drillhole position
     tmp_his.set_origin(x, y, z_min)
     tmp_his.set_extent(resolution, resolution, z_max)
     tmp_his.change_cube_size(resolution)
     # 3. save temporary file
     tmp_his_file = "tmp_1D_drillhole.his"
     tmp_his.write_history(tmp_his_file)
     tmp_out_file = "tmp_1d_out"
     # 4. run noddy
     import pynoddy
     import pynoddy.output
     pynoddy.compute_model(tmp_his_file, tmp_out_file)
     # 5. open output
     tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
     # 6. return the single 1-D geology column
     return tmp_out.block[0, 0, :]
コード例 #7
0
ファイル: __init__.py プロジェクト: jesserobertson/pynoddy
    def export_to_vtk(self, **kwds):
        """Export model to VTK
        
        Export the geology blocks to VTK for visualisation of the entire 3-D model in an
        external VTK viewer, e.g. Paraview.
        
        ..Note:: Requires pyevtk, available for free on: https://github.com/firedrakeproject/firedrake/tree/master/python/evtk
        
        **Optional keywords**:
            - *vtk_filename* = string : filename of VTK file (default: output_name)
            - *data* = np.array : data array to export to VKT (default: entire block model)
            - *recompute* = bool : recompute the block model (default: True)
            - *model_type* = 'current', 'base' : model type (base "freezed" model can be plotted for comparison)
            
        ..Note:: If data is defined, the model is not recomputed and the data from this array is plotted
        """
        # dict.has_key() was removed in Python 3 -- use the 'in' operator
        if "data" in kwds:
            super(Experiment, self).export_to_vtk(**kwds)
        else:
            recompute = kwds.get("recompute", True)  # recompute by default
            if recompute:
                import pynoddy
                import pynoddy.output

                # re-compute noddy model:
                #  save temporary file
                tmp_his_file = "tmp_section.his"
                tmp_out_file = "tmp_section_out"

                # reset to base model? (kwds.get avoids the KeyError when
                # model_type is absent, matching the old has_key guard)
                if kwds.get("model_type") == "base":
                    # 1. create copy with the "freezed" base events
                    import copy

                    tmp_his = copy.deepcopy(self)
                    tmp_his.events = self.base_events.copy()
                    tmp_his.write_history(tmp_his_file)
                else:
                    self.write_history(tmp_his_file)

                pynoddy.compute_model(tmp_his_file, tmp_out_file)
                # point this object at the freshly computed output so the
                # parent export picks up the new block
                super(Experiment, self).set_basename(tmp_out_file)
                super(Experiment, self).load_model_info()
                super(Experiment, self).load_geology()
            super(Experiment, self).export_to_vtk(**kwds)
コード例 #8
0
    def export_to_vtk(self, **kwds):
        """Export model to VTK
        
        Export the geology blocks to VTK for visualisation of the entire 3-D model in an
        external VTK viewer, e.g. Paraview.
        
        ..Note:: Requires pyevtk, available for free on: https://github.com/firedrakeproject/firedrake/tree/master/python/evtk
        
        **Optional keywords**:
            - *vtk_filename* = string : filename of VTK file (default: output_name)
            - *data* = np.array : data array to export to VKT (default: entire block model)
            - *recompute* = bool : recompute the block model (default: True)
            - *model_type* = 'current', 'base' : model type (base "freezed" model can be plotted for comparison)
            
        ..Note:: If data is defined, the model is not recomputed and the data from this array is plotted
        """
        # dict.has_key() was removed in Python 3 -- use the 'in' operator
        if "data" in kwds:
            super(Experiment, self).export_to_vtk(**kwds)
        else:
            recompute = kwds.get("recompute", True)  # recompute by default
            if recompute:
                import pynoddy
                import pynoddy.output
                # re-compute noddy model:
                #  save temporary file
                tmp_his_file = "tmp_section.his"
                tmp_out_file = "tmp_section_out"

                # reset to base model?
                if "model_type" in kwds and kwds['model_type'] == 'base':
                    # 1. create copy with the "freezed" base events
                    import copy
                    tmp_his = copy.deepcopy(self)
                    tmp_his.events = self.base_events.copy()
                    tmp_his.write_history(tmp_his_file)
                else:
                    self.write_history(tmp_his_file)

                pynoddy.compute_model(tmp_his_file, tmp_out_file)
                # point this object at the freshly computed output so the
                # parent export picks up the new block
                super(Experiment, self).set_basename(tmp_out_file)
                super(Experiment, self).load_model_info()
                super(Experiment, self).load_geology()
            super(Experiment, self).export_to_vtk(**kwds)
コード例 #9
0
 def test_compute_model(self):
     """Computing the simple two-fault test model should succeed (empty log)."""
     his_path = os.path.join(package_directory, "../test/simple_two_faults.his")
     out_path = os.path.join(package_directory, "../test/simple_two_faults_out")
     log = pynoddy.compute_model(his_path, out_path)
     assert_equals(log, "", msg="Problem with Noddy computation: %s" % log)
コード例 #10
0
    def block_generate(self, i):
        """Generates noddy block model from trace values at index i.

        **Arguments**:
            - *i* = int : index into the database traces

        **Returns**: the computed Noddy block model (NoddyOutput.block)
        """
        # assign values at index i from db
        # assumes trace names encode "<property>_<event_id>[_Layer_<layer_id>]"
        # -- TODO confirm against how prior_names is built
        for tn in self.prior_names:
            p = tn.split("_")  # hoisted: identical split in both branches
            value = self.db.trace(tn, chain=-1)[i]
            if "Layer" in tn:
                self.ex.events[int(p[1])].layers[int(p[3])].properties[p[0]] = value
            else:
                self.ex.events[int(p[1])].properties[p[0]] = value

        # write history file
        self.ex.write_history("tmp_history.his")
        # define output name
        output_name = "tmp_output"
        # compute pynoddy model with topology flag
        pynoddy.compute_model("tmp_history.his", output_name,
                              sim_type="TOPOLOGY")
        # load output
        out = pynoddy.output.NoddyOutput(output_name)
        return out.block
コード例 #11
0
    def block_generate(self, i):
        """Generates noddy block model from trace values at index i."""
        # assign values at index i from db
        for trace_name in self.prior_names:
            parts = trace_name.split("_")
            trace_value = self.db.trace(trace_name, chain=-1)[i]
            target_event = self.ex.events[int(parts[1])]
            if "Layer" in trace_name:
                target_event.layers[int(parts[3])].properties[parts[0]] = trace_value
            else:
                target_event.properties[parts[0]] = trace_value

        # write the history file and compute the model with the topology flag
        self.ex.write_history("tmp_history.his")
        output_name = "tmp_output"
        pynoddy.compute_model("tmp_history.his",
                              output_name,
                              sim_type="TOPOLOGY")
        # load and return the computed block
        out = pynoddy.output.NoddyOutput(output_name)
        return out.block
コード例 #12
0
ファイル: __init__.py プロジェクト: pytzcarraldo/pynoddy
 def test_compute_model(self):
     """Noddy should compute the two-fault test model without errors."""
     his_path = os.path.join(package_directory,
                             "../test/simple_two_faults.his")
     out_path = os.path.join(package_directory,
                             "../test/simple_two_faults_out")
     log = pynoddy.compute_model(his_path, out_path)
     assert_equals(log, "",
                   msg="Problem with Noddy computation: %s" % log)
コード例 #13
0
ファイル: __init__.py プロジェクト: sebastopol/pynoddy
    def get_section(self, direction='y', position='center', **kwds):
        """Get geological section of the model (re-computed at required resolution) as noddy object

        **Arguments**:
            - *direction* = 'x', 'y' : coordinate direction of section plot (default: 'y')
              NOTE(review): 'z' was documented originally but never handled below — confirm.
            - *position* = int or 'center' : cell position of section as integer value
                or identifier (default: 'center')

        **Optional arguments**:
            - *resolution* = float : set resolution for section (default: self.cube_size)
            - *model_type* = 'current', 'base' : model type (base "freezed" model can be plotted for comparison)
            - *compute_output* = bool : provide output from command line call (default: True)
            - *remove_tmp_files* = bool : remove temporary files (default: True)

        **Returns**: pynoddy.output.NoddyOutput for the computed section

        **Raises**: IOError if the Noddy output cannot be read after repeated attempts
        """
        remove_tmp_files = kwds.get("remove_tmp_files", True)
        compute_output = kwds.get("compute_output", True)
        self.get_cube_size()
        self.get_extent()
        resolution = kwds.get("resolution", self.cube_size)
        model_type = kwds.get("model_type", 'current')

        # code copied from noddy.history.HistoryFile.get_drillhole_data()
        self.get_origin()
        z_min = kwds.get("z_min", self.origin_z)
        z_max = kwds.get("z_max", self.extent_z)

        # 1. create copy so the original model is untouched
        import copy
        tmp_his = copy.deepcopy(self)

        # 2. set values: make the model one cell thick perpendicular
        #    to the requested section direction
        if direction == 'y':
            x_min = self.origin_x
            x_max = self.extent_x
            if position in ('center', 'centre'):  # AE and BE friendly :-)
                y_pos = (self.extent_y - self.origin_y - resolution) / 2.
            else:  # set position explicitly to cell
                y_pos = position
            tmp_his.set_origin(x_min, y_pos, z_min)
            tmp_his.set_extent(x_max, resolution, z_max)
            tmp_his.change_cube_size(resolution)

        elif direction == 'x':
            y_min = self.origin_y
            y_max = self.extent_y
            if position in ('center', 'centre'):
                x_pos = (self.extent_x - self.origin_x - resolution) / 2.
            else:  # set position explicitly to cell
                x_pos = position
            tmp_his.set_origin(x_pos, y_min, z_min)
            tmp_his.set_extent(resolution, y_max, z_max)
            tmp_his.change_cube_size(resolution)

        if model_type == 'base':
            # plot the "freezed" base model instead of the current one
            tmp_his.events = self.base_events.copy()

        # 3. save temporary file
        tmp_his_file = "tmp_section.his"
        tmp_his.write_history(tmp_his_file)
        tmp_out_file = "tmp_section_out"

        # 4./5. run noddy and open the output. Reading occasionally fails
        # (output not written yet), so retry a bounded number of times.
        # The original always computed the model twice (once before the
        # retry loop, again inside it) and used a fragile
        # "while 'tmp_out' not in locals()" check — both fixed here.
        import pynoddy.output

        tmp_out = None
        for attempt in range(21):
            pynoddy.compute_model(tmp_his_file, tmp_out_file,
                                  output=compute_output)
            try:
                tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
                break
            except IOError:  # output not readable yet, try again
                pass
        if tmp_out is None:
            raise IOError("Could not read Noddy output %s after %d attempts" %
                          (tmp_out_file, attempt + 1))

        # 6. remove temporary files: find all files that match the base name
        # of the output file (depends on Noddy compute type!)
        import os
        if remove_tmp_files:
            for f in os.listdir('.'):
                if os.path.splitext(f)[0] == tmp_out_file:
                    os.remove(f)

        return tmp_out
コード例 #14
0
    def get_section(self, direction='y', position='center', **kwds):
        """Get geological section of the model (re-computed at required resolution) as noddy object

        **Arguments**:
            - *direction* = 'x', 'y', 'z' : coordinate direction of section plot (default: 'y')
              NOTE(review): only 'x' and 'y' are handled below — confirm 'z' support.
            - *position* = int or 'center' : cell position of section as integer value
                or identifier (default: 'center')
        
        **Optional arguments**:
            - *resolution* = float : set resolution for section (default: self.cube_size)
            - *model_type* = 'current', 'base' : model type (base "freezed" model can be plotted for comparison)
            - *compute_output* = bool : provide output from command line call (default: True)
            - *remove_tmp_files* = bool : remove temporary files (default: True)

        **Returns**: pynoddy.output.NoddyOutput for the computed section

        **Raises**: IOError after repeated failures to read the Noddy output
        """
        remove_tmp_files = kwds.get("remove_tmp_files", True)
        compute_output = kwds.get("compute_output", True)
        self.get_cube_size()
        self.get_extent()
        resolution = kwds.get("resolution", self.cube_size)
        model_type = kwds.get("model_type", 'current')

        #         self.determine_model_stratigraphy()

        # code copied from noddy.history.HistoryFile.get_drillhole_data()
        self.get_origin()
        z_min = kwds.get("z_min", self.origin_z)
        z_max = kwds.get("z_max", self.extent_z)

        # 1. create copy (deep copy so the original model is untouched)
        import copy
        tmp_his = copy.deepcopy(self)

        # 2. set values: make the model one cell thick perpendicular to the
        #    requested section direction

        if direction == 'y':
            x_min = self.origin_x
            x_max = self.extent_x
            if position == 'center' or position == 'centre':  # AE and BE friendly :-)
                y_pos = (self.extent_y - self.origin_y - resolution) / 2.
            else:  # set position excplicity to cell
                y_pos = position
            tmp_his.set_origin(x_min, y_pos, z_min)  # z_min)
            tmp_his.set_extent(x_max, resolution, z_max)
            tmp_his.change_cube_size(resolution)

        elif direction == 'x':
            y_min = self.origin_y
            y_max = self.extent_y
            if position == 'center' or position == 'centre':
                x_pos = (self.extent_x - self.origin_x - resolution) / 2.
            else:  # set position excplicity to cell
                x_pos = position
            tmp_his.set_origin(x_pos, y_min, z_min)  # z_min)
            tmp_his.set_extent(resolution, y_max, z_max)
            tmp_his.change_cube_size(resolution)

        if model_type == 'base':
            # use the "freezed" base events instead of the current ones
            tmp_his.events = self.base_events.copy()

        # 3. save temporary file
        tmp_his_file = "tmp_section.his"
        tmp_his.write_history(tmp_his_file)
        tmp_out_file = "tmp_section_out"
        # 4. run noddy
        import pynoddy.output

        pynoddy.compute_model(tmp_his_file,
                              tmp_out_file,
                              output=compute_output)
        # 5. open output; reading sometimes fails (output not written yet),
        # so retry. NOTE(review): the loop condition uses a fragile
        # locals() check, and the model is re-computed on every attempt
        # (so at least twice overall, including the call above) — confirm
        # whether the extra computation is intended.
        i = 0
        # if False:
        while not 'tmp_out' in locals():
            i += 1
            pynoddy.compute_model(tmp_his_file,
                                  tmp_out_file,
                                  output=compute_output)
            try:
                tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
            except IOError:  # try again
                pass

            # just in case break statement: give up after 21 attempts
            if i > 20:
                raise IOError("Computation attempts")

            #     # print("Try again")
            # pynoddy.compute_model(tmp_his_file, tmp_out_file, output=compute_output)
            # try:
            #     tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
            # except IOError: # and again
            #     # print("and again...")
            #     pynoddy.compute_model(tmp_his_file, tmp_out_file, output=compute_output)
            #     try:
            #         tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
            #     except IOError:
            #         # print("and again...")
            #         pynoddy.compute_model(tmp_his_file, tmp_out_file, output=compute_output)
            #         try:
            #             tmp_out = pynoddy.output.NoddyOutput(tmp_out_file)
            #         except IOError:
            #             # print("and again...")
            #             pynoddy.compute_model(tmp_his_file, tmp_out_file, output=compute_output)

        # 6.
        #         tmp_out.plot_section(direction = direction, player_labels = self.model_stratigraphy, **kwds)
        # return tmp_out.block

        # remove temporary files: find all files that match the base name of
        # the output file (depends on Noddy compute type!)
        import os
        if remove_tmp_files:
            for f in os.listdir('.'):
                if os.path.splitext(f)[0] == tmp_out_file:
                    os.remove(f)

        return tmp_out
コード例 #15
0
def test_simulation_noddy():
    """Smoke-test the Noddy executable.

    Runs ``pynoddy.compute_model`` on the module-level ``history`` input,
    writing results to the module-level ``output`` path, using the binary
    at ``noddy_exec``. All three names are defined elsewhere in this
    script; the test passes if the call completes without raising.
    """
    pynoddy.compute_model(history, output, noddy_path=noddy_exec)
コード例 #16
0
ファイル: check_pynoddy.py プロジェクト: freestylewhl/pynoddy
    test_history = NoddyHistory(history_path)
except Exception as e:
    sys.stderr.write(
        "An error occured while loading a NoddyHistory from a .his file... %s\n"
        % e)
    err = True
if not err:
    print("Succesfully loaded a history file")

#####################
##Test Noddy
#####################
output_name = "test_out"

# keep the log text defined even if compute_model raises before assignment,
# so the error handler below can always report it
txt = ""
try:
    txt = pynoddy.compute_model(history_path, output_name)
except Exception as e:
    sys.stderr.write("Error - could not call Noddy executable... %s\n" % e)
    sys.stderr.write("Noddy log: %s\n" % txt)
    sys.exit(1)

if not err:
    print("Succesfully called Noddy executable in BLOCK mode.")

try:
    txt = pynoddy.compute_model(history_path, output_name, sim_type='TOPOLOGY')
except Exception as e:
    sys.stderr.write("Error - could not call Noddy executable... %s\n" % e)
    sys.stderr.write("Noddy log: %s\n" % txt)
    sys.exit(1)
コード例 #17
0
    def generate_model_instances(self, path, count, **kwds):
        """
        Generates the specified number of randomly varied Noddy models.

        **Arguments**:
         - *path* = The directory that Noddy models should be generated in
         - *count* = The number of random variations to generate
        **Optional Kewords**:
         - *threads* = The number of seperate threads to run when generating noddy models. Note that RAM is
                       often a limiting factor (at this point every thread requires at least ~1Gb of ram).
        - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                       'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
        - *write_changes* = A file (path) to write the parameters used in each model realisation to
                        (minus the extension).
                       The default is None (no file written).
        - *verbose* = True if this function sends output to the print buffer. Default is False.
        - *seed* = The random seed to use in this experiment. If not specified,
                    threads are seeded with PID + TID + time (+ nodeID).
        """

        # get args
        vb = kwds.get("verbose", False)
        stype = kwds.get("sim_type", "BLOCK")
        threads = kwds.get("threads", 1)
        changes = kwds.get("write_changes", None)

        # store path for later
        self.instance_path = path

        # get start time (for timing runs)
        import time as time
        if vb:
            start_time = time.time()

        # get variables for seed
        seed_base = os.getpid() * int(time.time() / 1000000)
        nodeID = 1  # this will be changed later if running on a linux box

        # ensure directory exists
        if not os.path.isdir(path):
            os.makedirs(path)

        if threads > 1:  # multithreaded - spawn required number of threads

            # calculate & create node directory (for multi-node instances)
            import platform
            if platform.system() == 'Linux':
                # running linux - might be a cluster, so get node name
                nodename = os.uname()[1]  # name of the node (linux only)

                # move into node subdirectory
                path = os.path.join(path, nodename)

                # append node name to output
                if changes is not None:
                    changes = "%s_%s" % (changes, nodename)

                # change nodeID for seed
                nodeID = hash(nodename)

            # import thread stuff
            from threading import Thread

            thread_list = []
            for t in range(0, threads):

                # create subdirectory for this thread
                threadpath = os.path.join(path, "thread_%d" % t)
                if not os.path.isdir(threadpath):
                    os.makedirs(threadpath)

                # make copy of this object
                import copy
                t_his = copy.deepcopy(self)

                # calculate number of models to run in this thread
                # (integer division; the first thread picks up the remainder)
                n = count // threads
                if t == 0:
                    n = n + count % threads

                # calculate changes path
                change_path = None
                if changes is not None:
                    change_path = "%s_thread%d" % (changes, t)

                # set random seed (nodeID + process-derived base + threadID)
                t_his.set_random_seed(nodeID + seed_base + t)

                # override default seed, for reproducable results
                if 'seed' in kwds:
                    t_his.set_random_seed(kwds['seed'] + t)  # specifed seed + threadID

                # initialise thread
                t = Thread(target=t_his.generate_model_instances,
                           args=(threadpath, n),
                           kwargs={
                               'sim_type': stype,
                               'verbose': vb,
                               'write_changes': change_path
                           })

                thread_list.append(t)

                # start thread
                t.start()

            # now wait for threads to finish
            for t in thread_list:
                t.join()

            # now everything is finished!
            if vb:
                print("Finito!")
                elapsed = time.time() - start_time
                print("Generated %d models in %d seconds\n\n" % (count, elapsed))

        else:  # only 1 thread (or instance of a thread), so run noddy
            for n in range(1, count + 1):  # numbering needs to start at 1 for topology
                # calculate filename & output path
                outputfile = "%s_%04d" % (self.basename, n)
                outputpath = os.path.join(path, outputfile)

                if vb:
                    print("Constructing %s... " % outputfile)

                # do random perturbation
                self.random_perturbation(verbose=vb)

                # save history
                self.write_history(outputpath + ".his")

                # run noddy
                if vb:
                    print("Complete.\nRunning %s... " % outputfile)
                    print(pynoddy.compute_model(outputpath + ".his",
                                                outputpath,
                                                sim_type=stype))
                    print("Complete.")
                else:
                    pynoddy.compute_model(outputpath + ".his",
                                          outputpath,
                                          sim_type=stype)

                # run topology if necessary
                if "TOPOLOGY" in stype:
                    if vb:
                        print("Complete. Calculating Topology... ")
                        print(pynoddy.compute_topology(outputpath))
                        print("Complete.")
                    else:
                        pynoddy.compute_topology(outputpath)

                # flush print buffer
                sys.stdout.flush()

            # write changes
            if changes is not None:
                if vb:
                    print("Writing parameter changes to %s..." % (changes + ".csv"))
                self.write_parameter_changes(changes + ".csv")
                if vb:
                    print("Complete.")
コード例 #18
0
    def generate_models_from_existing_histories(path,
                                                verbose=True,
                                                sim_type="BLOCK",
                                                threads=1,
                                                force_recalculate=False,
                                                **kwds):
        """
        Processes all existing his files in the given directory

        **Arguments**:
         - *path* = The directory that will be searched for .his files
        **Optional Arguments**:
         - *threads* = The number of seperate threads to run when generating noddy models. For optimum
                       performance this should equal the number of logical cores - 1, unless RAM is a
                       limiting factor (at this point every thread requires at least 2Gb of ram).
        - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                       'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
        - *force_recalculate* = Forces the recalculation of existing noddy files. Default is False, hence this
                       function will not run history files that are already associated with Noddy data files.
        - *verbose* = True if this function sends output to the print buffer. Default is True.
        """

        # get argument values
        vb = verbose
        stype = sim_type
        force = force_recalculate

        if threads >= 1:  # spawn threads

            # gather list of his files
            his_files = []
            for root, dirnames, filenames in os.walk(path):  # walk the directory
                for f in filenames:
                    if '.his' in f:  # find all history files
                        his_files.append(os.path.join(root, f))

            # spawn batches of worker threads until the file list is empty
            from threading import Thread

            while len(his_files) > 0:
                # fresh list per batch: avoids re-joining already-finished threads
                thread_list = []
                for n in range(0, threads):
                    if len(his_files) > 0:
                        # get path of next file to process
                        p = his_files.pop(0)

                        # initialise thread (threads=0 makes the worker run
                        # the single-file branch below)
                        t = Thread(target=MonteCarlo.
                                   generate_models_from_existing_histories,
                                   args=(p, ),
                                   kwargs={
                                       'threads': 0,
                                       'sim_type': stype,
                                       'verbose': vb,
                                       'force_recalculate': force
                                   })
                        thread_list.append(t)

                        # start thread
                        t.start()

                # now wait for this batch to finish
                for t in thread_list:
                    t.join()

        else:  # run given file
            # strip only the extension; path.split('.') would truncate paths
            # containing dots in directory names
            output = os.path.splitext(path)[0]

            # call noddy if output files don't exist, or force is true
            if force or not os.path.exists(output + ".g12"):
                if vb:
                    print("Running %s... " % output)
                    print(pynoddy.compute_model(path, output, sim_type=stype))
                    print("Complete.")
                else:
                    pynoddy.compute_model(path, output, sim_type=stype)

            # call topology if in TOPOLOGY mode
            if 'TOPOLOGY' in stype:
                # run if topology files don't exist, or force is true
                if force or not os.path.exists(output + ".g23"):
                    if vb:
                        print("Running topology on %s... " % output)
                        print(pynoddy.compute_topology(output))
                        print("Complete.")
                    else:
                        pynoddy.compute_topology(output)
                elif vb:
                    print("Topology files alread exist for %s. Skipping." % path)

            # flush print buffer
            sys.stdout.flush()
コード例 #19
0
    def test_resolution(self, numTrials, **kwds):
        '''Tests the sensitivity of a model to block size by generating models of different
        resolutions and comparing them.
        **Arguments**:
            - *numTrials* = the number of different model resolutions to test
        **Optional Keywords**:
            - *output* = a csv file to write the results (resolution, cumulative topologies) to.
            Default is "" (no file written).
            - *cleanup* = True if this function should delete any models it creates. Otherwise models of different resolutions
            are left in the same directory as the .his file they derive from. Default is True.
            - *verbose* = If true, this function sends information to the print buffer. Otherwise it runs silently. Default is False.
        **Returns**:
            - The function returns a list containing the cumulative number of model topologies
            observed, starting from the highest resolution (smallest block size) to the lowest block
            size (largest block size)

        '''
        #get args
        outFile = kwds.get("output", "")
        cleanup = kwds.get("cleanup", True)
        verbose = kwds.get("verbose", False)

        #import pynoddy bindings
        import pynoddy

        #store null volume threshold and then set to zero
        old_threshold = pynoddy.null_volume_threshold
        pynoddy.null_volume_threshold = 0

        #place to keep topologies
        self.topo_list = []
        self.res_list = []

        self.nUnique = 0  #number of unique topologies
        self.count = []  #number of differen topologies observed at each step
        self.size = []  #number of edges (relationships) observed at each step

        #run test
        #integer step between resolutions; guard against a zero step,
        #which range() rejects (happens when numTrials > size range)
        step = max(1, (self.maxSize - self.minSize) // numTrials)
        for res in range(self.minSize, self.maxSize, step):
            if verbose:
                print("Computing model with %d block size" % res)

            #change cube size
            self.change_cube_size(res)
            print("Cube size: %d:" % self.get_cube_size())

            #store cube size
            self.res_list.append(res)

            #save history file
            basename = self.path + "_cube_size_%d" % res

            self.write_history(basename + ".his")

            #run saved history file
            if verbose:
                print("Running resolution %d... " % res)
                print(pynoddy.compute_model(basename + ".his",
                                            basename + "_0001",
                                            sim_type="TOPOLOGY"))
                print("Complete.\n")
            else:
                pynoddy.compute_model(basename + ".his",
                                      basename + "_0001",
                                      sim_type="TOPOLOGY")

            #calculate topology (same arguments in both branches)
            if verbose:
                print('Computing model topologies...')
                print(pynoddy.compute_topology(basename, 1))
                print('Finished.\n')
            else:
                pynoddy.compute_topology(basename, 1)

            #load and store topology output
            topo = NoddyTopology(basename + "_0001")

            #cull small nodes
            #topo.filter_node_volumes(self.min_node_volume)

            #see if this is on the list
            if topo.is_unique(self.topo_list):
                self.nUnique += 1  #increment unique topologies

            #store cumulative sequence
            self.count.append(self.nUnique)

            #add to list of observed topologies
            self.topo_list.append(topo)

            #append number of edges to edges list
            self.size.append(topo.graph.number_of_edges())

            #cleanup
            if cleanup:
                import os, glob
                #remove noddy files
                for f in glob.glob(basename + "*"):
                    os.remove(f)

        print("Complete. A total of %d topologies were observed" %
              self.nUnique)
        print("The size of the network at each step was:")
        print(self.size)

        print("The cumulative observation sequence was:")
        print(self.count)

        #write output csv if requested
        if outFile != "":
            with open(outFile, 'w') as f:
                f.write("trial_resolution,cumulative_topologies\n")
                for i in range(0, len(self.res_list)):
                    f.write("%d,%d\n" % (self.res_list[i], self.count[i]))

        #restore
        pynoddy.null_volume_threshold = old_threshold

        return self.count
コード例 #20
0
ファイル: MonteCarlo.py プロジェクト: jesserobertson/pynoddy
    def generate_models_from_existing_histories(path, **kwds):
        '''
        Processes all existing his files in the given directory

        **Arguments**:
         - *path* = The directory that will be searched for .his files
        **Optional Kewords**:
         - *threads* = The number of seperate threads to run when generating noddy models. For optimum
                       performance this should equal the number of logical cores - 1, unless RAM is a
                       limiting factor (at this point every thread requires at least 2Gb of ram).
        - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                       'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
        - *force_recalculate* = Forces the recalculation of existing noddy files. Default is False, hence this
                       function will not run history files that are already associated with Noddy data files.
        - *verbose* = True if this function sends output to the print buffer. Default is True.
        '''

        # get keywords
        vb = kwds.get("verbose", True)
        stype = kwds.get("sim_type", "BLOCK")
        threads = kwds.get("threads", 1)
        force = kwds.get("force_recalculate", False)

        if threads >= 1:  # spawn threads

            # gather list of his files
            his_files = []
            for root, dirnames, filenames in os.walk(path):  # walk the directory
                for f in filenames:
                    if '.his' in f:  # find all history files
                        his_files.append(os.path.join(root, f))

            # spawn batches of worker threads until the file list is empty
            from threading import Thread

            while len(his_files) > 0:
                # fresh list per batch: avoids re-joining finished threads
                thread_list = []
                for n in range(0, threads):
                    if len(his_files) > 0:
                        # get path of next file to process
                        p = his_files.pop(0)

                        # initialise worker (threads=0 runs the single-file branch)
                        t = Thread(target=MonteCarlo.generate_models_from_existing_histories,
                                   args=(p,),
                                   kwargs={'threads': 0,
                                           'sim_type': stype,
                                           'verbose': vb,
                                           'force_recalculate': force})
                        thread_list.append(t)

                        # start thread
                        t.start()

                # now wait for this batch to finish
                for t in thread_list:
                    t.join()

        else:  # run given file
            # strip only the extension; path.split('.') would truncate paths
            # containing dots in directory names
            output = os.path.splitext(path)[0]

            # call noddy if output files don't exist, or force is true
            if force or not os.path.exists(output + ".g01"):
                if vb:
                    print("Running %s... " % output)
                    print(pynoddy.compute_model(path, output, sim_type=stype))
                    print("Complete.")
                else:
                    pynoddy.compute_model(path, output, sim_type=stype)

            # call topology if in TOPOLOGY mode
            if 'TOPOLOGY' in stype:
                # run if topology files don't exist, or force is true
                if force or not os.path.exists(output + ".g23"):
                    if vb:
                        print("Running topology on %s... " % output)
                        print(pynoddy.compute_topology(output))
                        print("Complete.")
                    else:
                        pynoddy.compute_topology(output)
                elif vb:
                    print("Topology files alread exist for %s. Skipping." % path)

            # flush print buffer
            sys.stdout.flush()
コード例 #21
0
ファイル: resolution_test.py プロジェクト: Leguark/pynoddy
 def test_resolution(self, numTrials, **kwds):
     '''Tests the sensitivity of a model to block size by generating models of different
     resolutions and comparing them.
     **Arguments**:
         - *numTrials* = the number of different model resolutions to test
     **Optional Keywords**:
         - *output* = a csv file to write the output to
         - *cleanup* = True if this function should delete any models it creates. Otherwise models of different resolutions
         are left in the same directory as the .his file they derive from. Default is True.
         - *verbose* = If true, this function sends information to the print buffer. Otherwise it runs silently. Default is False.
     **Returns**:
         - The function returns a list containing the cumulative number of model topologies
         observed, starting from the highest resolution (smallest block size) to the lowest block
         size (largest block size)

     '''
     # get args
     outFile = kwds.get("output", "")
     cleanup = kwds.get("cleanup", True)
     verbose = kwds.get("verbose", False)

     # import pynoddy bindings
     import pynoddy

     # place to keep topologies
     topo_list = []
     res_list = []

     nUnique = 0  # number of unique topologies
     count = []

     # run test
     # integer step between resolutions; guard against a zero step,
     # which range() rejects (happens when numTrials > size range)
     step = max(1, (self.maxSize - self.minSize) // numTrials)
     for res in range(self.minSize, self.maxSize, step):
         if verbose:
             print("Computing model with %d block size" % res)

         # change cube size
         self.change_cube_size(res, type="Geophysics")
         self.change_cube_size(res, type="Geology")
         print("Cube size: %d:" % self.get_cube_size())

         # store cube size
         res_list.append(res)

         # save history file
         basename = self.path + "_cube_size_%d" % res

         self.write_history(basename + ".his")

         # run saved history file
         if verbose:
             print("Running resolution %d... " % res)
             print(pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY"))
             print("Complete.\n")
         else:
             pynoddy.compute_model(basename + ".his", basename + "_0001", sim_type="TOPOLOGY")

         # calculate topology
         if verbose:
             print('Computing model topologies...')
             print(pynoddy.compute_topology(basename, 1))
             print('Finished.\n')
         else:
             pynoddy.compute_topology(basename, 1)

         # load and store topology output
         topo = NoddyTopology(basename + "_0001")

         # cull small nodes
         # topo.filter_node_volumes(self.min_node_volume)

         # see if this is on the list
         if topo.is_unique(topo_list):
             nUnique += 1  # increment unique topologies

         # store cumulative sequence
         count.append(nUnique)

         # add to list of observed topologies
         topo_list.append(topo)

         # cleanup
         if cleanup:
             import os, glob
             # remove noddy files
             for f in glob.glob(basename + "*"):
                 os.remove(f)

     print("Complete. A total of %d topologies were observed" % nUnique)
     print("The cumulative observation sequence was:")
     print(count)

     # write output
     if outFile != "":
         with open(outFile, 'w') as f:
             f.write("trial_resolution,cumulative_topologies\n")
             for i in range(0, len(res_list)):
                 f.write("%d,%d\n" % (res_list[i], count[i]))

     return count
コード例 #22
0
ファイル: MonteCarlo.py プロジェクト: jesserobertson/pynoddy
 def generate_model_instances(self, path, count, **kwds):
     '''
     Generates the specified number of randomly varied Noddy models.

     **Arguments**:
      - *path* = The directory that Noddy models should be generated in
      - *count* = The number of random variations to generate
     **Optional Kewords**:
      - *threads* = The number of seperate threads to run when generating noddy models. Note that RAM is
                    often a limiting factor (at this point every thread requires at least ~1Gb of ram).
     - *sim_type* = The type of simulation to run. This can be any of: 'BLOCK', 'GEOPHYSICS', 'SURFACES',
                    'BLOCK_GEOPHYS', 'TOPOLOGY', 'BLOCK_SURFACES', 'ALL'. Default is 'BLOCK'.
     - *write_changes* = A file (path) to write the parameters used in each model realisation to (minus the extension).
                    The default is a file called 'parameters.csv'. Set as None to disable writing.
     - *verbose* = True if this function sends output to the print buffer. Default is True.
     '''

     # get args
     vb = kwds.get("verbose", True)
     stype = kwds.get("sim_type", "BLOCK")
     threads = kwds.get("threads", 1)
     changes = kwds.get("write_changes", "parameters")

     # store path for later
     self.instance_path = path

     # get start time (for timing runs)
     import time as time
     if vb:
         start_time = time.time()

     # get variables for seed
     seed_base = os.getpid() * int(time.time() / 1000000)
     nodeID = 1  # this will be changed later if running on a linux box

     # ensure directory exists
     if not os.path.isdir(path):
         os.makedirs(path)

     if threads > 1:  # multithreaded - spawn required number of threads

         # calculate & create node directory (for multi-node instances)
         import platform
         if platform.system() == 'Linux':
             # running linux - might be a cluster, so get node name
             nodename = os.uname()[1]  # linux only

             # move into node subdirectory
             # (os.path.join: `path.join(path, nodename)` was a str.join
             # misuse and raised a TypeError)
             path = os.path.join(path, nodename)

             # append node name to output
             if changes is not None:
                 changes = "%s_%s" % (changes, nodename)

             # change nodeID for seed
             nodeID = hash(nodename)

         # import thread stuff
         from threading import Thread

         thread_list = []
         for t in range(0, threads):

             # create subdirectory for this thread
             threadpath = os.path.join(path, "thread_%d" % t)
             if not os.path.isdir(threadpath):
                 os.makedirs(threadpath)

             # make copy of this object
             import copy
             t_his = copy.deepcopy(self)

             # calculate number of models to run in this thread
             # (integer division; first thread picks up the remainder)
             n = count // threads
             if t == 0:
                 n = n + count % threads

             # calculate changes path
             change_path = None
             if changes is not None:
                 change_path = "%s_thread%d" % (changes, t)

             # set random seed (nodeID + process-derived base + threadID);
             # the former product was always 0 for thread 0
             t_his.set_random_seed(nodeID + seed_base + t)

             # initialise thread
             t = Thread(target=t_his.generate_model_instances,
                        args=(threadpath, n),
                        kwargs={'sim_type': stype, 'verbose': vb, 'write_changes': change_path})

             thread_list.append(t)

             # start thread
             t.start()

         # now wait for threads to finish
         for t in thread_list:
             t.join()

         # now everything is finished!
         if vb:
             print("Finito!")
             elapsed = time.time() - start_time
             print("Generated %d models in %d seconds\n\n" % (count, elapsed))

     else:  # only 1 thread (or instance of a thread), so run noddy
         for n in range(1, count + 1):  # numbering needs to start at 1 for topology
             # calculate filename & output path
             outputfile = "%s_%04d" % (self.basename, n)
             outputpath = os.path.join(path, outputfile)

             if vb:
                 print("Constructing %s... " % outputfile)

             # do random perturbation
             self.random_perturbation(verbose=vb)

             # save history
             self.write_history(outputpath + ".his")

             # run noddy
             if vb:
                 print("Complete.\nRunning %s... " % outputfile)
                 print(pynoddy.compute_model(outputpath + ".his", outputpath, sim_type=stype))
                 print("Complete.")
             else:
                 pynoddy.compute_model(outputpath + ".his", outputpath, sim_type=stype)

             # run topology if necessary
             if "TOPOLOGY" in stype:
                 if vb:
                     print("Complete. Calculating Topology... ")
                     print(pynoddy.compute_topology(outputpath))
                     print("Complete.")
                 else:
                     pynoddy.compute_topology(outputpath)

             # flush print buffer
             sys.stdout.flush()

         # write changes
         if changes is not None:
             print("Writing parameter changes to %s..." % (changes + ".csv"))
             self.write_parameter_changes(changes + ".csv")
             print("Complete.")
コード例 #23
0
print("Start Perturbing")

# perturb the base history num_hist times, saving each realisation as <n>.his
for gen_hist in range(1, num_hist + 1, 1):
    history = perturb(history, 2, num_fold, num_event, gen_hist - 1)
    perturbed_path = str(gen_hist) + '.his'
    history.write_history(perturbed_path)
    # reload the unperturbed base history for the next realisation
    history = pynoddy.NoddyHistory(history_path)

print("Start Generating output and diagram")
for load_hist in range(1, num_hist + 1, 1):
    if load_hist % 10 == 0:
        print("Processing history file " + str(load_hist))
    perturbed_path = str(load_hist) + '.his'
    output_name = "out_" + str(load_hist)
    pynoddy.compute_model(perturbed_path, output_name)
    model_output = pynoddy.output.NoddyOutput(output_name)
    model_output.export_to_vtk(vtk_filename=output_name)

    #Figure generate
    fig = plt.figure(figsize=(15, 5))
    ax1 = fig.add_subplot(131)
    ax2 = fig.add_subplot(132)
    ax3 = fig.add_subplot(133)
    model_output.plot_section('x',
                              position='center',
                              ax=ax1,
                              colorbar=False,
                              title="Cross-section of X-axis")
    model_output.plot_section('y',
                              position='center',
コード例 #24
0
ファイル: ResolutionTest.py プロジェクト: Japhiolite/pynoddy
    def test_resolution(self, numTrials, **kwds):
        """Tests the sensitivity of a model to block size by generating models
        of different resolutions and comparing them.

        **Arguments**:
            - *numTrials* = the number of different model resolutions to test
        **Optional Keywords**:
            - *cleanup* = True if this function should delete any models it
              creates. Otherwise models of different resolutions are left in
              the same directory as the .his file they derive from.
              Default is True.
            - *verbose* = If true, this function sends information to the
              print buffer. Otherwise it runs silently. Default is True.
        **Returns**:
            - A list containing the cumulative number of unique model
              topologies observed, starting from the highest resolution
              (smallest block size) to the lowest resolution (largest block
              size).
        """
        # get args
        cleanup = kwds.get("cleanup", True)
        verbose = kwds.get("verbose", True)

        # import pynoddy bindings
        import pynoddy

        # store null volume threshold and then set to zero so no topology
        # nodes are culled during this comparison
        old_threshold = pynoddy.null_volume_threshold
        pynoddy.null_volume_threshold = 0

        # places to keep observed topologies and the resolutions tested
        self.topo_list = []
        self.res_list = []

        self.nUnique = 0  # number of unique topologies
        self.count = []   # cumulative unique-topology count at each step
        self.size = []    # number of edges (relationships) at each step

        # step change between resolutions.
        # Integer division so range() gets an int (true division yields a
        # float and raises TypeError under Python 3); guard against a zero
        # step when numTrials exceeds the resolution span.
        step = max(1, (self.maxSize - self.minSize) // numTrials)
        for res in range(self.minSize, self.maxSize, step):
            if verbose:
                print("Computing model with %d block size" % res)

            # change cube size for both lattices
            self.change_cube_size(res, type="Geophysics")
            self.change_cube_size(res, type="Geology")
            print("Cube size: %d:" % self.get_cube_size())

            # store cube size
            self.res_list.append(res)

            # save history file for this resolution
            basename = self.path + "_cube_size_%d" % res
            self.write_history(basename + ".his")

            # run saved history file
            if verbose:
                print("Running resolution %d... " % res)
                print(pynoddy.compute_model(basename + ".his",
                                            basename + "_0001",
                                            sim_type="TOPOLOGY"))
                print("Complete.\n")
            else:
                pynoddy.compute_model(basename + ".his", basename + "_0001",
                                      sim_type="TOPOLOGY")

            # calculate topology
            if verbose:
                print("Computing model topologies...")
                print(pynoddy.compute_topology(basename))
                print("Finished.\n")
            else:
                # NOTE(review): the silent path passes an extra argument (1)
                # that the verbose path does not -- confirm which signature
                # of compute_topology is intended.
                pynoddy.compute_topology(basename, 1)

            # load and store topology output
            topo = NoddyTopology(basename + "_0001")

            # cull small nodes
            # topo.filter_node_volumes(self.min_node_volume)

            # count this topology if it has not been observed before
            if topo.is_unique(self.topo_list):
                self.nUnique += 1  # increment unique topologies

            # store cumulative sequence
            self.count.append(self.nUnique)

            # add to list of observed topologies
            self.topo_list.append(topo)

            # append number of edges to edges list
            self.size.append(topo.graph.number_of_edges())

            # cleanup intermediate noddy files for this resolution
            if cleanup:
                import os
                import glob

                for f in glob.glob(basename + "*"):
                    os.remove(f)

        print("Complete. A total of %d topologies were observed" % self.nUnique)
        print("The size of the network at each step was:")
        print(self.size)

        print("The cumulative observation sequence was:")
        print(self.count)

        # restore the threshold we clobbered at the start
        pynoddy.null_volume_threshold = old_threshold

        return self.count
コード例 #25
0
ファイル: check_pynoddy.py プロジェクト: pytzcarraldo/pynoddy
# Test loading a Noddy history (.his) file
try:
    test_history = NoddyHistory(history_path)
except Exception as e:
    sys.stderr.write("An error occured while loading a NoddyHistory from a .his file... %s\n" % e)
    err = True
if not err:
    print("Succesfully loaded a history file")
    

#####################
##Test Noddy
#####################
output_name = "test_out"

# txt must be bound before the try: if compute_model raises before the
# assignment completes, the except handler below would otherwise hit a
# NameError while formatting the log message, masking the real error.
txt = ""
try:
    txt = pynoddy.compute_model(history_path, output_name) 
except Exception as e:
    sys.stderr.write("Error - could not call Noddy executable... %s\n" % e)
    sys.stderr.write("Noddy log: %s\n" % txt)
    sys.exit(1)

if not err:
    print("Succesfully called Noddy executable in BLOCK mode.")

# same guard as above for the TOPOLOGY-mode call
txt = ""
try:
    txt = pynoddy.compute_model(history_path, output_name, sim_type = 'TOPOLOGY') 
except Exception as e:
    sys.stderr.write("Error - could not call Noddy executable... %s\n" % e)
    sys.stderr.write("Noddy log: %s\n" % txt)
    sys.exit(1)
コード例 #26
0
ファイル: run_experiment.py プロジェクト: wangcug/pynoddy
 def compute_his(self, history_file):
     """Compute a single Noddy history file, writing output under the name 'tmp'."""
     pynoddy.compute_model(history_file, 'tmp')
コード例 #27
0
    def estimate_uncertainty(self, n_trials, **kwds):
        """
        Samples the specified number of models, given the pdf's defined in the
        params file used to create this model.

        **Arguments**:
         - *n_trials* = The number of random draws to produce. The variation
                        between these random draws is used to estimate
                        uncertainty.
        **Optional Keywords**:
         - *verbose* = If true, this function prints information to the print
                       buffer. Default is False.
         - *model_path* = The directory to write models to. Default is a local
                          directory called 'tmp'.
         - *cleanup* = True if this function should delete any models it
                       creates (they're not needed anymore). Default is True.
        """
        vb = kwds.get('verbose', False)
        model_path = kwds.get('model_path', 'tmp')
        cleanup = kwds.get('cleanup', True)

        # generate & load initial model (establishes grid dimensions and
        # stratigraphy used below)
        self.write_history('tmp.his')
        pynoddy.compute_model('tmp.his', self.basename)
        self.load_model_info()
        self.load_geology()
        os.remove('tmp.his')

        # perform monte carlo sampling
        if vb:
            print("Producing model realisations...")
        self.generate_model_instances(model_path, n_trials, verbose=vb,
                                      write_changes=None)

        # thought: it would be more efficient (memory wise) to load models 1
        # at a time rather than dumping them all in memory....

        # load results
        if vb:
            print("Loading models...")
        models = MonteCarlo.load_noddy_realisations(model_path, verbose=vb)
        self.models = models

        # compute strat column
        # self.determine_model_stratigraphy()
        # self.n_rocktypes = len(self.model_stratigraphy)

        # calculate probabilities for each lithology:
        # p_block[lithology][x][y][z] = p(lithology | x, y, z)
        self.p_block = [[[[0. for z in range(self.nz)]
                          for y in range(self.ny)]
                         for x in range(self.nx)]
                        for l in range(self.n_rocktypes)]
        p1 = 1 / float(n_trials)  # probability increment per observation
        for m in models:
            # loop through voxels
            for x in range(self.nx):
                for y in range(self.ny):
                    for z in range(self.nz):
                        # lithology ids in the model block are 1-based
                        litho = int(m.block[x][y][z]) - 1

                        # update litho probability
                        self.p_block[litho][x][y][z] += p1

        # calculate (binary) Shannon entropy & store in self.e_block
        self.e_block = np.ndarray((self.nx, self.ny, self.nz))
        for x in range(self.nx):
            for y in range(self.ny):
                for z in range(self.nz):
                    entropy = 0  # accumulate Shannon information entropy
                    for litho in range(self.n_rocktypes):
                        # clamp p into (0, 1) so both log2 terms stay finite
                        p = min(max(self.p_block[litho][x][y][z],
                                    0.0000000000000001),
                                0.9999999999999999)

                        entropy += (p * math.log(p, 2)
                                    + (1 - p) * math.log(1 - p, 2))

                    # negate & normalise by the number of rock types
                    entropy = entropy * -1 / float(self.n_rocktypes)
                    self.e_block[x][y][z] = entropy

        # cleanup
        if vb:
            print("Cleaning up...")
        if cleanup:
            self.cleanup()
        if vb:
            print("Finished.")