コード例 #1
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_missing_all_physics(self):
        """Exporting a project that defines no physics objects must not raise."""
        pn = op.network.Cubic(shape=[4, 4, 4])
        op.geometry.GenericGeometry(network=pn, pores=pn.Ps, throats=pn.Ts)
        liquid = op.phases.GenericPhase(network=pn)

        # Should complete cleanly even though no physics exist
        Dict.to_dict(network=pn, phases=[liquid], flatten=True,
                     interleave=True, categorize_by=[])
コード例 #2
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
 def test_save_and_load(self, tmpdir):
     """Round-trip a project dict through Dict.save and Dict.load."""
     exported = Dict.to_dict(network=self.net, phases=[self.phase_1],
                             flatten=False, interleave=False,
                             categorize_by=[])
     target = tmpdir.join('test.dct')
     Dict.save(dct=exported, filename=target)
     restored = Dict.load(filename=target)
     # Top level should hold exactly the network and the phase
     assert len(restored.keys()) == 2
     os.remove(target)
コード例 #3
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
 def test_from_dict_not_interleaved_not_flatted_not_categorized(self):
     """An uncategorized nested export imports as generic, untyped objects."""
     exported = Dict.to_dict(network=self.net, phases=[self.phase_1],
                             flatten=False, interleave=False,
                             categorize_by=[])
     proj = Dict.from_dict(exported)
     assert len(proj) == 6
     # Nothing can be recognized as geometry, phase, or physics
     for lookup in (proj.geometries, proj.phases, proj.physics):
         assert len(lookup().values()) == 0
コード例 #4
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
 def test_from_dict_interleaved_categorized_by_object(self):
     """Interleaved export categorized by object imports as net + phase only."""
     exported = Dict.to_dict(network=self.net, phases=[self.phase_1],
                             flatten=False, interleave=True,
                             categorize_by=['object'])
     proj = Dict.from_dict(exported)
     assert len(proj) == 2
     # Only the phase survives as a typed object
     for lookup, count in ((proj.geometries, 0), (proj.phases, 1),
                           (proj.physics, 0)):
         assert len(lookup().values()) == count
コード例 #5
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
 def test_from_dict_not_interleaved_not_flatted_cat_by_obj_data_elem(self):
     """Full categorization round-trips with typed objects restored."""
     exported = Dict.to_dict(network=self.net, phases=[self.phase_1],
                             flatten=False, interleave=False,
                             categorize_by=['object', 'element', 'data'])
     # Ensure that data and element categorizations are ripped out
     proj = Dict.from_dict(exported)
     assert len(proj) == 6
     for lookup, count in ((proj.geometries, 2), (proj.phases, 1),
                           (proj.physics, 2)):
         assert len(lookup().values()) == count
コード例 #6
0
    def test_to_dict_missing_all_physics(self):
        """Ensure to_dict handles a project with no physics attached."""
        net = op.network.Cubic(shape=[4, 4, 4])
        op.geometry.GenericGeometry(network=net, pores=net.Ps, throats=net.Ts)
        phase = op.phases.GenericPhase(network=net)

        # Collect the export options once, then run the export
        settings = dict(network=net, phases=[phase], flatten=True,
                        interleave=True, categorize_by=[])
        Dict.to_dict(**settings)
コード例 #7
0
 def test_save_and_load(self, tmpdir):
     """Saving then loading a project dict must preserve its top-level keys."""
     payload = Dict.to_dict(network=self.net,
                            phases=[self.phase_1],
                            flatten=False,
                            interleave=False,
                            categorize_by=[])
     path = tmpdir.join('test.dct')
     Dict.save(dct=payload, filename=path)
     result = Dict.load(filename=path)
     assert len(result.keys()) == 2
     os.remove(path)
コード例 #8
0
 def test_from_dict_not_interleaved_not_flatted_not_categorized(self):
     """Uncategorized nested data imports without any typed objects."""
     blob = Dict.to_dict(network=self.net,
                         phases=[self.phase_1],
                         flatten=False,
                         interleave=False,
                         categorize_by=[])
     proj = Dict.from_dict(blob)
     assert len(proj) == 6
     counts = [len(proj.geometries().values()),
               len(proj.phases().values()),
               len(proj.physics().values())]
     assert counts == [0, 0, 0]
コード例 #9
0
 def test_from_dict_interleaved_categorized_by_object(self):
     """Object-categorized interleaved data imports as net + phase only."""
     blob = Dict.to_dict(network=self.net,
                         phases=[self.phase_1],
                         flatten=False,
                         interleave=True,
                         categorize_by=['object'])
     proj = Dict.from_dict(blob)
     assert len(proj) == 2
     counts = [len(proj.geometries().values()),
               len(proj.phases().values()),
               len(proj.physics().values())]
     assert counts == [0, 1, 0]
コード例 #10
0
 def test_from_dict_not_interleaved_not_flatted_cat_by_obj_data_elem(self):
     """Fully categorized nested export re-imports with typed objects."""
     blob = Dict.to_dict(network=self.net,
                         phases=[self.phase_1],
                         flatten=False,
                         interleave=False,
                         categorize_by=['object', 'element', 'data'])
     # The element and data category levels are removed during import
     proj = Dict.from_dict(blob)
     assert len(proj) == 6
     counts = [len(proj.geometries().values()),
               len(proj.phases().values()),
               len(proj.physics().values())]
     assert counts == [2, 1, 2]
コード例 #11
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_flat_not_interleaved_categorized_by_data_element(self):
        """Flat export categorized by data+element nests pore/throat keys
        under labels/properties for every object in the project."""
        D = Dict.to_dict(network=self.net, phases=[self.phase_1, self.phase_2],
                         flatten=True, interleave=False,
                         categorize_by=['data', 'element'])

        # Flattened: one top-level entry per object in the project
        assert set(D.keys()) == set([i.name for i in self.net.project])

        elements = set(['pore', 'throat'])
        names = ['net_01', 'geo_01', 'geo_02', 'phase_01', 'phase_02',
                 'phys_01', 'phys_02', 'phys_03', 'phys_04']
        for name in names:
            for category in ('labels', 'properties'):
                assert elements.issubset(D[name][category].keys())
コード例 #12
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_not_flat_not_interleaved_categorized_by_data(self):
        """Nested export categorized by data keeps child objects nested under
        their parents and adds a labels/properties level to every object."""
        D = Dict.to_dict(network=self.net, phases=[self.phase_1, self.phase_2],
                         flatten=False, interleave=False,
                         categorize_by=['data'])

        # Unused locals `a` and `d` from the original were removed
        b = set(['net_01', 'phase_01', 'phase_02'])
        c = set(['labels', 'properties'])

        # Ensure NOT categorized by object
        assert b == set(D.keys())

        # Ensure NOT flattened
        assert set(['geo_01', 'geo_02']).issubset(D['net_01'].keys())
        assert set(['phys_01', 'phys_02']).issubset(D['phase_01'].keys())
        assert set(['phys_03', 'phys_04']).issubset(D['phase_02'].keys())

        # Ensure categorized by data
        assert c.issubset(D['net_01'].keys())
        assert c.issubset(D['phase_01'].keys())
        assert c.issubset(D['phase_02'].keys())
        assert c.issubset(D['net_01']['geo_01'].keys())
        assert c.issubset(D['net_01']['geo_02'].keys())
        assert c.issubset(D['phase_01']['phys_01'].keys())
        assert c.issubset(D['phase_01']['phys_02'].keys())
        assert c.issubset(D['phase_02']['phys_03'].keys())
        assert c.issubset(D['phase_02']['phys_04'].keys())
コード例 #13
0
 def test_to_dict_categorize_by_project(self):
     """Categorizing by project nests everything under the project's name."""
     exported = Dict.to_dict(network=self.net,
                             phases=[self.phase_1, self.phase_2],
                             flatten=False, interleave=True,
                             categorize_by=['project'])
     assert 'proj_01' in exported.keys()
コード例 #14
0
    def test_to_dict_flat_not_interleaved_categorized_by_data_element(self):
        """Every flattened object exposes pore/throat keys under both the
        'labels' and 'properties' categories."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=True,
                         interleave=False,
                         categorize_by=['data', 'element'])

        project_names = set([i.name for i in self.net.project])
        assert set(D.keys()) == project_names

        wanted = set(['pore', 'throat'])
        # The top level holds exactly the project's objects, so checking every
        # key covers the network, geometries, phases, and physics alike
        for obj_name in sorted(D.keys()):
            assert wanted.issubset(D[obj_name]['labels'].keys())
            assert wanted.issubset(D[obj_name]['properties'].keys())
コード例 #15
0
ファイル: MAT.py プロジェクト: zmhhaha/OpenPNM
    def save(cls, network, phases=None, filename=''):
        r"""
        Write Network to a Mat file for exporting to Matlab.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the data to be written.

        filename : string
            Desired file name, defaults to project name if not given.

        phases : list of phase objects
            Phases that have properties we want to write to file.  Defaults
            to an empty list.

        """
        # Avoid a shared mutable default argument (`phases=[]`)
        phases = [] if phases is None else phases
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]
        # Write to file
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='mat')

        d = Dict.to_dict(network=network, phases=phases, interleave=True)
        d = FlatDict(d, delimiter='|')
        d = sanitize_dict(d)
        # Matlab variable names cannot contain '|' or '.', so rewrite the
        # flattened keys into underscore-separated names
        new_d = {}
        for key in list(d.keys()):
            new_key = key.replace('|', '_').replace('.', '_')
            new_d[new_key] = d.pop(key)

        spio.savemat(file_name=filename, mdict=new_d)
コード例 #16
0
    def test_to_dict_not_flat_not_interleaved_categorized_by_data(self):
        """With categorize_by=['data'] the export keeps children nested and
        wraps each object's arrays in labels/properties sub-dicts."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False,
                         interleave=False,
                         categorize_by=['data'])

        # Dead `_ = set(...)` statements from the original were removed
        top_level = set(['net_01', 'phase_01', 'phase_02'])
        data_cats = set(['labels', 'properties'])

        # Ensure NOT categorized by object
        assert top_level == set(D.keys())

        # Ensure NOT flattened
        assert set(['geo_01', 'geo_02']).issubset(D['net_01'].keys())
        assert set(['phys_01', 'phys_02']).issubset(D['phase_01'].keys())
        assert set(['phys_03', 'phys_04']).issubset(D['phase_02'].keys())

        # Ensure categorized by data
        assert data_cats.issubset(D['net_01'].keys())
        assert data_cats.issubset(D['phase_01'].keys())
        assert data_cats.issubset(D['phase_02'].keys())
        assert data_cats.issubset(D['net_01']['geo_01'].keys())
        assert data_cats.issubset(D['net_01']['geo_02'].keys())
        assert data_cats.issubset(D['phase_01']['phys_01'].keys())
        assert data_cats.issubset(D['phase_01']['phys_02'].keys())
        assert data_cats.issubset(D['phase_02']['phys_03'].keys())
        assert data_cats.issubset(D['phase_02']['phys_04'].keys())
コード例 #17
0
ファイル: MAT.py プロジェクト: PMEAL/OpenPNM
    def save(cls, network, phases=None, filename=''):
        r"""
        Write Network to a Mat file for exporting to Matlab.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the data to be written.

        filename : string
            Desired file name, defaults to project name if not given.

        phases : list of phase objects
            Phases that have properties we want to write to file.  Defaults
            to an empty list.

        """
        # Use None as sentinel rather than a mutable [] default
        phases = [] if phases is None else phases
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]
        # Write to file
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='mat')

        d = Dict.to_dict(network=network, phases=phases, interleave=True)
        d = FlatDict(d, delimiter='|')
        d = sanitize_dict(d)
        # Rewrite flattened keys: Matlab variable names may not contain
        # '|' or '.'
        new_d = {}
        for key in list(d.keys()):
            new_key = key.replace('|', '_').replace('.', '_')
            new_d[new_key] = d.pop(key)

        spio.savemat(file_name=filename, mdict=new_d)
コード例 #18
0
    def import_data(cls, filename, project=None, delim=' | '):
        r"""
        Opens a 'csv' file, reads in the data, and adds it to the **Network**

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project object is supplied then one will be created and
            returned.

        Returns
        -------
        project : list
            An OpenPNM project containing the data assigned to Generic
            versions of the objects from which it was exported.

        """
        from pandas import read_table

        if project is None:
            project = ws.new_project()

        fname = cls._parse_filename(filename, ext='csv')
        a = read_table(filepath_or_buffer=fname,
                       sep=',',
                       skipinitialspace=True,
                       index_col=False,
                       true_values=['T', 't', 'True', 'true', 'TRUE'],
                       false_values=['F', 'f', 'False', 'false', 'FALSE'])

        dct = {}
        # Parse through all the items and re-merge columns that were split on
        # export, e.g. 'prop[0]', 'prop[1]' -> a single 2-col 'prop' array
        processed = set()
        for item in sorted(a.keys()):
            if item in processed:
                # Column already consumed by an earlier merge
                continue
            m = re.search(r'\[.\]', item)  # The dot '.' is a wildcard
            if m:  # m is None if pattern not found, otherwise merge cols
                pname = re.split(r'\[.\]', item)[0]  # Get base propname
                # Find all other keys with same base propname
                merge_keys = [k for k in a.keys() if k.startswith(pname)]
                # Retrieve and remove arrays with same base propname
                merge_cols = [a.pop(k) for k in merge_keys]
                # Merge arrays into multi-column array and store in DataFrame
                dct[pname] = np.vstack(merge_cols).T
                # BUG FIX: the original popped entries from the list it was
                # iterating, which silently skipped elements; track handled
                # columns in a set instead
                processed.update(merge_keys)
            else:
                dct[item] = np.array(a.pop(item))

        project = Dict.from_dict(dct, project=project, delim=delim)

        return project
コード例 #19
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_flattened_not_interleaved(self):
        """Flattened export gets one top-level key per object and does not
        nest geometries or physics inside their parents."""
        flat = Dict.to_dict(network=self.net,
                            phases=[self.phase_1, self.phase_2],
                            flatten=True, interleave=False, categorize_by=[])

        expected = set([i.name for i in self.net.project])
        assert expected == set(flat.keys())

        # Children must NOT appear inside parent entries
        assert set(['geo_01', 'geo_02']).isdisjoint(flat['net_01'].keys())
        assert set(['phys_01', 'phys_02']).isdisjoint(flat['phase_01'].keys())
        assert set(['phys_03', 'phys_04']).isdisjoint(flat['phase_02'].keys())
コード例 #20
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_interleaved_categorized_by_element(self):
        """Interleaving merges children into parents; element categorization
        adds a pore/throat level to each remaining object."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False, interleave=True,
                         categorize_by=['element'])

        assert set(D.keys()) == set(['net_01', 'phase_01', 'phase_02'])

        elements = set(['pore', 'throat'])
        for name in ('net_01', 'phase_01', 'phase_02'):
            assert elements.issubset(D[name].keys())
コード例 #21
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_interleaved_categorized_by_data(self):
        """Data categorization adds a labels/properties level to each object
        remaining after interleaving."""
        D = Dict.to_dict(network=self.net, phases=[self.phase_1, self.phase_2],
                         flatten=False, interleave=True,
                         categorize_by=['data'])

        assert set(D.keys()) == set(['net_01', 'phase_01', 'phase_02'])

        cats = set(['labels', 'properties'])
        assert all(cats.issubset(D[name].keys()) for name in D)
コード例 #22
0
ファイル: VTK.py プロジェクト: wenqianzhang95/OpenPNM
    def load(cls, filename, project=None, delim=' | '):
        r"""
        Read in pore and throat data from a saved VTK file.

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project is supplied then one will be created and returned.

        """
        filename = cls._parse_filename(filename, ext='vtp')
        tree = ET.parse(filename)
        piece_node = tree.find('PolyData').find('Piece')

        # Throat connectivity is stored as line segments
        conns = VTK._element_to_array(
            piece_node.find('Lines').find('DataArray'), 2)
        # Pore coordinates are the polydata points
        coords = VTK._element_to_array(
            piece_node.find('Points').find('DataArray'), 3)

        # Pore data lives under PointData, throat data under CellData
        net = {}
        for section in ('PointData', 'CellData'):
            for item in piece_node.find(section).iter('DataArray'):
                net[item.get('Name')] = VTK._element_to_array(item)

        if project is None:
            project = ws.new_project()
        project = Dict.from_dict(dct=net, project=project, delim=delim)

        # Clean up data values, if necessary, like convert array's of
        # 1's and 0's into boolean.
        project = cls._convert_data(project)

        # Add coords and conns to network
        network = project.network
        network.update({'throat.conns': conns})
        network.update({'pore.coords': coords})

        return project
コード例 #23
0
    def test_to_dict_flattened_not_interleaved(self):
        """With flatten=True each object gets its own top-level entry and no
        child objects appear inside parent entries."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=True,
                         interleave=False,
                         categorize_by=[])

        assert set([i.name for i in self.net.project]) == set(D.keys())

        children = {'net_01': ['geo_01', 'geo_02'],
                    'phase_01': ['phys_01', 'phys_02'],
                    'phase_02': ['phys_03', 'phys_04']}
        for parent, kids in children.items():
            assert set(kids).isdisjoint(D[parent].keys())
コード例 #24
0
    def test_to_dict_interleaved_categorized_by_data(self):
        """Interleaved export categorized by data exposes labels/properties
        sub-dicts under each surviving object."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False,
                         interleave=True,
                         categorize_by=['data'])

        assert set(D.keys()) == set(['net_01', 'phase_01', 'phase_02'])

        categories = set(['labels', 'properties'])
        for name in ('net_01', 'phase_01', 'phase_02'):
            assert categories.issubset(D[name].keys())
コード例 #25
0
    def test_to_dict_interleaved_categorized_by_element(self):
        """Element categorization splits each object's data into pore and
        throat sub-dictionaries."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False,
                         interleave=True,
                         categorize_by=['element'])

        top = ['net_01', 'phase_01', 'phase_02']
        assert set(D.keys()) == set(top)

        for name in top:
            assert set(['pore', 'throat']).issubset(D[name].keys())
コード例 #26
0
ファイル: VTK.py プロジェクト: PMEAL/OpenPNM
    def load(cls, filename, project=None, delim=' | '):
        r"""
        Read in pore and throat data from a saved VTK file.

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.
        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project is supplied then one will be created and returned.
        """
        data = {}

        fname = cls._parse_filename(filename, ext='vtp')
        doc = ET.parse(fname)
        piece = doc.find('PolyData').find('Piece')

        # Connectivity (throat conns) is stored as line segments
        conns = VTK._element_to_array(piece.find('Lines').find('DataArray'), 2)
        # Pore coordinates come from the polydata points
        coords = VTK._element_to_array(piece.find('Points').find('DataArray'), 3)

        # Point data holds the pore properties
        for node in piece.find('PointData').iter('DataArray'):
            data[node.get('Name')] = VTK._element_to_array(node)
        # Cell data holds the throat properties
        for node in piece.find('CellData').iter('DataArray'):
            data[node.get('Name')] = VTK._element_to_array(node)

        if project is None:
            project = ws.new_project()
        project = Dict.from_dict(dct=data, project=project, delim=delim)

        # Convert arrays of 1's and 0's back into booleans where needed
        project = cls._convert_data(project)

        # Attach the topology arrays to the network
        network = project.network
        network.update({'throat.conns': conns})
        network.update({'pore.coords': coords})

        return project
コード例 #27
0
    def test_to_dict_not_flat_not_interleaved_cat_by_element_data_object(self):
        """Nested export categorized by element+data+object keeps children
        nested under their parents and wraps data in all three category
        levels.  Dead `_ = set(...)` statements were removed."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False,
                         interleave=False,
                         categorize_by=['element', 'data', 'object'])

        d = set(['pore', 'throat'])
        e = set(['network', 'phase'])

        # Check if categorized by object, but not flattened
        assert e == set(D.keys())
        assert 'geometry' in D['network']['net_01'].keys()
        assert 'physics' in D['phase']['phase_01'].keys()
        assert 'physics' in D['phase']['phase_02'].keys()

        # Ensure it's categorized by object, data, and element
        assert d.issubset(D['network']['net_01']['labels'].keys())
        assert d.issubset(D['phase']['phase_01']['properties'].keys())
        assert d.issubset(D['phase']['phase_01']['labels'].keys())
        assert d.issubset(D['phase']['phase_02']['properties'].keys())
        assert d.issubset(D['phase']['phase_02']['labels'].keys())
        for geo in ('geo_01', 'geo_02'):
            for cat in ('properties', 'labels'):
                path = D['network']['net_01']['geometry'][geo][cat]
                assert d.issubset(path.keys())
        for parent, phys in (('phase_01', 'phys_01'), ('phase_01', 'phys_02'),
                             ('phase_02', 'phys_03'), ('phase_02', 'phys_04')):
            for cat in ('properties', 'labels'):
                path = D['phase'][parent]['physics'][phys][cat]
                assert d.issubset(path.keys())
コード例 #28
0
ファイル: CSV.py プロジェクト: PMEAL/OpenPNM
    def load(cls, filename, project=None, delim=' | '):
        r"""
        Opens a 'csv' file, reads in the data, and adds it to the **Network**

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project object is supplied then one will be created and
            returned.

        """
        if project is None:
            project = ws.new_project()

        fname = cls._parse_filename(filename, ext='csv')
        a = pd.read_table(filepath_or_buffer=fname,
                          sep=',',
                          skipinitialspace=True,
                          index_col=False,
                          true_values=['T', 't', 'True', 'true', 'TRUE'],
                          false_values=['F', 'f', 'False', 'false', 'FALSE'])

        dct = {}
        # Parse through all the items and re-merge columns that were split on
        # export, e.g. 'prop[0]', 'prop[1]' -> a single 2-col 'prop' array
        processed = set()
        for item in sorted(a.keys()):
            if item in processed:
                # Column already consumed by an earlier merge
                continue
            m = re.search(r'\[.\]', item)  # The dot '.' is a wildcard
            if m:  # m is None if pattern not found, otherwise merge cols
                pname = re.split(r'\[.\]', item)[0]  # Get base propname
                # Find all other keys with same base propname
                merge_keys = [k for k in a.keys() if k.startswith(pname)]
                # Retrieve and remove arrays with same base propname
                merge_cols = [a.pop(k) for k in merge_keys]
                # Merge arrays into multi-column array and store in DataFrame
                dct[pname] = sp.vstack(merge_cols).T
                # BUG FIX: the original used a side-effecting comprehension
                # that popped from the list while iterating it, which skips
                # entries; track handled columns in a set instead
                processed.update(merge_keys)
            else:
                dct[item] = sp.array(a.pop(item))

        project = Dict.from_dict(dct, project=project, delim=delim)

        return project
コード例 #29
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_not_flat_not_interleaved_cat_by_element_data_object(self):
        """Nested export with element/data/object categorization: children
        stay nested and every data dict is reachable through all three
        category levels.  Unused locals `a`, `b`, `c` were removed."""
        D = Dict.to_dict(network=self.net, phases=[self.phase_1, self.phase_2],
                         flatten=False, interleave=False,
                         categorize_by=['element', 'data', 'object'])

        d = set(['pore', 'throat'])
        e = set(['network', 'phase'])

        # Check if categorized by object, but not flattened
        assert e == set(D.keys())
        assert 'geometry' in D['network']['net_01'].keys()
        assert 'physics' in D['phase']['phase_01'].keys()
        assert 'physics' in D['phase']['phase_02'].keys()

        # Ensure it's categorized by object, data, and element
        assert d.issubset(D['network']['net_01']['labels'].keys())
        assert d.issubset(D['phase']['phase_01']['properties'].keys())
        assert d.issubset(D['phase']['phase_01']['labels'].keys())
        assert d.issubset(D['phase']['phase_02']['properties'].keys())
        assert d.issubset(D['phase']['phase_02']['labels'].keys())
        path = D['network']['net_01']['geometry']['geo_01']['properties']
        assert d.issubset(path.keys())
        path = D['network']['net_01']['geometry']['geo_01']['labels']
        assert d.issubset(path.keys())
        path = D['network']['net_01']['geometry']['geo_02']['properties']
        assert d.issubset(path.keys())
        path = D['network']['net_01']['geometry']['geo_02']['labels']
        assert d.issubset(path.keys())
        path = D['phase']['phase_01']['physics']['phys_01']['properties']
        assert d.issubset(path.keys())
        path = D['phase']['phase_01']['physics']['phys_01']['labels']
        assert d.issubset(path.keys())
        # BUG FIX: the `path =` assignment was missing here, so the next
        # assert silently re-checked phys_01's labels instead of phys_02's
        # properties
        path = D['phase']['phase_01']['physics']['phys_02']['properties']
        assert d.issubset(path.keys())
        path = D['phase']['phase_01']['physics']['phys_02']['labels']
        assert d.issubset(path.keys())
        path = D['phase']['phase_02']['physics']['phys_03']['properties']
        assert d.issubset(path.keys())
        path = D['phase']['phase_02']['physics']['phys_03']['labels']
        assert d.issubset(path.keys())
        path = D['phase']['phase_02']['physics']['phys_04']['properties']
        assert d.issubset(path.keys())
        path = D['phase']['phase_02']['physics']['phys_04']['labels']
        assert d.issubset(path.keys())
コード例 #30
0
ファイル: MAT.py プロジェクト: PMEAL/OpenPNM
    def load(cls, filename, project=None):
        r"""
        Loads data onto the given network from an appropriately formatted
        'mat' file (i.e. MatLAB output).

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project object is supplied then one will be created and
            returned.

        Returns
        -------
        If no project object is supplied then one will be created and returned.

        """
        filename = cls._parse_filename(filename=filename, ext='mat')
        data = spio.loadmat(filename)
        # Reinsert the '.' separator into the array names
        for item in list(data.keys()):
            if item in ['__header__', '__version__', '__globals__']:
                # Matlab metadata entries carry no network data
                data.pop(item)
                continue
            elif '_pore_' in item:
                path, prop = item.split('_pore_')
                new_key = path + '|pore.' + prop
            elif '_throat_' in item:
                path, prop = item.split('_throat_')
                new_key = path + '|throat.' + prop
            else:
                # BUG FIX: keys matching neither pattern previously reused a
                # stale new_key from an earlier iteration (or raised NameError
                # on the first one); keep such keys unchanged instead
                new_key = item
            data[new_key] = data.pop(item)

        if project is None:
            project = ws.new_project()
        project = Dict.from_dict(data, project=project, delim='|')

        project = cls._convert_data(project)

        return project
コード例 #31
0
ファイル: MAT.py プロジェクト: zmhhaha/OpenPNM
    def load(cls, filename, project=None):
        r"""
        Loads data onto the given network from an appropriately formatted
        'mat' file (i.e. MatLAB output).

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The formatting
            of this file is outlined below.

        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project object is supplied then one will be created and
            returned.

        Returns
        -------
        If no project object is supplied then one will be created and returned.

        """
        filename = cls._parse_filename(filename=filename, ext='mat')
        data = spio.loadmat(filename)
        # Reinsert the '.' separator into the array names
        for item in list(data.keys()):
            if item in ['__header__', '__version__', '__globals__']:
                # Skip Matlab's bookkeeping entries
                data.pop(item)
                continue
            elif '_pore_' in item:
                path, prop = item.split('_pore_')
                new_key = path + '|pore.' + prop
            elif '_throat_' in item:
                path, prop = item.split('_throat_')
                new_key = path + '|throat.' + prop
            else:
                # BUG FIX: without this branch, keys matching neither pattern
                # reused a stale new_key (or hit NameError on the first key);
                # leave such keys unchanged
                new_key = item
            data[new_key] = data.pop(item)

        if project is None:
            project = ws.new_project()
        project = Dict.from_dict(data, project=project, delim='|')

        project = cls._convert_data(project)

        return project
コード例 #32
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
    def test_to_dict_not_flat_not_interleaved_categorized_by_object(self):
        """Categorizing by object nests geometry/physics under network/phase."""
        D = Dict.to_dict(network=self.net, phases=[self.phase_1, self.phase_2],
                         flatten=False, interleave=False,
                         categorize_by=['object'])

        # Ensure categorized by object: only 'network' and 'phase' appear at
        # the top level since geometry/physics data nest under their parents.
        # (Removed the unused sets a/b/c/d that were never asserted against.)
        expected = set(['network', 'phase'])
        assert expected == set(D.keys())

        # Ensure flatten, which occurs when categorized by object
        keys = D['network']['net_01']['geometry'].keys()
        assert set(['geo_01', 'geo_02']).issubset(keys)
        keys = D['phase'].keys()
        assert set(['phase_01', 'phase_02']).issubset(keys)
        keys = D['phase']['phase_01']['physics'].keys()
        assert set(['phys_01', 'phys_02']).issubset(keys)
コード例 #33
0
ファイル: DictTest.py プロジェクト: zmhhaha/OpenPNM
    def test_to_dict_not_flattened_not_interleaved(self):
        """Without flatten/interleave the dict keys are the object names."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False,
                         interleave=False,
                         categorize_by=[])

        # Ensure NOT categorized by object: the top level holds object names.
        # (Removed the unused sets a/c/d that were never asserted against.)
        expected = set(['net_01', 'phase_01', 'phase_02'])
        assert expected == set(D.keys())

        # Ensure NOT flattened
        assert set(['geo_01', 'geo_02']).issubset(D['net_01'].keys())
        assert set(['phys_01', 'phys_02']).issubset(D['phase_01'].keys())
        assert set(['phys_03', 'phys_04']).issubset(D['phase_02'].keys())
        # Ensure no cross talk between phases
        assert set(['phys_01', 'phys_02']).isdisjoint(D['phase_02'].keys())
        assert set(['phys_03', 'phys_04']).isdisjoint(D['phase_01'].keys())
コード例 #34
0
    def test_to_dict_not_flat_not_interleaved_categorized_by_object(self):
        """Categorizing by object nests geometry/physics under network/phase."""
        D = Dict.to_dict(network=self.net,
                         phases=[self.phase_1, self.phase_2],
                         flatten=False,
                         interleave=False,
                         categorize_by=['object'])

        # Ensure categorized by object: only 'network' and 'phase' appear at
        # the top level since geometry/physics data nest under their parents.
        # (Removed the dead ``_ = set(...)`` assignments that served no
        # purpose in the test.)
        e = set(['network', 'phase'])
        assert e == set(D.keys())

        # Ensure flatten, which occurs when categorized by object
        keys = D['network']['net_01']['geometry'].keys()
        assert set(['geo_01', 'geo_02']).issubset(keys)
        keys = D['phase'].keys()
        assert set(['phase_01', 'phase_02']).issubset(keys)
        keys = D['phase']['phase_01']['physics'].keys()
        assert set(['phys_01', 'phys_02']).issubset(keys)
コード例 #35
0
ファイル: HDF5.py プロジェクト: PMEAL/OpenPNM
    def to_hdf5(cls, network=None, phases=[], element=['pore', 'throat'],
                filename='', interleave=True, flatten=False, categorize_by=[]):
        r"""
        Creates an HDF5 file containing data from the specified objects,
        and categorized according to the given arguments.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        element : string or list of strings
            An indication of whether 'pore' and/or 'throat' data are desired.
            The default is both.

        interleave : boolean (default is ``True``)
            When ``True`` (default) the data from all Geometry objects (and
            Physics objects if ``phases`` are given) is interleaved into
            a single array and stored as a network property (or Phase
            property for Physics data). When ``False``, the data for each
            object are stored under their own dictionary key, the structuring
            of which depends on the value of the ``flatten`` argument.

        flatten : boolean (default is ``True``)
            When ``True``, all objects are accessible from the top level
            of the dictionary.  When ``False`` objects are nested under their
            parent object.  If ``interleave`` is ``True`` this argument is
            ignored.

        categorize_by : string or list of strings
            Indicates how the dictionaries should be organized.  The list can
            contain any, all or none of the following strings:

            **'objects'** : If specified the dictionary keys will be stored
            under a general level corresponding to their type (e.g.
            'network/net_01/pore.all'). If  ``interleave`` is ``True`` then
            only the only categories are *network* and *phase*, since
            *geometry* and *physics* data get stored under their respective
            *network* and *phase*.

            **'data'** : If specified the data arrays are additionally
            categorized by ``label`` and ``property`` to separate *boolean*
            from *numeric* data.

            **'elements'** : If specified the data arrays are additionally
            categorized by ``pore`` and ``throat``, meaning that the propnames
            are no longer prepended by a 'pore.' or 'throat.'

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename, ext='hdf')

        dct = Dict.to_dict(network=network, phases=phases, element=element,
                           interleave=interleave, flatten=flatten,
                           categorize_by=categorize_by)
        # '/'-delimited flat view so every array maps to one HDF5 dataset path
        d = FlatDict(dct, delimiter='/')

        f = h5py.File(filename, "w")
        # Snapshot the keys: entries are deleted inside the loop, and
        # deleting from a mapping while iterating its live key view raises
        # RuntimeError ("dictionary changed size during iteration").
        for item in list(d.keys()):
            # HDF5 dataset names cannot contain '.', so replace with '_'
            tempname = '_'.join(item.split('.'))
            arr = d[item]
            if d[item].dtype == 'O':
                logger.warning(item + ' has dtype object,' +
                               ' will not write to file')
                del d[item]
            elif 'U' in str(arr[0].dtype):
                # Unicode arrays are skipped silently
                # NOTE(review): arr[0] assumes a non-empty array — confirm
                pass
            else:
                f.create_dataset(name='/'+tempname, shape=arr.shape,
                                 dtype=arr.dtype, data=arr)
        # The open file handle is returned for the caller to use/close
        return f
コード例 #36
0
ファイル: VTK.py プロジェクト: wenqianzhang95/OpenPNM
    def save(cls, network, phases=[], filename='', delim=' | ', fill_nans=None):
        r"""
        Save network and phase data to a single vtp file for visualizing in
        Paraview

        Parameters
        ----------
        network : OpenPNM Network Object
            The Network containing the data to be written

        phases : list, optional
            A list containing OpenPNM Phase object(s) containing data to be
            written

        filename : string, optional
            Filename to write data.  If no name is given the file is named
            after the network

        delim : string
            Specify which character is used to delimit the data names.  The
            default is ' | ' which creates a nice clean output in the Paraview
            pipeline viewer (e.g. net | property | pore | diameter)

        fill_nans : scalar
            The value to use to replace NaNs with.  The VTK file format does
            not work with NaNs, so they must be dealt with.  The default is
            `None` which means property arrays with NaNs are not written to the
            file.  Other useful options might be 0 or -1, but the user must
            be aware that these are not real values, only place holders.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)

        am = Dict.to_dict(network=network, phases=phases, interleave=True,
                          categorize_by=['object', 'data'])
        am = FlatDict(am, delimiter=delim)
        key_list = list(sorted(am.keys()))

        network = network[0]
        points = network['pore.coords']
        pairs = network['throat.conns']
        num_points = np.shape(points)[0]
        num_throats = np.shape(pairs)[0]

        root = ET.fromstring(VTK._TEMPLATE)
        piece_node = root.find('PolyData').find('Piece')
        piece_node.set("NumberOfPoints", str(num_points))
        piece_node.set("NumberOfLines", str(num_throats))
        points_node = piece_node.find('Points')
        coords = VTK._array_to_element("coords", points.T.ravel('F'), n=3)
        points_node.append(coords)
        lines_node = piece_node.find('Lines')
        connectivity = VTK._array_to_element("connectivity", pairs)
        lines_node.append(connectivity)
        offsets = VTK._array_to_element("offsets", 2*np.arange(len(pairs))+2)
        lines_node.append(offsets)

        point_data_node = piece_node.find('PointData')
        cell_data_node = piece_node.find('CellData')
        for key in key_list:
            array = am[key]
            if array.dtype == 'O':
                logger.warning(key + ' has dtype object,' +
                               ' will not write to file')
            else:
                # ``np.bool`` was a deprecated alias of the builtin ``bool``
                # and was removed in NumPy 1.24; compare against ``bool``.
                if array.dtype == bool:
                    array = array.astype(int)
                if np.any(np.isnan(array)):
                    if fill_nans is None:
                        logger.warning(key + ' has nans,' +
                                       ' will not write to file')
                        continue
                    else:
                        array[np.isnan(array)] = fill_nans
                element = VTK._array_to_element(key, array)
                # Route data by array length: pore data -> points,
                # throat data -> cells
                if (array.size == num_points):
                    point_data_node.append(element)
                elif (array.size == num_throats):
                    cell_data_node.append(element)

        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='vtp')

        tree = ET.ElementTree(root)
        tree.write(filename)

        with open(filename, 'r+') as f:
            string = f.read()
            string = string.replace('</DataArray>', '</DataArray>\n\t\t\t')
            f.seek(0)
            # consider adding header: '<?xml version="1.0"?>\n'+
            f.write(string)
コード例 #37
0
ファイル: DictTest.py プロジェクト: PMEAL/OpenPNM
 def test_to_dict_categorize_by_project(self):
     """Categorizing by project should nest all data under the project name."""
     result = Dict.to_dict(network=self.net,
                           phases=[self.phase_1, self.phase_2],
                           flatten=False, interleave=True,
                           categorize_by=['project'])
     assert 'sim_01' in result
コード例 #38
0
ファイル: HDF5.py プロジェクト: wlgjs622/OpenPNM
    def to_hdf5(cls,
                network=None,
                phases=[],
                element=['pore', 'throat'],
                filename='',
                interleave=True,
                flatten=False,
                categorize_by=[]):
        r"""
        Creates an HDF5 file containing data from the specified objects,
        and categorized according to the given arguments.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        element : string or list of strings
            An indication of whether 'pore' and/or 'throat' data are desired.
            The default is both.

        interleave : boolean (default is ``True``)
            When ``True`` (default) the data from all Geometry objects (and
            Physics objects if ``phases`` are given) is interleaved into
            a single array and stored as a network property (or Phase
            property for Physics data). When ``False``, the data for each
            object are stored under their own dictionary key, the structuring
            of which depends on the value of the ``flatten`` argument.

        flatten : boolean (default is ``True``)
            When ``True``, all objects are accessible from the top level
            of the dictionary.  When ``False`` objects are nested under their
            parent object.  If ``interleave`` is ``True`` this argument is
            ignored.

        categorize_by : string or list of strings
            Indicates how the dictionaries should be organized.  The list can
            contain any, all or none of the following strings:

            **'objects'** : If specified the dictionary keys will be stored
            under a general level corresponding to their type (e.g.
            'network/net_01/pore.all'). If  ``interleave`` is ``True`` then
            only the only categories are *network* and *phase*, since
            *geometry* and *physics* data get stored under their respective
            *network* and *phase*.

            **'data'** : If specified the data arrays are additionally
            categorized by ``label`` and ``property`` to separate *boolean*
            from *numeric* data.

            **'elements'** : If specified the data arrays are additionally
            categorized by ``pore`` and ``throat``, meaning that the propnames
            are no longer prepended by a 'pore.' or 'throat.'

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename, ext='hdf')

        dct = Dict.to_dict(network=network,
                           phases=phases,
                           element=element,
                           interleave=interleave,
                           flatten=flatten,
                           categorize_by=categorize_by)
        # '/'-delimited flat view so every array maps to one HDF5 dataset path
        d = FlatDict(dct, delimiter='/')

        f = hdfFile(filename, "w")
        # Snapshot the keys: entries are deleted inside the loop, and
        # deleting from a mapping while iterating its live key view raises
        # RuntimeError ("dictionary changed size during iteration").
        for item in list(d.keys()):
            # HDF5 dataset names cannot contain '.', so replace with '_'
            tempname = '_'.join(item.split('.'))
            arr = d[item]
            if d[item].dtype == 'O':
                logger.warning(item + ' has dtype object,' +
                               ' will not write to file')
                del d[item]
            elif 'U' in str(arr[0].dtype):
                # Unicode arrays are skipped silently
                # NOTE(review): arr[0] assumes a non-empty array — confirm
                pass
            else:
                f.create_dataset(name='/' + tempname,
                                 shape=arr.shape,
                                 dtype=arr.dtype,
                                 data=arr)
        # The open file handle is returned for the caller to use/close
        return f
コード例 #39
0
ファイル: XDMF.py プロジェクト: zmhhaha/OpenPNM
    def save(cls, network, phases=[], filename=''):
        r"""
        Saves (transient/steady-state) data from the given objects into the
        specified file.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        Notes
        -----
        This method only saves the data, not any of the pore-scale models or
        other attributes.  To save an actual OpenPNM Project use the
        ``Workspace`` object.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]
        # Check if any of the phases has time series
        transient = GenericIO.is_transient(phases=phases)

        if filename == '':
            filename = project.name
        path = cls._parse_filename(filename=filename, ext='xmf')
        # Path is a pathlib object, so slice it up as needed
        fname_xdf = path.name
        d = Dict.to_dict(network, phases=phases, interleave=True,
                         flatten=False, categorize_by=['element', 'data'])
        D = FlatDict(d, delimiter='/')
        # Identify time steps
        t_steps = []
        if transient:
            for key in D.keys():
                if '@' in key:
                    t_steps.append(key.split('@')[1])
        t_grid = create_grid(Name="TimeSeries", GridType="Collection",
                             CollectionType="Temporal")
        # If steady-state, define '0' time step
        if not transient:
            t_steps.append('0')
        # Setup xdmf file
        root = create_root('Xdmf')
        domain = create_domain()
        # Iterate over time steps present
        for t in range(len(t_steps)):
            # Define the hdf file
            if not transient:
                fname_hdf = path.stem+".hdf"
            else:
                fname_hdf = path.stem+'@'+t_steps[t]+".hdf"
            path_p = path.parent
            f = h5py.File(path_p.joinpath(fname_hdf), "w")
            # Add coordinate and connection information to top of HDF5 file
            f["coordinates"] = network["pore.coords"]
            f["connections"] = network["throat.conns"]
            # geometry coordinates
            row, col = f["coordinates"].shape
            dims = ' '.join((str(row), str(col)))
            hdf_loc = fname_hdf + ":coordinates"
            geo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                        Format='HDF', DataType="Float")
            geo = create_geometry(GeometryType="XYZ")
            geo.append(geo_data)
            # topology connections
            row, col = f["connections"].shape  # col first then row
            dims = ' '.join((str(row), str(col)))
            hdf_loc = fname_hdf + ":connections"
            topo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                         Format="HDF", NumberType="Int")
            topo = create_topology(TopologyType="Polyline",
                                   NodesPerElement=str(2),
                                   NumberOfElements=str(row))
            topo.append(topo_data)
            # Make HDF5 file with all datasets, and no groups.
            # Snapshot the keys: entries are deleted inside the loop, and
            # deleting from a mapping while iterating its live key view
            # raises RuntimeError.
            for item in list(D.keys()):
                if D[item].dtype == 'O':
                    logger.warning(item + ' has dtype object,' +
                                   ' will not write to file')
                    del D[item]
                elif 'U' in str(D[item][0].dtype):
                    pass
                elif ('@' in item and t_steps[t] == item.split('@')[1]):
                    f.create_dataset(name='/'+item.split('@')[0]+'@t',
                                     shape=D[item].shape,
                                     dtype=D[item].dtype,
                                     data=D[item])
                elif ('@' not in item and t == 0):
                    f.create_dataset(name='/'+item, shape=D[item].shape,
                                     dtype=D[item].dtype, data=D[item])
            # Create a grid
            grid = create_grid(Name=t_steps[t], GridType="Uniform")
            time = create_time(type='Single', Value=t_steps[t])
            grid.append(time)
            # Add pore and throat properties
            for item in D.keys():
                if item not in ['coordinates', 'connections']:
                    if (('@' in item and t_steps[t] == item.split('@')[1]) or
                            ('@' not in item)):
                        attr_type = 'Scalar'
                        shape = D[item].shape
                        dims = (''.join([str(i) +
                                         ' ' for i in list(shape)[::-1]]))
                        if '@' in item:
                            item = item.split('@')[0]+'@t'
                            hdf_loc = fname_hdf + ":" + item
                        elif ('@' not in item and t == 0):
                            hdf_loc = fname_hdf + ":" + item
                        elif ('@' not in item and t > 0):
                            # Steady data was only written at t == 0; point
                            # later time steps at the first hdf file
                            hdf_loc = (path.stem+'@'+t_steps[0]+".hdf" +
                                       ":" + item)
                        attr = create_data_item(value=hdf_loc,
                                                Dimensions=dims,
                                                Format='HDF',
                                                Precision='8',
                                                DataType='Float')
                        name = item.replace('/', ' | ')
                        if 'throat' in item:
                            Center = "Cell"
                        else:
                            Center = "Node"
                        el_attr = create_attribute(Name=name, Center=Center,
                                                   AttributeType=attr_type)
                        el_attr.append(attr)
                        grid.append(el_attr)
                    else:
                        pass
            grid.append(topo)
            grid.append(geo)
            t_grid.append(grid)
            # Close the HDF5 file
            f.close()
        domain.append(t_grid)
        root.append(domain)
        with open(path_p.joinpath(fname_xdf), 'w') as file:
            file.write(cls._header)
            file.write(ET.tostring(root).decode("utf-8"))
コード例 #40
0
ファイル: XDMF.py プロジェクト: wenqianzhang95/OpenPNM
    def save(cls, network, phases=[], filename=''):
        r"""
        Saves data from the given objects into the specified file.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        Notes
        -----
        This method only saves the data, not any of the pore-scale models or
        other attributes.  To save an actual OpenPNM Project use the
        ``Workspace`` object.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]

        if filename == '':
            filename = project.name
        path = cls._parse_filename(filename=filename, ext='xmf')
        # Path is a pathlib object, so slice it up as needed
        fname_xdf = path.name
        fname_hdf = path.stem+".hdf"
        path = path.parent
        f = h5py.File(path.joinpath(fname_hdf), "w")

        d = Dict.to_dict(network, phases=phases, interleave=True,
                         flatten=False, categorize_by=['element', 'data'])

        # Make HDF5 file with all datasets, and no groups.
        # Snapshot the keys: entries are deleted inside the loop, and
        # deleting from a mapping while iterating its live key view raises
        # RuntimeError ("dictionary changed size during iteration").
        D = FlatDict(d, delimiter='/')
        for item in list(D.keys()):
            if D[item].dtype == 'O':
                logger.warning(item + ' has dtype object,' +
                               ' will not write to file')
                del D[item]
            elif 'U' in str(D[item][0].dtype):
                # Unicode arrays are skipped silently
                pass
            else:
                f.create_dataset(name='/'+item, shape=D[item].shape,
                                 dtype=D[item].dtype, data=D[item])
        # Add coordinate and connection information to top of HDF5 file
        f["coordinates"] = network["pore.coords"]
        f["connections"] = network["throat.conns"]

        # setup xdmf file
        root = create_root('Xdmf')
        domain = create_domain()
        grid = create_grid(Name="Structure", GridType="Uniform")

        # geometry coordinates
        row, col = f["coordinates"].shape
        dims = ' '.join((str(row), str(col)))
        hdf_loc = fname_hdf + ":coordinates"
        geo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                    Format='HDF', DataType="Float")
        geo = create_geometry(GeometryType="XYZ")
        geo.append(geo_data)

        # topology connections
        row, col = f["connections"].shape  # col first then row
        dims = ' '.join((str(row), str(col)))
        hdf_loc = fname_hdf + ":connections"
        topo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                     Format="HDF", NumberType="Int")
        topo = create_topology(TopologyType="Polyline",
                               NodesPerElement=str(2),
                               NumberOfElements=str(row))
        topo.append(topo_data)

        # Add pore and throat properties
        for item in D.keys():
            if item not in ['coordinates', 'connections']:
                attr_type = 'Scalar'
                shape = f[item].shape
                dims = ''.join([str(i) + ' ' for i in list(shape)[::-1]])
                hdf_loc = fname_hdf + ":" + item
                attr = create_data_item(value=hdf_loc,
                                        Dimensions=dims,
                                        Format='HDF',
                                        Precision='8',
                                        DataType='Float')
                name = item.replace('/', ' | ')
                if 'throat' in item:
                    Center = "Cell"
                else:
                    Center = "Node"
                el_attr = create_attribute(Name=name, Center=Center,
                                           AttributeType=attr_type)
                el_attr.append(attr)
                grid.append(el_attr)

        grid.append(topo)
        grid.append(geo)
        domain.append(grid)
        root.append(domain)

        with open(path.joinpath(fname_xdf), 'w') as file:
            file.write(cls._header)
            file.write(ET.tostring(root).decode("utf-8"))

        # Close the HDF5 file
        f.close()
コード例 #41
0
ファイル: VTK.py プロジェクト: lpz95/OpenPNM
    def export_data(cls,
                    network,
                    phases=[],
                    filename="",
                    delim=" | ",
                    fill_nans=None,
                    fill_infs=None):
        r"""
        Save network and phase data to a single vtp file for visualizing in
        Paraview.

        Parameters
        ----------
        network : OpenPNM Network Object
            The Network containing the data to be written
        phases : list, optional
            A list containing OpenPNM Phase object(s) containing data to be
            written
        filename : string, optional
            Filename to write data.  If no name is given the file is named
            after the network
        delim : string
            Specify which character is used to delimit the data names.  The
            default is ' | ' which creates a nice clean output in the Paraview
            pipeline viewer (e.g. net | property | pore | diameter)
        fill_nans : scalar
            The value to use to replace NaNs with.  The VTK file format does
            not work with NaNs, so they must be dealt with.  The default is
            `None` which means property arrays with NaNs are not written to the
            file.  Other useful options might be 0 or -1, but the user must
            be aware that these are not real values, only place holders.
        fill_infs : scalar
            The value to use to replace infs with.  The default is ``None``
            which means that property arrays containing ``None`` will *not*
            be written to the file, and a warning will be issued.  A useful
            value is

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        # Check if any of the phases has time series
        transient = GenericIO._is_transient(phases=phases)
        if transient:
            logger.warning("vtp format does not support transient data, " +
                           "use xdmf instead")
        if filename == "":
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext="vtp")

        am = Dict.to_dict(
            network=network,
            phases=phases,
            interleave=True,
            categorize_by=["object", "data"],
        )
        am = FlatDict(am, delimiter=delim)
        key_list = list(sorted(am.keys()))

        network = network[0]
        points = network["pore.coords"]
        pairs = network["throat.conns"]
        num_points = np.shape(points)[0]
        num_throats = np.shape(pairs)[0]

        root = ET.fromstring(VTK._TEMPLATE)
        piece_node = root.find("PolyData").find("Piece")
        piece_node.set("NumberOfPoints", str(num_points))
        piece_node.set("NumberOfLines", str(num_throats))
        points_node = piece_node.find("Points")
        coords = VTK._array_to_element("coords", points.T.ravel("F"), n=3)
        points_node.append(coords)
        lines_node = piece_node.find("Lines")
        connectivity = VTK._array_to_element("connectivity", pairs)
        lines_node.append(connectivity)
        offsets = VTK._array_to_element("offsets",
                                        2 * np.arange(len(pairs)) + 2)
        lines_node.append(offsets)

        point_data_node = piece_node.find("PointData")
        cell_data_node = piece_node.find("CellData")
        for key in key_list:
            array = am[key]
            if array.dtype == "O":
                logger.warning(key + " has dtype object," +
                               " will not write to file")
            else:
                # ``np.bool`` was a deprecated alias of the builtin ``bool``
                # and was removed in NumPy 1.24; compare against ``bool``.
                if array.dtype == bool:
                    array = array.astype(int)
                if np.any(np.isnan(array)):
                    if fill_nans is None:
                        logger.warning(key + " has nans," +
                                       " will not write to file")
                        continue
                    else:
                        array[np.isnan(array)] = fill_nans
                if np.any(np.isinf(array)):
                    if fill_infs is None:
                        logger.warning(key + " has infs," +
                                       " will not write to file")
                        continue
                    else:
                        array[np.isinf(array)] = fill_infs
                element = VTK._array_to_element(key, array)
                # Route data by array length: pore data -> points,
                # throat data -> cells
                if array.size == num_points:
                    point_data_node.append(element)
                elif array.size == num_throats:
                    cell_data_node.append(element)

        tree = ET.ElementTree(root)
        tree.write(filename)

        with open(filename, "r+") as f:
            string = f.read()
            string = string.replace("</DataArray>", "</DataArray>\n\t\t\t")
            f.seek(0)
            # consider adding header: '<?xml version="1.0"?>\n'+
            f.write(string)
Code example #42 (0 votes)
File: VTK.py — project: PMEAL/OpenPNM
    def save(cls, network, phases=[], filename='', delim=' | ',
             fill_nans=None, fill_infs=None):
        r"""
        Save network and phase data to a single vtp file for visualizing in
        Paraview.

        Parameters
        ----------
        network : OpenPNM Network Object
            The Network containing the data to be written
        phases : list, optional
            A list containing OpenPNM Phase object(s) containing data to be
            written
        filename : string, optional
            Filename to write data.  If no name is given the file is named
            after the network
        delim : string
            Specify which character is used to delimit the data names.  The
            default is ' | ' which creates a nice clean output in the Paraview
            pipeline viewer (e.g. net | property | pore | diameter)
        fill_nans : scalar
            The value to use to replace NaNs with.  The VTK file format does
            not work with NaNs, so they must be dealt with.  The default is
            ``None`` which means property arrays with NaNs are not written to
            the file.  Other useful options might be 0 or -1, but the user
            must be aware that these are not real values, only place holders.
        fill_infs : scalar
            The value to use to replace infs with.  The default is ``None``
            which means that property arrays containing infs will *not* be
            written to the file, and a warning will be issued.  Useful values
            might be 0 or a very large number, with the same caveat as for
            ``fill_nans``: they are place holders, not real values.
        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        # vtp is a static snapshot format; warn (but still proceed) if any
        # phase carries time-series data
        transient = GenericIO.is_transient(phases=phases)
        if transient:
            logger.warning('vtp format does not support transient data, ' +
                           'use xdmf instead')
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='vtp')

        # Flatten all object data into a single {name: array} mapping with
        # delimited keys (e.g. 'net | pore | diameter')
        am = Dict.to_dict(network=network, phases=phases, interleave=True,
                          categorize_by=['object', 'data'])
        am = FlatDict(am, delimiter=delim)
        key_list = list(sorted(am.keys()))

        network = network[0]
        points = network['pore.coords']
        pairs = network['throat.conns']
        num_points = np.shape(points)[0]
        num_throats = np.shape(pairs)[0]

        # Build the PolyData skeleton: pores become Points, throats Lines
        root = ET.fromstring(VTK._TEMPLATE)
        piece_node = root.find('PolyData').find('Piece')
        piece_node.set("NumberOfPoints", str(num_points))
        piece_node.set("NumberOfLines", str(num_throats))
        points_node = piece_node.find('Points')
        coords = VTK._array_to_element("coords", points.T.ravel('F'), n=3)
        points_node.append(coords)
        lines_node = piece_node.find('Lines')
        connectivity = VTK._array_to_element("connectivity", pairs)
        lines_node.append(connectivity)
        # Each line has 2 endpoints, so offsets are 2, 4, 6, ...
        offsets = VTK._array_to_element("offsets", 2*np.arange(len(pairs))+2)
        lines_node.append(offsets)

        point_data_node = piece_node.find('PointData')
        cell_data_node = piece_node.find('CellData')
        for key in key_list:
            array = am[key]
            if array.dtype == 'O':
                logger.warning(key + ' has dtype object,' +
                               ' will not write to file')
            else:
                # VTK has no boolean type; write labels as 0/1 integers.
                # NOTE: np.bool was removed in NumPy 1.24; use builtin bool.
                if array.dtype == bool:
                    array = array.astype(int)
                if np.any(np.isnan(array)):
                    if fill_nans is None:
                        logger.warning(key + ' has nans,' +
                                       ' will not write to file')
                        continue
                    else:
                        array[np.isnan(array)] = fill_nans
                if np.any(np.isinf(array)):
                    if fill_infs is None:
                        logger.warning(key + ' has infs,' +
                                       ' will not write to file')
                        continue
                    else:
                        array[np.isinf(array)] = fill_infs
                element = VTK._array_to_element(key, array)
                # Pore-sized arrays attach to points, throat-sized to cells;
                # any other size is silently dropped
                if array.size == num_points:
                    point_data_node.append(element)
                elif array.size == num_throats:
                    cell_data_node.append(element)

        tree = ET.ElementTree(root)
        tree.write(filename)

        # Re-open and add line breaks after each DataArray for readability
        with open(filename, 'r+') as f:
            string = f.read()
            string = string.replace('</DataArray>', '</DataArray>\n\t\t\t')
            f.seek(0)
            # consider adding header: '<?xml version="1.0"?>\n'+
            f.write(string)
Code example #43 (0 votes)
File: Pandas.py — project: zmhhaha/OpenPNM
    def to_dataframe(cls, network=None, phases=[], join=False, delim=' | '):
        r"""
        Convert the Network (and optionally Phase) data to Pandas DataFrames.

        Parameters
        ----------
        network: OpenPNM Network Object
            The network containing the data to be stored

        phases : list of OpenPNM Phase Objects
            The data on each supplied phase will be added to DataFrame

        join : boolean
            If ``False`` (default), two DataFrames are returned with *pore*
            data in one, and *throat* data in the other.  If ``True`` the pore
            and throat data are combined into a single DataFrame.  This can be
            problematic as it will put NaNs into all the *pore* columns which
            are shorter than the *throat* columns.

        Returns
        -------
        Pandas ``DataFrame`` object containing property and label data in each
        column.  If ``join`` was False (default) the two DataFrames are
        returned in a named tuple, or else a single DataFrame with pore and
        throat data in the same file, despite the column length being
        different.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)

        # Initialize pore and throat data dictionary using Dict class
        pdata = Dict.to_dict(network=network,
                             phases=phases,
                             element='pore',
                             interleave=True,
                             flatten=True,
                             categorize_by=['object'])
        tdata = Dict.to_dict(network=network,
                             phases=phases,
                             element='throat',
                             interleave=True,
                             flatten=True,
                             categorize_by=['object'])
        pdata = FlatDict(pdata, delimiter=delim)
        tdata = FlatDict(tdata, delimiter=delim)

        # Scan data and convert non-1d arrays to multiple columns.
        # NOTE: the scipy namespace aliases (sp.shape, sp.split) were
        # deprecated in SciPy 1.8 and removed in 2.0; use numpy directly.
        for key in list(pdata.keys()):
            if np.shape(pdata[key]) != (network[0].Np, ):
                arr = pdata.pop(key)
                tmp = np.split(arr, arr.shape[1], axis=1)
                cols = range(len(tmp))
                pdata.update(
                    {key + '[' + str(i) + ']': tmp[i].squeeze()
                     for i in cols})
        for key in list(tdata.keys()):
            if np.shape(tdata[key]) != (network[0].Nt, ):
                arr = tdata.pop(key)
                tmp = np.split(arr, arr.shape[1], axis=1)
                cols = range(len(tmp))
                tdata.update(
                    {key + '[' + str(i) + ']': tmp[i].squeeze()
                     for i in cols})

        # Convert sanitized dictionaries to DataFrames
        pdata = pd.DataFrame(sanitize_dict(pdata))
        tdata = pd.DataFrame(sanitize_dict(tdata))

        # Prepare DataFrames to be returned
        if join:
            # Left-join on the (longer) throat index; short pore columns
            # get padded with NaN
            data = tdata.join(other=pdata, how='left')
        else:
            nt = namedtuple('dataframes', ('pore', 'throat'))
            data = nt(pore=pdata, throat=tdata)

        return data