Example #1
File: MAT.py Project: PMEAL/OpenPNM
    def save(cls, network, phases=[], filename=''):
        r"""
        Write Network to a Mat file for exporting to Matlab.

        Parameters
        ----------
        network : OpenPNM Network Object

        filename : string
            Desired file name, defaults to network name if not given

        phases : list of phase objects ([])
            Phases that have properties we want to write to file

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]
        # Write to file
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='mat')

        d = Dict.to_dict(network=network, phases=phases, interleave=True)
        d = FlatDict(d, delimiter='|')
        d = sanitize_dict(d)
        new_d = {}
        for key in list(d.keys()):
            new_key = key.replace('|', '_').replace('.', '_')
            new_d[new_key] = d.pop(key)

        spio.savemat(file_name=filename, mdict=new_d)
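
A minimal sketch (assuming the flatdict package) of the flatten-then-sanitize step used above: nested keys are joined with '|', then '|' and '.' are rewritten to underscores because MATLAB struct field names cannot contain either character.

from flatdict import FlatDict

nested = {'pore': {'diameter': [1.0, 2.0]}, 'throat.conns': [[0, 1]]}
flat = FlatDict(nested, delimiter='|')  # keys: 'pore|diameter', 'throat.conns'
mat_safe = {k.replace('|', '_').replace('.', '_'): v for k, v in flat.items()}
print(list(mat_safe))  # ['pore_diameter', 'throat_conns']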
Example #2
def parse(config_file):
    config = {'config_modified_time': os.path.getmtime(config_file)}
    with open(config_file, 'r') as stream:
        config.update(yaml.safe_load(stream))

    # Resolve the $ref tags in the dictionary.
    config = FlatDict(config)

    for key in list(config):
        if '$ref' in key:
            config[key.replace(':$ref', '')] = config[config[key].replace(
                '#/', '').replace('/', ':')]
    config = config.as_dict()

    # Set up logging.
    level = logging.getLevelName(config['logging']['level'])

    logging.basicConfig(level=level)

    # Setup backoff logging for when we get URL errors.
    logging.getLogger('backoff').addHandler(logging.StreamHandler())
    logging.getLogger('backoff').setLevel(level)

    return config
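
A toy illustration of the $ref substitution above, using a hypothetical config shape: a value like '#/defaults/host' is rewritten into a ':'-delimited FlatDict key and resolved in place.

from flatdict import FlatDict

raw = {'defaults': {'host': 'localhost'},
       'service': {'host': {'$ref': '#/defaults/host'}}}
config = FlatDict(raw)  # the flatdict default delimiter is ':'
for key in list(config):
    if '$ref' in key:
        config[key.replace(':$ref', '')] = config[
            config[key].replace('#/', '').replace('/', ':')]
print(config.as_dict()['service'])  # {'host': 'localhost'}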
Example #3
def _beautify(data, *, colors: bool, table: bool) -> str:
    """
    1. Returns table if `table=True`
    1. Returns colored JSON if `json=True`
    1. Returns plain JSON otherwise.
    """
    if table:
        # one dict
        if isinstance(data, dict):
            data = FlatDict(data, delimiter='.').items()
            return tabulate(data, headers=('key', 'value'), tablefmt='fancy_grid')
        # list of dicts
        if isinstance(data, list) and data and isinstance(data[0], dict):
            table = []
            keys = ()
            for row in data:
                row = FlatDict(row, delimiter='.')
                keys = tuple(sorted(row))
                table.append([row[key] for key in keys])
            return tabulate(table, headers=keys, tablefmt='fancy_grid')

    json_params = dict(indent=2, sort_keys=True, ensure_ascii=False)
    dumped = json.dumps(data, **json_params)
    if not colors:
        return dumped
    return highlight(dumped, lexers.JsonLexer(), formatters.TerminalFormatter())
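
The dict-to-table branch, shown standalone (assuming the tabulate package): flattening first means nested keys show up as dotted rows.

from flatdict import FlatDict
from tabulate import tabulate

data = {'user': {'name': 'ada', 'id': 1}}
rows = FlatDict(data, delimiter='.').items()
print(tabulate(rows, headers=('key', 'value'), tablefmt='fancy_grid'))
# rows appear as 'user.id' / 1 and 'user.name' / 'ada'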
Example #4
def get_ilo_provider_object(ilo_client, pType):
    providerObject = []
    uri = '/redfish/v1/providers/'
    response = ilo_client.get(uri)
    if response.status_code == 200:
        if 'Members' in response.json():
            providerListLink = response.json()['Members']
            for provider in providerListLink:
                provider_uri = provider['@odata.id']
                provider_resource = ilo_client.get(provider_uri)
                if provider_resource.status_code == 200 and pType:
                    resource_list = provider_resource.json()['Resources']
                    for resource in resource_list:
                        if resource['Type'].startswith(pType):
                            resource_uri = resource['href']
                            resource_response = ilo_client.get(resource_uri)
                            if resource_response.status_code == 200:
                                providerObject.append(
                                    FlatDict(resource_response.json(), '.'))
                            else:
                                raise AssertionError(
                                    'Unable to retrieve object for provider type: %s'
                                    % pType)
                elif provider_resource.status_code == 200 and pType is None:
                    providerObject.append(
                        FlatDict(provider_resource.json(), '.'))
                else:
                    raise AssertionError(
                        'Unable to retrieve provider data in provider list for: %s'
                        % provider['@odata.id'])
    else:
        raise AssertionError('Unable to retrieve provider list for: %s' %
                             ilo_client._host)
    return providerObject
Example #6
    def _prune_config(self, config):
        """
        This method cleans up the configuration object by removing fields
        with no value
        """
        # Flatten dictionary to account for nested dictionary
        flat_current_config = FlatDict(config, delimiter='_')
        # Iterate through keys and remove if value is still empty string
        for key in list(flat_current_config.keys()):
            if flat_current_config.get(key) == '':
                del flat_current_config[key]

        return flat_current_config.as_dict()
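
A quick check of the pruning behavior above, as a standalone sketch:

from flatdict import FlatDict

config = {'client': {'orgUrl': '', 'token': 'abc', 'proxy': {'port': ''}}}
flat = FlatDict(config, delimiter='_')
for key in list(flat.keys()):
    if flat.get(key) == '':
        del flat[key]
print(flat.as_dict())  # {'client': {'token': 'abc'}} -- empty branches are pruned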
Example #7
def error_422_handler(exc, req, resp, params):
    update_content_type(req, resp)
    resp.status = exc.status

    if req.api_version == '1.0':
        exc_data = {
            'title': exc.title,
            'description': _('Field value error'),
            'code': getattr(exc, 'code', None) or 'entity_error'
        }
        if hasattr(exc, 'errors'):
            exc_data['errors'] = exc.errors

        result = ErrorSchema().dump(exc_data)
        resp.text = json.dumps(result, cls=LazyEncoder)
    else:
        _exc_code = exc.status.lower().replace(' ', '_')
        _errors = []
        if hasattr(exc, 'errors'):
            flat = FlatDict(exc.errors, delimiter='/')
            for field, errors in flat.items():
                if not isinstance(errors, list):
                    errors = [
                        str(errors),
                    ]

                for title in errors:
                    _error = {
                        'id': uuid4(),
                        'title': _('Field error'),
                        'detail': _(title),
                        'status': resp.status,
                        'code': getattr(exc, 'code', None) or _exc_code,
                        'source': {
                            'pointer': '/{}'.format(field)
                        }
                    }
                    _errors.append(_error)
        else:
            _error = {
                'id': uuid4(),
                'code': getattr(exc, 'code', None) or _exc_code,
                'title': exc.title,
                'detail': _('Field value error'),
                'status': resp.status
            }
            _errors.append(_error)

        result = ErrorsSchema().dump({'errors': _errors})

        resp.text = json.dumps(result, cls=LazyEncoder)
Example #8
def get_distances(origin, destinations, departure_time: datetime):

    result_dict = gmaps.distance_matrix(origin,
                                        destinations,
                                        mode="driving",
                                        units="metric",
                                        departure_time=departure_time)

    # Flatten each element of the first row into dotted keys
    dict_list = [
        FlatDict(r, delimiter=".") for r in result_dict["rows"][0]["elements"]
    ]
    df_distances = pd.DataFrame(dict_list)
    return df_distances
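
A standalone sketch of the flattening step with a hypothetical Distance Matrix element, showing the dotted column names that end up in the DataFrame:

import pandas as pd
from flatdict import FlatDict

elements = [{'distance': {'text': '2.4 km', 'value': 2400},
             'duration': {'text': '7 mins', 'value': 420},
             'status': 'OK'}]
df = pd.DataFrame([FlatDict(e, delimiter='.') for e in elements])
print(sorted(df.columns))
# ['distance.text', 'distance.value', 'duration.text', 'duration.value', 'status']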
Example #9
 def log_experiment_input(self) -> None:
     logger.info(
         f'Using the following config: \n{pformat(jsons.dump(self.config))}'
     )
     dump_to_file(
         self.config,
         os.path.join(self.path_to_trained_model, CONFIG_FILE_NAME))
     if self.comet_experiment:
         flat_config = FlatDict(jsons.dump(self.config))
         for name, value in flat_config.items():
             self.comet_experiment.log_parameter(name, value)
         self.comet_experiment.log_metric(MODEL_AVAILABLE_METRIC_NAME,
                                          False)
         self.comet_experiment.log_metric(TERMINATED_NORMALLY_METRIC_NAME,
                                          False)
Example #10
def main():
    with open('input.json', 'r') as fobj:
        j_in = json.load(fobj)
    data_in = j_in

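    # NB: unlike the other examples, this FlatDict appears to take
    # (root_name, data) and expose .out(); likely a custom class, not
    # the flatdict package used elsewhere.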
    fd = FlatDict('root', data_in)
    out = fd.out()

    print('----------------------------------------------------------------')
    print('input:')
    print(json.dumps(data_in, indent=4))
    print('----------------------------------------------------------------')
    print('output:')
    print(json.dumps(out, indent=4, sort_keys=True))
    print('---------------------------------------------------------------!')
Example #11
 def __reformatted_params(self, **kwargs):
     return {
         'StackName': self.stack_name,
         **self.task_config['properties'],
         'Parameters': [{'ParameterKey': x, 'ParameterValue': str(y)} for (x, y) in
                        FlatDict(self.task_config['properties']['Parameters']).items()]
     }
Example #12
    def extract_rating(self, amenity_list):
        """ For a given store and a particular amenity, 
        this function computes the total and average rating for that amenity. 
        This object appears as a list and each element of that list refers to 1 instance 
        of that amenity. 
        The function calculates the total number of ratings across all the components 
        (i.e., all instances of that amenity around the store) and accumulates the total 
        rating score. 
        Finally it returns the total number of ratings and average ratings. """

        total_ratings, avg_rating, total_score = 0, 0, 0
        if len(amenity_list
               ) == 0:  # The store does not have that amenity around it
            return total_ratings, avg_rating

        # looping over each amenity.
        for i in range(len(amenity_list)):
            """flatting the nested dictionary that contains multiple elements of an
            instance of amentiy.
            In the flattened dictionary, 'user_ratings_total' is the total number of ratings 
            and 'rating' is the average rating for the amenity. 
            Total score is obtained by multiplying these two components. """

            tmp_dict = FlatDict(amenity_list[i])
            if ('user_ratings_total' in tmp_dict) and ('rating' in tmp_dict):
                total_ratings += tmp_dict['user_ratings_total']
                total_score += tmp_dict['user_ratings_total'] * tmp_dict[
                    'rating']

        # adding a small value in the denominator to avoid ZeroDivisionError Exception
        return total_ratings, total_score / (1e-6 + total_ratings)
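
For example, with a hypothetical Places-style payload (each element is one instance of the amenity):

from flatdict import FlatDict

amenity_list = [{'name': 'Cafe A', 'user_ratings_total': 10, 'rating': 4.0},
                {'name': 'Cafe B', 'user_ratings_total': 30, 'rating': 3.5}]
total, score = 0, 0.0
for amenity in amenity_list:
    flat = FlatDict(amenity)
    if 'user_ratings_total' in flat and 'rating' in flat:
        total += flat['user_ratings_total']
        score += flat['user_ratings_total'] * flat['rating']
print(total, round(score / (1e-6 + total), 3))  # 40 3.625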
Example #13
 def prepare_data(self, name, data):
     data = dict(data)
     field_data = {k: v for k, v in data.items() if k.startswith(name)}
     data.update(
         FlatDict(field_data,
                  delimiter=constants.SEPARATOR_LOOKUP_FILTER).as_dict())
     return data
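
A sketch of the unflattening this performs, assuming the double-underscore separator used by django-elasticsearch-dsl-drf for SEPARATOR_LOOKUP_FILTER:

from flatdict import FlatDict

field_data = {'age__gte': '18', 'age__lte': '65'}
print(FlatDict(field_data, delimiter='__').as_dict())
# {'age': {'gte': '18', 'lte': '65'}}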
Example #14
def make_sims_dict(params, quiet=True, prefix=""):
    """
    make_sims_dict(params, quiet=True, prefix="")

    Extracting values from pandas table to generate a nested dictionary
    simset which can be used by clone_sims to set up simulation array

    Parameters
    ----------
    params : pandas.DataFrame
        Table of parameters; each row defines one simulation.

    quiet : bool
        Flag to print output.

    prefix : string
        Prefix to be added to the names of the generated simulations. Nothing by default.

    Returns
    -------
    simset: dictionary array of parameters per simulation

    Examples
    --------
    >>> simset = pc.pipelines.make_sims_dict(params)
    >>> simset['nu1e-3nu_hyper31e-9nxgrid400chi_hyper31e-9']
    {'run.in'      :
        {'viscosity_run_pars':
            {'nu'        : ' 1e-3',
             'ivisc'     : " ['nu-shock', 'nu-const', 'nu-hyper3']",
             'nu_hyper3' : ' 1e-9'},
         'entropy_run_pars'  :
            {'chi_hyper3': '  1e-9', 'iheatcond': " 'hyper3'"}},
     'cparam.local':
            {'nxgrid'    : ' 400'}}
    """

    simset = dict()
    # Create nested dictionary of simulation values
    for index, row in params.iterrows():
        param_nested_dict = FlatDict(dict(row), delimiter="/").as_dict()
        simkey = prefix
        if not quiet:
            print(param_nested_dict)
            print(row.keys())
        for value, key in zip(row, row.keys()):
            # Only 'compile' uses bool. Arguments otherwise are strings.
            if not isinstance(value, bool):
                if "'" not in value:
                    if len(simkey) > 0:
                        simkey += "_"
                    simkey += "{}_{}".format(
                        key.split("/")[-1], value.strip(" "))
        simset[simkey] = param_nested_dict
    return simset
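
The '/'-delimited keys are what turn flat table columns into the nested run.in / cparam.local structure; a minimal sketch of that step:

from flatdict import FlatDict

row = {'run.in/viscosity_run_pars/nu': ' 1e-3', 'cparam.local/nxgrid': ' 400'}
print(FlatDict(row, delimiter='/').as_dict())
# {'run.in': {'viscosity_run_pars': {'nu': ' 1e-3'}}, 'cparam.local': {'nxgrid': ' 400'}}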
Example #15
 def prepare_data(self, name, data):
     data = dict(data)
     if name in data:
         nkey = '%s%s%s' % (name, constants.SEPARATOR_LOOKUP_FILTER,
                            constants.LOOKUP_FILTER_TERM)
         data[nkey] = data.pop(name)
     field_data = {k: v for k, v in data.items() if k.startswith(name)}
     data.update(
         FlatDict(field_data,
                  delimiter=constants.SEPARATOR_LOOKUP_FILTER).as_dict())
     return data
Example #16
    def _apply_env_config(self):
        """
        This method checks the environment variables for any OKTA
        configuration parameters and applies them if available.
        """
        # Flatten current config and join with underscores
        # (for environment variable format)
        flattened_config = FlatDict(self._config, delimiter='_')
        flattened_keys = flattened_config.keys()

        # Create empty result config and populate
        updated_config = FlatDict({}, delimiter='_')

        # Go through keys and search for it in the environment vars
        # using the format described in the README
        for key in flattened_keys:
            env_key = ConfigSetter._OKTA + "_" + key.upper()
            env_value = os.environ.get(env_key, None)

            if env_value is not None:
                # If value is found, add to config
                if "scopes" in env_key.lower():
                    updated_config[key] = env_value.split(',')
                else:
                    updated_config[key] = env_value
        # Apply the collected overrides to the current configuration
        self._apply_config(updated_config.as_dict())
Example #17
    def process(self, config=None):
        if config is None:
            config = {}

        config = FlatDict(config, delimiter='_')
        environ_config = {}

        for key in config.keys():
            env_key = '_'.join([self.prefix, key.upper()])
            env_key = self.aliases.get(env_key, env_key)
            value = environ.get(env_key)

            if value:
                if isinstance(config[key], int):
                    value = int(value)

                environ_config[key] = value

        _extend_dict(config, environ_config)
        config = config.as_dict()

        return config
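
A self-contained sketch of the same environment-override pattern (the MYAPP prefix and keys are hypothetical):

import os
from flatdict import FlatDict

os.environ['MYAPP_DB_PORT'] = '5433'
config = FlatDict({'db': {'host': 'localhost', 'port': 5432}}, delimiter='_')
for key in list(config.keys()):
    value = os.environ.get('MYAPP_' + key.upper())
    if value:
        # preserve the original type when the default was an int
        config[key] = int(value) if isinstance(config[key], int) else value
print(config.as_dict())  # {'db': {'host': 'localhost', 'port': 5433}}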
Example #18
def export(request):
    request.require_administrator()

    date_from = request.params.get("date_from")
    date_to = request.params.get("date_to")
    user = request.params.get("user")

    hits = audit_cget(
        request=request,
        date_from=date_from,
        date_to=date_to,
        user=user,
    )

    hits = map(lambda h: h.to_dict(), hits)
    hits = map(lambda h: FlatDict(h, delimiter='.'), hits)
    hits = list(hits)

    if len(hits) == 0:
        raise HTTPNotFound()

    buf = StringIO()
    writer = csv.writer(buf, dialect='excel')

    headrow = (
        '@timestamp',
        'request.method',
        'request.path',
        'request.query_string',
        'request.remote_addr',
        'response.status_code',
        'response.route_name',
        'user.id',
        'user.keyname',
        'user.display_name',
        'context.id',
        'context.model',
    )
    writer.writerow(headrow)

    for hit in hits:
        datarow = map(lambda key: hit.get(key), headrow)
        writer.writerow(datarow)

    content_disposition = 'attachment; filename=audit.csv'

    return Response(buf.getvalue(),
                    content_type='text/csv',
                    content_disposition=content_disposition)
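
The dotted delimiter is what lets each flattened hit be addressed directly by the column names in headrow; a toy version:

import csv, io
from flatdict import FlatDict

hit = {'request': {'method': 'GET', 'path': '/resource'}, 'user': {'id': 42}}
flat = FlatDict(hit, delimiter='.')
headrow = ('request.method', 'request.path', 'user.id')
buf = io.StringIO()
writer = csv.writer(buf, dialect='excel')
writer.writerow(headrow)
writer.writerow([flat.get(key) for key in headrow])
print(buf.getvalue())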
Example #19
    def _parse_with_config(self, parser):

        config = confuse.Configuration('kafl', modname='kafl_fuzzer')

        # check default config search paths
        config.read(defaults=True, user=True)

        # local / workdir config
        workdir_config = os.path.join(os.getcwd(), 'kafl.yaml')
        if os.path.exists(workdir_config):
            config.set_file(workdir_config, base_for_paths=True)

        # ENV based config
        if 'KAFL_CONFIG' in os.environ:
            config.set_file(os.environ['KAFL_CONFIG'], base_for_paths=True)

        # merge all configs into a flat dictionary, delimiter = ':'
        config_values = FlatDict(config.flatten())
        if 'KAFL_CONFIG_DEBUG' in os.environ:
            print("Options picked up from config: %s" % str(config_values))

        # adopt defaults into parser, fixup 'required' and file/path fields
        for action in parser._actions:
            #print("action: %s" % repr(action))
            if action.dest in config_values:
                if action.type == parse_is_file:
                    action.default = config[action.dest].as_filename()
                elif isinstance(action, argparse._AppendAction):
                    assert False, "append actions are not supported in yaml config"
                    #action.default = [config[action.dest].as_str()]
                else:
                    action.default = config[action.dest].get()
                action.required = False
                config_values.pop(action.dest)

        # remove options not defined in argparse (set_defaults() imports everything)
        for option in list(config_values):
            if 'KAFL_CONFIG_DEBUG' in os.environ:
                logger.warning("Dropping unrecognized option '%s'." % option)
            config_values.pop(option)

        # allow unrecognized options?
        #parser.set_defaults(**config_values)

        args = parser.parse_args()

        if 'KAFL_CONFIG_DEBUG' in os.environ:
            print("Final parsed args: %s" % repr(args))
        return args
Example #20
    def __spark_args(self):
        # reformat spark conf
        flat_conf_args = list()

        spark_arguments = {
            'master': self.task_config.get('master', None),
            'class': self.task_config.get('class', None)
        }

        source_code = self.task_config.get("application_source")

        for k, v in FlatDict(self.task_config.get('conf', {})).items():
            flat_conf_args.append('--conf')
            flat_conf_args.append('{}={}'.format(k, v))

        spark_conf = self.__parse_spark_arguments(spark_arguments)
        spark_conf.extend(flat_conf_args)
        spark_conf.extend([source_code])
        return spark_conf
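
A standalone sketch of the conf flattening (here with a '.' delimiter so nested YAML maps onto Spark's dotted option names; the snippet above uses the flatdict default ':'):

from flatdict import FlatDict

conf = {'spark': {'executor': {'memory': '4g'}}}
flat_conf_args = []
for k, v in FlatDict(conf, delimiter='.').items():
    flat_conf_args += ['--conf', '{}={}'.format(k, v)]
print(flat_conf_args)  # ['--conf', 'spark.executor.memory=4g']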
Example #21
    def to_hdf5(cls, network=None, phases=[], element=['pore', 'throat'],
                filename='', interleave=True, flatten=False, categorize_by=[]):
        r"""
        Creates an HDF5 file containing data from the specified objects,
        and categorized according to the given arguments.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        element : string or list of strings
            An indication of whether 'pore' and/or 'throat' data are desired.
            The default is both.

        interleave : boolean (default is ``True``)
            When ``True`` (default) the data from all Geometry objects (and
            Physics objects if ``phases`` are given) is interleaved into
            a single array and stored as a network property (or Phase
            property for Physics data). When ``False``, the data for each
            object are stored under their own dictionary key, the structuring
            of which depends on the value of the ``flatten`` argument.

        flatten : boolean (default is ``False``)
            When ``True``, all objects are accessible from the top level
            of the dictionary.  When ``False`` objects are nested under their
            parent object.  If ``interleave`` is ``True`` this argument is
            ignored.

        categorize_by : string or list of strings
            Indicates how the dictionaries should be organized.  The list can
            contain any, all or none of the following strings:

            **'objects'** : If specified the dictionary keys will be stored
            under a general level corresponding to their type (e.g.
            'network/net_01/pore.all'). If ``interleave`` is ``True`` then
            the only categories are *network* and *phase*, since
            *geometry* and *physics* data get stored under their respective
            *network* and *phase*.

            **'data'** : If specified the data arrays are additionally
            categorized by ``label`` and ``property`` to separate *boolean*
            from *numeric* data.

            **'elements'** : If specified the data arrays are additionally
            categorized by ``pore`` and ``throat``, meaning that the propnames
            are no longer prepended by a 'pore.' or 'throat.'

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename, ext='hdf')

        dct = Dict.to_dict(network=network, phases=phases, element=element,
                           interleave=interleave, flatten=flatten,
                           categorize_by=categorize_by)
        d = FlatDict(dct, delimiter='/')

        f = h5py.File(filename, "w")
        for item in list(d.keys()):
            tempname = '_'.join(item.split('.'))
            arr = d[item]
            if d[item].dtype == 'O':
                logger.warning(item + ' has dtype object,' +
                               ' will not write to file')
                del d[item]
            elif 'U' in str(arr[0].dtype):
                pass
            else:
                f.create_dataset(name='/'+tempname, shape=arr.shape,
                                 dtype=arr.dtype, data=arr)
        return f
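
The '/' delimiter maps flattened keys directly onto HDF5 group paths; a minimal standalone sketch (mirroring the '.'-to-'_' renaming above):

import h5py
import numpy as np
from flatdict import FlatDict

d = FlatDict({'network': {'pore.all': np.ones(5, dtype=bool)}}, delimiter='/')
with h5py.File('demo.hdf', 'w') as f:
    for item in list(d.keys()):
        tempname = '_'.join(item.split('.'))  # 'network/pore.all' -> 'network/pore_all'
        f.create_dataset(name='/' + tempname, data=d[item])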
Example #22
def post_ris_object(ilo_client, uri, data):
    resp = ilo_client.post(uri, data)
    if resp.status_code == 200:
        return FlatDict(resp.json(), '.')
    else:
        return resp.status_code
Example #23
def get_ris_object(ilo_client, uri):
    resp = ilo_client.get(uri)
    if resp.status_code == 200:
        return FlatDict(resp.json(), '.')
    else:
        return resp.status_code
Example #24
    def save(cls, network, phases=[], filename='', delim=' | ', fill_nans=None):
        r"""
        Save network and phase data to a single vtp file for visualizing in
        Paraview

        Parameters
        ----------
        network : OpenPNM Network Object
            The Network containing the data to be written

        phases : list, optional
            A list containing OpenPNM Phase object(s) containing data to be
            written

        filename : string, optional
            Filename to write data.  If no name is given the file is named
            after the network

        delim : string
            Specify which character is used to delimit the data names.  The
            default is ' | ' which creates a nice clean output in the Paraview
            pipeline viewer (e.g. net | property | pore | diameter)

        fill_nans : scalar
            The value to use to replace NaNs with.  The VTK file format does
            not work with NaNs, so they must be dealt with.  The default is
            `None` which means property arrays with NaNs are not written to the
            file.  Other useful options might be 0 or -1, but the user must
            be aware that these are not real values, only place holders.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)

        am = Dict.to_dict(network=network, phases=phases, interleave=True,
                          categorize_by=['object', 'data'])
        am = FlatDict(am, delimiter=delim)
        key_list = list(sorted(am.keys()))

        network = network[0]
        points = network['pore.coords']
        pairs = network['throat.conns']
        num_points = np.shape(points)[0]
        num_throats = np.shape(pairs)[0]

        root = ET.fromstring(VTK._TEMPLATE)
        piece_node = root.find('PolyData').find('Piece')
        piece_node.set("NumberOfPoints", str(num_points))
        piece_node.set("NumberOfLines", str(num_throats))
        points_node = piece_node.find('Points')
        coords = VTK._array_to_element("coords", points.T.ravel('F'), n=3)
        points_node.append(coords)
        lines_node = piece_node.find('Lines')
        connectivity = VTK._array_to_element("connectivity", pairs)
        lines_node.append(connectivity)
        offsets = VTK._array_to_element("offsets", 2*np.arange(len(pairs))+2)
        lines_node.append(offsets)

        point_data_node = piece_node.find('PointData')
        cell_data_node = piece_node.find('CellData')
        for key in key_list:
            array = am[key]
            if array.dtype == 'O':
                logger.warning(key + ' has dtype object,' +
                               ' will not write to file')
            else:
                if array.dtype == bool:
                    array = array.astype(int)
                if np.any(np.isnan(array)):
                    if fill_nans is None:
                        logger.warning(key + ' has nans,' +
                                       ' will not write to file')
                        continue
                    else:
                        array[np.isnan(array)] = fill_nans
                element = VTK._array_to_element(key, array)
                if (array.size == num_points):
                    point_data_node.append(element)
                elif (array.size == num_throats):
                    cell_data_node.append(element)

        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='vtp')

        tree = ET.ElementTree(root)
        tree.write(filename)

        with open(filename, 'r+') as f:
            string = f.read()
            string = string.replace('</DataArray>', '</DataArray>\n\t\t\t')
            f.seek(0)
            # consider adding header: '<?xml version="1.0"?>\n'+
            f.write(string)
Example #25
    def _apply_config(self, new_config: dict):
        """This method applies a config dictionary to the current config,
           overwriting values and adding new entries (if present).

        Arguments:
            new_config {dict} -- A dictionary of client configuration details
        """
        # Update current configuration with new configuration
        # Flatten both dictionaries to account for nested dictionary values
        flat_current_client = FlatDict(self._config['client'], delimiter='_')
        flat_current_testing = FlatDict(self._config['testing'], delimiter='_')

        flat_new_client = FlatDict(new_config.get('client', {}), delimiter='_')
        flat_new_testing = FlatDict(new_config.get('testing', {}),
                                    delimiter='_')
        flat_current_client.update(flat_new_client)
        flat_current_testing.update(flat_new_testing)
        # Update values in current config and unflatten
        self._config = {
            'client': flat_current_client.as_dict(),
            'testing': flat_current_testing.as_dict()
        }
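
The flatten-update-unflatten idiom above amounts to a deep merge; a small sketch:

from flatdict import FlatDict

current = {'client': {'orgUrl': 'https://example.okta.com',
                      'rateLimit': {'maxRetries': 2}}}
new = {'client': {'rateLimit': {'maxRetries': 5}}}
flat = FlatDict(current, delimiter='_')
flat.update(FlatDict(new, delimiter='_'))
print(flat.as_dict())
# {'client': {'orgUrl': 'https://example.okta.com', 'rateLimit': {'maxRetries': 5}}}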
Example #26
    def export_data(cls, network, filename):
        r"""
        Exports an OpenPNM network to a paraview state file.

        Parameters
        ----------
        network : GenericNetwork
            The network containing the desired data.
        filename : str
            Path to saved .vtp file.

        Notes
        -----
        Outputs a pvsm file that can be opened in Paraview. The pvsm file will
        be saved with the same name as .vtp file

        """
        try:
            import paraview.simple
        except ModuleNotFoundError:
            msg = (
                "The paraview python bindings must be installed using "
                "'conda install -c conda-forge paraview'; this may require "
                "a virtualenv, since conflicts with other packages are common. "
                "This is why it is not explicitly included as a dependency in "
                "porespy.")
            raise ModuleNotFoundError(msg)
        paraview.simple._DisableFirstRenderCameraReset()
        file = os.path.splitext(filename)[0]
        x, y, z = np.ptp(network.coords, axis=0)
        if sum(op.topotools.dimensionality(network)) == 2:
            zshape = 0
            xshape = y
            yshape = x
        elif sum(op.topotools.dimensionality(network)) == 3:
            zshape = x
            xshape = z
            yshape = y
        maxshape = max(xshape, yshape, zshape)
        shape = np.array([xshape, yshape, zshape])
        # Create a new 'XML PolyData Reader'
        path = os.path.join(os.getcwd(), file + '.vtp')
        water = op.phases.Water(network=network)
        net_vtp = paraview.simple.XMLPolyDataReader(FileName=[path])
        p = op.io.Dict.to_dict(network,
                               phases=[water],
                               element=['pore'],
                               flatten=False,
                               categorize_by=['data', 'object'])
        p = FlatDict(p, delimiter=' | ')
        t = op.io.Dict.to_dict(network,
                               phases=[water],
                               element=['throat'],
                               flatten=False,
                               categorize_by=['data', 'object'])
        t = FlatDict(t, delimiter=' | ')
        net_vtp.CellArrayStatus = list(t.keys())
        net_vtp.PointArrayStatus = list(p.keys())
        # Get active view
        render_view = paraview.simple.GetActiveViewOrCreate('RenderView')
        # Uncomment following to set a specific view size
        # render_view.ViewSize = [1524, 527]
        # Get layout
        _ = paraview.simple.GetLayout()
        # Show data in view
        net_vtp_display = paraview.simple.Show(net_vtp, render_view,
                                               'GeometryRepresentation')
        # Trace defaults for the display properties.
        net_vtp_display.Representation = 'Surface'
        net_vtp_display.ColorArrayName = [None, '']
        net_vtp_display.OSPRayScaleArray = [
            f'network | {network.name} | labels | pore.all'
        ]
        net_vtp_display.OSPRayScaleFunction = 'PiecewiseFunction'
        net_vtp_display.SelectOrientationVectors = 'None'
        net_vtp_display.ScaleFactor = (maxshape - 1) / 10
        net_vtp_display.SelectScaleArray = 'None'
        net_vtp_display.GlyphType = 'Arrow'
        net_vtp_display.GlyphTableIndexArray = 'None'
        net_vtp_display.GaussianRadius = (maxshape - 1) / 200
        net_vtp_display.SetScaleArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        net_vtp_display.ScaleTransferFunction = 'PiecewiseFunction'
        net_vtp_display.OpacityArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        net_vtp_display.OpacityTransferFunction = 'PiecewiseFunction'
        net_vtp_display.DataAxesGrid = 'GridAxesRepresentation'
        net_vtp_display.PolarAxes = 'PolarAxesRepresentation'
        # Init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
        net_vtp_display.ScaleTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
        net_vtp_display.OpacityTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Reset view to fit data
        render_view.ResetCamera()
        # Get the material library
        _ = paraview.simple.GetMaterialLibrary()
        # Update the view to ensure updated data information
        render_view.Update()
        # Create a new 'Glyph'
        glyph = paraview.simple.Glyph(Input=net_vtp, GlyphType='Arrow')
        glyph.OrientationArray = ['POINTS', 'No orientation array']
        glyph.ScaleArray = ['POINTS', 'No scale array']
        glyph.ScaleFactor = 1
        glyph.GlyphTransform = 'Transform2'
        # Properties modified on glyph
        glyph.GlyphType = 'Sphere'
        glyph.ScaleArray = [
            'POINTS',
            'network | ' + network.name + ' | properties | pore.diameter'
        ]
        # Show data in view
        glyph_display = paraview.simple.Show(glyph, render_view,
                                             'GeometryRepresentation')
        # Trace defaults for the display properties.
        glyph_display.Representation = 'Surface'
        glyph_display.ColorArrayName = [None, '']
        glyph_display.OSPRayScaleArray = 'Normals'
        glyph_display.OSPRayScaleFunction = 'PiecewiseFunction'
        glyph_display.SelectOrientationVectors = 'None'
        glyph_display.ScaleFactor = (maxshape - 1) / 10
        glyph_display.SelectScaleArray = 'None'
        glyph_display.GlyphType = 'Arrow'
        glyph_display.GlyphTableIndexArray = 'None'
        glyph_display.GaussianRadius = (maxshape - 1) / 200
        glyph_display.SetScaleArray = ['POINTS', 'Normals']
        glyph_display.ScaleTransferFunction = 'PiecewiseFunction'
        glyph_display.OpacityArray = ['POINTS', 'Normals']
        glyph_display.OpacityTransferFunction = 'PiecewiseFunction'
        glyph_display.DataAxesGrid = 'GridAxesRepresentation'
        glyph_display.PolarAxes = 'PolarAxesRepresentation'
        # Init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
        glyph_display.ScaleTransferFunction.Points = [
            -0.97, 0, 0.5, 0, 0.97, 1, 0.5, 0
        ]
        # Init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
        glyph_display.OpacityTransferFunction.Points = [
            -0.97, 0, 0.5, 0, 0.97, 1, 0.5, 0
        ]
        # Update the view to ensure updated data information
        render_view.Update()
        # Set active source
        paraview.simple.SetActiveSource(net_vtp)
        # Create a new 'Shrink'
        shrink1 = paraview.simple.Shrink(Input=net_vtp)
        # Properties modified on shrink1
        shrink1.ShrinkFactor = 1.0
        # Show data in view
        shrink_display = paraview.simple.Show(
            shrink1, render_view, 'UnstructuredGridRepresentation')
        # Trace defaults for the display properties.
        shrink_display.Representation = 'Surface'
        shrink_display.ColorArrayName = [None, '']
        shrink_display.OSPRayScaleArray = [
            'network | ' + network.name + ' | labels | pore.all'
        ]
        shrink_display.OSPRayScaleFunction = 'PiecewiseFunction'
        shrink_display.SelectOrientationVectors = 'None'
        shrink_display.ScaleFactor = (maxshape - 1) / 10
        shrink_display.SelectScaleArray = 'None'
        shrink_display.GlyphType = 'Arrow'
        shrink_display.GlyphTableIndexArray = 'None'
        shrink_display.GaussianRadius = (maxshape - 1) / 200
        shrink_display.SetScaleArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        shrink_display.ScaleTransferFunction = 'PiecewiseFunction'
        shrink_display.OpacityArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        shrink_display.OpacityTransferFunction = 'PiecewiseFunction'
        shrink_display.DataAxesGrid = 'GridAxesRepresentation'
        shrink_display.PolarAxes = 'PolarAxesRepresentation'
        shrink_display.ScalarOpacityUnitDistance = 1.0349360947089783
        # Init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
        shrink_display.ScaleTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
        shrink_display.OpacityTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Hide data in view
        paraview.simple.Hide(net_vtp, render_view)
        # Update the view to ensure updated data information
        render_view.Update()
        # Create a new 'Cell Data to Point Data'
        cellDatatoPointData1 = paraview.simple.CellDatatoPointData(
            Input=shrink1)
        cellDatatoPointData1.CellDataArraytoprocess = list(t.keys())
        # Show data in view
        cell_data_to_point_data_display = paraview.simple.Show(
            cellDatatoPointData1, render_view,
            'UnstructuredGridRepresentation')
        # Trace defaults for the display properties.
        cell_data_to_point_data_display.Representation = 'Surface'
        cell_data_to_point_data_display.ColorArrayName = [None, '']
        cell_data_to_point_data_display.OSPRayScaleArray = [
            'network | ' + network.name + ' | labels | pore.all'
        ]
        cell_data_to_point_data_display.OSPRayScaleFunction = 'PiecewiseFunction'
        cell_data_to_point_data_display.SelectOrientationVectors = 'None'
        cell_data_to_point_data_display.ScaleFactor = (maxshape - 1) / 10
        cell_data_to_point_data_display.SelectScaleArray = 'None'
        cell_data_to_point_data_display.GlyphType = 'Arrow'
        cell_data_to_point_data_display.GlyphTableIndexArray = 'None'
        cell_data_to_point_data_display.GaussianRadius = (maxshape - 1) / 200
        cell_data_to_point_data_display.SetScaleArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        cell_data_to_point_data_display.ScaleTransferFunction = 'PiecewiseFunction'
        cell_data_to_point_data_display.OpacityArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        cell_data_to_point_data_display.OpacityTransferFunction = 'PiecewiseFunction'
        cell_data_to_point_data_display.DataAxesGrid = 'GridAxesRepresentation'
        cell_data_to_point_data_display.PolarAxes = 'PolarAxesRepresentation'
        cell_data_to_point_data_display.ScalarOpacityUnitDistance = 1.0349360947089783
        # Init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
        cell_data_to_point_data_display.ScaleTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
        cell_data_to_point_data_display.OpacityTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Hide data in view
        paraview.simple.Hide(shrink1, render_view)
        # Update the view to ensure updated data information
        render_view.Update()
        # Set active source
        paraview.simple.SetActiveSource(shrink1)
        # Set active source
        paraview.simple.SetActiveSource(cellDatatoPointData1)
        # Set active source
        paraview.simple.SetActiveSource(shrink1)
        # Create a new 'Extract Surface'
        extractSurface1 = paraview.simple.ExtractSurface(Input=shrink1)
        # Show data in view
        extract_surface_display = paraview.simple.Show(
            extractSurface1, render_view, 'GeometryRepresentation')
        # Trace defaults for the display properties.
        extract_surface_display.Representation = 'Surface'
        extract_surface_display.ColorArrayName = [None, '']
        extract_surface_display.OSPRayScaleArray = [
            'network | ' + network.name + ' | labels | pore.all'
        ]
        extract_surface_display.OSPRayScaleFunction = 'PiecewiseFunction'
        extract_surface_display.SelectOrientationVectors = 'None'
        extract_surface_display.ScaleFactor = (maxshape - 1) / 10
        extract_surface_display.SelectScaleArray = 'None'
        extract_surface_display.GlyphType = 'Arrow'
        extract_surface_display.GlyphTableIndexArray = 'None'
        extract_surface_display.GaussianRadius = (maxshape - 1) / 200
        extract_surface_display.SetScaleArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        extract_surface_display.ScaleTransferFunction = 'PiecewiseFunction'
        extract_surface_display.OpacityArray = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        extract_surface_display.OpacityTransferFunction = 'PiecewiseFunction'
        extract_surface_display.DataAxesGrid = 'GridAxesRepresentation'
        extract_surface_display.PolarAxes = 'PolarAxesRepresentation'
        # Init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
        extract_surface_display.ScaleTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
        extract_surface_display.OpacityTransferFunction.Points = [
            1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Hide data in view
        paraview.simple.Hide(shrink1, render_view)
        # Update the view to ensure updated data information
        render_view.Update()
        # create a new 'Tube'
        tube = paraview.simple.Tube(Input=extractSurface1)
        tube.Scalars = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        tube.Vectors = [None, '1']
        tube.Radius = 0.04
        # Set active source
        paraview.simple.SetActiveSource(extractSurface1)
        # Destroy tube
        paraview.simple.Delete(tube)
        del tube
        # Set active source
        paraview.simple.SetActiveSource(shrink1)
        # Set active source
        paraview.simple.SetActiveSource(cellDatatoPointData1)
        # Set active source
        paraview.simple.SetActiveSource(extractSurface1)
        # Create a new 'Tube'
        tube = paraview.simple.Tube(Input=extractSurface1)
        tube.Scalars = [
            'POINTS', 'network | ' + network.name + ' | labels | pore.all'
        ]
        tube.Vectors = [None, '1']
        tube.Radius = 0.04
        # Properties modified on tube
        tube.Vectors = ['POINTS', '1']
        # Show data in view
        tube_display = paraview.simple.Show(tube, render_view,
                                            'GeometryRepresentation')
        # Trace defaults for the display properties.
        tube_display.Representation = 'Surface'
        tube_display.ColorArrayName = [None, '']
        tube_display.OSPRayScaleArray = 'TubeNormals'
        tube_display.OSPRayScaleFunction = 'PiecewiseFunction'
        tube_display.SelectOrientationVectors = 'None'
        tube_display.ScaleFactor = (maxshape) / 10
        tube_display.SelectScaleArray = 'None'
        tube_display.GlyphType = 'Arrow'
        tube_display.GlyphTableIndexArray = 'None'
        tube_display.GaussianRadius = (maxshape) / 200
        tube_display.SetScaleArray = ['POINTS', 'TubeNormals']
        tube_display.ScaleTransferFunction = 'PiecewiseFunction'
        tube_display.OpacityArray = ['POINTS', 'TubeNormals']
        tube_display.OpacityTransferFunction = 'PiecewiseFunction'
        tube_display.DataAxesGrid = 'GridAxesRepresentation'
        tube_display.PolarAxes = 'PolarAxesRepresentation'
        # Init the 'PiecewiseFunction' selected for 'ScaleTransferFunction'
        tube_display.ScaleTransferFunction.Points = [
            -1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Init the 'PiecewiseFunction' selected for 'OpacityTransferFunction'
        tube_display.OpacityTransferFunction.Points = [
            -1, 0, 0.5, 0, 1, 1, 0.5, 0
        ]
        # Hide data in view
        paraview.simple.Hide(extractSurface1, render_view)
        # Update the view to ensure updated data information
        render_view.Update()
        # Saving camera placements for all active views
        # Current camera placement for render_view
        render_view.CameraPosition = [(xshape + 1) / 2, (yshape + 1) / 2,
                                      4.3 * np.sqrt(np.sum(shape / 2)**2)]
        render_view.CameraFocalPoint = [(xi + 1) / 2 for xi in shape]
        render_view.CameraParallelScale = np.sqrt(np.sum(shape / 2)**2)
        paraview.simple.SaveState(f"{file}.pvsm")
Example #27
    def from_dict(cls, dct, project=None, delim=' | '):
        r"""
        This method converts a correctly formatted dictionary into OpenPNM
        objects, and returns a handle to the *project* containing them.

        Parameters
        ----------
        dct : dictionary
            The Python dictionary containing the data.  The nesting and
            labeling of the dictionary is used to create the appropriate
            OpenPNM objects.

        project : OpenPNM Project Object
            The project with which the created objects should be associated.
            If not supplied, one will be created.

        Returns
        -------
        An OpenPNM Project containing the objects created to store the given
        data.

        Notes
        -----
        The requirement of a *correctly formed* dictionary is rather strict,
        and essentially means a dictionary produced by the ``to_dict`` method
        of this class.

        """
        if project is None:
            project = ws.new_project()

        # Uncategorize pore/throat and labels/properties, if present
        fd = FlatDict(dct, delimiter=delim)
        # If . is the delimiter, replace with | otherwise things break
        if delim == '.':
            delim = ' | '
            for key in list(fd.keys()):
                new_key = key.replace('.', delim)
                fd[new_key] = fd.pop(key)
        d = FlatDict(delimiter=delim)
        for key in list(fd.keys()):
            new_key = key.replace('pore' + delim, 'pore.')
            new_key = new_key.replace('throat' + delim, 'throat.')
            new_key = new_key.replace('labels' + delim, '')
            new_key = new_key.replace('properties' + delim, '')
            d[new_key] = fd.pop(key)

        # Place data into correctly categorized dicts, for later handling
        objs = {
            'network': NestedDict(),
            'geometry': NestedDict(),
            'physics': NestedDict(),
            'phase': NestedDict(),
            'algorithm': NestedDict(),
            'base': NestedDict()
        }
        for item in d.keys():
            path = item.split(delim)
            if len(path) > 2:
                if path[-3] in objs.keys():
                    # Item is categorized by type, so note it
                    objs[path[-3]][path[-2]][path[-1]] = d[item]
                else:
                    # item is nested, not categorized; make it a base
                    objs['base'][path[-2]][path[-1]] = d[item]
            else:
                # If not categorized by type, make it a base
                objs['base'][path[-2]][path[-1]] = d[item]

        # Convert to OpenPNM Objects, attempting to infer type
        for objtype in objs.keys():
            for name in objs[objtype].keys():
                # Create empty object, using dummy name to avoid error
                obj = project._new_object(objtype=objtype, name='')
                # Overwrite name
                obj._set_name(name=name, validate=False)
                # Update new object with data from dict
                obj.update(objs[objtype][name])

        return project
Example #29
File: VTK.py Project: PMEAL/OpenPNM
    def save(cls, network, phases=[], filename='', delim=' | ',
             fill_nans=None, fill_infs=None):
        r"""
        Save network and phase data to a single vtp file for visualizing in
        Paraview
        Parameters
        ----------
        network : OpenPNM Network Object
            The Network containing the data to be written
        phases : list, optional
            A list containing OpenPNM Phase object(s) containing data to be
            written
        filename : string, optional
            Filename to write data.  If no name is given the file is named
            after the network
        delim : string
            Specify which character is used to delimit the data names.  The
            default is ' | ' which creates a nice clean output in the Paraview
            pipeline viewer (e.g. net | property | pore | diameter)
        fill_nans : scalar
            The value to use to replace NaNs with.  The VTK file format does
            not work with NaNs, so they must be dealt with.  The default is
            `None` which means property arrays with NaNs are not written to the
            file.  Other useful options might be 0 or -1, but the user must
            be aware that these are not real values, only place holders.
        fill_infs : scalar
            The value to use to replace infs with.  The default is ``None``
            which means that property arrays containing ``None`` will *not*
            be written to the file, and a warning will be issued.  A useful
            value is
        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        # Check if any of the phases has time series
        transient = GenericIO.is_transient(phases=phases)
        if transient:
            logger.warning('vtp format does not support transient data, ' +
                           'use xdmf instead')
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename=filename, ext='vtp')

        am = Dict.to_dict(network=network, phases=phases, interleave=True,
                          categorize_by=['object', 'data'])
        am = FlatDict(am, delimiter=delim)
        key_list = list(sorted(am.keys()))

        network = network[0]
        points = network['pore.coords']
        pairs = network['throat.conns']
        num_points = np.shape(points)[0]
        num_throats = np.shape(pairs)[0]

        root = ET.fromstring(VTK._TEMPLATE)
        piece_node = root.find('PolyData').find('Piece')
        piece_node.set("NumberOfPoints", str(num_points))
        piece_node.set("NumberOfLines", str(num_throats))
        points_node = piece_node.find('Points')
        coords = VTK._array_to_element("coords", points.T.ravel('F'), n=3)
        points_node.append(coords)
        lines_node = piece_node.find('Lines')
        connectivity = VTK._array_to_element("connectivity", pairs)
        lines_node.append(connectivity)
        offsets = VTK._array_to_element("offsets", 2*np.arange(len(pairs))+2)
        lines_node.append(offsets)

        point_data_node = piece_node.find('PointData')
        cell_data_node = piece_node.find('CellData')
        for key in key_list:
            array = am[key]
            if array.dtype == 'O':
                logger.warning(key + ' has dtype object,' +
                               ' will not write to file')
            else:
                if array.dtype == bool:
                    array = array.astype(int)
                if np.any(np.isnan(array)):
                    if fill_nans is None:
                        logger.warning(key + ' has nans,' +
                                       ' will not write to file')
                        continue
                    else:
                        array[np.isnan(array)] = fill_nans
                if np.any(np.isinf(array)):
                    if fill_infs is None:
                        logger.warning(key + ' has infs,' +
                                       ' will not write to file')
                        continue
                    else:
                        array[np.isinf(array)] = fill_infs
                element = VTK._array_to_element(key, array)
                if (array.size == num_points):
                    point_data_node.append(element)
                elif (array.size == num_throats):
                    cell_data_node.append(element)

        tree = ET.ElementTree(root)
        tree.write(filename)

        with open(filename, 'r+') as f:
            string = f.read()
            string = string.replace('</DataArray>', '</DataArray>\n\t\t\t')
            f.seek(0)
            # consider adding header: '<?xml version="1.0"?>\n'+
            f.write(string)
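
A minimal standalone sketch of the NaN/inf filtering used above; the helper name _clean_array is hypothetical and not part of OpenPNM:

import numpy as np

def _clean_array(array, fill_nans=None, fill_infs=None):
    # Returning None means "skip this array", as the writer above does
    if array.dtype == 'O':
        return None
    if array.dtype == bool:
        array = array.astype(int)
    if np.any(np.isnan(array)):
        if fill_nans is None:
            return None
        array = array.copy()
        array[np.isnan(array)] = fill_nans
    if np.any(np.isinf(array)):
        if fill_infs is None:
            return None
        array = array.copy()
        array[np.isinf(array)] = fill_infs
    return array

# _clean_array(np.array([1.0, np.nan]), fill_nans=-1)  ->  array([ 1., -1.])
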
Example #30
    def save(cls, network, phases=[], filename=''):
        r"""
        Saves data from the given objects into the specified file.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        Notes
        -----
        This method only saves the data, not any of the pore-scale models or
        other attributes.  To save an actual OpenPNM Project use the
        ``Workspace`` object.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]

        if filename == '':
            filename = project.name
        path = cls._parse_filename(filename=filename, ext='xmf')
        # Path is a pathlib object, so slice it up as needed
        fname_xdf = path.name
        fname_hdf = path.stem+".hdf"
        path = path.parent
        f = h5py.File(path.joinpath(fname_hdf), "w")

        d = Dict.to_dict(network, phases=phases, interleave=True,
                         flatten=False, categorize_by=['element', 'data'])

        # Make HDF5 file with all datasets, and no groups
        D = FlatDict(d, delimiter='/')
        for item in list(D.keys()):  # copy keys; entries may be deleted below
            if D[item].dtype == 'O':
                logger.warning(item + ' has dtype object,' +
                               ' will not write to file')
                del D[item]
            elif 'U' in str(D[item][0].dtype):
                pass
            else:
                f.create_dataset(name='/'+item, shape=D[item].shape,
                                 dtype=D[item].dtype, data=D[item])
        # Add coordinate and connection information to top of HDF5 file
        f["coordinates"] = network["pore.coords"]
        f["connections"] = network["throat.conns"]

        # setup xdmf file
        root = create_root('Xdmf')
        domain = create_domain()
        grid = create_grid(Name="Structure", GridType="Uniform")

        # geometry coordinates
        row, col = f["coordinates"].shape
        dims = ' '.join((str(row), str(col)))
        hdf_loc = fname_hdf + ":coordinates"
        geo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                    Format='HDF', DataType="Float")
        geo = create_geometry(GeometryType="XYZ")
        geo.append(geo_data)

        # topology connections
        row, col = f["connections"].shape  # rows = throats, cols = 2 nodes
        dims = ' '.join((str(row), str(col)))
        hdf_loc = fname_hdf + ":connections"
        topo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                     Format="HDF", NumberType="Int")
        topo = create_topology(TopologyType="Polyline",
                               NodesPerElement=str(2),
                               NumberOfElements=str(row))
        topo.append(topo_data)

        # Add pore and throat properties
        for item in D.keys():
            if item not in ['coordinates', 'connections']:
                attr_type = 'Scalar'
                shape = f[item].shape
                dims = ''.join([str(i) + ' ' for i in list(shape)[::-1]])
                hdf_loc = fname_hdf + ":" + item
                attr = create_data_item(value=hdf_loc,
                                        Dimensions=dims,
                                        Format='HDF',
                                        Precision='8',
                                        DataType='Float')
                name = item.replace('/', ' | ')
                if 'throat' in item:
                    Center = "Cell"
                else:
                    Center = "Node"
                el_attr = create_attribute(Name=name, Center=Center,
                                           AttributeType=attr_type)
                el_attr.append(attr)
                grid.append(el_attr)

        grid.append(topo)
        grid.append(geo)
        domain.append(grid)
        root.append(domain)

        with open(path.joinpath(fname_xdf), 'w') as file:
            file.write(cls._header)
            file.write(ET.tostring(root).decode("utf-8"))

        # Close the HDF5 file
        f.close()
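
The '/'-delimited FlatDict keys map one-to-one onto HDF5 dataset paths, which is what ties the .hdf and .xmf files together. A small sketch with made-up data (the file name demo.hdf is arbitrary):

import h5py
import numpy as np
from flatdict import FlatDict

d = {'network': {'pore': {'diameter': np.random.rand(10)}}}
D = FlatDict(d, delimiter='/')
with h5py.File('demo.hdf', 'w') as f:
    for item in D.keys():               # e.g. 'network/pore/diameter'
        f.create_dataset(name='/' + item, data=D[item])
# an XDMF DataItem can then point at 'demo.hdf:network/pore/diameter'
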
Example #31
    def to_hdf5(cls,
                network=None,
                phases=[],
                element=['pore', 'throat'],
                filename='',
                interleave=True,
                flatten=False,
                categorize_by=[]):
        r"""
        Creates an HDF5 file containing data from the specified objects,
        and categorized according to the given arguments.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        element : string or list of strings
            An indication of whether 'pore' and/or 'throat' data are desired.
            The default is both.

        interleave : boolean (default is ``True``)
            When ``True`` (default) the data from all Geometry objects (and
            Physics objects if ``phases`` are given) is interleaved into
            a single array and stored as a network property (or Phase
            property for Physics data). When ``False``, the data for each
            object are stored under their own dictionary key, the structuring
            of which depends on the value of the ``flatten`` argument.

        flatten : boolean (default is ``False``)
            When ``True``, all objects are accessible from the top level
            of the dictionary.  When ``False`` objects are nested under their
            parent object.  If ``interleave`` is ``True`` this argument is
            ignored.

        categorize_by : string or list of strings
            Indicates how the dictionaries should be organized.  The list can
            contain any, all or none of the following strings:

            **'objects'** : If specified the dictionary keys will be stored
            under a general level corresponding to their type (e.g.
            'network/net_01/pore.all'). If ``interleave`` is ``True`` then
            the only categories are *network* and *phase*, since
            *geometry* and *physics* data get stored under their respective
            *network* and *phase*.

            **'data'** : If specified the data arrays are additionally
            categorized by ``label`` and ``property`` to separate *boolean*
            from *numeric* data.

            **'elements'** : If specified the data arrays are additionally
            categorized by ``pore`` and ``throat``, meaning that the propnames
            are no longer prepended by a 'pore.' or 'throat.'

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        if filename == '':
            filename = project.name
        filename = cls._parse_filename(filename, ext='hdf')

        dct = Dict.to_dict(network=network,
                           phases=phases,
                           element=element,
                           interleave=interleave,
                           flatten=flatten,
                           categorize_by=categorize_by)
        d = FlatDict(dct, delimiter='/')

        f = hdfFile(filename, "w")
        for item in list(d.keys()):  # copy keys; entries may be deleted below
            tempname = '_'.join(item.split('.'))
            arr = d[item]
            if d[item].dtype == 'O':
                logger.warning(item + ' has dtype object,' +
                               ' will not write to file')
                del d[item]
            elif 'U' in str(arr[0].dtype):
                pass
            else:
                f.create_dataset(name='/' + tempname,
                                 shape=arr.shape,
                                 dtype=arr.dtype,
                                 data=arr)
        return f
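
A hedged usage sketch for the method above; it assumes the class is exposed as openpnm.io.HDF5, and any network will do. Note the returned h5py handle is left open for the caller:

import openpnm as op

pn = op.network.Cubic(shape=[3, 3, 3])
f = op.io.HDF5.to_hdf5(network=pn, filename='net.hdf',
                       categorize_by=['element', 'data'])
print(list(f.keys()))   # top-level groups, e.g. 'pore' and 'throat'
f.close()               # caller must close the handle
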
Example #32
def get_options_based_on_normalizer(normalizer: Callable) -> Iterator[str]:
    flattened_config = FlatDict(Config.default_config, delimiter=".")

    for k in flattened_config:
        if Config._get_normalizer(k) == normalizer:
            yield k
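
The same idea in miniature, with plain dicts standing in for the real Config class (all names here are illustrative only):

from flatdict import FlatDict

default_config = {'ui': {'colors': True, 'width': 80}}
normalizers = {'ui.colors': bool, 'ui.width': int}

def options_for(normalizer):
    for key in FlatDict(default_config, delimiter='.'):
        if normalizers.get(key) is normalizer:
            yield key

print(list(options_for(bool)))   # ['ui.colors']
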
Example #33
    def save(cls, network, phases=[], filename=''):
        r"""
        Saves (transient/steady-state) data from the given objects into the
        specified file.

        Parameters
        ----------
        network : OpenPNM Network Object
            The network containing the desired data

        phases : list of OpenPNM Phase Objects (optional, default is none)
            A list of phase objects whose data are to be included

        Notes
        -----
        This method only saves the data, not any of the pore-scale models or
        other attributes.  To save an actual OpenPNM Project use the
        ``Workspace`` object.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        network = network[0]
        # Check if any of the phases has time series
        transient = GenericIO.is_transient(phases=phases)

        if filename == '':
            filename = project.name
        path = cls._parse_filename(filename=filename, ext='xmf')
        # Path is a pathlib object, so slice it up as needed
        fname_xdf = path.name
        d = Dict.to_dict(network, phases=phases, interleave=True,
                         flatten=False, categorize_by=['element', 'data'])
        D = FlatDict(d, delimiter='/')
        # Identify time steps
        t_steps = []
        if transient:
            for key in D.keys():
                if '@' in key:
                    t_steps.append(key.split('@')[1])
            t_steps = list(dict.fromkeys(t_steps))  # drop duplicate steps
        t_grid = create_grid(Name="TimeSeries", GridType="Collection",
                             CollectionType="Temporal")
        # If steady-state, define '0' time step
        if not transient:
            t_steps.append('0')
        # Setup xdmf file
        root = create_root('Xdmf')
        domain = create_domain()
        # Iterate over time steps present
        for t in range(len(t_steps)):
            # Define the hdf file
            if not transient:
                fname_hdf = path.stem+".hdf"
            else:
                fname_hdf = path.stem+'@'+t_steps[t]+".hdf"
            path_p = path.parent
            f = h5py.File(path_p.joinpath(fname_hdf), "w")
            # Add coordinate and connection information to top of HDF5 file
            f["coordinates"] = network["pore.coords"]
            f["connections"] = network["throat.conns"]
            # geometry coordinates
            row, col = f["coordinates"].shape
            dims = ' '.join((str(row), str(col)))
            hdf_loc = fname_hdf + ":coordinates"
            geo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                        Format='HDF', DataType="Float")
            geo = create_geometry(GeometryType="XYZ")
            geo.append(geo_data)
            # topology connections
            row, col = f["connections"].shape  # rows = throats, cols = 2 nodes
            dims = ' '.join((str(row), str(col)))
            hdf_loc = fname_hdf + ":connections"
            topo_data = create_data_item(value=hdf_loc, Dimensions=dims,
                                         Format="HDF", NumberType="Int")
            topo = create_topology(TopologyType="Polyline",
                                   NodesPerElement=str(2),
                                   NumberOfElements=str(row))
            topo.append(topo_data)
            # Make HDF5 file with all datasets, and no groups
            for item in list(D.keys()):  # copy keys; entries may be deleted below
                if D[item].dtype == 'O':
                    logger.warning(item + ' has dtype object,' +
                                   ' will not write to file')
                    del D[item]
                elif 'U' in str(D[item][0].dtype):
                    pass
                elif ('@' in item and t_steps[t] == item.split('@')[1]):
                    f.create_dataset(name='/'+item.split('@')[0]+'@t',
                                     shape=D[item].shape,
                                     dtype=D[item].dtype,
                                     data=D[item])
                elif ('@' not in item and t == 0):
                    f.create_dataset(name='/'+item, shape=D[item].shape,
                                     dtype=D[item].dtype, data=D[item])
            # Create a grid
            grid = create_grid(Name=t_steps[t], GridType="Uniform")
            time = create_time(type='Single', Value=t_steps[t])
            grid.append(time)
            # Add pore and throat properties
            for item in D.keys():
                if item not in ['coordinates', 'connections']:
                    if (('@' in item and t_steps[t] == item.split('@')[1]) or
                            ('@' not in item)):
                        attr_type = 'Scalar'
                        shape = D[item].shape
                        dims = (''.join([str(i) +
                                         ' ' for i in list(shape)[::-1]]))
                        if '@' in item:
                            item = item.split('@')[0]+'@t'
                            hdf_loc = fname_hdf + ":" + item
                        elif ('@' not in item and t == 0):
                            hdf_loc = fname_hdf + ":" + item
                        elif ('@' not in item and t > 0):
                            hdf_loc = (path.stem+'@'+t_steps[0]+".hdf" +
                                       ":" + item)
                        attr = create_data_item(value=hdf_loc,
                                                Dimensions=dims,
                                                Format='HDF',
                                                Precision='8',
                                                DataType='Float')
                        name = item.replace('/', ' | ')
                        if 'throat' in item:
                            Center = "Cell"
                        else:
                            Center = "Node"
                        el_attr = create_attribute(Name=name, Center=Center,
                                                   AttributeType=attr_type)
                        el_attr.append(attr)
                        grid.append(el_attr)
                    else:
                        pass
            grid.append(topo)
            grid.append(geo)
            t_grid.append(grid)
            # Close the HDF5 file
            f.close()
        domain.append(t_grid)
        root.append(domain)
        with open(path_p.joinpath(fname_xdf), 'w') as file:
            file.write(cls._header)
            file.write(ET.tostring(root).decode("utf-8"))
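
The load-bearing detail above is the transient naming convention: time-dependent properties carry an '@<time>' suffix in their flattened keys. A tiny sketch of how steps are recovered and keys renamed (the keys are made up):

keys = ['network/pore/diameter',
        'phase_01/pore/pressure@0.1',
        'phase_01/pore/pressure@0.2']

t_steps = list(dict.fromkeys(k.split('@')[1] for k in keys if '@' in k))
print(t_steps)            # ['0.1', '0.2']
for t in t_steps:         # per step, data is written under a neutral name
    for k in keys:
        if '@' in k and k.split('@')[1] == t:
            print(t, '->', k.split('@')[0] + '@t')
Example #34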
def child_page_recursive(pages,
                         space_id,
                         parent_page_id,
                         table_prefix,
                         recheck_pages_meet_criteria=False,
                         config_modified=False):
    """Recursively inserts page information into the database after making requests to the Confluence API.

    Args:
        pages (dict): A dictionary of pages to crawl through, have a look at the example config for more information.
        space_id (int): The top level space_id that the information relates to.
        parent_page_id (int): The current page's parent page id.
        table_prefix (str): The current database table name prefix.
        recheck_pages_meet_criteria (bool): Ensures that all current pages meet the criteria set out in the config file.
            If this is False, it will assume that all pages in the database meet the criteria and will only take delta changes for these.
        config_modified (bool): Whether the config has been modified since last launch.
    """
    # if the child page has not been updated since we last stored the information, then no need to check labels/title!
    for page_type in pages:
        for page_identifier in pages[page_type]:

            # Create tables to store the pages in and the information they contain.
            # table = table_prefix + '_' + page_type + '_' + page_identifier
            table = table_prefix.replace(
                ' ', '') + '_' + page_identifier.replace('_', '').replace(
                    ' ', '')[:5].lower()
            DatabaseAPI.create_table(table)
            info_table = table + '__info'
            DatabaseAPI.create_table(info_table, True)

            try:
                child_pages = ConfluenceAPI.get_child_page_ids(parent_page_id)
            except Exception:
                logger.warning(
                    'child_page_recursive: Unable to get child pages for: %s' %
                    str(parent_page_id))
                continue
            for child_page_id in child_pages:

                # Decision tree to see if the current page meets the criteria provided in the config file.
                # if we are not forced to recheck the page meets the criteria then use the pages in the database table.
                # else, check to see if the page meets either criteria.
                page_meets_criteria = False
                if not recheck_pages_meet_criteria and not config_modified:
                    if DatabaseAPI.check_data_exists(table, parent_page_id,
                                                     child_page_id):
                        # If the page already exists in the database ignore
                        # checking the page meets the criteria, unless forced to.
                        page_meets_criteria = True
                else:
                    if page_type == 'titles':
                        if child_pages[child_page_id][
                                'name'] == page_identifier:
                            page_meets_criteria = True
                    elif page_type == 'labels':
                        try:
                            if page_identifier in ConfluenceAPI.get_page_labels(
                                    child_page_id):
                                # Check that the page meets the criteria given,
                                # i.e. it is labelled as something/title is something and needs to be updated.
                                page_meets_criteria = True
                        except Exception:
                            logger.warning(
                                'child_page_recursive: Unable to retrieve labels for: %s'
                                % str(child_page_id))
                            continue

                if page_meets_criteria:
                    page_updated = DatabaseAPI.insert_or_update(
                        table, parent_page_id, child_page_id,
                        child_pages[child_page_id]['name'],
                        child_pages[child_page_id]['last_updated'], True)

                    # If the current page information was updated since the last run,
                    # delete all children information and re-fill it.
                    page_content = ''
                    if page_updated or config_modified:
                        logger.info(
                            'Updating information in space %s for page: %s' %
                            (str(space_id),
                             child_pages[child_page_id]['name']))
                        DatabaseAPI.delete(info_table, child_page_id)
                        try:
                            page_content = ConfluenceAPI.get_page_content(
                                child_page_id)
                        except Exception:
                            logger.warning(
                                'child_page_recursive: Unable to get page content for: %s'
                                % str(child_page_id))
                            continue

                    for page_info_type in pages[page_type][page_identifier]:
                        if page_info_type == 'pages':
                            child_page_recursive(
                                pages[page_type][page_identifier]
                                [page_info_type], space_id, child_page_id,
                                table, recheck_pages_meet_criteria,
                                config_modified)
                        else:
                            if page_updated or config_modified:
                                try:
                                    if page_info_type == 'panels':
                                        for panel_identifier in pages[
                                                page_type][page_identifier][
                                                    page_info_type]:
                                            panel = FlatDict(
                                                ConfluenceAPI.get_panel(
                                                    page_content,
                                                    panel_identifier,
                                                    space_id))
                                            for k, v in panel.items():
                                                # For each key remove list numbers.
                                                # i.e. FlatDict will put in :0, :1: for each list element.
                                                k = re.sub(r':(\d+):', ':', k)
                                                k = re.sub(r':(\d+)', '', k)
                                                DatabaseAPI.insert_or_update(
                                                    info_table, child_page_id,
                                                    k, v,
                                                    child_pages[child_page_id]
                                                    ['last_updated'])
                                    elif page_info_type == 'page_properties':
                                        # Get all page properties and put the values into the database.
                                        page_properties = ConfluenceAPI.get_page_properties(
                                            child_page_id, space_id,
                                            pages[page_type][page_identifier]
                                            [page_info_type])
                                        for page_property in page_properties:
                                            for val in page_properties[
                                                    page_property]:
                                                DatabaseAPI.insert_or_update(
                                                    info_table, child_page_id,
                                                    page_property, val,
                                                    child_pages[child_page_id]
                                                    ['last_updated'])
                                    elif page_info_type == 'headings':
                                        for heading_identifier in pages[
                                                page_type][page_identifier][
                                                    page_info_type]:
                                            heading = FlatDict(
                                                ConfluenceAPI.get_heading(
                                                    page_content,
                                                    heading_identifier))
                                            for k, v in heading.items():
                                                # For each key remove list numbers.
                                                # i.e. FlatDict will put in :0, :1: for each list element.
                                                k = re.sub(r':(\d+):', ':', k)
                                                k = re.sub(r':(\d+)', '', k)
                                                DatabaseAPI.insert_or_update(
                                                    info_table, child_page_id,
                                                    k, v,
                                                    child_pages[child_page_id]
                                                    ['last_updated'])
                                    elif page_info_type == 'page':
                                        page_information = FlatDict(
                                            ConfluenceAPI.get_page(
                                                page_content,
                                                child_pages[child_page_id]
                                                ['name']))
                                        for k, v in page_information.items():
                                            # For each key remove list numbers.
                                            # i.e. FlatDict will put in :0, :1: for each list element.
                                            k = re.sub(r':(\d+):', ':', k)
                                            k = re.sub(r':(\d+)', '', k)
                                            DatabaseAPI.insert_or_update(
                                                info_table, child_page_id, k,
                                                v, child_pages[child_page_id]
                                                ['last_updated'])
                                    elif page_info_type == 'url':
                                        for url_type in pages[page_type][
                                                page_identifier][
                                                    page_info_type]:
                                            url = ConfluenceAPI.get_page_urls(
                                                child_page_id, url_type)
                                            DatabaseAPI.insert_or_update(
                                                info_table, child_page_id,
                                                url_type, url,
                                                child_pages[child_page_id]
                                                ['last_updated'])
                                    else:
                                        logger.warning(
                                            'child_page_recursive: Unknown page information retrieval type: %s'
                                            % page_info_type)
                                except Exception:
                                    logger.error(
                                        'child_page_recursive: Error inserting data for page with id: %s, name: %s'
                                        % (str(child_page_id),
                                           child_pages[child_page_id]['name']))
                else:
                    # Cleanup the ignore, info and default table by removing any information associated with page.
                    # Child pages get cleaned up by the cleanup method.
                    DatabaseAPI.delete(table, parent_page_id, child_page_id)
                    DatabaseAPI.delete(info_table, child_page_id)
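
The FlatDict key cleanup used throughout child_page_recursive can be checked in isolation (with the mid-list pattern run first, as fixed above):

import re

def clean_key(k):
    k = re.sub(r':(\d+):', ':', k)   # 'panel:0:title' -> 'panel:title'
    k = re.sub(r':(\d+)', '', k)     # 'items:3'       -> 'items'
    return k

print(clean_key('panel:0:title'))    # panel:title
print(clean_key('items:3'))          # items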