Example 1
    def at(directory: str or tuple or list or math.Tensor or 'Scene', id: int or math.Tensor or None = None) -> 'Scene':
        """
        Creates a `Scene` for an existing directory.

        See Also:
            `Scene.create()`, `Scene.list()`.

        Args:
            directory: Either directory containing scene folder if `id` is given, or scene path if `id=None`.
            id: (Optional) Scene `id`, will be determined from `directory` if not specified.

        Returns:
            `Scene` object for existing scene.
        """
        if isinstance(directory, Scene):
            assert id is None, f"Got id={id} but directory is already a Scene."
            return directory
        if isinstance(directory, (tuple, list)):
            directory = math.wrap(directory, batch('scenes'))
        directory = math.map(lambda d: expanduser(d), math.wrap(directory))
        if id is None:
            paths = directory
        else:
            id = math.wrap(id)
            paths = math.map(lambda d, i: join(d, f"sim_{i:06d}"), directory, id)
        # test all exist
        for path in math.flatten(paths):
            if not isdir(path):
                raise IOError(f"There is no scene at '{path}'")
        return Scene(paths)
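
A hedged usage sketch for Scene.at (not part of the original example; the paths are placeholders and the import path assumes phiflow 2.x, where Scene lives in phi.field):

# Usage sketch for Scene.at -- placeholder paths, phiflow 2.x import assumed.
from phi.field import Scene

# Open a single existing scene by its full path ...
scene = Scene.at("~/phi/my_simulation/sim_000000")

# ... or by parent directory plus id; Scene.at joins them as sim_{id:06d}.
scene = Scene.at("~/phi/my_simulation", id=0)

# A list of directories yields a batched Scene along a 'scenes' dimension.
scenes = Scene.at(["~/phi/run_a/sim_000000", "~/phi/run_b/sim_000000"])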
Example 2
    def copy_calling_script(self, full_trace=False, include_context_information=True):
        """
        Copies the Python file that called this method into the `src` folder of this `Scene`.

        In batch mode, the script is copied to all scenes.

        Args:
            full_trace: Whether to include scripts that indirectly called this method.
            include_context_information: If True, writes the phiflow version and `sys.argv` into `context.json`.
        """
        script_paths = [frame.filename for frame in inspect.stack()]
        script_paths = list(filter(lambda path: not _is_phi_file(path), script_paths))
        script_paths = set(script_paths) if full_trace else [script_paths[0]]
        self.subpath('src', create=True)
        for script_path in script_paths:
            if script_path.endswith('.py'):
                self.copy_src(script_path, only_external=False)
            elif 'ipython' in script_path:
                from IPython import get_ipython
                cells = get_ipython().user_ns['In']
                blocks = [f"#%% In[{i}]\n{cell}" for i, cell in enumerate(cells)]
                text = "\n\n".join(blocks)
                self.copy_src_text('ipython.py', text)
        if include_context_information:
            for path in math.flatten(self._paths):
                with open(join(path, 'src', 'context.json'), 'w') as context_file:
                    json.dump({
                        'phi_version': phi_version,
                        'argv': sys.argv
                    }, context_file)
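
For context, a minimal sketch of how a script might archive itself with copy_calling_script (an illustration, not from the original source; it assumes that Scene.create, referenced in Example 1, creates a new sim_xxxxxx folder and returns the Scene):

# Sketch: record the running script and call context for reproducibility.
from phi.field import Scene  # phiflow 2.x import path assumed

scene = Scene.create("~/phi/my_simulation")  # new sim_xxxxxx directory (placeholder path)
scene.copy_calling_script()                  # copies this .py file (or notebook cells) into src/
# src/context.json now records the phiflow version and sys.argv.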
Example 3
    def copy_calling_script(self,
                            full_trace=False,
                            include_context_information=True):
        """
        Copies the Python file that called this method into the `src` folder of this `Scene`.

        In batch mode, the script is copied to all scenes.

        Args:
            full_trace: Whether to include scripts that indirectly called this method.
            include_context_information: If True, writes the phiflow version and `sys.argv` into `context.json`.
        """
        script_paths = [frame[1] for frame in inspect.stack()]
        script_paths = list(
            filter(lambda path: not _is_phi_file(path), script_paths))
        script_paths = set(script_paths) if full_trace else [script_paths[0]]
        for path in math.flatten(self._paths):
            self.subpath('src', create=True)
            for script_path in script_paths:
                shutil.copy(script_path,
                            join(path, 'src', basename(script_path)))
            if include_context_information:
                with open(join(path, 'src', 'context.json'),
                          'w') as context_file:
                    json.dump({
                        'phi_version': phi_version,
                        'argv': sys.argv
                    }, context_file)
Example 4
 def _init_properties(self):
     if self._properties is not None:
         return
     dfile = join(next(iter(math.flatten(self._paths))), "description.json")
     if isfile(dfile):
         with open(dfile) as stream:
             self._properties = json.load(stream)
     else:
         self._properties = {}
Example 5
 def exist_properties(self):
     """
     Checks whether the file `description.json` exists or has existed.
     """
     if self._properties is not None:
         return True  # must have been written or read
     else:
         json_file = join(next(iter(math.flatten(self._paths))), "description.json")
         return isfile(json_file)
Example 6
def sparse_values(dimensions, extended_active_mask, extended_fluid_mask, sorting=None, periodic=False):
    """
    Builds a sparse matrix such that when applied to a flattened pressure channel, it calculates the laplace
    of that channel, taking into account obstacles and empty cells.

    :param dimensions: valid simulation dimensions. Pressure channel should be of shape (batch size, dimensions..., 1)
    :param extended_active_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :param extended_fluid_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :return: SciPy sparse matrix that acts as a laplace on a flattened pressure channel given obstacles and empty cells
    """
    N = int(np.prod(dimensions))
    d = len(dimensions)
    dims = range(d)

    values_list = []
    diagonal_entries = 0  # diagonal matrix entries

    gridpoints_linear = np.arange(N)
    gridpoints = np.stack(np.unravel_index(gridpoints_linear, dimensions))  # d x N array mapping linear indices to spatial indices

    for dim in dims:
        lower_active, self_active, upper_active = _dim_shifted(extended_active_mask, dim, (-1, 0, 1), diminish_others=(1, 1))
        lower_accessible, upper_accessible = _dim_shifted(extended_fluid_mask, dim, (-1, 1), diminish_others=(1, 1))

        stencil_upper = upper_active * self_active
        stencil_lower = lower_active * self_active
        stencil_center = - lower_accessible - upper_accessible

        diagonal_entries += math.flatten(stencil_center)

        dim_direction = math.expand_dims([1 if i == dim else 0 for i in range(d)], axis=-1)
        # --- Stencil upper cells ---
        upper_points, upper_idx = wrap_or_discard(gridpoints + dim_direction, dim, dimensions, periodic=collapsed_gather_nd(periodic, [dim, 1]))
        values_list.append(math.gather(math.flatten(stencil_upper), upper_idx))
        # --- Stencil lower cells ---
        lower_points, lower_idx = wrap_or_discard(gridpoints - dim_direction, dim, dimensions, periodic=collapsed_gather_nd(periodic, [dim, 0]))
        values_list.append(math.gather(math.flatten(stencil_lower), lower_idx))

    values_list.insert(0, math.minimum(diagonal_entries, -1.))
    values = math.concat(values_list, axis=0)
    if sorting is not None:
        values = math.gather(values, sorting)
    return values
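
As a sanity check on what these values encode, here is a pure-NumPy reference (an illustration, not phiflow code): for a closed 1-D domain of n fluid cells with zero padding in both extended masks, the rows of the resulting Laplace matrix are the familiar [1, -2, 1] stencil, with the diagonal clamped to at most -1 at the boundaries, matching the math.minimum(diagonal_entries, -1.) line above.

# Dense 1-D reference Laplacian for a closed, fully fluid domain (pure NumPy illustration).
import numpy as np

def dense_laplace_1d(n: int) -> np.ndarray:
    A = np.zeros((n, n))
    for i in range(n):
        if i > 0:
            A[i, i - 1] = 1.0  # stencil_lower
        if i < n - 1:
            A[i, i + 1] = 1.0  # stencil_upper
        # Diagonal: minus the number of accessible neighbours, at most -1.
        A[i, i] = min(-((i > 0) + (i < n - 1)), -1)
    return A

print(dense_laplace_1d(4))
# [[-1.  1.  0.  0.]
#  [ 1. -2.  1.  0.]
#  [ 0.  1. -2.  1.]
#  [ 0.  0.  1. -1.]]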
Example 7
 def _init_properties(self):
     if self._properties is not None:
         return
     json_file = join(next(iter(math.flatten(self._paths))), "description.json")
     if isfile(json_file):
         with open(json_file) as stream:
             self._properties = json.load(stream)
         if '__tensors__' in self._properties:
             for key in self._properties['__tensors__']:
                 self._properties[key] = math.from_dict(self._properties[key])
     else:
         self._properties = {}
Example 8
def sparse_pressure_matrix(dimensions, extended_active_mask, extended_fluid_mask, periodic=False):
    """
Builds a sparse matrix such that when applied to a flattened pressure channel, it calculates the laplace
of that channel, taking into account obstacles and empty cells.

    :param dimensions: valid simulation dimensions. Pressure channel should be of shape (batch size, dimensions..., 1)
    :param extended_active_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :param extended_fluid_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :return: SciPy sparse matrix that acts as a laplace on a flattened pressure channel given obstacles and empty cells
    """
    N = int(np.prod(dimensions))
    d = len(dimensions)
    A = scipy.sparse.lil_matrix((N, N), dtype=np.float32)
    dims = range(d)

    diagonal_entries = np.zeros(N, extended_active_mask.dtype)  # diagonal matrix entries

    gridpoints_linear = np.arange(N)
    gridpoints = np.stack(np.unravel_index(gridpoints_linear, dimensions))  # d x N array mapping linear indices to spatial indices

    for dim in dims:
        lower_active, self_active, upper_active = _dim_shifted(extended_active_mask, dim, (-1, 0, 1), diminish_others=(1, 1))
        lower_accessible, upper_accessible = _dim_shifted(extended_fluid_mask, dim, (-1, 1), diminish_others=(1, 1))

        stencil_upper = upper_active * self_active
        stencil_lower = lower_active * self_active
        stencil_center = - lower_accessible - upper_accessible

        diagonal_entries += math.flatten(stencil_center)

        dim_direction = math.expand_dims([1 if i == dim else 0 for i in range(d)], axis=-1)
        # --- Stencil upper cells ---
        upper_points, upper_idx = wrap_or_discard(gridpoints + dim_direction, dim, dimensions, periodic=collapsed_gather_nd(periodic, [dim, 1]))
        A[gridpoints_linear[upper_idx], upper_points] = stencil_upper.flatten()[upper_idx]
        # --- Stencil lower cells ---
        lower_points, lower_idx = wrap_or_discard(gridpoints - dim_direction, dim, dimensions, periodic=collapsed_gather_nd(periodic, [dim, 0]))
        A[gridpoints_linear[lower_idx], lower_points] = stencil_lower.flatten()[lower_idx]

    A[gridpoints_linear, gridpoints_linear] = math.minimum(diagonal_entries, -1)  # avoid 0, could lead to NaN

    return scipy.sparse.csc_matrix(A)
Example 9
def plot_scalars(scene: str or tuple or list or Scene or math.Tensor,
                 names: str or tuple or list or math.Tensor = None,
                 reduce: str or tuple or list or math.Shape = 'names',
                 down='',
                 smooth=1,
                 smooth_alpha=0.2,
                 smooth_linewidth=2.,
                 size=(8, 6),
                 transform: Callable = None,
                 tight_layout=True,
                 grid: str or dict = 'y',
                 log_scale='',
                 legend='upper right',
                 x='steps',
                 xlim=None,
                 ylim=None,
                 titles=True,
                 labels: math.Tensor = None,
                 xlabel: str = None,
                 ylabel: str = None,
                 colors: math.Tensor = 'default'):
    """

    Args:
        scene: `str` or `Tensor`. Scene paths containing the data to plot.
        names: Data files to plot for each scene. The file must be located inside the scene directory and have the name `log_<name>.txt`.
        reduce: Tensor dimensions along which all curves are plotted in the same diagram.
        down: Tensor dimensions along which diagrams are ordered top-to-bottom instead of left-to-right.
        smooth: `int` or `Tensor`. Number of data points to average, -1 for all.
        smooth_alpha: Opacity of the non-smoothed curves under the smoothed curves.
        smooth_linewidth: Line width of the smoothed curves.
        size: Figure size in inches.
        transform: Function `T(x,y) -> (x,y)` transforming the curves.
        tight_layout:
        grid:
        log_scale:
        legend:
        x:
        xlim:
        ylim:
        titles:
        labels:
        xlabel:
        ylabel:
        colors: Line colors as `str`, `int` or `Tensor`. Integers are interpreted as indices of the default color list.

    Returns:
        MatPlotLib [figure](https://matplotlib.org/stable/api/figure_api.html#matplotlib.figure.Figure)
    """
    scene = Scene.at(scene)
    additional_reduce = ()
    if names is None:
        first_path = next(iter(math.flatten(scene.paths)))
        names = [_str(n) for n in os.listdir(first_path)]
        names = [n[4:-4] for n in names if n.endswith('.txt') and n.startswith('log_')]
        names = math.wrap(names, batch('names'))
        additional_reduce = ['names']
    elif isinstance(names, str):
        names = math.wrap(names)
    elif isinstance(names, (tuple, list)):
        names = math.wrap(names, batch('names'))
    else:
        assert isinstance(names, math.Tensor), f"Invalid argument 'names': {type(names)}"
    if not isinstance(colors, math.Tensor):
        colors = math.wrap(colors)
    if xlabel is None:
        xlabel = 'Iterations' if x == 'steps' else 'Time (s)'

    shape = (scene.shape & names.shape)
    batches = shape.without(reduce).without(additional_reduce)

    cycle = list(plt.rcParams['axes.prop_cycle'].by_key()['color'])
    fig, axes = plt.subplots(batches.only(down).volume, batches.without(down).volume, figsize=size)
    axes = axes if isinstance(axes, numpy.ndarray) else [axes]

    for b, axis in zip(batches.meshgrid(), axes):
        assert isinstance(axis, plt.Axes)
        names_equal = names[b].rank == 0
        paths_equal = scene.paths[b].rank == 0
        if titles is not None and titles is not False:
            if isinstance(titles, str):
                axis.set_title(titles)
            elif names_equal:
                axis.set_title(display_name(str(names[b])))
            elif paths_equal:
                axis.set_title(os.path.basename(scene.paths[b].native()))
        if labels is not None:
            curve_labels = labels
        elif names_equal:
            curve_labels = math.map(os.path.basename, scene.paths[b])
        elif paths_equal:
            curve_labels = names[b]
        else:
            curve_labels = math.map(lambda p, n: f"{os.path.basename(p)} - {n}", scene.paths[b], names[b])

        def single_plot(name, path, label, i, color, smooth):
            logging.debug(f"Reading {os.path.join(path, f'log_{name}.txt')}")
            curve = numpy.loadtxt(os.path.join(path, f"log_{name}.txt"))
            if curve.ndim == 2:
                x_values, values, *_ = curve.T
            else:
                values = curve
                x_values = np.arange(len(values))
            if x == 'steps':
                pass
            else:
                assert x == 'time', f"x must be 'steps' or 'time' but got {x}"
                logging.debug(f"Reading {os.path.join(path, 'log_step_time.txt')}")
                _, x_values, *_ = numpy.loadtxt(os.path.join(path, "log_step_time.txt")).T
                values = values[:len(x_values)]
                x_values = np.cumsum(x_values[:len(values)])
            if transform:
                x_values, values = transform(np.stack([x_values, values]))
            if color == 'default':
                color = cycle[i]
            try:
                color = int(color)
            except ValueError:
                pass
            if isinstance(color, Number):
                color = cycle[int(color)]
            logging.debug(f"Plotting curve {label}")
            axis.plot(x_values, values, color=color, alpha=smooth_alpha, linewidth=1)
            curve = np.stack([x_values, values], -1)
            axis.plot(*smooth_uniform_curve(curve, smooth), color=color, linewidth=smooth_linewidth, label=label)
            if grid:
                if isinstance(grid, dict):
                    axis.grid(**grid)
                else:
                    grid_axis = 'both' if 'x' in grid and 'y' in grid else grid
                    axis.grid(which='both', axis=grid_axis, linestyle='--', linewidth=size[1] * 0.3)
            if 'x' in log_scale:
                axis.set_xscale('log')
            if 'y' in log_scale:
                axis.set_yscale('log')
            if xlim:
                axis.set_xlim(xlim)
            if ylim:
                axis.set_ylim(ylim)
            if xlabel:
                axis.set_xlabel(xlabel)
            if ylabel:
                axis.set_ylabel(ylabel)
            return name

        math.map(single_plot, names[b], scene.paths[b], curve_labels, math.range_tensor(shape.after_gather(b)), colors, smooth)
        if legend:
            axis.legend(loc=legend)
    # Final touches
    if tight_layout:
        plt.tight_layout()
    return fig
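
A hedged usage sketch for plot_scalars (not from the original source; the paths are placeholders and the import assumes phiflow 2.x, where the function is exposed via phi.vis):

# Usage sketch: plot scalar curves logged as log_<name>.txt inside scene directories.
from phi.vis import plot_scalars  # phiflow 2.x import path assumed

# Plot every log_<name>.txt found in a single scene (placeholder path).
fig = plot_scalars("~/phi/my_simulation/sim_000000")

# Compare the loss of two runs against wall-clock time, averaging over 16 points.
fig = plot_scalars(["~/phi/run_a/sim_000000", "~/phi/run_b/sim_000000"],
                   names='loss', x='time', smooth=16)
fig.savefig("loss.png")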
Example 10
 def remove(self):
     """ Deletes the scene directory and all contained files. """
     for p in math.flatten(self._paths):
         p = abspath(p)
         if isdir(p):
             shutil.rmtree(p)
Example 11
 def copy_src_text(self, filename, text):
     for path in math.flatten(self._paths):
         target = join(path, 'src', filename)
         with open(target, "w") as file:
             file.writelines(text)
Example 12
 def mkdir(self):
     for path in math.flatten(self._paths):
         isdir(path) or os.mkdir(path)
Example 13
 def copy_src(self, script_path, only_external=True):
     for path in math.flatten(self._paths):
         if not only_external or not _is_phi_file(script_path):
             shutil.copy(script_path, join(path, 'src', basename(script_path)))
Example 14
def sparse_values(dimensions,
                  extended_active_mask,
                  extended_fluid_mask,
                  sorting=None):
    """
    Builds a sparse matrix such that when applied to a flattened pressure channel, it calculates the laplace
    of that channel, taking into account obstacles and empty cells.

    :param dimensions: valid simulation dimensions. Pressure channel should be of shape (batch size, dimensions..., 1)
    :param extended_active_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :param extended_fluid_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :return: SciPy sparse matrix that acts as a laplace on a flattened pressure channel given obstacles and empty cells
    """
    N = int(np.prod(dimensions))
    d = len(dimensions)
    dims = range(d)

    values_list = []
    center_values = None  # diagonal matrix entries

    gridpoints_linear = np.arange(N)
    gridpoints = np.stack(np.unravel_index(
        gridpoints_linear,
        dimensions))  # d x N array mapping linear indices to spatial indices

    for dim in dims:
        upper_indices = tuple(
            [slice(None)] +
            [slice(2, None) if i == dim else slice(1, -1)
             for i in dims] + [slice(None)])
        center_indices = tuple(
            [slice(None)] +
            [slice(1, -1) if i == dim else slice(1, -1)
             for i in dims] + [slice(None)])
        lower_indices = tuple(
            [slice(None)] +
            [slice(0, -2) if i == dim else slice(1, -1)
             for i in dims] + [slice(None)])

        self_active = extended_active_mask[center_indices]
        stencil_upper = extended_active_mask[upper_indices] * self_active
        stencil_lower = extended_active_mask[lower_indices] * self_active
        stencil_center = -extended_fluid_mask[
            upper_indices] - extended_fluid_mask[lower_indices]

        if center_values is None:
            center_values = math.flatten(stencil_center)
        else:
            center_values = center_values + math.flatten(stencil_center)

        dim_direction = np.zeros_like(gridpoints)
        dim_direction[dim] = 1
        # Upper frames
        upper_indices = gridpoints + dim_direction
        upper_in_range_inx = np.nonzero(
            upper_indices[dim] < dimensions[dim])[0]
        values_list.append(
            math.gather(math.flatten(stencil_upper), upper_in_range_inx))
        # Lower frames
        lower_indices = gridpoints - dim_direction
        lower_in_range_inx = np.nonzero(lower_indices[dim] >= 0)[0]
        values_list.append(
            math.gather(math.flatten(stencil_lower), lower_in_range_inx))

    center_values = math.minimum(center_values, -1.)
    values_list.insert(0, center_values)

    values = math.concat(values_list, axis=0)
    if sorting is not None:
        values = math.gather(values, sorting)
    return values
Example 15
def sparse_pressure_matrix(dimensions, extended_active_mask,
                           extended_fluid_mask):
    """
    Builds a sparse matrix such that when applied to a flattened pressure channel, it calculates the laplace
    of that channel, taking into account obstacles and empty cells.

    :param dimensions: valid simulation dimensions. Pressure channel should be of shape (batch size, dimensions..., 1)
    :param extended_active_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :param extended_fluid_mask: Binary tensor with 2 more entries in every dimension than 'dimensions'.
    :return: SciPy sparse matrix that acts as a laplace on a flattened pressure channel given obstacles and empty cells
    """
    N = int(np.prod(dimensions))
    d = len(dimensions)
    A = scipy.sparse.lil_matrix((N, N), dtype=np.float32)
    dims = range(d)

    center_values = None  # diagonal matrix entries

    gridpoints_linear = np.arange(N)
    gridpoints = np.stack(np.unravel_index(
        gridpoints_linear,
        dimensions))  # d x N array mapping linear indices to spatial indices

    for dim in dims:
        upper_indices = tuple(
            [slice(None)] +
            [slice(2, None) if i == dim else slice(1, -1)
             for i in dims] + [slice(None)])
        center_indices = tuple(
            [slice(None)] +
            [slice(1, -1) if i == dim else slice(1, -1)
             for i in dims] + [slice(None)])
        lower_indices = tuple(
            [slice(None)] +
            [slice(0, -2) if i == dim else slice(1, -1)
             for i in dims] + [slice(None)])

        self_active = extended_active_mask[center_indices]
        stencil_upper = extended_active_mask[upper_indices] * self_active
        stencil_lower = extended_active_mask[lower_indices] * self_active
        stencil_center = -extended_fluid_mask[
            upper_indices] - extended_fluid_mask[lower_indices]

        if center_values is None:
            center_values = math.flatten(stencil_center)
        else:
            center_values = center_values + math.flatten(stencil_center)

        # Find entries in matrix
        dim_direction = np.zeros_like(gridpoints)
        dim_direction[dim] = 1
        # Upper frames
        upper_indices = gridpoints + dim_direction
        upper_in_range_inx = np.nonzero(upper_indices[dim] < dimensions[dim])
        upper_indices_linear = np.ravel_multi_index(
            upper_indices[:, upper_in_range_inx], dimensions)
        A[gridpoints_linear[upper_in_range_inx],
          upper_indices_linear] = stencil_upper.flatten()[upper_in_range_inx]
        # Lower frames
        lower_indices = gridpoints - dim_direction
        lower_in_range_inx = np.nonzero(lower_indices[dim] >= 0)
        lower_indices_linear = np.ravel_multi_index(
            lower_indices[:, lower_in_range_inx], dimensions)
        A[gridpoints_linear[lower_in_range_inx],
          lower_indices_linear] = stencil_lower.flatten()[lower_in_range_inx]

    A[gridpoints_linear, gridpoints_linear] = math.minimum(center_values, -1)

    return scipy.sparse.csc_matrix(A)
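
A hedged usage sketch for sparse_pressure_matrix (an illustration, not from the original source; it assumes the phiflow 1.x environment these functions come from, where phi.math operations also accept plain NumPy arrays, and the (batch, *spatial+2, 1) mask layout implied by the slicing above):

# Sketch: assemble the Laplace matrix for a fully fluid 4x4 domain and apply it.
import numpy as np

dimensions = (4, 4)
# Extended masks: 1 inside the domain, 0 in the one-cell padding (closed boundary).
active = np.zeros((1, dimensions[0] + 2, dimensions[1] + 2, 1), dtype=np.float32)
active[:, 1:-1, 1:-1, :] = 1.0
fluid = active.copy()

A = sparse_pressure_matrix(dimensions, active, fluid)  # SciPy CSC matrix of shape (16, 16)
pressure = np.random.rand(int(np.prod(dimensions))).astype(np.float32)
laplace_p = A.dot(pressure)  # discrete laplace of the flattened pressure field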
Example 16
 def grad(_x, _y, df):
     return math.flatten(math.expand(df * 0, batch(tmp=2))),
Example 17
 def _write_properties(self):
     for path in math.flatten(self.paths):
         with open(join(path, "description.json"), "w") as out:
             json.dump(self._properties, out, indent=2)