Example #1
def blur(field, radius, cutoff=None, kernel="1/1+x"):
    """
Warning: This function can cause NaN in the gradients, reason unknown.

Runs a blur kernel over the given tensor.
    :param field: tensor
    :param radius: weight function curve scale
    :param cutoff: kernel size
    :param kernel: Type of blur kernel (str). Must be in ('1/1+x', 'gauss')
    :return:
    """
    if cutoff is None:
        cutoff = min(int(round(radius * 3)), *field.shape[1:-1])

    xyz = np.meshgrid(*[range(-int(cutoff), int(cutoff) + 1) for _ in field.shape[1:-1]])
    d = math.to_float(np.sqrt(np.sum([x**2 for x in xyz], axis=0)))
    if kernel == "1/1+x":
        weights = math.to_float(1) / (d / radius + 1)
    elif kernel.lower() == "gauss":
        weights = math.exp(-d / radius / 2)
    else:
        raise ValueError("Unknown kernel: %s" % kernel)
    weights /= math.sum(weights)
    weights = math.reshape(weights, list(weights.shape) + [1, 1])
    return math.conv(field, weights)
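Usage sketch (not part of the original listing), assuming the `math` backend used throughout these examples and the `randn` helper from Example #11 below:

field = randn((1, 64, 64, 1))       # hypothetical batch of one 64x64 scalar field
smoothed = blur(field, radius=2.0)  # default '1/1+x' kernel; cutoff defaults to round(3 * radius)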
Example #2
def broyden(function, x0, inv_J0, accuracy=1e-5, max_iterations=1000, back_prop=False):
    """
    Broyden's method for finding a root of the given function.
    Given a function `f: R^n -> R^n`, it finds an x such that `f(x)=0` within the specified `accuracy`.

    Boryden's method does not require explicit computations of the Jacobian except for the initial inverse `inv_J0`.

    :param function: Differentiable black-box function mapping from tensors like x0 to tensors like y
    :param x0: initial guess for x
    :param inv_J0: Approximation of the inverse Jacobian matrix of f at x0. The closer this is to the true matrix, the fewer iterations will be required.
    :param max_iterations: (optional) maximum number of CG iterations to perform
    :param back_prop: Whether to enable auto-differentiation. This induces a memory cost scaling with the number of iterations. Otherwise, the memory cost is constant.
    :param accuracy: (optional) the algorithm terminates once |f(x)| ≤ accuracy for every entry. If None, the algorithm runs until `max_iterations` is reached.
    :return: list of SolveResults with [0] being the forward solve result, [1] backward solve result (will be added once backward pass is computed)
    :rtype: SolveResult
    """
    x0 = math.to_float(x0)
    y0 = function(x0)
    inv_J0 = math.to_float(inv_J0)

    def broyden_loop(x, y, inv_J, iterations):
        # --- Adjust our guess for x ---
        dx = - math.einsum('bij,bj->bi', inv_J, y)  # - J^-1 * y
        next_x = x + dx
        next_y = function(next_x)
        df = next_y - y
        dy_back_projected = math.einsum('bij,bj->bi', inv_J, df)
        # --- Approximate next inverted Jacobian ---
        numerator = math.einsum('bi,bj,bjk->bik', dx - dy_back_projected, dx, inv_J)  # (dx - J^-1 * df) * dx^T * J^-1
        denominator = math.einsum('bi,bi->b', dx, dy_back_projected)  # dx^T * J^-1 * df
        next_inv_J = inv_J + numerator / denominator
        return [next_x, next_y, next_inv_J, iterations + 1]

    x_, y_, _, iterations = math.while_loop(_max_residual_condition(1, accuracy), broyden_loop, [x0, y0, inv_J0, 0], back_prop=back_prop, name='Broyden', maximum_iterations=max_iterations)
    return SolveResult(iterations, x_, y_)
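A minimal root-finding sketch (not part of the original listing); shapes follow the batched (batch, n) layout implied by the 'bij,bj->bi' contractions above, and a NumPy-compatible backend is assumed:

import numpy as np

def f(x):
    return x ** 2 - math.to_float(np.array([[4.0, 9.0]]))   # roots at x = (2, 3)

x0 = np.ones((1, 2))                  # batch of one 2-vector
inv_J0 = 0.5 * np.eye(2)[None, ...]   # exact inverse Jacobian at x0, since J = diag(2 * x0)
result = broyden(f, x0, inv_J0, accuracy=1e-6)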
Example #3
def conjugate_gradient(function, y, x0, accuracy=1e-5, max_iterations=1000, back_prop=False):
    """
    Solve the linear system of equations `A·x=y`  using the conjugate gradient (CG) algorithm.
    A, x and y can have arbitrary matching shapes, i.e. this method can be used to solve vector and matrix equations.

    Since representing the matrix A in memory might not be feasible, a linear function of x can be specified that computes `function(x) = Ax`.

    The implementation is based on https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf

    :param y: Desired output of `f(x)`
    :param function: linear function of x that returns A·x
    :param x0: initial guess for the value of x
    :param accuracy: (optional) the algorithm terminates once |f(x)-y| ≤ accuracy for every entry. If None, the algorithm runs until `max_iterations` is reached.
    :param max_iterations: (optional) maximum number of CG iterations to perform
    :param back_prop: Whether to enable auto-differentiation. This induces a memory cost scaling with the number of iterations. Otherwise, the memory cost is constant.
    :return: Pair containing the result for x and the number of iterations performed
    """
    y = math.to_float(y)
    x0 = math.to_float(x0)
    dx0 = residual0 = y - function(x0)
    dy0 = function(dx0)
    non_batch_dims = tuple(range(1, len(y.shape)))

    def cg_loop(x, dx, dy, residual, iterations):
        dx_dy = math.sum(dx * dy, axis=non_batch_dims, keepdims=True)  # dx^T · A·dx
        step_size = math.divide_no_nan(math.sum(dx * residual, axis=non_batch_dims, keepdims=True), dx_dy)
        x += step_size * dx
        residual -= step_size * dy
        dx = residual - math.divide_no_nan(math.sum(residual * dy, axis=non_batch_dims, keepdims=True) * dx, dx_dy)  # next conjugate search direction
        dy = function(dx)
        return [x, dx, dy, residual, iterations + 1]

    x_, _, _, residual_, iterations_ = math.while_loop(_max_residual_condition(3, accuracy), cg_loop, [x0, dx0, dy0, residual0, 0], back_prop=back_prop, name="ConjGrad", maximum_iterations=max_iterations)
    return SolveResult(iterations_, x_, residual_)
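A minimal sketch (not part of the original listing), using a trivial symmetric positive-definite operator so the expected solution is known:

import numpy as np

def apply_A(x):
    return 2.0 * x                    # A = 2·I, hence x converges to y / 2

y = np.ones((1, 8))
x0 = np.zeros((1, 8))
result = conjugate_gradient(apply_A, y, x0, accuracy=1e-6)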
Example #4
def fftfreq(resolution, mode='vector', dtype=None):
    """
    Returns the discrete Fourier transform sample frequencies.
    These are the frequencies corresponding to the components of the result of `math.fft` on a tensor of shape `resolution`.

    :param resolution: grid resolution measured in cells
    :param mode: one of ('vector', 'absolute', 'square')
    :param dtype: data type of the returned tensor
    :return: tensor holding the frequencies of the corresponding values computed by math.fft
    """
    assert mode in ('vector', 'absolute', 'square')
    k = np.meshgrid(*[np.fft.fftfreq(int(n)) for n in resolution],
                    indexing='ij')
    k = math.expand_dims(math.stack(k, -1), 0)
    if dtype is not None:
        k = k.astype(dtype)
    else:
        k = math.to_float(k)
    if mode == 'vector':
        return k
    k = math.sum(k**2, axis=-1, keepdims=True)
    if mode == 'square':
        return k
    else:
        return math.sqrt(k)
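Usage sketch (not part of the original listing):

k_vec = fftfreq([32, 32])                    # per-axis frequencies, shape (1, 32, 32, 2)
k_abs = fftfreq([32, 32], mode='absolute')   # frequency magnitudes, shape (1, 32, 32, 1)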
Example #5
def _conv_laplace_3d(tensor):
    """
    3D/Cube laplace stencil in 3D+2D [3,3,3,1,1]
    array([[[[[ 0.]], [[ 0.]], [[ 0.]]],
            [[[ 0.]], [[ 1.]], [[ 0.]]],
            [[[ 0.]], [[ 0.]], [[ 0.]]]],
           [[[[ 0.]], [[ 1.]], [[ 0.]]],
            [[[ 1.]], [[-6.]], [[ 1.]]],
            [[[ 0.]], [[ 1.]], [[ 0.]]]],
           [[[[ 0.]], [[ 0.]], [[ 0.]]],
            [[[ 0.]], [[ 1.]], [[ 0.]]],
            [[[ 0.]], [[ 0.]], [[ 0.]]]]]
    returns ...

    padding explicitly done in laplace(), hence here not needed
    """
    kernel = math.to_float([[[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]],
                            [[0., 1., 0.], [1., -6., 1.], [0., 1., 0.]],
                            [[0., 0., 0.], [0., 1., 0.], [0., 0., 0.]]])
    kernel = kernel.reshape((3, 3, 3, 1, 1))
    if tensor.shape[-1] == 1:
        return math.conv(tensor, kernel, padding='VALID')
    else:
        return math.concat([
            math.conv(tensor[..., i:i + 1], kernel, padding='VALID')
            for i in range(tensor.shape[-1])
        ], -1)
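Usage sketch (not part of the original listing), assuming the (batch, z, y, x, components) layout and the `randn` helper from Example #11; the one-cell halo stands in for the padding normally added by laplace():

field = randn((1, 18, 18, 18, 1))   # 16^3 interior plus a one-cell halo per side
lap = _conv_laplace_3d(field)       # shape (1, 16, 16, 16, 1)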
Example #6
def _conv_laplace_2d(tensor):
    """
    2D Laplace via the 5-point stencil [[0, 1, 0], [1, -4, 1], [0, 1, 0]],
    applied channel by channel. As in the 3D variant, padding is expected
    to be done by the caller; the convolution uses padding='VALID'.
    """
    kernel = math.to_float([[0., 1., 0.], [1., -4., 1.], [0., 1., 0.]])
    kernel = kernel.reshape((3, 3, 1, 1))
    if tensor.shape[-1] == 1:
        return math.conv(tensor, kernel, padding='VALID')
    else:
        return math.concat([
            math.conv(tensor[..., i:i + 1], kernel, padding='VALID')
            for i in range(tensor.shape[-1])
        ], -1)
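The analogous 2D sketch (not part of the original listing):

field2d = randn((1, 34, 34, 1))     # 32^2 interior plus a one-cell halo per side
lap2d = _conv_laplace_2d(field2d)   # shape (1, 32, 32, 1)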
Example #7
def l_n_loss(tensor, n, batch_norm=True):
    """
    Computes sum(tensor ** n) / n, optionally divided by the batch size.
    If `tensor` is a struct, the losses of all contained tensors are summed.
    """
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(l_n_loss(t, n, batch_norm) for t in all_tensors)
    total_loss = math.sum(tensor**n) / n
    if batch_norm:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))
    else:
        return total_loss
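Usage sketch (not part of the original listing):

loss = l_n_loss(randn((4, 32, 32, 1)), n=2)   # sum(x**2) / 2, divided by the batch size of 4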
Example #8
def genarray(shape):
    """
    Generates a random array whose spectrum falls off with frequency,
    producing a smooth random field.
    Note: `dtype` and `power` are free variables supplied by the enclosing
    scope in the original source; they are not parameters of this function.
    """
    fft = randn(shape, dtype) + 1j * randn(shape, dtype)
    k = fftfreq(shape[1:-1], mode='absolute')
    shape_fac = math.sqrt(math.mean(shape[1:-1]))  # e.g. 16 -> 4, 64 -> 8, 256 -> 16
    fft *= (1 / (k + 1))**power * power * shape_fac
    array = math.real(math.ifft(fft))
    if dtype is not None:
        return array.astype(dtype)
    else:
        return math.to_float(array)
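Usage sketch (not part of the original listing); `power` and `dtype` must be bound in the enclosing (here: module) scope, so hypothetical values are set first:

power, dtype = 4, None              # hypothetical free-variable bindings
noise = genarray((1, 64, 64, 1))    # smooth random field, low frequencies dominate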
Example #9
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
    """
    Computes the sum of absolute values of `tensor`.
    If `reduce_batches` is False, the sum is taken per batch entry instead.
    If `batch_norm` is set (and batches are reduced), the result is divided by the batch size.
    If `tensor` is a struct, the losses of all contained tensors are summed.
    """
    if struct.isstruct(tensor):
        all_tensors = struct.flatten(tensor)
        return sum(l1_loss(t, batch_norm, reduce_batches) for t in all_tensors)
    if reduce_batches:
        total_loss = math.sum(math.abs(tensor))
    else:
        total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))
    if batch_norm and reduce_batches:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))
    else:
        return total_loss
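Usage sketch (not part of the original listing):

per_sample = l1_loss(randn((4, 32, 32, 1)), reduce_batches=False)   # one L1 norm per batch entry, shape (4,)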
Example #10
def indices_tensor(tensor, dtype=None):
    """
    Returns an index tensor of the same spatial shape as the given tensor.
    Each index denotes the location within the tensor starting from zero.
    Indices are encoded as vectors in the index tensor.

    :param tensor: a tensor of shape (batch size, spatial dimensions..., component size)
    :param dtype: NumPy data type or `None` for default
    :return: an index tensor of shape (1, spatial dimensions..., spatial rank)
    """
    spatial_dimensions = list(tensor.shape[1:-1])
    idx_zyx = np.meshgrid(*[range(dim) for dim in spatial_dimensions],
                          indexing='ij')
    idx = np.stack(idx_zyx, axis=-1).reshape([1] + spatial_dimensions + [len(spatial_dimensions)])
    if dtype is not None:
        return idx.astype(dtype)
    else:
        return math.to_float(idx)
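Usage sketch (not part of the original listing):

import numpy as np
coords = indices_tensor(np.zeros((1, 4, 4, 1)))   # shape (1, 4, 4, 2); each cell holds its (row, column) index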
Example #11
def randn(shape, dtype=None):
    """
    Samples a standard-normal array of the given shape, cast to `dtype`
    or the default float type. `None` entries in `shape` are handled by
    the `_none_to_one` helper (not shown here).
    """
    array = np.random.randn(*_none_to_one(shape))
    if dtype is not None:
        return array.astype(dtype)
    else:
        return math.to_float(array)
Example #12
def ones(shape, dtype=None):
    """
    Returns an array of ones with the given shape, in `dtype` or the
    default float type. `None` entries in `shape` are handled by the
    `_none_to_one` helper (not shown here).
    """
    if dtype is not None:
        return np.ones(_none_to_one(shape), dtype)
    else:
        return math.to_float(np.ones(_none_to_one(shape), np.int8))
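Usage sketch (not part of the original listing) for the two helpers above; `_none_to_one` (not shown) presumably maps `None` shape entries to 1:

noise = randn((1, 16, 16, 1))    # standard-normal field
unit = ones((None, 16, 16, 1))   # `None` batch dimension presumably becomes 1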