Example No. 1
def cg_loop(x, dx, dy, residual, iterations):
    # One conjugate-gradient iteration; `non_batch_dims` (the axes to reduce over)
    # and `function` (the linear operator A) come from the enclosing scope.
    dx_dy = math.sum(dx * dy, axis=non_batch_dims, keepdims=True)  # p.Ap
    step_size = math.divide_no_nan(math.sum(dx * residual, axis=non_batch_dims, keepdims=True), dx_dy)
    x += step_size * dx            # advance the solution
    residual -= step_size * dy     # update the residual
    dx = residual - math.divide_no_nan(math.sum(residual * dy, axis=non_batch_dims, keepdims=True) * dx, dx_dy)  # next search direction
    dy = function(dx)              # apply the operator to the new direction
    return [x, dx, dy, residual, iterations + 1]
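Below is a minimal, self-contained NumPy sketch of the same conjugate-gradient update on a small symmetric positive-definite system. The names cg_solve, A and b are illustrative and not part of the snippet above; the zero-denominator check stands in for math.divide_no_nan.

import numpy as np

def cg_solve(A, b, iterations=10):
    x = np.zeros_like(b)
    residual = b - A @ x           # r = b - Ax
    dx = residual.copy()           # search direction
    dy = A @ dx                    # operator applied to the search direction
    for _ in range(iterations):
        dx_dy = dx @ dy
        if dx_dy == 0.0:           # stands in for divide_no_nan: stop once converged
            break
        step_size = (dx @ residual) / dx_dy
        x = x + step_size * dx
        residual = residual - step_size * dy
        dx = residual - ((residual @ dy) / dx_dy) * dx
        dy = A @ dx
    return x

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
print(cg_solve(A, b))              # close to np.linalg.solve(A, b) = [0.0909..., 0.6363...]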
Example No. 2
def l1_loss(tensor, batch_norm=True, reduce_batches=True):
    if struct.isstruct(tensor):
        # Recurse into structs and sum the loss of every contained tensor
        all_tensors = struct.flatten(tensor)
        return sum(l1_loss(tensor, batch_norm, reduce_batches) for tensor in all_tensors)
    if reduce_batches:
        total_loss = math.sum(math.abs(tensor))  # single scalar over all entries
    else:
        total_loss = math.sum(math.abs(tensor), axis=list(range(1, len(tensor.shape))))  # one value per batch entry
    if batch_norm and reduce_batches:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))  # average over the batch
    else:
        return total_loss
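A standalone NumPy sketch of the same L1-loss computation on a plain array, assuming the batch is along axis 0; l1_loss_np and the sample values are illustrative.

import numpy as np

def l1_loss_np(tensor, batch_norm=True, reduce_batches=True):
    if reduce_batches:
        total = np.sum(np.abs(tensor))                                     # scalar over all entries
    else:
        total = np.sum(np.abs(tensor), axis=tuple(range(1, tensor.ndim)))  # one value per batch entry
    if batch_norm and reduce_batches:
        return total / float(tensor.shape[0])                              # average over the batch
    return total

x = np.array([[1.0, -2.0], [3.0, -4.0]])
print(l1_loss_np(x))                        # (1+2+3+4) / 2 = 5.0
print(l1_loss_np(x, reduce_batches=False))  # [3. 7.]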
Example No. 3
def normalize_to(target, source=1, epsilon=1e-5, batch_dims=1):
    """
    Multiplies the target so that its total content matches the source.

    :param target: a tensor
    :param source: a tensor or number
    :param epsilon: small number to prevent division by zero, or None to disable the clamp
    :param batch_dims: number of leading batch dimensions that are not summed over
    :return: normalized tensor of the same shape as target
    """
    target_total = math.sum(target, axis=tuple(range(batch_dims, math.ndims(target))), keepdims=True)
    denominator = math.maximum(target_total, epsilon) if epsilon is not None else target_total
    source_total = math.sum(source, axis=tuple(range(batch_dims, math.ndims(source))), keepdims=True)
    return target * (source_total / denominator)
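A short NumPy illustration of what this normalization does, assuming a single leading batch dimension; the array values are made up for the example.

import numpy as np

target = np.array([[1.0, 3.0]])            # batch of 1, total = 4
source = np.array([[2.0, 6.0]])            # total = 8
target_total = np.sum(target, axis=1, keepdims=True)
source_total = np.sum(source, axis=1, keepdims=True)
normalized = target * (source_total / np.maximum(target_total, 1e-5))
print(normalized)                          # [[2. 6.]] -- same shape, total now matches source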
Example No. 4
def blur(field, radius, cutoff=None, kernel="1/1+x"):
    """
Warning: This function can cause NaN in the gradients, reason unknown.

Runs a blur kernel over the given tensor.
    :param field: tensor
    :param radius: weight function curve scale
    :param cutoff: kernel size
    :param kernel: Type of blur kernel (str). Must be in ('1/1+x', 'gauss')
    :return:
    """
    if cutoff is None:
        cutoff = min(int(round(radius * 3)), *field.shape[1:-1])

    xyz = np.meshgrid(
        *[range(-int(cutoff), (cutoff) + 1) for _ in field.shape[1:-1]])
    d = math.to_float(np.sqrt(np.sum([x**2 for x in xyz], axis=0)))
    if kernel == "1/1+x":
        weights = math.to_float(1) / (d / radius + 1)
    elif kernel.lower() == "gauss":
        weights = math.exp(-d / radius / 2)
    else:
        raise ValueError("Unknown kernel: %s" % kernel)
    weights /= math.sum(weights)
    weights = math.reshape(weights, list(weights.shape) + [1, 1])
    return math.conv(field, weights)
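A NumPy sketch of how the 2-D '1/1+x' kernel weights above are built; the radius and grid size are illustrative assumptions, and the convolution itself is omitted.

import numpy as np

radius, cutoff = 2.0, 6                               # cutoff = round(radius * 3)
coords = np.meshgrid(*[range(-cutoff, cutoff + 1)] * 2)
d = np.sqrt(np.sum([c**2 for c in coords], axis=0))   # distance from the kernel center
weights = 1.0 / (d / radius + 1.0)                    # falls off as 1 / (1 + d/radius)
weights /= np.sum(weights)                            # normalize so the blur preserves totals
print(weights.shape)                                  # (13, 13)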
Example No. 5
def fftfreq(resolution, mode='vector', dtype=None):
    """
    Returns the discrete Fourier transform sample frequencies.
    These are the frequencies corresponding to the components of the result of `math.fft` on a tensor of shape `resolution`.

    :param resolution: grid resolution measured in cells
    :param mode: one of ('vector', 'absolute', 'square')
    :param dtype: data type of the returned tensor
    :return: tensor holding the frequencies of the corresponding values computed by math.fft
    """
    assert mode in ('vector', 'absolute', 'square')
    k = np.meshgrid(*[np.fft.fftfreq(int(n)) for n in resolution],
                    indexing='ij')
    k = math.expand_dims(math.stack(k, -1), 0)
    if dtype is not None:
        k = k.astype(dtype)
    else:
        k = math.to_float(k)
    if mode == 'vector':
        return k
    k = math.sum(k**2, axis=-1, keepdims=True)
    if mode == 'square':
        return k
    else:
        return math.sqrt(k)
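A small standalone NumPy illustration of the three modes on a 4x4 grid; the resolution and printed values are only for demonstration.

import numpy as np

resolution = (4, 4)
k = np.meshgrid(*[np.fft.fftfreq(n) for n in resolution], indexing='ij')
k = np.expand_dims(np.stack(k, -1), 0)             # shape (1, 4, 4, 2): per-cell frequency vector
k_squared = np.sum(k**2, axis=-1, keepdims=True)   # 'square' mode
k_abs = np.sqrt(k_squared)                         # 'absolute' mode
print(k.shape, k_squared.shape)                    # (1, 4, 4, 2) (1, 4, 4, 1)
print(np.fft.fftfreq(4))                           # [ 0.    0.25 -0.5  -0.25]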
Example No. 6
def _divergence_nd(tensor, relative_shifts):
    # Finite-difference divergence: shift the padded tensor along every spatial
    # dimension and sum the (upper - lower) differences of the matching components.
    rank = spatial_rank(tensor)
    tensor = math.pad(tensor, _get_pad_width(rank, (-relative_shifts[0], relative_shifts[1])))
    components = []
    for dimension in range(rank):
        lower, upper = _dim_shifted(tensor, dimension, relative_shifts, diminish_others=(-relative_shifts[0], relative_shifts[1]), components=rank - dimension - 1)
        components.append(upper - lower)
    return math.sum(components, 0)
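A simplified 1-D NumPy analogue of the shifted-difference idea, assuming central-style shifts of (-1, 1); helpers such as _dim_shifted are not reproduced here and the sample values are illustrative.

import numpy as np

v = np.array([0.0, 1.0, 4.0, 9.0, 16.0])   # values of one velocity component
padded = np.pad(v, 1)                      # pad by the shift extent
upper = padded[2:]                         # shifted forward
lower = padded[:-2]                        # shifted backward
divergence = upper - lower                 # per-cell difference, summed over dimensions in N-D
print(divergence)                          # [ 1.  4.  8. 12. -9.]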
Example No. 7
def l_n_loss(tensor, n, batch_norm=True):
    if struct.isstruct(tensor):
        # Recurse into structs and sum the loss of every contained tensor
        all_tensors = struct.flatten(tensor)
        return sum(l_n_loss(tensor, n, batch_norm) for tensor in all_tensors)
    total_loss = math.sum(tensor**n) / n  # sum(x^n) / n
    if batch_norm:
        batch_size = math.shape(tensor)[0]
        return math.div(total_loss, math.to_float(batch_size))  # average over the batch
    else:
        return total_loss
Example No. 8
def fftfreq(resolution, mode='vector', dtype=np.float32):
    assert mode in ('vector', 'absolute', 'square')
    k = np.meshgrid(*[np.fft.fftfreq(int(n)) for n in resolution], indexing='ij')
    k = math.expand_dims(math.stack(k, -1), 0)
    k = k.astype(dtype)
    if mode == 'vector':
        return k
    k = math.sum(k**2, axis=-1, keepdims=True)
    if mode == 'square':
        return k
    else:
        return math.sqrt(k)
Example No. 9
def _sliced_laplace_nd(tensor, axes=None):
    """
    Laplace Stencil for N-Dimensions
    aggregated from (c)enter, (u)pper, and (l)ower parts
    """
    rank = spatial_rank(tensor)
    dims = range(rank)
    components = []
    for ax in dims:
        if _contains_axis(axes, ax, rank):
            lower, center, upper = _dim_shifted(tensor, ax, (-1, 0, 1), diminish_others=(1, 1), diminish_other_condition=lambda other_ax: _contains_axis(axes, other_ax, rank))
            components.append(upper + lower - 2 * center)
    return math.sum(components, 0)
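A 1-D NumPy illustration of the stencil (upper + lower - 2 * center); the sample values are illustrative and the _dim_shifted padding logic is not reproduced.

import numpy as np

u = np.array([0.0, 1.0, 4.0, 9.0, 16.0])   # u(x) = x**2 sampled on a unit grid
center = u[1:-1]
lower = u[:-2]
upper = u[2:]
laplace = upper + lower - 2 * center
print(laplace)                             # [2. 2. 2.] -- second derivative of x**2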
Example No. 10
def loop_body(pressure, momentum, A_times_momentum, residual, loop_index):
    """
    One conjugate-gradient iteration. Mapping to the usual CG notation:
    x -> pressure, search direction m -> momentum,
    A*m -> A_times_momentum, r -> residual.
    `non_batch_dims` and `apply_A` are taken from the enclosing scope.
    """
    tmp = math.sum(momentum * A_times_momentum,
                   axis=non_batch_dims,
                   keepdims=True)  # t = sum(mAm)
    a = math.divide_no_nan(
        math.sum(momentum * residual, axis=non_batch_dims, keepdims=True),
        tmp)  # a = sum(mr)/sum(mAm)
    pressure += a * momentum  # p += am
    residual -= a * A_times_momentum  # r -= aAm
    momentum = residual - math.divide_no_nan(
        math.sum(residual * A_times_momentum,
                 axis=non_batch_dims,
                 keepdims=True) * momentum,
        tmp)  # m = r - sum(rAm)*m/sum(mAm)
    A_times_momentum = apply_A(momentum)  # Am = A*m
    return [pressure, momentum, A_times_momentum, residual, loop_index + 1]
Example No. 11
def spatial_sum(tensor):
    # Sum over all axes returned by math.dimrange, then re-insert those axes
    # so the result keeps the rank of the input (like keepdims=True).
    summed = math.sum(tensor, axis=math.dimrange(tensor))
    for i in math.dimrange(tensor):
        summed = math.expand_dims(summed, i)
    return summed
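A NumPy sketch of the same result, assuming math.dimrange covers every axis after the leading batch axis, so this matches a keepdims sum over those axes; the array shape is illustrative.

import numpy as np

tensor = np.arange(8.0).reshape(1, 2, 2, 2)        # (batch, y, x, channels)
summed = np.sum(tensor, axis=(1, 2, 3), keepdims=True)
print(summed.shape, summed.ravel())                # (1, 1, 1, 1) [28.]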