Example #1
    def __init__(self, N_layer, p, p_opt, Ln, afunc=func.relu):
        """
        Parameters
        ----------
        N_layer : int
            Number of iterations `L` in RNN.
        p : :py:class:`~deepwave.nn.crnn.Parameter`
            Serializer to encode/decode parameters.
        p_opt : :py:class:`~numpy.ndarray`
            (N_cell,) vectorized parameter encoding, output of
            :py:meth:`~deepwave.nn.crnn.Parameter.encode`.
        Ln : :py:class:`~scipy.sparse.csr_matrix`
            (N_px, N_px) normalized graph Laplacian.
        afunc : function
            Activation function.
        """
        if N_layer < 1:
            raise ValueError('Parameter[N_layer] must be positive.')
        self._N_layer = N_layer

        if not isinstance(p, Parameter):
            raise ValueError(
                'Parameter[p]: expected deepwave.nn.crnn.Parameter')

        self._afunc = afunc

        mu, D, tau = p.decode(p_opt)
        self._mu = mu.copy()
        self._tau = tau.copy()
        self._h = graph.ConvolutionalFilter(Ln, p._K)
        self._D_conj_T = np.ascontiguousarray(D.conj().T)
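
The `graph.ConvolutionalFilter` built above applies a degree-K polynomial of the normalized Laplacian to a signal; the T_{k} notation in Example #2 suggests a Chebyshev basis. A minimal sketch of that operation, assuming the Chebyshev recurrence (the `cheb_filter` name and exact behavior are assumptions, not the library's API):

import numpy as np
import scipy.sparse as sp

def cheb_filter(Ln: sp.csr_matrix, mu: np.ndarray, x: np.ndarray) -> np.ndarray:
    # Evaluate sum_k mu[k] * T_k(Ln) @ x with the Chebyshev recurrence
    # T_0 = I, T_1 = Ln, T_k = 2 Ln T_{k-1} - T_{k-2}.  (Assumes len(mu) >= 2.)
    t_prev, t_curr = x, Ln @ x
    y = mu[0] * t_prev + mu[1] * t_curr
    for k in range(2, len(mu)):
        t_prev, t_curr = t_curr, 2 * (Ln @ t_curr) - t_prev
        y = y + mu[k] * t_curr
    return y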
Example #2
def APGD_Parameter(XYZ, R, wl, lambda_, gamma, L, eps):
    r"""
    Theoretical values of mu, D, tau in APGD, used as the initialization point for SGD.

    Parameters
    ----------
    XYZ : :py:class:`~numpy.ndarray`
        (3, N_antenna) Cartesian array geometry.
    R : :py:class:`~numpy.ndarray`
        (3, N_px) Cartesian grid points.
    wl : float
        Wavelength \ge 0 [m].
    lambda_ : float
        Regularization parameter.
    gamma : float
        Linear trade-off between lasso and ridge regularizers.
    L : float
        Lipschitz constant from Remark 3.3.
    eps : float
        PSF truncation coefficient for
        :py:meth:`~deepwave.tools.math.graph.ConvolutionalFilter.estimate_order`.

    Returns
    -------
    p : :py:class:`~numpy.ndarray`
        (N_cell,) vectorized parameter value, output of
        :py:meth:`~deepwave.nn.crnn.Parameter.encode`.
    K : int
        Order of polynomial filter.
    """
    def e(i: int, N: int):
        # Canonical basis vector e_i of R^N.
        v = np.zeros((N, ))
        v[i] = 1
        return v

    A = phased_array.steering_operator(XYZ, R, wl)
    N_antenna, N_px = A.shape
    alpha = 1 / L
    beta = 2 * lambda_ * alpha * (1 - gamma) + 1

    Ln, rho = graph.laplacian_exp(R, normalized=True)
    K = graph.ConvolutionalFilter.estimate_order(XYZ, rho, wl, eps)
    K *= 2  # Double the estimated order to stay on the safe side.
    h = graph.ConvolutionalFilter(Ln, K)

    # Solve the LSQ problem \sum_{k = 0}^{K} \mu_{k} T_{k}(\tilde{L}) =
    #                       \frac{I_{N} - 2 \alpha \abs{A^{H} A}^{2}}{\beta}
    R_focus = np.mean(R, axis=1)
    R_focus /= linalg.norm(R_focus)
    idx = np.argmax(R_focus @ R)
    psf_mag2 = np.abs(A.conj().T @ A[:, idx])**2
    c = (e(idx, N_px) - 2 * alpha * psf_mag2) / beta

    mu = h.fit(e(idx, N_px), c)
    D = A * np.sqrt(2 * alpha / beta)
    tau = np.ones((N_px, )) * (lambda_ * alpha * gamma / beta)

    parameter = Parameter(N_antenna, N_px, K)
    p = parameter.encode(None, mu, D, tau)
    return p, K
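
A possible invocation with a toy random setup, for illustration only; the `L` value below is a placeholder, since the true Lipschitz constant comes from Remark 3.3 of the companion paper:

import numpy as np

rng = np.random.default_rng(0)
XYZ = rng.standard_normal((3, 16))   # toy 16-antenna geometry
R = rng.standard_normal((3, 300))
R /= np.linalg.norm(R, axis=0)       # 300 grid points on the unit sphere
p, K = APGD_Parameter(XYZ, R, wl=0.1, lambda_=1e-2, gamma=0.5,
                      L=1.0, eps=1e-3)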
Example #3
    def draw_rnn_psf(D, P, ax):
        N_antenna, N_px, K = D.XYZ.shape[1], D.R.shape[1], int(P['K'])
        parameter = crnn.Parameter(N_antenna, N_px, K)

        R_focus = np.mean(D.R, axis=1)
        R_focus /= linalg.norm(R_focus)
        idx_focus = np.argmax(R_focus @ D.R)

        p_vec = P['p_opt'][np.argmin(P['v_loss'])]
        p = dict(zip(['mu', 'D', 'tau'], parameter.decode(p_vec)))

        Ln, _ = graph.laplacian_exp(D.R, normalized=True)
        fltr = graph.ConvolutionalFilter(Ln, K)
        response = fltr.filter(p['mu'], e(idx_focus, N_px))
        psf = np.abs(response)  # PSF magnitude; `response` avoids shadowing the builtin filter()
        psf[idx_focus] = 0

        if info['interpolation_order'] is not None:  # `info` comes from the enclosing scope
            N = info['interpolation_order']
            approximate_kernel = N > 15
            interp = interpolate.Interpolator(N, approximate_kernel)
            N_s = D.R.shape[1]
            psf = interp(weight=np.ones((N_s, )),
                         support=D.R,
                         f=psf.reshape((1, N_px)),
                         r=D.R)
            psf = np.clip(psf, a_min=0, a_max=None)

        psf_plot = s2image.Image(data=psf, grid=D.R)
        psf_plot.draw(projection=info['projection'],
                      use_contours=False,
                      catalog_kwargs=dict(edgecolor='g', ),
                      ax=ax)
        ax.set_title(r'$\Psi_{RNN}(r, r_{0})$')
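
`e` and `info` are presumably defined in the enclosing scope of the original script. Minimal stand-ins consistent with how they are used above (the `projection` value is a hypothetical placeholder):

import numpy as np

def e(i: int, N: int):
    # Canonical basis vector e_i of R^N (same helper as in Example #2).
    v = np.zeros((N, ))
    v[i] = 1
    return v

info = dict(interpolation_order=None,  # or an int, e.g. 10, to enable interpolation
            projection='AEQD')         # placeholder projection name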
Example #4
    def __init__(self,
                 N_layer,
                 p,
                 s,
                 Ln,
                 loss='relative-l2',
                 afunc=(func.relu, func.d_relu),
                 trainable_parameter=(('mu', True), ('D', True),
                                      ('tau', True))):
        r"""
        Parameters
        ----------
        N_layer : int
            Number of iterations `L` in RNN.
        p : :py:class:`~deepwave.nn.crnn.Parameter`
            Serializer to encode/decode parameters.
        s : :py:class:`~deepwave.nn.Sampler`
            Serializer to encode/decode samples.
        Ln : :py:class:`~scipy.sparse.csr_matrix`
            (N_px, N_px) normalized graph Laplacian.
        loss : str
            If 'relative-l2', use the relative squared error
                \eps = (1/2) * \norm{\hat{x} - x^{L}}{2}^{2} / \norm{\hat{x}}{2}^{2}
            If 'shifted-kl', use the generalized shifted Kullback-Leibler divergence
                \eps = (1 + \hat{x})^{T} \log\bigParen{\frac{1 + \hat{x}}{1 + x^{L}}}
                     - 1^{T}\bigParen{\hat{x} - x^{L}}
        afunc : tuple(function)
            (activation function, derivative of the activation function)
        trainable_parameter : tuple(tuple(str, bool))
            Tuple of (str, bool) pairs stating whether the corresponding
            parameter should receive a gradient.
        """
        super().__init__()

        if N_layer < 1:
            raise ValueError('Parameter[N_layer] must be positive.')
        self._N_layer = N_layer

        if not isinstance(p, Parameter):
            raise ValueError(
                'Parameter[p]: expected deepwave.nn.crnn.Parameter')
        self._p = p

        if not isinstance(s, nn.Sampler):
            raise ValueError('Parameter[s]: expected deepwave.nn.Sampler')
        self._s = s

        N_px = self._s._N_px
        if not (isinstance(Ln, sp.csr_matrix) and (Ln.shape == (N_px, N_px))):
            raise ValueError('Parameter[Ln] must be (N_px, N_px) CSR.')
        self._h = graph.ConvolutionalFilter(Ln, self._p._K)

        if loss == 'relative-l2':
            self._use_l2 = True
        elif loss == 'shifted-kl':
            self._use_l2 = False
        else:
            raise ValueError(
                'Parameter[loss] must be one of {"relative-l2", "shifted-kl"}.'
            )

        if not (isinstance(afunc, tuple) and (len(afunc) == 2)):
            raise ValueError('Parameter[afunc]: expected (function, function)')
        self._afunc = afunc[0]
        self._afunc_d = afunc[1]

        param_msg = ('Parameter[trainable_parameter] must take form '
                     "(('mu', T/F), ('D', T/F), ('tau', T/F)).")
        if not (isinstance(trainable_parameter, tuple) and
                (len(trainable_parameter) == 3)
                and all([len(p) == 2 for p in trainable_parameter])):
            raise ValueError(param_msg)
        self._param = dict(trainable_parameter)
        if not ((set(self._param.keys()) == {'mu', 'D', 'tau'}) and
                (set(self._param.values()) <= {True, False})):
            raise ValueError(param_msg)

        # Buffer of intermediate values for grad().
        # Will always have shape (N_sample, N_layer, N_px)
        self._tape_buffer = None

        # Buffered (p, x) variables used in eval(). If the same ones are
        # passed to grad() afterwards, we can skip re-computing eval() inside
        # grad().
        self._tape_p = None
        self._tape_x = None
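
For reference, the two loss options from the docstring written out in NumPy (a sketch under the stated formulas; `x_hat` is the target \hat{x} and `x` the final iterate x^{L}):

import numpy as np

def relative_l2(x_hat, x):
    # \eps = (1/2) * ||x_hat - x||_2^2 / ||x_hat||_2^2
    return 0.5 * np.sum((x_hat - x) ** 2) / np.sum(x_hat ** 2)

def shifted_kl(x_hat, x):
    # \eps = (1 + x_hat)^T log((1 + x_hat) / (1 + x)) - 1^T (x_hat - x)
    # Assumes non-negative entries, so the logarithms are well defined.
    return (np.sum((1 + x_hat) * np.log((1 + x_hat) / (1 + x)))
            - np.sum(x_hat - x))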