Example #1
def APGD_Parameter(XYZ, R, wl, lambda_, gamma, L, eps):
    r"""
    Theoretical values of (mu, D, tau) in APGD, used as the initial point for SGD.

    Parameters
    ----------
    XYZ : :py:class:`~numpy.ndarray`
        (3, N_antenna) Cartesian array geometry.
    R : :py:class:`~numpy.ndarray`
        (3, N_px) Cartesian grid points.
    wl : float
        Wavelength >= 0 [m].
    lambda_ : float
        Regularization parameter.
    gamma : float
        Linear trade-off between lasso and ridge regularizers.
    L : float
        Lipschitz constant from Remark 3.3
    eps : float
        PSF truncation coefficient for
        :py:meth:`~deepwave.tools.math.graph.ConvolutionalFilter.estimate_order`.

    Returns
    -------
    p : :py:class:`~numpy.ndarray`
        (N_cell,) vectorized parameter value, output of
        :py:meth:`~deepwave.nn.crnn.Parameter.encode`.
    K : int
        Order of polynomial filter.
    """
    def e(i: int, N: int):
        v = np.zeros((N, ))
        v[i] = 1
        return v

    A = phased_array.steering_operator(XYZ, R, wl)
    N_antenna, N_px = A.shape
    alpha = 1 / L
    beta = 2 * lambda_ * alpha * (1 - gamma) + 1

    Ln, rho = graph.laplacian_exp(R, normalized=True)
    K = graph.ConvolutionalFilter.estimate_order(XYZ, rho, wl, eps)
    K *= 2  # double the estimated order as a safety margin.
    h = graph.ConvolutionalFilter(Ln, K)

    # Solve the LSQ problem
    #     \sum_{k = 0}^{K} \mu_{k} T_{k}(\tilde{L}) = \frac{I_{N} - 2 \alpha \abs{A^{H} A}^{2}}{\beta}
    R_focus = np.mean(R, axis=1)
    R_focus /= linalg.norm(R_focus)
    idx = np.argmax(R_focus @ R)
    psf_mag2 = np.abs(A.conj().T @ A[:, idx])**2
    c = (e(idx, N_px) - 2 * alpha * psf_mag2) / beta

    mu = h.fit(e(idx, N_px), c)
    D = A * np.sqrt(2 * alpha / beta)
    tau = np.ones((N_px, )) * (lambda_ * alpha * gamma / beta)

    parameter = Parameter(N_antenna, N_px, K)
    p = parameter.encode(None, mu, D, tau)
    return p, K
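
# A minimal usage sketch, assuming the imports already used in these examples
# (numpy as np, phased_array, pylinalg) and a loaded deepwave.nn.DataSet `D`,
# mirroring the call made in train_network() below; the eps value is
# illustrative only.
A = phased_array.steering_operator(D.XYZ, D.R, D.wl)
p_apgd, K = APGD_Parameter(D.XYZ, D.R, D.wl,
                           lambda_=np.median(D.lambda_),
                           gamma=D.gamma,
                           L=2 * pylinalg.eighMax(A),
                           eps=1e-2)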
Example #2
    def draw_rnn_psf(D, P, ax):
        N_antenna, N_px, K = D.XYZ.shape[1], D.R.shape[1], int(P['K'])
        parameter = crnn.Parameter(N_antenna, N_px, K)

        R_focus = np.mean(D.R, axis=1)
        R_focus /= linalg.norm(R_focus)
        idx_focus = np.argmax(R_focus @ D.R)

        p_vec = P['p_opt'][np.argmin(P['v_loss'])]
        p = dict(zip(['mu', 'D', 'tau'], parameter.decode(p_vec)))

        Ln, _ = graph.laplacian_exp(D.R, normalized=True)
        fltr = graph.ConvolutionalFilter(Ln, K)
        beam = fltr.filter(p['mu'], e(idx_focus, N_px))
        psf = np.abs(beam)
        psf[idx_focus] = 0

        if info['interpolation_order'] is not None:
            N = info['interpolation_order']
            approximate_kernel = (N > 15)
            interp = interpolate.Interpolator(N, approximate_kernel)
            N_s = D.R.shape[1]
            psf = interp(weight=np.ones((N_s, )),
                         support=D.R,
                         f=psf.reshape((1, N_px)),
                         r=D.R)
            psf = np.clip(psf, a_min=0, a_max=None)

        psf_plot = s2image.Image(data=psf, grid=D.R)
        psf_plot.draw(projection=info['projection'],
                      use_contours=False,
                      catalog_kwargs=dict(edgecolor='g', ),
                      ax=ax)
        ax.set_title(r'$\Psi_{RNN}(r, r_{0})$')
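
    # Note: draw_rnn_psf is a nested helper and relies on two names from its
    # enclosing scope that are not shown here: an `info` dict (keys
    # 'interpolation_order' and 'projection') and the one-hot helper `e`,
    # defined inside Example #1. A minimal sketch of the assumed helper:
    def e(i: int, N: int):
        v = np.zeros((N, ))
        v[i] = 1
        return v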
Example #3
def get_field(D, P, idx_img, img_type):
    """
    Parameters
    ----------
    D : list(:py:class:`~deepwave.nn.DataSet`)
        (9,) multi-frequency datasets.
    P : list(:py:class:`~deepwave.nn.crnn.Parameter`)
        (9,) multi-frequency trained parameters.
    idx_img : int
        Image index
    img_type : str
        One of ['APGD', 'RNN', 'DAS']

    Returns
    -------
    I : :py:class:`~numpy.ndarray`
        (9, N_px) frequency intensities of specified image.
    """
    I = []
    for idx_freq in range(9):
        Df, Pf = D[idx_freq], P[idx_freq]

        N_antenna = Df.XYZ.shape[1]
        N_px = Df.R.shape[1]
        K = int(Pf['K'])
        parameter = crnn.Parameter(N_antenna, N_px, K)
        sampler = Df.sampler()

        A = phased_array.steering_operator(Df.XYZ, Df.R, Df.wl)
        if img_type == 'APGD':
            _, I_apgd, _ = sampler.decode(Df[idx_img])
            I.append(I_apgd)
        elif img_type == 'RNN':
            Ln, _ = graph.laplacian_exp(Df.R, normalized=True)
            afunc = lambda _: func.retanh(Pf['tanh_lin_limit'], _)
            p_opt = Pf['p_opt'][np.argmin(Pf['v_loss'])]
            S, _, I_prev = sampler.decode(Df[idx_img])
            N_layer = Pf['N_layer']
            rnn_eval = crnn.Evaluator(N_layer, parameter, p_opt, Ln, afunc)
            I_rnn = rnn_eval(S, I_prev)
            I.append(I_rnn)
        elif img_type == 'DAS':
            S, _, _ = sampler.decode(Df[idx_img])
            alpha = 1 / (2 * pylinalg.eighMax(A))
            beta = 2 * Df.lambda_[idx_img] * alpha * (1 - Df.gamma) + 1

            I_das = spectral.DAS(Df.XYZ, S, Df.wl, Df.R) * 2 * alpha / beta
            I.append(I_das)
        else:
            raise ValueError(f"Parameter[img_type] invalid: expected one of ['APGD', 'RNN', 'DAS'], got {img_type!r}.")

    I = np.stack(I, axis=0)
    return I
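
# Usage sketch, assuming `D` and `P` are 9-element lists of per-frequency
# nn.DataSet objects and trained-parameter archives (e.g. np.load results,
# as in process() below); the index and image type are illustrative.
I_rnn = get_field(D, P, idx_img=0, img_type='RNN')  # (9, N_px) intensities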
Example #4
def process(folder_path):
    dataset_path = folder_path / 'D.npz'
    D = nn.DataSet.from_file(str(dataset_path))
    R_laplacian, _ = graph.laplacian_exp(D.R, normalized=True)
    N_antenna = D.XYZ.shape[1]
    N_px = D.R.shape[1]

    param_path = [
        _ for _ in folder_path.iterdir()
        if re.search(r"D_train_[01][01][01].npz", str(_))
    ]
    df = []
    for file in param_path:
        pattern = r"D_train_([01])([01])([01]).npz"
        m = re.search(pattern, str(file))
        fix_mu, fix_D, fix_tau = map(lambda _: bool(int(_)), m.group(1, 2, 3))

        P = np.load(file)
        idx_opt = np.argmin(P['v_loss'])

        parameter = crnn.Parameter(N_antenna, N_px, int(P['K']))
        ridge_loss = crnn.D_RidgeLossFunction(float(P['D_lambda']), parameter)
        laplacian_loss = crnn.LaplacianLossFunction(R_laplacian,
                                                    float(P['tau_lambda']),
                                                    parameter)

        p_opt = P['p_opt'][idx_opt]
        x0 = np.zeros((N_px, ))
        v_loss = (P['v_loss'][idx_opt] - ridge_loss.eval(p_opt, x0) -
                  laplacian_loss.eval(p_opt, x0))

        df.append(
            pd.DataFrame(
                {
                    'fix_mu': fix_mu,
                    'fix_D': fix_D,
                    'fix_tau': fix_tau,
                    'v_loss': v_loss
                },
                index=pd.RangeIndex(1)))
    df = (pd.concat(df, ignore_index=True)
            .set_index(['fix_mu', 'fix_D', 'fix_tau']))
    df_all = (df.assign(v_loss_rel=df['v_loss'].values /
                        df.at[(True, True, True), 'v_loss'])
                .sort_values(by='v_loss_rel'))
    return df_all
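
# Usage sketch, assuming a folder holding D.npz plus D_train_XYZ.npz parameter
# archives (X, Y, Z in {0, 1}); the path is a placeholder.
import pathlib

folder = pathlib.Path('path/to/experiment')
df_all = process(folder)
print(df_all)  # v_loss and v_loss_rel indexed by (fix_mu, fix_D, fix_tau)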
Example #5
    def draw_rnn(D, P, ax):
        idx_img = info['show_reconstruction']

        sampler = D.sampler()
        S, _, I_prev = sampler.decode(D[idx_img])
        sky_model = D.ground_truth[idx_img]

        N_antenna, N_px = D.XYZ.shape[1], D.R.shape[1]
        K, N_layer = int(P['K']), int(P['N_layer'])
        parameter = crnn.Parameter(N_antenna, N_px, K)
        p_vec = P['p_opt'][np.argmin(P['v_loss'])]
        p = dict(zip(['mu', 'D', 'tau'], parameter.decode(p_vec)))

        Ln, _ = graph.laplacian_exp(D.R, normalized=True)
        rnn_eval = crnn.Evaluator(
            N_layer, parameter, p_vec, Ln,
            lambda _: func.retanh(P['tanh_lin_limit'], _))
        t_start = time.time()
        I_rnn = rnn_eval(S, I_prev)
        exec_time = time.time() - t_start

        if info['interpolation_order'] is not None:
            N = info['interpolation_order']
            approximate_kernel = (N > 15)
            interp = interpolate.Interpolator(N, approximate_kernel)
            N_s = D.R.shape[1]
            I_rnn = interp(weight=np.ones((N_s, )),
                           support=D.R,
                           f=I_rnn.reshape((1, N_px)),
                           r=D.R)
            I_rnn = np.clip(I_rnn, a_min=0, a_max=None)

        rnn_plot = s2image.Image(data=I_rnn, grid=D.R)
        rnn_plot.draw(catalog=sky_model.xyz,
                      projection=info['projection'],
                      use_contours=False,
                      catalog_kwargs=dict(edgecolor='g', ),
                      ax=ax)
        ax.set_title(f'RNN {N_layer:02d} iter, {exec_time:.02f} [s]')
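
    # Invocation sketch, assuming matplotlib and an `info` dict in the
    # enclosing scope with the keys used above ('show_reconstruction',
    # 'interpolation_order', 'projection'); the figure layout is illustrative.
    #
    #     import matplotlib.pyplot as plt
    #     fig, ax = plt.subplots()
    #     draw_rnn(D, P, ax)
    #     fig.savefig('rnn_reconstruction.png')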
Example #6
def train_network(args):
    """
    Parameters
    ----------
    args : :py:class:`~argparse.Namespace`

    Returns
    -------
    opt : dict
        p_opt : :py:class:`~numpy.ndarray`
            (N_epoch + 1, N_cell) optimized parameter per epoch.
            `p_opt[0] = p_apgd`
        iter_loss : :py:class:`~numpy.ndarray`
            (N_epoch, N_batch) loss function value per (epoch, batch) on
            training set.
        t_loss : :py:class:`~numpy.ndarray`
            (N_epoch + 1,) loss function value per epoch on training set.
        v_loss : :py:class:`~numpy.ndarray`
            (N_epoch + 1,) loss function value per epoch on validation set.
        t : :py:class:`~numpy.ndarray`
            (N_epoch,) execution time [s] per epoch.
            Includes time to compute training/validation loss.
        idx_t : :py:class:`~numpy.ndarray`
            (N_k1,) sample indices used for training set.
        idx_v : :py:class:`~numpy.ndarray`
            (N_k2,) sample indices used for validation set.
        K : int
            Order of polynomial filter.
        D_lambda : float
        tau_lambda : float
        N_layer : int
        psf_threshold : float
        tanh_lin_limit : float
        lr : float
        mu : float
        batch_size : int
    """
    if args.seed is not None:
        np.random.seed(args.seed)

    D = nn.DataSet.from_file(str(args.dataset))
    A = phased_array.steering_operator(D.XYZ, D.R, D.wl)
    N_antenna, N_px = A.shape
    sampler = nn.Sampler(N_antenna, N_px)

    # Set optimization initial point.
    p_apgd, K = crnn.APGD_Parameter(D.XYZ, D.R, D.wl,
                                    lambda_=np.median(D.lambda_),
                                    gamma=D.gamma,
                                    L=2 * pylinalg.eighMax(A),
                                    eps=args.psf_threshold)
    parameter = crnn.Parameter(N_antenna, N_px, K)
    p0 = p_apgd.copy()
    if args.random_initializer:
        p_mu, p_D, p_tau = parameter.decode(p0)
        if not args.fix_mu:
            mu_step = np.abs(p_mu[~np.isclose(p_mu, 0)]).min()
            p_mu[:] = mu_step * np.random.randn(K + 1)
        if not args.fix_tau:
            tau_step = np.abs(p_tau[~np.isclose(p_tau, 0)]).min()
            p_tau[:] = tau_step * np.random.randn(N_px)
        if not args.fix_D:
            D_step = np.abs(p_D[~np.isclose(p_D, 0)]).min() / 2  # because complex-valued.
            p_D[:] = D_step * (     np.random.randn(N_antenna, N_px) +
                               1j * np.random.randn(N_antenna, N_px))

    R_laplacian, _ = graph.laplacian_exp(D.R, normalized=True)

    afunc = (lambda x: func.retanh(args.tanh_lin_limit, x),
             lambda x: func.d_retanh(args.tanh_lin_limit, x))
    trainable_parameter = (('mu', not args.fix_mu),
                           ('D', not args.fix_D),
                           ('tau', not args.fix_tau))
    sample_loss = crnn.SampleLossFunction(args.N_layer, parameter, sampler, R_laplacian,
                                          args.loss, afunc, trainable_parameter)
    ridge_loss = crnn.D_RidgeLossFunction(args.D_lambda, parameter)
    laplacian_loss = crnn.LaplacianLossFunction(R_laplacian, args.tau_lambda, parameter)
    sgd_solver = optim.StochasticGradientDescent(func=[sample_loss, ridge_loss, laplacian_loss],
                                                 batch_size=args.batch_size,
                                                 N_epoch=args.N_epoch,
                                                 alpha=args.lr,
                                                 mu=args.mu,
                                                 verbosity='HIGH')

    log_fname = (pathlib.Path(args.parameter.parent) /
                 (args.parameter.stem + ".log"))
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s | %(message)s',
                        filename=log_fname,
                        filemode='w')
    # Setup logging to stdout.
    console = logging.StreamHandler(sys.stdout)
    console.setLevel(logging.DEBUG)
    console_formatter = logging.Formatter('%(asctime)s | %(message)s')
    console.setFormatter(console_formatter)
    logging.getLogger(__name__).addHandler(console)
    logging.info(str(args))

    ### Dataset Preprocessing: drop all-0 samples + permutation
    _, I, _ = sampler.decode(D[:])
    sample_mask = ~np.isclose(I.sum(axis=1), 0)
    if args.tv_index is None:  # Random split
        idx_valid = np.flatnonzero(sample_mask)
        idx_sample = np.random.permutation(idx_valid)

        N_sample = len(idx_valid)
        idx_ts = idx_sample[int(N_sample * args.tv_ratio):]
        idx_vs = idx_sample[:int(N_sample * args.tv_ratio)]
    else:  # Deterministic split
        idx_tv = np.load(args.tv_index)
        if not (('idx_train' in idx_tv) and ('idx_test' in idx_tv)):
            raise ValueError('Parameter[tv_index] does not have keys "idx_train" and "idx_test".')
        idx_ts = idx_tv['idx_train']
        if not (argcheck.has_integers(idx_ts) and
                np.all((0 <= idx_ts) & (idx_ts < len(D)))):
            raise ValueError('Specified "idx_ts" values must be integer and in {0, ..., len(D) - 1}.')
        idx_vs = idx_tv['idx_test']
        if not (argcheck.has_integers(idx_vs) and
                np.all((0 <= idx_vs) & (idx_vs < len(D)))):
            raise ValueError('Specified "idx_vs" values must be integer and in {0, ..., len(D) - 1}.')

        idx_invalid = np.flatnonzero(~sample_mask)
        idx_ts = np.setdiff1d(idx_ts, idx_invalid)
        idx_vs = np.setdiff1d(idx_vs, idx_invalid)

    D_ts = nn.DataSet(D[idx_ts], D.XYZ, D.R, D.wl,
                      ground_truth=[D.ground_truth[idx] for idx in idx_ts],
                      lambda_=np.array([np.median(D.lambda_[idx_ts])] * len(idx_ts)),
                      gamma=D.gamma,
                      N_iter=D.N_iter[idx_ts],
                      tts=D.tts[idx_ts])
    D_vs = nn.DataSet(D[idx_vs], D.XYZ, D.R, D.wl,
                      ground_truth=[D.ground_truth[idx] for idx in idx_vs],
                      lambda_=np.array([np.median(D.lambda_[idx_vs])] * len(idx_vs)),
                      gamma=D.gamma,
                      N_iter=D.N_iter[idx_vs],
                      tts=D.tts[idx_vs])
    out = sgd_solver.fit(D_ts, D_vs, p0, file_name=args.parameter)

    # Augment output with extra information.
    out = dict(**out,
               D_lambda=args.D_lambda,
               tau_lambda=args.tau_lambda,
               N_layer=args.N_layer,
               psf_threshold=args.psf_threshold,
               tanh_lin_limit=args.tanh_lin_limit,
               lr=args.lr,
               mu=args.mu,
               batch_size=args.batch_size,
               K=K,
               idx_t=idx_ts,
               idx_v=idx_vs,
               loss=args.loss)
    return out
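
# A sketch of the argparse.Namespace consumed by train_network(), listing the
# attributes it reads; every value below is an illustrative placeholder, not a
# recommended setting.
import argparse
import pathlib

args = argparse.Namespace(
    dataset=pathlib.Path('D.npz'),           # nn.DataSet archive
    parameter=pathlib.Path('D_train.npz'),   # output target for sgd_solver.fit()
    seed=0,
    psf_threshold=1e-2,
    random_initializer=False,
    fix_mu=False, fix_D=False, fix_tau=False,
    N_layer=5,
    loss='<loss-name>',                      # identifier accepted by crnn.SampleLossFunction
    tanh_lin_limit=1.0,
    D_lambda=1e-2, tau_lambda=1e-2,
    lr=1e-3, mu=0.9,
    batch_size=32, N_epoch=10,
    tv_ratio=0.2,
    tv_index=None,                           # or an .npz file with 'idx_train' / 'idx_test'
)
opt = train_network(args)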