def mlogit_warp_grad(alpha, beta, time, q, y, max_itr=8000, tol=1e-10,
                     delta=0.008, display=0):
    """
    Calculates optimal warping for functional multinomial logistic regression.

    :param alpha: scalar
    :param beta: numpy ndarray of shape (M,N) of N functions with M samples
    :param time: vector of size M describing the sample points
    :param q: numpy ndarray of shape (M,N) of N functions with M samples
    :param y: numpy ndarray of shape (1,N) responses
    :param max_itr: maximum number of iterations (Default=8000)
    :param tol: stopping tolerance (Default=1e-10)
    :param delta: gradient step size (Default=0.008)
    :param display: display iterations (Default=0)

    :rtype: numpy ndarray
    :return gam_old: warping function
    """
    # All array arguments are made C-contiguous before crossing into the
    # C extension; y is additionally cast to int32 since the extension
    # expects integer class labels.
    gam_old = mw.mlogit_warp(np.ascontiguousarray(alpha),
                             np.ascontiguousarray(beta),
                             time,
                             np.ascontiguousarray(q),
                             np.ascontiguousarray(y, dtype=np.int32),
                             max_itr, tol, delta, display)

    return gam_old
# Debug driver: load test data from an HDF5 fixture and run the mlogit
# warp C routine directly.
import numpy as np
import fdasrsf as fs
import mlogit_warp as mw
import h5py

# NOTE(review): hard-coded, machine-specific path — fine for a personal
# debug script, but confirm before sharing.
# Use a context manager so the HDF5 file handle is closed even if a read
# fails (the original left the file open for the life of the process).
with h5py.File(
        '/Users/jdtucker/Documents/Research/fdasrsf/debug/debug_data.h5') as fun:
    q = fun['q'][:]
    y = fun['y'][:]
    time = fun['time'][:]
    alpha = fun['alpha'][:]
    beta = fun['beta'][:]

max_itr = 10000  # 4000
tol = 1e-10
delta = .01
display = 1

gam_old = mw.mlogit_warp(np.ascontiguousarray(alpha),
                         np.ascontiguousarray(beta),
                         time,
                         np.ascontiguousarray(q),
                         np.ascontiguousarray(y, dtype=np.int32),
                         max_itr, tol, delta, display)
# Debug driver (duplicate variant): load test data from an HDF5 fixture
# and run the mlogit warp C routine directly.
import numpy as np
import fdasrsf as fs
import mlogit_warp as mw
import h5py

# NOTE(review): hard-coded, machine-specific path — fine for a personal
# debug script, but confirm before sharing.
# Use a context manager so the HDF5 file handle is closed even if a read
# fails (the original left the file open for the life of the process).
with h5py.File(
        '/Users/jdtucker/Documents/Research/fdasrsf/debug/debug_data.h5') as fun:
    q = fun['q'][:]
    y = fun['y'][:]
    time = fun['time'][:]
    alpha = fun['alpha'][:]
    beta = fun['beta'][:]

max_itr = 10000  # 4000
tol = 1e-10
delta = .01
display = 1

gam_old = mw.mlogit_warp(np.ascontiguousarray(alpha),
                         np.ascontiguousarray(beta),
                         time,
                         np.ascontiguousarray(q),
                         np.ascontiguousarray(y, dtype=np.int32),
                         max_itr, tol, delta, display)