Example #1
def log_py_zM_bin_j(lambda_bin_j, y_bin_j, zM, k, nj_bin_j): 
    ''' Compute log p(y_j | zM, s1 = k1) for the jth binary/count variable
    
    lambda_bin_j ( (r + 1) 1darray): Coefficients of the binomial distributions in the GLLVM layer
    y_bin_j (numobs 1darray): The subset containing only the binary/count variables in the dataset
    zM (M x r x k ndarray): M Monte Carlo copies of z for each component k1 of the mixture
    k (int): The number of components of the mixture
    nj_bin_j (int): The number of possible values/maximum values of the jth binary/count variable
    --------------------------------------------------------------
    returns (ndarray): log p(y_j | zM, s1 = k1)
    '''
    M = zM.shape[0]
    r = zM.shape[1]
    numobs = len(y_bin_j)
    
    yg = np.repeat(y_bin_j[np.newaxis], axis = 0, repeats = M)
    yg = yg.astype(float)  # np.float was removed in NumPy 1.24; use the builtin float

    nj_bin_j = float(nj_bin_j)

    coeff_binom = binom(nj_bin_j, yg).reshape(M, 1, numobs)
    
    eta = np.transpose(zM, (0, 2, 1)) @ lambda_bin_j[1:].reshape(1, r, 1)
    eta = eta + lambda_bin_j[0].reshape(1, 1, 1) # Add the constant
    
    den = nj_bin_j * log_1plusexp(eta)
    num = eta @ y_bin_j[np.newaxis, np.newaxis]  
    log_p_y_z = num - den + np.log(coeff_binom)
    
    return np.transpose(log_p_y_z, (0, 2, 1)).astype(float)
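A minimal smoke test for the function above (hedged: log_1plusexp is a project helper, reproduced here as a stand-in via np.logaddexp; shapes follow the docstring):

import numpy as np
from scipy.special import binom  # used inside log_py_zM_bin_j

def log_1plusexp(eta):
    # stand-in for the project's numerically stable log(1 + exp(eta))
    return np.logaddexp(0.0, eta)

M, r, k, numobs, nj = 300, 2, 3, 50, 10
zM = np.random.randn(M, r, k)                 # Monte Carlo copies of z
lambda_bin_j = np.random.randn(r + 1)         # intercept + r loadings
y_bin_j = np.random.randint(0, nj + 1, numobs)

log_p = log_py_zM_bin_j(lambda_bin_j, y_bin_j, zM, k, nj)
print(log_p.shape)  # expected: (M, numobs, k)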
Example #2
def mvt_logpdf(x, mu, Li, df):
    dim = Li.shape[0]
    Ki = np.dot(Li.T, Li)

    # log-determinant of K from the diagonal of Li = inv(chol(K))
    logdet = 2*log(1./np.diag(Li)).sum()
    lpdf_const = (gammaln((df + dim) / 2)
                  - (gammaln(df / 2)
                     + (log(df) + log(np.pi)) * dim * 0.5
                     + logdet * 0.5))

    x = np.atleast_2d(x)
    if x.shape[1] != mu.size:
        x = x.T
    assert(x.shape[1] == mu.size
               or x.shape[0] == mu.size)
    
    d = (x - mu.reshape((1, mu.size))).T
    
    Ki_d_scal = np.dot(Ki, d) / df                 # (dim, n) array
    d_Ki_d_scal_1 = diag_dot(d.T, Ki_d_scal) + 1.  # one value per sample
    

    res_pdf = (lpdf_const 
               - 0.5 * (df + dim) * np.log(d_Ki_d_scal_1)).flatten() 
    if res_pdf.size == 1:
        res_pdf = float(res_pdf)
    return res_pdf
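A hedged cross-check: if Li is the inverse of the lower Cholesky factor of the scale matrix K (so inv(K) = Li.T @ Li), the result should agree with scipy.stats.multivariate_t (SciPy >= 1.6). The bare log, gammaln, and diag_dot used above are assumed module-level names; diag_dot is reproduced as a stand-in:

import numpy as np
from numpy import log                    # mvt_logpdf calls log() unqualified
from scipy.special import gammaln
from scipy import stats

def diag_dot(A, B):
    # stand-in for the project helper: the diagonal of A @ B, row by row
    return np.einsum('ij,ji->i', A, B)

dim, df = 3, 5.0
rng = np.random.default_rng(0)
A = rng.standard_normal((dim, dim))
K = A @ A.T + dim * np.eye(dim)          # SPD scale matrix
mu = rng.standard_normal(dim)
x = rng.standard_normal((4, dim))

L = np.linalg.cholesky(K)                # K = L @ L.T
Li = np.linalg.inv(L)                    # so inv(K) = Li.T @ Li

print(np.allclose(mvt_logpdf(x, mu, Li, df),
                  stats.multivariate_t(loc=mu, shape=K, df=df).logpdf(x)))
# expected: True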
Example #3
File: GLM.py Project: codanonymous/tslasso
 def _logL(self, beta, X, y):
     """The log likelihood."""
     n_samples = float(X.shape[0])  # unused; kept for the commented-out normalized variant
     l = np.dot(X, beta)
     #logL = -0.5 * 1. / n_samples * np.sum((y - l)**2)
     logL = -0.5 * np.sum((y - l)**2)
     return logL
Example #4
File: GLM.py Project: codanonymous/tslasso
 def _grad_L2loss(self, beta, reg_lambda, X, y):
     n_samples = float(X.shape[0])  # unused; kept for the commented-out normalized variant
     z = np.dot(X, beta)
     #grad_beta = 1. / n_samples * np.transpose(np.dot(np.transpose(z - y), X))
     grad_beta = np.transpose(np.dot(np.transpose(z - y), X))
     print('grad_beta 0,1', grad_beta[0:2])
     return grad_beta
Example #5
def get_learning_rate_exp_decay(global_step,
                                base_lr=0.01,
                                decay_rate=0.9,
                                decay_steps=2,
                                staircase=True):
    if staircase:
        exponent = global_step // decay_steps  # integer (floor) division
    else:
        exponent = global_step / decay_steps
    return base_lr * np.power(decay_rate, exponent)
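For illustration, with the defaults (base_lr=0.01, decay_rate=0.9, decay_steps=2) the staircase variant holds the rate constant within each two-step window:

for step in range(5):
    print(step,
          get_learning_rate_exp_decay(step, staircase=True),
          get_learning_rate_exp_decay(step, staircase=False))
# staircase: 0.01, 0.01, 0.009, 0.009, 0.0081   (drops once per decay_steps)
# smooth:    0.01, ~0.00949, 0.009, ~0.00854, 0.0081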
Example #6
 def _grad_L2loss(self, beta, X, y):
     #print(beta.shape,X.shape,y.shape)
     if y.ndim == 1:
         y = y[:, np.newaxis]
     n_samples = float(X.shape[0])
     z = np.dot(X, beta)
     #grad_beta = 1. / n_samples * np.transpose(np.dot(np.transpose(z - y), X))
     grad_beta = np.transpose(np.dot(np.transpose(z - y), X))
     #print('gb',grad_beta.shape)
     return grad_beta
Example #7
def chebyshev_centre(A, b, gamma):
    rows, cols = A.shape
    c = np.zeros(cols + 1)
    c[-1] = -1
    A_ = np.hstack([A, np.sqrt(np.sum(np.power(A, 2), axis=1)).reshape(-1, 1)])
    A_ = np.vstack([A_, -c.reshape(1, -1)])
    b_ = np.append(b, 100).reshape(-1, 1)  # the appended row caps the radius at R <= 100

    # l2 norm minimisation of w
    P = gamma * np.eye(cols + 1)
    P[:, -1] = P[-1, :] = 0

    res = solve_qp(P=P, q=c, G=A_, h=b_)
    x_c = np.array(res[:-1])
    R = float(res[-1])
    return x_c, R
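For context: this computes the centre x_c and radius R of the largest ball inscribed in the polytope {x : Ax <= b}, posed as a QP over [x; R] (the gamma term only regularises x). A hypothetical check on the unit box, assuming solve_qp is the qpsolvers-style solve_qp(P, q, G, h):

import numpy as np

A = np.array([[1., 0.], [-1., 0.], [0., 1.], [0., -1.]])
b = np.array([1., 1., 1., 1.])
x_c, R = chebyshev_centre(A, b, gamma=1e-6)
print(x_c, R)  # expected: roughly [0, 0] and 1.0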
Example #8
 def __init__(self, mu, K, Ki = None, logdet_K = None, L = None): 
     mu = np.atleast_1d(mu).flatten()
     K = np.atleast_2d(K) 
     assert(np.prod(mu.shape) == K.shape[0] )
     assert(K.shape[0] == K.shape[1])
     
     self.mu = mu
     self.K = K
     (val, vec) = np.linalg.eigh(K)
     idx = np.arange(mu.size-1,-1,-1)
     (self.eigval, self.eigvec) = (np.diag(val[idx]), vec[:,idx])
     self.eig = self.eigvec.dot(np.sqrt(self.eigval))
     self.dim = K.shape[0]
     #(self.Ki, self.logdet) = (np.linalg.inv(K), np.linalg.slogdet(K)[1])
     (self.Ki, self.L, self.Li, self.logdet) = pdinv(K)
     
     self.lpdf_const = -0.5 * float(self.dim * np.log(2 * np.pi)
                                    + self.logdet)
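pdinv is a project helper here (the name and the 4-tuple convention match GPy's linalg utilities); a minimal sketch of the assumed contract via Cholesky:

import numpy as np

def pdinv_sketch(K):
    """Assumed contract: return (inv(K), chol(K), inv(chol(K)), log det K)."""
    L = np.linalg.cholesky(K)               # K = L @ L.T, L lower-triangular
    Li = np.linalg.inv(L)
    Ki = Li.T @ Li                          # inv(K)
    logdet = 2.0 * np.log(np.diag(L)).sum()
    return Ki, L, Li, logdet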
Example #9
    def run_cavi(self, tau, nu, phi_mu, phi_var, max_iter=200, tol=1e-6):
        params = packing.flatten_params(tau, nu, phi_mu, phi_var)

        self.trace.reset()
        diff = float('inf')
        while diff > tol and self.trace.stepnum < max_iter:
            self.cavi_updates(tau, nu, phi_mu, phi_var)
            new_params = packing.flatten_params(tau, nu, phi_mu, phi_var)
            diff = np.max(np.abs(new_params - params))
            self.trace.update(params, diff)
            if not np.isfinite(diff):
                print('Error: non-finite parameter difference.')
                break
            params = new_params

        if self.trace.stepnum >= max_iter:
            print('Warning: CAVI reached max_iter.')

        print('Done with CAVI.')
        return tau, nu, phi_mu, phi_var
Example #10
 def __init__(self, mu, K, df, Ki = None, logdet_K = None, L = None):
     mu = np.atleast_1d(mu).flatten()
     K = np.atleast_2d(K)
     assert(np.prod(mu.shape) == K.shape[0] )
     assert(K.shape[0] == K.shape[1])
     self.mu = mu
     self.K = K
     self.df = df
     self._freeze_chi2 = stats.chi2(df)
     self.dim = K.shape[0]
     self._df_dim = self.df + self.dim
     #(self.Ki,  self.logdet) = (np.linalg.inv(K), np.linalg.slogdet(K)[1])
     (self.Ki, self.L, self.Li, self.logdet) = pdinv(K)
     
     
     self.lpdf_const = float(gammaln((self.df + self.dim) / 2)
                             - (gammaln(self.df / 2)
                                + (log(self.df) + log(np.pi)) * self.dim * 0.5
                                + self.logdet * 0.5))
Example #11
def config_to_str(config):
    batch_size_frac = config['batch_size'] / float(config['N'])
    cstr = 'B{:0.2f}-L{:0.2f}-M{}'.format(batch_size_frac, config['lr_decay'],
                                          config['momentum'])
    return cstr
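A quick usage example with hypothetical config values:

config = {'batch_size': 32, 'N': 1000, 'lr_decay': 0.95, 'momentum': 0.9}
print(config_to_str(config))  # -> B0.03-L0.95-M0.9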
Example #12
 def forward(self, x=None):
     return 1.0  # np.float(1) was just the builtin float; np.float is removed in NumPy 1.24
Example #13
File: GLM.py Project: codanonymous/tslasso
    def fit(self):
        group = self.group
        lambdas = self.reg_lambda
        X = self.xs
        y = self.ys
        np.random.seed(0)  # RandomState(0) alone is created and discarded without seeding
        group = np.asarray(group, dtype=np.int64)  # cast; assigning to .dtype would reinterpret raw bytes
        if group.shape[0] != X.shape[1]:
            raise ValueError('group should be (n_features,)')

        # type check for data matrix
        if not isinstance(X, np.ndarray):
            raise ValueError('Input data should be of type ndarray (got %s).' %
                             type(X))

        n_features = np.int64(X.shape[1])
        if y.ndim == 1:
            y = y[:, np.newaxis]

        n_classes = np.int64(1)

        # Initialize parameters
        beta_hat = 1 / (n_features) * np.random.normal(0.0, 1.0,
                                                       [n_features, n_classes])
        #print('betahat 0,1', beta_hat[0:2])
        fit_params = list()

        for l, rl in enumerate(lambdas):
            #print(l, rl)
            fit_params.append({'beta': beta_hat})

            # Warm initialize parameters
            if l == 0:
                fit_params[-1]['beta'] = beta_hat
            else:
                fit_params[-1]['beta'] = fit_params[-2]['beta']

            tol = self.tol
            alpha = 1.

            # Temporary parameters to update
            beta = fit_params[-1]['beta']
            g = np.zeros([n_features, n_classes])

            # Initialize loss accumulators
            L, DL = list(), list()
            #pdb.set_trace()
            for t in range(0, self.max_iter):
                #print(t)
                grad_beta = self._grad_L2loss(beta=beta,
                                              reg_lambda=rl,
                                              X=X,
                                              y=y)
                #print('before grad',beta[0:2])
                beta = beta - self.learning_rate * grad_beta
                #beta = beta - self.learning_rate * (1 / np.sqrt(t + 1)) * g
                #print('after grad',beta[0:2])
                beta = self._prox(beta, rl * self.learning_rate)
                #print('after prox',beta[0:2])
                #print(beta[0])
                L.append(self._loss(beta, rl, X, y))
                #print(self._loss(beta, rl, X, y))
                if t > 1:
                    if (L[-1] > L[-2]):
                        break
                    DL.append(L[-1] - L[-2])
                    #print('DL, L-1, L-2',DL, L[-1], L[-2])
                    if np.abs(DL[-1] / L[-1]) < tol:
                        print('converged', rl)
                        msg = ('\tConverged. Loss function: {0:.2f}\n'
                               '\tdL/L: {1:.6f}').format(L[-1], DL[-1] / L[-1])
                        print(msg)
                        break
            #print(beta)
            fit_params[-1]['beta'] = beta
            self.lossresults[rl] = L
            self.dls[rl] = DL
            #print(L)
        # Update the estimated variables

        self.fit_ = fit_params
        self.ynull_ = np.mean(y)

        # Return
        return self
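The _prox step above is assumed to be the usual L1 soft-thresholding operator (or its group-lasso analogue, given the group argument); a minimal element-wise sketch of that assumption:

import numpy as np

def soft_threshold(beta, thresh):
    # prox of thresh * ||.||_1: shrink each coefficient toward zero
    return np.sign(beta) * np.maximum(np.abs(beta) - thresh, 0.0)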
Example #14
File: sgd_util.py Project: aoboturov/pmtk3
def get_learning_rate_exp_decay(global_step, base_lr=0.01, decay_rate=0.9, decay_steps=2, staircase=True):
    if staircase:
        exponent = global_step // decay_steps  # integer (floor) division
    else:
        exponent = global_step / decay_steps
    return base_lr * np.power(decay_rate, exponent)
Example #15
def dUdz(z, e):
    # finite-difference gradient of the potential U(z) = -log p(z)
    # (the original differentiated pDist itself, which does not match U below)
    return -(np.log(pDist(z + e)) - np.log(pDist(z))) / e
        
smpls=50000; burnin=100000; total=smpls+burnin;

z = np.empty(total); z[0] = 0;
r = np.empty(total); r[0] = 0;
accept = np.zeros(total);
ratio = np.zeros(total);

unif_RV = np.random.uniform(size=total)

eps = np.array([0.005,0.01,0.1,0.2,0.5, 1]);
e = 5; eps1 = eps[e];
L = 10  # leapfrog steps
M = np.array([1]); s = float(M[0])  # mass matrix (scalar mass here)
#accRatio = np.zeros((len(eps)))

for i in np.arange(0,total-1):
    
    r[i] = np.random.normal(0, s)
    Kr = 0.5 * r[i]**2 / s        # kinetic energy
    Uz = -np.log(pDist(z[i]))     # potential energy
    H = Uz + Kr                   # total energy
    
    # leapfrog: carry the position and momentum forward across steps
    z_prop = z[i]
    r_prop = r[i]
    for steps in range(L):
        r_half = r_prop - (eps1/2)*dUdz(z_prop, eps1)
        z_prop = z_prop + eps1*(r_half/s)
        r_prop = r_half - (eps1/2)*dUdz(z_prop, eps1)
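The snippet ends before the Metropolis correction; a hedged sketch of the accept/reject step that would typically close the outer loop (variable names follow the leapfrog loop above):

    # hypothetical completion, not in the original snippet
    Kr_new = 0.5 * r_prop**2 / s          # kinetic energy at the proposal
    Uz_new = -np.log(pDist(z_prop))       # potential energy at the proposal
    H_new = Uz_new + Kr_new

    ratio[i] = np.exp(H - H_new)          # Metropolis-Hastings ratio exp(-dH)
    if unif_RV[i] < min(1.0, ratio[i]):
        z[i + 1] = z_prop                 # accept the proposal
        accept[i] = 1
    else:
        z[i + 1] = z[i]                   # reject: keep the current state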
Example #16
def config_to_str(config):
    batch_size_frac = config['batch_size'] / float(config['N'])
    cstr = 'B{:0.2f}-L{:0.2f}-M{}'.format(batch_size_frac, config['lr_decay'],
            config['momentum'])
    return cstr
Example #17
 def log_pdf(self, y, x=None, node=None):
     self.cur_log_pdf = self.compute(log_pdf, y, x, node)
     if np.ndim(self.cur_log_pdf) == 0:  # scalar result
         self.cur_log_pdf = float(self.cur_log_pdf)
     return self.cur_log_pdf
Example #18
    cols = (repeat(asarray(Blf.col).reshape(Blf.nnz, 1) * M, M, 1) +
            ks).ravel()
    vals = (repeat(asarray(Blf.data).reshape(Blf.nnz, 1), M, 1)).ravel()

    newBlf = scipy.sparse.coo_matrix(
        (vals, (rows, cols)), shape=(shape[0] * M, shape[1] * M)).tocsr()
    return newBlf


if __name__ == "__main__":
    global img_file
    img_file = sys.argv[1]
    KS_file_name = sys.argv[2]
    Thickness_file_name = sys.argv[3]
    output_prefix = sys.argv[4]
    W_w = float(sys.argv[5])
    W_sparse = float(sys.argv[6])
    print('W_sparse', W_sparse)
    solve_choice = int(sys.argv[7])  # np.int, like np.float, was removed in NumPy 1.24
    W_spatial = float(sys.argv[8])
    print('W_spatial', W_spatial)

    global save_for_application_path_prefix
    save_for_application_path_prefix = "./Application_Files/"

    W_neighbors = 0.0
    if solve_choice == 3 or solve_choice == 6:  #### solve per pixel with neighborhood info constraints
        W_neighbors = float(sys.argv[9])

    print('W_neighbors', W_neighbors)
Example #19
    def fit(self):

        group = self.group
        print(group.shape)
        lambdas = self.reg_lambda
        parameter = self.parameter
        X = self.xs
        #print(X.shape)
        y = self.ys

        np.random.seed(0)  # RandomState(0) alone is created and discarded without seeding
        group = np.asarray(group, dtype=np.int64)

        #print(group.shape[0])
        #print(group.shape[0])
        #print(X.shape[1])
        if group.shape[0] != X.shape[1]:
            raise ValueError('group should be (n_features,)')

        # type check for data matrix
        if not isinstance(X, np.ndarray):
            raise ValueError('Input data should be of type ndarray (got %s).' %
                             type(X))

        n_features = np.int64(X.shape[1])
        if y.ndim == 1:
            y = y[:, np.newaxis]
            self.ys = y
        #print(y.shape)
        n_classes = np.int64(1)

        beta_hat = 1 / (n_features) * np.random.normal(0.0, 1.0,
                                                       [n_features, n_classes])
        fit_params = list()

        for l, rl in enumerate(lambdas):
            fit_params.append({'beta': beta_hat})
            if l == 0:
                fit_params[-1]['beta'] = beta_hat
            else:
                fit_params[-1]['beta'] = fit_params[-2]['beta']
            tol = self.tol
            alpha = 1.
            beta = fit_params[-1]['beta']
            #print('losser',self._L2loss(beta,X,y))
            g = np.zeros([n_features, n_classes])
            L, DL, L2, PEN = list(), list(), list(), list()
            lamb = self.learning_rate
            bm1 = beta.copy()
            bm2 = beta.copy()
            for t in range(0, self.max_iter):
                L.append(self._loss(beta, rl, X, y))
                L2.append(self._L2loss(beta, X, y))
                PEN.append(self._L1penalty(beta))
                w = (t / (t + 3))
                yk = beta + w * (bm1 - bm2)
                #print('losser',self._L2loss(yk,X,y))
                #print('beforebt',np.linalg.norm(yk),np.linalg.norm(X),np.linalg.norm(y))
                beta, lamb = self._btalgorithm(yk, lamb, .5, 1000, rl)
                #X = self.xs
                #y = self.ys
                #print('losser2',self._L2loss(beta,X,y))
                bm2 = bm1.copy()
                bm1 = beta.copy()
                if t > 1:
                    DL.append(L[-1] - L[-2])
                    if np.abs(DL[-1] / L[-1]) < tol:
                        print('converged', rl)
                        msg = ('\tConverged. Loss function: {0:.2f}\n'
                               '\tdL/L: {1:.6f}').format(L[-1], DL[-1] / L[-1])
                        print(msg)
                        break

            #print(beta)
            fit_params[-1]['beta'] = beta
            self.lossresults[rl] = L
            self.l2loss[rl] = L2
            self.penalty[rl] = PEN
            self.dls[rl] = DL
            #print(L)
        # Update the estimated variables

        self.fit_ = fit_params
        self.ynull_ = np.mean(y)

        # Return
        return self
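For orientation: the yk update in fit() is Nesterov-style acceleration (w = t/(t+3) is a common FISTA momentum weight), and _btalgorithm is assumed to be a backtracking line search over the proximal step. A minimal sketch of the accelerated proximal-gradient pattern, with grad and prox as hypothetical stand-ins for the class's own methods:

import numpy as np

def fista_sketch(grad, prox, beta0, lamb, reg, n_iter=100):
    # accelerated proximal gradient: extrapolate, then take a prox step
    beta = bm1 = bm2 = beta0.copy()
    for t in range(n_iter):
        w = t / (t + 3.0)                # momentum weight, as in fit() above
        yk = beta + w * (bm1 - bm2)      # Nesterov extrapolation
        beta = prox(yk - lamb * grad(yk), reg * lamb)
        bm2, bm1 = bm1, beta.copy()
    return beta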