Example #1
File: niw.py Project: mattjj/svae
def expectedstats(natparam, fudge=1e-8):
    S, m, kappa, nu = natural_to_standard(natparam)
    d = m.shape[-1]

    E_J = nu[...,None,None] * symmetrize(np.linalg.inv(S)) + fudge * np.eye(d)
    E_h = np.matmul(E_J, m[...,None])[...,0]
    E_hTJinvh = d/kappa + np.matmul(m[...,None,:], E_h[...,None])[...,0,0]
    E_logdetJ = (np.sum(digamma((nu[...,None] - np.arange(d)[None,...])/2.), -1) \
                 + d*np.log(2.)) - np.linalg.slogdet(S)[1]

    return pack_dense(-1./2 * E_J, E_h, -1./2 * E_hTJinvh, 1./2 * E_logdetJ)
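symmetrize, natural_to_standard, and pack_dense are helpers from the surrounding svae code. As a point of reference, a minimal sketch of symmetrize consistent with how it is used above:

def symmetrize(A):
    # average a (batched) matrix with its transpose over the last two axes
    return (A + np.swapaxes(A, -1, -2)) / 2.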
Example #2
def covgrad(x, mean, cov, allow_singular=False):
    if allow_singular:
        raise NotImplementedError("The multivariate normal pdf is not "
                "differentiable w.r.t. a singular covariance matrix")
    J = np.linalg.inv(cov)
    solved = np.matmul(J, np.expand_dims(x - mean, -1))
    return 1./2 * (generalized_outer_product(solved) - J)
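A minimal usage sketch (shapes hypothetical, and assuming the generalized_outer_product helper from Example #5 is in scope); covgrad returns the gradient of the multivariate normal log-density with respect to the covariance matrix, so the result has the same shape as cov:

import numpy as np
x = np.array([0.5, -1.0])
mean = np.zeros(2)
cov = np.array([[2.0, 0.3],
                [0.3, 1.0]])
g = covgrad(x, mean, cov)  # (2, 2) gradient of log N(x | mean, cov) w.r.t. cov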
Example #3
    def Sigmas(self):
        return np.matmul(self._sqrt_Sigmas,
                         np.swapaxes(self._sqrt_Sigmas, -1, -2))
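The square-root parameterization guarantees each covariance is positive semidefinite by construction; a minimal sketch of the same batched product outside the class (shapes hypothetical):

import numpy as np
sqrt_Sigmas = np.random.randn(5, 3, 3)                # batch of square-root factors L
Sigmas = np.matmul(sqrt_Sigmas,
                   np.swapaxes(sqrt_Sigmas, -1, -2))  # batch of L @ L.T, each PSD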
Example #4
    def fit(self, batch_size, epochs=500, learning_rate=0.0001):
        """STEP 1: Set up what the optimization routine will be"""
        """Just to streamline with GVI code, re-name variables"""
        self.M = min(batch_size, self.n)
        Y = self.Y
        X = self.X
        """Create objective & take gradient"""
        objective = self.create_objective()
        objective_gradient = grad(objective)
        params = self.params
        """STEP 2: Sample from X, Y and perform ADAM steps"""
        """STEP 2.1: These are just the ADAM optimizer default settings"""
        m1 = 0
        m2 = 0
        beta1 = 0.9
        beta2 = 0.999
        epsilon = 1e-8
        t = 0
        """STEP 2.2: Loop over #epochs and take step for each subsample"""
        for epoch in range(epochs):
            """STEP 2.2.1: For each epoch, shuffle the data"""
            permutation = np.random.choice(range(Y.shape[0]),
                                           Y.shape[0],
                                           replace=False)
            """HERE: Should add a print statement here to monitor algorithm!"""
            if epoch % 100 == 0:
                print("epoch #", epoch, "/", epochs)
                #print("sigma2", np.exp(-q_params[3]))
            """STEP 2.2.2: Process M data points together and take one step"""
            for i in range(0, int(self.n / self.M)):
                """Get the next M observations (or less if we would run out
                of observations otherwise)"""
                end = min(self.n, (i + 1) * self.M)
                indices = permutation[(i * self.M):end]
                """ADAM step for this batch"""
                t += 1
                if X is not None:
                    if False:
                        print("Y", Y[indices])
                        print(
                            "X*coefs",
                            np.matmul(X[indices, :],
                                      np.array([1.0, -2.0, 0.5, 4.0, -3.5])))
                        print("X*params", np.matmul(X[indices, :],
                                                    params[:-1]))

                    grad_params = objective_gradient(params, self.parser,
                                                     Y[indices], X[indices, :])
                else:
                    grad_params = objective_gradient(params,
                                                     self.parser,
                                                     Y[indices],
                                                     X_=None)

#                print(grad_params)
#                print("before:", params)
                m1 = beta1 * m1 + (1 - beta1) * grad_params
                m2 = beta2 * m2 + (1 - beta2) * grad_params**2
                m1_hat = m1 / (1 - beta1**t)
                m2_hat = m2 / (1 - beta2**t)
                params -= learning_rate * m1_hat / (np.sqrt(m2_hat) + epsilon)


#                print("after", params)

        self.params = params
Example #5
def generalized_outer_product(x):
    if np.ndim(x) == 1:
        return np.outer(x, x)
    return np.matmul(x, np.swapaxes(x, -1, -2))
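A minimal usage sketch: for a 1-D input this is the ordinary outer product, while batched inputs are multiplied over their last two axes.

import numpy as np
v = np.array([1.0, 2.0])
print(generalized_outer_product(v))            # (2, 2) outer product v v^T
batch = np.random.randn(4, 3, 1)
print(generalized_outer_product(batch).shape)  # (4, 3, 3), one outer product per batch element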
Example #6
File: optimizers.py Project: vs74/csc2532
def rank_2_B_update(B, y, s, c):
    normalizer = np.dot(s, c)
    temp = np.outer(y - np.matmul(B, s), c)
    symmetric_term = (temp + np.transpose(temp)) / normalizer
    residual_term = (np.dot(s, y - np.matmul(B, s))
                     * np.outer(c, c)) / np.power(normalizer, 2)
    return B + symmetric_term - residual_term
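With c = s this appears to reduce to the Powell-symmetric-Broyden (PSB) rank-2 update of a Hessian approximation; a minimal usage sketch with hypothetical values:

import numpy as np
B = np.eye(3)                        # current Hessian approximation
s = np.array([0.10, -0.20, 0.05])    # step taken
y = np.array([0.30, -0.10, 0.20])    # change in gradient along the step
B_new = rank_2_B_update(B, y, s, s)  # c = s gives the symmetric PSB-style update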
Example #7
def fun_FA(centers,
           maxK,
           max_iter,
           n_repeats,
           s_all=None,
           verbose=False,
           conjugate_gradient=True):
    '''
    Extracts the low rank structure from the data given by centers

    Args:
        centers: 2D array of shape (N, P) where N is the ambient dimension and P is the number of centers
        maxK: Maximum rank to consider
        max_iter: Maximum number of iterations for the solver
        n_repeats: Number of repetitions to find the most stable solution at each iteration of K
        s_all: (Optional) iterable containing (P, 1) random normal vectors

    Returns:
        norm_coeff: Ratio of center norms before and after optimization
        norm_coeff_vec: Mean ratio of center norms before and after optimization
        Proj: P-1 basis vectors
        V1_mat: Solution for each value of K
        res_coeff: Cost function after optimization for each K
        res_coeff0: Correlation before optimization
    '''
    N, P = centers.shape
    # Configure the solver
    opts = {'max_iter': max_iter, 'gtol': 1e-6, 'xtol': 1e-6, 'ftol': 1e-8}

    # Subtract the global mean
    mean = np.mean(centers.T, axis=0, keepdims=True)
    Xb = centers.T - mean
    xbnorm = np.sqrt(np.square(Xb).sum(axis=1, keepdims=True))

    # Gram-Schmidt into a P-1 dimensional basis
    q, r = qr(Xb.T, mode='economic')
    X = np.matmul(Xb, q[:, 0:P - 1])

    # Store the (P, P-1) dimensional data before extracting the low rank structure
    X0 = X.copy()
    xnorm = np.sqrt(np.square(X0).sum(axis=1, keepdims=True))

    # Calculate the correlations
    C0 = np.matmul(X0, X0.T) / np.matmul(xnorm, xnorm.T)
    res_coeff0 = (np.sum(np.abs(C0)) - P) * 1 / (P * (P - 1))

    # Storage for the results
    V1_mat = []
    C0_mat = []
    norm_coeff = []
    norm_coeff_vec = []
    res_coeff = []

    # Compute the optimal low rank structure for rank 1 to maxK
    V1 = None
    for i in range(1, maxK + 1):
        best_stability = 0

        for j in range(1, n_repeats + 1):
            # Sample a random normal vector unless one is supplied
            if s_all is not None and len(s_all) >= i:
                s = s_all[i * j - 1]
            else:
                s = np.random.randn(P, 1)

            # Create initial V.
            sX = np.matmul(s.T, X)
            if V1 is None:
                V0 = sX
            else:
                V0 = np.concatenate([sX, V1.T], axis=0)
            V0, _ = qr(V0.T, mode='economic')  # (P-1, i)

            # Compute the optimal V for this i
            V1tmp, output = CGmanopt(
                V0, partial(square_corrcoeff_full_cost, grad=False), X, **opts)

            # Compute the cost
            cost_after, _ = square_corrcoeff_full_cost(V1tmp, X, grad=False)

            # Verify that the solution is orthogonal within tolerance
            assert np.linalg.norm(np.matmul(V1tmp.T, V1tmp) - np.identity(i),
                                  ord='fro') < 1e-10

            # Extract low rank structure
            X0 = X - np.matmul(np.matmul(X, V1tmp), V1tmp.T)

            # Compute stability of solution
            denom = np.sqrt(np.sum(np.square(X), axis=1))
            stability = min(np.sqrt(np.sum(np.square(X0), axis=1)) / denom)

            # Store the solution if it has the best stability
            if stability > best_stability:
                best_stability = stability
                best_V1 = V1tmp
            if n_repeats > 1 and verbose:
                print(j, 'cost=', cost_after, 'stability=', stability)

        # Use the best solution
        V1 = best_V1

        # Extract the low rank structure
        XV1 = np.matmul(X, V1)
        X0 = X - np.matmul(XV1, V1.T)

        # Compute the current (normalized) cost
        xnorm = np.sqrt(np.square(X0).sum(axis=1, keepdims=True))
        C0 = np.matmul(X0, X0.T) / np.matmul(xnorm, xnorm.T)
        current_cost = (np.sum(np.abs(C0)) - P) * 1 / (P * (P - 1))
        if verbose:
            print('K=', i, 'mean=', current_cost)

        # Store the results
        V1_mat.append(V1)
        C0_mat.append(C0)
        norm_coeff.append((xnorm / xbnorm)[:, 0])
        norm_coeff_vec.append(np.mean(xnorm / xbnorm))
        res_coeff.append(current_cost)

        # Break the loop if the cost has increased for 3 consecutive iterations
        if (i > 4 and res_coeff[i - 1] > res_coeff[i - 2]
                and res_coeff[i - 2] > res_coeff[i - 3]
                and res_coeff[i - 3] > res_coeff[i - 4]):
            if verbose:
                print("Optimal K0 found")
            break
    return norm_coeff, norm_coeff_vec, q[:, 0:P - 1], V1_mat, res_coeff, res_coeff0
Example #8
    def fun(self):
        x = self.par['x'].get()
        y = self.par['y'].get()
        return np.matmul(np.matmul(np.transpose(x), self.a), y)
Example #9
    def forward_pass(self, X, hyp):
        Q = self.hidden_dim
        H = np.zeros((X.shape[1], Q))
        S = np.zeros((X.shape[1], Q))

        # Forget Gate
        idx_1 = 0
        idx_2 = idx_1 + self.X_dim * Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q * Q
        U_f = np.reshape(hyp[idx_1:idx_2], (self.X_dim, Q))
        b_f = np.reshape(hyp[idx_2:idx_3], (1, Q))
        W_f = np.reshape(hyp[idx_3:idx_4], (Q, Q))

        # Input Gate
        idx_1 = idx_4
        idx_2 = idx_1 + self.X_dim * Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q * Q
        U_i = np.reshape(hyp[idx_1:idx_2], (self.X_dim, Q))
        b_i = np.reshape(hyp[idx_2:idx_3], (1, Q))
        W_i = np.reshape(hyp[idx_3:idx_4], (Q, Q))

        # Update Cell State
        idx_1 = idx_4
        idx_2 = idx_1 + self.X_dim * Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q * Q
        U_s = np.reshape(hyp[idx_1:idx_2], (self.X_dim, Q))
        b_s = np.reshape(hyp[idx_2:idx_3], (1, Q))
        W_s = np.reshape(hyp[idx_3:idx_4], (Q, Q))

        # Output Gate
        idx_1 = idx_4
        idx_2 = idx_1 + self.X_dim * Q
        idx_3 = idx_2 + Q
        idx_4 = idx_3 + Q * Q
        U_o = np.reshape(hyp[idx_1:idx_2], (self.X_dim, Q))
        b_o = np.reshape(hyp[idx_2:idx_3], (1, Q))
        W_o = np.reshape(hyp[idx_3:idx_4], (Q, Q))

        for i in range(0, self.lags):
            # Forget Gate
            F = self.sigmoid(
                np.matmul(H, W_f) + np.matmul(X[i, :, :], U_f) + b_f)
            # Input Gate
            I = self.sigmoid(
                np.matmul(H, W_i) + np.matmul(X[i, :, :], U_i) + b_i)
            # Update Cell State
            S_tilde = np.tanh(
                np.matmul(H, W_s) + np.matmul(X[i, :, :], U_s) + b_s)
            S = F * S + I * S_tilde
            # Output Gate
            O = self.sigmoid(
                np.matmul(H, W_o) + np.matmul(X[i, :, :], U_o) + b_o)
            H = O * np.tanh(S)

        idx_1 = idx_4
        idx_2 = idx_1 + Q * self.Y_dim
        idx_3 = idx_2 + self.Y_dim
        V = np.reshape(hyp[idx_1:idx_2], (Q, self.Y_dim))
        c = np.reshape(hyp[idx_2:idx_3], (1, self.Y_dim))
        Y = np.matmul(H, V) + c

        return Y
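The flat hyp vector must hold the (U, b, W) triple for each of the four gates plus the output projection (V, c). A minimal sketch of the implied length, using a hypothetical helper name:

def lstm_param_count(X_dim, Q, Y_dim):
    # four gates, each with U (X_dim x Q), b (Q), W (Q x Q), plus V (Q x Y_dim) and c (Y_dim)
    return 4 * (X_dim * Q + Q + Q * Q) + Q * Y_dim + Y_dim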
Example #10
def ffnn(state, weights_1, weights_2, bias_1, bias_2):
    h = np.tanh(np.matmul(state, weights_1) + bias_1)
    return np.matmul(h, weights_2) + bias_2
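A minimal usage sketch with hypothetical layer sizes; rows of state are samples, so the weights multiply on the right:

import numpy as np
state = np.ones((1, 4))
w1, b1 = np.random.randn(4, 8), np.zeros(8)
w2, b2 = np.random.randn(8, 2), np.zeros(2)
out = ffnn(state, w1, w2, b1, b2)  # shape (1, 2)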
Example #11
def test_lds_log_probability_perf(T=1000, D=10, N_iter=10):
    """
    Compare performance of banded method vs message passing in pylds.
    """
    print("Comparing methods for T={} D={}".format(T, D))

    from pylds.lds_messages_interface import kalman_info_filter, kalman_filter

    # Convert LDS parameters into info form for pylds
    As, bs, Qi_sqrts, ms, Ri_sqrts = make_lds_parameters(T, D)
    Qis = np.matmul(Qi_sqrts, np.swapaxes(Qi_sqrts, -1, -2))
    Ris = np.matmul(Ri_sqrts, np.swapaxes(Ri_sqrts, -1, -2))
    x = npr.randn(T, D)

    print("Timing banded method")
    start = time.time()
    for itr in range(N_iter):
        lds_log_probability(x, As, bs, Qi_sqrts, ms, Ri_sqrts)
    stop = time.time()
    print("Time per iter: {:.4f}".format((stop - start) / N_iter))

    # Compare to Kalman Filter
    mu_init = np.zeros(D)
    sigma_init = np.eye(D)
    Bs = np.ones((D, 1))
    sigma_states = np.linalg.inv(Qis)
    Cs = np.eye(D)
    Ds = np.zeros((D, 1))
    sigma_obs = np.linalg.inv(Ris)
    inputs = bs
    data = ms

    print("Timing PyLDS message passing (kalman_filter)")
    start = time.time()
    for itr in range(N_iter):
        kalman_filter(mu_init, sigma_init,
                      np.concatenate([As, np.eye(D)[None, :, :]]), Bs,
                      np.concatenate([sigma_states,
                                      np.eye(D)[None, :, :]]), Cs, Ds,
                      sigma_obs, inputs, data)
    stop = time.time()
    print("Time per iter: {:.4f}".format((stop - start) / N_iter))

    # Info form comparison
    J_init = np.zeros((D, D))
    h_init = np.zeros(D)
    log_Z_init = 0

    J_diag, J_lower_diag, h = convert_lds_to_block_tridiag(
        As, bs, Qi_sqrts, ms, Ri_sqrts)
    J_pair_21 = J_lower_diag
    J_pair_22 = J_diag[1:]
    J_pair_11 = J_diag[:-1]
    J_pair_11[1:] = 0
    h_pair_2 = h[1:]
    h_pair_1 = h[:-1]
    h_pair_1[1:] = 0
    log_Z_pair = 0

    J_node = np.zeros((T, D, D))
    h_node = np.zeros((T, D))
    log_Z_node = 0

    print("Timing PyLDS message passing (kalman_info_filter)")
    start = time.time()
    for itr in range(N_iter):
        kalman_info_filter(J_init, h_init, log_Z_init, J_pair_11, J_pair_21,
                           J_pair_22, h_pair_1, h_pair_2, log_Z_pair, J_node,
                           h_node, log_Z_node)
    stop = time.time()
    print("Time per iter: {:.4f}".format((stop - start) / N_iter))
Example #12
def _a_from_x(x):
    y = anp.matmul(anp.transpose(x), x)
    onevec = anp.ones_like(x[0])
    return y + 0.01 * anp.diag(onevec)
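A minimal usage sketch; the 0.01 shift on the diagonal keeps the Gram matrix strictly positive definite even when x is rank-deficient:

import autograd.numpy as anp
x = anp.array([[1.0, 2.0],
               [3.0, 4.0]])
a = _a_from_x(x)  # symmetric positive-definite (2, 2) matrix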
Example #13
def neural_ode(thetas):
    weights_1, weights_2, bias_1, bias_2 = theta_reshape(thetas) # Reshape once for use throughout this iteration
    batch_state_array = np.zeros(shape=(num_batches,batch_tsteps,state_len),dtype='double')
    batch_rhs_array = np.zeros(shape=(num_batches,batch_tsteps,state_len),dtype='double')
    batch_time_array = np.zeros(shape=(num_batches,batch_tsteps,1),dtype='double')

    augmented_state = np.zeros(shape=(1,state_len+num_wb+1))
    batch_ids = np.random.choice(tsteps-batch_tsteps,2*num_batches)

    # Finding validation batches
    val_batch = 0
    total_val_batch_loss = 0
    for j in range(num_batches):
        start_id = batch_ids[j]
        end_id = start_id + batch_tsteps

        batch_state_array[j,0,:] = true_state_array[start_id,:]
        batch_time_array[j,:batch_tsteps] = time_array[start_id:end_id,None]

        # Calculate forward pass - saving results for state and rhs to array - batchwise
        temp_state = np.copy(batch_state_array[j,0,:])
        for i in range(1,batch_tsteps):
            time = np.reshape(batch_time_array[j,i],newshape=(1,1))
            output_state, output_rhs = euler_forward(temp_state,weights_1,weights_2,bias_1,bias_2,time)  
            batch_state_array[j,i,:] = output_state[:]
            batch_rhs_array[j,i,:] = output_rhs[:]
            temp_state = np.copy(output_state)

        val_batch = val_batch + 1
        # Find batch loss
        total_val_batch_loss = total_val_batch_loss + np.sum((output_state-true_state_array[end_id-1,:])**2)

    # Minibatching within sampled domain
    total_batch_loss = 0.0
    for j in range(num_batches):
        start_id = batch_ids[val_batch+j]
        end_id = start_id + batch_tsteps

        batch_state_array[j,0,:] = true_state_array[start_id,:]
        batch_time_array[j,:batch_tsteps] = time_array[start_id:end_id,None]

        # Calculate forward pass - saving results for state and rhs to array - batchwise
        temp_state = np.copy(batch_state_array[j,0,:])
        for i in range(1,batch_tsteps):
            time = np.reshape(batch_time_array[j,i],newshape=(1,1))
            output_state, output_rhs = euler_forward(temp_state,weights_1,weights_2,bias_1,bias_2,time)  
            batch_state_array[j,i,:] = output_state[:]
            batch_rhs_array[j,i,:] = output_rhs[:]
            temp_state = np.copy(output_state)

        # Operations at final time step (setting up initial conditions for the adjoint)
        temp_state = np.copy(batch_state_array[j,-2,:])
        temp_state = np.reshape(temp_state,(1,state_len)) # prefinal state vector
        time = np.reshape(batch_time_array[j,-2],(1,1)) # prefinal time
        pvec = np.concatenate((temp_state,thetas,time),axis=1)

        # Calculate loss related gradients - dldz
        dldz = np.reshape(dldz_func(output_state,true_state_array[end_id-1,:]),(1,state_len))
        # With respect to weights,bias and time
        dl = dl_func(pvec,true_state_array[end_id-1,:])
        dldthetas = np.reshape(dl[:,state_len:-1],newshape=(1,num_wb))
        # Calculate dl/dt
        dldt = np.matmul(dldz,batch_rhs_array[j,-1,:])
        dldt = np.reshape(dldt,newshape=(1,1))

        # Find batch loss
        total_batch_loss = total_batch_loss + np.sum((output_state-true_state_array[end_id-1,:])**2)

        # Reverse operation (adjoint evolution in backward time)
        _augmented_state = np.concatenate((dldz,dldthetas,dldt),axis=1)
        for i in range(1,batch_tsteps):
            time = np.reshape(batch_time_array[j,-1-i],newshape=(1,1))
            state_now = np.reshape(batch_state_array[j,-1-i,:],newshape=(1,state_len))
            pvec = np.concatenate((state_now,thetas,time),axis=1)

            # Adjoint propagation backward in time
            i0 = _augmented_state + dt*adjoint_rhs(_augmented_state,pvec)
            sub_state = np.reshape(i0[0,:state_len],newshape=(1,state_len))

            _augmented_state[:,:] = i0[:,:]
        
        augmented_state = np.add(augmented_state,_augmented_state)
    
    return augmented_state, total_batch_loss/num_batches, total_val_batch_loss/num_batches
Example #14
# Specify computer specs.
CORE_COUNT = 8
os.environ["OPENBLAS_NUM_THREADS"] = "{}".format(CORE_COUNT)
os.environ["MKL_NUM_THREADS"] = "{}".format(CORE_COUNT)

# Define experimental constants.
CHI_E = -5.65e-4 #GHz
CHI_F = 2 * CHI_E #GHz
MAX_AMP_C = 2 * anp.pi * 2e-3 #GHz
MAX_AMP_T = 2 * anp.pi * 2e-2 #GHz

# Define the system.
CAVITY_STATE_COUNT = 5
CAVITY_ANNIHILATE = get_annihilation_operator(CAVITY_STATE_COUNT)
CAVITY_CREATE = get_creation_operator(CAVITY_STATE_COUNT)
CAVITY_NUMBER = anp.matmul(CAVITY_CREATE, CAVITY_ANNIHILATE)
CAVITY_ZERO = anp.array(((1,), (0,), (0,), (0,), (0,)))
CAVITY_ONE = anp.array(((0,), (1,), (0,), (0,), (0,)))
CAVITY_TWO = anp.array(((0,), (0,), (1,), (0,), (0,)))
CAVITY_THREE = anp.array(((0,), (0,), (0,), (1,), (0,)))
CAVITY_FOUR = anp.array(((0,), (0,), (0,), (0,), (1,)))
CAVITY_I = anp.eye(CAVITY_STATE_COUNT)

TRANSMON_STATE_COUNT = 3
TRANSMON_G = anp.array(((1,), (0,), (0,)))
TRANSMON_G_DAGGER = conjugate_transpose(TRANSMON_G)
TRANSMON_E = anp.array(((0,), (1,), (0,)))
TRANSMON_E_DAGGER = conjugate_transpose(TRANSMON_E)
TRANSMON_F = anp.array(((0,), (0,), (1,)))
TRANSMON_F_DAGGER = conjugate_transpose(TRANSMON_F)
TRANSMON_I = anp.eye(TRANSMON_STATE_COUNT)
Example #15
def f(theta):
    v = (y - np.matmul(x, theta))
    return np.sum(np.multiply(v, v))
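f is a least-squares objective in the module-level x and y; a minimal sketch differentiating it with autograd (data shapes hypothetical):

import autograd.numpy as np
from autograd import grad

x = np.random.randn(10, 3)
y = np.random.randn(10)
g = grad(f)(np.zeros(3))  # equals -2 * x.T @ (y - x @ theta) at theta = 0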
Example #16
File: emissions.py Project: zhoupc/ssm
    def compute_mus(self, x):
        return np.matmul(self.Cs[None, ...], x[:, None, :, None])[:, :, :, 0] + self.ds
Example #17
    def ObjPar(par, mat, loc):
        diff = par - loc
        return np.dot(diff, np.matmul(mat, diff))
Example #18
    vb3 = b2*vb3 + (1-b2)*gradb3_2
    m_hatb3 = mb3/(1-np.power(b1,i))
    v_hatb3 = vb3/(1-np.power(b2,i))
    
    w1 = w1 - (eta*m_hatw1)/(np.sqrt(v_hatw1) + e)
    w2 = w2 - (eta*m_hatw2)/(np.sqrt(v_hatw2) + e)
    w3 = w3 - (eta*m_hatw3)/(np.sqrt(v_hatw3) + e)
    
    # NOTE: b1 and b2 are also the Adam beta constants used above; these bias
    # updates overwrite them and corrupt the bias-correction terms on later iterations.
    b1 = b1 - (eta*m_hatb1)/(np.sqrt(v_hatb1) + e)
    b2 = b2 - (eta*m_hatb2)/(np.sqrt(v_hatb2) + e)
    b3 = b3 - (eta*m_hatb3)/(np.sqrt(v_hatb3) + e)
    
#    w1 = w1 - eta*gradw1
#    w2 = w2 - eta*gradw2
#    w3 = w3 - eta*gradw3
#    b1 = b1 - eta*gradb1
#    b2 = b2 - eta*gradb2
#    b3 = b3 - eta*gradb3


    H1 = actifunc(w1,b1,data)
    H2 = actifunc(w2,b2,H1)
    H_out = actifunc(w3,b3,H2)
    
    diff = xy - H_out
    
    loss = np.matmul(diff.T,diff) + reg_term(w3)
    lossfunction = np.append(lossfunction, loss)

#it = np.linspace(0,iter-1,iter)
#plt.plot(it,lossfunction[1:])
Example #19
    def get_x_vec(self):
        # For testing the Jacobian
        return np.matmul(self.b_mat, self.x.get())
Example #20
def actifunc(w, b, x):
    y = np.matmul(x, w) + b
    y = np.tanh(y)
    return y
Example #21
    def test_objective(self):
        model = Model(dim=3)
        objective = obj_lib.Objective(par=model.x, fun=model.f)

        model.set_inits()
        x_free = model.x.get_free()
        x_vec = model.x.get_vector()

        model.set_opt()
        self.assertTrue(objective.fun_free(x_free) > 0.0)
        np_test.assert_array_almost_equal(objective.fun_free(x_free),
                                          objective.fun_vector(x_vec))

        grad = objective.fun_free_grad(x_free)
        hess = objective.fun_free_hessian(x_free)
        np_test.assert_array_almost_equal(np.matmul(hess, grad),
                                          objective.fun_free_hvp(x_free, grad))

        self.assertTrue(objective.fun_vector(x_vec) > 0.0)
        grad = objective.fun_vector_grad(x_vec)
        hess = objective.fun_vector_hessian(x_vec)
        np_test.assert_array_almost_equal(
            np.matmul(hess, grad), objective.fun_vector_hvp(x_free, grad))

        # Test Jacobians.
        vec_objective = obj_lib.Objective(par=model.x, fun=model.get_x_vec)
        vec_jac = vec_objective.fun_vector_jacobian(x_vec)
        np_test.assert_array_almost_equal(model.b_mat, vec_jac)

        free_jac = vec_objective.fun_free_jacobian(x_free)
        x_free_to_vec_jac = \
            model.x.free_to_vector_jac(x_free).todense()
        np_test.assert_array_almost_equal(
            np.matmul(model.b_mat, np.transpose(x_free_to_vec_jac)), free_jac)

        # Test the preconditioning
        preconditioner = 2.0 * np.eye(model.dim)
        preconditioner[model.dim - 1, 0] = 0.1  # Add asymmetry for testing!
        objective.preconditioner = preconditioner

        np_test.assert_array_almost_equal(
            objective.fun_free_cond(x_free),
            objective.fun_free(np.matmul(preconditioner, x_free)),
            err_msg='Conditioned function values')

        fun_free_cond_grad = autograd.grad(objective.fun_free_cond)
        grad_cond = objective.fun_free_grad_cond(x_free)
        np_test.assert_array_almost_equal(
            fun_free_cond_grad(x_free),
            grad_cond,
            err_msg='Conditioned gradient values')

        fun_free_cond_hessian = autograd.hessian(objective.fun_free_cond)
        hess_cond = objective.fun_free_hessian_cond(x_free)
        np_test.assert_array_almost_equal(fun_free_cond_hessian(x_free),
                                          hess_cond,
                                          err_msg='Conditioned Hessian values')

        fun_free_cond_hvp = autograd.hessian_vector_product(
            objective.fun_free_cond)
        np_test.assert_array_almost_equal(
            fun_free_cond_hvp(x_free, grad_cond),
            objective.fun_free_hvp_cond(x_free, grad_cond),
            err_msg='Conditioned Hessian vector product values')
Example #22
def reg_term(w):
    lam = 4
    r = lam * np.matmul(w.T, w)
    return r
Example #23
File: optimizers.py Project: vs74/csc2532
def rank_1_H_update(H, s, y, d):
    return H + np.outer(s - np.matmul(H, y), d) / np.inner(d, y)
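Choosing d = s - np.matmul(H, y) appears to recover the symmetric-rank-1 (SR1) update of an inverse-Hessian approximation; a minimal usage sketch with hypothetical values:

import numpy as np
H = np.eye(3)                       # current inverse-Hessian approximation
s = np.array([0.10, -0.20, 0.05])   # step taken
y = np.array([0.30, -0.10, 0.20])   # change in gradient along the step
d = s - np.matmul(H, y)
H_new = rank_1_H_update(H, s, y, d)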
Example #24
def unpack_posdef_matrix(free_vec, diag_lb=0.0):
    mat_chol = exp_matrix_diagonal(unvectorize_ld_matrix(free_vec))
    mat = np.matmul(mat_chol, mat_chol.T)
    k = mat.shape[0]
    # np.make_diagonal is autograd's extension to NumPy, not standard NumPy
    return mat + np.make_diagonal(np.full(k, diag_lb), offset=0, axis1=-1, axis2=-2)
Example #25
def manifold_analysis_corr(XtotT, kappa, n_t, t_vecs=None, n_reps=10):
    '''
    Carry out the analysis on multiple manifolds.

    Args:
        XtotT: Sequence of 2D arrays of shape (N, P_i) where N is the dimensionality
                of the space, and P_i is the number of sampled points for the i_th manifold.
        kappa: Margin size to use in the analysis (scalar, kappa > 0)
        n_t: Number of gaussian vectors to sample per manifold
        t_vecs: Optional sequence of 2D arrays of shape (Dm_i, n_t) where Dm_i is the reduced
                dimensionality of the i_th manifold. Contains the gaussian vectors to be used in
                analysis.  If not supplied, they will be randomly sampled for each manifold.

    Returns:
        a_Mfull_vec: 1D array containing the capacity calculated from each manifold
        R_M_vec: 1D array containing the calculated anchor radius of each manifold
        D_M_vec: 1D array containing the calculated anchor dimension of each manifold.
        res_coeff0: Residual correlation
        KK: Dimensionality of low rank structure
    '''
    # Number of manifolds to analyze
    num_manifolds = len(XtotT)
    # Compute the global mean over all samples
    Xori = np.concatenate(XtotT, axis=1)  # Shape (N, sum_i P_i)
    X_origin = np.mean(Xori, axis=1, keepdims=True)

    # Subtract the mean from each manifold
    Xtot0 = [XtotT[i] - X_origin for i in range(num_manifolds)]
    # Compute the mean for each manifold
    centers = [np.mean(XtotT[i], axis=1) for i in range(num_manifolds)]
    centers = np.stack(centers,
                       axis=1)  # Centers is of shape (N, m) for m manifolds
    center_mean = np.mean(centers, axis=1,
                          keepdims=True)  # (N, 1) mean of all centers

    # Center correlation analysis
    UU, SS, VV = np.linalg.svd(centers - center_mean)
    # Compute the max K
    total = np.cumsum(np.square(SS) / np.sum(np.square(SS)))
    maxK = np.argmax([t if t < 0.95 else 0 for t in total]) + 11

    # Compute the low rank structure
    norm_coeff, norm_coeff_vec, Proj, V1_mat, res_coeff, res_coeff0 = fun_FA(
        centers, maxK, 20000, n_reps)
    res_coeff_opt, KK = min(res_coeff), np.argmin(res_coeff) + 1

    # Compute projection vector into the low rank structure
    V11 = np.matmul(Proj, V1_mat[KK - 1])
    X_norms = []
    XtotInput = []
    for i in range(num_manifolds):
        Xr = Xtot0[i]
        # Project manifold data into null space of center subspace
        Xr_ns = Xr - np.matmul(V11, np.matmul(V11.T, Xr))
        # Compute mean of the data in the center null space
        Xr0_ns = np.mean(Xr_ns, axis=1)
        # Compute norm of the mean
        Xr0_ns_norm = np.linalg.norm(Xr0_ns)
        X_norms.append(Xr0_ns_norm)
        # Center normalize the data
        Xrr_ns = (Xr_ns - Xr0_ns.reshape(-1, 1)) / Xr0_ns_norm
        XtotInput.append(Xrr_ns)

    a_Mfull_vec = np.zeros(num_manifolds)
    R_M_vec = np.zeros(num_manifolds)
    D_M_vec = np.zeros(num_manifolds)
    # Make the D+1 dimensional data
    for i in range(num_manifolds):
        S_r = XtotInput[i]
        D, m = S_r.shape
        # Project the data onto a smaller subspace
        if D > m:
            Q, R = qr(S_r, mode='economic')
            S_r = np.matmul(Q.T, S_r)
            # Get the new sizes
            D, m = S_r.shape
        # Add the center dimension
        sD1 = np.concatenate([S_r, np.ones((1, m))], axis=0)

        # Carry out the analysis on the i_th manifold
        if t_vecs is not None:
            a_Mfull, R_M, D_M = each_manifold_analysis_D1(sD1,
                                                          kappa,
                                                          n_t,
                                                          t_vec=t_vecs[i])
        else:
            a_Mfull, R_M, D_M = each_manifold_analysis_D1(sD1, kappa, n_t)

        # Store the results
        a_Mfull_vec[i] = a_Mfull
        R_M_vec[i] = R_M
        D_M_vec[i] = D_M
    return a_Mfull_vec, R_M_vec, D_M_vec, res_coeff0, KK
Example #26
def cholesky_factorization_backward(l, lbar):
    abar = copyltu(anp.matmul(anp.transpose(l), lbar))
    abar = anp.transpose(aspl.solve_triangular(l, abar, lower=True, trans='T'))
    abar = aspl.solve_triangular(l, abar, lower=True, trans='T')
    return 0.5 * abar
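copyltu is not shown in this snippet; in the standard reverse-mode derivation of the Cholesky factorization it copies the lower triangle onto the upper one. A minimal sketch consistent with that convention:

import autograd.numpy as anp

def copyltu(x):
    # lower triangle plus the transpose of its strictly lower part
    return anp.tril(x) + anp.transpose(anp.tril(x, -1))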
Example #27
            print(names[i], values[i])

if True:
    """test if stuff works"""
    """get a simulator for BLRs"""
    from BLRSimulator import BLRSimulator
    """set up the simulation params"""
    coefs = np.array([1.0, -5.0, 20.5, 4.0, -3.5])
    sigma2 = 1.0
    d = len(coefs)
    n = 1000
    """create simulation object + simulated variables"""
    sim = BLRSimulator(d, coefs, sigma2=sigma2)
    X, Y = sim.generate(n=n, seed=2)

    MLE = np.matmul(np.linalg.inv(np.matmul(np.transpose(X), X)),
                    np.matmul(np.transpose(X), Y))

    print("true coefs:", coefs)
    print("MLE", MLE)
    """create SGD object + run SGD"""
    from Loss_frequentist import LogLossLinearRegression
    StandardLoss = LogLossLinearRegression(d)
    """THIS DOES NOT WORK! WHY?!?!"""
    optimization_object = SGD(StandardLoss, Y, X)

    optimization_object.fit(batch_size=1, epochs=30, learning_rate=0.1)

    optimization_object.report_parameters()
    """create SGD object + run SGD for robust version"""
    from Loss_frequentist import GammaLossLinearRegression
Example #28
File: nnet.py Project: mattjj/svae
### util

def rand_partial_isometry(m, n):
    d = max(m, n)
    return np.linalg.qr(npr.randn(d,d))[0][:m,:n]

def _make_ravelers(input_shape):
    ravel = lambda inputs: np.reshape(inputs, (-1, input_shape[-1]))
    unravel = lambda outputs: np.reshape(outputs, input_shape[:-1] + (-1,))
    return ravel, unravel


### basic layer stuff

layer = curry(lambda nonlin, W, b, inputs: nonlin(np.matmul(inputs, W) + b))
init_layer_random = curry(lambda d_in, d_out, scale:
                          (scale*npr.randn(d_in, d_out), scale*npr.randn(d_out)))
init_layer_partial_isometry = lambda d_in, d_out: \
    (rand_partial_isometry(d_in, d_out), npr.randn(d_out))
init_layer = lambda d_in, d_out, fn=init_layer_random(scale=1e-2): fn(d_in, d_out)



### special output layers to produce Gaussian parameters

@curry
def gaussian_mean(inputs, sigmoid_mean=False):
    mu_input, sigmasq_input = np.split(inputs, 2, axis=-1)
    mu = sigmoid(mu_input) if sigmoid_mean else mu_input
    sigmasq = log1pexp(sigmasq_input)
    return mu, sigmasq  # assumed return of the Gaussian parameters
Example #29
    def forward(self, x, input, tag):
        return np.matmul(self.Cs[None, ...], x[:, None, :, None])[:, :, :, 0] \
            + np.matmul(self.Fs[None, ...], input[:, None, :, None])[:, :, :, 0] \
            + self.ds
Example #30
def multiCamCalib(umeas, xworld, cameraMats, boardRotMats, boardTransVecs,
                  planeData, cameraData):
    """Compute the calibrated camera matrix, rotation matrix, and translation vector for each camera.

	Inputs
	-------------------------------------------
	umeas              2 x nX*nY*nplanes x ncams array of image points in each camera
	xworld             3 x nX*nY*nplanes of all world points
	cameraMats         3 x 3 x ncams that holds all camera calibration matrices; first estimated in single camera calibration
	boardRotMats       3 x 3*nplanes x ncams for each board in each camera; from single camera calibration.
	boardTransVecs     3 x nplanes x ncams for each board in each camera; from single camera calibration
	planeData          struct of image related parameters
	cameraData         struct of camera related parameters

	Returns
	-------------------------------------------
	cameraMats         3 x 3 x ncams; final estimate of the camera matrices
	rotationMatsCam    3 x 3 x ncams; final estimate of the camera rotation matrices
	transVecsCam       3 x ncams; final estimate of the camera translational vectors
	"""

    # loop over all camera pairs
    # call kabsch on each pair
    # store rotation matrices and translational vectors
    # Calculate terms in sum, add to running sum

    log.info("Multiple Camera Calibrations")

    # Extract dimensional data.
    ncams = cameraData.ncams
    nplanes = planeData.ncalplanes

    # Storage variables.
    R_pair = np.zeros([3, 3, ncams, ncams])
    t_pair = np.zeros([3, ncams, ncams])

    xworld = np.transpose(xworld)

    for i in range(0, ncams):
        for j in range(0, ncams):
            if i == j:
                t_pair[:, i, j] = np.zeros(3)
                R_pair[:, :, i, j] = np.eye(3, 3)
                continue
            log.VLOG(2, 'Correlating cameras (%d, %d)' % (i, j))

            # Compute world coordinates used by kabsch.
            Ri = np.concatenate(np.moveaxis(boardRotMats[:, :, :, i], 2, 0),
                                axis=0)
            ti = boardTransVecs[:, :, i].reshape((-1, 1), order='F')
            Rj = np.concatenate(np.moveaxis(boardRotMats[:, :, :, j], 2, 0),
                                axis=0)
            tj = boardTransVecs[:, :, j].reshape((-1, 1), order='F')

            # Compute world coordinates and use Kabsch
            Xi = np.matmul(Ri, xworld) + ti
            Xj = np.matmul(Rj, xworld) + tj
            Xi = Xi.reshape((3, -1), order='F')
            Xj = Xj.reshape((3, -1), order='F')
            R_pair_ij, t_pair_ij = kabsch(Xi, Xj)
            R_pair[:, :, i, j] = R_pair_ij
            t_pair[:, i, j] = t_pair_ij
            log.VLOG(
                3, 'Pairwise rotation matrix R(%d, %d) = \n %s' %
                (i, j, R_pair_ij))
            log.VLOG(
                3, 'Pairwise translation vector t(%d, %d) = %s' %
                (i, j, t_pair_ij))

    log.info("Kabsch complete. Now minimizing...")

    ##################### Solve minimization problems for Rotation and Translation Estimates ####################

    # Solve a linear least squares problem to estimate the translation vectors of all cameras.
    # Each camera pair (i, j) contributes a residual t_j - R_ij t_i - t_ij; the blocks are
    # constructed here per pair of cameras and later restructured into the linear problem Ax - b.
    log.info(
        "Minimizing for first estimates of translation vectors per camera.")
    A = np.zeros((3, 3 * ncams, ncams, ncams))
    b = np.zeros((3, ncams, ncams))

    # Construct expanded matrix expression for minimization.
    for i in range(0, ncams):
        for j in range(0, ncams):
            if i == j:
                continue
            # Rij = R_pair[:, :, min(i, j), max(i, j)]
            # tij = t_pair[:, min(i, j), max(i, j)]
            Rij = R_pair[:, :, i, j]
            tij = t_pair[:, i, j]

            A[:, 3 * i:3 * (i + 1), i, j] = -Rij
            A[:, 3 * j:3 * (j + 1), i, j] = np.eye(3)

            b[:, i, j] = tij

    A = np.concatenate(np.concatenate(np.moveaxis(A, (2, 3), (0, 1)), axis=0),
                       axis=0)
    b = b.reshape((-1, 1), order='F')

    log.VLOG(4, 'Minimization matrix A for translation vectors \n %s' % A)
    log.VLOG(4, 'Minimization vector b for translation vectors \n %s' % b)

    # We want to constrain only the translational vector for the first camera
    # Create a constraint array with a 3x3 identity matrix in the top left
    constraint_array = np.zeros([3 * ncams, 3 * ncams])
    constraint_array[0:3, 0:3] = np.eye(3)

    # Solve the minimization, requiring the first translational vector to be the zero vector

    # Initialize camera positions assuming the first camera is at the origin and the cameras
    # are uniformly spaced by a horizontal displacement vector and a vertical one, such as in
    # a rectangular grid of the dimensions to be specified.

    def trans_cost(x):
        return np.linalg.norm(np.matmul(A, np.array(x)) - b)

    # trans_cost_deriv = autograd.grad(lambda *args: trans_cost(np.transpose(np.array(args))))

    # Construct the problem.
    x = cp.Variable((3 * ncams, 1))
    objective = cp.Minimize(cp.sum_squares(A @ x - b))
    constraints = [constraint_array @ x == np.zeros((3 * ncams, 1))]
    prob = cp.Problem(objective, constraints)

    print("Optimal value", prob.solve())

    if prob.status == cp.OPTIMAL:
        log.info("Minimization for Translation Vectors Succeeded!")
        print('Optimized translation error: ', trans_cost(x.value))
        t_vals = x.value
    else:
        log.error('Minimization Failed for Translation Vectors!')
        return
    # Translation vectors stored as columns.
    t_vals = np.transpose(t_vals.reshape((-1, 3)))

    for i in range(t_vals.shape[1]):
        log.VLOG(3, 't(%d) = \n %s' % (i, t_vals[:, i]))

    # log.info('Minimizing for translation vectors of cameras: %s', res.message)

    # Solve linear least square problem to minimize rotation matrices.
    log.info("Minimizing for first estimates of rotation matrices per camera.")
    A = np.zeros((9, 9 * ncams, ncams, ncams))

    # Construct expanded matrix expression for minimization.
    for i in range(0, ncams):
        for j in range(0, ncams):
            if i == j:
                continue
            Rij = R_pair[:, :, i, j]

            A[:, 9 * i:9 * (i + 1), i, j] = np.eye(9)

            A[:, 9 * j:9 * (j + 1), i, j] = -np.kron(np.eye(3), Rij)

    A = np.concatenate(np.concatenate(np.moveaxis(A, (2, 3), (0, 1)), axis=0),
                       axis=0)
    b = np.zeros(A.shape[0])

    log.VLOG(4, 'Minimization matrix A for rotation matrices \n %s' % A)
    log.VLOG(4, 'Minimization vector b for rotation matrices \n %s' % b)

    # We want to constrain only the rotation matrix for the first camera
    # Create a constraint array with a 9x9 identity matrix in the top left
    constraint_array = np.zeros([9 * ncams, 9 * ncams])
    constraint_array[0:9, 0:9] = np.eye(9)
    bound = np.zeros(9 * ncams)
    bound[0] = 1
    bound[4] = 1
    bound[8] = 1

    # Initialize all rotation matrices to identities assuming no camera is rotated with respect to another.
    x0 = np.zeros(9 * ncams)
    for i in range(ncams):
        x0[9 * i] = 1
        x0[9 * i + 4] = 1
        x0[9 * i + 8] = 1

    # Solve the minimization, requiring the first rotation matrix to be the identity matrix

    # Construct the problem.
    x = cp.Variable(9 * ncams)
    objective = cp.Minimize(cp.sum_squares(A @ x - b))
    constraints = [constraint_array @ x == bound]
    prob = cp.Problem(objective, constraints)

    print("Optimal value", prob.solve())

    print('Minimization status: ', prob.status)
    if prob.status == cp.OPTIMAL:
        log.info("Minimization for Rotational Matrices Succeeded!")
        R_vals = x.value
    else:
        log.error('Minimization Failed for Rotational Matrices!')
        return

    # Rotation matrices stored rows first
    R_vals = np.moveaxis(R_vals.reshape(-1, 3, 3), 0, 2)

    # Fit rotation matrices from optimized result.
    for i in range(ncams):
        R_vals[:, :, i] = fitRotMat(R_vals[:, :, i])
        log.VLOG(3, 'R(%d) = \n %s' % (i, R_vals[:, :, i]))

    log.info(
        "Finding average rotation matrices and translational vectors from single camera calibration."
    )

    # Obtain average Rs and ts per images over cameras.
    R_images = np.sum(boardRotMats, axis=3) / ncams
    t_images = np.sum(boardTransVecs, axis=2) / ncams

    for i in range(R_images.shape[2]):
        log.VLOG(
            3, 'Average rotation matrix for Image %d = \n %s' %
            (i, R_images[:, :, i]))
    for i in range(t_images.shape[1]):
        log.VLOG(
            3, 'Average translation vector for Image %d = \n %s' %
            (i, t_images[:, i]))

    ####################### Final Minimization to Yield All Parameters######################

    # K_c, R_c, R_n, t_c, t_n.
    # cameraMats, R_vals, t_vals = interpretResults('../../../FILMopenfv-samples/sample-data/pinhole_calibration_data/calibration_results/results_000001438992543.txt')

    # Pack all matrices into a very tall column vector for minimization
    min_vec_ini = np.append(
        cameraMats.reshape((-1)),
        np.append(
            R_vals.reshape((-1)),
            np.append(R_images.reshape((-1)),
                      np.append(t_vals.reshape((-1)), t_images.reshape(
                          (-1))))))

    planeVecs = {}

    # Set up objective function and Jacobian sparsity structure for minimization.
    def reproj_obj_lsq(min_vec):
        # Compute the reprojection error at each intersection per each camera per each image.

        cameraMats, rotMatsCam, rotMatsBoard, transVecsCam, transVecsBoard = unpack_reproj_min_vector(
            cameraData, planeData, min_vec)

        err = np.zeros(ncams * nplanes * nX * nY)

        # Set pixel normalization factor to 1 here.
        normal_factor = 1

        for c in range(ncams):
            for n in range(nplanes):
                for i in range(nX):
                    for j in range(nY):
                        err[c * nplanes * nX * nY + n * nX * nY + i * nY + j] = \
                         reproj_error(normal_factor, umeas[:, n * nX * nY + i * nY + j, c],
                                      xworld[:, nX * nY * n + i * nY + j], cameraMats[:, :, c],
                                      rotMatsCam[:, :, c], rotMatsBoard[:, :, n], transVecsCam[:, c],
                                      transVecsBoard[:, n])
        return err

    def bundle_adjustment_sparsity():
        m = ncams * nplanes * nX * nY
        n = min_vec_ini.shape[0]
        A = lil_matrix((m, n), dtype=int)

        num_nonzeros = np.sum([
            np.prod(x.shape[:-1])
            for x in [cameraMats, R_vals, t_vals, R_images, t_images]
        ])
        for c in range(ncams):
            # make boolean arrays for camera-indexed arrays
            cams = np.zeros(cameraMats.shape)
            cams[:, :, c] = np.ones(cameraMats.shape[:-1])
            rots = np.zeros(R_vals.shape)
            rots[:, :, c] = np.ones(R_vals.shape[:-1])
            ts = np.zeros(t_vals.shape)
            ts[:, c] = np.ones(t_vals.shape[:-1])

            cams = cams.reshape(-1)
            rots = rots.reshape(-1)
            ts = ts.reshape(-1)

            for n in range(nplanes):
                # make boolean arrays for plane-indexed arrays
                if n not in planeVecs:
                    rimgs = np.zeros(R_images.shape)
                    rimgs[:, :, n] = np.ones(R_images.shape[:-1])
                    timgs = np.zeros(t_images.shape)
                    timgs[:, n] = np.ones(t_images.shape[:-1])
                    planeVecs[n] = rimgs, timgs
                else:
                    rimgs, timgs = planeVecs[n]

                rimgs = rimgs.reshape(-1)
                timgs = timgs.reshape(-1)

                # create boolean array with changed values for jacobian
                x0 = np.append(
                    cams.reshape((-1)),
                    np.append(
                        rots.reshape((-1)),
                        np.append(
                            rimgs.reshape((-1)),
                            np.append(ts.reshape((-1)), timgs.reshape((-1))))))

                # set changing values in jacobian
                A[nY * nX * (n + nplanes * c):nY * nX * (n + 1 + nplanes * c),
                  x0.nonzero()] = np.ones((nY * nX, num_nonzeros))

        return A

    A = bundle_adjustment_sparsity()
    reproj_res = scipy.optimize.least_squares(reproj_obj_lsq,
                                              min_vec_ini,
                                              jac_sparsity=A,
                                              verbose=2,
                                              x_scale='jac',
                                              ftol=1e-2)

    if reproj_res.success:
        finError = reproj_min_func(planeData, cameraData, umeas, xworld,
                                   reproj_res.x)
        log.info("Final error: {}".format(finError))
        log.info("Reprojection Minimization Succeeded!")
        cameraMats, rotationMatsCam, rotationMatsBoard, transVecsCam, transVecsBoard = unpack_reproj_min_vector(
            cameraData, planeData, reproj_res.x)
    else:
        log.error('Reprojection Minimization Failed!')
        return

    return cameraMats, rotationMatsCam, rotationMatsBoard, transVecsCam, transVecsBoard, finError
Example #31
    def sdot(self, S, t, param_vec, Cin):  # S is the state (populations and nutrients), t is time, param_vec holds the growth and interaction parameters, Cin is the nutrient input
        '''
        Calculates and returns derivatives for the numerical solver odeint

        Parameters:
            S: current state
            t: current time
            Cin: array of the concentrations of the auxotrophic nutrients and the
                common carbon source
            param_vec: list of parameters for all the equations
            num_species: the number of bacterial populations
        Returns:
            dsol: array of the derivatives for all state variables
        '''
        # extract parameters
        '''
        A = param_vec[5]
        #A = param_vec[0]
        y = param_vec[0]
        y3 = param_vec[1]

        Rmax = param_vec[2]

        Km = self.ode_params[5]
        Km3 = self.ode_params[6]

        Km = param_vec[10:12]
        Km3 = param_vec[12:14]
        '''

        # autograd gives t as an array_box, need to convert to int
        if str(type(t)) == '<class \'autograd.numpy.numpy_boxes.ArrayBox\'>': # sort this out
            t = t._value
            t = int(t)
        else:
            t = int(t)
        t = min(Cin.shape[0] - 1, t) # to prevent solver from going past the max time

        Cin = Cin[t]

        print(" param vec: ", param_vec)
        A = np.reshape(param_vec[-4:], (2,2))
        y = param_vec[4:6]
        y3 = param_vec[6:8]

        Rmax = param_vec[8:10]

        Km = self.ode_params[5]
        Km3 = self.ode_params[6]

        num_species = 2
        # extract variables
        N = np.array(S[:num_species])
        C = np.array(S[num_species:2*num_species])
        C0 = np.array(S[-1])

        C0in, q = self.ode_params[:2]

        R = self.monod(C, C0, Rmax, Km, Km3)

        Cin = Cin[:num_species]
        # calculate derivatives
        dN = N * (R + np.matmul(A,N) - q) # q term takes account of the dilution
        dC = q*(Cin - C) - (1/y)*R*N # sometimes dC.shape is (2,2)


        dC0 = q*(C0in - C0) - sum(1/y3[i]*R[i]*N[i] for i in range(num_species))

        # construct derivative vector for odeint
        dC0 = np.array([dC0])
        dsol = np.append(dN, dC)
        dsol = np.append(dsol, dC0)



        return tuple(dsol)
Example #32
    def trans_cost(x):
        return np.linalg.norm(np.matmul(A, np.array(x)) - b)
Example #33
    def f_of_x(self, x):
        x_c = x - self.opt_x
        return np.matmul(x_c.T, np.matmul(self.a_mat, x_c))
Example #34
    xworld = np.array([[i * dX + 1, j * dY + 1, 0] for _ in range(nplanes)
                       for i in range(nX)
                       for j in range(nY)]).astype('float32')
    camMatrix, boardRotMat, boardTransVec = singleCamCalibMat(
        umeas, xworld, planeData, cameraData)

    cameraMats, rotationMatsCam, rotationMatsBoard, transVecsCam, transVecBoard, finalError = multiCamCalib(
        umeas, xworld, camMatrix, boardRotMat, boardTransVec, planeData,
        cameraData)

    # print(rotationMatsCam)
    # print(transVecsCam)

    # construct P matrix
    P = np.zeros((3, 4, cameraData.ncams))
    for c in range(cameraData.ncams):
        Rt = np.column_stack(
            (np.transpose(rotationMatsCam[:, :, c]), transVecsCam[:, c]))
        P[:, :, c] = np.matmul(cameraMats[:, :, c], Rt)

    f = saveCalibData(exptPath, camIDs, P, transVecsCam, transVecBoard,
                      sceneData, cameraData, finalError, pix_phys, 'results_')
    log.info('\nData saved in ' + str(f))

    # TODO: Change saved data according to what multiCamCalib returns (we should probably try to make it return these
    #  though)
    # xworld = np.array([[i * 10, j * 10, k * 5] for i in range(nX) for j in range(nY) for k in range(nplanes)]).astype(
    #     'float32')
    # f = saveCalibData(cameraMats, rotationMatsCam, transVecsCam, xworld)
    # print('\nData saved in ' + str(f))