Example no. 1
def t_stat(data, X_matrix):
    """
    Return the estimated betas, t-values, degrees of freedom, and p-values for the glm_multi regression
    
    Parameters
    ----------
    data: numpy array of 4 dimensions 
             The image data of one subject, one run
    X_matrix: numpy array 
       The design matrix for glm_multi
    Note that the fourth dimension of `data` (time or the number 
    of volumes) must be the same as the number of rows that X_matrix has. 
    
    Returns
    -------
    beta: estimated beta values
    
    t: t-values of the betas
    
    df: degrees of freedom
    
    p: p-values corresponding to the t-values and degrees of freedom
    """

    beta = glm_beta(data, X_matrix)

    # Calculate the parameters - b hat
    beta = np.reshape(beta, (-1, beta.shape[-1])).T

    fitted = X_matrix.dot(beta)
    # Residual error
    y = np.reshape(data, (-1, data.shape[-1]))
    errors = y.T - fitted
    # Residual sum of squares
    RSS = (errors**2).sum(axis=0)
 
    df = X_matrix.shape[0] - npl.matrix_rank(X_matrix)
    # Mean residual sum of squares
    MRSS = RSS / df
    # calculate bottom half of t statistic
    cov_beta = npl.pinv(X_matrix.T.dot(X_matrix))

    SE = np.zeros(beta.shape)
    for i in range(X_matrix.shape[-1]):
        c = np.zeros(X_matrix.shape[-1])
        c[i] = 1
        c = np.atleast_2d(c).T
        SE[i, :] = np.sqrt(MRSS * c.T.dot(cov_beta.dot(c)))


    zeros = np.where(SE == 0)
    SE[zeros] = 1
    t = beta / SE

    t[zeros] = 0
    # Get p value for t value using CDF of t distribution
    ltp = t_dist.cdf(abs(t), df)
    p = 1 - ltp  # upper tail

    return beta.T, t, df, p
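Below is a minimal standalone sketch of the same pinv-based t statistics on hypothetical 1-D data (no glm_beta or 4-D image handling):

import numpy as np
import numpy.linalg as npl
from scipy.stats import t as t_dist

# Hypothetical design: intercept plus one regressor over 100 time points.
rng = np.random.default_rng(0)
X = np.column_stack([np.ones(100), np.linspace(-1, 1, 100)])
y = X @ np.array([2.0, 0.5]) + rng.normal(scale=0.3, size=100)

beta = npl.pinv(X) @ y                        # least-squares estimates
df = X.shape[0] - npl.matrix_rank(X)          # residual degrees of freedom
MRSS = ((y - X @ beta) ** 2).sum() / df       # mean residual sum of squares
cov_beta = MRSS * npl.pinv(X.T @ X)           # estimated covariance of beta
t_vals = beta / np.sqrt(np.diag(cov_beta))    # one t value per design column
p_vals = 1 - t_dist.cdf(np.abs(t_vals), df)   # upper-tail p values
print(t_vals, p_vals)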
Example no. 2
def supf(y, x, p):
    T = y.shape[0]
    break_points = np.floor(np.array([T * p, T * (1 - p)]))
    break_points = np.arange(break_points[0], break_points[1] + 1, dtype=np.int32)
    # Demean since the intercept doesn't break
    x = x - np.mean(x)
    y = y - np.mean(y)
    b = pinv(x).dot(y)
    e = y - x.dot(b)
    # Compute full sample R2
    R2_r = 1 - e.dot(e) / y.dot(y)
    k = x.shape[1]
    F_stat = np.zeros(T)
    for t in break_points:
        X1 = x[:t]
        X2 = x[t:]
        # Parameters and errors before the break
        b = pinv(X1).dot(y[:t])
        e[:t] = y[:t] - X1.dot(b)
        # Parameters and errors after the break
        b = pinv(X2).dot(y[t:])
        e[t:] = y[t:] - X2.dot(b)
        # R2 from model with break
        R2_u = 1 - e.dot(e) / y.dot(y)
        # F stat for break at t
        F_stat[t] = ((R2_u - R2_r) / k) / ((1 - R2_u) / (T - 2 * k - 1))
    # Only return maximum F stat
    return F_stat.max()
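A hedged usage sketch for supf on hypothetical data with a slope change halfway through the sample; the imports below provide the bare np and pinv names the function relies on:

import numpy as np
from numpy.linalg import pinv  # required by supf above

rng = np.random.default_rng(1)
T = 200
x = rng.normal(size=(T, 1))
slope = np.where(np.arange(T) < T // 2, 0.5, 1.5)   # break in the slope at T/2
y = slope * x[:, 0] + rng.normal(scale=0.1, size=T)

# Search for the break over the central 70% of the sample (trim p = 0.15).
print(supf(y, x, 0.15))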
Example no. 3
def td_solver(P, r, X, gm, lm, d=None):
    """Compute the TD solution for an MDP under linear function approximation.

    Args:
      P : The transition matrix under a given policy.
      r : The expected immediate reward for each state under the policy.
      X : The feature matrix (one row for each state)
      gm : The discount parameter, gamma
      lm : The bootstrapping parameter, lambda
      d (optional): The stationary distribution to use.

    Returns:
      theta: the weight vector found by the TD solution.
    """
    ns = len(P) # number of states
    I = np.eye(ns)
    # TODO: Check for validity of P, r, X (size and values)
    # TODO: Provide a way to handle terminal states

    # account for scalar, vector, or matrix parameters
    G = parameter_matrix(gm)
    L = parameter_matrix(lm)

    # compute the stationary distribution if unspecified
    if d is None:
        d = stationary(P)
    # the stationary distribution as a matrix
    D = np.diag(d)

    # Solve the equation
    A = X.T @ D @ pinv(I - P @ G @ L) @ (I - P @ G) @ X
    b = X.T @ D @ pinv(I - P @ G @ L) @ r
    return pinv(A) @ b
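A small sketch of the same TD fixed point for scalar gamma and lambda; the parameter_matrix and stationary helpers used above are not shown here, so plausible stand-ins are inlined for a tiny hypothetical chain:

import numpy as np
from numpy.linalg import pinv

# Hypothetical 3-state chain, scalar discount and bootstrapping parameters.
P = np.array([[0.1, 0.9, 0.0],
              [0.0, 0.1, 0.9],
              [0.9, 0.0, 0.1]])
r = np.array([0.0, 0.0, 1.0])
X = np.array([[1.0, 0.0],
              [0.0, 1.0],
              [1.0, 1.0]])
gm, lm = 0.9, 0.5

ns = len(P)
I = np.eye(ns)
G = gm * I        # what parameter_matrix(gm) presumably returns for a scalar
L = lm * I        # likewise for lambda

# Stationary distribution: left eigenvector of P for eigenvalue 1.
evals, evecs = np.linalg.eig(P.T)
d = np.real(evecs[:, np.argmin(np.abs(evals - 1))])
d = d / d.sum()
D = np.diag(d)

A = X.T @ D @ pinv(I - P @ G @ L) @ (I - P @ G) @ X
b = X.T @ D @ pinv(I - P @ G @ L) @ r
print(pinv(A) @ b)   # theta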
Example no. 4
 def predict_all(self):
     """Returns the predictions and the reconstructions for all training / test data."""
     outputs, hidden = self.feed_forward(self.pca_transformer.transform(self.X))
     hidden_expected = dot(self._inverse_activation(outputs), pinv(self.W_output))[:, :-1]
     hidden_reconstruction = self.pca_transformer.inverse_transform(
         dot(self._inverse_activation(hidden_expected), pinv(self.W_hidden))[:, :-1])
     return outputs.argmax(axis=1), hidden_reconstruction.reshape(self.app.dataset['images'].shape)
Example no. 5
 def test_entity_array_variable_get_cond_mean_and_var(self):
   groups = self.groups
   group = groups['u']
   variable = [v for v in groups['u'].iter_variables() if v.entity_id
       == 'v1' ][0]
   related_votes = [
       {'review': 'r1', 'author': 'a1', 'voter': 'v1', 'vote': 4},
       {'review': 'r2', 'author': 'a1', 'voter': 'v1', 'vote': 5},
       {'review': 'r5', 'author': 'a3', 'voter': 'v1', 'vote': 5},
   ]
   v_values = [[v for v in groups['v'].iter_variables() if v.entity_id ==
       vote['review']][0].value for vote in related_votes]
   v_sum = sum([v.dot(v.T) for v in v_values]) / self.var_H.value
   var_matrix = group.var_param.value * identity(const.K)
   inv_var = pinv(var_matrix)
   true_var = pinv(inv_var + v_sum)
   rest_term = sum([variable.get_rest_value(groups, related_votes[i]) * 
       v_values[i] for i in xrange(len(related_votes))]) / \
       group.var_H.value
   dot_term = inv_var.dot(group.weight_param.value) \
       .dot(variable.features)
   true_mean = true_var.dot(rest_term + dot_term)
   res_mean, res_var = variable.get_cond_mean_and_var(groups, self.votes)
   ntest.assert_allclose(true_var, res_var, rtol=1, atol=1e-7)
   ntest.assert_allclose(true_mean, res_mean, rtol=1, atol=1e-7)
Example no. 6
def ls(R,W,d):
  (n,m) = R.shape
  sigma = 0.0001
  Id = np.identity(d)
  U0 = np.zeros((d,n))
  
  V = np.random.rand(d, m)
  for i in range(1000):
      U = U0.copy()  # reset U at the start of each outer iteration
      for g in range(n):
          VV = np.zeros((d, d))
          for w in W[g]:
             VV = VV + np.outer(V[:, w], V[:, w])
          X = nlin.pinv(sigma*Id+VV)
          #X = sigma*Id + VV
          for v in W[g]:
             U[:,g] = U[:,g] + R[g,v]*np.dot(V[:,v].T,X)               
             #U[:,g] = U[:,g] + R[g,v]*slin.solve(X ,V[:,v].T)
  
      Y = np.dot(U,U.T)
      Y = nlin.pinv(sigma*Id+Y)
      Y = np.dot(U.T,Y)
      #Y = np.linalg.solve( U.T, sigma*Id+Y)
      #Y = np.linalg.lstsq(U.T, sigma*Id+Y)
      for v in range(m):
         V[:,v] = np.dot(R[:,v].T,Y)

  return (U,V)
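For reference, a hedged sketch of the same ridge-regularized alternating least squares on a fully observed matrix (hypothetical shapes; the per-row observed-entry bookkeeping via W above is omitted):

import numpy as np
import numpy.linalg as nlin

rng = np.random.default_rng(0)
n, m, d, sigma = 30, 40, 5, 0.0001
R = rng.normal(size=(n, m))
Id = np.identity(d)

U = rng.normal(size=(d, n))
V = rng.normal(size=(d, m))
for _ in range(50):
    # Fix V, solve the ridge problem for all columns of U at once.
    U = nlin.pinv(sigma * Id + V @ V.T) @ V @ R.T
    # Fix U, solve for all columns of V.
    V = nlin.pinv(sigma * Id + U @ U.T) @ U @ R

print(nlin.norm(R - U.T @ V))   # reconstruction error of the rank-d fit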
Example no. 7
	def sample_posterior_gibbs(self, X, num_steps=10, Y=None, Z=None):
		"""
		B{References:}
			- Doucet, A. (2010). I{A Note on Efficient Conditional Simulation of
			Gaussian Distributions.}
		"""

		# filter matrix and filter responses
		W = pinv(self.A)
		WX = dot(W, X)

		# nullspace projection matrix
		Q = eye(self.num_hiddens) - dot(W, self.A)

		# initial hidden state
		if Z is None:
			Y = WX + dot(Q, Y) if Y is not None else \
				WX + dot(Q, self.sample_prior(X.shape[1]))
		else:
			V = pinv(self.nullspace_basis())
			Y = WX + dot(V, Z)

		# Gibbs sample between S and Y given X
		for step in range(num_steps):
			# update scales
			S = self.sample_scales(Y)

			# update hidden states
			Y = self._sample_posterior_cond(Y, X, S, W, WX, Q)

			if Distribution.VERBOSITY > 1:
				print '{0:6}\t{1:10.2f}'.format(step + 1, mean(self.prior_energy(Y)))

		return asarray(Y)
Example no. 8
def AND(C, B):
	
	dim, col = C.shape
	tolerance = 1e-14

	UC, SC, UtC = svd(C)
	UB, SB, UtB = svd(B)

	diag_SC = diag(SC)
	diag_SB = diag(SB)

	# sum up how many elements on diagonal 
	# are bigger than tolerance
	numRankC = int((diag_SC > tolerance).sum())
	numRankB = int((diag_SB > tolerance).sum())

	UC0 = matrix(UC[:, numRankC:])
	UB0 = matrix(UB[:, numRankB:])
	W, Sigma, Wt = svd(UC0 * UC0.transpose() + UB0 * UB0.transpose())
	numRankSigma = int((diag(Sigma) > tolerance).sum())
	Wgk = matrix(W[:, numRankSigma:])
	I = matrix(identity(dim))
	CandB = \
	  Wgk * inv(Wgk.transpose() *  \
	  ( pinv(C, tolerance) + pinv(B, tolerance) - \
	    I) * Wgk) *Wgk.transpose()
	return CandB
Example no. 9
def update_b(i_index, b, alpha, beta, gamma, sigma2, lambda_D,
             N_g, uni_id, uni_diet, id_g, p, W, X, Z, y):
    i = uni_id[i_index]

    for g_search, i_search in id_g.iteritems():
        if np.any(i_search == i):
            g = g_search

    g_index = np.where(uni_diet == g)[0][0]

    if np.all(gamma[g_index] == 0):  #check if all gamma's are 0
        V2 = lambda_D + np.dot(Z[i].T, Z[i])/sigma2
        mean2 = np.dot(pinv(V2), np.dot(Z[i].T, y[i]-W[i].dot(alpha)))/sigma2
    else:
        V2 = lambda_D + np.dot(Z[i].T, Z[i])/sigma2
        temp1 = XXsum(g_index, uni_diet, id_g, gamma, X)
        temp1 = pinv(temp1)
        V2 = V2 + np.dot(np.dot(np.dot(Z[i].T, X[i][:,gamma[g_index]!=0]), temp1), (np.dot(X[i][:,gamma[g_index]!=0].T, Z[i])))/(sigma2*N_g[g])
        mean2 = np.dot(Z[i].T, y[i] - W[i].dot(alpha) - np.dot(X[i][:,gamma[g_index]!=0], beta[g_index][gamma[g_index]!=0].reshape(np.sum(gamma[g_index]),1)))
        temp2 = np.dot(X[i][:,gamma[g_index]!=0].T, Z[i].dot(b[i_index].reshape(p, 1)))
        for j in id_g[g]:
            j_index = np.where(uni_id == j)[0][0]
            temp2 += np.dot(X[j][:,gamma[g_index]!=0].T, y[j] - W[j].dot(alpha) - Z[j].dot(b[j_index].reshape(p, 1)))
        mean2 = mean2 + np.dot(np.dot(Z[i].T.dot(X[i][:,gamma[g_index]!=0]), temp1), temp2)/N_g[g]
        mean2 = np.dot(pinv(V2), mean2)/sigma2

    #update
    b_new = np.random.multivariate_normal(mean2.reshape(p,), pinv(V2)).reshape(p, )
    return b_new
Example no. 10
def test_ridge_regression_py(X, Y, _lambda):

    X = X.astype(np.float32)
    Y = Y.astype(np.float32)
    
    t1 = time.time()
    
    n, p = X.shape

    if n < p:
        tmp = np.dot(X, X.T)
        if _lambda:
            tmp += _lambda*n*np.eye(n)
        tmp = la.pinv(tmp)

        beta_out = np.dot(np.dot(X.T, tmp), Y.reshape(-1, 1))
    else:
        tmp = np.dot(X.T, X)
        if _lambda:
            tmp += _lambda*n*np.eye(p)
        tmp = la.pinv(tmp)

        beta_out = np.dot(tmp, np.dot(X.T, Y.reshape(-1, 1)))
    
    t2 = time.time()
    dt = t2-t1
    
    print("total time (Python): {}".format(dt))
    print(beta_out[:20,0])
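A hedged sketch (hypothetical float64 data) of why the function can branch on n < p: the dual n-by-n solve and the primal p-by-p solve return the same ridge coefficients:

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
n, p, lam = 50, 200, 0.1              # more features than samples
X = rng.normal(size=(n, p))
Y = rng.normal(size=n)

# n < p branch: work in the n-by-n "dual" space.
beta_dual = X.T @ la.pinv(X @ X.T + lam * n * np.eye(n)) @ Y
# n >= p branch: work in the p-by-p "primal" space.
beta_primal = la.pinv(X.T @ X + lam * n * np.eye(p)) @ (X.T @ Y)

print(np.allclose(beta_dual, beta_primal))   # the two forms agree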
Example no. 11
 def _em_one_pass(self, centered=None, numcmpt=1, thresh=1e-16, out=None):
     """
     With numcmpt = 1, computes the first principal component
     of the data. Otherwise computes an unnormalized, non-orthogonal
     spanning set for the first numcmpt principal components. Assumes
     rows are variables, columns are data points.
     """
     csize = (self.ndim, numcmpt)
      if out is not None:
         assert out.shape == csize
         comp = out
         comp[:] = random.normal(size=csize)
     else:
         comp = random.normal(size=csize)
     
     # Initialize 'old' array to infinity
     comp_old = np.empty(csize) + np.inf
     
      if centered is None:
         # Center the data with respect to the dataset mean
         centered = self._data - self._mean
         
     # Compensate for the shape of the data
     if not self._rowvar:
         centered = centered.T
     
     while linalg.norm(comp_old - comp, np.inf) > thresh:
         pinvc_times_data = np.dot(linalg.pinv(comp), centered)
         comp_old[:] = comp
         comp[:] = np.dot(centered, linalg.pinv(pinvc_times_data))
     
     # Normalize the eigenvectors we obtained.
     comp /= np.apply_along_axis(linalg.norm, 0, comp)[np.newaxis, :]
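A standalone sketch of the same EM iteration on hypothetical data, with self._data, self._mean, and self._rowvar replaced by an explicit matrix whose rows are variables:

import numpy as np

rng = np.random.default_rng(0)
ndim, npts, numcmpt = 5, 400, 2
centered = rng.normal(size=(ndim, npts))           # variables in rows
centered -= centered.mean(axis=1, keepdims=True)

comp = rng.normal(size=(ndim, numcmpt))
comp_old = np.full_like(comp, np.inf)
while np.linalg.norm(comp_old - comp, np.inf) > 1e-8:
    scores = np.linalg.pinv(comp) @ centered       # E step: project onto span
    comp_old[:] = comp
    comp = centered @ np.linalg.pinv(scores)       # M step: refit the span

comp /= np.linalg.norm(comp, axis=0)               # normalize each column
print(comp.shape)                                  # (ndim, numcmpt)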
Example no. 12
 def t_stat(self):
     """ betas, t statistic and significance test given data,
     design matix, contrast
     This is OLS estimation; we assume the errors to have independent
     and identical normal distributions around zero for each $i$ in
     $\e_i$ (i.i.d).
     """
     if self.design is None:
         self.get_design_matrix()
     if self.t_values is None:
         y = self.data.T
         X = self.design
         c = [0, 0, 1]
         c = np.atleast_2d(c).T
         beta = npl.pinv(X).dot(y)
         fitted = X.dot(beta)
         errors = y - fitted
         RSS = (errors**2).sum(axis=0)
         df = X.shape[0] - npl.matrix_rank(X)
         MRSS = RSS / df
         SE = np.sqrt(MRSS * c.T.dot(npl.pinv(X.T.dot(X)).dot(c)))
         try:
             SE[SE == 0] = np.amin(SE[SE != 0])
         except ValueError:
             pass
         t = c.T.dot(beta) / SE
         self.t_values = abs(t[0])
     self.t_indices = np.array(self.t_values).argsort(
     )[::-1][:self.t_values.size]
     return self.t_indices
Example no. 13
def fit_to_model(imchunk,model, mode = 'pinv',fit_pix_mask = None,baseline = None):
    import numpy as np
    #im_array = (imchunk-baseline)#/baseline
    if baseline is not None:
        im_array = imchunk - baseline  # /baseline
    else:
        im_array = imchunk
    imshape = np.shape(im_array[0])
    im_array = im_array.reshape((-1,imshape[0]*imshape[1]))
    if mode == 'nnls':
        fits = np.empty((np.shape(model)[0],np.shape(im_array)[0]))
        for i,im2 in enumerate(im_array):
            im = im2.copy()
            im[~np.isfinite(im)] = 0
            from scipy.optimize import nnls
            if fit_pix_mask is not None:
                fits[:,i] = nnls(model[:,fit_pix_mask].T,im[fit_pix_mask])[0]
            else:
                fits[:,i] = nnls(model.T,im)[0]
    else:
        im = im_array
        print(np.shape(im_array))
        from numpy.linalg import pinv
        if fit_pix_mask is not None:
            fits = np.dot(pinv(model[:,fit_pix_mask]).T,im[:,fit_pix_mask].T)
        else:
            fits = np.dot(pinv(model).T,im)
    return fits
Example no. 14
 def update(self):
     '''Initialize other arrays from fundamental arrays'''
     #The sparse matrices are treated a little differently because they are not rectangular
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore",category=DeprecationWarning)
         if self.AtAi.size == 0:
             self.AtAi = la.pinv(self.At.dot(self.At.T).todense(),rcond=1e-6).astype(np.float32)#(AtA)^-1
             self.BtBi = la.pinv(self.Bt.dot(self.Bt.T).todense(),rcond=1e-6).astype(np.float32)#(BtB)^-1
Example no. 15
def noise(L, jump_op, ss, pops, Q, freq):
    noise = np.zeros(freq.size, dtype=complex128)
    for i in range(len(freq)):
        R_plus = np.dot(Q, np.dot(npla.pinv(1.j*freq[i]*np.eye(L.shape[0])-L), Q))
        R_minus = np.dot(Q, np.dot(npla.pinv(-1.j*freq[i]*np.eye(L.shape[0])-L), Q))
        noise[i] = np.dot(pops, np.dot(jump_op, ss)) \
                        + np.dot(pops, np.dot(np.dot(np.dot(jump_op, R_plus), jump_op) \
                                                   + np.dot(np.dot(jump_op, R_minus), jump_op), ss))
    return noise
Example no. 16
 def solve(self, eps=1e-8):
     '''
      Implement an LCQP solver, with numerical threshold eps.
     '''
     Cp = npl.pinv(self.C, eps)
     xopt = Cp * self.d
     P = eye(self.nx * self.N) - Cp * self.C
     xopt += npl.pinv(self.A * P, eps) * (self.b - self.A * xopt)
     return xopt
Example no. 17
def nipals_xy(X, Y, mode="PLS", max_iter=500, tol=1e-06):
    """
    NIPALS algorithm; returns the first left and right singular
    vectors of X'Y.

    :param X, Y: data matrix
    :type X, Y: :class:`numpy.array`

    :param mode: possible values "PLS" (default) or "CCA" 
    :type mode: string

    :param max_iter: maximal number of iterations (default: 500)
    :type max_iter: int

    :param tol: tolerance parameter; if norm of difference
        between two successive left singular vectors is less than tol,
        iteration is stopped
    :type tol: a non-negative float
            
    """
    yScore, uOld, ite = Y[:, [0]], 0, 1
    Xpinv = Ypinv = None
    # Inner loop of the Wold algo.
    while ite < max_iter:
        # Update u: the X weights
        if mode == "CCA":
            if Xpinv is None:
                Xpinv = linalg.pinv(X) # compute once pinv(X)
            u = dot(Xpinv, yScore)
        else:  # mode PLS
            # Mode PLS: regress each X column on yScore
            u = dot(X.T, yScore) / dot(yScore.T, yScore)
        # Normalize u
        u /= numpy.sqrt(dot(u.T, u))
        # Update xScore: the X latent scores
        xScore = dot(X, u)

        # Update v: the Y weights
        if mode == "CCA":
            if Ypinv is None:
                Ypinv = linalg.pinv(Y) # compute once pinv(Y)
            v = dot(Ypinv, xScore)
        else:
            # Mode PLS: regress each Y column on xScore
            v = dot(Y.T, xScore) / dot(xScore.T, xScore)
        # Normalize v
        v /= numpy.sqrt(dot(v.T, v))
        # Update yScore: the Y latent scores
        yScore = dot(Y, v)

        uDiff = u - uOld
        if dot(uDiff.T, uDiff) < tol or Y.shape[1] == 1:
            break
        uOld = u
        ite += 1
    return u, v
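A hedged numerical check (hypothetical X and Y) that the PLS-mode loop above behaves as power iteration on X'Y and therefore converges to its leading singular vectors:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 6))
Y = rng.normal(size=(100, 4))

# Reference: leading singular vectors of X'Y from a full SVD.
U, s, Vt = np.linalg.svd(X.T @ Y)
u_ref, v_ref = U[:, 0], Vt[0]

# Power-iteration form of the PLS-mode updates.
yScore = Y[:, 0]
for _ in range(500):
    u = X.T @ yScore
    u /= np.sqrt(u @ u)
    xScore = X @ u
    v = Y.T @ xScore
    v /= np.sqrt(v @ v)
    yScore = Y @ v

print(np.allclose(np.abs(u), np.abs(u_ref), atol=1e-6),
      np.allclose(np.abs(v), np.abs(v_ref), atol=1e-6))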
Example no. 18
def predict(x):
	maximum = "null"
	for param in params: # iterate through all distributions
		(m, u, v, name) = param
		mtx = pinv(v)*(np.transpose(np.subtract(x, m)))*(pinv(u))*(np.subtract(x, m))
		trace = np.trace(mtx) #butterfly has a small trace
		l = 1.0e300*np.exp(-0.5*trace)/((la.norm(v)**(n/2.0))*(la.norm(u)**(p/2.0))) # likelihood, excluding the "2pi" term and multiplying by a large positive number (we get overflow otherwise)
		if maximum == "null":
			maximum = (l, name)
		elif l > maximum[0]:
			maximum = (l, name)
			print maximum
	return maximum
Example no. 19
	def sample_posterior_ais(self, X, num_steps=10, annealing_weights=[]):
		"""
		Sample posterior distribution over hidden states using annealed importance
		sampling with Gibbs sampling transition operator.
		"""

		if not annealing_weights:
			annealing_weights = linspace(0, 1, num_steps + 1)[1:]

		# initialize proposal distribution to be Gaussian
		model = deepcopy(self)
		for gsm in model.subspaces:
			gsm.scales[:] = 1.

		# filter matrix and filter responses
		W = pinv(self.A)
		WX = dot(W, X)

		# nullspace basis and projection matrix
		B = self.nullspace_basis()
		Q = dot(B.T, B)

		# initialize proposal samples (Z is initially Gaussian and independent of X)
		Z = dot(B, randn(self.num_hiddens, X.shape[1]))
		Y = WX + dot(pinv(B), Z)

		# initialize importance weights (log-determinant of dot(B.T, B) not needed here)
		log_is_weights = sum(multiply(Z, dot(inv(dot(B, B.T)), Z)), 0) / 2. \
			+ (self.num_hiddens - self.num_visibles) / 2. * log(2. * pi)
		log_is_weights.resize(1, X.shape[1])

		for step, beta in enumerate(annealing_weights):
			# tune proposal distribution
			for i in range(len(self.subspaces)):
				# adjust standard deviations
				model.subspaces[i].scales = (1. - beta) + beta * self.subspaces[i].scales

			log_is_weights -= model.prior_energy(Y)

			# apply Gibbs sampling transition operator
			S = model.sample_scales(Y)
			Y = model._sample_posterior_cond(Y, X, S, W, WX, Q)

			log_is_weights += model.prior_energy(Y)

			if Distribution.VERBOSITY > 1:
				print '{0:6}\t{1:10.2f}'.format(step + 1, mean(self.prior_energy(Y)))

		log_is_weights += self.prior_loglikelihood(Y) + slogdet(dot(W.T, W))[1] / 2.

		return Y, log_is_weights
Example no. 20
 def _cov_(self, data, params, priors):
     '''Calculate covariance matrix of the normal posterior dist.'''
     V1 = np.zeros([data.p, data.p])
     for gdx in range(data.grp):
         g = data.unidiets[gdx]
         nzro_gamma = (params.gamma[gdx,:]!=0)
         for i in data.grp_uniids[g]:
             V1 += np.dot(data.id_W[i].T, data.id_W[i])
         if nzro_gamma.any():
             V1 += np.dot(self.__nxTw__[g].T,
                          np.dot(pinv(self.__nxTx__[g]),
                                 self.__nxTw__[g]))
     V1 = V1/params.sigma2 + priors.d4
     self.cov = pinv(V1)
Example no. 21
def get_h(w_matrix, x_matrix):
    """Finds a nonnegative right factor (H) of the perform_net_nmf function
    X ~ W.H

    Args:
        w_matrix: the positive left factor (W) of the perform_net_nmf function
        x_matrix: the positive matrix (X) to be decomposed

    Returns:
        h_matrix: nonnegative right factor (H)
    """
    wtw = np.dot(w_matrix.T, w_matrix)
    number_of_clusters = wtw.shape[0]
    wtx = np.dot(w_matrix.T, x_matrix)
    colix = np.arange(0, x_matrix.shape[1])
    rowix = np.arange(0, w_matrix.shape[1])
    h_matrix = np.dot(LA.pinv(wtw), wtx)
    h_pos = h_matrix > 0
    h_matrix[~h_pos] = 0
    col_log_arr = sum(h_pos == 0) > 0
    col_list = colix[col_log_arr]
    for cluster in range(0, number_of_clusters):
        if col_list.size > 0:
            w_ette = wtx[:, col_list]
            m_rows = w_ette.shape[0]
            n_cols = w_ette.shape[1]
            mcode_uniq_col_ix = np.arange(0, n_cols)
            h_ette = np.zeros((m_rows, n_cols))
            h_pos_ette = h_pos[:, col_list]
            mcoding = np.dot(2**(np.arange(0, m_rows)), np.int_(h_pos_ette))
            mcode_uniq = np.unique(mcoding)
            for u_n in mcode_uniq:
                ixidx = mcoding == u_n
                c_pat = mcode_uniq_col_ix[ixidx]
                if c_pat.size > 0:
                    r_pat = rowix[h_pos_ette[:, c_pat[0]]]
                    atmp = wtw[r_pat[:, None], r_pat]
                    btmp = w_ette[r_pat[:, None], c_pat]
                    atmptatmp = np.dot(atmp.T, atmp)
                    atmptatmp = LA.pinv(atmptatmp)
                    atmptbtmp = np.dot(atmp.T, btmp)
                    h_ette[r_pat[:, None], c_pat] = np.dot(atmptatmp, atmptbtmp)
                    h_matrix[:, col_list] = h_ette
            h_pos = h_matrix > 0
            h_matrix[~h_pos] = 0
            col_log_arr = sum(h_pos == 0) > 0
            col_list = colix[col_log_arr]
        else:
            break
    return h_matrix
Example no. 22
    def _Mstep(self,x,L):
        """
        VB-M step

        Parameters
        ----------
        x: array of shape(nbitem,self.dim)
           the data from which the model is estimated
        L: array of shape(nbitem,self.k)
           the likelihood of the data under each class
        """
        from numpy.linalg import pinv
        tiny = 1.e-15
        pop = L.sum(0)
       
        # shrinkage,weights,dof
        self.weights = self.prior_weights + pop
        pop = pop[0:self.k]
        L = L[:,:self.k]
        self.shrinkage = self.prior_shrinkage + pop
        self.dof = self.prior_dof + pop
        
        #reshape
        pop = np.reshape(pop,(self.k,1))
        prior_shrinkage = np.reshape(self.prior_shrinkage,(self.k,1))
        shrinkage = np.reshape(self.shrinkage,(self.k,1))

        # means
        means = np.dot(L.T,x)+ self.prior_means*prior_shrinkage
        self.means= means/shrinkage
        
        #precisions
        empmeans = np.dot(L.T,x)/np.maximum(pop,tiny)
        empcov = np.zeros(np.shape(self.prior_scale))
        for k in range(self.k):
             dx = x-empmeans[k]
             empcov[k] = np.dot(dx.T,L[:,k:k+1]*dx) 
                    
        covariance = np.array([pinv(self.prior_scale[k])
                               for k in range(self.k)])
        covariance += empcov

        dx = np.reshape(empmeans-self.prior_means,(self.k,self.dim,1))
        addcov = np.array([np.dot(dx[k],dx[k].T) for k in range(self.k)])
        apms =  np.reshape(prior_shrinkage*pop/shrinkage,(self.k,1,1))
        covariance += addcov*apms

        self.scale = np.array([pinv(covariance[k]) for k in range(self.k)])
Example no. 23
    def initialize(self):
        S = sum([N.dot(unit.X.T, unit.X) for unit in self.units])
        Y = sum([N.dot(unit.X.T, unit.Y) for unit in self.units])
        self.a = L.lstsq(S, Y)[0]

        D = 0
        t = 0
        sigmasq = 0
        for unit in self.units:
            unit.r = unit.Y - N.dot(unit.X, self.a)
            if self.q > 1:
                unit.b = L.lstsq(unit.Z, unit.r)[0]
            else:
                Z = unit.Z.reshape((unit.Z.shape[0], 1))
                unit.b = L.lstsq(Z, unit.r)[0]

            sigmasq += (N.power(unit.Y, 2).sum() -
                        (self.a * N.dot(unit.X.T, unit.Y)).sum() -
                        (unit.b * N.dot(unit.Z.T, unit.r)).sum())
            D += N.multiply.outer(unit.b, unit.b)
            t += L.pinv(N.dot(unit.Z.T, unit.Z))

        sigmasq /= (self.N - (self.m - 1) * self.q - self.p)
        self.sigma = N.sqrt(sigmasq)
        self.D = (D - sigmasq * t) / self.m
Example no. 24
 def getNormal(xs, Xseen, Yseen):
     def cov_matrix(x1s, x2s=None):
         if x2s is None:
             return covariance(np.asmatrix(x1s).T)
         return covariance(np.asmatrix(x1s).T, np.asmatrix(x2s).T)
 
     if len(Xseen) == 0:
         mu = np.zeros(xs.shape)
         sigma = cov_matrix(xs, xs)
     else:
         x2s = np.array(Xseen)
         o2s = np.array(Yseen)
         
         mu1 = np.zeros(xs.shape)
         mu1 = mu1.reshape((mu1.size,))
         mu2 = np.zeros(x2s.shape)
         a2 = np.matrix(o2s.reshape((len(o2s),1)))
     
         sigma11 = cov_matrix(xs, xs)
         sigma12 = cov_matrix(xs, x2s)
         sigma21 = cov_matrix(x2s, xs)
         sigma22 = cov_matrix(x2s,x2s)
         inv22 = la.pinv(sigma22)
 
         plusterm = np.asarray(np.dot(sigma12, np.dot(inv22, (a2 - mu2.reshape(a2.shape))))).squeeze()
         # print plusterm.shape
         mu = mu1 + plusterm
         sigma = sigma11 - np.dot(sigma12,np.dot(inv22,sigma21))
 
     return mu, sigma
Example no. 25
def _field_gradient_jac(ref, target):
    """
    Given a reference field ref and a target field target
    compute the jacobian of the target with respect to ref

    Parameters
    ----------
    ref: Field instance that yields the topology of the space
    target array of shape(ref.V,dim)

    Results
    -------
    fgj: array of shape (ref.V) that gives the jacobian
         implied by the ref.field->target transformation.
    """
    import numpy.linalg as nl
    n = ref.V
    xyz = ref.field
    dim = xyz.shape[1]
    fgj = []
    ln = ref.list_of_neighbors()
    for i in range(n):
        j = ln[i]
        if np.size(j) > dim - 1:
            dx = np.squeeze(xyz[j] - xyz[i])
            df = np.squeeze(target[j] - target[i])
            FG = np.dot(nl.pinv(dx), df)
            fgj.append(nl.det(FG))
        else:
            fgj.append(1)

    fgj = np.array(fgj)
    return fgj
Example no. 26
 def reduce(self, assignment):
     if all([x is None for x in assignment]):
         return MultivariateGaussianDistribution(self.mean, self.cov)
     # reordering variables, so that non-reduced variables go before reduced
     reduced_idx = [i for i in range(len(assignment)) if assignment[i] is not None]
     non_reduced_idx = [i for i in range(len(assignment)) if assignment[i] is None]
     x = np.matrix([assignment[idx] for idx in reduced_idx]).T
     new_idx = non_reduced_idx + reduced_idx
     mean1 = np.matrix([self.mean[idx] for idx in non_reduced_idx]).T
     mean2 = np.matrix([self.mean[idx] for idx in reduced_idx]).T
     cov11 = self.cov[non_reduced_idx][:, non_reduced_idx]
     cov22 = self.cov[reduced_idx][:, reduced_idx]
     cov12 = self.cov[non_reduced_idx][:, reduced_idx]
     mean = mean1 + cov12 * pinv(cov22) * (x - mean2)
     cov = cov11 - cov12 * pinv(cov22) * cov12.T
     return MultivariateGaussianDistribution(np.array(mean.T), cov)
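A compact sketch of the same Gaussian conditioning formulas on a hypothetical two-variable example where the second variable is observed:

import numpy as np
from numpy.linalg import pinv

mean = np.array([1.0, 2.0])
cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])
x = np.array([[3.0]])                          # observed value of the second variable

mean1 = mean[:1].reshape(-1, 1)                # non-reduced block
mean2 = mean[1:].reshape(-1, 1)                # reduced (observed) block
cov11, cov12, cov22 = cov[:1, :1], cov[:1, 1:], cov[1:, 1:]

cond_mean = mean1 + cov12 @ pinv(cov22) @ (x - mean2)
cond_cov = cov11 - cov12 @ pinv(cov22) @ cov12.T
print(cond_mean.ravel(), cond_cov)             # [1.6] [[1.64]]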
Example no. 27
	def __init__(self,N,xid,covd,modl,rl,sp=1.):
		#should take in N data vectors with corresponding models
		#xid is list of data vectors; should already be cut to correct scales, given in rl 
		#covd is full covariance matrix
		#modl is list of BAO templates
		#rl is the list of r values matching xid and covd
		self.xim = [] #these lists will be filled based on the r limits
		self.rl = []
		self.sp = sp
		bs = rl[1]-rl[0] #assumes linear bins
		s = 0
		nxib = len(xid)
		rsw = 0
		Bnode = 50.
		self.rl = rl
		self.xim = []
		for j in range(0,N):
			for i in range(0,len(xid[0])):
				self.xim.append(xid[j][i])
		print (self.xim,len(self.xim)		)
		
		self.nbin = len(self.rl)
		print ('using '+ str(self.nbin)+' xi(s) bins')
		#mt = zeros((self.nbin,self.nbin)) #this will be the trimmed covariance matrix
		#for i in range(mini,mini+self.nbin):
		#	for j in range(mini,mini+self.nbin):
		#		mt[i-mini][j-mini] = covd[i][j]
		mt = covd#[self.nbin,mini:mini+self.nbin]
		self.invt = linalg.pinv(mt)
		self.ximodmin = 10. #minimum of template
		self.modl = modl
		self.N = N
		print (self.wmod(100.))
Example no. 28
def smooth_pinv(B, L):
    """Regularized psudo-inverse

    Computes a regularized least square inverse of B

    Parameters
    ----------
    B : array_like (n, m)
        Matrix to be inverted
    L : array_like (n,)

    Returns
    -------
    inv : ndarray (m, n)
        regularized least square inverse of B

    Notes
    -----
    In the literature this inverse is often written $(B^{T}B+L^{2})^{-1}B^{T}$.
    However here this inverse is implemented using the pseudo-inverse because it
    is more numerically stable than the direct implementation of the matrix
    product.

    """
    L = diag(L)
    inv = pinv(concatenate((B, L)))
    return inv[:, : len(B)]
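A quick hedged check (hypothetical shapes) that the stacked pseudo-inverse above matches the textbook expression $(B^{T}B+L^{2})^{-1}B^{T}$ when the stacked matrix has full column rank:

import numpy as np
from numpy import concatenate, diag
from numpy.linalg import pinv, inv

rng = np.random.default_rng(0)
B = rng.normal(size=(10, 4))
L = diag(rng.uniform(0.5, 2.0, size=4))        # diagonal regularizer, built directly here

stacked = pinv(concatenate((B, L)))[:, :len(B)]
direct = inv(B.T @ B + L @ L) @ B.T
print(np.allclose(stacked, direct))            # True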
Example no. 29
    def pdf(self, x):
	""" Evaluates the pdf of the instantiated Multivariate Normal object 

	"""
	Z = (2*pi)**(-self.k/2.0)*det(self.sigma)**(-0.5) #normalization term
	pdf = Z*exp(-0.5*(x-self.mu).T.dot(pinv(self.sigma)).dot(x-self.mu)) 
	return pdf
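A standalone sanity check of that multivariate normal density on hypothetical numbers, compared against scipy.stats:

import numpy as np
from numpy.linalg import det, pinv
from scipy.stats import multivariate_normal

mu = np.array([0.0, 1.0])
sigma = np.array([[1.0, 0.3],
                  [0.3, 2.0]])
x = np.array([0.5, 0.5])
k = len(mu)

Z = (2 * np.pi) ** (-k / 2.0) * det(sigma) ** (-0.5)           # normalization term
pdf = Z * np.exp(-0.5 * (x - mu).T @ pinv(sigma) @ (x - mu))
print(np.isclose(pdf, multivariate_normal(mu, sigma).pdf(x)))  # True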
Example no. 30
	def i_kinematics(self, epsilon, goal, step=1.0):
		num_links = self.num_links
		ang = self.angles
		p_e = vToThree(get_p(ang, self.lengths, num_links-1))
		new_ang = ang
		while LA.norm(goal - p_e) > epsilon:
			if step < 0.0001:
				return new_ang
			else:
				# print(jacobian(ang, self.lengths))
				# print(goal)
				# print(p_e)
				# print("step: " + str(step))
				dr = (LA.pinv(jacobian(ang, self.lengths)) * (goal - p_e))
				new_ang = []
				new = []
				for i in range(num_links):
					new.append(vectorize(ang)[i] + (step * getvec(dr, i * 3, (i+1) * 3 - 1)))
				new_ang = devector(new)
				new_p_e = vToThree(get_p(new_ang, self.lengths, num_links-1))
				# The new guess is worse		
				# print("distances are: " + str(LA.norm(goal - p_e)) + " vs " + str(LA.norm(goal - new_p_e)))
				if LA.norm(goal - p_e) < LA.norm(goal - new_p_e) + 0.00001:
					step = step/2
				# The new guess is better
				else:
					step = 1.0
					p_e = new_p_e
					ang = new_ang
		return new_ang
Example no. 31
def find_new_points(imgtf, shapeo, means, covs, k, m, orient):
    stop = False
    counter = 0
    shape = np.copy(shapeo)   
    normalvectors = gl.get_normals(shape)
    normalvectors = np.reshape(normalvectors,(normalvectors.size / 2, 2),'F')
    shape = np.reshape(shape,(shape.size / 2, 2),'F')
 
    # For each point
    for i in range(shape.size / 2):
        
        # Calculate own sample (slice along normal)
        slices = gl.get_slice_indices(shape[i],normalvectors[i],m)
        own_gradient_profile = gl.slice_image(slices,imgtf)
        
        dist = np.zeros(2*(m - k) + 1)
        for j in range(0,2*(m - k) + 1):
            dist[j] = scp.mahalanobis(own_gradient_profile[j:j+(2*k + 1)].flatten(),means[i],LA.pinv(covs[i]))              
        min_ind = np.argmin(dist)
        # Calculate the lower boundary and upper boundary for central 50%
        lower = slices[0,:].size / 4.0
        upper = lower + slices[0,:].size / 2.0
        # Check whether the crown part of upper incisors is not in central 50%
        if (orient == 0 and i % 40 >= 10 and i % 40 < 30 and  (min_ind + k < lower or min_ind + k > upper)):
            counter += 1
        # Check whether the crown part of lower incisors is not in central 50%
        elif (orient == 1 and (i % 40 < 8 or i % 40 >= 28) and  (min_ind + k < lower or min_ind + k > upper)):
            counter += 1
        new_point = slices[:,min_ind+k]
        shape[i] = new_point
    # If less than 10% of the newly suggested points are outside of the central 50%, the algorithm has converged
    if float(counter) / (shape.size/4) < 0.1:
        stop = True
    shape = np.reshape(shape,(shape.size, 1),'F')  
    return shape, stop
Example no. 32
def Trajectory_generator(A, B, K_old, x0, noise, alpha, number_of_iteration, control_mode):
    ''' generate the trajectory and upper-bound based on DGR paper and compare it to DeePC paper '''


    # System dimensions
    n = np.shape(A)[0]
    m = np.shape(B)[1]


    mean_w = 0

    ############# Implementing DeePC for comparison ###################

    # Parameters for DeePC algorithm (for comparison)
    if control_mode == 0 or control_mode == 2:
        T_ini = 1  # 8
        N_deepc = 4  # 30
    else:
        T_ini = 1
        N_deepc = 5
    # L_deepc = T_ini + N_deepc
    T_deepc = (m+1)*(T_ini+N_deepc + n)



    # solver parameters
    q = 80
    r = 0.000000001
    lam_sigma = 800000
    lam_g = 1


    X_deepc = [x0]
    U_deepc = []

    # offline data
    data_u = []
    data_x = [x0]

    # offline data generation
    for t in range(T_deepc):
        data_u.append(-K_old@data_x[-1] + np.array(np.random.normal(mean_w, 2, size=(m, 1))))
        data_x.append(A @ data_x[-1] + B @ data_u[-1])

    print('DeePC started')
    # DeePC implementation
    for t in range(number_of_iteration):

        if noise:
            if control_mode == 0 or control_mode == 2:
                w = np.array(np.random.normal(mean_w, 0.001, size=(n, 1)))
            else:
                w = np.array(np.random.normal(mean_w, 0.01, size=(n, 1)))
        else:
            w = np.zeros((n, 1))

        if t <= T_ini:
            U_deepc.append(np.array(np.random.normal(mean_w, 0.1, size=(m, 1))))
            X_deepc.append(A @ X_deepc[-1] + B @ U_deepc[-1] + w)
        else:
            if la.norm(X_deepc[-1]) > 1000:
                print('DeePC is blown up at')
                print(t)
                break

            try:
                u_N_deepc = deepc(U_deepc, X_deepc[:-1], data_u, data_x[:-1], T_ini, N_deepc, q, r, lam_sigma, lam_g)
            except:
                print('Deepc is stopped at iteration')
                print(t)
                break
            U_deepc.append(u_N_deepc)
            X_deepc.append(A@X_deepc[-1] + B @ U_deepc[-1] + w)

    print('DeePC ended')

    ###################### Algorithm 1 implementation ####################
    # Parameters for Algorithm 1
    X = [x0]
    Y = []
    K = [np.zeros((m, n))]
    u = []

    G_alpha = la.pinv(alpha * np.eye(m) + B.T @ B) @ B.T

    X_old = [x0]
    X_new = [x0]

    print('DGR started')
    for t in range(number_of_iteration):
        
        if noise:
            if control_mode == 0 or control_mode == 2:
                w = np.array(np.random.normal(mean_w, 0.001, size=(n, 1)))
            else:
                w = np.array(np.random.normal(mean_w, 0.01, size=(n, 1)))
        else:
            w = np.zeros((n, 1))

        u.append(-K[-1] @ X[-1])
        X.append(A @ X[-1] + B @ u[-1] + w)
        Y.append(X[-1] - B @ u[-1] )

        K.append(G_alpha @ np.hstack(Y) @ la.pinv(np.hstack(X[:-1])))
        
        if t < T_deepc:
            X_new.append(X[-1])            
        elif t == T_deepc:
            xposition = t
            
            Q = np.eye(np.shape(A)[0])
            if control_mode == 0 or control_mode ==2:
                R = 0.0000001*np.eye(np.shape(B)[1])
            else:
                R = 0.0000001*np.eye(np.shape(B)[1])
            A_hat = np.hstack(Y) @ la.pinv(np.hstack(X[:-1]))

            (_,_,K1) = control.dare(A_hat, B, Q, R)
            X_new.append((A-B@K1) @ X_new[-1] + w)
        else:
            X_new.append((A-B@K1) @ X_new[-1] + w)

        if la.norm(X_old[-1]) < 200:
            X_old.append((A - B @ K_old) @ X_old[-1] + w)

    print('DGR ended')

    # Find upper-bound based on Lemma 2
    z     = [X[0]]
    z_bar = [X[0] / la.norm(X[0])]
    w_bar = [X[0] / la.norm(X[0]) - z_bar[0]]
    
    for t in range(1, number_of_iteration+1):
        
        X_subspace = np.hstack(X[:t])
        l = len(X_subspace)
        Ux, _ , _ = la.svd(X_subspace, 0)
        z.append((np.eye(l) - Ux @ Ux.T) @ X[t])

        if la.norm(z[-1]) > 10e-12:
            z_bar.append(z[-1] / la.norm(z[-1]))
            w_bar.append(X[t]/la.norm(X[t]) - z_bar[-1])
        else:
            z_bar.append(np.zeros((n, 1)))
            w_bar.append(X[t] / la.norm(X[t]))

    Ur, _, _ = la.svd(B, 0)

    tilde_A = (np.eye(n) - Ur @ Ur.T) @ A
    tilde_B = Ur @ Ur.T @ A
    a_t     = [la.norm(A)]

    for t in range(1, number_of_iteration + 1):
        a_t.append(la.norm(la.matrix_power(tilde_A, t) @ A, 2))

    L  = [0]
    UB = [la.norm(X[0])]

    L.append(la.norm(A @ z_bar[0]))
    UB.append(L[-1] * la.norm(X[0]))
    Delta = B @ (la.pinv(B)-G_alpha) @ A
    
    for t in range(2, number_of_iteration + 1):
        sum_bl = 0
        for r in range(1, t):
            sum_bl += np.sqrt( la.norm( la.matrix_power(tilde_A, t-1-r) @ tilde_B @ z_bar[r])**2
                        + la.norm( la.matrix_power(tilde_A, t-1-r) @ Delta @ w_bar[r])**2 ) * L[r]
        L.append( a_t[t-1] + sum_bl )
        UB.append( L[-1] * la.norm(X[0]) )


    # ###################### DPC for a system after DGR is done ###############

    # online data generation for a system with DGR in the closed loop
    data_u = []
    data_x = [x0]

    # Parameters for DGR
    Y = []
    u_dgr = []
    K_dgr = [np.zeros((m, n))]
    G_alpha = la.pinv(alpha * np.eye(m) + B.T @ B) @ B.T

    for t in range(T_deepc):
        if noise:
            if control_mode == 0 or control_mode == 2:
                w = np.array(np.random.normal(mean_w, 0.001, size=(n, 1)))
            else:
                w = np.array(np.random.normal(mean_w, 0.01, size=(n, 1)))
        else:
            w = np.zeros((n, 1))

        data_u.append(np.array(np.random.normal(mean_w, 1, size=(m, 1))))
        u_dgr.append(-K_dgr[-1] @ data_x[-1] + data_u[-1])

        data_x.append(A @ data_x[-1] + B@u_dgr[-1] + w)
        Y.append(data_x[-1] - B @ u_dgr[-1])
        K_dgr.append(G_alpha @ np.hstack(Y) @ la.pinv(np.hstack(data_x[:-1])))

    print('DGR+DeePC started')
    # DeePC implementation

    X_deepc_dgr = []
    U_deepc_dgr = []
    X_deepc_dgr[0:T_deepc] = data_x
    U_deepc_dgr[0:T_deepc] = data_u

    # solver parameters
    if control_mode == 0 or control_mode == 2:
        q = 400
        r = 0.05
        lam_sigma = 10000
        lam_g = 1
    elif control_mode == 1 or control_mode == 3:
        q = 400
        r = 0.05
        lam_sigma = 10000
        lam_g = 1

    for t in range(T_deepc, number_of_iteration):

        if noise:
            if control_mode == 0 or control_mode == 2:
                w = np.array(np.random.normal(mean_w, 0.001, size=(n, 1)))
            else:
                w = np.array(np.random.normal(mean_w, 0.01, size=(n, 1)))
        else:
            w = np.zeros((n, 1))

        if la.norm(X_deepc_dgr[-1]) > 1000:
            print('DeePC is blown up at')
            print(t)
            break

        try:
            u_N_deepc_dgr = deepc(U_deepc_dgr, X_deepc_dgr[:-1], data_u, data_x[:-1], T_ini, N_deepc, q, r, lam_sigma,
                                  lam_g)
        except:
            print('DeePC is stopped at iteration')
            print(t)
            break
        U_deepc_dgr.append(u_N_deepc_dgr)
        X_deepc_dgr.append(A @ X_deepc_dgr[-1] + B @(-K_dgr[-1]@ X_deepc_dgr[-1] + U_deepc_dgr[-1]) + w)

    print('DGR+DeePC ended')


    return X, X_old, UB, X_new, xposition, K, X_deepc, X_deepc_dgr
Example no. 33
def main():
    if len(sys.argv) != 4:
        print 'usage: ./lin_reg.py <start time point> <end time point> <laser wavelength>'
        sys.exit(1)

    hbar = 6.582119514E-16  ## Planck's constant in eV s
    c_speed = 2.99792458E8  ## speed of light in m/s
    t_start = float(sys.argv[1])  ## start time in femtoseconds
    t_stop = float(sys.argv[2])  ## stop time in femtoseconds
    omega = 1E-6 * 2.0 * np.pi * c_speed / float(
        sys.argv[3])  ## ang frequency of sinusoidal component

    # read in all data points from file
    f = open('fort.400', 'rU')  # opens file into the variable f
    N_points = 0
    time = []
    value = []
    for l in f:
        row = l.split()
        time.append(float(row[0]))
        value.append(float(row[1]))
        N_points = N_points + 1  ## number of time points


#    print row[0], row[1]
    f.close()

    # define data points used in linear regression
    t = 0
    i = 0
    t_arr = []
    y_arr = []
    while (t < t_stop):
        if time[i] > t_start:
            t_arr.append(time[i])
            y_arr.append(value[i])
        t = time[
            i]  # technically will keep first point AFTER t_stop but that's ok
        i = i + 1
    m = len(t_arr)  ## number of data points

    #prepare linear regression variables
    s_data_out = open('sample_data.dat', 'w')
    n = 4  ## number of linear regression parameters
    arr = []
    for i in range(m):
        s_data_out.write(str(t_arr[i]) + " " + str(y_arr[i]) + '\n')
        row = []
        for j in range(n):
            if j == 0: elem = 1.0
            if j == 1: elem = t_arr[i]
            if j == 2: elem = np.sin(omega * t_arr[i])
            if j == 3: elem = np.cos(omega * t_arr[i])
            row.append(elem)
        arr.append(row)
    x_mat = np.array(arr)
    theta_arr = [0.0] * n

    # solve via Normal Equation
    theta_arr = np.dot(
        np.dot(pinv(np.dot(np.transpose(x_mat), x_mat)), np.transpose(x_mat)),
        y_arr)

    print
    print 'theta0: ', theta_arr[0]
    print 'theta1: ', theta_arr[1]
    print 'theta2: ', theta_arr[2]
    print 'theta3: ', theta_arr[3]
    print
    print 'slope: ', theta_arr[1]
    print 'y-int: ', theta_arr[0]
    print
    print 'rate of electron transfer: ', theta_arr[1], ' electrons per fs'
    fit_out_log = open('fit.log', 'w')
    fit_out_log.write('theta0: ' + str(theta_arr[0]) + '\n' + 'theta1: ' +
                      str(theta_arr[1]) + '\n' + 'theta2: ' +
                      str(theta_arr[2]) + '\n' + 'theta3: ' +
                      str(theta_arr[3]) + '\n' + '\n')
    fit_out_log.write('slope: ' + str(theta_arr[1]) + '\n' + 'y-int: ' +
                      str(theta_arr[0]) + '\n')

    fit_out = open('fit.dat', 'w')
    linear_out = open('linear_fit.dat', 'w')
    for i in range(N_points):
        hyp = theta_arr[0] + theta_arr[1] * time[i] + theta_arr[2] * np.sin(
            omega * time[i]) + theta_arr[3] * np.cos(omega * time[i])
        hyp_line = theta_arr[0] + theta_arr[1] * time[i]
        fit_out.write(str(time[i]) + " " + str(hyp) + '\n')
        linear_out.write(str(time[i]) + " " + str(hyp_line) + '\n')
Example no. 34
def obs_avoidance_interpolation_moving(x,
                                       xd,
                                       obs=[],
                                       attractor='none',
                                       weightPow=2):

    # This function modulates the dynamical system at position x and dynamics xd such that it avoids all obstacles obs. It can furthermore be forced to converge to the attractor.
    #
    # INPUT
    # x [dim]: position at which the modulation is happening
    # xd [dim]: initial dynamical system at position x
    # obs [list of obstacle_class]: a list of all obstacles and their properties, which present in the local environment
    # attractor [list of [dim]]]: list of positions of all attractors
    # weightPow [int]: hyperparameter which defines the evaluation of the weight
    #
    # OUTPUT
    # xd [dim]: modulated dynamical system at position x
    #

    # Initialize Variables
    N_obs = len(obs)  #number of obstacles
    if N_obs == 0:
        return xd

    d = x.shape[0]
    Gamma = np.zeros((N_obs))

    if type(attractor) == str:
        if attractor == 'default':  # Define attractor position
            attractor = np.zeros((d))
            N_attr = 1
        else:
            N_attr = 0
    else:
        N_attr = 1

    # Linear and angular rotation of velocity
    xd_dx_obs = np.zeros((d, N_obs))
    xd_w_obs = np.zeros(
        (d, N_obs))  #velocity due to the rotation of the obstacle

    # Modulation matrices
    E = np.zeros((d, d, N_obs))
    D = np.zeros((d, d, N_obs))
    M = np.zeros((d, d, N_obs))
    E_orth = np.zeros((d, d, N_obs))

    # Rotation matrix
    R = np.zeros((d, d, N_obs))

    for n in range(N_obs):
        # Move the position into the obstacle frame of reference
        if obs[n].th_r:  # Nonzero value
            R[:, :, n] = compute_R(d, obs[n].th_r)
        else:
            R[:, :, n] = np.eye(d)

        # Move to obstacle centered frame
        x_t = R[:, :, n].T @ (x - obs[n].x0)
        E[:, :, n], D[:, :,
                      n], Gamma[n], E_orth[:, :,
                                           n] = compute_modulation_matrix(
                                               x_t, obs[n], R[:, :, n])

    if N_attr:
        d_a = LA.norm(x - np.array(attractor))  # Distance to attractor
        weight = compute_weights(np.hstack((Gamma, [d_a])), N_obs + N_attr)

    else:
        weight = compute_weights(Gamma, N_obs)
    xd_obs = np.zeros((d))

    for n in range(N_obs):
        if d == 2:
            xd_w = np.cross(np.hstack(([0, 0], obs[n].w)),
                            np.hstack((x - np.array(obs[n].x0), 0)))
            xd_w = xd_w[0:2]
        elif d == 3:
            xd_w = np.cross(obs[n].w, x - obs[n].x0)
        else:
            warnings.warn('NOT implemented for d={}'.format(d))

        # the exponential term is very helpful as it helps to avoid the crazy rotation of the robot due to the rotation of the object
        exp_weight = np.exp(-1 / obs[n].sigma * (np.max([Gamma[n], 1]) - 1))
        xd_obs_n = exp_weight * (np.array(obs[n].xd) + xd_w)

        xd_obs_n = E_orth[:, :, n].T @ xd_obs_n
        xd_obs_n[0] = np.maximum(xd_obs_n[0], 0)  # Only use orthogonal part
        xd_obs_n = E_orth[:, :, n] @ xd_obs_n

        xd_obs = xd_obs + xd_obs_n * weight[n]

    xd = xd - xd_obs  #computing the relative velocity with respect to the obstacle

    # Create orthogonal matrix
    xd_norm = LA.norm(xd)

    if xd_norm:  # nonzero
        xd_normalized = xd / xd_norm
    else:
        xd_normalized = xd

    xd_t = np.array([xd_normalized[1], -xd_normalized[0]])

    Rf = np.array([xd_normalized, xd_t]).T

    xd_hat = np.zeros((d, N_obs))
    xd_hat_magnitude = np.zeros((N_obs))
    k_ds = np.zeros((d - 1, N_obs))

    for n in range(N_obs):
        M[:, :, n] = R[:, :, n] @ E[:, :, n] @ D[:, :, n] @ LA.pinv(
            E[:, :, n]) @ R[:, :, n].T

        xd_hat[:, n] = M[:, :, n] @ xd  # velocity modulation
        xd_hat_magnitude[n] = np.sqrt(np.sum(xd_hat[:, n]**2))
        if xd_hat_magnitude[n]:  # Nonzero hat_magnitude
            xd_hat_normalized = xd_hat[:, n] / xd_hat_magnitude[
                n]  # normalized direction
        else:
            xd_hat_normalized = xd_hat[:, n]

        if not d == 2:
            warnings.warn('not implemented for d neq 2')

        xd_hat_normalized_velocityFrame = Rf @ xd_hat_normalized

        # Kappa space - directional space
        k_fn = xd_hat_normalized_velocityFrame[1:]
        kfn_norm = LA.norm(k_fn)  # Normalize
        if kfn_norm:  # nonzero
            k_fn = k_fn / kfn_norm

        sumHat = np.sum(xd_hat_normalized * xd_normalized)
        if sumHat > 1 or sumHat < -1:
            sumHat = max(min(sumHat, 1), -1)
            warnings.warn('cosinus out of bound!')

        k_ds[:, n] = np.arccos(sumHat) * k_fn.squeeze()

    xd_hat_magnitude = np.sqrt(np.sum(xd_hat**2, axis=0))

    if N_attr:  #nonzero

        k_ds = np.hstack((k_ds, np.zeros(
            (d - 1, N_attr))))  # points at the origin

        xd_hat_magnitude = np.hstack((xd_hat_magnitude, LA.norm(
            (xd)) * np.ones(N_attr)))

    # Weighted interpolation for several obstacles
    weight = weight**weightPow
    if not LA.norm(weight, 2):
        warnings.warn('trivial weight.')
    weight = weight / LA.norm(weight, 2)

    xd_magnitude = np.sum(xd_hat_magnitude * weight)
    k_d = np.sum(k_ds * np.tile(weight, (d - 1, 1)), axis=1)

    norm_kd = LA.norm(k_d)

    if norm_kd:  # Nonzero
        n_xd = Rf.T @ np.hstack(
            (np.cos(norm_kd), np.sin(norm_kd) / norm_kd * k_d))
    else:
        n_xd = Rf.T @ np.hstack((1, k_d))

    xd = xd_magnitude * n_xd.squeeze()

    # transforming back from object frame of reference to inertial frame of reference
    xd = xd + xd_obs

    return xd
Example no. 35

def get_unseen_tfidf(unseen_event, model, dim):
    unseen_event_terms = jieba.analyse.extract_tags(unseen_event)
    unseen_event_vector = [0] * dim
    for term in unseen_event_terms:
        if term in model.vocabulary_:
            unseen_event_vector[model.vocabulary_[term]] += model.idf_[
                model.vocabulary_[term]]
    return unseen_event_vector


if __name__ == "__main__":
    VSM_MODEL, TERM_FACTOR_MATRIX = train_mf()
    UNSEENID_TO_SENTENCE = load_unseen()
    UNSEEN_EMBEDDING_DICT = {}
    for id_, (title_string, description) in UNSEENID_TO_SENTENCE.items():
        newitem_sentence = title_string + description
        unseen_tfidf = np.array([
            get_unseen_tfidf(newitem_sentence, VSM_MODEL,
                             TERM_FACTOR_MATRIX.shape[0])
        ])
        UNSEEN_EMBEDDING_DICT[id_] = np.dot(
            unseen_tfidf, pinv(TERM_FACTOR_MATRIX.T)).tolist()[0]

    with open('mf.txt', 'wt') as fout:
        fout.write("{}\n".format(len(UNSEEN_EMBEDDING_DICT)))
        for id_, embedding in UNSEEN_EMBEDDING_DICT.items():
            fout.write("{} {}\n".format(
                id_, ' '.join(map(lambda x: str(round(x, 6)), embedding))))
Example no. 36
def AnDA_analog_forecasting(x, AF):
    """ Apply the analog method on catalog of historical data to generate forecasts. """

    # initializations
    N, n = x.shape
    xf = np.zeros([N, n])
    xf_mean = np.zeros([N, n])
    stop_condition = 0
    i_var = np.array([0])
    i_var_iter = 0

    # local or global analog forecasting
    while (stop_condition != 1):

        # in case of global approach
        if np.all(AF.neighborhood == 1):
            i_var_neighboor = np.arange(n, dtype=np.int64)
            i_var = np.arange(n, dtype=np.int64)
            stop_condition = 1

        # in case of local approach
        else:
            i_var_neighboor = np.where(AF.neighborhood[int(i_var), :] == 1)[0]

        # find the indices and distances of the k-nearest neighbors (knn)
        #kdt = KDTree(AF.catalog.analogs[:,i_var_neighboor], leaf_size=50, metric='euclidean')
        #dist_knn, index_knn = kdt.query(x[:,i_var_neighboor], AF.k)
        # find the indices and distances of the k-nearest neighbors (knn)
        if not AF.initialized:
            if i_var_iter == 0:
                AF.kdt = list()
            AF.kdt.append(
                KDTree(AF.catalog.analogs[:, i_var_neighboor],
                       leaf_size=50,
                       metric='euclidean')
            )  #If we are using the global approach we can compute kdt only once.
        kdt = AF.kdt[i_var_iter]
        dist_knn, index_knn = kdt.query(x[:, i_var_neighboor], AF.k)

        # parameter of normalization for the kernels
        if AF.kernel == 'gaussian':
            lambdaa = np.median(dist_knn)

            # compute weights
            if AF.k == 1:
                weights = np.ones([N, 1])
            else:

                weights = mk_stochastic(
                    np.exp(-np.power(dist_knn, 2) / lambdaa**2))

        if AF.kernel == 'tricube':
            # compute weights
            if AF.k == 1:
                weights = np.ones([N, 1])
            else:
                weights = np.ones((N, AF.k))
                for j in range(weights.shape[0]):
                    lambdaa = max(dist_knn[j, :].squeeze())
                    weights[j, :] = (
                        1 - (dist_knn[j, :].squeeze() / lambdaa)**3)**3
                    weights[j, :] = weights[j, :] / np.sum(weights[j, :])
        # for each member/particle
        for i_N in range(0, N):

            # initialization
            xf_tmp = np.zeros([AF.k, np.max(i_var) + 1])

            # method "locally-constant"
            if (AF.regression == 'locally_constant'):

                # compute the analog forecasts
                xf_tmp[:, i_var] = AF.catalog.successors[np.ix_(
                    index_knn[i_N, :], i_var)]

                # weighted mean and covariance
                xf_mean[i_N, i_var] = np.sum(
                    xf_tmp[:, i_var] *
                    np.repeat(weights[i_N, :][np.newaxis].T, len(i_var), 1), 0)
                E_xf = (xf_tmp[:, i_var] -
                        np.repeat(xf_mean[i_N, i_var][np.newaxis], AF.k, 0)).T
                cov_xf = 1.0 / (
                    1.0 - np.sum(np.power(weights[i_N, :], 2))) * np.dot(
                        np.repeat(weights[i_N, :][np.newaxis], len(i_var), 0) *
                        E_xf, E_xf.T)

            # method "locally-incremental"
            elif (AF.regression == 'increment'):

                # compute the analog forecasts
                xf_tmp[:, i_var] = np.repeat(
                    x[i_N, i_var][np.newaxis],
                    AF.k, 0) + AF.catalog.successors[np.ix_(
                        index_knn[i_N, :], i_var)] - AF.catalog.analogs[np.ix_(
                            index_knn[i_N, :], i_var)]

                # weighted mean and covariance
                xf_mean[i_N, i_var] = np.sum(
                    xf_tmp[:, i_var] *
                    np.repeat(weights[i_N, :][np.newaxis].T, len(i_var), 1), 0)
                E_xf = (xf_tmp[:, i_var] -
                        np.repeat(xf_mean[i_N, i_var][np.newaxis], AF.k, 0)).T
                cov_xf = 1.0 / (
                    1 - np.sum(np.power(weights[i_N, :], 2))) * np.dot(
                        np.repeat(weights[i_N, :][np.newaxis], len(i_var), 0) *
                        E_xf, E_xf.T)

            # method "locally-linear"
            elif (AF.regression == 'local_linear'):

                # define analogs, successors and weights
                X = AF.catalog.analogs[np.ix_(index_knn[i_N, :],
                                              i_var_neighboor)]
                Y = AF.catalog.successors[np.ix_(index_knn[i_N, :], i_var)]
                w = weights[i_N, :][np.newaxis]

                # compute centered weighted mean and weighted covariance
                Xm = np.sum(X * w.T, axis=0)[np.newaxis]
                Xc = X - Xm

                # regression on principal components
                Xr = np.c_[np.ones(X.shape[0]), Xc]
                Cxx = np.dot(w * Xr.T, Xr)
                Cxx2 = np.dot(w**2 * Xr.T, Xr)
                Cxy = np.dot(w * Y.T, Xr)
                inv_Cxx = pinv(
                    Cxx, rcond=0.01
                )  # in case of error here, increase the number of analogs (AF.k option)
                beta = np.dot(inv_Cxx, Cxy.T)
                X0 = x[i_N, i_var_neighboor] - Xm
                X0r = np.c_[np.ones(X0.shape[0]), X0]

                # weighted mean
                xf_mean[i_N, i_var] = np.dot(X0r, beta)
                pred = np.dot(Xr, beta)
                res = Y - pred
                xf_tmp[:, i_var] = xf_mean[i_N, i_var] + res

                # weighted covariance
                cov_xfc = np.dot(w * res.T,
                                 res) / (1 - np.trace(np.dot(Cxx2, inv_Cxx)))
                cov_xf = cov_xfc * (
                    1 + np.trace(Cxx2 @ inv_Cxx @ X0r.T @ X0r @ inv_Cxx))

                # constant weights for local linear
                weights[i_N, :] = 1.0 / len(weights[i_N, :])

            # error
            else:
                raise ValueError("""\
                    Error: choose AF.regression between \
                    'locally_constant', 'increment', 'local_linear' """)
            '''
            # method "globally-linear" (to finish)
            elif (AF.regression == 'global_linear'):
                ### REMARK: USE i_var_neighboor IN THE FUTURE! ####
                xf_mean[i_N,:] = AF.global_linear.predict(np.array([x[i_N,:]]))
                if n==1:
                    cov_xf = np.cov((AF.catalog.successors - AF.global_linear.predict(AF.catalog.analogs)).T)[np.newaxis][np.newaxis]
                else:
                    cov_xf = np.cov((AF.catalog.successors - AF.global_linear.predict(AF.catalog.analogs)).T)
            
            # method "locally-forest" (to finish)
            elif (AF.regression == 'local_forest'):
                ### REMARK: USE i_var_neighboor IN THE FUTURE! #### 
                xf_mean[i_N,:] = AF.local_forest.predict(np.array([x[i_N,:]]))
                if n==1:
                    cov_xf = np.cov(((AF.catalog.successors - np.array([AF.local_forest.predict(AF.catalog.analogs)]).T).T))[np.newaxis][np.newaxis]
                else:
                    cov_xf = np.cov((AF.catalog.successors - AF.local_forest.predict(AF.catalog.analogs)).T)
                # weighted mean and covariance
                #xf_tmp[:,i_var] = AF.local_forest.predict(AF.catalog.analogs[np.ix_(index_knn[i_N,:],i_var)]);
                #xf_mean[i_N,i_var] = np.sum(xf_tmp[:,i_var]*np.repeat(weights[i_N,:][np.newaxis].T,len(i_var),1),0)
                #E_xf = (xf_tmp[:,i_var]-np.repeat(xf_mean[i_N,i_var][np.newaxis],AF.k,0)).T;
                #cov_xf = 1.0/(1.0-np.sum(np.power(weights[i_N,:],2)))*np.dot(np.repeat(weights[i_N,:][np.newaxis],len(i_var),0)*E_xf,E_xf.T);
            '''

            # Gaussian sampling
            if (AF.sampling == 'gaussian'):
                # random sampling from the multivariate Gaussian distribution
                xf[i_N, i_var] = np.random.multivariate_normal(
                    xf_mean[i_N, i_var], cov_xf)

            # Multinomial sampling
            elif (AF.sampling == 'multinomial'):
                # random sampling from the multinomial distribution of the weights
                i_good = sample_discrete(weights[i_N, :], 1, 1)
                xf[i_N, i_var] = xf_tmp[i_good, i_var]

            # error
            else:
                raise ValueError("""\
                    Error: choose AF.sampling between 'gaussian', 'multinomial' 
                """)

        # stop condition
        if (np.array_equal(i_var, np.array([n - 1])) or (len(i_var) == n)):
            stop_condition = 1

        else:
            i_var = i_var + 1
            i_var_iter = i_var_iter + 1

        AF.initialized = True

    return xf, xf_mean
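
For reference, the weighted locally-linear fit used in the 'local_linear' branch above can be isolated into a small standalone sketch. The names `analogs`, `successors`, `weights` and `x0` are hypothetical stand-ins for the catalog quantities and current state used above:

import numpy as np
from numpy.linalg import pinv

def weighted_local_linear(analogs, successors, weights, x0, rcond=0.01):
    """Weighted linear fit through k analogs, evaluated at x0.

    analogs    : (k, d) neighbour states
    successors : (k, p) their successors
    weights    : (k,) non-negative weights summing to 1
    x0         : (d,) state at which to predict
    """
    w = weights[np.newaxis]                    # (1, k)
    Xm = np.sum(analogs * w.T, axis=0)         # weighted mean of the analogs
    Xc = analogs - Xm                          # centred analogs
    Xr = np.c_[np.ones(len(analogs)), Xc]      # add intercept column
    Cxx = (w * Xr.T) @ Xr                      # weighted normal matrix
    Cxy = (w * successors.T) @ Xr              # weighted cross-products
    beta = pinv(Cxx, rcond=rcond) @ Cxy.T      # regression coefficients
    X0r = np.r_[1.0, x0 - Xm][np.newaxis]      # centred query with intercept
    return (X0r @ beta).ravel()                # predicted successor mean

# toy usage with synthetic data
rng = np.random.default_rng(0)
A = rng.normal(size=(50, 3))
S = A @ rng.normal(size=(3, 2)) + 0.01 * rng.normal(size=(50, 2))
w = np.full(50, 1.0 / 50)
print(weighted_local_linear(A, S, w, A[0]))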
Esempio n. 37
0
def regression_fit(data, design): 
  """Finally uses the design matrix from build_design() and fits a linear regression to each voxel 

  Parameters
  ----------
  data : fMRI data for a single subject

  design: matrix returned by build_design()

  Returns
  -------
  numpy array of estimated betas for each voxel

  Example
  -------
  >>> data = get_img(1,1).get_data()
  >>> behavdata = get_data(1,1)
  >>> design  = build_design(data,behavdata)
  >>> regression_fit(data, design).shape 
  (64, 64, 34, 4)
  >>> regression_fit(data, design)[1,1,...]
  array([[ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.]])

  """ 
  data_2d = np.reshape(data, (-1, data.shape[-1]))
  betas = npl.pinv(design).dot(data_2d.T)
  betas = np.reshape(betas.T, data.shape[:-1] + (-1,))
  return betas 
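
A quick self-contained check of the same pinv-based fit on synthetic data (hypothetical shapes, not the study data assumed by the doctest above):

import numpy as np
import numpy.linalg as npl

# small synthetic 4-D "image": 4 x 4 x 3 voxels, 20 time points
rng = np.random.default_rng(42)
data = rng.normal(size=(4, 4, 3, 20))
design = np.c_[np.ones(20), rng.normal(size=(20, 3))]   # 20 x 4 design matrix

data_2d = np.reshape(data, (-1, data.shape[-1]))        # voxels x time
betas = npl.pinv(design).dot(data_2d.T)                 # regressors x voxels
betas = np.reshape(betas.T, data.shape[:-1] + (-1,))
print(betas.shape)   # (4, 4, 3, 4): one beta per regressor per voxel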
Esempio n. 38
0
def block_term_tensor_decomposition(tensor,
                                    modes,
                                    n_part,
                                    ranks=None,
                                    n_iter_max=100,
                                    tol=10e-5,
                                    random_state=None):
    if ranks is None:
        ranks = [tensor.shape[index - 1] for index in modes]

    # randomly initialize the core tensors and the factor matrices
    rng = check_random_state(random_state)
    core = [np.array(rng.random_sample(ranks)) for i in range(n_part)]
    core_2 = [np.array(rng.random_sample(ranks)) for i in range(n_part)]

    factors = [
        np.array(
            rng.random_sample((tensor.shape[index], ranks[index] * n_part)))
        for index, mode in enumerate(modes)
    ]

    rec_errors = []
    for iteration in range(n_iter_max):
        for mode in modes:
            index = mode - 1
            factors[index] = (blockdiag(core, mode).dot(
                pinv(multi_mat_kr(factors, R=n_part,
                                  mode=mode))).dot(unfold(tensor, mode).T)).T
            for i in range(n_part):
                factors[index][:, i * ranks[index]:(i + 1) *
                               ranks[index]], _ = QR(
                                   factors[index][:, i * ranks[index]:(i + 1) *
                                                  ranks[index]])

        # rebuild the core tensors
        for i in range(n_part):
            factors_rebult = []
            for mode in modes:
                index = mode - 1
                factors_rebult.append(
                    factors[index][:, i * ranks[index]:(i + 1) * ranks[index]])
            core[i] = multi_mode_dot(tensor,
                                     factors_rebult,
                                     modes=modes,
                                     transpose=True)

        rebuilt_tensor = rebuilt_block_term_tensor(core, factors, modes)
        err = norm(rebuilt_tensor - tensor, 2) / norm(tensor, 2)

        print('iteration is:', iteration, 'err is:', err)

        vector_core = pinv(multi_mat_kr(factors,
                                        R=n_part)).dot(tensor.reshape(-1, 1))
        len_core = vector_core.shape[0] // n_part
        for i in range(n_part):
            core_2[i] = vector_core[i * len_core:(i + 1) *
                                    len_core].reshape(ranks)

        rebuilt_tensor_2 = rebuilt_block_term_tensor(core_2, factors, modes)
        err_2 = norm(rebuilt_tensor_2 - tensor, 2) / norm(tensor, 2)

        print('iteration is:', iteration, 'err_2_vector_core is:', err_2)

        rec_errors.append(err)

        if iteration > 3:
            if (np.abs(rec_errors[-1] - rec_errors[-2]) < tol):  # condition for breaking out of the loop
                break

    return core, factors, rec_errors[-1], iteration
Esempio n. 39
0
assertNumDiff(contactData.Av, Av_numdiff,
              NUMDIFF_MODIFIER * np.sqrt(2 * EPS))  # threshold was 1e-4, is now 2.11e-4 (see assertNumDiff.__doc__)

Aq_numdiff = df_dq(rmodel, lambda _q: returna0(_q, v), q, h=eps)
Av_numdiff = df_dx(lambda _v: returna0(q, _v), v, h=eps)

assert (np.isclose(contactData.Aq, Aq_numdiff, atol=np.sqrt(eps)).all())
assert (np.isclose(contactData.Av, Av_numdiff, atol=np.sqrt(eps)).all())

q = pinocchio.randomConfiguration(rmodel)
v = rand(rmodel.nv) * 2 - 1

pinocchio.computeJointJacobians(rmodel, rdata, q)
J6 = pinocchio.getJointJacobian(rmodel, rdata, rmodel.joints[-1].id, pinocchio.ReferenceFrame.LOCAL).copy()
J = J6[:3, :]
v -= pinv(J) * J * v

x = np.concatenate([m2a(q), m2a(v)])
u = np.random.rand(rmodel.nv - 6) * 2 - 1

actModel = ActuationModelFreeFloating(rmodel)
contactModel3 = ContactModel3D(rmodel,
                               rmodel.getFrameId('gripper_left_fingertip_2_link'),
                               ref=np.random.rand(3),
                               gains=[4., 4.])
rmodel.frames[contactModel3.frame].placement = pinocchio.SE3.Random()
contactModel = ContactModelMultiple(rmodel)
contactModel.addContact(name='fingertip', contact=contactModel3)

model = DifferentialActionModelFloatingInContact(rmodel, actModel, contactModel, CostModelSum(rmodel))
data = model.createData()
Esempio n. 40
0
import numpy as np
from sys import stdin
from numpy import linalg
from sklearn.preprocessing import PolynomialFeatures


def polynomial(X, order):
    poly = PolynomialFeatures(order)
    return poly.fit_transform(X)


if __name__ == "__main__":
    P = 3
    (F, N) = map(int, stdin.readline().split(" "))
    train_data = np.array(
        [list(map(float,
                  stdin.readline().split(" "))) for _ in range(N)])
    T = int(stdin.readline())
    test_data = np.array(
        [list(map(float,
                  stdin.readline().split(" "))) for _ in range(T)])

    train_X = polynomial(train_data[:, :-1], P)
    train_y = train_data[:, -1]
    test_X = polynomial(test_data, P)

    w = np.dot(linalg.pinv(train_X), train_y)
    test_yhat = np.dot(test_X, w)
    print("\n".join(str(v) for v in test_yhat))
Esempio n. 41
0
 def _calc_params(self):
     self.W = pinv(self.X_mat.getT() * self.X_mat) * self.X_mat.getT() * self.Y_mat
Esempio n. 42
0
def bla_periodic(U, Y):  #(u, y, nper, fs, fmin, fmax):
    """Calculate the frequency response matrix, and the corresponding noise and
    total covariance matrices from the spectra of periodic input/output data.

    Note that the term "stochastic nonlinear contribution" is a bit
    misleading. The NL contribution is deterministic given the same forcing,
    but differs between realizations.

    G(f) = FRF(f) = Y(f)/U(f) (Y/F in classical notation)
    Y and U are the output and input of the system in the frequency domain.

    Parameters
    ----------
    u : ndarray
        Forcing signal
    y : ndarray
        Response signal (displacements)
    fs : float
        Sampling frequency
    fmin : float
        Starting frequency in Hz
    fmax : float
        Ending frequency in Hz

    Returns
    -------
    G : ndarray
        Frequency response matrix(FRM)
    covGML : ndarray
        Total covariance (= stochastic nonlinear contributions + noise)
    covGn : ndarray
        Noise covariance
    """

    # Number of inputs, realization, periods and frequencies
    m, R, P, F = U.shape
    p = Y.shape[0]  # number of outputs
    M = np.floor(R/m).astype(int)  # number of blocks of experiments
    if M*m != R:
        print('Warning: suboptimal number of experiments: M*m != R')
    # Reshape in M blocks of m experiments
    U = U[:,:m*M].reshape((m,m,M,P,F))
    Y = Y[:,:m*M].reshape((p,m,M,P,F))

    if P > 1:
        # average input/output spectra over periods
        U_mean = np.mean(U,axis=3)  # m x m x M x F
        Y_mean = np.mean(Y,axis=3)

        # Estimate noise spectra
        # create new axis. We could have used U_m = np.mean(U,3, keepdims=True)
        NU = U - U_mean[:,:,:,None,:]  # m x m x M x P x F
        NY = Y - Y_mean[:,:,:,None,:]

        # Calculate input/output noise (co)variances on averaged(over periods)
        # spectra
        covU = np.empty((m*m,m*m,M,F), dtype=complex)
        covY = np.empty((p*m,p*m,M,F), dtype=complex)
        covYU = np.empty((p*m,m*m,M,F), dtype=complex)
        for mm in range(M):  # Loop over experiment blocks
            # noise spectrum of experiment block mm (m x m x P x F)
            NU_m = NU[:,:,mm]
            NY_m = NY[:,:,mm]
            for f in range(F):
                # TODO extend this using einsum, so we avoid all loops
                # TODO fx: NU_m[...,f].reshape(-1,*NU_m[...,f].shape[2:])
                # flatten the m x m dimension and use einsum to take the outer
                # product of m*m x m*m and then sum over the p periods.
                tmpUU = NU_m[...,f].reshape(-1, P)  # create view
                tmpYY = NY_m[...,f].reshape(-1, P)
                covU[:,:,mm,f] = np.einsum('ij,kj->ik',tmpUU,tmpUU.conj()) / (P-1)/P
                covY[:,:,mm,f] = np.einsum('ij,kj->ik',tmpYY,tmpYY.conj()) / (P-1)/P
                covYU[:,:,mm,f] = np.einsum('ij,kj->ik',tmpYY,tmpUU.conj()) / (P-1)/P

        # Further calculations with averaged spectra
        U = U_mean  # m x m x M x F
        Y = Y_mean
    else:
        U = U.squeeze(axis=3)
        Y = Y.squeeze(axis=3)

    # Compute FRM and noise and total covariance on averaged(over experiment
    # blocks and periods) FRM
    G = np.empty((p,m,F), dtype=complex)
    covGML = np.empty((m*p,m*p,F), dtype=complex)
    covGn = np.empty((m*p,m*p,F), dtype=complex)
    Gm = np.empty((p,m,M), dtype=complex)
    U_inv_m = np.empty((m,m,M), dtype=complex)
    covGn_m = np.empty((m*p,m*p,M), dtype=complex)

    for f in range(F):
        # Estimate the frequency response matrix (FRM)
        for mm in range(M):
            # pseudo-inverse by SVD: A = usvᴴ, then A⁺ = vs⁺uᴴ where s⁺ = 1/s
            U_inv_m[:,:,mm] = pinv(U[:,:,mm,f])
            # FRM of experiment block m at frequency f
            Gm[:,:,mm] = Y[:,:,mm,f] @ U_inv_m[:,:,mm]

        # Average FRM over experiment blocks
        G[:,:,f] = Gm.mean(2)

        # Estimate the total covariance on averaged FRM
        if M > 1:
            NG = G[:,:,f,None] - Gm
            tmp = NG.reshape(-1, M)
            covGML[:,:,f] = np.einsum('ij,kj->ik',tmp,tmp.conj()) / M/(M-1)

        # Estimate noise covariance on averaged FRM (only if P > 1)
        if P > 1:
            for mm in range(M):
                U_invT = U_inv_m[:,:,mm].T
                A = np.kron(U_invT, np.eye(p))
                B = -np.kron(U_invT, Gm[:,:,mm])
                AB = A @ covYU[:,:,mm,f] @ B.conj().T
                covGn_m[:,:,mm] = A @ covY[:,:,mm,f] @ A.conj().T + \
                    B @ covU[:,:,mm,f] @ B.conj().T + \
                    (AB + AB.conj().T)

            covGn[:,:,f] = covGn_m.mean(2)/M

    # No total covariance estimate possible if only one experiment block
    if M < 2:
        covGML = None
    # No noise covariance estimate possible if only one period
    if P == 1:
        covGn = None

    return G, covGML, covGn
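
A shape-only usage sketch with random complex spectra standing in for measured input/output DFT data; the array layout follows the reshape at the top of the function (U is (m, R, P, F), Y is (p, R, P, F)):

import numpy as np

m, p = 2, 3          # number of inputs and outputs
R, P, F = 4, 5, 16   # realizations, periods, frequency lines

rng = np.random.default_rng(0)
U = rng.standard_normal((m, R, P, F)) + 1j * rng.standard_normal((m, R, P, F))
Y = rng.standard_normal((p, R, P, F)) + 1j * rng.standard_normal((p, R, P, F))

G, covGML, covGn = bla_periodic(U, Y)
print(G.shape)       # (p, m, F)
print(covGML.shape)  # (m*p, m*p, F)
print(covGn.shape)   # (m*p, m*p, F)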
Esempio n. 43
0
""" dataproj.py """
from datared import *
from numpy.linalg import svd, pinv
mu21 = (mu2 - mu1).reshape(3,1)
mu31 = (mu3 - mu1).reshape(3,1)
W = np.hstack((mu21, mu31))
U,_,_ = svd(W)  # we only need U
P = W @ pinv(W)
R = U.T @ P
RX1 = (R @ X1.T).T
RX2 = (R @ X2.T).T
RX3 = (R @ X3.T).T

# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
plt.plot(RX1[:,0],RX1[:,1],'b.',alpha=0.5,markersize=2)
plt.plot(RX2[:,0],RX2[:,1],'g.',alpha=0.5,markersize=2)
plt.plot(RX3[:,0],RX3[:,1],'r.',alpha=0.5,markersize=2)
plt.savefig('pcaproj2py.pdf')
plt.show()
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
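
Here `W @ pinv(W)` is the orthogonal projector onto the column space of `W` (the plane spanned by the two mean-difference vectors), and `R = U.T @ P` re-expresses the projected points in the SVD basis. A small standalone check of the projector properties, independent of the data loaded from datared.py:

import numpy as np
from numpy.linalg import svd, pinv

rng = np.random.default_rng(1)
W = rng.normal(size=(3, 2))           # two independent columns
P = W @ pinv(W)                       # projector onto span(W)

print(np.allclose(P @ W, W))          # True: columns of W are left unchanged
print(np.allclose(P @ P, P))          # True: P is idempotent
U, _, _ = svd(W)
print(np.allclose(U[:, :2] @ U[:, :2].T, P))  # same projector built from the SVD basis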
Esempio n. 44
0
def f1d(mu_SMM1, sig_SMM1, sim_vals):
    '''
    --------------------------------------------------------------------
    This function performs the two-step SMM estimation: it uses the
    estimates from part (c) with two moments to build an estimator of the
    variance-covariance matrix (omega hat 2step), which is then used to
    obtain the two-step estimate of the optimal weighting matrix
    (W hat 2step).
    --------------------------------------------------------------------
    INPUT: mu_SMM1, sig_SMM1, sim_vals
    RETURN: none
    --------------------------------------------------------------------
    '''
    pts = np.loadtxt('incomes.txt')  # load txt file
    S = 300.0
    N = 200.0
    seed = 1234
    err1 = err_vec(pts, sim_vals, mu_SMM1, sig_SMM1, False)
    VCV2 = np.dot(err1, err1.T) / pts.shape[0]
    W_hat2 = lin.pinv(VCV2)
    mu = 11
    sigma = 0.2
    params_init = np.array([mu_SMM1, sig_SMM1])
    args = (pts, W_hat2, N, S, seed)
    results_d = opt.minimize(criterion,
                             params_init,
                             args=(args),
                             method='Nelder-Mead',
                             bounds=((None, None), (1e-10, None)))
    mu_SMM2, sig_SMM2 = results_d.x
    # Report the estimated parameter values:
    print('1d. mu_SMM2=', mu_SMM2, ' sig_SMM2=', sig_SMM2)

    # Calculate and report the value of the GMM criterion function at the estimated parameter values:
    params_SMM = np.array([mu_SMM2, sig_SMM2])
    value = criterion(params_SMM, *args)[0][0]
    print(
        'The value of the SMM criterion function at the estimated parameter values is',
        value)

    # Calculate and report and compare my two data moments against my two model moments at the estimated parameter values.
    sim_vals = LN_draws(mu_SMM2, sig_SMM2, N, S, seed)
    mean_data, std_data = data_moments(pts)
    mean_sim, std_sim = data_moments(sim_vals)
    mean_model = mean_sim.mean()
    std_model = std_sim.mean()
    print('Mean of incomes =', mean_data, ', Standard deviation of incomes =',
          std_data)
    print('Mean of model =', mean_model, ', Standard deviation of model =',
          std_model)
    # print(results_d)-success

    # Plot the estimated lognormal PDF against the histogram from part (a) and the estimated PDF from part (c):
    f1a()
    #part (c)
    dist_pts = np.linspace(0, 150000,
                           10000)  # 150000 is the upper bound of incomes
    plt.plot(dist_pts,
             LN_pdf(dist_pts, mu_SMM1, sig_SMM1),
             linewidth=2,
             color='k',
             label=r'$\mu_{b}$: 11.331, $\sigma_{b}$: 0.210')
    plt.legend(loc='upper right')
    #part (d)
    dist_pts = np.linspace(0, 150000,
                           10000)  # 150000 is the upper bound of incomes
    plt.plot(dist_pts,
             LN_pdf(dist_pts, mu_SMM2, sig_SMM2),
             '--',
             linewidth=2,
             color='r',
             label=r'$\mu_{b}$: 11.330, $\sigma_{b}$: 0.211')
    plt.legend(loc='upper right')
Esempio n. 45
0
central = 1

B_fluxdiv = [[1,-2, 1,4,-2,1,-8, 4,-2], \
             [1,-1, 1,1,-1,1,-1, 1,-1], \
             [1, 0, 1,0, 0,1,0,  0,0], \
             [1, 1, 1,1, 1,1,1,  1,1], \
             [1,-2, 0,4, 0,0,-8, 0,0], \
             [1,-1, 0,1, 0,0,-1, 0,0], \
             [1, 0, 0,0, 0,0,0,  0,0], \
             [1, 1, 0,1, 0,0,1,  0,0], \
             [1,-2,-1,4, 2,1,-8,-4,-2], \
             [1,-1,-1,1, 1,1,-1,-1,-1], \
             [1, 0,-1,0, 0,1,0,  0,0], \
             [1, 1,-1,1,-1,1,1, -1,1]]

B_fluxdiv_inv = la.pinv(B_fluxdiv)
#print("B_fluxdiv_inv", B_fluxdiv_inv)

B =    [[1,-5/2, 1,25/4,-5/2,1,-125/8, 25/4,-5/2], \
        [1,-3/2, 1, 9/4,-3/2,1, -27/8,  9/4,-3/2], \
        [1,-1/2, 1, 1/4,-1/2,1,  -1/8,  1/4,-1/2], \
        [1, 1/2, 1, 1/4, 1/2,1,   1/8,  1/4, 1/2], \
        [1,-5/2, 0,25/4, 0,  0,-125/8,  0,   0], \
        [1,-3/2, 0, 9/4, 0,  0, -27/8,  0,   0], \
        central*np.array([1,-1/2, 0, 1/4, 0,  0,  -1/8,  0,   0]), \
        central*np.array([1, 1/2, 0, 1/4, 0,  0,   1/8,  0,   0]), \
        [1,-5/2,-1,25/4, 5/2,1,-125/8,-25/4,-5/2], \
        [1,-3/2,-1, 9/4, 3/2,1, -27/8, -9/4,-3/2], \
        [1,-1/2,-1, 1/4, 1/2,1,  -1/8, -1/4,-1/2], \
        [1, 1/2,-1, 1/4,-1/2,1,   1/8, -1/4, 1/2]]
Esempio n. 46
0
    X = X[..., np.newaxis]
    X = np.concatenate((np.ones((len(X), 1)), X), axis=1)
    return X


standardize = False

np.random.seed(3)

xtrain, ytrain, _, _ = polyDataMake()

if standardize:
    xtrain = (xtrain - xtrain.mean(axis=0)) / xtrain.std(axis=0)

N = len(xtrain)
wBatch = np.dot(linalg.pinv(addOnes(xtrain)), ytrain)

ss = {}
w = np.zeros((2, N))
for i in range(N):
    w[:, i], ss = linregUpdateSS(ss, xtrain[i], ytrain[i], xtrain, ytrain)

fig, ax1 = plt.subplots(1, 1, figsize=(9, 6))
ax1.plot(np.arange(2, N + 1),
         w[0, 1:],
         color='#e41a1c',
         marker='o',
         linestyle='None',
         linewidth=2,
         label='w0')
ax1.plot(np.arange(2, N + 1),
Esempio n. 47
0
    def test_diff(self, groups, rho=None, weight=None):

        """
        test_diff(groups, rho=0)

        Test for difference between survival curves

        Parameters
        ----------
        groups: A list of the values for exog to test for difference.
        tests the null hypothesis that the survival curves for all
        values of exog in groups are equal

        rho: compute the test statistic with weight S(t)^rho, where
        S(t) is the pooled estimate of the Kaplan-Meier survival function.
        If rho = 0, this is the logrank test; if rho = 1, this is the
        Peto and Peto modification to the Gehan-Wilcoxon test.

        weight: User specified function that accepts as its sole argument
        an array of times, and returns an array of weights for each time
        to be used in the test

        Returns
        -------
        An array whose zeroth element is the chi-square test statistic for
        the global null hypothesis, that all survival curves are equal,
        the index one element is degrees of freedom for the test, and the
        index two element is the p-value for the test.

        Examples
        --------

        >>> import statsmodels.api as sm
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np
        >>> from statsmodels.sandbox.survival2 import KaplanMeier
        >>> dta = sm.datasets.strikes.load()
        >>> dta = dta.values()[-1]
        >>> censoring = np.ones_like(dta[:,0])
        >>> censoring[dta[:,0] > 80] = 0
        >>> dta = np.c_[dta,censoring]
        >>> km = KaplanMeier(dta,0,exog=1,censoring=2)
        >>> km.fit()

        Test for difference of survival curves

        >>> log_rank = km.test_diff([0.0645,-0.03957])

        The zeroth element of log_rank is the chi-square test statistic
        for the difference between the survival curves using the log rank test
        for exog = 0.0645 and exog = -0.03957, the index one element
        is the degrees of freedom for the test, and the index two element
        is the p-value for the test

        >>> wilcoxon = km.test_diff([0.0645,-0.03957], rho=1)

        wilcoxon is the equivalent information as log_rank, but for the
        Peto and Peto modification to the Gehan-Wilcoxon test.

        User specified weight functions

        >>> log_rank = km.test_diff([0.0645,-0.03957], weight=np.ones_like)

        This is equivalent to the log rank test

        More than two groups

        >>> log_rank = km.test_diff([0.0645,-0.03957,0.01138])

        The test can be performed with arbitrarily many groups, so long as
        they are all in the column exog
        """
        groups = np.asarray(groups)
        if self.exog == None:
            raise ValueError("Need an exogenous variable for logrank test")

        elif (np.in1d(groups,self.groups)).all():
            data = self.data[np.in1d(self.data[:,self.exog],groups)]
            t = ((data[:,self.endog]).astype(float)).astype(int)
            tind = np.unique(t)
            NK = []
            N = []
            D = []
            Z = []
            if rho != None and weight != None:
                raise ValueError("Must use either rho or weights, not both")

            elif rho != None:
                s = KaplanMeier(data,self.endog,censoring=self.censoring)
                s.fit()
                s = (s.results[0][0]) ** (rho)
                s = np.r_[1,s[:-1]]

            elif weight != None:
                s = weight(tind)

            else:
                s = np.ones_like(tind)

            if self.censoring == None:
                for g in groups:
                    dk = np.bincount((t[data[:,self.exog] == g]))
                    d = np.bincount(t)
                    if np.max(tind) != len(dk):
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                    dk = dk[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    nk = len(data[data[:,self.exog] == g]) - dkSum[:-1]
                    n = len(data) - dSum[:-1]
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    s = s[n>1]
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)
            else:
                for g in groups:
                    censoring = ((data[:,self.censoring]).astype(float)).astype(int)
                    reverseCensoring = -1*(censoring - 1)
                    censored = np.bincount(t,reverseCensoring)
                    ck = np.bincount((t[data[:,self.exog] == g]),
                                     reverseCensoring[data[:,self.exog] == g])
                    dk = np.bincount((t[data[:,self.exog] == g]),
                                     censoring[data[:,self.exog] == g])
                    d = np.bincount(t,censoring)
                    if np.max(tind) != len(dk):
                        dif = np.max(tind) - len(dk) + 1
                        dk = np.r_[dk,[0]*dif]
                        ck = np.r_[ck,[0]*dif]
                    dk = dk[:,list(tind)]
                    ck = ck[:,list(tind)]
                    d = d[:,list(tind)]
                    dk = dk.astype(float)
                    d = d.astype(float)
                    ck = ck.astype(float)
                    dkSum = np.cumsum(dk)
                    dSum = np.cumsum(d)
                    ck = np.cumsum(ck)
                    ck = np.r_[0,ck]
                    dkSum = np.r_[0,dkSum]
                    dSum = np.r_[0,dSum]
                    censored = censored[:,list(tind)]
                    censored = censored.astype(float)
                    censoredSum = np.cumsum(censored)
                    censoredSum = np.r_[0,censoredSum]
                    nk = (len(data[data[:,self.exog] == g]) - dkSum[:-1]
                          - ck[:-1])
                    n = len(data) - dSum[:-1] - censoredSum[:-1]
                    d = d[n>1]
                    dk = dk[n>1]
                    nk = nk[n>1]
                    n = n[n>1]
                    s = s[n>1]
                    ek = (nk * d)/(n)
                    Z.append(np.sum(s * (dk - ek)))
                    NK.append(nk)
                    N.append(n)
                    D.append(d)
            Z = np.array(Z)
            N = np.array(N)
            D = np.array(D)
            NK = np.array(NK)
            sigma = -1 * np.dot((NK/N) * ((N - D)/(N - 1)) * D
                                * np.array([(s ** 2)]*len(D))
                            ,np.transpose(NK/N))
            np.fill_diagonal(sigma, np.diagonal(np.dot((NK/N)
                                                  * ((N - D)/(N - 1)) * D
                                                       * np.array([(s ** 2)]*len(D))
                                                  ,np.transpose(1 - (NK/N)))))
            chisq = np.dot(np.transpose(Z),np.dot(la.pinv(sigma), Z))
            df = len(groups) - 1
            return np.array([chisq, df, stats.chi2.sf(chisq,df)])
        else:
            raise ValueError("groups must be in column exog")
Esempio n. 48
0
 def do(self, a, b):
     a_ginv = linalg.pinv(a)
     assert_almost_equal(dot(a, a_ginv), identity(asarray(a).shape[0]))
     assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
Esempio n. 49
0
def main(argv):
    seterr(over='raise', divide='raise', invalid='raise')

    try:
        if int(os.environ['OMP_NUM_THREADS']) > 1 or int(
                os.environ['MKL_NUM_THREADS']) > 1:
            print('It seems that parallelization is turned on. This will skew the results. To turn it off:')
            print('\texport OMP_NUM_THREADS=1')
            print('\texport MKL_NUM_THREADS=1')
    except:
        print('Parallelization of BLAS might be turned on. This could skew results.')

    experiment = Experiment(seed=42)

    if not os.path.exists(EXPERIMENT_PATH):
        print('Could not find file \'{0}\'.'.format(EXPERIMENT_PATH))
        return 0

    results = Experiment(EXPERIMENT_PATH)
    ica = results['model'].model[1].model

    # load test data
    data = load('data/vanhateren.{0}.0.npz'.format(
        results['parameters'][0]))['data']
    data = data[:, :100000]
    data = preprocess(data)
    data = data[:, permutation(data.shape[1] // 2)[:NUM_SAMPLES]]

    # transform data
    dct = results['model'].transforms[0]
    wt = results['model'].model[1].transforms[0]
    data = wt(dct(data)[1:])

    X = data

    for method in sampling_methods:
        # disable output and parallelization
        Distribution.VERBOSITY = 0
        mapp.max_processes = 1

        # measure time required by transition operator
        start = time()

        # initial hidden states
        Y = dot(pinv(ica.A), X)

        # increase number of steps to reduce overhead
        ica.sample_posterior(
            X,
            method=(method['method'],
                    dict(method['parameters'],
                         Y=Y,
                         num_steps=method['parameters']['num_steps'] *
                         NUM_STEPS_MULTIPLIER)))

        # time required per transition operator application
        duration = (time() - start) / NUM_STEPS_MULTIPLIER

        # enable output and parallelization
        Distribution.VERBOSITY = 2
        mapp.max_processes = 2

        energies = [mean(ica.prior_energy(Y))]

        # Markov chain
        for i in range(int(NUM_SECONDS / duration + 1.)):
            Y = ica.sample_posterior(X,
                                     method=(method['method'],
                                             dict(method['parameters'], Y=Y)))
            energies.append(mean(ica.prior_energy(Y)))

        plot(arange(len(energies)) * duration,
             energies,
             '-',
             color=method['color'],
             line_width=1.2,
             pgf_options=['forget plot'],
             comment=str(method['parameters']))

    xlabel('time in seconds')
    ylabel('average energy')
    title('van Hateren')

    gca().width = 7
    gca().height = 7
    gca().xmin = -1
    gca().xmax = NUM_SECONDS

    savefig('results/vanhateren/vanhateren_trace_.tex')

    return 0
Esempio n. 50
0
def transform(foci, mat):
    """ Convert coordinates from one space to another using provided
    transformation matrix. """
    t = linalg.pinv(mat)
    foci = np.hstack((foci, np.ones((foci.shape[0], 1))))
    return np.dot(foci, t)[:, 0:3]
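
A hedged usage sketch with a made-up 4x4 affine (in practice `mat` would be a real transformation matrix between coordinate spaces; note the function right-multiplies the homogeneous coordinates by `pinv(mat)`):

import numpy as np

# hypothetical affine: uniform 2 mm scaling plus a translation
mat = np.array([[2., 0., 0., 10.],
                [0., 2., 0., -5.],
                [0., 0., 2., 7.],
                [0., 0., 0., 1.]])

foci = np.array([[12., -3., 9.],
                 [0., 0., 0.]])

print(transform(foci, mat))   # (2, 3) array of transformed coordinates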
Esempio n. 51
0
                                  np.concatenate(
                                      ([data[0]], np.zeros(m - 1))))[m - 1:]

# Unpack data
data = loadmat('audio_data.mat')
noisy_speech = data['reference'].flatten()
noise = data['primary'].flatten()
fs = data['fs'].flatten()  # Hz

# Filter order
m = 100

# See http://www.cs.cmu.edu/~aarti/pubs/ANC.pdf page 7 last paragraph
X = regmat(noise, m)
y = noisy_speech[m - 1:]
w = npl.pinv(X).dot(y)

# The error is the signal we seek
speech = y - X.dot(w)

################################################# RECORD

if save_audio:
    wavwrite('recovered_ALMS.wav'.format(m), fs, speech)

################################################# VISUALIZATION

if plot_results:

    # Compute performance surface for order 2
    m2 = 2
Esempio n. 52
0
            dA = np.zeros((n, n))
            dA[0:n, 0:n] = np.random.normal(0, .05, (n, n))
            tilde_A = (np.eye(n) - Ur @ Ur.T) @ (A + dA)
            if np.max(np.abs(la.eigvals(tilde_A))) < 0.94:
                break
            iterator += 1
        
        A = A + dA

        # Generating the trajectory 
        x0 = 10 * np.random.randn(n, 1)

        alpha = 0.0
        if control_mode == 0 or control_mode == 2:
            alpha = 1e-9
        else:
            alpha = 5e-7

        number_of_iteration = 20*n  # or set it to 40 * n

        x_a, x_fre, up_a, x_update, xposition, K, x_deepc, x_deepc_dgr = Trajectory_generator(A, B, K1, x0, True, alpha,
                                                                                 number_of_iteration, control_mode)


        final_K.append(K)
        ideal_K.append(la.pinv(alpha * np.eye(m) + B.T @ B) @ B.T@A)
        
        plots_traj(np.hstack(x_a), np.hstack(x_fre), np.hstack(x_update), np.hstack(x_deepc), np.hstack(x_deepc_dgr), up_a, xposition, K, mode[control_mode], mode_title[control_mode])

    plot_controller(final_K, ideal_K)
    print('The parameters for DGR+DeePC must be adjusted for each specific instance of perturbation.')
Esempio n. 53
0
def multi_criteria(lineNo, rootDic, obs_centroid, __iteration_IK__, robot):  #
    k0 = 1
    alpha_vec = np.arange(0, 1.1, 0.1)
    q_pareto = []
    for alpha in alpha_vec:
        q_mc = np.zeros((len(t_vec), 7))
        q_mc[0, :] = q_initial
        start_IK = time.time()
        for i, t in enumerate(t_vec[1:]):
            J = jacobian_spatial(q_mc[i, :])
            w1_der = manipulability_gradient(
                q_mc[i, :], jacobian_spatial)  # singularity avoidance
            # w2_der = joint_limits_gradient(q_mc[i, :], q_min, q_max)
            w3_der = collision(q_input=q_sa[i, :],
                               centroid=obs_centroid,
                               robot=robot)
            w3_der = w3_der.numpy()
            q0 = k0 * (alpha * w1_der + (1 - alpha) * w3_der)
            qp_mc = la.pinv(J) @ desired_trajectory_vel[i, :] + (
                np.eye(7) - la.pinv(J) @ J) @ q0
            q_mc[i + 1, :] = q_mc[i, :] + qp_mc * h
        end_IK = time.time()
        diff_time_IK = end_IK - start_IK
        fig, ax = plt.subplots(1, 1, figsize=(6.472135955, 4), dpi=96)
        ax.plot(t_vec, q_mc[:, 0], label=r'$q_1$')
        ax.plot(t_vec, q_mc[:, 1], label=r'$q_2$')
        ax.plot(t_vec, q_mc[:, 2], label=r'$q_3$')
        ax.plot(t_vec, q_mc[:, 3], label=r'$q_4$')
        ax.plot(t_vec, q_mc[:, 4], label=r'$q_5$')
        ax.plot(t_vec, q_mc[:, 5], label=r'$q_6$')
        ax.plot(t_vec, q_mc[:, 6], label=r'$q_7$')

        ax.set_xlabel('Time [s]')
        ax.set_ylabel('Position [rad]')
        ax.set_title('Inverse kinematics with multi-criteria')
        ax.legend(loc='center right', bbox_to_anchor=(1.2, 0.5))
        fig.tight_layout()
        base_file_name1 = 'Figure_q_mc_multi_criteria'
        suffix1 = '.csv'
        csv_fileHandle = os.path.join(
            rootDic, base_file_name1 + '_' + str(lineNo) + '_' +
            str(__iteration_IK__) + '_' + robot + ' ' + str(alpha) + suffix1)
        np.savetxt(csv_fileHandle, q_mc, delimiter=",")
        suffix2 = '.jpg'
        figureHandle = os.path.join(
            rootDic, base_file_name1 + '_' + str(lineNo) + '_' +
            str(__iteration_IK__) + '_' + robot + ' ' + str(alpha) + suffix2)
        fig.savefig(figureHandle)

        q_pareto.append(q_mc)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(2 * 6.472135955, 4), dpi=96)
    for q, alpha in zip(q_pareto, alpha_vec):
        manipulability_cost = np.array(
            [manipulability(qi, jacobian_spatial) for qi in q])
        joint_limit_cost = np.array(
            [joint_limits(qi, q_min, q_max) for qi in q])
        color = np.random.rand(3, )
        if alpha == 0.0 or alpha == 1.0:
            lw = 3
        else:
            lw = 1
        ax1.plot(t_vec,
                 manipulability_cost,
                 label=(r'$\alpha=$' + '{0:.1f}'.format(alpha)),
                 color=color,
                 linewidth=lw)
        ax2.plot(t_vec,
                 joint_limit_cost,
                 label=(r'$\alpha=$' + '{0:.1f}'.format(alpha)),
                 color=color,
                 linewidth=lw)

    ax1.legend(ncol=2, prop={'size': 7})
    ax1.set_xlabel('Time [s]')
    ax2.set_xlabel('Time [s]')
    ax1.set_ylabel('Manipulability')
    ax2.set_ylabel('Joint Limits')
    fig.tight_layout()
    base_file_name2 = 'spatial_pareto'
    figureHandle = os.path.join(
        rootDic,
        base_file_name2 + str(__iteration_IK__) + '_' + robot + suffix2)
    fig.savefig(figureHandle)

    return q_pareto, diff_time_IK
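
The velocity update inside the loop is the standard redundancy-resolution rule qdot = pinv(J) xdot + (I - pinv(J) J) q0: the pseudoinverse term tracks the task velocity, while the null-space projector injects the secondary-objective gradient q0 without disturbing the task. A minimal sketch with a random (hence almost surely full-row-rank) Jacobian:

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(7)
J = rng.normal(size=(6, 7))          # task Jacobian of a hypothetical 7-DOF arm
xdot = rng.normal(size=6)            # desired task-space velocity
q0 = rng.normal(size=7)              # secondary-objective gradient

J_pinv = la.pinv(J)
qdot = J_pinv @ xdot + (np.eye(7) - J_pinv @ J) @ q0

print(np.allclose(J @ qdot, xdot))   # True: the task velocity is achieved exactly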
Esempio n. 54
0
def run(X, y):
    #print la.inv(X.T.dot(X)).dot(X.T).dot(y)
    return np.dot(la.pinv(X), y)
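
A minimal check (assuming the snippet's `np`/`la` imports are in scope) that the pinv solution returned by `run` matches `np.linalg.lstsq` on an overdetermined system:

import numpy as np
import numpy.linalg as la

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.5, -2.0, 0.5]) + 0.1 * rng.normal(size=100)

w_pinv = run(X, y)                          # the function above
w_lstsq, *_ = la.lstsq(X, y, rcond=None)
print(np.allclose(w_pinv, w_lstsq))         # True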
Esempio n. 55
0
print('1(b): Mean of incomes =', mean_data, ', Variance of incomes =',
      var_data)
print('1(b): Mean of model =', mean_model, ', Variance of model =', var_model)
'''
---------------------------------------
the following code fragment is for 1(c)
---------------------------------------
'''
# calculate the error vector given the GMM estimate using identity matrix as the
# weighting matrix
err1 = err_vec(incomes, mu_GMM1, sig_GMM1, cutoff, False)
# construct an estimator for the variance covariance matrix
VCV2 = np.dot(err1, err1.T) / incomes.shape[0]

# the inverse of the variance covariance matrix is the optimal weighting matrix
W_hat2 = lin.pinv(VCV2)

params_init = np.array([mu_init, sig_init])
gmm_args = (incomes, cutoff, W_hat2)
results = opt.minimize(criterion,
                       params_init,
                       args=(gmm_args),
                       method='TNC',
                       bounds=((1e-10, None), (1e-10, None)))

mu_GMM2, sig_GMM2 = results.x
print('mu_GMM2=', mu_GMM2, ' sig_GMM2=', sig_GMM2)

# Plot the histogram of the data
plt.title('Histogram of annual incomes')
plt.xlabel(r'Annual income(\$s)')
Esempio n. 56
0
 def lsq_solution_V2(self, X, y):
     w = np.dot(la.pinv(X), y)
     return w
Esempio n. 57
0
def _multivariate_ols_fit(endog, exog, method='svd', tolerance=1e-8):
    """
    Solve multivariate linear model y = x * params
    where y is dependent variables, x is independent variables

    Parameters
    ----------
    endog : array_like
        each column is a dependent variable
    exog : array_like
        each column is an independent variable
    method : string
        'svd' - Singular value decomposition
        'pinv' - Moore-Penrose pseudoinverse
    tolerance : float, a small positive number
        Tolerance for eigenvalues. Values smaller than the tolerance are
        considered zero.
    Returns
    -------
    a tuple of matrices or values necessary for hypotheses testing

    .. [*] https://support.sas.com/documentation/cdl/en/statug/63033/HTML/default/viewer.htm#statug_introreg_sect012.htm
    Notes
    -----
    Status: experimental and incomplete

    """
    y = endog
    x = exog
    nobs, k_endog = y.shape
    nobs1, k_exog = x.shape
    if nobs != nobs1:
        raise ValueError('x(n=%d) and y(n=%d) should have the same number of '
                         'rows!' % (nobs1, nobs))

    # Calculate the matrices necessary for hypotheses testing
    df_resid = nobs - k_exog
    if method == 'pinv':
        # Regression coefficients matrix
        pinv_x = pinv(x)
        params = pinv_x.dot(y)

        # inverse of x'x
        inv_cov = pinv_x.dot(pinv_x.T)
        if matrix_rank(inv_cov, tol=tolerance) < k_exog:
            raise ValueError('Covariance of x singular!')

        # Sums of squares and cross-products of residuals
        # Y'Y - (X * params)'(X * params)
        t = x.dot(params)
        sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
        return (params, df_resid, inv_cov, sscpr)
    elif method == 'svd':
        u, s, v = svd(x, 0)
        if (s > tolerance).sum() < len(s):
            raise ValueError('Covariance of x singular!')
        invs = 1. / s

        params = v.T.dot(np.diag(invs)).dot(u.T).dot(y)
        inv_cov = v.T.dot(np.diag(np.power(invs, 2))).dot(v)
        t = np.diag(s).dot(v).dot(params)
        sscpr = np.subtract(y.T.dot(y), t.T.dot(t))
        return (params, df_resid, inv_cov, sscpr)
    else:
        raise ValueError('%s is not a supported method!' % method)
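
A small self-contained check of the quantities produced by the 'pinv' branch on random data; the residual sum-of-squares-and-cross-products identity below is what the sscpr shortcut relies on:

import numpy as np
from numpy.linalg import pinv

rng = np.random.default_rng(0)
nobs, k_exog, k_endog = 50, 3, 2
x = rng.normal(size=(nobs, k_exog))
y = rng.normal(size=(nobs, k_endog))

pinv_x = pinv(x)
params = pinv_x.dot(y)                      # k_exog x k_endog coefficients
inv_cov = pinv_x.dot(pinv_x.T)              # (x'x)^-1
t = x.dot(params)
sscpr = y.T.dot(y) - t.T.dot(t)             # residual SSCP via the shortcut

resid = y - t
print(np.allclose(sscpr, resid.T @ resid))  # True: same residual SSCP matrix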
Esempio n. 58
0
import numpy as np
from numpy import linalg
from sklearn.metrics import mean_squared_error
from math import exp
from sklearn.preprocessing import normalize

A = np.loadtxt("steel_composition_train.csv", delimiter=",", skiprows=1)
numOfRows =  A.shape[0]
numOfCols = A.shape[1]
trainingData_temp = np.delete(A, [0, numOfCols-1], 1)
targetVector_train = A[:,-1]

# normalization
trainingData = normalize(trainingData_temp, norm='l2', axis=1)

gramMatrix = np.zeros((numOfRows, numOfRows))
for i in range(0, numOfRows):
    for j in range(0, numOfRows):
        temp1 = np.dot(trainingData[i].transpose(), trainingData[j])
        gramMatrix[i][j] = np.power((temp1 + 1), 3)

eye = np.identity(numOfRows)
temp = np.add(gramMatrix, eye)
a = np.dot(linalg.pinv(temp), targetVector_train)
target_pred = np.zeros(numOfRows)
for i in range(0, numOfRows):
    target_pred[i] = np.dot(gramMatrix[:,i], a)
print(np.sqrt(mean_squared_error(targetVector_train, target_pred)))
Esempio n. 59
0
File: qmm.py Progetto: Ralph-AR/qmm
def mmmg(
    crit_list: List["BaseCrit"],
    init: array,
    tol: float = 1e-4,
    max_iter: int = 500,
) -> Tuple[array, List[float]]:
    r"""The Majorize-Minimize Memory Gradient (`3mg`) algorithm.

    The `mmmg` (`3mg`) algorithm is a subspace memory-gradient optimization
    algorithm with an explicit step formula based on Majorize-Minimize Quadratic
    approach [2]_.

    Parameters
    ----------
    crit_list : list of `BaseCrit`
        A list of :class:`BaseCrit` objects that each represent a `μ ψ(V·x - ω)`.
        The criteria are implicitly summed.
    init : array
        The initial point. The `init` array is updated in place to return the
        output. The user must make a copy before calling `mmmg` if this is not
        the desired behavior.
    tol : float, optional
        The stopping tolerance. The algorithm stops when the gradient norm
        falls below `init.size * tol`.
    max_iter : int, optional
        The maximum number of iterations.

    Returns
    -------
    minimizer : array
        The minimizer of the criterion, with the same shape as `init`.
    norm_grad : list of float
        The norm of the gradient during iterations.

    Notes
    -----
    The output of :meth:`BaseCrit.operator`, and the `init` value, are
    automatically vectorized internally. However, the output is reshaped as the
    `init` array.

    References
    ----------
    .. [2] E. Chouzenoux, J. Idier, and S. Moussaoui, “A Majorize-Minimize
       Strategy for Subspace Optimization Applied to Image Restoration,” IEEE
       Trans. on Image Process., vol. 20, no. 6, pp. 1517–1528, Jun. 2011, doi:
       10.1109/TIP.2010.2103083.

    """
    point = init.reshape((-1, 1))
    norm_grad = []

    # The previous moves are initialized with a zero array. Consequently, the
    # first iterations could be implemented more efficiently, at the cost of
    # an if statement.
    move = np.zeros_like(point)
    op_directions = [
        np.tile(_vect(crit.operator, move, init.shape), 2)
        for crit in crit_list
    ]
    step = np.ones((2, 1))

    for _ in range(max_iter):
        # Vectorized gradient
        grad = _gradient(crit_list, point, init.shape)
        norm_grad.append(la.norm(grad))

        # Stopping test
        if norm_grad[-1] < point.size * tol:
            break

        # Memory gradient directions
        directions = np.c_[-grad, move]

        # Step by Majorize-Minimize
        op_directions = [
            np.c_[_vect(crit.operator, grad, init.shape), i_op_dir @ step]
            for crit, i_op_dir in zip(crit_list, op_directions)
        ]
        step = -la.pinv(
            sum(
                crit.norm_mat_major(i_op_dir, point.reshape(init.shape))
                for crit, i_op_dir in zip(crit_list, op_directions))) @ (
                    directions.T @ grad)
        move = directions @ step

        # update
        point += move

    return np.reshape(point, init.shape), norm_grad
Esempio n. 60
0
 def _calc_params(self):
     self.W = pinv(
         self.X_mat.getT() * self.X_mat + self.L * np.identity(self.dim + 1)) * self.X_mat.getT() * self.Y_mat
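
The expression above is the closed-form ridge solution W = (X'X + L*I)^(-1) X'Y written with np.matrix. A minimal equivalent sketch with plain ndarrays (hypothetical design matrix with an intercept column, matching the `self.dim + 1` identity):

import numpy as np
from numpy.linalg import pinv

rng = np.random.default_rng(0)
X = np.c_[np.ones(40), rng.normal(size=(40, 2))]   # 40 samples, dim + 1 = 3 columns
Y = rng.normal(size=(40, 1))
lam = 0.1                                          # the L penalty above

W = pinv(X.T @ X + lam * np.identity(X.shape[1])) @ X.T @ Y
print(W.shape)   # (3, 1)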