Example #1
def magcal_residual(X, a, mb):
    """ residual from all observations given the magnetometer eccentricity
    (soft-iron) matrix parameters a and bias mb """

    # (x-c)^T A^T A (x-c) = 1
    # x^T A^T A x - 2 x^T A^T A c + c^T A^T A c = 1

    # a b c | x' = ax + by + cz
    # 0 d e | y' = dy + ez
    # 0 0 f | z' = fz
    # z = 1/f z'
    # y = 1/d (y' - e/f z')
    # x = 1/a (x' - b/d(y' - e/f z') - c/f z')
    #   = 1/a (x' - b/d y' - (be/df - c/f) z')
    # (x-c)^T A^T A (x-c)
    # [(A x) - (A c)]^2 - 1 = 0

    # y = A^-1 (x - c)        calibrated reading, ideally on the unit sphere
    # y /= ||y||              project onto the sphere
    # q(x; A, c) = (A y + c - x)^2   residual back in raw-measurement space

    Y = np.dot(X - mb, Ainv(a)).T
    Y /= np.linalg.norm(Y, axis=0)
    # Y /= np.sqrt(np.sum(np.square(Y), axis=0))
    Y = np.dot(Y.T, Amatrix(a)) + mb
    return np.mean(np.sum(np.square(X - Y), axis=1))
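A minimal sketch of the two helpers this example assumes: Amatrix packs six
free parameters into the upper-triangular matrix sketched in the comments
above, and Ainv is its inverse. The parameter layout is an assumption, not
confirmed by the source.

import numpy as np

def Amatrix(a):
    # upper-triangular eccentricity matrix  [a b c; 0 d e; 0 0 f]
    return np.array([[a[0], a[1], a[2]],
                     [0.0,  a[3], a[4]],
                     [0.0,  0.0,  a[5]]])

def Ainv(a):
    # hypothetical inverse helper used by magcal_residual
    return np.linalg.inv(Amatrix(a))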
Example #2
 def log_loss_regression(self, W_vect, X, Y):
     log_prior = -self.L2_reg * np.dot(W_vect, W_vect)
     Yhat = self.predicted_regression(W_vect, X)
     Y = np.ravel(Y)
     Yhat = np.ravel(Yhat)
     N = X.shape[0]
     log_lik = -0.5*np.sum(np.square(Y - Yhat))/N
     return -log_prior - log_lik
Example #3
 def fit(self, target_airfoil):
     target = target_airfoil - self.airfoil0  # N/2 x 2
     target = target.T.reshape(-1, 1)  # N x 1
     alpha = np.linalg.pinv(self.u_truncated) @ target  # dim x 1
     alpha = np.squeeze(alpha)
     fitted_airfoil = self.synthesize(alpha)
     error = np.mean(
         np.sum(np.square(fitted_airfoil - target_airfoil), axis=1))
     return alpha, fitted_airfoil, error
Example #4
def K(p):
    """
    Calculates the kinetic energy of some given momentum values and returns
    a float.

    Arguments:
    - p: Numpy array of momentum values
    """
    return ag_np.sum(ag_np.square(p)) / 2
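Usage sketch: with autograd, the gradient of K recovers the momentum itself,
which is exactly what an HMC leapfrog step needs (dK/dp = p).

import autograd.numpy as ag_np
from autograd import grad

p = ag_np.array([1.0, -2.0, 0.5])
print(grad(K)(p))   # [ 1.  -2.   0.5]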
Example #5
def response(params, inputs=None, targets=None, channels=None, hps=None):
    if targets is None:
        targets = inputs
    return np.argmin(np.sum(np.square(
        np.subtract(
            targets,
            forward(params, inputs=inputs, channels=channels, hps=hps)[-1])),
                            axis=2,
                            keepdims=True),
                     axis=0)[:, 0]
Example #6
    def final_cost(self, state):
        '''
        if len(state.shape) >= 2:
            goal = np.repeat(self.goal[np.newaxis, :], repeats=state.shape[0], axis=0)
            achieved_goal = np.transpose(self.forward_dynamics(state[:, 0], state[:, 2]), [1, 0])
            cost = self.a1 * np.sum(np.square(goal - achieved_goal), axis=1) + self.a2 * np.square(state[:, 1]) + \
                                                                    self.a3 * np.square(state[:, 3])
        else:
            cost = self.a1 * np.sum(np.square(self.goal - self.forward_dynamics(state[0], state[2]))) + \
                   self.a2 * np.square(state[1]) + self.a3 * np.square(state[3])
        '''
        if len(state.shape) >= 2:
            cost = 0.5 * (np.square(1.0 - np.cos(state[:, 2])) + np.square(state[:, 1]) + np.square(state[:, 3]))

        else:
            cost = 0.5 * (np.square(1.0 - np.cos(state[2])) + np.square(state[1]) + np.square(state[3]))

        return cost
Example #7
def objective(eps_space):
    F.eps_r *= eps_space
    measured = []
    for t_index in range(steps):
        fields = F.forward(Jz=source(t_index))
        measured.append(npa.sum(fields['Ez'] * measure_pos))
    measured_f = my_fft(npa.array(measured))
    spectral_power = npa.square(npa.abs(measured_f))
    return spectral_power
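The snippet assumes a my_fft helper defined elsewhere; a minimal stand-in,
assuming autograd's FFT support is sufficient for the gradient:

import autograd.numpy as npa

def my_fft(signal):
    # differentiable discrete Fourier transform of the measured time trace
    return npa.fft.fft(signal)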
Example #8
def loss(params, inputs=None, targets=None, hps=None):
    ## Cross-entropy (I think); usually explodes numerically
    # o = forward(params, inputs = inputs, hps = hps)[-1]
    # c = o * targets + (1 - o) * (1 - targets)
    # return -np.sum(np.log(c))
    ## SSE
    return np.sum(
        np.square(
            np.subtract(forward(params, inputs=inputs, hps=hps)[-1], targets)))
Example #9
 def loss(theta, X, y):
     y_hat = self.sigmoid(np.dot(X, theta))
     y_hat = np.squeeze(y_hat)
     y = np.squeeze(y)
     error = -np.sum(
         y.dot(np.log10(y_hat)) + (1 - y).dot(np.log10(1 - y_hat)))
     error = error / X.shape[0]
     error += self.l2_coef / (2 * X.shape[0]) * np.sum(np.square(theta))
     return error
Example #10
def loss(params, inputs = None, targets = None, hps = None):
    return np.sum(
        np.square(
            np.subtract(
                forward(params, inputs = inputs, hps = hps)[-1],
                targets
            )
        )
    )
Example #11
 def loss(w):
     lossVal = 0
     for wi, aH in zip(w, alphaHats):
         den = (1 / np.dot(wi, n))
         aXw = np.multiply(a, wi)
         dot = np.dot(aXw, n)
         tilde = den * dot
         lossVal = lossVal + .5 * np.square(aH - tilde)
     return lossVal
Example #12
def loss(params, X=None, Y=None, init_cell_state=None, init_hidden_state=None):
    output = forward_seq(params,
                         X=X,
                         init_cell_state=init_cell_state,
                         init_hidden_state=init_hidden_state)

    return np.sum(
        np.square(np.subtract(output, Y))
    )  # + np.sum(np.abs([np.sum(params[layer]['w']) for layer in params])) * .1 # <-- weight size regularization?
Example #13
    def loss(self, A, B, X, y, prob):
        ''' y is a matrix of size len(X), 50'''
        assert y.ndim == 2
        assert y.shape[1] == 50

        k = self.get_bin_size(X)
        prediction = np.dot(1. / 12. * A, np.matmul(X.values[:, 2:], np.exp(B * k)))
        loss = np.mean(np.square(prediction - np.multiply(y, prob).T))
        return loss
Example #14
def PrintPerf(Params, iter, _):
    if iter == 0:
        print("     Epoch     |    Train cost  ")
    if iter % 5 == 0:
        Cost = ObjectiveFunWrap(Params, iter)
        Gradient = flatten(ObjectiveGrad(Params, iter))
        print(
            str(iter) + '  ' + str(np.round(Cost, 6)) + '  ' +
            str(np.square(Gradient[0]).sum()))
Example #15
def fnorm(matrix):
    """
    get the F norm.
    """
    U, S, V = np.linalg.svd(matrix)
    S = np.diag(S)
    fnorm_s = np.sqrt(np.trace(np.square(S)))

    return fnorm_s
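Equivalent check: the Frobenius norm is the square root of the sum of squared
singular values, which NumPy also computes directly.

import numpy as np

M = np.arange(6.0).reshape(2, 3)
assert np.isclose(fnorm(M), np.linalg.norm(M, 'fro'))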
Example #16
def loss(params, inputs = None, targets = None, channels = None, labels_indexed = None, hps = None):
    return np.sum(
        np.square(
            np.subtract(
                forward(params, inputs = inputs, hps = hps)[-1],
                targets
            )
        )
    )
Example #17
def ba_objective(cams, X, w, obs, feats):
    p = obs.shape[0]
    reproj_err = np.empty((p,2))
    for i in range(p):
        reproj_err[i] = compute_reproj_err(cams[obs[i,0]],X[obs[i,1]],w[i],feats[i])

    w_err = 1. - np.square(w)

    return (reproj_err, w_err)
Example #18
def square_corrcoeff_full_cost(V, X, grad=True):
    '''
    The cost function for the correlation analysis. This effectively measures the square difference
    in correlation coefficients after transforming to an orthonormal basis given by V.

    Args:
        V: 2D array of shape (N, K) with V.T * V = I
        X: 2D array of shape (P, N) containing centers of P manifolds in an N=P-1 dimensional
            orthonormal basis
    '''
    # Verify that the shapes are correct
    P, N = X.shape
    N_v, K = V.shape
    assert N_v == N

    # Calculate the cost
    C = np.matmul(X, X.T)
    c = np.matmul(X, V)
    c0 = np.diagonal(C).reshape(P, 1) - np.sum(
        np.square(c), axis=1, keepdims=True)
    Fmn = np.square(C - np.matmul(c, c.T)) / np.matmul(c0, c0.T)
    cost = np.sum(Fmn) / 2

    if grad is False:  # skip gradient calc since not needed, or autograd is used
        gradient = None
    else:
        # Calculate the gradient
        X1 = np.reshape(X, [1, P, N, 1])
        X2 = np.reshape(X, [P, 1, N, 1])
        C1 = np.reshape(c, [P, 1, 1, K])
        C2 = np.reshape(c, [1, P, 1, K])

        # Sum the terms in the gradient
        PF1 = ((C - np.matmul(c, c.T)) / np.matmul(c0, c0.T)).reshape(
            P, P, 1, 1)
        PF2 = (np.square(C - np.matmul(c, c.T)) /
               np.square(np.matmul(c0, c0.T))).reshape(P, P, 1, 1)
        Gmni = -PF1 * C1 * X1
        Gmni += -PF1 * C2 * X2
        Gmni += PF2 * c0.reshape(P, 1, 1, 1) * C2 * X1
        Gmni += PF2 * (c0.T).reshape(1, P, 1, 1) * C1 * X2
        gradient = np.sum(Gmni, axis=(0, 1))

    return cost, gradient
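Usage sketch for the cost above, following the shapes in the docstring: an
orthonormal V can be drawn via a QR decomposition.

import numpy as np

P, K = 6, 2
N = P - 1
X = np.random.randn(P, N)                    # P manifold centers
V, _ = np.linalg.qr(np.random.randn(N, K))   # columns satisfy V.T @ V = I
cost, gradient = square_corrcoeff_full_cost(V, X)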
Example #19
def gen_toy_ca_GP(N, D, CA_struct, Pois_noise=True, scale_pois=1):
	'''
	Generate data with GP statistics with variance 'rh' and length scale 'len_sc'.
	Data are generated of length N, with batch size D. With the default
	Pois_noise = True, Poisson spikes are drawn from the GP rate; Pois_noise =
	False would indicate Gaussian noise added to the GP instead.
	Note: rh, len_sc, rate and Xstruct are assumed to come from the enclosing
	scope, and lfilter is scipy.signal.lfilter.
	'''
	M1 = np.array([range(N)]) - np.transpose(np.array([range(N)]))
	K = rh * np.exp(-(np.square(M1) / (2 * np.square(len_sc))))
	x = np.array(np.random.multivariate_normal(np.zeros(N), K))

	x = [np.log(1 + np.exp(x)) for batch in range(D)] / np.asarray(scale_pois)
	y = np.random.poisson(x)  # Poisson spikes from the GP
	x = x[0]

	# NOTE: x and y above are not used below; the returned trace is built
	# from the constant-rate spike counts that follow.
	logsprate = np.log(rate) * np.ones(int(np.floor(Xstruct['T'] / Xstruct['dtSp'])))  # constant (for now)
	spcounts = np.random.poisson(Xstruct['dtSp'] * np.exp(logsprate))  # Poisson spike counts

	##### Optional model #####
	if Xstruct['AR2'] is True:
		# AR2: two-timescale calcium decay. The original lines here were
		# MATLAB (exp/poly/filter); this is a Python translation that assumes
		# the MATLAB 'par' struct corresponds to Xstruct.
		q = [np.exp(-Xstruct['dtSp'] / Xstruct['calc_ts']),
		     np.exp(-Xstruct['dtSp'] / Xstruct['calc_ts_b'])]
		a_true = np.poly(q)  # polynomial coefficients with roots q
		z = lfilter([1.0], a_true, spcounts)
	else:
		# AR1: convolve with an exponential filter
		z = lfilter([Xstruct['a']], [1, -np.exp(-Xstruct['dtSp'] / Xstruct['calc_ts'])], spcounts)

	trace = z + np.sqrt(Xstruct['Gauss_sigma']) * np.random.randn(np.size(z))  # add noise

	return trace
Example #20
def forward(params, inputs = None, hps = None):
    hidden1_activations = np.array([
        hps['hidden1_activation'](
            np.add(
                np.matmul(
                    inputs,
                    params['input']['hidden1']['weights'][c,:,:],
                ),
                params['input']['hidden1']['bias'][c,:,:],
            )
        ) 
    for c in range(params['input']['hidden1']['weights'].shape[0])
    ])

    hidden2_activations = np.array([
        hps['hidden2_activation'](
            np.add(
                np.matmul(
                    hidden1_activations[c,:,:],
                    params['hidden1']['hidden2']['weights'][c,:,:],
                ),
                params['hidden1']['hidden2']['bias'][c,:,:],
            )
        ) 
    for c in range(params['hidden1']['hidden2']['weights'].shape[0])
    ])

    channel_activations = np.array([
        hps['channel_activation'](
            np.add(
                np.matmul(
                    hidden2_activations[c,:,:],
                    params['hidden2']['output']['weights'][c,:,:],
                ),
                params['hidden2']['output']['bias'][c,:,:],
            )
        ) 
    for c in range(params['hidden2']['output']['weights'].shape[0])
    ])

    ## reconstructive error
    output_activation = np.sum(
        np.square(
            np.subtract(
                inputs,
                channel_activations,
            )
        ),
        axis = 2
    ).T

    output_activation = 1 - hps['classifier_activation'](
        output_activation / output_activation.sum(axis=1, keepdims = True)
    )

    return [hidden1_activations, hidden2_activations, channel_activations, output_activation]
Example #21
 def _LOOCrossValidation(self, hyperparameters):  # scales, nuggets):
     self._CalculateNecessaryMatrices(scales=hyperparameters[1:],
                                      nuggets=hyperparameters[0])
     Kinv_diag = np.diag(np.linalg.inv(self.cov_matrix)).reshape(-1, 1)
     LOO_mean_minus_target = self.alpha / Kinv_diag
     LOO_sigma = np.reciprocal(Kinv_diag)
     log_CV = -0.5 * (np.log(LOO_sigma) + np.square(LOO_mean_minus_target) *
                      Kinv_diag + np.log(2 * np.pi))
     # print(self.alpha.shape, self.cholesky.shape, self.cov_matrix.shape, Kinv_diag.shape)
     return log_CV.sum()
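For reference, these are the closed-form leave-one-out identities for GP
regression (Rasmussen & Williams, GPML, Eqs. 5.10-5.12): with alpha = K^{-1} y,

    mu_i - y_i = -alpha_i / [K^{-1}]_{ii},    sigma_i^2 = 1 / [K^{-1}]_{ii},

so log_CV above sums -1/2 (log sigma_i^2 + (mu_i - y_i)^2 / sigma_i^2 + log 2 pi);
only the square of the mean residual enters, so its sign is immaterial.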
Example #22
def intensity(eps_arr):

    eps_r = eps_arr.reshape((Nx, Ny))
    # set the permittivity of the FDFD and solve the fields
    F.eps_r = eps_r
    Ex, Ey, Hz = F.solve(source)

    # compute the gradient and normalize if you want
    I = npa.sum(npa.square(npa.abs(Hz * probe)))
    return -I / I_H0
Example #23
def ba_objective(cams, X, w, obs, feats):
    p = obs.shape[0]
    reproj_err = np.empty((p, 2))
    for i in range(p):
        reproj_err[i] = compute_reproj_err(cams[obs[i, 0]], X[obs[i, 1]], w[i],
                                           feats[i])

    w_err = 1. - np.square(w)

    return (reproj_err, w_err)
Example #24
 def compKphig_2d(self, z, R, eps):
     """
     Compute spatial cross-cov between CSD and LFP (fwd model applied to the x part).
     :param z: vector (nz, 2) of 2D CSD locations
     :param R: fwd model param value
     :param eps: spacing in front of array to assume zero charge
     :return: cross-covariance matrix
     """
     ell1 = self.params['ell1']['value']
     ell2 = self.params['ell2']['value']
     Ks = np.exp(-0.5 * np.square(
         (self.gl_x_grid[:, 0][:, None] - z[:, 0][None, :]) /
         ell1)) * np.exp(-0.5 * np.square(
             (self.gl_x_grid[:, 1][:, None] - z[:, 1][None, :]) / ell2))
     fwd_wts = b_fwd_2d(None, None, R, eps,
                        self.delta_w)  # (nx1*nx2, ngl1*ngl2)
     A = self.gl_w_prod.T * fwd_wts
     res = np.dot(A, Ks)
     return res
Example #25
 def cost(coef):
     X_coef = -1 * np.matmul(X_, coef)
     z = 1 / (1 + np.exp(X_coef))
     epsilon = 1e-5
     class1 = np.multiply(y_, np.log(z + epsilon))
     class2 = np.multiply(1 - y_, np.log(1 - z + epsilon))
     ans = -(1 / y_.size) * (np.sum(class1 + class2))
     if self.penalty == "l1":
         return ans + self.val * np.sum(np.absolute(coef))
     else:
         return ans + self.val * np.sum(np.square(coef))
Example #26
    def loss(self, A, B, X, y, prob):
        ''' y is a matrix of size len(X), 50'''
        assert y.ndim == 2
        assert y.shape[1] == 50

        k = self.get_bin_size(X)
        prediction = np.dot(
            1. / 12. * A,
            np.matmul(X[[str(i) for i in range(34)]].values, np.exp(B * k)))
        loss = np.mean(np.multiply(np.square(prediction - y.values.T), prob.T))
        return loss
Example #27
def cost_fraction(H, A, T, E_np_masked, case):
    HAT = multiply_case(H, A, T, case)
    num_appliances = len(A)-1
    c = 0
    for appliance_num in range(1, num_appliances + 1):
        gt_appliance_fr = E_np_masked[:, appliance_num, :] / E_np_masked[:, 0, :]
        pred_appliance_fr = HAT[:, appliance_num, :] / E_np_masked[:, 0, :]
        diff_appliance_fr = (pred_appliance_fr - gt_appliance_fr).flatten()
        diff_appliance_fr = diff_appliance_fr[~np.isnan(diff_appliance_fr)]
        c = c + np.sqrt(np.square(diff_appliance_fr).mean())
    return c
Example #28
    def diagonal(self, X):
        alpha, mean_lam, gamma, delta = self._get_params(X)
        cfg, res, kappa, kr_pref, _ = self._compute_terms(
            X, alpha, mean_lam, gamma, delta)
        kappa2 = self._compute_kappa(res * 2, alpha, mean_lam)
        kdiag_res = anp.subtract(kappa2, anp.square(kappa))
        kdiag_res = anp.reshape(anp.multiply(kdiag_res, anp.square(kr_pref)),
                                (-1, ))
        kdiag_x = self.kernel_x.diagonal(cfg)
        if self.encoding_delta is None:
            if delta > 0.0:
                tmpvec = anp.subtract(kappa * 2, kappa2 * delta)
                tmpvec = anp.reshape(tmpvec * (-delta) + 1.0, (-1, ))
            else:
                tmpvec = 1.0
        else:
            tmpvec = anp.subtract(kappa * 2, anp.multiply(kappa2, delta))
            tmpvec = anp.reshape(anp.multiply(tmpvec, -delta) + 1.0, (-1, ))

        return kdiag_x * tmpvec + kdiag_res
Example #29
 def fit(self, target_airfoil):
     target = target_airfoil - self.airfoil0  # n_points x 2
     target = target[:-1]  # (n_points-1) x 2
     target = target.T.reshape(2, -1, 1)  # 2 x (n_points-1) x 1
     alpha = np.linalg.pinv(self.v_truncated) @ target  # 2 x dim/2 x 1
     alpha = np.squeeze(alpha)  # 2 x dim/2
     alpha = alpha.T.flatten()
     fitted_airfoil = self.synthesize(alpha)
     error = np.mean(
         np.sum(np.square(fitted_airfoil - target_airfoil), axis=1))
     return alpha, fitted_airfoil, error
Example #30
def prediction_loss_full(X, Y, W, V, b, c, l):
    WX = np.matmul(W, np.transpose(X))
    b = np.array([
        b,
    ] * X.shape[0]).transpose()
    c = np.array([
        c,
    ] * X.shape[0]).transpose()
    b_plus_WX = np.add(b, WX)
    sigma = np.tanh(b_plus_WX)
    f = np.add(c, np.matmul(V, sigma))

    L = 0
    for i in range(Y.shape[0]):
        softmax = np.log(np.sum(np.exp(f[:, i])))
        y = Y[i]
        L += -f[y][i] + softmax

    L += l * (np.sum(np.square(V)) + np.sum(np.square(W)))
    return L
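For reference, the log-sum-exp in the loop above can overflow for large
logits; a stable variant subtracts the per-column max first. A minimal sketch
(f is the C x N logit matrix and Y the integer labels, as above):

import autograd.numpy as np

def stable_softmax_nll(f, Y):
    m = np.max(f, axis=0, keepdims=True)
    lse = m[0] + np.log(np.sum(np.exp(f - m), axis=0))  # stable log-sum-exp
    return np.sum(lse - f[Y, np.arange(Y.shape[0])])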
Example #31
def intensity(eps_arr):

    # reshape the design variables
    eps_r = eps_arr.reshape((Nx, Ny))

    # linear simulation
    F_lin.eps_r = eps_r
    _, _, Ez = F_lin.solve(source)

    # nonlinear simulation
    density = (eps_max - eps_r) / (eps_max - 1)
    F_nl.eps_r = lambda E: eps_r + density * 3 * chi3 * npa.square(npa.abs(Ez))
    _, _, Ez_nl = F_nl.solve(source)

    # compute the intensities of both simulations
    I_lin = npa.sum(npa.square(npa.abs(Ez * probe)))
    I_nl = npa.sum(npa.square(npa.abs(Ez_nl * probe)))

    # maximize linear intensity, minimize nonlinear
    return -(I_lin - I_nl) / I_E0
Example #32
 def loss(self, theta, x, y):
     assert (x.shape[0] == y.shape[0])
     pred = self.predicition(theta, x)
     pred = np.squeeze(pred)
     y = np.squeeze(y)
     res = -np.sum(y.dot(np.log10(pred)) + (1 - y).dot(np.log10(1 - pred)))
     res = res / x.shape[0]
     res += self.l2_coef / (2 * x.shape[0]) * np.sum(np.square(theta))
     res += self.l1_coef / (2 * x.shape[0]) * np.sum(np.abs(theta))
     # print(res)
     return res
Example #33
 def area_wetted(self):
     # Returns the wetted area of a wing.
     area = 0
     for i in range(len(self.xsecs) - 1):
         chord_eff = (self.xsecs[i].chord + self.xsecs[i + 1].chord) / 2
         this_xyz_te = self.xsecs[i].xyz_te()
         that_xyz_te = self.xsecs[i + 1].xyz_te()
         span_le_eff = np.sqrt(
             np.square(self.xsecs[i].xyz_le[1] -
                       self.xsecs[i + 1].xyz_le[1]) +
             np.square(self.xsecs[i].xyz_le[2] -
                       self.xsecs[i + 1].xyz_le[2]))
         span_te_eff = np.sqrt(
             np.square(this_xyz_te[1] - that_xyz_te[1]) +
             np.square(this_xyz_te[2] - that_xyz_te[2]))
         span_eff = (span_le_eff + span_te_eff) / 2
         area += chord_eff * span_eff
     if self.symmetric:
         area *= 2
     return area
Example #34
def dKdu(u, v):
  """
  compute the grads of a given K w.r.t. u
  you can just switch order of args to compute it for v
  """
  anorm = np.sqrt(np.sum(u*u))
  bnorm = np.sqrt(np.sum(v*v))
  den2 = (anorm * bnorm) + 1e-20 

  a = v / den2
  b = u / np.sum(np.square(u))
  c = cosine_sim(u,v)
  return a - b*c
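A quick check of the manual gradient against autograd (cosine_sim is assumed
to be the usual dot-product similarity; defined here for self-containment):

import autograd.numpy as np
from autograd import grad

def cosine_sim(u, v):
    return np.dot(u, v) / (np.sqrt(np.sum(u * u)) * np.sqrt(np.sum(v * v)))

u = np.array([1.0, 2.0, 3.0])
v = np.array([0.5, -1.0, 2.0])
assert np.allclose(dKdu(u, v), grad(cosine_sim, 0)(u, v))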
Example #35
 def NLL(self, W_vect, X, Y):
     '''Negative log likelihood.
     For classification, we assume Y is a N*C one-hot matrix.'''
     if self.output_type == 'classification':
         log_lik = np.sum(self.predictions(W_vect, X) * Y)
     else:
         log_lik = 0
         Yhat = self.predictions(W_vect, X)
         Y = np.ravel(Y)
         Yhat = np.ravel(Yhat) 
         log_lik = -0.5*np.sum(np.square(Y - Yhat))
     B = X.shape[0] # batch size
     log_lik = (log_lik / B ) * self.Ntrain
     return -log_lik 
Example #36
def rodrigues_rotate_point(rot,X):
    sqtheta = np.sum(np.square(rot))
    if sqtheta != 0.:
        theta = np.sqrt(sqtheta)
        costheta = np.cos(theta)
        sintheta = np.sin(theta)  
        theta_inverse = 1. / theta

        w = theta_inverse * rot
        w_cross_X = cross(w,X)
        tmp = np.dot(w,X) * (1. - costheta)

        return X*costheta + w_cross_X * sintheta + w * tmp
    else:
        return X + cross(rot,X)
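Sanity check of the Rodrigues rotation above against SciPy (assumes the cross
helper is equivalent to np.cross):

import numpy as np
from scipy.spatial.transform import Rotation

def cross(a, b):
    return np.cross(a, b)

rot = np.array([0.1, -0.2, 0.3])   # axis-angle vector
X = np.array([1.0, 2.0, 3.0])
assert np.allclose(rodrigues_rotate_point(rot, X),
                   Rotation.from_rotvec(rot).apply(X))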
Example #37
 def NLL(self, W_vect, X, Y, N=None):
     '''Negative log likelihood.
     For classification, we assume Y is a N*C one-hot matrix.'''
     if self.output_type == 'classification':
         log_lik = np.sum(self.predictions(W_vect, X) * Y)
     else:
         log_lik = 0
         Yhat = self.predictions(W_vect, X)
         Y = np.ravel(Y)
         Yhat = np.ravel(Yhat) 
         log_lik = -0.5*np.sum(np.square(Y - Yhat))
     if N is not None:
         # Compensate for this being a minibatch
         B = X.shape[0] # batch size
         log_lik = (log_lik / B ) * N
     return -log_lik 
Example #38
def magcal_residual_old(X, a, mb):
    mag = np.dot(X - mb, Ainv(a))
    return np.mean(np.square(1 - np.sum(mag*mag, axis=1)))
Example #39
def radial_distort(rad_params,proj):
    rsq = np.sum(np.square(proj))
    L = 1. + rad_params[0]*rsq + rad_params[1]*rsq*rsq
    return proj*L
Example #40
def squared_loss(y_pred, y):
    '''y is N*1, y_pred is N*1.
    Returns scalar'''
    N = y.shape[0]
    # np.sum (not builtin sum) so an N*1 input reduces to a scalar
    return np.sum(np.square(y - y_pred))/N
Example #41
def sqsum(x):
    return (np.square(x)).sum()
Example #42
File: dr.py  Project: HelenLiGit/POT
def dist(x1, x2):
    """ Compute squared euclidean distance between samples (autograd)
    """
    x1p2 = np.sum(np.square(x1), 1)
    x2p2 = np.sum(np.square(x2), 1)
    return x1p2.reshape((-1, 1)) + x2p2.reshape((1, -1)) - 2 * np.dot(x1, x2.T)
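Usage sketch: D[i, j] below is the squared Euclidean distance ||x1[i] - x2[j]||^2.

import autograd.numpy as np

x1 = np.array([[0.0, 0.0], [1.0, 0.0]])
x2 = np.array([[0.0, 1.0]])
print(dist(x1, x2))   # [[1.], [2.]]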
Example #43
def cost(theta):
    return np.square(theta)
Example #44
def squared_loss(y_pred, y):
    N = y.shape[0]
    return 0.5*np.sum(np.square(y - y_pred))/N
Example #45
    print(cosine_sim(u, v))

    # compute deltas automatically
    # just with respect to u
    cs_grad = grad(cosine_sim, 0)
    auto_deltas = cs_grad(u,v)

    # compute deltas manually
    manual_deltas = np.zeros_like(auto_deltas)

    # compute the denominator
    anorm = np.sqrt(np.sum(u*u))
    bnorm = np.sqrt(np.sum(v*v))
    den2 = (anorm * bnorm) + 1e-5

    a = v / den2
    b = u / np.sum(np.square(u))
    c = cosine_sim(u,v)
    manual_deltas = a - b*c
    

    print "auto deltas"
    print auto_deltas
    print "manual deltas"
    print manual_deltas

    """
    manual_deltas gives us dk_i / dK_j
    """

Example #46
File: ml.py  Project: gablg1/ml-util
def rmse(predictions, targets):
    assert(canSum(predictions, targets))
    return np.sqrt(np.mean(np.square(predictions-targets)))
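canSum is assumed to be a shape-compatibility helper from the same project; a
hypothetical stand-in plus a usage line:

import numpy as np

def canSum(a, b):
    # hypothetical: True when a + b broadcasts
    try:
        np.broadcast(a, b)
        return True
    except ValueError:
        return False

print(rmse(np.array([1.0, 2.0]), np.array([1.5, 1.5])))   # 0.5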