Example No. 1
    def fit(self, X: np.array, y: np.array):
        """
        1 / (2 * n_samples) * ||y - Xw||^2_2
        + l1_ratio * ||w||_1
        + 0.5 * l2_ratio * ||w||^2_2
        """
        if self.scale:
            X, self.X_offset, self.X_scale = scale(X)
            y, self.y_offset, self.y_scale = scale(y)
        n_samples, n_features = X.shape
        if self.fit_intercept:
            ones = np.ones((n_samples, 1))
            X = np.concatenate((ones, X), axis=1)
            n_samples, n_features = X.shape

        W = np.random.rand(n_features)
        self.history = []
        for _ in range(self.n_iters):
            preds = X.dot(W)
            dMSE = (1 / n_samples) * X.T.dot(preds - y)  # gradient of the squared-error term
            dl1 = np.sign(W)                             # subgradient of the L1 penalty
            dl2 = 2 * W                                  # gradient of ||w||^2_2
            W = W - self.lr * (dMSE + self.l1_ratio * dl1 +
                               0.5 * self.l2_ratio * dl2)
            self.history.append(mse(X.dot(W), y))
        self.W = W
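The loop above is plain (sub)gradient descent on that objective. A minimal standalone sketch of the same update rule on synthetic data (names and values are illustrative, not taken from the class above):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
true_w = np.array([1.5, -2.0, 0.0])
y = X.dot(true_w) + 0.01 * rng.normal(size=100)

W = rng.random(3)
lr, l1_ratio, l2_ratio = 0.1, 0.01, 0.01
for _ in range(500):
    grad = X.T.dot(X.dot(W) - y) / len(y)  # gradient of the squared-error term
    grad += l1_ratio * np.sign(W)          # subgradient of the L1 penalty
    grad += l2_ratio * W                   # gradient of 0.5 * l2_ratio * ||W||^2_2
    W -= lr * grad
# W approaches true_w, shrunk slightly by the penalties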
Example No. 2
    def predict(self, trans: np.array, ctrl: np.array):
        # Compute the Gaussian prior mean (the Gaussian noise has zero mean)
        self.statePre = trans.dot(self.statePost) + ctrl

        # Compute the prior covariance: P(pre) = A * P(last post) * A^T + R (state-transition noise covariance)
        self.preCov = trans.dot(self.postCov).dot(
            np.transpose(trans)) + self.transNoise
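For reference, the same two predict equations applied to a hypothetical 1-D constant-velocity model (all names and values below are illustrative, and the control term ctrl is taken as zero):

import numpy as np

trans = np.array([[1.0, 1.0],    # position += velocity * dt, with dt = 1
                  [0.0, 1.0]])
statePost = np.array([0.0, 1.0])   # last posterior mean (position, velocity)
postCov = np.eye(2)                # last posterior covariance
transNoise = 0.01 * np.eye(2)      # process-noise covariance R

statePre = trans.dot(statePost)                          # prior mean
preCov = trans.dot(postCov).dot(trans.T) + transNoise    # prior covariance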
Example No. 3
 def fit(self, X: np.array, y: np.array):
     if self.scale:
         X, self.X_offset, self.X_scale = scale(X)
     n_samples, n_features = X.shape
     _, n_classes = y.shape
     if self.fit_intercept:
         ones = np.ones((n_samples, 1))
         X = np.concatenate((ones, X), axis=1)
     
     self.n_samples, n_features = X.shape
     
     if self.multi_class == 'ovr':
         predFn = lambda X, W: X.dot(W)
         self.W = []
         self.history = []
         for i in range(n_classes):
             W = np.random.rand(n_features)
             this_y = np.where(y[:, i] == 1, 1.0, -1.0)  # one-vs-rest labels: +1 for class i, else -1
             W, history = LinearGradientDescent(self.dhinge_loss, X, this_y, W, self.n_iters, 
                                             self.lr, l1_ratio=self.l1_ratio, l2_ratio=self.l2_ratio,
                                             metric=self.hinge_loss, predFn=predFn)
             self.W.append(W)
             self.history.append(history)
     elif self.multi_class == 'multi':
         predFn = lambda X, W: Softmax(X.dot(W)) # Softmax for cross_entropy metric
         W = np.random.rand(n_features, n_classes)
         self.W, self.history = LinearGradientDescent(self.dcrammer_singer_loss, X, y, W, self.n_iters, 
                                         self.lr, l1_ratio=self.l1_ratio, l2_ratio=self.l2_ratio,
                                         metric=cross_entropy, predFn=predFn)
     else:
         raise NotImplementedError
Example No. 4
    def forward(self, x: np.array, method: str) -> None:
        '''
        Compute distances from the test data to the stored training data
        and cache the argsorted neighbor indices.
        :param x: test data
        :param method: distance metric ('l1', 'l2' or 'cosine')
        :return: None
        '''

        assert method in ['l1', 'l2', 'cosine']

        distance = []

        if method == 'l1':
            # L1 (Manhattan) distance, computed one test row at a time
            data_num = x.shape[0]
            for i in tqdm(range(data_num)):
                distance.append(np.sum(np.abs(self.x - x[i]), axis=1))
            distance = np.array(distance)

        elif method == 'l2':
            # squared L2 distance via ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2;
            # the square root is omitted since it does not change the ranking
            distance = (-2 * x.dot(self.x.T)
                        + np.sum(np.square(x), axis=1, keepdims=True)
                        + np.sum(np.square(self.x), axis=1))

        elif method == 'cosine':
            # cosine distance: 1 - (a . b) / (||a|| * ||b||)
            distance = (1 - x.dot(self.x.T)
                        / np.linalg.norm(x, axis=1, keepdims=True)
                        / np.linalg.norm(self.x, axis=1))

        # self.distance = distance
        self.sorted = []
        for i in tqdm(range(distance.shape[0])):  # argsort row by row to avoid running out of memory on the whole array
            self.sorted.append(np.argsort(distance[i]).tolist())
Example No. 5
 def transform(self, matrix: np.array) -> "Vector":
     old_initial = np.array([self.initial_pt.d1, self.initial_pt.d2])
     old_terminal = np.array([self.terminal_pt.d1, self.terminal_pt.d2])
     new_initial = matrix.dot(old_initial)
     new_terminal = matrix.dot(old_terminal)
     out_initial = Point(new_initial[0], new_initial[1])
     out_terminal = Point(new_terminal[0], new_terminal[1])
     return Vector(out_initial, out_terminal)
Example No. 6
def b_formula(x_list: np.array, y_list: np.array, denominator: float):
    """TODO Docs"""
    b = (y_list.mean()
         * x_list.dot(x_list)
         - x_list.mean()
         * x_list.dot(y_list)) \
        / denominator
    return b
Example No. 7
 def compute_similarity(self, vec_A: np.array, vec_B: np.array) -> float:
     """
         The cosine similarity metric is used to measure how similar a pair of vectors are.
         Mathematically, it measures the cosine of the angle between two vectors projected in a multi-dimensional space.
     """
     dot = vec_A.dot(vec_B)
     vec_A_magnitude = np.sqrt(vec_A.dot(vec_A))
     vec_B_magnitude = np.sqrt(vec_B.dot(vec_B))
     return dot / (vec_A_magnitude * vec_B_magnitude)
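Since the method never touches self, the same computation can be checked standalone; parallel vectors should give 1.0:

import numpy as np

vec_A = np.array([1.0, 2.0, 3.0])
vec_B = np.array([2.0, 4.0, 6.0])   # a scaled copy of vec_A
dot = vec_A.dot(vec_B)
similarity = dot / (np.sqrt(vec_A.dot(vec_A)) * np.sqrt(vec_B.dot(vec_B)))
print(similarity)  # 1.0, up to floating-point rounding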
Example No. 8
def _rate_eq(t: np.array, y: np.array, decay_matrix: np.array,
             UC_matrix: np.array, N_indices: np.array,
             coop_ET_matrix: np.array, coop_N_indices: np.array) -> np.array:
    '''Calculates the rhs of the ODE for the relaxation'''
    N_prod_sel = y[N_indices[:, 0]]*y[N_indices[:, 1]]
    UC_term = UC_matrix.dot(N_prod_sel)  # avoid shadowing the UC_matrix argument

    N_coop_prod_sel = y[coop_N_indices[:, 0]]*y[coop_N_indices[:, 1]]*y[coop_N_indices[:, 2]]
    coop_ET_term = coop_ET_matrix.dot(N_coop_prod_sel)

    return decay_matrix.dot(y) + UC_term + coop_ET_term
Example No. 9
def _fit_fast(grid: np.array, x: np.array, y: np.array):
    """Low-level regression and prediction using linear algebra. Copied from seaborn's regression.py."""
    def reg_func(_x, _y):
        return np.linalg.pinv(_x).dot(_y)

    X, y = np.c_[np.ones(len(x)), x], y
    grid = np.c_[np.ones(len(grid)), grid]
    yhat = grid.dot(reg_func(X, y))

    beta_boots = sns.algorithms.bootstrap(X, y, func=reg_func).T
    yhat_boots = grid.dot(beta_boots).T
    return yhat, yhat_boots
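A usage sketch with synthetic data (assuming numpy as np and seaborn as sns are imported, as the function body requires):

import numpy as np
import seaborn as sns

x = np.linspace(0, 10, 50)
y = 2 * x + 1 + np.random.randn(50)
grid = np.linspace(0, 10, 100)
yhat, yhat_boots = _fit_fast(grid, x, y)  # point estimates plus bootstrap draws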
Example No. 10
def get_component_size_laplacian(m: np.array, ns: np.array):
    if all(m.dot(ns) < tol):
        return get_size(ns)
    else:
        prev = 0
        new = get_size(ns)

        while prev < new:
            prev = new
            ns = m.dot(ns)
            new = get_size(ns)

        return new
Example No. 11
def neg_SR(weights: np.array, sigma: np.array, mus: np.array):
    # Returns minus the Sharpe Ratio (as we're minimising)

    estreturn = float(weights.dot(mus))
    std_dev = variance(weights, sigma)**0.5

    return -estreturn / std_dev
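The sign flip exists so the function can be fed to a minimizer. A sketch with scipy.optimize.minimize, supplying a hypothetical variance helper matching the call above:

import numpy as np
from scipy.optimize import minimize

def variance(weights, sigma):  # hypothetical helper: w^T Sigma w
    return weights.dot(sigma).dot(weights)

mus = np.array([0.05, 0.08])          # expected returns
sigma = np.array([[0.04, 0.01],
                  [0.01, 0.09]])      # covariance matrix
res = minimize(neg_SR, np.array([0.5, 0.5]), args=(sigma, mus),
               constraints=[{'type': 'eq', 'fun': lambda w: w.sum() - 1}])
# res.x holds the maximum-Sharpe weights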
Example No. 12
 def dcrammer_singer_loss(self,
                          X: np.array,
                          y: np.array,
                          W: np.array,
                          vectorized: bool = True) -> np.array:
     n_samples, n_classes = y.shape
     preds = X.dot(W)
     if vectorized:
         dW = np.zeros(preds.shape)
         yis = np.where(y == 1)
          error = preds - preds[yis].reshape((-1, 1)).repeat(n_classes, axis=1) + 1
         error[yis] = 0
         dW[error > 0] = 1
         where_e = (error > 0).sum(1)
         dW[yis] -= where_e
         dW = self.C * (1 / self.n_samples) * X.T.dot(dW)
     else:
         dW = np.zeros(W.shape)
         for i in range(n_samples):
             yi = y[i].argmax()
             for j in range(n_classes):
                 if j == yi:
                     continue
                 if preds[i, j] - preds[i, yi] + 1 > 0:
                     dW[:, j] += X[i]
                     dW[:, yi] += -X[i]
         dW = self.C * (1 / self.n_samples) * dW
     return dW
Example No. 13
def gradient_descent(X: np.array, y: np.array, theta: np.array, alpha: float, num_iters: int):
    m = len(y)
    J_history = np.zeros((num_iters, 1))
    for i in range(num_iters):
        theta = theta - (alpha / m) * X.T.dot(X.dot(theta) - y)
        J_history[i] = compute_cost(X, y, theta)
    return [theta, J_history]
Example No. 14
 def applyWindForce(self, wind: numpy.array):
     """Applies the force of the wind to the particles on this face"""
     area = numpy.cross(self.b.x - self.a.x, self.c.x - self.a.x)
     fWind = wind.dot(area) * area / numpy.linalg.norm(area)
     self.a.windForce += fWind
     self.b.windForce += fWind
     self.c.windForce += fWind
Example No. 15
def gradient_descent(
    X: np.array,
    y: np.array,
    alpha: float,
    theta: np.array,
    num_iters: int,
) -> np.array:
    """Perform gradient descent.

    Args:
        X: training examples, with bias term added.
        y: labeled output for training examples
        alpha: learning rate
        theta: parameters
        num_iters: number of iterations of gradient descent to perform.

    Returns:
        New parameters optimized over num_iters of gradient descent.
    """
    m = y.shape[0]
    scale_factor = alpha / m
    for i in range(num_iters):
        hypothesis = X.dot(theta).reshape(-1)
        error = hypothesis - y
        new_t0 = scale_factor * (error * X[:, 0]).sum()
        new_t1 = scale_factor * (error * X[:, 1]).sum()
        theta[0] = theta[0] - new_t0
        theta[1] = theta[1] - new_t1
    return theta
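A quick usage sketch with synthetic data (illustrative values):

import numpy as np

x = np.linspace(0, 1, 50)
y = 2.0 + 3.0 * x
X = np.c_[np.ones_like(x), x]  # prepend the bias column
theta = gradient_descent(X, y, alpha=0.5, theta=np.zeros(2), num_iters=2000)
# theta approaches [2.0, 3.0]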
Example No. 16
def eigen(matx: np.array, eps=1e-3):
    """
    функція знаходження власного числа і вектора методом скалярних добутків
    """
    tr_matrix = matx.transpose()
    y = np.zeros(matx.shape[0]) + START_Y
    z = np.zeros(matx.shape[0]) + START_Y
    eigenvalue = 0
    for j in range(ITERATION_LIMIT):
        next_y = matx.dot(y)
        next_z = tr_matrix.dot(z)
        tmp1 = np.sum(next_y * next_z)
        tmp2 = np.sum(y * next_z)

        tmp_res = tmp1 / tmp2
        if j == 0:
            eigenvalue = tmp_res
        elif abs(eigenvalue - tmp_res) < eps:
            break
        else:
            eigenvalue = tmp_res
        y = next_y
        z = next_z

    eigenvector = y / norm(y)
    return eigenvalue, eigenvector
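The function relies on module-level START_Y, ITERATION_LIMIT and norm; a runnable sketch supplying hypothetical values:

import numpy as np
from numpy.linalg import norm

START_Y = 1.0
ITERATION_LIMIT = 1000

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])
value, vector = eigen(A, eps=1e-6)
# value approaches the dominant eigenvalue, (7 + sqrt(5)) / 2 ≈ 4.618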
Example No. 17
def eigen(mtx: np.array, eps=1e-3):
    """
    функція знаходження власного числа і вектора методом скалярних добутків
    """
    transp = mtx.transpose()
    y = np.zeros(mtx.shape[0]) + Y_0
    z = np.zeros(mtx.shape[0]) + Y_0
    eigenvalue = 0
    for j in range(ITERATION_LIMIT):
        if y.shape[0] == 1:
            y = y.transpose()
        if z.shape[0] == 1:
            z = z.transpose()
        next_y = np.array(mtx.dot(y))
        next_z = np.array(transp.dot(z))

        tmp1 = np.sum(next_y * next_z)
        tmp2 = np.sum(y * next_z)

        tmp_res = tmp1 / tmp2
        if j == 0:
            eigenvalue = tmp_res
        elif abs(eigenvalue - tmp_res) < eps:
            break
        else:
            eigenvalue = tmp_res
        y = next_y
        z = next_z

    eigenvector = y / norma(y)
    return eigenvalue, eigenvector
Example No. 18
def neg_return_with_risk_penalty(weights: np.array, covariance_as_np: np.array,
                                 mus: np.array, risk_aversion: float):
    estreturn = weights.dot(mus)

    risk_penalty = risk_aversion * variance(weights, covariance_as_np) / 2.0

    return -(estreturn - risk_penalty)
Example No. 19
    def softmax_loss(self, X: np.array, y: np.array, reg: float = 1e-2):
        """
        Computes cross entropy loss and gradients on weights
        :param X: train data of shape (batch_size, num_dim)
        :param y: labels of shape (batch_size)
        :param reg: regularization
        :return: loss and gradients for weights
        """
        f = X.dot(self.W)

        # L = -log(e^f_yi / sum_j(e^f_j))
        # where f_yi is the score of the correct class

        exp = np.exp(f - np.max(f, axis=1, keepdims=True))  # shift for numerical stability
        soft_scores = exp / np.sum(exp, axis=1, keepdims=True)
        correct_probs = soft_scores[np.arange(len(soft_scores)), y]
        loss = np.mean(-np.log(correct_probs))
        loss += reg * 0.5 * np.sum(self.W**2)

        soft_scores[np.arange(len(soft_scores)), y] -= 1
        grads = X.T.dot(soft_scores)
        grads /= len(X)  # average over the batch, matching the mean in the loss
        grads += reg * self.W

        return loss, grads
Example No. 20
def _rate_eq_pulse(t: np.array, y: np.array, abs_matrix: np.array, decay_matrix: np.array,
                   UC_matrix: np.array, N_indices: np.array,
                   coop_ET_matrix: np.array, coop_N_indices: np.array) -> np.array:
    ''' Calculates the rhs of the ODE for the excitation pulse
    '''
    return abs_matrix.dot(y) + _rate_eq(t, y, decay_matrix, UC_matrix, N_indices,
                                        coop_ET_matrix, coop_N_indices)
Example No. 21
 def dhinge_loss(self, X: np.array, y: np.array, W: np.array) -> np.array:
     preds = X.dot(W)
     dHL = np.zeros(np.shape(preds))
     ty = y * preds
     dHL[ty < 1] = -y[ty < 1]
     dHL = self.C * (1/self.n_samples) * X.T.dot(dHL)
     return dHL
Example No. 22
    def svm_loss(self,
                 X: np.array,
                 y: np.array,
                 delta: float = 1.0,
                 reg: float = 1e-2):
        """
        Computes svm loss and gradients on weights
        :param X: train data of shape (batch_size, num_dim)
        :param y: labels of shape (batch_size)
        :param delta: delta for svm loss
        :param reg: regularization
        :return: loss and gradients for weights
        """
        f = X.dot(self.W)

        # L = sum_j(max(0 ,y_j - y_i + delta))
        # y_i correct class
        # y_j all other classes

        # dL/dy_i = -sum_j( 1(y_j - y_i + delta > 0))
        # dL/dy_j = 1(y_j - y_i + delta > 0)

        diffs = f - f[np.arange(len(f)), y].reshape(-1, 1) + delta
        diffs[diffs < 0] = 0
        diffs[np.arange(len(f)), y] = 0
        loss = np.mean(np.sum(diffs, axis=1))
        loss += reg * 0.5 * np.sum(self.W**2)

        diffs[diffs > 0] = 1
        row_sum = np.sum(diffs, axis=1)
        diffs[np.arange(len(diffs)), y] -= row_sum.T
        grads = X.T.dot(diffs)
        grads /= len(X)  # average over the batch, matching the mean in the loss
        grads += reg * self.W

        return loss, grads
Example No. 23
    def log_histogram(self, tag: str, data: np.array, step: int, num_bars: int = 30):
        """
        Adds a histogram to log.

        Parameters
        ----------
        tag: str
        data: np.array
            Array of any shape.
        step: int
        num_bars: int
            The number of bars in the resulting histogram.
        """
        data = data.ravel()
        min_ = data.min()
        max_ = data.max()
        sum_ = data.sum()
        sum_sq = data.dot(data)
        if min_ == max_:
            num = 1
            bucket_limit = [min_]
            bucket = [len(data)]
        else:
            bucket, bucket_limit = np.histogram(data, num_bars)
            num = len(bucket_limit)
            bucket_limit = bucket_limit[1:]

        hist = HistogramProto(min=min_, max=max_, sum=sum_, sum_squares=sum_sq, num=num,
                              bucket_limit=bucket_limit, bucket=bucket)
        self._write_event(tag, step, histo=hist)
Example No. 24
def quat_from_two_vectors(v0: np.array, v1: np.array) -> np.quaternion:
    r"""Creates a quaternion that rotates the frist vector onto the second vector

    v1 = (q * np.quaternion(0, *v0) * q.inverse()).imag

    Args:
        v0 (np.array): The starting vector, does not need to be a unit vector
        v1 (np.array): The end vector, does not need to be a unit vector

    Returns:
        np.quaternion: The quaternion
    """

    v0 = v0 / np.linalg.norm(v0)
    v1 = v1 / np.linalg.norm(v1)
    c = v0.dot(v1)
    if c < (-1 + 1e-8):
        c = max(c, -1)
        m = np.stack([v0, v1], 0)
        _, _, vh = np.linalg.svd(m, full_matrices=True)
        axis = vh[2]
        w2 = (1 + c) * 0.5
        w = np.sqrt(w2)
        axis = axis * np.sqrt(1 - w2)
        return np.quaternion(w, *axis)

    axis = np.cross(v0, v1)
    s = np.sqrt((1 + c) * 2)
    return np.quaternion(s * 0.5, *(axis / s))
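A usage sketch, assuming the numpy-quaternion package (which provides np.quaternion) is installed:

import numpy as np
import quaternion  # the numpy-quaternion package; registers np.quaternion

v0 = np.array([1.0, 0.0, 0.0])
v1 = np.array([0.0, 1.0, 0.0])
q = quat_from_two_vectors(v0, v1)
rotated = (q * np.quaternion(0, *v0) * q.inverse()).imag
# rotated is close to v1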
Example No. 25
def resolutionSGD(X: np.array, Y, eps, nIteration):
    """descente de gradient stochastique"""
    D = np.zeros(X.shape[1])  # init à 0
    for _ in range(nIteration):
        i = random.randint(0,X.shape[1]-1)
        gradi = 2*X.T[i].dot(X.dot(D) - Y)
        D[i] -= gradi * eps
    return np.array([D])
Example No. 26
def _jac_rate_eq(t: np.array, y: np.array, decay_matrix: np.array,
                 UC_matrix: np.array, jac_indices: np.array,
                 coop_ET_matrix: np.array, coop_jac_indices: np.array) -> np.array:
    ''' Calculates the jacobian of the ODE for the relaxation
    '''
    y_values = y[jac_indices[:, 2]]
    nJ_matrix = csr_matrix((y_values, (jac_indices[:, 0], jac_indices[:, 1])),
                           shape=(UC_matrix.shape[1], UC_matrix.shape[0]), dtype=np.float64)
    UC_J_matrix = UC_matrix.dot(nJ_matrix).toarray()

    y_coop_values = y[coop_jac_indices[:, 2]]*y[coop_jac_indices[:, 3]]
    nJ_coop_matrix = csr_matrix((y_coop_values, (coop_jac_indices[:, 0], coop_jac_indices[:, 1])),
                                shape=(coop_ET_matrix.shape[1], coop_ET_matrix.shape[0]),
                                dtype=np.float64)
    UC_J_coop_matrix = coop_ET_matrix.dot(nJ_coop_matrix).toarray()

    return decay_matrix.toarray() + UC_J_matrix + UC_J_coop_matrix
Example No. 27
 def forward(self, X: np.array) -> np.array:
     """
     Compute forward pass and remember results for the backward pass
     :param X: input data of size (num_data_points, dimension)
     :return: np.array with layer output
     """
     self.last_X = X.copy()
     return X.dot(self.W) + self.b
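The input is cached so a backward pass can use it; a hypothetical companion method (a sketch of the standard linear-layer gradients, not code from the original class):

 def backward(self, grad_out: np.array) -> np.array:
     """
     grad_out: upstream gradient of size (num_data_points, output dimension)
     """
     self.dW = self.last_X.T.dot(grad_out)  # gradient w.r.t. the weights
     self.db = grad_out.sum(axis=0)         # gradient w.r.t. the bias
     return grad_out.dot(self.W.T)          # gradient w.r.t. the input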
Example No. 28
def compute_delta(phi: np.array, beta, gamma):
    column = np.asarray([beta, gamma, 0])
    column.shape = (3, 1)

    delta = phi.dot(column)

    # delta is a 3x1 column vector
    return delta
Example No. 29
def nachiteration(a: array, b: array, lu: array, x: array, n: int) -> array:
    """Iterative refinement (Nachiteration): improve a solution of A x = b using the LU factors."""
    x_k = copy(x)
    for k in range(n):
        r = b - a.dot(x_k)                     # residual
        p = rueckwaerts(lu, vorwaerts(lu, r))  # solve LU p = r: forward, then backward substitution
        x_k += p

    return x_k
Example No. 30
def resolutionDescenteGradient(X: np.array, Y, eps, nIteration):
    D = np.zeros(X.shape[1])  # initialize to 0
    listD = [D]
    for _ in range(nIteration):
        grad = 2 * X.T.dot(X.dot(D) - Y)
        D = D - grad * eps  # subtraction creates a new array, so no explicit copy is needed
        listD.append(D)
    
    return np.array(listD)
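A quick check on synthetic data (illustrative values; eps must be small enough for the curvature of 2 * X.T.dot(X)):

import numpy as np

X = np.random.randn(200, 3)
Y = X.dot(np.array([1.0, -2.0, 0.5]))
trajectory = resolutionDescenteGradient(X, Y, eps=1e-3, nIteration=500)
# trajectory[-1] is close to [1.0, -2.0, 0.5]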
Example No. 31
def rayleigh_quotient(matrix: scipy.sparse.csr_matrix,
                      vector: np.array) -> float:
    """Compute the Rayleigh quotient of a matrix and a vector.

    """
    return matrix.dot(vector).T.dot(vector) / vector.dot(vector)
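A quick check with a diagonal matrix, where an eigenvector should return its eigenvalue exactly:

import numpy as np
import scipy.sparse

m = scipy.sparse.csr_matrix(np.diag([2.0, 3.0]))
v = np.array([0.0, 1.0])
print(rayleigh_quotient(m, v))  # 3.0, since v is an eigenvector with eigenvalue 3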