Example #1
def test_update_b_L_C_weighted_equals_compute_b_and_compute_L_C_constant_weights(
):
    N = 2
    D = 2
    m = 2
    omega = np.random.randn(D, m)
    u = np.random.uniform(0, 2 * np.pi, m)
    X1 = np.random.randn(N, D)
    X2 = np.random.randn(N, D)
    log_weights1 = np.log(np.ones(N))
    log_weights2 = np.log(np.ones(N))

    stacked = np.vstack((X1, X2))
    b = compute_b(stacked, omega, u)
    L_C = np.linalg.cholesky(compute_C(stacked, omega, u))

    b_updated = compute_b(X1, omega, u)
    L_C_updated = np.linalg.cholesky(compute_C(X1, omega, u))
    log_sum_weights1 = log_sum_exp(log_weights1)

    b_updated, L_C_updated = update_b_L_C_weighted(X2, b_updated, L_C_updated,
                                                   log_sum_weights1,
                                                   log_weights2, omega, u)

    assert_allclose(b, b_updated)
    assert_allclose(L_C, L_C_updated)
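Note: the test checks that fitting b and L_C on the stacked data in one batch agrees with first fitting on X1 and then folding in X2 with constant (unit) weights. The sketch below is only a scalar analogue of that batch-versus-incremental equivalence; it does not use the library's compute_b/compute_C and the data are purely illustrative.

import numpy as np

# batch mean of the stacked data
X1, X2 = np.random.randn(5), np.random.randn(5)
batch = np.concatenate((X1, X2)).mean()

# incremental mean: start from X1, then fold in X2 one point at a time
incremental = X1.mean()
n = len(X1)
for x in X2:  # constant (unit) weights
    n += 1
    incremental += (x - incremental) / n

assert np.allclose(batch, incremental)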
Example #2
    def update_fit(self, X, log_weights=None):
        assert_array_shape(X, ndim=2, dims={1: self.D})
        N = len(X)

        # don't do anything if no data was observed
        if N == 0:
            return

        if log_weights is None:
            log_weights = np.log(np.ones(N))
        assert_array_shape(log_weights, ndim=1, dims={0: N})

        # first update: use the first element of X and log_weights, then discard it
        if self.log_sum_weights is None:
            # assume we have observed fake terms, which is needed to make the system well-posed
            # the L_C encodes that the fake terms had covariance self.lmbda, which acts as a regulariser
            self.L_C = np.eye(self.m) * np.sqrt(self.lmbda)
            self.log_sum_weights = log_weights[0]
            self.b = compute_b(X[0].reshape(1, self.D), self.omega, self.u)
            self.n = 1

            X = X[1:]
            log_weights = log_weights[1:]
            N -= 1

        # don't do anything if no data was observed
        if N == 0:
            return

        old_L_C = np.array(self.L_C, copy=True)
        self.b, self.L_C = update_b_L_C_weighted(X, self.b, self.L_C,
                                                 self.log_sum_weights,
                                                 log_weights, self.omega,
                                                 self.u)

        if np.any(np.isnan(self.L_C)) or np.any(np.isinf(self.L_C)):
            logger.warning(
                "Numerical error while updating Cholesky factor of C.\n"
                "Before update:\n%s\n"
                "After update:\n%s\n"
                "Updating data:\n%s\n"
                "Updating log weights:\n%s\n" %
                (str(old_L_C), str(self.L_C), str(X), str(log_weights)))
            raise RuntimeError(
                "Numerical error while updating Cholesky factor of C.")

        # update terms and weights
        self.n += len(X)
        self.log_sum_weights = log_sum_exp(
            list(log_weights) + [self.log_sum_weights])

        # finally, update the solution
        self.theta = fit_L_C_precomputed(self.b, self.L_C)
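Note: the first-update branch above initialises L_C as np.eye(self.m) * np.sqrt(self.lmbda). This is simply the Cholesky factor of the diagonal regulariser lmbda * I mentioned in the comment; a minimal standalone check (the values of m and lmbda below are arbitrary, chosen only for illustration):

import numpy as np

# cholesky(lmbda * I) is exactly sqrt(lmbda) * I, which is why the code can
# initialise the factor directly instead of factorising a matrix
m, lmbda = 4, 0.1
assert np.allclose(np.linalg.cholesky(lmbda * np.eye(m)),
                   np.sqrt(lmbda) * np.eye(m))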
Example #3
    def update_fit(self, X, log_weights=None):
        assert_array_shape(X, ndim=2, dims={1: self.D})
        N = len(X)

        # don't do anything if no data was observed
        if N == 0:
            return

        if log_weights is None:
            log_weights = np.log(np.ones(N))
        assert_array_shape(log_weights, ndim=1, dims={0: N})

        # first update: use the first element of X and log_weights, then discard it
        if self.log_sum_weights is None:
            # assume we have observed fake terms, which is needed to make the system well-posed
            # the L_C encodes that the fake terms had covariance self.lmbda, which acts as a regulariser
            self.L_C = np.eye(self.m) * np.sqrt(self.lmbda)
            self.log_sum_weights = log_weights[0]
            self.b = compute_b(X[0].reshape(1, self.D), self.omega, self.u)
            self.n = 1

            X = X[1:]
            log_weights = log_weights[1:]
            N -= 1

        # don't do anything if no data was observed
        if N == 0:
            return

        old_L_C = np.array(self.L_C, copy=True)
        self.b, self.L_C = update_b_L_C_weighted(X, self.b, self.L_C,
                                                 self.log_sum_weights,
                                                 log_weights,
                                                 self.omega, self.u)

        if np.any(np.isnan(self.L_C)) or np.any(np.isinf(self.L_C)):
            logger.warning("Numerical error while updating Cholesky factor of C.\n"
                           "Before update:\n%s\n"
                           "After update:\n%s\n"
                           "Updating data:\n%s\n"
                           "Updating log weights:\n%s\n"
                           % (str(old_L_C), str(self.L_C), str(X), str(log_weights)))
            raise RuntimeError("Numerical error while updating Cholesky factor of C.")

        # update terms and weights
        self.n += len(X)
        self.log_sum_weights = log_sum_exp(list(log_weights) + [self.log_sum_weights])

        # finally, update the solution
        self.theta = fit_L_C_precomputed(self.b, self.L_C)
Example #4
def log_weights_to_lmbdas(log_sum_old_weights, log_new_weights, boundary_check_min_number=1e-5):
    N = len(log_new_weights)
    lmbdas = np.zeros(N)
    
    for i, log_new_weight in enumerate(log_new_weights):
        log_sum_old_weights = log_sum_exp([log_sum_old_weights, log_new_weight])
        log_lmbda = log_new_weight - log_sum_old_weights
        lmbdas[i] = np.exp(log_lmbda)
    
    # numerical checks for lambdas. Must be in (0,1)
    lmbdas[lmbdas < boundary_check_min_number] = boundary_check_min_number
    lmbdas[(1 - lmbdas) < boundary_check_min_number] = 1 - boundary_check_min_number
    
    return lmbdas
Example #5
def log_weights_to_lmbdas(log_sum_old_weights,
                          log_new_weights,
                          boundary_check_min_number=1e-5):
    N = len(log_new_weights)
    lmbdas = np.zeros(N)

    for i, log_new_weight in enumerate(log_new_weights):
        log_sum_old_weights = log_sum_exp(
            [log_sum_old_weights, log_new_weight])
        log_lmbda = log_new_weight - log_sum_old_weights
        lmbdas[i] = np.exp(log_lmbda)

    # numerical checks for lambdas. Must be in (0,1)
    lmbdas[lmbdas < boundary_check_min_number] = boundary_check_min_number
    lmbdas[(1 - lmbdas) < boundary_check_min_number] = 1 - boundary_check_min_number

    return lmbdas
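Note: a self-contained illustration of what the lambdas returned by log_weights_to_lmbdas represent, using scipy.special.logsumexp as a stand-in for the library's log_sum_exp and ignoring the boundary clamping. With uniform weights they reduce to running-average step sizes 1/(n_old + i + 1), and using them as convex-combination steps reproduces a plain mean, which is presumably how update_b_L_C_weighted folds new terms into its running averages (that implementation is not shown on this page). The numbers below are arbitrary and purely illustrative.

import numpy as np
from scipy.special import logsumexp  # stand-in for log_sum_exp

n_old = 3
log_sum_old = logsumexp(np.zeros(n_old))   # n_old previously observed unit weights
log_new = np.zeros(4)                      # 4 new unit weights

# same recursion as in the loop above
lmbdas = np.zeros(len(log_new))
log_running = log_sum_old
for i, lw in enumerate(log_new):
    log_running = logsumexp([log_running, lw])
    lmbdas[i] = np.exp(lw - log_running)
print(lmbdas)                              # [1/4, 1/5, 1/6, 1/7]

# feeding these lambdas into a convex-combination update reproduces a plain mean
x_old = np.arange(n_old, dtype=float)
x_new = np.arange(n_old, n_old + 4, dtype=float)
mean = x_old.mean()
for lmbda, x in zip(lmbdas, x_new):
    mean = (1 - lmbda) * mean + lmbda * x
assert np.allclose(mean, np.concatenate([x_old, x_new]).mean())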
Example #6
    def test_log_sum_exp(self):
        X = np.abs(np.random.randn(100))
        direct = np.log(np.sum(np.exp(X)))
        indirect = log_sum_exp(X)
        assert_allclose(direct, indirect)
Example #7
    def test_log_sum_exp(self):
        X = np.abs(np.random.randn(100))
        direct = np.log(np.sum(np.exp(X)))
        indirect = log_sum_exp(X)
        assert_allclose(direct, indirect)
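Note: the two tests above pin down the semantics of log_sum_exp as log(sum(exp(X))). The library's actual implementation is not shown on this page; below is only a sketch of the standard numerically stable version (shifting by the maximum so the exponentials cannot overflow), which satisfies the same test.

import numpy as np

def log_sum_exp_sketch(X):
    # stable log(sum(exp(X))): factor out the largest term before exponentiating
    X = np.asarray(X, dtype=float)
    m = np.max(X)
    return m + np.log(np.sum(np.exp(X - m)))

X = np.abs(np.random.randn(100))
assert np.allclose(np.log(np.sum(np.exp(X))), log_sum_exp_sketch(X))
print(log_sum_exp_sketch(np.array([1000.0, 1000.0])))  # ~1000.6931, no overflow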