Example #1
import numpy as np
# LMNN and the private helper _sum_outer_products live in metric_learn.lmnn
# in the metric-learn version these tests target
from metric_learn.lmnn import LMNN, _sum_outer_products


def test_toy_ex_lmnn(X, y, loss):
    """Test that the loss gives the right result on a toy example.

    X, y and loss are supplied by a pytest parametrize decorator in the
    original test suite.
    """
    L = np.array([[1]])  # identity transformation for 1-D toy data
    lmnn = LMNN(k=1, regularization=0.5)

    k = lmnn.k
    reg = lmnn.regularization

    # validate the inputs and set the fitted attributes by hand, bypassing
    # fit() so that the loss can be evaluated directly
    X, y = lmnn._prepare_inputs(X, y, dtype=float, ensure_min_samples=2)
    num_pts, n_components = X.shape
    unique_labels, label_inds = np.unique(y, return_inverse=True)
    lmnn.labels_ = np.arange(len(unique_labels))
    lmnn.components_ = np.eye(n_components)

    # indices of the k same-class target neighbors of each point
    target_neighbors = lmnn._select_targets(X, label_inds)

    # sum of outer products of the differences to the target neighbors
    # (the pull-term contribution to the gradient)
    dfG = _sum_outer_products(X, target_neighbors.flatten(),
                              np.repeat(np.arange(X.shape[0]), k))

    # storage (not used by the _loss_grad call below)
    a1 = [None] * k
    a2 = [None] * k
    for nn_idx in range(k):
        a1[nn_idx] = np.array([])
        a2[nn_idx] = np.array([])

    # assert that the loss (second value returned by _loss_grad) equals the
    # one computed by hand
    assert lmnn._loss_grad(X, L.reshape(-1, X.shape[1]), dfG, k, reg,
                           target_neighbors, label_inds)[1] == loss
Example #2
import numpy as np
from scipy.optimize import approx_fprime, check_grad
from sklearn.datasets import make_classification
# private helper; its exact location may vary across metric-learn versions
from metric_learn.lmnn import LMNN, _sum_outer_products


  # method of a test class (hence `self`) in the original test suite
  def test_loss_grad_lbfgs(self):
    """Test the gradient of the loss function.

    Assert that the gradient is almost equal to its finite-difference
    approximation.
    """
    rng = np.random.RandomState(42)
    X, y = make_classification(random_state=rng)
    # random linear map with between 1 and n_features output dimensions
    L = rng.randn(rng.randint(1, X.shape[1] + 1), X.shape[1])
    lmnn = LMNN()

    k = lmnn.k
    reg = lmnn.regularization

    X, y = lmnn._prepare_inputs(X, y, dtype=float,
                                ensure_min_samples=2)
    num_pts, n_components = X.shape
    unique_labels, label_inds = np.unique(y, return_inverse=True)
    lmnn.labels_ = np.arange(len(unique_labels))
    lmnn.components_ = np.eye(n_components)

    target_neighbors = lmnn._select_targets(X, label_inds)

    # sum outer products
    dfG = _sum_outer_products(X, target_neighbors.flatten(),
                              np.repeat(np.arange(X.shape[0]), k))

    # wrap _loss_grad as functions of the flattened L, as an L-BFGS solver
    # would see them
    def loss_grad(flat_L):
      return lmnn._loss_grad(X, flat_L.reshape(-1, X.shape[1]), dfG,
                             k, reg, target_neighbors, label_inds)

    def fun(x):
      return loss_grad(x)[1]  # loss value

    def grad(x):
      return loss_grad(x)[0].ravel()  # flattened gradient

    # check_grad returns the 2-norm of the difference between `grad` and a
    # finite-difference approximation of it; dividing by the norm of the
    # numeric gradient turns that into a relative error
    epsilon = np.sqrt(np.finfo(float).eps)
    rel_diff = (check_grad(fun, grad, L.ravel()) /
                np.linalg.norm(approx_fprime(L.ravel(), fun, epsilon)))
    np.testing.assert_almost_equal(rel_diff, 0., decimal=5)
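
The same finite-difference pattern works for any differentiable loss. Here is
a minimal self-contained sketch of the check on a toy quadratic, using only
scipy.optimize (the function names are illustrative, not from metric-learn):

import numpy as np
from scipy.optimize import approx_fprime, check_grad

def fun(x):
    return np.dot(x, x)  # f(x) = ||x||^2

def grad(x):
    return 2 * x         # analytic gradient of ||x||^2

x0 = np.random.RandomState(0).randn(5)
epsilon = np.sqrt(np.finfo(float).eps)
# check_grad returns ||grad(x0) - numeric gradient||_2; normalizing by the
# numeric gradient's norm gives a scale-free relative error
rel_err = (check_grad(fun, grad, x0) /
           np.linalg.norm(approx_fprime(x0, fun, epsilon)))
assert rel_err < 1e-5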