Example #1
import numpy as np

def sci(z, n_class_atoms, reg='l0'):
    """
    Computes the sparsity concentration index (SCI) of a coefficient
    vector z for a given regularizer ('l0' or 'l1').
    """

    n_classes = len(n_class_atoms)
    if reg == 'l0':
        # numerator: the largest number of nonzero coefficients
        # concentrated in a single class
        norm_const = z.nonzero()[0].size
        nom = np.max([z[get_class_atoms(c, n_class_atoms)].nonzero()[0].size
                      for c in range(n_classes)])
    elif reg == 'l1':
        # numerator: the largest l1-mass concentrated in a single class
        nom = np.max([np.sum(np.abs(z[get_class_atoms(c, n_class_atoms)]))
                      for c in range(n_classes)])
        norm_const = np.sum(np.abs(z))
    else:
        raise ValueError("reg must be 'l0' or 'l1'")

    sci_index = (n_classes * (nom / norm_const) - 1) / float(n_classes - 1)
    return sci_index
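The helper get_class_atoms is not shown in these snippets; here is a minimal sketch consistent with its call sites and with the unit test in Example 6 below (it returns the column indices of the atoms belonging to one class):

import numpy as np

def get_class_atoms(c, n_class_atoms):
    # indices of the atoms of class c, assuming the columns of D are
    # grouped class by class; e.g. get_class_atoms(1, [2, 3, 5]) -> [2, 3, 4]
    start = int(np.sum(n_class_atoms[:c]))
    return list(range(start, start + n_class_atoms[c]))

With that in place, a quick sanity check of sci: a code supported entirely on one class should be maximally concentrated.

z = np.array([0.5, -0.2, 0.0, 0.0, 0.0])
print(sci(z, n_class_atoms=[2, 3], reg='l1'))  # 1.0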
Example #2
def local_sparse_predict(X, D, sparse_coder, n_class_atoms):
    n_samples = X.shape[1]
    n_classes = len(n_class_atoms)
    n_total_atoms = np.sum(n_class_atoms)
    # sparsely encode each datapoint over each class specific dictionary
    Z = np.zeros((n_total_atoms, n_samples))
    for c in range(n_classes):
        c_idx = get_class_atoms(c, n_class_atoms)
        Zc = sparse_coder(X, D[:, c_idx])
        Z[c_idx, :] = Zc

    # classify each sample by the class whose dictionary yields the
    # sparsest code (fewest nonzero coefficients)
    predictions = []
    for i in range(n_samples):
        sp = np.zeros(n_classes, dtype=int)
        for c in range(n_classes):
            c_idx = get_class_atoms(c, n_class_atoms)
            sp[c] = Z[c_idx, i].nonzero()[0].size

        pred = np.argmin(sp)
        predictions.append(pred)

    return predictions
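The sparse_coder argument is any callable mapping (X, D) to a coefficient matrix Z of shape (n_atoms, n_samples). As a hypothetical example (not part of the original code), one could wrap scikit-learn's orthogonal matching pursuit:

from sklearn.linear_model import orthogonal_mp

def omp_coder(X, D, n_nonzero_coefs=5):
    # X: (n_features, n_samples), samples as columns
    # D: (n_features, n_atoms), atoms as columns
    # returns Z: (n_atoms, n_samples)
    return orthogonal_mp(D, X, n_nonzero_coefs=n_nonzero_coefs)

predictions = local_sparse_predict(X, D, omp_coder, n_class_atoms)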
Example #3
def local_error(X, D, n_class_atoms, sparse_coder):
    """
    computes the approximation error of the dataset to
    each class-specific dictionary. Each datapoint is first encoded over the
    each dictionary seperately.
    """
    n_samples = X.shape[1]
    n_classes = len(n_class_atoms)
    E = np.zeros((n_classes, n_samples))
    for c in range(n_classes):
        c_idx = get_class_atoms(c, n_class_atoms)
        Dc = D[:, c_idx]
        Zc = sparse_coder(X, Dc)
        E[c, :] = approx_error(Dc, Zc, X)
    return E
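The helper approx_error is assumed to return the per-sample squared reconstruction error; a minimal sketch, consistent with the inline computation in Example 5:

import numpy as np

def approx_error(D, Z, X):
    # squared l2 reconstruction error per column of X: ||x_i - D z_i||^2
    return np.sum(np.power(np.dot(D, Z) - X, 2), axis=0)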
Example #4
def global_sparse_predict(X, D, sparse_coder, n_class_atoms):
    """
    works in the same way as 'global_error_predict' but
    we classify according to the sparsest solution
    """

    Z = sparse_coder(X, D)
    n_samples = X.shape[1]
    n_classes = len(n_class_atoms)
    predictions = []

    for i in range(n_samples):
        sp = np.zeros(n_classes, dtype=int)

        for c in range(n_classes):
            c_idx = get_class_atoms(c, n_class_atoms)
            sp[c] = Z[c_idx, i].nonzero()[0].size

        pred = np.argmin(sp)
        predictions.append(pred)

    return predictions
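A small smoke test with random data (shapes are illustrative, and omp_coder is the hypothetical coder sketched after Example 2):

import numpy as np

rng = np.random.RandomState(0)
n_features, n_class_atoms = 20, [10, 10]
D = rng.randn(n_features, int(np.sum(n_class_atoms)))
D /= np.linalg.norm(D, axis=0)  # OMP expects unit-norm atoms
X = rng.randn(n_features, 5)
print(global_sparse_predict(X, D, omp_coder, n_class_atoms))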
Example #5
def global_error(X, D, sparse_coder, n_class_atoms, n_jobs=1):
    """
    computes the approximation error of the dataset to
    each class-specific dictionary. The dataset is first encoded over the
    joint dictionary.
    """

    Z = sparse_coder(X, D)
    n_samples = X.shape[1]
    n_classes = len(n_class_atoms)
    E = np.zeros((n_classes, n_samples))

    if n_jobs > 1:
        # enable multithreaded BLAS for the dense products below
        # (set_openblas_threads and fast_dot are helpers from the same module)
        set_openblas_threads(n_jobs)

    for c in range(n_classes):
        c_idx = get_class_atoms(c, n_class_atoms)
        # squared reconstruction error of every sample under class c's atoms
        E[c, :] = np.sum(np.power(fast_dot(D[:, c_idx], Z[c_idx, :]) - X, 2),
                         axis=0)

    if n_jobs > 1:
        # restore single-threaded BLAS
        set_openblas_threads(1)

    return E
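Example 4's docstring mentions 'global_error_predict'; classification by minimum reconstruction error is then a one-liner on top of global_error (again using the hypothetical omp_coder):

E = global_error(X, D, omp_coder, n_class_atoms)
predictions = np.argmin(E, axis=0)  # class with the smallest error per sample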
Example #6
    def test_get_class_atoms(self):
        n_class_atoms = [2, 3, 5]
        cl = 1
        self.assertEqual(get_class_atoms(cl, n_class_atoms=n_class_atoms), [2, 3, 4])