Example #1
def test_sklearn_mcr_semilearned_both_c_st():
    """
    Test the special case when both C and ST are provided, which requires
    c_fix and st_fix to be provided as well
    """

    M = 21
    N = 21
    P = 101
    n_components = 3

    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0.1, 1, N)[None, :])
    C_img[..., 1] = np.dot(np.linspace(0.1, 1, M)[:, None], np.ones((1, N)))
    C_img[..., 2] = 1 - C_img[..., 0] - C_img[..., 1]
    C_img = C_img / C_img.sum(axis=-1)[:, :, None]

    St_known = np.zeros((n_components, P))
    St_known[0, 30:50] = 1
    St_known[1, 50:70] = 2
    St_known[2, 70:90] = 3
    St_known += 1

    C_known = C_img.reshape((-1, n_components))

    D_known = np.dot(C_known, St_known)

    C_guess = 1 * C_known
    C_guess[:, 2] = np.abs(np.random.randn(int(M * N)))

    mcrar = McrAR(max_iter=50,
                  tol_increase=100,
                  tol_n_increase=10,
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_err_change=1e-10,
                  fit_kwargs={
                      'C': C_guess,
                      'ST': St_known,
                      'c_fix': [0, 1],
                      'st_fix': [0]
                  })

    mcrar.fit(D_known, c_first=True)
    assert_equal(mcrar.C_[:, 0], C_known[:, 0])
    assert_equal(mcrar.C_[:, 1], C_known[:, 1])
    assert_equal(mcrar.ST_[0, :], St_known[0, :])

    # ST-solve first
    mcrar.fit(D_known,
              C=C_guess,
              ST=St_known,
              c_fix=[0, 1],
              st_fix=[0],
              c_first=False)
    assert_equal(mcrar.C_[:, 0], C_known[:, 0])
    assert_equal(mcrar.C_[:, 1], C_known[:, 1])
    assert_equal(mcrar.ST_[0, :], St_known[0, :])
Example #2
def test_mcr_tol_err_change(dataset):
    """ Test MCR exits due error increasing by a value """

    C_known, D_known, St_known = dataset

    mcrar = McrAR(max_iter=50,
                  c_regr='OLS',
                  st_regr='OLS',
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_increase=None,
                  tol_n_increase=None,
                  tol_err_change=1e-20,
                  tol_n_above_min=None)
    mcrar.fit(D_known, C=C_known)
    assert mcrar.exit_tol_err_change
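Several of these examples take a `dataset` pytest fixture that supplies the (C_known, D_known, St_known) triple but is not shown on this page. A minimal sketch of what such a fixture could look like, assuming the same synthetic two-component construction used in the standalone tests (the shapes and names follow those tests, not necessarily the original fixture):

import numpy as np
import pytest


@pytest.fixture(scope='module')
def dataset():
    """Synthetic (C_known, D_known, St_known) triple mirroring the standalone tests."""
    M = 21
    N = 21
    P = 101
    n_components = 2

    # Two concentration maps that sum to 1 at every pixel
    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = 1 - C_img[..., 0]

    # Two boxcar-like spectra
    St_known = np.zeros((n_components, P))
    St_known[0, 40:60] = 1
    St_known[1, 60:80] = 2

    C_known = C_img.reshape((-1, n_components))
    D_known = np.dot(C_known, St_known)
    return C_known, D_known, St_known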
Example #3
def test_sklearn_mcr_c_semilearned():
    """ Test when C items are fixed, i.e., enforced to be the same as the input, always """

    M = 21
    N = 21
    P = 101
    n_components = 3

    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = np.dot(np.linspace(0, 1, M)[:, None], np.ones((1, N)))
    C_img[..., 2] = 1 - C_img[..., 0] - C_img[..., 1]
    C_img = C_img / C_img.sum(axis=-1)[:, :, None]

    St_known = np.zeros((n_components, P))
    St_known[0, 30:50] = 1
    St_known[1, 50:70] = 2
    St_known[2, 70:90] = 3
    St_known += 1

    C_known = C_img.reshape((-1, n_components))

    D_known = np.dot(C_known, St_known)

    C_guess = 1 * C_known
    C_guess[:, 2] = np.abs(np.random.randn(int(M * N)) + 0.1)

    mcrar = McrAR(max_iter=50,
                  tol_increase=100,
                  tol_n_increase=10,
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_err_change=1e-10,
                  fit_kwargs={
                      'C': C_guess,
                      'c_fix': [0, 1]
                  })

    mcrar.fit(D_known)
    assert_equal(mcrar.C_[:, 0], C_known[:, 0])
    assert_equal(mcrar.C_[:, 1], C_known[:, 1])
Example #4
def test_mcr_max_iterations(dataset):
    """ Test MCR exits at max_iter"""

    C_known, D_known, St_known = dataset

    # Seeding C with a constant of 0.1 actually leads to a bad local minimum;
    # thus, the err_change gets really small while the error is still relatively
    # bad. tol_err_change is set to None, so the fit runs all the way to max_iter.
    mcrar = McrAR(max_iter=50,
                  c_regr='OLS',
                  st_regr='OLS',
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_increase=None,
                  tol_n_increase=None,
                  tol_err_change=None,
                  tol_n_above_min=None)
    mcrar.fit(D_known, C=C_known * 0 + 0.1)
    assert mcrar.exit_max_iter_reached
Example #5
def test_mcr_tol_increase(dataset):
    """ Test MCR exits due error increasing above a tolerance fraction"""

    C_known, D_known, St_known = dataset

    # Seeding C with a constant of 0.1 actually leads to a bad local minimum;
    # thus, the err_change gets really small while the error is still relatively
    # bad.
    mcrar = McrAR(max_iter=50,
                  c_regr='OLS',
                  st_regr='OLS',
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_increase=0,
                  tol_n_increase=None,
                  tol_err_change=None,
                  tol_n_above_min=None)
    mcrar.fit(D_known, C=C_known * 0 + 0.1)
    assert mcrar.exit_tol_increase
Example #6
def test_mcr_tol_n_above_min(dataset):
    """
    Test MCR exits due to half-terating n times with error above the minimum error.

    Note: On some CI systems, the minimum err bottoms out; thus, tol_n_above_min
    needed to be set to 0 to trigger a break.
    """

    C_known, D_known, St_known = dataset

    mcrar = McrAR(max_iter=50,
                  c_regr='OLS',
                  st_regr='OLS',
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_increase=None,
                  tol_n_increase=None,
                  tol_err_change=None,
                  tol_n_above_min=0)
    mcrar.fit(D_known, C=C_known * 0 + 0.1)
    assert mcrar.exit_tol_n_above_min
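The exit_* flags asserted in these tests record which stopping criterion actually ended the fit (max_iter, error-change tolerance, error increase, or too many iterations above the minimum error). A small helper, hedged to use only the flag names that appear in these examples, can report why a fitted model stopped:

def why_did_mcr_stop(model):
    """Return the names of the exit flags that are set on a fitted McrAR/McrAls model.

    Only the flags asserted in the tests on this page are inspected; any flag
    absent from a given pyMCR version is skipped rather than assumed.
    """
    flag_names = ['exit_max_iter_reached', 'exit_tol_err_change',
                  'exit_tol_increase', 'exit_tol_n_above_min',
                  'exit_tol_n_increase']
    return [name for name in flag_names if getattr(model, name, False)]


# e.g., after mcrar.fit(D_known, C=C_known):
#     print(why_did_mcr_stop(mcrar))   # -> ['exit_tol_n_above_min']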
Example #7
def test_mcr_st_semilearned():
    """ Test when St items are fixed, i.e., enforced to be the same as the input, always """

    M = 21
    N = 21
    P = 101
    n_components = 3

    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = np.dot(np.linspace(0, 1, M)[:, None], np.ones((1, N)))
    C_img[..., 2] = 1 - C_img[..., 0] - C_img[..., 1]
    C_img = C_img / C_img.sum(axis=-1)[:, :, None]

    St_known = np.zeros((n_components, P))
    St_known[0, 30:50] = 1
    St_known[1, 50:70] = 2
    St_known[2, 70:90] = 3
    St_known += 1

    C_known = C_img.reshape((-1, n_components))

    D_known = np.dot(C_known, St_known)

    ST_guess = 1 * St_known
    ST_guess[2, :] = np.random.randn(P)

    mcrar = McrAR(max_iter=50,
                  tol_increase=100,
                  tol_n_increase=10,
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_err_change=1e-10)

    mcrar.fit(D_known, ST=ST_guess, st_fix=[0, 1])
    assert_equal(mcrar.ST_[0, :], St_known[0, :])
    assert_equal(mcrar.ST_[1, :], St_known[1, :])
Example #8
def test_sklearn_mcr_tol_n_increase(dataset):
    """
    Test MCR exits due iterating n times with an increase in error

    Note: On some CI systems, the minimum err bottoms out; thus, tol_n_above_min
    needed to be set to 0 to trigger a break.
    """

    C_known, D_known, St_known = dataset

    mcrar = McrAR(max_iter=50,
                  c_regr='OLS',
                  st_regr='OLS',
                  st_constraints=[ConstraintNonneg()],
                  c_constraints=[ConstraintNonneg(),
                                 ConstraintNorm()],
                  tol_increase=None,
                  tol_n_increase=0,
                  tol_err_change=None,
                  tol_n_above_min=None,
                  fit_kwargs={'C': C_known * 0 + 0.1})
    mcrar.fit(D_known)
    assert mcrar.exit_tol_n_increase
Example #9
File: mcr.py, Project: rkern/pyMCR
if __name__ == '__main__':  # pragma: no cover

    M = 21
    N = 21
    P = 101
    n_components = 2

    C_img = _np.zeros((M, N, n_components))
    C_img[..., 0] = _np.dot(_np.ones((M, 1)), _np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = 1 - C_img[..., 0]

    ST_known = _np.zeros((n_components, P))
    ST_known[0, 40:60] = 1
    ST_known[1, 60:80] = 2

    C_known = C_img.reshape((-1, n_components))

    D_known = _np.dot(C_known, ST_known)

    mcrals = McrAls(max_iter=50,
                    tol_increase=100,
                    tol_n_increase=10,
                    st_constraints=[ConstraintNonneg()],
                    c_constraints=[ConstraintNonneg(),
                                   ConstraintNorm()],
                    tol_err_change=1e-30)
    mcrals._saveall_st = True
    mcrals._saveall_c = True
    # mcrals.fit(D_known, ST=ST_known+1*_np.random.randn(*ST_known.shape), verbose=True)
    mcrals.fit(D_known, C=C_known * 0 + 1e-1, verbose=True)
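Continuing the demo script above, a natural follow-up is to compare the recovered spectra and concentrations against the known ones used to build D_known. A short sketch under the assumption that the script has just run (matplotlib is not imported in mcr.py, so it is imported here; the _opt_ attributes are the same ones used in the other examples):

import matplotlib.pyplot as plt

plt.subplot(2, 1, 1)
plt.plot(ST_known.T, 'k--')          # known spectra (dashed)
plt.plot(mcrals.ST_opt_.T)           # recovered spectra
plt.title('Spectra: known (dashed) vs recovered')

plt.subplot(2, 1, 2)
plt.plot(mcrals.C_opt_)              # recovered concentrations
plt.title('Recovered concentrations')

plt.tight_layout()
plt.show()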
Example #10
def test_norm():

    # A must have a float dtype for in-place math (copy=False)
    constr_norm = ConstraintNorm(axis=0, copy=False)
    A = np.array([[1, 2, 3], [-1, -2, -3], [1, 2, 3]])  # integer dtype
    with pytest.raises(TypeError):
        out = constr_norm.transform(A)

    # Axis must be 0, 1, or -1
    with pytest.raises(ValueError):
        constr_norm = ConstraintNorm(axis=2, copy=False)

    A = np.array([[1, 2, 3], [-1, -2, -3], [1, 2, 3]], dtype=float)
    A_norm0 = A / A.sum(axis=0)[None, :]
    A_norm1 = A / A.sum(axis=1)[:, None]

    constr_norm = ConstraintNorm(axis=0, copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_norm0, out)

    constr_norm = ConstraintNorm(axis=1, copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_norm1, out)

    constr_norm = ConstraintNorm(axis=-1, copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_norm1, out)

    constr_norm = ConstraintNorm(axis=0, copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_norm0, A)

    A = np.array([[1, 2, 3], [-1, -2, -3], [1, 2, 3]], dtype=float)
    constr_norm = ConstraintNorm(axis=1, copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_norm1, A)
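In the MCR examples above, ConstraintNorm() is listed in c_constraints to enforce closure, i.e., the component concentrations of each spectrum sum to 1 after every half-iteration. A minimal standalone sketch of that effect, assuming the usual pymcr.constraints import path (the matrix below is illustrative, not taken from the tests):

import numpy as np
from pymcr.constraints import ConstraintNorm

# Three "pixels" x two components; rows do not yet sum to 1
C_raw = np.array([[0.2, 0.6],
                  [1.0, 1.0],
                  [0.3, 0.9]], dtype=float)

constr = ConstraintNorm(axis=-1, copy=True)   # normalize along each row
C_closed = constr.transform(C_raw)
print(C_closed.sum(axis=1))                   # -> [1. 1. 1.]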
Example #11
def test_mcr():
    M = 21
    N = 21
    P = 101
    n_components = 2

    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = 1 - C_img[..., 0]

    ST_known = np.zeros((n_components, P))
    ST_known[0, 40:60] = 1
    ST_known[1, 60:80] = 2

    C_known = C_img.reshape((-1, n_components))

    D_known = np.dot(C_known, ST_known)

    mcrals = McrAls(max_iter=50, tol_increase=100, tol_n_increase=10, 
                    st_constraints=[ConstraintNonneg()], 
                    c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                    tol_err_change=1e-10)
    mcrals._saveall_st = False
    mcrals._saveall_c = False
    mcrals.fit(D_known, ST=ST_known)

    assert_equal(1, mcrals.n_iter_opt)

    mcrals = McrAls(max_iter=50, tol_increase=100, tol_n_increase=10,
                    c_regr='OLS', st_regr='OLS', 
                    st_constraints=[ConstraintNonneg()], 
                    c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                    tol_err_change=1e-10)
    mcrals.fit(D_known, ST=ST_known)
    assert_equal(1, mcrals.n_iter_opt)
    assert ((mcrals.D_ - D_known)**2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known)**2).mean() < 1e-10

    mcrals = McrAls(max_iter=50, tol_increase=100, tol_n_increase=10,
                    c_regr='NNLS', st_regr='NNLS', 
                    st_constraints=[ConstraintNonneg()], 
                    c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                    tol_err_change=1e-10)
    mcrals.fit(D_known, ST=ST_known)
    assert_equal(1, mcrals.n_iter_opt)

    assert ((mcrals.D_ - D_known)**2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known)**2).mean() < 1e-10

    mcrals = McrAls(max_iter=50, tol_increase=100, tol_n_increase=10,
                    c_regr='OLS', st_regr='OLS', 
                    st_constraints=[ConstraintNonneg()], 
                    c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                    tol_err_change=1e-10)
    mcrals.fit(D_known, C=C_known)

    # Turns out some systems get it in 1 iteration, some in 2
    # assert_equal(1, mcrals.n_iter_opt)
    assert_equal(True, mcrals.n_iter_opt <= 2)

    assert ((mcrals.D_ - D_known)**2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known)**2).mean() < 1e-10

    # Seeding C with a constant of 0.1 actually leads to a bad local minimum;
    # thus, the err_change gets really small while the error is still relatively
    # bad. This is not really a test of the solution, but it does exercise
    # breaking out via tol_err_change.
    mcrals = McrAls(max_iter=50, tol_increase=100, tol_n_increase=10,
                    c_regr='OLS', st_regr='OLS', 
                    st_constraints=[ConstraintNonneg()], 
                    c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                    tol_err_change=1e-10)
    mcrals.fit(D_known, C=C_known*0 + 0.1)

    # Same bad seeding as above, but with tol_err_change=None the fit cannot
    # break on a small error change and instead runs all the way to max_iter.
    mcrals = McrAls(max_iter=50, tol_increase=100, tol_n_increase=10,
                    c_regr='OLS', st_regr='OLS', 
                    st_constraints=[ConstraintNonneg()], 
                    c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                    tol_err_change=None)
    mcrals.fit(D_known, C=C_known*0 + 0.1)
    assert_equal(mcrals.n_iter, 50)
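Two different counters are asserted in the test above: reading the assertions, n_iter is the number of iterations that actually ran (it equals max_iter when no tolerance triggers), while n_iter_opt is the iteration at which the lowest error, and hence the _opt_ estimates, was reached. A quick way to inspect both after a fit, shown as a sketch rather than an additional assertion:

mcrals.fit(D_known, ST=ST_known)
print('iterations run:            ', mcrals.n_iter)
print('iteration of lowest error: ', mcrals.n_iter_opt)
print('optimal reconstruction MSE:', ((mcrals.D_opt_ - D_known) ** 2).mean())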
Example #12
def test_norm_fixed_axes():
    # AXIS = 1
    A = np.array(
        [[0.0, 0.2, 1.0, 0.0], [0.25, 0.25, 0.0, 0.0], [0.3, 0.9, 0.6, 0.0]],
        dtype=float)
    A_fix2_ax1 = np.array(
        [[0.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.0, 0.0], [0.1, 0.3, 0.6, 0.0]],
        dtype=float)

    # Fixed axes must be integers
    with pytest.raises(TypeError):
        constr_norm = ConstraintNorm(axis=1, fix=2.2, copy=True)

    # Dtype must be integer related
    with pytest.raises(TypeError):
        constr_norm = ConstraintNorm(axis=1, fix=np.array([2.2]), copy=True)

    # COPY: True
    # Fix of type int
    constr_norm = ConstraintNorm(axis=1, fix=2, copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # Fix of type list
    constr_norm = ConstraintNorm(axis=1, fix=[2, 3], copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # Fix of type tuple
    constr_norm = ConstraintNorm(axis=1, fix=(2,), copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # Fix of type ndarray
    constr_norm = ConstraintNorm(axis=1, fix=np.array([2]), copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # COPY: False
    A_fix2_ax1 = np.array(
        [[0.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.0, 0.0], [0.1, 0.3, 0.6, 0.0]],
        dtype=float)

    # Fix of type int
    A = np.array(
        [[0.0, 0.2, 1.0, 0.0], [0.25, 0.25, 0.0, 0.0], [0.3, 0.9, 0.6, 0.0]],
        dtype=float)
    constr_norm = ConstraintNorm(axis=1, fix=2, copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # Fix of type list
    A = np.array(
        [[0.0, 0.2, 1.0, 0.0], [0.25, 0.25, 0.0, 0.0], [0.3, 0.9, 0.6, 0.0]],
        dtype=float)
    constr_norm = ConstraintNorm(axis=1, fix=[2, 3], copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # Fix of type tuple
    A = np.array(
        [[0.0, 0.2, 1.0, 0.0], [0.25, 0.25, 0.0, 0.0], [0.3, 0.9, 0.6, 0.0]],
        dtype=float)
    constr_norm = ConstraintNorm(axis=1, fix=(2,), copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # Fix of type ndarray
    A = np.array(
        [[0.0, 0.2, 1.0, 0.0], [0.25, 0.25, 0.0, 0.0], [0.3, 0.9, 0.6, 0.0]],
        dtype=float)
    constr_norm = ConstraintNorm(axis=1, fix=np.array([2]), copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # AXIS = 0
    # Lazy, so just transposed
    A = np.array(
        [[0.0, 0.2, 1.0, 0.0], [0.25, 0.25, 0.0, 0.0], [0.3, 0.9, 0.6, 0.0]],
        dtype=float).T
    A_fix2_ax1 = np.array(
        [[0.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.0, 0.0], [0.1, 0.3, 0.6, 0.0]],
        dtype=float).T
    # COPY: True
    # Fix of type int
    constr_norm = ConstraintNorm(axis=0, fix=2, copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # Fix of type list
    constr_norm = ConstraintNorm(axis=0, fix=[2, 3], copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # Fix of type tuple
    constr_norm = ConstraintNorm(axis=0, fix=(2,), copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # Fix of type ndarray
    constr_norm = ConstraintNorm(axis=0, fix=np.array([2]), copy=True)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, out)

    # COPY: False
    A_fix2_ax1 = np.array([[0.0, 0.0, 1.0], [0.5, 0.5, 0.0], [0.1, 0.3, 0.6]],
                          dtype=float).T

    # Fix of type int
    A = np.array([[0.0, 0.2, 1.0], [0.25, 0.25, 0.0], [0.3, 0.9, 0.6]],
                 dtype=float).T
    constr_norm = ConstraintNorm(axis=0, fix=2, copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # Fix of type list
    A = np.array([[0.0, 0.2, 1.0], [0.25, 0.25, 0.0], [0.3, 0.9, 0.6]],
                 dtype=float).T
    constr_norm = ConstraintNorm(axis=0, fix=[2], copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # Fix of type tuple
    A = np.array([[0.0, 0.2, 1.0], [0.25, 0.25, 0.0], [0.3, 0.9, 0.6]],
                 dtype=float).T
    constr_norm = ConstraintNorm(axis=0, fix=(2,), copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)

    # Fix of type ndarray
    A = np.array([[0.0, 0.2, 1.0], [0.25, 0.25, 0.0], [0.3, 0.9, 0.6]],
                 dtype=float).T
    constr_norm = ConstraintNorm(axis=0, fix=np.array([2]), copy=False)
    out = constr_norm.transform(A)
    assert_allclose(A_fix2_ax1, A)
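The expected arrays above also show what the fix argument does: the fixed columns (column 2 here, with axis=1) are left untouched, and the remaining entries of each row are rescaled so that the whole row still sums to 1. For example, the row [0.3, 0.9, 0.6, 0.0] with 0.6 fixed leaves 0.4 for the free entries, which currently sum to 1.2, so they are scaled by 1/3 to give [0.1, 0.3, 0.6, 0.0]. A tiny NumPy sketch of that arithmetic (it only mirrors the expected values; it is not the library's internal implementation):

import numpy as np

row = np.array([0.3, 0.9, 0.6, 0.0])
fixed = [2]                                   # indices held constant (axis=1)
free = [i for i in range(row.size) if i not in fixed]

budget = 1.0 - row[fixed].sum()               # what the free entries must sum to
row[free] *= budget / row[free].sum()         # rescale only the free entries
print(row)                                    # -> [0.1 0.3 0.6 0. ]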
Example #13
# Imports assumed by this fragment (not shown in the original snippet);
# svd and simplisma are project-local helper modules, and nSVD is defined elsewhere.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import svd
import simplisma
from pymcr.mcr import McrAls
from pymcr.constraints import ConstraintNonneg, ConstraintNorm

# Allowed noise percentage passed to SIMPLISMA
noise = 5
# If True, read the pure-spectra estimate from file instead of running SVD + SIMPLISMA
manual = False

D = np.asarray(pd.read_csv('Total_MCR_CuSSZ13.dat', sep='\t', header=None).values)

if not manual:
    # Run SVD to inspect the explained variance
    eigens, explained_variance_ratio = svd.svd(D, nSVD)
    nPure = int(input('Number of principal components: '))
    # Run SIMPLISMA to get initial pure-spectra estimates
    S, C_u, C_c = simplisma.pure(D.T, nPure, noise, True)
else:
    S = np.asarray(pd.read_csv('sopt_5c2.dat', sep='\t', header=None).values).T

#Run MCR
mcrals = McrAls(max_iter=50, st_regr='NNLS', c_regr='NNLS', 
                c_constraints=[ConstraintNonneg(), ConstraintNorm()])

mcrals.fit(D, ST=S.T, verbose=True)
print('\nFinal MSE: {:.7e}'.format(mcrals.err[-1]))

plt.subplot(2, 1, 1)
plt.plot(mcrals.ST_opt_.T)

plt.subplot(2, 1, 2)
plt.plot(mcrals.C_opt_)

plt.show()
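A common follow-up to the fit above is to check how well the recovered factors reproduce the measured data. A short sketch that could be appended to the script, using only names already present in it (D, mcrals.C_opt_, mcrals.ST_opt_):

# Reconstruct the data from the optimal MCR factors and inspect the residual
D_rebuilt = np.dot(mcrals.C_opt_, mcrals.ST_opt_)
residual = D - D_rebuilt

print('Reconstruction MSE: {:.7e}'.format((residual ** 2).mean()))

plt.figure()
plt.imshow(residual, aspect='auto', cmap='RdBu_r')
plt.colorbar(label='D - C_opt_ @ ST_opt_')
plt.title('MCR-ALS residual')
plt.show()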