Example #1
0
def test_props_features_samples_targets(dataset):
    """ Test mcrals properties for features, targets, samples """
    C_known, D_known, St_known = dataset

    model = McrAls()
    model.fit(D_known, ST=St_known)

    # n_targets mirrors the number of components (columns of C)
    assert model.n_targets == C_known.shape[-1]
    assert model.n_samples == D_known.shape[0]
    assert model.n_features == D_known.shape[-1]
Example #2
0
def test_mcr_tol_err_change(dataset):
    """ Test MCR exits due error increasing by a value """

    C_known, D_known, St_known = dataset

    # All other stopping criteria are disabled so that only tol_err_change
    # can trigger the exit.
    settings = {
        'max_iter': 50,
        'c_regr': 'OLS',
        'st_regr': 'OLS',
        'st_constraints': [ConstraintNonneg()],
        'c_constraints': [ConstraintNonneg(), ConstraintNorm()],
        'tol_increase': None,
        'tol_n_increase': None,
        'tol_err_change': 1e-20,
        'tol_n_above_min': None,
    }
    model = McrAls(**settings)
    model.fit(D_known, C=C_known)
    assert model.exit_tol_err_change
Example #3
0
def test_mcr_max_iterations(dataset):
    """ Test MCR exits at max_iter"""

    C_known, D_known, St_known = dataset

    # Seeding with a constant of 0.1 for C, actually leads to a bad local
    # minimum; thus, the err_change gets really small with a relatively bad
    # error. The tol_err_change is set to None, so it makes it to max_iter.
    settings = {
        'max_iter': 50,
        'c_regr': 'OLS',
        'st_regr': 'OLS',
        'st_constraints': [ConstraintNonneg()],
        'c_constraints': [ConstraintNonneg(), ConstraintNorm()],
        'tol_increase': None,
        'tol_n_increase': None,
        'tol_err_change': None,
        'tol_n_above_min': None,
    }
    model = McrAls(**settings)
    model.fit(D_known, C=C_known * 0 + 0.1)
    assert model.exit_max_iter_reached
Example #4
0
def test_mcr_tol_increase(dataset):
    """ Test MCR exits due error increasing above a tolerance fraction"""

    C_known, D_known, St_known = dataset

    # Seeding with a constant of 0.1 for C, actually leads to a bad local
    # minimum; thus, the err_change gets really small with a relatively bad
    # error. tol_increase=0 means any error increase triggers the exit.
    settings = {
        'max_iter': 50,
        'c_regr': 'OLS',
        'st_regr': 'OLS',
        'st_constraints': [ConstraintNonneg()],
        'c_constraints': [ConstraintNonneg(), ConstraintNorm()],
        'tol_increase': 0,
        'tol_n_increase': None,
        'tol_err_change': None,
        'tol_n_above_min': None,
    }
    model = McrAls(**settings)
    model.fit(D_known, C=C_known * 0 + 0.1)
    assert model.exit_tol_increase
Example #5
0
def test_mcr_tol_n_above_min(dataset):
    """
    Test MCR exits due to half-iterating n times with error above the minimum error.

    Note: On some CI systems, the minimum err bottoms out; thus, tol_n_above_min
    needed to be set to 0 to trigger a break.
    """

    C_known, D_known, St_known = dataset

    # All other stopping criteria disabled; tol_n_above_min=0 exits as soon
    # as the error sits above the running minimum.
    mcrals = McrAls(max_iter=50,
                    c_regr='OLS',
                    st_regr='OLS',
                    st_constraints=[ConstraintNonneg()],
                    c_constraints=[ConstraintNonneg(),
                                   ConstraintNorm()],
                    tol_increase=None,
                    tol_n_increase=None,
                    tol_err_change=None,
                    tol_n_above_min=0)
    mcrals.fit(D_known, C=C_known * 0 + 0.1)
    assert mcrals.exit_tol_n_above_min
Example #6
0
def test_mcr_semilearned_both_c_st():
    """
    Test the special case when C & ST are provided, requiring C-fix ST-fix to
    be provided
    """

    M, N, P = 21, 21, 101
    n_components = 3

    # Two linear concentration ramps plus a closure component, normalized so
    # every pixel's concentrations sum to 1.
    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0.1, 1, N)[None, :])
    C_img[..., 1] = np.dot(np.linspace(0.1, 1, M)[:, None], np.ones((1, N)))
    C_img[..., 2] = 1 - C_img[..., 0] - C_img[..., 1]
    C_img = C_img / C_img.sum(axis=-1)[:, :, None]

    # Three box-shaped spectra sitting on a constant offset of 1
    St_known = np.zeros((n_components, P))
    St_known[0, 30:50] = 1
    St_known[1, 50:70] = 2
    St_known[2, 70:90] = 3
    St_known += 1

    C_known = C_img.reshape((-1, n_components))
    D_known = np.dot(C_known, St_known)

    # Corrupt only the last concentration column; the fixed columns stay exact
    C_guess = 1 * C_known
    C_guess[:, 2] = np.abs(np.random.randn(int(M * N)))

    mcrals = McrAls(max_iter=50,
                    tol_increase=100,
                    tol_n_increase=10,
                    st_constraints=[ConstraintNonneg()],
                    c_constraints=[ConstraintNonneg(),
                                   ConstraintNorm()],
                    tol_err_change=1e-10)

    # Exercise both orderings: C-solve first, then ST-solve first
    for solve_c_first in (True, False):
        mcrals.fit(D_known,
                   C=C_guess,
                   ST=St_known,
                   c_fix=[0, 1],
                   st_fix=[0],
                   c_first=solve_c_first)
        assert_equal(mcrals.C_[:, 0], C_known[:, 0])
        assert_equal(mcrals.C_[:, 1], C_known[:, 1])
        assert_equal(mcrals.ST_[0, :], St_known[0, :])
Example #7
0
def test_mcr_c_semilearned():
    """ Test when C items are fixed, i.e., enforced to be the same as the input, always """

    M, N, P = 21, 21, 101
    n_components = 3

    # Two orthogonal linear ramps plus a closure component, normalized so
    # every pixel's concentrations sum to 1.
    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = np.dot(np.linspace(0, 1, M)[:, None], np.ones((1, N)))
    C_img[..., 2] = 1 - C_img[..., 0] - C_img[..., 1]
    C_img = C_img / C_img.sum(axis=-1)[:, :, None]

    # Three box-shaped spectra sitting on a constant offset of 1
    St_known = np.zeros((n_components, P))
    St_known[0, 30:50] = 1
    St_known[1, 50:70] = 2
    St_known[2, 70:90] = 3
    St_known += 1

    C_known = C_img.reshape((-1, n_components))
    D_known = np.dot(C_known, St_known)

    # Randomize only the last concentration column; fixed columns 0 and 1
    # must come back from the fit unchanged.
    C_guess = 1 * C_known
    C_guess[:, 2] = np.random.randn(int(M * N))

    mcrals = McrAls(max_iter=50,
                    tol_increase=100,
                    tol_n_increase=10,
                    st_constraints=[ConstraintNonneg()],
                    c_constraints=[ConstraintNonneg(),
                                   ConstraintNorm()],
                    tol_err_change=1e-10)

    mcrals.fit(D_known, C=C_guess, c_fix=[0, 1])
    for col in (0, 1):
        assert_equal(mcrals.C_[:, col], C_known[:, col])
Example #8
0
def test_mcr_ideal_default(dataset):
    """ Provides C/St_known so optimal should be 1 iteration """

    C_known, D_known, St_known = dataset

    model = McrAls()

    # Seeded with the known spectra: one iteration is optimal
    model.fit(D_known, ST=St_known)
    assert_equal(1, model.n_iter_opt)
    resid = model.D_ - D_known
    assert (resid ** 2).mean() < 1e-10
    resid_opt = model.D_opt_ - D_known
    assert (resid_opt ** 2).mean() < 1e-10

    # Seeded with the known concentrations instead
    model.fit(D_known, C=C_known)
    assert_equal(2, model.n_iter_opt)
    resid = model.D_ - D_known
    assert (resid ** 2).mean() < 1e-10
    resid_opt = model.D_opt_ - D_known
    assert (resid_opt ** 2).mean() < 1e-10
Example #9
0
def test_mcr_ideal_str_regressors(dataset):
    """ Test MCR with string-provided regressors"""

    C_known, D_known, St_known = dataset

    # 'OLS'/'NNLS' strings should be resolved to the regressor classes
    mcrals = McrAls(c_regr='OLS', st_regr='OLS')
    mcrals.fit(D_known, ST=St_known, verbose=True)
    assert_equal(1, mcrals.n_iter_opt)
    assert isinstance(mcrals.c_regressor, pymcr.regressors.OLS)
    assert isinstance(mcrals.st_regressor, pymcr.regressors.OLS)

    mcrals = McrAls(c_regr='NNLS', st_regr='NNLS')
    mcrals.fit(D_known, ST=St_known)
    assert_equal(1, mcrals.n_iter_opt)
    assert isinstance(mcrals.c_regressor, pymcr.regressors.NNLS)
    assert isinstance(mcrals.st_regressor, pymcr.regressors.NNLS)
    assert ((mcrals.D_ - D_known)**2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known)**2).mean() < 1e-10

    # Provided C_known this time
    mcrals = McrAls(c_regr='OLS', st_regr='OLS')
    mcrals.fit(D_known, C=C_known)

    # Turns out some systems get it in 1 iteration, some in 2
    # assert_equal(1, mcrals.n_iter_opt)
    assert_equal(True, mcrals.n_iter_opt <= 2)

    assert ((mcrals.D_ - D_known)**2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known)**2).mean() < 1e-10
Example #10
0
def test_mcr_errors():
    """ Exercise the error paths of McrAls construction and fitting """

    D = np.random.randn(10, 5)
    C = np.random.randn(10, 3)
    ST = np.random.randn(3, 5)

    # Providing both C and S^T estimates without c_fix and st_fix
    with pytest.raises(TypeError):
        McrAls().fit(D, C=C, ST=ST)

    # Providing both estimates with only c_fix is still an error
    with pytest.raises(TypeError):
        McrAls().fit(D, C=C, ST=ST, c_fix=[0])

    # ...and likewise with only st_fix
    with pytest.raises(TypeError):
        McrAls().fit(D, C=C, ST=ST, st_fix=[0])

    # Providing no estimates at all
    with pytest.raises(TypeError):
        McrAls().fit(D)

    # Unknown regression method name
    with pytest.raises(ValueError):
        McrAls(c_regr='NOTREAL')

    # Regression object without a fit method
    with pytest.raises(ValueError):
        McrAls(c_regr=print)
Example #11
0
def test_mcr():
    """ End-to-end MCR-ALS runs on a synthetic two-component dataset """
    M, N, P = 21, 21, 101
    n_components = 2

    # Linear-gradient concentration image with closure (rows sum to 1)
    C_img = np.zeros((M, N, n_components))
    C_img[..., 0] = np.dot(np.ones((M, 1)), np.linspace(0, 1, N)[None, :])
    C_img[..., 1] = 1 - C_img[..., 0]

    # Two non-overlapping box spectra
    ST_known = np.zeros((n_components, P))
    ST_known[0, 40:60] = 1
    ST_known[1, 60:80] = 2

    C_known = C_img.reshape((-1, n_components))
    D_known = np.dot(C_known, ST_known)

    def _make_mcrals(tol_err_change=1e-10, **regr_kwargs):
        # Build a fresh model (and fresh constraint instances) per run,
        # matching the original per-call construction.
        return McrAls(max_iter=50, tol_increase=100, tol_n_increase=10,
                      st_constraints=[ConstraintNonneg()],
                      c_constraints=[ConstraintNonneg(), ConstraintNorm()],
                      tol_err_change=tol_err_change, **regr_kwargs)

    # Default regressors, saving disabled
    mcrals = _make_mcrals()
    mcrals._saveall_st = False
    mcrals._saveall_c = False
    mcrals.fit(D_known, ST=ST_known)
    assert_equal(1, mcrals.n_iter_opt)

    # OLS regressors seeded with the known spectra
    mcrals = _make_mcrals(c_regr='OLS', st_regr='OLS')
    mcrals.fit(D_known, ST=ST_known)
    assert_equal(1, mcrals.n_iter_opt)
    assert ((mcrals.D_ - D_known) ** 2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known) ** 2).mean() < 1e-10

    # NNLS regressors seeded with the known spectra
    mcrals = _make_mcrals(c_regr='NNLS', st_regr='NNLS')
    mcrals.fit(D_known, ST=ST_known)
    assert_equal(1, mcrals.n_iter_opt)

    assert ((mcrals.D_ - D_known) ** 2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known) ** 2).mean() < 1e-10

    # OLS regressors seeded with the known concentrations
    mcrals = _make_mcrals(c_regr='OLS', st_regr='OLS')
    mcrals.fit(D_known, C=C_known)

    # Turns out some systems get it in 1 iteration, some in 2
    # assert_equal(1, mcrals.n_iter_opt)
    assert_equal(True, mcrals.n_iter_opt <= 2)

    assert ((mcrals.D_ - D_known) ** 2).mean() < 1e-10
    assert ((mcrals.D_opt_ - D_known) ** 2).mean() < 1e-10

    # Seeding with a constant of 0.1 for C, actually leads to a bad local
    # minimum; thus, the err_change gets really small with a relatively bad
    # error. This is not really a test, but it does test out breaking
    # from tol_err_change
    mcrals = _make_mcrals(c_regr='OLS', st_regr='OLS')
    mcrals.fit(D_known, C=C_known * 0 + 0.1)

    # Same bad seed with tol_err_change disabled: must run to max_iter
    mcrals = _make_mcrals(tol_err_change=None, c_regr='OLS', st_regr='OLS')
    mcrals.fit(D_known, C=C_known * 0 + 0.1)
    assert_equal(mcrals.n_iter, 50)
Example #12
0
# Allowed noise percentage passed to SIMPLISMA
noise = 5
# Set True to load pure spectra from file instead of estimating them
manual = False

# Mixture data: rows are spectra, tab-separated with no header
D = np.asarray((pd.read_csv('Total_MCR_CuSSZ13.dat', sep='\t', header=None)).values)

if not manual:
    # Run SVD to help choose the number of components
    # NOTE(review): nSVD is not defined in this chunk — confirm it is set earlier
    eigens, explained_variance_ratio = svd.svd(D, nSVD)
    # np.int was removed in NumPy 1.24; the builtin int is the correct spelling
    nPure = int(input('Number of Principle Components :'))
    # Run SIMPLISMA to get initial pure spectra
    S, C_u, C_c = simplisma.pure(D.T, nPure, noise, True)
else:
    S = np.asarray((pd.read_csv('sopt_5c2.dat', sep='\t', header=None)).values).T


# Run MCR-ALS seeded with the pure spectra estimate
mcrals = McrAls(max_iter=50, st_regr='NNLS', c_regr='NNLS',
                c_constraints=[ConstraintNonneg(), ConstraintNorm()])

mcrals.fit(D, ST=S.T, verbose=True)
print('\nFinal MSE: {:.7e}'.format(mcrals.err[-1]))

# Plot retrieved spectra (top) and concentration profiles (bottom)
plt.subplot(2, 1, 1)
plt.plot(mcrals.ST_opt_.T)

plt.subplot(2, 1, 2)
plt.plot(mcrals.C_opt_)

plt.show()