Example #1
	def test_doseframe_properties(self):
		m, n = 100, 50
		d = DoseFrame(m, n, None)

		# frame dimensions immutable once set
		self.assert_exception( call=d.voxels, args=[m] )
		self.assert_exception( call=d.beams, args=[m] )

		d.data = rand(m, n)
		self.assertTrue( isinstance(d.data, ndarray) )

		# matrix with wrong size fails
		self.assert_exception( call=d.data, args=[rand(m + 1, n)] )

		d.data = sprand(m, n, 0.2, 'csr')
		self.assertTrue( isspmatrix(d.data) )
		d.data = sprand(m, n, 0.2, 'csc')

		# coo matrix fails
		self.assert_exception( call=d.data, args=[sprand(m, n, 0.2)] )

		# voxel labels
		vl = (10 * rand(m)).astype(int)
		d.voxel_labels = vl
		self.assertTrue( sum(vl - d.voxel_labels) == 0 )

		vl_missized = (10 * rand(m + 1)).astype(int)
		self.assert_exception( call=d.voxel_labels, args=[vl_missized] )

		# beam labels
		bl = (4 * rand(n)).astype(int)
		d.beam_labels = bl
		self.assertTrue( sum(bl - d.beam_labels) == 0 )

		bl_missized = (10 * rand(n + 1)).astype(int)
		self.assert_exception( call=d.beam_labels, args=[bl_missized] )

		# voxel weights
		self.assertTrue( isinstance(d.voxel_weights, ndarray) )
		self.assertTrue( d.voxel_weights.size == m )
		self.assertTrue( sum(d.voxel_weights != 1) == 0 )
		vw = (5 * rand(m)).astype(int).astype(float)
		d.voxel_weights = vw
		self.assert_vector_equal( vw, d.voxel_weights )

		vw_missized = (5 * rand(m + 1)).astype(int).astype(float)
		self.assert_exception( call=d.voxel_weights, args=[vw_missized] )

		# beam weights
		self.assertTrue( isinstance(d.beam_weights, ndarray) )
		self.assertTrue( d.beam_weights.size == n )
		self.assertTrue( sum(d.beam_weights != 1) == 0 )
		bw = (5 * rand(n)).astype(int).astype(float)
		d.beam_weights = bw
		self.assert_vector_equal( bw, d.beam_weights )

		bw_missized = (5 * rand(n + 1)).astype(int).astype(float)
		self.assert_exception( call=d.beam_weights, args=[bw_missized] )
Example #2

from scipy.sparse import lil_matrix, rand as sprand
import numpy as np

def generate_H(m, n, d):
    """Generate a binary parity-check matrix H (m x n) with density d (0 < d < 1),
    where every row and every column has at least two ones."""
    H = lil_matrix((m, n), dtype=np.int8)

    # XOR random sparse masks into H until each row and column holds >= 2 ones.
    while not (np.all(H.sum(axis=1) >= 2) and np.all(H.sum(axis=0) >= 2)):
        mask = (sprand(m, n, d).toarray() != 0).astype(np.int8)
        H = lil_matrix((H.toarray() + mask) % 2)

    return H
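
A minimal usage sketch for the helper above; the dimensions and density are illustrative only:

H = generate_H(20, 40, 0.05)
print(H.shape, H.nnz)  # matrix dimensions and the number of ones placed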
Example #3

def load_simulated_data():
    n_users = 1000
    n_items = 1000

    ratings = sprand(n_users, n_items, density=0.01, format='csr')
    data = (np.random.randint(1, 5, size=ratings.nnz).astype(np.float64))
    ratings.data = data

    return ratings
Example #4

def test_l1logistic_sparse_input_no_center():
    """Test that multiclass L1 Logistic raises an error when asked to center
    sparse data.
    """
    rs = np.random.RandomState(17)
    X = sprand(10, 10, random_state=rs)
    classes = ['abc', 'de', 'fgh']
    y = np.array(classes)[rs.randint(3, size=10)]

    with pytest.raises(ValueError):
        UoI_L1Logistic(fit_intercept=True).fit(X, y)
Example #5

def disabled_test_factorization_sparse():
    I, J, K, rank = 10, 20, 75, 5
    Tmat = sprand(I, J * K, 0.1).tocoo()
    T = unfolded_sptensor((Tmat.data, (Tmat.row, Tmat.col)), None, 0, [], (I, J, K)).fold()
    core, U = tucker.hooi(T, rank, maxIter=20)

    Tmat = Tmat.toarray()
    T = unfolded_dtensor(Tmat, 0, (I, J, K)).fold()
    core2, U2 = tucker.hooi(T, rank, maxIter=20)

    assert allclose(core2, core)
    for i in range(len(U)):
        assert allclose(U2[i], U[i])
Example #6

def test_factorization_sparse():
    I, J, K, rank = 10, 20, 75, 5
    Tmat = sprand(I, J * K, 0.1).tocoo()
    T = unfolded_sptensor((Tmat.data, (Tmat.row, Tmat.col)), None, 0, [], (I, J, K)).fold()
    core, U = tucker_hooi.tucker_hooi(T, rank, maxIter=20)

    Tmat = Tmat.toarray()
    T = unfolded_dtensor(Tmat, 0, (I, J, K)).fold()
    core2, U2 = tucker_hooi.tucker_hooi(T, rank, maxIter=20)

    assert_true(allclose(core2, core))
    for i in range(len(U)):
        assert_true(allclose(U2[i], U[i]))
Example #7
	def test_doseframe_init_basic(self):
		m, n = 100, 50

		d = DoseFrame()
		self.assert_nan( d.voxels )
		self.assert_nan( d.beams )

		d = DoseFrame(m, n, None)
		self.assertTrue( d.voxels == m )
		self.assertTrue( d.beams == n )

		b = BeamSet(n)
		d = DoseFrame(m, b, None)
		self.assertTrue( d.voxels == m )
		self.assertTrue( d.beams == n )

		A = rand(m, n)
		d = DoseFrame(None, None, A)
		self.assertTrue( d.voxels == m )
		self.assertTrue( d.beams == n )
		# size mismatches
		self.assert_exception( call=DoseFrame, args=[None, n + 1, A] )
		self.assert_exception( call=DoseFrame, args=[m + 1, None, A] )
		self.assert_exception( call=DoseFrame, args=[m, n + 1, A] )
		self.assert_exception( call=DoseFrame, args=[m + 1, n, A] )

		A = sprand(m, n, 0.2, 'csr')
		d = DoseFrame(None, None, A)
		self.assertTrue( d.voxels == m )
		self.assertTrue( d.beams == n )

		A = sprand(m, n, 0.2, 'csc')
		d = DoseFrame(None, None, A)
		self.assertTrue( d.voxels == m )
		self.assertTrue( d.beams == n )

		A = sprand(m, n, 0.2) # COO sparse storage, not supported
		self.assert_exception( call=DoseFrame, args=[None, None, A] )
Example #8
def create_toy_data(T=1000,
                    J=9,
                    K=9,
                    M=5,
                    N=2,
                    dt=0.02,
                    c1=0.,
                    c2=1.,
                    c3=0.,
                    noisevar=0.):

    # Create PRF and CGF
    prf_size = (J, K)
    cgf_size = (M + 1, 2 * N + 1)

    w_prf = np.zeros(prf_size)
    nt = int(np.round(J / 3.))
    nf = int(np.round(K / 3.))
    w_prf[nt:2 * nt, nf:2 * nf] = np.tile(np.linspace(-1, 1, nt), (nf, 1)).T

    w_cgf = np.zeros(cgf_size)
    nt = int(np.round(M / 2.))
    nf = int(np.round(N / 2.))
    w_cgf[-nt:-2 * nt:-1, N - nf:N + nf + 1] = -1
    w_cgf[-1, 0] = -1
    w_cgf[-1, -1] = 1

    w_prf = gaussian_filter(w_prf, .75)
    w_cgf = gaussian_filter(w_cgf, .75)
    w_cgf[0, N] = 0

    # DRC stimulus
    S = np.asarray(sprand(T, K, density=.165).todense())

    # Pad zeros around stimulus to simplify subsequent computations
    S_pad = np.zeros((J - 1 + M + T, K + 2 * N))
    S_pad[J - 1 + M:, N:-N] = S

    # Predict response of full context model
    y = np.zeros((T, ))

    context_fast.predict_y_context(S_pad, w_prf.ravel(), w_cgf.ravel(), y, c1,
                                   c2, c3, T, J, K, M, N)

    # Add some noise
    y += np.sqrt(noisevar) * np.random.randn(y.shape[0])

    return S, S_pad, y, w_prf, w_cgf
Example #9
def main():

    # generate random Y
    Y = sprand(1000, 500, 0.1, 'csc')

    # train IMC
    mf_results = train_mf(Y,
                          k=10,
                          lamb=0.1,
                          solver_type=0,
                          maxiter=10,
                          threads=4)

    # get predictions and compute RMSE
    mf_results.predict_mf()
    rmse = mf_results.rmse(1)
    print "RMSE = %.6f" % rmse
Example #10
def test_l1logistic_sparse_input():
    """Test that multiclass L1 Logistic works when using sparse matrix
       inputs"""
    rs = np.random.RandomState(17)
    X = sprand(100, 100, random_state=rs)
    classes = ['abc', 'de', 'fgh']
    y = np.array(classes)[rs.randint(3, size=100)]

    kwargs = dict(
        fit_intercept=False,
        random_state=rs,
        n_boots_sel=4,
        n_boots_est=4,
        n_C=7,
    )
    l1log = UoI_L1Logistic(**kwargs).fit(X, y)

    y_hat = l1log.predict(X)
    assert set(classes) >= set(y_hat)
Example #11
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec

# `sin1` and `Z` are assumed to be defined earlier in the original script.
fig = plt.figure(constrained_layout=True)
plt.set_cmap('rainbow')
# gs2 = GridSpec(2,3,figure=fig,left=0.05, right=0.48,wspace=0.03)
# ax0 = fig.add_subplot(gs2[0,0])
# ax1 = fig.add_subplot(gs2[0,1])
# c = ax0.pcolor(sin1*Z)
# ax0.set_title(r'$\mathbf{Y}$')
# ax0.set_axis_off()
# c = ax1.pcolor(sin1*np.ones((10,40)))#, edgecolors='k', linewidths=4)
# ax1.set_title(r'$\mathbf{PQ}$')
# ax1.set_axis_off()
from scipy.sparse import rand as sprand
np.savetxt("PQ.csv",sin1*Z,delimiter=',')
D = np.random.rand(10,60)
S = sprand(60,40,density=0.05)
print(np.shape(np.dot(D,S.todense())))

Z_new = sin1*Z + np.dot(D, S.todense()) + np.random.normal(0, 0.01, (10, 40))
np.savetxt("Z_new.csv", Z_new, delimiter=',')
np.savetxt("D.csv", D, delimiter=',')
np.savetxt("S.csv", S.todense(), delimiter=',')
gs1 = GridSpec(2,3,figure=fig,left=0.55, right=0.98,wspace=0.03)
ax2 = fig.add_subplot(gs1[0,:-2])
ax3 = fig.add_subplot(gs1[:,2])
c = ax2.pcolor(D)#, edgecolors='k', linewidths=4)
ax2.set_title(r'$\mathbf{D}$')
ax2.set_axis_off()
c = ax3.pcolor(S.todense())
ax3.set_title(r'$\mathbf{S}$')
ax3.set_axis_off()
Example #12
import time
start = time.time()

import numpy as np
from scipy.sparse import rand as sprand
import torch
from torch.autograd import Variable


n_users = 1000
n_items = 1000
ratings = sprand(n_users,n_items,density=0.1,format='csr',random_state=42)
ratings.data = (np.random.randint(1,5,size=ratings.nnz).astype(np.float64))
ratings = ratings.toarray() #this is the densification step!

class MatrixFactorization(torch.nn.Module):
    def __init__(self, n_users, n_items, n_factors=20):
        super().__init__()
        self.user_factors = torch.nn.Embedding(n_users,n_factors,sparse=True)
        self.item_factors = torch.nn.Embedding(n_items,n_factors,sparse=True)
        
    def forward(self, user, item):
        return (self.user_factors(user) * self.item_factors(item)).sum(1)
        

model = MatrixFactorization(n_users,n_items,n_factors=20)

loss_func = torch.nn.MSELoss()

optimizer = torch.optim.SGD(model.parameters(),lr=1e-6)
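
The script stops after constructing the optimizer. A minimal training-loop sketch over the observed entries, assuming the model, loss_func, optimizer, and dense ratings array defined above (the epoch count and one-example-at-a-time updates are illustrative only):

rows, cols = ratings.nonzero()
for epoch in range(10):
    for row, col in zip(rows, cols):
        # Wrap a single observed rating and its indices as tensors.
        rating = torch.FloatTensor([ratings[row, col]])
        user = torch.LongTensor([int(row)])
        item = torch.LongTensor([int(col)])

        # Forward pass, loss, and one SGD step.
        prediction = model(user, item)
        loss = loss_func(prediction, rating)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

print("elapsed: %.2f s" % (time.time() - start))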