    def hess_f1(self, a, z):
        "Define the hessian for the convex inequalities."

        ## Preliminaries
        z = z[1:]
        _z1 = z.sum()
        _zx = sp.dot(z, self.xarray)
        _zxx = (z[:, None, None] *
                self.xarray[:, :, None] *
                self.xarray[:, None, :]).sum(0)
        
        # Initialize the output "Hessian" array
        out = sp.zeros((self.Na, self.Na))

        ## There are four terms to compute
        _aux0 = _z1 * sp.tensordot(self.B, self.B, axes=[(-1,), (-1,)])
        _Dzx = sp.dot(self.D, _zx)
        _aux1 = sp.tensordot(self.B, _Dzx, axes=[(-1,), (-1,)])
        _aux2 = sp.tensordot(_Dzx, self.B, axes=[(-1,), (-1,)])
        _Dzxx = sp.dot(self.D, _zxx)
        _aux3 = sp.tensordot(_Dzxx, self.D, axes=[(1,2), (1,2)])

        ## output array 
        out[:, :] = 2.0 * (_aux0 + _aux1 + _aux2 + _aux3)

        return out
    def grad_f1(self, a):
        "Define the gradient for each convex inequality."

        # Initialize the output vector
        out = sp.zeros((self.M, self.Na))

        # Preliminary calculation
        _xx = sp.einsum('mi,mj->mij', self.xarray, self.xarray)

        # Compute the four terms
        _Da = sp.tensordot(self.D, a, axes=[(0,), (0,)])
        _DDa = sp.tensordot(self.D, _Da, axes=[(1,), (0,)])
        xxDDa = sp.tensordot(_xx.reshape(self.M, self.ndim**2),
                             _DDa.reshape(self.Na, self.ndim**2),
                             axes=[(-1,), (-1,)])

        _BDa = sp.dot(self.B, _Da)
        xBDa = sp.inner(self.xarray, _BDa)

        _Ba = sp.dot(a, self.B)
        _DBa = sp.dot(_Ba, self.D)
        xDBa = sp.tensordot(self.xarray,
                            _DBa, axes=[(-1,), (-1,)])

        BBa = sp.dot(self.B, _Ba)
        
        # compute the gradient by summing the four terms
        out[:, :] = 2.0 * (xxDDa + xBDa + xDBa + BBa)

        return out
Example #3
def subtract_frequency_modes(imap, modes, weight, freq, defer=False):
    r"""Subtract frequency modes from the map.
    """

    # Output array holding the fitted amplitude of each mode.
    omap = sp.empty((len(modes), ) + imap.shape[1:])

    if defer:
        fitted = np.zeros_like(imap[freq, :, :])

    for mode_index, mode_vector in enumerate(modes):
        #mode_vector = mode_vector.reshape(self.freq.shape)

        amp  = sp.tensordot(mode_vector, 
                            imap[freq, :, :] * weight[freq, :, :],
                            axes=(0,0))
        amp /= sp.tensordot(mode_vector,
                            mode_vector[:, None, None] * weight[freq, :, :],
                            axes=(0,0))

        if defer:
            fitted += mode_vector[:, None, None] * amp[None, :, :]
        else:
            fitted = mode_vector[:, None, None] * amp[None, :, :]
            imap[freq, :, :] -= fitted

        omap[mode_index, :, :] = amp

    if defer:
        imap[freq, :, :] -= fitted

    return imap, omap
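The per-mode fit above is a weighted least-squares projection along the frequency axis. A minimal numpy sketch of the same amplitude formula, on made-up shapes and independent of the map machinery above:

import numpy as np

nfreq, nra, ndec = 8, 4, 5
rng = np.random.default_rng(0)
mode = rng.standard_normal(nfreq)
imap = rng.standard_normal((nfreq, nra, ndec))
weight = rng.random((nfreq, nra, ndec))

# amp minimizes sum_f weight * (imap - amp * mode)**2 at each (ra, dec) pixel
amp = np.tensordot(mode, imap * weight, axes=(0, 0))
amp /= np.tensordot(mode, mode[:, None, None] * weight, axes=(0, 0))
print(amp.shape)   # (4, 5)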
Example #4
def expValue( g, l, op ):
    bondDim = l.shape[ 1 ]

    expVals = []
    for A in ( 0, 1 ):
        B = ( A + 1 ) % 2
        theta = l[ B ].reshape( bondDim, 1, 1 ) * g[ A ] * l[ A ]
        OpTheta = sp.tensordot( theta, op, ( 1, 1 ) )
        expVals.append( sp.tensordot( theta, OpTheta, ( [ 0, 1, 2 ], [ 0, 2, 1 ] ) ) )

    return sum( expVals ) / 2.0
Example #5
def energy( g, l, hermitonian ):
    bondDim = l.shape[ 1 ]
    physicalDim = g.shape[ 2 ]

    energys = []
    for A in ( 0, 1 ):
        B = ( A + 1 ) % 2
        theta = sp.tensordot( l[ B ].reshape( bondDim, 1, 1 ) * g[ A ] * l[ A ], g[ B ] * l[ B ], 1 )
        Htheta = sp.tensordot( theta.reshape( bondDim, physicalDim ** 2, bondDim ), hermitonian, ( 1, 1 ) )
        energys.append( sp.tensordot( theta.reshape( bondDim, physicalDim ** 2, bondDim ), Htheta, ( [ 0, 1, 2 ], [ 0, 2, 1 ] ) ) )

    return sum( energys ) / 2.0
Example #6
 def Velocity(self):
     if self._compute_velocity:
         self._velocity = scipy.tensordot(self[self.attrs['Velocity']],
                                          self._rotation,
                                          axes=((1, ), (1, )))
     self._compute_velocity = False
     return self._velocity
def calc_C_mat_op_AA(op, AA):
    #print "===="
    #print "op:",op.shape
    #print "AA:",AA.shape
    res = sp.tensordot(op, AA, ((2, 3), (0, 1)))
    #print "calculation successful!"
    return res
def dklC2lms(dlkC_array):
    """"""
    diff_array = sp.tensordot(dlkC_array, DKL2LMS, axes=((-1, ), (1, )))
    diff_array[..., 0] += gray_lms[0]
    diff_array[..., 1] += gray_lms[1]
    diff_array[..., 2] += gray_lms[2]
    return diff_array
def classify_csp(W, V, x_train_filt, y_train, x_test_filt, y_test):
    """ Classify data using CSP filter W"""
    # Project data
    proj_train = sp.tensordot(W.transpose(), x_train_filt, axes=[1,1])
    proj_test  = sp.tensordot(W.transpose(), x_test_filt, axes=[1,1])

    # Calculate features

    ftr = np.log( np.tensordot(proj_train**2, V, axes=[1,0]) )[:,:,0]
    fte = np.log( np.tensordot(proj_test **2, V, axes=[1,0]) )[:,:,0]
    # Classify
    logistic = LR()
    logistic.fit(ftr.transpose(), y_train[:,0])
    sc = logistic.score(fte.transpose(), y_test[:,0])

    return sc
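A hypothetical usage sketch for classify_csp, with random stand-in data just to show the expected array shapes. It assumes the function above and its LR import (presumably sklearn's LogisticRegression) are in scope; W and V here are arbitrary placeholders, not real CSP filters or variance weights.

import numpy as np

n_time, n_chan, n_trials, n_filt = 50, 8, 30, 4
rng = np.random.default_rng(0)
W = rng.standard_normal((n_chan, n_filt))      # stand-in spatial filters
V = np.ones((n_time, 1)) / n_time              # stand-in weights over the time axis
x_train = rng.standard_normal((n_time, n_chan, n_trials))
x_test = rng.standard_normal((n_time, n_chan, n_trials))
y_train = (np.arange(n_trials) % 2).reshape(-1, 1)
y_test = (np.arange(n_trials) % 2).reshape(-1, 1)

print(classify_csp(W, V, x_train, y_train, x_test, y_test))  # accuracy near chance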
def trueFeatureStats(T, R, fMap, discountFactor, stateProp=1, MAT_LIMIT=1e8):
    """ Gather the statistics needed for LSTD,
    assuming infinite data (true probabilities).
    Option: if stateProp is < 1, then only a proportion of all
    states will be seen as starting states for transitions. """
    dim = len(fMap)
    numStates = len(T)
    statMatrix = zeros((dim, dim))
    statResidual = zeros(dim)
    ss = range(numStates)
    repVersion = False
    
    if stateProp < 1:
        ss = random.sample(ss, int(numStates * stateProp))
    elif dim * numStates**2 < MAT_LIMIT:
        repVersion = True
    
    # two variants, depending on how large we can afford our matrices to become.        
    if repVersion:    
        tmp1 = tile(fMap, (numStates,1,1))
        tmp2 = transpose(tmp1, (2,1,0))
        tmp3 = tmp2 - discountFactor * tmp1            
        tmp4 = tile(T, (dim,1,1))
        tmp4 *= transpose(tmp1, (1,2,0))
        statMatrix = tensordot(tmp3, tmp4, axes=[[0,2], [1,2]]).T
        statResidual = dot(R, dot(fMap, T).T)
    else:
        for sto in ss:
            tmp = fMap - discountFactor * repmat(fMap[:, sto], numStates, 1).T
            tmp2 = fMap * repmat(T[:, sto], dim, 1)
            statMatrix += dot(tmp2, tmp.T)             
            statResidual += R[sto] * dot(fMap, T[:, sto])
    return statMatrix, statResidual
Example #12
 def test_partial_dot_mat_mat_block(self):
     mat1 = sp.arange(2 * 3 * 5 * 7 * 11)
     mat1.shape = (2, 3, 5, 7, 11)
     mat1 = algebra.make_mat(mat1,
                             axis_names=('time', 'x', 'y', 'ra', 'z'),
                             row_axes=(0, 1, 3),
                             col_axes=(0, 2, 3, 4))
     mat2 = sp.arange(2 * 13 * 5 * 7 * 17)
     mat2.shape = (2, 13, 7, 5, 17)
     mat2 = algebra.make_mat(mat2,
                             axis_names=('time', 'w', 'ra', 'y', 'freq'),
                             row_axes=(0, 1, 2, 3),
                             col_axes=(1, 2, 4))
     tmp_arr = sp.tensordot(mat1, mat2, ((2, ), (3, )))
     right_ans = sp.empty((7, 13, 2, 3, 11, 17))
     for ii in range(2):
         for jj in range(7):
             this_tmp = tmp_arr[ii, :, jj, :, ii, :, jj, :]
             this_tmp = sp.rollaxis(this_tmp, 2, 0)
             right_ans[jj, :, ii, ...] = this_tmp
     result = algebra.partial_dot(mat1, mat2)
     self.assertEqual(result.axes, ('ra', 'w', 'time', 'x', 'z', 'freq'))
     self.assertEqual(result.rows, (0, 1, 2, 3))
     self.assertEqual(result.cols, (0, 1, 4, 5))
     self.assertTrue(sp.allclose(right_ans, result))
def decode_bayes(arr, axis=None, density=None, scale=None):
    """ The density is the probabilistic equivalent of the tuning curve, with
    two input arguments: the first is the value and the second the location
    parameter of the density. Assuming fdomain is (0., 1.).
    """

    nunits = arr.shape[axis]
    values = sp.linspace(0.0, 1.0, nunits, dtype=float)

    # matrix of discrete tuning curves; i: stimulus value #ID, j: preference
    tuning = sp.array([[density(theta, loc=phi, scale=scale) \
        for theta in values] for phi in values])
    tuning /= tuning.sum(1, keepdims=True)

    # normalized spike count
    spksum = arr.sum(axis, keepdims=True)
    spksum[spksum == 0] = 1.0
    spikes = arr / spksum

    # now decode
    probas = sp.tensordot(spikes, tuning, ((axis, ), (0, )))
    probas = sp.rollaxis(probas, axis=-1, start=axis)
    mapest = values[probas.argmax(axis)]
    #mapest = sp.tensordot(probas, values, ((axis,), (0,)))
    spread = 1.0 / (probas.max(axis) - probas.min(axis))

    return probas, mapest, spread
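A hypothetical usage sketch for decode_bayes, assuming the function above is in scope and using scipy.stats.norm.pdf as the density (a Gaussian tuning curve); the spike counts are random stand-ins.

import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
spikes = rng.poisson(lam=3.0, size=(10, 16, 4))   # trials x units x conditions

# decode along the unit axis with a Gaussian tuning-curve width of 0.1
probas, mapest, spread = decode_bayes(spikes, axis=1,
                                      density=norm.pdf, scale=0.1)
print(probas.shape, mapest.shape)   # (10, 16, 4) and (10, 4)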
    def test_partial_dot_mat_mat_block(self):
        mat1 = sp.arange(2 * 3 * 5 * 7 * 11)
        mat1.shape = (2, 3, 5, 7, 11)

        mat1 = matrix.make_mat(mat1, axis_names=('time', 'x', 'y', 'ra', 'z'),
                                row_axes=(0, 1, 3), col_axes=(0, 2, 3, 4))

        mat2 = sp.arange(2 * 13 * 5 * 7 * 17)
        mat2.shape = (2, 13, 7, 5, 17)

        mat2 = matrix.make_mat(mat2,
                                axis_names=('time', 'w', 'ra', 'y', 'freq'),
                                row_axes=(0, 1, 2, 3), col_axes=(1, 2, 4))

        tmp_arr = sp.tensordot(mat1, mat2, ((2, ), (3, )))
        right_ans = sp.empty((7, 13, 2, 3, 11, 17))

        for ii in range(2):
            for jj in range(7):
                this_tmp = tmp_arr[ii, :, jj, :, ii, :, jj, :]
                this_tmp = sp.rollaxis(this_tmp, 2, 0)
                right_ans[jj, :, ii, ...] = this_tmp

        result = dot_products.partial_dot(mat1, mat2)
        self.assertEqual(result.axes, ('ra', 'w', 'time', 'x', 'z', 'freq'))
        self.assertEqual(result.rows, (0, 1, 2, 3))
        self.assertEqual(result.cols, (0, 1, 4, 5))
        self.assertTrue(sp.allclose(right_ans, result))
    def test_partial_dot_mat_mat(self):
        mat1 = sp.asarray(self.mat)
        mat1.shape = (4, 3, 2, 5)

        mat1 = matrix.make_mat(mat1,
                               axis_names=('time', 'x', 'y', 'z'),
                               row_axes=(0, ),
                               col_axes=(1, 2, 3))

        mat2 = sp.asarray(self.mat)
        mat2.shape = (4, 2, 3, 5)

        mat2 = matrix.make_mat(mat2,
                               axis_names=('w', 'y', 'x', 'freq'),
                               row_axes=(0, 1, 2),
                               col_axes=(3, ))

        result = dot_products.partial_dot(mat1, mat2)
        self.assertEqual(result.axes, ('time', 'w', 'z', 'freq'))
        self.assertEqual(result.rows, (0, 1))
        self.assertEqual(result.cols, (2, 3))
        self.assertEqual(result.shape, (4, 4, 5, 5))
        right_ans = sp.tensordot(mat1, mat2, ((1, 2), (2, 1)))
        right_ans = sp.swapaxes(right_ans, 1, 2)
        self.assertTrue(sp.allclose(right_ans, result))
Example #16
def classify_csp(W, V, x_train_filt, y_train, x_test_filt, y_test):
    """ Classify data using CSP filter W"""
    # Project data
    proj_train = sp.tensordot(W.transpose(), x_train_filt, axes=[1, 1])
    proj_test = sp.tensordot(W.transpose(), x_test_filt, axes=[1, 1])

    # Calculate features

    ftr = np.log(np.tensordot(proj_train**2, V, axes=[1, 0]))[:, :, 0]
    fte = np.log(np.tensordot(proj_test**2, V, axes=[1, 0]))[:, :, 0]
    # Classify
    logistic = LR()
    logistic.fit(ftr.transpose(), y_train[:, 0])
    sc = logistic.score(fte.transpose(), y_test[:, 0])

    return sc
Example #17
 def test_spacing_3D_rotated_uneven(self):
     net = op.network.Cubic(shape=[3, 4, 5], spacing=[1, 2, 3])
     theta = 0.1
     R = sp.array([[1, 0, 0], [0, sp.cos(theta), -sp.sin(theta)],
                   [0, sp.sin(theta), sp.cos(theta)]])
     net['pore.coords'] = sp.tensordot(net['pore.coords'], R, axes=(1, 1))
     assert sp.allclose(net.spacing, [1, 2, 3])
def decode(arr,
           kind='vote',
           axis=None,
           fdomain=DEFAULT_FDOMAIN,
           exclude_percent_of_max=0.,
           q=4):
    """"""

    if axis is None:
        if arr.ndim >= 3:
            axis = -3
        elif arr.ndim == 1:
            axis = 0
        else:
            raise ValueError('Ambiguous axis along which to decode')

    nunits = arr.shape[axis]
    values = sp.linspace(fdomain[0], fdomain[1], nunits, dtype=float)

    if exclude_percent_of_max > 0:
        pct = arr.max(axis=axis, keepdims=True) * \
            exclude_percent_of_max
        arr = sp.maximum(arr - pct, 0)

    if kind == 'argmax':
        return arr.argmax(axis=axis)/float(nunits-1) \
            * (fdomain[1] - fdomain[0]) + fdomain[0]

    elif kind == 'vote':
        return sp.tensordot(arr, values, ((axis, ), (0, ))) / arr.sum(axis)

    elif kind == 'voteExp':
        return sp.tensordot(arr**q, values,
                            ((axis, ), (0, ))) / (arr**q).sum(axis)

    elif kind == 'circular_vote':
        values = (values - fdomain[0]) / (fdomain[1] - fdomain[0])
        sv = sp.sin(values * 2 * sp.pi - sp.pi)
        cv = sp.cos(values * 2 * sp.pi - sp.pi)
        sw = sp.tensordot(arr, sv, ((axis, ), (0, ))) / arr.sum(axis)
        cw = sp.tensordot(arr, cv, ((axis, ), (0, ))) / arr.sum(axis)
        return (sp.arctan2(sw, cw) + sp.pi)/(2*sp.pi) \
            * (fdomain[1] - fdomain[0]) + fdomain[0]

    else:
        raise ValueError('Invalid decoder type')
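The 'vote' branch is a population-vector average: each unit votes for its preferred value with a weight equal to its response. A standalone numpy sketch of that branch, assuming fdomain = (0., 1.):

import numpy as np

arr = np.array([0.0, 1.0, 3.0, 1.0, 0.0])   # responses of 5 units
values = np.linspace(0.0, 1.0, arr.size)    # preferred values of the units
vote = np.tensordot(arr, values, axes=(0, 0)) / arr.sum()
print(vote)   # 0.5, the preference of the most responsive unit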
Example #19
 def Position(self):
     if self._compute_position:
         self._position = scipy.tensordot(
             self[self.attrs['Position']] - self._boxcenter,
             self._rotation,
             axes=((1, ), (1, ))) + self._translation
     self._compute_position = False
     return self._position
Example #20
 def test_spacing_3D_rotated_uneven(self):
     net = op.network.Cubic(shape=[3, 4, 5], spacing=[1, 2, 3])
     theta = 0.1
     R = sp.array([[1, 0, 0],
                   [0, sp.cos(theta), -sp.sin(theta)],
                   [0, sp.sin(theta), sp.cos(theta)]])
     net['pore.coords'] = sp.tensordot(net['pore.coords'], R, axes=(1, 1))
     assert sp.allclose(net.spacing, [1, 2, 3])
Example #21
def apply_MPO_local(Mn, An):
    q = An.shape[0]
    Dm1 = An.shape[1]
    D = An.shape[2]
    MAn = sp.tensordot(An, Mn, axes=[[0], [2]])
    MAn = sp.transpose(MAn, axes=(4, 0, 2, 1, 3)).copy()
    MAn = MAn.reshape((q, Dm1 * len(Mn), D * len(Mn[0])))

    return MAn
 def get_b(self, a):
     """
     Compute the ellipse centroid -b.
     
      The ellipse is
        ||dot(A, x) + b||^2 <= 1
      for array A and centroid vector -b.
     """
     return sp.tensordot(self.B, a, axes=[(0,), (0,)])
Example #23
 def Areml_K_grad_i(self,i):
     dLWt = self.dLW().reshape((self.mean._N, self.mean._P, self.mean.n_covs), order = 'F')
     if i < self.covar.Cg.getNumberParams():
         SrdLWt = self.covar.Sr()[:, sp.newaxis, sp.newaxis] * dLWt
     else:
         SrdLWt = dLWt
     SrdLWtC = sp.tensordot(SrdLWt, self.covar.Ctilde(i), axes=(1, 1))
     SroCdLW = SrdLWtC.swapaxes(1,2).reshape((self.mean._N * self.mean._P, self.mean.n_covs), order = 'F')
     return -sp.dot(self.dLW().T, SroCdLW)
Example #24
def apply_MPO_local(Mn, An):
    q = An.shape[0]
    Dm1 = An.shape[1]
    D = An.shape[2]
    MAn = sp.tensordot(An, Mn, axes=[[0], [2]])
    MAn = sp.transpose(MAn, axes=(4, 0, 2, 1, 3)).copy()
    MAn = MAn.reshape((q, Dm1 * len(Mn), D * len(Mn[0])))
    
    return MAn
def lms2dklC(lms_array):
    """ DKL := (L-M [red-green], S-(L+M) [blue-yellow], L+M [luminance])
    """
    # get cone differences w.r.t. the background
    diff_array = lms_array.copy()
    diff_array[..., 0] -= gray_lms[0]
    diff_array[..., 1] -= gray_lms[1]
    diff_array[..., 2] -= gray_lms[2]
    dkl_array = sp.tensordot(diff_array, LMS2DKL, axes=((-1, ), (1, )))
    return dkl_array
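Since dklC2lms (defined earlier) applies the inverse offset and matrix, a quick round-trip check can be written as below. This is only a sketch: it assumes both functions and the module-level LMS2DKL, DKL2LMS and gray_lms they use are importable, and that the two matrices are exact inverses of each other.

import numpy as np

lms = np.random.rand(4, 4, 3)   # a small fake LMS image
assert np.allclose(dklC2lms(lms2dklC(lms)), lms)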
Example #26
    def calc_C(self, n_low=-1, n_high=-1):
        """Generates the C matrices used to calculate the K's and ultimately the B's

        These are to be used on one side of the super-operator when applying the
        nearest-neighbour Hamiltonian, similarly to C in eqn. (44) of
        arXiv:1103.0936v2 [cond-mat.str-el], except being for the non-norm-preserving case.

        Makes use only of the nearest-neighbour hamiltonian, and of the A's.

        C[n] depends on A[n] and A[n + 1].
        
        This calculation can be significantly faster if a matrix form for h_nn
        is available. See gen_h_matrix().

        """
        if self.h_nn is None:
            return 0

        if n_low < 1:
            n_low = 0
        if n_high < 1:
            n_high = self.N + 1
        
        if self.h_nn_mat is None:
            for n in xrange(n_low, n_high):
                self.C[n].fill(0)
                for u in xrange(self.q[n]):
                    for v in xrange(self.q[n + 1]):
                        AA = mm.mmul(self.A[n][u], self.A[n + 1][v]) #only do this once for each
                        for s in xrange(self.q[n]):
                            for t in xrange(self.q[n + 1]):
                                h_nn_stuv = self.h_nn(n, s, t, u, v)
                                if h_nn_stuv != 0:
                                    self.C[n][s, t] += h_nn_stuv * AA
        else:
            dot = sp.dot
            for n in xrange(n_low, n_high):
                An = self.A[n]
                Anp1 = self.A[n + 1]
                
                AA = sp.empty_like(self.C[n])
                for u in xrange(self.q[n]):
                    for v in xrange(self.q[n + 1]):
                        AA[u, v] = dot(An[u], Anp1[v])
                        
                if n == 0: #FIXME: Temp. hack
                    self.AA0 = AA
                elif n == 1:
                    self.AA1 = AA
                
                res = sp.tensordot(AA, self.h_nn_mat[n], ((0, 1), (2, 3)))
                res = sp.rollaxis(res, 3)
                res = sp.rollaxis(res, 3)
                
                self.C[n][:] = res
Example #27
def tensordot2(A, B, sum=None, multiply=None):
    """ Tensordot that supports elementwise multiplication of axes
    without a subsequent sum-contraction.
    A sum contraction "can be prevented" if the corresponding axis is
    diagonalized. This principle can be seen in the simplest case by
    comparing
    .. code::
        a = np.arange(5)
        b = np.arange(5)
        a @ b           # sum-contracted to scalar
        np.diag(a) @ b  # vector with elementwise products
    Diagonalizing axes on a dense array would of course be prohibitively
    costly but it is really cheap for sparse matrices.
    Parameters
    ----------
    A, B : COO
        The input arrays.
    sum : list[list[int]]
        The axes to multiply and sum-contract over.
    multiply : list[list[int]]
        The axes to multiply over.
    Returns
    -------
    COO
        The output of the computation.
    See Also
    --------
    einsum : Einstein summation function using this
    COO.diag_axis : Diagonalize axes of sparse array
    """
    if sum is None:
        sum = [[], []]
    else:
        sum = list(sum)

    if multiply is None:
        multiply = [[], []]
    else:
        multiply = list(multiply)

    # For each multiply[0] we are adding one axis, thus we need to increment
    # all following items by one: (0, 1, 2) -> (0, 2, 4)
    # We need to account that the array may be unsorted
    idx = np.argsort(multiply[0])
    post_multiply = multiply[0]
    for i, v in enumerate(idx):
        post_multiply[v] += i

    for i in post_multiply:
        A = A.diag_axis(i)

    sum[0] += post_multiply
    sum[1] += multiply[1]

    return tensordot(A, B, axes=sum)
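The docstring's diagonalization principle can be checked with plain dense numpy arrays (a sketch of the idea only; tensordot2 itself targets sparse COO arrays):

import numpy as np

a = np.arange(5)
b = np.arange(5)

summed = a @ b                # scalar: sum_i a[i] * b[i]
elementwise = np.diag(a) @ b  # vector of elementwise products a[i] * b[i]

assert summed == elementwise.sum()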
 def Areml_K_grad_i(self, i):
     dLWt = self.dLW().reshape(
         (self.mean._N, self.mean._P, self.mean.n_covs), order='F')
     if i < self.covar.Cg.getNumberParams():
         SrdLWt = self.covar.Sr()[:, sp.newaxis, sp.newaxis] * dLWt
     else:
         SrdLWt = dLWt
     SrdLWtC = sp.tensordot(SrdLWt, self.covar.Ctilde(i), axes=(1, 1))
     SroCdLW = SrdLWtC.swapaxes(1, 2).reshape(
         (self.mean._N * self.mean._P, self.mean.n_covs), order='F')
     return -sp.dot(self.dLW().T, SroCdLW)
Example #29
def groundState( hermitonian, bondDim, timeIssue = None, wavefunc = None, out = None ):

    physicalDim = 5

    if not timeIssue:
        timeIssue = ( 0.05, 5.0 )

    if wavefunc:
        gamma, l = wavefunc
    else:
        gamma, l = sp.randn( 2, bondDim, 5, bondDim ), sp.randn( 2, bondDim )

    U = slg.expm( -timeIssue[ 0 ] * hermitonian )

    t = 0
    while t <= timeIssue[ 1 ]:
        if out:
            S_a, S_b = -sum( l[ 0 ] ** 2  * sp.log( l[ 0 ] ** 2 ) ), -sum( l[ 1 ] ** 2  * sp.log( l[ 1 ] ** 2 ) )
            expSz, expE = expValue( gamma, l, sz ), energy( gamma, l, hermitonian )
            print( "  {0:^+10.6f}{1:^+25.16f}{2:^+25.16f}{3:^+25.16f}{4:^+25.16f}".format( t, expE, expSz, S_a, S_b ), file = out )

        try:
            for A in range( 2 ):
                B = ( A + 1 ) % 2
                theta = sp.tensordot( l[ B ].reshape( bondDim, 1, 1 ) * gamma[ A ] * l[ A ], gamma[ B ] * l[ B ], 1 )
                theta = sp.tensordot( theta.reshape( bondDim, physicalDim ** 2, bondDim ), U, ( 1, 1 ) ).swapaxes( 1, 2 )
 
                X, Y, Z = slg.svd( theta.reshape( physicalDim * bondDim, physicalDim * bondDim ) )

                l[ A ] = Y[ 0:bondDim ] / slg.norm( Y[ 0:bondDim ] )
                gamma[ A ] = X[ :, 0:bondDim ].reshape( bondDim, 5, bondDim ) / l[ B ].reshape( bondDim, 1, 1 )
                gamma[ B ] = Z[ 0:bondDim, : ].reshape( bondDim, 5, bondDim ) / l[ B ]

            t += timeIssue[ 0 ]
        except slg.LinAlgError:
            print( "raise an exception", file = stderr )
            gamma += sp.random.normal( size = ( 2, bondDim, 5, bondDim ) )
            t = 0.0


    return gamma, l
def project_tensor(tensor, axes, projection_components, projected_axes):
    """
	Project a tensor array of rank > 2 to lower dimensions
	along given axes.
	
	Args:
		tensor: numpy array whose shape has length > 2
		axes: dictionary whose keys are the names of the variables
						to be projected to and whose values are their 
						respective ranges as rank-1 numpy arrays.
		projection_components: dictionary of axes to project along, whose 
					keys are the names of the projected axis variables and 
					whose values indicate the component along which to 
					take the projection. Index must be less than the 
					length of this axis.
		projected_axes: 2-element list indicating which indices of axes 
						are to be projected to. 
		
	Returns:
		tensor: the projected tensor of shape 1 less than the input tensor.
	"""

    assert len(tensor.shape) > 2, \
     'Cannot project a rank 1 or 2 tensor to two dimensions'
    assert len(projected_axes) == 2, 'Can only project to two dimensions'

    for idx, name in enumerate(axes.keys()):
        if idx == projected_axes[0]:
            pass
        elif idx == projected_axes[1]:
            pass
        else:
            proj_axis = list(axes.keys()).index(name)

            try:
                print(('Setting %s fixed..' % name))
                proj_element = projection_components[name]
            except:
                print ('Need to specify iterated variable values that ' \
                  'are not being plotted in projection_components ' \
                  'dictionary')
                quit()

            assert (proj_element < len(axes[name])), \
              'Fixed index out of range, %s >= %s'\
              % (proj_element, len(axes[name]))
            proj_vec = sp.zeros(len(axes[name]))
            proj_vec[proj_element] = 1.0

            tensor = sp.tensordot(tensor, proj_vec, [proj_axis, 0])

    return tensor
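A hypothetical usage sketch for project_tensor (the axis names and grid values are made up): a rank-3 tensor is projected onto its first two axes while the third axis is fixed at one grid point.

import numpy as np
from collections import OrderedDict

axes = OrderedDict([('x', np.linspace(0, 1, 4)),
                    ('y', np.linspace(0, 1, 5)),
                    ('t', np.linspace(0, 1, 3))])
tensor = np.random.rand(4, 5, 3)

proj = project_tensor(tensor, axes, projection_components={'t': 1},
                      projected_axes=[0, 1])
print(proj.shape)   # (4, 5)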
Example #31
def sub_modes(map, modes):

    outmap = np.empty((len(modes), ) + map.shape[1:])

    for mode_index, mode_vector in enumerate(modes):
        mode_vector = mode_vector.reshape([map.shape[0],])

        amp = sp.tensordot(mode_vector, map, axes=(0,0))

        fitted = mode_vector[:, None, None] * amp[None, :, :]
        map -= fitted

        outmap[mode_index, :, :] = amp

    return map, outmap
Example #32
def vei_CoR_veX(X, C=None, R=None):
    """
    Args:
        X:  NxPxS tensor
        C:  PxP column covariance (if None: C set to I_PP)
        R:  NxN row covariance (if None: R set to I_NN)
    Returns:
        NxPxS tensor obtained as ve^{-1}((C \kron R) ve(X))
        where ve(X) reshapes X as a NPxS matrix.
    """
    _X = X.transpose((0,2,1))
    if R is not None:   RV = sp.tensordot(R, _X, (1,0))
    else:               RV = _X
    if C is not None:   RV = sp.dot(RV, C.T)
    return RV.transpose((0,2,1))
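A small numpy check of the identity stated in the docstring, written here for illustration only (it assumes vei_CoR_veX above is in scope; ve is the column-stacking reshape, i.e. Fortran order):

import numpy as np

N, P, S = 4, 3, 2
rng = np.random.default_rng(0)
X = rng.standard_normal((N, P, S))
C = rng.standard_normal((P, P))
R = rng.standard_normal((N, N))

veX = X.reshape(N * P, S, order='F')                          # ve(X): NP x S
expected = (np.kron(C, R) @ veX).reshape(N, P, S, order='F')  # ve^{-1}((C kron R) ve(X))

assert np.allclose(vei_CoR_veX(X, C=C, R=R), expected)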
Example #33
 def expect_string_1s(self, op, n, d):
     """Calculates the expectation values of finite strings
     with lengths 1 to d, starting at position n.
     """
     if callable(op):
         op = sp.vectorize(op, otypes=[sp.complex128])
         op = sp.fromfunction(op, (self.q, self.q))
     
     res = sp.zeros((d), dtype=self.A[1].dtype)
     x = self.l[n - 1]
     for j in range(n, n + d + 1):
         Aop = sp.tensordot(op, self.A[j], axes=([1],[0]))
         x = tm.eps_l_noop(x, self.A[j], Aop)
         res[j - n - 1] = m.adot(x, self.r[j])
     
     return res
Example #34
 def expect_string_1s(self, op, n, d):
     """Calculates the expectation values of finite strings
     with lengths 1 to d, starting at position n.
     """
     if callable(op):
         op = sp.vectorize(op, otypes=[sp.complex128])
         op = sp.fromfunction(op, (self.q, self.q))
     
     res = sp.zeros((d), dtype=self.A[1].dtype)
     x = self.l[n - 1]
     for j in xrange(n, n + d + 1):
         Aop = sp.tensordot(op, self.A[j], axes=([1],[0]))
         x = tm.eps_l_noop(x, self.A[j], Aop)
         res[j - n - 1] = m.adot(x, self.r[j])
     
     return res
 def test_partial_dot_mat_mat(self):
     mat1 = sp.asarray(self.mat)
     mat1.shape = (4, 3, 2, 5)
     mat1 = algebra.make_mat(mat1, axis_names=('time', 'x', 'y', 'z'),
                             row_axes=(0,), col_axes=(1, 2, 3))
     mat2 = sp.asarray(self.mat)
     mat2.shape = (4, 2, 3, 5)
     mat2 = algebra.make_mat(mat2, axis_names=('w', 'y', 'x', 'freq'), 
                             row_axes=(0, 1, 2), col_axes=(3,))
     result = algebra.partial_dot(mat1, mat2)
     self.assertEqual(result.axes, ('time', 'w', 'z', 'freq'))
     self.assertEqual(result.rows, (0, 1))
     self.assertEqual(result.cols, (2, 3))
     self.assertEqual(result.shape, (4, 4, 5, 5))
     right_ans = sp.tensordot(mat1, mat2, ((1, 2), (2, 1)))
     right_ans = sp.swapaxes(right_ans, 1, 2)
     self.assertTrue(sp.allclose(right_ans, result))
Example #36
def sdss_to_usno(sdss_ugriz):
    """
    Return the estimated USNO 1m estimated magnitudes from SDSS 2.5m ones.

    Args:
        sdss_ugriz(5xN scipy.array):    The values of the u, g, r, i, and z
            magnitudes in the SDSS 2.5m system. Each magnitude is a column.

    Returns:
        5 x N scipy array:
            The values of the u', g', r', i', and z' magnitudes in the USNO 1m
            system in the same format as the input.
    """

    assert sdss_ugriz.shape[0] == 5
    return scipy.tensordot(_sdss_to_usno_matrix,
                           (sdss_ugriz.T + _sdss_to_usno_offset.T).T, 1)
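A hypothetical usage sketch (the magnitudes below are arbitrary placeholders; the function and its module-level transformation matrix and offset are assumed importable):

import numpy as np

sdss_ugriz = np.array([[18.2], [17.1], [16.8], [16.6], [16.5]])  # 5 x 1: one star
usno_ugriz = sdss_to_usno(sdss_ugriz)
print(usno_ugriz.shape)   # (5, 1)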
Example #37
    def apply_rsd(self, velocity_offset, los='local'):

        if not scipy.isscalar(los):
            unit_vector = scipy.array(los, dtype='f8')
            unit_vector /= distance(unit_vector)
        elif los == 'local':
            unit_vector = self.Position / distance(self.Position)[:, None]
        elif los == 'global':
            unit_vector = self.glos
        else:
            axis = los
            if isinstance(los, str): axis = 'xyz'.index(axis)
            unit_vector = scipy.zeros((3), dtype=scipy.float64)
            unit_vector[axis] = 1.
            unit_vector = scipy.tensordot(unit_vector,
                                          self._rotation,
                                          axes=((0, ), (1, )))

        return self.Position + (unit_vector * velocity_offset).sum(
            axis=-1)[:, None] * unit_vector
Example #38
 def solve_t(self, Mt):
     """
     Mt is dim_r x dim_c x d tensor
     """
     if len(Mt.shape) == 2: _Mt = Mt[:, :, sp.newaxis]
     else: _Mt = Mt
     M = _Mt.transpose([0, 2, 1])
     MLc = sp.tensordot(M, self.Lc().T, (2, 0))
     MLcLc = sp.tensordot(MLc, self.Lc(), (2, 0))
     WrMLcWc = sp.tensordot(sp.tensordot(self.Wr(), MLc, (1, 0)),
                            self.Wc().T, (2, 0))
     DWrMLcWc = sp.tensordot(self.D()[:, sp.newaxis, :] * WrMLcWc,
                             self.Wc(), (2, 0))
     WrDWrMLcWcLc = sp.tensordot(self.Wr().T,
                                 sp.tensordot(DWrMLcWc, self.Lc(), (2, 0)),
                                 (1, 0))
     RV = (MLcLc - WrDWrMLcWcLc).transpose([0, 2, 1])
     if len(Mt.shape) == 2: RV = RV[:, :, 0]
     return RV
def atensor(theta, lmax=500):
    """ 
    Compute the tensor for given angles and lmax. 

    ** usage **
      To construct an array for sampling angles, do the following.
      
      A = atensor(theta, lmax)
      ssa = SingleScatteringArray(defining_scattering_data)
      [f_i(theta_j)]_ij... = sp.einsum('ijkl, kl...->ij...', A, ssa)
       
    """
    x = sp.cos(theta)
    out = sp.zeros((6, theta.size, 6, lmax))
    out[(0,3),:,(0,3),:] = gensph(x, 0, 0, lmax)
    out[(4,5),:,(4,5),:] = gensph(x, 0, 2, lmax)
    out[1,:,1,:] = gensph(x,2,2,lmax)
    out[2,:,2,:] = gensph(x,2,-2,lmax)

    # pre and post arrays
    a = sp.eye(6)
    a[1:3, 1:3] = .5 * sp.array([[1,1],[1,-1]])
    b = sp.eye(6)
    b[1:3, 1:3] = sp.array([[1,1],[1,-1]])
    
    # make the final array
    # out = sp.einsum('ij,jklm->iklm', a, out) 

    # print(dict(a=a.shape,b=b.shape,out=out.shape))

    out[:] = sp.tensordot(a,out,axes=[1,0])
    


    # next step
    # out = sp.einsum('iklm,lo->ikom', out, b) 
    I = sp.arange(out.ndim)
    out[...] = sp.dot(
        out.transpose((I-1)%I.size), b).transpose((I+1)%I.size)
    return out
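The tensordot/transpose steps at the end of atensor replace the commented-out einsum calls; the equivalence can be checked on random arrays of the same rank (a standalone numpy sketch, not part of the original module):

import numpy as np

a = np.random.rand(6, 6)
b = np.random.rand(6, 6)
out = np.random.rand(6, 11, 6, 8)   # stands in for (6, theta.size, 6, lmax)

# einsum('ij,jklm->iklm', a, out)  <=>  tensordot over a's axis 1 and out's axis 0
assert np.allclose(np.tensordot(a, out, axes=[1, 0]),
                   np.einsum('ij,jklm->iklm', a, out))

# einsum('iklm,lo->ikom', out, b)  <=>  the transpose / dot / transpose trick
I = np.arange(out.ndim)
rolled = np.dot(out.transpose((I - 1) % I.size), b).transpose((I + 1) % I.size)
assert np.allclose(rolled, np.einsum('iklm,lo->ikom', out, b))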
    def angle_eval(self, theta):
        """
        Use atensor to evaluate the scattering matrix elements at vector
        of angles. 
        * in *
          theta - angles to sample scattering matrix elements  [radians]
        """

        # OLD WAYS to compute this
        #
        # # this is a big temporary array
        # big = self.atensor(theta) * self
        # return big.sum(3).sum(2)
        # return sp.einsum("ijkl...,kl...->ij...", self.atensor(theta), self)
        #
        # These were raising SEG FAULTS!!! WHY???
        #

        # Scattering matrix
        aten = self.atensor(theta)
        P = sp.tensordot(aten, self, axes=[(-2, -1), (0, 1)])
        return P
def WGradient(W,sampleX,sampleY,alpha,C,stepSize,iteratorMu,sigma):

    ## Calculate dGammadW
    derivGammadW=dGammadW(sampleY,alpha,W,stepSize,iteratorMu,sigma)
    derivGammadW=-np.dot(C,derivGammadW)
                         
    ## Calculate dFdW (dFdW = dFdG * dGammadW)
    dFdG=dFdGamma(sampleX,sampleY,alpha,W,C,stepSize,iteratorMu,sigma)
    dFdW=scipy.tensordot(dFdG,derivGammadW, axes=[0,0])
        
    return dFdW
Example #42
def svd_spec_time(data, params, file_ind, freq=None, time=None):

    # data[...,:100] = np.inf
    # data[...,-100:] = np.inf
    # data[...,1640:1740] = np.inf
    # data[...,2066:2166] = np.inf

    time_mask = np.logical_not(np.all(np.logical_not(np.isfinite(data)), axis=(2, 3)))
    freq_mask = np.all(np.isfinite(data[time_mask[..., None, None]]), axis=(0, 2))

    data[freq_mask[None, :, None, :]] = np.ma.masked

    # freq_mask = np.any(np.isfinite(data), axis=(0, 2))
    weights = np.ones(data.shape)
    data_mask = np.logical_not(np.isfinite(data))
    weights[data_mask] = 0.0
    data[data_mask] = 0.0

    # if np.sum(weights) < np.prod(weights.shape) * 0.1:
    #    #print "Warning: too much data masked, no svd performed"
    #    msg = ("WARNING: too much data masked, no svd performed")
    #    warnings.warn(msg)
    #    data[data_mask] = np.inf
    #    return data

    sh = data.shape

    # for XX
    data_svd = data[:, 0, :, :].reshape([-1, sh[-1]])[:, freq_mask[0, :]]
    weight_svd = weights[:, 0, :, :].reshape([-1, sh[-1]])[:, freq_mask[0, :]]
    # check flag percent
    weight_svd = np.ma.array(weight_svd)
    weight_svd[weight_svd == 0] = np.ma.masked
    percent = float(np.ma.count_masked(weight_svd)) / weight_svd.size * 100
    print "Flag percent XX: %f%%" % percent
    if np.sum(weight_svd) < np.prod(weight_svd.shape) * 0.1 or data_svd.shape[-1] < 10:
        # print "Warning: too much data masked, no svd performed"
        msg = "WARNING: too much data masked for XX, no svd performed"
        warnings.warn(msg)
        data[data_mask] = np.inf
        return data
    vec_t, val, vec_f = linalg.svd(data_svd)
    vec_f = vec_f.T
    sorted_index = np.argsort(val)[::-1]

    vec_t = vec_t[:, sorted_index]
    vec_f = vec_f[:, sorted_index]
    val = val[sorted_index]

    modes = params["modes"]
    amps = sp.empty((modes, data_svd.shape[0]))
    for i in np.arange(modes):

        amp = sp.tensordot(vec_f[:, i], data_svd * weight_svd, axes=(0, 1))
        amp /= sp.tensordot(vec_f[:, i], vec_f[:, i][None, :] * weight_svd, axes=(0, 1))

        data_svd -= vec_f[:, i][None, :] * amp[:, None]

        amps[i, :] = amp
        del amp

    data[:, 0, :, :][..., freq_mask[0, :]] = data_svd.reshape([sh[0], 2, -1])

    if params["save_svd"]:

        f_name = params["output_root"] + params["file_middles"][file_ind] + "_svd_XX.hdf5"
        utils.mkparents(f_name)
        f = h5py.File(f_name, "w")
        f["singular_values"] = val
        f["left_vectors"] = vec_t.T
        f["right_vectors"] = vec_f.T
        # f['outmap_left'] = outmap_left
        f["outmap_right"] = amps
        # f['map_left'] = map1
        f["map_right"] = data[:, 0, :, :]
        f["freq_mask"] = freq_mask[0, :]
        f["freq"] = freq
        f["time"] = time

        f.close()

    if params["save_plot"]:
        f_name = params["output_root"] + params["file_middles"][file_ind] + "_svd_XX.hdf5"
        utils.mkparents(f_name)
        check_svd(f_name, [val, vec_t.T, vec_f.T], freq_mask[0, :], freq)
        check_map(f_name, np.ma.array(data[:, 0, :, :]), time, freq)

    del data_svd, weight_svd, val, vec_t, vec_f, amps
    gc.collect()

    # for YY
    data_svd = data[:, 3, :, :].reshape([-1, sh[-1]])[:, freq_mask[3, :]]
    weight_svd = weights[:, 3, :, :].reshape([-1, sh[-1]])[:, freq_mask[3, :]]
    # check flag percent
    weight_svd = np.ma.array(weight_svd)
    weight_svd[weight_svd == 0] = np.ma.masked
    percent = float(np.ma.count_masked(weight_svd)) / weight_svd.size * 100
    print "Flag percent XX: %f%%" % percent
    if np.sum(weight_svd) < np.prod(weight_svd.shape) * 0.1 or data_svd.shape[-1] < 10:
        # print "Warning: too much data masked, no svd performed"
        msg = "WARNING: too much data masked for YY, no svd performed"
        warnings.warn(msg)
        data[data_mask] = np.inf
        return data
    vec_t, val, vec_f = linalg.svd(data_svd)
    vec_f = vec_f.T
    sorted_index = np.argsort(val)[::-1]

    vec_t = vec_t[:, sorted_index]
    vec_f = vec_f[:, sorted_index]
    val = val[sorted_index]

    modes = params["modes"]
    amps = sp.empty((modes, data_svd.shape[0]))
    for i in np.arange(modes):

        amp = sp.tensordot(vec_f[:, i], data_svd * weight_svd, axes=(0, 1))
        amp /= sp.tensordot(vec_f[:, i], vec_f[:, i][None, :] * weight_svd, axes=(0, 1))

        data_svd -= vec_f[:, i][None, :] * amp[:, None]

        amps[i, :] = amp
        del amp

    data[:, 3, :, :][..., freq_mask[3, :]] = data_svd.reshape([sh[0], 2, -1])

    if params["save_svd"]:

        f_name = params["output_root"] + params["file_middles"][file_ind] + "_svd_YY.hdf5"
        utils.mkparents(f_name)
        f = h5py.File(f_name, "w")
        f["singular_values"] = val
        f["left_vectors"] = vec_t.T
        f["right_vectors"] = vec_f.T
        # f['outmap_left'] = outmap_left
        f["outmap_right"] = amps
        # f['map_left'] = map1
        f["map_right"] = data[:, 3, :, :]
        f["freq_mask"] = freq_mask[3, :]
        f["freq"] = freq
        f["time"] = time

        f.close()

    if params["save_plot"]:
        f_name = params["output_root"] + params["file_middles"][file_ind] + "_svd_YY.hdf5"
        utils.mkparents(f_name)
        check_svd(f_name, [val, vec_t.T, vec_f.T], freq_mask[3, :], freq)
        check_map(f_name, np.ma.array(data[:, 3, :, :]), time, freq)

    del data_svd, weight_svd, val, vec_t, vec_f, amps
    gc.collect()

    data[data_mask] = np.inf

    return data
Example #43
    def calc_BHB_prereq(self, tdvp, tdvp2):
        """Calculates prerequisites for the application of the effective Hamiltonian in terms of tangent vectors.
        
        This is called (indirectly) by the self.excite.. functions.
        
        Parameters
        ----------
        tdvp2: EvoMPS_TDVP_Uniform
            Second state (may be the same, or another ground state).
            
        Returns
        -------
        A lot of stuff.
        """
        l = tdvp.l[0]
        r_ = tdvp2.r[0]
        r__sqrt = tdvp2.r_sqrt[0]
        r__sqrt_i = tdvp2.r_sqrt_i[0]
        A = tdvp.A[0]
        A_ = tdvp2.A[0]
        AA = tdvp.AA[0]
        AA_ = tdvp2.AA[0]
        AAA_ = tdvp2.AAA[0]
        
        eyed = np.eye(self.q**self.ham_sites)
        eyed = eyed.reshape(tuple([self.q] * self.ham_sites * 2))
        ham_ = self.ham - tdvp.h_expect.real * eyed
            
        V_ = sp.transpose(tdvp2.Vsh[0], axes=(0, 2, 1)).conj().copy(order='C')
        
        Vri_ = sp.zeros_like(V_)
        try:
            for s in range(self.q):
                Vri_[s] = r__sqrt_i.dot_left(V_[s])
        except AttributeError:
            for s in range(self.q):
                Vri_[s] = V_[s].dot(r__sqrt_i)

        Vr_ = sp.zeros_like(V_)            
        try:
            for s in range(self.q):
                Vr_[s] = r__sqrt.dot_left(V_[s])
        except AttributeError:
            for s in range(self.q):
                Vr_[s] = V_[s].dot(r__sqrt)
                
        Vri_A_ = tm.calc_AA(Vri_, A_)
                
        if self.ham_sites == 2:
            _C_AhlA = np.empty((self.q, self.q, A.shape[2], A.shape[2]), dtype=tdvp.typ)
            for u in range(self.q):
                for s in range(self.q):
                    _C_AhlA[u, s] = A[u].conj().T.dot(l.dot(A[s]))
            C_AhlA = sp.tensordot(ham_, _C_AhlA, ((0, 2), (0, 1)))
            C_AhlA = sp.transpose(C_AhlA, axes=(1, 0, 2, 3)).copy(order='C')
            
            _C_A_Vrh_ = tm.calc_AA(A_, sp.transpose(Vr_, axes=(0, 2, 1)).conj())
            C_A_Vrh_ = sp.tensordot(ham_, _C_A_Vrh_, ((3, 1), (0, 1)))
            C_A_Vrh_ = sp.transpose(C_A_Vrh_, axes=(1, 0, 2, 3)).copy(order='C')
            
            C_Vri_A_conj = tm.calc_C_conj_mat_op_AA(ham_, Vri_A_).copy(order='C')
    
            C_ = tm.calc_C_mat_op_AA(ham_, AA_).copy(order='C')
            C_conj = tm.calc_C_conj_mat_op_AA(ham_, AA_).copy(order='C')
            
            rhs10 = tm.eps_r_op_2s_AA12_C34(r_, AA_, C_Vri_A_conj)
            
            return C_, C_conj, V_, Vr_, Vri_, C_Vri_A_conj, C_AhlA, C_A_Vrh_, rhs10
        elif self.ham_sites == 3:
            C_Vri_AA_ = np.empty((self.q, self.q, self.q, Vri_.shape[1], A_.shape[2]), dtype=tdvp.typ)
            for s in range(self.q):
                for t in range(self.q):
                    for u in range(self.q):
                        C_Vri_AA_[s, t, u] = Vri_[s].dot(AA_[t, u])
            C_Vri_AA_ = sp.tensordot(ham_, C_Vri_AA_, ((3, 4, 5), (0, 1, 2))).copy(order='C')
            
            C_AAA_r_Ah_Vrih = np.empty((self.q, self.q, self.q, self.q, self.q, #FIXME: could be too memory-intensive
                                        A_.shape[1], Vri_.shape[1]), 
                                       dtype=tdvp.typ)
            for s in range(self.q):
                for t in range(self.q):
                    for u in range(self.q):
                        for k in range(self.q):
                            for j in range(self.q):
                                C_AAA_r_Ah_Vrih[s, t, u, k, j] = AAA_[s, t, u].dot(r_.dot(A_[k].conj().T)).dot(Vri_[j].conj().T)
            C_AAA_r_Ah_Vrih = sp.tensordot(ham_, C_AAA_r_Ah_Vrih, ((3, 4, 5, 2, 1), (0, 1, 2, 3, 4))).copy(order='C')
            
            C_AhAhlAA = np.empty((self.q, self.q, self.q, self.q,
                                  A_.shape[2], A.shape[2]), dtype=tdvp.typ)
            for t in range(self.q):
                for j in range(self.q):
                    for i in range(self.q):
                        for s in range(self.q):
                            C_AhAhlAA[j, t, i, s] = AA[i, j].conj().T.dot(l.dot(AA[s, t]))
            C_AhAhlAA = sp.tensordot(ham_, C_AhAhlAA, ((4, 1, 0, 3), (1, 0, 2, 3))).copy(order='C')
            
            C_AA_r_Ah_Vrih_ = np.empty((self.q, self.q, self.q, self.q,
                                        A_.shape[1], Vri_.shape[1]), dtype=tdvp.typ)
            for t in range(self.q):
                for u in range(self.q):
                    for k in range(self.q):
                        for j in range(self.q):
                            C_AA_r_Ah_Vrih_[u, t, k, j] = AA_[t, u].dot(r_.dot(A_[k].conj().T)).dot(Vri_[j].conj().T)
            C_AA_r_Ah_Vrih_ = sp.tensordot(ham_, C_AA_r_Ah_Vrih_, ((4, 5, 2, 1), (1, 0, 2, 3))).copy(order='C')
            
            C_AAA_Vrh_ = np.empty((self.q, self.q, self.q, self.q,
                                   A_.shape[1], Vri_.shape[1]), dtype=tdvp.typ)
            for s in range(self.q):
                for t in range(self.q):
                    for u in range(self.q):
                        for k in range(self.q):
                            C_AAA_Vrh_[s, t, u, k] = AAA_[s, t, u].dot(Vr_[k].conj().T)
            C_AAA_Vrh_ = sp.tensordot(ham_, C_AAA_Vrh_, ((3, 4, 5, 2), (0, 1, 2, 3))).copy(order='C')
            
            C_Vri_A_r_Ah_ = np.empty((self.q, self.q, self.q,
                                      A_.shape[2], Vri_.shape[1]), dtype=tdvp.typ)
            for u in range(self.q):
                for k in range(self.q):
                    for j in range(self.q):
                        C_Vri_A_r_Ah_[u, k, j] = Vri_[j].dot(A_[k]).dot(r_.dot(A_[u].conj().T))
            C_Vri_A_r_Ah_ = sp.tensordot(ham_.conj(), C_Vri_A_r_Ah_, ((5, 2, 1), (0, 1, 2))).copy(order='C')
            
            C_AhlAA = np.empty((self.q, self.q, self.q,
                                      A_.shape[2], A.shape[2]), dtype=tdvp.typ)
            for j in range(self.q):
                for i in range(self.q):
                    for s in range(self.q):
                        C_AhlAA[j, i, s] = A[s].conj().T.dot(l.dot(AA[i, j]))
            C_AhlAA_conj = sp.tensordot(ham_.conj(), C_AhlAA, ((1, 0, 3), (0, 1, 2))).copy(order='C')
            C_AhlAA = sp.tensordot(ham_, C_AhlAA, ((4, 3, 0), (0, 1, 2)))
            C_AhlAA = sp.transpose(C_AhlAA, axes=(2, 0, 1, 3, 4)).copy(order='C')
            
            C_AA_Vrh = np.empty((self.q, self.q, self.q,
                                      A_.shape[2], Vr_.shape[1]), dtype=tdvp.typ)
            for t in range(self.q):
                for u in range(self.q):
                    for k in range(self.q):
                        C_AA_Vrh[k, u, t] = AA_[t, u].dot(Vr_[k].conj().T)
            C_AA_Vrh = sp.tensordot(ham_, C_AA_Vrh, ((4, 5, 2), (2, 1, 0))).copy(order='C')
            
            C_ = sp.tensordot(ham_, AAA_, ((3, 4, 5), (0, 1, 2))).copy(order='C')
            
            rhs10 = tm.eps_r_op_3s_C123_AAA456(r_, AAA_, C_Vri_AA_)

            #NOTE: These C's are good as C12 or C34, but only because h is Hermitian!
            #TODO: Make this consistent with the updated 2-site case above.
            
            return V_, Vr_, Vri_, Vri_A_, C_, C_Vri_AA_, C_AAA_r_Ah_Vrih, C_AhAhlAA, C_AA_r_Ah_Vrih_, C_AAA_Vrh_, C_Vri_A_r_Ah_, C_AhlAA, C_AhlAA_conj, C_AA_Vrh, rhs10,
Example #44
def calc_C_conj_mat_op_AA(op, AA):
    return sp.tensordot(op.conj(), AA, ((0, 1), (0, 1)))
Example #45
def calc_C_3s_mat_op_AAA(op, AAA):
    return sp.tensordot(op, AAA, ((3, 4, 5), (0, 1, 2)))
def XYZ2lms(XYZimage):
    """"""
    return sp.tensordot(XYZimage, CIECAM02_CAT, axes=(-1, 1))
Example #47
def calc_C_mat_op_AA(op, AA):
    return sp.tensordot(op, AA, ((2, 3), (0, 1)))
def lms2rgb(lmsimage):
    """"""
    return xyz2rgb(
        sp.tensordot(lmsimage, sp.linalg.inv(CIECAM02_CAT), axes=(-1, 1)))
Example #49
    def _LMLgrad_covar(self,covar,**kw_args):
        """
        calculates LMLgrad for covariance parameters
        """
        # precompute some stuff
        if covar=='Cr':
            trR = self.cache['trXrXr'] 
            RLZ = self.cache['XrXrLZ']
            SrDWLZ = self.cache['SgDWLZ']
            WrRLZ = self.cache['WrXrXrLZ']
            diagSr = self.cache['Sg']
            n_params = self.Cr.getNumberParams()
            if self.F is not None:
                SrDWLY = self.cache['SgDWLY']
                WrRLY = self.cache['WrXrXrLY']
                SrDWLV_t = self.cache['SgDWLV_t']
                WrRLF = self.cache['WrXrXrLrF']
                FRF = self.cache['FLrXrXrLrF']
                FRLrY = self.cache['FXrXrLrY']
        elif covar=='Cn':
            trR = self.N
            RLZ = self.cache['LZ']
            SrDWLZ = self.cache['DWLZ']
            WrRLZ = self.cache['WrLZ']
            diagSr = SP.ones(self.S)
            n_params = self.Cn.getNumberParams()
            if self.F is not None:
                SrDWLY = self.cache['DWLY']
                WrRLY = self.cache['WrLY']
                SrDWLV = self.cache['DWLV']
                WrRLF = self.cache['WrLrF']
                SrDWLV_t = self.cache['DWLV_t']
                FRF = self.cache['FF']
                FRLrY = self.cache['FY']

        # fill gradient vector
        RV = SP.zeros(n_params)
        for i in range(n_params):

            #0. calc LCL
            start = TIME.time()
            if covar=='Cr':     C = self.Cr.Kgrad_param(i)
            elif covar=='Cn':   C = self.Cn.Kgrad_param(i)
            LCL = SP.dot(self.cache['Lc'],SP.dot(C,self.cache['Lc'].T))
            LLCLL = SP.dot(self.cache['Lc'].T,SP.dot(LCL,self.cache['Lc']))
            LCLW = SP.dot(LCL,self.cache['Wc'].T)
            WLCLW = SP.dot(self.cache['Wc'],LCLW)

            CoRLZ = SP.dot(RLZ,LCL.T)
            CoSrDWLZ = SP.dot(SrDWLZ,WLCLW.T)
            WCoRLZ = SP.dot(WrRLZ,LCLW)

            if self.F is not None:
                WcCLcA = SP.dot(SP.dot(self.cache['Wc'],LCL),self.cache['LcA'])
                CoSrDWLY = SP.dot(SrDWLY,WLCLW.T)
                DCoSrDWLY = self.cache['D']*CoSrDWLY
                WCoRLY = SP.dot(WrRLY,LCLW)
                DWCoRLY = self.cache['D']*WCoRLY

                #0a. grad of Areml
                if 1:
                    Areml_grad = SP.dot(SP.kron(WcCLcA,WrRLF).T,self.cache['DWLV'])
                else:
                    Areml_grad = SP.tensordot(SP.tensordot(WrRLF,self.cache['DWLV_t'],axes=(0,0)),WcCLcA,axes=(1,0))
                    # and then resize...
                Areml_grad+= Areml_grad.T
                Areml_grad-= SP.kron(LLCLL,FRF) #TODO: think about LLCLL
                CoSrDWLV_t = SP.tensordot(SrDWLV_t,WLCLW,axes=(1,1))
                Areml_grad-= SP.tensordot(self.cache['DWLV_t'],CoSrDWLV_t,axes=([0,1],[0,2]))

                #0b. grad of beta
                B_grad1 = -SP.dot(FRLrY,LLCLL)
                B_grad1-= SP.dot(SP.dot(self.cache['WrLrF'].T,DCoSrDWLY),self.cache['WcLcA'])
                B_grad1+= SP.dot(SP.dot(WrRLF.T,self.cache['DWLY']),WcCLcA)
                B_grad1+= SP.dot(SP.dot(self.cache['WrLrF'].T,DWCoRLY),self.cache['WcLcA'])
                b_grad = SP.reshape(B_grad1,(self.K*self.P,1),order='F')
                b_grad-= SP.dot(Areml_grad,self.cache['b'])
                b_grad = SP.dot(self.cache['Areml_inv'],b_grad)

            #1. der of log det
            start = TIME.time()
            trC = LCL.diagonal().sum()
            RV[i] = trC*trR
            RV[i]-= SP.dot(self.cache['d'],SP.kron(WLCLW.diagonal(),diagSr))
            smartSum(self.time,'lmlgrad_trace',TIME.time()-start)
            smartSum(self.count,'lmlgrad_trace',1)

            #2. der of quad form
            start = TIME.time()
            RV[i]-= SP.sum(self.cache['LZ']*CoRLZ)
            RV[i]-= SP.sum(self.cache['DWLZ']*CoSrDWLZ)
            RV[i]+= 2*SP.sum(self.cache['DWLZ']*WCoRLZ)
            if self.F is not None:
                RV[i]-= 2*SP.dot(self.cache['vecVKiZ'].T,b_grad)
            smartSum(self.time,'lmlgrad_quadform',TIME.time()-start)
            smartSum(self.count,'lmlgrad_quadform',1)

            if self.F is not None:
                #3. reml term
                RV[i] += (self.cache['Areml_inv']*Areml_grad).sum()

            RV[i] *= 0.5

        return RV
Example #50
    def _update_cache(self):
        """
        Update cache
        """
        cov_params_have_changed = self.Cr.params_have_changed or self.Cg.params_have_changed or self.Cn.params_have_changed

        if self.XX_has_changed:
            start = TIME.time()
            """ Row SVD Bg + Noise """
            self.cache['Srstar'],Urstar  = LA.eigh(self.XX)
            self.cache['Lr']   = Urstar.T
            self.mean.setRowRotation(Lr=self.cache['Lr'])

            smartSum(self.time,'cache_XXchanged',TIME.time()-start)
            smartSum(self.count,'cache_XXchanged',1)
        
        if self.Xr_has_changed or self.XX_has_changed:
            start = TIME.time()
            """ rotate Xr and XrXr """
            self.cache['LXr']    = SP.dot(self.cache['Lr'],self.Xr)
            smartSum(self.time,'cache_Xrchanged',TIME.time()-start)
            smartSum(self.count,'cache_Xrchanged',1)

        if cov_params_have_changed:
            start = TIME.time()
            """ Col SVD Bg + Noise """
            S2,U2 = LA.eigh(self.Cn.K()+self.offset*SP.eye(self.P))
            self.cache['Sc2'] = S2
            US2   = SP.dot(U2,SP.diag(SP.sqrt(S2)))
            USi2  = SP.dot(U2,SP.diag(SP.sqrt(1./S2)))
            Cstar = SP.dot(USi2.T,SP.dot(self.Cg.K(),USi2))
            self.cache['Scstar'],Ucstar = LA.eigh(Cstar)
            self.cache['Lc'] = SP.dot(Ucstar.T,USi2.T)

            """ pheno """
            self.mean.setColRotation(self.cache['Lc'])

            """ region part """
            self.cache['A']   = SP.reshape(self.Cr.getParams(),(self.P,self.rank),order='F')
            self.cache['LAc'] = SP.dot(self.cache['Lc'],self.cache['A'])

        if cov_params_have_changed or self.XX_has_changed:
            """ S """
            self.cache['s'] = SP.kron(self.cache['Scstar'],self.cache['Srstar'])+1
            self.cache['d'] = 1./self.cache['s']
            self.cache['D'] = SP.reshape(self.cache['d'],(self.N,self.P), order='F')

            """ pheno """
            self.cache['LY']  = self.mean.evaluate()
            self.cache['DLY'] = self.cache['D']*self.cache['LY']

            smartSum(self.time,'cache_colSVDpRot',TIME.time()-start)
            smartSum(self.count,'cache_colSVDpRot',1)

        if cov_params_have_changed or self.XX_has_changed or self.Xr_has_changed:

            """ calculate B =  I + kron(LcA,LrXr).T*D*kron(kron(LcA,LrXr)) """
            start = TIME.time()
            W                = SP.kron(self.cache['LAc'],self.cache['LXr'])
            self.cache['DW']  = W*self.cache['d'][:,SP.newaxis]
            self.cache['DWt'] = self.cache['DW'].reshape((self.N,self.P,self.rank*self.S),order='F')
            #B  = NP.einsum('ijk,jl->ilk',self.cache['DWt'],self.cache['LAc'])
            #B  = NP.einsum('ji,jlk->ilk',self.cache['LXr'],B)
            B = SP.tensordot(self.cache['DWt'],self.cache['LAc'],axes=(1,0)) 
            B = NP.transpose(B, (0, 2, 1))
            B = SP.tensordot(self.cache['LXr'],B,axes=(0,0))
            B = B.reshape((self.rank*self.S,self.rank*self.S),order='F')
            B+= SP.eye(self.rank*self.S)
            smartSum(self.time,'cache_calcB',TIME.time()-start)
            smartSum(self.count,'cache_calcB',1)

            """ invert B """
            start = TIME.time()
            self.cache['cholB'] = LA.cholesky(B).T
            self.cache['Bi']    = LA.cho_solve((self.cache['cholB'],True),SP.eye(self.S*self.rank))
            smartSum(self.time,'cache_invB',TIME.time()-start)
            smartSum(self.count,'cache_invB',1)
            
            """ pheno """
            start = TIME.time()
            Z = SP.dot(self.cache['LXr'].T,SP.dot(self.cache['DLY'],self.cache['LAc']))
            self.cache['z']           = SP.reshape(Z,(self.S*self.rank), order='F')
            self.cache['Biz']         = LA.cho_solve((self.cache['cholB'],True),self.cache['z'])
            BiZ = SP.reshape(self.cache['Biz'],(self.S,self.rank), order='F')
            self.cache['DLYpDLXBiz']  = SP.dot(self.cache['LXr'],SP.dot(BiZ,self.cache['LAc'].T))
            self.cache['DLYpDLXBiz'] *= -self.cache['D']
            self.cache['DLYpDLXBiz'] += self.cache['DLY']
            smartSum(self.time,'cache_phenoCalc',TIME.time()-start)
            smartSum(self.count,'cache_phenoCalc',1)

        self.XX_has_changed = False
        self.Xr_has_changed = False
        self.Y_has_changed  = False
        self.Cr.params_have_changed = False
        self.Cg.params_have_changed = False
        self.Cn.params_have_changed = False
Example #51
    def subtract_frequency_modes(self, modes1, modes2=None,
                                 weighted=False, defer=False):
        r"""Subtract frequency modes from the map.
        """

        if modes2 is None:
            modes2 = modes1

        # First map.
        outmap_left = sp.empty((len(modes1), ) + self.map1.shape[1:])
        outmap_left = algebra.make_vect(outmap_left,
                                     axis_names=('freq', 'ra', 'dec'))
        outmap_left.copy_axis_info(self.map1)

        if defer:
            fitted = np.zeros_like(self.map1[self.freq, :, :])

        for mode_index, mode_vector in enumerate(modes1):
            mode_vector = mode_vector.reshape(self.freq.shape)

            if weighted:
                amp = sp.tensordot(mode_vector, self.map1[self.freq, :, :] *
                                self.noise_inv1[self.freq, :, :], axes=(0,0))
                amp /= sp.tensordot(mode_vector, mode_vector[:, None, None] *
                                self.noise_inv1[self.freq, :, :], axes=(0,0))
            else:
                amp = sp.tensordot(mode_vector,
                                   self.map1[self.freq, :, :], axes=(0,0))
                #amp /= sp.dot(mode_vector, mode_vector)

            if defer:
                fitted += mode_vector[:, None, None] * amp[None, :, :]
            else:
                fitted = mode_vector[:, None, None] * amp[None, :, :]
                self.map1[self.freq, :, :] -= fitted

            outmap_left[mode_index, :, :] = amp

        if defer:
            self.map1 -= fitted

        self.left_modes = outmap_left

        # Second map.
        outmap_right = sp.empty((len(modes2), ) + self.map2.shape[1:])
        outmap_right = algebra.make_vect(outmap_right,
                                     axis_names=('freq', 'ra', 'dec'))
        outmap_right.copy_axis_info(self.map2)

        if defer:
            fitted = np.zeros_like(self.map2[self.freq, :, :])

        for mode_index, mode_vector in enumerate(modes2):
            mode_vector = mode_vector.reshape(self.freq.shape)

            if weighted:
                amp = sp.tensordot(mode_vector, self.map2[self.freq, :, :] *
                                self.noise_inv2[self.freq, :, :], axes=(0,0))
                amp /= sp.tensordot(mode_vector, mode_vector[:, None, None] *
                                self.noise_inv2[self.freq, :, :], axes=(0,0))
            else:
                amp = sp.tensordot(mode_vector,
                                   self.map2[self.freq, :, :], axes=(0,0))
                #amp /= sp.dot(mode_vector, mode_vector)

            if defer:
                fitted += mode_vector[:, None, None] * amp[None, :, :]
            else:
                fitted = mode_vector[:, None, None] * amp[None, :, :]
                self.map2[self.freq, :, :] -= fitted

            outmap_right[mode_index, :, :] = amp

        if defer:
            self.map2 -= fitted

        self.right_modes = outmap_right