Example #1
    def test_qr(self):
        Q, R = snp.qr(self.tens, [0, 1, 2], [3, 4, 5])

        QR = snp.tensordot(Q, R, ([3], [0]))
        QR.__squeeze__()
        diff1 = set(self.tens.__keys__()).difference(QR.__keys__())
        diff2 = set(QR.__keys__()).difference(self.tens.__keys__())

        self.assertTrue(len(diff1) == 0)
        self.assertTrue(len(diff2) == 0)

        for k in self.tens._tensor.keys():
            self.assertTrue(
                np.linalg.norm(self.tens[k] - QR[k]) /
                utils.prod(QR[k].shape) < self.eps)

        for k in QR._tensor.keys():
            self.assertTrue(
                np.linalg.norm(self.tens[k] - QR[k]) /
                utils.prod(QR[k].shape) < self.eps)

        unit = snp.tensordot(Q, snp.conj(Q), ([0, 1, 2], [0, 1, 2]))
        for k in unit._tensor.keys():
            self.assertTrue(
                np.linalg.norm(unit[k] - np.eye(unit[k].shape[0])) /
                utils.prod(unit[k].shape) < self.eps)
Example #2
    def test_eye(self):
        ind = self.rank - 1
        eye1 = self.tens.__eye__(ind, 0)
        eye2 = self.tens.__eye__(ind, 1)
        r1 = snp.tensordot(self.tens, eye1, ([ind], [0]))
        r2 = snp.tensordot(self.tens, eye2, ([ind], [1]))

        diff1 = set(r1.__keys__()).difference(self.tens.__keys__())
        diff2 = set(self.tens.__keys__()).difference(r1.__keys__())
        self.assertTrue(len(diff1) == 0)
        self.assertTrue(len(diff2) == 0)

        diff1 = set(r2.__keys__()).difference(self.tens.__keys__())
        diff2 = set(self.tens.__keys__()).difference(r2.__keys__())
        self.assertTrue(len(diff1) == 0)
        self.assertTrue(len(diff2) == 0)

        for k in r1._tensor.keys():
            self.assertTrue(
                np.linalg.norm(r1[k] - self.tens[k]) /
                utils.prod(self.tens[k].shape) < self.eps)
        for k in r2._tensor.keys():
            self.assertTrue(
                np.linalg.norm(r2[k] - self.tens[k]) /
                utils.prod(self.tens[k].shape) < self.eps)
Example #3
    def test_transpose(self):
        newinds = random.sample(range(self.rank), self.rank)
        transp = snp.transpose(self.tens, newinds)
        shapes = transp.__shapes__()
        for k in shapes:
            self.assertTrue(
                utils.prod(utils.flatten(tuple(shapes[k]))) == utils.prod(
                    transp[k].shape))
Example #4
    def test_tensordot_merged(self):
        num = random.sample(range(self.rank - 1), 1)[0]
        inds = random.sample(range(self.rank - 1),
                             random.sample(range(1, self.rank), 1)[0])
        tens = snp.mergeSingle(self.tens, [num, num + 1])
        r = snp.tensordot(tens, tens, (inds, inds), ignore_qflow=True)

        full = tens.__tondarray__()
        rfull = np.tensordot(full, full, (inds, inds))
        self.assertTrue(np.linalg.norm(rfull - r.__tondarray__()) < 1E-10)
        shapes = r.__shapes__()
        for k in shapes:
            self.assertTrue(
                utils.prod(utils.flatten(tuple(shapes[k]))) == utils.prod(
                    r[k].shape))
Example #5
    def __random__(self, index, which=0):
        if which not in (0, 1):
            sys.exit(
                'SparseTensor.__random__(self,index={0},which={1}): which has to be 0 or 1.'
                .format(index, which))
        Ds = [copy.deepcopy(self._Ds[index]), copy.deepcopy(self._Ds[index])]
        if which == 0:
            qflow = tuple(
                [utils.flipsigns(self._qflow[index]), self._qflow[index]])
        elif which == 1:
            qflow = tuple(
                [self._qflow[index],
                 utils.flipsigns(self._qflow[index])])

        mergelevel = tuple([self._mergelevel[index], self._mergelevel[index]])
        keytoq = tuple([self._ktoq[index], self._ktoq[index]])
        iden = SparseTensor(keys=[],
                            values=[],
                            Ds=Ds,
                            qflow=qflow,
                            keytoq=keytoq,
                            mergelevel=mergelevel,
                            dtype=self._dtype)
        for k in self._Ds[index].keys():
            key = tuple([k, k])
            temp = self._Ds[index][k]
            shape = tuple([temp, temp])
            size = utils.prod(utils.flatten(temp))
            iden.__insertmergedblock__(
                key, shape,
                np.random.rand(size, size).astype(self._dtype))

        return iden
Example #6
def vectorize(tensor):
    #determine the necessary size:
    shapes={}
    if (len(tensor._shapes)==0):
        tensor.__shapes__()
    size=0
    for k in sorted(tensor.__keys__()):
        shapes[k]=(size,np.shape(tensor[k]))
        size+=utils.prod(tensor[k].shape)
    vec=np.zeros(size,dtype=tensor._dtype)
    for k in sorted(shapes.keys()):
        start=shapes[k][0]
        length=utils.prod(shapes[k][1])
        end=start+length
        vec[start:end]=np.reshape(tensor[k],length)
    return vec,list([shapes,tensor._qflow,tensor._mergelevel,tensor._shapes,tensor._ktoq])
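The packing scheme vectorize implements can be sketched with plain numpy, independently of the SparseTensor machinery: flatten every block into one contiguous vector and remember each block's offset and shape so the vector can be unpacked again (tensorize in Example #17 performs the unpacking for SparseTensor, additionally restoring the quantum-number metadata). The helper names below are illustrative only and not part of the library; real-valued blocks are assumed for brevity.

import numpy as np

def pack_blocks(blocks):
    #blocks: dict mapping a key to an ndarray block; offsets records (start, shape) per key
    offsets, size = {}, 0
    for k in sorted(blocks):
        offsets[k] = (size, blocks[k].shape)
        size += blocks[k].size
    vec = np.zeros(size)
    for k, (start, shape) in offsets.items():
        vec[start:start + int(np.prod(shape))] = blocks[k].ravel()
    return vec, offsets

def unpack_blocks(vec, offsets):
    #inverse operation: cut the vector back into blocks of the recorded shapes
    return {k: vec[start:start + int(np.prod(shape))].reshape(shape)
            for k, (start, shape) in offsets.items()}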
Example #7
def splitSingle(tensor,index):
    if tensor._mergelevel[index]=='A':
        #index is not a merged index; there is nothing to split
        return tensor.__copy__()
    if tensor._mergelevel[index]!='A':
        #index is a merged index; split it back into its constituent indices
        Ds=[]
        for n in range(tensor._rank+len(tensor._mergelevel[index])-1):
            Ds.append(dict())

        #define the quantum number flow for the split tensor
        qflow=tuple([])
        mergelevel=tuple([])
        keytoq=tuple([])
        for n in range(0,index):
            qflow+=tuple([tensor._qflow[n]])
            mergelevel+=tuple([tensor._mergelevel[n]])
            keytoq+=tuple([tensor._ktoq[n]])            
        for n in range(len(tensor._mergelevel[index])):
            qflow+=tuple([tensor._qflow[index][n]])
            mergelevel+=tuple([tensor._mergelevel[index][n]])
            keytoq+=tuple([tensor._ktoq[index][n]])
            
        for n in range(index+1,tensor._rank):
            qflow+=tuple([tensor._qflow[n]])
            mergelevel+=tuple([tensor._mergelevel[n]])
            keytoq+=tuple([tensor._ktoq[n]])            

        result=spt.SparseTensor(keys=[],values=[],Ds=Ds,qflow=qflow,keytoq=keytoq,mergelevel=mergelevel,dtype=tensor._dtype)

        for key in tensor.__keys__():
            #for each key k, the entry at "index" is now split up into its constituents
            newkey=tuple([])
            for n in range(0,index):
                newkey+=tuple([key[n]])
            for n in range(len(key[index])):
                newkey+=tuple([key[index][n]])
            for n in range(index+1,tensor._rank):
                newkey+=tuple([key[n]])

            
            oldshape=tensor[key].shape
            oldshape_combined=tensor._Ds[index][key[index]]
            newshape=tuple([])
            newshape_combined=tuple([])            
            for n in range(index):
                newshape+=tuple([oldshape[n]])
                newshape_combined+=tuple([tensor._Ds[n][key[n]]])

            newshape_combined+=oldshape_combined            
            for n in range(len(oldshape_combined)):
                newshape+=tuple([utils.prod(utils.flatten(oldshape_combined[n]))])

            for n in range(index+1,len(oldshape)):
                newshape+=tuple([oldshape[n]])
                newshape_combined+=tuple([tensor._Ds[n][key[n]]])                
            result.__insertmergedblock__(newkey,newshape_combined,np.reshape(tensor[key],newshape))
        return result
Example #8
    def __fromndarray__(self, array):
        sizes = tuple([])
        blocks = [dict() for n in range(self._rank)]
        for n in range(self._rank):
            s = 0
            start = 0
            for k in sorted(self._Ds[n].keys()):
                blocks[n][k] = tuple(
                    [start, start + utils.prod(utils.flatten(self._Ds[n][k]))])
                start += utils.prod(utils.flatten(self._Ds[n][k]))
                s += utils.prod(utils.flatten(self._Ds[n][k]))
            sizes += tuple([s])

        for k in self.__keys__():
            b = tuple([])
            for n in range(self._rank):
                b += tuple([slice(blocks[n][k[n]][0], blocks[n][k[n]][1], 1)])
            #assign the corresponding slice of the dense input array to this block
            self._tensor[k] = array[b]
Example #9
    def __squeeze__(self, thresh=1E-14):
        toberemoved = list()
        for k in self._tensor.keys():
            if np.linalg.norm(self[k]) / utils.prod(
                    utils.flatten(self[k].shape)) < thresh:
                toberemoved.append(k)
        for k in toberemoved:
            self.__remove__(k)
        return self
Example #10
    def test_qr_merged(self):
        merged = snp.merge(self.tens, [0, 1], [4, 5])
        Q, R = snp.qr(merged, [0, 1], [2, 3])

        QR = snp.tensordot(Q, R, ([2], [0]))
        QR.__squeeze__()
        diff1 = set(merged.__keys__()).difference(QR.__keys__())
        diff2 = set(QR.__keys__()).difference(merged.__keys__())

        self.assertTrue(len(diff1) == 0)
        self.assertTrue(len(diff2) == 0)

        for k in merged._tensor.keys():
            self.assertTrue(
                np.linalg.norm(merged[k] - QR[k]) /
                utils.prod(QR[k].shape) < self.eps)

        for k in QR._tensor.keys():
            self.assertTrue(
                np.linalg.norm(merged[k] - QR[k]) /
                utils.prod(QR[k].shape) < self.eps)

        unit = snp.tensordot(Q, snp.conj(Q), ([0, 1], [0, 1]))
        for k in unit._tensor.keys():
            self.assertTrue(
                np.linalg.norm(unit[k] - np.eye(unit[k].shape[0])) /
                utils.prod(unit[k].shape) < self.eps)
Example #11
    def test_tensordot_merged_2(self):
        N = 5
        rank = 8
        keys = []
        outind = random.sample(range(rank), 1)[0]

        for n in range(N):
            key = random.sample(range(20), rank - 1)
            key.insert(outind, sum(key))
            keys.append(tuple(key))

        Ds = [dict() for n in range(rank)]
        for n in range(rank):
            for k in keys:
                Ds[n][k[n]] = random.sample(range(2, 8), 1)[0]

        values = []
        for k in keys:
            size = tuple([])
            for n in range(rank):
                size += tuple([Ds[n][k[n]]])
            values.append(np.random.rand(*size))

        l = [1] * rank
        l[outind] = -1
        qflow = tuple(l)
        tens = spt.SparseTensor(keys=keys,
                                values=values,
                                Ds=Ds,
                                qflow=qflow,
                                mergelevel=None,
                                dtype=self.tens._dtype)
        eps = 1E-12

        tens1 = snp.merge(tens, [0, 1], [3, 4])
        tens2 = snp.merge(tens1, [0, 1], [3, 4, 5])
        inds = [1, 2]
        r = snp.tensordot(tens2, tens2, (inds, inds), ignore_qflow=True)
        shapes = r.__shapes__()
        for k in shapes:
            self.assertTrue(
                utils.prod(utils.flatten(tuple(shapes[k]))) == utils.prod(
                    r[k].shape))
Example #12
def eye(D,qflow,mergelevel,dtype=float):
    Ds=[D.copy(),D.copy()]
    iden=spt.SparseTensor(keys=[],values=[],Ds=Ds,qflow=qflow,mergelevel=mergelevel,dtype=dtype)
    for k in D.keys():
        key=tuple([k,k])
        shape=tuple([D[k],D[k]])
        size=utils.prod(utils.flatten(D[k]))
        iden.__insertmergedblock__(key,shape,np.eye(size).astype(dtype))

    return iden
Example #13
    def __tondarray__(self):
        sizes = tuple([])
        blocks = [dict() for n in range(self._rank)]
        for n in range(self._rank):
            s = 0
            start = 0
            for k in sorted(self._Ds[n].keys()):
                blocks[n][k] = tuple(
                    [start, start + utils.prod(utils.flatten(self._Ds[n][k]))])
                start += utils.prod(utils.flatten(self._Ds[n][k]))
                s += utils.prod(utils.flatten(self._Ds[n][k]))
            sizes += tuple([s])

        full = np.zeros(sizes, dtype=self._dtype)
        for k in self.__keys__():
            b = tuple([])
            for n in range(self._rank):
                b += tuple([slice(blocks[n][k[n]][0], blocks[n][k[n]][1], 1)])
            full[b] = self[k]
        return full
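The offset bookkeeping in __tondarray__ reduces to the following numpy-only sketch: per leg, every key is assigned a contiguous (start, stop) range, and each block is written into the dense array at the intersection of its per-leg slices. The helper name and the toy data are illustrative only, not part of the library.

import numpy as np

def blocks_to_dense(keys, blocks, dims):
    #dims[n] maps the key of leg n to that leg's block dimension
    offsets = []
    for d in dims:
        off, start = {}, 0
        for k in sorted(d):
            off[k] = (start, start + d[k])
            start += d[k]
        offsets.append(off)
    #the total size of each leg is the sum of its block dimensions
    full = np.zeros(tuple(sum(d.values()) for d in dims))
    for key in keys:
        sl = tuple(slice(*offsets[n][key[n]]) for n in range(len(dims)))
        full[sl] = blocks[key]
    return full

#usage: two legs with keys 0 and 1; the two blocks end up on the 'diagonal' of a 5x5 array
dims = [{0: 2, 1: 3}, {0: 2, 1: 3}]
blocks = {(0, 0): np.ones((2, 2)), (1, 1): 2 * np.ones((3, 3))}
assert blocks_to_dense(blocks.keys(), blocks, dims).shape == (5, 5)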
Example #14
    def __setitem__(self, key, value):
        assert (self._mergelevel == tuple(['A'] * self._rank))
        for n in range(self._rank):
            if key[n] in self._Ds[n]:
                try:
                    assert (utils.prod(utils.flatten(
                        self._Ds[n][key[n]])) == value.shape[n])
                except AssertionError:
                    print(n, 'key=', key, 'dim of k[', n, ']=',
                          utils.prod(utils.flatten(self._Ds[n][key[n]])),
                          'value.shape: ', value.shape)
                    sys.exit(
                        'SparseTensor.__setitem__(key,value): value.shape not consistent with existing blocks'
                    )
            elif key[n] not in self._Ds[n]:
                self._Ds[n][key[n]] = value.shape[n]

        #self._tensor[key]=np.copy(value)
        self._tensor[key] = value
        self._shapes[key] = value.shape
        self.__addkey__(key)
Example #15
def buildTensor(tensor):
    warnings.warn("buildTensor is deprecated; use SparseTensor.__tondarray__() instead",stacklevel=2)
    sizes=tuple([])
    
    blocks=[dict() for n in range(tensor._rank)]
    for n in range(tensor._rank):
        s=0
        start=0
        for k in sorted(tensor._Ds[n].keys()):
            blocks[n][k]=tuple([start,start+utils.prod(utils.flatten(tensor._Ds[n][k]))])
            start+=utils.prod(utils.flatten(tensor._Ds[n][k]))
            s+=utils.prod(utils.flatten(tensor._Ds[n][k]))
        sizes+=tuple([s])

    full=np.zeros(sizes,dtype=tensor._dtype)
    for k in tensor.__keys__():
        b=tuple([])
        for n in range(tensor._rank):
            b+=tuple([slice(blocks[n][k[n]][0],blocks[n][k[n]][1],1)])
        full[b]=tensor[k]
    return full
Example #16
    def test_svd_merged(self):
        merged = snp.merge(self.tens, [0, 1], [4, 5])
        U, S, V = snp.svd(merged, [0, 1], [2, 3])

        Z = S.__norm__()
        merged /= Z
        S /= Z
        S.__squeeze__()

        US = snp.tensordot(U, S, ([2], [0]))
        USV = snp.tensordot(US, V, ([2], [0]))
        USV.__squeeze__()
        diff1 = set(merged._tensor.keys()).difference(USV._tensor.keys())
        diff2 = set(USV._tensor.keys()).difference(merged._tensor.keys())
        self.assertTrue(len(diff1) == 0)
        self.assertTrue(len(diff2) == 0)

        for k in merged._tensor.keys():
            self.assertTrue(
                np.linalg.norm(merged[k] - USV[k]) /
                utils.prod(USV[k].shape) < self.eps)

        for k in USV._tensor.keys():
            self.assertTrue(
                np.linalg.norm(merged[k] - USV[k]) /
                utils.prod(USV[k].shape) < self.eps)

        unitU = snp.tensordot(U, snp.conj(U), ([0, 1], [0, 1]))
        for k in unitU._tensor.keys():
            self.assertTrue(
                np.linalg.norm(unitU[k] - np.eye(unitU[k].shape[0])) /
                utils.prod(unitU[k].shape) < self.eps)
        unitV = snp.tensordot(V, snp.conj(V), ([1, 2], [1, 2]))
        for k in unitV._tensor.keys():
            self.assertTrue(
                np.linalg.norm(unitV[k] - np.eye(unitV[k].shape[0])) /
                utils.prod(unitV[k].shape) < self.eps)
Example #17
def tensorize(unpackdata,vector):
    shapes=unpackdata[0]
    qflow=unpackdata[1]
    mergelevel=unpackdata[2]
    tensorshapes=unpackdata[3]
    keytoq=unpackdata[4]
    if vector.dtype==np.float64:
        dtype=float
    elif vector.dtype==np.complex128:
        dtype=complex
    else:
        #fall back to the vector's own dtype for any other type
        dtype=vector.dtype
    #determine the necessary size:
    k=list(shapes.keys())[0]
    Ds=[dict() for n in range(len(k))]
    tensor=spt.SparseTensor(keys=[],values=[],Ds=Ds,qflow=qflow,keytoq=keytoq,mergelevel=mergelevel,dtype=dtype)
    #there's no need to sort the keys here
    for k in shapes:
        start=shapes[k][0]
        end=start+utils.prod(shapes[k][1])

        tensor.__insertmergedblock__(k,tensorshapes[k],np.reshape(vector[start:end],shapes[k][1]))
    return tensor
Example #18
    def __dim__(self, index):
        dim = 0
        for val in self._Ds[index].values():
            dim += utils.prod(utils.flatten(val))
        return dim
Example #19
def mergeSingle(tensor,inds):
    if not isinstance(inds,list):
        inds=list(inds)
    if len(inds)==1:
        return tensor
    try: 
        if len(inds)>tensor._rank:
            raise TensorSizeError
        assert((inds==list(range(inds[0],inds[-1]+1))) or (inds==list(range(inds[0],inds[-1]-1,-1))))

        if (inds==list(range(inds[0],inds[-1]+1))):
            Ds=[]
            qflow=tuple([])
            keytoq=tuple([])
            mergelevel=tuple([])
            for n in range(inds[0]):
                mergelevel+=tuple([tensor._mergelevel[n]])
                qflow+=tuple([tensor._qflow[n]])
                keytoq+=tuple([tensor._ktoq[n]])
                Ds.append(dict())
                
            Ds.append(dict())
            locqflow=tuple([])
            lockeytoq=tuple([])
            tempmergelevel=tuple([])
            for n in inds:
                tempmergelevel+=tuple([tensor._mergelevel[n]])                
                locqflow+=tuple([tensor._qflow[n]])
                lockeytoq+=tuple([tensor._ktoq[n]])

            mergelevel+=tuple([tempmergelevel])                
            qflow+=tuple([locqflow])
            keytoq+=tuple([lockeytoq])
            for n in range(inds[-1]+1,tensor._rank):
                mergelevel+=tuple([tensor._mergelevel[n]])                
                qflow+=tuple([tensor._qflow[n]])
                keytoq+=tuple([tensor._ktoq[n]])
                Ds.append(dict())
                

                
            result=spt.SparseTensor([],[],Ds,qflow,keytoq=keytoq,mergelevel=mergelevel,dtype=tensor._dtype)

            for k in tensor.__keys__():
                newkey=tuple([])
                for n in range(0,inds[0]):
                    newkey+=tuple([k[n]])
                tempkey=tuple([])
                for m in inds:
                    tempkey+=tuple([k[m]])
                newkey+=tuple([tempkey])
                for n in range(inds[-1]+1,tensor._rank):
                    newkey+=tuple([k[n]])
                
                oldshape=tensor[k].shape
                combshape=tuple([])
                for n in inds:
                    combshape+=tuple([tensor._Ds[n][k[n]]])

                newshape=tuple(oldshape[0:inds[0]])+tuple([utils.prod(utils.flatten(combshape))])+tuple(oldshape[inds[-1]+1::])
                result._tensor[newkey]=np.reshape(tensor[k],newshape)
                result.__addkey__(newkey)
                for n in range(inds[0]):
                    result._Ds[n]=dict(tensor._Ds[n])
                result._Ds[inds[0]][newkey[inds[0]]]=combshape
                for n in range(inds[-1]+1,len(oldshape)):
                    result._Ds[inds[0]+n-inds[-1]]=dict(tensor._Ds[n])

            #this refreshes the ._shapes member of result, a dict() mapping each block's key to its shape (both are nested tuples)
            result.__shapes__()
            return result


        if (inds==list(range(inds[0],inds[-1]-1,-1))):
            sys.exit('sparsenumpy.mergeSingle(tensor,indices): merging indices in decreasing order is not supported; indices must be consecutive and increasing')
    except AssertionError:
        print ('ERROR in sparsenumpy.mergeSingle: inds are not consecutive numbers')
    except TensorSizeError:
        print ('ERROR in sparsenumpy.mergeSingle: rank of tensor is smaller than the number of indices to be merged (tensor._rank<len(inds))')
        sys.exit()
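Stripped of the key and quantum-number bookkeeping, the core of mergeSingle (and of its inverse splitSingle in Example #7) is a reshape that fuses the dimensions of consecutive legs into a single leg whose size is their product. A minimal numpy-only sketch; the helper name is illustrative and not part of the library.

import numpy as np
from functools import reduce
import operator

def merge_consecutive(block, inds):
    #fuse the consecutive axes listed in inds (e.g. [1, 2]) of a dense block into one axis
    shape = block.shape
    fused = reduce(operator.mul, (shape[i] for i in inds), 1)
    return block.reshape(shape[:inds[0]] + (fused,) + shape[inds[-1] + 1:])

block = np.random.rand(2, 3, 4, 5)
merged = merge_consecutive(block, [1, 2])
assert merged.shape == (2, 12, 5)
#splitting the fused leg again is just the inverse reshape back to the original shape
assert np.allclose(merged.reshape(block.shape), block)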
Example #20
def qr(tensor,indices1,indices2,defflow=1):
    inds=sorted(indices1)+sorted(indices2)
    assert(sorted(inds)==list(range(tensor._rank)))
    #transpose the indices of tensor such that indices1 come first (in ascending order) and indices2 come second (in ascending order)
    matrix=merge(tensor,indices1,indices2)
    assert(len(matrix._Ds)==2)

    #both indices are merged ones
    #matrix is now split apart using a QR decomposition
    #we randomly assign the columns of the resulting matrix inflow character, and the rows outflow character. The total flow has to sum up to zero
    #if the tensor has the correct symmetry.
    #first, according to the chosen combination, find all blocks that have the same total quantum number inflow (and outflow):
    #qndict has as keys the total inflow of U(1) charge; as value, it has a list of tuples (k,sizes) of the keys of all blocks with this U(1) charge inflow and their size
    qndict=dict()
    usedict=(tensor._ktoq!=(list([None])*tensor._rank))

    flatktoq=utils.flatten(matrix._ktoq[0])
    flatflowx=utils.flatten(matrix._qflow[0])
    flatflowy=utils.flatten(matrix._qflow[1])
    
    #if all x-flows are inflowing, the resulting new index has to be outflowing to conserve the charge
    #(vice versa for all-outflowing x)

    if (list(map(np.sign,flatflowx))==([1]*len(flatflowx))) or (list(map(np.sign,flatflowx))==([-1]*len(flatflowx))):
        #print 'x-keys have the same flow direction'
        netflowx=flatflowx[0]
        #computeflowfromq=False
    elif (list(map(np.sign,flatflowy))==([1]*len(flatflowy))) or (list(map(np.sign,flatflowy))==([-1]*len(flatflowy))):
        #print 'y-keys have the same flow direction'            
        netflowx=-flatflowy[0]
        #computeflowfromq=False
    else:
        warnings.warn("in sparsenumpy.qr: flow-structure of merged tensor is non-uniform in the x-component. Flow direction of the new bond cannot be unambiguously labelled; using the default value {0}.".format(defflow),stacklevel=2)
        netflowx=defflow
        
    for k in matrix.__keys__():
        #note: matrix has combined keys as row and column keys; they have to be utils.flattened to obtain the total U(1) charge.
        #kflat=[utils.flatten(utils.tupsignmult(k[0],matrix._qflow[0])),utils.flatten(utils.tupsignmult(k[1],matrix._qflow[1]))]
        if not usedict:
            kflat=utils.tupsignmult(utils.flatten(k[0]),utils.flatten(matrix._qflow[0]))
            #sum has to be used in a somewhat awkward fashion: the second argument is the start value, here the first element of the sequence to be added
            q=sum(kflat[1::],kflat[0])
        elif usedict:
            flatkey=utils.flatten(k[0])
            #sum has to be used in a somewhat awkward fashion: the second argument is the start value, here the first element of the sequence to be added
            if flatktoq[0]==None:
                q=flatkey[0]*flatflowx[0]
            else:
                q=flatktoq[0][flatkey[0]]*flatflowx[0]
                
            for n in range(1,len(flatkey)):

                if flatktoq[n]==None:
                    q+=flatkey[n]*flatflowx[n]
                else:
                    q+=flatktoq[n][flatkey[n]]*flatflowx[n]


        #each leg of the matrix has in this case incoming and outgoing sub-indices; the new central index can then be randomly assigned a
        #flow direction. The convention I use is that if the total charge is positive, then the flow direction is positive as well
        if q not in qndict:
            qndict[q]=[(k,matrix[k].shape)]
        elif q in qndict:
            qndict[q].append((k,matrix[k].shape))


    #if there is only one entry in the matrix and it has q=0, the flow direction of the x-leg of the matrix is chosen to be positive
    #if computeflowfromq==True:
    #    netflowx=1
    #    computeflowfromq=False
    
    #for each key-value pair in qndict, we can now do a separate QR decomposition (this could be parallelized).
    #for a given key "q" below we have a set of N different matrices that together form a large matrix
    #which can be QR-decomposed. We now have to build this huge matrix. First, we go through the list qndict[q]
    #and find all different x and y keys:
    #the total charge flow for every block in matrix has to be the same; take the first block to check what it is:

    QDs=[dict() for n in range(2)]
    RDs=[dict() for n in range(2)]

    Qqflow=tuple([matrix._qflow[0],-netflowx])
    Rqflow=tuple([netflowx,matrix._qflow[1]])
    
    mergelevelQ=tuple([matrix._mergelevel[0],'A'])
    mergelevelR=tuple(['A',matrix._mergelevel[1]])


    keytoqQ=tuple([matrix._ktoq[0],None])
    keytoqR=tuple([None,matrix._ktoq[1]])        
    
    #the flow-sign of the new index depends on whether kflat is positive or negative. If kflat is positive, it means the net charge inflow on the x-index of the tensor is
    #positive, and hence the new index has to have outflow character (and vice versa for a net charge outflow on the x-index).
    Q=spt.SparseTensor(keys=[],values=[],Ds=QDs,qflow=Qqflow,keytoq=keytoqQ,mergelevel=mergelevelQ,dtype=tensor._dtype)
    R=spt.SparseTensor(keys=[],values=[],Ds=RDs,qflow=Rqflow,keytoq=keytoqR,mergelevel=mergelevelR,dtype=tensor._dtype)
    for q in qndict:
        startx=0
        starty=0
        xkeys=dict()
        ykeys=dict()
        for k in qndict[q]:
            #note: k[0] is a tuple (a,b), where a is the x-key and b the y-key of a matrix-block with total charge q
            #      k[1] is the shape of this block
            #k[0][0] contains the x-key of the block. this key can be a single key-object, or a tuple of key-objects (a nested tuple, i.e. a merged index).
            #in the latter case, the nested structure has to be transferred to the decomposed matrices Q and R.
            #the same goes for ykeys
            if k[0][0] not in xkeys:
                xkeys[k[0][0]]=(startx,startx+k[1][0])
                startx+=k[1][0]
            elif k[0][0] in xkeys:
                #this is a consistency check and could in principle be removed
                assert((xkeys[k[0][0]][1]-xkeys[k[0][0]][0])==k[1][0])
                
            if k[0][1] not in ykeys:
                ykeys[k[0][1]]=(starty,starty+k[1][1])
                starty+=k[1][1]
            elif k[0][1] in ykeys:
                #this is a consistency check and could in principle be removed
                assert((ykeys[k[0][1]][1]-ykeys[k[0][1]][0])==k[1][1])
                
        #now we have the total size, and we can build the matrix
        mat=np.zeros((startx,starty)).astype(tensor._dtype)

        #fill in the blocks
        for k in qndict[q]:
            mat[xkeys[k[0][0]][0]:xkeys[k[0][0]][1],ykeys[k[0][1]][0]:ykeys[k[0][1]][1]]=matrix[k[0]]

        #now for the QR decomposition:
        blockq,blockr=np.linalg.qr(mat)
        Dc=blockq.shape[1]

        for xkey in xkeys:
            Qkey=tuple([xkey,q*netflowx])
            #Qkey=tuple([xkey,abs(q)])            
            shape=tuple([matrix._Ds[0][xkey],Dc])
            assert(utils.prod(utils.flatten(shape))==utils.prod(blockq[slice(xkeys[xkey][0],xkeys[xkey][1],1),:].shape))            
            Q.__insertmergedblock__(Qkey,shape,blockq[slice(xkeys[xkey][0],xkeys[xkey][1],1),:])
        for ykey in ykeys:            
            #Rkey=tuple([abs(q),ykey])
            Rkey=tuple([q*netflowx,ykey])            
            shape=tuple([Dc,matrix._Ds[1][ykey]])
            assert(utils.prod(utils.flatten(shape))==utils.prod(blockr[:,slice(ykeys[ykey][0],ykeys[ykey][1],1)].shape))
            R.__insertmergedblock__(Rkey,shape,blockr[:,slice(ykeys[ykey][0],ykeys[ykey][1],1)])
    Qsplit=splitSingle(Q,0)
    Rsplit=splitSingle(R,1)    
    return Qsplit,Rsplit
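The block strategy behind qr can be illustrated without the sparse machinery: assuming the symmetry makes the matrix block-diagonal in the total charge q, each charge sector is QR-decomposed independently, and the resulting Q factor is orthonormal sector by sector. The sector shapes below are made up for illustration only.

import numpy as np

#one dense matrix per charge sector; in qr above these are assembled from the blocks collected in qndict[q]
sectors = {-1: np.random.rand(3, 2), 0: np.random.rand(6, 4), 1: np.random.rand(5, 5)}
Qs, Rs = {}, {}
for q, mat in sectors.items():
    Qs[q], Rs[q] = np.linalg.qr(mat)
    #per sector, Q^dagger Q is the identity and Q R reproduces the original sector matrix
    assert np.allclose(Qs[q].conj().T @ Qs[q], np.eye(Qs[q].shape[1]))
    assert np.allclose(Qs[q] @ Rs[q], mat)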