def __init__(self, *args, **kwargs):
    """
    use the optional keyword arg 'transform' to supply a 2x2 transform matrix
    """
    try:
        transform = kwargs.pop('transform')
    except KeyError:
        transform = Matrix.eye(2)

    if isinstance(transform, GridHelperCurveLinear):
        assert 'Itransform' not in kwargs, (
            'no Itransform when transform is a %s' % type(transform))
        grid_helper = transform
    elif callable(transform):
        grid_helper = GridHelperCurveLinear(
            [transform, kwargs.pop('Itransform')])
    else:
        transform = Matrix(transform)
        try:
            Itransform = kwargs.pop('Itransform')
        except KeyError:
            Itransform = transform**(-1)

        grid_helper = GridHelperCurveLinear([
            self.makeTransform(transform),
            self.makeTransform(Itransform)
        ])

    kwargs['grid_helper'] = grid_helper
    mpl_toolkits.axisartist.Axes.__init__(self, *args, **kwargs)
def _subSquare(vectors, var, full=False):
    """
    given a series of vectors, this function calculates:
        (variances, vectors) = numpy.linalg.eigh(vectors.H*vectors)

    it's a separate function because if there are fewer vectors than
    dimensions the process can be accelerated, it just takes some dancing

    it is based on this:

    >>> vectors=Matrix(helpers.ascomplex(numpy.random.randn(
    ...     numpy.random.randint(1,10),numpy.random.randint(1,10),2
    ... )))
    >>> cov = vectors.H*vectors
    >>> Xcov = vectors*vectors.H
    >>> (Xval,Xvec) = numpy.linalg.eigh(Xcov)
    >>> vec = Xvec.H*vectors
    >>> assert vec.H*vec == cov
    """
    vectors = Matrix(vectors)
    shape = vectors.shape

    if not all(shape):
        val = numpy.zeros([0])
        vec = numpy.zeros([0, shape[1]])
        return (val, vec)

    eig = numpy.linalg.eigh

    if shape[0] >= shape[1] or full or not vectors.any() or (var < 0).any():
        scaled = Matrix(var[:, None] * numpy.array(vectors))
        cov = vectors.H * scaled
        (val, vec) = eig(cov)
        vec = vec.H
    elif not var.any():
        cov = vectors.H * vectors
        (_, vec) = eig(cov)
        vec = vec.H
        val = numpy.zeros(vec.shape[0])
    else:
        scaled = Matrix(scipy.sqrt(var)[:, None] * numpy.array(vectors))
        Xcov = scaled * scaled.H
        #Xcov = var[:,None]*numpy.array(vectors)*vectors.H
        (_, Xvec) = eig(Xcov)
        Xscaled = (Xvec.H * scaled)
        val = helpers.mag2(Xscaled)
        vec = numpy.array(Xscaled) / scipy.sqrt(val[:, numpy.newaxis])

    return (val, vec)
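# A minimal, self-contained sketch (plain numpy, none of mvn's Matrix or
# helpers) of the identity _subSquare relies on: the nonzero eigenvalues of
# X.H*X and X*X.H are the same, so when there are fewer vectors than
# dimensions it is cheaper to decompose the small Gram matrix X*X.H and map
# its eigenvectors back through X, exactly as in the doctest above.
import numpy

X = numpy.random.randn(3, 7) + 1j * numpy.random.randn(3, 7)  # 3 vectors, 7 dims

big = X.conj().T.dot(X)        # 7x7, what eigh(vectors.H*vectors) would see
small = X.dot(X.conj().T)      # 3x3, the accelerated path

big_vals = numpy.sort(numpy.linalg.eigvalsh(big))[-3:]
small_vals = numpy.sort(numpy.linalg.eigvalsh(small))
assert numpy.allclose(big_vals, small_vals)

# eigen-vectors of the big matrix are recovered from the small one:
vals, vecs = numpy.linalg.eigh(small)
vec = vecs.conj().T.dot(X)                  # rows span the same space as X
assert numpy.allclose(vec.conj().T.dot(vec), big)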
def makeObjects(flat=None, ndim=None, seed=None):
    if seed is None:
        seed = randint(1, 1e6)
    numpy.random.seed(seed)
    randn = numpy.random.randn

    if ndim is None:
        ndim = randint(0, 20)

    shapes = {
        None: lambda: max(randint(-ndim, ndim), 0),
        True: lambda: randint(1, ndim),
        False: lambda: 0,
    }

    triple = lambda x: [x, x, x]

    if flat in shapes:
        flat = [item() for item in triple(shapes[flat])]
    elif isinstance(flat, int):
        flat = triple(flat)

    assert all(f <= ndim for f in flat), "flatness can't be larger than ndim"

    rvec = lambda n=1, ndim=ndim: Matrix(randn(n, ndim))

    A, B, C = [Mvn.rand([ndim - F, ndim]) for F in flat]

    n = randint(1, 2 * ndim)
    M = rvec(n).H
    M2 = rvec(n).H

    E = Matrix.eye(ndim)

    K1 = numpy.random.randn()
    K2 = numpy.random.randn()

    N = randint(-5, 5)

    return {
        'ndim': ndim,
        'A': A,
        'B': B,
        'C': C,
        'M': M,
        'M2': M2,
        'E': E,
        'K1': K1,
        'K2': K2,
        'N': N,
    }
def newAx(fig, transform=Matrix.eye(2)):
    fig.clear()

    axgrid = GridSpec(1, 1)

    #get axes
    ax = pylab.subplot(
        axgrid[:, :],
        projection='custom',
        transform=transform,
    )

    ax.autoscale(False)

#    ax.set_xticks(numpy.arange(-10., 35., 5.))
#    ax.set_yticks(numpy.arange(-10., 35., 5.))

    ax.set_xlim([-5, 20])
    ax.set_ylim([-5, 10])

    ax.xaxis.set_major_locator(MultipleLocator(5))

    ax.grid('on')

    drawLegend(ax)

    return ax
def __init__(
    self,
    vectors=Matrix.eye,
    mean=numpy.zeros,
):
    mean = mean if callable(mean) else numpy.array(mean).flatten()[None, :]
    vectors = vectors if callable(vectors) else Matrix(vectors)

    stack = helpers.autoshape([
        [vectors],
        [mean],
    ], default=1)

    #unpack the stack into the object's parameters
    self.vectors = Matrix(numpy.real_if_close(stack[0, 0]))
    self.mean = Matrix(numpy.real_if_close(stack[1, 0]))
def main():
    M1 = Mvn.rand(2)
    M2 = Mvn.rand(2)

    data1 = M1.sample(100)
    data2 = M2.sample(100)

    M1 = Mvn.fromData(data1)
    M2 = Mvn.fromData(data2)

    M3 = Mvn.fromData([M1, M2])

    data3 = Matrix.stack([[data1], [data2]])

    assert M3 == Mvn.fromData(data3)

    A = pylab.gca()

    M3.plot(A, facecolor='m', minalpha=0.1, zorder=-1)
    M1.plot(A, facecolor='b', minalpha=0.1, zorder=0)
    M2.plot(A, facecolor='r', minalpha=0.1, zorder=1)

    pylab.scatter(data1[:, 0], data1[:, 1], facecolor='b', zorder=2)
    pylab.scatter(data2[:, 0], data2[:, 1], facecolor='r', zorder=3)

    pylab.show()
def main():
    #generate data
    N = 75

    red = Mvn.rand(2)
    blue = Matrix(red.sample(N))
    red = Mvn.fromData(blue)

    #create figure
    fig = pylab.figure(1, figsize=(7, 7))
    fig.suptitle('Mahalanobis Distance')

    ax = setupAxes(red.transform(-1))

    # scatter plot of the original data
    scatter(ax[0, 0], red, blue)

    # scatter plot of the normalized data
    scatter(ax[1, 0], red / red, blue / red)

    # draw the cumulative distribution
    cumulative(ax[0, 1], red, blue)

    # draw the histogram
    hist(red.mah().pdf, red.mah(blue), ax[1, 1])

    ax[0, 1].set_xlim([0, None])
    ax[1, 1].set_ylim([0, None])

    pylab.show()
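# A short plain-numpy sketch of the quantity the demo above visualizes; it
# assumes Mvn.mah(x) returns the squared Mahalanobis distance
# (x - mean) * cov**-1 * (x - mean).H.  Dividing the data by the fitted
# distribution ("blue / red" above) whitens it, so the squared radius of
# each whitened point is that same distance.
import numpy

data = numpy.random.multivariate_normal(
    [1.0, 2.0], [[2.0, 0.5], [0.5, 1.0]], size=500)

mean = data.mean(0)
cov = numpy.cov(data, rowvar=False)

centered = data - mean
prec = numpy.linalg.inv(cov)
mah2 = numpy.einsum('ij,jk,ik->i', centered, prec, centered)

white = centered.dot(numpy.linalg.cholesky(prec))
assert numpy.allclose((white**2).sum(1), mah2)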
def reverseTransform(center, M, x, y):
    """
    map the (x, y) points through M about `center`, then pull each point
    toward `center` by replacing its distance r with sqrt(r)
    """
    center = center.squeeze()

    x = Matrix(x)
    y = Matrix(y)
    xy = numpy.hstack([x.T, y.T])

    xy = xy - center[None, :]
    xy = numpy.array(xy * M)

    mags = (numpy.array(xy)**2).sum(1)[:, None]**0.5
    dirs = xy / mags

    xy = dirs * mags**0.5 + center

    return xy[:, 0].squeeze(), xy[:, 1].squeeze()
def makeTransform(self, M, x, y):
    #apply the matrix M to the (x, y) coordinate lists, treated as rows
    x = Matrix(x)
    y = Matrix(y)
    xy = numpy.hstack([x.T, y.T])
    xy = xy * M
    return xy[:, 0].squeeze(), xy[:, 1].squeeze()
class Plane(object):
    """
    plane class, meant to (eventually) factor out some code,
    and utility from the Mvn class
    """
    rtol = 1e-5
    """
    relative tolerance

    see :py:func:`mvn.helpers.approx`
    """

    atol = 1e-8
    """
    absolute tolerance

    see :py:func:`mvn.helpers.approx`
    """

    def __init__(
        self,
        vectors=Matrix.eye,
        mean=numpy.zeros,
    ):
        mean = mean if callable(mean) else numpy.array(mean).flatten()[None, :]
        vectors = vectors if callable(vectors) else Matrix(vectors)

        stack = helpers.autoshape([
            [vectors],
            [mean],
        ], default=1)

        #unpack the stack into the object's parameters
        self.vectors = Matrix(numpy.real_if_close(stack[0, 0]))
        self.mean = Matrix(numpy.real_if_close(stack[1, 0]))

    def __repr__(self):
        """
        print self
        """
        return '\n'.join([
            '%s(' % self.__class__.__name__,
            ' mean=',
            (' %r,' % self.mean).replace('\n', '\n' + 8 * ' '),
            ' vectors=',
            (' %r' % self.vectors).replace('\n', '\n' + 8 * ' '),
            ')',
        ])

    __str__ = __repr__

    def __getitem__(self, index):
        """
        project the plane into the selected dimensions
        """
        assert not isinstance(index, tuple), '1-dimensional index only'

        return type(self)(
            mean=self.mean[:, index],
            vectors=self.vectors[:, index],
        )

    copy = decorate.automath.Automath.__dict__['copy']

    @property
    def shape(self):
        """
        get the shape of the vectors: the first element is the number of
        vectors, the second is their length, the number of dimensions of
        the space they are embedded in

        >>> assert A.vectors.shape == A.shape
        >>> assert (A.vectors.shape[0],A.mean.size)==A.shape
        >>> assert A.shape[0]==A.rank
        >>> assert A.shape[1]==A.ndim
        """
        return self.vectors.shape

    @property
    def rank(self):
        """
        get the number of dimensions of the space covered by the mvn

        >>> assert A.rank == A.vectors.shape[0]
        """
        return self.vectors.shape[0]

    @property
    def ndim(self):
        """
        get the number of dimensions of the space the mvn exists in

        >>> assert A.ndim==A.mean.size==A.mean.shape[1]
        >>> assert A.ndim==A.vectors.shape[1]
        """
        return self.mean.size

    @property
    def flat(self):
        """
        >>> assert bool(A.flat) == bool(A.vectors.shape[1] > A.vectors.shape[0])
        """
        return max(self.vectors.shape[1] - self.vectors.shape[0], 0)

    def __nonzero__(self):
        """
        True if not empty

        >>> assert A
        >>> assert bool(A) == bool(A.ndim)
        >>> assert not A[:0]
        """
        return bool(self.ndim)

    @decorate.MultiMethod
    def __add__(self, other):
        """
        add two planes together
        """
        raise TypeError("No Appropriate Method Found")

    @__add__.register(Plane)
    def __add__(self, other):
        result = self.copy()
        result.mean = result.mean + other
        return result

    @__add__.register(Plane, Plane)
    def __add__(self, other):
        return Plane(
            mean=self.mean + other.mean,
            vectors=numpy.vstack([self.vectors, other.vectors])
        )

    def approx(self, *args):
        return helpers.approx(*args, atol=self.atol, rtol=self.rtol)

    def __and__(self, other):
        """
        plane intersection
        """
        Nself = self.vectors.null()
        Nother = other.vectors.null()

        #and stack them
        null = numpy.vstack([
            Nself,
            Nother,
        ])

        mean = numpy.hstack([self.mean, other.mean])

        #get length of the component of the means along each null vector
        r = numpy.vstack([Nself * self.mean.H, Nother * other.mean.H])

        mean = (numpy.linalg.pinv(null, 1e-6) * r).H

        return type(self)(vectors=null.null(), mean=mean)
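# A plain-numpy sketch of the intersection math in Plane.__and__ above: each
# plane is {mean + span(vectors)}, and its constraint form is N*x.H == N*mean.H
# where the rows of N span the null space of `vectors`; stacking both planes'
# constraints and solving with a pseudoinverse gives a point on the
# intersection, and the null space of the stacked N gives its directions.
# mvn's Matrix.null() is assumed to return such an orthonormal null-space
# basis; the small SVD helper below stands in for it.
import numpy

def null_rows(A, rtol=1e-6):
    """rows spanning the orthogonal complement of the rows of A"""
    A = numpy.atleast_2d(A)
    _, s, Vh = numpy.linalg.svd(A)
    rank = (s > rtol * s[0]).sum() if s.size else 0
    return Vh[rank:]

# two planes in 3 dimensions: z == 1 and x == 2
mean1 = numpy.array([0.0, 0.0, 1.0])
vectors1 = numpy.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
mean2 = numpy.array([2.0, 0.0, 0.0])
vectors2 = numpy.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])

N1, N2 = null_rows(vectors1), null_rows(vectors2)
N = numpy.vstack([N1, N2])
r = numpy.concatenate([N1.dot(mean1), N2.dot(mean2)])

point = numpy.linalg.pinv(N).dot(r)     # a point on both planes
directions = null_rows(N)               # directions along the intersection

assert numpy.allclose(point, [2.0, 0.0, 1.0])
assert numpy.allclose(numpy.abs(directions), [[0.0, 1.0, 0.0]])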
def square(vectors, var=None, full=False):
    """
    calculates the eigen-vectors and eigen-values of the covariance matrix
    that would be produced by multiplying out

        A.var*numpy.array(A.vectors.H)*A.vectors

    without necessarily calculating the covariance matrix itself.

    It is also set up to handle vectors with infinite variances.

    originally the idea came from these two lines on wikipedia:

    http://en.wikipedia.org/wiki/Square_root_of_a_matrix:
        '''if T = A*A.H = B*B.H, then there exists a unitary U s.t.
        A = B*U'''

    http://en.wikipedia.org/wiki/Unitary_matrix
        '''In mathematics, a unitary matrix is an nxn complex matrix U
        satisfying the condition U.H*U = I, U*U.H = I'''

    *********************************

    A better description for all this is the compact singular value
    decomposition:

    http://en.wikipedia.org/wiki/Singular_value_decomposition#Compact_SVD

    but here I only need one of the two sets of vectors, so I actually
    calculate the smaller of the two possible covariance matrices and
    then its eigen-stuff.
    """
    if var is None:
        var = numpy.ones(vectors.shape[0])

    finite = numpy.isfinite(var) & numpy.isfinite(vectors.asarray()).all(1)
    infinite = ~finite

    Ivar = numpy.array([])
    Ivectors = Matrix(numpy.zeros((0, vectors.shape[1])))

    if infinite.any():
        #square up the infinite vectors
        #Ivar is unused
        (Ivar, Ivectors) = _subSquare(
            vectors=vectors[infinite, :],
            var=numpy.ones_like(var[infinite]),
            full=True)

        #take the finite variances and vectors
        var = var[~infinite]
        vectors = vectors[~infinite, :]

        small = helpers.approx(Ivar)

        Ivar = Ivar[~small]
        SIvectors = Ivectors[~small, :]

        if vectors.any():
            #remove the component parallel to each infinite vector
            vectors = vectors - vectors * SIvectors.H * SIvectors
        elif var.size:
            num = helpers.approx(var).sum()
            #grab the extra vectors here, because if the vectors are all
            #zeros, eig will fail
            vectors = Ivectors[small, :]
            vectors = vectors[:num, :]

        Ivectors = SIvectors

    if var.size:
        (var, vectors) = _subSquare(vectors, var)

    if Ivar.size and var.size:
        #sort the finite variances
        order = numpy.argsort(abs(var))
        var = var[order]
        vectors = vectors[order, :]

        #if there are more vectors than dimensions
        kill = var.size + Ivar.size - vectors.shape[1]
        if kill > 0:
            #squeeze out the vectors with the smallest variances
            var = var[kill:]
            vectors = vectors[kill:, :]

    return (
        numpy.concatenate((var, numpy.inf * numpy.ones_like(Ivar))),
        numpy.vstack([vectors, Ivectors])
    )
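# A small numpy sketch of the compact-SVD fact the docstring leans on: the
# covariance V.H*diag(var)*V has the same nonzero eigenvalues as the much
# smaller matrix built from the sqrt(var)-scaled vectors, so `square` can
# work in whichever space is smaller.  Plain numpy, finite positive
# variances only; the infinite-variance handling above is not reproduced.
import numpy

k, n = 3, 8                              # 3 vectors embedded in 8 dimensions
V = numpy.random.randn(k, n)
var = numpy.random.rand(k) + 0.1         # strictly positive variances

big = V.T.dot(numpy.diag(var)).dot(V)    # n x n covariance
scaled = numpy.sqrt(var)[:, None] * V    # k x n
small = scaled.dot(scaled.T)             # k x k

big_vals = numpy.sort(numpy.linalg.eigvalsh(big))[-k:]
small_vals = numpy.sort(numpy.linalg.eigvalsh(small))
assert numpy.allclose(big_vals, small_vals)

# the big covariance's eigenvectors come from projecting the scaled vectors
# onto the small matrix's eigenvectors and normalizing, as in _subSquare:
vals, vecs = numpy.linalg.eigh(small)
rows = vecs.T.dot(scaled)
rows /= numpy.sqrt((rows**2).sum(1))[:, None]
assert numpy.allclose(rows.dot(big).dot(rows.T), numpy.diag(vals))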
#create figure
fig = pylab.figure(figsize=(6, 6))

## kalman filter parameters

#the actual, hidden state
actual = numpy.array([[0, 5]])

#the sensor
sensor = Mvn(vectors=[[1, 0], [0, 1]], var=[1, numpy.inf])

#the system noise
noise = Mvn(vectors=[[1, 0], [0, 1]], var=numpy.array([0.5, 1])**2)

#the shear transform to move the system forward
transform = Matrix([[1, 0], [0.5, 1]])

filtered = sensor.measure(actual)

## initial plot
ax = newAx(fig)

#plot the initial actual position
ax.plot(actual[:, 0], actual[:, 1], **actualParams)

ax.set_title('Kalman Filtering: Start')
pylab.xlabel('Position')
pylab.ylabel('Velocity')

P.publish(fig)

#measure the actual position, and plot the measurement
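# A plain-numpy sketch of the filter step this demo builds toward; the mvn
# operations used here (sensor.measure, fusion, Mvn*Matrix for the time
# step) are assumed to reduce to the textbook Kalman equations below.
# The sensor above has infinite variance on velocity, i.e. it only measures
# position, so H picks out the first state component.  The state is written
# as a column vector [position, velocity], so the row-vector shear
# Matrix([[1, 0], [0.5, 1]]) becomes its transpose F; the initial covariance
# and the measurement z are made-up illustrative numbers.
import numpy

F = numpy.array([[1.0, 0.5], [0.0, 1.0]])   # position += 0.5*velocity
Q = numpy.diag([0.5**2, 1.0**2])            # system noise covariance
H = numpy.array([[1.0, 0.0]])               # measure position only
R = numpy.array([[1.0]])                    # position sensor variance

x = numpy.array([0.0, 5.0])                 # state estimate
P_cov = numpy.diag([1.0, 1.0])              # its covariance (illustrative)

# predict
x = F.dot(x)
P_cov = F.dot(P_cov).dot(F.T) + Q

# update with a position measurement z
z = numpy.array([2.4])
S = H.dot(P_cov).dot(H.T) + R               # innovation covariance
K = P_cov.dot(H.T).dot(numpy.linalg.inv(S)) # Kalman gain
x = x + K.dot(z - H.dot(x))
P_cov = P_cov - K.dot(H).dot(P_cov)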