Example #1
    def summed_dist_matrix(self, vectors, presorted=False):
        """ Calculates the sum of all element pair distances for each
        pair of vectors.

        If :math:`(a_1, \\dots, a_n)` and :math:`(b_1, \\dots, b_m)` are the
        :math:`u`-th and :math:`v`-th vector from `vectors` and :math:`K` the
        kernel, the resulting entry in the 2D array will be :math:`D_{uv}
        = \\sum_{i=1}^{n} \\sum_{j=1}^{m} K(a_i - b_j)`.

        :param sequence vectors: A sequence of 1D Quantity arrays for which to
            calculate the pairwise summed distances. The required units depend
            on the kernel. Usually it is the inverse unit of the kernel size.
        :param bool presorted: Some optimized specializations of this function
            may need sorted vectors. Set `presorted` to `True` if you know that
            the passed vectors are already sorted to skip the sorting and thus
            increase performance.
        :rtype: Quantity 2D
        """

        D = sp.empty((len(vectors), len(vectors)))
        if len(vectors) > 0:
            might_have_units = self(vectors[0])
            if hasattr(might_have_units, 'units'):
                D = D * might_have_units.units
            else:
                D = D * pq.dimensionless

        for i, j in sp.ndindex(len(vectors), len(vectors)):
            D[i, j] = sp.sum(
                self((vectors[i] - sp.atleast_2d(vectors[j]).T).flatten()))
        return D
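For context, a minimal usage sketch of summed_dist_matrix; it assumes the quantities package and a spykeutils-style kernel class (the GaussianKernel name and constructor are assumptions, not shown in the snippet):

    import quantities as pq
    import scipy as sp
    from spykeutils import signal_processing as sigproc  # assumed module layout

    a = sp.array([1.0, 2.0, 3.0]) * pq.s
    b = sp.array([1.5, 2.5]) * pq.s
    kernel = sigproc.GaussianKernel(1.0 * pq.s)  # assumed constructor signature
    D = kernel.summed_dist_matrix([a, b])
    # D is a 2x2 Quantity array with D[u, v] = sum_ij K(a_i - b_j)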
Example #2
    def benchmark_single_metric(self, metric):
        print("Benchmarking %s metric ..." % metric)
        times = sp.empty((len(self.data.spike_count_range),
                          len(self.data.train_count_range)))
        for i, j in sp.ndindex(*times.shape):
            trains = self.data.trains[i][:self.data.train_count_range[j]]
            times[i, j] = timeit.timeit(
                lambda: metrics[metric][1](trains),
                number=self.num_loops) / self.num_loops
        self.results[metric] = times
Example #4
def ellipsoid(R=sp.array([[2, 0, 0], [0, 1, 0], [0, 0, 1]]),
              position=(0, 0, 0), thetares=20, phires=20, color=(0, 0, 1),
              opacity=1, tessel=0):
    ''' Create an ellipsoid actor.

    Stretches a unit sphere into an ellipsoid under a 3x3 transformation
    matrix R, e.g.

    R = sp.array([[2, 0, 0],
                  [0, 1, 0],
                  [0, 0, 1]])
    '''
    
    Mat = sp.identity(4)
    Mat[0:3, 0:3] = R

    # Mat = sp.array([[2, 0, 0, 0],
    #                 [0, 1, 0, 0],
    #                 [0, 0, 1, 0],
    #                 [0, 0, 0, 1]])
    mat = vtk.vtkMatrix4x4()

    for i in sp.ndindex(4, 4):
        mat.SetElement(i[0], i[1], Mat[i])
    
    radius = 1
    sphere = vtk.vtkSphereSource()
    sphere.SetRadius(radius)
    sphere.SetLatLongTessellation(tessel)
    sphere.SetThetaResolution(thetares)
    sphere.SetPhiResolution(phires)

    trans = vtk.vtkTransform()
    trans.Identity()
    #trans.Scale(0.3,0.9,0.2)
    trans.SetMatrix(mat)
    trans.Update()

    transf = vtk.vtkTransformPolyDataFilter()
    transf.SetTransform(trans)
    transf.SetInput(sphere.GetOutput())
    transf.Update()

    spherem = vtk.vtkPolyDataMapper()
    spherem.SetInput(transf.GetOutput())

    spherea = vtk.vtkActor()
    spherea.SetMapper(spherem)
    spherea.SetPosition(position)
    spherea.GetProperty().SetColor(color)
    spherea.GetProperty().SetOpacity(opacity)
    #spherea.GetProperty().SetRepresentationToWireframe()

    return spherea
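A short usage sketch, assuming the VTK 5-era API implied by the SetInput calls above (newer VTK uses SetInputData/SetInputConnection instead):

    import vtk
    import scipy as sp

    actor = ellipsoid(R=sp.array([[2., 0., 0.],
                                  [0., 1., 0.],
                                  [0., 0., 1.]]))
    ren = vtk.vtkRenderer()
    ren.AddActor(actor)
    win = vtk.vtkRenderWindow()
    win.AddRenderer(ren)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(win)
    win.Render()
    iren.Start()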
Example #5
from numpy import (arange, array, cumprod, mgrid, ndindex, prod, ravel, tile,
                   zeros)

def cube_grid(dims):
    """
    Return a regular nD-cube mesh with given shape.

    Eg.
      cube_grid((2,2))   -> 2x2   - 2d mesh (x,y)
      cube_grid((4,3,2)) -> 4x3x2 - 3d mesh (x,y,z)

    Eg.

      v,i = cube_grid((2,1))

      v =
      array([[ 0.,  0.],
             [ 1.,  0.],
             [ 2.,  0.],
             [ 0.,  1.],
             [ 1.,  1.],
             [ 2.,  1.]])

      i = 
      array([[[0, 3],
              [1, 4]],

             [[1, 4],
              [2, 5]]])

    """
    dims = tuple(dims)

    vert_dims = tuple(x + 1 for x in dims)
    N = len(dims)

    vertices = zeros((prod(vert_dims), N))
    grid = mgrid[tuple(slice(0, x, None) for x in reversed(vert_dims))]
    for i in range(N):
        vertices[:, i] = ravel(grid[N - i - 1])

    # construct one cube to be tiled; cycle holds the flat-index stride of
    # each dimension
    cube = zeros((2, ) * N, dtype='i')
    cycle = array([1] + list(cumprod(vert_dims)[:-1]), dtype='i')
    for i in ndindex(*((2, ) * N)):
        cube[i] = sum(array(i) * cycle)

    #indices of all vertices which are the lower corner of a cube
    interior_indices = arange(prod(vert_dims)).reshape(
        tuple(reversed(vert_dims))).T
    interior_indices = interior_indices[tuple(slice(0, x, None) for x in dims)]

    indices = tile(cube, (prod(dims), ) +
                   (1, ) * N) + interior_indices.reshape((prod(dims), ) +
                                                         (1, ) * N)

    return (vertices, indices)
Example #8
def get_full_state(s):
    psi = sp.zeros(tuple([2]*N), dtype=sp.complex128)
    
    for ind in sp.ndindex(psi.shape):
        A = 1.0
        for n in range(N, 0, -1):
            A = s.A[n][ind[n-1]].dot(A)

        psi[ind] = A[0,0]
    psi = psi.ravel()

    return psi
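The loops above contract a matrix product state (one matrix s.A[n][k] per site n and physical index k) into the full vector of 2^N amplitudes, with N taken from the enclosing module. A self-contained sketch of the same contraction pattern, using hypothetical stand-ins for N and s.A (old scipy re-exports the NumPy names used here):

    import scipy as sp

    N = 3  # number of sites (stand-in for the module-level N)
    # one 1x1 matrix per site and physical index: the product state |000>
    A = [None] + [[sp.ones((1, 1)), sp.zeros((1, 1))] for _ in range(N)]

    psi = sp.zeros((2,) * N, dtype=sp.complex128)
    for ind in sp.ndindex(psi.shape):
        amp = 1.0
        for n in range(N, 0, -1):
            amp = A[n][ind[n - 1]].dot(amp)
        psi[ind] = amp[0, 0]
    print(psi.ravel())  # amplitude 1.0 at index 0, zeros elsewhere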
Example #9
def _create_matrix_from_indexed_function(shape,
                                         func,
                                         symmetric_2d=False,
                                         **func_params):
    mat = sp.empty(shape)
    if symmetric_2d:
        for i in range(shape[0]):
            for j in range(i, shape[1]):
                mat[i, j] = mat[j, i] = func(i, j, **func_params)
    else:
        for idx in sp.ndindex(*shape):
            mat[idx] = func(*idx, **func_params)
    return mat
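A minimal usage sketch (the indexed function is a hypothetical example):

    mat = _create_matrix_from_indexed_function(
        (4, 4), lambda i, j: abs(i - j), symmetric_2d=True)
    # mat[i, j] == abs(i - j); func is evaluated only for j >= i and mirrored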
Example #10
    def benchmark_single_metric(self, metric):
        print("Benchmarking %s metric ..." % metric)
        times = sp.empty((
            len(self.data.num_units_range), len(self.data.spike_count_range),
            len(self.data.train_count_range)))
        for u, i, j in sp.ndindex(*times.shape):
            if i == 0 and j == 0:
                print("%i units" % self.data.num_units_range[u])
            units = trains_as_multiunits(
                self.data.trains[i], self.data.train_count_range[j],
                self.data.num_units_range[u])
            times[u, i, j] = timeit.timeit(
                lambda: metrics[metric][1](units),
                number=self.num_loops) / self.num_loops
        self.results[metric] = times
Example #11
def van_rossum_dist(trains, tau=1.0 * pq.s, kernel=None, sort=True):
    """ Calculates the van Rossum distance.

    It is defined as the Euclidean distance of the spike trains convolved with
    a causal, decaying exponential smoothing filter. A detailed description can be
    found in *Rossum, M. C. W. (2001). A novel spike distance. Neural
    Computation, 13(4), 751-763.* This implementation is normalized to yield
    a distance of 1.0 for the distance between an empty spike train and a spike
    train with a single spike. Divide the result by sqrt(2.0) to get the
    normalization used in the cited paper.

    Given :math:`N` spike trains with :math:`n` spikes on average the run-time
    complexity of this function is :math:`O(N^2 n^2)`. An implementation in
    :math:`O(N^2 n)` would be possible but has a high constant factor rendering
    it slower in practical cases.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the van Rossum distance will be calculated pairwise.
    :param tau: Decay rate of the exponential function as time scalar. Controls
        for which time scale the metric will be sensitive. This parameter will
        be ignored if `kernel` is not `None`. May also be :const:`scipy.inf`
        which will lead to only measuring differences in spike count.
    :type tau: Quantity scalar
    :param kernel: Kernel to use in the calculation of the distance. This is not
        the smoothing filter, but its autocorrelation. If `kernel` is `None`, an
        unnormalized Laplacian kernel with a size of `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times might be needed for
        the calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
    :returns: Matrix containing the van Rossum distances for all pairs of spike
        trains.
    :rtype: 2-D array
    """

    if kernel is None:
        if tau == sp.inf:
            spike_counts = [st.size for st in trains]
            return (spike_counts - sp.atleast_2d(spike_counts).T) ** 2
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    k_dist = kernel.summed_dist_matrix(
        [st.view(type=pq.Quantity) for st in trains], not sort)
    vr_dist = sp.empty_like(k_dist)
    for i, j in sp.ndindex(*k_dist.shape):
        vr_dist[i, j] = (
            k_dist[i, i] + k_dist[j, j] - k_dist[i, j] - k_dist[j, i])
    return sp.sqrt(vr_dist)
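A hedged usage sketch (assumes the neo and quantities packages; the SpikeTrain constructor shown matches neo 0.x):

    import quantities as pq
    import neo

    st1 = neo.SpikeTrain([1.0, 2.0, 3.0] * pq.s, t_stop=4.0 * pq.s)
    st2 = neo.SpikeTrain([1.5, 2.5] * pq.s, t_stop=4.0 * pq.s)
    dist = van_rossum_dist([st1, st2], tau=1.0 * pq.s)
    # dist is a symmetric 2x2 array with zeros on the diagonal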
Example #13
import math
import scipy


def template_ellipsoid(shape):
    """
    Returns an ellipsoid binary structure with the supplied main axes that can
    be used as template input to the generalized hough transform.

    @param shape the main axes of the ellipsoid
    @type shape sequence

    @return a bool array containing an ellipsoid
    @rtype scipy.ndarray
    """
    # prepare template array covering one quadrant/octant of the ellipsoid;
    # in odd shape cases this will include the ellipsoid's middle line,
    # otherwise not (ceil reproduces Python 2's round-half-away-from-zero)
    template = scipy.zeros([int(math.ceil(x / 2.)) for x in shape],
                           dtype=scipy.bool_)

    # get real world offset to compute the ellipsoid membership
    rw_offset = []
    for s in shape:
        if int(s) % 2 == 0:  # number before the point is even
            rw_offset.append(0.5 - (s % 2) / 2.)
        else:  # number before the point is odd
            rw_offset.append(-1 * (s % int(s)) / 2.)

    # prepare an array containing the squares of the half axes to avoid computing inside the loop
    shape_pow = scipy.power(scipy.asarray(shape) / 2., 2)

    # we use the ellipse normal form to find all point in its surface as well as volume
    # e.g. for 2D, all voxels inside the ellipse (or on its surface) with half-axes a and b
    #      follow x^2/a^2 + y^2/b^2 <= 1; for higher dimensions accordingly
    # to not have to iterate over each voxel, we make use of the ellipsoids symmetry
    # and construct just a part of the whole ellipse here
    for idx in scipy.ndindex(template.shape):
        # rw_offset shifts the zero-based array indices to real-world
        # ellipsoid coordinates
        distance = sum(math.pow(coordinate + rwo, 2) / axes_pow
                       for axes_pow, coordinate, rwo
                       in zip(shape_pow, idx, rw_offset))
        if distance <= 1:
            template[idx] = True
        
    # we take now our ellipse part and flip it once along each dimension, concatenating it in each step
    # the slicers are constructed to flip in each step the current dimension i.e. to behave like arr[...,::-1,...]
    for i in range(template.ndim):
        slicers = tuple(slice(None, None, -1) if i == j else slice(None)
                        for j in range(template.ndim))
        if 0 == int(shape[i]) % 2:  # even case
            template = scipy.concatenate((template[slicers], template), i)
        else:  # odd case, in which an overlap has to be created
            slicers_truncate = tuple(slice(None, -1) if i == j else slice(None)
                                     for j in range(template.ndim))
            template = scipy.concatenate(
                (template[slicers][slicers_truncate], template), i)

    return template
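For illustration, a small call with arbitrary axes (both odd, so the flipped halves overlap by one line per dimension):

    template = template_ellipsoid((5, 9))
    print(template.shape)        # (5, 9)
    print(template.astype(int))  # 1 inside the ellipsoid, 0 outside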
Example #14
    def fft_no_dask(self, fname, srcdset, destdset, axis):
        """Perform an FFT on a .hdf5 dataset along a given axis.
        This takes an .hdf5 input file and dataset, loads the data into memory,
        performs and FFT and creates a new dataset in the .hdf5 file in which to save the result"""

        # open the hdf5 file
        with hd.File(fname, 'a', libver='latest') as f:

            # get the dimensions of the problem
            dshape = f[srcdset].shape
            cshape = f[srcdset].chunks

            # create a new data set for the result to be stored
            try:
                f.create_dataset(destdset,
                                 dshape,
                                 chunks=cshape,
                                 dtype=complex)
            except Exception:
                # the destination dataset may already exist; reuse it
                pass

            # reshape dask array in order to perform fft
            # weld together existing chunks to span the desired axis
            newcshape = sp.array(cshape)
            newcshape[axis] = dshape[axis]
            newcshape = tuple(newcshape)

            # logic to run through each chunk column
            chunkarr = tuple([int(a / b) for (a, b) in zip(dshape, newcshape)])
            for x in sp.ndindex(chunkarr):
                # get subset of array
                index = tuple([
                    slice(int(a * b), int((a + 1) * b))
                    for (a, b) in zip(x, newcshape)
                ])
                chunk_data = f[srcdset][index]
                # perform fft
                fft_chunk = sp.fft(chunk_data, axis=axis)
                # write to disk
                f[destdset][index] = fft_chunk
        return 0
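A usage sketch under stated assumptions: the file and dataset names are hypothetical, and obj stands for an instance of the enclosing class:

    import h5py as hd
    import scipy as sp

    with hd.File('data.h5', 'w') as f:
        f.create_dataset('raw', data=sp.arange(128.).reshape(8, 16),
                         chunks=(4, 4))

    obj.fft_no_dask('data.h5', 'raw', 'spectrum', axis=1)
    # 'spectrum' now holds the FFT of each length-16 row of 'raw'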