Example #1
def SVMtest(maxT, sumM, val, c_w_l, C=1e3,gamma=1e-1, k=2.2e35, lims=[2.2e3,9e3], reduced=None):


    #val, c_w_l, idx = conditionVal(name=name, nozero=nozero, conc=conc)
    temp = val/c_w_l/sumM*k
    print(maxT.shape, temp.shape)
    #data = scipy.io.readsav('W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave')
    #te = data['en']
    #idx2 = scipy.logical_and(te > lims[0], te < lims[1])

    y = time.time()

    if reduced is None:
        xData = scipy.log(scipy.atleast_2d(maxT).T)
        yData = scipy.log(temp)
    else:
        xData = scipy.log(scipy.atleast_2d(maxT[reduced]).T)
        yData = scipy.log(temp[reduced])

    svm = sklearn.svm.SVR(cache_size=7000)#,C=C,gamma=gamma)
    pipe = Pipeline([('scale',StandardScaler()),('svm',svm)])
    pipe.set_params(svm__C=C,svm__gamma=gamma)

    pipe.fit(xData,yData)
    print(time.time()-y)

    return pipe
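A minimal usage sketch with hypothetical synthetic arrays (the real maxT/sumM/val/c_w_l come from conditionVal in the surrounding code; assumes numpy, scikit-learn and the legacy scipy aliases used above are importable):

import numpy as np

n = 200
maxT = np.linspace(2.5e3, 8.5e3, n)                        # stand-in temperatures
sumM = np.full(n, 5e18)
c_w_l = np.full(n, 1e-4)
val = 1e-3 * np.exp(-maxT / 5e3) * c_w_l * sumM / 2.2e35   # synthetic target so temp = val/c_w_l/sumM*k is positive
pipe = SVMtest(maxT, sumM, val, c_w_l, C=1e3, gamma=1e-1)
pred = np.exp(pipe.predict(np.log(np.atleast_2d(maxT[:5]).T)))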
Example #2
def prob_contour(H, xedges, yedges, p=0.95):
    """Compute PDF value enclosing desired probability mass.

    The contour corresponding to the returned PDF value will contain
    (approximately) p integrated probability mass.

    Parameters
    ----------
    H : 2d array, (n_x, n_y)
        Normalized (as PDF) histogram.
    xedges : 1d array, (n_x + 1,)
        X edges of histogram bins.
    yedges : 1d array, (n_y + 1,)
        Y edges of histogram bins.
    p : float or 1d array, optional
        Probability mass (or masses) for which to find the enclosing contour
        value. Default is 0.95.
    """
    # Plan: Find highest value, add. Repeat until target probability reached,
    # return value of H at last point added. This should be the contour which
    # encloses the desired fraction of probability mass.
    dx = scipy.atleast_2d(scipy.diff(xedges)).T
    dy = scipy.atleast_2d(scipy.diff(yedges))
    PM = (H * dx * dy).ravel()
    H = H.ravel()
    # Sort into order of decreasing probability mass:
    srtidx = PM.argsort()[::-1]
    # Find cumulative sum:
    PM_sum = PM[srtidx].cumsum()
    # Find first point where PM_sum >= p:
    mask = PM_sum >= scipy.atleast_2d(p).T
    out = scipy.zeros(mask.shape[0])
    for i in range(mask.shape[0]):
        idx, = scipy.nonzero(mask[i, :])
        out[i] = H[srtidx[idx[0]]]
    return out
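A minimal usage sketch (hypothetical data; assumes numpy is available alongside the legacy scipy array aliases used above):

import numpy as np

samples = np.random.randn(100000, 2)
H, xedges, yedges = np.histogram2d(samples[:, 0], samples[:, 1],
                                   bins=50, density=True)
level = prob_contour(H, xedges, yedges, p=0.95)
# level[0] is the PDF value whose contour encloses roughly 95% of the mass;
# passing an array for p returns one contour level per requested probability.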
Example #3
def gridSearch(maxT, sumM, name,loc='svmcrossvalid.p',reduction=50,k=2.2e35,gamma=[-3,3], C=[0,6], nozero=False, conc='c_w_l', lims=[2.2e3,9e3]):

    val, c_w_l, idx = conditionVal(name=name, nozero=nozero, conc=conc)
    idx1 = sample(val,len(val)/reduction)
    idx2 = sample(val,len(val)/reduction) # very small subsamples, used for testing the algorithms
    index0 = scipy.logspace(gamma[0],gamma[1],int(abs(gamma[0]-gamma[1])+1))
    index1 = scipy.logspace(C[0],C[1],int(abs(C[0]-C[1])+1))

    data = scipy.io.readsav('W_Abundances_grid_puestu_adpak_fitscaling_74_0.00000_5.00000_1000_idlsave')
    te = data['en']
    idx2 = scipy.logical_and(te > lims[0], te < lims[1])


    temp = val[idx2]/c_w_l[idx2]/sumM[idx2]*k


    output = scipy.zeros((len(index0),len(index1)))
    output2 = scipy.zeros((len(index0),len(index1),len(te[idx2])))



    for i in xrange(len(index0)):
        for j in xrange(len(index1)):
            print(i,j)
            pipe = SVMtest(maxT, sumM, val, c_w_l, reduced=idx1, gamma=index0[i], C=index1[j],k=k)
            output[i,j] = pipe.score(scipy.log(scipy.atleast_2d(maxT[idx2]).T),scipy.log(temp))
            output2[i,j] = scipy.exp(pipe.predict(scipy.log(scipy.atleast_2d(te[idx2]).T)))

    pickle.dump([output,output2],open(loc,'wb'))
    return output,output2
Example #4
    def intercept(self, ray):
        """Solves for intersection point of surface and a ray or Beam
    
        Args:
            ray: Ray or Beam object
                It must be in the same coordinate space as the surface object.
            
        Returns:
            s: value of s [meters] which intercepts along norm; returns None
            when there is no intersection.
        
        Examples:
            Accepts all point and point-derived object inputs, though all data 
            is stored as a python object.

            Generate a y-direction Ray in Cartesian coords using a Vec from (0,0,1)::
            
                    cen = geometry.Center(flag=True)
                    ydir = geometry.Vecx((0,1,0))
                    zpt = geometry.Point((0,0,1),cen)

        """


        # Procedure will be to generate
        if self._origin is ray._origin:
            try:
                rcopy = ray.copy()
                rcopy.redefine(self)
                
                intersect = _beam.interceptCyl(scipy.atleast_2d(rcopy.x()[:,-1]), 
                                               scipy.atleast_2d(rcopy.norm.unit), 
                                               scipy.array([self.sagi.s,self.sagi.s]),
                                               scipy.array([-self.norm.s,self.norm.s])) + rcopy.norm.s[-1]
                
                if not scipy.isfinite(intersect):
                    #relies on r1 using arctan2 so that it sets the branch cut properly (-pi,pi]
                    return None
                elif self.edgetest(intersect, (rcopy(intersect)).r1()):
                        return intersect
                else:
                    rcopy.norm.s[-1] = intersect
                    intersect = _beam.interceptCyl(scipy.atleast_2d(rcopy.x()[:,-1]), 
                                                   scipy.atleast_2d(rcopy.norm.unit), 
                                                   scipy.array([self.sagi.s,self.sagi.s]),
                                                   scipy.array([-self.norm.s,self.norm.s])) + rcopy.norm.s[-1]
                    if not scipy.isfinite(intersect):
                        #relies on r1 using arctan2 so that it sets the branch cut properly (-pi,pi]
                        return None
                    elif self.edgetest(intersect, (rcopy(intersect)).r1()):
                        return None
                    else:
                        return None

            except AttributeError:
                raise ValueError('not a surface object')
        else:           
            raise ValueError('not in same coordinate system, use redefine and try again')
Example #5
    def __call__(self, X, n):
        n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
        X = scipy.atleast_2d(scipy.asarray(X))
        n_unique = unique_rows(n)
        mu = scipy.zeros(X.shape[0])
        for nn in n_unique:
            idxs = (n == nn).all(axis=1)
            mu[idxs] = self.fun(X[idxs, :], nn, *self.params)

        return mu
Example #6
 def __call__(self, X, n):
     n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
     X = scipy.atleast_2d(scipy.asarray(X))
     n_unique = unique_rows(n)
     mu = scipy.zeros(X.shape[0])
     for nn in n_unique:
         idxs = (n == nn).all(axis=1)
         mu[idxs] = self.fun(X[idxs, :], nn, *self.params)
     
     return mu
	def topterms(self,n_terms=10):
		""" This function is given. """
		vec = sp.atleast_2d(sp.arange(0,self.n_words))
		topics = []
		for k in xrange(self.n_topics):
			probs = sp.atleast_2d(self._phi[k,:])
			mat = sp.append(probs,vec,0)
			sind = sp.array([mat[:,i] for i in sp.argsort(mat[0])]).T
			topics.append([self.vocab[int(sind[1,self.n_words - 1 - i])] for i in xrange(n_terms)])
		return topics
def worker_quality(predictions, num_classes):
    predictions = sp.atleast_2d(predictions)
    num_workers, num_objects = predictions.shape

    error_rates = sp.zeros((num_workers, num_classes, num_classes))
    diy, diz = sp.diag_indices(num_classes)
    error_rates[:, diy, diz] = 1

    while True:
        # E step
        new_predictions = sp.zeros((num_objects, num_classes))
        for i in xrange(num_objects):
            individual_predictions = predictions[:, i]
            individual_error_rates = error_rates[range(num_workers), individual_predictions, individual_predictions]
            new_predictions[i, :] = sp.bincount(individual_predictions, individual_error_rates, minlength=num_classes)

        correct_labels = sp.argmax(new_predictions, axis=1)
        count_per_label = sp.bincount(correct_labels)

        # M step
        new_error_rates = sp.zeros((num_workers, num_classes, num_classes))
        for i, label in enumerate(correct_labels):
            new_error_rates[range(num_workers), label, predictions[:, i]] += 1

        for i in xrange(num_classes):
            new_error_rates[:, :, i] /= count_per_label

        diff_error_rates = sp.absolute(new_error_rates - error_rates)
        error_rates = new_error_rates

        if sp.amax(diff_error_rates) < 0.001:
            break


    # calculate the cost of each worker
    class_priors = sp.bincount(correct_labels, minlength=num_classes) / float(num_objects)
    costs = []
    for k in xrange(num_workers):
        worker_class_priors = sp.dot(sp.atleast_2d(class_priors), error_rates[k])[0] + 0.0000001

        cost = 0
        for j in xrange(num_classes):
            soft_label = error_rates[k, :, j] * class_priors / worker_class_priors[j]

            soft_label_cost = 0.0
            for i in xrange(num_classes):
                soft_label_cost += sp.sum(soft_label[i] * soft_label)
            soft_label_cost -= sp.sum(soft_label ** 2) # subtract the diagonal entries (those costs = 0)
            cost += soft_label_cost * worker_class_priors[j]

        costs.append(cost)

    return error_rates, correct_labels, costs
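A minimal usage sketch with hypothetical labels (Python 2 style, matching the xrange-based code above): three workers labelling five objects drawn from two classes.

predictions = sp.array([[0, 1, 1, 0, 1],
                        [0, 1, 0, 0, 1],
                        [1, 1, 1, 0, 1]])
error_rates, correct_labels, costs = worker_quality(predictions, num_classes=2)
# error_rates[k] is the estimated confusion matrix of worker k,
# correct_labels holds the EM consensus label per object,
# costs gives one quality score per worker (lower is better).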
Example #9
def mat_atan2(y,x):
    if shape(y) != shape(x):
        raise IndexError('In matrix atan2, shape(x) != shape(y)')
    x=atleast_2d(x)
    y=atleast_2d(y)
    nr=shape(y)[0]
    nc=shape(y)[1]
    outmat=zeros((nr,nc),'d')
#    pdb.set_trace()
    for i in range(nr):
        for j in range(nc):
            outmat[i,j]=atan2(y[i,j],x[i,j])
    return colwise(outmat)
Example #10
def sinc_interp1d(x, s, r):
    """Interpolates `x`, sampled at times `s`
    Output `y` is sampled at times `r`

    inspired by Matlab:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html

    :param ndarray x: input data time series
    :param ndarray s: input sampling time series (regular sample interval)
    :param ndarray r: output sampling time series
    :return ndarray: output data time series (regular sample interval)
    """

    # init
    s = sp.asarray(s)
    r = sp.asarray(r)
    x = sp.asarray(x)
    if x.ndim == 1:
        x = sp.atleast_2d(x)
    else:
        if x.shape[0] == len(s):
            x = x.T
        else:
            if x.shape[1] != s.shape[0]:
                raise ValueError('x and s must have the same temporal extent')
    if sp.allclose(s, r):
        return x.T
    T = s[1] - s[0]

    # resample
    sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))
    return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
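A minimal usage sketch on a synthetic sine (hypothetical data; sp is the scipy alias used by the function above):

t = sp.linspace(0.0, 1.0, 20)        # regular input sampling
t_fine = sp.linspace(0.0, 1.0, 200)  # denser output grid
x = sp.sin(2 * sp.pi * 3 * t)
y = sinc_interp1d(x, t, t_fine)      # -> shape (200, 1)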
Example #11
    def phigprov(self, Pp, Pg, theta):
        """ Calculate transition probabilities

        Parameters
        ------------
        Pp : ndarray, shape (n, k)
             Conditional choice probabilities for provinces
        Pg : ndarray, shape (n, 2 k)
             Conditional choice probabilities for the government
        theta : ndarray, shape (5, )
             Parameters

        Returns
        ---------
        V : ndarray
            Observable state values

        Notes
        -----------

        Takes conditional choice probabilities :math:`P` and :math:`\\theta`
        as an input and returns values :math:`V^P`.
        This is the mapping :math:`\\Phi` in part (b) of the assignment.

        This is a wrapper for the matlab function **Phigprov**.
        
        """
        theta = sp.atleast_2d(theta)
        return pytave.feval(1, "Phigprov", Pp, Pg, theta, self.model())[0]
Example #12
    def new_p(self, Pp, Pg, theta):
        """ Calculate transition probabilities

        Parameters
        --------------
        
        Pp : ndarray, shape (n, k)
             Conditional choice probabilities for provinces
        Pg : ndarray, shape (n, 2 k)
             Conditional choice probabilities for the government
        theta : ndarray, shape (5, )
             Parameters

        Returns
        ---------
        Pp : ndarray, shape (n, k)
             New conditional choice probabilities for provinces
        Pg : ndarray, shape (n, 2 k)
             New conditional choice probabilities for the government

        Notes
        -----------

        Takes conditional choice probabilities :math:`P` and :math:`\\theta`
        as an input and returns new conditional choice values.
        This is the mapping :math:`\\Psi` in part (c) of the assignment.

        This is a wrapper for the matlab function **NewP**.
        
        """
        theta = sp.atleast_2d(theta)
        return pytave.feval(2, "NewP", Pp, Pg, theta, self.model())
Example #13
def chunk_data(data, epochs=None, invert=False):
    """returns a generator of chunks from data given epochs

    :type data: ndarray
    :param data: signal data [[samples, channels]]
    :type epochs: ndarray
    :param epochs: epoch set, positive mask
    :type invert: bool
    :param invert: invert epochs, negative mask instead of positive mask
    :returns: generator - data chunks as per :epochs:
    """

    # checks
    data = sp.asarray(data)
    if data.ndim != 2:
        data = sp.atleast_2d(data).T
    if epochs is not None:
        if epochs.ndim != 2:
            raise ValueError("epochs has to be ndim=2 like [[start,end]]")
    if invert is True and epochs is not None:
        epochs = invert_epochs(epochs, end=data.shape[0])
    if epochs is None or len(epochs) == 0:
        epochs = [[0, data.shape[0]]]

    # yield data chunks
    for ep in epochs:
        yield data[ep[0] : ep[1], :], list(ep)
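A minimal usage sketch (hypothetical data): pulling two epochs out of a two-channel signal.

data = sp.zeros((1000, 2))
epochs = sp.array([[100, 200], [400, 450]])
for chunk, (start, end) in chunk_data(data, epochs=epochs):
    print(chunk.shape, (start, end))   # (100, 2) (100, 200), then (50, 2) (400, 450)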
Example #14
def vec2ten(data, nchan=4):
    """converts from templates/spikes that are concatenated across the
    channels to tensors that have an extra dim for the channels

    :type data: ndarray
    :param data: input array [templates][vars * channels]
    :type nchan: int
    :param nchan: count of channels
        Default=4
    :returns: ndarray - data converted to tensor [templates][vars][channels]
    """

    if data.ndim == 1:
        data = sp.atleast_2d(data)
    n, dim = data.shape

    if dim % nchan != 0:
        raise ValueError(
            'dim %% nchan != 0 !! dim=%s, nchan=%s' % (dim, nchan))
    tf = dim // nchan

    rval = sp.zeros((n, tf, nchan), data.dtype)

    for i in xrange(n):
        for c in xrange(nchan):
            rval[i, :, c] = data[i, c * tf:(c + 1) * tf]
    return rval
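A minimal usage sketch (hypothetical shapes): three concatenated 4-channel templates of 10 samples each.

data = sp.arange(3 * 40, dtype=float).reshape(3, 40)   # [templates][vars * channels]
ten = vec2ten(data, nchan=4)                           # -> shape (3, 10, 4)
# ten[0, :, 1] is the second channel block of the first template, i.e. data[0, 10:20]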
Example #15
def nullspace(A, atol=1e-13, rtol=0):
    '''Compute an approximate basis for the nullspace of A.
    The algorithm used by this function is based on the singular value
    decomposition of `A`. This implementation was copied
    from the scipy cookbook: http://www.scipy.org/Cookbook/RankNullspace

    @param A: ndarray
        A should be at most 2-D.  A 1-D array with length k will be treated
        as a 2-D with shape (1, k)
    @param atol : float
        The absolute tolerance for a zero singular value.  Singular values
        smaller than `atol` are considered to be zero.
    @param rtol : float
        The relative tolerance.  Singular values less than rtol*smax are
        considered to be zero, where smax is the largest singular value.

    @note: If both `atol` and `rtol` are positive, the combined tolerance is the
    maximum of the two; that is::
        tol = max(atol, rtol * smax)
    Singular values smaller than `tol` are considered to be zero.

    @return: ns ndarray
        If `A` is an array with shape (m, k), then `ns` will be an array
        with shape (k, n), where n is the estimated dimension of the
        nullspace of `A`.  The columns of `ns` are a basis for the
        nullspace; each element in numpy.dot(A, ns) will be approximately
        zero.
    '''

    A = sp.atleast_2d(A)
    _u, s, vh = LA.svd(A)
    tol = max(atol, rtol * s[0])
    nnz = (s >= tol).sum()
    ns = vh[nnz:].conj().T
    return ns
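A minimal usage sketch (hypothetical data): the nullspace of a rank-1 matrix.

A = sp.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0]])   # rank 1
ns = nullspace(A)                 # -> shape (3, 2)
# the columns of ns span the nullspace, so A.dot(ns) is approximately zero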
Example #16
def sinc_interp1d(x, s, r):
    """Interpolates `x`, sampled at times `s`
    Output `y` is sampled at times `r`

    inspired by Matlab:
    http://phaseportrait.blogspot.com/2008/06/sinc-interpolation-in-matlab.html

    :param ndarray x: input data time series
    :param ndarray s: input sampling time series (regular sample interval)
    :param ndarray r: output sampling time series
    :return ndarray: output data time series (regular sample interval)
    """

    # init
    s = sp.asarray(s)
    r = sp.asarray(r)
    x = sp.asarray(x)
    if x.ndim == 1:
        x = sp.atleast_2d(x)
    else:
        if x.shape[0] == len(s):
            x = x.T
        else:
            if x.shape[1] != s.shape[0]:
                raise ValueError('x and s must have the same temporal extent')
    if sp.allclose(s, r):
        return x.T
    T = s[1] - s[0]

    # resample
    sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))
    return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T
Example #17
    def summed_dist_matrix(self, vectors, presorted=False):
        """ Calculates the sum of all element pair distances for each
        pair of vectors.

        If :math:`(a_1, \\dots, a_n)` and :math:`(b_1, \\dots, b_m)` are the
        :math:`u`-th and :math:`v`-th vector from `vectors` and :math:`K` the
        kernel, the resulting entry in the 2D array will be :math:`D_{uv}
        = \\sum_{i=1}^{n} \\sum_{j=1}^{m} K(a_i - b_j)`.

        :param sequence vectors: A sequence of Quantity 1D to calculate the
            summed distances for each pair. The required units depend on the
            kernel. Usually it will be the inverse unit of the kernel size.
        :param bool presorted: Some optimized specializations of this function
            may need sorted vectors. Set `presorted` to `True` if you know that
            the passed vectors are already sorted to skip the sorting and thus
            increase performance.
        :rtype: Quantity 2D
        """

        D = sp.empty((len(vectors), len(vectors)))
        if len(vectors) > 0:
            might_have_units = self(vectors[0])
            if hasattr(might_have_units, 'units'):
                D = D * might_have_units.units
            else:
                D = D * pq.dimensionless

        for i, j in sp.ndindex(len(vectors), len(vectors)):
            D[i, j] = sp.sum(
                self((vectors[i] - sp.atleast_2d(vectors[j]).T).flatten()))
        return D
    def summed_dist_matrix(self, vectors, presorted=False):
        """ Calculates the sum of all element pair distances for each
        pair of vectors.

        If :math:`(a_1, \\dots, a_n)` and :math:`(b_1, \\dots, b_m)` are the
        :math:`u`-th and :math:`v`-th vector from `vectors` and :math:`K` the
        kernel, the resulting entry in the 2D array will be :math:`D_{uv}
        = \\sum_{i=1}^{n} \\sum_{j=1}^{m} K(a_i - b_j)`.

        :param sequence vectors: A sequence of Quantity 1D to calculate the
            summed distances for each pair. The required units depend on the
            kernel. Usually it will be the inverse unit of the kernel size.
        :param bool presorted: Some optimized specializations of this function
            may need sorted vectors. Set `presorted` to `True` if you know that
            the passed vectors are already sorted to skip the sorting and thus
            increase performance.
        :rtype: Quantity 2D
        """

        D = sp.empty((len(vectors), len(vectors)))
        if len(vectors) > 0:
            might_have_units = self(vectors[0])
            if hasattr(might_have_units, 'units'):
                D = D * might_have_units.units
            else:
                D = D * pq.dimensionless

        for i, j in sp.ndindex(len(vectors), len(vectors)):
            D[i, j] = sp.sum(self(
                (vectors[i] - sp.atleast_2d(vectors[j]).T).flatten()))
        return D
Example #19
def chunk_data(data, epochs=None, invert=False, min_len=0):
    """returns a generator of chunks from data given epochs

    :type data: ndarray
    :param data: signal data [[samples, channels]]
    :type epochs: ndarray
    :param epochs: epoch set, positive mask
    :type invert: bool
    :param invert: invert epochs, negative mask instead of positive mask
    :param min_len: epochs with fewer samples will be ignored and not returned
    :type min_len: int
    :returns: generator - data chunks as per :epochs:
    """

    # checks
    data = sp.asarray(data)
    if data.ndim != 2:
        data = sp.atleast_2d(data).T
    if epochs is not None:
        if epochs.ndim != 2:
            raise ValueError('epochs has to be ndim=2 like [[start,end]]')
    if invert is True and epochs is not None:
        epochs = invert_epochs(epochs, end=data.shape[0])
    if epochs is None or len(epochs) == 0:
        epochs = [[0, data.shape[0]]]

    # yield data chunks
    for ep in epochs:
        if ep[1] - ep[0] >= min_len:
            yield data[ep[0]:ep[1], :], list(ep)
Example #20
 def arcfit(p, x, arc, mod):
     fit = {'coeff': scipy.atleast_2d(p).T, 'type': 'polynomial'}
     w = sf.genfunc(x, 0., fit)
     cond = (w > bcutoff) & (w < rcutoff)
     m = interpolate.splev(w[cond], mod)
     chi = (m - arc[cond]) / abs(arc[cond])**0.5
     return chi
Example #21
    def plot_filter_set(self, ph=None, show=False):
        """plot the filter set in a waveform plot"""

        # get plotting tools
        try:
            from spikeplot import waveforms
        except ImportError:
            return None

        # checks
        if self.nf == 0:
            warnings.warn("skipping plot, no active units!")
            return None

        # init
        units = {}
        for k in self._idx_active_set:
            units[k] = sp.atleast_2d(self.bank[k].f_conc)

        return waveforms(
            units,
            tf=self._tf,
            plot_separate=True,
            plot_mean=False,
            plot_single_waveforms=False,
            plot_handle=ph,
            show=show,
        )
Example #22
    def reconstruct(self, X):
        n_features = sp.atleast_2d(X).shape[1]
        latent = sp.dot(self.inv_M, sp.dot(self.weight.T, (X - self.predict_mean).T))
        eps = sprd.multivariate_normal(sp.zeros(n_features), self.sigma2 * sp.eye(n_features))
        recons = sp.dot(self.weight, latent) + self.predict_mean + eps

        return recons
def signal(signal, events=None, epochs=None, spike_trains=None,
                spike_waveforms=None):
    """ Create a plot from an AnalogSignal.

    :param AnalogSignal signal: The signal to plot.
    :param sequence events: A list of Event objects to be included in the
        plot.
    :param sequence epochs: A list of Epoch objects to be included in the
        plot.
    :param dict spike_trains: A dictionary of SpikeTrain objects to be
        included in the plot. Spikes are plotted as vertical lines.
        Indices of the dictionary (typically Unit objects) are used
        for color and legend entries.
    :param sequence spike_waveforms: A dictionary of lists of Spike objects
        to be included in the plot. Waveforms of spikes are overlaid on
        the signal. Indices of the dictionary (typically Unit objects) are
        used for color and legend entries.
    """
    # Plot title
    win_title = 'Analog Signal'
    if signal.recordingchannel:
        win_title += ' | Recording Channel: %s' % \
                     signal.recordingchannel.name
    if signal.segment:
        win_title += ' | Segment: %s' % signal.segment.name
    win = PlotDialog(toolbar=True, wintitle=win_title)

    signalarray = neo.AnalogSignalArray(sp.atleast_2d(sp.asarray(signal)).T,
        units=signal.units, sampling_rate=signal.sampling_rate)

    _plot_signal_array_on_window(win, signalarray, events, epochs,
        spike_trains, spike_waveforms, False)
def _multi_norm(x, mean):
    """ Evaluate pdf of multivariate normal distribution with a mean
        at rows of x with high precision.
    """
    d = x.shape[1]
    fac = (2 * sp.pi)**(-d / 2.0)
    y = cdist(x, sp.atleast_2d(mean), 'sqeuclidean') * -0.5
    return fac * sp.exp(sp.longdouble(y))
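A minimal usage sketch (hypothetical points; assumes sp is the scipy alias and cdist comes from scipy.spatial.distance, as the function above requires):

pts = sp.array([[0.0, 0.0], [1.0, 1.0]])
print(_multi_norm(pts, sp.zeros(2)))   # standard bivariate normal pdf: ~[[0.159], [0.0586]]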
Example #25
def genCylGrid(x0, x1, x2, edges=False):

    if edges:
        # apply the edge adjustment and write the results back; rebinding the
        # loop variable alone would leave x0, x1 and x2 unchanged
        centers = []
        for i in (x0, x1, x2):
            i = scipy.insert(i, 0, 2 * i[1] - i[2])
            i = scipy.append(i, 2 * i[-1] - i[-2])
            centers.append((i[1:] + i[:-1]) / 2)
        x0, x1, x2 = centers

    pnts = scipy.empty((x0.size, x1.size, x2.size, 3))
    xin = scipy.dot(scipy.atleast_2d(x0).T, scipy.atleast_2d(scipy.cos(x1)))
    yin = scipy.dot(scipy.atleast_2d(x0).T, scipy.atleast_2d(scipy.sin(x1)))
    zee = scipy.ones(yin.shape)
    for i in range(x2.size):
        pnts[:, :, i, 0] = xin
        pnts[:, :, i, 1] = yin
        pnts[:, :, i, 2] = x2[i] * zee
    return pnts
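A minimal usage sketch (hypothetical coordinate vectors) for a small cylindrical grid:

r = scipy.linspace(0.5, 1.0, 4)
theta = scipy.linspace(0.0, scipy.pi, 8)
z = scipy.linspace(-1.0, 1.0, 5)
grid = genCylGrid(r, theta, z)   # -> shape (4, 8, 5, 3): cartesian (x, y, z) per node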
def _multi_norm(x, mean):
    """ Evaluate pdf of multivariate normal distribution with a mean
        at rows of x with high precision.
    """
    d = x.shape[1]
    fac = (2 * sp.pi) ** (-d / 2.0)
    y = cdist(x, sp.atleast_2d(mean), "sqeuclidean") * -0.5
    return fac * sp.exp(sp.longdouble(y))
Example #27
 def _stop_training(self, *args, **kwargs):
     # produce data in one piece
     self.data = sp.vstack(self.data)
     # calculate energy
     self.energy = self._energy_func(self.data)
     if self.energy.ndim == 1:
         self.energy = sp.atleast_2d(self.energy).T
     self.size, self.nchan = self.energy.shape
Example #28
def genCylGrid(x0,x1,x2,edges=False):

    if edges:
        # apply the edge adjustment and write the results back; rebinding the
        # loop variable alone would leave x0, x1 and x2 unchanged
        centers = []
        for i in (x0,x1,x2):
            i = scipy.insert(i,0,2*i[1]-i[2])
            i = scipy.append(i,2*i[-1]-i[-2])
            centers.append((i[1:]+i[:-1])/2)
        x0, x1, x2 = centers

    pnts = scipy.empty((x0.size, x1.size, x2.size,3))
    xin = scipy.dot(scipy.atleast_2d(x0).T, scipy.atleast_2d(scipy.cos(x1)))
    yin = scipy.dot(scipy.atleast_2d(x0).T, scipy.atleast_2d(scipy.sin(x1)))
    zee = scipy.ones(yin.shape)
    for i in range(x2.size):
        pnts[:,:,i,0] = xin
        pnts[:,:,i,1] = yin
        pnts[:,:,i,2] = x2[i]*zee
    return pnts
Example #29
def regular_cube_innerproduct(rcc, k):
    """
    For a given regular_cube_complex, compute a matrix
    representing the k-form innerproduct.

    These elements are similar to Whitney forms,
    except using standard linear (bilinear,trilinear,..)
    elements for 0-forms.
    """

    N = rcc.complex_dimension()

    #standard cube is [0,0,..,0] [0,1,...,N]
    standard_cube = atleast_2d(array([0] * N + range(N), dtype='i'))
    standard_k_faces = standard_cube
    for i in range(N, k, -1):
        standard_k_faces = cube_array_boundary(standard_k_faces, i)[0]

    k_faces_per_cube = standard_k_faces.shape[0]

    K = zeros((k_faces_per_cube, k_faces_per_cube))  #local stiffness matrix
    h = 1
    V = h**N  #cube volume
    scale = V * (1 / h)**2 * (1 / 3.0)**(N - k)
    for i, row_i in enumerate(standard_k_faces):
        for j, row_j in enumerate(standard_k_faces):
            if all(row_i[N:] == row_j[N:]):
                differences = (row_i[:N] != row_j[:N])
                differences[row_i[N:]] = 0
                K[i, j] = scale * (1.0 / 2.0)**sum(differences)
            else:
                K[i, j] = 0

    CA = rcc[-1].cube_array[:, :N]
    num_cubes = CA.shape[0]

    k_faces = tile(hstack((CA, zeros((CA.shape[0], k), dtype=CA.dtype))),
                   (1, k_faces_per_cube)).reshape((-1, N + k))
    k_faces += tile(standard_k_faces, (num_cubes, 1))

    k_face_array = rcc[k].cube_array

    face_indices = cube_array_search(k_face_array, k_faces)

    rows = face_indices.repeat(k_faces_per_cube)
    cols = face_indices.reshape(
        (-1, k_faces_per_cube)).repeat(k_faces_per_cube, axis=0).reshape(
            (-1, ))
    data = K.reshape((1, -1)).repeat(num_cubes, axis=0).reshape((-1, ))

    # temporary memory cost solution - eliminate zeros from COO representation
    nz_mask = data != 0.0
    rows = rows[nz_mask]
    cols = cols[nz_mask]
    data = data[nz_mask]

    shape = (len(k_face_array), len(k_face_array))
    return coo_matrix((data, (rows, cols)), shape).tocsr()
def get_all_forces(atoms, mlmodel, grid_spacing, extfield=None, mixing=[1.,0.,0.], lammpsdata=None, do_update=False):
    # get actual forces and potential energy of configuration
    pot_energy, forces = calc_lammps(atoms, preloaded_data=lammpsdata)
    forces = [forces]

    ### ML IS HERE ###
    if not (mlmodel is None or (mixing[1] == 0 and not do_update)):
        # Accumulate the new observation in the dataset
        coarse_colvars = round_vector(atoms.colvars(), precision=grid_spacing)
        distance_from_data = sp_dist.cdist(
            sp.atleast_2d(coarse_colvars), mlmodel.X_fit_).ravel()
        # check if configuration has already occurred
        if distance_from_data.min() == 0.0:
            index = list(distance_from_data).index(0.0)
            # Learn E_min: uncomment this
            if pot_energy < mlmodel.y[index]: 
                mlmodel.y[index] = pot_energy
            else:
                do_update = False
    #             # Learn free energy: uncomment this
    #             beta = 1 / self.temp
    #             mlmodel.y[index] += - 1 / beta * sp.log(
    #                 1 + sp.exp(- beta * (pot_energy - mlmodel.y[index])))
    #             pot_energy = mlmodel.y[index]
        else:
            mlmodel.accumulate_data(coarse_colvars, pot_energy)
        if do_update:
            # update ML potential with all the data contained in it.
            mlmodel.update_fit()
        # Get ML constraint forces if the model is fitted
        if hasattr(mlmodel, 'dual_coef_') and pot_energy < 0:
            ml_forces = get_constraint_forces(atoms, mlmodel)
            # ml_forces /= sp.mean(map(LA.norm, ml_forces))
            # ml_forces *= 1 * sp.mean(map(LA.norm, forces[0]))
            forces.append(ml_forces)
        else:
            forces.append(sp.zeros(forces[0].shape))
        # X_near = Xplotgrid([atoms.phi() - 0.2, atoms.psi() - 0.2], [atoms.phi() - 0.2, atoms.psi() - 0.2], 2, 10)
        # y_near_mean = mlmodel.predict(X_near).mean()
        # if pot_energy < y_near_mean:
        #             mix = 1.2
        #         else:
        #             mix = 0.8

    # EXTERNAL FIELD IS HERE
    if not (extfield is None or mixing[2] == 0):
        colvars = round_vector(atoms.colvars(), precision=grid_spacing)
        extfield.update_cost(colvars, pot_energy) 
        extfield_forces = get_extfield_forces(atoms, extfield)
        extfield_forces /= sp.mean(map(LA.norm, extfield_forces))
        extfield_forces *= sp.mean(map(LA.norm, forces[0]))
        forces.append(extfield_forces)
    # Compose the actual and the ML forces together by mixing them accordingly
    # a [1,-1,0] mixing would result, in the perfect fitting limit, to a zero
    # mean field motion.
    forces = [m_i * f_i for m_i, f_i in zip(mixing[:len(forces)], forces)]
    f = sp.sum(forces, axis=0)
    return f, pot_energy, forces
def detect_skew(img, min_angle=-20, max_angle=20, quality='low'):
    img = sp.atleast_2d(img)
    rows, cols = img.shape
    min_min_angle = min_angle
    max_max_angle = max_angle

    if quality == 'low':
        resolution = sp.arctan2(2.0, cols) * 180.0 / sp.pi
        min_target_size = 100
        resize_order = 1
    elif quality == 'high':
        resolution = sp.arctan2(1.0, cols) * 180.0 / sp.pi
        min_target_size = 300
        resize_order = 3
    else:
        resolution = sp.arctan2(1.0, cols) * 180.0 / sp.pi
        min_target_size = 200
        resize_order = 2

    # resize the image so it's faster to work with
    min_size = min(rows, cols)
    target_size = min_target_size if min_size > min_target_size else min_size
    resize_ratio = float(target_size) / min_size
    img = imresize(img, resize_ratio)
    rows, cols = img.shape

    # pad the image and invert the colors
    img *= -1
    img += 255
    padded_img = sp.zeros((rows*2, cols*2))
    padded_img[rows//2:rows//2+rows, cols//2:cols//2+cols] = img
    img = padded_img

    # keep dividing the interval in half to achieve O(log(n))
    while True:
        current_resolution = (max_angle - min_angle) / 30.0
        best_angle = None
        best_variance = 0.0

        # rotate the image, sum the pixel values in each row for each rotation
        # then find the variance of all the sums, pick the highest variance
        for i in xrange(31):
            angle = min_angle + i * current_resolution
            rotated_img = rotate(img, angle, reshape=False, order=resize_order)
            num_black_pixels = sp.sum(rotated_img, axis=1)
            variance = sp.var(num_black_pixels)
            if variance > best_variance:
                best_angle = angle
                best_variance = variance

        if current_resolution < resolution:
            break

        # update the angle range
        min_angle = max(best_angle - current_resolution, min_min_angle)
        max_angle = min(best_angle + current_resolution, max_max_angle)

    return best_angle
Example #32
def regular_cube_innerproduct(rcc,k):      
    """
    For a given regular_cube_complex, compute a matrix
    representing the k-form innerproduct.

    These elements are similar to Whitney forms,
    except using standard linear (bilinear,trilinear,..)
    elements for 0-forms.
    """

    N = rcc.complex_dimension()

    #standard cube is [0,0,..,0] [0,1,...,N]   
    standard_cube  = atleast_2d(array([0]*N + range(N),dtype='i'))
    standard_k_faces = standard_cube
    for i in range(N,k,-1):        
        standard_k_faces = cube_array_boundary(standard_k_faces,i)[0]

        
    k_faces_per_cube = standard_k_faces.shape[0]


    K = zeros((k_faces_per_cube,k_faces_per_cube)) #local stiffness matrix
    h = 1
    V = h**N #cube volume
    scale = V * (1/h)**2 * (1/3.0)**(N-k)
    for i,row_i in enumerate(standard_k_faces):
        for j,row_j in enumerate(standard_k_faces):
            if all(row_i[N:] == row_j[N:]):
                differences = (row_i[:N] != row_j[:N])
                differences[row_i[N:]] = 0                
                K[i,j] = scale * (1.0/2.0)**sum(differences)
            else:
                K[i,j] = 0
        

    CA = rcc[-1].cube_array[:,:N]
    num_cubes = CA.shape[0]

    k_faces  = tile(hstack((CA,zeros((CA.shape[0],k),dtype=CA.dtype))),(1,k_faces_per_cube)).reshape((-1,N+k))
    k_faces += tile(standard_k_faces,(num_cubes,1))
    
    k_face_array = rcc[k].cube_array

    face_indices = cube_array_search(k_face_array,k_faces)

    rows = face_indices.repeat(k_faces_per_cube)
    cols = face_indices.reshape((-1,k_faces_per_cube)).repeat(k_faces_per_cube,axis=0).reshape((-1,))
    data = K.reshape((1,-1)).repeat(num_cubes,axis=0).reshape((-1,))
    
    # temporary memory cost solution - eliminate zeros from COO representation
    nz_mask = data != 0.0
    rows = rows[nz_mask]
    cols = cols[nz_mask]
    data = data[nz_mask]

    shape = (len(k_face_array),len(k_face_array))
    return coo_matrix( (data,(rows,cols)), shape).tocsr()
Example #33
def nb_vals(matrix, indices):
    matrix = scipy.array(matrix)
    indices = tuple(scipy.transpose(scipy.atleast_2d(indices)))
    arr_shape = scipy.shape(matrix)
    dist = scipy.ones(arr_shape)
    dist[indices] = 0
    dist = scipy.ndimage.distance_transform_cdt(dist, metric='chessboard')
    nb_indices = scipy.transpose(scipy.nonzero(dist == 1))
    return [matrix[tuple(ind)] for ind in nb_indices]
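A minimal usage sketch (hypothetical grid; assumes scipy.ndimage has been imported, as the function above requires):

grid = scipy.arange(16).reshape(4, 4)
print(nb_vals(grid, (1, 1)))   # values of the 8 cells surrounding index (1, 1): [0, 1, 2, 4, 6, 8, 9, 10]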
def get_constraint_forces(atoms, ml_model):
    """
    Atomic forces of a trained ML model given the configuration in atoms.
    """
    pos = atoms.get_positions()
    ds_dr = sp.array([ddihedralangle_dr(pos, dihedral_atoms_phi),
                      ddihedralangle_dr(pos, dihedral_atoms_psi, shift=True)])
    dUml_ds = ml_model.predict_gradient(sp.atleast_2d(atoms.colvars()))
    forces = - sp.dot(ds_dr.T, dUml_ds.ravel()).T
    return forces
 def compute(i, j):
     if i == j:
         return 1.0
     elif trains[i].size <= 0 or trains[j].size <= 0:
         return 0.0
     else:
         diff_matrix = sp.absolute(trains[i] - sp.atleast_2d(trains[j]).T)
         return 0.5 * (
             sp.sum(kernel(sp.amin(diff_matrix, axis=0))) / trains[i].size +
             sp.sum(kernel(sp.amin(diff_matrix, axis=1))) / trains[j].size)
 def compute(i, j):
     if i == j:
         return 1.0
     elif trains[i].size <= 0 or trains[j].size <= 0:
         return 0.0
     else:
         diff_matrix = sp.absolute(trains[i] - sp.atleast_2d(trains[j]).T)
         return 0.5 * (
             sp.sum(kernel(sp.amin(diff_matrix, axis=0))) / trains[i].size +
             sp.sum(kernel(sp.amin(diff_matrix, axis=1))) / trains[j].size)
def coeffExpand(M):
    if M.shape[0] != M.shape[1]:
        raise ValueError("Matrix must be square")

    M = sp.atleast_2d(M)
    if M.shape == (1,1):
        return M.item(0,0)
    else:
        det = sum(M[0,i]*(((-1)**i)*coeffExpand(sp.delete(M[1:],i,1))) for i in range(len(M)))
        return det
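A minimal usage sketch (hypothetical matrix): a 3x3 determinant by cofactor expansion.

M = sp.array([[2.0, 0.0, 1.0],
              [1.0, 3.0, 0.0],
              [0.0, 1.0, 4.0]])
print(coeffExpand(M))   # -> 25.0, the determinant of M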
def victor_purpura_dist(trains, q=1.0 * pq.Hz, kernel=None, sort=True):
    """ Calculates the Victor-Purpura's (VP) distance. It is often denoted as
    :math:`D^{\\text{spike}}[q]`.

    It is defined as the minimal cost of transforming spike train `a` into
    spike train `b` by using the following operations:

        * Inserting or deleting a spike (cost 1.0).
        * Shifting a spike from :math:`t` to :math:`t'` (cost :math:`q \\cdot |t
          - t'|`).

    A detailed description can be found in
    *Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    coding in visual cortex: a metric-space analysis. Journal of
    Neurophysiology.*

    Given the average number of spikes :math:`n` in a spike train and :math:`N`
    spike trains the run-time complexity of this function is
    :math:`O(N^2 n^2)` and :math:`O(N^2 + n^2)` memory will be needed.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the distance will be calculated pairwise.
    :param q: Cost factor for spike shifts as inverse time scalar. If `kernel`
        is not `None`, `q` will be ignored.
    :type q: Quantity scalar
    :param kernel: Kernel to use in the calculation of the distance. If
        `kernel` is `None`, an unnormalized triangular kernel with a half width
        of `2.0/q` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times will be needed for
        the calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
    :returns: Matrix containing the VP distance of all pairs of spike trains.
    :rtype: 2-D array
    """

    if kernel is None:
        if q == 0.0:
            num_spikes = sp.atleast_2d([st.size for st in trains])
            return sp.absolute(num_spikes.T - num_spikes)
        else:
            kernel = sigproc.TriangularKernel(2.0 / q, normalize=False)

    if sort:
        trains = [sp.sort(st.view(type=pq.Quantity)) for st in trains]

    def compute(i, j):
        if i == j:
            return 0.0
        else:
            return _victor_purpura_dist_for_trial_pair(trains[i], trains[j],
                                                       kernel)

    return _create_matrix_from_indexed_function((len(trains), len(trains)),
                                                compute, kernel.is_symmetric())
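As an illustrative cost calculation (hypothetical spike trains, not from the cited paper): with q = 1 Hz, transforming a = (1 s, 3 s) into b = (1.2 s, 6 s) costs 0.2 for shifting the first spike by 0.2 s, and the cheapest way to handle the remaining spike is to delete it and insert a new one (cost 2.0, versus 3.0 for a 3 s shift), giving a total distance of 2.2.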
def victor_purpura_dist(trains, q=1.0 * pq.Hz, kernel=None, sort=True):
    """ Calculates the Victor-Purpura's (VP) distance. It is often denoted as
    :math:`D^{\\text{spike}}[q]`.

    It is defined as the minimal cost of transforming spike train `a` into
    spike train `b` by using the following operations:

        * Inserting or deleting a spike (cost 1.0).
        * Shifting a spike from :math:`t` to :math:`t'` (cost :math:`q \\cdot |t
          - t'|`).

    A detailed description can be found in
    *Victor, J. D., & Purpura, K. P. (1996). Nature and precision of temporal
    coding in visual cortex: a metric-space analysis. Journal of
    Neurophysiology.*

    Given the average number of spikes :math:`n` in a spike train and :math:`N`
    spike trains the run-time complexity of this function is
    :math:`O(N^2 n^2)` and :math:`O(N^2 + n^2)` memory will be needed.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the distance will be calculated pairwise.
    :param q: Cost factor for spike shifts as inverse time scalar. If `kernel`
        is not `None`, `q` will be ignored.
    :type q: Quantity scalar
    :param kernel: Kernel to use in the calculation of the distance. If
        `kernel` is `None`, an unnormalized triangular kernel with a half width
        of `2.0/q` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times will be needed for
        the calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
    :returns: Matrix containing the VP distance of all pairs of spike trains.
    :rtype: 2-D array
    """

    if kernel is None:
        if q == 0.0:
            num_spikes = sp.atleast_2d([st.size for st in trains])
            return sp.absolute(num_spikes.T - num_spikes)
        else:
            kernel = sigproc.TriangularKernel(2.0 / q, normalize=False)

    if sort:
        trains = [sp.sort(st.view(type=pq.Quantity)) for st in trains]

    def compute(i, j):
        if i == j:
            return 0.0
        else:
            return _victor_purpura_dist_for_trial_pair(
                trains[i], trains[j], kernel)

    return _create_matrix_from_indexed_function(
        (len(trains), len(trains)), compute, kernel.is_symmetric())
Example #40
def barycentric_gradients(pts):
    """
    Compute the gradients of the barycentric basis functions over a given simplex
    """            
    V = asarray(pts[1:] - pts[0])
    
    ##all gradients except the first are computed
    grads = dot(inv(inner(V,V)),V) #safer, but slower: grads = scipy.linalg.pinv2(V).T 
    
    ##since sum of all gradients is zero, simply compute the first from the others        
    return vstack((atleast_2d(-numpy.sum(grads,axis=0)),grads))
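A minimal usage sketch (hypothetical simplex; assumes asarray, dot, inv, inner, vstack, atleast_2d and numpy are imported at module level, as the function above requires):

pts = asarray([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # reference triangle
print(barycentric_gradients(pts))
# -> [[-1., -1.],
#     [ 1.,  0.],
#     [ 0.,  1.]]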
Example #41
 def skycorrect(p, arc, sky, arcmodel, skymodel):
     fit = {'coeff': scipy.atleast_2d(p[:-1]).T, 'type': 'polynomial'}
     w = sf.genfunc(x, 0., fit)
     arcm = interpolate.splev(w + p[-1], arcmodel)
     chi_arc = (arcm - arc)
     s = sky[w > 5100.]
     skym = interpolate.splev(w[w > 5100.], skymodel)
     skym *= scipy.median(s / skym)
     chi_sky = 5. * (skym - s)  #/abs(m)**0.5
     chi = scipy.concatenate((chi_arc, chi_sky))
     return chi
Example #42
    def posterior_prob(self, obs, with_noise=False):
        """posterior probabilities for data under the model

        :type obs: ndarray
        :param obs: observations to be evaluated [n, tf, nc]
        :type with_noise: bool
        :param with_noise: if True, include the noise cluster as component
            in the mixture.
            Default=False
        :rtype: ndarray
        :returns: matrix with per component posterior probabilities [n, c]
        """

        # check obs
        obs = sp.atleast_2d(obs)
        if len(obs) == 0:
            raise ValueError('no observations passed!')
        data = []
        if obs.ndim == 2:
            if obs.shape[1] != self._tf * self._nc:
                raise ValueError('data dimensions not compatible with model')
            for i in xrange(obs.shape[0]):
                data.append(obs[i])
        elif obs.ndim == 3:
            if obs.shape[1:] != (self._tf, self._nc):
                raise ValueError('data dimensions not compatible with model')
            for i in xrange(obs.shape[0]):
                data.append(mcvec_to_conc(obs[i]))
        data = sp.asarray(data, dtype=sp.float64)

        # build comps
        comps = self.get_template_set(mc=False)
        if with_noise:
            comps = sp.vstack((comps, sp.zeros((self._tf * self._nc))))
        comps = comps.astype(sp.float64)
        if len(comps) == 0:
            return sp.zeros((len(obs), 1))

        # build priors
        prior = sp.array([self._lpr_s] * len(comps), dtype=sp.float64)
        if with_noise:
            prior[-1] = self._lpr_n

        # get sigma
        try:
            sigma = self._ce.get_cmx(tf=self._tf).astype(sp.float64)
        except:
            return sp.zeros((len(obs), 1))

        # calc log probs
        lpr = log_multivariate_normal_density(data, comps, sigma,
                                              'tied') + prior
        logprob = logsumexp(lpr, axis=1)
        return sp.exp(lpr - logprob[:, sp.newaxis])
    def summed_dist_matrix(self, vectors, presorted=False):
        D = sp.empty((len(vectors), len(vectors)))
        if len(vectors) > 0:
            might_have_units = self(vectors[0])
            if hasattr(might_have_units, 'units'):
                D = D * might_have_units.units

        for i in xrange(len(vectors)):
            for j in xrange(i, len(vectors)):
                D[i, j] = D[j, i] = sp.sum(self(
                    (vectors[i] - sp.atleast_2d(vectors[j]).T).flatten()))
        return D
Example #44
 def __call__(self, X, n, hyper_deriv=None):
     """Evaluate the mean function at the given points with the current parameters.
     
     Parameters
     ----------
     X : array, (`N`,) or (`N`, `D`)
         Points to evaluate the mean function at.
     n : array, (`N`,) or (`N`, `D`)
         Derivative orders for each point.
     hyper_deriv : int or None, optional
         Index of parameter to take derivative with respect to.
     """
     n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
     X = scipy.atleast_2d(scipy.asarray(X))
     n_unique = unique_rows(n)
     mu = scipy.zeros(X.shape[0])
     for nn in n_unique:
         idxs = (n == nn).all(axis=1)
         mu[idxs] = self.fun(X[idxs, :], nn, *self.params, hyper_deriv=hyper_deriv)
     
     return mu
def coExpand(M):
    # if not M.shape[0] == M.shape[1]:
    #    raise Exception, "Matrix must be square"

    M = sp.atleast_2d(M)
    if M.shape == (1, 1):
        print M.item(0, 0)
        return M.item(0, 0)
    else:
        print M
        first = M.item(0, 0) * coeffExpand(M[1:, 1:])
        last = M.item(0, -1) * ((-1) ** (len(M) - 1)) * coeffExpand(M[1:, :-1])
Example #46
 def dofit(p, x, data, model):
     if scipy.isnan(p).any():
         return x * 0. + 1e7
     fit = {'coeff': scipy.atleast_2d(p[1:]).T, 'type': 'polynomial'}
     w = sf.genfunc(x, 0., fit)
     m = interpolate.splev(w, model)
     m *= p[0]
     chi = (m - data) / abs(data)**0.5
     cond = ~scipy.isnan(chi)
     cond = cond & scipy.isfinite(chi)
     cond = cond & (w > cutoff) & (w < 10400.)
     return chi[cond] / chi[cond].size
Example #47
    def summed_dist_matrix(self, vectors, presorted=False):
        D = sp.empty((len(vectors), len(vectors)))
        if len(vectors) > 0:
            might_have_units = self(vectors[0])
            if hasattr(might_have_units, 'units'):
                D = D * might_have_units.units

        for i in xrange(len(vectors)):
            for j in xrange(i, len(vectors)):
                D[i, j] = D[j, i] = sp.sum(
                    self((vectors[i] - sp.atleast_2d(vectors[j]).T).flatten()))
        return D
Example #48
def rowwise(matin, makecopy=1):
    #    t1=time.time()
    if makecopy:
        tempmat = copy.deepcopy(matin)
    else:
        tempmat = matin
#    t2=time.time()
#    print('copy time='+str(t2-t1))
    matout = scipy.atleast_2d(tempmat)
    myshape = scipy.shape(matout)
    if myshape[0] > myshape[1]:
        matout = scipy.transpose(matout)
    return matout
 def compute(i, j):
     if i == j:
         return 1.0
     else:
         if tau is None:
             tau_mat = spq.minimum(
                 *spq.meshgrid(auto_taus[i], auto_taus[j])) / 2.0
         else:
             tau_mat = sp.tile(tau, (trains[j].size, trains[i].size))
         coincidence = sp.sum(
             kernel((trains[i] - sp.atleast_2d(trains[j]).T) / tau_mat))
         normalization = 1.0 / sp.sqrt(trains[i].size * trains[j].size)
         return normalization * coincidence
Example #50
    def __call__(self, X, n, hyper_deriv=None):
        """Evaluate the mean function at the given points with the current parameters.
        
        Parameters
        ----------
        X : array, (`N`,) or (`N`, `D`)
            Points to evaluate the mean function at.
        n : array, (`N`,) or (`N`, `D`)
            Derivative orders for each point.
        hyper_deriv : int or None, optional
            Index of parameter to take derivative with respect to.
        """
        n = scipy.atleast_2d(scipy.asarray(n, dtype=int))
        X = scipy.atleast_2d(scipy.asarray(X))
        n_unique = unique_rows(n)
        mu = scipy.zeros(X.shape[0])
        for nn in n_unique:
            idxs = (n == nn).all(axis=1)
            mu[idxs] = self.fun(X[idxs, :],
                                nn,
                                *self.params,
                                hyper_deriv=hyper_deriv)

        return mu
def _van_rossum_multiunit_dist_for_trial_pair(a, b, weighting, tau, kernel):
    if kernel is None:
        spike_counts = sp.atleast_2d([st.size for st in a + b])
        k_dist = spike_counts.T * (spike_counts - spike_counts.T)
    else:
        k_dist = kernel.summed_dist_matrix(a + b)

    non_diagonal = sp.logical_not(sp.eye(len(a)))
    summed_population = (sp.trace(k_dist) - sp.trace(k_dist, len(a)) -
                         sp.trace(k_dist, -len(a)))
    labeled_line = (sp.sum(k_dist[:len(a), :len(a)][non_diagonal]) +
                    sp.sum(k_dist[len(a):, len(a):][non_diagonal]) -
                    sp.sum(k_dist[:len(a), len(a):][non_diagonal]) -
                    sp.sum(k_dist[len(a):, :len(a)][non_diagonal]))
    return sp.sqrt(summed_population + weighting * labeled_line)
Example #52
def pendular_ring(target,
                  throat_diameter='throat.diameter',
                  pore_diameter='pore.diameter'):
    r"""
    Calculates the volume of the pendular rings residing between the end of
    a cylindrical throat and spherical pores that are in contact but not
    overlapping.

    This volume should be added to the throat volume if the throat length was
    found as the center-to-center distance less the pore radii.

    Parameters
    ----------
    throat_diameter : string
        The dictionary keys containing the array with the throat diameter
        values.
    pore_diameter : string
        The dictionary keys containing the array with the pore diameter
        values.

    Returns
    -------
    volume : ND-array
        The volume that should be added to each throat volume to account for
        under-represented void volume at the pore-throat junctions.

    Notes
    -----
    This model does NOT consider the possibility that multiple throats might
    overlap in the same location which could happen if throats are large and
    connectivity is random.

    See Also
    --------
    lens
    """
    network = target.network
    conns = network['throat.conns']
    Rp = target[pore_diameter]
    Rt = target[throat_diameter]
    a = _sp.atleast_2d(Rt).T
    q = _sp.arcsin(a / Rp[conns])
    b = Rp[conns] * _sp.cos(q)
    h = Rp[conns] - b
    Vlens = 1.0 / 6.0 * _sp.pi * h * (3 * a**2 + h**2)
    Vcyl = _sp.pi * (a)**2 * h
    V = Vcyl - Vlens
    return _sp.sum(V, axis=1)
def cs_dist(trains,
            smoothing_filter,
            sampling_rate,
            filter_area_fraction=sigproc.default_kernel_area_fraction):
    """ Calculates the Cauchy-Schwarz distance between two spike trains given
    a smoothing filter.

    Let :math:`v_a(t)` and :math:`v_b(t)` with :math:`t \\in \\mathcal{T}` be
    the spike trains convolved with some smoothing filter and :math:`V(a, b)
    = \\int_{\\mathcal{T}} v_a(t) v_b(t) dt`. Then, the Cauchy-Schwarz distance
    of the spike trains is defined as :math:`d_{CS}(a, b) = \\arccos \\frac{V(a,
    b)^2}{V(a, a) V(b, b)}`.

    The Cauchy-Schwarz distance is closely related to the Schreiber et al.
    similarity measure :math:`S_S` by :math:`d_{CS} = \\arccos S_S^2`

    This function numerically convolves the spike trains with the smoothing
    filter which can be quite slow and inaccurate. If the analytical result of
    the autocorrelation of the smoothing filter is known, one can use
    :func:`schreiber_similarity` for a more efficient and precise calculation.

    Further information can be found in *Paiva, A. R. C., Park, I., & Principe,
    J. (2010). Inner products for representation and learning in the spike
    train domain. Statistical Signal Processing for Neuroscience and
    Neurotechnology, Academic Press, New York.*

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the distance will be calculated pairwise.
    :param smoothing_filter: Smoothing filter to be convolved with the spike
        trains.
    :type smoothing_filter: :class:`.signal_processing.Kernel`
    :param sampling_rate: The sampling rate which will be used to bin
        the spike trains as inverse time scalar.
    :type sampling_rate: Quantity scalar
    :param float filter_area_fraction: A value between 0 and 1 which controls
        the interval over which the smoothing filter will be discretized. At
        least the given fraction of the complete smoothing filter area will be
        covered. Higher values can lead to more accurate results (besides the
        sampling rate).
    :returns: Matrix containing the Cauchy-Schwarz distance of all pairs of
        spike trains
    :rtype: 2-D array
    """

    inner = st_inner(trains, trains, smoothing_filter, sampling_rate,
                     filter_area_fraction)
    return sp.arccos(inner**2 / sp.diag(inner) /
                     sp.atleast_2d(sp.diag(inner)).T)
def van_rossum_dist(trains, tau=1.0 * pq.s, kernel=None, sort=True):
    """ Calculates the van Rossum distance.

    It is defined as Euclidean distance of the spike trains convolved with a
    causal decaying exponential smoothing filter. A detailed description can be
    found in *Rossum, M. C. W. (2001). A novel spike distance. Neural
    Computation, 13(4), 751-763.* This implementation is normalized to yield
    a distance of 1.0 for the distance between an empty spike train and a spike
    train with a single spike. Divide the result by sqrt(2.0) to get the
    normalization used in the cited paper.

    Given :math:`N` spike trains with :math:`n` spikes on average the run-time
    complexity of this function is :math:`O(N^2 n^2)`. An implementation in
    :math:`O(N^2 n)` would be possible but has a high constant factor rendering
    it slower in practical cases.

    :param sequence trains: Sequence of :class:`neo.core.SpikeTrain` objects of
        which the van Rossum distance will be calculated pairwise.
    :param tau: Decay rate of the exponential function as time scalar. Controls
        for which time scale the metric will be sensitive. This parameter will
        be ignored if `kernel` is not `None`. May also be :const:`scipy.inf`
        which will lead to only measuring differences in spike count.
    :type tau: Quantity scalar
    :param kernel: Kernel to use in the calculation of the distance. This is not
        the smoothing filter, but its autocorrelation. If `kernel` is `None`, an
        unnormalized Laplacian kernel with a size of `tau` will be used.
    :type kernel: :class:`.signal_processing.Kernel`
    :param bool sort: Spike trains with sorted spike times might be needed for
        the calculation. You can set `sort` to `False` if you know that your
        spike trains are already sorted to decrease calculation time.
    :returns: Matrix containing the van Rossum distances for all pairs of spike
        trains.
    :rtype: 2-D array
    """

    if kernel is None:
        if tau == sp.inf:
            spike_counts = [st.size for st in trains]
            return (spike_counts - sp.atleast_2d(spike_counts).T)**2
        kernel = sigproc.LaplacianKernel(tau, normalize=False)

    k_dist = kernel.summed_dist_matrix(
        [st.view(type=pq.Quantity) for st in trains], not sort)
    vr_dist = sp.empty_like(k_dist)
    for i, j in sp.ndindex(*k_dist.shape):
        vr_dist[i, j] = (k_dist[i, i] + k_dist[j, j] - k_dist[i, j] -
                         k_dist[j, i])
    return sp.sqrt(vr_dist)
Example #55
 def dofit(p, x, data, model):
     if scipy.isnan(p).any():
         return x * 0. + 1e7
     if abs(p[2] - scale) / scale > 0.15:
         return x * 0. + 1e7
     fit = {'coeff': scipy.atleast_2d(p[1:]).T, 'type': 'polynomial'}
     w = sf.genfunc(x, 0., fit)
     m = interpolate.splev(w, model)
     m *= p[0]
     chi = (m - data) / abs(data)**0.5
     cond = ~scipy.isnan(chi)
     cond = cond & scipy.isfinite(chi)
     cond = cond & (w > lowave) & (w < maxwave)
     badval = abs(chi[cond]).max()
     chi[~cond] = 2 * badval
     return chi
    def append_data_peaks(self, data, force=False):
        """append bin(s) calculated from a strip of data

        with this method the data is first queried for peaks. this should
        reduce the noise/smoothness of the histogram as observed from the
        amplitude distribution of the pure signal.

        :type data: ndarray
        :param data: the data to generate the bin(s) to append from
        :type force: bool
        :param force: if True, immediately start a new bin before calculation
        """

        # check data
        data_ = sp.asanyarray(data)
        if data.ndim < 2:
            data_ = sp.atleast_2d(data_)
            if data_.shape[0] < data_.shape[1]:
                data_ = data_.T
        nsmpl, nchan = data_.shape
        if nchan != self._nchan:
            raise ValueError('data has channel count %s, expected %s' %
                             (nchan, self._nchan))

        # generate bin set
        bin_set = [0]
        if self._cur_bin_smpl != 0:
            bin_set.append(self._bin_size - self._cur_bin_smpl)
        while bin_set[-1] < nsmpl:
            bin_set.append(bin_set[-1] + self._bin_size)
        if bin_set[-1] > nsmpl:
            bin_set[-1] = nsmpl

        # process bins
        idx = 1
        while idx < len(bin_set):
            data_bin = data_[bin_set[idx - 1]:bin_set[idx], :]
            for c in xrange(self._nchan):
                self._cur_bin[c] += sp.histogram(data_bin[:, c],
                                                 bins=self._ampl_range)[0]
            self._cur_bin_smpl += data_bin.shape[0]
            if self._cur_bin_smpl == self._bin_size:
                self.append_bin(self._cur_bin)
                self._cur_bin[:] = 0
                self._cur_bin_smpl = 0
            idx += 1