Example No. 1
def calc_probability_matrix(trains_a, trains_b, metric, tau, z):
    """ Calculates the probability matrix that one spike train from stimulus X
    will be classified as spike train from stimulus Y.

    :param list trains_a: Spike trains of stimulus A.
    :param list trains_b: Spike trains of stimulus B.
    :param str metric: Metric to base the classification on. Has to be a key in
        :const:`metrics.metrics`.
    :param tau: Time scale parameter for the metric.
    :type tau: Quantity scalar.
    :param float z: Exponent parameter for the classifier.
    :returns: A 2x2 matrix in which entry (i, j) is the fraction of all
        trains that belong to stimulus j (0 = A, 1 = B) and were classified
        as stimulus i; the entries sum to one.
    """

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "divide by zero")
        dist_mat = calc_single_metric(trains_a + trains_b, metric, tau) ** z
    dist_mat[sp.diag_indices_from(dist_mat)] = 0.0

    assert len(trains_a) == len(trains_b)
    l = len(trains_a)
    classification_of_a = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, :l], axis=0) / (l - 1),
        sp.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
    classification_of_b = sp.argmin(sp.vstack((
        sp.sum(dist_mat[:l, l:], axis=0) / l,
        sp.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
    confusion = sp.empty((2, 2))
    confusion[0, 0] = sp.sum(classification_of_a == 0)
    confusion[1, 0] = sp.sum(classification_of_a == 1)
    confusion[0, 1] = sp.sum(classification_of_b == 0)
    confusion[1, 1] = sp.sum(classification_of_b == 1)
    return confusion / 2.0 / l
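A minimal, self-contained sketch of the same leave-one-out nearest-mean classification, using plain numpy instead of the legacy scipy namespace and a synthetic distance matrix in place of calc_single_metric (the sizes and values below are made up for illustration only):

import numpy as np

def confusion_from_distances(dist_mat, l, z=2.0):
    # dist_mat: symmetric (2*l, 2*l) matrix of pairwise distances already
    # raised to the power z, with a zero diagonal; the first l rows/columns
    # are the trains of stimulus A, the remaining l are those of stimulus B.
    classification_of_a = np.argmin(np.vstack((
        np.sum(dist_mat[:l, :l], axis=0) / (l - 1),
        np.sum(dist_mat[l:, :l], axis=0) / l)) ** (1.0 / z), axis=0)
    classification_of_b = np.argmin(np.vstack((
        np.sum(dist_mat[:l, l:], axis=0) / l,
        np.sum(dist_mat[l:, l:], axis=0) / (l - 1))) ** (1.0 / z), axis=0)
    confusion = np.empty((2, 2))
    confusion[0, 0] = np.sum(classification_of_a == 0)
    confusion[1, 0] = np.sum(classification_of_a == 1)
    confusion[0, 1] = np.sum(classification_of_b == 0)
    confusion[1, 1] = np.sum(classification_of_b == 1)
    return confusion / 2.0 / l

# Synthetic distances: trains from the same stimulus are made closer to each
# other, so most of the probability mass should land on the diagonal.
rng = np.random.default_rng(0)
l = 5
d = rng.uniform(1.0, 2.0, size=(2 * l, 2 * l))
d[:l, :l] *= 0.1
d[l:, l:] *= 0.1
d = (d + d.T) / 2.0
d[np.diag_indices_from(d)] = 0.0
print(confusion_from_distances(d ** 2.0, l))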
Example No. 2
    def _get_yz(self, L, getcovar=True):
        """
        Gets flux and error (y and z) near the line wavelengths and makes
        sure everything is aligned with the reference spectrum.
        """

        l = deepcopy(L)

        #shift
        l.wv -= self.p['shift']
        m = (self.Lref.wv >= l.wv.min()) * (self.Lref.wv <= l.wv.max())
        y, z = l.interp(self.Lref.wv[m])
        #convolve
        k = self._get_kernel(self.Lref.wv[m])

        #have to make covar before smoothing z
        if getcovar:
            if self.kernelname == 'Delta':
                covar = get_covarmatrix(l.wv, self.Lref.wv[m], l.ef, k, 2)
            else:
                covar = get_covarmatrix(l.wv, self.Lref.wv[m], l.ef, k,
                                        5 * self.p['width'])

        z = sp.sqrt(sp.convolve(z**2, k**2, mode='same'))
        y = sp.convolve(y, k, mode='same')

        #scale
        z *= self.p['scale']
        y *= self.p['scale']
        if getcovar:
            covar *= self.p['scale'] * self.p['scale']
            # add reference errors now to simplify the chi2; these don't
            # seem to matter much
            covar[sp.diag_indices_from(covar)] += self.Lref.ef[m]**2

        # trim 10% of the data (5% from each edge) to help with edge effects
        # and with shifting the data.  This number is hard-coded so that the
        # degrees of freedom are fixed during the fit.
        trim = int(round(0.05 * (self.Lref.wv[m].size)))

        z = z[trim:-trim]
        y = y[trim:-trim]
        if getcovar:
            covar = covar[:, trim:-trim]
            covar = covar[trim:-trim, :]

        #need a mask for reference when calculating chi^2
        m2 = (self.Lref.wv >= self.Lref.wv[m][trim]) * (self.Lref.wv <
                                                        self.Lref.wv[m][-trim])

        if getcovar:
            # Note that z**2 (error spectrum) is slightly different
            # from the diagonal of covar, because covariance was
            # ignored for z
            return y, z**2, m2, covar
        else:
            return y, z**2, m2
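A standalone sketch, with made-up numbers, of the smoothing and error-propagation step used above: assuming independent per-pixel errors, convolving the flux y with a kernel k propagates the 1-sigma errors z as the square root of the variance convolved with the squared kernel, which is what the two convolve calls do.

import numpy as np

y = np.array([1.0, 2.0, 4.0, 2.0, 1.0])    # flux
z = np.full_like(y, 0.5)                   # 1-sigma errors, assumed independent
k = np.array([0.25, 0.5, 0.25])            # normalized smoothing kernel

y_smooth = np.convolve(y, k, mode='same')
z_smooth = np.sqrt(np.convolve(z**2, k**2, mode='same'))
print(y_smooth)
print(z_smooth)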
Example No. 3
def probs(termList=termList):
    # Logistic (sigmoid) of the element-wise sum of the matrices in termList,
    # with the diagonal (self-connections) zeroed out.
    probs = 1. / (1 + sp.exp(-1 * sum(termList)))
    probs[sp.diag_indices_from(probs)] = 0
    return probs
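A standalone sketch of the same computation with numpy and an invented termList of two random score matrices:

import numpy as np

termList = [np.random.randn(4, 4), np.random.randn(4, 4)]
p = 1. / (1 + np.exp(-1 * sum(termList)))   # element-wise logistic of the summed terms
p[np.diag_indices_from(p)] = 0              # zero the self-connections
print(p)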
Example No. 4
    def summed_dist_matrix(self, vectors, presorted=False):
        # This implementation is based on
        #
        # Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van
        # Rossum distances. Network: Computation in Neural Systems, 23(1-2),
        # 48-58.
        #
        # Note that the cited paper contains some errors: In formula (9) the
        # left side of the equation should be divided by two and in the last
        # sum in this equation it should say `j|v_i >= u_i` instead of
        # `j|v_i > u_i`. Also, in equation (11) it should say `j|u_i >= v_i`
        # instead of `j|u_i > v_i`.
        #
        # Given N vectors with n entries on average the run-time complexity is
        # O(N^2 * n). O(N^2 + N * n) memory will be needed.

        if len(vectors) <= 0:
            return sp.zeros((0, 0))

        if not presorted:
            vectors = [v.copy() for v in vectors]
            for v in vectors:
                v.sort()

        sizes = sp.asarray([v.size for v in vectors])
        values = sp.empty((len(vectors), max(1, sizes.max())))
        values.fill(sp.nan)
        for i, v in enumerate(vectors):
            if v.size > 0:
                values[i, :v.size] = \
                    (v / self.kernel_size * pq.dimensionless).simplified

        exp_diffs = sp.exp(values[:, :-1] - values[:, 1:])
        markage = sp.zeros(values.shape)
        for u in xrange(len(vectors)):
            markage[u, 0] = 0
            for i in xrange(sizes[u] - 1):
                markage[u, i + 1] = (markage[u, i] + 1.0) * exp_diffs[u, i]

        # Same vector terms
        D = sp.empty((len(vectors), len(vectors)))
        D[sp.diag_indices_from(D)] = sizes + 2.0 * sp.sum(markage, axis=1)

        # Cross vector terms
        for u in xrange(D.shape[0]):
            all_ks = sp.searchsorted(values[u], values, 'left') - 1
            for v in xrange(u):
                js = sp.searchsorted(values[v], values[u], 'right') - 1
                ks = all_ks[v]
                slice_j = sp.s_[sp.searchsorted(js, 0):sizes[u]]
                slice_k = sp.s_[sp.searchsorted(ks, 0):sizes[v]]
                D[u, v] = sp.sum(
                    sp.exp(values[v][js[slice_j]] - values[u][slice_j]) *
                    (1.0 + markage[v][js[slice_j]]))
                D[u, v] += sp.sum(
                    sp.exp(values[u][ks[slice_k]] - values[v][slice_k]) *
                    (1.0 + markage[u][ks[slice_k]]))
                D[v, u] = D[u, v]

        if self.normalize:
            normalization = self.normalization_factor(self.kernel_size)
        else:
            normalization = 1.0
        return normalization * D
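For a cross-check, here is a naive O(n_u * n_v) sketch of the quantity the markage recursion above computes efficiently, assuming the spike times have already been divided by the kernel size: each entry of D is the sum of exp(-|t_i - s_j|) over all spike pairs of the two vectors (the diagonal uses a vector with itself).

import numpy as np

def summed_exp_kernel(u, v):
    # u, v: 1-D arrays of spike times in units of the kernel size.
    return np.sum(np.exp(-np.abs(u[:, None] - v[None, :])))

u = np.array([0.1, 0.5, 1.2])
v = np.array([0.3, 0.9])
print(summed_exp_kernel(u, v))   # corresponds to an off-diagonal entry D[u, v]
print(summed_exp_kernel(u, u))   # corresponds to a diagonal entry of D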
Example No. 5
def get_covarmatrix(x, xinterp, z, k, breakwidth):
    """
    Calculates the covariance matrix when needed.  Assumes both an
    interpolation and a smoothing; for now, only linear interpolation
    will work (only one off-diagonal is filled, but the error is
    propagated correctly).
    """

    isort = sp.searchsorted(x, xinterp)
    f = (xinterp - x[isort - 1]) / (x[isort] - x[isort - 1])
    z2 = sp.sqrt((f**2) * (z[isort]**2) + ((1 - f)**2) * (z[isort - 1]**2))
    #for a delta function
    if k.size == 1:
        return sp.diag(z2**2)

    covar1 = sp.zeros((z2.size, z2.size))
    covar2 = sp.zeros((z2.size, z2.size))

    # the part from interpolation: searchsorted has thrown out the
    # first index, and the last index won't be selected because of the slice
    covar1[sp.diag_indices_from(covar1)] = z2**2
    #    print sp.shape(f), sp.shape(z[isort.min():isort.max()-1]),sp.shape(z)
    diag1 = f[0:-1] * (1 - f[0:-1]) * (z[isort.min():isort.max()]**2)

    #linear interpolation only has one diagonal
    covar1 += sp.diag(diag1, 1)
    covar1 += sp.diag(diag1, -1)
    covar1 = sp.matrix(covar1)

    #the part from convolution
    #make kernel match size of input
    if k.size == z2.size - 2:
        k = sp.r_[0, k, 0]
    elif k.size == z2.size - 1:
        k = sp.r_[0, k]
    #force it to wrap around
    cent = k.size // 2
    k = sp.r_[k[cent::], k[0:cent]]

    #for matrix equation, see Gardner 2003, Uncertainties in
    #Interpolated Spectral Data; equation 6

    # seems like there is a better way than a double loop....
    for i in range(z2.size):
        for n in range(int(breakwidth)):
            # can shorten the loop because most of the matrix is
            # zero: the idea is that kernels that are far apart
            # should have zero overlap.  If k2 gets shifted
            # relative to k1 by more than 5x the kernel width, assume
            # no overlap
            j = min(i + n, z2.size - 1)
            k1 = sp.matrix(sp.roll(k, i))
            k2 = sp.matrix(sp.roll(k, j))
            trim1 = cent + i
            if trim1 <= k1.size:
                k1[:, trim1::] = 0
            else:
                k1[:, 0:trim1 - k1.size] = 0

            trim2 = cent + j
            if trim2 <= k2.size:
                k2[:, trim2::] = 0
            else:
                k2[:, 0:(trim2 - k2.size)] = 0

            covar2[i, j] = k1 * covar1 * k2.T
        #matrix is symmetric
    covar2 += covar2.T
    covar2[sp.diag_indices_from(covar2)] /= 2.

    return covar2
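A small standalone sketch of just the interpolation part of the error budget above (the grids and errors are invented): linear interpolation with fraction f mixes two source pixels, so the interpolated variance is f**2 * z_i**2 + (1 - f)**2 * z_(i-1)**2, which is the z2 computed at the top of the function.

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])          # source wavelength grid
z = np.array([0.4, 0.5, 0.6, 0.5])          # 1-sigma errors on the source grid
xinterp = np.array([0.25, 0.75, 1.5, 2.5])  # target grid, strictly inside x

isort = np.searchsorted(x, xinterp)
f = (xinterp - x[isort - 1]) / (x[isort] - x[isort - 1])
z2 = np.sqrt(f**2 * z[isort]**2 + (1 - f)**2 * z[isort - 1]**2)
print(z2)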