Example #1
    def reject_parts(self):

        """
        Hall = self._parts * np.log(self._parts) + \
            (1 - self._parts) * np.log(1 - self._parts)
        H = -np.apply_over_axes(np.mean, Hall, [1, 2, 3]).ravel()
        """

        th1 = self._parts
        th0 = np.apply_over_axes(np.mean, self._parts, [1, 2])

        M1 = th1 * np.log(th1 / th0) +\
            (1 - th1) * np.log((1 - th1) / (1 - th0))
        S1 = np.log((th1 / (1 - th1) * ((1 - th0) / th0)))**2 * th1 * (1 - th1)
        mu1 = np.apply_over_axes(np.sum, M1, [1, 2, 3]).ravel()
        sigma1 = np.sqrt(np.apply_over_axes(np.sum, S1, [1, 2, 3])).ravel()

        # expectation of the log-likelihood ratio under th0
        M1 = th0 * np.log(th1 / th0) +\
            (1 - th0) * np.log((1 - th1) / (1 - th0))
        S1 = np.log((th1 / (1 - th1) * ((1 - th0) / th0)))**2 * th0 * (1 - th0)
        mu0 = np.apply_over_axes(np.sum, M1, [1, 2, 3]).ravel()
        # sigma0 = np.sqrt(np.apply_over_axes(np.sum, S1, [1, 2, 3])).ravel()

        ok = ((mu1 - mu0) / sigma1) > self._settings.get('reject_entropy', 1.0)

        print(ok.shape)
        print(ok.sum())

        self._parts = self._parts[ok]
        self._num_parts = self._parts.shape[0]
Example #2
def calc_eff(ns0, nb0, ns1, nb1, alpha, sum_axes=None):

    if sum_axes:
        ns0 = np.apply_over_axes(np.sum, ns0, axes=sum_axes)
        nb0 = np.apply_over_axes(np.sum, nb0, axes=sum_axes)
        ns1 = np.apply_over_axes(np.sum, ns1, axes=sum_axes)
        nb1 = np.apply_over_axes(np.sum, nb1, axes=sum_axes)

    shape = np.broadcast(ns0, nb0, ns1, nb1).shape
    eff = np.zeros(shape)
    eff_var = np.zeros(shape)

    s0 = ns0 - alpha * nb0
    s1 = ns1 - alpha * nb1
    mask = (s0 * np.ones(shape) > 0)
    mask &= (s1 * np.ones(shape) > 0)

    s0[s0 <= 0] = 1.0
    eff = s1 / s0
    eff_var = (((ns0 - ns1 + alpha**2 * (nb0 - nb1)) * eff**2 +
                (ns1 + alpha**2 * nb1) * (1 - eff)**2) / s0**2)

    eff[~mask] = 0.0
    eff_var[~mask] = 0.0
    return eff, eff_var
Example #3
def sort_and_avg(fr_array, sort_ind):
    # sort_ind: 2D array, with col 0 = trial #, col 1 = sorting param

    unit_fr_mean = np.squeeze(np.apply_over_axes(np.mean, fr_array, (0, 2)))
    unit_fr_std = np.squeeze(np.apply_over_axes(np.std, fr_array, (0, 2)))

    zscore_array_all = []
    for i in range(int(np.max(sort_ind[:, 1]) + 1)):
        # trials belonging to condition i
        temp = sort_ind[sort_ind[:, 1] == i]
        cond_trial_nums = temp[:, 0].astype(int)

        fr_array_cond = fr_array[cond_trial_nums, :, :]
        mean_fr_cond = np.mean(fr_array_cond, axis=0)

        # gaussian_smoothed = gaussian_filter(mean_fr_cond, sigma=sigma_val)
        # z-score each unit against its grand mean/std across all trials
        zscore_array = np.zeros(np.shape(mean_fr_cond))
        for k in range(np.shape(mean_fr_cond)[0]):
            zscore_array[k, :] = (mean_fr_cond[k, :] - unit_fr_mean[k]) / unit_fr_std[k]

        zscore_array_all.append(zscore_array)

    # swap condition and unit axes (a plain reshape would scramble the data)
    zscore_array_all = np.transpose(np.asarray(zscore_array_all), (1, 0, 2))

    return zscore_array_all
Example #4
    def lnl_null(ns,nc,mub,alpha=None,data_axes=0,sum_lnl=True):
        """
        Log-likelihood for null hypothesis.

        Parameters
        ----------
        ns: Vector of observed counts in signal region.

        nc: Vector of observed counts in control region(s).

        mub: Model amplitude for background counts in the signal region.

        alpha: Ratio of the expected background in the signal region to
            that in the control region(s).  If None, the control-region
            term is omitted.
        """
        lnls = poisson_lnl(ns,mub)
        lnlc = np.zeros(nc.shape)

        if alpha is not None:
            # model amplitude for counts in control region
            muc = np.apply_over_axes(np.sum,mub,data_axes)/alpha
            lnlc = poisson_lnl(nc,muc)

        if sum_lnl: 
            lnls = np.apply_over_axes(np.sum,lnls,data_axes)
            lnls = np.squeeze(lnls,data_axes)

            lnlc = np.apply_over_axes(np.sum,lnlc,data_axes)
            lnlc = np.squeeze(lnlc,data_axes)
            return lnls+lnlc
        else:
            return lnls
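
poisson_lnl is defined elsewhere in this codebase; a minimal sketch of what it plausibly computes (the elementwise Poisson log-likelihood, dropping the constant term) is:

import numpy as np

def poisson_lnl(n, mu):
    # Poisson log-likelihood without the log(n!) term, which cancels
    # in any likelihood ratio.
    return n * np.log(mu) - mu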
Example #5
def pool(theta, size):
    w, h = theta.shape[0]//size, theta.shape[1]//size
    feat = np.zeros((w, h, theta.shape[-1]))
    for x, y in gv.multirange(w, h):
        feat[x,y] = np.apply_over_axes(np.sum, theta[x*size:(x+1)*size, y*size:(y+1)*size], [0, 1])[0,0]

    feat /= np.apply_over_axes(np.sum, feat, [-1]) + 1e-8
    
    return feat
Example #6
def two_way(cells):
	dt = cells.dtype
	cells = cells.astype('float64')  # Make sure we don't overflow
	total = np.apply_over_axes(np.sum, cells, [1,2]).ravel()
	chi_sq = np.zeros(cells.shape, dtype='float64')
	for i in range(2):
		for j in range(2):
			exp = np.sum(cells[:,i,:], 1).ravel() * np.sum(cells[:,:,j], 1).ravel() / total
			chi_sq[:,i,j] = (cells[:,i,j] - exp)**2 / exp
	chi_sq = np.apply_over_axes(np.sum, chi_sq, [1,2]).ravel()
	return special.chdtrc(1, chi_sq).astype(dt)
Example #7
 def verify_cumsum(h):
     for op in '<', '>':
         for kind in 'bincontent', 'binerror':
             func = lambda arr, axis: d.histfuncs.cumsum(arr, operator=op, axis=axis)
             if kind == 'bincontent':
                 cum = d.histfuncs.cumulative_bincontent(h, op)
                 cum_full = n.apply_over_axes(func, h._h_bincontent, range(h.ndim-1, -1, -1))[h._h_visiblerange]
             else:
                 cum = d.histfuncs.cumulative_binerror(h, op)
                 cum_full = n.sqrt(n.apply_over_axes(func, h._h_squaredweights, range(h.ndim-1, -1, -1))[h._h_visiblerange])
             assert((cum == cum_full).all())
Example #8
def two_way(cells):
    """ Two-way chi-square test of independence. 
	Takes a 3D array as input: N(voxels) x 2 x 2, where the last two dimensions
	are the contingency table for each of N voxels. Returns an array of p-values.
	"""
    # dt = cells.dtype
    cells = cells.astype("float64")  # Make sure we don't overflow
    total = np.apply_over_axes(np.sum, cells, [1, 2]).ravel()
    chi_sq = np.zeros(cells.shape, dtype="float64")
    for i in range(2):
        for j in range(2):
            exp = np.sum(cells[:, i, :], 1).ravel() * np.sum(cells[:, :, j], 1).ravel() / total
            chi_sq[:, i, j] = (cells[:, i, j] - exp) ** 2 / exp
    chi_sq = np.apply_over_axes(np.sum, chi_sq, [1, 2]).ravel()
    return special.chdtrc(1, chi_sq)  # .astype(dt)
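
A hypothetical call, stacking one 2x2 contingency table per voxel (and assuming numpy and scipy.special are imported as in the snippet):

import numpy as np

cells = np.array([[[10, 20],
                   [30, 40]],
                  [[50,  5],
                   [ 5, 50]]])   # shape (2, 2, 2): two voxels
p_values = two_way(cells)        # one p-value per voxel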
Example #9
    def int_flux_threshold(self, skydir, fn, ts_thresh, min_counts):
        """Compute the integral flux threshold for a point source at
        position ``skydir`` with spectral parameterization ``fn``.

        """

        ebins = 10**np.linspace(np.log10(self.ebins[0]),
                                np.log10(self.ebins[-1]), 33)
        ectr = np.sqrt(ebins[0] * ebins[-1])

        sig, bkg, bkg_fit = self.compute_counts(skydir, fn, ebins)

        norms = irfs.compute_norm(sig, bkg, ts_thresh,
                                  min_counts, sum_axes=[1, 2, 3], bkg_fit=bkg_fit,
                                  rebin_axes=[4, 10, 1])

        npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig, [1, 2, 3]))
        npred = np.array(npred, ndmin=1)
        flux = np.squeeze(norms) * fn.flux(ebins[0], ebins[-1])
        eflux = np.squeeze(norms) * fn.eflux(ebins[0], ebins[-1])
        dnde = np.squeeze(norms) * fn.dnde(ectr)
        e2dnde = ectr**2 * dnde

        o = dict(e_min=self.ebins[0], e_max=self.ebins[-1], e_ref=ectr,
                 npred=npred, flux=flux, eflux=eflux,
                 dnde=dnde, e2dnde=e2dnde)

        sig, bkg, bkg_fit = self.compute_counts(skydir, fn)

        npred = np.squeeze(np.apply_over_axes(np.sum, norms * sig,
                                              [2, 3]))
        flux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *
                          fn.flux(self.ebins[:-1], self.ebins[1:]))
        eflux = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))[:, None] *
                           fn.eflux(self.ebins[:-1], self.ebins[1:]))
        dnde = np.squeeze(np.squeeze(norms, axis=(1, 2, 3))
                          [:, None] * fn.dnde(self.ectr))
        e2dnde = ectr**2 * dnde

        o['bins'] = dict(npred=npred,
                         flux=flux,
                         eflux=eflux,
                         dnde=dnde,
                         e2dnde=e2dnde,
                         e_min=self.ebins[:-1], e_max=self.ebins[1:],
                         e_ref=self.ectr)

        return o
Example #10
    def mad(data, c=0.6745, axis=None):
        """
        Median Absolute Deviation (MAD) along an axis.

        Straight from statsmodels's source code, adapted

        Parameters
        ----------
        data : iterable
            The data along which to calculate the MAD

        c : float, optional
            The normalization constant. Defined as
            ``scipy.stats.norm.ppf(3/4.)``, which is approximately ``.6745``.

        axis : int, optional, default ``None``
            Axis along which to calculate ``mad``. Default is ``None``,
            which computes the MAD over the flattened array.
        """
        data = np.asarray(data)
        if axis is not None:
            center = np.apply_over_axes(np.median, data, axis)
        else:
            center = np.median(data)
        return np.median((np.fabs(data - center)) / c, axis=axis)
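
A quick sanity check, assuming the function above is in scope: for normally distributed data the normalized MAD estimates the standard deviation:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(scale=2.0, size=100_000)
print(mad(x))   # close to 2.0, since median(|x - median|) / 0.6745 estimates sigma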
Example #11
def isc_between(E, condA, condB, method=("inter-subject", "subject-total", "total-total"), threshold=True):
    condnameA = condA.name.split("/")[-1]
    condnameB = condB.name.split("/")[-1]
    g_name = "correlations/%s" % condnameB
    g_out = condA[g_name] if g_name in condA else condA.create_group(g_name)

    iter_runs = E.iter_runs(condnameA)
    A_dlist = [run.load(standardized=True, threshold=threshold) for run in iter_runs]
    if "inter-subject" in method:
        # wg isc
        B_runs = E.iter_runs(condnameB)
        B_dlist = [run.load(standardized=True, threshold=threshold) for run in B_runs]
        C = crosscor(A_dlist, B_dlist, standardized=True)
        C_mean = np.squeeze(np.apply_over_axes(nanmean, C, [-1, -2]))

        dset_overwrite(g_out, "isc_mat", C)
        dset_overwrite(g_out, "inter-subject", C_mean)

    if "subject-total" in method:
        # WG subject-total
        sub_ttl_corr = [corcomposite(dat, condB["composite"]) for dat in A_dlist]
        sub_ttl_mean = nanmean(sub_ttl_corr, axis=0)

        dset_overwrite(g_out, "subject-total", sub_ttl_mean)

    if "total-total" in method:
        # BG correlation for composites
        gB_out = condB[g_name] if g_name in condB else condB.create_group(g_name)
        ttl_ttl_corr = corsubs(condA["composite"][...], condB["composite"][...])

        dset_overwrite(g_out, "total-total", ttl_ttl_corr)
        dset_overwrite(gB_out, "total-total", ttl_ttl_corr)
Example #12
def fwhm(data, axis=None):
    """Calculate the indices of the FWHM for the projections on the axis.

    Parameters
    ----------
    data : array_like
        n-dim data
    axis : int, optional
        The axis on which the projection is taken. If axis is `None` return
        a list of all FWHM index pairs.

    Returns
    -------
    idx : list of pairs of int
        The indices of the fwhm. If axis is specified a plain pair is
        returned.

    See Also
    --------
    For usage of `apply_over_axes` see:
    http://www.mail-archive.com/[email protected]/msg03469.html
    """
    if axis is None:
        return [fwhm(data, ax) for ax in range(data.ndim)]

    axes = np.r_[0:axis, axis+1:data.ndim]
    d = np.apply_over_axes(np.mean, data, axes).flatten()
    imax = d.argmax()
    hmax = 0.5 * d[imax]
    i0 = np.where(d[:imax] <= hmax)[0][-1]
    i1 = np.where(d[imax:] <= hmax)[0][0] + imax
    return i0, i1
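
A small check of the function above, assuming a Gaussian profile, whose FWHM is about 2.355 sigma:

import numpy as np

x = np.arange(201)
sigma = 10.0
profile = np.exp(-0.5 * ((x - 100) / sigma) ** 2)
i0, i1 = fwhm(profile, axis=0)
print(i1 - i0)   # ~24 samples, i.e. roughly 2.355 * sigma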
Example #13
 def normalize(self, seq):
     if 0==1 and len(seq.shape) == 3: #normalizes each frame to itself
         mins = np.apply_over_axes(np.min, seq, [1,2])
         seq = seq + abs(mins)
         maxs = np.apply_over_axes(np.max, seq, [1,2])
     elif len(seq.shape) == 3: #normalizes entire 3d matrix to its highest value
         minval = np.min(seq)
         seq = seq + abs(minval)
         maxval = np.max(seq)
         maxs = maxval
     elif len(seq.shape) < 3:
         mins = np.min(seq, axis=0)
         seq = seq + abs(mins)  # shift by the per-column minimum, as in the 3-D branch
         maxs = np.max(seq, axis=0)
     seq = seq / maxs
     return seq
Example #14
def get_source_kernel(gta, name, kernel=None):
    """Get the PDF for the given source."""

    sm = []
    zs = 0
    for c in gta.components:
        z = c.model_counts_map(name).data.astype('float')
        if kernel is not None:
            shape = (z.shape[0],) + kernel.shape
            z = np.apply_over_axes(np.sum, z, axes=[1, 2]) * np.ones(
                shape) * kernel[np.newaxis, :, :]
            zs += np.sum(z)
        else:
            zs += np.sum(z)

        sm.append(z)

    sm2 = 0
    for i, m in enumerate(sm):
        sm[i] /= zs
        sm2 += np.sum(sm[i] ** 2)

    for i, m in enumerate(sm):
        sm[i] /= sm2

    return sm
Example #15
    def deviance(self, X, y, weights=None):
        '''Calculate the normal deviance (i.e. sum of squared errors) for
        every lambda.  The model must already be fit to call this method.
        '''
        self._check_if_fit()
        if weights is not None and weights.shape[0] != X.shape[0]:
            raise ValueError("The weights vector must have the same length as X.")

        # We normalise responses by default
        resp_weights = 1.0 / np.apply_along_axis(np.nanstd, 0, np.array(y))

        y_hat = self.predict(X)
        # Take the response y, and repeat it to produce a matrix
        # of the same dimensions as y_hat
        a = np.array(y)
        y_stacked = np.tile(a.reshape(a.shape + (1,)), (1, 1, y_hat.shape[-1]))
        rw_stacked = np.tile(resp_weights.reshape(1, len(resp_weights), 1), (y_hat.shape[0], 1, y_hat.shape[2]))
        if weights is None:
            sq_residuals = ((y_stacked - y_hat) * rw_stacked)**2
            normfac = X.shape[0] * y.shape[1]
        else:
            w = np.array(weights)
            w_stacked = np.tile(w.reshape((y_hat.shape[0], 1, 1)), (1,) + y_hat.shape[1:])
            sq_residuals = w_stacked * ((y_stacked - y_hat) * rw_stacked)**2
            normfac = np.sum(weights) * y.shape[1]
        return np.apply_over_axes(np.sum, sq_residuals, [0, 1]).ravel() / normfac
Example #16
def poisson_ul(nc,mus,mub,data_axes=1):
    """Test statistic for discovery with known background."""


    # MLE for signal norm under signal hypothesis
    snorm = np.apply_over_axes(np.sum,nc-mub,data_axes)
    snorm[snorm<0] = 0

    x = np.linspace(-3,3,50)

    mutot = snorm*mus+mub

    deltas = 10**x  # *np.sum(mub)

    smutot = deltas[np.newaxis,np.newaxis,:]*mus[...,np.newaxis] + mutot[...,np.newaxis]

    lnl = nc[...,np.newaxis]*np.log(smutot) - smutot

    lnl = np.sum(lnl,axis=data_axes)

    ul = np.zeros(lnl.shape[0])

    for i in range(lnl.shape[0]):
        
        dlnl = -2*(lnl[i]-lnl[i][0])
        deltas_root = find_root(deltas,dlnl,2.72)
        ul[i] = snorm[i][0] + deltas_root

    return ul
Example #17
def _cumsum_with_overflow(bincontent, overflow, func):
    """
    Emulate the result of a cumulative sum over the entire histogram backing
    array given only disjoint views into the visible *bincontent* and the
    *overflow* (a slice in each dimension).
    """
    cum = bincontent
    ndim = bincontent.ndim
    # TODO: take axes to sum over as a parameter
    axes = range(ndim-1, -1, -1)
    for i, axis in enumerate(axes):
        # overflow should be a slab with one trivial dimension
        oflow = overflow[axis]
        assert(oflow.ndim == cum.ndim)
        assert(oflow.shape[axis] == 1)
    
        # sum over the visible part of the array
        cum = func(cum, axis=axis)
        # apply all the sums taken so far to the overflow slab, then add only
        # the part that is either in the overflow bin for this dimension, or
        # in a visible bin of another dimension
        idx = [slice(1, -1)] * ndim
        idx[axis] = slice(0, 1)
        cum += n.apply_over_axes(func, oflow, axes[:i])[tuple(idx)]
        
    return cum
Example #18
 def project(self, axis=0, method=np.mean, show=False, roi=None, backend=pl, **kwargs):
     """Flatten/project the movie data across one or many axes
     
     Parameters
     ----------
     axis : int, list
         axis/axes over which to flatten
     method : def
         function to apply across the specified axes
     show : bool
         display the result (if 2d, as image; if 1d, as trace)
     roi : pyfluo.ROI 
         roi to display
     backend : module
         module used for interactive display (only matplotlib currently supported)
         
     Returns
     -------
     The projected image
     """
     if method is None:
         method = np.mean
     pro = np.apply_over_axes(method,self,axes=axis).squeeze()
     
     if show:
         ax = pl.gca()
         ax.margins(0.)
         if pro.ndim == 2:
             pl.imshow(pro, cmap=pl.cm.Greys_r, **kwargs)
             if roi is not None:
                 roi.show()
         elif pro.ndim == 1:
             pl.plot(self.time, pro)
     
     return pro
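
The core of project is just apply_over_axes plus a squeeze; a standalone equivalent with hypothetical data:

import numpy as np

mov = np.random.rand(100, 64, 64)                                # frames x rows x cols
mean_img = np.apply_over_axes(np.mean, mov, axes=0).squeeze()    # (64, 64) mean image
trace = np.apply_over_axes(np.mean, mov, axes=[1, 2]).squeeze()  # (100,) time course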
Example #19
    def psf(self,emin,emax,cthmin,cthmax):
        """Return energy- and livetime-weighted PSF density vector as
        a function of angular offset for a bin in energy and
        inclination angle."""
        
        logemin = np.log10(emin)
        logemax = np.log10(emax)

        ilo = np.argwhere(self._energy > emin)[0,0]
        ihi = np.argwhere(self._energy < emax)[-1,0]+1
        
        jlo = np.argwhere(self._ctheta_axis.center > cthmin)[0,0]
        jhi = np.argwhere(self._ctheta_axis.center < cthmax)[-1,0] +1
        
        weights = (self._energy[ilo:ihi,np.newaxis]*
                   self._exp[ilo:ihi,jlo:jhi]*
                   self._wfn(self._energy[ilo:ihi,np.newaxis]))
        
        wsum = np.sum(weights)
        psf = np.apply_over_axes(np.sum,
                                 self._psf[:,ilo:ihi,jlo:jhi]*
                                 weights[np.newaxis,...],
                                 [1,2])
        psf = np.squeeze(psf)                   
        psf *= (1./wsum)
        return self._dtheta, psf
Example #20
    def get_data_projection(data,axes,iaxis,xmin=-1,xmax=1,erange=None):

        s0 = slice(None,None)
        s1 = slice(None,None)
        s2 = slice(None,None)
        
        if iaxis == 0:
            i0 = valToEdge(axes[iaxis],xmin)
            i1 = valToEdge(axes[iaxis],xmax)
            s1 = slice(i0,i1)
            saxes = [1,2]
        else:
            i0 = valToEdge(axes[iaxis],xmin)
            i1 = valToEdge(axes[iaxis],xmax)
            s0 = slice(i0,i1)
            saxes = [0,2]

        if erange is not None:
            j0 = valToEdge(axes[2],erange[0])
            j1 = valToEdge(axes[2],erange[1])
            s2 = slice(j0,j1)
            
        c = np.apply_over_axes(np.sum,data[s0,s1,s2],axes=saxes)
        c = np.squeeze(c)

        return c
Example #21
def genLocs(locs,predlocs,conf):
    dlocs = np.apply_over_axes(np.sum,(locs-predlocs)**2,axes=[1,2])
    dlocs = old_div(np.sqrt(dlocs),conf.n_classes)
    close = np.reshape(dlocs < (old_div(conf.gen_minlen,2)),[-1])
    newlocs = copy.deepcopy(predlocs)
    newlocs[close,...] = genFewMovedNegSamples(newlocs[close,...],conf,nmove=3)
    return newlocs
Example #22
def downsample_rect(img, start_row, start_col, end_row, end_col, width, output, start_idx):
    """
    .. todo::

        WRITEME

    Parameters
    ----------
    img : WRITEME
        numpy matrix in topological order
        (batch size, rows, cols, channels)
    start_row : WRITEME
        row index of top-left corner of rectangle to average pool
    start_col : WRITEME
        col index of top-left corner of rectangle to average pool
    end_row : WRITEME
        row index of bottom-right corner of rectangle to average pool
    end_col : WRITEME
        col index of bottom-right corner of rectangle to average pool
    width : WRITEME
        take the mean over rectangular block of this width
    output : WRITEME
        dense design matrix, of shape (batch size, rows*cols*channels)
    start_idx : WRITEME
        column index where to start writing the output
    """
    idx = start_idx

    for i in xrange(start_row, end_row - width + 1, width):
        for j in xrange(start_col, end_col - width + 1, width):
            block = img[:, i:i+width, j:j+width]
            output[:,idx] = numpy.apply_over_axes(numpy.mean, block, axes=[1,2])[:,0,0]
            idx += 1

    return idx
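
A hypothetical call (note the snippet is Python 2, since it uses xrange) that average-pools a single-channel 8x8 batch with 2x2 blocks; the 3-D layout matches what the indexing above actually supports:

import numpy

batch, rows, cols, width = 5, 8, 8, 2
img = numpy.random.rand(batch, rows, cols)
output = numpy.zeros((batch, (rows // width) * (cols // width)))
n_written = downsample_rect(img, 0, 0, rows, cols, width, output, 0)
print(n_written)   # 16: one mean per 2x2 block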
Example #23
def median_absolute_deviation(array, c=scipy.stats.norm.ppf(3/4.), axis=0,
                              center=np.median):
    """ The Median Absolute Deviation along given axis of an array.

    Parameters
    ----------
    array: array-like
        input array.
    c: float (optional, default scipy.stats.norm.ppf(3/4.) ~ .6745)
        the normalization constant.
    axis: int (optional, default 0)
        axes over which the callable function `center` is applied.
    center: callable or float (default `np.median`)
        If a callable is provided then the array is centered with it.
        Otherwise, a float representing the center is expected.

    Returns
    -------
    mad: float
        `mad` = median(abs(`array` - center)) / `c`
    """
    # Convert array-like object
    array = np.asarray(array)

    # Compute the center if a callable is passed in parameters
    if callable(center):
        center = np.apply_over_axes(center, array, axis)

    # Compute the median absolute deviation
    return np.median((np.fabs(array - center)) / c, axis=axis)
Example #24
def cut_positions(filename, blurred, *positions):
	blurred = int(blurred)
	pos = eval("".join(positions))
	root = nc.open(filename)[0]
	lat = nc.getvar(root, 'lat')
	lon = nc.getvar(root, 'lon')
	data = nc.getvar(root, 'data')
	root_cut = nc.clonefile(root, 'cut_positions.' + filename, ['lat', 'lon', 'data'])[0]
	nc.getdim(root_cut, 'northing_cut', len(pos))
	nc.getdim(root_cut, 'easting_cut', 2)
	lat_cut = nc.getvar(root_cut, 'lat', 'f4', ('northing_cut','easting_cut',),4)
	lon_cut = nc.getvar(root_cut, 'lon', 'f4', ('northing_cut','easting_cut',),4)
	data_cut = nc.getvar(root_cut, 'data', 'f4', ('timing','northing_cut','easting_cut',),4)
	ix = 0
	for i in range(len(pos)):
		show("\rCutting data: processing position %d / %d " % (i+1, len(pos)))
		x, y = statistical_search_position(pos[i], lat, lon)
		if x and y:
			lat_cut[ix,0] = lat[x,y]
			lon_cut[ix,0] = lon[x,y]
			data_cut[:,ix,0] = np.apply_over_axes(np.mean, data[:,x-blurred:x+blurred,y-blurred:y+blurred], axes=[1,2]) if blurred > 0 else data[:,x,y]
			lat_cut[ix,1], lon_cut[ix,1], data_cut[:,ix,1] = lat_cut[ix,0], lon_cut[ix,0], data_cut[:,ix,0]
			ix += 1
	nc.close(root)
	nc.close(root_cut)
Example #25
def crop_to_bounding_box(infile, outfile):
    """
    Crops the volume in infile to locations where it is nonzero.
    Prints the resulting bounding box in the following format:
    xmin ymin zmin xmax ymax zmax
    """
    nii = nib.load(infile)
    aff = nii.get_affine()
    data = nii.get_data()
    minmaxes = []
    slicing = []
    for axis, otheraxes in enumerate([[2,1], [2,0], [1,0]]):
        one_axis = np.apply_over_axes(np.sum, data, otheraxes).squeeze()
        # hack because image has a weird bright patch
        (nonzero_locs,) = np.where(one_axis)
        minmaxes.append((nonzero_locs.min(), nonzero_locs.max()))


    minima = [int(min) for (min, max) in minmaxes]
    maxima = [int(max) for (min, max) in minmaxes]
    slicing = [slice(min, max, None) for (min, max) in minmaxes]
    aff[:3, -1] += minima
    out = nib.Nifti1Image(data[slicing], header=nii.get_header(), affine=aff)
    out.update_header()
    out.to_filename(outfile)
    print(" ".join(map(str,minima)))
    print(" ".join(map(str,maxima)))
Example #26
def blkavg(array, blockshape):
    blocks = blockview(array, blockshape)
    axes = range(len(blocks.shape) - 1,
                 len(blocks.shape) - len(array.shape) - 1, -1)
    means = np.apply_over_axes(np.mean, blocks, axes)
    # Drop the extra dimensions
    return means.reshape(blocks.shape[:len(array.shape)])[:-1, :-1]
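
blockview is not shown here; for arrays whose shape is an exact multiple of blockshape, a reshape-based block average is an equivalent sketch:

import numpy as np

def blkavg_reshape(array, blockshape):
    # Fold each block into its own pair of axes, then average over them.
    h, w = blockshape
    H = array.shape[0] // h * h
    W = array.shape[1] // w * w
    return array[:H, :W].reshape(H // h, h, W // w, w).mean(axis=(1, 3))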
Example #27
    def _extract(self, phi, data):
        X = phi(data)
        XX = X[:, np.newaxis, np.newaxis]
        theta = self._models[np.newaxis]

        S = self._settings.get('standardize')
        if S:
            llh = XX * logit(theta)
            bb = np.apply_over_axes(np.sum, llh, [-3, -2, -1])[..., 0, 0, 0]
            bb = (bb - self._means) / self._sigmas
            yhat = np.argmax(bb.max(-1), axis=1)
        else:
            llh = XX * np.log(theta) + (1 - XX) * np.log(1 - theta)
            bb = np.apply_over_axes(np.sum, llh, [-3, -2, -1])[..., 0, 0, 0]
            yhat = np.argmax(bb.max(-1), axis=1)
        return yhat
Example #28
def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('parts', metavar='<parts file>',
                        type=argparse.FileType('rb'),
                        help='Filename of parts file')
    args = parser.parse_args()

    feat_net = pnet.Layer.load(args.parts)

    layers = feat_net.layers

    S = 11

    layers += [
        pnet.PoolingLayer(shape=(S, S), strides=(S, S), operation='sum'),
        # pnet.MixtureClassificationLayer(n_components=10, min_prob=1e-5,
        # settings=dict( standardize=False,),)
        pnet.SVMClassificationLayer(C=100.0, settings=dict(standardize=True)),
    ]

    net = pnet.PartsNet(layers)

    limit = None
    error_rate, conf_mat = pnet.norb.train_and_test(net,
                                                    samples_per_class=None,
                                                    seed=0, limit=limit)

    print('Error rate: {:.02f}'.format(error_rate * 100))
    np.set_printoptions(precision=2, suppress=True)
    print('Confusion matrix:')

    norm_conf = conf_mat / np.apply_over_axes(np.sum, conf_mat, [1])
    print(norm_conf)
Example #29
    def to_wcs(self, sum_bands=False, normalize=True, proj='AIT', oversample=2,
               width_pix=None, hpx2wcs=None):

        from .wcsnd import WcsNDMap

        if sum_bands and self.geom.nside.size > 1:
            map_sum = self.sum_over_axes()
            return map_sum.to_wcs(sum_bands=False, normalize=normalize, proj=proj,
                                  oversample=oversample, width_pix=width_pix)

        # FIXME: Check whether the old mapping is still valid and reuse it
        if hpx2wcs is None:
            hpx2wcs = self.make_wcs_mapping(oversample=oversample, proj=proj,
                                            width_pix=width_pix)

        # FIXME: Need a function to extract a valid shape from npix property

        if sum_bands:
            hpx_data = np.apply_over_axes(np.sum, self.data,
                                          axes=np.arange(self.data.ndim - 1))
            hpx_data = np.squeeze(hpx_data)
            wcs_shape = tuple([t.flat[0] for t in hpx2wcs.npix])
            wcs_data = np.zeros(wcs_shape).T
            wcs = hpx2wcs.wcs.to_image()
        else:
            hpx_data = self.data
            wcs_shape = tuple([t.flat[0] for t in
                               hpx2wcs.npix]) + self.geom._shape
            wcs_data = np.zeros(wcs_shape).T
            wcs = hpx2wcs.wcs.to_cube(self.geom.axes)

        # FIXME: Should reimplement instantiating map first and fill data array
        hpx2wcs.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize)
        return WcsNDMap(wcs, wcs_data)
Example #30
def mad(a, c=Gaussian.ppf(3/4.), axis=0, center=np.median):
    # c \approx .6745
    """
    The Median Absolute Deviation along given axis of an array

    Parameters
    ----------
    a : array-like
        Input array.
    c : float, optional
        The normalization constant.  Defined as scipy.stats.norm.ppf(3/4.),
        which is approximately .6745.
    axis : int, optional
        The default is 0. Can also be None.
    center : callable or float
        If a callable is provided, such as the default `np.median` then it
        is expected to be called center(a). The axis argument will be applied
        via np.apply_over_axes. Otherwise, provide a float.

    Returns
    -------
    mad : float
        `mad` = median(abs(`a` - center))/`c`
    """
    a = np.asarray(a)
    if callable(center):
        center = np.apply_over_axes(center, a, axis)
    return np.median((np.fabs(a-center))/c, axis=axis)
Example #31
def mad(x, axis=0, center=np.median, scale=1.4826,
                              nan_policy='propagate'):
    """
    Calculates the Median Absolute Deviation (MAD) values for the input array x.

    Parameters
    ----------
    x : array-like
        Input array (e.g. a coalescence array).

    axis : int
        Axis along which to compute the MAD (if None, compute over the
        whole array).

    center : np function
        The function defining the centre of the distribution 
        (e.g. np.median)
    
    scale : float
        The mad scaling factor for the mad factor output to make the mad factor 
        calculated in this function a consistent estimation of the standard 
        deviation of the distribution. Default is 1.4826, which is the appropriate 
        scaling factor for a normal distribution.
    
    nan_policy: str
        The policy with which to deal with NaNs. Options are: propagate, raise, 
        omit.

    Returns
    -------
    scaled_mad : array-like
        Array of scaled median absolute deviation values for the input array,
        x, scaled to provide an estimation of the standard deviation of the
        distribution.

    """
    # Perform initial data manipulation and checks:
    # (from scipy v1.3 source)
    x = np.asarray(x)
    # Consistent with `np.var` and `np.std`.
    if not x.size:
        return np.nan
    contains_nan, nan_policy = _contains_nan(x, nan_policy)
    if contains_nan and nan_policy == 'propagate':
        return np.nan
    if contains_nan and nan_policy == 'omit':
        # Way faster than carrying the masks around
        arr = np.ma.masked_invalid(x).compressed()
    else:
        arr = x

    # Calculate median and mad values:
    if axis is None:
        med = center(arr)
        mad = np.median(np.abs(arr - med))
    else:
        med = np.apply_over_axes(center, arr, axis)
        mad = np.median(np.abs(arr - med), axis=axis)

    return scale * mad
Example #32
def power_wash(ar):
    """Power wash RFI out of the data.

        Input:
            ar: The archive to be cleaned.
        Outputs:
            None - The archive is cleaned in place.
    """
    ar.pscrunch()
    ar.remove_baseline()
    ar.dedisperse()

    # Remove profile
    data = ar.get_data().squeeze()
    template = np.apply_over_axes(np.sum, data, (0, 1)).squeeze()
    clean_utils.remove_profile_inplace(ar, template, None)

    ar.dededisperse()

    bad_chans = []
    bad_subints = []
    bad_pairs = []
    std_sub_vs_chan = np.std(data, axis=2)
    print(std_sub_vs_chan.shape)
    #mean_sub_vs_chan = np.mean(data, axis=2)

    # Identify bad sub-int/channel pairs
    subintweights = clean_utils.get_subint_weights(ar).astype(bool)
    chanweights = clean_utils.get_chan_weights(ar).astype(bool)
    for isub in range(ar.get_nsubint()):
        for ichan in range(ar.get_nchan()):
            plt.figure()
            plt.subplot(2, 1, 1)
            plt.plot(std_sub_vs_chan[isub, :], 'k-')
            subint = clean_utils.scale_chans(std_sub_vs_chan[isub, :], \
                                                chanweights=chanweights)
            print(clean_utils.get_hot_bins(subint))
            plt.subplot(2, 1, 2)
            plt.plot(subint, 'r-')
            plt.title("Subint #%d" % isub)
            plt.figure()
            plt.subplot(2, 1, 1)
            plt.plot(std_sub_vs_chan[:, ichan], 'k-')
            chan = clean_utils.scale_subints(std_sub_vs_chan[:, ichan], \
                                                subintweights=subintweights)
            print(clean_utils.get_hot_bins(chan))
            plt.subplot(2, 1, 2)
            plt.plot(chan, 'r-')
            plt.title("Chan #%d" % ichan)
            plt.show()

    chanstds = np.sum(std_sub_vs_chan, axis=0)
    plt.subplot(2, 1, 1)
    plt.plot(chanstds)
    chanstds = clean_utils.scale_chans(chanstds, chanweights=chanweights)
    plt.subplot(2, 1, 2)
    plt.plot(chanstds)
    bad_chans.extend(np.argwhere(chanstds > 1).squeeze())
    plt.show()
Example #33
 def verify_cumsum(h):
     for op in '<', '>':
         for kind in 'bincontent', 'binerror':
             func = lambda arr, axis: d.histfuncs.cumsum(
                 arr, operator=op, axis=axis)
             if kind == 'bincontent':
                 cum = d.histfuncs.cumulative_bincontent(h, op)
                 cum_full = n.apply_over_axes(func, h._h_bincontent,
                                              range(h.ndim - 1, -1,
                                                    -1))[h._h_visiblerange]
             else:
                 cum = d.histfuncs.cumulative_binerror(h, op)
                 cum_full = n.sqrt(
                     n.apply_over_axes(func, h._h_squaredweights,
                                       range(h.ndim - 1, -1,
                                             -1))[h._h_visiblerange])
             assert ((cum == cum_full).all())
Example #34
def getShortRunLowGrayLevelEmphasis(rlmatrix):
    I, J = calcuteIJ(rlmatrix)
    numerator = np.apply_over_axes(np.sum,
                                   apply_over_degree(np.divide, rlmatrix,
                                                     (I * I * J * J)),
                                   axes=(0, 1))[0, 0]
    S = calcuteS(rlmatrix)
    return numerator / S
Example #35
def getLongRunLow(rlmatrix):
    I, J = calcuteIJ(rlmatrix)
    temp = apply_over_degree(np.multiply, rlmatrix, (J * J))
    numerator = np.apply_over_axes(np.sum,
                                   apply_over_degree(np.divide, temp, (I * I)),
                                   axes=(0, 1))[0, 0]
    S = calcuteS(rlmatrix)
    return numerator / S
Example #36
def getLongRunHighGrayLevelEmphais(rlmatrix):
    I, J = calcuteIJ(rlmatrix)
    numerator = np.apply_over_axes(np.sum,
                                   apply_over_degree(np.multiply, rlmatrix,
                                                     (I * I * J * J)),
                                   axes=(0, 1))[0, 0]
    S = calcuteS(rlmatrix)
    return numerator / S
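
calcuteIJ, calcuteS and apply_over_degree come from the surrounding GLRLM module and are not shown; for a single 2-D run-length matrix the same features reduce to weighted sums over 1-based gray-level (I) and run-length (J) indices, e.g. for Short Run Low Gray Level Emphasis:

import numpy as np

def short_run_low_gray_level_emphasis(rlmatrix):
    # rlmatrix[i, j]: number of runs with gray level i+1 and length j+1
    I, J = np.ogrid[1:rlmatrix.shape[0] + 1, 1:rlmatrix.shape[1] + 1]
    return np.sum(rlmatrix / (I ** 2 * J ** 2)) / np.sum(rlmatrix)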
Example #37
def getSparsity(vox) :
    vox = np.squeeze(np.array(vox))
    vox = np.where(vox>0.5, 1, 0)
    if (len(vox.shape)==3):
        return np.sum(vox) / (vox.shape[1] ** 3)
    if (len(vox.shape)==4):
        result = np.squeeze(np.apply_over_axes(np.sum, vox, [1,2,3]))
        return result / (vox.shape[1] ** 3)
Example #38
 def getLongRunEmphasis(self, rlmatrix):
     I, J = self.calcuteIJ(rlmatrix)
     numerator = np.apply_over_axes(np.sum,
                                    self.apply_over_degree(
                                        np.multiply, rlmatrix, (J * J)),
                                    axes=(0, 1))[0, 0]
     S = self.calcuteS(rlmatrix)
     return numerator / S
Example #39
def get_input_features_from_numpy(img_batch):
    img, pts, pts_raw = _preprocess.batch_transform(img_batch,
                                                    face_detect=True,
                                                    preprocessing=False)
    img = np.float32(img)
    img_pp = img[:, 16:-16, 16:-16, :]
    img_pp -= np.apply_over_axes(np.mean, img_pp, [1, 2])
    return img_pp, pts, pts_raw
Example #40
 def getHighGrayLevelRunEmphais(self, rlmatrix):
     I, J = self.calcuteIJ(rlmatrix)
     numerator = np.apply_over_axes(np.sum,
                                    self.apply_over_degree(
                                        np.multiply, rlmatrix, (I * I)),
                                    axes=(0, 1))[0, 0]
     S = self.calcuteS(rlmatrix)
     return numerator / S
Example #41
def process(f):
    #print "Processing file {0}".format(f)
    im = gv.img.load_image(f)
    edges = parts_descriptor.extract_features(im, {'spread_radii': (0, 0)})

    counts = np.apply_over_axes(np.sum, edges, [0, 1]).ravel() 
    tots = np.prod(edges.shape[1:])
    return counts, tots
Example #42
def blkavg(array, blockshape):
    blocks = blockview(array, blockshape)
    axes = range(
        len(blocks.shape) - 1,
        len(blocks.shape) - len(array.shape) - 1, -1)
    means = np.apply_over_axes(np.mean, blocks, axes)
    # Drop the extra dimensions
    return means.reshape(blocks.shape[:len(array.shape)])[:-1, :-1]
Example #43
 def min_l1(self, layer, input, output):
     '''
     L1
     '''
     activs = np.abs(input[0].numpy())
     L1 = np.apply_over_axes(np.sum, activs, [0, 2, 3])
     L1 = L1.reshape(L1.shape[1])
     self.to_remove[layer.layer_name] = [np.argmin(L1)]
Example #44
 def mad(data, c=0.6745, axis=None):
     """Straight from statsmodels's source code, adapted"""
     data = np.asarray(data)
     if axis is not None:
         center = np.apply_over_axes(np.median, data, axis)
     else:
         center = np.median(data)
     return np.median((np.fabs(data - center)) / c, axis=axis)
Example #45
def absolute_deviation(x, axis=0, center=np.median, nan_policy='propagate'):
    """
	Compute the absolute deviations from the median of the data along the given axis.

	Parameters
	----------
	x : array_like
		Input array or object that can be converted to an array.
	axis : int or None, optional
		Axis along which the range is computed. Default is 0. If None, compute
		the MAD over the entire array.
	center : callable, optional
		A function that will return the central value. The default is to use
		np.median. Any user defined function used will need to have the function
		signature ``func(arr, axis)``.
	nan_policy : {'propagate', 'raise', 'omit'}, optional
		Defines how to handle when input contains nan. 'propagate'
		returns nan, 'raise' throws an error, 'omit' performs the
		calculations ignoring nan values. Default is 'propagate'.
	Returns
	-------
	ad : scalar or ndarray
		If ``axis=None``, a scalar is returned. If the input contains
		integers or floats of smaller precision than ``np.float64``, then the
		output data-type is ``np.float64``. Otherwise, the output data-type is
		the same as that of the input.
	Notes
	-----
	The `center` argument only affects the calculation of the central value
	around which the absolute deviation is calculated. That is, passing in
	``center=np.mean`` will calculate the absolute deviation around the mean -
	it will not calculate the *mean* absolute deviation.
	"""
    x = np.asarray(x)

    # Consistent with `np.var` and `np.std`.
    if not x.size:
        return np.nan

    contains_nan, nan_policy = _contains_nan(x, nan_policy)

    if contains_nan and nan_policy == 'propagate':
        return np.nan

    if contains_nan and nan_policy == 'omit':
        # Way faster than carrying the masks around
        arr = np.ma.masked_invalid(x).compressed()
    else:
        arr = x

    if axis is None:
        med = center(arr)
        ad = np.abs(arr - med)
    else:
        med = np.apply_over_axes(center, arr, axis)
        ad = np.abs(arr - med)

    return ad
Example #46
def ydata_sum(slice, data, dataview):
    input_ydata = data.ycube[slice]
    summed_ydata = np.apply_over_axes(np.sum, input_ydata, [0, 1])
    input_xdata = data.xdata
    dtype = dataview.xdata_info['data_type']
    display_ev = dataview.display_ev
    ydata = ydata_calc2(summed_ydata, input_xdata, dtype, display_ev)

    return ydata
Example #47
def conv_forward_naive(x, w, b, conv_param):
    """
    A naive implementation of the forward pass for a convolutional layer.

    The input consists of N data points, each with C channels, height H and
    width W. We convolve each input with F different filters, where each filter
    spans all C channels and has height HH and width WW.

    Input:
    - x: Input data of shape (N, C, H, W)
    - w: Filter weights of shape (F, C, HH, WW)
    - b: Biases, of shape (F,)
    - conv_param: A dictionary with the following keys:
      - 'stride': The number of pixels between adjacent receptive fields in the
        horizontal and vertical directions.
      - 'pad': The number of pixels that will be used to zero-pad the input.

    Returns a tuple of:
    - out: Output data, of shape (N, F, H', W') where H' and W' are given by
      H' = 1 + (H + 2 * pad - HH) / stride
      W' = 1 + (W + 2 * pad - WW) / stride
    - cache: (x, w, b, conv_param)
    """
    out = None
    ###########################################################################
    # TODO: Implement the convolutional forward pass.                         #
    # Hint: you can use the function np.pad for padding.                      #
    ###########################################################################
    stride = conv_param['stride']
    pad = conv_param['pad']
    (N, C, H, W) = x.shape
    (F, C, HH, WW) = w.shape
    # --------------------------- padded x dimension --------------------------
    x_pad = np.zeros((N, C, H + 2 * pad, W + 2 * pad))
    x_pad[:, :, pad:-pad, pad:-pad] = x  # note: assumes pad > 0
    # --------------------------- out dimension -------------------------------
    H_out = int(1 + (H + 2 * pad - HH) / stride)
    W_out = int(1 + (W + 2 * pad - WW) / stride)
    out = np.zeros((N, F, H_out, W_out))
    w_ = w
    # ---------------------------
    for f in range(0, F):
        for i in range(0, H_out):
            for j in range(0, W_out):
                i_start = i * stride
                j_start = j * stride
                # print('i_start, j_start: {}, {}'.format(i_start, j_start))
                x_ = x_pad[:, :, i_start:i_start + HH,
                           j_start:j_start + WW] * w_[f]
                x_sum = np.apply_over_axes(np.sum, x_, [1, 2, 3]) + b[f]
                out[:, f:f + 1, i:i + 1, j:j + 1] = x_sum

    ###########################################################################
    #                             END OF YOUR CODE                            #
    ###########################################################################
    cache = (x, w_, b, conv_param)
    return out, cache
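
A shape check against the formulas in the docstring, with hypothetical sizes:

import numpy as np

x = np.random.randn(2, 3, 8, 8)   # N, C, H, W
w = np.random.randn(4, 3, 3, 3)   # F, C, HH, WW
b = np.zeros(4)
out, _ = conv_forward_naive(x, w, b, {'stride': 1, 'pad': 1})
print(out.shape)   # (2, 4, 8, 8): H' = 1 + (8 + 2*1 - 3)/1 = 8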
Example #48
def genLocs(locs, predlocs, conf):
    dlocs = np.apply_over_axes(np.sum, (locs - predlocs)**2, axes=[1, 2])
    dlocs = old_div(np.sqrt(dlocs), conf.n_classes)
    close = np.reshape(dlocs < (old_div(conf.gen_minlen, 2)), [-1])
    newlocs = copy.deepcopy(predlocs)
    newlocs[close, ...] = genFewMovedNegSamples(newlocs[close, ...],
                                                conf,
                                                nmove=3)
    return newlocs
Example #49
def compressed_array_summary(array, name, axes=[1, 2], extras=False):
    """print various summaries of arrays compressed along specified axes"""

    print "-" * 80
    print "array property summary for " + name + ":"
    array_summary(np.isnan(array), "nan", axes)
    array_summary(np.isinf(array), "inf", axes)
    array_summary((array == 0.), "zero", axes, meetall=True)
    array_summary((array < 0.), "negative", axes, identify_entries=False)

    if extras:
        sum_nu = np.apply_over_axes(np.sum, array, axes)
        min_nu = np.apply_over_axes(np.min, array, axes)
        max_nu = np.apply_over_axes(np.max, array, axes)
        print sum_nu.flatten()
        print min_nu.flatten()
        print max_nu.flatten()
    print ""
Example #50
def getShortRunHighGrayLevelEmphasis(rlmatrix):
    I, J = calcuteIJ(rlmatrix)
    temp = apply_over_degree(np.multiply, rlmatrix, (I * I))
    print('-----------------------')
    numerator = np.apply_over_axes(np.sum,
                                   apply_over_degree(np.divide, temp, (J * J)),
                                   axes=(0, 1))[0, 0]
    S = calcuteS(rlmatrix)
    return numerator / S
Example #51
def poisson_ul(nc,mus,mub,data_axes=1):
    """Test statistic for discovery with known background."""


    # MLE for signal norm under signal hypothesis
    snorm = np.apply_over_axes(np.sum,nc-mub,data_axes)
    snorm[snorm<0] = 0

    x = np.linspace(-3,3,50)

    mutot = snorm*mus+mub

    deltas = 10**x  # *np.sum(mub)

    smutot = deltas[np.newaxis,np.newaxis,:]*mus[...,np.newaxis] + mutot[...,np.newaxis]

    lnl = nc[...,np.newaxis]*np.log(smutot) - smutot

    lnl = np.sum(lnl,axis=data_axes)

    ul = np.zeros(lnl.shape[0])

    for i in range(lnl.shape[0]):
        
        dlnl = -2*(lnl[i]-lnl[i][0])

        deltas_root = find_root(deltas,dlnl,2.72)

        ul[i] = snorm[i][0] + deltas_root


    return ul
Example #52
    def _road_feature_sample(init_seeds):
        if init_seeds is None or len(init_seeds) == 0:
            return None
        road_feature_samples = []
        for circle_seed in init_seeds:  # type: CircleSeedNp
            road_feature_samples.append(circle_seed.road_feature_vector)

        road_feature_samples = np.array(road_feature_samples)
        return np.apply_over_axes(np.mean, road_feature_samples, 0)[0]
Example #53
 def make_nd_histogram(hist_array):
     conv = 1e-40
     hist = np.asarray(hist_array, dtype=np.float32) + conv
     # force the first bin of every histogram to the floor value
     hist[:, :, 0] = conv
     return np.apply_over_axes(
         lambda x, y: np.apply_along_axis(lambda z: z - logsumexp_scipy(z),
                                          y, x), np.log(hist), 2)
Example #54
    def test_apply_over_axes(self, axes):
        def function(x, axis):
            return np.mean(np.square(x), axis)

        out = np.apply_over_axes(function, self.ma, axes)
        expected = self.ma
        for axis in (axes if isinstance(axes, tuple) else (axes, )):
            expected = (expected**2).mean(axis, keepdims=True)
        assert_array_equal(out.unmasked, expected.unmasked)
        assert_array_equal(out.mask, expected.mask)
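
As the test above illustrates, np.apply_over_axes applies the function once per listed axis and preserves the number of dimensions, so for reducing functions it behaves like keepdims=True:

import numpy as np

a = np.arange(24).reshape(2, 3, 4)
s1 = np.apply_over_axes(np.sum, a, [0, 2])    # shape (1, 3, 1)
s2 = np.sum(a, axis=(0, 2), keepdims=True)    # same values, same shape
assert np.array_equal(s1, s2)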
Example #55
    def __call__(self, y_true, logits):
        self.y_true = y_true
        self.y_pred = np.apply_over_axes(self.__softmax, logits, axes=[0])

        batch_loss = -np.mean(self.y_true * np.log(self.y_pred + 1e-12))

        self.total_loss += batch_loss
        self.iters += 1

        return batch_loss
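
The private __softmax helper is not shown; a numerically stable stand-in with the (array, axis) signature that np.apply_over_axes expects might look like:

import numpy as np

def softmax(x, axis):
    # Shift by the max along `axis` for numerical stability; the result
    # keeps x's shape, as np.apply_over_axes requires of its function.
    z = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=axis, keepdims=True)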
Example #56
    def asimov_ts0_signal(self,s,sum_lnl=True):
        """Compute the median discovery test statistic for a signal
        strength parameter s using the asimov method."""

        s = np.array(s,ndmin=1)[np.newaxis,...]

        mub = self._mub[:,np.newaxis]
        mus = self._mus[:,np.newaxis]

        wb = mub/np.apply_over_axes(np.sum,mub,self._data_axes)

        # model amplitude for signal counts in signal region under
        # signal hypothesis
        s1 = s*mus

        # nb of counts in signal region
        ns = mub + s1

        if self._alpha is None:        

            b0 = wb*np.apply_over_axes(np.sum,ns,self._data_axes)
            lnls1 = poisson_lnl(ns,ns)
            lnls0 = poisson_lnl(ns,b0)
            ts = 2*np.apply_over_axes(np.sum,(lnls1-lnls0),self._data_axes)

            return ts

        alpha  = self._alpha[:,np.newaxis]

        # nb of counts in control region
        nc = np.apply_over_axes(np.sum,mub/alpha,self._data_axes)

        # model amplitude for background counts in signal region under
        # null hypothesis
        b0 = wb*(nc+np.apply_over_axes(np.sum,ns,
                                       self._data_axes))*alpha/(1+alpha)

        lnl1 = OnOffExperiment.lnl_signal(ns,nc,s1,mub,alpha,
                                          self._data_axes,sum_lnl)
        lnl0 = OnOffExperiment.lnl_null(ns,nc,b0,alpha,
                                        self._data_axes,sum_lnl)

        return 2*(lnl1-lnl0)
Example #57
def Lp(errors, stepsizes, p):
    errors = np.absolute(errors)
    if p == float('infinity'):
        rerrors = np.max(errors)
    else:
        rerrors = np.power(
            np.squeeze(
                np.apply_over_axes(np.sum, np.power(errors, p),
                                   range(1, len(errors.shape)))), 1 / p)
    return rerrors
Example #58
 def _format_as_impl(self, is_numeric, batch, space):
     assert isinstance(space, SequenceSpace)
     if is_numeric:
         rval = np.apply_over_axes(
             lambda batch, axis: self.space._format_as_impl(
                 is_numeric=is_numeric, batch=batch, space=space.space),
             batch, 0)
     else:
         raise NotImplementedError("Can't convert SequenceSpace Theano variables")
     return rval
Example #59
def _collapse_keyence_rgb(path, img):
    # Compute image sum for each channel giving 3 item vector
    ch_sum = np.squeeze(np.apply_over_axes(np.sum, img, [0, 1]))
    if np.sum(ch_sum > 0) > 1:
        raise ValueError(
            'Found more than one channel with information in image file "{}"'.
            format(path))

    # Select and return the single channel with a non-zero sum
    return img[..., np.argmax(ch_sum)]
Example #60
def to_image(p_res, y_res, x_ff):
    cmap = cm.get_cmap('jet')
    p_res, y_res, x_ff = (t.to('cpu').numpy().transpose(0, 2, 3, 1)
                          for t in (p_res, y_res, x_ff))
    p_res /= np.apply_over_axes(np.max, p_res, [1, 2, 3]) + eps
    y_res /= np.apply_over_axes(np.max, y_res, [1, 2, 3]) + eps
    p_res = np.concatenate(list(p_res), axis=1)
    y_res = np.concatenate(list(y_res), axis=1)
    p_res_rgb = cmap(np.squeeze(p_res))[..., :3]
    y_res_rgb = cmap(np.squeeze(y_res))[..., :3]
    x_ff = np.concatenate(list(x_ff), axis=1)
    image = np.concatenate(
        (
            x_ff,
            0.5 * x_ff + 0.5 * p_res_rgb,
            0.5 * x_ff + 0.5 * y_res_rgb
        ), axis=0
    )
    return image