Example #1
 def __init__(self, dataset_size, batch_size, num_batches, rng=None):
     if rng is not None:
         raise ValueError("non-None rng argument not supported for "
                          "sequential batch iteration")
     assert num_batches is None or num_batches >= 0
     self._dataset_size = dataset_size
     if batch_size is None:
         if num_batches is not None:
             batch_size = int(np.ceil(self._dataset_size / num_batches))
         else:
             raise ValueError("need one of batch_size, num_batches "
                              "for sequential batch iteration")
     elif batch_size is not None:
         if num_batches is not None:
             max_num_batches = np.ceil(self._dataset_size / batch_size)
             if num_batches > max_num_batches:
                 raise ValueError("dataset of %d examples can only provide "
                                  "%d batches with batch_size %d, but %d "
                                  "batches were requested" %
                                  (self._dataset_size, max_num_batches,
                                   batch_size, num_batches))
         else:
             num_batches = np.ceil(self._dataset_size / batch_size)
     self._batch_size = batch_size
     self._num_batches = num_batches
     self._next_batch_no = 0
     self._idx = 0
     self._batch = 0
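A small standalone sketch of the batch-size arithmetic used above; the dataset size and batch count are made-up numbers, not part of the original.

import numpy as np

# illustrative only: how batch_size is derived from num_batches (and vice versa)
dataset_size, num_batches = 1000, 7
batch_size = int(np.ceil(dataset_size / num_batches))          # 143
max_num_batches = int(np.ceil(dataset_size / batch_size))      # 7
print(batch_size, max_num_batches, batch_size * num_batches >= dataset_size)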
Example #2
def remlplen_ichige(fp, fs, dp, ds):
    """Determine the length of the low pass filter with passband frequency
    fp, stopband frequency fs, passband ripple dp, and stopband ripple ds.
    fp and fs must be normalized with respect to the sampling frequency.
    Note that the filter order is one less than the filter length.

    References
    ----------
    K. Ichige, M. Iwaki, and R. Ishii, Accurate Estimation of Minimum
    Filter Length for Optimum FIR Digital Filters, IEEE Transactions on
    Circuits and Systems, 47(10):1008-1017, October 2000.

    """
    
    dF = fs-fp
    v = lambda dF,dp:2.325*((-log10(dp))**-0.445)*dF**(-1.39)
    g = lambda fp,dF,d:(2.0/pi)*arctan(v(dF,dp)*(1.0/fp-1.0/(0.5-dF)))
    h = lambda fp,dF,c:(2.0/pi)*arctan((c/dF)*(1.0/fp-1.0/(0.5-dF)))
    Nc = ceil(1.0+(1.101/dF)*(-log10(2.0*dp))**1.1)
    Nm = (0.52/dF)*log10(dp/ds)*(-log10(dp))**0.17
    N3 = ceil(Nc*(g(fp,dF,dp)+g(0.5-dF-fp,dF,dp)+1.0)/3.0)
    DN = ceil(Nm*(h(fp,dF,1.1)-(h(0.5-dF-fp,dF,0.29)-1.0)/2.0))
    N4 = N3+DN

    return int(N4)
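A minimal usage sketch, assuming remlplen_ichige above lives in the same module; the from-numpy import line and the edge/ripple values are illustrative assumptions.

from numpy import log10, arctan, ceil, pi   # names the function body relies on

fp, fs = 0.10, 0.15     # normalized passband / stopband edges
dp, ds = 0.01, 0.001    # passband / stopband ripple
print(remlplen_ichige(fp, fs, dp, ds))      # estimated filter length (taps)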
Example #3
def __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,
                     filter_scale, norm, sparsity, hop_length=None,
                     window='hann'):
    '''Generate the frequency domain constant-Q filter basis.'''

    basis, lengths = filters.constant_q(sr,
                                        fmin=fmin,
                                        n_bins=n_bins,
                                        bins_per_octave=bins_per_octave,
                                        tuning=tuning,
                                        filter_scale=filter_scale,
                                        norm=norm,
                                        pad_fft=True,
                                        window=window)

    # Filters are padded up to the nearest integral power of 2
    n_fft = basis.shape[1]

    if (hop_length is not None and
            n_fft < 2.0**(1 + np.ceil(np.log2(hop_length)))):

        n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))

    # re-normalize bases with respect to the FFT window length
    basis *= lengths[:, np.newaxis] / float(n_fft)

    # FFT and retain only the non-negative frequencies
    fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2)+1]

    # sparsify the basis
    fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)

    return fft_basis, n_fft, lengths
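A standalone illustration (with an assumed hop length) of the FFT-length rule used above: n_fft is bumped to the next power of two that covers twice the hop.

import numpy as np

hop_length = 512
n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))
print(n_fft)   # 1024, i.e. the smallest power of two >= 2 * hop_length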
Example #4
    def __init__(self, dataset_size, batch_size, num_batches=None, rng=None):
        self._rng = make_np_rng(rng, which_method=["random_integers",
                                                   "shuffle"])
        assert num_batches is None or num_batches >= 0
        self._dataset_size = dataset_size
        if batch_size is None:
            if num_batches is not None:
                batch_size = int(np.ceil(self._dataset_size / num_batches))
            else:
                raise ValueError("need one of batch_size, num_batches "
                                 "for sequential batch iteration")
        elif batch_size is not None:
            if num_batches is not None:
                max_num_batches = np.ceil(self._dataset_size / batch_size)
                if num_batches > max_num_batches:
                    raise ValueError("dataset of %d examples can only provide "
                                     "%d batches with batch_size %d, but %d "
                                     "batches were requested" %
                                     (self._dataset_size, max_num_batches,
                                      batch_size, num_batches))
            else:
                num_batches = np.ceil(self._dataset_size / batch_size)

        self._batch_size = batch_size
        self._num_batches = int(num_batches)
        self._next_batch_no = 0
        self._idx = 0
        self._batch_order = list(range(self._num_batches))
        self._rng.shuffle(self._batch_order)
Example #5
def _filter_ridge_lines(cwt, ridge_lines, window_size=None, min_length=None,
                       min_snr=1, noise_perc=10):
    """
    Filter ridge lines according to prescribed criteria. Intended
    to be used for finding relative maxima.

    Parameters
    -------------
    cwt : 2-D ndarray
        Continuous wavelet transform from which
        the ridge_lines were defined
    ridge_lines: 1-D sequence
        Each element should contain 2 sequences, the rows and columns
        of the ridge line (respectively)
    window_size: int, optional
        Size of window to use to calculate noise floor.
        Default is `cwt`.shape[1]/20
    min_length: int, optional
        Minimum length a ridge line needs to be acceptable.
        Default is `cwt`.shape[0]/4, ie 1/4th the number of widths.
    min_snr: float, optional
        Minimum SNR ratio. Default 1. The signal is the value of
        the cwt matrix at the shortest length scale (`cwt`[0,loc]), the noise is
        the `noise_perc`th percentile of datapoints contained within
        a window of `window_size` around `cwt`[0,loc]
    noise_perc: float, optional
        When calculating the noise floor, percentile of data points
        examined below which to consider noise. Calculated using
        scipy.stats.scoreatpercentile.

    References
    ----------
    Bioinformatics (2006) 22 (17): 2059-2065. doi: 10.1093/bioinformatics/btl355
    http://bioinformatics.oxfordjournals.org/content/22/17/2059.long

    """
    num_points = cwt.shape[1]
    if min_length is None:
        min_length = np.ceil(cwt.shape[0] / 4)
    if window_size is None:
        window_size = np.ceil(num_points / 20)
    hf_window = window_size / 2

    #Filter based on SNR
    row_one = cwt[0, :]
    noises = np.zeros_like(row_one)
    for ind, val in enumerate(row_one):
        window = np.arange(max([ind - hf_window, 0]), min([ind + hf_window, num_points]))
        window = window.astype(int)
        noises[ind] = scoreatpercentile(row_one[window], per=noise_perc)

    def filt_func(line):
        if len(line[0]) < min_length:
            return False
        snr = abs(cwt[line[0][0], line[1][0]] / noises[line[1][0]])
        if snr < min_snr:
            return False
        return True

    return filter(filt_func, ridge_lines)
Example #6
    def _setup_paganin(self, height, width):
        micron = 10**(-6)
        keV = 1000.0
        distance = self.parameters['Distance']
        energy = self.parameters['Energy'] * keV
        resolution = self.parameters['Resolution'] * micron
        wavelength = (1240.0 / energy) * 10.0**(-9)
        ratio = self.parameters['Ratio']

        height1 = height + 2 * self.parameters['Padtopbottom']
        width1 = width + 2 * self.parameters['Padleftright']
        centery = np.ceil(height1 / 2.0) - 1.0
        centerx = np.ceil(width1 / 2.0) - 1.0

        # Define the paganin filter
        dpx = 1.0 / (width1 * resolution)
        dpy = 1.0 / (height1 * resolution)
        pxlist = (np.arange(width1) - centerx) * dpx
        pylist = (np.arange(height1) - centery) * dpy
        pxx = np.zeros((height1, width1), dtype=np.float32)
        pxx[:, 0:width1] = pxlist
        pyy = np.zeros((height1, width1), dtype=np.float32)
        pyy[0:height1, :] = np.reshape(pylist, (height1, 1))
        pd = (pxx * pxx + pyy * pyy) * wavelength * distance * math.pi

        filter1 = 1.0 + ratio * pd
        self.filtercomplex = filter1 + filter1 * 1j
Example #7
def max_lm(baselines, wavelengths, uwidth, vwidth=0.0):
    """Get the maximum (l,m) that a baseline is sensitive to.

    Parameters
    ----------
    baselines : np.ndarray
        An array of baselines.
    wavelengths : np.ndarray
        An array of wavelengths.
    uwidth : np.ndarray
        Width of the receiver in the u-direction.
    vwidth : np.ndarray
        Width of the receiver in the v-direction.

    Returns
    -------
    lmax, mmax : array_like
    """

    umax = (np.abs(baselines[:, 0]) + uwidth) / wavelengths
    vmax = (np.abs(baselines[:, 1]) + vwidth) / wavelengths

    mmax = np.ceil(2 * np.pi * umax).astype(np.int64)
    lmax = np.ceil((mmax**2 + (2*np.pi*vmax)**2)**0.5).astype(np.int64)

    return lmax, mmax
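A hedged usage sketch with made-up baselines (metres) and a 21 cm wavelength, assuming max_lm above is in scope.

import numpy as np

baselines = np.array([[10.0, 0.0], [50.0, 5.0]])   # hypothetical (u, v) baselines
wavelengths = np.array([0.21, 0.21])
lmax, mmax = max_lm(baselines, wavelengths, uwidth=2.0)
print(lmax, mmax)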
Example #8
 def spectrum(self, shape, surface_point, bound):
     """Returns the counts histogram (bins,counts) for """
     
     wavelengths = []
     key = shape.surface_identifier(surface_point)
     if not self.store.has_key(key):
         return None
     
     entries = self.store[key]
     if len(entries) == 0:
         return None
     
     for entry in entries:
         if entry[2] == bound:
             wavelengths.append(float(entry[1]))
     
     if len(wavelengths) == 0:
         return None
     
     wavelengths = np.array(wavelengths)
     min = wavelengths.min()
     max = wavelengths.max()
     
     if len(wavelengths) == 1:
         bins = np.arange(np.floor( wavelengths[0] - 1), np.ceil(wavelengths[0] + 2))
         freq, bins  = np.histogram(wavelengths, bins=bins)
     else:
         bins = np.arange(np.floor( wavelengths.min()-1), np.ceil(wavelengths.max()+2))
         freq, bins  = np.histogram(wavelengths, bins=bins)
     return Spectrum(bins[0:-1], freq)
Example #9
 def affine_grid(self,Hz,rhoz,Lam):
     """
     Get data on regular spatial grid
     """
     #First find dimensionless density params
     Om0 = 8*pi*rhoz[0]/(3*Hz[0]**2)
     OL0 = Lam/(3*Hz[0]**2)
     Ok0 = 1-Om0-OL0
     #Get t0
     t0 = self.get_age(Om0,Ok0,OL0,Hz[0])
     #Set affine parameter vals        
     dvo = uvs(self.z,1/(self.uz**2*Hz),k=3,s=0.0)
     vzo = dvo.antiderivative()
     vz = vzo(self.z)
     vz[0] = 0.0
     #Compute grid sizes that give numerical error of err
     NJ = int(ceil(vz[-1]/sqrt(self.err) + 1))
     NI = int(ceil(3.0*(NJ - 1)*(t0 - self.tmin)/vz[-1] + 1))
     #Get functions on regular grid
     v = linspace(0,vz[-1],NJ)
     delv = (v[-1] - v[0])/(NJ-1)
     if delv > sqrt(self.err):
         print 'delv > sqrt(err)'
     Ho = uvs(vz,Hz,s=0.0,k=3)
     H = Ho(v)
     rhoo = uvs(vz,rhoz,s=0.0,k=3)
     rho = rhoo(v)
     uo = uvs(vz,self.uz,s=0.0,k=3)
     u = uo(v)
     u[0] = 1.0
     return v,vzo,H,rho,u,NJ,NI,delv,Om0,OL0,Ok0,t0
Example #10
def _search_fine(sino, srad, step, init_cen, ratio, drop):
    """
    Fine search for finding the rotation center.
    """
    Nrow, Ncol = sino.shape
    centerfliplr = (Ncol + 1.0) / 2.0 - 1.0

    # Use to shift the sinogram 2 to the raw CoR.
    shiftsino = np.int16(2 * (init_cen - centerfliplr))
    _copy_sino = np.roll(np.fliplr(sino[1:]), shiftsino, axis=1)
    lefttake = 0
    righttake = Ncol - 1
    if init_cen <= centerfliplr:
        lefttake = np.ceil(srad + 1)
        righttake = np.floor(2 * init_cen - srad - 1)
    else:
        lefttake = np.ceil(
            init_cen - (Ncol - 1 - init_cen) + srad + 1)
        righttake = np.floor(Ncol - 1 - srad - 1)
    Ncol1 = righttake - lefttake + 1
    mask = _create_mask(2 * Nrow - 1, Ncol1, 0.5 * ratio * Ncol, drop)
    numshift = np.int16((2 * srad + 1.0) / step)
    listshift = np.linspace(-srad, srad, num=numshift)
    listmetric = np.zeros(len(listshift), dtype='float32')
    num1 = 0
    for i in listshift:
        _sino = ndimage.interpolation.shift(
            _copy_sino, (0, i), prefilter=False)
        sinojoin = np.vstack((sino, _sino))
        listmetric[num1] = np.sum(np.abs(np.fft.fftshift(
            pyfftw.interfaces.numpy_fft.fft2(
                sinojoin[:, lefttake:righttake + 1]))) * mask)
        num1 = num1 + 1
    minpos = np.argmin(listmetric)
    return init_cen + listshift[minpos] / 2.0
Example #11
def setupFakePulsar(nodes=range(1, 9), fpgaclk=360e6, frqs=cfs, sideband=-1):
    n = np.arange(8)
    clk = fpgaclk
    if frqs is None:
        frqs = (
            18e9
            - (np.ceil(150e6 / (clk * 4 / 1024.0)) * clk * 4 / 1024.0)
            + ((clk * 2) * (2 * n + 1))
            - ((np.ceil(150e6 / (clk * 4 / 1024.0)) * clk * 4 / 1024.0) * n)
        )
    frqd = dict(zip(n + 1, frqs))
    esr = fpgaclk * 8  # effective sample rate

    pfb_rate = sideband * esr / (2 * 1024.0)
    for node in nodes:
        vsd[node].setParams(
            EFSAMPFR=esr,
            NCHAN=1024,
            EXPOSURE=1e-6,
            SUB0FREQ=frqd[node],
            OBSFREQ=frqd[node],
            CHAN_BW=pfb_rate,
            FPGACLK=fpgaclk,
        )  # exposure should be ~0 to get every single spectrum

    pass
Example #12
def interp(pic,flow):
    ys=np.arange(pic.shape[0]*pic.shape[1])/pic.shape[1]
    ud=(flow[:,:,0].reshape(-1)+ys)%pic.shape[0]
    xs=np.arange(pic.shape[0]*pic.shape[1])%pic.shape[1]
    lr=(flow[:,:,1].reshape(-1)+xs)%pic.shape[1]

    u=np.int32(np.floor(ud))
    d=np.int32(np.ceil(ud))%pic.shape[0]
    udiffs=ud-u
    udiffs=np.dstack((udiffs,udiffs,udiffs))
    l=np.int32(np.floor(lr))
    r=np.int32(np.ceil(lr))%pic.shape[1]
    ldiffs=lr-l
    ldiffs=np.dstack((ldiffs,ldiffs,ldiffs))

    ul=pic[u,l,:]
    ur=pic[u,r,:]
    dl=pic[d,l,:]
    dr=pic[d,r,:]


    udl=ul*(1-udiffs)+dl*udiffs
    udr=ur*(1-udiffs)+dr*udiffs
    ans=np.zeros(pic.shape)
    ans[ys,xs,:]=udl*(1-ldiffs)+udr*ldiffs
    return ans
Example #13
def qwtCanvasClip(canvas, canvasRect):
    x1 = np.ceil(canvasRect.left())
    x2 = np.floor(canvasRect.right())
    y1 = np.ceil(canvasRect.top())
    y2 = np.floor(canvasRect.bottom())
    r = QRect(x1, y1, x2-x1-1, y2-y1-1)
    return canvas.borderPath(r)
Example #14
def plot_rand(txyxidata, b,X, outfile):
	""" Plot stochastic forces and response of x """
	
	me = "LE_Plot.plot_rand: "
	if os.path.isfile(outfile): return me+"skip"
	t0 = time.time()
	showplot = False
	
	t, x, eta, xi = txyxidata
	del txyxidata
	tmax = np.ceil(t.max())
	
	## Plot walk
	fs = 25
	winsize = int(tmax/80)
	fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)
	fig.suptitle(outfile)#+"\n"+str(argv)[1:-1])
	envelope_plot(t, xi, winsize, ax=ax1)
	ax1.set_ylabel("$\\xi$",fontsize=fs)
	envelope_plot(t, eta, winsize, ax=ax2)
	ax2.set_ylabel("$\eta$",fontsize=fs)
	envelope_plot(t, x, winsize, ax=ax3)
	ax3.plot([0,t.max()],[X,X],"k--"); ax3.plot([0,t.max()],[-X,-X],"k--")
	ax3.set_xlabel("$t$",fontsize=fs);ax3.set_ylabel("$x$",fontsize=fs)
	etalim = np.ceil(abs(eta).max())	## Not perfect
	#fig.tight_layout()
	plt.savefig(outfile)
	print me+"Plot saved as",outfile
	print me+"Plotting random data:",round(time.time()-t0,1),"seconds"
	if showplot:		plt.show()	
	
	plt.close(fig)	
	return
Example #15
    def allowable_ref_dividers(self, f_ref, require_integer_n, require_fractional_n):
        """
        given a reference frequency and whether an integer-N solution is needed,
        return a qualified list of reference dividers that are within the limits
        of the PLL
        """
        # first, we establish a minimum and maximum reference divider modulus....
        # if we are forcing an integer-N solution, use those phase-detector frequency limits

        int_n_nref_max = (np.floor(f_ref / self.f_pfd_limits_integer_n[0])).astype(int)
        int_n_nref_min = (np.ceil(f_ref / self.f_pfd_limits_integer_n[1])).astype(int)

        frac_n_nref_max = (np.floor(f_ref / self.f_pfd_limits_fractional_n[0])).astype(int)
        frac_n_nref_min = (np.ceil(f_ref / self.f_pfd_limits_fractional_n[1])).astype(int)

        if require_integer_n:
            ref_divider_min = int_n_nref_min
            ref_divider_max = int_n_nref_max
        elif require_fractional_n:
            ref_divider_min = frac_n_nref_min
            ref_divider_max = frac_n_nref_max
        else:
            ref_divider_min = min(int_n_nref_min, frac_n_nref_min)
            ref_divider_max = max(int_n_nref_max, frac_n_nref_max)

        n_ref_allowed, n_ref_digital_codes = zip(*self.n_ref_data)

        # now, make a list of all the divider moduli in that range from min to max
        # making sure that the PLL can do each one
        allowable_divs = []
        for n_ref_modulus in range(ref_divider_min, ref_divider_max+1):
            if n_ref_modulus in n_ref_allowed:
                allowable_divs.append(n_ref_modulus)
        return allowable_divs
Example #16
def read_power(file, datadir='data/'):
    """ 
    29-apr-2009/dintrans: coded
    t,dat=read_power(name_power_file)
    Read a power spectra file like 'data/poweru.dat'
    """ 
    filename = path.join(datadir, file)
    infile = open(filename, 'r')
    lines = infile.readlines()
    infile.close()
#
#  find the number of blocks (t,power) that should be read
#
    dim=read_dim(datadir=datadir)
    nblock=int(len(lines)/int(N.ceil(dim.nxgrid/2/8.)+1))
#
    with open(filename, 'r') as infile:
        t=N.zeros(1, dtype='Float32')
        data=N.zeros(1, dtype='Float32')
        for i in range(nblock):
            st=infile.readline()
            t=N.append(t, float(st))
            for ii in range(int(N.ceil(dim.nxgrid/2/8.))):
                st=infile.readline()
                data=N.append(data, N.asarray(st.split()).astype('f'))

    t=t[1:] ; data=data[1:]
    nt=len(t) ; nk=int(len(data)/nt)
    data=data.reshape(nt, nk)
    return t, data
Example #17
    def _scale_to_res(self):
        """Change self._A and _extent to render an image whose
        resolution is matched to the eventual rendering."""

        ax = self.axes
        ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
        dx, dy = xlim[1] - xlim[0], ylim[1] - ylim[0]

        y0 = max(self.miny, ylim[0] - 5)
        y1 = min(self._full_res.shape[0] + self.miny, ylim[1] + 5)
        x0 = max(self.minx, xlim[0] - 5)
        x1 = min(self._full_res.shape[1] + self.minx, xlim[1] + 5)
        y0, y1, x0, x1 = map(int, [y0, y1, x0, x1])

        sy = int(max(1, min((y1 - y0) / 5., np.ceil(dy / ext[1]))))
        sx = int(max(1, min((x1 - x0) / 5., np.ceil(dx / ext[0]))))

        # have we already calculated what we need?
        if sx == self._sx and sy == self._sy and \
                x0 == self._bounds[0] and x1 == self._bounds[1] and \
                y0 == self._bounds[2] and y1 == self._bounds[3]:
            return

        self._A = self._full_res[y0 - self.miny:y1 - self.miny:sy,
                                 x0 - self.minx:x1 - self.minx:sx]

        x1 = x0 + self._A.shape[1] * sx
        y1 = y0 + self._A.shape[0] * sy

        self.set_extent([x0 - .5, x1 - .5, y0 - .5, y1 - .5])
        self._sx = sx
        self._sy = sy
        self._bounds = (x0, x1, y0, y1)
        self.changed()
Example #18
def plot_scatter_with_histograms(xvals, yvals, colour='k', oneToOneLine=True, xlabel=None, ylabel=None, title=None):
    gs = gridspec.GridSpec(5, 5)
    xmin = np.floor(min(xvals))
    xmax = np.ceil(max(xvals))
    ymin = np.floor(min(yvals))
    ymax = np.ceil(max(yvals))
    plt.subplot(gs[1:, 0:4])
    plt.plot(xvals, yvals, 'o', color=colour)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if oneToOneLine:
        oneToOneMax = max([max(xvals),max(yvals)])
        plt.plot([0,oneToOneMax],[0,oneToOneMax],'b--')
    plt.xlim(xmin,xmax)
    plt.ylim(ymin,ymax)
    plt.subplot(gs[0, 0:4])
    plt.hist(xvals, np.linspace(xmin,xmax,50))
    plt.axis('off')
    plt.subplot(gs[1:,4])
    plt.hist(yvals, np.linspace(ymin,ymax,50), orientation='horizontal')
    plt.axis('off')
    if title is not None:
        plt.suptitle(title)
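A quick usage sketch with synthetic data, assuming plot_scatter_with_histograms above (and its matplotlib/gridspec imports) are in scope; the data are arbitrary.

import numpy as np
import matplotlib.pyplot as plt

xvals = 10 * np.random.rand(200)
yvals = xvals + np.random.randn(200)
plot_scatter_with_histograms(xvals, yvals, xlabel='observed', ylabel='modelled',
                             title='synthetic example')
plt.show()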
Example #19
def extract_matched_slices(ax, shape):
    """Determine the slice parameters to use, matched to the screen.

    :param ax: Axes object to query. Its extent and pixel size
               determine the slice parameters

    :param shape: Tuple of the full image shape to slice into. Upper
               boundaries for slices will be cropped to fit within
               this shape.

    :rtype: tuple of x0, x1, sx, y0, y1, sy

    Indexing the full resolution array as array[y0:y1:sy, x0:x1:sx] returns
    a view well-matched to the axes' resolution and extent
    """
    ext = ax.transAxes.transform([1, 1]) - ax.transAxes.transform([0, 0])
    xlim, ylim = ax.get_xlim(), ax.get_ylim()
    dx, dy = xlim[1] - xlim[0], ylim[1] - ylim[0]

    y0 = int(max(0, ylim[0] - 5))
    y1 = int(min(shape[0], ylim[1] + 5))
    x0 = int(max(0, xlim[0] - 5))
    x1 = int(min(shape[1], xlim[1] + 5))

    sy = int(max(1, min((y1 - y0) / 5., np.ceil(dy / ext[1]))))
    sx = int(max(1, min((x1 - x0) / 5., np.ceil(dx / ext[0]))))

    return x0, x1, sx, y0, y1, sy
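A rough usage sketch; the image size and axis limits are arbitrary, and extract_matched_slices above is assumed to be importable.

import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(2000, 3000)
fig, ax = plt.subplots()
ax.set_xlim(0, 300)
ax.set_ylim(0, 200)
x0, x1, sx, y0, y1, sy = extract_matched_slices(ax, img.shape)
ax.imshow(img[y0:y1:sy, x0:x1:sx], extent=[x0, x1, y0, y1], origin='lower')
plt.show()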
Example #20
def get_spectral_magnitude(y_data,time_data, fs):
    tStep = np.max(time_data)/len(time_data)
    timeV = np.arange(0, np.max(time_data), tStep)
    numsamp = 512 #timeDomainVectorLength(timeV)
    if (len(y_data) < numsamp):
        y_data = np.resize(y_data, (numsamp,))
    window  = hann(numsamp)
    ## setup the fft spectrum arrays
    mag_spectrum = np.zeros([numsamp,int(np.ceil(float(len(timeV))/numsamp))])
    #print 'time.len= %d, numsamp=%d, loop:%d' % (len(timeV), numsamp, int(np.ceil(float(len(timeV))/numsamp)))
    for k in range(0,int(np.ceil(float(len(timeV))/numsamp))):
        slice_dat    = y_data[k*numsamp:numsamp*(k+1)]
        
        if (len(slice_dat) < numsamp):
            if (len(slice_dat) < numsamp/2): # discard the last slice if it has fewer than numsamp/2 points
                break;
            slice_dat = np.resize(slice_dat,(numsamp,))
        #multiply it with the window and transform it into frequency domain
        spectrum_dat = fft(slice_dat*window);
        #get the spectrum mag @ each of the 256 frequency points and store it
        #print 'k:',k,' spectrum_dat.len:',len(spectrum_dat)
        mag_spectrum[:,k]= 20 * np.log10(abs(spectrum_dat))
        mag_spectrum[:,k]= abs(spectrum_dat)
    #print "fs= %.4g, NFFT= %ld, y_data.shape= %d, mag_spectrum= %dx%d" % (fs, numsamp,np.shape(y_data)[0], np.shape(mag_spectrum)[0],np.shape(mag_spectrum)[1])
    ## DOUBLE CHECK  THE SIZE OF THE MATRIX
    avg_fft_foreach = np.mean(mag_spectrum, axis=1)
    #    print "np.shape(avg_fft_foreach):", np.shape(avg_fft_foreach)
    return avg_fft_foreach
Example #21
    def fetch_raster(self, projection, extent, target_resolution):
        """
        Fetch SRTM elevation for the given projection and approximate extent.

        """
        if not self.validate_projection(projection):
            raise ValueError(
                'Unsupported projection for the SRTM{} source.'.format(
                    self._resolution))

        min_x, max_x, min_y, max_y = extent
        min_x, min_y = np.floor([min_x, min_y])
        nx = int(np.ceil(max_x) - min_x)
        ny = int(np.ceil(max_y) - min_y)
        skip = False
        if nx > self._max_tiles[0]:
            warnings.warn(
                'Required SRTM{} tile count ({}) exceeds maximum ({}). '
                'Increase max_nx limit.'.format(self._resolution, nx,
                                                self._max_tiles[0]))
            skip = True
        if ny > self._max_tiles[1]:
            warnings.warn(
                'Required SRTM{} tile count ({}) exceeds maximum ({}). '
                'Increase max_ny limit.'.format(self._resolution, ny,
                                                self._max_tiles[1]))
            skip = True
        if skip:
            return []
        else:
            img, _, extent = self.combined(min_x, min_y, nx, ny)
            return [LocatedImage(np.flipud(img), extent)]
Example #22
def check_orbits(p1, t1, p2, t2, tmn, tmx, tol):
    n1 = t1 + p1 * np.arange(np.floor((tmn-t1)/p1), np.ceil((tmx-t1)/p1))
    n1 = n1[(tmn <= n1) * (n1 <= tmx)]
    n2 = t2 + p2 * np.arange(np.floor((tmn-t2)/p2), np.ceil((tmx-t2)/p2))
    n2 = n2[(tmn <= n2) * (n2 <= tmx)]
    delta = np.fabs(n1[:, None] - n2[None, :])
    return max(len(n1), len(n2)) == np.sum(delta < tol)
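A hedged usage sketch with invented ephemerides: two transit-like signals with periods 3.5 and 7.0 days checked over a 100-day window, assuming check_orbits above is in scope.

import numpy as np

p1, t1 = 3.5, 1.2
p2, t2 = 7.0, 1.2
print(check_orbits(p1, t1, p2, t2, tmn=0.0, tmx=100.0, tol=0.05))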
Example #23
def each_SASA(sasas,sort_keys,kcat_cut=30,plot=True,meta=None):
    num_sims=len(sort_keys)
    labels=label_maker(sasas,kcat_cut=kcat_cut,name_list=sort_keys)
    base_size = 20.
    wide_factor = 1.5
    color_dict={True:'r', False:'g', 'maybe':'b', 'wt':'m'}
    ncols = int(np.ceil(np.sqrt(num_sims)))
    nrows = int(np.ceil(float(num_sims)/ncols))
    fig = plt.figure(figsize=(base_size,base_size*(float(nrows)/ncols)/wide_factor))
    gs = gridspec.GridSpec(nrows,ncols,hspace=0.65,wspace=0.8)
    axes = [plt.subplot(gs[plot_num/ncols,plot_num%ncols]) for plot_num in range(num_sims)]
    max_SASA=0;ts_scaling=0.02
    for plot_num,ax in enumerate(axes):
        SASA=sasas[sort_keys[plot_num]]
        ts_sasa=np.sum([SASA['base_sasa'][res]['sasa_vals'] for res in SASA['base_sasa']],axis=0)
        name=SASA['name'];activity=labels[plot_num]
        ts = np.array(range(len(ts_sasa)))*ts_scaling
        ax.plot(ts,ts_sasa,color=color_dict[activity])
        ax.set_title(name)
        ax.tick_params(axis='y',which='both',left='off',right='off',labelleft='on')
        ax.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='on')
        max_SASA=max(max_SASA,max(ts_sasa))

    min_SASA=0
    if meta:
        meta['kcat cut']=kcat_cut
        meta['max sasa']=max_SASA
        meta['ts scaling']=ts_scaling
    else: meta={'kcat cut':kcat_cut,'max sasa':max_SASA,'ts scaling':ts_scaling}
    for plot_num,ax in enumerate(axes):
        ax.set_ylim(min_SASA,max_SASA)
    if plot:
        plt.show(block=False)
    else: picturesave('fig.each-%s'%plotname,work.plotdir,backup=False,version=True,meta=meta)
Example #24
    def init_log_binned_fx_buckets(self):
        # initializes the refex_log_binned_buckets with the vertical log bin values,
        # computed based on p and the number of vertices in the graph

        max_fx_value = np.ceil(np.log2(self.no_of_vertices) + self.TOLERANCE)  # fixing value of p = 0.5,
        # In our experiments, we found p = 0.5 to be a sensible choice:
        # with each bin containing the bottom half of the remaining nodes.
        log_binned_fx_keys = [value for value in xrange(0, int(max_fx_value))]

        fx_bucket_size = []
        starting_bucket_size = self.no_of_vertices

        for idx in np.arange(0.0, max_fx_value):
            starting_bucket_size *= self.p
            fx_bucket_size.append(int(np.ceil(starting_bucket_size)))

        total_slots_in_all_buckets = sum(fx_bucket_size)
        if total_slots_in_all_buckets > self.no_of_vertices:
            fx_bucket_size[0] -= (total_slots_in_all_buckets - self.no_of_vertices)

        log_binned_buckets_dict = dict(zip(log_binned_fx_keys, fx_bucket_size))

        for binned_value in sorted(log_binned_buckets_dict.keys()):
            for count in xrange(0, log_binned_buckets_dict[binned_value]):
                self.refex_log_binned_buckets.append(binned_value)

        if len(self.refex_log_binned_buckets) != self.no_of_vertices:
            raise Exception("Vertical binned bucket size not equal to the number of vertices!")
Example #25
    def _drawGraticules(self,m,gd):
        par = np.arange(np.ceil(gd.ymin),np.floor(gd.ymax)+1,1.0)
        mer = np.arange(np.ceil(gd.xmin),np.floor(gd.xmax)+1,1.0)
        merdict = m.drawmeridians(mer,labels=[0,0,0,1],fontsize=10,
                                  linewidth=0.5,color='gray',zorder=GRATICULE_ZORDER)
        pardict = m.drawparallels(par,labels=[1,0,0,0],fontsize=10,
                                  linewidth=0.5,color='gray',zorder=GRATICULE_ZORDER)

        #loop over meridian and parallel dicts, change/increase font, draw ticks
        xticks = []
        for merkey,mervalue in merdict.items():
            merline,merlablist = mervalue
            merlabel = merlablist[0]
            merlabel.set_family('sans-serif')
            merlabel.set_fontsize(12.0)
            xticks.append(merline[0].get_xdata()[0])

        yticks = []
        for parkey,parvalue in pardict.items():
            parline,parlablist = parvalue
            parlabel = parlablist[0]
            parlabel.set_family('sans-serif')
            parlabel.set_fontsize(12.0)
            yticks.append(parline[0].get_ydata()[0])

        #plt.tick_params(axis='both',color='k',direction='in')
        plt.xticks(xticks,())
        plt.yticks(yticks,())
        m.ax.tick_params(direction='out')
Example #26
def create_mask(Nx,Ny,frac,
                rmin = 0.5,
                rmax = 2):
    """
    create a mask Nx by Ny pixels
    frac: 0 <= frac <= 1: fraction of pixels to be covered
    """
    mask = numpy.ones((Nx,Ny))

    ncovered = 0
    goal = frac*Nx*Ny

    while ncovered < goal:
        x = Nx*numpy.random.random()
        y = Ny*numpy.random.random()
        r = rmin + numpy.random.random()*(rmax-rmin)
        
        xmin = max(0,int(numpy.floor(x-r)))
        xmax = min(Nx,int(numpy.ceil(x+r)))
        ymin = max(0,int(numpy.floor(y-r)))
        ymax = min(Ny,int(numpy.ceil(y+r)))

        for ix in range(xmin,xmax):
            for iy in range(ymin,ymax):
                if (x-ix)**2 + (y-iy)**2 < r**2:
                    ncovered += mask[ix,iy]
                    mask[ix,iy] = 0
    
    return mask
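A brief usage sketch, assuming create_mask above is in scope; the grid size and coverage fraction are arbitrary.

import numpy

mask = create_mask(64, 64, frac=0.3)
print(mask.shape, 1.0 - mask.mean())   # covered fraction is roughly frac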
Example #27
def sample_size_necessary_under_cph(power, ratio_of_participants, p_exp, p_con,
                                    postulated_hazard_ratio, alpha=0.05):
    """
    This computes the sample size for needed power to compare two groups under a Cox
    Proportional Hazard model.

    References:
        https://cran.r-project.org/web/packages/powerSurvEpi/powerSurvEpi.pdf

    Parameters:
        power: power to detect the magnitude of the hazard ratio as small as that specified by postulated_hazard_ratio.
        ratio_of_participants: ratio of participants in experimental group over control group.
        p_exp: probability of failure in experimental group over period of study.
        p_con: probability of failure in control group over period of study
        postulated_hazard_ratio: the postulated hazard ratio
        alpha: type I error rate

    Returns:
        n_exp, n_con: the samples sizes need for the experiment and control group, respectively, to achieve desired power
    """
    z = lambda p: stats.norm.ppf(p)

    m = 1.0 / ratio_of_participants \
        * ((ratio_of_participants * postulated_hazard_ratio + 1.0) / (postulated_hazard_ratio - 1.0)) ** 2 \
        * (z(1. - alpha / 2.) + z(power)) ** 2

    n_exp = m * ratio_of_participants / (ratio_of_participants * p_exp + p_con)
    n_con = m / (ratio_of_participants * p_exp + p_con)

    return int(np.ceil(n_exp)), int(np.ceil(n_con))
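A hedged usage example with made-up study parameters (80% power, equal arm sizes, 25%/20% event probabilities, postulated hazard ratio 0.7), assuming the function above and its stats/np imports are in scope.

from scipy import stats
import numpy as np

n_exp, n_con = sample_size_necessary_under_cph(
    power=0.8, ratio_of_participants=1.0,
    p_exp=0.25, p_con=0.20, postulated_hazard_ratio=0.7)
print(n_exp, n_con)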
Example #28
    def return_unit_round_neighborhood(self, row, col, radius):
        """Return a list with (row, col, distance) of the units around a unit. This version uses a circle as radius, all the element inside the radius are taken as neighborood.

        @param row index of the unit
        @param col the column index of the unit
        @param radius the radius of the distance to consider
        """
        output_list = list()
        if(radius <= 0): output_list.append((row, col, 0)); return output_list #return only the unit itself if radius<=0

        #Finding the square around the unit
        #with wide=radius using the ceil of radius
        row_range_min = row - int(np.ceil(radius))
        if(row_range_min < 0): row_range_min = 0
        row_range_max = row + int(np.ceil(radius))
        if(row_range_max >= self._matrix_size): row_range_max = self._matrix_size - 1
        col_range_min = col - int(np.ceil(radius))
        if(col_range_min < 0): col_range_min = 0
        col_range_max = col + int(np.ceil(radius))
        if(col_range_max >= self._matrix_size): col_range_max = self._matrix_size - 1

        for row_iter in range(row_range_min, row_range_max+1):
            for col_iter in range(col_range_min, col_range_max+1):
                #Finding the distances from the BMU
                col_distance = np.abs(col - col_iter)
                row_distance = np.abs(row - row_iter)
                #Pythagoras' theorem to estimate distance
                distance = np.sqrt( np.power(col_distance,2) + np.power(row_distance,2) )
                #Store the unit only if the distance is
                #less than the radius
                if(distance <= radius): output_list.append((row_iter, col_iter, distance))

        return output_list
Example #29
def dispims_color(M, border=0, bordercolor=[0.0, 0.0, 0.0], savePath=None, *imshow_args, **imshow_keyargs):
    """ Display an array of rgb images. 

    The input array is assumed to have the shape numimages x numpixelsY x numpixelsX x 3
    """
    bordercolor = numpy.array(bordercolor)[None, None, :]
    numimages = len(M)
    M = M.copy()
    for i in range(M.shape[0]):
        M[i] -= M[i].flatten().min()
        M[i] /= M[i].flatten().max()
    height, width, three = M[0].shape
    assert three == 3
    
    n0 = numpy.int(numpy.ceil(numpy.sqrt(numimages)))
    n1 = numpy.int(numpy.ceil(numpy.sqrt(numimages)))
    im = numpy.array(bordercolor)*numpy.ones(
                             ((height+border)*n1+border,(width+border)*n0+border, 1),dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i*n1+j < numimages:
                im[j*(height+border)+border:(j+1)*(height+border)+border,
                   i*(width+border)+border:(i+1)*(width+border)+border,:] = numpy.concatenate((
                  numpy.concatenate((M[i*n1+j,:,:,:],
                         bordercolor*numpy.ones((height,border,3),dtype=float)), 1),
                  bordercolor*numpy.ones((border,width+border,3),dtype=float)
                  ), 0)
    imshow_keyargs["interpolation"]="nearest"
    pylab.imshow(im, *imshow_args, **imshow_keyargs)
    
    if savePath == None:
        pylab.show()
    else:
        pylab.savefig(savePath)
Example #30
def calc_slit_box_aps_1id(slit_box_corners, inclip=(1, 10, 1, 10)):
    """
    Calculate the clip box based on the given slit corners.

    Parameters
    ----------
    slit_box_corners : np.ndarray
        Four corners of the slit box as a 4x2 matrix
    inclip : tuple, optional
        Extra inclipping to avoid clipping artifacts

    Returns
    -------
    Tuple:
        Clipping indices as a tuple of four
        (clipFromTop, clipToBottom, clipFromLeft, clipToRight)

    """
    return (
        np.floor(slit_box_corners[:, 0].min()).astype(
            int) + inclip[0],  # clip top    row
        np.ceil(slit_box_corners[:, 0].max()).astype(
            int) - inclip[1],  # clip bottom row
        np.floor(slit_box_corners[:, 1].min()).astype(
            int) + inclip[2],  # clip left   col
        np.ceil(slit_box_corners[:, 1].max()).astype(
            int) - inclip[3],  # clip right  col
    )
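A usage sketch with four hypothetical corner coordinates, assuming calc_slit_box_aps_1id above is in scope.

import numpy as np

corners = np.array([[10.2,  20.7],
                    [10.2,  500.1],
                    [480.9, 20.7],
                    [480.9, 500.1]])
print(calc_slit_box_aps_1id(corners))   # (top, bottom, left, right) clip indices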
Example #31
def test_stumpi_self_join_egress():
    m = 3
    zone = int(np.ceil(m / 4))

    seed = np.random.randint(100000)
    np.random.seed(seed)
    n = 30
    T = np.random.rand(n)

    ref_mp = naive.stumpi_egress(T, m)
    ref_P = ref_mp.P_.copy()
    ref_I = ref_mp.I_
    ref_left_P = ref_mp.left_P_.copy()
    ref_left_I = ref_mp.left_I_

    stream = stumpi(T, m, egress=True)

    comp_P = stream.P_.copy()
    comp_I = stream.I_
    comp_left_P = stream.left_P_.copy()
    comp_left_I = stream.left_I_

    naive.replace_inf(ref_P)
    naive.replace_inf(ref_left_P)
    naive.replace_inf(comp_P)
    naive.replace_inf(comp_left_P)

    npt.assert_almost_equal(ref_P, comp_P)
    npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_P, comp_left_P)
    npt.assert_almost_equal(ref_left_I, comp_left_I)

    for i in range(34):
        t = np.random.rand()
        ref_mp.update(t)
        stream.update(t)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        npt.assert_almost_equal(ref_left_I, comp_left_I)

    np.random.seed(seed)
    T = np.random.rand(n)
    T = pd.Series(T)

    ref_mp = naive.stumpi_egress(T, m)
    ref_P = ref_mp.P_.copy()
    ref_I = ref_mp.I_
    ref_left_P = ref_mp.left_P_.copy()
    ref_left_I = ref_mp.left_I_

    stream = stumpi(T, m, egress=True)

    comp_P = stream.P_.copy()
    comp_I = stream.I_
    comp_left_P = stream.left_P_.copy()
    comp_left_I = stream.left_I_

    naive.replace_inf(ref_P)
    naive.replace_inf(ref_left_P)
    naive.replace_inf(comp_P)
    naive.replace_inf(comp_left_P)

    npt.assert_almost_equal(ref_P, comp_P)
    npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_P, comp_left_P)
    npt.assert_almost_equal(ref_left_I, comp_left_I)

    for i in range(34):
        t = np.random.rand()
        t = np.random.rand()
        ref_mp.update(t)
        stream.update(t)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        npt.assert_almost_equal(ref_left_I, comp_left_I)
Example #32
    def evaluate(self, v):
        """
        Evaluate coefficients in standard 3D coordinate basis from those in 3D FB basis

        :param v: A coefficient vector (or an array of coefficient vectors) in FB basis
            to be evaluated. The first dimension must equal `self.count`.
        :return x: The evaluation of the coefficient vector(s) `x` in standard 3D
            coordinate basis. This is an array whose first three dimensions equal
            `self.sz` and the remaining dimensions correspond to dimensions two and
            higher of `v`.
        """
        # make sure the first dimension of v is self.count
        v, sz_roll = unroll_dim(v, 2)
        v = m_reshape(v, (self.count, -1))

        # get information on polar grids from precomputed data
        n_theta = np.size(self._precomp['ang_theta_wtd'], 0)
        n_phi = np.size(self._precomp['ang_phi_wtd_even'][0], 0)
        n_r = np.size(self._precomp['radial_wtd'], 0)

        # number of 3D image samples
        n_data = np.size(v, 1)

        u_even = np.zeros((n_r, int(2 * self.ell_max + 1), n_data,
                           int(np.floor(self.ell_max / 2) + 1)),
                          dtype=v.dtype)
        u_odd = np.zeros((n_r, int(2 * self.ell_max + 1), n_data,
                          int(np.ceil(self.ell_max / 2))),
                         dtype=v.dtype)

        # go through each basis function and find corresponding coefficient
        # evaluate the radial parts
        for ell in range(0, self.ell_max + 1):
            k_max_ell = self.k_max[ell]
            radial_wtd = self._precomp['radial_wtd'][:, 0:k_max_ell, ell]

            ind = self._indices['ells'] == ell

            v_ell = m_reshape(v[ind, :], (k_max_ell, (2 * ell + 1) * n_data))
            v_ell = radial_wtd @ v_ell
            v_ell = m_reshape(v_ell, (n_r, 2 * ell + 1, n_data))

            if np.mod(ell, 2) == 0:
                u_even[:,
                       int(self.ell_max - ell):int(self.ell_max + ell + 1), :,
                       int(ell / 2)] = v_ell
            else:
                u_odd[:,
                      int(self.ell_max - ell):int(self.ell_max + ell + 1), :,
                      int((ell - 1) / 2)] = v_ell

        u_even = np.transpose(u_even, (3, 0, 1, 2))
        u_odd = np.transpose(u_odd, (3, 0, 1, 2))
        w_even = np.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1),
                          dtype=v.dtype)
        w_odd = np.zeros((n_phi, n_r, n_data, 2 * self.ell_max + 1),
                         dtype=v.dtype)

        # evaluate the phi parts
        for m in range(0, self.ell_max + 1):
            ang_phi_wtd_m_even = self._precomp['ang_phi_wtd_even'][m]
            ang_phi_wtd_m_odd = self._precomp['ang_phi_wtd_odd'][m]

            n_even_ell = np.size(ang_phi_wtd_m_even, 1)
            n_odd_ell = np.size(ang_phi_wtd_m_odd, 1)

            if m == 0:
                sgns = (1, )
            else:
                sgns = (1, -1)

            for sgn in sgns:

                end = np.size(u_even, 0)
                u_m_even = u_even[end - n_even_ell:end, :,
                                  self.ell_max + sgn * m, :]
                end = np.size(u_odd, 0)
                u_m_odd = u_odd[end - n_odd_ell:end, :,
                                self.ell_max + sgn * m, :]

                u_m_even = m_reshape(u_m_even, (n_even_ell, n_r * n_data))
                u_m_odd = m_reshape(u_m_odd, (n_odd_ell, n_r * n_data))

                w_m_even = ang_phi_wtd_m_even @ u_m_even
                w_m_odd = ang_phi_wtd_m_odd @ u_m_odd

                w_m_even = m_reshape(w_m_even, (n_phi, n_r, n_data))
                w_m_odd = m_reshape(w_m_odd, (n_phi, n_r, n_data))

                w_even[:, :, :, self.ell_max + sgn * m] = w_m_even
                w_odd[:, :, :, self.ell_max + sgn * m] = w_m_odd

        w_even = np.transpose(w_even, (3, 0, 1, 2))
        w_odd = np.transpose(w_odd, (3, 0, 1, 2))
        u_even = w_even
        u_odd = w_odd

        u_even = m_reshape(u_even,
                           (2 * self.ell_max + 1, n_phi * n_r * n_data))
        u_odd = m_reshape(u_odd, (2 * self.ell_max + 1, n_phi * n_r * n_data))

        # evaluate the theta parts
        w_even = self._precomp['ang_theta_wtd'] @ u_even
        w_odd = self._precomp['ang_theta_wtd'] @ u_odd

        pf = w_even + 1j * w_odd
        pf = m_reshape(pf, (n_theta * n_phi * n_r, n_data))

        # perform the inverse non-uniform FFT back to 3D rectangular coordinates
        freqs = m_reshape(self._precomp['fourier_pts'],
                          (3, n_r * n_theta * n_phi, -1))
        x = np.zeros((self.sz[0], self.sz[1], self.sz[2], n_data),
                     dtype=v.dtype)
        for isample in range(0, n_data):
            x[..., isample] = np.real(anufft3(pf[:, isample], freqs, self.sz))

        # return the x with the first three dimensions of self.sz
        x = roll_dim(x, sz_roll)
        return x
Example #33
def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
    """

    :param spacing:
    :param patch_size:
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :return:
    """
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = []
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim

    while True:
        # This is a problem because sometimes we have spacing 20, 50, 50 and we want to still keep pooling.
        # Here we would stop however. This is not what we want! Fixed in get_pool_and_conv_propsv2
        min_spacing = min(current_spacing)
        valid_axes_for_pool = [i for i in range(dim) if current_spacing[i] / min_spacing < 2]
        axes = []
        for a in range(dim):
            my_spacing = current_spacing[a]
            partners = [i for i in range(dim) if current_spacing[i] / my_spacing < 2 and my_spacing / current_spacing[i] < 2]
            if len(partners) > len(axes):
                axes = partners
        conv_kernel_size = [3 if i in axes else 1 for i in range(dim)]

        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        #before = len(valid_axes_for_pool)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_size[i] >= 2*min_feature_map_size]
        #after = len(valid_axes_for_pool)
        #if after == 1 and before > 1:
        #    break

        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 0:
            break

        #print(current_spacing, current_size)

        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]

        pool_kernel_sizes = [0] * dim
        for v in valid_axes_for_pool:
            pool_kernel_sizes[v] = 2
            num_pool_per_axis[v] += 1
            current_spacing[v] *= 2
            current_size[v] = np.ceil(current_size[v] / 2)
        for nv in other_axes:
            pool_kernel_sizes[nv] = 1

        pool_op_kernel_sizes.append(pool_kernel_sizes)
        conv_kernel_sizes.append(conv_kernel_size)
        #print(conv_kernel_sizes)

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    conv_kernel_sizes.append([3]*dim)
    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
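A hedged call sketch with typical anisotropic-spacing values; it assumes the module-level imports (numpy as np, deepcopy) and the helpers used inside (get_shape_must_be_divisible_by, pad_shape) are in scope.

num_pool, pool_ks, conv_ks, patch, div_by = get_pool_and_conv_props(
    spacing=(3.0, 0.5, 0.5), patch_size=(40, 224, 224),
    min_feature_map_size=4, max_numpool=6)
print(num_pool)   # pooling steps per axis
print(pool_ks)    # per-stage pooling kernel sizes
print(conv_ks)    # per-stage conv kernel sizes (plus the bottleneck 3x3x3)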
Example #34
def get_pool_and_conv_props_v2(spacing, patch_size, min_feature_map_size, max_numpool):
    """

    :param spacing:
    :param patch_size:
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :return:
    """
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = []
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim
    kernel_size = [1] * dim

    while True:
        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
        if len(valid_axes_for_pool) < 1:
            break

        spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]

        # find axis that are within factor of 2 within smallest spacing
        min_spacing_of_valid = min(spacings_of_axes)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]

        # max_numpool constraint
        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 1:
            if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
                pass
            else:
                break
        if len(valid_axes_for_pool) < 1:
            break

        # now we need to find kernel sizes
        # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
        # factor 2 of min_spacing. Once they are 3 they remain 3
        for d in range(dim):
            if kernel_size[d] == 3:
                continue
            else:
                if spacings_of_axes[d] / min(current_spacing) < 2:
                    kernel_size[d] = 3

        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]

        pool_kernel_sizes = [0] * dim
        for v in valid_axes_for_pool:
            pool_kernel_sizes[v] = 2
            num_pool_per_axis[v] += 1
            current_spacing[v] *= 2
            current_size[v] = np.ceil(current_size[v] / 2)
        for nv in other_axes:
            pool_kernel_sizes[nv] = 1

        pool_op_kernel_sizes.append(pool_kernel_sizes)
        conv_kernel_sizes.append(deepcopy(kernel_size))
        #print(conv_kernel_sizes)

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    conv_kernel_sizes.append([3]*dim)
    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
Example #35
    # Specify the search range of the hyperparameters ===============
    weight_decay = 10 ** np.random.uniform(-8, -4)
    lr = 10 ** np.random.uniform(-6, -2)
    # ================================================

    val_acc_list, train_acc_list = __train(lr, weight_decay)
    print("val acc:" + str(val_acc_list[-1]) + " | lr:" + str(lr) + ", weight decay:" + str(weight_decay))
    key = "lr:" + str(lr) + ", weight decay:" + str(weight_decay)
    results_val[key] = val_acc_list
    results_train[key] = train_acc_list

# Plot the results ========================================================
print("=========== Hyper-Parameter Optimization Result ===========")
graph_draw_num = 20
col_num = 5
row_num = int(np.ceil(graph_draw_num / col_num))
i = 0

for key, val_acc_list in sorted(results_val.items(), key=lambda x:x[1][-1], reverse=True):
    print("Best-" + str(i+1) + "(val acc:" + str(val_acc_list[-1]) + ") | " + key)

    plt.subplot(row_num, col_num, i+1)
    plt.title("Best-" + str(i+1))
    plt.ylim(0.0, 1.0)
    if i % 5: plt.yticks([])
    plt.xticks([])
    x = np.arange(len(val_acc_list))
    plt.plot(x, val_acc_list)
    plt.plot(x, results_train[key], "--")
    i += 1
Example #36
expt = Experiment(path=r'C:\_Lib\python\slab\instruments\awg\chase',
                  config_file='config.json')

expt.plotter.clear('dac1')
expt.plotter.clear('dac2')

dac = DAx22000('dac', '1')

im = InstrumentManager()
trig = im['trig']

print(dac.initialize(ext_clk_ref=True))
print(dac.set_clk_freq(freq=0.5e9))

xpts = np.arange(6400)
ypts = np.ceil(2047.5 + 2047.5 * np.sin(2.0 * np.pi * xpts / (32)))
print(ypts)
print(dac.create_single_segment(1, 0, 2047, 2047, ypts, 1))
print(dac.place_mrkr2(1))
print(dac.set_ext_trig(ext_trig=True))

numsegs = 65
segs = []
waveforms = []


def waveform_compressor(waveform):
    print("compressing")


for ii in range(numsegs):
Example #37
def _get_xy_bounding_box(vertex, padding):
    """Returns the xy bounding box of the environment."""
    min_ = np.floor(np.min(vertex[:, :2], axis=0) - padding).astype(np.int)
    max_ = np.ceil(np.max(vertex[:, :2], axis=0) + padding).astype(np.int)
    return min_, max_
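A tiny usage sketch with invented vertices; note the function uses np.int, which only exists in NumPy versions before 1.24.

import numpy as np

vertex = np.array([[1.2, 3.7, 0.0],
                   [5.9, 1.1, 0.0],
                   [4.4, 8.3, 0.0]])
min_xy, max_xy = _get_xy_bounding_box(vertex, padding=2)
print(min_xy, max_xy)   # e.g. [-1 -1] and [ 8 11]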
Example #38
def test_stumpi_constant_subsequence_self_join_egress():
    m = 3
    zone = int(np.ceil(m / 4))

    seed = np.random.randint(100000)
    np.random.seed(seed)

    T = np.concatenate(
        (np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))

    ref_mp = naive.stumpi_egress(T, m)
    ref_P = ref_mp.P_.copy()
    ref_I = ref_mp.I_
    ref_left_P = ref_mp.left_P_.copy()
    ref_left_I = ref_mp.left_I_

    stream = stumpi(T, m, egress=True)

    comp_P = stream.P_.copy()
    comp_I = stream.I_
    comp_left_P = stream.left_P_.copy()
    comp_left_I = stream.left_I_

    naive.replace_inf(ref_P)
    naive.replace_inf(ref_left_P)
    naive.replace_inf(comp_P)
    naive.replace_inf(comp_left_P)

    npt.assert_almost_equal(ref_P, comp_P)
    # npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_P, comp_left_P)
    # npt.assert_almost_equal(ref_left_I, comp_left_I)

    for i in range(34):
        t = np.random.rand()
        ref_mp.update(t)
        stream.update(t)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        # npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        # npt.assert_almost_equal(ref_left_I, comp_left_I)

    np.random.seed(seed)
    T = np.concatenate(
        (np.zeros(20, dtype=np.float64), np.ones(10, dtype=np.float64)))
    T = pd.Series(T)

    ref_mp = naive.stumpi_egress(T, m)
    ref_P = ref_mp.P_.copy()
    ref_I = ref_mp.I_
    ref_left_P = ref_mp.left_P_.copy()
    ref_left_I = ref_mp.left_I_

    stream = stumpi(T, m, egress=True)

    comp_P = stream.P_.copy()
    comp_I = stream.I_
    comp_left_P = stream.left_P_.copy()
    comp_left_I = stream.left_I_

    naive.replace_inf(ref_P)
    naive.replace_inf(ref_left_P)
    naive.replace_inf(comp_P)
    naive.replace_inf(comp_left_P)

    npt.assert_almost_equal(ref_P, comp_P)
    # npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_P, comp_left_P)
    # npt.assert_almost_equal(ref_left_I, comp_left_I)

    for i in range(34):
        t = np.random.rand()
        ref_mp.update(t)
        stream.update(t)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        # npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
Example #39
def test_stumpi_init_nan_inf_self_join_egress(substitute,
                                              substitution_locations):
    m = 3
    zone = int(np.ceil(m / 4))

    seed = np.random.randint(100000)
    # seed = 58638

    for substitution_location in substitution_locations:
        np.random.seed(seed)
        n = 30
        T = np.random.rand(n)

        if substitution_location == -1:
            substitution_location = T.shape[0] - 1
        T[substitution_location] = substitute

        ref_mp = naive.stumpi_egress(T, m)
        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        stream = stumpi(T, m, egress=True)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        npt.assert_almost_equal(ref_left_I, comp_left_I)

        for i in range(34):
            t = np.random.rand()
            ref_mp.update(t)
            stream.update(t)

            comp_P = stream.P_.copy()
            comp_I = stream.I_
            comp_left_P = stream.left_P_.copy()
            comp_left_I = stream.left_I_

            ref_P = ref_mp.P_.copy()
            ref_I = ref_mp.I_
            ref_left_P = ref_mp.left_P_.copy()
            ref_left_I = ref_mp.left_I_

            naive.replace_inf(ref_P)
            naive.replace_inf(ref_left_P)
            naive.replace_inf(comp_P)
            naive.replace_inf(comp_left_P)

            npt.assert_almost_equal(ref_P, comp_P)
            npt.assert_almost_equal(ref_I, comp_I)
            npt.assert_almost_equal(ref_left_P, comp_left_P)
            npt.assert_almost_equal(ref_left_I, comp_left_I)

        np.random.seed(seed)
        T = np.random.rand(n)

        if substitution_location == -1:
            substitution_location = T.shape[0] - 1
        T[substitution_location] = substitute
        T = pd.Series(T)

        ref_mp = naive.stumpi_egress(T, m)
        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        stream = stumpi(T, m, egress=True)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        npt.assert_almost_equal(ref_left_I, comp_left_I)

        for i in range(34):
            t = np.random.rand()
            ref_mp.update(t)
            stream.update(t)

            comp_P = stream.P_.copy()
            comp_I = stream.I_
            comp_left_P = stream.left_P_.copy()
            comp_left_I = stream.left_I_

            ref_P = ref_mp.P_.copy()
            ref_I = ref_mp.I_
            ref_left_P = ref_mp.left_P_.copy()
            ref_left_I = ref_mp.left_I_

            naive.replace_inf(ref_P)
            naive.replace_inf(ref_left_P)
            naive.replace_inf(comp_P)
            naive.replace_inf(comp_left_P)

            npt.assert_almost_equal(ref_P, comp_P)
            npt.assert_almost_equal(ref_I, comp_I)
            npt.assert_almost_equal(ref_left_P, comp_left_P)
            npt.assert_almost_equal(ref_left_I, comp_left_I)
Example #40
0
def test_stumpi_identical_subsequence_self_join_egress():
    m = 3
    zone = int(np.ceil(m / 4))

    seed = np.random.randint(100000)
    np.random.seed(seed)

    identical = np.random.rand(8)
    T = np.random.rand(20)
    T[1:1 + identical.shape[0]] = identical
    T[11:11 + identical.shape[0]] = identical

    ref_mp = naive.stumpi_egress(T, m)
    ref_P = ref_mp.P_.copy()
    ref_I = ref_mp.I_
    ref_left_P = ref_mp.left_P_.copy()
    ref_left_I = ref_mp.left_I_

    stream = stumpi(T, m, egress=True)

    comp_P = stream.P_.copy()
    comp_I = stream.I_
    comp_left_P = stream.left_P_.copy()
    comp_left_I = stream.left_I_

    naive.replace_inf(ref_P)
    naive.replace_inf(ref_left_P)
    naive.replace_inf(comp_P)
    naive.replace_inf(comp_left_P)

    npt.assert_almost_equal(ref_P,
                            comp_P,
                            decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_P,
                            comp_left_P,
                            decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(ref_left_I, comp_left_I)

    for i in range(34):
        t = np.random.rand()
        ref_mp.update(t)
        stream.update(t)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P,
                                comp_P,
                                decimal=config.STUMPY_TEST_PRECISION)
        # npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P,
                                comp_left_P,
                                decimal=config.STUMPY_TEST_PRECISION)
        # npt.assert_almost_equal(ref_left_I, comp_left_I)

    np.random.seed(seed)
    identical = np.random.rand(8)
    T = np.random.rand(20)
    T[1:1 + identical.shape[0]] = identical
    T[11:11 + identical.shape[0]] = identical
    T = pd.Series(T)
    ref_mp = naive.stumpi_egress(T, m)
    ref_P = ref_mp.P_.copy()
    ref_I = ref_mp.I_
    ref_left_P = ref_mp.left_P_.copy()
    ref_left_I = ref_mp.left_I_

    stream = stumpi(T, m, egress=True)

    comp_P = stream.P_.copy()
    comp_I = stream.I_
    comp_left_P = stream.left_P_.copy()
    comp_left_I = stream.left_I_

    naive.replace_inf(ref_P)
    naive.replace_inf(ref_left_P)
    naive.replace_inf(comp_P)
    naive.replace_inf(comp_left_P)

    npt.assert_almost_equal(ref_P,
                            comp_P,
                            decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(ref_I, comp_I)
    npt.assert_almost_equal(ref_left_P,
                            comp_left_P,
                            decimal=config.STUMPY_TEST_PRECISION)
    # npt.assert_almost_equal(ref_left_I, comp_left_I)

    for i in range(34):
        t = np.random.rand()
        ref_mp.update(t)
        stream.update(t)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P,
                                comp_P,
                                decimal=config.STUMPY_TEST_PRECISION)
        # npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P,
                                comp_left_P,
                                decimal=config.STUMPY_TEST_PRECISION)
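
These egress tests all exercise the same incremental STUMPI pattern: construct the stream once, append new values with update(), and read the matrix profile attributes after each step. A minimal, hedged usage sketch of that pattern on synthetic data (attribute names exactly as used in the tests above):

import numpy as np
from stumpy import stumpi

np.random.seed(0)
T = np.random.rand(64)
m = 3

stream = stumpi(T[:30], m, egress=True)  # oldest points egress as new ones arrive
for t in T[30:]:
    stream.update(t)

# Matrix profile / index and their left-constrained variants for the current window
print(stream.P_.shape, stream.I_.shape)
print(stream.left_P_[:5], stream.left_I_[:5])
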
Example #41
0
def readBED(basefilename,
            useMAFencoding=False,
            blocksize=1,
            start=0,
            nSNPs=SP.inf,
            startpos=None,
            endpos=None,
            order='F',
            standardizeSNPs=False,
            ipos=2,
            bim=None,
            fam=None):
    '''
    read [basefilename].bed,[basefilename].bim,[basefilename].fam
    --------------------------------------------------------------------------
    Input:
    basefilename    : string of the basename of [basename].bed, [basename].bim,
                      and [basename].fam
    blocksize       : load blocksize SNPs at a time (default 1)
    start           : index of the first SNP to be loaded from the .bed-file
                      (default 0)
    nSNPs           : load nSNPs from the .bed file (default SP.inf, meaning all)
    startpos        : starting position of the loaded genomic region[chr,bpdist]
    endpos          : end-position of the loaded genomic region     [chr,bpdist]
    order           : memory layout of the returned SNP array (default 'F')
                      'F'   : Fortran-style column-major array (SNP-major)
                      'C'   : C-style row-major array (individual-major)
    standardizeSNPs : bool indicating whether the resulting SNP array should be
                      zero-mean and unit-variance with mean-imputed missing
                      values (default False)
    ipos            : the index of the position index to use (default 2)
                        1 : genomic distance
                        2 : base-pair distance
    useMAFencoding  : if set to True, the minor allele is encoded as 2 and the
                      major allele as 0; otherwise, the plink coding is used
                      (default False).
    --------------------------------------------------------------------------
    Output dictionary:
    'rs'     : [S] array rs-numbers
    'pos'    : [S*3] array of positions [chromosome, genetic dist, basepair dist]
    'snps'   : [N*S] array of snp-data
    'iid'    : [N*2] array of family IDs and individual IDs
    --------------------------------------------------------------------------    
    '''

    if bim is None: bim = readBIM(basefilename, usecols=(0, 1, 2, 3))
    if fam is None: fam = readFAM(basefilename, usecols=(0, 1))

    rs = bim[:, 1]
    pos = SP.array(bim[:, (0, 2, 3)], dtype='float')

    if startpos is not None:
        #pdb.set_trace()
        i_c = pos[:, 0] == startpos[0]
        i_largerbp = pos[:, ipos] >= startpos[ipos]
        start = which(i_c * i_largerbp)
        while (start - 1 >= 0 and pos[start - 1, ipos] == startpos[ipos]):
            start = start - 1
        i_c = pos[:, 0] == endpos[0]
        i_smallerbp = pos[:, ipos] >= endpos[ipos]
        end = which(i_c * i_smallerbp)
        while (end + 1 < pos.shape[0] and pos[end + 1, ipos] == endpos[ipos]):
            end = end + 1
        nSNPs = end - start
        if (nSNPs <= 0) or (end == 0) or (start <= 0):
            ret = {
                'pos': SP.zeros((0, 3)),
                'rs': SP.zeros((0)),
                'iid': fam,
                'snps': SP.zeros((fam.shape[0], 0))
            }
            return ret
        pass
    N = fam.shape[0]
    S = bim.shape[0]
    S_res = min(S, start + nSNPs)
    nSNPs = min(S - start, nSNPs)
    #if startpos is not None:
    #print("start: " + str(start))
    #print("end: " + str(end))
    #print("S_res: " + str(S_res))
    #print("nSNPs: " + str(nSNPs))
    if nSNPs <= 0:
        ret = {
            'rs': rs[start:start],
            'pos': pos[start:start, :],
            #'snps'   :SNPs[0:N,start:start],
            'snps': SP.zeros((N, 0)),
            'iid': fam
        }
        return ret
    SNPs = SP.zeros((int(SP.ceil(0.25 * N)) * 4, nSNPs), order=order)
    bed = basefilename + '.bed'
    with open(bed, "rb") as f:
        mode = f.read(2)
        if mode != b'l\x1b':
            raise Exception('No valid binary PED file')
        mode = f.read(1)  #\x01 = SNP major \x00 = individual major
        if mode != b'\x01':
            raise Exception('only SNP-major is implemented')
        startbit = SP.ceil(0.25 * N) * start + 3
        f.seek(int(startbit))
        for blockStart in SP.arange(0, nSNPs, blocksize, dtype=int):
            blockEnd = int(min(S, blockStart + blocksize))
            Sblock = min(nSNPs - blockStart, blocksize)
            nbyte = int(SP.ceil(0.25 * N) * Sblock)
            bytes = SP.array(bytearray(f.read(nbyte))).reshape(
                (int(SP.ceil(0.25 * N)), Sblock), order='F')

            SNPs[3::4, blockStart:blockEnd][bytes >= 64] = SP.nan
            SNPs[3::4, blockStart:blockEnd][bytes >= 128] = 1
            SNPs[3::4, blockStart:blockEnd][bytes >= 192] = 2
            bytes = SP.mod(bytes, 64)
            SNPs[2::4, blockStart:blockEnd][bytes >= 16] = SP.nan
            SNPs[2::4, blockStart:blockEnd][bytes >= 32] = 1
            SNPs[2::4, blockStart:blockEnd][bytes >= 48] = 2
            bytes = SP.mod(bytes, 16)
            SNPs[1::4, blockStart:blockEnd][bytes >= 4] = SP.nan
            SNPs[1::4, blockStart:blockEnd][bytes >= 8] = 1
            SNPs[1::4, blockStart:blockEnd][bytes >= 12] = 2
            bytes = SP.mod(bytes, 4)
            SNPs[0::4, blockStart:blockEnd][bytes >= 1] = SP.nan
            SNPs[0::4, blockStart:blockEnd][bytes >= 2] = 1
            SNPs[0::4, blockStart:blockEnd][bytes >= 3] = 2

    if 0:  #the binary format as described in the documentation (seems wrong)
        SNPs[3::4][bytes >= 128] = SP.nan
        SNPs[3::4][bytes >= 192] = 1
        bytes = SP.mod(bytes, 128)
        SNPs[3::4][bytes >= 64] += 1
        bytes = SP.mod(bytes, 64)
        SNPs[2::4][bytes >= 32] = SP.nan
        SNPs[2::4][bytes >= 48] = 1
        bytes = SP.mod(bytes, 32)
        SNPs[2::4][bytes >= 16] += 1
        bytes = SP.mod(bytes, 16)
        SNPs[1::4][bytes >= 8] = SP.nan
        SNPs[1::4][bytes >= 12] = 1
        bytes = SP.mod(bytes, 8)
        SNPs[1::4][bytes >= 4] += 1
        bytes = SP.mod(bytes, 4)
        SNPs[0::4][bytes >= 2] = SP.nan
        SNPs[0::4][bytes >= 3] = 1
        bytes = SP.mod(bytes, 2)
        SNPs[0::4][bytes >= 1] += 1
    snps = SNPs[0:N, :]

    if useMAFencoding:
        imaf = SP.sum(snps == 2, axis=0) > SP.sum(snps == 0, axis=0)
        snps[:, imaf] = 2 - snps[:, imaf]

    if standardizeSNPs:
        snps = standardize(snps)
    ret = {
        'rs': rs[start:S_res],
        'pos': pos[start:S_res, :],
        'snps': snps,
        'iid': fam
    }
    return ret
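
For reference, the block above decodes PLINK's SNP-major 2-bit packing (four samples per byte, lowest two bits first) using threshold-and-modulo arithmetic. Below is a small standalone sketch of the same coding written with explicit bit operations; it is illustrative only and not part of the original module:

import numpy as np

def decode_plink_byte(b):
    # 2-bit PLINK codes, lowest pair first: 00 -> 0, 10 -> 1, 11 -> 2, 01 -> missing
    lookup = {0: 0.0, 2: 1.0, 3: 2.0, 1: np.nan}
    return np.array([lookup[(b >> (2 * i)) & 0b11] for i in range(4)])

# 0b11100100 packs genotypes [0, missing, 1, 2] for four consecutive individuals
print(decode_plink_byte(0b11100100))  # [ 0. nan  1.  2.]
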
Example #42
0
def test_stumpi_stream_nan_inf_self_join_egress(substitute,
                                                substitution_locations):
    m = 3
    zone = int(np.ceil(m / 4))

    seed = np.random.randint(100000)

    for substitution_location in substitution_locations:
        np.random.seed(seed)
        T = np.random.rand(64)
        n = 30

        ref_mp = naive.stumpi_egress(T[:n], m)
        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        stream = stumpi(T[:n], m, egress=True)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        npt.assert_almost_equal(ref_left_I, comp_left_I)

        if substitution_location == -1:
            substitution_location = T[30:].shape[0] - 1
        T[n:][substitution_location] = substitute
        for t in T[n:]:
            ref_mp.update(t)
            stream.update(t)

            comp_P = stream.P_.copy()
            comp_I = stream.I_
            comp_left_P = stream.left_P_.copy()
            comp_left_I = stream.left_I_

            ref_P = ref_mp.P_.copy()
            ref_I = ref_mp.I_
            ref_left_P = ref_mp.left_P_.copy()
            ref_left_I = ref_mp.left_I_

            naive.replace_inf(ref_P)
            naive.replace_inf(ref_left_P)
            naive.replace_inf(comp_P)
            naive.replace_inf(comp_left_P)

            npt.assert_almost_equal(ref_P, comp_P)
            npt.assert_almost_equal(ref_I, comp_I)
            npt.assert_almost_equal(ref_left_P, comp_left_P)
            npt.assert_almost_equal(ref_left_I, comp_left_I)

        np.random.seed(seed)
        T = np.random.rand(64)

        ref_mp = naive.stumpi_egress(T[:n], m)
        ref_P = ref_mp.P_.copy()
        ref_I = ref_mp.I_
        ref_left_P = ref_mp.left_P_.copy()
        ref_left_I = ref_mp.left_I_

        stream = stumpi(T[:n], m, egress=True)

        comp_P = stream.P_.copy()
        comp_I = stream.I_
        comp_left_P = stream.left_P_.copy()
        comp_left_I = stream.left_I_

        naive.replace_inf(ref_P)
        naive.replace_inf(ref_left_P)
        naive.replace_inf(comp_P)
        naive.replace_inf(comp_left_P)

        npt.assert_almost_equal(ref_P, comp_P)
        npt.assert_almost_equal(ref_I, comp_I)
        npt.assert_almost_equal(ref_left_P, comp_left_P)
        npt.assert_almost_equal(ref_left_I, comp_left_I)

        if substitution_location == -1:
            substitution_location = T[n:].shape[0] - 1
        T[n:][substitution_location] = substitute
        for t in T[n:]:
            ref_mp.update(t)
            stream.update(t)

            comp_P = stream.P_.copy()
            comp_I = stream.I_
            comp_left_P = stream.left_P_.copy()
            comp_left_I = stream.left_I_

            ref_P = ref_mp.P_.copy()
            ref_I = ref_mp.I_
            ref_left_P = ref_mp.left_P_.copy()
            ref_left_I = ref_mp.left_I_

            naive.replace_inf(ref_P)
            naive.replace_inf(ref_left_P)
            naive.replace_inf(comp_P)
            naive.replace_inf(comp_left_P)

            npt.assert_almost_equal(ref_P, comp_P)
            npt.assert_almost_equal(ref_I, comp_I)
            npt.assert_almost_equal(ref_left_P, comp_left_P)
            npt.assert_almost_equal(ref_left_I, comp_left_I)
Example #43
0
def trunc_and_sum_inplace(function=None, function_uses='points',
                          sigma=None, points=None, bins=None, center=None, weights=1,
                          limit=3, method='auto'):
    """Wrap a simple broadening function, evaluate within a consistent range and sum to spectrum

                      center1                          center2
                         :                                :
                         :                                :
                         *                                :
                        * *                               :
                        * *                               **
                       * |-| sigma1      +               *  *            + ...
                      *     *                          *   |--| sigma2
                    *         *                    . *          * .
                ..: *         * :..             ..:: *          * ::..
             --------------------------     -----------------------
                    |----|                           |----|
                 limit * max(sigma)          limit * max(sigma)

    A spectrum is returned corresponding to the entire input set of
    bins/points, but this is zeroed outside of a restricted range (``limit * sigma``).
    The range is the same for all peaks, so max(sigma) is used for all.
    The purpose of this is performance optimisation compared to evaluating over the whole range.
    There is an artefact associated with this: a vertical step down to zero at the range cutoff.

    The function will be evaluated with the input points or bins
    for each set of (center, sigma) and summed to a single
    spectrum. Two technical approaches are provided: a python for-loop
    which iterates over the functions and sums/writes to the
    appropriate regions of the output array, and a np.histogram dump of
    all the frequency/intensity values. The for-loop approach has a
    more consistent speed but performance ultimately depends on the array sizes.

    :param function: broadening function; this should accept named arguments "center", "sigma" and either "bins" or "points".
    :type function: python function
    :param function_uses: 'points' or 'bins'; select which type of x data is passed to "function"
    :param sigma: widths of broadening functions (passed to "sigma" argument of function)
    :type sigma: float or Nx1 array
    :param bins: sample bins for function evaluation. This _must_ be evenly-spaced.
    :type bins: 1-D array
    :param points: regular grid of points for which function should be evaluated.
    :type points: 1-D array
    :param center: centers of broadening functions
    :type center: float or Nx1 array
    :param weights: weights of peaks for summation
    :type weights: float or array corresponding to "center"
    :param limit: range (as multiple of sigma) for cutoff
    :type limit: float
    :param method:
        'auto', 'histogram' or 'forloop'; select between implementations for summation at appropriate
        frequencies. 'auto' uses 'forloop' when there are > 1500 frequencies
    :type method: str

    :returns: (points, spectrum)
    :returntype: (1D array, 1D array)

    """
    # Select method for summation
    # Histogram seems to be faster below 1500 points; tested on a macbook pro and a Xeon workstation.
    # This threshold may change depending on updates to hardware, Python and Numpy...
    # For now we hard-code the number. It would ideally live in abins.parameters but is very specific to this function.
    if method == 'auto' and points.size < 1500:
        sum_method = 'histogram'
    elif method == 'auto':
        sum_method = 'forloop'
    else:
        sum_method = method

    bin_width = bins[1] - bins[0]
    if not np.isclose(points[1] - points[0], bin_width):
        raise ValueError("Bin spacing and point spacing are not consistent")

    freq_range = limit * max(sigma)
    nrows = len(center)
    ncols = int(np.ceil((freq_range * 2) / bin_width))
    if ncols > len(points):
        raise ValueError("Kernel is wider than evaluation region; "
                         "use a smaller cutoff limit or a larger range of points/bins")

    # Work out locations of frequency blocks. As these should all be the same size,
    # blocks which would exceed array size are "justified" into the array
    #
    # Start by calculating ideal start positions (allowed to exceed bounds) and
    # corresponding frequencies
    #
    # s-|-x----      |
    #   | s---x----  |
    #   |  s---x---- |
    #   |     s---x--|-
    #
    #
    start_indices = np.asarray((center - points[0] - freq_range + (bin_width / 2)) // bin_width,
                               int)
    # Values exceeding calculation range would have illegally large "initial guess" indices so clip those to final point
    #   |            | s---x----     ->       |           s|--x----
    start_indices[start_indices > len(points)] = -1

    start_freqs = points[start_indices]

    # Next identify points which will overshoot left: x lies to left of freq range
    # s-|-x-:--      |
    #   | s-:-x----  |
    #   |  s:--x---- |
    #   |   : s---x--|-
    left_justified = center < freq_range

    # "Left-justify" these points by setting start position to low limit
    #   |sx-------   |
    #   | s---x----  |
    #   |  s---x---- |
    #   |     s---x--|-
    start_freqs[left_justified] = points[0]
    start_indices[left_justified] = 0

    # Apply same reasoning to fix regions overshooting upper bound
    # Note that the center frequencies do not move: only the grid on which they are evaluated
    #   |sx------:   |           |sx-------   |
    #   | s---x--:-  |    --->   | s---x----  |
    #   |  s---x-:-- |           |  s---x---- |
    #   |     s--:x--|-          |   s-----x--|
    right_justified = center > (points[-1] - freq_range)
    start_freqs[right_justified] = points[-1] - (ncols * bin_width)
    start_indices[right_justified] = len(points) - ncols

    # freq_matrix is not used in (bins, forloop) mode so only generate if needed
    if (function_uses == 'points') or (function_uses == 'bins' and sum_method == 'histogram'):
        freq_matrix = start_freqs.reshape(nrows, 1) + np.arange(0, 2 * freq_range, bin_width)

    # Dispatch kernel generation depending on x-coordinate scheme
    if function_uses == 'points':
        kernels = function(sigma=sigma,
                           points=freq_matrix,
                           center=center)
    elif function_uses == 'bins':
        bin_matrix = start_freqs.reshape(nrows, 1) + np.arange(-bin_width / 2, 2 * freq_range + bin_width / 2, bin_width)
        kernels = function(sigma=sigma,
                           bins=bin_matrix,
                           center=center)
    else:
        raise ValueError('x-basis "{}" for broadening function is unknown.'.format(function_uses))

    # Sum spectrum using selected method
    if sum_method == 'histogram':
        spectrum, bin_edges = np.histogram(np.ravel(freq_matrix),
                                           bins,
                                           weights=np.ravel(weights * kernels),
                                           density=False)
    elif sum_method == 'forloop':
        spectrum = np.zeros_like(points)
        for start, kernel, weight in zip(start_indices.flatten(), kernels, np.asarray(weights).flatten()):
            scaled_kernel = kernel * weight
            spectrum[start:start+ncols] += scaled_kernel
    else:
        raise ValueError('Summation method "{}" is unknown.'.format(sum_method))

    return points, spectrum
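
A hedged usage sketch for the function above: the gaussian helper and the peak positions here are made up for illustration (the real module presumably supplies its own kernel), but the grid satisfies the evenly-spaced bins/points requirement and uses the explicit 'forloop' summation path.

import numpy as np

def gaussian(sigma=None, points=None, center=None):
    # Stand-in kernel (assumption): one normalised Gaussian per row of `points`
    sigma = np.asarray(sigma, dtype=float).reshape(-1, 1)
    center = np.asarray(center, dtype=float).reshape(-1, 1)
    return np.exp(-(points - center) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))

bins = np.linspace(0.0, 100.0, 501)      # evenly-spaced bin edges
points = (bins[:-1] + bins[1:]) / 2      # matching bin centres
center = np.array([20.0, 60.0])          # two made-up peaks
sigma = np.array([1.0, 2.0])
weights = np.array([1.0, 0.5])

_, spectrum = trunc_and_sum_inplace(function=gaussian, function_uses='points',
                                    sigma=sigma, points=points, bins=bins,
                                    center=center, weights=weights,
                                    limit=3, method='forloop')
print(spectrum.shape, spectrum.sum() * (bins[1] - bins[0]))  # roughly 1.5 (total weight minus truncation tails)
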
    def __init__(self, parent,config,video,shuffle,Dataframe,scorer,savelabeled):
# Setting the GUI size and panel design
        displays = (wx.Display(i) for i in range(wx.Display.GetCount())) # Gets the number of displays
        screenSizes = [display.GetGeometry().GetSize() for display in displays] # Gets the size of each display
        index = 0 # For display 1.
        screenWidth = screenSizes[index][0]
        screenHeight = screenSizes[index][1]
        self.gui_size = (screenWidth*0.7,screenHeight*0.85)

        wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = 'DeepLabCut2.0 - Manual Outlier Frame Extraction',
                            size = wx.Size(self.gui_size), pos = wx.DefaultPosition, style = wx.RESIZE_BORDER|wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetStatusText("")

        self.SetSizeHints(wx.Size(self.gui_size)) #  This sets the minimum size of the GUI. It can scale now!
        
###################################################################################################################################################
# Splitting the frame into top and bottom panels. The bottom panel contains the widgets; the top panel is for showing images and plotting.
        topSplitter = wx.SplitterWindow(self)

        self.image_panel = ImagePanel(topSplitter, config,video,shuffle,Dataframe,self.gui_size)
        self.widget_panel = WidgetPanel(topSplitter)
        
        topSplitter.SplitHorizontally(self.image_panel, self.widget_panel,sashPosition=self.gui_size[1]*0.83)#0.9
        topSplitter.SetSashGravity(1)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(topSplitter, 1, wx.EXPAND)
        self.SetSizer(sizer)

###################################################################################################################################################
# Add Buttons to the WidgetPanel and bind them to their respective functions.

        widgetsizer = wx.WrapSizer(orient=wx.HORIZONTAL)
        
        self.load_button_sizer = wx.BoxSizer(wx.VERTICAL)
        self.help_button_sizer = wx.BoxSizer(wx.VERTICAL)
        
        self.help = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Help")
        self.help_button_sizer.Add(self.help , 1, wx.ALL, 15)
#        widgetsizer.Add(self.help , 1, wx.ALL, 15)
        self.help.Bind(wx.EVT_BUTTON, self.helpButton)

        widgetsizer.Add(self.help_button_sizer,1,wx.ALL,0)
        
        self.grab = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Grab Frames")
        widgetsizer.Add(self.grab , 1, wx.ALL, 15)
        self.grab.Bind(wx.EVT_BUTTON, self.grabFrame)
        self.grab.Enable(True)

        widgetsizer.AddStretchSpacer(5)
        self.slider = wx.Slider(self.widget_panel, id=wx.ID_ANY, value = 0, minValue=0, maxValue=1,size=(200, -1), style=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS )
        widgetsizer.Add(self.slider,1, wx.ALL,5)
        self.slider.Bind(wx.EVT_SLIDER, self.OnSliderScroll)
        
        widgetsizer.AddStretchSpacer(5)
        self.start_frames_sizer = wx.BoxSizer(wx.VERTICAL)
        self.end_frames_sizer = wx.BoxSizer(wx.VERTICAL)

        self.start_frames_sizer.AddSpacer(15)
#        self.startFrame = wx.SpinCtrl(self.widget_panel, value='0', size=(100, -1), min=0, max=120)
        self.startFrame = wx.SpinCtrl(self.widget_panel, value='0', size=(100, -1))#,style=wx.SP_VERTICAL)
        self.startFrame.Enable(False)
        self.start_frames_sizer.Add(self.startFrame,1, wx.EXPAND|wx.ALIGN_LEFT,15)
        start_text = wx.StaticText(self.widget_panel, label='Start Frame Index')
        self.start_frames_sizer.Add(start_text,1, wx.EXPAND|wx.ALIGN_LEFT,15)
        self.checkBox = wx.CheckBox(self.widget_panel, id=wx.ID_ANY,label = 'Range of frames')
        self.checkBox.Bind(wx.EVT_CHECKBOX,self.activate_frame_range)
        self.start_frames_sizer.Add(self.checkBox,1, wx.EXPAND|wx.ALIGN_LEFT,15)
#        
        self.end_frames_sizer.AddSpacer(15)
        self.endFrame = wx.SpinCtrl(self.widget_panel, value='1', size=(160, -1))#, min=1, max=120)
        self.endFrame.Enable(False)
        self.end_frames_sizer.Add(self.endFrame,1, wx.EXPAND|wx.ALIGN_LEFT,15)
        end_text = wx.StaticText(self.widget_panel, label='Number of Frames')
        self.end_frames_sizer.Add(end_text,1, wx.EXPAND|wx.ALIGN_LEFT,15)
        self.updateFrame = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Update")
        self.end_frames_sizer.Add(self.updateFrame,1, wx.EXPAND|wx.ALIGN_LEFT,15)
        self.updateFrame.Bind(wx.EVT_BUTTON, self.updateSlider)
        self.updateFrame.Enable(False)
        
        widgetsizer.Add(self.start_frames_sizer,1,wx.ALL,0)
        widgetsizer.AddStretchSpacer(5)
        widgetsizer.Add(self.end_frames_sizer,1,wx.ALL,0)
        widgetsizer.AddStretchSpacer(15)
        
        self.quit = wx.Button(self.widget_panel, id=wx.ID_ANY, label="Quit")
        widgetsizer.Add(self.quit , 1, wx.ALL, 15)
        self.quit.Bind(wx.EVT_BUTTON, self.quitButton)
        self.quit.Enable(True)

        self.widget_panel.SetSizer(widgetsizer)
        self.widget_panel.SetSizerAndFit(widgetsizer)
        
        
# Variables initialization
        self.numberFrames = 0
        self.currFrame = 0
        self.figure = Figure()
        self.axes = self.figure.add_subplot(111)
        self.drs = []
        self.extract_range_frame = False
        self.firstFrame  = 0
        # self.cropping = False

# Read config file
        self.cfg = auxiliaryfunctions.read_config(config)
        self.Task = self.cfg['Task']
        self.start = self.cfg['start']
        self.stop = self.cfg['stop']
        self.date = self.cfg['date']
        self.trainFraction = self.cfg['TrainingFraction']
        self.trainFraction = self.trainFraction[0]
        self.videos = self.cfg['video_sets'].keys()
        self.bodyparts = self.cfg['bodyparts']
        self.colormap = plt.get_cmap(self.cfg['colormap'])
        self.colormap = self.colormap.reversed()
        self.markerSize = self.cfg['dotsize']
        self.alpha = self.cfg['alphavalue']
        self.iterationindex=self.cfg['iteration']
        self.cropping = self.cfg['cropping']
        self.video_names = [Path(i).stem for i in self.videos]
        self.config_path = Path(config)
        self.video_source = Path(video).resolve()
        self.shuffle = shuffle
        self.Dataframe = Dataframe
        self.scorer = scorer
        self.savelabeled = savelabeled
        
# Read the video file
        self.vid = cv2.VideoCapture(str(self.video_source))
        self.videoPath = os.path.dirname(self.video_source)
        self.filename = Path(self.video_source).name
        self.numberFrames = int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))
        self.strwidth = int(np.ceil(np.log10(self.numberFrames)))
# Set the values of slider and range of frames
        self.startFrame.SetMax(self.numberFrames-1)
        self.slider.SetMax(self.numberFrames-1)
        self.endFrame.SetMax(self.numberFrames-1)
        self.startFrame.Bind(wx.EVT_SPINCTRL,self.updateSlider)#wx.EVT_SPIN
# Set the status bar
        self.statusbar.SetStatusText('Working on video: {}'.format(os.path.split(str(self.video_source))[-1]))
# Adding the video file to the config file.
        if  not (str(self.video_source.stem) in self.video_names) :
            add.add_new_videos(self.config_path,[self.video_source])

        self.filename = Path(self.video_source).name
        self.update()
        self.plot_labels()
        self.widget_panel.Layout()
    def __getitem__(self, index):
        if self.training:
            index_ratio = int(self.ratio_index[index])
        else:
            index_ratio = index

        # get the anchor index for current sample index
        # here we set the anchor index to the last one
        # sample in this group
        minibatch_db = [self._roidb[index_ratio]]
        blobs = get_minibatch(minibatch_db, self._num_classes,
                              self.cfg.TRAIN.batch_size, self.cfg.TRAIN.SCALES,
                              self.cfg.TRAIN.USE_ALL_GT,
                              self.cfg.PIXEL_MEANS, self.cfg.TRAIN.MAX_SIZE)
        data = torch.from_numpy(blobs['data'])
        im_info = torch.from_numpy(blobs['im_info'])
        # we need to random shuffle the bounding box.
        data_height, data_width = data.size(1), data.size(2)
        if self.training:
            np.random.shuffle(blobs['gt_boxes'])
            gt_boxes = torch.from_numpy(blobs['gt_boxes'])

            ########################################################
            # padding the input image to fixed size for each group #
            ########################################################

            # NOTE1: need to cope with the case where a group cover both conditions. (done)
            # NOTE2: need to consider the situation for the tail samples. (no worry)
            # NOTE3: need to implement a parallel data loader. (no worry)
            # get the index range

            # if the image need to crop, crop to the target size.
            ratio = self.ratio_list_batch[index]

            if self._roidb[index_ratio]['need_crop']:
                if ratio < 1:
                    # this means that data_width << data_height, we need to crop the
                    # data_height
                    min_y = int(torch.min(gt_boxes[:, 1]))
                    max_y = int(torch.max(gt_boxes[:, 3]))
                    trim_size = int(np.floor(data_width / ratio))
                    if trim_size > data_height:
                        trim_size = data_height
                    box_region = max_y - min_y + 1
                    if min_y == 0:
                        y_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            y_s_min = max(max_y - trim_size, 0)
                            y_s_max = min(min_y, data_height - trim_size)
                            if y_s_min == y_s_max:
                                y_s = y_s_min
                            else:
                                y_s = np.random.choice(range(y_s_min, y_s_max))
                        else:
                            y_s_add = int((box_region - trim_size) / 2)
                            if y_s_add == 0:
                                y_s = min_y
                            else:
                                y_s = np.random.choice(range(min_y, min_y + y_s_add))
                    # crop the image
                    data = data[:, y_s:(y_s + trim_size), :, :]

                    # shift y coordinate of gt_boxes
                    gt_boxes[:, 1] = gt_boxes[:, 1] - float(y_s)
                    gt_boxes[:, 3] = gt_boxes[:, 3] - float(y_s)

                    # update gt bounding box according to the trim
                    gt_boxes[:, 1].clamp_(0, trim_size - 1)
                    gt_boxes[:, 3].clamp_(0, trim_size - 1)

                else:
                    # this means that data_width >> data_height, we need to crop the
                    # data_width
                    min_x = int(torch.min(gt_boxes[:, 0]))
                    max_x = int(torch.max(gt_boxes[:, 2]))
                    trim_size = int(np.ceil(data_height * ratio))
                    if trim_size > data_width:
                        trim_size = data_width
                    box_region = max_x - min_x + 1
                    if min_x == 0:
                        x_s = 0
                    else:
                        if (box_region - trim_size) < 0:
                            x_s_min = max(max_x - trim_size, 0)
                            x_s_max = min(min_x, data_width - trim_size)
                            if x_s_min == x_s_max:
                                x_s = x_s_min
                            else:
                                x_s = np.random.choice(range(x_s_min, x_s_max))
                        else:
                            x_s_add = int((box_region - trim_size) / 2)
                            if x_s_add == 0:
                                x_s = min_x
                            else:
                                x_s = np.random.choice(range(min_x, min_x + x_s_add))
                    # crop the image
                    data = data[:, :, x_s:(x_s + trim_size), :]

                    # shift x coordinate of gt_boxes
                    gt_boxes[:, 0] = gt_boxes[:, 0] - float(x_s)
                    gt_boxes[:, 2] = gt_boxes[:, 2] - float(x_s)
                    # update gt bounding box according to the trim
                    gt_boxes[:, 0].clamp_(0, trim_size - 1)
                    gt_boxes[:, 2].clamp_(0, trim_size - 1)

            # based on the ratio, padding the image.
            if ratio < 1:
                # this means that data_width < data_height
                trim_size = int(np.floor(data_width / ratio))
                padding_data = torch.FloatTensor(int(np.ceil(data_width / ratio)), data_width, 3).zero_()
                padding_data[:data_height, :, :] = data[0]
                # update im_info
                im_info[0, 0] = padding_data.size(0)
            elif ratio > 1:
                # this means that data_width > data_height
                # if the image need to crop.
                padding_data = torch.FloatTensor(data_height, int(np.ceil(data_height * ratio)), 3).zero_()
                padding_data[:, :data_width, :] = data[0]
                im_info[0, 1] = padding_data.size(1)
            else:
                trim_size = min(data_height, data_width)
                padding_data = torch.FloatTensor(trim_size, trim_size, 3).zero_()
                padding_data = data[0][:trim_size, :trim_size, :]
                gt_boxes[:, :4].clamp_(0, trim_size)
                im_info[0, 0] = trim_size
                im_info[0, 1] = trim_size

            # check the bounding box:
            not_keep = (gt_boxes[:, 0] == gt_boxes[:, 2]) | (gt_boxes[:, 1] == gt_boxes[:, 3])
            keep = torch.nonzero(not_keep == 0).view(-1)

            gt_boxes_padding = torch.FloatTensor(self.max_num_box, gt_boxes.size(1)).zero_()
            if keep.numel() != 0:
                gt_boxes = gt_boxes[keep]
                num_boxes = min(gt_boxes.size(0), self.max_num_box)
                gt_boxes_padding[:num_boxes, :] = gt_boxes[:num_boxes]
            else:
                num_boxes = 0

            # permute trim_data to adapt to downstream processing
            padding_data = padding_data.permute(2, 0, 1).contiguous()
            im_info = im_info.view(3)

            return padding_data, im_info, gt_boxes_padding, num_boxes

        else:
            data = data.permute(0, 3, 1, 2).contiguous().view(3, data_height, data_width)
            im_info = im_info.view(3)
            gt_boxes = torch.FloatTensor([1, 1, 1, 1, 1])
            num_boxes = 0
            return data, im_info, gt_boxes, num_boxes
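
The crop-then-pad logic above forces every image in a ratio group onto one canvas: when ratio < 1 the height is padded to ceil(width / ratio), when ratio > 1 the width is padded to ceil(height * ratio), and square groups are trimmed to the shorter side. A detached, illustrative sketch of just that rule (not the dataset class itself):

import numpy as np
import torch

def pad_to_group_ratio(image_hw3, ratio):
    # image_hw3: H x W x 3 array; ratio: target width / height for the batch group
    h, w = image_hw3.shape[:2]
    data = torch.as_tensor(image_hw3, dtype=torch.float32)
    if ratio < 1:                                   # tall image: pad the height
        canvas = torch.zeros(int(np.ceil(w / ratio)), w, 3)
        canvas[:h] = data
    elif ratio > 1:                                 # wide image: pad the width
        canvas = torch.zeros(h, int(np.ceil(h * ratio)), 3)
        canvas[:, :w] = data
    else:                                           # square group: trim to min side
        s = min(h, w)
        canvas = data[:s, :s]
    return canvas

print(pad_to_group_ratio(np.zeros((480, 640, 3)), 1.5).shape)  # torch.Size([480, 720, 3])
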
Example #46
0
def interpolated_broadening(sigma=None, points=None, bins=None,
                            center=None, weights=1, is_hist=False, limit=3,
                            function='gaussian', spacing='sqrt2'):
    """Return a fast estimate of frequency-dependent broadening

    Consider a spectrum of two peaks, in the case where (as in indirect-geometry INS) the peak width
    increases with frequency.

       |
       |        |
       |        |
    -----------------

    In the traditional scheme we broaden each peak individually and combine:

       *                      |                       *
       *        |       +     |        *       =      *        *
      * *       |             |      *   *           * *     *   *
    -----------------      -----------------       -----------------

    Instead of summing over broadening kernels evaluated at each peak, the approximation obtains
    a spectrum corresponding to the following scheme:

    - For each sigma value, the entire spectrum is convolved with an
      appropriate-width broadening function
    - At each frequency, the final spectrum value is drawn from the spectrum
      broadened with corresponding sigma.

    Compared to a summation over broadened peaks, this method introduces an
    asymmetry to the spectrum about each peak.

       *                    *                    *                         *
       *        *          * *       *          * *       *      -->       **       *
      * *       *         *   *     * *       *     *   *   *             *  *     *  *
    ----------------- ,  ----------------- ,  -----------------         -----------------

    This asymmetry should be tolerable as long as the broadening function
    varies slowly in frequency relative to its width.

    The benefit of this scheme is that we do not need to evaluate the
    convolution at every sigma value; nearby spectra can be interpolated.
    Trial-and-error finds that with optimal mixing the error of a Gaussian
    approximated by mixing a wider and narrower Gaussian is ~ 5% when the sigma
    range is factor of 2, and ~ 1% when the sigma range is a factor of sqrt(2).
    A pre-optimised transfer function can be used for a fixed ratio between the
    reference functions.

    :param sigma: widths of broadening functions (passed to "sigma" argument of function)
    :type sigma: float or Nx1 array
    :param bins: sample bins for function evaluation. This _must_ be evenly-spaced.
    :type bins: 1-D array
    :param points: regular grid of points for which function should be evaluated.
    :type points: 1-D array
    :param center: centers of broadening functions
    :type center: float or Nx1 array
    :param weights: weights of peaks for summation
    :type weights: float or array corresponding to "center"
    :param is_hist:
        If "weights" is already a histogram corresponding to evenly-spaced
        frequencies, set this to True to avoid a redundant binning operation.
    :type is_hist: bool
    :param function: broadening function; currently only 'gaussian' is accepted
    :type function: str
    :param limit: range (as multiple of sigma) for cutoff
    :type limit: float
    :param spacing:
        Spacing factor between Gaussian samples on log scale. This is not a
        free parameter as a pre-computed curve is used for interpolation.
        Allowed values: '2', 'sqrt2', with error ~5% and ~1% respectively.
    :type spacing: str

    :returns: (points, spectrum)
    :returntype: (1D array, 1D array)

    """
    mix_functions = {'gaussian': {'2': {'lower': [-0.1873, 1.464, -4.079, 3.803],
                                        'upper': [0.2638, -1.968, 5.057, -3.353]},
                                  'sqrt2': {'lower': [-0.6079, 4.101, -9.632, 7.139],
                                            'upper': [0.7533, -4.882, 10.87, -6.746]}}}
    log_bases = {'2': 2, 'sqrt2': np.sqrt(2)}
    log_base = log_bases[spacing]

    # Sample on appropriate log scale: log_b(x) = log(x) / log(b)
    n_kernels = int(np.ceil(np.log(max(sigma) / min(sigma)) / np.log(log_base))) + 1

    if n_kernels == 1:  # Special case: same width everywhere, only need one kernel
        sigma_samples = np.array([min(sigma)])
    else:
        sigma_samples = log_base**np.arange(n_kernels) * min(sigma)

    bin_width = bins[1] - bins[0]

    # Get set of convolved spectra for interpolation
    if is_hist:
        hist = weights
    else:
        hist, _ = np.histogram(center, bins=bins, weights=weights, density=False)
    freq_range = 3 * max(sigma)
    kernel_npts_oneside = np.ceil(freq_range / bin_width)

    if function == 'gaussian':
        kernels = mesh_gaussian(sigma=sigma_samples[:, np.newaxis],
                                points=np.arange(-kernel_npts_oneside, kernel_npts_oneside + 1, 1) * bin_width,
                                center=0)
    else:
        raise ValueError('"{}" kernel not supported for "interpolate" broadening method.'.format(function))

    spectra = np.array([convolve(hist, kernel, mode='same') for kernel in kernels])

    # Interpolate with parametrised relationship
    sigma_locations = np.searchsorted(sigma_samples, sigma) # locations in sampled values of points from sigma
    spectrum = np.zeros_like(points)
    # Samples with sigma == min(sigma) are a special case: copy directly from spectrum
    spectrum[sigma_locations==0] = spectra[0, sigma_locations==0]

    for i in range(1, len(sigma_samples)):
        masked_block = (sigma_locations == i)
        sigma_factors = sigma[masked_block] / sigma_samples[i - 1]
        lower_mix = np.polyval(mix_functions[function][spacing]['lower'], sigma_factors)
        upper_mix = np.polyval(mix_functions[function][spacing]['upper'], sigma_factors)

        spectrum[masked_block] = (lower_mix * spectra[i-1, masked_block]
                                  + upper_mix * spectra[i, masked_block])

    return points, spectrum
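
The key trick above is the choice of reference widths: sigmas are sampled on a log grid (base 2 or sqrt(2)) so that every requested width falls between two pre-convolved spectra, which are then mixed with the pre-fitted polynomials. A hedged, standalone sketch of just that sampling and bracketing step (made-up sigma values, no convolution performed):

import numpy as np

sigma = np.linspace(0.5, 4.0, 200)     # made-up frequency-dependent widths
log_base = np.sqrt(2)                  # 'sqrt2' spacing, ~1% mixing error

n_kernels = int(np.ceil(np.log(sigma.max() / sigma.min()) / np.log(log_base))) + 1
sigma_samples = log_base ** np.arange(n_kernels) * sigma.min()
print(np.round(sigma_samples, 3))      # roughly [0.5, 0.707, 1.0, 1.414, 2.0, 2.828, 4.0]

# Each point is served by the pair of reference spectra bracketing its sigma
locations = np.searchsorted(sigma_samples, sigma)
print(locations[:5], locations[-5:])
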
def main(args):

    device = "cuda:0"

    with open(DETECTION_JSON) as f:
        dataset_dict = json.load(f)

    # count the number of valid face detections
    n_valid_dets = 0
    for synset in sorted(dataset_dict['synsets'].keys()):
        for image in sorted(dataset_dict['synsets'][synset]['images'].keys()):
            for face in dataset_dict['synsets'][synset]['images'][image][
                    'faces']:
                if face['score'] > DETECTION_THRESHOLD:
                    n_valid_dets += 1

    # prepare the apparent gender model
    gender_model = torch.load(
        GENDER_MODEL_PTH_FILE)  # 0 for female, 1 for male
    gender_model.eval()
    gender_model.to(device)

    n_batches = int(np.ceil(n_valid_dets / float(INFERENCE_BATCH_SIZE)))

    batch_no = 1
    batch_list = []
    for synset in sorted(dataset_dict['synsets'].keys()):
        for image in sorted(dataset_dict['synsets'][synset]['images'].keys()):
            for face in dataset_dict['synsets'][synset]['images'][image][
                    'faces']:

                if face['score'] > DETECTION_THRESHOLD:  # TODO - should we reject any small faces ('w' or 'h' < 20)??

                    filepath = os.path.join(TRAINING_ROOT,
                                            os.path.join(synset, image))
                    batch_list.append((filepath, face))

                    if len(batch_list) == INFERENCE_BATCH_SIZE:

                        print('Batch', batch_no, 'of', n_batches)
                        image_batch = read_image_batch(batch_list)
                        image_batch = torch.from_numpy(image_batch).type(
                            torch.FloatTensor)
                        image_batch = image_batch.to(device)

                        gender_outputs = gender_model(image_batch)
                        gender_preds = expectation(gender_outputs.cpu())

                        for gender_pred, batch_item in zip(
                                gender_preds, batch_list):

                            filepath = batch_item[0]

                            synset_filename = filepath.split(TRAINING_ROOT)[1]
                            synset, filename = synset_filename.split('/')

                            if 'gender_preds' not in dataset_dict['synsets'][
                                    synset]['images'][filename]:
                                dataset_dict['synsets'][synset]['images'][
                                    filename]['gender_preds'] = []

                            dataset_dict['synsets'][synset]['images'][
                                filename]['gender_preds'].append(gender_pred)

                        batch_no += 1
                        batch_list = []

                        del image_batch

    if len(batch_list) > 0:

        print('Batch', batch_no, 'of', n_batches)
        image_batch = read_image_batch(batch_list)
        image_batch = torch.from_numpy(image_batch).type(torch.FloatTensor)
        image_batch = image_batch.to(device)

        gender_outputs = gender_model(image_batch)
        gender_preds = expectation(gender_outputs.cpu())

        for gender_pred, batch_item in zip(gender_preds, batch_list):

            filepath = batch_item[0]

            synset_filename = filepath.split(TRAINING_ROOT)[1]
            synset, filename = synset_filename.split('/')

            if 'gender_preds' not in dataset_dict['synsets'][synset]['images'][
                    filename]:
                dataset_dict['synsets'][synset]['images'][filename][
                    'gender_preds'] = []

            dataset_dict['synsets'][synset]['images'][filename][
                'gender_preds'].append(gender_pred)

        del image_batch

    out_json = json.dumps(dataset_dict)
    with open(OUTFILE, 'w') as f:
        f.write(out_json)

    return
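
The batch bookkeeping above is plain ceil division, with a final partial batch flushed after the loops. A quick hedged check with made-up numbers:

import numpy as np

n_valid_dets, INFERENCE_BATCH_SIZE = 1000, 32          # made-up counts
n_batches = int(np.ceil(n_valid_dets / float(INFERENCE_BATCH_SIZE)))
last_batch = n_valid_dets - (n_batches - 1) * INFERENCE_BATCH_SIZE
print(n_batches, last_batch)   # 32 8
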
Example #48
0
 def __len__(self):
     return int(np.ceil(len(self.indexes) / self.batchSize))
import sys

import numpy as np
import pywt
import matplotlib.pyplot as p
# print(os.listdir(os.path.dirname(pywt.__file__)))


import cv2 as cv
if __name__ == '__main__':
    print("python dwt-for-images.py read.jpg out.jpg outputPrefix")

    img=cv.imread(sys.argv[1])
    outPref=sys.argv[2].strip()


    cv.imwrite("A.jpg",img)
    p.show()

    dwt=np.ndarray((4,np.ceil(img.shape[0]/2).astype(np.int32),\
                       np.ceil(img.shape[1]/2).astype(np.int32),img.shape[2]))
    for c in range(img.shape[2]):
        coeffs = pywt.dwt2(img[:,:,c], 'haar')
        for cc in range(4):
            if cc==0:
                dwt[cc,:,:,c]=coeffs[cc]
            else:
                dwt[cc, :, :, c] = coeffs[1][cc - 1]  # detail coefficients (cH, cV, cD)

         # newImg[:,:,c]=coeffs[0]

    print(dwt.shape)

    newImg=np.ndarray(shape=img.shape,dtype=np.uint8)
    zero=np.zeros(dwt[1,:,:,c].shape)
        pttr_time = []
        for p in pttr[0]:
            time = librosa.core.frames_to_time(np.array(
                [subbeats[p - pttr[1]], subbeats[p - 1]]),
                                               sr=sr,
                                               n_fft=fft_size,
                                               hop_length=hop_size)
            pttr_time.append(time)
        pattern_time.append(pttr_time)

    pattern_bpm = []
    for pt_time in pattern_time:
        pt_bpm = []
        for p in pt_time:
            p_b_start = np.floor((p[0] / 60.0) * audio_test['info'][1])
            p_b_end = np.ceil((p[1] / 60.0) * audio_test['info'][1])
            pt_bpm.append([
                p_b_start + audio_test['info'][2][0],
                p_b_end + audio_test['info'][2][0]
            ])
        pt_bpm = sorted(pt_bpm, key=lambda pt: pt[0])
        pattern_bpm.append(pt_bpm)

    csv_path = file_path + song_list[ind] + '/polyphonic/csv/'
    csv_file_path = glob.glob(csv_path + '*')
    csv_file = open(csv_file_path[0], 'r')
    cs = list(csv.reader(csv_file))
    cs_float = [[float(_c) for _c in c[:2]] for c in cs]
    csv_file.close()

    found = open(
Example #51
0
nValid = int(np.floor(0.01 * nTrain))
xValid = xTrain[0:nValid, :, :]
yValid = yTrain[0:nValid, :, :]
xTrain = xTrain[nValid:, :, :]
yTrain = yTrain[nValid:, :, :]
nTrain = xTrain.shape[0]

# Declaring the optimizers for each architectures
optimizer = optim.Adam(model.parameters(), lr=learningRate)

if nTrain < batchSize:
    nBatches = 1
    batchSize = [nTrain]
elif nTrain % batchSize != 0:
    nBatches = np.ceil(nTrain / batchSize).astype(np.int64)
    batchSize = [batchSize] * nBatches
    while sum(batchSize) != nTrain:
        batchSize[-1] -= 1
else:
    nBatches = int(nTrain // batchSize)
    batchSize = [batchSize] * nBatches
batchIndex = np.cumsum(batchSize).tolist()
batchIndex = [0] + batchIndex

epoch = 0  # epoch counter

# Store the training...
lossTrain = []
costTrain = []
lossValid = []
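
With made-up numbers, the splitting above behaves as follows: a non-divisible training set keeps full batches and shrinks only the last one until the sizes sum to nTrain.

import numpy as np

nTrain, batchSize = 103, 20                     # made-up example values
nBatches = np.ceil(nTrain / batchSize).astype(np.int64)
batchSize = [batchSize] * nBatches
while sum(batchSize) != nTrain:
    batchSize[-1] -= 1
batchIndex = [0] + np.cumsum(batchSize).tolist()

print(batchSize)    # [20, 20, 20, 20, 20, 3]
print(batchIndex)   # [0, 20, 40, 60, 80, 100, 103]
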
Example #52
0
    def _make_recordset(self, series, start_time='', end_time='', wavelength='',
                        segment='', primekey={}, **kwargs):
        """
        Take the query arguments and build a record string.

        All the primekeys are now stored in primekey dict, including Time and Wavelength
        which were passed through pre-defined attributes. The following code extracts
        the passed prime-keys and arranges them in the order in which they appear in
        the JSOC database.

        `pkeys_isTime` is a pandas Series whose index values are the prime-key names
        and whose boolean values identify whether each prime-key is a Time-type
        prime-key. Since time-type prime-keys exist under different names, we make
        this uniform by storing the time-type primekey under the single name `TIME`.

        Considering an example, if the primekeys that exist for a given series are
        ['HARPNUM', 'T_OBS', 'WAVELNTH'], we will consider three different cases of the
        passed primekeys.

        pkeys_isTime.index.values = ['HARPNUM', 'T_OBS', 'WAVELNTH']

        Case 1
        ------

        primekey = {'T_OBS' : '2014.01.01_00:00:45_TAI',
                    'HARPNUM' : '4864',
                    'WAVELNTH': '605'}

        If the primekey dict is as above, then pkstr should be as:

        pkstr = '{4864}{2014.01.01_00:00:45_TAI}{605}'

        Case 2
        ------

        primekey = {'T_OBS' : '2014.01.01_00:00:45_TAI',
                    'WAVELNTH': '605'}

        If the primekey dict is as above, then pkstr should be as:

        pkstr = '{}{2014.01.01_00:00:45_TAI}{605}'

        Case 3
        ------

        primekey = {'T_OBS' : '2014.01.01_00:00:45_TAI'}

        If the primekey dict is as above, then pkstr should be as:

        pkstr = '{}{2014.01.01_00:00:45_TAI}'

        The idea behind this should be clear. We build up the `pkstr` string
        containing the values of the prime-keys passed in the same order as
        it occurs in the list `pkeys_isTime.index.values`, i.e. how it is stored
        in the online database. Any missing prime-key that occurs before a passed
        prime-key is compensated for with an empty {}; empty curly braces at the
        end of pkstr can be skipped.

        """

        # Extract and format segment
        # Convert list of segments into a comma-separated string
        if segment:
            if isinstance(segment, list):
                segment = str(segment)[1:-1].replace(' ', '').replace("'", '')
            segment = f'{{{segment}}}'

        # Extract and format sample
        sample = kwargs.get('sample', '')
        if sample:
            sample = f'@{sample}s'

        # Populate primekeys dict with Time and Wavelength values
        if start_time and end_time:
            # Check whether any primekey listed in PKEY_LIST_TIME has been passed through
            # PrimeKey() attribute. If yes, raise an error, since Time can only be passed
            # either through PrimeKey() attribute or Time() attribute.
            if not any(x in PKEY_LIST_TIME for x in primekey):
                timestr = '{start}-{end}{sample}'.format(
                    start=start_time.tai.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
                    end=end_time.tai.strftime("%Y.%m.%d_%H:%M:%S_TAI"),
                    sample=sample)
            else:
                error_message = "Time attribute has been passed both as a Time()"\
                                " and PrimeKey(). Please provide any one of them"\
                                " or separate them by OR operator."
                raise ValueError(error_message)

        else:
            # This is executed when Time has not been passed through Time() attribute.
            # `match` stores all the time-type prime-keys that has been passed through
            # PrimeKey() attribute. The length of `match` won't ever be greater than 1,
            # but it is a good idea to keep a check.
            match = set(primekey.keys()) & PKEY_LIST_TIME
            if len(match) > 1:
                error_message = "Querying of series, having more than 1 Time-type "\
                                "prime-keys is not yet supported. Alternative is to "\
                                "use only one of the primekey to query for series data."
                raise ValueError(error_message)

            if match:
                timestr = '{}'.format(primekey.pop(list(match)[0], ''))
            else:
                timestr = ''

        if wavelength != '':
            if not primekey.get('WAVELNTH', ''):
                if isinstance(wavelength, list):
                    wavelength = [int(np.ceil(wave.to(u.AA).value)) for wave in wavelength]
                    wavelength = str(wavelength)
                else:
                    wavelength = '{}'.format(int(np.ceil(wavelength.to(u.AA).value)))

            else:
                # This is executed when wavelength has been passed both through PrimeKey()
                # and Wavelength().
                error_message = "Wavelength attribute has been passed both as a Wavelength()"\
                                " and PrimeKey(). Please provide any one of them"\
                                " or separate them by OR operator."
                raise ValueError(error_message)

        else:
            # This is executed when wavelength has not been passed through the Wavelength()
            # attribute; take it from PrimeKey() if it was provided there.
            wavelength = '{}'.format(primekey.pop('WAVELNTH', ''))

        # Populate primekey dict with formatted Time and Wavelength.
        if timestr:
            primekey['TIME'] = timestr
        if wavelength != '':
            primekey['WAVELNTH'] = wavelength

        # Extract and format primekeys
        pkstr = ''
        c = drms.Client()
        si = c.info(series)
        pkeys_isTime = si.keywords.loc[si.primekeys].is_time
        for pkey in pkeys_isTime.index.values:
            # The loop is iterating over the list of prime-keys existing for the given series.
            if len(primekey) > 0:
                if pkeys_isTime[pkey]:
                    pkstr += '[{}]'.format(primekey.pop('TIME', ''))
                else:
                    pkstr += '[{}]'.format(primekey.pop(pkey, ''))
            else:
                break
                # break because we can skip adding {} at the end of pkstr, if the primekey
                # dict is empty.

        if not pkstr:
            # pkstr cannot be totally empty
            #
            # Note that whilst it is technically possible to just search by series,
            # this is not allowed here, because some of these would be very large
            # searches that would make JSOC sad
            raise ValueError("Time, Wavelength or an explicit PrimeKey must be specified.")

        dataset = '{series}{primekey}{segment}'.format(series=series,
                                                       primekey=pkstr,
                                                       segment=segment)

        return dataset
Example #53
0
    PI.piezo = 200

    print('--------- start acquisition --------')
    AI.start_acquisition()
    print('--------- start piezo output --------')
    for i in range(10):
        PI.piezo = i + 2 * 100

    status = AI.status
    print("status after running acquisition:")
    for elem in status:
        print(elem, ': ', AI.status[elem])

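    # Number of FIFO block reads needed to cover data_length samples, rounded up to whole blocks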
    number_of_reads = int(
        np.ceil(1.0 * parameters_Acq['data_length'] /
                parameters_Acq['block_size']))
    print('number_of_reads', number_of_reads)

    data_AI1 = np.zeros((number_of_reads, parameters_Acq['block_size']))

    elements_left = -1. * np.ones(number_of_reads)
    for i in range(number_of_reads):
        fifo_data = AI.data_queue.get()
        data_AI1[i, :] = np.array(fifo_data[0])
        elements_left[i] = int(fifo_data[2])

    print('finished')

    print('elements left after each block read from FIFO')
    print(elements_left)
Example #54
0
vx = np.multiply(bf1, vx1) + np.multiply(bf2, vx2)

vy = np.multiply(bf1, vy1) + np.multiply(bf2, vy2)
bf = np.sqrt(np.power(vx, 2) + np.power(vy, 2))
print('B field min = %.2e max = %.2e' % (bf[np.nonzero(bf)].min(),
                                         bf.max()))
vx, vy = np.divide(vx, bf), np.divide(vy, bf)

fig, ax = plt.subplots(figsize=(3,3))
ax.plot(mesh.posx, mesh.posy, '.k',
        marker='.', markersize=3,
        color='black', linestyle='None')
# Alternatively, you can manually set the levels
# and the norm:
lev_exp = np.arange(np.floor(np.log10(bf[np.nonzero(bf)].min())),
                    np.ceil(np.log10(bf.max())), 0.05)
levs = np.power(10, lev_exp)
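# levs: contour levels spaced evenly in log10 between the smallest non-zero and the largest field value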
#levs = np.linspace(bf.min(), bf.max(), 50)
cs = ax.contour(mesh.posx, mesh.posy, bf, levs, norm=colors.LogNorm())
#ax.clabel(cs, cs.levels)
#fig.colorbar(cs)
#ax.quiver(mesh.posx, mesh.posy, vx, vy)
#ax.plot(pos1[0], pos1[1],
#        color='red', marker='o', markersize=15)
#ax.plot(pos2[0], pos2[1],
#        color='red', marker='o', markersize=15)

fig, ax = plt.subplots(2, 1, figsize=(6,6))
ax[0].plot(mesh.posx[int(nx/2), :], bf[int(nx/2), :])
ax[1].plot(mesh.posy[:, int(ny/2)], bf[:, int(ny/2)])
Example #55
0
    def createDatabaseFromFeatureset(database,
                                     featureFile,
                                     length,
                                     threshold=20,
                                     mode=None,
                                     label_type=None,
                                     classes=None,
                                     n_classes=None,
                                     oversample=False,
                                     sample_factor=50,
                                     pbc=(1, 1, 1),
                                     noProtons=False):

        featureFile = h5py.File(featureFile)

        indexes = np.arange(length)
        np.random.shuffle(indexes)

        labels = []
        for i in indexes:
            labels.append(featureFile[str(i) + '/label'].value)

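        # 25-bin histogram of the labels, used below by the optional oversampling scheme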
        hist = np.histogram(labels, 25)
        maxdist = []
        for i in indexes:
            print(i)
            # Add Ligand
            atom_list = []
            property_list = []
            ligcoords = featureFile[str(i) + '/ligcoords'].value

            ligAtNum = np.char.decode(featureFile[str(i) + '/lignum'].value)
            ligFeatures = featureFile[str(i) + '/lig'].value
            x = ligcoords[:, 0].mean()
            y = ligcoords[:, 1].mean()
            z = ligcoords[:, 2].mean()
            mean = np.array([x, y, z])
            for j in range(len(ligcoords)):
                maxdist.append(np.linalg.norm(ligcoords[j] - mean))
                if noProtons:
                    if ligAtNum[j] != 'H':
                        atom_list.append(ase.Atom(ligAtNum[j], ligcoords[j]))
                        property_list.append(ligFeatures[j])
                else:
                    atom_list.append(ase.Atom(ligAtNum[j], ligcoords[j]))
                    property_list.append(ligFeatures[j])

            #Add Protein-Atoms in Cutoff-Range
            protcoords = featureFile[str(i) + '/protcoords'].value
            protAtNum = np.char.decode(featureFile[str(i) + '/protnum'].value)
            protFeatures = featureFile[str(i) + '/prot'].value

            for j in range(len(protcoords)):
                dist = np.linalg.norm(protcoords[j] - mean)
                if dist <= threshold:
                    if noProtons:
                        if protAtNum[j] != 'H':
                            atom_list.append(
                                ase.Atom(protAtNum[j], protcoords[j]))
                            property_list.append(protFeatures[j])
                    else:
                        atom_list.append(ase.Atom(protAtNum[j], protcoords[j]))
                        property_list.append(protFeatures[j])

            # Create Complex
            complexe = [ase.Atoms(atom_list, pbc=pbc)]

            label = featureFile[str(i) + '/label'].value

            affi = PreprocessingSchnet.classLabel(label,
                                                  mode,
                                                  label_type,
                                                  classes=classes,
                                                  n_classes=n_classes,
                                                  min_v=np.min(labels),
                                                  max_v=np.max(labels))
            affi[0]['props'] = np.array(property_list)

            if not oversample:
                database.add_systems(complexe, affi)
            else:
                classn = np.zeros(25)
                for j in range(len(hist[1]) - 1):
                    if j == len(hist[1]) - 2:
                        if hist[1][j] <= labels[i] <= hist[1][j + 1]:
                            classn[j] = 1
                    else:
                        if hist[1][j] <= labels[i] < hist[1][j + 1]:
                            classn[j] = 1

                if np.unique(classn, return_counts=True)[1][1] != 1:
                    print('warning -> Onehot is more than one')
                    print(classn)

                ind = np.argmax(classn)
                if hist[0][ind] == 0:
                    print('Warning -> zero-sample')
                    continue

                n_sampling = int(
                    np.ceil((1 / hist[0][ind]) * sample_factor * 25))
                print(i, len(indexes), n_sampling, ind)
                for _ in range(n_sampling):
                    database.add_systems(complexe, affi)
        print(np.max(maxdist))
Example #56
0
    def load_streaks_from_xml(self,
                              dataset,
                              settings,
                              image_shape_WH,
                              use_pickle=True,
                              verbose=True):
        '''
        Function to load and store the output from the physical simulator.
        Streak data is stored in a dictionary. Key: ID, Value: data
        '''
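        # Resulting structure (built below): self.streaks_simulator = {frame.id: Frame},
        # where each Frame.streaks maps streak pid -> Streak.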
        print('Reading particles file {}'.format(self.streaks_path_xml))

        pickle_version = '1.0'

        # Compute the simulation file hash
        hasher = hashlib.md5()
        with open(self.streaks_path_xml, 'rb') as afile:
            buf = afile.read()
            hasher.update(buf)
        sim_hash = hasher.hexdigest()

        pickle_path = self.streaks_path_xml + '.pkl'
        if use_pickle and os.path.exists(pickle_path):
            print('     loading from pickle')
            input = open(pickle_path, 'rb')
            pickle_data = pickle.load(input)

            # If sim_hash did not change, and image shape is identical too
            if 'version' in pickle_data and pickle_data[
                    'version'] == pickle_version and pickle_data[
                        'sim_hash'] == sim_hash and np.all(
                            pickle_data['image_shapeWH'] == image_shape_WH):
                self.streaks_simulator = pickle_data['streaks']
                input.close()
                return
            else:
                print('Pickle out-dated. Regenerate.')
                input.close()

        if not os.path.exists(self.streaks_path_xml):
            my_utils.print_error("No existing path for XML file (" +
                                 self.streaks_path_xml + ")")
            exit(-1)

        simulation = parse(self.streaks_path_xml).getroot()

        if verbose:
            my_utils.print_progress_bar(0, len(simulation))
        try:
            for fix, frame in enumerate(simulation):
                f = Frame()
                f.id = int(frame.attrib['id'])
                f.exposure_time = int(frame.attrib['t'])
                f.starting_time = int(frame.attrib['d'])
                f.streaks_count = int(frame.attrib['rs'])
                f.streaks = {}

                for drop in frame:
                    s = Streak()
                    s.pid = int(drop.attrib["pid"])
                    s.world_position_start = np.array(
                        drop.attrib["wp1"][1:-1].split(';'), dtype=float)
                    s.world_position_end = np.array(
                        drop.attrib["wp2"][1:-1].split(';'), dtype=float)
                    s.world_diameter_start = float(drop.attrib['wd1'])
                    s.world_diameter_end = float(drop.attrib['wd2'])

                    s.image_position_start = np.array(
                        drop.attrib["ip1"][1:-1].split(';'),
                        dtype=float) / settings["render_scale"]  # x,y
                    s.image_position_end = np.array(
                        drop.attrib["ip2"][1:-1].split(';'),
                        dtype=float) / settings["render_scale"]  # x,y
                    s.image_diameter_start = float(
                        drop.attrib['iw1']) / settings["render_scale"]
                    s.image_diameter_end = float(
                        drop.attrib['iw2']) / settings["render_scale"]

                    if dataset == 'nuscenes_gan':
                        # in case the simulation and the rendering are not at the same resolution
                        r = np.mean((image_shape_WH[0] / 1600,
                                     image_shape_WH[1] / 900))
                        s.image_position_start = np.array(
                            drop.attrib["ip1"][1:-1].split(';'),
                            dtype=float) * r  # x,y
                        s.image_position_end = np.array(
                            drop.attrib["ip2"][1:-1].split(';'),
                            dtype=float) * r  # x,y
                        s.image_diameter_start = float(drop.attrib['iw1']) * r
                        s.image_diameter_end = float(drop.attrib['iw2']) * r

                    s.image_position_start[
                        1] = image_shape_WH[1] - s.image_position_start[1]
                    s.image_position_end[
                        1] = image_shape_WH[1] - s.image_position_end[1]
                    s.world_position_start[2] *= -1
                    s.world_position_end[2] *= -1
                    diff = abs(s.image_position_start - s.image_position_end)
                    s.max_width = int(
                        max(s.image_diameter_start, s.image_diameter_end))

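                    # Cosine of the angle between the vertical image direction and the
                    # streak direction; used to recover the full streak length from its
                    # vertical extent diff[1].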
                    dir1 = np.array([0, -1])
                    dir2 = diff / np.linalg.norm(diff)
                    dir2[1] = -dir2[1]
                    cos_theta = np.dot(dir1, dir2)
                    actual_length = diff[1] / cos_theta
                    s.ratio = s.max_width / actual_length
                    s.image_position_end = s.image_position_end.round().astype(
                        int)
                    s.image_position_start = s.image_position_start.round(
                    ).astype(int)
                    s.length = np.ceil(
                        np.linalg.norm(s.image_position_start -
                                       s.image_position_end)).astype(int)
                    s.drop_type = self.classify_drop(s.max_width)
                    if s.max_width >= 1 and s.length >= 1:
                        f.streaks.update({s.pid: s})

                self.streaks_simulator.update({f.id: f})
                if verbose:
                    my_utils.print_progress_bar(fix + 1, len(simulation))
        except Exception as e:
            ex_type, ex, tb = sys.exc_info()
            my_utils.print_error('Error while parsing XML file.\n\tFile: ' +
                                 self.streaks_path_xml)
            traceback.print_tb(tb)
            exit(-1)
Example #57
0
def create_neuropil_basis(ops, Ly, Lx):
    """
    computes neuropil basis functions

    Parameters
    ----------
    ops:
        ratio_neuropil, tile_factor, diameter, neuropil_type
    Ly: int
    Lx: int

    Returns
    -------
    S:
        basis functions (pixels x nbasis functions)
    """

    if 'ratio_neuropil' in ops:
        ratio_neuropil = ops['ratio_neuropil']
    else:
        ratio_neuropil = 6.
    if 'tile_factor' in ops:
        tile_factor    = ops['tile_factor']
    else:
        tile_factor = 1.
    diameter       = ops['diameter']

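    # Tile counts per axis: ceil of tile_factor*L/(ratio_neuropil*diameter/2),
    # made odd (1 + 2*k) and then clipped to at least 2 tiles.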
    ntilesY  = 1+2*int(np.ceil(tile_factor * Ly / (ratio_neuropil * diameter[0]/2))/2)
    ntilesX  = 1+2*int(np.ceil(tile_factor * Lx / (ratio_neuropil * diameter[1]/2))/2)
    ntilesY  = np.maximum(2,ntilesY)
    ntilesX  = np.maximum(2,ntilesX)
    yc = np.linspace(1, Ly, ntilesY)
    xc = np.linspace(1, Lx, ntilesX)
    ys = np.arange(0,Ly)
    xs = np.arange(0,Lx)

    Kx = np.ones((Lx, ntilesX), 'float32')
    Ky = np.ones((Ly, ntilesY), 'float32')
    if 1:
        # basis functions are fourier modes
        for k in range(int((ntilesX-1)/2)):
            Kx[:,2*k+1] = np.sin(2*math.pi * (xs+0.5) *  (1+k)/Lx)
            Kx[:,2*k+2] = np.cos(2*math.pi * (xs+0.5) *  (1+k)/Lx)
        for k in range(int((ntilesY-1)/2)):
            Ky[:,2*k+1] = np.sin(2*math.pi * (ys+0.5) *  (1+k)/Ly)
            Ky[:,2*k+2] = np.cos(2*math.pi * (ys+0.5) *  (1+k)/Ly)
    else:
        for k in range(ntilesX):
            Kx[:,k] = np.cos(math.pi * (xs+0.5) *  k/Lx)
        for k in range(ntilesY):
            Ky[:,k] = np.cos(math.pi * (ys+0.5) *  k/Ly)

    S = np.zeros((ntilesY, ntilesX, Ly, Lx), np.float32)
    for kx in range(ntilesX):
        for ky in range(ntilesY):
            S[ky,kx,:,:] = np.outer(Ky[:,ky], Kx[:,kx])

    S = np.reshape(S,(ntilesY*ntilesX, Ly*Lx))
    S = S / np.reshape(np.sum(S**2,axis=-1)**0.5, (-1,1))
    S = np.transpose(S, (1, 0)).copy()
    S = np.reshape(S, (Ly, Lx, -1))
    return S
Example #58
0
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    # Plot image grid with labels

    if isinstance(images, torch.Tensor):
        images = images.cpu().float().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()

    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255

    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)

    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)

    colors = color_list()  # list of colors
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)  # init
    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break

        block_x = int(w * (i // ns))
        block_y = int(h * (i % ns))

        img = img.transpose(1, 2, 0)
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))

        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            labels = image_targets.shape[1] == 6  # labels if no conf column
            conf = None if labels else image_targets[:, 6]  # check for confidence presence (label vs pred)

            if boxes.shape[1]:
                if boxes.max() <= 1.01:  # if normalized with tolerance 0.01
                    boxes[[0, 2]] *= w  # scale to pixels
                    boxes[[1, 3]] *= h
                elif scale_factor < 1:  # absolute coords need scale if image scales
                    boxes *= scale_factor
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = colors[cls % len(colors)]
                cls = names[cls] if names else cls
                if labels or conf[j] > 0.25:  # 0.25 conf thresh
                    label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)

        # Draw image filename labels
        if paths:
            label = Path(paths[i]).name[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)

        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)

    if fname:
        r = min(1280. / max(h, w) / ns, 1.0)  # ratio to limit image size
        mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)
        # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))  # cv2 save
        Image.fromarray(mosaic).save(fname)  # PIL save
    return mosaic
Example #59
0
def sourcery(mov: np.ndarray, ops):
    change_codes = True
    i0 = time.time()
    if isinstance(ops['diameter'], int):
        ops['diameter'] = [ops['diameter'], ops['diameter']]
    ops['diameter'] = np.array(ops['diameter'])
    ops['spatscale_pix'] = ops['diameter'][1]
    ops['aspect'] = ops['diameter'][0] / ops['diameter'][1]
    ops, U,sdmov, u   = getSVDdata(mov=mov, ops=ops) # get SVD components
    S, StU , StS = getStU(ops, U)
    Lyc, Lxc,nsvd = U.shape
    ops['Lyc'] = Lyc
    ops['Lxc'] = Lxc
    d0 = ops['diameter']
    sig = np.ceil(d0 / 4) # smoothing constant
    # make array of radii values of size (2*d0+1,2*d0+1)
    rs,dy,dx     = circleMask(d0)
    nsvd = U.shape[-1]
    nbasis = S.shape[-1]
    codes = np.zeros((0, nsvd), np.float32)
    LtU = np.zeros((0, nsvd), np.float32)
    LtS = np.zeros((0, nbasis), np.float32)
    L   = np.zeros((Lyc, Lxc, 0), np.float32)
    # regress maps onto basis functions and subtract neuropil contribution
    neu   = np.linalg.solve(StS, StU).astype('float32')
    Ucell = U - (S.reshape((-1,nbasis))@neu).reshape(U.shape)

    it = 0
    ncells = 0
    refine = -1

    # initialize
    ypix,xpix,lam = [], [], []

    while 1:
        if refine<0:
            V, us = getVmap(Ucell, sig)
            if it==0:
                vrem   = morphOpen(V, rs<=1.)
            V      = V - vrem # make V more uniform
            if it==0:
                V = V.astype('float64')
                # find indices of all maxima in +/- 1 range
                maxV   = filters.maximum_filter(V, footprint= np.ones((3,3)), mode='reflect')
                imax   = V > (maxV - 1e-10)
                peaks  = V[imax]
                # use the median of these peaks to decide if ROI is accepted
                thres  = ops['threshold_scaling'] * np.median(peaks[peaks>1e-4])
                ops['Vcorr'] = V
            V = np.minimum(V, ops['Vcorr'])

            # add extra ROIs here
            n = ncells
            while n<ncells+200:
                ind = np.argmax(V)
                i,j = np.unravel_index(ind, V.shape)
                if V[i,j] < thres:
                    break;
                yp, xp, la, ix, code = iter_extend(i, j, Ucell, us[i,j,:], change_codes=change_codes)
                codes = np.append(codes, np.expand_dims(code,axis=0), axis=0)
                ypix.append(yp)
                xpix.append(xp)
                lam.append(la)
                Ucell[ypix[n], xpix[n], :] -= np.outer(lam[n], codes[n,:])

                yp, xp = extendROI(yp,xp,Lyc,Lxc, int(np.mean(d0)))
                V[yp, xp] = 0
                n += 1
            newcells = len(ypix) - ncells
            if it==0:
                Nfirst = newcells
            L   = np.append(L, np.zeros((Lyc, Lxc, newcells), 'float32'), axis =-1)
            LtU = np.append(LtU, np.zeros((newcells, nsvd), 'float32'), axis = 0)
            LtS = np.append(LtS, np.zeros((newcells, nbasis), 'float32'), axis = 0)
            for n in range(ncells, len(ypix)):
                L[ypix[n],xpix[n], n] = lam[n]
                LtU[n,:] = lam[n] @ U[ypix[n],xpix[n],:]
                LtS[n,:] = lam[n] @ S[ypix[n],xpix[n], :]
            ncells +=newcells

            # regression with neuropil
            LtL = L.reshape((-1, ncells)).transpose() @ L.reshape((-1, ncells))
            cellcode = np.concatenate((LtL,LtS), axis=1)
            neucode  = np.concatenate((LtS.transpose(),StS), axis=1)
            codes = np.concatenate((cellcode, neucode), axis=0)
            Ucode = np.concatenate((LtU, StU),axis=0)
            codes = np.linalg.solve(codes + 1e-3*np.eye((codes.shape[0])), Ucode).astype('float32')
            neu   = codes[ncells:,:]
            codes = codes[:ncells,:]

        Ucell = U - (S.reshape((-1,nbasis))@neu + L.reshape((-1,ncells))@codes).reshape(U.shape)
        # reestimate masks
        n,k = 0,0
        while n < len(ypix):
            Ucell[ypix[n], xpix[n], :] += np.outer(lam[n], codes[k,:])
            ypix[n], xpix[n], lam[n], ix, codes[n,:] = iter_extend(ypix[n], xpix[n], Ucell,
                codes[k,:], refine, change_codes=change_codes)
            k+=1
            if ix.sum()==0:
                print('dropped ROI with no pixels')
                del ypix[n], xpix[n], lam[n]
                continue;
            Ucell[ypix[n], xpix[n], :] -= np.outer(lam[n], codes[n,:])
            n+=1
        codes = codes[:n, :]
        ncells = len(ypix)
        L   = np.zeros((Lyc,Lxc, ncells), 'float32')
        LtU = np.zeros((ncells, nsvd),   'float32')
        LtS = np.zeros((ncells, nbasis), 'float32')
        for n in range(ncells):
            L[ypix[n],xpix[n],n] = lam[n]
            if refine<0:
                LtU[n,:] = lam[n] @ U[ypix[n],xpix[n],:]
                LtS[n,:] = lam[n] @ S[ypix[n],xpix[n],:]
        err = (Ucell**2).mean()
        print('ROIs: %d, cost: %2.4f, time: %2.4f'%(ncells, err, time.time()-i0))

        it += 1
        if refine ==0:
            break
        if refine==2:
            # good place to get connected regions
            stat = [{'ypix':ypix[n], 'lam':lam[n], 'xpix':xpix[n]} for n in range(ncells)]
            stat = connected_region(stat, ops)
            # good place to remove ROIs that overlap, change ncells, codes, ypix, xpix, lam, L
            #stat, ix = remove_overlaps(stat, ops, Lyc, Lxc)
            #print('removed %d overlapping ROIs'%(len(ypix)-len(ix)))
            ypix = [stat[n]['ypix'] for n in range(len(stat))]
            xpix = [stat[n]['xpix'] for n in range(len(stat))]
            lam = [stat[n]['lam'] for n in range(len(stat))]
            #L = L[:,:,ix]
            #codes = codes[ix, :]
            ncells = len(ypix)
        if refine>0:
            Ucell = Ucell + (S.reshape((-1,nbasis))@neu).reshape(U.shape)
        if refine<0 and (newcells<Nfirst/10 or it==ops['max_iterations']):
            refine = 3
            U, sdmov = getSVDproj(mov, ops, u)
            Ucell = U
        if refine>=0:
            StU = S.reshape((Lyc*Lxc,-1)).transpose() @ Ucell.reshape((Lyc*Lxc,-1))
            #StU = np.reshape(S, (Lyc*Lxc,-1)).transpose() @ np.reshape(Ucell, (Lyc*Lxc, -1))
            neu = np.linalg.solve(StS, StU).astype('float32')
        refine -= 1
    Ucell = U - (S.reshape((-1,nbasis))@neu).reshape(U.shape)

    sdmov = np.reshape(sdmov, (Lyc, Lxc))
    ops['sdmov'] = sdmov
    stat  = [{'ypix':ypix[n], 'lam':lam[n]*sdmov[ypix[n], xpix[n]], 'xpix':xpix[n]} for n in range(ncells)]

    stat = postprocess(ops, stat, Ucell, codes)
    return ops, stat
Example #60
0
    def createDatabase(database,
                       threshold=20,
                       data_path='../Data/train/',
                       index_path='../Data/index/INDEX_refined_data.2016',
                       ligand_end='_ligand.sdf',
                       alt_ligand_end='_ligand.pdb',
                       prot_end='_pocket.pdb',
                       mode=None,
                       label_type=None,
                       classes=None,
                       n_classes=None,
                       oversample=False,
                       sample_factor=50,
                       pbc=(1, 1, 1)):

        ligandPaths = PreprocessingSchnet.getAllMolPaths(data_path, ligand_end)
        ligandPaths2 = PreprocessingSchnet.getAllMolPaths(
            data_path, alt_ligand_end)
        proteinPaths = PreprocessingSchnet.getAllMolPaths(data_path, prot_end)
        labels = PreprocessingSchnet.getLabels(data_path, index_path)

        indexes = np.arange(len(proteinPaths[0]))
        np.random.shuffle(indexes)

        # For oversampling, the data is split into 25 equal parts. Proteins whose labels
        # are under-represented are taken more often, to simulate an equally distributed label distribution
        hist = np.histogram(labels, 25)
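        # Hypothetical illustration (numbers not from the original example): if a label falls
        # into a bin holding hist[0][ind] = 5 samples and sample_factor = 50, the complex is
        # added ceil((1 / 5) * 50 * 25) = 250 times further below.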

        for i in indexes:
            atom_list = []
            # Read the ligand and create a list of ligand atoms.
            # Sometimes the sdf does not work, so the alternative pdb is used.
            try:
                atoms2 = read(ligandPaths[0][i], format='sdf')
                for at in atoms2:
                    atom_list.append(at)
                x = atoms2.positions[:, 0].mean()
                y = atoms2.positions[:, 1].mean()
                z = atoms2.positions[:, 2].mean()
            except:
                try:
                    atoms3 = read(ligandPaths2[0][i], format='proteindatabank')
                    x = atoms3.positions[:, 0].mean()
                    y = atoms3.positions[:, 1].mean()
                    z = atoms3.positions[:, 2].mean()
                    for at in atoms3:
                        atom_list.append(at)
                except:
                    print('Does not work')
                    continue

            # Generate the labels in the necessary format for schnetpack
            affi = PreprocessingSchnet.classLabel(labels[i],
                                                  mode,
                                                  label_type,
                                                  classes=classes,
                                                  n_classes=n_classes,
                                                  min_v=np.min(labels),
                                                  max_v=np.max(labels))

            mean = np.array([x, y, z])

            atoms = read(proteinPaths[0][i], format='proteindatabank')

            # Create protein-atom list
            for at in atoms:
                dist = np.linalg.norm(at.position - mean)
                if dist <= threshold:
                    atom_list.append(at)

            # concat the protein and ligand atoms
            complexe = [ase.Atoms(atom_list, pbc=pbc)]

            if not oversample:
                # Add the complex to the database
                database.add_systems(complexe, affi)
            else:
                # Calculate how often a protein has to be added to the database
                # to simulate an equally distributed label distribution
                classn = np.zeros(25)
                for j in range(len(hist[1]) - 1):
                    if j == len(hist[1]) - 2:
                        if hist[1][j] <= labels[i] <= hist[1][j + 1]:
                            classn[j] = 1
                    else:
                        if hist[1][j] <= labels[i] < hist[1][j + 1]:
                            classn[j] = 1

                if np.unique(classn, return_counts=True)[1][1] != 1:
                    print('warning -> Onehot is more than one')
                    print(classn)

                ind = np.argmax(classn)
                if hist[0][ind] == 0:
                    print('Warning -> zero-sample')
                    continue

                n_sampling = int(
                    np.ceil((1 / hist[0][ind]) * sample_factor * 25))
                print(i, len(indexes), n_sampling, ind)
                for _ in range(n_sampling):
                    database.add_systems(complexe, affi)