def linestyle(i,a=5,b=3):
    '''
    provide one of 36 unique combinations of line style, color and marker;
    with 6 colors, 4 line styles and 18 markers the triple repeats with
    period lcm(6, 4, 18) = 36

    use in combination with markevery=a+mod(i,b) to add spaced markers:
    a is the base spacing, chosen to suit the data density, and b
    modulates it across the number of lines to be plotted

    Parameters
    ----------
    i : integer
        Index of the style combination; the pattern repeats every 36.
    a : integer
        Base spacing of the markers.  The default is 5.
    b : integer
        Modulation used when plotting many nearby lines.  The default
        is 3.

    Examples
    --------
    
    >>> plot(x,sin(x),linestyle(7)[0], markevery=linestyle(7)[1])


    (c) 2014 FH
    '''
    import scipy as sc

    lines = ['-', '--', '-.', ':']
    points = ['v', '^', '<', '>', '1', '2', '3', '4', 's', 'p', '*', 'h',
              'H', '+', 'x', 'D', 'd', 'o']
    colors = ['b', 'g', 'r', 'c', 'm', 'k']
    # cycle independently through colors, line styles and markers
    ls_string = colors[sc.mod(i, 6)] + lines[sc.mod(i, 4)] + points[sc.mod(i, 18)]
    mark_i = a + sc.mod(i, b)
    return ls_string,mark_i
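
A minimal usage sketch (assuming matplotlib is available and that the sc.* aliases above resolve; older SciPy re-exported NumPy's mod):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 2 * np.pi, 200)
fig, ax = plt.subplots()
for i in range(5):
    style, every = linestyle(i)
    ax.plot(x, np.sin(x + 0.3 * i), style, markevery=every)
plt.show()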
Example 2
    def __init__(self, dim, obs, tot, params=None, E=None):
        # - dim (2d tuple): dimensionality of each view
        # - obs (ndarray): observed data
        # - tot (ndarray): total counts for each observation
        # - E (ndarray): initial expected value of pseudodata
        PseudoY_Seeger.__init__(self, dim=dim, obs=None, params=params, E=E)

        # Initialise the observed data
        assert s.all(s.mod(obs, 1) == 0) and s.all(s.mod(tot, 1) == 0), "Data must not contain float numbers, only integers"
        assert s.all(obs >= 0) and s.all(tot >= 0), "Data must not contain negative numbers"
        assert s.all(obs <= tot), "Observed counts must be less than or equal to the total counts"
        self.obs = obs
        self.tot = tot
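
The assertions can be exercised in isolation; a sketch using the NumPy equivalents of the s.* aliases (s is presumably scipy):

import numpy as np

obs = np.array([[3, 0], [2, 5]])
tot = np.array([[4, 1], [2, 6]])
assert np.all(np.mod(obs, 1) == 0) and np.all(np.mod(tot, 1) == 0)
assert np.all(obs >= 0) and np.all(tot >= 0)
assert np.all(obs <= tot)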
Example 3
def localProjection(lon, lat, radius, lon_0, lat_0, inverse=False):
    """ This function was written to use instead of Basemap which is very slow"""
    if inverse:
        # (x, y) -> (lon, lat): clamp latitude at the poles and wrap
        # longitude into [-pi, pi)
        x, y = lon, lat
        lat = np.fmin(np.fmax(lat_0 + y / radius, -sp.pi / 2), sp.pi / 2)
        lon = sp.mod(lon_0 + x /
                     (radius * sp.cos(lat_0)) + sp.pi, 2 * sp.pi) - sp.pi
        return (lon, lat)
    else:
        # (lon, lat) -> (x, y), shrinking longitude spans by cos(lat_0)
        y = (lat - lat_0) * radius
        x = (sp.mod(lon - lon_0 + sp.pi, 2 * sp.pi) -
             sp.pi) * radius * sp.cos(lat_0)
    return (x, y)
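
A round-trip sanity check (all angles in radians; the Earth radius is an illustrative value, and the sp.* aliases above must resolve):

import numpy as np

R = 6371000.0                                   # mean Earth radius in metres
lon0, lat0 = np.radians(10.0), np.radians(50.0)
x, y = localProjection(np.radians(10.5), np.radians(50.2), R, lon0, lat0)
lon, lat = localProjection(x, y, R, lon0, lat0, inverse=True)
assert np.isclose(lon, np.radians(10.5)) and np.isclose(lat, np.radians(50.2))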
Example 4
def transfermisigns(Lx, Ly, shift, q):
    kx, ky = fermisea(Lx, Ly, shift)
    kqx = sc.mod(kx * Lx - shift[0] - q[0] * Lx, Lx) / Lx + shift[0] / Lx
    kqy = sc.mod(ky * Ly - shift[1] - q[1] * Ly, Ly) / Ly + shift[1] / Ly
    kqx, kqy = mbzmod(kqx, kqy)
    fsign = sc.zeros(len(kx))
    gsup = sc.zeros(2 * len(kx))
    gsup[0::2] = 1
    gsdo = sc.zeros(2 * len(kx))
    gsdo[0::2] = 1
    for k in range(len(kx)):
        # first index of the Fermi-sea momentum closest to k+q
        ma = abs(kqx[k] - kx) + abs(kqy[k] - ky)
        idx = int(sc.argmin(ma))
        fsign[k] = (-1)**sum(gsdo[0:2 * idx]) * (-1)**sum(gsup[0:(2 * k + 1)])
    return fsign
Example 5

    def get_usgs_n(self):
        if self.get_usgsrc() == 0:
            return
        # Fetch usgsq, usgsh, handq, handh, handarea, handrad, handslope, handstage
        self.get_values()

        # Find indices for integer stageheight values in usgsh, and apply to usgsq
        usgsidx = scipy.where(scipy.equal(scipy.mod(
            self.usgsh, 1), 0))  # Find indices of integer values in usgsh
        usgsh = self.usgsh[usgsidx]
        usgsq = self.usgsq[usgsidx]

        # Find indices where usgsh[usgsidx] occur in handstage, and apply to handarea and handrad
        handidx = scipy.where(scipy.in1d(self.handstage, usgsh))
        area = self.handarea[handidx]
        hydrad = self.handrad[handidx]

        # Remove usgsq values for duplicate usgsh heights (keep first instance only).
        # Collect the duplicate indices first: deleting inside the loop would
        # shift the remaining indices and delete the wrong elements.
        if usgsh.shape != area.shape:
            dupidx = scipy.where(usgsh[1:] == usgsh[:-1])[0] + 1
            usgsq = scipy.delete(usgsq, dupidx)

        # Calculate average Manning's n after converting discharge units
        disch = usgsq  # *0.0283168 to convert cfs to cms
        self.usgsroughness_array = self.mannings_n(area=area,
                                                   hydrad=hydrad,
                                                   slope=self.handslope,
                                                   disch=disch)
        self.usgsroughness = scipy.average(self.usgsroughness_array)
        print('Average roughness: {0:.2f}'.format(self.usgsroughness))
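
mannings_n is not shown in the snippet; a sketch of what it presumably computes, Manning's equation rearranged for the roughness coefficient (SI units):

import numpy as np

def mannings_n(area, hydrad, slope, disch):
    # n = A * R^(2/3) * sqrt(S) / Q
    return area * hydrad ** (2.0 / 3.0) * np.sqrt(slope) / disch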
Example 6
def fitsurface(errfunc,paramlists,inputs):
    """This function will create a fit surface using an error function given by the user
    and an N length list of parameter value lists. The output will be a N-dimensional array
    where each dimension is the size of the array given for each of the parameters. Arrays of
    one element are not represented in the returned fit surface array.
    Inputs:
        errfunc - The function used to determine the error between the given data and
        the theoretical function
        paramlists - An N length list of arrays for each of the parameters.
        inputs - A tuple of the rest of the inputs for error function."""
    paramsizlist = sp.array([len(i) for i in paramlists])
    outsize = sp.where(paramsizlist!=1)[0]
    #  make the fit surface and flatten it
    fit_surface = sp.zeros(paramsizlist[outsize])
    fit_surface = fit_surface.flatten()

    for inum in range(sp.prod(paramsizlist)):
        numcopy = inum
        curnum = sp.zeros_like(paramsizlist)
        # TODO: Replace with sp.unravel_index
        # determine current parameters
        for i, iparam in enumerate(reversed(paramsizlist)):
            curnum[i] = sp.mod(numcopy,iparam)
            numcopy = sp.floor(numcopy/iparam)
        curnum = curnum[::-1]
        cur_x = sp.array([ip[curnum[num_p]] for num_p ,ip in enumerate(paramlists)])
        diffthing = errfunc(cur_x,*inputs)
        fit_surface[inum]=(sp.absolute(diffthing)**2).sum()
    # return the fit surface after it has been de-flattened
    return fit_surface.reshape(paramsizlist[outsize]).copy()
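
The TODO above can be resolved with NumPy's unravel_index, which performs the same mixed-radix decomposition as the manual loop; a standalone check:

import numpy as np

shape = (3, 4, 5)
for inum in range(int(np.prod(shape))):
    curnum = np.unravel_index(inum, shape)   # tuple of per-dimension indices
    assert np.ravel_multi_index(curnum, shape) == inum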
Example 7
def get_sawtooth_map():
    gridmap = tklib_log_gridmap(100, 30, 0.1)

    x_size = gridmap.get_map_width()
    y_size = gridmap.get_map_height()

    #fill first with nothing
    for i in range(x_size):
        for j in range(y_size):
            gridmap.set_value(i, j, -10000.0)

    #fill in the left/right wall
    for i in range(x_size):
        gridmap.set_value(i, 0, 10000.0)
        gridmap.set_value(i, y_size - 1, 10000.0)

    #fill in the top/bottom wall
    for i in range(y_size):
        gridmap.set_value(0, i, 10000.0)
        gridmap.set_value(x_size - 1, i, 10000.0)

    print "x_size", x_size, x_size * 0.1
    print "y_size", y_size, y_size * 0.1
    for i in range(x_size):
        for j in range(y_size):
            if (mod(j, 5) == 0 and abs(i - 150) > 30):
                gridmap.set_value(i, j, 10000.0)

    return gridmap
Example 8
from functools import reduce  # needed on Python 3

def forward_prop(X):
  def forward(X, theta):
    m = X.shape[0]
    X = sp.hstack((sp.ones((m, 1)), X))  # prepend the bias column
    return sigmoid(X * theta.T)

  # argmax yields 0-based indices over 1-based Octave-style labels;
  # mod(.. + 1, 10) maps label 10 back to digit 0
  return lambda *thetas: sp.mod(sp.argmax(reduce(forward, thetas, X), axis=1) + 1, 10)
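
sigmoid is not defined in the snippet; the standard logistic function it presumably refers to:

import numpy as np

def sigmoid(z):
    # logistic function, 1 / (1 + e^-z), applied elementwise
    return 1.0 / (1.0 + np.exp(-z))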
Example 9
    def update_rho(self, k, r, s):
        """Automatic rho adjustment."""

        if self.opt['AutoRho', 'Enabled']:
            tau = self.rho_tau
            mu = self.rho_mu
            xi = self.rho_xi
            if k != 0 and scipy.mod(k + 1, self.opt['AutoRho', 'Period']) == 0:
                if self.opt['AutoRho', 'AutoScaling']:
                    if s == 0.0 or r == 0.0:
                        rhomlt = tau
                    else:
                        rhomlt = scipy.sqrt(r / (s * xi) if r > s *
                                            xi else (s * xi) / r)
                        if rhomlt > tau:
                            rhomlt = tau
                else:
                    rhomlt = tau
                rsf = 1.0
                if r > xi * mu * s:
                    rsf = rhomlt
                elif s > (mu / xi) * r:
                    rsf = 1.0 / rhomlt
                self.rho = self.dtype.type(rsf * self.rho)
                self.U /= rsf
                if rsf != 1.0:
                    self.rhochange()
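
The branches implement the residual-balancing heuristic of Boyd et al. (Distributed Optimization and Statistical Learning via ADMM, Sec. 3.4.1); with the scaling option stripped away it reduces to this sketch (names hypothetical):

def balanced_rho(rho, r, s, mu=10.0, tau=2.0):
    # Grow rho when the primal residual r dominates, shrink it when the
    # dual residual s dominates, leave it alone otherwise.
    if r > mu * s:
        return tau * rho
    if s > mu * r:
        return rho / tau
    return rho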
Example 10

    def preprocessing(self,
                      signal,
                      time=None,
                      samplingRate=None,
                      channel=None):

        # Defining EEG filters
        order = int(min(3003, len(signal) - 3) / 3)
        bandPassFilter = Filter(samplingRate)
        bandPassFilter.create(low_crit_freq=self.lowFreq,
                              high_crit_freq=self.highFreq,
                              order=order,
                              btype="bandpass",
                              ftype="FIR",
                              useFiltFilt=True)

        # Filtering can take a lot of memory. By making sure that the
        # garbage collector has run just before the filtering, we
        # increase our chances of avoiding a MemoryError.
        gc.collect()
        signal = bandPassFilter.applyFilter(signal)

        ################################# RMS COMPUTATION #####################
        windowNbSample = int(round(self.averagingWindowSize * samplingRate))
        if mod(windowNbSample, 2) == 0:  # We need an odd number.
            windowNbSample += 1

        return self.averaging(np.abs(signal), windowNbSample)
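
averaging is not shown; a centred moving average it plausibly implements (the odd window length keeps the output aligned with the input):

import numpy as np

def averaging(x, windowNbSample):
    kernel = np.ones(windowNbSample) / windowNbSample
    return np.convolve(x, kernel, mode='same')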
Example 11
    def compute_empirical_feature_counts(self):
        self.fc_obs = zeros(self.get_num_features_obs())
        self.fc_trans = zeros(self.get_num_features_trans())
        #self.fc_obs_all = zeros([len(self.D.observations), self.get_num_features_obs()])
        #self.fc_trans_all = zeros([len(self.D.observations), self.get_num_features_trans()])

        for k, d in enumerate(self.D.observations):
            if mod(k, 10000) == 0:
                print(k, 'of', len(self.D.observations))

            for i in range(len(d.observations)):
                for a in self.D.get_output_alphabet():
                    # cache the observation features
                    if a in d.features_obs and d.features_obs[a][i] is None:
                        d.features_obs[a][i] = self.f_obs(a, d.observations[i])
                        # print("caching:", a, i, d.features_obs[a][i], d.observations[i])

                    # cache the transition features as well
                    for b in self.D.get_output_alphabet():
                        if (a in d.features_trans
                                and b in d.features_trans[a]
                                and d.features_trans[a][b][i] is None):
                            d.features_trans[a][b][i] = self.f_trans(a, b, d.observations[i])


                features_obs = d.features_obs[d.labels[i]][i]
                
                self.fc_obs += features_obs
                #self.fc_obs_all[k] += features_obs
                
                if(i != 0):
                    features_trans = self.f_trans(d.labels[i-1], d.labels[i], d.observations[i])
                    self.fc_trans += features_trans
Example 12
def DoQuestion3():
    fig = mp.figure()
    ax = mp.subplot(111)
    epsilon = 0.05
    d_vc = 10
    x = []
    y = []
    loop = 0
    var = 1000
    for N in range(10000000):
        if scipy.mod(N, 100) == 5:
            var = var * 3
            loop += 1
            value = 4 * ((2 * N)**d_vc) * math.exp(-(epsilon**2) * N / 8)
            if loop == 1:
                lastValue = value
            x.append(N)
            y.append(value)

            print(value - 0.05, lastValue - 0.05)
            if scipy.sign(value - 0.05) != scipy.sign(lastValue - 0.05):
                print(scipy.sign(value - 0.05), scipy.sign(lastValue - 0.05))
                break
            lastValue = value

    z1 = np.array(x)
    z2 = np.array(y)
    ax.plot(z1[:], z2[:], '*', label='$y = Value$')
    #top = 10**10
    #ax.set_ylim(0,top)

    mp.title('Visualization of Dataset')
    ax.legend(loc='upper left', fontsize='small')
    fig.show()
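
The tracked value is the VC generalization bound 4(2N)^d_vc exp(-epsilon^2 N / 8); the loop hunts for the N where it crosses 0.05. As a standalone function:

import math

def vc_bound(N, d_vc=10, epsilon=0.05):
    # 4 (2N)^d_vc * exp(-epsilon^2 N / 8)
    return 4 * (2 * N) ** d_vc * math.exp(-epsilon ** 2 * N / 8)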
Example 13
def initial_bearing(lon1, lat1, lon2, lat2):
    '''Initial bearing when traversing from point1 (lon1, lat1)
        to point2 (lon2, lat2)

        See http://www.movable-type.co.uk/scripts/latlong.html

        Parameters
        ----------
        lon1, lat1 : float
            longitude and latitude of start point
        lon2, lat2 : float
            longitude and latitude of end point

        Returns
        -------
        initial_bearing : float
            The initial bearing (azimuth direction) when heading out
            from the start point towards the end point along a great circle.

        '''
    rlon1 = np.radians(lon1)
    rlat1 = np.radians(lat1)
    rlon2 = np.radians(lon2)
    rlat2 = np.radians(lat2)
    bearing = np.arctan2(
        np.sin(rlon2 - rlon1) * np.cos(rlat2),
        np.cos(rlat1) * np.sin(rlat2) -
        np.sin(rlat1) * np.cos(rlat2) * np.cos(rlon2 - rlon1))
    return np.mod(np.degrees(bearing) + 360, 360)
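
A quick call (coordinates in degrees, longitude first as in the signature; the result is an azimuth in [0, 360)):

bearing = initial_bearing(-0.13, 51.51, -74.01, 40.71)   # London -> New York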
Example 14
    def compute_log_gradient(self, theta):
        self.W_obs = theta[0:len(self.W_obs)]
        self.W_trans = theta[len(self.W_obs):]

        f_obs_exp = zeros(len(self.W_obs))
        f_trans_exp = zeros(len(self.W_trans))

        for i, d in enumerate(self.D.observations):
            if mod(i + 1, 200) == 0:
                print(i, "grad. of", len(self.D.observations))

            f_obs_e, f_trans_e = self.compute_expected_feature_count(d)
            f_obs_exp += f_obs_e
            f_trans_exp += f_trans_e

        obs_grad = self.fc_obs - f_obs_exp
        trans_grad = self.fc_trans - f_trans_exp

        ret_val = []
        ret_val.extend(obs_grad)
        ret_val.extend(trans_grad)
        ret_val = array(ret_val)

        #because we're minimizing the negative log likelihood
        ret_val *= -1.0
        ret_val += theta / (self.sigma**2.0)
        print "max", amax(ret_val)

        i = argmax(obs_grad)
        print("diff:", obs_grad[i], " features:", self.fc_obs[i],
              " expectation:", f_obs_exp[i])

        return ret_val
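
The core update is the standard L2-regularized CRF gradient, grad(-log L) = -(E_data[f] - E_model[f]) + theta / sigma^2; restated compactly (names hypothetical):

import numpy as np

def neg_log_likelihood_grad(fc_data, fc_model, theta, sigma):
    # empirical minus expected feature counts, negated for minimization,
    # plus the Gaussian-prior (L2) term
    return -(fc_data - fc_model) + theta / sigma ** 2.0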
Example 15

    def preprocessing(self,
                      signal,
                      time=None,
                      samplingRate=None,
                      channel=None):

        # Defining EEG filters
        order = int(min(3003, len(signal) - 3) / 3)
        bandPassFilter = Filter(samplingRate)
        bandPassFilter.create(low_crit_freq=self.lowFreq,
                              high_crit_freq=self.highFreq,
                              order=order,
                              btype="bandpass",
                              ftype="FIR",
                              useFiltFilt=True)

        # Filtering can take a lot of memory. By making sure that the
        # garbage collector has run just before the filtering, we
        # increase our chances of avoiding a MemoryError.
        gc.collect()
        signal = bandPassFilter.applyFilter(signal)

        ################################# RMS COMPUTATION #####################
        windowNbSample = int(round(self.averagingWindowSize * samplingRate))
        if mod(windowNbSample, 2) == 0:  # We need an odd number.
            windowNbSample += 1

        # For selecting samples using quantile-based thresholds, rectifying
        # the signal with abs(X) or X**2 gives exactly the same result:
        # X**2 equals abs(X)**2 (for real numbers) and the transformation
        # from abs(X) to abs(X)**2 is monotonically increasing, so rank-based
        # statistics (such as quantiles) are unchanged. We use the NumPy
        # implementation of abs() because it is the faster alternative.
        return np.sqrt(self.averaging(signal**2.0, windowNbSample))
Example 16
    def bin(self, n=None):
        """
        Bin a square array by grouping nxn pixels.
        Array size must be a multiple of n.

        """
        if n is None:
            n = CXP.preprocessing.bin
            # The detector pixel size has changed, so update it accordingly
            CXP.experiment.dx_d *= n
            CXP.log.info(
                'After binning new detector pixel size: {:2.2e}'.format(
                    CXP.experiment.dx_d))

        nx, ny = self.data[0].shape[0], self.data[0].shape[1]
        if not nx == ny:
            raise Exception('Array to be binned must be square')

        if not sp.mod(nx, n) == 0.:
            raise Exception('Array size must be a multiple of binning factor')

        if n > nx:
            raise Exception('Binning factor must be smaller than array size')

        nn = nx // n  # integer number of binned pixels per side
        l = []
        for i in range(len(self.data)):
            tmp = sp.zeros((nn, nn))
            for p in range(nn):
                for q in range(nn):
                    tmp[p, q] = sp.sum(self.data[i][p * n:(p + 1) * n,
                                                    q * n:(q + 1) * n])
            l.append(tmp)

        self.data = l
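
The nested p/q loops compute block sums; a reshape-based sketch of the same n-by-n binning, assuming a square array whose size is a multiple of n:

import numpy as np

def bin_square(a, n):
    # (N, N) -> (N//n, n, N//n, n), then sum over the two block axes
    nn = a.shape[0] // n
    return a.reshape(nn, n, nn, n).sum(axis=(1, 3))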
Example 17

    def preprocessing(self,
                      signal,
                      time=None,
                      samplingRate=None,
                      channel=None):

        #fileName = self.reader.getFileName() + "_RSP_" + channel + "_" + str(time[0])  + ".mat"

        N = len(signal)
        """
        if os.path.exists(fileName):
            print "Using saved RSP..."    
            self.RSP = loadmat(fileName)["RSP"]
            self.RSP = self.RSP.reshape(self.RSP.size)            
            assert(len(signal) == len(self.RSP))
        else: 
        """
        self.RSP = zeros(N)

        nbPad = int(0.1 * samplingRate)
        nbWin = int(4.0 * samplingRate)

        nbIter = int(N / nbWin)
        for i in range(nbIter):
            if mod(i, 1000) == 0: print((i, nbIter))

            if i == 0:  # First iteration
                indexes = arange(nbPad + nbWin)
            elif i == nbIter - 1:  # Last iteration
                indexes = arange(i * nbWin - nbPad, N)
            else:  # Other iterations
                indexes = arange(i * nbWin - nbPad, i * nbWin + nbWin + nbPad)

            #if any(stageIndicator[indexes]):
            X, fX = computeST(signal[indexes],
                              samplingRate,
                              fmin=0.5,
                              fmax=40.0)

            if i == 0:  # First iteration
                indexesNoPad = arange(nbWin)
            elif i == nbIter - 1:  # Last iteration
                indexesNoPad = arange(nbPad, nbPad + N - i * nbWin)
            else:  # Other iterations
                indexesNoPad = arange(nbPad, nbPad + nbWin)

            X = abs(X[indexesNoPad])
            indexes = indexes[indexesNoPad]

            spindleBand = (fX >= self.lowFreq) * (fX <= self.highFreq)
            self.RSP[indexes] = trapz(X[:, spindleBand],
                                      fX[spindleBand],
                                      axis=1) / trapz(X, fX, axis=1)
            #else:
            #    self.RSP[indexes] = 0.0

        #    assert(len(signal) == len(self.RSP))
        #    savemat(fileName, {"RSP":self.RSP})
        return self.RSP
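
Each window stores the relative spectral power: band power divided by total power, both via the trapezoidal rule. The same computation in isolation:

import numpy as np

def relative_band_power(X, fX, lowFreq, highFreq):
    band = (fX >= lowFreq) & (fX <= highFreq)
    return np.trapz(X[:, band], fX[band], axis=1) / np.trapz(X, fX, axis=1)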
Example 18
    def _make_strel(self, r):
        # Spherical structuring element of radius r with an odd edge length
        D = int(2 * sp.ceil(r))
        if sp.mod(D, 2) == 0:
            D += 1
        strel = sp.ones((D, D, D))
        strel[D // 2, D // 2, D // 2] = 0
        strel = spim.distance_transform_bf(strel) <= r
        return strel
Example 19
    def _get_coefmat(self, rpfldin, vlabel):
        fmax = np.amax(np.absolute(rpfldin))
        rpfld = rpfldin / fmax
        coefmat = []
        nr = self.nrad
        nph = self.nphi
        n = self.orad
        m = self.ophi

        X = np.linspace(1.0 / float(nr), 1.0, nr)
        Y = np.zeros(nr)

        for i in range(nr):
            for j in range(nph):
                Y[i] += rpfld[i, j] / float(nph)

        P = np.polyfit(X, Y, n)   # coefficients come highest power first
        Padj = np.zeros(n + 1)
        for i in range(n + 1):
            Padj[i] = P[n - i]    # reverse to ascending powers

        coefmat.append(Padj.copy())

        for mi in range(m):
            Y = np.zeros(nr)
            if np.mod(mi + 1, 2) == np.mod(vlabel - 1, 2):
                for i in range(nr):
                    for j in range(nph):
                        Y[i] += np.cos(
                            2.0 * np.pi * (mi + 1) * (j + 1) /
                            float(nph)) * rpfld[i, j] * 2.0 / float(nph)
            elif np.mod(mi + 1, 2) == np.mod(vlabel, 2):
                for i in range(nr):
                    for j in range(nph):
                        Y[i] += np.sin(
                            2.0 * np.pi * (mi + 1) * (j + 1) /
                            float(nph)) * rpfld[i, j] * 2.0 / float(nph)

            P = np.polyfit(X, Y, n)
            for i in range(n + 1):
                Padj[i] = P[n - i]
            coefmat.append(Padj.copy())

        coefmat = np.transpose(np.array(coefmat))

        return fmax, coefmat
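
Each inner double loop is a discrete cosine or sine projection over the angular index; it vectorizes to a single matrix-vector product (illustrative shapes):

import numpy as np

nr, nph, mi = 8, 16, 2
rpfld = np.random.rand(nr, nph)
j = np.arange(1, nph + 1)
Y = rpfld @ np.cos(2.0 * np.pi * (mi + 1) * j / float(nph)) * 2.0 / nph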
Example 20
def create_split(all_images, conf):
    # The first conf.numTrain images of each class go to training
    temp = mod(arange(len(all_images)), conf.imagesperclass) < conf.numTrain
    selTrain = where(temp)[0]
    selTest = where(~temp)[0]
    # the '[0]' is there because 'where' returns a tuple of index arrays,
    # one per dimension; we need integer indices rather than a boolean
    # array, to match the Matlab code
    return selTrain, selTest
Example 21
def watts_hex(Ny, Nx, pr=0.5):
    Gs = nx.Graph()
    Gs.add_nodes_from(range(0, Nx * Ny), state=1.0)
    for iy in range(0, Ny):
        for ix in range(0, Nx):
            ni = iy * Nx + ix
            nj1 = iy * Nx + sp.mod(ix + 1, Nx)
            nj2 = sp.mod(iy + 1, Ny) * Nx + ix
            nj3 = sp.mod(iy + 1, Ny) * Nx + sp.mod(ix + 1, Nx)
            Gs.add_edges_from(((ni, nj1), (ni, nj2), (ni, nj3)), weight=1.0)

    nx.double_edge_swap(Gs, nswap=int(pr * Nx * Ny * 3), max_tries=10000)

    # remove self-loops introduced by the random edge swaps
    Gs.remove_edges_from(nx.selfloop_edges(Gs))

    return Gs
Example 22
    def __init__(self, dim, obs, params=None, E=None):
        # - dim (2d tuple): dimensionality of each view
        # - obs (ndarray): observed data
        # - E (ndarray): initial expected value of pseudodata
        PseudoY_Seeger.__init__(self, dim=dim, obs=obs, params=params, E=E)

        # Initialise the observed data
        assert s.all(s.mod(self.obs, 1) == 0), "Data must not contain float numbers, only integers"
        assert s.all(self.obs >= 0), "Data must not contain negative numbers"
Example 23
def phiktrans(kx, ky, qx, qy, p, r=sc.zeros((1, 2))):
    """
    Returns phi[k,r] such that |q,r>=sum_k phi[k,r]|q,k>
    """
    kqx = kx - qx
    kqy = ky - qy
    pk = sc.zeros((sc.shape(kx)[0], sc.shape(r)[0]), complex)
    pke = sc.conj(uk(kx, ky, 1, 1, p)) * uk(kqx, kqy, -1, -1, p) + \
          sc.conj(vk(kx, ky, 1, 1, p)) * vk(kqx, kqy, -1, -1, p)
    pko = sc.conj(vk(kx, ky, 1, 1, p)) * uk(kqx, kqy, -1, -1, p) + \
          sc.conj(uk(kx, ky, 1, 1, p)) * vk(kqx, kqy, -1, -1, p)
    # parity of the sublattice site r selects the even or odd amplitude
    even = 1 - sc.mod(r[:, 0] + r[:, 1], 2)
    odd = sc.mod(r[:, 0] + r[:, 1], 2)
    ph = sc.exp(-2j * sc.pi * (sc.einsum('i,j->ij', kx, r[:, 0]) +
                               sc.einsum('i,j->ij', ky, r[:, 1])))
    pk = sc.einsum('ij,j,i->ij', ph, even, pke) + sc.einsum(
        'ij,j,i->ij', ph, odd, pko)
    return pk
Example 24
def nearnei_regular(N, nnei, dnei):
    G = nx.Graph()
    for n in range(0, N):
        G.add_node(n, state=1.0, xloc=n, yloc=0)

    for n in range(0, N):
        for j in range(1, nnei + 1):
            # wrap indices around the ring with mod
            G.add_edge(n, sp.mod(n + dnei * j, N), weight=1.0)
    return G
Example 25
    def getCharge(self, t):
        # Non-zero charge only within bunchLengthLimitSigma of a bunch centre
        conditions = sp.mod(
            t, self.tBunchSpacing
        ) < 2 * self.bunchLengthLimitSigma * self.tBunchLengthSigma
        if conditions:
            tRed = sp.mod(t, self.tBunchSpacing)
            temp = sps.norm.cdf(
                sp.clip(
                    (tRed + self.dt / 2.) / self.tBunchLengthSigma -
                    self.bunchLengthLimitSigma, -self.bunchLengthLimitSigma,
                    self.bunchLengthLimitSigma)) - sps.norm.cdf(
                        sp.clip(
                            (tRed - self.dt / 2.) / self.tBunchLengthSigma -
                            self.bunchLengthLimitSigma,
                            -self.bunchLengthLimitSigma,
                            self.bunchLengthLimitSigma))
            return temp * self.nParticles / self.dt / self.beamVelocity * self.charge * self.qTransversalProfile
        else:
            return 0
Example 26
    def set_pole(self, num=1, ells=[0, 2, 4, 6, 8, 10, 12]):

        nells = len(ells)
        if (scipy.mod(ells, 2) == 0).all():
            ells = scipy.arange(nells) * 2
            multitype = 'even'
        elif (scipy.mod(ells, 2) == 1).all():
            ells = 1 + scipy.arange(nells) * 2
            multitype = 'odd'
        else:
            ells = scipy.arange(nells)
            multitype = 'all'

        self.anacorr.set_pole.argtypes = (ctypes.c_size_t, ctypes.c_char_p,
                                          ctypes.c_size_t)
        self.anacorr.set_pole(num, multitype.encode('utf-8'), nells)
        self._ells[num] = ells.tolist()

        return list(self._ells[num])
Example 27
    def _do_one_outer_iteration(self):
        r"""
        One iteration of an outer iteration loop for an algorithm
        (e.g. time or parametric study)
        """
        if sp.mod(self._counter, 500) == 0:
            self._logger.info("Outer Iteration (counter = " + str(self._counter) + ")")
        self._do_inner_iteration_stage()
        self._condition_update()
        self._counter += 1
Example 28
    def __init__(self):

        self.landmarks = []

        #cache landmark context
        self.landmark_context = collections.defaultdict(lambda : set())
        for i in range(len(self.landmarks)):
            if mod(i, 50) == 0:
                print(i, "of", len(self.landmarks))
            self._get_landmark_context(i)
Example 29
    def gen_data(self, add_noise=False):
        cpi_start = 0
        time = 0
        prf = self.prf  # will become vector
        range_unam = const.c0 / prf / 2
        tcpi = 1 / prf
        rng_window = sp.array([0., range_unam])
        fast_time = rng_window / const.c0 * 2  # two-way travel time; '//' would floor to zero here
        ft_axis = sp.arange(fast_time[0], fast_time[1], 1 / (2 * self.bandw))
        numbins = ft_axis.size
        rng_axis = sp.linspace(0, numbins, rng_window[1])
        p_data = sp.zeros((numbins, self.npri))
        self.data = sp.zeros((self.ncpi, numbins, self.npri))
        for i in range(1, self.ncpi):
            '''
            This loop creates the data for each CPI; the first section
            builds vectors of radar attributes aligned with the data they
            are collected from.
            '''

            for p in range(0, self.npri):
                time = (p) * tcpi
                # itarget in range(0,len(target_range)): # this when I get to multiple targets
                range_new = self.target_range + self.target_radvel
                # The '1' will be replaced with a power calculation in future
                # sp.sqrt(-1) is nan for real input; the Doppler phase needs 1j
                tmp_array = 1 * self.pulse * sp.exp(
                    1j * 2 * sp.pi *
                    (sp.arange(0, len(self.pulse)) / self.fs + time) * 2 *
                    self.target_radvel / (const.c0 / self.freq))
                '''
                Insert the generated data into the appropriate bins.
                Each i loop fills its own MxN matrix, stacked along a third
                dimension to give (rng x dop x cpi). tmp_array holds the
                target's value, while the indexing below slots it into the
                correct Doppler bins; the range binning is handled by the
                pri loop.
                '''
                index1 = round(
                    sp.mod(range_new,
                           rng_window[1] / (rng_window[1] * (numbins)) + 1))
                print(index1)
                index2 = index1 + min(len(self.ts) - 1, numbins - index1)
                print(index2)
                index_size = sp.arange(index1, index2 + 1)
                print(index_size)
                rev_tmp = tmp_array[0:len(index_size)]
                print(rev_tmp)
                # range bins run along axis 0, PRIs along axis 1
                p_data[index_size, p] = p_data[index_size, p] + rev_tmp[::-1]
                # add noise here
                #if add_noise is True:
                #    p_data[p, :] = p_data[p, :]
            cpi_start = cpi_start + time + tcpi + 1 / self.fs
            # Save a map for RDMap for every CPI. Structured array
            self.data[i - 1] = p_data
        return self.data
Example 30
    def _rebuild_fld(self, coefmat, emax, vlabel):
        nr = self.nrad
        nph = self.nphi
        remap = np.zeros((nr, nph))
        n, m = coefmat.shape
        R = np.zeros(n)
        Phi = np.zeros(m)
        for i in range(nr):
            for j in range(nph):
                for ni in range(n):
                    R[ni] = np.power(float(i + 1) / float(nr), ni)
                for mi in range(m):
                    if np.mod(mi, 2) == np.mod(vlabel - 1, 2):
                        Phi[mi] = np.cos(2.0 * np.pi * mi * (j + 1) /
                                         float(nph))
                    elif np.mod(mi, 2) == np.mod(vlabel, 2):
                        Phi[mi] = np.sin(2.0 * np.pi * mi * (j + 1) /
                                         float(nph))
                remap[i, j] = emax * R.dot(coefmat.dot(Phi))
        return remap