Example #1
def get_likelihood(observables):
    """
    get the likelihood for a point.
    @param observables: Dictionary containing the observables for which the likelihood is to be computed. Observable values are keyed by observable name. If a theory uncertainty is given, it is taken to be the only uncertainty pertaining to the observable.
    """
    product_likelihood = 1

    for obs, obsval in observables.items():
        # for non-standard handling of the likelihood, add the flag
        # "special_case" as one of the keys of the observable's entry
        if "special_case" in obsval.keys():
            continue
        if obs not in likelihood_contributions:
            print(str(obs) + " has no experimental result in database")
            continue
        if "uncertainty" in obsval.keys(): #higgs-type case: center gauss on theory value with width= theory uncertainty, evaluate at experimental value
            product_likelihood *= utils.gauss(likelihood_contributions[obs]["value"],obsval["value"],0.5*obsval["uncertainty"])
        else:
            if type(likelihood_contributions[obs]["uncertainty"]) == float:#usual case, symmetric error
                product_likelihood *= utils.gauss(obsval["value"],likelihood_contributions[obs]["value"],likelihood_contributions[obs]["uncertainty"])
            elif type(likelihood_contributions[obs]["uncertainty"]) == list:#non-symmetric error, use two-sided gaussian
                product_likelihood *= utils.gauss_pm(obsval["value"],likelihood_contributions[obs]["value"],sigma_m = likelihood_contributions[obs]["uncertainty"][0],sigma_p = likelihood_contributions[obs]["uncertainty"][1])

                
    #handle special cases
    #superiso chi2
    chi2 = observables["chi2"]["value"]
    ndf = observables["chi2_ndf"]["value"]
    gamma = math.gamma(float(ndf)/2)
    coeff = pow(chi2,(float(ndf)/2)-1)/((pow(2,float(ndf)/2))*gamma)
    product_likelihood*= (coeff*math.exp(-chi2/2))
    return product_likelihood
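utils.gauss here is project-specific and its definition is not part of the listing; the examples below call gauss with quite different signatures (amplitude-first, multivariate, keyword mu/sigma, even a quadrature rule in the finite-element examples), so the helper differs from project to project. A minimal sketch consistent with the calls in this example, assuming a normalised density and a two-sided variant for asymmetric errors:

import math

def gauss(x, mu, sigma):
    # normalised Gaussian density at x (signature inferred from the calls above)
    return math.exp(-(x - mu) ** 2 / (2.0 * sigma ** 2)) / (math.sqrt(2.0 * math.pi) * sigma)

def gauss_pm(x, mu, sigma_m, sigma_p):
    # two-sided Gaussian for asymmetric errors: sigma_m below the centre,
    # sigma_p above it (behaviour assumed, not taken from the source)
    return gauss(x, mu, sigma_m if x < mu else sigma_p)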
Example #2
def getisosweights_gauss(weights, age, metallicity, isos, sigma):
    """ same as getisoweights, but adds Gaussian scatter """
    w = [0.] * len(isos)
    for m, a, feh in zip(weights, age, metallicity):
        for x in utils.frange(feh - 4, feh + 4, 0.1):
            w[getn(x, a, isos)] += m * utils.gauss(x, feh, sigma)
    return w
Example #3
def getisosweights_gauss(weights,age,metallicity,isos,sigma):
    """ same as getisoweights, but adds Gaussian scatter """ 
    w=[0.]*len(isos)
    for m,a,feh in zip(weights,age,metallicity):
        for x in utils.frange(feh-4,feh+4,0.1):
            w[getn(x,a,isos)]+=m*utils.gauss(x,feh,sigma)
    return w
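The isochrone-weighting examples also rely on a utils.frange float-range helper and a getn index lookup that are not shown. A plausible stand-in for frange (name and semantics inferred from the loop above):

def frange(start, stop, step):
    # float analogue of range(): yields start, start + step, ... while below stop
    x = start
    while x < stop:
        yield x
        x += step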
Example #4
def bolEstm(obj, sig, width):
    '''
    peakFinder.bolEstm(obj, sig, width)
    =============================
    Performs an SN estimation at each wavelength following the
    maximum-likelihood approach of Bolton et al. (2012).

    Parameters:
        obj: The SDSS object/spectrum on which the estimation is performed
        sig: Width of the Gaussian kernel
        width: Width of the convolution window
    Returns:
        SN: The SN at each wavelength as an array. The beginning and end are
        filled with null values due to the convolution.
    '''
    NormGauss = gauss(np.linspace(-width * 0.5, width * 0.5, width), 0.0, 1.0,
                      sig**2.0)
    NormGauss = NormGauss / np.sum(NormGauss)
    Cj1 = np.array([
        np.sum(
            kernel(j + 0.5 * width, width, NormGauss, len(obj.wave)) *
            obj.reduced_flux * obj.ivar)
        for j in range(int(len(obj.wave) - width))
    ])
    Cj2 = np.array([
        np.sum(obj.ivar *
               kernel(j + 0.5 * width, width, NormGauss, len(obj.wave))**2.0)
        for j in range(int(len(obj.wave) - width))
    ])
    SN = np.zeros(len(obj.wave))
    SN[int(width * 0.5):int(width * 0.5 + len(Cj1))] = Cj1 / np.sqrt(Cj2)
    return SN
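The kernel helper used by bolEstm is not shown either; from the call sites it appears to embed the normalised Gaussian at a (fractional) pixel position in an otherwise zero array. A hypothetical sketch, with no claim to match the original implementation:

import numpy as np

def kernel(j, width, NormGauss, length):
    # place the width-sample Gaussian so it is centred near pixel j
    # of a zero array of the given length (assumed behaviour)
    out = np.zeros(length)
    start = int(j - 0.5 * width)
    out[start:start + len(NormGauss)] = NormGauss
    return out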
Example #5
    def _cost(theta, stims, psth_data, sres, tres, channels):
        tres = int(np.rint(theta[0]))
        theta = theta[1:].reshape(channels,7)

        for i in range(channels):
            z1, p1, p2, p3, l, mu, sig = theta[i]    
            # bounds on the uniform prior distributions
            if not (5 < tres <= 150 and
                    -100 < z1 < 100 and
                    -100 < p1 < p2 < p3 < 100 and
                    0 < l < 50 and
                    0 < mu < sres and
                    0 < sig < sres):
                return -np.inf

        Sfilt = np.empty((channels,sres))
        Tfilt = np.empty((channels,tres))

        for i in range(channels):
            z1,p1,p2,p3,l, mu, sig = theta[i]
            Tfilt[i] = np.asarray(utils.overtime(0,tres,utils.P3Z1,z1,p1,p2,p3,l,1))
            Sfilt[i] = utils.gauss(np.arange(0,sres),mu,sig)

        nh = np.matmul(Sfilt.T,Tfilt)

        out = 0
        ntrials = np.size(psth_data)
        for i,stim in enumerate(stims):
            rate = utils.speccnov(nh,stim)
            out += np.sum((utils.normalize(psth_data[i]) - utils.normalize(rate))**2)/len(psth_data[i])
        return -out
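_cost returns a log-probability (minus infinity outside the uniform prior bounds), the shape expected by an affine-invariant MCMC sampler; the flatchain and flatlnprobability attributes read in Example #20 match emcee's API, so a hypothetical invocation could look like the following (all data names are placeholders, and it will not run without the project's utils module):

import numpy as np
import emcee  # assumed sampler, based on the attribute names in Example #20

channels, sres, tres0 = 2, 32, 50                      # placeholder sizes
stims = [np.random.rand(sres, 200) for _ in range(3)]  # placeholder stimuli
psth_data = [np.random.rand(200) for _ in range(3)]    # placeholder responses
ndim = 1 + channels * 7            # theta packs [tres, 7 params per channel]
nwalkers = 2 * ndim
p0 = np.random.rand(nwalkers, ndim)  # real code would seed walkers inside the prior bounds
sampler = emcee.EnsembleSampler(nwalkers, ndim, _cost,
                                args=(stims, psth_data, sres, tres0, channels))
sampler.run_mcmc(p0, 1000)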
Example #6
def bolEstm(obj, sig, width):
    '''
    peakFinder.bolEstm(obj, sig, width)
    =============================
    Performs an SN estimation at each wavelength following the
    maximum-likelihood approach of Bolton et al. (2012).

    Parameters:
        obj: The SDSS object/spectrum on which the estimation is performed
        sig: Width of the Gaussian kernel
        width: Width of the convolution window
    Returns:
        SN: The SN at each wavelength as an array. The beginning and end are
        filled with null values due to the convolution.
    '''
    NormGauss = gauss(np.linspace(-width * 0.5, width * 0.5, width), 0.0, 1.0,
                      sig ** 2.0)
    NormGauss = NormGauss / np.sum(NormGauss)
    Cj1 = np.array([np.sum(kernel(j + 0.5 * width, width, NormGauss,
                                  len(obj.wave)) * obj.reduced_flux * obj.ivar)
                    for j in range(int(len(obj.wave) - width))])
    Cj2 = np.array([np.sum(obj.ivar * kernel(j + 0.5 * width, width, NormGauss,
                                             len(obj.wave)) ** 2.0)
                    for j in range(int(len(obj.wave) - width))])
    SN = np.zeros(len(obj.wave))
    SN[int(width * 0.5): int(width * 0.5 + len(Cj1))] = Cj1 / np.sqrt(Cj2)
    return SN
Example #7
    def update(self, Z: list):
        self.S = self.innov_cov(self.H, self.P, self.R)
        zpred = self.zpred()
        innovations = []
        betas_unorm = []
        for z in Z:
            if z.size != 2:
                raise Exception("z has wrong dimension", z)
            innovations.append(z - zpred)
            betas_unorm.append(self.P_D * gauss(z, zpred, self.S))
        betas = betas_unorm / np.sum(betas_unorm)

        # Reduce the mixture to a single innovation weighted by betas
        ny = np.zeros_like(zpred)
        for j, assoc_ny in enumerate(innovations):
            ny += betas[j] * assoc_ny
        W = self.P @ np.transpose(self.H) @ np.linalg.inv(self.S)

        self.x = self.x + W @ ny

        beta_boi = 0
        for j, assoc_ny in enumerate(innovations):
            beta_boi += betas[j] * assoc_ny @ assoc_ny.T
        sprd_innov = W @ (beta_boi - ny @ ny.T) @ W.T
        beta_0 = self.lam * (1 - self.P_D)

        self.P = self.P - (1 - beta_0) * W @ self.S @ W.T + sprd_innov
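This is a probabilistic data association (PDA) update; innov_cov and gauss are not shown. Sketches consistent with the call sites, assuming the standard Kalman innovation covariance and a multivariate normal density:

import numpy as np

def innov_cov(H, P, R):
    # innovation covariance S = H P H^T + R (standard Kalman form, assumed)
    return H @ P @ H.T + R

def gauss(z, mean, S):
    # multivariate normal density of measurement z, used for association weights
    d = z - mean
    norm = np.sqrt((2 * np.pi) ** len(d) * np.linalg.det(S))
    return float(np.exp(-0.5 * d @ np.linalg.solve(S, d)) / norm)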
Example #8
def sumisos_gauss_slow(mass,age,metallicity,fehlist,agelist,isos,sigma):
    """slower version"""
    summed_iso = 0.*isos[isos.keys()[0]]
    for m,a,z in zip(mass,age,metallicity):
        feh = numarray.log10(z)-numarray.log10(SOLAR)
        for x in utils.frange(feh-4,feh+4,0.1):
            summed_iso+= utils.gauss(x,feh,sigma)*m*getiso(x,a,fehlist,agelist,isos)
    return summed_iso
Example #9
def sumisos_gauss_slow(mass, age, metallicity, fehlist, agelist, isos, sigma):
    """slower version"""
    summed_iso = 0. * isos[isos.keys()[0]]
    for m, a, z in zip(mass, age, metallicity):
        feh = numarray.log10(z) - numarray.log10(SOLAR)
        for x in utils.frange(feh - 4, feh + 4, 0.1):
            summed_iso += utils.gauss(x, feh, sigma) * m * getiso(
                x, a, fehlist, agelist, isos)
    return summed_iso
Example #10
def calcEM(height):
    ########## initialization ##########
    N = len(height)
    gp = 0.5  # girl probability
    bp = 0.5  # boy probability
    gmu, gsigma = min(height), 18  # priors: start the means at the min and max heights
    bmu, bsigma = max(height), 20
    # r(i, k): probability that sample i was generated by component k
    ggamma = [0.0 for i in range(N)]  # probability that sample i comes from the girl component
    bgamma = [0.0 for i in range(N)]  # probability that sample i comes from the boy component
    cur = [gp, bp, gmu, gsigma, bmu, bsigma]
    now = []
    times = 0
    ########## iterative updates ##########
    while times < 100:
        i = 0
        for x in height:
            ggamma[i] = gp * gauss(x, gmu, gsigma)
            bgamma[i] = bp * gauss(x, bmu, bsigma)
            s = ggamma[i] + bgamma[i]
            ggamma[i] /= s
            bgamma[i] /= s
            i += 1

        gn = sum(ggamma)
        gp = float(gn) / float(N)
        bn = sum(bgamma)
        bp = float(bn) / float(N)
        gmu = averageWeight(height, ggamma, gn)
        gsigma = varianceWeight(height, ggamma, gmu, gn)
        bmu = averageWeight(height, bgamma, bn)
        bsigma = varianceWeight(height, bgamma, bmu, bn)

        now = [gp, bp, gmu, gsigma, bmu, bsigma]
        if isSame(cur, now):
            break
        cur = now
        print("Times:\t", times)
        print("Girl mean/gsigma:\t", gmu, gsigma)
        print("Boy mean/bsigma:\t", bmu, bsigma)
        print("Boy/Girl:\t", bn, gn, bn + gn)
        print()
        times += 1
    return now
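calcEM is a two-component Gaussian-mixture EM fit; its helpers are not shown. The sketches below are inferred from the call sites (responsibility-weighted mean and standard deviation, element-wise convergence test) and may differ from the originals, but they make the snippet runnable on synthetic heights:

import math
import random

def gauss(x, mu, sigma):
    return math.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (math.sqrt(2 * math.pi) * sigma)

def averageWeight(height, gamma, n):
    # responsibility-weighted mean
    return sum(h * g for h, g in zip(height, gamma)) / n

def varianceWeight(height, gamma, mu, n):
    # responsibility-weighted standard deviation
    return math.sqrt(sum(g * (h - mu) ** 2 for h, g in zip(height, gamma)) / n)

def isSame(cur, now, tol=1e-6):
    return all(abs(c - w) < tol for c, w in zip(cur, now))

heights = ([random.gauss(165, 6) for _ in range(500)] +
           [random.gauss(178, 7) for _ in range(500)])
print(calcEM(heights))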
Example #11
def sensor_model(particle_poses, beacon_pose, beacon_loc):
    """ Apply sensor model and return particle weights. 
    Parameters
    ----------
    
    particle_poses: an M x 3 array of particle_poses (in the map
    coordinate system) where M is the number of particles.  Each pose
    is (x, y, theta) where x and y are in metres and theta is in
    radians.

    beacon_pose: the measured pose of the beacon (x, y, theta) in the
    robot's camera coordinate system.

    beacon_loc: the pose of the currently visible beacon (x, y, theta)
    in the map coordinate system.

    Returns
    -------
    An M element array of particle weights.  The weights do not need to be
    normalised.

    """

    M = particle_poses.shape[0]
    particle_weights = np.zeros(M)

    # Calculate beacon measured from camera frame.
    r = np.sqrt((beacon_pose[0]) ** 2 + (beacon_pose[1]) ** 2)
    phi = arctan2(beacon_pose[1], beacon_pose[0])

    for m in range(M):
        # Calculate the true location of beacon from particle pose m.
        r_m = np.sqrt((beacon_loc[0] - particle_poses[m][0]) ** 2 + (beacon_loc[1] - particle_poses[m][1]) ** 2) 
        phi_m = angle_difference(arctan2(beacon_loc[0] - particle_poses[m][0], beacon_loc[1] - particle_poses[m][1]), particle_poses[m][2])

        # Calculate error.
        r_val = r - r_m
        phi_val = angle_difference(phi, phi_m)

        # Update particle weights.
        particle_weights[m] = gauss(r_val, mu=0, sigma=(r_m ** 2)) * gauss(phi_val, mu=0, sigma=(r_m ** 2))


    return particle_weights
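sensor_model assumes a gauss density taking keyword mu/sigma and an angle_difference wrapper, neither of which is shown. Hypothetical stand-ins plus a placeholder call (the weighting model itself is the author's):

import numpy as np
from numpy import arctan2

def gauss(x, mu=0.0, sigma=1.0):
    # Gaussian density with keyword mu/sigma, matching the call above (assumed)
    return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (np.sqrt(2 * np.pi) * sigma)

def angle_difference(a, b):
    # wrap a - b into [-pi, pi)
    return (a - b + np.pi) % (2 * np.pi) - np.pi

particles = np.random.uniform(-1, 1, (100, 3))  # placeholder particle cloud
weights = sensor_model(particles,
                       beacon_pose=np.array([1.0, 0.2, 0.0]),
                       beacon_loc=np.array([2.0, 1.0, 0.0]))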
Example #12
def getisosweights_vgauss(weights,age,metallicity,isos,sigma,dsigmadlogt):
    """ same as getisoweights, but adds age-dependent Gaussian scatter """ 
    w=[0.]*len(isos)
    logt0 = numarray.log10(age[0])
    for m,a,feh in zip(weights,age,metallicity):
        logt = numarray.log10(a)
        sig = sigma+dsigmadlogt*(logt-logt0)
        for x in utils.frange(feh-4,feh+4,0.1):
            w[getn(x,a,isos)]+=m*utils.gauss(x,feh,sig)
    return w
Example #13
def getisosweights_vgauss(weights, age, metallicity, isos, sigma, dsigmadlogt):
    """ same as getisoweights, but adds age-dependent Gaussian scatter """
    w = [0.] * len(isos)
    logt0 = numarray.log10(age[0])
    for m, a, feh in zip(weights, age, metallicity):
        logt = numarray.log10(a)
        sig = sigma + dsigmadlogt * (logt - logt0)
        for x in utils.frange(feh - 4, feh + 4, 0.1):
            w[getn(x, a, isos)] += m * utils.gauss(x, feh, sig)
    return w
Example #14
def qsoContfit(obj, peak, searchLyA, sig, window_width=40):
    '''
    peakFinder.qsoContfit(obj, peak, searchLyA, window_width)
    =============================
    Fits the QSO continuum near a peak with a 3rd-order polynomial and
    subtracts it, then stores the new SN of the peak in its class.

    Parameters:
        obj: The SDSS object/spectrum on which the subtraction is applied
        peak: The peak under consideration
        sig: Sigma of the Gaussian used for the SN maximum-likelihood estimate
        searchLyA: True if we search for background LAEs, False for background ELGs
        window_width: Half-width (Angstroms) over which the polynomial is fitted
    Returns:
        accept: True if the SN is still high after subtraction, False otherwise.
    '''
    x0 = peak.wavelength
    window = np.linspace(obj.wave2bin(x0) - window_width,
                         obj.wave2bin(x0) + window_width,
                         2 * window_width + 1,
                         dtype=np.int16)
    median_local = np.median(obj.reduced_flux[window])

    fit_QSO = np.poly1d(np.polyfit(x=obj.wave[window],y=obj.reduced_flux[window],deg=3, \
        w=(np.abs(obj.reduced_flux[window]-median_local)<5)*np.sqrt(obj.ivar[window])) )

    new_flux = obj.reduced_flux[window] - fit_QSO(obj.wave[window])

    NormGauss = gauss(
        np.linspace(-window_width * 0.5, window_width * 0.5, window_width),
        0.0, 1.0, sig**2.0)
    NormGauss = NormGauss / np.sum(NormGauss)
    cj1_new = np.sum(
        new_flux *
        kernel(int(len(window) / 2), window_width, NormGauss, len(new_flux)) *
        obj.ivar[window])
    cj2_new = np.sum(
        obj.ivar[window] *
        kernel(int(len(window) / 2), window_width, NormGauss, len(window))**2)
    SN_fitted = cj1_new / np.sqrt(cj2_new)

    # the accept/reject decision is the same for both search modes
    if SN_fitted < 6:
        return False  # Reject
    if SN_fitted > 6:
        peak.reduced_sn = SN_fitted
        obj.reduced_flux_QSO[window] = new_flux
    return True  # Accept
Example #15
def bolEstm(obj, sig, width):
    NormGauss = gauss(np.linspace(-width * 0.5, width * 0.5, width), 0.0, 1.0,
                      sig ** 2.0)
    NormGauss = NormGauss / np.sum(NormGauss)
    Cj1 = np.array([np.sum(kernel(j + 0.5 * width, width, NormGauss,
                                  len(obj.wave)) * obj.reduced_flux * obj.ivar)
                    for j in range(int(len(obj.wave) - width))])
    Cj2 = np.array([np.sum(obj.ivar * kernel(j + 0.5 * width, width, NormGauss,
                                             len(obj.wave)) ** 2.0)
                    for j in range(int(len(obj.wave) - width))])
    SN = np.zeros(len(obj.wave))
    SN[int(width * 0.5): int(width * 0.5 + len(Cj1))] = Cj1 / np.sqrt(Cj2)
    return SN
Example #16
def getisosweights_sgauss(weights,age,sfr,metallicity,isos,sigma,dsigmadlogt):
    """ Same as getisoweights, but adds sfr-dependent Gaussian scatter.
        **** CAUTION: not yet tested ****
    """ 
    w=[0.]*len(isos)
    logt0 = numarray.log10(age[0])
    for m,a,feh,s in zip(weights,age,metallicity,sfr):
        ss = numarray.maximum(s,1.e-5) # floor the per-sample SFR
        logs = numarray.log10(ss)
        logt = numarray.log10(a)
        sig = sigma+dsigmadlogt*logs # width grows with log10(SFR)
        for x in utils.frange(feh-4,feh+4,0.1):
            w[getn(x,a,isos)]+=m*utils.gauss(x,feh,sig)
    return w
Example #17
def sumisos_gauss(mass,age,metallicity,fehlist,agelist,isos,sigma):
    """Faster version."""
    isoweight = {}
    for k in isos.keys():
        isoweight[k] = 0.
    for m,a,z in zip(mass,age,metallicity):
        feh = numarray.log10(z/SOLAR)
        for x in utils.frange(feh-4,feh+4,0.1):
            isoweight[getindices(x,a,fehlist,agelist)]+= utils.gauss(x,feh,sigma)*m
    summed_iso = 0.*isos[isos.keys()[0]]
    for k in isoweight.keys():
        summed_iso+=isoweight[k]*isos[k]
#    p(isoweight)
    return summed_iso
Example #18
def galSave(doublet, obj, peak_candidates, doublet_index, savedir, em_lines,
            doPlot, prodCrit):
    detection = False
    preProd = 0.0
    nxtProd = 0.0
    if doublet:
        preProd, nxtProd = fitcSpec(obj, peak_candidates[doublet_index])
        if preProd + nxtProd > prodCrit:
            raise Exception("Rejected by comparing to other fibers")
        z_s = peak_candidates[doublet_index].wavelength / 3727.09 - 1.0
        # Find peak near inferred OIII and Hbeta by fitting
        fitChi, fitRes, fitSn = _findPeak(obj, z_s, obj.SN, width=20.0)
        detection = _doubletSave(obj, z_s, peak_candidates, doublet_index,
                                 savedir, preProd, nxtProd, fitChi, fitSn)
        detection = _dblmultSave(obj, z_s, peak_candidates, savedir,
                                 detection, em_lines)
    elif len(peak_candidates) > 1:
        detection = _multletSave(obj, peak_candidates, savedir, em_lines)
    if not detection:
        raise Exception("Rejected since source too near")
    peaks = []
    for k in range(len(peak_candidates)):
        peak = peak_candidates[k]
        if k == doublet_index and doublet:
            peaks.append([peak.wavDoublet[0], peak.ampDoublet[0],
                          peak.varDoublet])
            peaks.append([peak.wavDoublet[1], peak.ampDoublet[1],
                          peak.varDoublet])
        else:
            peaks.append([peak.wavSinglet, peak.ampSinglet, peak.varSinglet])
    peak_number = len(peak_candidates)
    if (peak_number > 1 or doublet) and detection:
        if doPlot:
            fit = 0.0
            for k in np.arange(len(peaks)):
                fit = fit + gauss(obj.wave, x_0=peaks[k][0], A=peaks[k][1],
                                  var=peaks[k][2])
            o3hbflux = gauss3(obj.wave, fitRes, 4862.68 * (1.0 + z_s),
                              4960.30 * (1.0 + z_s), 5008.24 * (1.0 + z_s))
            o3b = [4842.68 * (1.0 + z_s), 5028.24 * (1.0 + z_s)]
            o3hbwave = [4862.68 * (1.0 + z_s) - fitRes[0],
                        5008.24 * (1.0 + z_s) - fitRes[3]]
            plotGalaxyLens(doublet, obj, savedir, peak_candidates, preProd,
                           nxtProd, doublet_index, fit, o3hbflux, fitChi, o3b,
                           o3hbwave)
        if doublet:
            x_doublet = np.mean(peak_candidates[doublet_index].wavDoublet)
            bd = np.linspace(obj.wave2bin(x_doublet) - 10,
                             obj.wave2bin(x_doublet) + 10, 21, dtype=np.int16)
            galSaveflux(obj.reduced_flux[bd], obj.fiberid, savedir)
Example #19
def getisosweights_sgauss(weights, age, sfr, metallicity, isos, sigma,
                          dsigmadlogt):
    """ Same as getisoweights, but adds sfr-dependent Gaussian scatter.
        **** CAUTION: not yet tested ****
    """
    w = [0.] * len(isos)
    logt0 = numarray.log10(age[0])
    for m, a, feh, s in zip(weights, age, metallicity, sfr):
        ss = numarray.maximum(s, 1.e-5)  # floor the per-sample SFR
        logs = numarray.log10(ss)
        logt = numarray.log10(a)
        sig = sigma + dsigmadlogt * logs  # width grows with log10(SFR)
        for x in utils.frange(feh - 4, feh + 4, 0.1):
            w[getn(x, a, isos)] += m * utils.gauss(x, feh, sig)
    return w
Example #20
    def _results(self,sampler):
            best = sampler.flatchain[sampler.flatlnprobability.argmax()]
            tres = int(np.rint(best[0]))
            best = best[1:].reshape(self.channels,7)
            newSfilt = np.empty((self.channels,self.sres))
            newTfilt = np.empty((self.channels,tres))

            for i in range(self.channels):
                z1,p1,p2,p3,l, mu, sig = best[i]
                newTfilt[i] = np.asarray(utils.overtime(0,tres,utils.P3Z1,z1,p1,p2,p3,l,1))
                newSfilt[i] = utils.gauss(np.arange(0,self.sres),mu,sig)

            newstrf = np.dot(newSfilt.T,newTfilt)
            newstrf /= np.max(np.abs(newstrf))
            self.sampler = sampler
            self.maxlik = (newstrf,newSfilt,newTfilt)
Example #21
    def gettruedailybaseline(self,maxadc=None,corr=None):
        dataarray = []
        nrofday = int((self.time[-1] - self.time[0])/(24*60*60))
        firstday = utils.tstamptodatetime(self.time[0])
        lastday = utils.tstamptodatetime(self.time[-1])
        firstday = datetime.datetime(firstday.year,firstday.month,firstday.day,0,0,0)
        hour = 0
        minute = 0      
        length = 216
        for d in range(0,nrofday,1):
            t0 = firstday + datetime.timedelta(days=d)
            t1 = firstday + datetime.timedelta(days=d+1)
            t0 = utils.datettotimestamp(t0)
            t1 = utils.datettotimestamp(t1)
            daydata = self.getnewdataset(t0,t1)
            if len(daydata.radio)== length and len(daydata.tempLL)== length: 
                dataarray.append(daydata)
        count = 0
        truebls = []
        truetemps = []
        radtemp = []
        # produce a fake signal
        if maxadc is not None:
            time = np.linspace(0, 24, length)
            a = maxadc
            b = 17
            c = 2
            sig = utils.gauss(time, a, b, c)
        for dat in dataarray:
            rt = radiotemp.Radiotemp()
            if corr == None or corr==True:
                radiodata = dat.radioc
#                print 'dat.radio = ', dat.radio
                rt.corr = True
            else:
                radiodata = dat.radio
                rt.corr = False
            truebl = radiodata
            if maxadc is not None:
                truebl = truebl - sig
            truetemps.append(dat.tempLL)
            rt.radio = truebl
            rt.temp = dat.tempLL
            rt.date = dat.date
#            print 'rt.date = ' , rt.date
            radtemp.append(rt)
        return radtemp
Example #22
def sumisos_gauss(mass, age, metallicity, fehlist, agelist, isos, sigma):
    """Faster version."""
    isoweight = {}
    for k in isos.keys():
        isoweight[k] = 0.
    for m, a, z in zip(mass, age, metallicity):
        feh = numarray.log10(z / SOLAR)
        for x in utils.frange(feh - 4, feh + 4, 0.1):
            isoweight[getindices(x, a, fehlist,
                                 agelist)] += utils.gauss(x, feh, sigma) * m
    summed_iso = 0. * isos[isos.keys()[0]]
    for k in isoweight.keys():
        summed_iso += isoweight[k] * isos[k]
    #    p(isoweight)
    return summed_iso
Example #23
def galSave(doublet, obj, peak_candidates, doublet_index, savedir, em_lines,
            doPlot, prodCrit=0.6):
    detection = False
    preProd = 1.0
    nxtProd = 1.0
    if doublet:
        if len(peak_candidates):
            preProd, nxtProd = fitcSpec(obj, peak_candidates[doublet_index])
            if preProd + nxtProd > prodCrit:
                raise Exception("Rejected by comparing to other fibers")
        z_s = peak_candidates[doublet_index].wavelength / 3727.24 - 1.0
        detection = _doubletSave(obj, z_s, peak_candidates, doublet_index,
                                 savedir, preProd, nxtProd)
        if len(peak_candidates):
            detection = _dblmultSave(obj, z_s, peak_candidates, savedir,
                                     detection, em_lines)
    elif len(peak_candidates) > 1:
        detection = _multletSave(obj, peak_candidates, savedir, em_lines)
    peaks = []
    for k in range(len(peak_candidates)):
        peak = peak_candidates[k]
        if k == doublet_index and doublet:
            peaks.append([peak.wavDoublet[0], peak.ampDoublet[0],
                          peak.varDoublet])
            peaks.append([peak.wavDoublet[1], peak.ampDoublet[1],
                          peak.varDoublet])
        else:
            peaks.append([peak.wavSinglet, peak.ampSinglet, peak.varSinglet])
    peak_number = len(peak_candidates)
    if (peak_number > 1 or doublet) and detection:
        if doPlot:
            fit = 0.0
            for k in np.arange(len(peaks)):
                fit = fit + gauss(obj.wave, x_0=peaks[k][0], A=peaks[k][1],
                                  var=peaks[k][2])
            plotGalaxyLens(doublet, obj, savedir, peak_candidates, preProd,
                           nxtProd, doublet_index, fit)
        if doublet:
            x_doublet = np.mean(peak_candidates[doublet_index].wavDoublet)
            bd = np.linspace(obj.wave2bin(x_doublet) - 10,
                             obj.wave2bin(x_doublet) + 10, 21, dtype=np.int16)
            galSaveflux(obj.reduced_flux[bd], obj.fiberid, savedir)
Example #24
def qsoContfit(obj, peak, searchLyA, sig, window_width = 40):
    '''
    peakFinder.qsoContfit(obj, peak, searchLyA, window_width)
    =============================
    Fits the QSO continuum near a peak with a 3rd-order polynomial and
    subtracts it, then stores the new SN of the peak in its class.

    Parameters:
        obj: The SDSS object/spectrum on which the subtraction is applied
        peak: The peak under consideration
        sig: Sigma of the Gaussian used for the SN maximum-likelihood estimate
        searchLyA: True if we search for background LAEs, False for background ELGs
        window_width: Half-width (Angstroms) over which the polynomial is fitted
    Returns:
        accept: True if the SN is still high after subtraction, False otherwise.
    '''
    x0 = peak.wavelength
    window = np.linspace(obj.wave2bin(x0)-window_width,obj.wave2bin(x0)+window_width,2*window_width+1,dtype = np.int16)
    median_local = np.median(obj.reduced_flux[window])

    fit_QSO = np.poly1d(np.polyfit(x=obj.wave[window],y=obj.reduced_flux[window],deg=3, \
        w=(np.abs(obj.reduced_flux[window]-median_local)<5)*np.sqrt(obj.ivar[window])) )
    
    new_flux = obj.reduced_flux[window] - fit_QSO(obj.wave[window])
    
    NormGauss = gauss(np.linspace(-window_width * 0.5, window_width * 0.5, window_width), 0.0, 1.0,sig ** 2.0)
    NormGauss = NormGauss / np.sum(NormGauss)
    cj1_new = np.sum(new_flux*kernel(int(len(window)/2),window_width,NormGauss,len(new_flux))*obj.ivar[window])
    cj2_new = np.sum(obj.ivar[window]*kernel(int(len(window)/2),window_width,NormGauss,len(window))**2)
    SN_fitted = cj1_new/np.sqrt(cj2_new)

    # the accept/reject decision is the same for both search modes
    if SN_fitted < 6:
        return False  # Reject
    if SN_fitted > 6:
        peak.reduced_sn = SN_fitted
        obj.reduced_flux_QSO[window] = new_flux
    return True  # Accept
Example #25
def BeamElem(e):
    """
	Calculate element stiffness matrix and element nodal body force vector

    Args:
        e : (int) element number

    Returns: ke, fe
        ke : (numpy(neqe,neqe)) element stiffness matrix
        fe : (numpy(neqe,1)) element nodal force vector
	"""
    IENe = model.IEN[:, e] - 1  # extract local connectivity information
    xe = model.x[IENe]  # extract element x coordinates
    J = (xe[model.nen - 1] - xe[0]) / 2  # compute Jacobian
    w, gp = gauss(model.ngp)

    ke = np.zeros((model.neqe, model.neqe))
    fe = np.zeros((model.neqe, 1))

    for i in range(model.ngp):
        N = Nmatrix1D(gp[i], xe)
        B = Bmatrix1D(gp[i], xe) * 1 / J**2

        Ae = N[0][::2] @ model.CArea[IENe]
        Ee = model.E[e]
        be = model.body[e]

        ke = ke + w[i] * Ae * Ee * (B.T @ B)
        fe = fe + w[i] * N.T * be

    ke = J * ke
    fe = J * fe

    for i in range(model.np):
        Pi = model.P[i]
        xpi = model.xp[i]
        if xe[0] <= xpi < xe[-1]:
            fe = fe + Pi * np.transpose(
                Nmatrix1D((2 * xpi - xe[0] - xe[-1]) / (xe[-1] - xe[0]), xe))

    return ke, fe
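In the finite-element examples (#25 and #30), gauss(ngp) is not a density at all: it returns the weights and points of a Gauss-Legendre quadrature rule on [-1, 1]. A stand-in consistent with the call sites, assuming NumPy:

import numpy as np

def gauss(ngp):
    # Gauss-Legendre quadrature rule: ngp weights and points on [-1, 1]
    gp, w = np.polynomial.legendre.leggauss(ngp)
    return w, gp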
Example #26
def find_gw(events, eventname, datadir):
    """Find Gravitational waves

    Use utils.py function to search for GWs in LIGO data for a given event

    Args:
        events    (dict): metadata about events, loaded from json file
        eventname (str): name of a specific event
        datadir   (str): relative path to where the data is stored
    """

    # permanent parameters (for plotting)
    titlesize = 20
    labelsize = 14
    legsize = 14

    # ############# OVERVIEW OF DATA #############
    print('DATA OVERVIEW')
    # load everything
    time, fs, strains, templs = ut.load_event(eventname, datadir, events)
    strain_H1, strain_L1 = strains
    templ_H1, templ_L1 = templs
    toff = time.min()  # set time offset

    # get frequencies for the dataset
    freqs = np.fft.rfftfreq(strain_H1.size, 1.0 / fs)

    # first plot of data and templates
    fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(8, 6))
    ax[0].plot(time - toff,
               strain_H1 * 1e19,
               linewidth=0.5,
               color='b',
               label='H1 Data')
    ax[0].plot(time - toff,
               strain_L1 * 1e19,
               linewidth=0.5,
               color='r',
               label='L1 Data')
    ax[0].set_ylabel(r'Strain $\times 10^{19}$', fontsize=labelsize)
    ax[0].legend(loc=1, fontsize=legsize)
    ax[0].set_title('LIGO Data for event {}'.format(eventname),
                    fontsize=titlesize)
    ax[1].plot(time - toff,
               templ_H1 * 1e19,
               linewidth=0.5,
               color='b',
               label='H1 Template')
    ax[1].plot(time - toff,
               templ_L1 * 1e19,
               linewidth=0.5,
               color='r',
               label='L1 Template')
    ax[1].set_ylabel(r'Strain $\times 10^{19}$', fontsize=labelsize)
    ax[1].set_xlabel(r'GPS Time-{} $\times 10^{{9}}$ s'.format(toff / 1e9),
                     fontsize=labelsize)
    ax[1].legend(loc=1, fontsize=legsize)
    plt.show()
    print()
    print()

    # ############# PART A (NOISE MODEL) #############
    print('(a) NOISE MODEL')
    # get power spectrum for each detector
    window = np.blackman(strain_H1.size)
    powers_H1 = ut.powerspec(strain_H1, window=window)
    powers_L1 = ut.powerspec(strain_L1, window=window)

    # plot the ASD
    plt.loglog(freqs, np.sqrt(powers_H1), 'b', label='H1')
    plt.loglog(freqs, np.sqrt(powers_L1), 'r', label='L1')
    plt.xlim(20, 2000)  # focus on interesting range
    plt.ylabel(r'ASD (strain/$\sqrt{\textrm{Hz}}$)', fontsize=labelsize)
    plt.xlabel(r'Frequency (Hz)', fontsize=labelsize)
    plt.title(
        r'Log-log plot of the Amplitude Spectra for {}'.format(eventname),
        fontsize=titlesize)
    plt.legend(loc=1, fontsize=legsize)
    plt.show()
    print()
    print()

    # ############# PART B (MATCHED FILTER) #############
    print('(b) MATCHED FILTER')
    # get filter output
    mf_H1 = ut.matchedfilt(strain_H1, templ_H1, powers_H1, window=window)
    mf_L1 = ut.matchedfilt(strain_L1, templ_L1, powers_L1, window=window)

    # show filter output
    fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(8, 6))
    ax[0].plot(time - toff, mf_H1, linewidth=0.5, color='b', label='H1 Output')
    ax[0].set_ylabel(r'Filter Output', fontsize=labelsize)
    ax[0].legend(loc=1, fontsize=legsize)
    ax[0].set_title('Matched Filtering Outputs in Time Domain',
                    fontsize=titlesize)
    ax[1].plot(time - toff, mf_L1, linewidth=0.5, color='r', label='L1 Output')
    ax[1].set_ylabel(r'Filter Output', fontsize=labelsize)
    ax[1].set_xlabel(r'GPS Time-{} $\times 10^{{9}}$ s'.format(toff / 1e9),
                     fontsize=labelsize)
    ax[1].legend(loc=1, fontsize=legsize)
    plt.show()
    print()
    print()

    # ############# PART C (SNR) #############
    print('(c) SNR')
    # get snr for each detector
    snr_H1 = ut.get_snr(mf_H1, templ_H1, powers_H1, window=window)
    snr_L1 = ut.get_snr(mf_L1, templ_L1, powers_L1, window=window)
    snr_tot = np.sqrt(snr_H1**2 + snr_L1**2)

    # print SNR max
    print("Max SNR H1: {:.4f}".format(np.max(snr_H1)))
    print("Max SNR L1: {:.4f}".format(np.max(snr_L1)))
    print("Max SNR (total): {:.4f}".format(np.max(snr_tot)))

    # show SNR
    fig, ax = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(8, 9))
    ax[0].plot(time - toff, snr_H1, linewidth=0.5, color='b', label='H1 SNR')
    ax[0].set_ylabel(r'SNR', fontsize=labelsize)
    ax[0].legend(loc=1, fontsize=legsize)
    ax[0].set_title('Signal to Noise Ratio (SNR) in Time Domain',
                    fontsize=titlesize)
    ax[1].plot(time - toff, snr_L1, linewidth=0.5, color='r', label='L1 SNR')
    ax[1].set_ylabel(r'SNR', fontsize=labelsize)
    ax[1].legend(loc=1, fontsize=legsize)
    ax[2].plot(time - toff,
               snr_tot,
               linewidth=0.5,
               color='g',
               label='Combined SNR')
    ax[2].set_ylabel(r'SNR', fontsize=labelsize)
    ax[2].set_xlabel(r'GPS Time-{} $\times 10^{{9}}$ s'.format(toff / 1e9),
                     fontsize=labelsize)
    ax[2].legend(loc=1, fontsize=legsize)
    plt.show()
    print()
    print()

    # ############# PART D (ANALYTIC SNR) #############
    print('(d) ANALYTIC SNR')
    # get snr for each detector
    esnr_H1 = ut.expect_snr(templ_H1, powers_H1, window=window)
    esnr_L1 = ut.expect_snr(templ_L1, powers_L1, window=window)
    esnr_tot = np.sqrt(esnr_H1**2 + esnr_L1**2)

    # print SNR max
    print("Max SNR H1: {:.4f}".format(np.max(esnr_H1)))
    print("Max SNR L1: {:.4f}".format(np.max(esnr_L1)))
    print("Max SNR (total): {:.4f}".format(np.max(esnr_tot)))

    # show SNR
    fig, ax = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(8, 9))
    ax[0].plot(time - toff, esnr_H1, linewidth=0.5, color='b', label='H1 SNR')
    ax[0].set_ylabel(r'SNR', fontsize=labelsize)
    ax[0].legend(loc=1, fontsize=legsize)
    ax[0].set_title('Analytic Expected SNR in Time Domain', fontsize=titlesize)
    ax[1].plot(time - toff, esnr_L1, linewidth=0.5, color='r', label='L1 SNR')
    ax[1].set_ylabel(r'SNR', fontsize=labelsize)
    ax[1].legend(loc=1, fontsize=legsize)
    ax[2].plot(time - toff, esnr_tot, linewidth=0.5, color='g',
               label='Combined SNR')
    ax[2].set_ylabel(r'SNR', fontsize=labelsize)
    ax[2].set_xlabel(r'GPS Time-{} $\times 10^{{9}}$ s'.format(toff / 1e9),
                     fontsize=labelsize)
    ax[2].legend(loc=1, fontsize=legsize)
    plt.show()
    print()
    print()

    # ############# PART E (HALF POWER FREQUENCY) #############
    print('(e) HALF POWER FREQUENCY')
    # get half power frequency for each detector
    hf_H1 = ut.get_hf(freqs, templ_H1, powers_H1, window=window)
    hf_L1 = ut.get_hf(freqs, templ_L1, powers_L1, window=window)
    print("Half frequency for H1: {} Hz".format(hf_H1))
    print("Half frequency for L1: {} Hz".format(hf_L1))
    print()
    print()

    # ############# PART F (TIME OF ARRIVAL) #############
    print('(f) TIME OF ARRIVAL')
    # find snr peak time
    imax_H1 = np.argmax(snr_H1)
    imax_L1 = np.argmax(snr_L1)
    nside = 10

    # get time and uncertainty for each detector
    sguess = 0.001
    ta_H1, eta_H1 = ut.toa(time, snr_H1, sguess=sguess, nside=nside)
    ta_L1, eta_L1 = ut.toa(time, snr_L1, sguess=sguess, nside=nside)
    print('H1 time of arrival: {} ± {}'.format(ta_H1, eta_H1))
    print('L1 time of arrival: {} ± {}'.format(ta_L1, eta_L1))

    # get Gaussian profiles
    prof_H1 = ut.gauss(time[imax_H1 - nside:imax_H1 + nside], np.max(snr_H1),
                       ta_H1, eta_H1)
    prof_L1 = ut.gauss(time[imax_L1 - nside:imax_L1 + nside], np.max(snr_L1),
                       ta_L1, eta_L1)

    # Positional uncertainty in the sky => angle.
    # We want to use the difference in time, the speed of light,
    # and the difference in distance to infer an uncertainty in position angle.
    # We use ~3000 km for distance
    # (https://www.ligo.caltech.edu/page/ligo-detectors).
    tdiff = np.abs(ta_H1 - ta_L1) * u.s
    dist = 3e3 * u.km
    c = 3e8 * u.m / u.s
    epos = tdiff * c / dist
    msg = ('Typical positional uncertainty:'
           ' {}'.format(epos.to('rad',
                                equivalencies=u.dimensionless_angles())))
    print(msg)

    fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(8, 6))
    ax[0].plot(
        time[imax_H1 - nside:imax_H1 + nside] - toff,  # SNR near arrival
        snr_H1[imax_H1 - nside:imax_H1 + nside],
        'bo',
        label='SNR H1')
    ax[0].plot(
        time[imax_H1 - nside:imax_H1 + nside] - toff,  # Gaussian profile
        prof_H1,
        'g-',
        label='Gaussian profile (H1)')
    ax[0].set_ylabel('SNR', fontsize=labelsize)
    ax[0].set_title('Gaussian Profiles on SNR Peaks', fontsize=titlesize)
    ax[0].legend(fontsize=legsize)
    ax[1].plot(
        time[imax_L1 - nside:imax_L1 + nside] - toff,  # SNR near arrival
        snr_L1[imax_L1 - nside:imax_L1 + nside],
        'ro',
        label='SNR L1')
    ax[1].plot(
        time[imax_L1 - nside:imax_L1 + nside] - toff,  # Gaussian profile
        prof_L1,
        'c-',
        label='Gaussian profile (L1)')
    ax[1].set_ylabel('SNR', fontsize=labelsize)
    ax[1].set_xlabel(r'GPS Time-{} $\times 10^{{9}}$ s'.format(toff / 1e9),
                     fontsize=labelsize)
    ax[1].legend(fontsize=legsize)
    plt.show()
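find_gw leans on a project-specific ut module (load_event, powerspec, matchedfilt, get_snr, ...) whose implementations are not shown. For illustration only, a windowed one-sided power spectrum of the kind ut.powerspec presumably computes (name and normalisation assumed):

import numpy as np

def powerspec(strain, window=None):
    # hypothetical stand-in for ut.powerspec: windowed one-sided power spectrum
    if window is None:
        window = np.ones(strain.size)
    ft = np.fft.rfft(strain * window)
    return np.abs(ft) ** 2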
Example #27
    def plot_histogram_filtering(self, good_matches, best_matches,
                                 histogram_filter):
        """
        Plots the result of the match histogram filtering

        :param good_matches: array of match pairs (point in image 1 vs point in image 2)
        :type good_matches: ndarray (nx2)
        :param best_matches: array of histogram-filtered match pairs (point in image 1 vs point in image 2)
        :type best_matches: ndarray (nx2)
        :param histogram_filter: histogram filtering object
        :type histogram_filter: HistogramLogicFilter
        """

        img1 = self.train_image_manager.image
        img2 = self.query_image_manager.image
        kp1 = self.train_image_manager.keypoints
        kp2 = self.query_image_manager.keypoints

        angle_hist = histogram_filter.angle_histogram
        length_hist = histogram_filter.length_histogram

        angle_th = histogram_filter.angle_threshold
        length_th = histogram_filter.length_threshold

        initial_matches_img = cv2.drawMatches(img1,
                                              kp1,
                                              img2,
                                              kp2,
                                              good_matches,
                                              None,
                                              flags=2)
        final_matches_img = cv2.drawMatches(img1,
                                            kp1,
                                            img2,
                                            kp2,
                                            best_matches,
                                            None,
                                            flags=2)

        fig = Figure()
        canvas = FigureCanvas(figure=fig)
        ax = self.remove_figure_padding(fig)
        n_bins = min(
            int(np.pi / 10 * len(angle_hist.bin_centres) /
                np.ptp(angle_hist.bin_centres)), 50)
        histogram_span = [-np.pi / 20, np.pi / 20]
        gaussian_samples = np.linspace(histogram_span[0], histogram_span[1],
                                       n_bins)
        ax.hist(angle_hist.data, bins=n_bins, range=histogram_span, color='b')
        hist_fit_angle = gauss(gaussian_samples,
                               *angle_hist.fitted_gauss_coefficients)
        ax.bar(angle_hist.fitted_gauss_coefficients[1] -
               angle_th * angle_hist.fitted_gauss_coefficients[2] / 2,
               np.max(angle_hist.histogram),
               angle_th * angle_hist.fitted_gauss_coefficients[2],
               alpha=0.4,
               color='r')
        ax.plot(gaussian_samples, hist_fit_angle, color='g')
        initial_histogram_img = self.render_and_crop_canvas(canvas, ax)

        fig_2 = Figure()
        canvas = FigureCanvas(figure=fig_2)
        ax = self.remove_figure_padding(fig_2)
        ax.hist(length_hist.data, bins=length_hist.bins, color='b')
        hist_fit_length = gauss(length_hist.bin_centres,
                                *length_hist.fitted_gauss_coefficients)
        ax.bar(length_hist.fitted_gauss_coefficients[1] -
               length_th * length_hist.fitted_gauss_coefficients[2] / 2,
               np.max(length_hist.histogram),
               length_th * length_hist.fitted_gauss_coefficients[2],
               alpha=0.4,
               color='r')
        ax.plot(length_hist.bin_centres, hist_fit_length, color='g')
        final_histogram_img = self.render_and_crop_canvas(canvas, ax)

        matches_img = np.append(initial_matches_img, final_matches_img, axis=0)
        final_img = self.resize_image_aspect_ratio(
            np.append(initial_histogram_img, final_histogram_img, axis=0),
            new_height=matches_img.shape[0])
        final_img = np.append(matches_img, final_img, axis=1)

        return self.image_bridge.cv2_to_compressed_imgmsg(final_img)
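Here gauss(samples, *fitted_gauss_coefficients) implies an amplitude-parameterised Gaussian whose three coefficients were fitted to a histogram, presumably with something like scipy's curve_fit. A hypothetical sketch of that pattern:

import numpy as np
from scipy.optimize import curve_fit

def gauss(x, A, mu, sigma):
    # amplitude-parameterised Gaussian, matching gauss(samples, *coefficients)
    return A * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))

# hypothetical fit, roughly what HistogramLogicFilter presumably performs
counts, edges = np.histogram(np.random.randn(1000), bins=30)
centres = 0.5 * (edges[:-1] + edges[1:])
coeffs, _ = curve_fit(gauss, centres, counts, p0=[counts.max(), 0.0, 1.0])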
Example #28
    est_time_completion += proj.task[i].ed
    est_mu += proj.task[i].mu
    est_sigma += proj.task[i].sigma**2
est_sigma = est_sigma**0.5  # total sigma: task standard deviations add in quadrature

proj_completion_time_3 = []
proj_completion_prob_3 = [DiscreteDistribution({
    '1': 1,
    '0': 0
})] * (2 * config.n_datum + 1)
for datum in range(-config.n_datum, config.n_datum + 1):
    proj_completion_time_3.append(est_time_completion +
                                  len(proj.critical) * datum)
for i in range(len(proj_completion_time_3)):
    prob_node = Node('node')
    td_node_prob = gauss(proj_completion_time_3[i], est_mu, est_sigma)
    print(td_node_prob)
    td_node = DiscreteDistribution({'1': td_node_prob, '0': 1 - td_node_prob})
    prob_node.set_predecessor([td_node, total_risk_prob], prob=True)
    prob_node.set_cpt(CPT_R_ED)
    prob_node.calc_prob()
    proj_completion_prob_3[i] = prob_node.prob.parameters[0]['1']

proj_completion_prob_2 = [i.parameters[0]['1'] for i in proj_completion_prob_2]
# print(proj_completion_prob_2)
print('=' * 50)
print('INFO TASK')
print(proj.info_task)
print('=' * 50)
print('Critical path: ', '-'.join(proj.critical_path))
print('Time to complete project: ', proj.time_completion)
Example #29
def sensor_model(poses, beacon_pose, beacon_loc, tf, odom_pose):
    """Apply sensor model and return particle weights.

    Parameters
    ----------
    poses: an M x 3 array of robot poses where M is the number of
    particles.  Each pose is (x, y, theta) where x and y are in metres
    and theta is in radians.

    beacon_pose: the measured pose of the beacon (x, y, theta)
    relative to the robot's camera pose.

    beacon_loc: the known global pose of the beacon (x, y, theta).

    Returns
    -------
    An M element array of particle weights.  The weights do not need to be
    normalised.

    """

    # For each particle calculate its weight based on its pose,
    # the relative beacon pose, and the global beacon location.

    beacon_measured = np.zeros(3)

    # Distance to the beacon from the robot
    distance_b = np.sqrt(beacon_pose[0]**2 + beacon_pose[1]**2)

    # This is bearing to beacon from local frame x axis
    bearing = odom_pose[2] + np.arctan2(beacon_pose[1], beacon_pose[0])

    # Use the quadrant-appropriate trig transform to find the local x and y offsets
    if abs(bearing) > np.pi / 2:
        x_add = distance_b * np.sin(bearing % (np.pi / 2))
        y_add = distance_b * np.cos(bearing % (np.pi / 2))
    else:
        x_add = distance_b * np.cos(bearing)
        y_add = distance_b * np.sin(bearing)

    #print(x_add, ' = x global ', y_add, ' = y global ')
    #print(beacon_pose[0], ' = x ', beacon_pose[1], ' = y ')
    #print('range global = ', np.sqrt(x_add**2 + y_add**2), 'range measured = ', np.sqrt(beacon_pose[0]**2 + beacon_pose[1]**2))

    # This is the local beacon location from measurement
    beacon_measured[0] = odom_pose[0] + x_add
    beacon_measured[1] = odom_pose[1] + y_add
    beacon_measured[2] = beacon_pose[2]

    # This transforms the local poses into global poses
    g_beacon_mes = transform.transform_pose(tf, beacon_measured)
    g_robot_pose = transform.transform_pose(tf, odom_pose)

    # This is the robot range and angle from pg. 122 of the notes.
    robot_range = np.sqrt((g_beacon_mes[0] - g_robot_pose[0])**2 +
                          (g_beacon_mes[1] - g_robot_pose[1])**2)
    robot_angle = angle_difference(
        g_robot_pose[2],
        np.arctan((g_beacon_mes[1] - g_robot_pose[1]) /
                  (g_beacon_mes[0] - g_robot_pose[0])))
    # This is the particle range and angle from pg. 122 of the notes.
    particle_range = np.sqrt((beacon_loc[0] - poses[:, 0])**2 +
                             (beacon_loc[1] - poses[:, 1])**2)
    particle_angle = angle_difference(
        poses[:, 2],
        np.arctan(
            (beacon_loc[1] - poses[:, 1]) / (beacon_loc[0] - poses[:, 0])))

    M = poses.shape[0]
    weights = np.ones(M)

    #print('robot_range - particle_range', robot_range-particle_range)
    #print('robot_angle - particle_angle', angle_difference(particle_angle, robot_angle))
    #print('gauss of range diff = ', gauss(robot_range - particle_range, 0.01))
    #print('gauss of angle diff = ', *gauss( angle_difference(particle_angle, robot_angle) , 0.01 ))

    # Weights calculated by multiplying PDFs. dont need to be normalised.
    weights = gauss(robot_range - particle_range, sigma=0.1) * gauss(
        angle_difference(robot_angle, particle_angle), sigma=0.1)

    return weights
Example #30
def disp_moment_and_shear(e, ax1, ax2, ax3):
    """
	Print the moments and shear forces at the Gauss points, plot displacements,
	moments and shear forces distributions obtained by FE analysis.

	Args:
		e: (int) element number
		ax1 : axis to draw displacement distribution
		ax2 : axis to draw moment distribution
		ax3 : axis to draw shear force distribution
	"""
    de = model.d[model.LM[:, e] - 1]
    IENe = model.IEN[:, e] - 1
    xe = model.x[IENe]
    J = (xe[-1] - xe[0]) / 2
    w, gp = gauss(model.ngp)

    gauss_pt = np.zeros(model.ngp)
    moment_gauss = np.zeros(model.ngp)
    shear_gauss = np.zeros(model.ngp)

    for i in range(model.ngp):
        gauss_pt[i] = 0.5 * (xe[0] + xe[-1]) + J * gp[i]
        N = Nmatrix1D(gp[i], xe)
        B = Bmatrix1D(gp[i], xe) * 1 / J**2
        S = Smatrix1D(gp[i], xe) * 1 / J**3
        Ee = model.E[e]

        moment_gauss[i] = Ee * B @ de
        shear_gauss[i] = Ee * S @ de

    print("%8d %12.6f %12.6f %16.6f %16.6f %16.6f %16.6f" %
          (e, gauss_pt[0], gauss_pt[1], moment_gauss[0], moment_gauss[1],
           shear_gauss[0], shear_gauss[1]))

    # equally distributed coordinate within an element
    xplot = np.linspace(xe[0], xe[-1], model.nplot)
    xplotgauss = (2 * xplot - xe[0] - xe[-1]) / (xe[-1] - xe[0])

    displacement = np.zeros(model.nplot)
    moment = np.zeros(model.nplot)
    shear = np.zeros(model.nplot)

    for i in range(model.nplot):
        xi = xplotgauss[i]
        N = Nmatrix1D(xi, xe)
        B = Bmatrix1D(xi, xe) * 1 / J**2
        S = Smatrix1D(xi, xe) * 1 / J**3
        Ee = model.E[e]
        displacement[i] = N @ de
        moment[i] = Ee * B @ de
        shear[i] = Ee * S @ de

    # plot displacements and moments and shear forces
    line1, = ax1.plot(xplot, displacement)
    line2, = ax2.plot(xplot, moment)
    line3, = ax3.plot(xplot, shear)
    if e == 0:
        line1.set_label('FE')
        line2.set_label('FE')
        line3.set_label('FE')
Example #31
    def getfakedailybaseline(self,typeofbl=None,maxadc=None,corr=None):
        #        self.tempcorrection(1)
        dataarray = []
        nrofday = int((self.time[-1] - self.time[0])/(24*60*60))
        firstday = utils.tstamptodatetime(self.time[0])
        lastday = utils.tstamptodatetime(self.time[-1])
        firstday = datetime.datetime(firstday.year,firstday.month,firstday.day,0,0,0)
        hour = 0
        minute = 0      
        for d in range(0,nrofday,1):
            t0 = firstday + datetime.timedelta(days=d)
            t1 = firstday + datetime.timedelta(days=d+1)
            t0 = utils.datettotimestamp(t0)
            t1 = utils.datettotimestamp(t1)
            daydata = self.getnewdataset(t0,t1)
            if len(daydata.radio) > 215: 
                dataarray.append(daydata)

        sizeofdat = len(dataarray)
        maxlen = 216 # maximum nr of point in one day (1 point every 400s)
        # 1) create a average spectrum
        maxfreq = np.fft.rfftfreq(maxlen,400)
        specarray = np.ndarray(shape= (len(dataarray), len(maxfreq) ) )
        phasearray =np.ndarray(shape= (len(dataarray), len(maxfreq) ) )
        count = 0        
        for dat in dataarray:
            if corr == None or corr==True:
                radiodata = dat.radioc
            else:
                radiodata = dat.radio
            fft = np.fft.rfft(radiodata)
            spec = np.absolute(fft)
            phase = np.angle(fft)
            freq = np.fft.rfftfreq(len(radiodata),400)
            spec = np.interp(maxfreq,freq,spec)
            phase = np.interp(maxfreq,freq,phase)
            specarray[count] = spec
            phasearray[count] = phase
            count +=1
        meanspec = np.mean(specarray,axis=0)
        stdspec = np.std(specarray,axis=0)
        # the number of possible fake baselines equals the number of
        # spectrum/phase combinations available from the real data
        nrfake = sizeofdat*sizeofdat
        # two ways of producing a fake baseline are implemented:
        # - draw a random spectrum around the mean spectrum and pair it
        #   with one of the measured phases
        # - combine one measured spectrum with one measured phase
        # 2) draw random spectrum:
        radtemp = []
        fakebls = []
        random = False
        combine = False
        if typeofbl is not None and typeofbl.lower()=='random':
            for i in range(nrfake):
                fakespec = np.array([])
                for m,s in zip(meanspec,stdspec):
                    specpoint = np.random.normal(m,s)
                    fakespec = np.append(fakespec,specpoint)
                phaseindex = int(np.random.uniform(0,sizeofdat))
                fakephase = phasearray[phaseindex]
                fakefft = fakespec*np.exp(1J*fakephase)
                fakebl = np.fft.irfft(fakefft)
                fakebls.append(fakebl)
        # 3) combine a particular spectrum with a particular phase
        elif typeofbl is None or typeofbl.lower() == 'combine':
            for i in range(sizeofdat):
                specindex = i
                for j in range(i, i+sizeofdat):
                    phaseindex = j%sizeofdat
                    fakespec = specarray[specindex]
                    fakephase = phasearray[phaseindex]
                    fakefft = fakespec*np.exp(1J*fakephase)
                    fakebl = np.fft.irfft(fakefft)
                    fakebls.append(fakebl)
        if maxadc is None:
            maxadc = 0
        #produce fake signal
        time = np.linspace(0,24,len(fakebls[0]))
        a = maxadc
        b = 17
        c = 2
        sig = utils.gauss(time,a,b,c)
        count = 0 
        for bl in fakebls:
            rt = radiotemp.Radiotemp()
            bl = bl - sig
            fakebls[count] = bl
            rt.radio  = bl
            rt.corr  = corr
            radtemp.append(rt)
            count +=1
        return radtemp
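The fake-baseline trick rests on the fact that a real-valued signal is fully described by its rFFT amplitude spectrum and phase, so recombining an amplitude spectrum with any phase and inverting yields a plausible time series. A minimal round-trip check in plain NumPy:

import numpy as np

x = np.random.randn(216)  # one day of samples, as above
fft = np.fft.rfft(x)
spec, phase = np.absolute(fft), np.angle(fft)
x_back = np.fft.irfft(spec * np.exp(1j * phase), n=len(x))
assert np.allclose(x, x_back)  # amplitude + phase reconstruct the signal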