Example #1
def bad_model(X):
    """ Results in a matrix with shape matching X, but all rows sum to 1"""
    N, T, J = X.shape
    Y = pl.zeros_like(X)
    for t in range(T):
        Y[:,t,:] = X[:,t,:] / pl.outer(pl.array(X[:,t,:]).sum(axis=1), pl.ones(J))
    return Y.view(pl.recarray) 
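For reference, the `pl.outer(...)` construction above just broadcasts each time-slice's row sums across the J columns. A minimal NumPy sketch of an equivalent vectorized normalization (a hypothetical helper, not part of the original source):

import numpy as np

def bad_model_broadcast(X):
    """Same row normalization as above, via broadcasting instead of outer()."""
    # sum over the last axis, keeping it so the division broadcasts over J
    return X / X.sum(axis=2, keepdims=True)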
Example #2
    def init(self, img, box):
        img_now = ops.read_image(img)
        self.target_sz = np.array([box[3], box[2]])
        self.pos = np.array([box[1], box[0]]) + self.target_sz / 2
        # print(self.pos)
        # ground_truth =

        # window size, taking padding into account
        self.sz = pylab.floor(self.target_sz * (1 + self.padding))

        # desired output (gaussian shaped), bandwidth proportional to target size
        self.output_sigma = pylab.sqrt(pylab.prod(
            self.target_sz)) * self.output_sigma_factor

        grid_y = pylab.arange(self.sz[0]) - pylab.floor(self.sz[0] / 2)
        grid_x = pylab.arange(self.sz[1]) - pylab.floor(self.sz[1] / 2)
        #[rs, cs] = ndgrid(grid_x, grid_y)
        rs, cs = pylab.meshgrid(grid_x, grid_y)
        y = pylab.exp(-0.5 / self.output_sigma**2 * (rs**2 + cs**2))
        self.yf = pylab.fft2(y)
        # print(self.yf)
        #print("yf.shape ==", yf.shape)
        #print("y.shape ==", y.shape)

        # store pre-computed cosine window
        self.cos_window = pylab.outer(pylab.hanning(self.sz[0]),
                                      pylab.hanning(self.sz[1]))
        if img_now.ndim == 3:
            img_now = ops.rgb2gray(img_now)
        x = ops.get_subwindow(img_now, self.pos, self.sz, self.cos_window)
        k = ops.dense_gauss_kernel(self.sigma, x)
        self.alphaf = pylab.divide(
            self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        self.z = x
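The `# Eq. 7` tag refers to the kernel regularized least-squares solution computed in the Fourier domain. In the notation of these snippets (hats denoting 2-D DFTs, matching `alphaf = yf / (fft2(k) + lambda)` above and the detection step used later):

\[
\hat{\alpha} = \frac{\hat{y}}{\hat{k} + \lambda}
\quad\text{(Eq. 7, training)},
\qquad
\text{response} = \mathcal{F}^{-1}\big(\hat{\alpha} \odot \hat{k}\big)
\quad\text{(Eq. 9, detection)}.
\]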
Example #3
    def int(self, integrand):
        """
        Integrates over the second argument of an array
        with Gaussian quadrature weights.
        """
        return M.sum(M.outer(1.*M.ones(integrand.shape[0]), self.weights) * \
                     integrand, 1)
Example #4
File: pt.py  Project: jizhi/project_TL
def sigma2fromPk(c, r):
    """
    calculate sigma^2 from pk

    this function can be called with vectors or scalars, but always returns a vector
    """
    r = M.asarray(r)
    return 9.0 / r ** 2 * N.trapz(c.k * c.pk * sf.j1(M.outer(r, c.k)) ** 2, M.log(c.k)) / 2.0 / M.pi ** 2
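Unpacking the one-liner: with the spherical top-hat window W(x) = 3 j_1(x)/x, the trapezoidal rule over ln k evaluates

\[
\sigma^2(r) = \int \frac{k^3 P(k)}{2\pi^2}\left[\frac{3\,j_1(kr)}{kr}\right]^2 d\ln k
            = \frac{9}{2\pi^2 r^2}\int k\,P(k)\,j_1(kr)^2\, d\ln k ,
\]

with `M.outer(r, c.k)` supplying the kr grid for all requested radii at once.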
Example #5
def bad_model(X):
    """ Results in a matrix with shape matching X, but all rows sum to 1"""
    N, T, J = X.shape
    Y = pl.zeros_like(X)
    for t in range(T):
        Y[:, t, :] = X[:, t, :] / pl.outer(
            pl.array(X[:, t, :]).sum(axis=1), pl.ones(J))
    return Y.view(pl.recarray)
Example #6
def apply_cos_window(channels):
    global cos_window

    if cos_window is None:
        cos_window = pylab.outer(pylab.hanning(channels.shape[1]),
                                 pylab.hanning(channels.shape[2]))

    return pylab.multiply(channels[:] - 0.5, cos_window)
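The outer product of two 1-D Hanning windows is the 2-D cosine taper used by all the trackers in this collection. A standalone NumPy check (illustrative only):

import numpy as np

h, w = 64, 48
win = np.outer(np.hanning(h), np.hanning(w))   # shape (h, w)
# each entry is the product of the two 1-D window values
assert np.allclose(win[10, 20], np.hanning(h)[10] * np.hanning(w)[20])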
Example #7
File: pt.py  Project: jizhi/project_TL
def xi2fromCambPk(c, r):
    """
    calculate 2pt corr. function from Camb instance (with its
    associated k,pk)

    this function can be called with vectors
    """
    r = M.asarray(r)
    return N.trapz(c.k ** 3 * c.pk * sf.j0(M.outer(r, c.k)), M.log(c.k)) / 2.0 / M.pi ** 2
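In standard notation, the integral being trapezoid-ruled over ln k is

\[
\xi(r) = \int \frac{k^3 P(k)}{2\pi^2}\, j_0(kr)\, d\ln k .
\]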
Example #8
    def get_WCSS(self, K, labels, distance_matr):
        MD = pl.zeros(K)
        for k in range(K):
            ck = pl.ma.masked_equal(labels, k).mask
            Xk = self._X[ck, :]
            Nk = self._X[ck, :].shape[0]
            E = distance_matr[pl.outer(ck, ck)]
            MD[k] = E.sum() / (2 * Nk)

        return MD.sum()
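The per-cluster term is the pairwise-distance form of the within-cluster sum of squares. Assuming `distance_matr` holds squared Euclidean distances, the boolean mask `pl.outer(ck, ck)` selects exactly the pairs with both points in cluster k, so

\[
\mathrm{WCSS} = \sum_{k=1}^{K} \frac{1}{2 N_k} \sum_{i,j \in C_k} \lVert x_i - x_j \rVert^2 .
\]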
Example #9
File: pt.py  Project: jizhi/project_TL
def wPk(c, r, w):
    """
    convolve Pk with an arbitrary window function w(kr)
    
    int k^2 P(k)w(kr) dk/2/pi^2

    e.g., the previous two functions can be realized as
    wPk(c,r,j0)
    wPk(c,r,lambda x: (3*pt.sf.j1(x)/x)**2)
    """
    r = M.asarray(r)
    return N.trapz(c.k ** 3 * c.pk * w(M.outer(r, c.k)), M.log(c.k)) / 2.0 / M.pi ** 2
Example #10
File: pt.py  Project: jizhi/project_TL
def xi2fromPk(k, pk, r):
    """
    calculate 2pt corr. function from k, p(k).
    It doesn't seem to work particularly well, though.

    this function can be called with vectors
    """
    r = M.asarray(r)

    print(len(k), len(pk), len(r))

    return N.trapz(k ** 3 * pk * sf.j0(M.outer(r, k)), M.log(k)) / 2.0 / M.pi ** 2
Example #11
def degraderesolution(prefix,factor,dlogstring):
    covar = M.load(prefix+'covar.dat')
    pnl = M.load(prefix+'pnl.dat')
    dlog = M.load(prefix+dlogstring)[:,1]
    k = pnl[:,0]*1.
    p = pnl[:,1]*1.
    gausspart = M.load(prefix+'gausspart.dat')
    nbins = len(k)

    nongausspart = covar - gausspart

    nongausspartnew = nongausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    knew = k[:nbins-factor:factor]*0.
    pnew = p[:nbins-factor:factor]*0.
    gausspartnew = gausspart[:nbins-factor:factor,:nbins-factor:factor]*0.
    nbinsnew = len(knew)
    dlognew = dlog[:nbins-factor:factor]*0.

    for i1 in range(0, nbins-factor, factor):
        i1new = i1 // factor   # integer index of the coarse bin
        print(i1, i1+factor-1, nbins)
        print(i1new, nbinsnew)
        weights = k[i1:i1+factor-1]**3
        sumweights = M.sum(weights)
        pnew[i1new] = M.sum(p[i1:i1+factor-1]*weights)/sumweights
        knew[i1new] = M.sum(k[i1:i1+factor-1]*weights)/sumweights
        dlognew[i1new] = M.sum(dlog[i1:i1+factor-1]*weights)/sumweights

    sqrtkfact = M.sqrt(k[1]/k[0])
        
    for i1 in range(0, nbins-factor, factor):
        i1new = i1 // factor
        for i2 in range(0, nbins-factor, factor):
            i2new = i2 // factor
                                                                       
            weights2 = M.outer(k[i1:i1+factor-1]**3,k[i2:i2+factor-1]**3)
            sumweights2 = M.sum(M.sum(weights2))
            nongausspartnew[i1new,i2new] = M.sum(M.sum(nongausspart[i1:i1+factor-1,i2:i2+factor-1]*weights2))/sumweights2

            if i1new == i2new:
                vk = (4.*M.pi/3.)*((k[i1+factor-1]*sqrtkfact)**3 - (k[i1]/sqrtkfact)**3)
                gausspartnew[i1new,i2new] = (2.*M.pi)**3 * 2.*(pnew[i1new]**2)/vk
                                                                       
    covarnew = gausspartnew + nongausspartnew

    prefixnew = prefix+'degrade'+str(factor)+'/'
    os.system('mkdir '+prefixnew)
    M.save(prefixnew+'pnl.dat',M.transpose([knew,pnew]), fmt = '%18.16e')
    M.save(prefixnew+'covar.dat',covarnew, fmt = '%18.16e')
    M.save(prefixnew+'gausspart.dat',gausspartnew, fmt = '%18.16e')
    M.save(prefixnew+dlogstring,M.transpose([knew,dlognew]), fmt = '%18.16e')
    M.save(prefixnew+'nbins.dat', M.array([nbinsnew]), fmt = '%d')
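Schematically, each coarse bin is a k^3-weighted average of the fine bins it absorbs, and the Gaussian diagonal is rebuilt from the coarse shell volume:

\[
\bar{p} = \frac{\sum_i k_i^3\, p_i}{\sum_i k_i^3},
\qquad
V = \frac{4\pi}{3}\left(k_{\max}^3 - k_{\min}^3\right),
\qquad
C^{G} = \frac{2\,(2\pi)^3\,\bar{p}^{\,2}}{V},
\]

where k_max and k_min are the outer and inner edges of the merged shell (the sqrtkfact factors above).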
Example #12
File: pt.py  Project: jizhi/project_TL
    def sumDotProd(self, start1, end1, start2, end2, k, cosTheta):
        """
        Calculates (k_start1 + ... + k_end1) . (k_start2 + ... + k_end2)
        k: [|k0|, |k1|, |k2|, ...]
        cosTheta: [[cos(theta_k0,k0), cos(theta_k0,k1), ...],
                   [cos(theta_k1,k0), ...],
                   ...]
        """
        # shift to 0-based indices; Python's non-inclusive slice end then
        # picks out exactly the requested range
        start1 -= 1
        start2 -= 1
        nentries = (end1 - start1) * (end2 - start2)

        # print start1,end1,start2,end2
        # print M.outer(k[start1:end1],k[start2:end2])

        ans = M.sum(M.reshape(M.outer(k[start1:end1], k[start2:end2]) * cosTheta[start1:end1, start2:end2], nentries))
        return ans
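The reshape-and-sum is just the bilinearity of the dot product: expanding both vector sums gives

\[
\Big(\sum_{i=s_1}^{e_1}\mathbf{k}_i\Big)\cdot\Big(\sum_{j=s_2}^{e_2}\mathbf{k}_j\Big)
= \sum_{i}\sum_{j}\,|\mathbf{k}_i|\,|\mathbf{k}_j|\,\cos\theta_{ij},
\]

which is the elementwise product of `M.outer(k[start1:end1], k[start2:end2])` with the cosine matrix, summed over all entries.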
Example #13
    def initialize(self, image, pos, target_sz):
        if len(image.shape) == 3 and image.shape[2] > 1:
            image = rgb2gray(image)
        self.image = image
        if self.should_resize_image:
            self.image = scipy.misc.imresize(self.image, 0.5)
            self.image = self.image / 255.0

        # window size, taking padding into account
        self.sz = pylab.floor(target_sz * (1 + self.padding))
        self.pos = pos

        # desired output (gaussian shaped), bandwidth proportional to target size
        output_sigma = pylab.sqrt(pylab.prod(
            self.sz)) * self.output_sigma_factor

        grid_y = pylab.arange(self.sz[0]) - pylab.floor(self.sz[0] / 2)
        grid_x = pylab.arange(self.sz[1]) - pylab.floor(self.sz[1] / 2)
        #[rs, cs] = ndgrid(grid_x, grid_y)
        rs, cs = pylab.meshgrid(grid_x, grid_y)
        self.y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
        self.yf = pylab.fft2(self.y)

        # store pre-computed cosine window
        self.cos_window = pylab.outer(pylab.hanning(self.sz[0]),
                                      pylab.hanning(self.sz[1]))

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(self.sigma, x)
        self.alphaf = pylab.divide(
            self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        self.z = x

        return
Example #14
def deltah(h):
    """
    Gets delta_h (notation as in Cooray & Hu 2001),
    the Fourier-transformed halo profile
    """

    if h.p.nfw == 'y':
        # Use cisi
        ans = M.outer(h.k*0.,h.m*0.)
        cbar = M.exp(h.logcbar)
        rvir = (3./(4.*M.pi)*h.m/200.)**(1./3.)
        rs = rvir/cbar
        lf = M.log(1+cbar)-cbar/(1+cbar)
        for ki in range(len(h.k)):
            ci1c, si1c = utils.cisiarr((1+cbar)*h.k[ki]*rs)
            ci0, si0 = utils.cisiarr(h.k[ki]*rs)
            ans[ki,:] = (M.sin(h.k[ki]*rs)*(si1c-si0) - \
                         M.sin(cbar*h.k[ki]*rs)/((1+cbar)*h.k[ki]*rs) + \
                         M.cos(h.k[ki]*rs)*(ci1c-ci0))/lf
            
    else:
        g = utils.HGQ(5)  # 5-pt Hermite-Gauss quadrature class
        ans = 1.*M.zeros((len(h.k), len(h.m), 5))

        for mi in range(len(h.m)):
            logc = h.logcbar[mi] + g.abscissas*h.p.sigmalogc  # array of c's
            c = M.exp(logc)
            rvir = ((3./4.)*h.m[mi]/(200.*M.pi))**(1./3.)
            rs = rvir/c
            lf = M.log(1+c) - c/(1+c)
            for ki in range(len(h.k)):
                ci1c, si1c = utils.cisiarr((1+c)*h.k[ki]*rs)
                ci0, si0 = utils.cisiarr(h.k[ki]*rs)
                ans[ki,mi,:] = (M.sin(h.k[ki]*rs)*(si1c-si0) -
                                M.sin(c*h.k[ki]*rs)/((1+c)*h.k[ki]*rs) +
                                M.cos(h.k[ki]*rs)*(ci1c-ci0))/lf

    return ans
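Both branches fill in the standard analytic Fourier transform of an NFW profile truncated at the virial radius (as used in Cooray & Hu 2001), with concentration c, scale radius r_s, and Si/Ci the sine and cosine integrals:

\[
u(k\mid m) = \frac{\sin(kr_s)\big[\mathrm{Si}((1{+}c)kr_s)-\mathrm{Si}(kr_s)\big]
- \dfrac{\sin(c\,kr_s)}{(1{+}c)kr_s}
+ \cos(kr_s)\big[\mathrm{Ci}((1{+}c)kr_s)-\mathrm{Ci}(kr_s)\big]}
{\ln(1{+}c) - c/(1{+}c)} .
\]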
Example #15
    def test_process_fit_results(self):
        r = p.arange(5)
        e = p.outer(p.arange(5), p.arange(5))

        alpha_psp = AlphaPSP()
        pr, pe = alpha_psp.process_fit_results(r, e)

        self.assertTrue(p.all(pr == p.array([0, 2, 1, 3, 4])))
        self.assertEqual(pe[1, 1], 4)
        self.assertEqual(pe[2, 2], 1)

        self.assertLess(pr[2], pr[1])
        self.assertLess(pe[2, 2], pe[1, 1])

        # test again with permuted values
        pr, pe = alpha_psp.process_fit_results(pr, pe)

        self.assertTrue(p.all(pr == p.array([0, 2, 1, 3, 4])))
        self.assertEqual(pe[1, 1], 4)
        self.assertEqual(pe[2, 2], 1)

        self.assertLess(pr[2], pr[1])
        self.assertLess(pe[2, 2], pe[1, 1])
Example #16
    def initialize(self, image, pos, target_sz):
        if len(image.shape) == 3 and image.shape[2] > 1:
            image = rgb2gray(image)
        self.image = image
        if self.should_resize_image:
            self.image = scipy.misc.imresize(self.image, 0.5)
            self.image = self.image / 255.0

        # window size, taking padding into account
        self.sz = pylab.floor(target_sz * (1 + self.padding))
        self.pos = pos

        # desired output (gaussian shaped), bandwidth proportional to target size
        output_sigma = pylab.sqrt(pylab.prod(self.sz)) * self.output_sigma_factor

        grid_y = pylab.arange(self.sz[0]) - pylab.floor(self.sz[0]/2)
        grid_x = pylab.arange(self.sz[1]) - pylab.floor(self.sz[1]/2)
        #[rs, cs] = ndgrid(grid_x, grid_y)
        rs, cs = pylab.meshgrid(grid_x, grid_y)
        self.y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
        self.yf = pylab.fft2(self.y)

        # store pre-computed cosine window
        self.cos_window = pylab.outer(pylab.hanning(self.sz[0]),
                                      pylab.hanning(self.sz[1]))

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(self.image, self.pos, self.sz, self.cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(self.sigma, x)
        self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        self.z = x

        return
Example #17
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    #spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0]/2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1]/2)
    #[rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    #print("yf.shape ==", yf.shape)
    #print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]),
                             pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if True and ((frame % 10) == 0):
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)

        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        #print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        if debug:
            pylab.figure()
            pylab.imshow(x)
            pylab.title("sub window")

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz/2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            #first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:",  len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
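The adaptation step above is a plain exponential moving average of both the dual coefficients and the template, with gamma = interpolation_factor:

\[
\hat{\alpha}_t = (1-\gamma)\,\hat{\alpha}_{t-1} + \gamma\,\hat{\alpha}_{\text{new}},
\qquad
z_t = (1-\gamma)\,z_{t-1} + \gamma\,x_t .
\]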
Example #18
def getHaloTrispec(c,h, startki = 0, endki = 0, strideki = 1, adder = 0., highprecisionthresh = 0.03):
    """
    Calculates the halo model trispectrum T(k1,-k1,k2,-k2),
    as in Cooray & Hu (2001)

    adder: something that will be added later, e.g. gaussian
    part of the covariance.  In there for precision checks

    Should work for galaxies too, but shot noise isn't implemented.  Also, for some
    HOD parameters it may evaluate the PT trispectrum on scales where it is unreliable.
    """

    #p = c.cp

    tribi = pt.TriBiSpectrumFromCamb(c)
    g = utils.HGQ(5)

    # Could change a bunch of these to scalars

    i11 = 0.*h.k
    i21 = 0.*h.k
    i02 = 0.*h.k
    pk1plusk3_perp = 0.*h.k
    t3hB_perp = 0.*h.k
    t4hT_perp = 0.*h.k
    dsq4hnoT = 0.*h.k
    dsq4hshouldbe = 0.*h.k
    dsq1h = 0.*h.k
    dsq2h = 0.*h.k
    dsq2h31 = 0.*h.k
    dsq3h = 0.*h.k
    dsq4h = 0.*h.k
    qsq = 0.*h.k
    i04 = M.outer(0.*h.k,0.*h.k)
    i12 = M.outer(0.*h.k,0.*h.k)
    i13_112 = M.outer(0.*h.k,0.*h.k)
    i13_122 = M.outer(0.*h.k,0.*h.k)
    i22 = M.outer(0.*h.k,0.*h.k)
    i114 = M.outer(0.*h.k,0.*h.k)
    i1114 = M.outer(0.*h.k,0.*h.k)
    
    
    pk1plusk3 = M.outer(0.*h.k,0.*h.k)
    t2h31 = M.outer(0.*h.k,0.*h.k)
    t3hnoB = M.outer(0.*h.k,0.*h.k)
    t3hB = M.outer(0.*h.k,0.*h.k)
    t4hnoT = M.outer(0.*h.k,0.*h.k)
    t4hT = M.outer(0.*h.k,0.*h.k)
    b10 = M.outer(0.*h.k,0.*h.k)
    t10 = M.outer(0.*h.k,0.*h.k)

    for k1 in range(len(h.k)):
        i11[k1] = intOverMassFn(1,1, [k1],h)
        i21[k1] = intOverMassFn(2,1, [k1],h)
        i02[k1] = intOverMassFn(0,2, [k1,k1],h)

    if endki == 0:
        endki = len(h.k)

    for k1 in range(startki, endki, strideki):
        for k3 in range(k1,len(h.k)):
        #for k3 in range(k1,endki,strideki):
            i04[k1,k3] = intOverMassFn(0,4, [k1,k1,k3,k3], h)
            i13_112[k1,k3] = intOverMassFn(1,3, [k1,k1,k3], h)
            i13_122[k1,k3] = i13_112[k3,k1]
            i12[k1,k3] = intOverMassFn(1,2, [k1,k3], h)
            i22[k1,k3] = intOverMassFn(2,2, [k1,k3], h)

            t2h31[k1,k3] = 2.*(h.pk[k1]*i13_122[k1,k3]*i11[k1] + \
                             h.pk[k3]*i13_112[k1,k3]*i11[k3])

            t3hnoB[k1,k3] = (i11[k1]*h.pk[k1])**2 * i22[k3,k3] + \
                            (i11[k3]*h.pk[k3])**2 * i22[k1,k1] + \
                            4.*(i11[k1]*h.pk[k1])*(i11[k3]*h.pk[k3])*i22[k1,k3]

            t4hnoT[k1,k3] = 2.*i11[k1]*i11[k3]*h.pk[k1]*h.pk[k3] *\
                          (i21[k1]*i11[k3]*h.pk[k3] + \
                             i21[k3]*i11[k1]*h.pk[k1])

            # First Romberg-integrate explicitly-angular-averaged things to low precision
            pk1plusk3[k1,k3] = utils.openRomb(\
                lambda cth:c.pkInterp(M.sqrt(h.k[k1]**2 + h.k[k3]**2 + \
                                 2.*h.k[k1]*h.k[k3]*cth)), -1.,1.,eps=0.3,k=3)/2.

            b10[k1,k3] = utils.openRomb(lambda cth:tribi.b\
                                    (h.k[k1],h.k[k3], cth, c),-1.,1.,eps=0.3,k=3)/2.
            t3hB[k1,k3] = 4. * b10[k1,k3] * i12[k1,k3]*i11[k1]*i11[k3]


            #if k1 == k3:
            #t10[k1,k3] = 32.*h.pk[k1]**2*utils.openRomb(lambda cth: (3.+10*cth)**2*c.pkInterp(h.pk[k1]*M.sqrt(2.*(1-cth))),-1.,1.,eps = 0.3,k=2)/2. - 11./378.*h.pk[k1]**3
            # could change to this if we wanted to; quicker, but less uniform
            
            t10[k1,k3] = utils.openRomb(lambda cth:tribi.tk1mk1k2mk2_array\
                                    (h.k[k1],h.k[k3], cth, c,0),-1.,1.,eps=0.3,k=3)/2.

            t4hT[k1,k3] = t10[k1,k3] * i11[k1]**2 * i11[k3]**2

            tentativetotal = M.fabs(i04[k1,k3]+2*pk1plusk3[k1,k3]*i12[k1,k3]+t2h31[k1,k3]+\
                             t3hnoB[k1,k3]+t3hB[k1,k3]+t4hnoT[k1,k3]+t4hT[k1,k3] +\
                             adder[k1,k3])

            if (adder[k1,k3] != 0.):
                print('adder = ', adder[k1,k3])

            # recompute Romberg-integrated quantities to high precision if they
            # contribute more than highprecisionthresh of the tentative total
            if M.fabs(2*pk1plusk3[k1,k3]*i12[k1,k3]) > highprecisionthresh*tentativetotal:
                print('t2h22: ', pk1plusk3[k1,k3], end=' ')
                pk1plusk3[k1,k3] = utils.openRomb(
                    lambda cth:c.pkInterp(M.sqrt(h.k[k1]**2 + h.k[k3]**2 + \
                                                 2.*h.k[k1]*h.k[k3]*cth)), -1.,1.,eps=0.03,k=7,jmax=18)/2.
                print(pk1plusk3[k1,k3])
                
            if M.fabs(t3hB[k1,k3]) > highprecisionthresh*tentativetotal:
                print('t3hB: ', b10[k1,k3], end=' ')
                b10[k1,k3] = utils.openRomb(lambda cth:tribi.b\
                                        (h.k[k1],h.k[k3], cth, c),-1.,1.,eps=0.01,k=5,jmax=30)/2.
                print(b10[k1,k3])
                t3hB[k1,k3] = 4. * b10[k1,k3] * i12[k1,k3]*i11[k1]*i11[k3]
                   
            if M.fabs(t4hT[k1,k3]) > highprecisionthresh*tentativetotal:
                print('t4hT:', t10[k1,k3], end=' ')
                    
                t10[k1,k3] = utils.openRomb(lambda cth:tribi.tk1mk1k2mk2_array\
                                        (h.k[k1],h.k[k3], cth, c,0),-1.,1.,eps=0.01,k=5)/2.
                print(t10[k1,k3])
                t4hT[k1,k3] = t10[k1,k3] * i11[k1]**2 * i11[k3]**2

            nrm = 2.*h.pk[k1]*h.pk[k3]

            # print the term-by-term breakdown at each entry of the covariance matrix
            print(k1, k3, i04[k1,k3]/nrm, (2.*pk1plusk3[k1,k3]*i12[k1,k3]+t2h31[k1,k3])/nrm,
                  (t3hnoB[k1,k3]+t3hB[k1,k3])/nrm, t4hT[k1,k3]/nrm, t4hnoT[k1,k3]/nrm, (t4hT[k1,k3]+t4hnoT[k1,k3])/nrm,
                  (i04[k1,k3] + 2.*pk1plusk3[k1,k3]*i12[k1,k3]+t2h31[k1,k3] +
                   t3hnoB[k1,k3]+t3hB[k1,k3]+t4hT[k1,k3]+t4hnoT[k1,k3])/nrm)
            
        pk1plusk3_perp[k1] = c.pkInterp(M.sqrt(2.)*h.k[k1])

        t3hB_perp[k1] = 4.*tribi.b(h.k[k1],h.k[k1],0.,c) *\
                           i12[k1,k1]*i11[k1]**2
        squaretri = tribi.tk1mk1k2mk2(h.k[k1],h.k[k1], 0., c,0)
        t4hT_perp[k1] = i11[k1]**4 * squaretri
        qsq[k1] = squaretri/(4.*h.pk[k1]**2 * (2.*pk1plusk3_perp[k1] +\
                                               h.pk[k1]))
        s = pk1plusk3_perp[k1]/h.pk[k1]
        dsq4hshouldbe[k1] = 0.085*(4.*h.pk[k1]**2 * \
                                   (2.*pk1plusk3_perp[k1] + h.pk[k1]))
        
        dsq1h[k1] = i04[k1,k1]
        dsq2h[k1] = 2.*pk1plusk3_perp[k1]*i12[k1,k1]**2. + t2h31[k1,k1]
        dsq2h31[k1] = t2h31[k1,k1]
        dsq3h[k1] = t3hnoB[k1,k1] + t3hB_perp[k1]
        dsq4hnoT[k1] = 4.*(i11[k1]*h.pk[k1])**3*i21[k1]
        dsq4h[k1] = t4hnoT[k1,k1] + t4hT_perp[k1]

    dsq = dsq1h + dsq2h + dsq3h + dsq4h

    df = h.k**3/(2.*M.pi**2)
    ot = 1./3.

    # These are debugging files; they output the square-configuration reduced trispectrum.
    #M.save(h.prefix+'dsq1h.dat',M.transpose([h.k,dsq1h]))
    #M.save(h.prefix+'dsq2h.dat',M.transpose([h.k,dsq2h]))
    #M.save(h.prefix+'dsq2h31.dat',M.transpose([h.k,dsq2h31]))
    #M.save(h.prefix+'dsq3h.dat',M.transpose([h.k,dsq3h]))
    #M.save(h.prefix+'dsq4h.dat',M.transpose([h.k,dsq4h]))
    rat = M.fabs(dsq4hnoT/t4hT_perp)

    t1h = i04
    t2h22 = 2.*pk1plusk3*i12**2
    for k1 in range(len(h.k)):
        for k3 in range(k1+1,len(h.k)):
            t10[k3,k1] = t10[k1,k3]
            t1h[k3,k1] = t1h[k1,k3]
            t2h22[k3,k1] = t2h22[k1,k3]
            t2h31[k3,k1] = t2h31[k1,k3]
            t3hnoB[k3,k1] = t3hnoB[k1,k3]
            t3hB[k3,k1] = t3hB[k1,k3]
            t4hnoT[k3,k1] = t4hnoT[k1,k3]
            t4hT[k3,k1] = t4hT[k1,k3]
    
    t2h = t2h22 + t2h31
    t3h = t3hnoB + t3hB
    t4h = t4hnoT + t4hT

    ans = t1h+t2h+t3h+t4h
    
    if h.p.outputalltterms == 0:
        return ans
    elif h.p.outputalltterms == 1:
        return ans,t10,t1h,t2h,t3h,t4h
    elif h.p.outputalltterms == 2:
        return ans,t10,t1h,t2h22,t2h31,t3hB,t3hnoB,t4hT,t4hnoT
    else:
        return
Example #19
def getHaloCov(prefix,c,h):
    """
    Output halo model covariance matrix, correlation matrix into the directory 'prefix'
    """
    os.system('mkdir '+prefix)

    h.pnl = getHaloPknl(c,h)

    M.save(prefix+'pnl.dat',M.transpose([h.k,h.pnl]), fmt = '%18.16e')

    h.prefix = prefix
    #h.dlogPnldlogA = getdlogPnldlogA(c,h)
    #M.save(prefix+'dlogpnldloga.dat',M.transpose([h.k,h.dlogPnldlogA]),fmt='%6.5e')
    vk = h.k*0.
    vk[0] = (h.k[0]*h.k[1])**1.5 - (h.k[0]**3/h.k[1])**1.5
    for k1 in M.arange(1,len(h.k)-1):
        vk[k1] = (h.k[k1]*h.k[k1+1])**1.5 - (h.k[k1]*h.k[k1-1])**1.5
    vk[-1] = (h.k[-1]**3/h.k[-2])**1.5 - (h.k[-1]*h.k[-2])**1.5
    vk *= 4.*M.pi/3.

    gausspart = M.outer(h.k*0.,h.k*0.)
    for k1 in M.arange(len(h.k)):
        gausspart[k1,k1] = (2.*M.pi)**3 * 2.*(h.pnl[k1]**2)/vk[k1]

    if h.p.outputalltterms == 0:
        t = getHaloTrispec(c,h, adder=gausspart)
    elif h.p.outputalltterms == 1:
        t,t10,t1h,t2h,t3h,t4h = getHaloTrispec(c,h, adder=gausspart)
    elif h.p.outputalltterms == 2:
        t,t10,t1h,t2h22,t2h31,t3hB,t3hnoB,t4hT,t4hnoT = getHaloTrispec(c,h,adder=gausspart)
    
    covar = t*1.

    cocg = h.k*0.
    for k1 in M.arange(len(h.k)):
        cocg[k1] = M.sqrt(covar[k1,k1]/gausspart[k1,k1])

    covar += gausspart
    M.save(prefix+'covar.dat',covar, fmt = '%18.16e')
    M.save(prefix+'gausspart.dat',gausspart, fmt = '%18.16e')
    # t10 -> pt.dat is the perturbation theory trispectrum by itself.  However,
    # it might not be calculated at high precision in the nonlinear regime:
    # where other terms dominate on small scales, the first pass at
    # calculating it is done with low precision.

    if h.p.outputalltterms == 1:
        M.save(prefix+'pt.dat',t10, fmt = '%18.16e')
        M.save(prefix+'t1h.dat',t1h, fmt = '%18.16e')
        M.save(prefix+'t2h.dat',t2h, fmt = '%18.16e')
        M.save(prefix+'t3h.dat',t3h, fmt = '%18.16e')
        M.save(prefix+'t4h.dat',t4h, fmt = '%18.16e')
    if h.p.outputalltterms == 2:
        M.save(prefix+'pt.dat',t10, fmt = '%18.16e')
        M.save(prefix+'t1h.dat',t1h, fmt = '%18.16e')
        M.save(prefix+'t2h22.dat',t2h22, fmt = '%18.16e')
        M.save(prefix+'t2h31.dat',t2h31, fmt = '%18.16e')
        M.save(prefix+'t3hB.dat',t3hB, fmt = '%18.16e')
        M.save(prefix+'t3hnoB.dat',t3hnoB, fmt = '%18.16e')
        M.save(prefix+'t4hT.dat',t4hT, fmt = '%18.16e')
        M.save(prefix+'t4hnoT.dat',t4hnoT, fmt = '%18.16e')
        
    correl = 0.*covar

    tnorm = t
    for i in M.arange(len(h.k)):
        for j in M.arange(len(h.k)):
            correl[i,j] = covar[i,j]/M.sqrt(covar[i,i]*covar[j,j])

    M.save(prefix+'nbins.dat',M.array([len(h.k)]), fmt = '%d')
    M.save(prefix+'correl.dat',correl, fmt = '%4.3f')
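For reference, the diagonal `gausspart` filled in above is the Gaussian band-power covariance for spherical shells whose edges sit at the geometric means of adjacent grid points:

\[
C^{G}_{ii} = \frac{2\,(2\pi)^3\,P_{\rm nl}(k_i)^2}{V_i},
\qquad
V_i = \frac{4\pi}{3}\left(k_{i,+}^{3} - k_{i,-}^{3}\right),
\quad
k_{i,\pm} = \sqrt{k_i\,k_{i\pm1}} .
\]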
Example #20
    def init(self, img, rect):
        im_width = img.shape[1]
        im_height = img.shape[0]
        ys = pylab.floor(rect[1]) + pylab.arange(rect[3], dtype=int)
        xs = pylab.floor(rect[0]) + pylab.arange(rect[2], dtype=int)
        ys = ys.astype(int)
        xs = xs.astype(int)
        # check for out-of-bounds coordinates,
        # and set them to the values at the borders
        ys[ys < 0] = 0
        ys[ys >= img.shape[0]] = img.shape[0] - 1

        xs[xs < 0] = 0
        xs[xs >= img.shape[1]] = img.shape[1] - 1

        self.rect = rect  # rectangle containing the bounding box of the target
        # pos is the center position of the tracked object (cy, cx)
        self.pos = pylab.array([rect[1] + rect[3] / 2, rect[0] + rect[2] / 2])
        self.posOffset = np.array([0, 0], int)
        self.tlx = rect[0]
        self.tly = rect[1]
        self.trackNo = 0
        # parameters according to the paper --

        padding = 1.0  # extra area surrounding the target (window enlargement factor; doubles the window by default)
        # spatial bandwidth (proportional to target)
        output_sigma_factor = 1 / float(16)
        self.sigma = 0.2  # gaussian kernel bandwidth
        self.lambda_value = 1e-2  # regularization
        # linear interpolation factor for adaptation
        self.interpolation_factor = 0.075

        # target_sz equals [rect[3], rect[2]]
        target_sz = pylab.array([int(rect[3]), int(rect[2])])
        # window size(Extended window size), taking padding into account
        window_sz = pylab.floor(target_sz * (1 + padding))

        self.window_sz = window_sz
        self.target_sz = target_sz

        # desired output (gaussian shaped), bandwidth proportional to target size
        output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

        grid_y = pylab.arange(window_sz[0]) - pylab.floor(window_sz[0] / 2)
        grid_x = pylab.arange(window_sz[1]) - pylab.floor(window_sz[1] / 2)
        # [rs, cs] = ndgrid(grid_x, grid_y)
        rs, cs = pylab.meshgrid(grid_x, grid_y)
        y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
        self.yf = pylab.fft2(y)
        # store pre-computed cosine window
        self.cos_window = pylab.outer(pylab.hanning(window_sz[0]),
                                      pylab.hanning(window_sz[1]))

        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(img, self.pos, window_sz, self.cos_window)
        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = self.dense_gauss_kernel(self.sigma, x)
        #storing computed alphaf and z for next frame iteration
        self.alphaf = pylab.divide(
            self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        self.z = x

        #monitoring the tracker's self status, based on the continuity of psr
        self.self_status = 0
        # monitoring the collaborative status, based on the distance to the
        # voted object bounding box center, and also on the PSR
        self.collaborate_status = 5

        self.collabor_container = np.ones((10, 1), int)
        self.highpsr_container = np.ones((10, 1), int)
        self.FourRecentRects = np.zeros((4, 4), float)
        #return initialization status
        return True
Example #21
def sumouter(us, vs, lo=-1.0, hi=1.0, out=None):
    # "out or zeros(...)" would fail for an ndarray `out` (ambiguous truth
    # value), so compare against None explicitly
    result = out if out is not None else zeros((len(us[0]), len(vs[0])))
    for u, v in zip(us, vs):
        result += outer(clip(u, lo, hi), v)
    return result
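Since a sum of outer products of stacked rows is a single matrix product, the loop is equivalent to one `dot` once the clipping is applied. A NumPy sketch (hypothetical `sumouter_fast`, assuming `us` and `vs` are 2-D arrays with a matching first dimension):

import numpy as np

def sumouter_fast(us, vs, lo=-1.0, hi=1.0):
    # sum_i outer(clip(us[i]), vs[i])  ==  clip(us).T @ vs
    return np.clip(us, lo, hi).T @ vs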
Example #22
def track(input_video_path):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # parameters according to the paper --
    padding = 1.0  # extra area surrounding the target
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    sigma = 0.2  # gaussian kernel bandwidth
    lambda_value = 1e-2  # regularization
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075

    info = load_video_info(input_video_path)
    img_files, pos, target_sz, \
        should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # [rs, cs] = ndgrid(grid_x, grid_y)
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    y = pylab.exp(-0.5 / output_sigma**2 * (rs**2 + cs**2))
    yf = pylab.fft2(y)
    # print("yf.shape ==", yf.shape)
    # print("y.shape ==", y.shape)

    # store pre-computed cosine window
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))

    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    global z, response
    z = None
    alphaf = None
    response = None

    for frame, image_filename in enumerate(img_files):

        if True and ((frame % 10) == 0):
            print("Processing frame", frame)

        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray(im)

        # print("Image max/min value==", im.max(), "/", im.min())

        if should_resize_image:
            im = scipy.misc.imresize(im, 0.5)

        start_time = time.time()

        # extract and pre-process subwindow
        x = get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)

        if not is_first_frame:
            # calculate response of the classifier at all locations
            k = dense_gauss_kernel(sigma, x, z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at current estimated target position,
        # to train classifier
        x = get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares,
        # calculate alphas (in Fourier domain)
        k = dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame, train with a single image
            alphaf = new_alphaf
            z = x
        else:
            # subsequent frames, interpolate model
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            z = (1 - f) * z + f * new_z
        # end "first frame or not"

        # save position and calculate FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualization
        plot_tracking(frame, pos, target_sz, im, ground_truth)
    # end of "for each image in video"

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precisions plot
        show_precision(positions, ground_truth, video_path, title)

    return
Example #23
def track(input_video_path, show_tracking):
    """
    notation: variables ending with f are in the frequency domain.
    """

    # extra area surrounding the target
    padding = 1.0
    # spatial bandwidth (proportional to target)
    output_sigma_factor = 1 / float(16)
    # gaussian kernel bandwidth
    sigma = 0.2
    # regularization coefficient
    lambda_value = 1e-2
    # linear interpolation factor for adaptation
    interpolation_factor = 0.075
    # load the video info: list of frames to test, the first-frame center [y, x] of
    # the target box, half the box height and width, whether to downscale images by
    # half, the per-frame ground truth, and the video path
    info = load_video_info.load_video_info(input_video_path)
    img_files, pos, target_sz, should_resize_image, ground_truth, video_path = info

    # window size, taking padding into account
    sz = pylab.floor(target_sz * (1 + padding))

    # desired output (gaussian shaped), bandwidth proportional to target size
    output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor
    # height and width coordinate lists, shifted so the origin is at the
    # center of the target box
    grid_y = pylab.arange(sz[0]) - pylab.floor(sz[0] / 2)
    grid_x = pylab.arange(sz[1]) - pylab.floor(sz[1] / 2)
    # turn the coordinate lists into coordinate matrices, i.e. grid the 2-D region
    rs, cs = pylab.meshgrid(grid_x, grid_y)
    # Eq. 19 in the paper: values in [0, 1], largest at the center and
    # falling off away from it
    y = pylab.exp((-0.5 / output_sigma ** 2) * (rs ** 2 + cs ** 2))
    # 2-D discrete Fourier transform
    yf = pylab.fft2(y)

    # build a Hanning (weighted cosine) window for the box height and another for
    # the box width; their outer product gives the 2-D cosine window for the box
    cos_window = pylab.outer(pylab.hanning(sz[0]), pylab.hanning(sz[1]))
    total_time = 0  # to calculate FPS
    positions = pylab.zeros((len(img_files), 2))  # to calculate precision

    # global z, response
    plot_tracking.z = None
    alphaf = None
    plot_tracking.response = None
    # iterate over the image filenames
    for frame, image_filename in enumerate(img_files):
        if (frame % 10) == 0:
            print("Processing frame", frame)
        # load image
        image_path = os.path.join(video_path, image_filename)
        im = pylab.imread(image_path)
        # convert color images to grayscale
        if len(im.shape) == 3 and im.shape[2] > 1:
            im = rgb2gray.rgb2gray(im)
        # 如果需要进行图像缩放,则缩放为原来一半
        if should_resize_image:
            im = np.array(Image.fromarray(im).resize((int(im.shape[0] / 2), int(im.shape[1] / 2))))

        # start timing
        start_time = time.time()

        # extract and pre-process the cosine-windowed subwindow
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        is_first_frame = (frame == 0)
        # if this is not the first frame, compute the classifier response
        if not is_first_frame:
            # calculate the response of the classifier at all locations
            k = dense_gauss_kernel.dense_gauss_kernel(sigma, x, plot_tracking.z)
            kf = pylab.fft2(k)
            alphaf_kf = pylab.multiply(alphaf, kf)
            plot_tracking.response = pylab.real(pylab.ifft2(alphaf_kf))  # Eq. 9

            # target location is at the maximum response
            r = plot_tracking.response
            row, col = pylab.unravel_index(r.argmax(), r.shape)
            pos = pos - pylab.floor(sz / 2) + [row, col]

            if debug:
                print("Frame ==", frame)
                print("Max response", r.max(), "at", [row, col])
                pylab.figure()
                pylab.imshow(cos_window)
                pylab.title("cos_window")

                pylab.figure()
                pylab.imshow(x)
                pylab.title("x")

                pylab.figure()
                pylab.imshow(plot_tracking.response)
                pylab.title("response")
                pylab.show(block=True)

        # end "if not first frame"

        # get subwindow at the current target position, to train the classifier
        x = get_subwindow.get_subwindow(im, pos, sz, cos_window)

        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = dense_gauss_kernel.dense_gauss_kernel(sigma, x)
        new_alphaf = pylab.divide(yf, (pylab.fft2(k) + lambda_value))  # Eq. 7
        new_z = x

        if is_first_frame:
            # first frame: train with a single image
            alphaf = new_alphaf
            plot_tracking.z = x
        else:
            # subsequent frames: interpolate the model parameters
            f = interpolation_factor
            alphaf = (1 - f) * alphaf + f * new_alphaf
            plot_tracking.z = (1 - f) * plot_tracking.z + f * new_z

        # save the current position and accumulate elapsed time for FPS
        positions[frame, :] = pos
        total_time += time.time() - start_time

        # visualize the tracking result
        if show_tracking == "yes":
            plot_tracking.plot_tracking(frame, pos, target_sz, im, ground_truth)

    if should_resize_image:
        positions = positions * 2

    print("Frames-per-second:", len(img_files) / total_time)

    title = os.path.basename(os.path.normpath(input_video_path))

    if len(ground_truth) > 0:
        # show the precision plot
        show_precision.show_precision(positions, ground_truth, title)
Example #24
   x = array([0.0, 0.0, 0.0, 0.7, 1.2, 0.9, 2.0, 2.0, 2.0])
   y = array([0.0, 0.6, 1.0, 0.0, 0.8, 1.0, 0.0, 0.5, 1.0])
   z = array([1.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 1.0, 0.0])
   
   triang = delaunay(x, y)[2]
   pwl = pwl_new(x, y, z, triang)
     
   print(abs(f(0.2, 0.7) - 5.0/36.0) <= 1.e-6)
     
   ion()
   figure()
   zc2 = mycontour(f, 0., 2., 0., 1.)
   savefig('wdi_fig_2.eps')

   ### Example 3
   x = reshape(outer(ones(5), arange(-1.5, 1.75, 0.25)), -1)
   y = reshape(outer(arange(-0.5, 0.75, 0.25), ones(13)), -1)

   def g(x, y): return ((x-1.0)**2+y*y)*((x+1.0)**2+y*y)+0.2*x+0.1*y

   figure()
   zc3 = mycontour(g, -1.5, 1.5, -0.5, 0.5)
   savefig('wdi_fig_3.eps')

   triang = delaunay(x, y)[2]
   pwl = pwl_new(x, y, g(x,y), triang)
   print(abs(f(-1.2, 0.2) - 0.2784375) <= 1.e-6)
   print(abs(f(0.7, -0.2) - 0.5371875) <= 1.e-6)

   figure()
   zc4 = mycontour(f, -1.5, 1.5, -0.5, 0.5)
Example #25
def intOverMassFn(beta, mExponent, karrayunnumed, h, whichp = 0, plot = 0, show = 0,colstr = ''):
    """
    Get an integral of, e.g., halo density profiles over the mass function.
    More automatic than generalIntOverMassFn.
    
    Computes I^beta_mu(k) (as in Cooray & Hu 2001), integrating over the halo mass function.
    beta: order of bias coefficient (b_beta in the integrand)
    mExponent: additional (over the power of m from the logarithmic integration)
                mass exponent (put mu here).
    karrayunnumed: Array of k indices to evaluate Fourier-transformed halo density profile
    h: halo model instance
    whichp: for now, 'gg' -> galaxy statistics.
                     'mm' -> matter stats.   Default: h.p.whichp

    """
    if whichp == 0:
        whichp = h.p.whichp

    karray = M.array(karrayunnumed)

    if len(karray) != mExponent:
        print "Warning! Different number of k indices than mExponent!"
    
    g = utils.HGQ(5) # 5-pt Hermite-Gauss quadrature class

    uM = h.fthp[0,:,:]*0. + 1.
    g_integrand = h.fthp[0,:,:]*0. + 1.
            
    if whichp == 'gg':
        """
        Right now, we've only implemented the Kravtsov-style 'satellite' halo occupation distribution,
        i.e. the HOD excluding the central galaxy.  For example, in Smith, Watts & Sheth 2006, sect. 5.4, the
        halo integrands simplify in the case of a satellite HOD in eq. 72 (one-halo P_gg) to
        <Ns(Ns-1)> |u(k)|^2 + 2 <Ns> u(k).  This second term includes the contribution of the central galaxy.
        In general, using SWS's notation, the n-galaxy convolution kernel (e.g. eq. 76) is

        W^ng(k_0, ..., k_n) = <Ns(Ns-1)...(Ns-n)> u(k_0)...u(k_n) + Sum_i <Ns...(Ns-(n-1))> u(k_0)...u(k_n)/u(k_i).
        """                                                   
        
        hodNsMomentM = M.outer(hodNsMoment(mExponent,h),1.*M.ones(5))
        hodNsMomentMminus1 = M.outer(hodNsMoment(mExponent-1,h),1.*M.ones(5))
        
        # Leading term in mExponent; fthp(k)^mExponent
        for ki in karray:
            uM *= h.fthp[ki,:,:]

        g_integrand = hodNsMomentM*uM*1.
        
        #mExponent * fthp^(mExponent-1) term
        for ki in karray:
            g_integrand += hodNsMomentMminus1 * uM/h.fthp[ki,:,:]
            
        extra_integrand = g.int(g_integrand)
                

    elif whichp == 'mm':
        for ki in karray:
            g_integrand *= h.fthp[ki,:,:]
        
        extra_integrand = h.m**(mExponent)* g.int(g_integrand)

    integrand = h.b[beta,:]*h.m*h.nmz * extra_integrand

    if plot == 1:
        M.loglog(h.m[::10],integrand[::10],colstr)
        #M.semilogx(h.m,integrand)

    if show == 1:
        M.show()
        
    ans = utils.simpsonRule(integrand[:h.integratetoindex], h.dlogm)
    if whichp == 'gg':
        ans /= h.ngalbar**mExponent
        
    return ans
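In Cooray & Hu's notation, the quantity assembled here is, schematically (up to normalization conventions and the HOD substitutions described in the docstring),

\[
I^{\beta}_{\mu}(k_1,\ldots,k_\mu)
= \int dm\; n(m)\, b_{\beta}(m) \left(\frac{m}{\bar{\rho}}\right)^{\mu}
u(k_1\mid m)\cdots u(k_\mu\mid m),
\]

with b_0 ≡ 1 and u the Fourier-transformed halo profile (`h.fthp`).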
Example #26
    ys[ys < 0] = 0
    ys[ys >= im.shape[0]] = im.shape[0] - 1
    xs[xs < 0] = 0
    xs[xs >= im.shape[1]] = im.shape[1] - 1
    # extract the image patch cut out by the subwindow
    out = im[pylab.ix_(ys, xs)]
    # shift pixel values from [0, 1] to [-0.5, 0.5]
    out = out.astype(pylab.float64) - 0.5
    # apply the cosine window, Eq. 18 in the paper

    return pylab.multiply(cos_window, out)


if __name__ == '__main__':
    image_path = r'..\data\surfer\imgs'
    image_list = os.listdir(image_path)
    image = os.path.join(image_path, image_list[0])
    img = mpimg.imread(image)
    gray = rgb2gray.rgb2gray(rgb_image=img)
    position = np.array([152., 286.])
    size = np.array([35., 32.])
    cos_window = pylab.outer(pylab.hanning(size[0]), pylab.hanning(size[1]))
    result = get_subwindow(im=gray,
                           pos=position,
                           sz=size,
                           cos_window=cos_window)
    print(pylab.hanning(size[0]))
    print(cos_window)
    plt.imshow(result)
    plt.show()
Example #27
    def init(self, img, rect ):
        im_width = img.shape[1]
        im_height = img.shape[0]
        ys = pylab.floor(rect[1]) + pylab.arange(rect[3], dtype=int)
        xs = pylab.floor(rect[0]) + pylab.arange(rect[2], dtype=int)
        ys = ys.astype(int)
        xs = xs.astype(int)
        # check for out-of-bounds coordinates,
        # and set them to the values at the borders
        ys[ys < 0] = 0
        ys[ys >= img.shape[0]] = img.shape[0] - 1

        xs[xs < 0] = 0
        xs[xs >= img.shape[1]] = img.shape[1] - 1
        roi = self.get_imageROI(img, rect)

        self.init_frame = img.copy()
        self.canvas     = img.copy()
        # pos is the center position of the tracked object (cy, cx)
        pos = pylab.array([rect[1] + rect[3]/2, rect[0] + rect[2]/2])
        self.pos_list   = [pos]
        self.roi_list   = [roi]
        self.rect_list  = [rect]
        self.trackNo    = 0
        # parameters according to the paper --

        padding = 1.0  # extra area surrounding the target (window enlargement factor; doubles the window by default)
        # spatial bandwidth (proportional to target)
        output_sigma_factor = 1 / float(16)
        self.sigma = 0.2  # gaussian kernel bandwidth
        self.lambda_value = 1e-2  # regularization
        # linear interpolation factor for adaptation
        #self.interpolation_factor = 0.075
        self.interpolation_factor = 0.01

        self.scale_ratios = [0.985, 0.99, 0.995, 1.0, 1.005, 1.01, 1.015]


        # target_sz equals [rect[3], rect[2]]
        target_sz = pylab.array([int(rect[3]), int(rect[2])])
        # window size(Extended window size), taking padding into account
        window_sz = pylab.floor(target_sz * (1 + padding))

        self.window_sz = window_sz
        self.window_sz_new = window_sz
        self.target_sz = target_sz

        # desired output (gaussian shaped), bandwidth proportional to target size
        output_sigma = pylab.sqrt(pylab.prod(target_sz)) * output_sigma_factor

        grid_y = pylab.arange(window_sz[0]) - pylab.floor(window_sz[0] / 2)
        grid_x = pylab.arange(window_sz[1]) - pylab.floor(window_sz[1] / 2)
        # [rs, cs] = ndgrid(grid_x, grid_y)
        rs, cs = pylab.meshgrid(grid_x, grid_y)
        y = pylab.exp(-0.5 / output_sigma ** 2 * (rs ** 2 + cs ** 2))
        self.yf= pylab.fft2(y)
        # store pre-computed cosine window
        self.cos_window = pylab.outer(pylab.hanning(window_sz[0]), pylab.hanning(window_sz[1]))


        # get subwindow at current estimated target position, to train classifier
        x = self.get_subwindow(img, pos, window_sz)
        # Kernel Regularized Least-Squares, calculate alphas (in Fourier domain)
        k = self.dense_gauss_kernel(self.sigma, x)
        #storing computed alphaf and z for next frame iteration
        self.alphaf = pylab.divide(self.yf, (pylab.fft2(k) + self.lambda_value))  # Eq. 7
        self.z = x

        #return initialization status
        return True