Example #1
import math

import numpy

import mystats  # project-local statistics helpers; a minimal sketch follows this example

def samp_start_path(z1,edge_pos,v):

# [edgeidx,z] = samp_start_path(z1,edge_pos,v)
# Samples a starting edge and a position along it
# Inputs:
# z1- 2-element measurement list or array
# edge_pos- candidate edges (a list of arrays containing the positions of the edge nodes)
# v- measurement noise variance
# Outputs:
# edgeidx- index of sampled edge (integer)
# z- distance along edge (float)

    nedge = len(edge_pos)
    
    x1 = z1[0]
    y1 = z1[1]
    sqv = math.sqrt(v)
    
    mui = numpy.zeros((nedge))
    elli = numpy.zeros((nedge))
    qrho = numpy.zeros((nedge))
    # Compute weights for each edge
    for i in range(nedge):
        xi = edge_pos[i][0,0]
        yi = edge_pos[i][1,0]
        xe = edge_pos[i][0,1]
        ye = edge_pos[i][1,1]
        thi = math.atan2(ye-yi,xe-xi)
        cth = math.cos(thi)
        sth = math.sin(thi)
        elli[i] = math.sqrt((xe-xi)**2+(ye-yi)**2)
        mui[i] = (x1-xi)*cth+(y1-yi)*sth
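        # mui is the projection of the measurement onto the edge direction,
        # i.e. the mean of the along-edge position posterior N(mui, v);
        # P below is that posterior's probability mass on the edge, [0, elli]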
        P = mystats.normprob(mui[i],sqv,0,elli[i])
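        # npdf is the density of the measurement's offset perpendicular to the
        # edge, written in whichever form avoids dividing by a near-zero
        # cosine or sine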
        if abs(cth)>1e-10:
            npdf = mystats.normpdf(y1-yi,(x1-xi)*sth/cth,sqv/abs(cth))/abs(cth)
        else:
            npdf = mystats.normpdf(x1-xi,(y1-yi)*cth/sth,sqv/abs(sth))/abs(sth)
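        # qrho is the marginal likelihood of the measurement given edge i,
        # under a uniform prior on the position along the edge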
        qrho[i] = P*npdf/elli[i]
    # Normalise weights
    w = sum(qrho)
    qrho = qrho/w
    # Draw an edge
    idx = mystats.drawmultinom(qrho)
    # Draw the distance along the edge
    z = mystats.tnorm(mui[idx],v,0,elli[idx])

    return idx, z
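
# The mystats module used throughout these examples is project-local and not
# shown. Below is a minimal sketch of the four helpers the call sites rely
# on, assuming normpdf(x, m, sd) and normprob(m, sd, a, b) take a standard
# deviation while tnorm(m, var, a, b) takes a variance, which is how they are
# invoked above; this is an illustration under those assumptions, not the
# project's actual implementation.

from statistics import NormalDist

def normpdf(x, m, sd):
    # Normal density at x with mean m and standard deviation sd
    return math.exp(-0.5*((x - m)/sd)**2)/(sd*math.sqrt(2.0*math.pi))

def normprob(m, sd, a, b):
    # P(a <= X <= b) for X ~ N(m, sd^2)
    nd = NormalDist(m, sd)
    return nd.cdf(b) - nd.cdf(a)

def drawmultinom(w):
    # Draw an index from the categorical distribution with (possibly
    # unnormalised) non-negative weights w
    c = numpy.cumsum(w)
    return int(numpy.searchsorted(c, numpy.random.rand()*c[-1]))

def tnorm(m, var, a, b):
    # Inverse-CDF draw from N(m, var) truncated to [a, b]; note it takes a
    # variance, unlike normpdf/normprob, matching the call sites above
    nd = NormalDist(m, math.sqrt(var))
    fa, fb = nd.cdf(a), nd.cdf(b)
    return nd.inv_cdf(fa + (fb - fa)*numpy.random.rand())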
Example #2
import math

import numpy

import mystats  # project-local statistics helpers (see the sketch after Example #1)

# lognormpdf is a further module-level helper; a sketch follows this example

def draw_location(t,path_pos,sk,segk,skm1,segkm1,tk,tkm1,vmean,vvar):
    
# [ssamp,segidx,logw] = draw_location(t,path_pos,sk,segk,skm1,segkm1,tk,tkm1,vmean,vvar)
# Sample a location between two points on the network
# Inputs:
# t- time at which sample is being drawn (float)
# path_pos- array of positions of path nodes (2 x q array of floats)
# (sk,segk)- position at time tk>t (float, integer)
# (skm1,segkm1)- position at time tkm1<t (float, integer)
# tk- next time (float)
# tkm1- previous time (float)
# (vmean,vvar)- statistics of vehicle movement (floats)
# Outputs:
# ssamp- sample position on edge (float)
# segidx- index of sample along path (integer)
# logw- sample weight (float)
    

    # Pre-allocate
    nsegs = segk-segkm1+1 # This is the number of segments between the bracketing positions
    Tbar = numpy.zeros((nsegs))
    kap = numpy.zeros((nsegs))
    ell = numpy.zeros((nsegs)) # total length of each segment
    lb = numpy.zeros((nsegs))
    ub = numpy.zeros((nsegs))
    dist_cs = numpy.zeros((nsegs))
    
    totell = 0
    # For each segment, compute its length, mean duration and duration variance
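    # Under a speed with mean vmean and variance vvar, a segment of length ell
    # takes time T = ell/v, with approximate (delta-method) mean ell/vmean and
    # variance ell^2*vvar/vmean^4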
    if nsegs==1:
        pos1 = path_pos[:,segkm1]
        pos2 = path_pos[:,segkm1+1]
        ell[0] = numpy.linalg.norm(pos1-pos2)
        Tbar[0] = ell[0]/vmean
        kap[0] = ell[0]*ell[0]*vvar/(vmean**4)
        lb[0] = skm1
        ub[0] = sk
        totell = sk-skm1
        dist_cs[0] = totell
    else:
        pos1 = path_pos[:,segkm1]
        pos2 = path_pos[:,segkm1+1]
        ell[0] = numpy.linalg.norm(pos1-pos2)
        Tbar[0] = ell[0]/vmean
        kap[0] = ell[0]*ell[0]*vvar/(vmean**4)
        lb[0] = skm1
        ub[0] = ell[0]
        totell = ell[0]-skm1
        dist_cs[0] = totell
        for j in range(1,nsegs-1):
            pos1 = path_pos[:,segkm1+j]
            pos2 = path_pos[:,segkm1+j+1]
            ell[j] = numpy.linalg.norm(pos1-pos2)
            Tbar[j] = ell[j]/vmean
            kap[j] = ell[j]*ell[j]*vvar/(vmean**4)
            lb[j] = 0
            ub[j] = ell[j]
            totell = totell+ell[j]
            dist_cs[j] = dist_cs[j-1]+ell[j]
        j = nsegs-1
        # Last segment: segment segk runs between path nodes segk and segk+1
        pos1 = path_pos[:,segk]
        pos2 = path_pos[:,segk+1]
        ell[j] = numpy.linalg.norm(pos1-pos2)
        Tbar[j] = ell[j]/vmean
        kap[j] = ell[j]*ell[j]*vvar/(vmean**4)
        lb[j] = 0
        ub[j] = sk
        totell = totell+sk
        dist_cs[j] = dist_cs[j-1]+sk
    
    
    shat = numpy.zeros((nsegs))
    lam = numpy.zeros((nsegs))
    etilde = numpy.zeros((nsegs))
    p2 = numpy.zeros((nsegs))
    xi1 = numpy.zeros((nsegs))
    xi2 = numpy.zeros((nsegs))
    a1 = numpy.zeros((nsegs))
    a2 = numpy.zeros((nsegs))
    Ttilde1 = numpy.zeros((nsegs))
    Ttilde2 = numpy.zeros((nsegs))
    scfact = numpy.zeros(nsegs)
    scfact[0] = (ell[0]-skm1)*(ell[0]-skm1)/(ell[0]*ell[0])
    for b in range(1,nsegs-1):
        scfact[b] = 1.
    scfact[nsegs-1] = sk*sk/(ell[nsegs-1]*ell[nsegs-1])
    
    # Compute the weights and sampling density for each segment
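    # Conditional on the position z lying in segment b, tk-t and t-tkm1 are
    # treated as Gaussian observations of z with means Ttilde1+a1*z and
    # Ttilde2+a2*z and variances xi1 and xi2; shat and lam are then the
    # posterior mean and variance of z, and etilde the segment's evidence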
    # First segment
    a1[0] = -Tbar[0]/ell[0]
    Ttilde1[0] = sk*Tbar[nsegs-1]/ell[nsegs-1]
    for b in range(0,nsegs-1):
        Ttilde1[0] += Tbar[b]
    xi1[0] = kap[0]
    for b in range(1,nsegs):
        xi1[0] += scfact[b]*kap[b]
    a2[0] = Tbar[0]/ell[0]
    Ttilde2[0] = -skm1*Tbar[0]/ell[0]
    xi2[0] = kap[0]
    db = (a1[0]*a1[0]*xi2[0]+a2[0]*a2[0]*xi1[0])
    shat[0] = (a2[0]*xi1[0]*(t-tkm1-Ttilde2[0])+a1[0]*xi2[0]*(tk-t-Ttilde1[0]))/db
    lam[0] = xi1[0]*xi2[0]/db
    p1 = mystats.normpdf(a2[0]*(tk-t-Ttilde1[0]),a1[0]*(t-tkm1-Ttilde2[0]),math.sqrt(db))
    p2[0] = mystats.normprob(shat[0],math.sqrt(lam[0]),lb[0],ub[0])
    etilde[0] = p1*p2[0]
    for b in range(1,nsegs):
        a1[b] = -Tbar[b]/ell[b]
        a2[b] = Tbar[b]/ell[b]
        Ttilde2[b] = Ttilde2[b-1]+Tbar[b-1]
        Ttilde1[b] = Ttilde1[b-1]-Tbar[b-1]
        xi2[b] = xi2[b-1]+(scfact[b-1]-1)*kap[b-1]+kap[b]
        xi1[b] = xi1[b-1]-kap[b-1]+(1-scfact[b])*kap[b]
        db = (a1[b]*a1[b]*xi2[b]+a2[b]*a2[b]*xi1[b])
        shat[b] = (a2[b]*xi1[b]*(t-tkm1-Ttilde2[b])+a1[b]*xi2[b]*(tk-t-Ttilde1[b]))/db
        lam[b] = xi1[b]*xi2[b]/db
        p1 = mystats.normpdf(a2[b]*(tk-t-Ttilde1[b]),a1[b]*(t-tkm1-Ttilde2[b]),math.sqrt(db))
        p2[b] = mystats.normprob(shat[b],math.sqrt(lam[b]),lb[b],ub[b])
        etilde[b] = p1*p2[b]

    # Normalise weights
    e = etilde/numpy.sum(etilde)
    # Draw a segment
    segidx = mystats.drawmultinom(e)
    # Draw a position in the segment, using the same truncation bounds as the
    # segment weight p2 (this also covers nsegs==1, where the position is
    # bounded below by skm1 and above by sk)
    z = mystats.tnorm(shat[segidx],lam[segidx],lb[segidx],ub[segidx])
    
    # Sample weight (allow for approximate prior)
    T1 = Ttilde1[segidx]+a1[segidx]*z
    T2 = Ttilde2[segidx]+a2[segidx]*z
    kap2 = 0
    for b in range(0,segidx):
        kap2 = kap2+(ub[b]-lb[b])*(ub[b]-lb[b])*kap[b]/(ell[b]*ell[b])
    kap2 = kap2+(z-lb[segidx])*(z-lb[segidx])*kap[segidx]/(ell[segidx]*ell[segidx])
    kap1 = (ub[segidx]-z)*(ub[segidx]-z)*kap[segidx]/(ell[segidx]*ell[segidx])
    for b in range(segidx+1,nsegs):
        kap1 = kap1+(ub[b]-lb[b])*(ub[b]-lb[b])*kap[b]/(ell[b]*ell[b])
    logw = lognormpdf(tk-t,T1,kap1)+lognormpdf(t-tkm1,T2,kap2)
    logw = logw-(lognormpdf(tk-t,T1,xi1[segidx])+lognormpdf(t-tkm1,T2,xi2[segidx]))

    return z, segidx+segkm1, logw
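
# draw_location calls a module-level lognormpdf that is not shown in these
# examples; given how it is invoked above (kap1 and kap2 are variances), a
# minimal sketch would be:

def lognormpdf(x, m, var):
    # Log of the normal density at x with mean m and variance var
    return -0.5*math.log(2.0*math.pi*var) - 0.5*(x - m)**2/var

# Hypothetical smoke test, with all numbers made up for illustration: a
# straight three-node path, the vehicle 40 m along segment 0 at t=10 s and
# 60 m along segment 1 at t=30 s; draw its position at t=20 s.
path_pos = numpy.array([[0., 100., 200.],
                        [0.,   0.,   0.]])
ssamp, segidx, logw = draw_location(20., path_pos, 60., 1, 40., 0,
                                    30., 10., 5., 1.)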
Example #3
import math

import numpy

import case1    # project-local module providing calcpostprobs_case2
import mystats  # project-local statistics helpers (see the sketch after Example #1)

# calcinter also calls draw_location from Example #2

def calcinter(G,z1,t1,z2,t2,n,n2,K,tax,ngrid,vmean,vvar,ups):
    
# [H,P1,P2,s1,eidx1] = calcinter(G,z1,t1,z2,t2,n,n2,K,tax,ngrid,vmean,vvar,ups)
# Calculates the Hellinger affinity between the posterior position densities of two
# objects at specified times
# Inputs:
# G- road network
# z1- m1-length list of position measurements for object 1 (list of list of floats)
# t1- list of measurement times for object 1 (list of floats)
# z2- m2-length list of position measurements for object 2 (list of list of floats)
# t2- m2-length list of measurement times for object 2 (list of floats)
# n- sample size for posterior density approximation (integer)
# n2- sample size for Hellinger affinity approximation (integer)
# K- number of candidate paths (integer)
# tax- times at which to compute Hellinger affinity (list of floats)
# ngrid- number of intervals used for numerical approximation to Hellinger affinity
# (vmean,vvar)- statistics of vehicle movement (floats)
# ups- measurement noise variance (float)
# Outputs:
# H- Hellinger affinity at each pair of times in tax (ntax x ntax array of floats)
# P1- position probabilities for object 1 in each graph interval (ngrid x ntax 
# array of floats)
# P2- position probabilities for object 2 in each graph interval (ngrid x ntax 
# array of floats)
# s1- sampled positions for object 1 at each time (ntax-length list of n2-length 
# arrays of floats)
# eidx1- sampled edges for object 1 at each time (ntax-length list of n2-length 
# lists of integers)
    
    # Sample paths and positions for both objects
    [pp1,ppos1,wp1,segp1,sp1] = case1.calcpostprobs_case2(G,z1,t1,n,K,vmean,vvar,ups)
    [pp2,ppos2,wp2,segp2,sp2] = case1.calcpostprobs_case2(G,z2,t2,n,K,vmean,vvar,ups)
    #w1cdf = numpy.cumsum(wp1)
    #w2cdf = numpy.cumsum(wp2)    
    
    # Pre-compute cumulative length along the network edges
    # This is used for probability calculations for graph intervals
    e = list(G.edges())  # as a list, so that e[i] and e.index() work
    nedges = len(e)
    ell = numpy.zeros(nedges)
    ellcum = numpy.zeros(nedges)
    i = 0
    pos0 = G.node[e[i][0]]["pos"]
    pos1 = G.node[e[i][1]]["pos"]
    ell[0] = numpy.linalg.norm(pos1-pos0)
    ellcum[0] = 0.
    for i in range(1,nedges):
        pos0 = G.node[e[i][0]]["pos"]
        pos1 = G.node[e[i][1]]["pos"]
        ell[i] = numpy.linalg.norm(pos1-pos0)
        ellcum[i] = ellcum[i-1]+ell[i-1]
    totell = ellcum[nedges-1]+ell[nedges-1]
    
    # Grid size
    gsize = totell/ngrid

    # Pre-allocate required quantities
    nt = len(tax)
    s1 = [numpy.zeros(n2) for j in range(nt)]
    eidx1 = [[0 for i in range(n2)] for j in range(nt)]
    w1 = [[] for j in range(nt)]
    lw1 = numpy.zeros(n2)
    gpos1 = numpy.zeros(n2)
    w1tilde = numpy.zeros(n2)
    e1 = [[0 for i in range(2)] for j in range(n2)]
    s2 = numpy.zeros(n2)
    lw2 = numpy.zeros(n2)
    gpos2 = numpy.zeros(n2)
    w2tilde = numpy.zeros(n2)
    e2 = [[0 for i in range(2)] for j in range(n2)]
    H = numpy.zeros((nt,nt))
    P1 = numpy.zeros((ngrid,nt))
    P2 = numpy.zeros((ngrid,nt))
    # Loop over times at which to compute the Hellinger affinity
    # Here the position probabilities are calculated for each time
    for j in range(nt):
        t = tax[j]
        # Find the bracketing measurement times for this time (assumes each t
        # in tax lies strictly within the span of the measurement times)
        k1 = 0
        while t1[k1]<t:
            k1 += 1
        t1k = t1[k1]
        t1km1 = t1[k1-1]
        k2 = 0
        while t2[k2]<t:
            k2 += 1
        t2k = t2[k2]
        t2km1 = t2[k2-1]
        maxw1 = -1e100
        maxw2 = -1e100
        # Draw sample positions for each object at this time
        for i in range(n2):
            # Sample a position for object 1
            idx1 = mystats.drawmultinom(wp1)
            seg1k = segp1[k1][idx1]
            s1k = sp1[k1,idx1]
            seg1km1 = segp1[k1-1][idx1]
            s1km1 = sp1[k1-1,idx1]
            [s1[j][i],seg1,lw1[i]] = draw_location(t,ppos1[idx1],s1k,seg1k,s1km1,seg1km1,t1k,t1km1,vmean,vvar)
            e1[i][0] = pp1[idx1][seg1]
            e1[i][1] = pp1[idx1][seg1+1]
            # Find the edge index and convert to a position along the line 
            # representation of the network
            if tuple(e1[i]) in e:
                eidx1[j][i] = e.index(tuple(e1[i]))
                gpos1[i] = ellcum[eidx1[j][i]]+s1[j][i]
            else:
                eidx1[j][i] = e.index(tuple([e1[i][1],e1[i][0]]))
                gpos1[i] = ellcum[eidx1[j][i]]+ell[eidx1[j][i]]-s1[j][i]
            if lw1[i]>maxw1:
                maxw1 = lw1[i]
            # Do the same for object 2            
            idx2 = mystats.drawmultinom(wp2)
            seg2k = segp2[k2][idx2]
            s2k = sp2[k2,idx2]
            seg2km1 = segp2[k2-1][idx2]
            s2km1 = sp2[k2-1,idx2]
            [s2[i],seg2,lw2[i]] = draw_location(t,ppos2[idx2],s2k,seg2k,s2km1,seg2km1,t2k,t2km1,vmean,vvar)
            e2[i][0] = pp2[idx2][seg2]
            e2[i][1] = pp2[idx2][seg2+1]
            if tuple(e2[i]) in e:
                eidx2 = e.index(tuple(e2[i]))
                gpos2[i] = ellcum[eidx2]+s2[i]
            else:
                eidx2 = e.index(tuple([e2[i][1],e2[i][0]]))
                gpos2[i] = ellcum[eidx2]+ell[eidx2]-s2[i]
            if lw2[i]>maxw2:
                maxw2 = lw2[i]
        # Normalise the weights
        for i in range(n2):
            w1tilde[i] = math.exp(lw1[i]-maxw1)
            w2tilde[i] = math.exp(lw2[i]-maxw2)    
        w1[j] = w1tilde/sum(w1tilde)
        w2 = w2tilde/sum(w2tilde)
        # Compute position probabilities for each interval, clamping so that a
        # sample at the very end of the network falls in the last interval
        for i in range(n2):
            idx = min(int(math.floor(gpos1[i]/gsize)),ngrid-1)
            P1[idx,j] += w1[j][i]
            idx = min(int(math.floor(gpos2[i]/gsize)),ngrid-1)
            P2[idx,j] += w2[i]
    
    # Compute Hellinger affinity for each pair of times
    for j1 in range(nt):
        for j2 in range(nt):
            for k in range(ngrid):
                H[j1,j2] += math.sqrt(P1[k,j1]*P2[k,j2])
                
    
    return H, P1, P2, s1, eidx1
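
# calcinter expects G to be a networkx graph whose nodes carry 2-D "pos"
# arrays; the G.node[...] attribute access above is the networkx 1.x API
# (networkx 2.x renamed it to G.nodes[...]). A hypothetical toy network in
# the expected format:

import networkx

G = networkx.Graph()
G.add_node(0, pos=numpy.array([0., 0.]))
G.add_node(1, pos=numpy.array([100., 0.]))
G.add_node(2, pos=numpy.array([100., 100.]))
G.add_edge(0, 1)
G.add_edge(1, 2)

# The final triple loop computes the Hellinger affinity between the gridded
# position histograms, H[j1,j2] = sum_k sqrt(P1[k,j1]*P2[k,j2]); it could
# equivalently be written in one vectorised step as
# H = numpy.sqrt(P1).T.dot(numpy.sqrt(P2))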
Example #4
import math

import numpy

import mystats  # project-local statistics helpers (see the sketch after Example #1)

def samp_path(z,t,cand_paths,turnpen,s0,v,vmean,vvar):

# [pathidx,segidx,ssamp,logwt,isvalid] = samp_path(z,t,cand_paths,turnpen,s0,v,vmean,vvar)
# Samples a path extension
# Inputs:
# z- 2-element measurement list or array of floats
# t- duration since last measurement (float)
# cand_paths- candidate paths (a list of arrays containing the positions of each node on the path)
# turnpen- turn penalty along each path (list of floats)
# s0- starting position along the first segment of each candidate path, i.e. in the final segment of the path so far (list of floats)
# v- measurement noise variance (float)
# (vmean,vvar)- statistics of vehicle movement (floats)
# Outputs:
# pathidx- index of sampled path (integer)
# segidx- segment along path in which the vehicle lies (integer)
# ssamp- distance along segment (float)
# logwt- log of sample weight (float)
# isvalid- 1 if a valid path was sampled, 0 if all candidates had zero weight (integer)

    npaths = len(cand_paths)
    sqv = math.sqrt(v)
    eps = numpy.spacing(1)
    
    # Pre-allocate
    x = z[0]
    y = z[1]
    gamma = numpy.zeros(npaths)
    C1 = numpy.zeros(npaths)
    pathprior = numpy.zeros(npaths)
    nu = numpy.zeros(npaths)
    zeta = numpy.zeros(npaths)
    ell = [None for i in range(npaths)]  # per-path arrays of segment lengths
    nsegs = [0 for i in range(npaths)]
    lb = [0 for i in range(npaths)]
    # Compute the weight of each path
    for i in range(npaths):
        sz_path = cand_paths[i].shape
        nsegs[i] = sz_path[1]-1
        if nsegs[i]==1:
            lb[i] = s0[i]
        else:
            lb[i] = 0.
        ell[i] = numpy.zeros(nsegs[i])
        # Calculate prior mean and variance
        # First segment (account for s0)
        pos1 = cand_paths[i][:,0]
        pos2 = cand_paths[i][:,1]
        ell[i][0] = numpy.linalg.norm(pos1-pos2)
        Tbarj = ell[i][0]/vmean
        kapj = ell[i][0]*ell[i][0]*vvar/(vmean**4)
        Ttilde = -s0[i]*Tbarj/ell[i][0]
        kaptilde = 0.
        c = (ell[i][0]-s0[i])/ell[i][0]
        mean_term = Tbarj
        var_term = c*c*kapj
        for j in range(1,nsegs[i]):
            pos1 = cand_paths[i][:,j]
            pos2 = cand_paths[i][:,j+1]
            ell[i][j] = numpy.linalg.norm(pos1-pos2)
            Ttilde += mean_term
            kaptilde += var_term
            Tbarj = ell[i][j]/vmean
            kapj = ell[i][j]*ell[i][j]*vvar/(vmean**4)
            mean_term = Tbarj
            var_term = kapj
        # Compute location sampling density and path weight
        xi = pos1[0]
        yi = pos1[1]
        xe = pos2[0]
        ye = pos2[1]
        thi = math.atan2(ye-yi,xe-xi)
        cth = math.cos(thi)
        sth = math.sin(thi)
        mu = (x-xi)*cth+(y-yi)*sth
        alph = ell[i][nsegs[i]-1]*(t-Ttilde)/Tbarj
        lam = ell[i][nsegs[i]-1]*ell[i][nsegs[i]-1]*(kaptilde+kapj)/(Tbarj*Tbarj)
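        # Combine the along-segment prior N(alph, lam) with the projected
        # measurement N(mu, v); nu and zeta are the posterior mean and variance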
        nu[i] = (v*alph+lam*mu)/(v+lam)
        zeta[i] = lam*v/(v+lam)
        if abs(cth)>1e-10:
            npdf1 = mystats.normpdf(y-yi,(x-xi)*sth/cth,sqv/abs(cth))/abs(cth)
        else:
            npdf1 = mystats.normpdf(x-xi,(y-yi)*cth/sth,sqv/abs(sth))/abs(sth)
        npdf2 = mystats.normpdf(mu,alph,math.sqrt(v+lam))
        sqzeta = math.sqrt(zeta[i])
        C2 = mystats.normprob(nu[i],sqzeta,lb[i],ell[i][nsegs[i]-1])
        sqlam = math.sqrt(lam)
        C1[i] = mystats.normprob(alph,sqlam,lb[i],ell[i][nsegs[i]-1])
        pathprior[i] = 1/(numpy.sum(ell[i])+turnpen[i])
        if npdf2>eps:
            gamma[i] = npdf1*npdf2*C2/C1[i]
        else:
            gamma[i] = 0.
    # Normalise path weights
    pathprior = pathprior/numpy.sum(pathprior)
    qpath = gamma*pathprior
    wt = numpy.sum(qpath)
    if wt<eps:
        # Ignore zero weighted paths
        pidx = 0
        segidx = 0
        ssamp = 0.
        logwt = -1e10
        isvalid = 0
        return pidx, segidx, ssamp, logwt, isvalid
    # If we get here we can sample a valid path
    isvalid = 1
    qpath = qpath/wt
    # Sample a path
    pidx = mystats.drawmultinom(qpath)
    segidx = nsegs[pidx]-1
    # Now sample a position
    if nsegs[pidx]==1:
        ssamp = mystats.tnorm(nu[pidx],zeta[pidx],s0[pidx],ell[pidx][nsegs[pidx]-1])
    else:
        ssamp = mystats.tnorm(nu[pidx],zeta[pidx],0,ell[pidx][nsegs[pidx]-1])
    # Calculate sample weight
    logwt = math.log(wt)+mystats.calc_samp_wt(t,ell[pidx],s0[pidx],ssamp,lb[pidx],C1[pidx],vmean,vvar)
    

    return pidx, segidx, ssamp, logwt, isvalid
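
# Hypothetical usage, with all numbers made up for illustration: a single
# two-segment candidate path, the vehicle having started 10 m into its first
# segment, and a measurement arriving 20 s later. mystats.calc_samp_wt is a
# further project-local helper (the exact weight correction for this movement
# model) and is not sketched in these notes.
cand = [numpy.array([[0., 100., 200.],
                     [0.,   0.,   0.]])]
pidx, segidx, ssamp, logwt, isvalid = samp_path([150., 3.], 20., cand,
                                                [0.], [10.], 4., 5., 1.)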