Example 1
import logging

import numpy as np

# tj is assumed to be the project's trajectory/shooting module (its import is not shown here)

def genStateData(fstate, sim):
    logging.info("generating state data for optimization result")

    tj.DIM = DIM = sim['DIM']
    tj.N = N = sim['N']
    tj.SIGMA = SIGMA = sim['SIGMA']
    order = sim['order']
    
    if order < 1:
        fstate = np.append(fstate, np.zeros(N*DIM**2)) # append mu_1
    if order < 2:
        fstate = np.append(fstate, np.zeros(N*DIM*tj.triuDim())) # append mu_2
    fstate = tj.triangular_to_state(fstate)

    (t_span, y_span) = tj.integrate( fstate )
    
    # save result
    np.save('output/state_data',y_span)
    np.save('output/time_data',t_span)
    np.save('output/setup',[N,DIM,SIGMA])
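
The arrays written by genStateData can be loaded back for plotting or further analysis. A minimal sketch, assuming the files saved above exist under output/:

import numpy as np

# minimal sketch: reload the arrays saved by genStateData above
y_span = np.load('output/state_data.npy')    # integrated state at each time step
t_span = np.load('output/time_data.npy')     # corresponding time points
N, DIM, SIGMA = np.load('output/setup.npy')  # problem size and kernel width

print("loaded %d time steps for N=%d points in DIM=%d" % (len(t_span), int(N), int(DIM)))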
Example 2
import logging

import numpy as np
from scipy.optimize import approx_fprime

# tj is assumed to be the project's trajectory/shooting module (its import is not shown here)

def F(sim, nonmoving, x, weights=None, adjint=True, order=2, scalegrad=None, simGradCheck=False, energyGradCheck=False, visualize=False):
    """
    Function that scipy's optimize function will call for returning the value 
    and gradient for a given x. The forward and adjoint integration is called 
    from this function using the values supplied by the similarity measure.
    """

    N = sim['N']
    DIM = sim['DIM']

    i = 0
    q = np.reshape( nonmoving[i:(i+N*DIM)] , [N,DIM] )
    tj.gaussian.N = N
    tj.gaussian.DIM = DIM
    tj.gaussian.SIGMA = tj.SIGMA
    K,DK,D2K,D3K,D4K,D5K,D6K = tj.derivatives_of_kernel(q,q)

    # input
    state0 = np.append(nonmoving, x)
    if order < 1:
        state0 = np.append(state0, np.zeros(N*DIM**2)) # append mu_1
    if order < 2:
        state0 = np.append(state0, np.zeros(N*DIM*tj.triuDim())) # append mu_2
    # shift from triangular to symmetric
    state0 = tj.triangular_to_state(state0)
    triunonmoving = nonmoving
    triux = x
    nonmoving = state0[0:state0.size // 2]
    x = state0[state0.size // 2:]

    # rescale
    if scalegrad:
        #logging.debug("rescaling, SIGMA " + str(tj.SIGMA))
        q0,q0_1,q0_2,p0,mu0_1,mu0_2 = tj.state_to_weinstein_darboux( state0 )
        if order >= 1:
            mu0_1 = tj.SIGMA*mu0_1
        if order == 2:
            mu0_2 = tj.SIGMA*mu0_2
        state0 = tj.weinstein_darboux_to_state(q0,q0_1,q0_2,p0,mu0_1,mu0_2)

    q0,q0_1,q0_2,p0,mu0_1,mu0_2 = tj.state_to_weinstein_darboux( state0 )

    # flow
    (t_span, y_span) = tj.integrate(state0)
    stateT = y_span[-1]
    
    # debug
    qT,qT_1,qT_2,pT,muT_1,muT_2 = tj.state_to_weinstein_darboux( stateT )
    #logging.info("q0: " + str(q0))
    #logging.info("p0_2: " + str(p0))
    #logging.info("qT: " + str(qT))
    logging.info("||p0||: " + str(np.linalg.norm(p0)))
    logging.info("||mu0_1||: " + str(np.linalg.norm(mu0_1)))
    logging.info("||mu0_2||: " + str(np.linalg.norm(mu0_2)))
    #if order >= 1:
        #logging.info("q0_1: " + str(q0_1))
        #logging.info("qT_1: " + str(qT_1))
        #logging.info("mu0_1: " + str(mu0_1))
    #if order >= 2:
        #logging.info("q0_2: " + str(q0_2))
        #logging.info("qT_2: " + str(qT_2))
        #logging.info("mu0_2: " + str(mu0_2))
    #logging.info("qT-q0: " + str(qT-q0))
    #logging.info("qT_1-q0_1: " + str(qT_1-q0_1))
    #logging.info("qT_2-q0_2: " + str(qT_2-q0_2))

    simT = sim['f'](stateT, state0=state0, visualize=visualize)

    # debug
    #logging.info('match term (before flow/after flow/diff): ' + str(sim['f'](state0)[0]) + '/' + str(simT[0]) + '/' + str(sim['f'](state0)[0]-simT[0]))
    logging.info('match term after flow: ' + str(simT[0]))
    
    Ediff = tj.Hamiltonian(q0,p0,mu0_1,mu0_2) # path energy from Hamiltonian
    logging.info('Hamiltonian: ' + str(Ediff))

    if not adjint:
        return weights[1]*simT[0]+weights[0]*Ediff

    dq = simT[1][0]
    if order >= 1:
        dq_1 = simT[1][1]
    else:
        dq_1 = np.zeros(q0_1.shape)
    if order >= 2:
        dq_2 = simT[1][2]
    else:
        dq_2 = np.zeros(q0_2.shape)

    logging.info("||dq||: " + str(np.linalg.norm(dq)))
    logging.info("||dq_1||: " + str(np.linalg.norm(dq_1)))
    logging.info("||dq_2||: " + str(np.linalg.norm(dq_2)))
    ds1 = tj.weinstein_darboux_to_state(dq,dq_1,dq_2,np.zeros(dq.shape),np.zeros(dq_1.shape),np.zeros(dq_2.shape),N,DIM)

    if simGradCheck:
        logging.info("computing finite difference approximation of sim gradient")
        fsim = lambda x: sim['f'](np.hstack( (x,stateT[x.size:],) ), state0=state0)[0]
        findiffgrad = approx_fprime(stateT[0:N*DIM+N*DIM**2+N*DIM**3],fsim,1e-5)
        compgrad = ds1[0:N*DIM+N*DIM**2+N*DIM**3]
        graderr = np.max(abs(findiffgrad-compgrad))
        logging.debug("sim gradient numerical check error: %e",graderr)
        logging.debug("finite diff gradient: " + str(findiffgrad))
        logging.debug("computed gradient: " + str(compgrad))
        logging.debug("difference: " + str(findiffgrad-compgrad))
    if energyGradCheck:
        logging.info("computing finite difference approximation of energy gradient")
        fsim = lambda x: tj.Hamiltonian(q0,np.reshape(x[0:N*DIM],[N,DIM]),np.reshape(x[N*DIM:N*DIM+N*DIM**2],[N,DIM,DIM]),np.reshape(x[N*DIM+N*DIM**2:N*DIM+N*DIM**2+N*DIM**3],[N,DIM,DIM,DIM]))
        findiffgrad = approx_fprime(np.hstack((p0.flatten(),mu0_1.flatten(),mu0_2.flatten(),)),fsim,1e-7)
        compgrad = tj.grad_Hamiltonian(q0,p0,mu0_1,mu0_2)
        graderr = np.max(abs(findiffgrad-compgrad))
        logging.debug("energy gradient numerical check error: %e",graderr)
        logging.debug("finite diff gradient: " + str(findiffgrad))
        logging.debug("computed gradient: " + str(compgrad))
        logging.debug("difference: " + str(findiffgrad-compgrad))
    
    (t_span, y_span) = tj.adj_integrate(stateT,ds1)
    adjstate0 = y_span[-1]

    half = adjstate0.size // 2  # first half: state, second half: adjoint variables
    assert nonmoving.size + x.size <= half
    gradE = tj.grad_Hamiltonian(q0,p0,mu0_1,mu0_2)
    assert half - nonmoving.size == gradE.size # gradE doesn't include point variations currently
    gradE = gradE[0:x.size]
    grad0 = weights[1]*adjstate0[half+nonmoving.size:half+nonmoving.size+x.size] + weights[0]*gradE # transported gradient + grad of energy

    adjstate0[half+nonmoving.size:half+nonmoving.size+grad0.size] = grad0
    grad0 = tj.state_to_triangular(adjstate0[half:adjstate0.size])[triunonmoving.size:triunonmoving.size+triux.size]

    grad0 = grad0.flatten()

    # rescale
    if scalegrad:
        if order >= 1:
            grad0[N*DIM:N*DIM+N*DIM**2] = tj.SIGMA*grad0[N*DIM:N*DIM+N*DIM**2]
        if order == 2:
            grad0[N*DIM+N*DIM**2:N*DIM+N*DIM**2+N*DIM**3] = tj.SIGMA*grad0[N*DIM+N*DIM**2:N*DIM+N*DIM**2+N*DIM**3]

    # visualization
    dq0,dq0_1,dq0_2,dp0,dmu0_1,dmu0_2 = tj.state_to_weinstein_darboux( adjstate0[adjstate0.size // 2:adjstate0.size],N,DIM )
    #logging.info("dp0: " + str(dp0))
    logging.info("||dp0|| final: " + str(np.linalg.norm(dp0)))
    #logging.info("dmu0_1: " + str(dmu0_1))
    logging.info("||dmu0_1|| final: " + str(np.linalg.norm(dmu0_1)))
    #logging.info("dmu0_2: " + str(dmu0_2))
    logging.info("||dmu0_2|| final: " + str(np.linalg.norm(dmu0_2)))
    #logging.info("adjstate0: " + str(adjstate0))
    #logging.info("grad0: " + str(grad0))
    #plt.figure(0)
    #plt.quiver(q0[:,0],q0[:,1],dp0[:,0],dp0[:,1])

    ## pause
    #raw_input("F: Press ENTER to continue")

    return (weights[1]*simT[0]+weights[0]*Ediff, grad0)
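
Since F returns a (value, gradient) pair, it can be handed directly to scipy.optimize.minimize with jac=True. A minimal sketch, assuming a sim dictionary and arrays nonmoving and x0 of the shapes F expects (the names and weight values here are illustrative):

from functools import partial

from scipy.optimize import minimize

# sketch: freeze everything except x so the optimizer only varies the moving part
objective = partial(F, sim, nonmoving,
                    weights=(1.0, 1.0),  # (energy weight, match-term weight); illustrative values
                    order=2)

res = minimize(objective, x0, method='BFGS', jac=True,
               options={'maxiter': 100, 'disp': True})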