Example #1
            if i < self.n - 1:
                R_w[i] = v[i+1] - vip   # tangent mismatch with the next chunk's interface
            if i > 0:
                R_v[i] = w[i-1] - wim   # adjoint mismatch with the previous chunk's interface
            else:
                R_v[i] = -wim           # the first chunk has no upstream adjoint
        return hstack([ravel(R_v), ravel(R_w)])
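
# For context, the fragment above is presumably the tail of a residual/matvec
# routine that couples neighboring time chunks: each chunk's tangent variable
# v is matched against the next interface, each adjoint variable w against the
# previous one. A minimal self-contained sketch of that coupling pattern
# (an illustration under assumed shapes, not the library's actual code):

from numpy import zeros, ravel, hstack

def interface_residual(v, w, v_prop, w_prop):
    # v, w      : (n, m) arrays of interface tangent/adjoint variables
    # v_prop[i] : v[i] propagated forward through chunk i  (the 'vip' above)
    # w_prop[i] : w[i] propagated backward through chunk i (the 'wim' above)
    n, m = v.shape
    R_v, R_w = zeros([n, m]), zeros([n, m])
    for i in range(n):
        if i < n - 1:
            R_w[i] = v[i+1] - v_prop[i]
        if i > 0:
            R_v[i] = w[i-1] - w_prop[i]
        else:
            R_v[i] = -w_prop[i]
    return hstack([ravel(R_v), ravel(R_w)])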


from numpy import zeros   # rand would also be needed for the commented-out u0
import kuramoto
# c_init arguments: c, u0 (length n_grid), T0, n_chunk, t_chunk, dt_max, mpi_size
# u0 = rand(127)
u0 = zeros(127)
u0[64] = 1

kuramoto.c_init(0.5, u0, 500, 25, 4, 0.2, 1)   # final argument 1 = single process (cf. mpi_size in Example #2)

pde = Wrapper(kuramoto.cvar.N_GRID, kuramoto.cvar.N_CHUNK,
              kuramoto.cvar.DT_STEP * kuramoto.cvar.N_STEP,
              kuramoto.c_tangent, kuramoto.c_adjoint, kuramoto.c_project_ddt)
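
# Wrapper itself is assumed to be defined elsewhere in the source file; from
# its use below, pde.m is the number of grid points, pde.n the number of time
# chunks, the third argument the duration of one chunk, followed by the
# tangent, adjoint, and time-derivative-projection callbacks.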

# construct matrix rhs
x = zeros(pde.m * (2 * pde.n - 1))
rhs = pde.matvec(x, -1) - pde.matvec(x, 0)
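
# The second argument to matvec presumably toggles the inhomogeneous (forcing)
# part of the affine operator, so evaluating at x = 0 isolates that forcing as
# the right-hand side of a linear system. The same trick for a generic affine
# map f(x) = A @ x + b (sketch; f is hypothetical):

def split_affine(f, x):
    b = f(zeros(x.size))    # f(0) = b recovers the constant part
    return f(x) - b, b      # (A @ x, b)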

# solve
from scipy import sparse
import scipy.sparse.linalg as splinalg

w = zeros(rhs.size)
oper = splinalg.LinearOperator((w.size, w.size), matvec=pde.matvec, dtype=float)
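
# The listing breaks off before the actual Krylov solve; a plausible
# continuation (MINRES assumes a symmetric operator; if that does not hold
# here, splinalg.gmres is the safer default; this line is a sketch, not part
# of the original example):
w, info = splinalg.minres(oper, rhs, x0=w)   # info == 0 on convergence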
Example #2
# c_init arguments: c, u0 (length n_grid), T0, n_chunk, t_chunk, dt_max, mpi_size

from numpy import zeros
from mpi4py import MPI
import kuramoto

mpi_comm = MPI.COMM_WORLD
mpi_rank = mpi_comm.Get_rank()
mpi_size = mpi_comm.Get_size()

T0 = 500 if mpi_rank == 0 else 0   # only rank 0 runs the initial transient; later ranks inherit a developed state

u0 = zeros(127)
u0[64] = 1
# u0 = rand(127)
if mpi_rank > 0:
    # block until the upstream rank has advanced the state to our start time
    mpi_comm.Recv(u0, mpi_rank - 1, 1)

# pass mpi_size to init...
kuramoto.c_init(0.5, u0, T0, 25, 4, 0.2, mpi_size)

if mpi_rank < mpi_size - 1:
    # hand the (presumably in-place advanced) state to the downstream rank
    mpi_comm.Send(u0, mpi_rank + 1, 1)

# Compute the time-averaged objective function over all time (and all processors), Jbar
Jbar = mpi_comm.allreduce(kuramoto.cvar.JBAR)
kuramoto.c_assignJBAR(Jbar)
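
# allreduce defaults to MPI.SUM, so this sums the per-rank contributions; for
# the result to be a global time average, each rank's JBAR presumably already
# carries its segment's share of the weight. The generic way to combine
# per-segment averages is to weight by segment length (sketch, hypothetical
# names):
#     t_total = mpi_comm.allreduce(t_local)
#     Jbar = mpi_comm.allreduce(J_local * t_local) / t_total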


pde = Wrapper(kuramoto.cvar.N_GRID, kuramoto.cvar.N_CHUNK,
              kuramoto.cvar.DT_STEP * kuramoto.cvar.N_STEP,
              kuramoto.c_tangent, kuramoto.c_adjoint, kuramoto.c_project_ddt)

# construct matrix rhs
nvw = 2 * pde.n - (1 if mpi_rank == 0 else 0)
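# nvw counts this rank's interface blocks: every chunk boundary carries both a
# v and a w block, and rank 0 drops one (matching the serial 2 * pde.n - 1 in
# Example #1, presumably because the tangent variable at t = 0 is pinned).
# With 4 ranks and 25 chunks each, say, the global system would have
# (49 + 3 * 50) * pde.m unknowns.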