Example #1
    def brain_slice_data_to_array(
            self, experimental_z_n, source_dir, storage_fname=None):

        self.create_coronal_section_mesh(experimental_z_n)

        self.set_function_spaces()
        # Only valid for linear (P1) elements

        dof_to_vertex_vec = fe.dof_to_vertex_map(self.function_space)
        coordinates = self.mesh.coordinates()[dof_to_vertex_vec]
        x = coordinates[:, 0]
        y = coordinates[:, 1]

        SP = self.image_manipulation_obj.\
                create_slice_properties_object(source_dir)

        u = SP.map_from_model_xy_to_experimental_matrix(x, y)

        if storage_fname is None:
            return u

        np.savetxt(storage_fname, u, fmt='%d')

        return u
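
A minimal serial sketch of the dof-to-vertex pattern used above, on a stand-alone P1 space (the mesh, expression and names are illustrative, not taken from the class):

import fenics as fe
import numpy as np

mesh = fe.UnitSquareMesh(4, 4)
V = fe.FunctionSpace(mesh, 'P', 1)          # linear elements only

d2v = fe.dof_to_vertex_map(V)
coords = mesh.coordinates()[d2v]            # vertex coordinates in dof order
x, y = coords[:, 0], coords[:, 1]

# the dof vector of a P1 Function is now aligned with x and y
f = fe.interpolate(fe.Expression('x[0] + x[1]', degree=1), V)
assert np.allclose(f.vector().get_local(), x + y)
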
Example #2
def FEniCSBoxField(fenics_function, division=None, uniform_mesh=True):
    """
    Turn a DOLFIN P1 finite element field over a structured mesh into
    a BoxField object.
    Standard DOLFIN numbering numbers the nodes along the x[0] axis,
    then x[1] axis, and so on.

    If the DOLFIN function employs elements of degree > 1, the function
    is automatically interpolated to elements of degree 1.
    """

    # Get mesh
    fenics_mesh = fenics_function.function_space().mesh()

    # Interpolate if necessary
    element = fenics_function.ufl_element()
    if element.degree() != 1:
        if element.value_size() == 1:
            fenics_function = interpolate(
                fenics_function, FunctionSpace(fenics_mesh, 'P', 1))
        else:
            fenics_function = interpolate(
                fenics_function,
                VectorFunctionSpace(fenics_mesh, 'P', 1, element.value_size()))

    import dolfin
    if dolfin.__version__[:3] == "1.0":
        nodal_values = fenics_function.vector().array().copy()
    else:
        d2v = fenics.dof_to_vertex_map(fenics_function.function_space())
        nodal_values = fenics_function.vector().get_local().copy()
        nodal_values[d2v] = fenics_function.vector().get_local().copy()

    if uniform_mesh:
        grid = fenics_mesh2UniformBoxGrid(fenics_mesh, division)
    else:
        grid = fenics_mesh2BoxGrid(fenics_mesh, division)

    if nodal_values.size > grid.npoints:
        # vector field, treat each component separately
        ncomponents = int(nodal_values.size/grid.npoints)
        try:
            nodal_values.shape = (ncomponents, grid.npoints)
        except ValueError as e:
            raise ValueError('Vector field (nodal_values) has length %d, there are %d grid points, and this does not match with %d components' % (nodal_values.size, grid.npoints, ncomponents))
        vector_field = [_rank12rankd_mesh(nodal_values[i,:].copy(),
                                          grid.shape) \
                        for i in range(ncomponents)]
        nodal_values = array(vector_field)
        bf = BoxField(grid, name=fenics_function.name(),
                      vector=ncomponents, values=nodal_values)
    else:
        try:
            nodal_values = _rank12rankd_mesh(nodal_values, grid.shape)
        except ValueError as e:
            raise ValueError('DOLFIN function has vector of size %s while the provided mesh has %d points and shape %s' % (nodal_values.size, grid.npoints, grid.shape))
        bf = BoxField(grid, name=fenics_function.name(),
                      vector=0, values=nodal_values)
    return bf
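
For scalar P1 fields, the dof-to-vertex reordering done above is equivalent to FEniCS's built-in compute_vertex_values; a small self-contained check (names here are illustrative):

import fenics
import numpy as np

mesh = fenics.UnitSquareMesh(8, 8)
V = fenics.FunctionSpace(mesh, 'P', 1)
u = fenics.interpolate(fenics.Expression('x[0]*x[1]', degree=1), V)

# reorder dof values into mesh-vertex order, as FEniCSBoxField does
d2v = fenics.dof_to_vertex_map(V)
vertex_values = np.empty(mesh.num_vertices())
vertex_values[d2v] = u.vector().get_local()

# for scalar P1 fields this matches the built-in helper
assert np.allclose(vertex_values, u.compute_vertex_values(mesh))
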
Example #3
    def _build_transformer(self):
        self.num_dofs = self.V.dim()
        self.num_vertices = self.mesh.num_vertices()
        self.v_d = fa.vertex_to_dof_map(self.V)
        self.d_v = fa.dof_to_vertex_map(self.V)
        self.coo_dof = self.V.tabulate_dof_coordinates()
        self.coo_ver = self.mesh.coordinates()
        self.cells = self.mesh.cells()
        self._set_boundary_flags()
        self._set_detailed_boundary_flags()
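
A minimal sketch of how the two maps stored above relate the dof and vertex orderings of a scalar P1 space (the mesh and space here are illustrative):

import fenics as fa
import numpy as np

mesh = fa.UnitSquareMesh(4, 4)
V = fa.FunctionSpace(mesh, 'P', 1)

v_d = fa.vertex_to_dof_map(V)     # vertex index -> dof index
d_v = fa.dof_to_vertex_map(V)     # dof index -> vertex index
coo_dof = V.tabulate_dof_coordinates().reshape((-1, 2))
coo_ver = mesh.coordinates()

assert np.allclose(coo_dof[v_d], coo_ver)   # dof coordinates in vertex order
assert np.allclose(coo_ver[d_v], coo_dof)   # vertex coordinates in dof order
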
Example #4
def get_conductivity(v, values, mesh=None, dimension=2, num_subspaces=0):
    """
    Take a conductivity field that was read in and translate it to a fenics mesh
    :param mesh: the fenics mesh onto which you want to put the conductivity field
    :param v: a function space
    :param values: function values at the nodes
    :param dimension: the problem spatial dimension
    :return: a fenics.Function describing the property field
    """
    c = fenics.Function(v)
    ordering = fenics.dof_to_vertex_map(v)
    values = values.astype(float)
    c.vector()[:] = values.flatten()[ordering]
    return c
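
A hypothetical usage sketch, assuming the vertex values come from an image-like array whose rows and columns follow DOLFIN's x-then-y vertex numbering:

import fenics
import numpy as np

nx, ny = 8, 8
mesh = fenics.UnitSquareMesh(nx, ny)
v = fenics.FunctionSpace(mesh, 'P', 1)

# one value per mesh vertex: shape (ny + 1, nx + 1)
values = np.random.rand(ny + 1, nx + 1)

sigma = get_conductivity(v, values)
print(sigma(0.5, 0.5))      # conductivity evaluated at a point
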
Example #5
def remove_divergence(V, u, v, sigma):
    c_shape = u.shape
    V_div = divergence(u, v, 1, 1)
    ff = fe.Function(V)
    d2v_map = fe.dof_to_vertex_map(V)
    array_ff = V_div.ravel()
    array_ff = array_ff[d2v_map]
    ff.vector().set_local(array_ff)
    uu = fe.TrialFunction(V)
    vv = fe.TestFunction(V)
    a = fe.dot(fe.grad(uu), fe.grad(vv))*fe.dx
    L = ff*vv*fe.dx
    uu = fe.Function(V)
    fe.solve(a == L, uu)
    phi = uu.compute_vertex_values().reshape(c_shape)
    grad_phi = np.gradient(phi, 1, 1)
    u_corrected = u + grad_phi[1]
    v_corrected = v + grad_phi[0]
    u_corrected = sp.ndimage.gaussian_filter(u_corrected, sigma=sigma)
    v_corrected = sp.ndimage.gaussian_filter(v_corrected, sigma=sigma)
    return u_corrected, v_corrected
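
The divergence helper called at the top of remove_divergence is not shown here; a minimal NumPy sketch of what it presumably computes (the argument order and unit grid spacing are assumptions, chosen to match the np.gradient axis convention used above):

import numpy as np

def divergence(u, v, dx=1.0, dy=1.0):
    # u is the component along axis 1 (columns), v along axis 0 (rows),
    # matching u + grad_phi[1] and v + grad_phi[0] in remove_divergence
    du_dy, du_dx = np.gradient(u, dy, dx)
    dv_dy, dv_dx = np.gradient(v, dy, dx)
    return du_dx + dv_dy
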
Example #6
def FEniCSBoxField(fenics_function, division=None, uniform_mesh=True):
    """
    Turn a DOLFIN P1 finite element field over a structured mesh into
    a BoxField object.
    Standard DOLFIN numbering numbers the nodes along the x[0] axis,
    then x[1] axis, and so on.

    If the DOLFIN function employs elements of degree > 1, the function
    is automatically interpolated to elements of degree 1.
    """

    # Get mesh
    fenics_mesh = fenics_function.function_space().mesh()

    # Interpolate if necessary
    element = fenics_function.ufl_element()
    if element.degree() != 1:
        if element.value_size() == 1:
            fenics_function = interpolate(
                fenics_function, FunctionSpace(fenics_mesh, 'P', 1))
        else:
            fenics_function = interpolate(
                fenics_function,
                VectorFunctionSpace(fenics_mesh, 'P', 1, element.value_size()))

    import dolfin
    if dolfin.__version__[:3] == "1.0":
        nodal_values = fenics_function.vector().get_local().copy()
    else:
        d2v = fenics.dof_to_vertex_map(fenics_function.function_space())
        nodal_values = fenics_function.vector().get_local().copy()
        nodal_values[d2v] = fenics_function.vector().get_local().copy()

    if uniform_mesh:
        grid = fenics_mesh2UniformBoxGrid(fenics_mesh, division)
    else:
        grid = fenics_mesh2BoxGrid(fenics_mesh, division)

    if nodal_values.size > grid.npoints:
        # vector field, treat each component separately
        ncomponents = int(nodal_values.size / grid.npoints)
        try:
            nodal_values.shape = (ncomponents, grid.npoints)
        except ValueError as e:
            raise ValueError(
                'Vector field (nodal_values) has length %d, there are %d grid points, and this does not match with %d components'
                % (nodal_values.size, grid.npoints, ncomponents))
        vector_field = [_rank12rankd_mesh(nodal_values[i,:].copy(),
                                          grid.shape) \
                        for i in range(ncomponents)]
        nodal_values = array(vector_field)
        bf = BoxField(grid,
                      name=fenics_function.name(),
                      vector=ncomponents,
                      values=nodal_values)
    else:
        try:
            nodal_values = _rank12rankd_mesh(nodal_values, grid.shape)
        except ValueError as e:
            raise ValueError(
                'DOLFIN function has vector of size %s while the provided mesh has %d points and shape %s'
                % (nodal_values.size, grid.npoints, grid.shape))
        bf = BoxField(grid,
                      name=fenics_function.name(),
                      vector=0,
                      values=nodal_values)
    return bf
Example #7
def fenics_function2BoxField(fenics_function,
                             fenics_mesh,
                             division=None,
                             uniform_mesh=True):
    """
    Turn a DOLFIN P1 finite element field over a structured mesh into
    a BoxField object.
    Standard DOLFIN numbering numbers the nodes along the x[0] axis,
    then x[1] axis, and so on.

    If the DOLFIN function employs elements of degree > 1, one should
    project or interpolate the field onto a field with elements of
    degree=1.
    """
    if fenics_function.ufl_element().degree() != 1:
        raise TypeError("""\
The fenics_function2BoxField function works with degree=1 elements
only. The DOLFIN function (fenics_function) has finite elements of type
%s
i.e., the degree=%d != 1. Project or interpolate this function
onto a space of P1 elements, i.e.,

V2 = FunctionSpace(mesh, 'CG', 1)
u2 = project(u, V2)
# or
u2 = interpolate(u, V2)

""" % (str(fenics_function.ufl_element()),
        fenics_function.ufl_element().degree()))

    import dolfin
    if dolfin.__version__[:3] == "1.0":
        nodal_values = fenics_function.vector().array().copy()
    else:
        d2v = fenics.dof_to_vertex_map(fenics_function.function_space())
        nodal_values = fenics_function.vector().get_local().copy()
        nodal_values[d2v] = fenics_function.vector().get_local().copy()

    if uniform_mesh:
        grid = fenics_mesh2UniformBoxGrid(fenics_mesh, division)
    else:
        grid = fenics_mesh2BoxGrid(fenics_mesh, division)

    if nodal_values.size > grid.npoints:
        # vector field, treat each component separately
        ncomponents = int(nodal_values.size / grid.npoints)
        try:
            nodal_values.shape = (ncomponents, grid.npoints)
        except ValueError as e:
            raise ValueError(
                'Vector field (nodal_values) has length %d, there are %d grid points, and this does not match with %d components'
                % (nodal_values.size, grid.npoints, ncomponents))
        vector_field = [_rank12rankd_mesh(nodal_values[i,:].copy(),
                                          grid.shape) \
                        for i in range(ncomponents)]
        nodal_values = array(vector_field)
        bf = BoxField(grid,
                      name=fenics_function.name(),
                      vector=ncomponents,
                      values=nodal_values)
    else:
        try:
            nodal_values = _rank12rankd_mesh(nodal_values, grid.shape)
        except ValueError as e:
            raise ValueError(
                'DOLFIN function has vector of size %s while the provided mesh has %d points and shape %s'
                % (nodal_values.size, grid.npoints, grid.shape))
        bf = BoxField(grid,
                      name=fenics_function.name(),
                      vector=0,
                      values=nodal_values)
    return bf
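
A usage sketch following the docstring's advice (the mesh, expression and the form of the division argument are assumptions; only fenics_function2BoxField comes from the snippet above):

import fenics

mesh = fenics.UnitSquareMesh(10, 10)
V2 = fenics.FunctionSpace(mesh, 'P', 2)
u = fenics.interpolate(fenics.Expression('sin(x[0])*x[1]', degree=2), V2)

# bring the degree-2 field down to P1 before converting it
V1 = fenics.FunctionSpace(mesh, 'CG', 1)
u1 = fenics.interpolate(u, V1)        # or fenics.project(u, V1)

field = fenics_function2BoxField(u1, mesh, division=[10, 10])
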
Example #8
def fwi_si(gt_data, i_guess, n_receivers, noise_lv, path):
    """
    This is the main function of the project.
    Entries 
        gt_data: string path to the ground truth image data
        i_guess: integer pointing the algorithm initialization guess
        n_shots: integer, number of strikes for the FWI
        n_receivers: integer, number of receivers for the FWI
        noise_lv: float type variable that we use to compute noise level
        path: string type variable, path to local results directory
    """

    # Parallel processing at the shot level
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    n_shots = comm.Get_size()

    seism_vel = [4.12, 1.95]
    image_phi = mpimg.imread(gt_data)
    chi0 = np.int64(image_phi == 0)
    chi1 = 1.0 - chi0
    synth_model = seism_vel[0] * chi1 + seism_vel[1] * chi0
    #scale in meter
    xMin = 0.0
    xMax = 1.0
    zMin = 0.0
    zMax = 0.650
    #scale in seconds
    tMin = 0.0
    tMax = 1.0

    # Damping layer width and damping limits
    damp_layer = 0.1 * xMax
    dmp_xMin = xMin + damp_layer
    dmp_xMax = xMax - damp_layer
    dmp_zMax = zMax - damp_layer

    #    Number of grid points are determined by the loaded image size
    #    Nz, Nx are (#) of grid point
    Nz, Nx = synth_model.shape
    delta_x = xMax / Nx
    delta_z = zMax / Nz
    CFL = 0.4
    delta_t = (CFL * min(delta_x, delta_z)) / max(seism_vel)
    gc_t = np.arange(tMin, tMax, delta_t)
    Nt = len(gc_t)

    # Level set parameters
    MainItMax = 5000
    gamma = 0.8
    gamma2 = 0.8
    stop_coeff = 1.0e-8
    add_weight = True
    ls_max = 3
    ls = 0
    beta0_init = 1.5  # 1.2 #0.8 #0.5 #0.3
    beta0 = beta0_init
    beta = beta0
    stop_decision_limit = 150
    stop_decision = 0
    alpha1 = 0.01
    alpha2 = 0.97

    # wave Parameters
    PlotFields = True
    add_noise = False if noise_lv == 0 else True
    src_Zpos = 5.0
    source_peak_frequency = 5.0  # (kilo hertz)

    # Grid coordinates
    gc_x = np.arange(xMin, xMax, delta_x)
    gc_z = np.arange(zMin, zMax, delta_z)

    # Compute receivers
    id_dmp_xMin = np.where(gc_x == dmp_xMin)[0][0]
    id_dmp_xMax = np.where(gc_x == dmp_xMax)[0][0]
    id_dmp_zMax = np.where(gc_z == dmp_zMax)[0][0]
    rec_index = np.linspace(id_dmp_xMin,
                            id_dmp_xMax,
                            n_receivers + 1,
                            dtype='int')
    try:
        assert len(rec_index) < id_dmp_xMax - id_dmp_xMin
    except AssertionError:
        print("receivers in different positions")

    # Build the HUGE parameter dictionary
    parameters = {
        "gamma": gamma,
        "gamma2": gamma2,
        "ls_max": ls_max,
        "stop_coeff": stop_coeff,
        "add_noise": add_noise,
        "add_weight": add_weight,
        "beta0_init": beta0_init,
        "stop_decision_limit": stop_decision_limit,
        "alpha1": alpha1,
        "alpha2": alpha2,
        "CFL": CFL,
        "source_peak_frequency": source_peak_frequency,
        "src_Zpos": src_Zpos,
        "i_guess": i_guess,
        "n_shots": n_shots,
        "n_receivers": n_receivers,
        "add_weight": add_weight,
        "nz": Nz,
        "nx": Nx,
        "nt": Nt,
        "gc_t": gc_t,
        "gc_x": gc_x,
        "gc_z": gc_z,
        "xMin": xMin,
        "xMax": xMax,
        "zMin": zMin,
        "zMax": zMax,
        "tMin": tMin,
        "tMax": tMax,
        "hz": delta_z,
        "hx": delta_x,
        "ht": delta_t,
        "dmp_xMin": dmp_xMin,
        "dmp_xMax": dmp_xMax,
        "dmp_zMax": dmp_zMax,
        "dmp_layer": damp_layer,
        "id_dmp_xMin": id_dmp_xMin,
        "id_dmp_xMax": id_dmp_xMax,
        "id_dmp_zMax": id_dmp_zMax,
        "rec": gc_x[rec_index],
        "rec_index": rec_index,
        'noise_lv': noise_lv,
        "path": path,
        "path_misfit": path + 'misfit/',
        "path_phi": path + 'phi/'
    }

    # Compute initial guess matrix
    if rank == 0:
        outputs_and_paths(parameters)
        gnu_data(image_phi, 'ground_truth.dat', parameters)
        mkDirectory(parameters["path_phi"])

    comm.Barrier()
    phi_mat = initial_guess(parameters)
    ind = inside_shape(phi_mat)
    ind_c = np.ones_like(phi_mat) - ind
    vel_field = seism_vel[0] * ind + seism_vel[1] * ind_c

    # Initialization of Fenics-Dolfin functions
    # ----------------------------------------
    # Define mesh for the entire domain Omega
    # ----------------------------------------
    mesh = fc.RectangleMesh(comm, fc.Point(xMin, zMin), fc.Point(xMax, zMax),
                            Nx - 1, Nz - 1)
    # ----------------------------------------
    # Function spaces
    # ----------------------------------------
    V = fc.FunctionSpace(mesh, "Lagrange", 1)
    VF = fc.VectorFunctionSpace(mesh, "Lagrange", 1)
    theta = fc.TrialFunction(VF)
    csi = fc.TestFunction(VF)

    # ----------------------------------------
    # Define boundaries of the domain
    # ----------------------------------------
    tol = fc.DOLFIN_EPS  # tolerance for coordinate comparisons

    class Left(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[0] - xMin) < tol

    class Right(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[0] - xMax) < tol

    class Bottom(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[1] - zMin) < tol

    class Top(fc.SubDomain):
        def inside(self, x, on_boundary):
            return on_boundary and abs(x[1] - zMax) < tol

    # --------------------------------------
    # Initialize sub-domain instances
    # --------------------------------------
    left = Left()
    top = Top()
    right = Right()
    bottom = Bottom()
    # ----------------------------------------------
    # Initialize mesh function for boundary domains
    # ----------------------------------------------
    boundaries = fc.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    domains = fc.MeshFunction("size_t", mesh, mesh.topology().dim())
    left.mark(boundaries, 3)
    top.mark(boundaries, 4)
    right.mark(boundaries, 5)
    bottom.mark(boundaries, 6)
    # ---------------------------------------
    # Define operator for speed vector theta
    # ---------------------------------------
    dtotal = Measure("dx")
    dircond = 1
    # ---------------------------------------
    # setting shape derivative weights
    # re-balancing sensibility to be greater at the bottom
    # ---------------------------------------
    wei_equation = '1.0e8*(pow(x[0] - 0.5, 16) + pow(x[1] - 0.325, 10))+100'
    wei = fc.Expression(str(wei_equation), degree=1)

    # Building the left hand side of the bi-linear system
    # to obtain the descendant direction from shape derivative
    if dircond < 4:
        bcF = [
            fc.DirichletBC(VF, (0, 0), boundaries, 3),
            fc.DirichletBC(VF, (0, 0), boundaries, 4),
            fc.DirichletBC(VF, (0, 0), boundaries, 5),
            fc.DirichletBC(VF, (0, 0), boundaries, 6)
        ]
    if dircond == 1:
        lhs = wei * alpha1 * inner(grad(theta), grad(csi)) * dtotal \
          + wei * alpha2 * inner(theta, csi) * dtotal
    #
    elif dircond == 2:
        lhs = alpha1 * inner(grad(theta), grad(csi)) * \
            dtotal + alpha2 * inner(theta, csi) * dtotal
    elif dircond == 3:
        lhs = inner(grad(theta), grad(csi)) * dtotal
    elif dircond == 5:
        lhs = inner(grad(theta), grad(csi)) * \
            dtotal + inner(theta, csi) * dtotal

    aV = fc.assemble(lhs)
    #
    if dircond < 4:
        for bc in bcF:
            bc.apply(aV)
    #
    # solver_V = fc.LUSolver(aV, "mumps")
    solver_V = fc.LUSolver(aV)
    # ------------------------------
    # Initialize Level set function
    # ------------------------------
    phi = fc.Function(V)
    phivec = phi.vector()
    phivalues = phivec.get_local()  # empty values
    my_first, my_last = V.dofmap().ownership_range()

    tabcoord = V.tabulate_dof_coordinates().reshape((-1, 2))

    unowned = V.dofmap().local_to_global_unowned()
    dofs = list(
        filter(
            lambda dof: V.dofmap().local_to_global_index(dof) not in unowned,
            [i for i in range(my_last - my_first)]))

    tabcoord = tabcoord[dofs]
    phivalues[:] = phi_mat.reshape(Nz * Nx)[dofs]  # assign values
    phivec.set_local(phivalues)
    phivec.apply('insert')

    cont = 0
    boundaries = fc.MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
    domains = fc.MeshFunction("size_t", mesh, mesh.topology().dim())
    # -----------------------------
    # Define measures
    # -----------------------------
    dx = Measure('dx')(subdomain_data=domains)

    # -------------------------------
    # Define function Omega1
    # -------------------------------

    class Omega1(fc.SubDomain):
        def __init__(self) -> None:
            super(Omega1, self).__init__()

        def inside(self, x, on_boundary):
            return (phi(x) <= 0 and xMin <= x[0] <= xMax
                    and zMin <= x[1] <= zMax)

    # instantiate variables
    eta = dmp(parameters)

    source = Source(parameters)
    FT = source.inject()

    phi_mat_old = np.zeros_like(phi_mat)
    vel_field_new = np.zeros_like(vel_field)
    theta1_mat = np.zeros((Nz * Nx))
    theta2_mat = np.zeros_like(theta1_mat)
    MainItEff = 0
    MainIt = 0
    stop_decision = 0
    st_mem_usage = 0.0
    adj_mem_usage = 0.0
    Jevaltotal = np.zeros((MainItMax))
    norm_theta = np.zeros((MainItMax))

    # path to recording phi function
    # path to recording misfit function
    if rank == 0:
        plot_mat(parameters, 'Damping', 'Damping function', eta)
        mkDirectory(parameters["path_phi"])
        mkDirectory(parameters["path_misfit"])

    comm.Barrier()
    # -------------------------------
    # Seismograms
    # -------------------------------
    wavesolver = WaveSolver(parameters, eta)
    start = time.time()
    d_send = np.empty((Nz, Nx, Nt), np.dtype('float'))
    d = wavesolver.measurements(d_send[0:Nz, 0:Nx, 0:Nt], synth_model,
                                FT[rank, 0:Nz, 0:Nx, 0:Nt], add_noise)
    seismograms = d[0, rec_index, 0:Nt].copy(order='C')
    end = time.time()

    # Plot Seismograms
    if PlotFields:
        print("{:.1f}s to build synthetic seismograms".format(end - start))
        plotMeasurements(parameters, seismograms, rank)
        if rank == 0:
            plot_displacement_field(parameters, d)

    sys.stdout.flush()
    del (d, d_send)
    ###################################################
    # Main Loop
    ###################################################
    gradshape = ShapeDerivative(parameters, csi, V, dtotal, seism_vel)
    while MainIt < MainItMax:
        # ----------------------------------------------
        # Initialize mesh function for boundary domains
        # ----------------------------------------------
        if MainIt > 0:
            vel_field = vel_field_new

        domains.set_all(0)
        omega1 = Omega1()
        omega1.mark(domains, 1)
        dx = Measure('dx')(subdomain_data=domains)

        u = np.empty((Nz, Nx, Nt), np.dtype('float'))
        P = np.empty((Nz, Nx, Nt), np.dtype('float'))

        # ------------------------------------
        # Compute STATE. u stands for displacement field
        # ------------------------------------
        start = time.time()
        u[0:Nz, 0:Nx, 0:Nt] = wavesolver.state(u[0:Nz, 0:Nx, 0:Nt], vel_field,
                                               FT[rank, 0:Nz, 0:Nx, 0:Nt])
        end = time.time()
        # ------------------------------------
        # Compute ADJOINT. P stands for the adjoint variable
        # ------------------------------------
        start1 = time.time()
        tr_u = u[0, rec_index, 0:Nt].copy(order='C')
        misfit = tr_u - seismograms
        P[0:Nz, 0:Nx, 0:Nt] = wavesolver.adjoint(P[0:Nz, 0:Nx, 0:Nt],
                                                 vel_field, misfit)
        end1 = time.time()
        comm.Barrier()
        print(
            '{:.1f}s to compute state and {:.1f}s to compute adjoint with {:d} shots. '
            .format(end - start, end1 - start1, n_shots))

        del (start, end, start1, end1)

        # Plot state/adjoint in 1st-iteration only
        if MainIt == 0 and PlotFields:
            if rank == 0:
                mkDirectory(path + 'initial_state_%03d/' % (n_shots))
                plotadjoint(parameters, P[0:Nz, 0:Nx, 0:Nt])
            folder_name = 'initial_state_%03d/' % (n_shots)
            plotstate(parameters, u[0:Nz, 0:Nx, 0:Nt], folder_name, rank)
            # plot_displacement_field(parameters, u[1, 0:Nz, 0:Nx, 0:Nt])
            st_mem_usage = (u.size * u.itemsize) / 1_073_741_824  # bytes -> GiB
            adj_mem_usage = (P.size * P.itemsize) / 1_073_741_824  # bytes -> GiB

        # Plotting reconstructions
        if rank == 0 and (MainItEff % 10 == 0
                          or stop_decision == stop_decision_limit - 1):
            plottype1(parameters, synth_model, phi_mat, cont)
            plottype2(parameters, synth_model, phi_mat, cont)
            plottype3(parameters, synth_model, phi_mat, MainIt, cont)
            plotcostfunction(parameters, Jevaltotal, MainItEff)
            plotnormtheta(parameters, norm_theta, MainItEff)
            np.save(path + 'last_phi_mat.npy', phi_mat)
            gnu_data(phi_mat, 'reconstruction.dat', parameters)

        if MainItEff % 50 == 0 and PlotFields:
            plot_misfit(parameters, 'misfit', 'Misfit', misfit, rank)

        # -------------------------
        # Compute Cost Function
        # -------------------------
        J_omega = np.zeros((1))
        l2_residual = np.sum(np.power(misfit, 2), axis=0)

        if MainIt == 0:
            weights = 1.0e-5 if add_weight else 1.0  # neutral weight when add_weight is False

        comm.Reduce(simpson_rule(l2_residual[0:Nt], gc_t), J_omega, op=MPI.SUM)
        Jevaltotal[MainItEff] = 0.5 * (J_omega / weights)
        del (J_omega)
        # -------------------------
        # Evaluate shape derivative
        # -------------------------
        start = time.time()
        shapeder = (1.0 / weights) * gradshape.compute(u[0:Nz, 0:Nx, 0:Nt],
                                                       P[0:Nz, 0:Nx, 0:Nt], dx)
        # Build the rhs of bi-linear system
        shapeder = fc.assemble(shapeder)
        end = time.time()
        print('{}s to compute shape derivative.'.format(end - start))
        del (start, end)
        del (u, P)

        with open(path + "cost_function.txt", "a") as file_costfunction:
            file_costfunction.write('{:d} - {:.4e} \n'.format(
                MainItEff, Jevaltotal[MainItEff]))
        # ====================================
        # ---------- Line search -------------
        # ====================================
        if MainIt > 0 and Jevaltotal[MainItEff] > Jevaltotal[
                MainItEff - 1] and ls < ls_max:
            ls = ls + 1
            beta = beta * gamma
            phi_mat = phi_mat_old
            # ------------------------------------------------------------
            # Update level set function using the descent direction theta
            # ------------------------------------------------------------
            hj_input = [
                theta1_mat, theta2_mat, phi_mat, parameters, beta, MainItEff
            ]
            phi_mat = hamiltonjacobi(*hj_input)
            del (hj_input)
            ind = inside_shape(phi_mat)
            ind_c = np.ones_like(phi_mat) - ind
            vel_field_new = seism_vel[0] * ind + seism_vel[1] * ind_c

            phivec = phi.vector()
            phivalues = phivec.get_local()  # empty values
            my_first, my_last = V.dofmap().ownership_range()

            tabcoord = V.tabulate_dof_coordinates().reshape((-1, 2))
            # set_trace()  # debugging breakpoint (disabled)
            unowned = V.dofmap().local_to_global_unowned()
            dofs = list(
                filter(
                    lambda dof: V.dofmap().local_to_global_index(dof) not in
                    unowned, [i for i in range(my_last - my_first)]))

            tabcoord = tabcoord[dofs]
            phivalues[:] = phi_mat.reshape(Nz * Nx)[dofs]  # assign values
            phivec.set_local(phivalues)
            phivec.apply('insert')

        else:
            print("----------------------------------------------")
            print("Record in: {}".format(path))
            print("----------------------------------------------")
            print("ITERATION NUMBER (MainItEff)  : {:d}".format(MainItEff))
            print("ITERATION NUMBER (MainIt)  : {:d}".format(MainIt))
            print("----------------------------------------------")
            print("Grid Size                : {:d} x {:d}".format(Nx, Nz))
            print("State memory usage       : {:.4f} GB".format(st_mem_usage))
            print("Adjoint memory usage     : {:.4f} GB".format(adj_mem_usage))
            print("----------------------------------------------")
            print("Line search  iterations  : {:d}".format(ls))
            print("Step length beta         : {:.4e}".format(beta))
            if ls == ls_max:
                beta0 = max(beta0 * gamma2, 0.1 * beta0_init)
            if ls == 0:
                beta0 = min(beta0 / gamma2, 1.0)
            ls = 0
            MainItEff = MainItEff + 1
            beta = beta0  # /(0.999**MainIt)

            theta = fc.Function(VF)
            solver_V.solve(theta.vector(), -1.0 * shapeder)

            # ------------------------------------
            # Compute norm theta and grad(phi)
            # ------------------------------------
            mpi_comm = theta.function_space().mesh().mpi_comm()
            arraytheta = theta.vector().get_local()
            theta_gathered = mpi_comm.gather(arraytheta, root=0)

            # stopped here !!!!!
            comm.Barrier()
            if rank == 0:
                # set_trace()  # debugging breakpoint (disabled)
                theta_vec = theta.vector()[fc.vertex_to_dof_map(VF)]
                theta1_mat = theta_vec[0:len(theta_vec):2].reshape(Nz, Nx)
                theta2_mat = theta_vec[1:len(theta_vec):2].reshape(Nz, Nx)
            norm_theta[MainItEff - 1] = np.sqrt(
                theta1_mat.reshape(Nz * Nx).dot(theta1_mat.reshape(Nx * Nz)) +
                theta2_mat.reshape(Nz * Nx).dot(theta2_mat.reshape(Nx * Nz)))
            max_gnp = np.sqrt(fc.assemble(dot(grad(phi), grad(phi)) * dtotal))
            print("Norm(grad(phi))          : {:.4e}".format(max_gnp))
            print("L2-norm of theta         : {:.4e}".format(
                norm_theta[MainItEff - 1]))
            print("Cost functional          : {:.4e}".format(
                Jevaltotal[MainItEff - 1]))

            # ------------------------------------------------------------
            # Update level set function using the descent direction theta
            # ------------------------------------------------------------
            phi_mat_old = phi_mat

            hj_input = [
                theta1_mat, theta2_mat, phi_mat, parameters, beta,
                MainItEff - 1
            ]

            phi_mat = hamiltonjacobi(*hj_input)

            del (hj_input)
            phi.vector()[:] = phi_mat.reshape(
                (Nz) * (Nx))[fc.dof_to_vertex_map(V)]
            ind = inside_shape(phi_mat)
            ind_c = np.ones_like(phi_mat) - ind
            vel_field_new = seism_vel[0] * ind + seism_vel[1] * ind_c

            # ----------------
            # Computing error
            # ----------------
            error_area = np.abs(chi1 - ind)
            relative_error = np.sum(error_area) / np.sum(chi0)
            print('relative error           : {:.3f}%'.format(100 *
                                                              relative_error))

            with open(path + "error.txt", "a") as text_file:
                text_file.write(f'{MainIt} {np.round(relative_error,3):>3}\n')

            # Plot actual phi function
            if MainIt % 50 == 0:
                plot_mat3D(parameters, 'phi_3D', phi_mat, MainIt)
                plot_countour(parameters, 'phi_contour', phi_mat, MainIt)
                phi_ind = '%03d_' % (MainIt)
                np.save(parameters["path_phi"] + phi_ind + 'phi.npy', phi_mat)

            # --------------------------------
            # Reinitialize level set function
            # --------------------------------
            if np.mod(MainItEff, 10) == 0:
                phi_mat = reinit(Nz, Nx, phi_mat)

            # ====================================
            # -------- Stopping criterion --------
            # ====================================
            if MainItEff > 5:
                stop0 = stop_coeff * (Jevaltotal[1] - Jevaltotal[2])
                stop1 = Jevaltotal[MainItEff - 2] - Jevaltotal[MainItEff - 1]
                if stop1 < stop0:
                    stop_decision = stop_decision + 1
                if stop_decision == stop_decision_limit:
                    MainIt = MainItMax + 1
                print("stop0                    : {:.4e}".format(stop0))
                print("stop1                    : {:.4e}".format(stop1))
            print("Stopping step            : {:d} of {:d}".format(
                stop_decision, stop_decision_limit))
            print("----------------------------------------------\n")
            cont += 1

        MainIt += 1

    return None
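
A minimal serial sketch of the grid-array to P1-Function transfer pattern used throughout the loop above (the mesh size and values here are illustrative):

import fenics as fc
import numpy as np

Nz, Nx = 13, 20
mesh = fc.RectangleMesh(fc.Point(0.0, 0.0), fc.Point(1.0, 0.65), Nx - 1, Nz - 1)
V = fc.FunctionSpace(mesh, 'Lagrange', 1)

phi_mat = np.random.rand(Nz, Nx)              # level-set values on the grid
phi = fc.Function(V)
phi.vector()[:] = phi_mat.reshape(Nz * Nx)[fc.dof_to_vertex_map(V)]

# ... and back to a (Nz, Nx) array in grid (vertex) order
phi_back = phi.compute_vertex_values(mesh).reshape(Nz, Nx)
assert np.allclose(phi_back, phi_mat)
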
Example #9
def sd_nodenode(mesh, V, u_n, De, nexp):
    """
    SD node-to-node
    Flow routing from node-to-node based on the steepest route of descent

    :param mesh: mesh object generated using mshr (fenics)
    :param V: finite element function space
    :param u_n: solution (trial function) for water flux
    :param De: dimensionless diffusion coefficient
    :param nexp: water flux exponent
    :return:
    """

    # get the global coordinates
    gdim = mesh.geometry().dim()
    if dolfin.dolfin_version() == '1.6.0':
        dofmap = V.dofmap()
        gc = dofmap.tabulate_all_coordinates(mesh).reshape((-1, gdim))
    else:
        gc = V.tabulate_dof_coordinates().reshape((-1, gdim))
    vtd = vertex_to_dof_map(V)

    # first get the elevation of each vertex
    elevation = u_n.compute_vertex_values(mesh)

    # loop to get the local flux
    mesh.init(0, 1)
    flux = np.zeros(len(gc))
    neighbors = []
    for v in vertices(mesh):
        idx = v.index()

        # get the local neighbourhood
        neighborhood = [Edge(mesh, i).entities(0) for i in v.entities(1)]
        neighborhood = np.array(neighborhood).flatten()

        # Remove own index from neighborhood
        neighborhood = neighborhood[np.where(neighborhood != idx)[0]]
        neighbors.append(neighborhood)

        # get location
        xh = v.x(0)
        yh = v.x(1)

        # get distance to neighboring vertices
        length = np.zeros(len(neighborhood))
        weight = np.zeros(len(neighborhood))
        i = 0
        for vert in neighborhood:
            nidx = vtd[vert]
            xn = gc[nidx, 0]
            yn = gc[nidx, 1]
            length[i] = np.sqrt((xh - xn) * (xh - xn) + (yh - yn) * (yh - yn))
            flux[vert] = length[i]
#            weight[i] = elevation[idx] - elevation[vert]
#            # downhill only
#            if weight[i] < 0:
#              weight[i] = 0
#            i += 1
#
#        # find steepest slope
#        steepest = len(neighborhood)+2
#        if max(weight) > 0:
#            steepest = np.argmax(weight)
#        else:
#            weight[:] = 0
#        i = 0
#        for vert in neighborhood:
#            if i == steepest:
#                weight[i] = 1
#            else:
#                weight[i] = 0
#            flux[vert] = flux[vert] + length[i]*weight[i]
#            i += 1

    # sort from top to bottom
    sortedidx = np.argsort(-elevation)

    # accumulate fluxes from top to bottom
    for idx in sortedidx:
        neighborhood = neighbors[idx]
        weight = np.zeros(len(neighborhood))
        i = 0
        for vert in neighborhood:
            weight[i] = elevation[idx] - elevation[vert]
            # downhill only
            if weight[i] < 0:
                weight[i] = 0
            i += 1

        # find steepest slope
        steepest = len(neighborhood) + 2
        if max(weight) > 0:
            steepest = np.argmax(weight)
        else:
            weight[:] = 0
        i = 0
        for vert in neighborhood:
            if i == steepest:
                weight[i] = 1
            else:
                weight[i] = 0
            flux[vert] = flux[vert] + flux[idx] * weight[i]
            i += 1

    # calculate the diffusion coefficient
    q0 = 1 + De * pow(flux, nexp)
    q = Function(V)
    q.vector()[:] = q0[dof_to_vertex_map(V)]

    return (q)
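
A hypothetical usage sketch, assuming sd_nodenode is importable alongside the FEniCS namespace it relies on; the elevation field here is a stand-in for a solved u_n:

from fenics import *

mesh = UnitSquareMesh(16, 16)
V = FunctionSpace(mesh, 'CG', 1)

# stand-in elevation field; in practice u_n comes from the flow solver
u_n = interpolate(Expression('1.0 - x[0]*x[0] - 0.5*x[1]', degree=2), V)

De, nexp = 1.0, 2.0
q = sd_nodenode(mesh, V, u_n, De, nexp)
print(q.vector().get_local().min(), q.vector().get_local().max())
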