Example #1
def solveFiniteVolume(mesh,
                      a=1.0,
                      f=0.0,
                      fn=0.0,
                      vel=0.0,
                      u0=None,
                      times=None,
                      uL=None,
                      relax=1.0,
                      ws=None,
                      scheme='CDS',
                      **kwargs):
    """
    """
    # The workspace holds temporary data and allows reusing the assembled
    # matrix instead of rebuilding it
    swatch = pg.core.Stopwatch(True)
    sparse = True

    workspace = WorkSpace()
    if ws:
        workspace = ws

    a = solver.parseArgToArray(a, [mesh.cellCount(), mesh.boundaryCount()])
    f = solver.parseArgToArray(f, mesh.cellCount())
    fn = solver.parseArgToArray(fn, mesh.cellCount())

    boundsDirichlet = None
    boundsNeumann = None

    if not hasattr(workspace, 'S'):

        if 'uBoundary' in kwargs:
            boundsDirichlet = pg.solver.parseArgToBoundaries(
                kwargs['uBoundary'], mesh)

        if 'duBoundary' in kwargs:
            boundsNeumann = pg.solver.parseArgToBoundaries(
                kwargs['duBoundary'], mesh)

        workspace.S, workspace.rhsBCScales = diffusionConvectionKernel(
            mesh=mesh,
            a=a,
            f=f,
            uBoundaries=boundsDirichlet,
            duBoundaries=boundsNeumann,
            u0=u0,
            fn=fn,
            vel=vel,
            scheme=scheme,
            sparse=sparse,
            userData=kwargs.pop('userData', None))
        print('FVM kernel 1:', swatch.duration(True))
        dof = len(workspace.rhsBCScales)

        #        workspace.uDir = np.zeros(dof)

        #        if u0 is not None:
        #            workspace.uDir = np.array(u0)
        #
        #        if len(boundsDirichlet):
        #            for boundary, val in boundsDirichlet.items():
        #                workspace.uDir[boundary.leftCell().id()] = val

        workspace.ap = np.zeros(dof)

        # for nonlinear problems: under-relax the main diagonal

        if uL is not None:
            for i in range(dof):
                val = 0.0
                if sparse:
                    val = workspace.S.getVal(i, i) / relax
                    workspace.S.setVal(i, i, val)
#                    workspace.S[i, i] /= relax
#                    workspace.ap[i] = workspace.S[i, i]
                else:
                    val = workspace.S[i, i] / relax
                    workspace.S[i, i] = val

                workspace.ap[i] = val

        print('FVM kernel 2:', swatch.duration(True))
    # endif: not hasattr(workspace, 'S'):

    workspace.rhs = np.zeros(len(workspace.rhsBCScales))
    workspace.rhs[0:mesh.cellCount()] = f  # * mesh.cellSizes()

    #    if len(workspace.uDir):
    workspace.rhs += workspace.rhsBCScales

    # for nonlinear: relax progress with scaled last result
    if uL is not None:
        workspace.rhs += (1. - relax) * workspace.ap * uL
    # print('FVM: Prep:', swatch.duration(True))

    if not hasattr(times, '__len__'):

        if sparse and not hasattr(workspace, 'solver'):
            Sm = pg.matrix.SparseMatrix(workspace.S)
            # hold Sm until we have reference counting,
            # losing Sm here will kill LinSolver later
            workspace.Sm = Sm
            workspace.solver = pg.core.LinSolver(Sm, True)

        u = None
        if sparse:
            u = workspace.solver.solve(workspace.rhs)
        else:
            u = np.linalg.solve(workspace.S, workspace.rhs)
        print('FVM solve:', swatch.duration(True))
        return u[0:mesh.cellCount():1]
    else:
        theta = kwargs.pop('theta', 0.5)
        verbose = kwargs.pop('verbose', False)

        if sparse:
            I = solver.identity(len(workspace.rhs))
        else:
            I = np.diag(np.ones(len(workspace.rhs)))

        print("solve cN")
        return solver.crankNicolson(times,
                                    theta,
                                    workspace.S,
                                    I,
                                    f=workspace.rhs,
                                    u0=u0,
                                    verbose=verbose)
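
A minimal usage sketch for solveFiniteVolume (untested; it assumes pyGIMLi is importable as pg and the function above is in scope). The 1D grid, the unit source and the boundary markers 1/2 with zero Dirichlet values are illustrative assumptions only.

import numpy as np
import pygimli as pg

grid = pg.createGrid(x=np.linspace(0.0, 1.0, 21))   # 20 cells in 1D
# stationary diffusion with a unit source and homogeneous Dirichlet
# values on the outer boundaries (markers 1 and 2 are an assumption)
u = solveFiniteVolume(grid, a=1.0, f=1.0,
                      uBoundary=[[1, 0.0], [2, 0.0]])
print(len(u), grid.cellCount())   # one value per cell
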
Example #2
def diffusionConvectionKernel(
        mesh,
        a=None,
        b=0.0,
        uB=None,
        duB=None,
        vel=0,
        # u0=0,
        fn=None,
        scheme='CDS',
        sparse=False,
        time=0.0,
        userData=None):
    """
    Generate system matrix for diffusion and convection in a velocity field.

    Particle concentration u inside a velocity field.

    Peclet Number - ratio between convection and diffusion = F/D
        F = velocity flow through the volume boundary,
        D = diffusion coefficient

    Parameters
    ----------
    mesh: :gimliapi:`GIMLI::Mesh`
        Mesh represents spatial discretization of the calculation domain
    a: value | array | callable(cell, userData)
        Diffusion coefficient per cell
    b: value | array | callable(cell, userData)
        TODO What is b
    fn: iterable(cell)
        TODO What is fn
    vel: ndarray (N,dim) | RMatrix(N,dim)
        velocity field [[v_i,]_j,] with i=[1..3] for the mesh dimension
        and j = [0 .. N-1] per Cell or per Node so N is either
        mesh.cellCount() or mesh.nodeCount()
    scheme: str [CDS]
        Finite volume scheme
        * CDS -- Central Difference Scheme.
            May be unstable for Peclet no. |F/D| > 2.
            Diffusion dominant. Error of order 2.
        * UDS -- Upwind Scheme.
            Convection dominant. Error of order 1.
        * HS -- Hybrid Scheme.
            Diffusion dominant for Peclet number |F/D| < 2,
            convection dominant otherwise.
        * PS -- Power Law Scheme.
            Identical to HS for Peclet number |F/D| > 10, close to ES
            otherwise. Convection dominant.
        * ES -- Exponential scheme.
            Exact, but only for the stationary one-dimensional case.
    Returns
    -------
    S: :gimliapi:`GIMLI::SparseMatrix` | numpy.ndarray(nCells, nCells)
        Kernel matrix; it depends on vel, a, b, scheme, uB and duB, so it
        cannot be cached if any of these change
    rhsBoundaryScales: ndarray(nCells)
        RHS offset vector
    """
    if a is None:
        a = pg.Vector(mesh.boundaryCount(), 1.0)

    AScheme = None
    if scheme == 'CDS':
        AScheme = lambda peclet_: 1.0 - 0.5 * abs(peclet_)
    elif scheme == 'UDS':
        AScheme = lambda peclet_: 1.0
    elif scheme == 'HS':
        AScheme = lambda peclet_: max(0.0, 1.0 - 0.5 * abs(peclet_))
    elif scheme == 'PS':
        AScheme = lambda peclet_: max(0.0, (1.0 - 0.1 * abs(peclet_))**5.0)
    elif scheme == 'ES':
        AScheme = lambda peclet_: (peclet_) / (np.exp(abs(peclet_)) - 1.0) \
            if peclet_ != 0.0 else 1
    else:
        raise BaseException("Scheme unknwon:" + scheme)

    useHalfBoundaries = False

    dof = mesh.cellCount()

    if not uB:
        uB = []
    if not duB:
        duB = []

    if useHalfBoundaries:
        dof = mesh.cellCount() + len(uB)

    S = None
    if sparse:
        S = pg.matrix.SparseMapMatrix(dof, dof, stype=0) + identity(dof,
                                                                    scale=b)
    else:
        S = np.zeros((dof, dof))

    rhsBoundaryScales = np.zeros(dof)

    #    swatch = pg.core.Stopwatch(True)

    # we need this to quickly look up the uBoundary value by boundary id
    uBoundaryID = []
    uBoundaryVals = [None] * mesh.boundaryCount()

    for [b, val] in uB:

        if isinstance(b, pg.core.Boundary):
            uBoundaryID.append(b.id())
            uBoundaryVals[b.id()] = val
        elif isinstance(b, pg.core.Node):
            for _b in b.boundSet():
                if _b.rightCell() is None:
                    pg.warn(
                        'Dirichlet value for a single node is applied to '
                        'the nearest boundary.', _b.id())
                    uBoundaryID.append(_b.id())
                    uBoundaryVals[_b.id()] = val
                    break
        else:
            raise BaseException("Please give boundary, value list")

    duBoundaryID = []
    duBoundaryVals = [None] * mesh.boundaryCount()

    for [boundary, val] in duB:
        if not isinstance(boundary, pg.core.Boundary):
            raise BaseException("Please give boundary, value list")

        duBoundaryID.append(boundary.id())
        duBoundaryVals[boundary.id()] = val

    # iterate over all cells
    for cell in mesh.cells():
        cID = cell.id()
        for bi in range(cell.boundaryCount()):
            boundary = pg.core.findBoundary(cell.boundaryNodes(bi))

            ncell = boundary.leftCell()
            if ncell == cell:
                ncell = boundary.rightCell()

            v = findVelocity(mesh, vel, boundary, cell, ncell)

            # Convection part
            F = boundary.norm(cell).dot(v) * boundary.size()
            # print(F, boundary.size(), v, vel)
            # Diffusion part
            D = findDiffusion(mesh, a, boundary, cell, ncell)

            # print(F, D, F/D)
            # print((1.0 - 0.1 * abs(F/D))**5.0)
            if D > 0:
                aB = D * AScheme(F / D) + max(-F, 0.0)
            else:
                aB = max(-F, 0.0)

            aB /= cell.size()

            # print(cell.center(), boundary.center(), boundary.norm(cell), aB)
            if ncell:
                # no boundary
                if sparse:
                    S.addVal(cID, ncell.id(), -aB)
                    S.addVal(cID, cID, +aB)
                else:
                    S[cID, ncell.id()] -= aB
                    S[cID, cID] += aB

            elif not useHalfBoundaries:

                if boundary.id() in uBoundaryID:
                    val = pg.solver.generateBoundaryValue(
                        boundary,
                        uBoundaryVals[boundary.id()],
                        time=time,
                        userData=userData)

                    if sparse:
                        S.addVal(cID, cID, aB)
                    else:
                        S[cID, cID] += aB

                    rhsBoundaryScales[cID] += aB * val

                if boundary.id() in duBoundaryID:
                    # Neumann boundary condition
                    val = pg.solver.generateBoundaryValue(
                        boundary,
                        duBoundaryVals[boundary.id()],
                        time=time,
                        userData=userData)

                    # amount of flow through the boundary .. maybe buggy
                    # will be replaced by a suitable FE solver
                    outflow = -val * boundary.size() / cell.size()

                    if sparse:
                        S.addVal(cID, cID, outflow)
                    else:
                        S[cID, cID] += outflow

        if fn is not None:
            if sparse:
                # * cell.shape().domainSize())
                S.addVal(cell.id(), cell.id(), -fn[cell.id()])
            else:
                # * cell.shape().domainSize()
                S[cell.id(), cell.id()] -= fn[cell.id()]

    return S, rhsBoundaryScales
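
The face coefficient assembled in the loop above follows the general pattern aB = D * A(|Pe|) + max(-F, 0): the diffusive conductance weighted by the scheme function plus the upwinded convective flux. A self-contained sketch with illustrative values for F and D:

def faceCoefficient(F, D, A):
    # D * A(F/D) plus upwinded convection, with the same zero-diffusion
    # guard as the kernel above
    if D > 0:
        return D * A(F / D) + max(-F, 0.0)
    return max(-F, 0.0)

# hybrid-scheme weighting, copied from the 'HS' branch above
AHybrid = lambda pe: max(0.0, 1.0 - 0.5 * abs(pe))

F, D = -0.1, 0.2  # inflow through the face, moderate diffusion
print(faceCoefficient(F, D, AHybrid))  # 0.2 * 0.75 + 0.1 = 0.25
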
Example #3
def diffusionConvectionKernel(mesh, a=None, b=0.0, f=None,
                              uB=None, duB=None,
                              fn=None, vel=0, u0=0,
                              scheme='CDS', sparse=False, time=0.0,
                              userData=None):
    """
    Peclet Number - ratio between convection and diffusion times a characteristic length

    Advection .. forced convection
    """
    if a is None:
        a = pg.RVector(mesh.boundaryCount(), 1.0)

    AScheme = None
    if scheme == 'CDS':
        # CDS - central diff. scheme .. may be unstable for Peclet no. |F/D| > 2
        # diffusion dominant
        # Error of order 2
        AScheme = lambda peclet_: 1.0 - 0.5 * abs(peclet_)
    elif scheme == 'UDS':
        # UDS - upwind scheme
        # Convection dominant
        # Error of order 1
        AScheme = lambda peclet_: 1.0
    elif scheme == 'HS':
        # HS - hybrid scheme.
        # Diffusion dominant for Peclet-number |(F/D)| < 2
        # Convection dominant else
        AScheme = lambda peclet_: max(0.0, 1.0 - 0.5 * abs(peclet_))
    elif scheme == 'PS':
        # PS - power-law scheme.
        # Identical to HS for Peclet number |F/D| > 10, close to ES otherwise
        AScheme = lambda peclet_: max(0.0, (1.0 - 0.1 * abs(peclet_))**5.0)
    elif scheme == 'ES':
        # ES - exponential scheme
        # Exact, but only for the stationary one-dimensional case
        AScheme = lambda peclet_: (peclet_) / (np.exp(abs(peclet_)) - 1.0) \
            if peclet_ != 0.0 else 1
    else:
        raise BaseException("Scheme unknown: " + scheme)

    useHalfBoundaries = False

    dof = mesh.cellCount()

    if not uB:
        uB = []
    if not duB:
        duB = []

    if useHalfBoundaries:
        dof = mesh.cellCount() + len(uB)

    S = None
    if sparse:
        S = pg.RSparseMapMatrix(dof, dof, 0) + identity(dof) * b
    else:
        S = np.zeros((dof, dof))

    rhsBoundaryScales = np.zeros(dof)

#    swatch = pg.Stopwatch(True)

    # we need this to quickly look up the uBoundary value by boundary id
    uBoundaryID = []
    uBoundaryVals = [None] * mesh.boundaryCount()
    for i, [boundary, val] in enumerate(uB):
        if not isinstance(boundary, pg.Boundary):
            raise BaseException("Please give boundary, value list")
        uBoundaryID.append(boundary.id())
        uBoundaryVals[boundary.id()] = val
    duBoundaryID = []
    duBoundaryVals = [None] * mesh.boundaryCount()
    for i, [boundary, val] in enumerate(duB):
        if not isinstance(boundary, pg.Boundary):
            raise BaseException("Please give boundary, value list")
        duBoundaryID.append(boundary.id())
        duBoundaryVals[boundary.id()] = val

    for cell in mesh.cells():

        for bi in range(cell.boundaryCount()):
            boundary = pg.findBoundary(cell.boundaryNodes(bi))

            ncell = boundary.leftCell()
            if ncell == cell:
                ncell = boundary.rightCell()

            v = findVelocity(mesh, vel, boundary, cell, ncell)

            # Convection part
            F = boundary.norm(cell).dot(v) * boundary.size()

            # Diffusion part
            D = findDiffusion(mesh, a, boundary, cell, ncell)

            aB = D * AScheme(F / D) + max(-F, 0.0)

            aB /= cell.size()

            # print(cell.center(), boundary.center(), boundary.norm(cell), aB)
            if ncell:
                # no boundary
                if sparse:
                    S.addVal(cell.id(), ncell.id(), -aB)
                    S.addVal(cell.id(), cell.id(), +aB)
                else:
                    S[cell.id(), ncell.id()] -= aB
                    S[cell.id(), cell.id()] += aB

            elif not useHalfBoundaries:

                if boundary.id() in uBoundaryID:
                    val = pg.solver.generateBoundaryValue(boundary,
                                                          uBoundaryVals[
                                                              boundary.id()],
                                                          time=time,
                                                          userData=userData)

                    if sparse:
                        S.addVal(cell.id(), cell.id(), aB)
                    else:
                        S[cell.id(), cell.id()] += aB

                    rhsBoundaryScales[cell.id()] += aB * val

                if boundary.id() in duBoundaryID:
                    # Neumann boundary condition
                    val = pg.solver.generateBoundaryValue(
                        boundary, duBoundaryVals[boundary.id()], time=time,
                        userData=userData)
                    if sparse:
                        # amount of flow through the boundary
                        S.addVal(cell.id(), cell.id(), val *
                                 boundary.size() / cell.size())
                    else:
                        S[cell.id(), cell.id()] += val * \
                            boundary.size() / cell.size()

        if fn is not None:
            if sparse:
                # * cell.shape().domainSize())
                S.addVal(cell.id(), cell.id(), -fn[cell.id()])
            else:
                # * cell.shape().domainSize()
                S[cell.id(), cell.id()] -= fn[cell.id()]

    if useHalfBoundaries:
        for i, [b, val] in enumerate(duB):  # not defined!!!
            bIdx = mesh.cellCount() + i

            c = b.leftCell()
            if not c:
                c = b.rightCell()

            if c:
                n = b.norm(c)
                v = findVelocity(mesh, vel, b, c, nc=None)
                F = n.dot(v) * b.size()

                D = findDiffusion(mesh, a, b, c)
                aB = D * AScheme(F / D) + max(-F, 0.0)

                if useHalfBoundaries:
                    if sparse:
                        S.setVal(c.id(), c.id(), 1.)
                        S.addVal(c.id(), bIdx, -aB)
                    else:
                        S[bIdx, bIdx] = 1.
                        S[c.id(), bIdx] -= aB

                    rhsBoundaryScales[bIdx] = aB

    return S, rhsBoundaryScales
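
The five schemes differ only in the weighting A(|Pe|) applied to the diffusive part. A small comparison sketch (lambdas copied from the branches above, Peclet values chosen arbitrarily):

import numpy as np

schemes = {
    'CDS': lambda pe: 1.0 - 0.5 * abs(pe),
    'UDS': lambda pe: 1.0,
    'HS': lambda pe: max(0.0, 1.0 - 0.5 * abs(pe)),
    'PS': lambda pe: max(0.0, (1.0 - 0.1 * abs(pe))**5.0),
    'ES': lambda pe: pe / (np.exp(abs(pe)) - 1.0) if pe != 0.0 else 1.0,
}

for pe in [0.5, 2.0, 10.0]:
    print(pe, {name: round(A(pe), 3) for name, A in schemes.items()})
# CDS drops below zero for |Pe| > 2, which is why HS and PS clip it at 0.
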
Example #4
c = v * dt / (dx*dx)  # [(m*m)/s * s/(m*m)] = []
print("Courant-Friedrichs-Lewy condition:", "CFL =", c)
# c needs to be <= 0.5

uE = np.zeros((len(t), len(x)))
uE[0] = u0
uE[0][0] = 0.0
uE[0][-1] = 0.0

uI = np.array(uE)
uFEM = np.array(uE)

#L = np.diag(np.ones(len(x)-1), -1) - 2. * np.diag(np.ones(len(x))) + np.diag(np.ones(len(x)-1), 1)
Lg = solver.triDiagToeplitz(len(x), a=2.0, l=-1.0, r=-1.0,
                            start=1, end=len(x)-1)
I = solver.identity(len(x))
# L[1,0]=2  # Neumann left
# L[-2,-1]=2  # Neumann right

grid = pg.createGrid(x=x)
uFEM = np.array(uE)
dirichletBC = [[1, 0.0],  # left
               [2, 0.0]]  # right

A = solver.createStiffnessMatrix(grid, np.ones(grid.cellCount()))
M = solver.createMassMatrix(grid, np.ones(grid.cellCount()))

for n in range(1, len(t)):
    """
        Direct time and space discretization of the second derivatives
        # diff in space is 2nd order central
Example #5
def diffusionConvectionKernel(mesh, a=None, b=0.0,
                              uB=None, duB=None,
                              vel=0,
                              # u0=0,
                              fn=None,
                              scheme='CDS', sparse=False, time=0.0,
                              userData=None):
    """
    Generate system matrix for diffusion and convection in a velocity field.

    Particle concentration u inside a velocity field.

    Peclet Number - ratio between convection and diffusion = F/D
        F = velocity flow through the volume boundary,
        D = diffusion coefficient

    Parameters
    ----------
    mesh : :gimliapi:`GIMLI::Mesh`
        Mesh represents spatial discretization of the calculation domain

    a   : value | array | callable(cell, userData)
        Diffusion coefficient per cell

    b   : value | array | callable(cell, userData)
        TODO What is b

    fn   : iterable(cell)
        TODO What is fn

    vel : ndarray (N,dim) | RMatrix(N,dim)
        velocity field [[v_i,]_j,] with i=[1..3] for the mesh dimension
        and j = [0 .. N-1] per Cell or per Node so N is either
        mesh.cellCount() or mesh.nodeCount()

    scheme : str [CDS]
        Finite volume scheme

        * CDS -- Central Difference Scheme.
            May be unstable for Peclet no. |F/D| > 2.
            Diffusion dominant. Error of order 2.
        * UDS -- Upwind Scheme.
            Convection dominant. Error of order 1.
        * HS -- Hybrid Scheme.
            Diffusion dominant for Peclet number |F/D| < 2,
            convection dominant otherwise.
        * PS -- Power Law Scheme.
            Identical to HS for Peclet number |F/D| > 10, close to ES
            otherwise. Convection dominant.
        * ES -- Exponential scheme.
            Exact, but only for the stationary one-dimensional case.

    Returns
    -------

    S : :gimliapi:`GIMLI::SparseMatrix` | numpy.ndarray(nCells, nCells)
        Kernel matrix; it depends on vel, a, b, scheme, uB and duB, so it
        cannot be cached if any of these change

    rhsBoundaryScales : ndarray(nCells)
        RHS offset vector
    """
    if a is None:
        a = pg.RVector(mesh.boundaryCount(), 1.0)

    AScheme = None
    if scheme == 'CDS':
        AScheme = lambda peclet_: 1.0 - 0.5 * abs(peclet_)
    elif scheme == 'UDS':
        AScheme = lambda peclet_: 1.0
    elif scheme == 'HS':
        AScheme = lambda peclet_: max(0.0, 1.0 - 0.5 * abs(peclet_))
    elif scheme == 'PS':
        AScheme = lambda peclet_: max(0.0, (1.0 - 0.1 * abs(peclet_))**5.0)
    elif scheme == 'ES':
        AScheme = lambda peclet_: (peclet_) / (np.exp(abs(peclet_)) - 1.0) \
            if peclet_ != 0.0 else 1
    else:
        raise BaseException("Scheme unknwon:" + scheme)

    useHalfBoundaries = False

    dof = mesh.cellCount()

    if not uB:
        uB = []
    if not duB:
        duB = []

    if useHalfBoundaries:
        dof = mesh.cellCount() + len(uB)

    S = None
    if sparse:
        S = pg.RSparseMapMatrix(dof, dof, stype=0) + identity(dof, scale=b)
    else:
        S = np.zeros((dof, dof))

    rhsBoundaryScales = np.zeros(dof)

#    swatch = pg.Stopwatch(True)

    # we need this to quickly look up the uBoundary value by boundary id
    uBoundaryID = []
    uBoundaryVals = [None] * mesh.boundaryCount()
    for [boundary, val] in uB:

        if not isinstance(boundary, pg.Boundary):
            raise BaseException("Please give boundary, value list")

        uBoundaryID.append(boundary.id())
        uBoundaryVals[boundary.id()] = val

    duBoundaryID = []
    duBoundaryVals = [None] * mesh.boundaryCount()

    for [boundary, val] in duB:
        if not isinstance(boundary, pg.Boundary):
            raise BaseException("Please give boundary, value list")

        duBoundaryID.append(boundary.id())
        duBoundaryVals[boundary.id()] = val

    # iterate over all cells
    for cell in mesh.cells():
        cID = cell.id()
        for bi in range(cell.boundaryCount()):
            boundary = pg.findBoundary(cell.boundaryNodes(bi))

            ncell = boundary.leftCell()
            if ncell == cell:
                ncell = boundary.rightCell()

            v = findVelocity(mesh, vel, boundary, cell, ncell)

            # Convection part
            F = boundary.norm(cell).dot(v) * boundary.size()
            # print(F, boundary.size(), v, vel)
            # Diffusion part
            D = findDiffusion(mesh, a, boundary, cell, ncell)

            # print(F, D, F/D)
            # print((1.0 - 0.1 * abs(F/D))**5.0)
            aB = D * AScheme(F / D) + max(-F, 0.0)

            aB /= cell.size()

            # print(cell.center(), boundary.center(), boundary.norm(cell), aB)
            if ncell:
                # no boundary
                if sparse:
                    S.addVal(cID, ncell.id(), -aB)
                    S.addVal(cID, cID, +aB)
                else:
                    S[cID, ncell.id()] -= aB
                    S[cID, cID] += aB

            elif not useHalfBoundaries:

                if boundary.id() in uBoundaryID:
                    val = pg.solver.generateBoundaryValue(
                        boundary,
                        uBoundaryVals[boundary.id()],
                        time=time,
                        userData=userData)

                    if sparse:
                        S.addVal(cID, cID, aB)
                    else:
                        S[cID, cID] += aB

                    rhsBoundaryScales[cID] += aB * val

                if boundary.id() in duBoundaryID:
                    # Neumann boundary condition
                    val = pg.solver.generateBoundaryValue(
                        boundary,
                        duBoundaryVals[boundary.id()],
                        time=time,
                        userData=userData)

                    # amount of flow through the boundary
                    outflow = val * boundary.size() / cell.size()
                    if sparse:
                        S.addVal(cID, cID, outflow)
                    else:
                        S[cID, cID] += outflow

        if fn is not None:
            if sparse:
                # * cell.shape().domainSize())
                S.addVal(cell.id(), cell.id(), -fn[cell.id()])
            else:
                # * cell.shape().domainSize()
                S[cell.id(), cell.id()] -= fn[cell.id()]

    return S, rhsBoundaryScales
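
A minimal steady-state sketch using the dense (sparse=False) output of this kernel. Untested; it assumes the older pg.Boundary/pg.RVector API this example targets and that the helpers used inside the kernel (findVelocity, findDiffusion, identity) are in scope. The 1D grid, the zero Dirichlet values and the unit source are illustrative only.

import numpy as np
import pygimli as pg

grid = pg.createGrid(x=np.linspace(0.0, 1.0, 11))   # 10 cells
# outer boundaries are those missing a neighbour cell
uB = [[b, 0.0] for b in grid.boundaries()
      if b.leftCell() is None or b.rightCell() is None]

S, rhsBC = diffusionConvectionKernel(grid, a=None, vel=0.0,
                                     uB=uB, sparse=False)
f = np.ones(grid.cellCount())        # unit source per cell
u = np.linalg.solve(S, f + rhsBC)    # steady-state cell values
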
Example #6
def solveFiniteVolume(mesh, a=1.0, f=0.0, fn=0.0, vel=0.0, u0=None,
                      times=None,
                      uL=None, relax=1.0,
                      ws=None, scheme='CDS', **kwargs):
    """
    """
    # The workspace holds temporary data and allows reusing the assembled
    # matrix instead of rebuilding it
    swatch = pg.Stopwatch(True)
    sparse = True

    workspace = WorkSpace()
    if ws:
        workspace = ws

    a = solver.parseArgToArray(a, [mesh.cellCount(), mesh.boundaryCount()])
    f = solver.parseArgToArray(f, mesh.cellCount())
    fn = solver.parseArgToArray(fn, mesh.cellCount())

    boundsDirichlet = None
    boundsNeumann = None

    if not hasattr(workspace, 'S'):

        if 'uBoundary' in kwargs:
            boundsDirichlet = pg.solver.parseArgToBoundaries(
                    kwargs['uBoundary'], mesh)

        if 'duBoundary' in kwargs:
            boundsNeumann = pg.solver.parseArgToBoundaries(
                    kwargs['duBoundary'], mesh)

        workspace.S, workspace.rhsBCScales = diffusionConvectionKernel(
                mesh=mesh, a=a, f=f, uBoundaries=boundsDirichlet,
                duBoundaries=boundsNeumann, u0=u0, fn=fn, vel=vel,
                scheme=scheme, sparse=sparse,
                userData=kwargs.pop('userData', None))
        print('FVM kernel 1:', swatch.duration(True))
        dof = len(workspace.rhsBCScales)

#        workspace.uDir = np.zeros(dof)

#        if u0 is not None:
#            workspace.uDir = np.array(u0)
#
#        if len(boundsDirichlet):
#            for boundary, val in boundsDirichlet.items():
#                workspace.uDir[boundary.leftCell().id()] = val

        workspace.ap = np.zeros(dof)

        # for nonlinear problems: under-relax the main diagonal

        if uL is not None:
            for i in range(dof):
                val = 0.0
                if sparse:
                    val = workspace.S.getVal(i, i) / relax
                    workspace.S.setVal(i, i, val)
#                    workspace.S[i, i] /= relax
#                    workspace.ap[i] = workspace.S[i, i]
                else:
                    val = workspace.S[i, i] / relax
                    workspace.S[i, i] = val

                workspace.ap[i] = val


        print('FVM kernel 2:', swatch.duration(True))
    # endif: not hasattr(workspace, 'S'):

    workspace.rhs = np.zeros(len(workspace.rhsBCScales))
    workspace.rhs[0:mesh.cellCount()] = f  # * mesh.cellSizes()

#    if len(workspace.uDir):
    workspace.rhs += workspace.rhsBCScales

    # for nonlinear: relax progress with scaled last result
    if uL is not None:
        workspace.rhs += (1. - relax) * workspace.ap * uL
    # print('FVM: Prep:', swatch.duration(True))

    if not hasattr(times, '__len__'):

        if sparse and not hasattr(workspace, 'solver'):
            Sm = pg.RSparseMatrix(workspace.S)
            # hold Sm until we have reference counting,
            # losing Sm here will kill LinSolver later
            workspace.Sm = Sm
            workspace.solver = pg.LinSolver(Sm, True)

        u = None
        if sparse:
            u = workspace.solver.solve(workspace.rhs)
        else:
            u = np.linalg.solve(workspace.S, workspace.rhs)
        print('FVM solve:', swatch.duration(True))
        return u[0:mesh.cellCount():1]
    else:
        theta = kwargs.pop('theta', 0.5)
        verbose = kwargs.pop('verbose', False)

        if sparse:
            I = solver.identity(len(workspace.rhs))
        else:
            I = np.diag(np.ones(len(workspace.rhs)))

        print("solve cN")
        return solver.crankNicolson(times, theta, workspace.S, I,
                                    f=workspace.rhs, u0=u0, verbose=verbose)
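
For the transient branch, the kernel matrix S, an identity matrix I and the assembled right-hand side are handed to solver.crankNicolson. Below is a dense, stand-alone sketch of a theta-scheme of that kind (theta = 0.5 corresponds to Crank-Nicolson); the thetaScheme helper and the toy 3x3 system are illustrative assumptions, not pyGIMLi API.

import numpy as np

def thetaScheme(times, theta, S, I, f, u0):
    # (I + dt*theta*S) u_new = (I - dt*(1 - theta)*S) u_old + dt*f
    u = np.zeros((len(times), len(u0)))
    u[0] = u0
    for n in range(1, len(times)):
        dt = times[n] - times[n - 1]
        A = I + dt * theta * S
        b = (I - dt * (1.0 - theta) * S).dot(u[n - 1]) + dt * f
        u[n] = np.linalg.solve(A, b)
    return u

# toy symmetric 3x3 system standing in for the FVM kernel matrix
S = np.array([[2., -1., 0.], [-1., 2., -1.], [0., -1., 2.]])
u = thetaScheme(np.linspace(0.0, 1.0, 11), 0.5, S, np.eye(3),
                f=np.ones(3), u0=np.zeros(3))
print(u[-1])  # relaxes toward the stationary solution of S u = f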