Example #1
    def __init__(self, mesh, element, name='fspace'):

        self._themiselement = ThemisElement(element)

        self.ncomp = self._themiselement.get_ncomp()
        self._mesh = mesh
        self._name = name
        self._spaces = [
            self,
        ]
        self.nspaces = 1
        self.npatches = mesh.npatches

        UFLFunctionSpace.__init__(self, mesh, element)

        # create DMDAs and local-to-global maps (LGMaps)
        self._composite_da = PETSc.DMComposite().create()
        self._da = []
        self._lgmaps = []
        self._component_offsets = []
        for ci in range(self.ncomp):
            self._component_offsets.append(self.npatches * ci)
            das = []
            lgmaps = []
            for bi in range(self.npatches):
                das.append(mesh.create_dof_map(self._themiselement, ci, bi))
                lgmaps.append(das[bi].getLGMap())
                self._composite_da.addDM(das[bi])
            self._da.append(das)
            self._lgmaps.append(lgmaps)
        self._composite_da.setUp()

        self._component_lgmaps = self._composite_da.getLGMaps()
        self._overall_lgmap = self._composite_da.getLGMap()
        self._cb_lis = self._composite_da.getLocalISs()
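
A minimal standalone sketch of the same DMComposite pattern in plain petsc4py (the component count and DMDA sizes below are illustrative assumptions, not Themis values):

from petsc4py import PETSc

# one DMDA per component, gathered into a single DMComposite
composite = PETSc.DMComposite().create()
das = []
for ci in range(2):  # two components, purely illustrative
    da = PETSc.DMDA().create(dim=1, dof=1, sizes=[8], stencil_width=1)
    das.append(da)
    composite.addDM(da)
composite.setUp()

overall_lgmap = composite.getLGMap()      # one LGMap over all sub-DMs
component_lgmaps = composite.getLGMaps()  # one LGMap per sub-DM
local_iss = composite.getLocalISs()       # local index sets, per sub-DM
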
Example #2
File: form.py  Project: dham/themis
def create_empty(target, source):
    # create an empty matrix with the correct parallel layout
    mlist = []
    nlist = []
    for si1 in range(target.nspaces):
        tspace = target.get_space(si1)
        for ci1 in range(tspace.ncomp):
            for bi1 in range(tspace.npatches):
                m = tspace.get_localndofs(ci1, bi1)
                mlist.append(m)
    for si2 in range(source.nspaces):
        sspace = source.get_space(si2)
        for ci2 in range(sspace.ncomp):
            for bi2 in range(sspace.npatches):
                n = sspace.get_localndofs(ci2, bi2)
                nlist.append(n)

    M = np.sum(np.array(mlist, dtype=np.int32))
    N = np.sum(np.array(nlist, dtype=np.int32))
    mat = PETSc.Mat()
    mat.create(PETSc.COMM_WORLD)
    mat.setSizes(((M, None), (N, None)))
    mat.setType('aij')
    mat.setLGMap(target.get_overall_lgmap(), cmap=source.get_overall_lgmap())
    mat.setUp()
    mat.assemblyBegin()
    mat.assemblyEnd()

    return mat
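
For reference, here is the core petsc4py pattern in isolation: local sizes are passed as ((m, None), (n, None)) so PETSc sums them across ranks to derive the global sizes, and the begin/end assembly pair yields a valid, all-zero AIJ matrix. A hedged sketch with made-up local sizes:

from petsc4py import PETSc

m, n = 10, 10  # local row/column sizes on this rank (illustrative)
mat = PETSc.Mat()
mat.create(PETSc.COMM_WORLD)
mat.setSizes(((m, None), (n, None)))  # PETSc derives the global sizes
mat.setType('aij')
mat.setUp()
mat.assemblyBegin()  # an "empty" assembly: valid matrix, no entries
mat.assemblyEnd()
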
Example #3
File: form.py  Project: dham/themis
def create_mono(target, source, blocklist, kernellist):
    # create matrix
    mlist = []
    nlist = []
    for si1 in range(target.nspaces):
        tspace = target.get_space(si1)
        for ci1 in range(tspace.ncomp):
            for bi1 in range(tspace.npatches):
                m = tspace.get_localndofs(ci1, bi1)
                mlist.append(m)
    for si2 in range(source.nspaces):
        sspace = source.get_space(si2)
        for ci2 in range(sspace.ncomp):
            for bi2 in range(sspace.npatches):
                n = sspace.get_localndofs(ci2, bi2)
                nlist.append(n)

    M = np.sum(np.array(mlist, dtype=np.int32))
    N = np.sum(np.array(nlist, dtype=np.int32))
    mat = PETSc.Mat()
    mat.create(PETSc.COMM_WORLD)
    mat.setSizes(((M, None), (N, None)))
    mat.setType('aij')
    mat.setLGMap(target.get_overall_lgmap(), cmap=source.get_overall_lgmap())

    # preallocate matrix
    # NOTE: preallocation is not exact for facet integrals - it works, but over-allocates
    mlist.insert(0, 0)
    mlist_adj = np.cumsum(mlist)
    dnnzarr = np.zeros(M, dtype=np.int32)
    onnzarr = np.zeros(M, dtype=np.int32)
    i = 0  # tracks which row "block" we are at
    # this loop order ensures that we fill an entire row of the matrix first,
    # assuming that fields are stored si, ci, bi (which they are)
    for si1 in range(target.nspaces):
        tspace = target.get_space(si1)
        for ci1 in range(tspace.ncomp):
            # only pre-allocate diagonal blocks, so a single loop over bi
            # (here we can assume tspace.mesh == sspace.mesh, and therefore equal patch counts)
            for bi in range(tspace.npatches):
                for si2 in range(source.nspaces):
                    sspace = source.get_space(si2)
                    for ci2 in range(sspace.ncomp):
                        if (si1, si2) in blocklist:
                            bindex = blocklist.index((si1, si2))
                            interior_x, interior_y, interior_z = get_interior_flags(kernellist[bindex])
                            dnnz, onnz = two_form_preallocate_opt(tspace.mesh(), tspace, sspace, ci1, ci2, bi, interior_x, interior_y, interior_z)
                            dnnz = np.ravel(dnnz)
                            onnz = np.ravel(onnz)
                            dnnzarr[mlist_adj[i]:mlist_adj[i + 1]] += dnnz
                            onnzarr[mlist_adj[i]:mlist_adj[i + 1]] += onnz
                i = i + 1  # increment row block
    mat.setPreallocationNNZ((dnnzarr, onnzarr))
    mat.setOption(PETSc.Mat.Option.IGNORE_ZERO_ENTRIES, False)
    mat.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True)
    mat.setUp()
    mat.zeroEntries()

    return mat
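
The preallocation step above supplies per-row nonzero counts for the diagonal (dnnz) and off-diagonal (onnz) blocks of the locally owned rows. A hedged, self-contained sketch of that mechanism with toy counts:

import numpy as np
from petsc4py import PETSc

m = 10  # local number of rows (illustrative)
dnnz = np.full(m, 3, dtype=np.int32)  # nonzeros per row, diagonal block
onnz = np.full(m, 2, dtype=np.int32)  # nonzeros per row, off-diagonal block

mat = PETSc.Mat()
mat.create(PETSc.COMM_WORLD)
mat.setSizes(((m, None), (m, None)))
mat.setType('aij')
mat.setPreallocationNNZ((dnnz, onnz))
# fail loudly if assembly ever needs a nonzero that was not preallocated
mat.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True)
mat.setUp()
mat.zeroEntries()
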
Example #4
File: form.py  Project: dham/themis
def create_matrix(mat_type, target, source, blocklist, kernellist):

    # block (nest) matrix
    if mat_type == 'nest' and (target.nspaces > 1 or source.nspaces > 1):
        # create matrix array
        matrices = []
        for si1 in range(target.nspaces):
            matrices.append([])
            for si2 in range(source.nspaces):
                if (si1, si2) in blocklist:
                    bindex = blocklist.index((si1, si2))
                    mat = create_mono(target.get_space(si1), source.get_space(si2), [(0, 0), ], [kernellist[bindex], ])
                else:
                    mat = create_empty(target.get_space(si1), source.get_space(si2))
                matrices[si1].append(mat)

        # do an empty assembly
        for si1 in range(target.nspaces):
            for si2 in range(source.nspaces):
                if (si1, si2) in blocklist:
                    bindex = blocklist.index((si1, si2))
                    fill_mono(matrices[si1][si2], target.get_space(si1), source.get_space(si2), [(0, 0), ], [kernellist[bindex], ], zeroassembly=True)
                    # this catches bugs in pre-allocation and the initial assembly by locking the non-zero structure
                    matrices[si1][si2].setOption(PETSc.Mat.Option.NEW_NONZERO_LOCATION_ERR, True)
                    matrices[si1][si2].setOption(PETSc.Mat.Option.UNUSED_NONZERO_LOCATION_ERR, True)
                    # these are for zeroRows: the first keeps the non-zero structure when zeroing rows;
                    # the second controls whether each process zeros only its own rows
                    matrices[si1][si2].setOption(PETSc.Mat.Option.KEEP_NONZERO_PATTERN, True)
                    matrices[si1][si2].setOption(PETSc.Mat.Option.NO_OFF_PROC_ZERO_ROWS, False)

        # create nest
        mat = PETSc.Mat().createNest(matrices, comm=PETSc.COMM_WORLD)

    # monolithic matrix
    if (mat_type == 'nest' and (target.nspaces == 1 and source.nspaces == 1)) or mat_type == 'aij':
        # create matrix
        mat = create_mono(target, source, blocklist, kernellist)
        # do an empty assembly
        fill_mono(mat, target, source, blocklist, kernellist, zeroassembly=True)

    mat.assemble()

    # this catches bugs in pre-allocation and the initial assembly by locking the non-zero structure
    mat.setOption(PETSc.Mat.Option.NEW_NONZERO_LOCATION_ERR, True)
    mat.setOption(PETSc.Mat.Option.UNUSED_NONZERO_LOCATION_ERR, True)
    mat.setOption(PETSc.Mat.Option.NEW_NONZERO_ALLOCATION_ERR, True)
    # these are for zeroRows: the first keeps the non-zero structure when zeroing rows;
    # the second controls whether each process zeros only its own rows
    mat.setOption(PETSc.Mat.Option.KEEP_NONZERO_PATTERN, True)
    mat.setOption(PETSc.Mat.Option.NO_OFF_PROC_ZERO_ROWS, False)
    return mat
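
The 'nest' branch combines per-block matrices into a PETSc MatNest. A hedged sketch of that combination step alone, with tiny illustrative blocks standing in for the output of create_mono/create_empty:

from petsc4py import PETSc

def tiny_aij(m, n):
    # helper (illustrative): an assembled, empty AIJ block with local size m-by-n
    M = PETSc.Mat().createAIJ(((m, None), (n, None)), comm=PETSc.COMM_WORLD)
    M.setUp()
    M.assemble()
    return M

A, B = tiny_aij(4, 4), tiny_aij(4, 3)
C, D = tiny_aij(3, 4), tiny_aij(3, 3)
nest = PETSc.Mat().createNest([[A, B], [C, D]], comm=PETSc.COMM_WORLD)
nest.assemble()
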
Example #5
    def create_dof_map(self, elem, ci, b):

        swidth = elem.maxdegree()
        ndof = elem.ndofs()

        sizes = []
        # the number of "unique" dofs per element, i.e. 1 for DG0, 2 for DG1,
        # 1 for CG1, 2 for CG2, etc.
        ndofs_per_cell = []
        for i in range(self.ndim):
            nx = elem.get_nx(ci, i, self.nxs[b][i], self.bcs[i])
            ndofs = elem.get_ndofs_per_element(ci, i)

            ndofs_per_cell.append(ndofs)
            sizes.append(nx)

        da = PETSc.DMDA().create(dim=self.ndim,
                                 dof=ndof,
                                 proc_sizes=self._cell_das[b].getProcSizes(),
                                 sizes=sizes,
                                 boundary_type=self._blist,
                                 stencil_type=PETSc.DMDA.StencilType.BOX,
                                 stencil_width=swidth,
                                 setup=False)

        # THIS IS AN UGLY HACK, NEEDED BECAUSE THE OWNERSHIP-RANGES ARGUMENT
        # TO petsc4py's DMDA create IS BROKEN
        bdx = list(
            np.array(sizes) -
            np.array(self._cell_das[b].getSizes(), dtype=np.int32) *
            np.array(ndofs_per_cell, dtype=np.int32))
        # bdx is the "extra" boundary dof for this space (0 if periodic, 1 if non-periodic)
        if self.ndim == 1:
            ndofs_per_cell.append(1)
            ndofs_per_cell.append(1)
            bdx.append(0)
            bdx.append(0)
        if self.ndim == 2:
            ndofs_per_cell.append(1)
            bdx.append(0)
        decompfunction(self._cell_das[b], da, self.ndim, ndofs_per_cell[0],
                       ndofs_per_cell[1], ndofs_per_cell[2], int(bdx[0]),
                       int(bdx[1]), int(bdx[2]))
        da.setUp()

        return da
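
decompfunction is a Themis-internal workaround; the portable part of the pattern is creating the dof DMDA with the same process grid as the cell DMDA via proc_sizes, deferring setUp until the decomposition is fixed. A hedged sketch with illustrative sizes:

from petsc4py import PETSc

cell_da = PETSc.DMDA().create(dim=2, dof=1, sizes=[8, 8], stencil_width=1)
dof_da = PETSc.DMDA().create(dim=2,
                             dof=4,           # dofs per point (illustrative)
                             sizes=[16, 16],  # dof-grid sizes (illustrative)
                             proc_sizes=cell_da.getProcSizes(),
                             stencil_type=PETSc.DMDA.StencilType.BOX,
                             stencil_width=2,
                             setup=False)  # fix the decomposition before setUp
dof_da.setUp()
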
Example #6
File: solver.py  Project: dham/themis
    def __init__(self, problem, **kwargs):
        """
		:arg problem: A :class:`NonlinearVariationalProblem` to solve.
		:kwarg nullspace: an optional :class:`.VectorSpaceBasis` (or
			   :class:`.MixedVectorSpaceBasis`) spanning the null
			   space of the operator.
		:kwarg transpose_nullspace: as for the nullspace, but used to
			   make the right hand side consistent.
		:kwarg near_nullspace: as for the nullspace, but used to
			   specify the near nullspace (for multigrid solvers).
		:kwarg solver_parameters: Solver parameters to pass to PETSc.
			   This should be a dict mapping PETSc options to values.
		:kwarg options_prefix: an optional prefix used to distinguish
			   PETSc options.  If not provided a unique prefix will be
			   created.  Use this option if you want to pass options
			   to the solver from the command line in addition to
			   through the ``solver_parameters`` dict.
		:kwarg pre_jacobian_callback: A user-defined function that will
			   be called immediately before Jacobian assembly. This can
			   be used, for example, to update a coefficient function
			   that has a complicated dependence on the unknown solution.
		:kwarg pre_function_callback: As above, but called immediately
			   before residual assembly

		Example usage of the ``solver_parameters`` option: to set the
		nonlinear solver type to just use a linear solver, use

		.. code-block:: python

			{'snes_type': 'ksponly'}

		PETSc flag options should be specified with `bool` values.
		For example:

		.. code-block:: python

			{'snes_monitor': True}

		To use the ``pre_jacobian_callback`` or ``pre_function_callback``
		functionality, the user-defined function must accept the current
		solution as a petsc4py Vec. Example usage is given below:

		.. code-block:: python

			def update_diffusivity(current_solution):
				with cursol.dat.vec as v:
					current_solution.copy(v)
				solve(trial*test*dx == dot(grad(cursol), grad(test))*dx, diffusivity)

			solver = NonlinearVariationalSolver(problem,
												pre_jacobian_callback=update_diffusivity)

		"""
        assert isinstance(problem, NonlinearVariationalProblem)

        parameters = kwargs.get("solver_parameters")
        nullspace = kwargs.get("nullspace")
        nullspace_T = kwargs.get("transpose_nullspace")
        near_nullspace = kwargs.get("near_nullspace")
        options_prefix = kwargs.get("options_prefix")
        pre_j_callback = kwargs.get("pre_jacobian_callback")
        pre_f_callback = kwargs.get("pre_function_callback")

        # CAN THIS COLLIDE?
        # ONLY AN ISSUE WITH MULTIPLE SOLVERS WITHOUT OPTIONS PREFIXES;
        # DOESN'T REALLY SHOW UP IN PRACTICE
        if options_prefix is None:
            options_prefix = str(abs(problem.J.__hash__()))
        OptDB = PETSc.Options()

        #parameters are set in the following order (higher takes priority):
        #1) command-line
        #2) solver_parameters keyword argument
        #3) default (see below)

        # set parameters from solver_parameters
        if parameters is not None:
            for parameter, value in parameters.items():
                _set_parameter(OptDB, options_prefix + parameter, value)

        #set default parameters
        _set_parameter(OptDB, options_prefix + 'mat_type', 'aij')
        _set_parameter(OptDB, options_prefix + 'pmat_type', 'aij')
        _set_parameter(OptDB, options_prefix + 'ksp_type', 'gmres')
        _set_parameter(OptDB, options_prefix + 'pc_type', 'jacobi')

        #matrix-free
        mat_type = OptDB.getString(options_prefix + 'mat_type')
        pmat_type = OptDB.getString(options_prefix + 'pmat_type')
        matfree = mat_type == "matfree"
        pmatfree = pmat_type == "matfree"

        # No preconditioner by default for matrix-free
        if (problem.Jp is not None and pmatfree) or matfree:
            _set_parameter(OptDB, options_prefix + 'pc_type', 'none')

        self.snes = PETSc.SNES().create(PETSc.COMM_WORLD)

        self.snes.setOptionsPrefix(options_prefix)

        self.problem = problem

        #create forms and set function/jacobians and associated assembly functions

        #ADD NULL SPACES
        #ADD FORM COMPILER OPTIONS

        self.Fform = OneForm(problem.F,
                             self.problem.u,
                             bcs=problem.bcs,
                             pre_f_callback=pre_f_callback)
        #WHAT OTHER ARGUMENTS HERE?
        #DOES ABOVE NEED BVALS ARGUMENT?

        self.snes.setFunction(self.Fform.assembleform, self.Fform.vector)

        #EVENTUALLY ADD ABILITY HERE TO HAVE J NON-CONSTANT AND P CONSTANT, ETC.
        if problem.Jp is None:
            self.Jform = TwoForm(problem.J,
                                 self.problem.u,
                                 mat_type=mat_type,
                                 constantJ=problem._constant_jacobian,
                                 constantP=problem._constant_jacobian,
                                 bcs=problem.bcs,
                                 pre_j_callback=pre_j_callback)
            self.snes.setJacobian(self.Jform.assembleform, self.Jform.mat)

        else:
            self.Jform = TwoForm(problem.J,
                                 self.problem.u,
                                 Jp=problem.Jp,
                                 mat_type=mat_type,
                                 pmat_type=pmat_type,
                                 constantJ=problem._constant_jacobian,
                                 constantP=problem._constant_jacobian,
                                 bcs=problem.bcs,
                                 pre_j_callback=pre_j_callback)
            self.snes.setJacobian(self.Jform.assembleform, self.Jform.mat,
                                  self.Jform.pmat)

        #SET NULLSPACE
        #SET NULLSPACE T
        #SET NEAR NULLSPACE
        #nspace = PETSc.NullSpace().create(constant=True)
        #self.form.A.setNullSpace(nspace)
        #ctx.set_nullspace(nullspace, problem.J.arguments()[0].function_space()._ises,transpose=False, near=False)
        #ctx.set_nullspace(nullspace_T, problem.J.arguments()[1].function_space()._ises,transpose=True, near=False)
        #ctx.set_nullspace(near_nullspace, problem.J.arguments()[0].function_space()._ises,transpose=False, near=True)

        self.snes.setFromOptions()

        ismixed = self.problem.u.space.nspaces > 1
        pc = self.snes.getKSP().getPC()
        if ismixed:
            for si1 in range(self.Jform.target.nspaces):
                indices = self.Jform.target.get_field_gis(si1)
                name = str(si1)
                pc.setFieldSplitIS((name, indices))
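
Stripped of the Themis forms, the SNES wiring above follows the standard petsc4py pattern: create the SNES, give it a prefixed options namespace, attach a residual callback plus a residual vector, and read solver settings from the options database. A hedged, self-contained toy (scalar F(x) = x^2 - 4, with a matrix-free finite-difference Jacobian via snes_mf):

from petsc4py import PETSc

def residual(snes, x, f):
    # F(x) = x^2 - 4, whose positive root is x = 2
    f[0] = x[0] * x[0] - 4.0
    f.assemble()

opts = PETSc.Options()
opts['toy_snes_mf'] = 1      # finite-difference, matrix-free Jacobian
opts['toy_pc_type'] = 'none'

x = PETSc.Vec().createSeq(1)
x.set(1.0)  # initial guess
r = x.duplicate()

snes = PETSc.SNES().create(PETSc.COMM_SELF)
snes.setOptionsPrefix('toy_')
snes.setFunction(residual, r)
snes.setFromOptions()
snes.solve(None, x)
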
Example #7
    def __init__(self, spacelist):
        self._spaces = spacelist
        self.nspaces = len(spacelist)

        #ADD CHECK THAT ALL SPACES ARE DEFINED ON THE SAME MESH

        UFLMixedFunctionSpace.__init__(self, *spacelist)

        #create composite DM for component-block wise view
        self._composite_da = PETSc.DMComposite().create()
        for si in range(self.nspaces):
            for ci in range(self.get_space(si).ncomp):
                for bi in range(self.get_space(si).npatches):
                    self._composite_da.addDM(self.get_space(si).get_da(ci, bi))
        self._composite_da.setUp()

        # compute space offsets, i.e. how far into the list of components space k starts
        s = 0
        self._space_offsets = []
        for si in range(self.nspaces):
            self._space_offsets.append(s)
            s = s + self._spaces[si].ncomp * self._spaces[si].npatches

        # create the correct FIELD local and global ISs, since DMComposite
        # can't handle nested DMComposites in this case
        lndofs_total = 0
        for si in range(self.nspaces):  # determine offset into global vector
            for ci in range(self.get_space(si).ncomp):
                for bi in range(self.get_space(si).npatches):
                    lndofs_total = lndofs_total + self.get_space(
                        si).get_localndofs(ci, bi)

        mpicomm = PETSc.COMM_WORLD.tompi4py()
        localcompoffset = mpicomm.scan(lndofs_total)
        localcompoffset = localcompoffset - lndofs_total  #This is the offset for this process

        self._field_lis = []
        self._field_gis = []
        ghostedlocaloffset = 0  # the ghosted (local) offset for a given FIELD
        localoffset = 0  # the owned offset for a given FIELD
        for si in range(self.nspaces):
            #sum up the number of ghosted ndofs for the whole space
            totalghostedspacendofs = 0
            totalspacendofs = 0
            for ci in range(self.get_space(si).ncomp):
                for bi in range(self.get_space(si).npatches):
                    totalghostedspacendofs = totalghostedspacendofs + self.get_space(
                        si).get_localghostedndofs(ci, bi)
                    totalspacendofs = totalspacendofs + self.get_space(
                        si).get_localndofs(ci, bi)
            #create a strided index set of this size starting at the correct point
            self._field_lis.append(PETSc.IS().createStride(
                totalghostedspacendofs,
                first=ghostedlocaloffset,
                step=1,
                comm=PETSc.COMM_SELF))
            self._field_gis.append(PETSc.IS().createStride(
                totalspacendofs,
                first=localcompoffset + localoffset,
                step=1,
                comm=PETSc.COMM_WORLD))
            #adjust the FIELD offset
            ghostedlocaloffset = ghostedlocaloffset + totalghostedspacendofs
            localoffset = localoffset + totalspacendofs

        self._overall_lgmap = self._composite_da.getLGMap()
        self._component_lgmaps = self._composite_da.getLGMaps()
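
The field IS construction above rests on one trick: an inclusive MPI scan of the locally owned dof counts, minus the local count, gives each rank its exclusive offset into the global vector, and a strided IS starting there addresses that rank's owned dofs. A hedged sketch with an illustrative local count:

from petsc4py import PETSc

lndofs = 100  # locally owned dofs on this rank (illustrative)
comm = PETSc.COMM_WORLD.tompi4py()
offset = comm.scan(lndofs) - lndofs  # inclusive scan -> exclusive prefix sum

field_gis = PETSc.IS().createStride(lndofs, first=offset, step=1,
                                    comm=PETSc.COMM_WORLD)
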
Example #8
    def __init__(self, nxs, bcs, name='singleblockmesh', coordelem=None):
        assert (len(nxs) == len(bcs))

        self.ndim = len(nxs)

        self.nxs = [
            nxs,
        ]
        self.bcs = bcs
        self.npatches = 1
        self.patchlist = []
        self._name = name
        self._blist = []

        for bc in bcs:
            if bc == 'periodic':
                self._blist.append(PETSc.DM.BoundaryType.PERIODIC)
            else:
                self._blist.append(PETSc.DM.BoundaryType.GHOSTED)

        bdx = 0
        bdy = 0
        bdz = 0
        edgex_nxs = list(nxs)
        if (bcs[0] == 'nonperiodic'):
            edgex_nxs[0] = edgex_nxs[0] + 1
            bdx = 1
        if self.ndim >= 2:
            edgey_nxs = list(nxs)
            if (bcs[1] == 'nonperiodic'):
                edgey_nxs[1] = edgey_nxs[1] + 1
                bdy = 1

        if self.ndim >= 3:
            edgez_nxs = list(nxs)
            if (bcs[2] == 'nonperiodic'):
                edgez_nxs[2] = edgez_nxs[2] + 1
                bdz = 1

        # generate mesh
        cell_da = PETSc.DMDA().create()
        # THIS SHOULD REALLY BE AN OPTIONS-PREFIX THING ON THE MESH...
        # THE MAIN THING IS THE ABILITY TO CONTROL THE DECOMPOSITION AT THE
        # COMMAND LINE, WHILE IGNORING THE DECOMPOSITION WHEN RUNNING ON A
        # SINGLE PROCESSOR, WHICH ALLOWS THE SAME OPTIONS LIST TO BE USED TO
        # RUN SOMETHING IN PARALLEL AND THEN DO THE PLOTTING IN SERIAL!
        if PETSc.COMM_WORLD.size > 1:
            cell_da.setOptionsPrefix(self._name + '0_')
            cell_da.setFromOptions()
        #############

        cell_da.setDim(self.ndim)
        cell_da.setDof(1)
        cell_da.setSizes(nxs)
        cell_da.setBoundaryType(self._blist)
        cell_da.setStencil(PETSc.DMDA.StencilType.BOX, 1)
        cell_da.setUp()

        edgex_da = PETSc.DMDA().create(dim=self.ndim,
                                       dof=1,
                                       sizes=edgex_nxs,
                                       proc_sizes=cell_da.getProcSizes(),
                                       boundary_type=self._blist,
                                       stencil_type=PETSc.DMDA.StencilType.BOX,
                                       stencil_width=1,
                                       setup=False)
        # THIS IS AN UGLY HACK, NEEDED BECAUSE THE OWNERSHIP-RANGES ARGUMENT
        # TO petsc4py's DMDA create IS BROKEN
        decompfunction(cell_da, edgex_da, self.ndim, 1, 1, 1, bdx, 0, 0)
        edgex_da.setUp()

        if self.ndim >= 2:
            edgey_da = PETSc.DMDA().create(
                dim=self.ndim,
                dof=1,
                sizes=edgey_nxs,
                proc_sizes=cell_da.getProcSizes(),
                boundary_type=self._blist,
                stencil_type=PETSc.DMDA.StencilType.BOX,
                stencil_width=1,
                setup=False)
            # THIS IS AN UGLY HACK, NEEDED BECAUSE THE OWNERSHIP-RANGES
            # ARGUMENT TO petsc4py's DMDA create IS BROKEN
            decompfunction(cell_da, edgey_da, self.ndim, 1, 1, 1, 0, bdy, 0)
            edgey_da.setUp()
        if self.ndim >= 3:
            edgez_da = PETSc.DMDA().create(
                dim=self.ndim,
                dof=1,
                sizes=edgez_nxs,
                proc_sizes=cell_da.getProcSizes(),
                boundary_type=self._blist,
                stencil_type=PETSc.DMDA.StencilType.BOX,
                stencil_width=1,
                setup=False)
            # THIS IS AN UGLY HACK, NEEDED BECAUSE THE OWNERSHIP-RANGES
            # ARGUMENT TO petsc4py's DMDA create IS BROKEN
            decompfunction(cell_da, edgez_da, self.ndim, 1, 1, 1, 0, 0, bdz)
            edgez_da.setUp()

        self._cell_das = [
            cell_da,
        ]
        self._edgex_das = [
            edgex_da,
        ]
        self._edgex_nxs = [
            edgex_nxs,
        ]
        if self.ndim >= 2:
            self._edgey_das = [
                edgey_da,
            ]
            self._edgey_nxs = [
                edgey_nxs,
            ]
        if self.ndim >= 3:
            self._edgez_das = [
                edgez_da,
            ]
            self._edgez_nxs = [
                edgez_nxs,
            ]

        #ADD ACTUAL COORDINATE FUNCTION INITIALIZATION HERE

        # construct coordelem
        # FIX THIS: SHOULD USE H1 FOR NON-PERIODIC!
        # DG is used for periodic boundaries to avoid wrapping issues
        h1elem = FiniteElement("CG", interval, 1)  # 1 dof per element = linear
        l2elem = FiniteElement("DG", interval, 1)  # 2 dofs per element = linear
        elemlist = []
        dxs = []
        pxs = []
        lxs = []
        for i in range(len(nxs)):
            dxs.append(2.)
            pxs.append(0.)
            lxs.append(2. * nxs[i])
            #if bcs[i] == 'periodic':
            elemlist.append(l2elem)
            #else:
            #	elemlist.append(h1elem)
        celem = TensorProductElement(*elemlist)
        if len(nxs) == 1:
            celem = elemlist[0]
        if coordelem is not None:
            celem = coordelem
        celem = VectorElement(celem, dim=self.ndim)

        Mesh.__init__(self, celem)

        # THIS BREAKS FOR A COORDELEM THAT IS NOT DG1...
        # construct and set coordsvec
        coordsspace = FunctionSpace(self, celem)
        self.coordinates = Function(coordsspace, name='coords')

        localnxs = self.get_local_nxny(0)
        newcoords = create_uniform_nodal_coords(self.get_cell_da(0), nxs, pxs,
                                                lxs, dxs, bcs, localnxs)

        coordsdm = coordsspace.get_da(0, 0)
        coordsarr = coordsdm.getVecArray(self.coordinates._vector)[:]

        if len(pxs) == 1:
            coordsarr[:] = np.squeeze(newcoords[:])
        else:
            coordsarr[:] = newcoords[:]

        self.coordinates.scatter()
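
The coordinate write at the end relies on DMDA's getVecArray, which exposes the locally owned part of a global vector with global indexing. A hedged 1D sketch of filling uniform nodal coordinates that way (sizes and extent are illustrative):

import numpy as np
from petsc4py import PETSc

nx, L = 8, 2.0  # cell count and domain length (illustrative)
da = PETSc.DMDA().create(dim=1, dof=1, sizes=[nx], stencil_width=1)
coords = da.createGlobalVec()

arr = da.getVecArray(coords)  # globally-indexed view of the owned part
(xs, xe), = da.getRanges()    # owned index range on this rank
arr[xs:xe] = np.linspace(0.0, L, nx)[xs:xe]  # uniform nodal coordinates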