def FormatToCode(format):  # {{{
    """
    This routine takes the format string, and hardcodes it into an integer, which
    is passed along the record, in order to identify the nature of the dataset being sent.
    """
    if m.strcmpi(format, 'Boolean'):
        code = 1
    elif m.strcmpi(format, 'Integer'):
        code = 2
    elif m.strcmpi(format, 'Double'):
        code = 3
    elif m.strcmpi(format, 'String'):
        code = 4
    elif m.strcmpi(format, 'BooleanMat'):
        code = 5
    elif m.strcmpi(format, 'IntMat'):
        code = 6
    elif m.strcmpi(format, 'DoubleMat'):
        code = 7
    elif m.strcmpi(format, 'MatArray'):
        code = 8
    elif m.strcmpi(format, 'StringArray'):
        code = 9
    elif m.strcmpi(format, 'CompressedMat'):
        code = 10
    else:
        raise InputError('FormatToCode error message: data type not supported yet!')
    return code
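# Illustration only, not part of the module: the same format-string -> integer
# mapping written out as a plain dictionary, for quick reference when reading
# the binary records produced by WriteData below.
FORMAT_CODES_ILLUSTRATION = {
    'Boolean': 1, 'Integer': 2, 'Double': 3, 'String': 4,
    'BooleanMat': 5, 'IntMat': 6, 'DoubleMat': 7, 'MatArray': 8,
    'StringArray': 9, 'CompressedMat': 10,
}
# Example: FORMAT_CODES_ILLUSTRATION['DoubleMat'] is 7, the code written after
# the record length for a double matrix.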
def checkconsistency(self, md, solution, analyses):  # {{{
    md = checkfield(md, 'fieldname', 'mask.groundedice_levelset', 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mask.ice_levelset', 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mask.ocean_levelset', 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mask.land_levelset', 'size', [md.mesh.numberofvertices])

    isice = (md.mask.ice_levelset <= 0)
    if sum(isice) == 0:
        print('no ice present in the domain')
    if max(md.mask.ice_levelset) < 0:
        print('no ice front provided')

    elements = md.mesh.elements - 1
    elements = elements.astype(np.int32, copy=False)
    icefront = np.sum(md.mask.ice_levelset[elements] == 0, axis=1)
    if (max(icefront) == 3 and m.strcmp(md.mesh.elementtype(), 'Tria')) or \
       (max(icefront) == 6 and m.strcmp(md.mesh.elementtype(), 'Penta')):
        raise RuntimeError('At least one element has all nodes on ice front, change md.mask.ice_levelset to fix it')

    return md
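# Illustration only (hypothetical toy mesh, not an ISSM model): how the ice
# front count per element is obtained from the 1-based connectivity table,
# mirroring the icefront computation in the mask consistency check above.
import numpy as np

_ice_levelset = np.array([-1., 0., 0., 0., 1.])   # one value per vertex
_elements = np.array([[1, 2, 3], [2, 3, 4]])      # 1-based triangle connectivity
_icefront = np.sum(_ice_levelset[_elements - 1] == 0, axis=1)
print(_icefront)   # [2 3] -> the second triangle has all 3 nodes on the ice front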
def displayunit(offset, name, characterization, comment):  # {{{

    #take care of name
    if len(name) > 23:
        name = "%s..." % name[:20]

    #take care of characterization
    if m.strcmp(characterization, "''") or m.strcmp(characterization, '""') or m.strcmpi(characterization, 'nan'):
        characterization = "N/A"
    if len(characterization) > 15:
        characterization = "%s..." % characterization[:12]

    #print
    if not comment:
        string = "%s%-23s: %-15s" % (offset, name, characterization)
    else:
        if isinstance(comment, (str, unicode)):
            string = "%s%-23s: %-15s -- %s" % (offset, name, characterization, comment)
        elif isinstance(comment, list):
            string = "%s%-23s: %-15s -- %s" % (offset, name, characterization, comment[0])
            for commenti in comment[1:]:
                string += "\n%s%-23s %-15s %s" % (offset, '', '', commenti)
        else:
            raise RuntimeError("fielddisplay error message: format for comment not supported yet")

    return string
def ComputeHessian(index, x, y, field, type):
    """
    COMPUTEHESSIAN - compute hessian matrix from a field

    Compute the hessian matrix of a given field
    return the three components Hxx Hxy Hyy
    for each element or each node

    Usage:
        hessian=ComputeHessian(index,x,y,field,type)

    Example:
        hessian=ComputeHessian(md.mesh.elements,md.mesh.x,md.mesh.y,md.inversion.vel_obs,'node')
    """

    #some variables
    numberofnodes = np.size(x)
    numberofelements = np.size(index, axis=0)

    #some checks
    if np.size(field) != numberofnodes and np.size(field) != numberofelements:
        raise TypeError("ComputeHessian error message: the given field size not supported yet")
    if not m.strcmpi(type, 'node') and not m.strcmpi(type, 'element'):
        raise TypeError("ComputeHessian error message: only 'node' or 'element' type supported yet")

    #initialization
    line = index.reshape(-1, order='F')
    linesize = 3 * numberofelements

    #get areas and nodal functions coefficients N(x,y)=alpha x + beta y + gamma
    [alpha, beta, dum] = GetNodalFunctionsCoeff(index, x, y)
    areas = GetAreas(index, x, y)

    #compute weights that hold the volume of all the element holding the node i
    weights = m.sparse(line, np.ones((linesize, 1)), np.tile(areas.reshape(-1, ), (3, 1)), numberofnodes, 1)

    #compute field on nodes if on elements
    if np.size(field, axis=0) == numberofelements:
        field = m.sparse(line, np.ones((linesize, 1)), np.tile(areas * field, (3, 1)), numberofnodes, 1) / weights

    #Compute gradient for each element
    grad_elx = np.sum(field[index - 1, 0] * alpha, axis=1)
    grad_ely = np.sum(field[index - 1, 0] * beta, axis=1)

    #Compute gradient for each node (average of the elements around)
    gradx = m.sparse(line, np.ones((linesize, 1)), np.tile((areas * grad_elx).reshape(-1, ), (3, 1)), numberofnodes, 1)
    grady = m.sparse(line, np.ones((linesize, 1)), np.tile((areas * grad_ely).reshape(-1, ), (3, 1)), numberofnodes, 1)
    gradx = gradx / weights
    grady = grady / weights

    #Compute hessian for each element
    hessian = np.vstack((np.sum(gradx[index - 1, 0] * alpha, axis=1).reshape(-1, ),
                         np.sum(grady[index - 1, 0] * alpha, axis=1).reshape(-1, ),
                         np.sum(grady[index - 1, 0] * beta, axis=1).reshape(-1, ))).T

    if m.strcmpi(type, 'node'):
        #Compute Hessian on the nodes (average of the elements around)
        hessian = np.hstack((m.sparse(line, np.ones((linesize, 1)), np.tile((areas * hessian[:, 0]).reshape(-1, ), (3, 1)), numberofnodes, 1) / weights,
                             m.sparse(line, np.ones((linesize, 1)), np.tile((areas * hessian[:, 1]).reshape(-1, ), (3, 1)), numberofnodes, 1) / weights,
                             m.sparse(line, np.ones((linesize, 1)), np.tile((areas * hessian[:, 2]).reshape(-1, ), (3, 1)), numberofnodes, 1) / weights))

    return hessian
def waitonlock(md):
    """
    WAITONLOCK - wait for a file

    This routine will return when a file named 'filename' is written to disk.
    If the time limit given in input is exceeded, return 0

    Usage:
        flag=waitonlock(md)
    """

    #Get filename (lock file) and options
    executionpath = md.cluster.executionpath
    cluster = md.cluster.name
    login = md.cluster.login
    port = md.cluster.port
    timelimit = md.settings.waitonlock
    filename = os.path.join(executionpath, md.private.runtimename, md.miscellaneous.name + '.lock')

    #waitonlock will work if the lock is on the same machine only:
    if not m.strcmpi(gethostname(), cluster):
        print('solution launched on remote cluster. log in to detect job completion.')
        choice = input('Is the job successfully completed? (y/n) ')
        if not m.strcmp(choice, 'y'):
            print('Results not loaded... exiting')
            flag = 0
        else:
            flag = 1

    #job is running on the same machine
    else:
        if 'interactive' in vars(md.cluster) and md.cluster.interactive:
            #We are in interactive mode, no need to check for job completion
            flag = 1
            return flag

        #initialize time and file presence test flag
        etime = 0
        ispresent = 0
        print("waiting for '%s' hold on... (Ctrl+C to exit)" % filename)

        #loop till file .lock exist or time is up
        while ispresent == 0 and etime < timelimit:
            ispresent = os.path.exists(filename)
            time.sleep(1)
            etime += 1. / 60.

        #build output
        if etime > timelimit:
            print('Time limit exceeded. Increase md.settings.waitonlock')
            print('The results must be loaded manually with md=loadresultsfromcluster(md).')
            raise RuntimeError('waitonlock error message: time limit exceeded.')
        else:
            flag = 1

    return flag
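# Minimal, self-contained sketch of the same lock-file polling pattern used by
# waitonlock (hypothetical path and limit, standard library only; not ISSM code).
import os
import time

def wait_for_file_sketch(path, timelimit_minutes):
    """Poll for `path` once per second; return True if it appears within the limit."""
    elapsed = 0.0
    while not os.path.exists(path) and elapsed < timelimit_minutes:
        time.sleep(1)
        elapsed += 1.0 / 60.0
    return os.path.exists(path)

# Example: done = wait_for_file_sketch('run01.lock', 5)   # wait up to 5 minutes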
def loadresultsfromdisk(md, filename):
    """
    LOADRESULTSFROMDISK - load results of solution sequence from disk file "filename"

    Usage:
        md=loadresultsfromdisk(md=False,filename=False);
    """

    #check number of inputs/outputs
    if not md or not filename:
        raise ValueError("loadresultsfromdisk: error message.")

    if not md.qmu.isdakota:
        #Check that file exists
        if not os.path.exists(filename):
            raise OSError("binary file '%s' not found." % filename)

        #initialize md.results if not a structure yet
        if not isinstance(md.results, results):
            md.results = results()

        #load results onto model
        structure = parseresultsfromdisk(filename, not md.settings.io_gather)
        if not len(structure):
            raise RuntimeError("No result found in binary file '%s'. Check for solution crash." % filename)
        setattr(md.results, structure[0].SolutionType, structure)

        #recover solution_type from results
        md.private.solution = structure[0].SolutionType

        #read log files onto fields
        if os.path.exists(md.miscellaneous.name + '.errlog'):
            with open(md.miscellaneous.name + '.errlog', 'r') as f:
                setattr(getattr(md.results, structure[0].SolutionType)[0], 'errlog', [line[:-1] for line in f])
        else:
            setattr(getattr(md.results, structure[0].SolutionType)[0], 'errlog', [])

        if os.path.exists(md.miscellaneous.name + '.outlog'):
            with open(md.miscellaneous.name + '.outlog', 'r') as f:
                setattr(getattr(md.results, structure[0].SolutionType)[0], 'outlog', [line[:-1] for line in f])
        else:
            setattr(getattr(md.results, structure[0].SolutionType)[0], 'outlog', [])

        if len(getattr(md.results, structure[0].SolutionType)[0].errlog):
            print("loadresultsfromcluster info message: error during solution. Check your errlog and outlog model fields.")

        #if only one solution, extract it from list for user friendliness
        if len(structure) == 1 and not m.strcmp(structure[0].SolutionType, 'TransientSolution'):
            setattr(md.results, structure[0].SolutionType, structure[0])

    #post processes qmu results if necessary
    else:
        if not isinstance(md.private.solution, str):
            [md.private.solution] = EnumToString(md.private.solution)
        md = postqmu(md)
        os.chdir('..')

    return md
def perform(self, string):  # {{{

    bool = False

    #Some checks
    if not isinstance(string, (str, unicode)):
        raise TypeError("Step provided should be a string")
    if not m.strcmp(string, string.strip()) or len(string.split()) > 1:
        raise TypeError("Step provided should not have any white space")
    if self._currentstep > 0 and string in [step['string'] for step in self.steps]:
        raise RuntimeError("Step '%s' already present. Change name" % string)

    #Add step
    self.steps.append(OrderedDict())
    self.steps[-1]['id'] = len(self.steps)
    self.steps[-1]['string'] = string
    self._currentstep += 1

    #if requestedsteps = 0, print all steps in self
    if 0 in self.requestedsteps:
        if self._currentstep == 1:
            print(" prefix: %s" % self.prefix)
        print(" step #%i : %s" % (self.steps[self._currentstep - 1]['id'], self.steps[self._currentstep - 1]['string']))

    #Ok, now if _currentstep is a member of steps, return true
    if self._currentstep in self.requestedsteps:
        print("\n step #%i : %s\n" % (self.steps[self._currentstep - 1]['id'], self.steps[self._currentstep - 1]['string']))
        bool = True

    #assign self back to calling workspace
    # (no need, since Python modifies class instance directly)
    return bool
def checkconsistency(self, md, solution, analyses):  # {{{
    md = checkfield(md, 'fieldname', 'mesh.x', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mesh.y', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mesh.z', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mesh.elements', 'NaN', 1, 'Inf', 1, '>', 0, 'values', numpy.arange(1, md.mesh.numberofvertices + 1))
    md = checkfield(md, 'fieldname', 'mesh.elements', 'size', [md.mesh.numberofelements, 6])
    if numpy.any(numpy.logical_not(m.ismember(numpy.arange(1, md.mesh.numberofvertices + 1), md.mesh.elements))):
        md.checkmessage("orphan nodes have been found. Check the mesh3dprisms outline")
    md = checkfield(md, 'fieldname', 'mesh.numberoflayers', '>=', 0)
    md = checkfield(md, 'fieldname', 'mesh.numberofelements', '>', 0)
    md = checkfield(md, 'fieldname', 'mesh.numberofvertices', '>', 0)
    md = checkfield(md, 'fieldname', 'mesh.vertexonbase', 'size', [md.mesh.numberofvertices], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'mesh.vertexonsurface', 'size', [md.mesh.numberofvertices], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'mesh.average_vertex_connectivity', '>=', 24, 'message', "'mesh.average_vertex_connectivity' should be at least 24 in 3d")
    return md
def BuildQueueScript(self, dirname, modelname, solution, io_gather, isvalgrind, isgprof, isdakota, isoceancoupling):  # {{{

    executable = 'issm.exe'
    if isdakota:
        version = IssmConfig('_DAKOTA_VERSION_')[0:2]
        version = float(version)
        if version >= 6:
            executable = 'issm_dakota.exe'
    if isoceancoupling:
        executable = 'issm_ocean.exe'

    #write queuing script
    if not m.ispc():
        fid = open(modelname + '.queue', 'w')
        fid.write('#!/bin/sh\n')
        if not isvalgrind:
            if self.interactive:
                if IssmConfig('_HAVE_MPI_')[0]:
                    fid.write('mpiexec -np %i %s/%s %s %s/%s %s ' % (self.np, self.codepath, executable, solution, self.executionpath, dirname, modelname))
                else:
                    fid.write('%s/%s %s %s/%s %s ' % (self.codepath, executable, solution, self.executionpath, dirname, modelname))
            else:
                if IssmConfig('_HAVE_MPI_')[0]:
                    fid.write('mpiexec -np %i %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % (self.np, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
                else:
                    fid.write('%s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' % (self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
        elif isgprof:
            fid.write('\n gprof %s/%s gmon.out > %s.performance' % (self.codepath, executable, modelname))
        else:
            #Add --gen-suppressions=all to get suppression lines
            fid.write('LD_PRELOAD=%s \\\n' % self.valgrindlib)
            if IssmConfig('_HAVE_MPI_')[0]:
                fid.write('mpiexec -np %i %s --leak-check=full --suppressions=%s %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' %
                          (self.np, self.valgrind, self.valgrindsup, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
            else:
                fid.write('%s --leak-check=full --suppressions=%s %s/%s %s %s/%s %s 2> %s.errlog >%s.outlog ' %
                          (self.valgrind, self.valgrindsup, self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
        if not io_gather:
            #concatenate the output files:
            fid.write('\ncat %s.outbin.* > %s.outbin' % (modelname, modelname))
        fid.close()

    else:  # Windows
        fid = open(modelname + '.bat', 'w')
        fid.write('@echo off\n')
        if self.interactive:
            fid.write('"%s/%s" %s "%s/%s" %s ' % (self.codepath, executable, solution, self.executionpath, dirname, modelname))
        else:
            fid.write('"%s/%s" %s "%s/%s" %s 2> %s.errlog >%s.outlog' %
                      (self.codepath, executable, solution, self.executionpath, dirname, modelname, modelname, modelname))
        fid.close()

    #in interactive mode, create a run file, and errlog and outlog file
    if self.interactive:
        fid = open(modelname + '.errlog', 'w')
        fid.close()
        fid = open(modelname + '.outlog', 'w')
        fid.close()
def checkconsistency(self, md, solution, analyses):  # {{{

    if (not self.riftstruct) or numpy.any(isnans(self.riftstruct)):
        numrifts = 0
    else:
        numrifts = len(self.riftstruct)

    if numrifts:
        if not m.strcmp(md.mesh.domaintype(), '2Dhorizontal'):
            md.checkmessage("models with rifts are only supported in 2d for now!")
        if not isinstance(self.riftstruct, list):
            md.checkmessage("rifts.riftstruct should be a structure!")
        if numpy.any(md.mesh.segmentmarkers >= 2):
            #We have segments with rift markers, but no rift structure!
            md.checkmessage("model should be processed for rifts (run meshprocessrifts)!")
        for i, rift in enumerate(self.riftstruct):
            md = checkfield(md, 'fieldname', "rifts.riftstruct[%d]['fill']" % i, 'values', [WaterEnum(), AirEnum(), IceEnum(), MelangeEnum()])
    else:
        if self.riftstruct and numpy.any(numpy.logical_not(isnans(self.riftstruct))):
            md.checkmessage("riftstruct should be NaN since numrifts is 0!")

    return md
def checkconsistency(self, md, solution, analyses):  # {{{
    md = checkfield(md, 'fieldname', 'mesh.x', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mesh.y', 'NaN', 1, 'Inf', 1, 'size', [md.mesh.numberofvertices])
    md = checkfield(md, 'fieldname', 'mesh.elements', 'NaN', 1, 'Inf', 1, '>', 0, 'values', np.arange(1, md.mesh.numberofvertices + 1))
    md = checkfield(md, 'fieldname', 'mesh.elements', 'size', [md.mesh.numberofelements, 3])
    if np.any(np.logical_not(m.ismember(np.arange(1, md.mesh.numberofvertices + 1), md.mesh.elements))):
        md.checkmessage("orphan nodes have been found. Check the mesh outline")
    md = checkfield(md, 'fieldname', 'mesh.numberofelements', '>', 0)
    md = checkfield(md, 'fieldname', 'mesh.numberofvertices', '>', 0)
    md = checkfield(md, 'fieldname', 'mesh.average_vertex_connectivity', '>=', 9, 'message', "'mesh.average_vertex_connectivity' should be at least 9 in 2d")
    md = checkfield(md, 'fieldname', 'mesh.segments', 'NaN', 1, 'Inf', 1, '>', 0, 'size', [np.nan, 3])

    if solution == 'ThermalSolution':
        md.checkmessage("thermal not supported for 2d mesh")

    return md
def checkconsistency(self, md, solution, analyses):  # {{{

    #Early return
    if not md.qmu.isdakota:
        return

    if not md.qmu.params.evaluation_concurrency == 1:
        md.checkmessage("concurrency should be set to 1 when running dakota in library mode")
    if md.qmu.partition:
        if not numpy.size(md.qmu.partition) == md.mesh.numberofvertices:
            md.checkmessage("user supplied partition for qmu analysis should have size md.mesh.numberofvertices x 1")
        if not min(md.qmu.partition) == 0:
            md.checkmessage("partition vector not indexed from 0 on")
        if max(md.qmu.partition) >= md.qmu.numberofpartitions:
            md.checkmessage("for qmu analysis, partitioning vector cannot go over npart, number of partition areas")

    if not m.strcmpi(md.cluster.name, 'none'):
        if not md.settings.waitonlock:
            md.checkmessage("waitonlock should be activated when running qmu in parallel mode!")

    return md
def list_display(offset, name, field, comment):  # {{{

    #initialization
    if isinstance(field, list):
        sbeg = '['
        send = ']'
    elif isinstance(field, tuple):
        sbeg = '('
        send = ')'
    string = sbeg

    #go through the cell and fill string
    if len(field) < 5:
        for fieldi in field:
            if isinstance(fieldi, (str, unicode)):
                string += "'%s'," % fieldi
            elif isinstance(fieldi, (bool, int, long, float)):
                string += "%s," % str(fieldi)
            else:
                string = sbeg
                break

    if m.strcmp(string, sbeg):
        string = "%s%dx1%s" % (sbeg, len(field), send)
    else:
        string = string[:-1] + send

    #call displayunit
    return displayunit(offset, name, string, comment)
def Download(self, dirname, filelist):  # {{{

    if m.ispc():
        #do nothing
        return

    #copy files from cluster to current directory
    directory = '%s/%s/' % (self.executionpath, dirname)
    issmscpin(self.name, self.login, self.port, directory, filelist)
def triangle(md, domainname, *args):
    """
    TRIANGLE - create model mesh using the triangle package

    This routine creates a model mesh using TriMesh and a domain outline, to within a certain resolution
    where md is a @model object, domainname is the name of an Argus domain outline file,
    and resolution is a characteristic length for the mesh (same unit as the domain outline
    unit). Riftname is an optional argument (Argus domain outline) describing rifts.

    Usage:
        md=triangle(md,domainname,resolution)
    or
        md=triangle(md,domainname,resolution,riftname)

    Examples:
        md=triangle(md,'DomainOutline.exp',1000);
        md=triangle(md,'DomainOutline.exp',1000,'Rifts.exp');
    """

    #Figure out a characteristic area. Resolution is a node oriented concept (ex a 1000m
    #resolution node would be made of 1000*1000 area squares).
    if len(args) == 1:
        resolution = args[0]
        riftname = ''
    if len(args) == 2:
        riftname = args[0]
        resolution = args[1]

    #Check that mesh was not already run, and warn user:
    if md.mesh.numberofelements:
        choice = input('This model already has a mesh. Are you sure you want to go ahead? (y/n)')
        if not m.strcmp(choice, 'y'):
            print('no meshing done ... exiting')
            return None

    area = resolution**2

    #Mesh using TriMesh
    md.mesh = mesh2d()
    [md.mesh.elements, md.mesh.x, md.mesh.y, md.mesh.segments, md.mesh.segmentmarkers] = TriMesh(domainname, riftname, area)
    md.mesh.elements = md.mesh.elements.astype(int)
    md.mesh.segments = md.mesh.segments.astype(int)
    md.mesh.segmentmarkers = md.mesh.segmentmarkers.astype(int)

    #Fill in rest of fields:
    md.mesh.numberofelements = numpy.size(md.mesh.elements, axis=0)
    md.mesh.numberofvertices = numpy.size(md.mesh.x)
    md.mesh.vertexonboundary = numpy.zeros(md.mesh.numberofvertices, bool)
    md.mesh.vertexonboundary[md.mesh.segments[:, 0:2] - 1] = True

    #Now, build the connectivity tables for this mesh.
    [md.mesh.vertexconnectivity] = NodeConnectivity(md.mesh.elements, md.mesh.numberofvertices)
    [md.mesh.elementconnectivity] = ElementConnectivity(md.mesh.elements, md.mesh.vertexconnectivity)

    return md
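# Illustration only (toy numbers, not an ISSM mesh): how boundary vertices are
# flagged from the 1-based segment table, as done at the end of triangle() above.
import numpy as np

_segments = np.array([[1, 2, 5], [2, 3, 6]])     # columns 0:2 hold vertex ids
_vertexonboundary = np.zeros(4, bool)
_vertexonboundary[_segments[:, 0:2] - 1] = True
print(_vertexonboundary)   # [ True  True  True False]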
def issmdir():
    """
    ISSMDIR - Get ISSM_DIR environment variable

    Usage:
        ISSM_DIR=issmdir()
    """

    if not m.ispc():
        ISSM_DIR = os.environ['ISSM_DIR']
    else:
        ISSM_DIR = os.environ['ISSM_DIR_WIN']
        if m.strcmpi(ISSM_DIR[-1], '/') or m.strcmpi(ISSM_DIR[-1], '\\'):
            ISSM_DIR = ISSM_DIR[:-1]  #shave off the last '/'

    if not ISSM_DIR:
        raise RuntimeError("issmdir error message: 'ISSM_DIR' environment variable is empty! You should define ISSM_DIR in your .cshrc or .bashrc!")

    return ISSM_DIR
def __init__(self, **kwargs):  # {{{

    self._currentstep = 0
    self.repository = './'
    self.prefix = 'model.'
    self.trunkprefix = ''
    self.steps = []
    self.requestedsteps = [0]

    #process options
    options = pairoptions.pairoptions(**kwargs)

    #Get prefix
    prefix = options.getfieldvalue('prefix', 'model.')
    if not isinstance(prefix, str):
        raise TypeError("prefix is not a string")
    if not m.strcmp(prefix, prefix.strip()) or len(prefix.split()) > 1:
        raise TypeError("prefix should not have any white space")
    self.prefix = prefix

    #Get repository
    repository = options.getfieldvalue('repository', './')
    if not isinstance(repository, str):
        raise TypeError("repository is not a string")
    if not os.path.isdir(repository):
        raise IOError("Directory '%s' not found" % repository)
    self.repository = repository

    #Get steps
    self.requestedsteps = options.getfieldvalue('steps', [0])

    #Get trunk prefix (only if provided by user)
    if options.exist('trunkprefix'):
        trunkprefix = options.getfieldvalue('trunkprefix', '')
        if not isinstance(trunkprefix, str):
            raise TypeError("trunkprefix is not a string")
        if not m.strcmp(trunkprefix, trunkprefix.strip()) or len(trunkprefix.split()) > 1:
            raise TypeError("trunkprefix should not have any white space")
        self.trunkprefix = trunkprefix
def checkconsistency(self, md, solution, analyses):  # {{{

    #Early return
    if (StressbalanceAnalysisEnum() not in analyses and StressbalanceSIAAnalysisEnum() not in analyses) or (solution == TransientSolutionEnum() and not md.transient.isstressbalance):
        return md

    md = checkfield(md, 'fieldname', 'flowequation.isSIA', 'numel', [1], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.isSSA', 'numel', [1], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.isL1L2', 'numel', [1], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.isHO', 'numel', [1], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.isFS', 'numel', [1], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.fe_SSA', 'values', ['P1', 'P1bubble', 'P1bubblecondensed', 'P2', 'P2bubble'])
    md = checkfield(md, 'fieldname', 'flowequation.fe_HO', 'values', ['P1', 'P1bubble', 'P1bubblecondensed', 'P1xP2', 'P2xP1', 'P2', 'P2bubble', 'P1xP3', 'P2xP4'])
    md = checkfield(md, 'fieldname', 'flowequation.fe_FS', 'values', ['P1P1', 'P1P1GLS', 'MINIcondensed', 'MINI', 'TaylorHood', 'XTaylorHood', 'OneLayerP4z', 'CrouzeixRaviart'])
    md = checkfield(md, 'fieldname', 'flowequation.borderSSA', 'size', [md.mesh.numberofvertices], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.borderHO', 'size', [md.mesh.numberofvertices], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.borderFS', 'size', [md.mesh.numberofvertices], 'values', [0, 1])
    md = checkfield(md, 'fieldname', 'flowequation.augmented_lagrangian_r', 'numel', [1], '>', 0.)
    md = checkfield(md, 'fieldname', 'flowequation.augmented_lagrangian_rhop', 'numel', [1], '>', 0.)
    md = checkfield(md, 'fieldname', 'flowequation.augmented_lagrangian_rlambda', 'numel', [1], '>', 0.)
    md = checkfield(md, 'fieldname', 'flowequation.augmented_lagrangian_rholambda', 'numel', [1], '>', 0.)
    md = checkfield(md, 'fieldname', 'flowequation.XTH_theta', 'numel', [1], '>=', 0., '<', .5)

    if m.strcmp(md.mesh.domaintype(), '2Dhorizontal'):
        md = checkfield(md, 'fieldname', 'flowequation.vertex_equation', 'size', [md.mesh.numberofvertices], 'values', [1, 2])
        md = checkfield(md, 'fieldname', 'flowequation.element_equation', 'size', [md.mesh.numberofelements], 'values', [1, 2])
    elif m.strcmp(md.mesh.domaintype(), '3D'):
        md = checkfield(md, 'fieldname', 'flowequation.vertex_equation', 'size', [md.mesh.numberofvertices], 'values', numpy.arange(0, 8 + 1))
        md = checkfield(md, 'fieldname', 'flowequation.element_equation', 'size', [md.mesh.numberofelements], 'values', numpy.arange(0, 8 + 1))
    else:
        raise RuntimeError('mesh type not supported yet')

    if not (self.isSIA or self.isSSA or self.isL1L2 or self.isHO or self.isFS):
        md.checkmessage("no element types set for this model")

    if StressbalanceSIAAnalysisEnum() in analyses:
        if any(self.element_equation == 1):
            if numpy.any(numpy.logical_and(self.vertex_equation, md.mask.groundedice_levelset)):
                print("\n !!! Warning: SIA's model is not consistent on ice shelves !!!\n")

    return md
def BuildKrigingQueueScript(self, modelname, solution, io_gather, isvalgrind, isgprof):  # {{{

    #write queuing script
    if not m.ispc():
        fid = open(modelname + '.queue', 'w')
        fid.write('#!/bin/sh\n')
        if not isvalgrind:
            if self.interactive:
                fid.write('mpiexec -np %i %s/kriging.exe %s/%s %s ' % (self.np, self.codepath, self.executionpath, modelname, modelname))
            else:
                fid.write('mpiexec -np %i %s/kriging.exe %s/%s %s 2> %s.errlog >%s.outlog ' % (self.np, self.codepath, self.executionpath, modelname, modelname, modelname, modelname))
        elif isgprof:
            fid.write('\n gprof %s/kriging.exe gmon.out > %s.performance' % (self.codepath, modelname))
        else:
            #Add --gen-suppressions=all to get suppression lines
            fid.write('LD_PRELOAD=%s \\\n' % self.valgrindlib)
            fid.write('mpiexec -np %i %s --leak-check=full --suppressions=%s %s/kriging.exe %s/%s %s 2> %s.errlog >%s.outlog ' %
                      (self.np, self.valgrind, self.valgrindsup, self.codepath, self.executionpath, modelname, modelname, modelname, modelname))
        if not io_gather:
            #concatenate the output files:
            fid.write('\ncat %s.outbin.* > %s.outbin' % (modelname, modelname))
        fid.close()

    else:  # Windows
        fid = open(modelname + '.bat', 'w')
        fid.write('@echo off\n')
        if self.interactive:
            fid.write('"%s/issm.exe" %s "%s/%s" %s ' % (self.codepath, solution, self.executionpath, modelname, modelname))
        else:
            fid.write('"%s/issm.exe" %s "%s/%s" %s 2> %s.errlog >%s.outlog' %
                      (self.codepath, solution, self.executionpath, modelname, modelname, modelname, modelname))
        fid.close()

    #in interactive mode, create a run file, and errlog and outlog file
    if self.interactive:
        fid = open(modelname + '.errlog', 'w')
        fid.close()
        fid = open(modelname + '.outlog', 'w')
        fid.close()
def checkconsistency(self, md, solution, analyses):  # {{{
    md = checkfield(md, 'fieldname', 'groundingline.migration', 'values',
                    ['None', 'AggressiveMigration', 'SoftMigration', 'SubelementMigration', 'SubelementMigration2', 'Contact', 'GroundingOnly'])

    if not m.strcmp(self.migration, 'None'):
        if np.any(np.isnan(md.geometry.bed)):
            md.checkmessage("requesting grounding line migration, but bathymetry is absent!")
        pos = np.nonzero(md.mask.groundedice_levelset > 0.)[0]
        if any(np.abs(md.geometry.base[pos] - md.geometry.bed[pos]) > 10**-10):
            md.checkmessage("base not equal to bed on grounded ice!")
        if any(md.geometry.bed - md.geometry.base > 10**-9):
            md.checkmessage("bed superior to base on floating ice!")

    return md
def meshprocessoutsiderifts(md, domainoutline):
    """
    MESHPROCESSOUTSIDERIFTS - process rifts when they touch the domain outline

    Usage:
        md=meshprocessoutsiderifts(md,domain)
    """

    #go through rifts, and figure out which ones touch the domain outline
    for rift in md.rifts.riftstruct:

        #first, flag nodes that belong to the domain outline
        flags = ContourToMesh(md.mesh.elements, md.mesh.x, md.mesh.y, domainoutline, 'node', 0)

        tips = rift.tips
        outsidetips = tips[np.nonzero(flags[rift.tips - 1])[0]]

        #we have found outsidetips, tips that touch the domain outline. go through them
        for tip in outsidetips:

            #find tip in the segments, take first segment (there should be 2) that holds tip,
            #and node_connected_to_tip is the other node on this segment:
            tipindex = np.nonzero(rift.segments[:, 0] == tip)[0]
            if tipindex:
                tipindex = tipindex[0]
                node_connected_to_tip = rift.segments[tipindex, 1]
            else:
                tipindex = np.nonzero(rift.segments[:, 1] == tip)[0]
                tipindex = tipindex[0]
                node_connected_to_tip = rift.segments[tipindex, 1]

            #ok, we have the tip node, and the first node connected to it, on the rift. Now,
            #identify all the elements that are connected to the tip, and that are on the same
            #side of the rift.
            A = tip
            B = node_connected_to_tip

            elements = np.empty(0, int)

            while flags[B - 1]:  #as long as B does not belong to the domain outline, keep looking.
                #detect elements on edge A,B:
                edgeelements = ElementsFromEdge(md.mesh.elements, A, B)
                #rule out those we already detected
                already_detected = m.ismember(edgeelements, elements)
                nextelement = edgeelements[np.nonzero(np.logical_not(already_detected))[0]]
                #add new detected element to the list of elements we are looking for.
                elements = np.concatenate((elements, nextelement))
                #new B:
                B = md.mesh.elements[nextelement - 1, np.nonzero(np.logical_not(m.ismember(md.mesh.elements[nextelement - 1, :], np.array([A, B]))))]

            #take the list of elements on one side of the rift that connect to the tip,
            #and duplicate the tip on them, so as to open the rift to the outside.
            num = np.size(md.mesh.x) + 1
            md.mesh.x = np.concatenate((md.mesh.x, md.mesh.x[tip]))
            md.mesh.y = np.concatenate((md.mesh.y, md.mesh.y[tip]))
            md.mesh.numberofvertices = num

            #replace tip in elements
            newelements = md.mesh.elements[elements - 1, :]
            pos = np.nonzero(newelements == tip)
            newelements[pos] = num
            md.mesh.elements[elements - 1, :] = newelements
            rift.tips = np.concatenate((rift.tips, num))

            #deal with segments
            tipsegments = np.nonzero(np.logical_or(md.mesh.segments[:, 0] == tip, md.mesh.segments[:, 1] == tip))[0]
            for segment_index in tipsegments:
                pos = np.nonzero(md.mesh.segments[segment_index, 0:2] != tip)[0]
                other_node = md.mesh.segments[segment_index, pos]
                if not isconnected(md.mesh.elements, other_node, tip):
                    pos = np.nonzero(md.mesh.segments[segment_index, 0:2] == tip)[0]
                    md.mesh.segments[segment_index, pos] = num

    #Fill in rest of fields:
    md.mesh.numberofelements = np.size(md.mesh.elements, axis=0)
    md.mesh.numberofvertices = np.size(md.mesh.x)
    md.mesh.vertexonboundary = np.zeros(np.size(md.mesh.x), bool)
    md.mesh.vertexonboundary[md.mesh.segments[:, 0:2] - 1] = True
    md.rifts.numrifts = len(md.rifts.riftstruct)

    return md
def WriteData(fid, prefix, *args):
    """
    WRITEDATA - write model field in binary file

    Usage:
        WriteData(fid,varargin)
    """

    #process options
    options = pairoptions.pairoptions(*args)

    #Get data properties
    if options.exist('object'):
        #This is an object field, construct enum and data
        obj = options.getfieldvalue('object')
        fieldname = options.getfieldvalue('fieldname')
        classname = options.getfieldvalue('class', str(type(obj)).rsplit('.')[-1].split("'")[0])
        name = options.getfieldvalue('name', prefix + '.' + fieldname)
        if options.exist('data'):
            data = options.getfieldvalue('data')
        else:
            data = getattr(obj, fieldname)
    else:
        #No processing required
        data = options.getfieldvalue('data')
        name = options.getfieldvalue('name')

    format = options.getfieldvalue('format')
    mattype = options.getfieldvalue('mattype', 0)  #only required for matrices
    timeserieslength = options.getfieldvalue('timeserieslength', -1)

    #Process sparse matrices
    #    if issparse(data),
    #        data=full(data);
    #    end

    #Scale data if necesarry
    if options.exist('scale'):
        scale = options.getfieldvalue('scale')
        if np.size(data) > 1:
            if np.size(data, 0) == timeserieslength:
                data = np.array(data)
                data[0:-1, :] = scale * data[0:-1, :]
            else:
                data = scale * data
        else:
            data = scale * data
    if np.size(data) > 1:
        if np.size(data, 0) == timeserieslength:
            yts = options.getfieldvalue('yts')
            data[-1, :] = yts * data[-1, :]

    #Step 1: write the enum to identify this record uniquely
    fid.write(struct.pack('i', len(name)))
    fid.write(struct.pack('%ds' % len(name), name))

    #Step 2: write the data itself.
    if m.strcmpi(format, 'Boolean'):  # {{{
        #    if len(data) != 1:
        #        raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 bool (disguised as an int) + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', int(data)))  #send an int, not easy to send a bool
        # }}}

    elif m.strcmpi(format, 'Integer'):  # {{{
        #    if len(data) != 1:
        #        raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 integer + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', data))
        # }}}

    elif m.strcmpi(format, 'Double'):  # {{{
        #    if len(data) != 1:
        #        raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 8 + 4))  #1 double + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write double
        fid.write(struct.pack('d', data))
        # }}}

    elif m.strcmpi(format, 'String'):  # {{{
        #first write length of record
        fid.write(struct.pack('i', len(data) + 4 + 4))  #string + string size + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write string
        fid.write(struct.pack('i', len(data)))
        fid.write(struct.pack('%ds' % len(data), data))
        # }}}

    elif m.strcmpi(format, 'BooleanMat'):  # {{{

        if isinstance(data, bool):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape

        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 2 and np.product(s) == 1 and np.all(np.isnan(data)):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack('i', 4 + 4 + 8 * np.product(s) + 4 + 4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'IntMat'):  # {{{

        if isinstance(data, (int, long)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape

        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 2 and np.product(s) == 1 and np.all(np.isnan(data)):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack('i', 4 + 4 + 8 * np.product(s) + 4 + 4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'DoubleMat'):  # {{{

        if isinstance(data, (bool, int, long, float)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape

        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 1 and np.product(s) == 1 and np.all(np.isnan(data)):
            s = (0, 0)

        #first write length of record
        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4  #2 integers (32 bits) + the double matrix + code + matrix type
        if recordlength > 4**31:
            raise ValueError('field %s cannot be marshalled because it is larger than 4^31 bytes!' % name)

        fid.write(struct.pack('i', recordlength))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'CompressedMat'):  # {{{

        if isinstance(data, (bool, int, long, float)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        if np.ndim(data) == 1:
            n2 = 1
        else:
            n2 = s[1]

        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 1 and np.product(s) == 1 and np.all(np.isnan(data)):
            s = (0, 0)
            n2 = 0

        #first write length of record
        recordlength = 4 + 4 + 8 + 8 + 1 * (s[0] - 1) * n2 + 8 * n2 + 4 + 4  #2 integers (32 bits) + the matrix + code + matrix type
        if recordlength > 4**31:
            raise ValueError('field %s cannot be marshalled because it is larger than 4^31 bytes!' % name)

        fid.write(struct.pack('i', recordlength))  #2 integers (32 bits) + the matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #Write offset and range
        A = data[0:s[0] - 1]
        offsetA = A.min()
        rangeA = A.max() - offsetA
        if rangeA == 0:
            A = A * 0
        else:
            A = (A - offsetA) / rangeA * 255.

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            fid.write(struct.pack('d', float(offsetA)))
            fid.write(struct.pack('d', float(rangeA)))
            for i in xrange(s[0] - 1):
                fid.write(struct.pack('B', int(A[i])))
            fid.write(struct.pack('d', float(data[s[0] - 1])))  #get to the "c" convention, hence the transpose
        elif np.product(s) > 0:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            fid.write(struct.pack('d', float(offsetA)))
            fid.write(struct.pack('d', float(rangeA)))
            for i in xrange(s[0] - 1):
                for j in xrange(s[1]):
                    fid.write(struct.pack('B', int(A[i][j])))  #get to the "c" convention, hence the transpose
            for j in xrange(s[1]):
                fid.write(struct.pack('d', float(data[s[0] - 1][j])))
        # }}}

    elif m.strcmpi(format, 'MatArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #number of records + code
        for matrix in data:
            if isinstance(matrix, (bool, int, long, float)):
                matrix = np.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = np.array(matrix).reshape(-1, )
            if np.ndim(matrix) == 1:
                if np.size(matrix):
                    matrix = matrix.reshape(np.size(matrix), )
                else:
                    matrix = matrix.reshape(0, 0)
            s = matrix.shape
            recordlength += 4 * 2 + np.product(s) * 8  #row and col of matrix + matrix of doubles

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #write data, first number of records
        fid.write(struct.pack('i', len(data)))

        #write each matrix:
        for matrix in data:
            if isinstance(matrix, (bool, int, long, float)):
                matrix = np.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = np.array(matrix).reshape(-1, )
            if np.ndim(matrix) == 1:
                matrix = matrix.reshape(np.size(matrix), )
            s = matrix.shape
            if np.ndim(matrix) == 1:
                fid.write(struct.pack('i', s[0]))
                fid.write(struct.pack('i', 1))
                for i in xrange(s[0]):
                    fid.write(struct.pack('d', float(matrix[i])))  #get to the "c" convention, hence the transpose
            else:
                fid.write(struct.pack('i', s[0]))
                fid.write(struct.pack('i', s[1]))
                for i in xrange(s[0]):
                    for j in xrange(s[1]):
                        fid.write(struct.pack('d', float(matrix[i][j])))
        # }}}

    elif m.strcmpi(format, 'StringArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #for length of array + code
        for string in data:
            recordlength += 4 + len(string)  #for each string

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write length of string array
        fid.write(struct.pack('i', len(data)))

        #now write the strings
        for string in data:
            fid.write(struct.pack('i', len(string)))
            fid.write(struct.pack('%ds' % len(string), string))
        # }}}

    else:  # {{{
        raise TypeError('WriteData error message: data type: %s not supported yet! (%s)' % (format, name))
        # }}}
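# Illustration only (standard library, hypothetical field name): how one 'Double'
# record is laid out by WriteData above, and how it can be read back --
# name length, name, record length, data code (3 for Double), then the value.
import io
import struct

_buf = io.BytesIO()
_name = b'md.geometry.thickness'
_buf.write(struct.pack('i', len(_name)))
_buf.write(struct.pack('%ds' % len(_name), _name))
_buf.write(struct.pack('i', 8 + 4))    # record length: 1 double + data code
_buf.write(struct.pack('i', 3))        # data code for 'Double' (see FormatToCode)
_buf.write(struct.pack('d', 1000.0))   # the value itself

_buf.seek(0)
_n = struct.unpack('i', _buf.read(4))[0]
print(_buf.read(_n).decode(),
      struct.unpack('i', _buf.read(4))[0],
      struct.unpack('i', _buf.read(4))[0],
      struct.unpack('d', _buf.read(8))[0])
# -> md.geometry.thickness 12 3 1000.0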
def issmssh(host, login, port, command):
    """
    ISSMSSH - wrapper for OS independent ssh command.

    usage:
        issmssh(host,command)
    """

    #first get hostname
    hostname = gethostname()

    #if same as host, just run the command.
    if m.strcmpi(host, hostname):
        subprocess.call(command, shell=True)
    else:
        if m.ispc():
            #use the putty project plink.exe: it should be in the path.

            #get ISSM_DIR variable
            if 'ISSM_DIR_WIN' in os.environ:
                ISSM_DIR = os.environ['ISSM_DIR_WIN'][1:-2]
            else:
                raise OSError("issmssh error message: could not find ISSM_DIR_WIN environment variable.")

            username = input('Username: (quoted string) ')
            key = input('Key: (quoted string) ')

            subprocess.call('%s/externalpackages/ssh/plink.exe -ssh -l "%s" -pw "%s" %s "%s"' % (ISSM_DIR, username, key, host, command), shell=True)

        else:
            #just use standard unix ssh
            if port:
                subprocess.call('ssh -l %s -p %d localhost "%s"' % (login, port, command), shell=True)
            else:
                subprocess.call('ssh -l %s %s "%s"' % (login, host, command), shell=True)

    # The following code was added to fix:
    # "IOError: [Errno 35] Resource temporarily unavailable"
    # on the Mac when trying to display md after the solution.
    # (from http://code.google.com/p/robotframework/issues/detail?id=995)

    # Make FreeBSD use blocking I/O like other platforms
    import sys
    import fcntl
    from os import O_NONBLOCK

    fd = sys.stdin.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK)

    fd = sys.stdout.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK)
def WriteData(fid, **kwargs):
    """
    WRITEDATA - write model field in binary file

    Usage:
        WriteData(fid,varargin)
    """

    #process options
    options = pairoptions.pairoptions(**kwargs)

    #Get data properties
    if options.exist('object'):
        #This is an object field, construct enum and data
        obj = options.getfieldvalue('object')
        fieldname = options.getfieldvalue('fieldname')
        classname = options.getfieldvalue('class', str(type(obj)).rsplit('.')[-1].split("'")[0])
        if options.exist('enum'):
            enum = options.getfieldvalue('enum')
        else:
            enum = BuildEnum(classname + '_' + fieldname)
        data = getattr(obj, fieldname)
    else:
        #No processing required
        data = options.getfieldvalue('data')
        enum = options.getfieldvalue('enum')

    format = options.getfieldvalue('format')
    mattype = options.getfieldvalue('mattype', 0)  #only required for matrices
    timeserieslength = options.getfieldvalue('timeserieslength', -1)

    #Process sparse matrices
    #    if issparse(data),
    #        data=full(data);
    #    end

    #Scale data if necesarry
    if options.exist('scale'):
        scale = options.getfieldvalue('scale')
        if numpy.size(data) > 1:
            if numpy.size(data, 0) == timeserieslength:
                data = numpy.array(data)
                data[0:-1, :] = scale * data[0:-1, :]
            else:
                data = scale * data
        else:
            data = scale * data
    if numpy.size(data) > 1:
        if numpy.size(data, 0) == timeserieslength:
            yts = 365.0 * 24.0 * 3600.0
            data[-1, :] = yts * data[-1, :]

    #Step 1: write the enum to identify this record uniquely
    fid.write(struct.pack('i', enum))

    #Step 2: write the data itself.
    if m.strcmpi(format, 'Boolean'):  # {{{
        #    if len(data) != 1:
        #        raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 bool (disguised as an int) + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', int(data)))  #send an int, not easy to send a bool
        # }}}

    elif m.strcmpi(format, 'Integer'):  # {{{
        #    if len(data) != 1:
        #        raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 integer + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', data))
        # }}}

    elif m.strcmpi(format, 'Double'):  # {{{
        #    if len(data) != 1:
        #        raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0])

        #first write length of record
        fid.write(struct.pack('i', 8 + 4))  #1 double + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write double
        fid.write(struct.pack('d', data))
        # }}}

    elif m.strcmpi(format, 'String'):  # {{{
        #first write length of record
        fid.write(struct.pack('i', len(data) + 4 + 4))  #string + string size + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write string
        fid.write(struct.pack('i', len(data)))
        fid.write(struct.pack('%ds' % len(data), data))
        # }}}

    elif m.strcmpi(format, 'BooleanMat'):  # {{{

        if isinstance(data, bool):
            data = numpy.array([data])
        elif isinstance(data, (list, tuple)):
            data = numpy.array(data).reshape(-1, 1)
        if numpy.ndim(data) == 1:
            if numpy.size(data):
                data = data.reshape(numpy.size(data), 1)
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape

        #if matrix = NaN, then do not write anything
        if s[0] == 1 and s[1] == 1 and math.isnan(data[0][0]):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack('i', 4 + 4 + 8 * s[0] * s[1] + 4 + 4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        fid.write(struct.pack('i', s[0]))
        fid.write(struct.pack('i', s[1]))
        for i in range(s[0]):
            for j in range(s[1]):
                fid.write(struct.pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'IntMat'):  # {{{

        if isinstance(data, int):
            data = numpy.array([data])
        elif isinstance(data, (list, tuple)):
            data = numpy.array(data).reshape(-1, 1)
        if numpy.ndim(data) == 1:
            if numpy.size(data):
                data = data.reshape(numpy.size(data), 1)
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape

        #if matrix = NaN, then do not write anything
        if s[0] == 1 and s[1] == 1 and math.isnan(data[0][0]):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack('i', 4 + 4 + 8 * s[0] * s[1] + 4 + 4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        fid.write(struct.pack('i', s[0]))
        fid.write(struct.pack('i', s[1]))
        for i in range(s[0]):
            for j in range(s[1]):
                fid.write(struct.pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'DoubleMat'):  # {{{

        if isinstance(data, (bool, int, float)):
            data = numpy.array([data])
        elif isinstance(data, (list, tuple)):
            data = numpy.array(data).reshape(-1, 1)
        if numpy.ndim(data) == 1:
            if numpy.size(data):
                data = data.reshape(numpy.size(data), 1)
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape

        #if matrix = NaN, then do not write anything
        if s[0] == 1 and s[1] == 1 and math.isnan(data[0][0]):
            s = (0, 0)

        #first write length of record
        recordlength = 4 + 4 + 8 * s[0] * s[1] + 4 + 4  #2 integers (32 bits) + the double matrix + code + matrix type
        if recordlength > 2**31:
            raise ValueError('field %s cannot be marshalled because it is larger than 4^31 bytes!' % EnumToString(enum)[0])

        fid.write(struct.pack('i', recordlength))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        fid.write(struct.pack('i', s[0]))
        fid.write(struct.pack('i', s[1]))
        for i in range(s[0]):
            for j in range(s[1]):
                fid.write(struct.pack('d', float(data[i][j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'MatArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #number of records + code
        for matrix in data:
            if isinstance(matrix, (bool, int, float)):
                matrix = numpy.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = numpy.array(matrix).reshape(-1, 1)
            if numpy.ndim(matrix) == 1:
                if numpy.size(matrix):
                    matrix = matrix.reshape(numpy.size(matrix), 1)
                else:
                    matrix = matrix.reshape(0, 0)
            s = matrix.shape
            recordlength += 4 * 2 + s[0] * s[1] * 8  #row and col of matrix + matrix of doubles

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #write data, first number of records
        fid.write(struct.pack('i', len(data)))

        #write each matrix:
        for matrix in data:
            if isinstance(matrix, (bool, int, float)):
                matrix = numpy.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = numpy.array(matrix).reshape(-1, 1)
            if numpy.ndim(matrix) == 1:
                matrix = matrix.reshape(numpy.size(matrix), 1)
            s = matrix.shape
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in range(s[0]):
                for j in range(s[1]):
                    fid.write(struct.pack('d', float(matrix[i][j])))
        # }}}

    elif m.strcmpi(format, 'StringArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #for length of array + code
        for string in data:
            recordlength += 4 + len(string)  #for each string

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write length of string array
        fid.write(struct.pack('i', len(data)))

        #now write the strings
        for string in data:
            fid.write(struct.pack('i', len(string)))
            fid.write(struct.pack('%ds' % len(string), string))
        # }}}

    else:  # {{{
        raise TypeError('WriteData error message: data type: %d not supported yet! (%s)' % (format, EnumToString(enum)[0]))
        # }}}
def setflowequation(md, **kwargs):
    """
    SETFLOWEQUATION - associate a solution type to each element

    This routine works like plotmodel: it works with an even number of inputs
    'SIA','SSA','HO','L1L2','FS' and 'fill' are the possible options
    that must be followed by the corresponding exp file or flags list
    It can either be a domain file (argus type, .exp extension), or an array of element flags.
    If user wants every element outside the domain to be
    setflowequationd, add '~' to the name of the domain file (ex: '~HO.exp');
    an empty string '' will be considered as an empty domain
    a string 'all' will be considered as the entire domain
    You can specify the type of coupling, 'penalties' or 'tiling', to use with the input 'coupling'

    Usage:
        md=setflowequation(md,varargin)

    Example:
        md=setflowequation(md,'HO','HO.exp','fill','SIA','coupling','tiling');
    """

    #some checks on list of arguments
    if not isinstance(md, model) or not len(kwargs):
        raise TypeError("setflowequation error message")

    #process options
    options = pairoptions(**kwargs)
    print(options)
    #	options=deleteduplicates(options,1);

    #Find out what kind of coupling to use
    coupling_method = options.getfieldvalue('coupling', 'tiling')
    if coupling_method not in ('tiling', 'penalties'):
        raise TypeError("coupling type can only be: tiling or penalties")

    #recover elements distribution
    SIAflag = FlagElements(md, options.getfieldvalue('SIA', ''))
    SSAflag = FlagElements(md, options.getfieldvalue('SSA', ''))
    HOflag = FlagElements(md, options.getfieldvalue('HO', ''))
    L1L2flag = FlagElements(md, options.getfieldvalue('L1L2', ''))
    FSflag = FlagElements(md, options.getfieldvalue('FS', ''))
    filltype = options.getfieldvalue('fill', 'none')

    #Flag the elements that have not been flagged as filltype
    if filltype == 'SIA':
        SIAflag[numpy.nonzero(numpy.logical_not(p.logical_or_n(SSAflag, HOflag)))] = True
    elif filltype == 'SSA':
        SSAflag[numpy.nonzero(numpy.logical_not(p.logical_or_n(SIAflag, HOflag, FSflag)))] = True
    elif filltype == 'HO':
        HOflag[numpy.nonzero(numpy.logical_not(p.logical_or_n(SIAflag, SSAflag, FSflag)))] = True

    #check that each element has at least one flag
    if not any(SIAflag + SSAflag + L1L2flag + HOflag + FSflag):
        raise TypeError("elements type not assigned, supported models are 'SIA','SSA','HO' and 'FS'")

    #check that each element has only one flag
    if any(SIAflag + SSAflag + L1L2flag + HOflag + FSflag > 1):
        print("setflowequation warning message: some elements have several types, higher order type is used for them")
        SIAflag[numpy.nonzero(numpy.logical_and(SIAflag, SSAflag))] = False
        SIAflag[numpy.nonzero(numpy.logical_and(SIAflag, HOflag))] = False
        SSAflag[numpy.nonzero(numpy.logical_and(SSAflag, HOflag))] = False

    #FS can only be used alone for now:
    if any(FSflag) and any(SIAflag):
        raise TypeError("FS cannot be used with any other model for now, put FS everywhere")

    #Initialize node fields
    nodeonSIA = numpy.zeros(md.mesh.numberofvertices, bool)
    nodeonSIA[md.mesh.elements[numpy.nonzero(SIAflag), :] - 1] = True
    nodeonSSA = numpy.zeros(md.mesh.numberofvertices, bool)
    nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag), :] - 1] = True
    nodeonL1L2 = numpy.zeros(md.mesh.numberofvertices, bool)
    nodeonL1L2[md.mesh.elements[numpy.nonzero(L1L2flag), :] - 1] = True
    nodeonHO = numpy.zeros(md.mesh.numberofvertices, bool)
    nodeonHO[md.mesh.elements[numpy.nonzero(HOflag), :] - 1] = True
    nodeonFS = numpy.zeros(md.mesh.numberofvertices, bool)
    noneflag = numpy.zeros(md.mesh.numberofelements, bool)

    #First modify FSflag to get rid of elements constrained everywhere (spc + border with HO or SSA)
    if any(FSflag):
        #	fullspcnodes=double((~isnan(md.stressbalance.spcvx)+~isnan(md.stressbalance.spcvy)+~isnan(md.stressbalance.spcvz))==3 | (nodeonHO & nodeonFS));	%find all the nodes on the boundary of the domain without icefront
        fullspcnodes = numpy.logical_or(numpy.logical_not(numpy.isnan(md.stressbalance.spcvx)).astype(int) +
                                        numpy.logical_not(numpy.isnan(md.stressbalance.spcvy)).astype(int) +
                                        numpy.logical_not(numpy.isnan(md.stressbalance.spcvz)).astype(int) == 3,
                                        numpy.logical_and(nodeonHO, nodeonFS)).astype(int)    #find all the nodes on the boundary of the domain without icefront
        #	fullspcelems=double(sum(fullspcnodes(md.mesh.elements),2)==6);	%find all the elements on the boundary of the domain without icefront
        fullspcelems = (numpy.sum(fullspcnodes[md.mesh.elements - 1], axis=1) == 6).astype(int)    #find all the elements on the boundary of the domain without icefront
        FSflag[numpy.nonzero(fullspcelems.reshape(-1))] = False
        nodeonFS[md.mesh.elements[numpy.nonzero(FSflag), :] - 1] = True

    #Then complete with NoneApproximation or the other model used if there is no FS
    if any(FSflag):
        if any(HOflag):    #fill with HO
            HOflag[numpy.logical_not(FSflag)] = True
            nodeonHO[md.mesh.elements[numpy.nonzero(HOflag), :] - 1] = True
        elif any(SSAflag):    #fill with SSA
            SSAflag[numpy.logical_not(FSflag)] = True
            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag), :] - 1] = True
        else:    #fill with none
            noneflag[numpy.nonzero(numpy.logical_not(FSflag))] = True

    #Now take care of the coupling between SSA and HO
    md.stressbalance.vertex_pairing = numpy.array([])
    nodeonSSAHO = numpy.zeros(md.mesh.numberofvertices, bool)
    nodeonHOFS = numpy.zeros(md.mesh.numberofvertices, bool)
    nodeonSSAFS = numpy.zeros(md.mesh.numberofvertices, bool)
    SSAHOflag = numpy.zeros(md.mesh.numberofelements, bool)
    SSAFSflag = numpy.zeros(md.mesh.numberofelements, bool)
    HOFSflag = numpy.zeros(md.mesh.numberofelements, bool)

    if coupling_method == 'penalties':
        #Create the border nodes between HO and SSA and extrude them
        numnodes2d = md.mesh.numberofvertices2d
        numlayers = md.mesh.numberoflayers
        bordernodes2d = numpy.nonzero(numpy.logical_and(nodeonHO[0:numnodes2d], nodeonSSA[0:numnodes2d]))[0] + 1    #Nodes connected to two different types of elements

        #initialize and fill in penalties structure
        if numpy.all(numpy.logical_not(numpy.isnan(bordernodes2d))):
            penalties = numpy.zeros((0, 2))
            for i in range(1, numlayers):
                penalties = numpy.vstack((penalties, numpy.hstack((bordernodes2d.reshape(-1, 1),
                                                                   bordernodes2d.reshape(-1, 1) + md.mesh.numberofvertices2d * (i)))))
            md.stressbalance.vertex_pairing = penalties

    elif coupling_method == 'tiling':
        if any(SSAflag) and any(HOflag):    #coupling SSA HO
            #Find node at the border
            nodeonSSAHO[numpy.nonzero(numpy.logical_and(nodeonSSA, nodeonHO))] = True

            #SSA elements in contact with this layer become SSAHO elements
            matrixelements = m.ismember(md.mesh.elements - 1, numpy.nonzero(nodeonSSAHO)[0])
            commonelements = numpy.sum(matrixelements, axis=1) != 0
            commonelements[numpy.nonzero(HOflag)] = False    #only one layer: the elements previously in SSA
            SSAflag[numpy.nonzero(commonelements)] = False    #these elements are now SSAHO elements
            SSAHOflag[numpy.nonzero(commonelements)] = True
            nodeonSSA[:] = False
            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag), :] - 1] = True

            #rule out elements that don't touch the 2 boundaries
            pos = numpy.nonzero(SSAHOflag)[0]
            elist = numpy.zeros(numpy.size(pos), dtype=int)
            elist = elist + numpy.sum(nodeonSSA[md.mesh.elements[pos, :] - 1], axis=1).astype(bool)
            elist = elist - numpy.sum(nodeonHO[md.mesh.elements[pos, :] - 1], axis=1).astype(bool)
            pos1 = numpy.nonzero(elist == 1)[0]
            SSAflag[pos[pos1]] = True
            SSAHOflag[pos[pos1]] = False
            pos2 = numpy.nonzero(elist == -1)[0]
            HOflag[pos[pos2]] = True
            SSAHOflag[pos[pos2]] = False

            #Recompute nodes associated to these elements
            nodeonSSA[:] = False
            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag), :] - 1] = True
            nodeonHO[:] = False
            nodeonHO[md.mesh.elements[numpy.nonzero(HOflag), :] - 1] = True
            nodeonSSAHO[:] = False
            nodeonSSAHO[md.mesh.elements[numpy.nonzero(SSAHOflag), :] - 1] = True

        elif any(HOflag) and any(FSflag):    #coupling HO FS
            #Find node at the border
            nodeonHOFS[numpy.nonzero(numpy.logical_and(nodeonHO, nodeonFS))] = True

            #FS elements in contact with this layer become HOFS elements
            matrixelements = m.ismember(md.mesh.elements - 1, numpy.nonzero(nodeonHOFS)[0])
            commonelements = numpy.sum(matrixelements, axis=1) != 0
            commonelements[numpy.nonzero(HOflag)] = False    #only one layer: the elements previously in FS
            FSflag[numpy.nonzero(commonelements)] = False    #these elements are now HOFS elements
            HOFSflag[numpy.nonzero(commonelements)] = True
            nodeonFS = numpy.zeros(md.mesh.numberofvertices, bool)
            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag), :] - 1] = True

            #rule out elements that don't touch the 2 boundaries
            pos = numpy.nonzero(HOFSflag)[0]
            elist = numpy.zeros(numpy.size(pos), dtype=int)
            elist = elist + numpy.sum(nodeonFS[md.mesh.elements[pos, :] - 1], axis=1).astype(bool)
            elist = elist - numpy.sum(nodeonHO[md.mesh.elements[pos, :] - 1], axis=1).astype(bool)
            pos1 = numpy.nonzero(elist == 1)[0]
            FSflag[pos[pos1]] = True
            HOFSflag[pos[pos1]] = False
            pos2 = numpy.nonzero(elist == -1)[0]
            HOflag[pos[pos2]] = True
            HOFSflag[pos[pos2]] = False

            #Recompute nodes associated to these elements
            nodeonFS[:] = False
            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag), :] - 1] = True
            nodeonHO[:] = False
            nodeonHO[md.mesh.elements[numpy.nonzero(HOflag), :] - 1] = True
            nodeonHOFS[:] = False
            nodeonHOFS[md.mesh.elements[numpy.nonzero(HOFSflag), :] - 1] = True

        elif any(FSflag) and any(SSAflag):    #coupling SSA FS
            #Find node at the border
            nodeonSSAFS[numpy.nonzero(numpy.logical_and(nodeonSSA, nodeonFS))] = True

            #FS elements in contact with this layer become SSAFS elements
            matrixelements = m.ismember(md.mesh.elements - 1, numpy.nonzero(nodeonSSAFS)[0])
            commonelements = numpy.sum(matrixelements, axis=1) != 0
            commonelements[numpy.nonzero(SSAflag)] = False    #only one layer: the elements previously in FS
            FSflag[numpy.nonzero(commonelements)] = False    #these elements are now SSAFS elements
            SSAFSflag[numpy.nonzero(commonelements)] = True
            nodeonFS = numpy.zeros(md.mesh.numberofvertices, bool)
            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag), :] - 1] = True

            #rule out elements that don't touch the 2 boundaries
            pos = numpy.nonzero(SSAFSflag)[0]
            elist = numpy.zeros(numpy.size(pos), dtype=int)
            elist = elist + numpy.sum(nodeonSSA[md.mesh.elements[pos, :] - 1], axis=1).astype(bool)
            elist = elist - numpy.sum(nodeonFS[md.mesh.elements[pos, :] - 1], axis=1).astype(bool)
            pos1 = numpy.nonzero(elist == 1)[0]
            SSAflag[pos[pos1]] = True
            SSAFSflag[pos[pos1]] = False
            pos2 = numpy.nonzero(elist == -1)[0]
            FSflag[pos[pos2]] = True
            SSAFSflag[pos[pos2]] = False

            #Recompute nodes associated to these elements
            nodeonSSA[:] = False
            nodeonSSA[md.mesh.elements[numpy.nonzero(SSAflag), :] - 1] = True
            nodeonFS[:] = False
            nodeonFS[md.mesh.elements[numpy.nonzero(FSflag), :] - 1] = True
            nodeonSSAFS[:] = False
            nodeonSSAFS[md.mesh.elements[numpy.nonzero(SSAFSflag), :] - 1] = True
        elif any(FSflag) and any(SIAflag):
            raise TypeError("type of coupling not supported yet")

    #Create SSAHOApproximation where needed
    md.flowequation.element_equation = numpy.zeros(md.mesh.numberofelements, int)
    md.flowequation.element_equation[numpy.nonzero(noneflag)] = 0
    md.flowequation.element_equation[numpy.nonzero(SIAflag)] = 1
    md.flowequation.element_equation[numpy.nonzero(SSAflag)] = 2
    md.flowequation.element_equation[numpy.nonzero(L1L2flag)] = 3
    md.flowequation.element_equation[numpy.nonzero(HOflag)] = 4
    md.flowequation.element_equation[numpy.nonzero(FSflag)] = 5
    md.flowequation.element_equation[numpy.nonzero(SSAHOflag)] = 6
    md.flowequation.element_equation[numpy.nonzero(SSAFSflag)] = 7
    md.flowequation.element_equation[numpy.nonzero(HOFSflag)] = 8

    #border
    md.flowequation.borderHO = nodeonHO
    md.flowequation.borderSSA = nodeonSSA
    md.flowequation.borderFS = nodeonFS

    #Create vertices_type
    md.flowequation.vertex_equation = numpy.zeros(md.mesh.numberofvertices, int)
    pos = numpy.nonzero(nodeonSSA)
    md.flowequation.vertex_equation[pos] = 2
    pos = numpy.nonzero(nodeonL1L2)
    md.flowequation.vertex_equation[pos] = 3
    pos = numpy.nonzero(nodeonHO)
    md.flowequation.vertex_equation[pos] = 4
    pos = numpy.nonzero(nodeonFS)
    md.flowequation.vertex_equation[pos] = 5
    #DO SIA LAST! Otherwise spcs might not be set up correctly (SIA should have priority)
    pos = numpy.nonzero(nodeonSIA)
    md.flowequation.vertex_equation[pos] = 1
    if any(FSflag):
        pos = numpy.nonzero(numpy.logical_not(nodeonFS))
        if not (any(HOflag) or any(SSAflag)):    #FS only
            md.flowequation.vertex_equation[pos] = 0
    pos = numpy.nonzero(nodeonSSAHO)
    md.flowequation.vertex_equation[pos] = 6
    pos = numpy.nonzero(nodeonHOFS)
    md.flowequation.vertex_equation[pos] = 7
    pos = numpy.nonzero(nodeonSSAFS)
    md.flowequation.vertex_equation[pos] = 8

    #figure out solution types
    md.flowequation.isSIA = any(md.flowequation.element_equation == 1)
    md.flowequation.isSSA = any(md.flowequation.element_equation == 2)
    md.flowequation.isL1L2 = any(md.flowequation.element_equation == 3)
    md.flowequation.isHO = any(md.flowequation.element_equation == 4)
    md.flowequation.isFS = any(md.flowequation.element_equation == 5)

    #Check that tiling can work:
    if any(md.flowequation.borderSSA) and any(md.flowequation.borderHO) and any(md.flowequation.borderHO + md.flowequation.borderSSA != 1):
        raise TypeError("error coupling domain too irregular")
    if any(md.flowequation.borderSSA) and any(md.flowequation.borderFS) and any(md.flowequation.borderFS + md.flowequation.borderSSA != 1):
        raise TypeError("error coupling domain too irregular")
    if any(md.flowequation.borderFS) and any(md.flowequation.borderHO) and any(md.flowequation.borderHO + md.flowequation.borderFS != 1):
        raise TypeError("error coupling domain too irregular")

    return md
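#Illustrative sketch only (toy numpy arrays, not the ISSM model object): how boolean
#element flags recovered by FlagElements translate into the element_equation codes
#assigned above (1=SIA, 2=SSA, 4=HO, etc.). The flag values below are made up.
import numpy

numberofelements = 6
SIAflag = numpy.array([1, 1, 0, 0, 0, 0], bool)
SSAflag = numpy.array([0, 0, 1, 1, 0, 0], bool)
HOflag = numpy.array([0, 0, 0, 0, 1, 1], bool)

element_equation = numpy.zeros(numberofelements, int)
element_equation[numpy.nonzero(SIAflag)] = 1    #SIA elements
element_equation[numpy.nonzero(SSAflag)] = 2    #SSA elements
element_equation[numpy.nonzero(HOflag)] = 4     #HO elements
print(element_equation)    #[1 1 2 2 4 4]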
def checkconsistency(self, md, solution, analyses):    # {{{
    #Early return
    if StressbalanceAnalysisEnum() not in analyses:
        return md

    md = checkfield(md, 'fieldname', 'stressbalance.spcvx', 'Inf', 1, 'timeseries', 1)
    md = checkfield(md, 'fieldname', 'stressbalance.spcvy', 'Inf', 1, 'timeseries', 1)
    if m.strcmp(md.mesh.domaintype(), '3D'):
        md = checkfield(md, 'fieldname', 'stressbalance.spcvz', 'Inf', 1, 'timeseries', 1)
    md = checkfield(md, 'fieldname', 'stressbalance.restol', 'size', [1], '>', 0)
    md = checkfield(md, 'fieldname', 'stressbalance.reltol', 'size', [1])
    md = checkfield(md, 'fieldname', 'stressbalance.abstol', 'size', [1])
    md = checkfield(md, 'fieldname', 'stressbalance.isnewton', 'numel', [1], 'values', [0, 1, 2])
    md = checkfield(md, 'fieldname', 'stressbalance.FSreconditioning', 'size', [1], 'NaN', 1, 'Inf', 1)
    md = checkfield(md, 'fieldname', 'stressbalance.viscosity_overshoot', 'size', [1], 'NaN', 1, 'Inf', 1)
    md = checkfield(md, 'fieldname', 'stressbalance.maxiter', 'size', [1], '>=', 1)
    md = checkfield(md, 'fieldname', 'stressbalance.referential', 'size', [md.mesh.numberofvertices, 6])
    md = checkfield(md, 'fieldname', 'stressbalance.loadingforce', 'size', [md.mesh.numberofvertices, 3])
    md = checkfield(md, 'fieldname', 'stressbalance.requested_outputs', 'stringrow', 1)

    #singular solution
    #	if ~any((~isnan(md.stressbalance.spcvx)+~isnan(md.stressbalance.spcvy))==2),
    if not numpy.any(numpy.logical_and(numpy.logical_not(numpy.isnan(md.stressbalance.spcvx)),
                                       numpy.logical_not(numpy.isnan(md.stressbalance.spcvy)))):
        print("\n !!! Warning: no spc applied, model might not be well posed if no basal friction is applied, check for solution crash\n")

    #CHECK THAT EACH LINE CONTAINS ONLY NAN VALUES OR NO NAN VALUES
    #	if any(sum(isnan(md.stressbalance.referential),2)~=0 & sum(isnan(md.stressbalance.referential),2)~=6),
    if numpy.any(numpy.logical_and(numpy.sum(numpy.isnan(md.stressbalance.referential), axis=1) != 0,
                                   numpy.sum(numpy.isnan(md.stressbalance.referential), axis=1) != 6)):
        md.checkmessage("Each line of stressbalance.referential should contain either only NaN values or no NaN values")

    #CHECK THAT THE TWO VECTORS PROVIDED ARE ORTHOGONAL
    #	if any(sum(isnan(md.stressbalance.referential),2)==0),
    if numpy.any(numpy.sum(numpy.isnan(md.stressbalance.referential), axis=1) == 0):
        pos = [i for i, item in enumerate(numpy.sum(numpy.isnan(md.stressbalance.referential), axis=1)) if item == 0]
        #	numpy.inner (and numpy.dot) calculate all the dot product permutations, resulting in a full matrix multiply
        #	if numpy.any(numpy.abs(numpy.inner(md.stressbalance.referential[pos,0:3],md.stressbalance.referential[pos,3:6]).diagonal())>sys.float_info.epsilon):
        #		md.checkmessage("Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal")
        for item in md.stressbalance.referential[pos, :]:
            if numpy.abs(numpy.inner(item[0:3], item[3:6])) > sys.float_info.epsilon:
                md.checkmessage("Vectors in stressbalance.referential (columns 1 to 3 and 4 to 6) must be orthogonal")

    #CHECK THAT NO rotation is specified for FS Grounded ice at base
    if m.strcmp(md.mesh.domaintype(), '3D') and md.flowequation.isFS:
        pos = numpy.nonzero(numpy.logical_and(md.mask.groundedice_levelset, md.mesh.vertexonbase))
        if numpy.any(numpy.logical_not(numpy.isnan(md.stressbalance.referential[pos, :]))):
            md.checkmessage("no referential should be specified for basal vertices of grounded ice")

    return md
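#Illustrative check on toy data (not the ISSM md object): each referential row holds
#two 3-component vectors; rows must be either all-NaN or NaN-free, and the two vectors
#must be orthogonal, mirroring the two referential tests performed above.
import numpy
import sys

referential = numpy.array([
    [numpy.nan] * 6,             #untouched vertex: an all-NaN row is allowed
    [1., 0., 0., 0., 1., 0.],    #valid row: e_x and e_y are orthogonal
])

nans_per_row = numpy.sum(numpy.isnan(referential), axis=1)
assert not numpy.any(numpy.logical_and(nans_per_row != 0, nans_per_row != 6))
for row in referential[nans_per_row == 0, :]:
    assert numpy.abs(numpy.inner(row[0:3], row[3:6])) <= sys.float_info.epsilon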
def SetIceShelfBC(md, icefrontfile=''):
    """
    SETICESHELFBC - Create the boundary conditions for stressbalance and thermal models for a Ice Shelf with Ice Front

    Neumann BC are used on the ice front (an ARGUS contour around the ice front
    must be given in input)
    Dirichlet BC are used elsewhere for stressbalance

    Usage:
        md=SetIceShelfBC(md,varargin)

    Example:
        md=SetIceShelfBC(md);
        md=SetIceShelfBC(md,'Front.exp');

    See also: SETICESHEETBC, SETMARINEICESHEETBC
    """

    #node on Dirichlet (boundary and ~icefront)
    if icefrontfile:
        if not os.path.exists(icefrontfile):
            raise IOError("SetIceShelfBC error message: ice front file '%s' not found." % icefrontfile)
        [nodeinsideicefront, dum] = ContourToMesh(md.mesh.elements, md.mesh.x, md.mesh.y, icefrontfile, 'node', 2)
        nodeonicefront = numpy.logical_and(md.mesh.vertexonboundary, nodeinsideicefront.reshape(-1))
    else:
        nodeonicefront = numpy.zeros((md.mesh.numberofvertices), bool)

    #	pos=find(md.mesh.vertexonboundary & ~nodeonicefront);
    pos = numpy.nonzero(numpy.logical_and(md.mesh.vertexonboundary, numpy.logical_not(nodeonicefront)))[0]
    md.stressbalance.spcvx = float('nan') * numpy.ones(md.mesh.numberofvertices)
    md.stressbalance.spcvy = float('nan') * numpy.ones(md.mesh.numberofvertices)
    md.stressbalance.spcvz = float('nan') * numpy.ones(md.mesh.numberofvertices)
    md.stressbalance.referential = float('nan') * numpy.ones((md.mesh.numberofvertices, 6))
    md.stressbalance.loadingforce = 0 * numpy.ones((md.mesh.numberofvertices, 3))

    #Icefront position
    pos = numpy.nonzero(nodeonicefront)[0]
    md.mask.ice_levelset[pos] = 0

    #First find segments that are not completely on the front
    if m.strcmp(md.mesh.elementtype(), 'Penta'):
        numbernodesfront = 4
    elif m.strcmp(md.mesh.elementtype(), 'Tria'):
        numbernodesfront = 2
    else:
        raise TypeError("mesh type not supported yet")
    if any(md.mask.ice_levelset <= 0):
        values = md.mask.ice_levelset[md.mesh.segments[:, 0:-1] - 1]
        segmentsfront = 1 - values
        segments = numpy.nonzero(numpy.sum(segmentsfront, axis=1) != numbernodesfront)[0]
        #Find all nodes for these segments and spc them
        pos = md.mesh.segments[segments, 0:-1] - 1
    else:
        pos = numpy.nonzero(md.mesh.vertexonboundary)[0]
    md.stressbalance.spcvx[pos] = 0
    md.stressbalance.spcvy[pos] = 0
    md.stressbalance.spcvz[pos] = 0

    #Dirichlet Values
    if isinstance(md.inversion.vx_obs, numpy.ndarray) and numpy.size(md.inversion.vx_obs, axis=0) == md.mesh.numberofvertices and \
       isinstance(md.inversion.vy_obs, numpy.ndarray) and numpy.size(md.inversion.vy_obs, axis=0) == md.mesh.numberofvertices:
        #reshape to rank-2 if necessary to match spc arrays
        if numpy.ndim(md.inversion.vx_obs) == 1:
            md.inversion.vx_obs = md.inversion.vx_obs.reshape(-1, 1)
        if numpy.ndim(md.inversion.vy_obs) == 1:
            md.inversion.vy_obs = md.inversion.vy_obs.reshape(-1, 1)
        print("      boundary conditions for stressbalance model: spc set as observed velocities")
        md.stressbalance.spcvx[pos] = md.inversion.vx_obs[pos]
        md.stressbalance.spcvy[pos] = md.inversion.vy_obs[pos]
    else:
        print("      boundary conditions for stressbalance model: spc set as zero")

    #Create zeros basalforcings and smb
    md.smb.initialize(md)
    md.basalforcings.initialize(md)

    #Deal with other boundary conditions
    if numpy.all(numpy.isnan(md.balancethickness.thickening_rate)):
        md.balancethickness.thickening_rate = numpy.zeros((md.mesh.numberofvertices, 1))
        print("      no balancethickness.thickening_rate specified: values set as zero")
    md.masstransport.spcthickness = float('nan') * numpy.ones((md.mesh.numberofvertices, 1))
    md.balancethickness.spcthickness = float('nan') * numpy.ones((md.mesh.numberofvertices, 1))
    md.damage.spcdamage = float('nan') * numpy.ones((md.mesh.numberofvertices, 1))

    if isinstance(md.initialization.temperature, numpy.ndarray) and numpy.size(md.initialization.temperature, axis=0) == md.mesh.numberofvertices:
        md.thermal.spctemperature = float('nan') * numpy.ones((md.mesh.numberofvertices, 1))
        if hasattr(md.mesh, 'vertexonsurface'):
            pos = numpy.nonzero(md.mesh.vertexonsurface)[0]
            md.thermal.spctemperature[pos] = md.initialization.temperature[pos]    #impose observed temperature on surface
        if not isinstance(md.basalforcings.geothermalflux, numpy.ndarray) or not numpy.size(md.basalforcings.geothermalflux, axis=0) == md.mesh.numberofvertices:
            md.basalforcings.geothermalflux = numpy.zeros((md.mesh.numberofvertices, 1))
    else:
        print("      no thermal boundary conditions created: no observed temperature found")

    return md
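#Toy illustration (assumed small arrays, not an ISSM mesh) of the segment test used
#above for Tria meshes: only boundary segments that are not entirely on the ice front
#(where ice_levelset == 0) keep Dirichlet conditions; the middle segment is dropped.
import numpy

numbernodesfront = 2                              #Tria: 2 vertices per boundary segment
segments = numpy.array([[1, 2, 10],               #columns: node1, node2, element (1-based)
                        [2, 3, 11],
                        [3, 4, 12]])
ice_levelset = numpy.array([-1., 0., 0., -1.])    #0 marks ice-front nodes

values = ice_levelset[segments[:, 0:-1] - 1]
segmentsfront = 1 - values
keep = numpy.nonzero(numpy.sum(segmentsfront, axis=1) != numbernodesfront)[0]
spc_nodes = segments[keep, 0:-1] - 1              #nodes to constrain (0-based)
print(spc_nodes)    #[[0 1] [2 3]]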
def extract(md, area): # {{{ """ extract - extract a model according to an Argus contour or flag list This routine extracts a submodel from a bigger model with respect to a given contour md must be followed by the corresponding exp file or flags list It can either be a domain file (argus type, .exp extension), or an array of element flags. If user wants every element outside the domain to be extract2d, add '~' to the name of the domain file (ex: '~HO.exp') an empty string '' will be considered as an empty domain a string 'all' will be considered as the entire domain Usage: md2=extract(md,area) Examples: md2=extract(md,'Domain.exp') See also: EXTRUDE, COLLAPSE """ #copy model md1 = copy.deepcopy(md) #get elements that are inside area flag_elem = FlagElements(md1, area) if not np.any(flag_elem): raise RuntimeError("extracted model is empty") #kick out all elements with 3 dirichlets spc_elem = np.nonzero(np.logical_not(flag_elem))[0] spc_node = np.unique(md1.mesh.elements[spc_elem, :]) - 1 flag = np.ones(md1.mesh.numberofvertices) flag[spc_node] = 0 pos = np.nonzero( np.logical_not(np.sum(flag[md1.mesh.elements - 1], axis=1)))[0] flag_elem[pos] = 0 #extracted elements and nodes lists pos_elem = np.nonzero(flag_elem)[0] pos_node = np.unique(md1.mesh.elements[pos_elem, :]) - 1 #keep track of some fields numberofvertices1 = md1.mesh.numberofvertices numberofelements1 = md1.mesh.numberofelements numberofvertices2 = np.size(pos_node) numberofelements2 = np.size(pos_elem) flag_node = np.zeros(numberofvertices1) flag_node[pos_node] = 1 #Create Pelem and Pnode (transform old nodes in new nodes and same thing for the elements) Pelem = np.zeros(numberofelements1, int) Pelem[pos_elem] = np.arange(1, numberofelements2 + 1) Pnode = np.zeros(numberofvertices1, int) Pnode[pos_node] = np.arange(1, numberofvertices2 + 1) #renumber the elements (some node won't exist anymore) elements_1 = copy.deepcopy(md1.mesh.elements) elements_2 = elements_1[pos_elem, :] elements_2[:, 0] = Pnode[elements_2[:, 0] - 1] elements_2[:, 1] = Pnode[elements_2[:, 1] - 1] elements_2[:, 2] = Pnode[elements_2[:, 2] - 1] if md1.mesh.__class__.__name__ == 'mesh3dprisms': elements_2[:, 3] = Pnode[elements_2[:, 3] - 1] elements_2[:, 4] = Pnode[elements_2[:, 4] - 1] elements_2[:, 5] = Pnode[elements_2[:, 5] - 1] #OK, now create the new model! 
#take every field from model md2 = copy.deepcopy(md1) #automatically modify fields #loop over model fields model_fields = vars(md1) for fieldi in model_fields: #get field field = getattr(md1, fieldi) fieldsize = np.shape(field) if hasattr(field, '__dict__') and not m.ismember( fieldi, ['results'])[0]: #recursive call object_fields = vars(field) for fieldj in object_fields: #get field field = getattr(getattr(md1, fieldi), fieldj) fieldsize = np.shape(field) if len(fieldsize): #size = number of nodes * n if fieldsize[0] == numberofvertices1: setattr(getattr(md2, fieldi), fieldj, field[pos_node]) elif fieldsize[0] == numberofvertices1 + 1: setattr(getattr(md2, fieldi), fieldj, np.vstack((field[pos_node], field[-1, :]))) #size = number of elements * n elif fieldsize[0] == numberofelements1: setattr(getattr(md2, fieldi), fieldj, field[pos_elem]) else: if len(fieldsize): #size = number of nodes * n if fieldsize[0] == numberofvertices1: setattr(md2, fieldi, field[pos_node]) elif fieldsize[0] == numberofvertices1 + 1: setattr(md2, fieldi, np.hstack((field[pos_node], field[-1, :]))) #size = number of elements * n elif fieldsize[0] == numberofelements1: setattr(md2, fieldi, field[pos_elem]) #modify some specific fields #Mesh md2.mesh.numberofelements = numberofelements2 md2.mesh.numberofvertices = numberofvertices2 md2.mesh.elements = elements_2 #mesh.uppervertex mesh.lowervertex if md1.mesh.__class__.__name__ == 'mesh3dprisms': md2.mesh.uppervertex = md1.mesh.uppervertex[pos_node] pos = np.where(~np.isnan(md2.mesh.uppervertex))[0] md2.mesh.uppervertex[pos] = Pnode[ md2.mesh.uppervertex[pos].astype(int) - 1] md2.mesh.lowervertex = md1.mesh.lowervertex[pos_node] pos = np.where(~np.isnan(md2.mesh.lowervertex))[0] md2.mesh.lowervertex[pos] = Pnode[ md2.mesh.lowervertex[pos].astype(int) - 1] md2.mesh.upperelements = md1.mesh.upperelements[pos_elem] pos = np.where(~np.isnan(md2.mesh.upperelements))[0] md2.mesh.upperelements[pos] = Pelem[ md2.mesh.upperelements[pos].astype(int) - 1] md2.mesh.lowerelements = md1.mesh.lowerelements[pos_elem] pos = np.where(~np.isnan(md2.mesh.lowerelements))[0] md2.mesh.lowerelements[pos] = Pelem[ md2.mesh.lowerelements[pos].astype(int) - 1] #Initial 2d mesh if md1.mesh.__class__.__name__ == 'mesh3dprisms': flag_elem_2d = flag_elem[np.arange(0, md1.mesh.numberofelements2d)] pos_elem_2d = np.nonzero(flag_elem_2d)[0] flag_node_2d = flag_node[np.arange(0, md1.mesh.numberofvertices2d)] pos_node_2d = np.nonzero(flag_node_2d)[0] md2.mesh.numberofelements2d = np.size(pos_elem_2d) md2.mesh.numberofvertices2d = np.size(pos_node_2d) md2.mesh.elements2d = md1.mesh.elements2d[pos_elem_2d, :] md2.mesh.elements2d[:, 0] = Pnode[md2.mesh.elements2d[:, 0] - 1] md2.mesh.elements2d[:, 1] = Pnode[md2.mesh.elements2d[:, 1] - 1] md2.mesh.elements2d[:, 2] = Pnode[md2.mesh.elements2d[:, 2] - 1] md2.mesh.x2d = md1.mesh.x[pos_node_2d] md2.mesh.y2d = md1.mesh.y[pos_node_2d] #Edges if m.strcmp(md.mesh.domaintype(), '2Dhorizontal'): if np.ndim(md2.mesh.edges) > 1 and np.size( md2.mesh.edges, axis=1 ) > 1: #do not use ~isnan because there are some np.nans... #renumber first two columns pos = np.nonzero(md2.mesh.edges[:, 3] != -1)[0] md2.mesh.edges[:, 0] = Pnode[md2.mesh.edges[:, 0] - 1] md2.mesh.edges[:, 1] = Pnode[md2.mesh.edges[:, 1] - 1] md2.mesh.edges[:, 2] = Pelem[md2.mesh.edges[:, 2] - 1] md2.mesh.edges[pos, 3] = Pelem[md2.mesh.edges[pos, 3] - 1] #remove edges when the 2 vertices are not in the domain. 
md2.mesh.edges = md2.mesh.edges[np.nonzero( np.logical_and(md2.mesh.edges[:, 0], md2.mesh.edges[:, 1]) )[0], :] #Replace all zeros by -1 in the last two columns pos = np.nonzero(md2.mesh.edges[:, 2] == 0)[0] md2.mesh.edges[pos, 2] = -1 pos = np.nonzero(md2.mesh.edges[:, 3] == 0)[0] md2.mesh.edges[pos, 3] = -1 #Invert -1 on the third column with last column (Also invert first two columns!!) pos = np.nonzero(md2.mesh.edges[:, 2] == -1)[0] md2.mesh.edges[pos, 2] = md2.mesh.edges[pos, 3] md2.mesh.edges[pos, 3] = -1 values = md2.mesh.edges[pos, 1] md2.mesh.edges[pos, 1] = md2.mesh.edges[pos, 0] md2.mesh.edges[pos, 0] = values #Finally remove edges that do not belong to any element pos = np.nonzero( np.logical_and(md2.mesh.edges[:, 1] == -1, md2.mesh.edges[:, 2] == -1))[0] md2.mesh.edges = np.delete(md2.mesh.edges, pos, axis=0) #Penalties if np.any(np.logical_not(np.isnan(md2.stressbalance.vertex_pairing))): for i in xrange(np.size(md1.stressbalance.vertex_pairing, axis=0)): md2.stressbalance.vertex_pairing[i, :] = Pnode[ md1.stressbalance.vertex_pairing[i, :]] md2.stressbalance.vertex_pairing = md2.stressbalance.vertex_pairing[ np.nonzero(md2.stressbalance.vertex_pairing[:, 0])[0], :] if np.any(np.logical_not(np.isnan(md2.masstransport.vertex_pairing))): for i in xrange(np.size(md1.masstransport.vertex_pairing, axis=0)): md2.masstransport.vertex_pairing[i, :] = Pnode[ md1.masstransport.vertex_pairing[i, :]] md2.masstransport.vertex_pairing = md2.masstransport.vertex_pairing[ np.nonzero(md2.masstransport.vertex_pairing[:, 0])[0], :] #recreate segments if md1.mesh.__class__.__name__ == 'mesh2d': md2.mesh.vertexconnectivity = NodeConnectivity( md2.mesh.elements, md2.mesh.numberofvertices)[0] md2.mesh.elementconnectivity = ElementConnectivity( md2.mesh.elements, md2.mesh.vertexconnectivity)[0] md2.mesh.segments = contourenvelope(md2) md2.mesh.vertexonboundary = np.zeros(numberofvertices2, bool) md2.mesh.vertexonboundary[md2.mesh.segments[:, 0:2] - 1] = True else: #First do the connectivity for the contourenvelope in 2d md2.mesh.vertexconnectivity = NodeConnectivity( md2.mesh.elements2d, md2.mesh.numberofvertices2d)[0] md2.mesh.elementconnectivity = ElementConnectivity( md2.mesh.elements2d, md2.mesh.vertexconnectivity)[0] segments = contourenvelope(md2) md2.mesh.vertexonboundary = np.zeros( numberofvertices2 / md2.mesh.numberoflayers, bool) md2.mesh.vertexonboundary[segments[:, 0:2] - 1] = True md2.mesh.vertexonboundary = np.tile(md2.mesh.vertexonboundary, md2.mesh.numberoflayers) #Then do it for 3d as usual md2.mesh.vertexconnectivity = NodeConnectivity( md2.mesh.elements, md2.mesh.numberofvertices)[0] md2.mesh.elementconnectivity = ElementConnectivity( md2.mesh.elements, md2.mesh.vertexconnectivity)[0] #Boundary conditions: Dirichlets on new boundary #Catch the elements that have not been extracted orphans_elem = np.nonzero(np.logical_not(flag_elem))[0] orphans_node = np.unique(md1.mesh.elements[orphans_elem, :]) - 1 #Figure out which node are on the boundary between md2 and md1 nodestoflag1 = np.intersect1d(orphans_node, pos_node) nodestoflag2 = Pnode[nodestoflag1].astype(int) - 1 if np.size(md1.stressbalance.spcvx) > 1 and np.size( md1.stressbalance.spcvy) > 2 and np.size( md1.stressbalance.spcvz) > 2: if np.size(md1.inversion.vx_obs) > 1 and np.size( md1.inversion.vy_obs) > 1: md2.stressbalance.spcvx[nodestoflag2] = md2.inversion.vx_obs[ nodestoflag2] md2.stressbalance.spcvy[nodestoflag2] = md2.inversion.vy_obs[ nodestoflag2] else: md2.stressbalance.spcvx[nodestoflag2] = np.nan 
md2.stressbalance.spcvy[nodestoflag2] = np.nan print "\n!! extract warning: spc values should be checked !!\n\n" #put 0 for vz md2.stressbalance.spcvz[nodestoflag2] = 0 if np.any(np.logical_not(np.isnan(md1.thermal.spctemperature))): md2.thermal.spctemperature[nodestoflag2] = 1 #Results fields if md1.results: md2.results = results() for solutionfield, field in md1.results.__dict__.iteritems(): if isinstance(field, list): setattr(md2.results, solutionfield, []) #get time step for i, fieldi in enumerate(field): if isinstance(fieldi, results) and fieldi: getattr(md2.results, solutionfield).append(results()) fieldr = getattr(md2.results, solutionfield)[i] #get subfields for solutionsubfield, subfield in fieldi.__dict__.iteritems( ): if np.size(subfield) == numberofvertices1: setattr(fieldr, solutionsubfield, subfield[pos_node]) elif np.size(subfield) == numberofelements1: setattr(fieldr, solutionsubfield, subfield[pos_elem]) else: setattr(fieldr, solutionsubfield, subfield) else: getattr(md2.results, solutionfield).append(None) elif isinstance(field, results): setattr(md2.results, solutionfield, results()) if isinstance(field, results) and field: fieldr = getattr(md2.results, solutionfield) #get subfields for solutionsubfield, subfield in field.__dict__.iteritems( ): if np.size(subfield) == numberofvertices1: setattr(fieldr, solutionsubfield, subfield[pos_node]) elif np.size(subfield) == numberofelements1: setattr(fieldr, solutionsubfield, subfield[pos_elem]) else: setattr(fieldr, solutionsubfield, subfield) #Keep track of pos_node and pos_elem md2.mesh.extractedvertices = pos_node + 1 md2.mesh.extractedelements = pos_elem + 1 return md2
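#Sketch of the renumbering trick used by extract, on made-up toy data rather than an
#ISSM model: Pnode maps old 1-based vertex numbers to new ones, with 0 meaning "not kept".
import numpy as np

elements = np.array([[1, 2, 3],
                     [2, 3, 4]])                          #1-based connectivity, 2 triangles
flag_elem = np.array([True, False])                       #keep only the first triangle

pos_elem = np.nonzero(flag_elem)[0]
pos_node = np.unique(elements[pos_elem, :]) - 1           #0-based kept vertices

Pnode = np.zeros(4, int)
Pnode[pos_node] = np.arange(1, np.size(pos_node) + 1)     #old -> new (1-based)

elements_2 = elements[pos_elem, :].copy()
for col in range(3):
    elements_2[:, col] = Pnode[elements_2[:, col] - 1]    #renumber kept connectivity
print(elements_2)    #[[1 2 3]]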
def issmscpout(host, path, login, port, packages):
    """
    ISSMSCPOUT send packages to a host, using scp on unix, and pscp on windows

    usage: issmscpout(host,path,packages)
    """

    #get hostname
    hostname = gethostname()

    #if hostname and host are the same, do a simple copy
    if m.strcmpi(host, hostname):
        for package in packages:
            here = os.getcwd()
            os.chdir(path)
            try:
                os.remove(package)
            except OSError as e:
                pass
            subprocess.call('ln -s %s %s' % (os.path.join(here, package), path), shell=True)
            os.chdir(here)
    else:
        if m.ispc():
            #use the putty project pscp.exe: it should be in the path.

            #get ISSM_DIR variable
            if 'ISSM_DIR_WIN' in os.environ:
                ISSM_DIR = os.environ['ISSM_DIR_WIN'][1:-2]
            else:
                raise OSError("issmscpout error message: could not find ISSM_DIR_WIN environment variable.")

            username = input('Username: (quoted string) ')
            key = input('Key: (quoted string) ')

            for package in packages:
                try:
                    subprocess.check_call('%s/externalpackages/ssh/pscp.exe -l "%s" -pw "%s" %s %s:%s' % (ISSM_DIR, username, key, package, host, path), shell=True)
                except CalledProcessError as e:
                    raise RuntimeError("issmscpout error message: could not call putty pscp.")
        else:
            #just use standard unix scp
            #create string of packages being sent
            string = ''
            for package in packages:
                string += ' ' + package
            string += ' '

            if port:
                subprocess.call('scp -P %d %s %s@localhost:%s' % (port, string, login, path), shell=True)
            else:
                subprocess.call('scp %s %s@%s:%s' % (string, login, host, path), shell=True)
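#Small sketch of the unix branch above with assumed example values (host, login, path
#and port are made up): when a port is given the copy targets localhost, i.e. an ssh
#tunnel to the remote machine is expected to be in place.
packages = ['model.bin', 'model.queue']
login, host, path, port = 'user', 'remote.example.org', '/scratch/run', 1022

string = ' '.join(packages)
if port:
    command = 'scp -P %d %s %s@localhost:%s' % (port, string, login, path)
else:
    command = 'scp %s %s@%s:%s' % (string, login, host, path)
print(command)    #scp -P 1022 model.bin model.queue user@localhost:/scratch/run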
def expread(filename):
    """
    EXPREAD - read a file exp and build a Structure

    This routine reads a file .exp and builds a list of dicts containing the
    fields x and y corresponding to the coordinates, one for the filename of
    the exp file, for the density, for the nodes, and a field closed to
    indicate if the domain is closed.

    Usage:
        contours=expread(filename)

    Example:
        contours=expread('domainoutline.exp')

    See also EXPDOC, EXPWRITEASVERTICES
    """

    #some checks
    if not os.path.exists(filename):
        raise OSError("expread error message: file '%s' not found!" % filename)

    #initialize list of profiles
    contours = []

    #open file
    fid = open(filename, 'r')

    #loop over the number of profiles
    while True:
        #update number of profiles
        contour = OrderedDict()

        #Get file name
        A = fid.readline()
        while A == '\n':
            A = fid.readline()
        if not A:
            break
        A = A.split(None, 1)
        if not (len(A) == 2 and m.strcmp(A[0], '##') and m.strncmp(A[1], 'Name:', 5)):
            break
        if len(A[1]) > 5:
            contour['name'] = A[1][5:-1]
        else:
            contour['name'] = ''

        #Get Icon
        A = fid.readline().split(None, 1)
        if not (len(A) == 2 and m.strcmp(A[0], '##') and m.strncmp(A[1], 'Icon:', 5)):
            break

        #Get Info
        A = fid.readline().split()
        if not (len(A) == 4 and m.strcmp(A[0], '#') and m.strcmp(A[1], 'Points')):
            break

        #Get number of nodes and density
        A = fid.readline().split()
        contour['nods'] = int(A[0])
        contour['density'] = float(A[1])

        #Get Info
        A = fid.readline().split()
        if not (len(A) == 5 and m.strcmp(A[0], '#') and m.strcmp(A[1], 'X') and m.strcmp(A[2], 'pos')
                and m.strcmp(A[3], 'Y') and m.strcmp(A[4], 'pos')):
            break

        #Get Coordinates
        contour['x'] = np.empty(contour['nods'])
        contour['y'] = np.empty(contour['nods'])
        for i in xrange(int(contour['nods'])):
            A = fid.readline().split()
            contour['x'][i] = float(A[0])
            contour['y'][i] = float(A[1])

        #Check if closed
        if (contour['nods'] > 1) and \
           (contour['x'][-1] == contour['x'][0]) and \
           (contour['y'][-1] == contour['y'][0]):
            contour['closed'] = True
        else:
            contour['closed'] = False

        contours.append(contour)

    #close file
    fid.close()

    return contours
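#A minimal Argus .exp file matching the header fields parsed above (the profile name
#and coordinates are made up); writing it and reading it back shows the expected layout.
example = """## Name:domainoutline
## Icon:0
# Points Count Value
4 1.
# X pos Y pos
0. 0.
1. 0.
0. 1.
0. 0.
"""
with open('domainoutline.exp', 'w') as fid:
    fid.write(example)

contours = expread('domainoutline.exp')
print("%s %d %s" % (contours[0]['name'], contours[0]['nods'], contours[0]['closed']))
#expected: domainoutline 4 True (closed because last point repeats the first)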