Example #1
    def __init__(self, *args):  # {{{

        self.name = ''
        self.login = ''
        self.hostname = ''
        self.np = 1
        self.port = 0
        self.interactive = 1
        self.codepath = IssmConfig('ISSM_PREFIX')[0] + '/bin'
        self.executionpath = issmdir() + '/execution'
        self.valgrind = issmdir(
        ) + '/externalpackages/valgrind/install/bin/valgrind'
        self.valgrindlib = issmdir(
        ) + '/externalpackages/valgrind/install/lib/libmpidebug.so'
        self.valgrindsup = issmdir() + '/externalpackages/valgrind/issm.supp'

        #use provided options to change fields
        options = pairoptions(*args)

        #get name
        self.hostname = 'ollie0'  #socket.gethostname()

        #initialize cluster using user settings if provided
        if os.path.exists(self.name + '_settings.py'):
            execfile(self.name + '_settings.py', globals())

        #OK get other fields
        self = options.AssignObjectFields(self)
Example #2
    def __init__(self, *args):  # {{{
        self.name = ''
        self.type = ''
        self.fos_reverse_index = float('NaN')
        self.exp = ''
        self.segments = []
        self.index = -1
        self.nods = 0

        #set defaults
        self.setdefaultparameters()

        #use provided options to change fields
        options = pairoptions(*args)

        self.name = options.getfieldvalue('name', '')
        self.type = options.getfieldvalue('type', '')
        self.exp = options.getfieldvalue('exp', '')
        self.segments = options.getfieldvalue('segments', [])
        self.index = options.getfieldvalue('index', -1)
        self.nods = options.getfieldvalue('nods', 0)

        #if name is mass flux:
        if strcmpi(self.name, 'MassFlux'):
            #make sure that we supplied a file and that it exists!
            if not os.path.exists(self.exp):
                raise IOError(
                    "dependent checkconsistency: specified 'exp' file does not exist!"
                )
            #process the file and retrieve segments
            mesh = options.getfieldvalue('mesh')
            self.segments = MeshProfileIntersection(mesh.elements, mesh.x,
                                                    mesh.y, self.exp)[0]
Example #3
def asmoptions(*args):
    #ASMOPTIONS - return ASM petsc options
    #
    #   Usage:
    #      options=asmoptions;

    #retrieve options provided in varargin
    arguments = pairoptions.pairoptions(*args)

    options = [['toolkit', 'petsc'],
               ['mat_type', 'aij'], ['ksp_type', 'gmres'], ['pc_type', 'asm'],
               ['sub_pc_type', 'lu'], ['pc_asm_overlap',
                                       3], ['ksp_max_it', 100],
               ['ksp_rtol', 1e-30]]

    #now, go through our arguments, and write over default options.
    for i in range(len(arguments.list)):
        arg1 = arguments.list[i][0]
        arg2 = arguments.list[i][1]
        found = 0
        for j in range(len(options)):
            if options[j][0] == arg1:
                #option already exists: override its default value
                options[j][1] = arg2
                found = 1
                break
        if not found:
            #this option did not exist, add it:
            options.append([arg1, arg2])
    return options
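
A minimal usage sketch (hypothetical call, assuming asmoptions and its pairoptions dependency are importable): the loop above overrides a matching default in place and appends any pair that is not already a default.

# hypothetical usage of asmoptions from the example above
opts = asmoptions('pc_asm_overlap', 5, 'ksp_monitor', 'true')
assert ['pc_asm_overlap', 5] in opts    # default value 3 was overridden in place
assert ['ksp_monitor', 'true'] in opts  # unknown option appended at the end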
Example #4
def issmmumpssolver(*args):
    #ISSMMUMPSSOLVER - return ISSM MUMPS solver options
    #
    #   Usage:
    #      options=issmmumpssolver;

    #retrieve options provided in varargin
    arguments = pairoptions.pairoptions(*args)

    options = OrderedDict()
    options['toolkit'] = 'issm'
    options['mat_type'] = 'mpidense'
    options['vec_type'] = 'mpi'
    options['solver_type'] = 'mumps'

    #now, go through our arguments, and write over default options.
    for i in range(len(arguments.list)):
        arg1 = arguments.list[i][0]
        arg2 = arguments.list[i][1]
        #options is an OrderedDict: assignment overrides an existing default
        #or adds the option if it is not there yet
        options[arg1] = arg2
    return options
Example #5
def matlaboptions(*args):
    #MATLABOPTIONS - return Matlab petsc options
    #
    #   Usage:
    #      options=matlaboptions;

    #retrieve options provided in varargin
    arguments = pairoptions.pairoptions(*args)

    options = [['toolkit', 'petsc'], ['ksp_type', 'matlab']]

    #now, go through our arguments, and write over default options.
    for i in range(len(arguments.list)):
        arg1 = arguments.list[i][0]
        arg2 = arguments.list[i][1]
        found = 0
        for j in range(len(options)):
            if options[j][0] == arg1:
                #option already exists: override its default value
                options[j][1] = arg2
                found = 1
                break
        if not found:
            #this option did not exist, add it:
            options.append([arg1, arg2])
    return options
Example #6
    def __init__(self, *args):  # {{{

        self.name = ''
        self.definitionstring = ''
        self.profilename = ''
        self.segments = float('NaN')

        #set defaults
        self.setdefaultparameters()

        #use provided options to change fields
        options = pairoptions(*args)

        #OK get other fields
        self = options.AssignObjectFields(self)
Example #7
    def __init__(self, *args):  # {{{
        self.name = ''
        self.type = ''
        self.fos_forward_index = float('NaN')
        self.fov_forward_indices = np.array([])
        self.nods = 0

        #set defaults
        self.setdefaultparameters()

        #use provided options to change fields
        options = pairoptions(*args)

        #OK get other fields
        self = options.AssignObjectFields(self)
Example #8
    def __init__(self, *args):
        # {{{
        self.name = 'cyclone'
        self.login = ''
        self.np = 2
        self.time = 100
        self.codepath = ''
        self.executionpath = ''
        self.port = ''
        self.interactive = 0

        #use provided options to change fields
        options = pairoptions(*args)

        #initialize cluster using user settings if provided
        self = cyclone_settings(self)
        #OK get other fields
        self = options.AssignObjectFields(self)
Example #9
    def __init__(self, *args):  # {{{
        self._currentstep = 0
        self.repository = './'
        self.prefix = 'model.'
        self.trunkprefix = ''
        self.steps = []
        self.requestedsteps = [0]

        #process options
        options = pairoptions.pairoptions(*args)

        #Get prefix
        prefix = options.getfieldvalue('prefix', 'model.')
        if not isinstance(prefix, (str, unicode)):
            raise TypeError("prefix is not a string")
        if not m.strcmp(prefix, prefix.strip()) or len(prefix.split()) > 1:
            raise TypeError("prefix should not have any white space")
        self.prefix = prefix

        #Get repository
        repository = options.getfieldvalue('repository', './')
        if not isinstance(repository, (str, unicode)):
            raise TypeError("repository is not a string")
        if not os.path.isdir(repository):
            raise IOError("Directory '%s' not found" % repository)
        self.repository = repository

        #Get steps
        self.requestedsteps = options.getfieldvalue('steps', [0])

        #Get trunk prefix (only if provided by user)
        if options.exist('trunkprefix'):
            trunkprefix = options.getfieldvalue('trunkprefix', '')
            if not isinstance(trunkprefix, (str, unicode)):
                raise TypeError("trunkprefix is not a string")
            if not m.strcmp(trunkprefix, trunkprefix.strip()) or len(
                    trunkprefix.split()) > 1:
                raise TypeError("trunkprefix should not have any white space")
            self.trunkprefix = trunkprefix
Example #10
    def __init__(self, *args):
        # {{{
        self.name = 'hexagon'
        self.login = ''
        self.numnodes = 2
        self.procspernodes = 32
        self.mem = 32000
        self.queue = 'batch'
        self.time = 2 * 60
        self.codepath = ''
        self.executionpath = ''
        self.interactive = 0
        self.port = []
        self.accountname = ''

        #use provided options to change fields
        options = pairoptions(*args)

        #initialize cluster using user settings if provided
        self = hexagon_settings(self)

        #OK get other fields
        self = options.AssignObjectFields(self)
        self.np = self.numnodes * self.procspernodes
Example #11
def mumpsoptions(*args):
    """
	MUMPSOPTIONS - return MUMPS direct solver  petsc options

	   Usage:
	      options=mumpsoptions;
	"""

    #retrieve options provided in varargin
    options = pairoptions(*args)
    mumps = OrderedDict()

    #default mumps options
    PETSC_VERSION = IssmConfig('_PETSC_MAJOR_')[0]
    if PETSC_VERSION == 2.:
        mumps['toolkit'] = 'petsc'
        mumps['mat_type'] = options.getfieldvalue('mat_type', 'aijmumps')
        mumps['ksp_type'] = options.getfieldvalue('ksp_type', 'preonly')
        mumps['pc_type'] = options.getfieldvalue('pc_type', 'lu')
        mumps['mat_mumps_icntl_14'] = options.getfieldvalue(
            'mat_mumps_icntl_14', 120)
        mumps['pc_factor_shift_positive_definite'] = options.getfieldvalue(
            'pc_factor_shift_positive_definite', 'true')
    if PETSC_VERSION == 3.:
        mumps['toolkit'] = 'petsc'
        mumps['mat_type'] = options.getfieldvalue('mat_type', 'mpiaij')
        mumps['ksp_type'] = options.getfieldvalue('ksp_type', 'preonly')
        mumps['pc_type'] = options.getfieldvalue('pc_type', 'lu')
        mumps['pc_factor_mat_solver_package'] = options.getfieldvalue(
            'pc_factor_mat_solver_package', 'mumps')
        mumps['mat_mumps_icntl_14'] = options.getfieldvalue(
            'mat_mumps_icntl_14', 120)
        mumps['pc_factor_shift_positive_definite'] = options.getfieldvalue(
            'pc_factor_shift_positive_definite', 'true')

    return mumps
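
A brief usage sketch (hypothetical call): every default in mumpsoptions is fetched through getfieldvalue, so passing the same key as a pair option replaces it, while unspecified keys keep their defaults.

# hypothetical usage of mumpsoptions from the example above (PETSc 3.x defaults)
mumps = mumpsoptions('mat_mumps_icntl_14', 200)
# mumps['mat_mumps_icntl_14'] is now 200 instead of the default 120;
# mumps['ksp_type'] keeps its default 'preonly'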
Example #12
def WriteData(fid, prefix, *args):
    """
	WRITEDATA - write model field in binary file
 
	   Usage:
	      WriteData(fid,prefix,varargin)
	"""

    #process options
    options = pairoptions(*args)

    #Get data properties
    if options.exist('object'):
        #This is an object field, construct enum and data
        obj = options.getfieldvalue('object')
        fieldname = options.getfieldvalue('fieldname')
        classname = options.getfieldvalue(
            'class',
            str(type(obj)).rsplit('.')[-1].split("'")[0])
        name = options.getfieldvalue('name', prefix + '.' + fieldname)
        if options.exist('data'):
            data = options.getfieldvalue('data')
        else:
            data = getattr(obj, fieldname)
    else:
        #No processing required
        data = options.getfieldvalue('data')
        name = options.getfieldvalue('name')

    format = options.getfieldvalue('format')
    mattype = options.getfieldvalue('mattype', 0)  #only required for matrices
    timeserieslength = options.getfieldvalue('timeserieslength', -1)

    #Process sparse matrices
    #	if issparse(data),
    #		data=full(data);
    #	end

    #Scale data if necessary
    if options.exist('scale'):
        scale = options.getfieldvalue('scale')
        if np.size(data) > 1:
            if np.size(data, 0) == timeserieslength:
                data = np.array(data)
                data[0:-1, :] = scale * data[0:-1, :]
            else:
                data = scale * data
        else:
            data = scale * data
    if np.size(data) > 1:
        if np.size(data, 0) == timeserieslength:
            yts = options.getfieldvalue('yts')
            data[-1, :] = yts * data[-1, :]

    #Step 1: write the enum to identify this record uniquely
    fid.write(struct.pack('i', len(name)))
    fid.write(struct.pack('%ds' % len(name), name))

    #Step 2: write the data itself.
    if m.strcmpi(format, 'Boolean'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 bool (disguised as an int)+code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack(
            'i', int(data)))  #send an int, not easy to send a bool
        # }}}

    elif m.strcmpi(format, 'Integer'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 integer + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', data))
        # }}}

    elif m.strcmpi(format, 'Double'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 8 + 4))  #1 double+code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write double
        fid.write(struct.pack('d', data))
        # }}}

    elif m.strcmpi(format, 'String'):  # {{{
        #first write length of record
        fid.write(struct.pack('i',
                              len(data) + 4 + 4))  #string + string size + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write string
        fid.write(struct.pack('i', len(data)))
        fid.write(struct.pack('%ds' % len(data), data))
        # }}}

    elif m.strcmpi(format, 'BooleanMat'):  # {{{

        if isinstance(data, bool):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 2 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack(
            'i', 4 + 4 + 8 * np.product(s) + 4 +
            4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(
                    data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(
                        data[i]
                        [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'IntMat'):  # {{{

        if isinstance(data, (int, long)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 2 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack(
            'i', 4 + 4 + 8 * np.product(s) + 4 +
            4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(
                    data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(
                        data[i]
                        [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'DoubleMat'):  # {{{

        if isinstance(data, (bool, int, long, float)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 1 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)

        #first write length of record
        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4
        #2 integers (32 bits) + the double matrix + code + matrix type
        if recordlength > 4**31:
            raise ValueError(
                'field %s cannot be marshalled because it is larger than 4^31 bytes!'
                % name)

        fid.write(
            struct.pack('i', recordlength)
        )  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(
                    data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(
                        data[i]
                        [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'CompressedMat'):  # {{{

        if isinstance(data, (bool, int, long, float)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        if np.ndim(data) == 1:
            n2 = 1
        else:
            n2 = s[1]

        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 1 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)
            n2 = 0

        #first write length of record
        recordlength = 4 + 4 + 8 + 8 + 1 * (
            s[0] - 1
        ) * n2 + 8 * n2 + 4 + 4  #2 integers (32 bits) + the matrix + code + matrix type
        if recordlength > 4**31:
            raise ValueError(
                'field %s cannot be marshalled because it is larger than 4^31 bytes!'
                % name)

        fid.write(struct.pack('i', recordlength)
                  )  #2 integers (32 bits) + the matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #Write offset and range
        A = data[0:s[0] - 1]
        offsetA = A.min()
        rangeA = A.max() - offsetA

        if rangeA == 0:
            A = A * 0
        else:
            A = (A - offsetA) / rangeA * 255.

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            fid.write(struct.pack('d', float(offsetA)))
            fid.write(struct.pack('d', float(rangeA)))
            for i in xrange(s[0] - 1):
                fid.write(struct.pack('B', int(A[i])))

            fid.write(struct.pack('d', float(
                data[s[0] -
                     1])))  #get to the "c" convention, hence the transpose

        elif np.product(s) > 0:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            fid.write(struct.pack('d', float(offsetA)))
            fid.write(struct.pack('d', float(rangeA)))
            for i in xrange(s[0] - 1):
                for j in xrange(s[1]):
                    fid.write(struct.pack('B', int(
                        A[i]
                        [j])))  #get to the "c" convention, hence the transpose

            for j in xrange(s[1]):
                fid.write(struct.pack('d', float(data[s[0] - 1][j])))

        # }}}

    elif m.strcmpi(format, 'MatArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #number of records + code
        for matrix in data:
            if isinstance(matrix, (bool, int, long, float)):
                matrix = np.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = np.array(matrix).reshape(-1, )
            if np.ndim(matrix) == 1:
                if np.size(matrix):
                    matrix = matrix.reshape(np.size(matrix), )
                else:
                    matrix = matrix.reshape(0, 0)

            s = matrix.shape
            recordlength += 4 * 2 + np.product(
                s) * 8  #row and col of matrix + matrix of doubles

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #write data, first number of records
        fid.write(struct.pack('i', len(data)))

        #write each matrix:
        for matrix in data:
            if isinstance(matrix, (bool, int, long, float)):
                matrix = np.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = np.array(matrix).reshape(-1, )
            if np.ndim(matrix) == 1:
                matrix = matrix.reshape(np.size(matrix), )

            s = matrix.shape
            if np.ndim(data) == 1:
                fid.write(struct.pack('i', s[0]))
                fid.write(struct.pack('i', 1))
                for i in xrange(s[0]):
                    fid.write(
                        struct.pack('d', float(matrix[i]))
                    )  #get to the "c" convention, hence the transpose
            else:
                fid.write(struct.pack('i', s[0]))
                fid.write(struct.pack('i', s[1]))
                for i in xrange(s[0]):
                    for j in xrange(s[1]):
                        fid.write(struct.pack('d', float(matrix[i][j])))
        # }}}

    elif m.strcmpi(format, 'StringArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #for length of array + code
        for string in data:
            recordlength += 4 + len(string)  #for each string

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write length of string array
        fid.write(struct.pack('i', len(data)))

        #now write the strings
        for string in data:
            fid.write(struct.pack('i', len(string)))
            fid.write(struct.pack('%ds' % len(string), string))
        # }}}

    else:  # {{{
        raise TypeError(
            'WriteData error message: data type: %s not supported yet! (%s)' %
            (format, name))
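
Every record written above uses the same framing: the name length and name, then a record length that counts the 4-byte data code plus the payload, then the code, then the payload. A minimal reader sketch built on that observation (walk_records is a hypothetical helper, not part of ISSM):

import struct

def walk_records(fid):
    #hypothetical helper: iterate over records produced by WriteData above
    #layout: [int32 len(name)][name][int32 recordlength][int32 code][recordlength - 4 bytes of payload]
    while True:
        raw = fid.read(4)
        if len(raw) < 4:
            return  #end of file
        namelen, = struct.unpack('i', raw)
        name = fid.read(namelen)
        reclen, = struct.unpack('i', fid.read(4))
        code, = struct.unpack('i', fid.read(4))
        payload = fid.read(reclen - 4)  #recordlength includes the 4-byte code
        yield name, code, payload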
Example #13
def bamg(md,*args):
	"""
	BAMG - mesh generation

	   Available options (for more details see ISSM website http://issm.jpl.nasa.gov/):

	   - domain :            followed by an ARGUS file that prescribes the domain outline
	   - hmin :              minimum edge length (default is 10^-100)
	   - hmax :              maximum edge length (default is 10^100)
	   - hVertices :         imposed edge length for each vertex (geometry or mesh)
	   - hminVertices :      minimum edge length for each vertex (mesh)
	   - hmaxVertices :      maximum edge length for each vertex (mesh)

	   - anisomax :          maximum ratio between the smallest and largest edges (default is 10^30)
	   - coeff :             coefficient applied to the metric (2-> twice as many elements, default is 1)
	   - cutoff :            scalar used to compute the metric when metric type 2 or 3 are applied
	   - err :               error used to generate the metric from a field
	   - errg :              geometric error (default is 0.1)
	   - field :             field of the model that will be used to compute the metric
	                         to apply several fields, use one column per field
	   - gradation :         maximum ratio between two adjacent edges
	   - Hessiantype :       0 -> use double P2 projection (default)
	                         1 -> use Green formula
	   - KeepVertices :      try to keep initial vertices when adaptation is done on an existing mesh (default 1)
	   - MaxCornerAngle :    maximum angle of corners in degree (default is 10)
	   - maxnbv :            maximum number of vertices used to allocate memory (default is 10^6)
	   - maxsubdiv :         maximum subdivision of existing elements (default is 10)
	   - metric :            matrix (numberofnodes x 3) used as a metric
	   - Metrictype :        1 -> absolute error          c/(err coeff^2) * Abs(H)        (default)
	                         2 -> relative error          c/(err coeff^2) * Abs(H)/max(s,cutoff*max(s))
	                         3 -> rescaled absolute error c/(err coeff^2) * Abs(H)/(smax-smin)
	   - nbjacobi :          correction used by Hessiantype=1 (default is 1)
	   - nbsmooth :          number of metric smoothing procedure (default is 3)
	   - omega :             relaxation parameter of the smoothing procedure (default is 1.8)
	   - power :             power applied to the metric (default is 1)
	   - splitcorners :      split triangles which have 3 vertices on the outline (default is 1)
	   - geometricalmetric : take the geometry into account to generate the metric (default is 0)
	   - verbose :           level of verbosity (default is 1)

	   - rifts :             followed by an ARGUS file that prescribes the rifts
	   - toltip :            tolerance to move tip on an existing point of the domain outline
	   - tracks :            followed by an ARGUS file that prescribes the tracks that the mesh will stick to
	   - RequiredVertices :  mesh vertices that are required. [x,y,ref]; ref is optional
	   - tol :               if the distance between 2 points of the domain outline is less than tol, they
	                         will be merged

	   Examples:
	      md=bamg(md,'domain','DomainOutline.exp','hmax',3000);
	      md=bamg(md,'field',[md.inversion.vel_obs md.geometry.thickness],'hmax',20000,'hmin',1000);
	      md=bamg(md,'metric',A,'hmin',1000,'hmax',20000,'gradation',3,'anisomax',1);
	"""

	#process options
	options=pairoptions(*args)
#	options=deleteduplicates(options,1);

	#initialize the structures required as input of Bamg
	bamg_options=OrderedDict()
	bamg_geometry=bamggeom()
	bamg_mesh=bamgmesh()

	# Bamg Geometry parameters {{{
	if options.exist('domain'):

		#Check that file exists
		domainfile=options.getfieldvalue('domain')
		if not os.path.exists(domainfile):
			raise IOError("bamg error message: file '%s' not found" % domainfile)
		domain=expread(domainfile)

		#Build geometry 
		count=0
		for i,domaini in enumerate(domain):

			#Check that the domain is closed
			if (domaini['x'][0]!=domaini['x'][-1] or domaini['y'][0]!=domaini['y'][-1]):
				raise RuntimeError("bamg error message: all contours provided in ''domain'' should be closed")

			#Checks that all holes are INSIDE the principal domain outline
			if i:
				flags=ContourToNodes(domaini['x'],domaini['y'],domainfile,0)[0]
				if np.any(np.logical_not(flags)):
					raise RuntimeError("bamg error message: All holes should be strictly inside the principal domain")

			#Add all points to bamg_geometry
			nods=domaini['nods']-1    #the contour is closed: its last point duplicates point 0
			bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,np.vstack((domaini['x'][0:nods],domaini['y'][0:nods],np.ones((nods)))).T))
			bamg_geometry.Edges   =np.vstack((bamg_geometry.Edges,np.vstack((np.arange(count+1,count+nods+1),np.hstack((np.arange(count+2,count+nods+1),count+1)),1.*np.ones((nods)))).T))
			if i:
				bamg_geometry.SubDomains=np.vstack((bamg_geometry.SubDomains,[2,count+1,1,1]))

			# bamg_geometry.Vertices=np.hstack((bamg_geometry.Vertices,np.vstack((domaini['x'][0:nods].reshape(-1),domaini['y'][0:nods].reshape(-1),np.ones((nods))))))
			# bamg_geometry.Edges   =np.vstack((bamg_geometry.Edges,np.hstack((np.arange(count+1,count+nods+1).reshape(-1,),np.hstack((np.arange(count+2,count+nods+1),count+1)).reshape(-1,),1.*np.ones((nods,))))))
			# if i:
			# 	bamg_geometry.SubDomains=np.vstack((bamg_geometry.SubDomains,[2,count+1,1,1]))

			#update counter
			count+=nods

		#take care of rifts
		if options.exist('rifts'):

			#Check that file exists
			riftfile=options.getfieldvalue('rifts')
			if not os.path.exists(riftfile):
				raise IOError("bamg error message: file '%s' not found" % riftfile)
			rift=expread(riftfile)

			for i,rifti in enumerate(rift):

				#detect whether all points of the rift are inside the domain
				flags=ContourToNodes(rifti['x'],rifti['y'],domain[0],0)[0]
				if np.all(np.logical_not(flags)):
					raise RuntimeError("one rift has all its points outside of the domain outline")

				elif np.any(np.logical_not(flags)):
					#We have LOTS of work to do
					print "Rift tip outside of or on the domain has been detected and is being processed..."

					#check that only one point is outside (for now)
					if np.sum(np.logical_not(flags).astype(int))!=1:
						raise RuntimeError("bamg error message: only one point outside of the domain is supported yet")

					#Move tip outside to the first position
					if   not flags[0]:
						#OK, first point is outside (do nothing),
						pass
					elif not flags[-1]:
						rifti['x']=np.flipud(rifti['x'])
						rifti['y']=np.flipud(rifti['y'])
					else:
						raise RuntimeError("bamg error message: only a rift tip can be outside of the domain")

					#Get coordinate of intersection point
					x1=rifti['x'][0]
					y1=rifti['y'][0]
					x2=rifti['x'][1]
					y2=rifti['y'][1]
					for j in xrange(0,np.size(domain[0]['x'])-1):
						if SegIntersect(np.array([[x1,y1],[x2,y2]]),np.array([[domain[0]['x'][j],domain[0]['y'][j]],[domain[0]['x'][j+1],domain[0]['y'][j+1]]])):

							#Get position of the two nodes of the edge in domain
							i1=j
							i2=j+1

							#rift is crossing edge [i1 i2] of the domain
							#Get coordinate of intersection point (http://mathworld.wolfram.com/Line-LineIntersection.html)
							x3=domain[0]['x'][i1]
							y3=domain[0]['y'][i1]
							x4=domain[0]['x'][i2]
							y4=domain[0]['y'][i2]
#							x=det([det([x1 y1; x2 y2])  x1-x2;det([x3 y3; x4 y4])  x3-x4])/det([x1-x2 y1-y2;x3-x4 y3-y4]);
#							y=det([det([x1 y1; x2 y2])  y1-y2;det([x3 y3; x4 y4])  y3-y4])/det([x1-x2 y1-y2;x3-x4 y3-y4]);
							x=np.linalg.det(np.array([[np.linalg.det(np.array([[x1,y1],[x2,y2]])),x1-x2],[np.linalg.det(np.array([[x3,y3],[x4,y4]])),x3-x4]]))/np.linalg.det(np.array([[x1-x2,y1-y2],[x3-x4,y3-y4]]))
							y=np.linalg.det(np.array([[np.linalg.det(np.array([[x1,y1],[x2,y2]])),y1-y2],[np.linalg.det(np.array([[x3,y3],[x4,y4]])),y3-y4]]))/np.linalg.det(np.array([[x1-x2,y1-y2],[x3-x4,y3-y4]]))

							segdis= sqrt((x4-x3)**2+(y4-y3)**2)
							tipdis=np.array([sqrt((x-x3)**2+(y-y3)**2),sqrt((x-x4)**2+(y-y4)**2)])

							if np.min(tipdis)/segdis < options.getfieldvalue('toltip',0):
								print "moving tip-domain intersection point"

								#Get position of the closer point
								if tipdis[0]>tipdis[1]:
									pos=i2
								else:
									pos=i1

								#This point is only in Vertices (number pos).
								#OK, now we can add our own rift
								nods=rifti['nods']-1
								bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,np.vstack((rifti['x'][1:],rifti['y'][1:],np.ones((nods,)))).T))
								bamg_geometry.Edges=np.vstack((bamg_geometry.Edges,\
									np.array([[pos,count+1,(1+i)]]),\
									np.vstack((np.arange(count+1,count+nods),np.arange(count+2,count+nods+1),(1+i)*np.ones((nods-1,)))).T))
								count+=nods

								break

							else:
								#Add intersection point to Vertices
								bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,np.array([[x,y,1]])))
								count+=1

								#Decompose the crossing edge into 2 subedges
								pos=np.nonzero(np.logical_and(bamg_geometry.Edges[:,0]==i1,bamg_geometry.Edges[:,1]==i2))[0]
								if not pos:
									raise RuntimeError("bamg error message: a problem occurred...")
								bamg_geometry.Edges=np.vstack((bamg_geometry.Edges[0:pos-1,:],\
									np.array([[bamg_geometry.Edges[pos,0],count                     ,bamg_geometry.Edges[pos,2]]]),\
									np.array([[count                     ,bamg_geometry.Edges[pos,1],bamg_geometry.Edges[pos,2]]]),\
									bamg_geometry.Edges[pos+1:,:]))

								#OK, now we can add our own rift
								nods=rifti['nods']-1
								bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,np.vstack((rifti['x'][1:],rifti['y'][1:],np.ones((nods,)))).T))
								bamg_geometry.Edges=np.vstack((bamg_geometry.Edges,\
									np.array([[count,count+1,2]]),\
									np.vstack((np.arange(count+1,count+nods),np.arange(count+2,count+nods+1),(1+i)*np.ones((nods-1,)))).T))
								count+=nods

								break

				else:
					nods=rifti['nods']-1
					bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,np.vstack((rifti['x'],rifti['y'],np.ones((nods+1,)))).T))
					bamg_geometry.Edges   =np.vstack((bamg_geometry.Edges,np.vstack((np.arange(count+1,count+nods+1),np.arange(count+2,count+nods+2),(1+i)*np.ones((nods,)))).T))
					count+=nods+1

		#Deal with tracks
		if options.exist('tracks'):

			#read tracks
			track=options.getfieldvalue('tracks')
			if isinstance(track,(str,unicode)):
				A=expread(track)
				track=np.vstack((A.x.reshape(-1,),A.y.reshape(-1,))).T
			else:
				track=np.asarray(track,dtype=float)    #for some reason, it is of class "single"
			if np.size(track,axis=1)==2:
				track=np.hstack((track,3.*np.ones((np.size(track,axis=0),1))))

			#only keep those inside
			flags=ContourToNodes(track[:,0],track[:,1],domainfile,0)[0]
			track=track[np.nonzero(flags)[0],:]

			#Add all points to bamg_geometry
			nods=np.size(track,axis=0)
			bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,track))
			bamg_geometry.Edges   =np.vstack((bamg_geometry.Edges,np.vstack((np.arange(count+1,count+nods),np.arange(count+2,count+nods+1),3.*np.ones((nods-1,)))).T))

			#update counter
			count+=nods

		#Deal with vertices that need to be kept by mesher
		if options.exist('RequiredVertices'):

			#recover RequiredVertices
			requiredvertices=options.getfieldvalue('RequiredVertices')    #for some reason, it is of class "single"
			if np.size(requiredvertices,axis=1)==2:
				requiredvertices=np.hstack((requiredvertices,4.*np.ones((np.size(requiredvertices,axis=0),1))))

			#only keep those inside
			flags=ContourToNodes(requiredvertices[:,0],requiredvertices[:,1],domainfile,0)[0]
			requiredvertices=requiredvertices[np.nonzero(flags)[0],:]
			#Add all points to bamg_geometry
			nods=np.size(requiredvertices,axis=0)
			bamg_geometry.Vertices=np.vstack((bamg_geometry.Vertices,requiredvertices))

			#update counter
			count+=nods

		#process geom
		#bamg_geometry=processgeometry(bamg_geometry,options.getfieldvalue('tol',float(nan)),domain[0])

	elif isinstance(md.private.bamg,dict) and 'geometry' in md.private.bamg:
		bamg_geometry=bamggeom(md.private.bamg['geometry'].__dict__) 
	else:
		#do nothing...
		pass
	#}}}
	# Bamg Mesh parameters {{{
	if not options.exist('domain') and md.mesh.numberofvertices and m.strcmp(md.mesh.elementtype(),'Tria'):

		if isinstance(md.private.bamg,dict) and 'mesh' in md.private.bamg:
			bamg_mesh=bamgmesh(md.private.bamg['mesh'].__dict__)
		else:
			bamg_mesh.Vertices=np.vstack((md.mesh.x,md.mesh.y,np.ones((md.mesh.numberofvertices)))).T
			#bamg_mesh.Vertices=np.hstack((md.mesh.x.reshape(-1,),md.mesh.y.reshape(-1,),np.ones((md.mesh.numberofvertices,1))))
			bamg_mesh.Triangles=np.hstack((md.mesh.elements,np.ones((md.mesh.numberofelements,1))))

		if isinstance(md.rifts.riftstruct,dict):
			raise TypeError("bamg error message: rifts not supported yet. Do meshprocessrift AFTER bamg")
	#}}}
	# Bamg Options {{{
	bamg_options['Crack']=options.getfieldvalue('Crack',0)
	bamg_options['anisomax']=options.getfieldvalue('anisomax',10.**30)
	bamg_options['coeff']=options.getfieldvalue('coeff',1.)
	bamg_options['cutoff']=options.getfieldvalue('cutoff',10.**-5)
	bamg_options['err']=options.getfieldvalue('err',np.array([[0.01]]))
	bamg_options['errg']=options.getfieldvalue('errg',0.1)
	bamg_options['field']=options.getfieldvalue('field',np.empty((0,1)))
	bamg_options['gradation']=options.getfieldvalue('gradation',1.5)
	bamg_options['Hessiantype']=options.getfieldvalue('Hessiantype',0)
	bamg_options['hmin']=options.getfieldvalue('hmin',10.**-100)
	bamg_options['hmax']=options.getfieldvalue('hmax',10.**100)
	bamg_options['hminVertices']=options.getfieldvalue('hminVertices',np.empty((0,1)))
	bamg_options['hmaxVertices']=options.getfieldvalue('hmaxVertices',np.empty((0,1)))
	bamg_options['hVertices']=options.getfieldvalue('hVertices',np.empty((0,1)))
	bamg_options['KeepVertices']=options.getfieldvalue('KeepVertices',1)
	bamg_options['MaxCornerAngle']=options.getfieldvalue('MaxCornerAngle',10.)
	bamg_options['maxnbv']=options.getfieldvalue('maxnbv',10**6)
	bamg_options['maxsubdiv']=options.getfieldvalue('maxsubdiv',10.)
	bamg_options['metric']=options.getfieldvalue('metric',np.empty((0,1)))
	bamg_options['Metrictype']=options.getfieldvalue('Metrictype',0)
	bamg_options['nbjacobi']=options.getfieldvalue('nbjacobi',1)
	bamg_options['nbsmooth']=options.getfieldvalue('nbsmooth',3)
	bamg_options['omega']=options.getfieldvalue('omega',1.8)
	bamg_options['power']=options.getfieldvalue('power',1.)
	bamg_options['splitcorners']=options.getfieldvalue('splitcorners',1)
	bamg_options['geometricalmetric']=options.getfieldvalue('geometricalmetric',0)
	bamg_options['random']=options.getfieldvalue('rand',True)
	bamg_options['verbose']=options.getfieldvalue('verbose',1)
	#}}}

	#call Bamg
	bamgmesh_out,bamggeom_out=BamgMesher(bamg_mesh.__dict__,bamg_geometry.__dict__,bamg_options)

	# plug results onto model
	md.private.bamg=OrderedDict()
	md.private.bamg['mesh']=bamgmesh(bamgmesh_out)
	md.private.bamg['geometry']=bamggeom(bamggeom_out)
	md.mesh = mesh2d()
	md.mesh.x=bamgmesh_out['Vertices'][:,0].copy()
	md.mesh.y=bamgmesh_out['Vertices'][:,1].copy()
	md.mesh.elements=bamgmesh_out['Triangles'][:,0:3].astype(int)
	md.mesh.edges=bamgmesh_out['IssmEdges'].astype(int)
	md.mesh.segments=bamgmesh_out['IssmSegments'][:,0:3].astype(int)
	md.mesh.segmentmarkers=bamgmesh_out['IssmSegments'][:,3].astype(int)

	#Fill in rest of fields:
	md.mesh.numberofelements=np.size(md.mesh.elements,axis=0)
	md.mesh.numberofvertices=np.size(md.mesh.x)
	md.mesh.numberofedges=np.size(md.mesh.edges,axis=0)
	md.mesh.vertexonboundary=np.zeros(md.mesh.numberofvertices,bool)
	md.mesh.vertexonboundary[md.mesh.segments[:,0:2]-1]=True
	md.mesh.elementconnectivity=md.private.bamg['mesh'].ElementConnectivity
	md.mesh.elementconnectivity[np.nonzero(np.isnan(md.mesh.elementconnectivity))]=0
	md.mesh.elementconnectivity=md.mesh.elementconnectivity.astype(int)

	#Check for orphan
	if np.any(np.logical_not(np.in1d(np.arange(1,md.mesh.numberofvertices+1),md.mesh.elements.flat))):
		raise RuntimeError("Output mesh has orphans. Decrease MaxCornerAngle to prevent outside points (ex: 0.01)")

	return md
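
For reference, the MATLAB-style calls shown in the docstring translate directly to the Python pair-option form (hypothetical sketch; 'DomainOutline.exp' is the placeholder file name from the docstring):

#hypothetical call mirroring the first docstring example
md = bamg(md, 'domain', 'DomainOutline.exp', 'hmax', 3000.)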
Example #14
def checkfield(md,*args):
	"""
	CHECKFIELD - check field consistency

	   Used to check model consistency.
	   Requires: 
	   'field' or 'fieldname' option. If 'fieldname' is provided, it will retrieve it from the model md. (md.(fieldname)) 
             If 'field' is provided, it will assume the argument following 'field' is a numeric array.

	   Available options:
	      - NaN: 1 if check that there is no NaN
	      - size: [lines cols], NaN for non checked dimensions
	      - >:  greater than provided value
	      - >=: greater or equal to provided value
	      - <:  smaller than provided value
	      - <=: smaller or equal to provided value
	      - < vec:  smaller than provided values on each vertex
	      - timeseries: 1 if check time series consistency (size and time)
	      - values: cell of strings or vector of acceptable values
	      - numel: list of acceptable number of elements
	      - cell: 1 if check that is cell
	      - empty: 1 if check that non empty
	      - message: overloaded error message

	   Usage:
	      md = checkfield(md,fieldname,options);
	"""

	#get options
	options=pairoptions(*args)

	#get field from model
	if options.exist('field'):
		field=options.getfieldvalue('field')
		fieldname=options.getfieldvalue('fieldname','no fieldname')
	else:
		fieldname=options.getfieldvalue('fieldname') 
		exec("field=md.{}".format(fieldname))

	if isinstance(field,(bool,int,long,float)):
		field=np.array([field])

	#check empty
	if options.exist('empty'):
		if not field:
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' is empty" % fieldname))

	#Check size
	if options.exist('size'):
		fieldsize=options.getfieldvalue('size')
		if len(fieldsize) == 1:
			if np.isnan(fieldsize[0]):
				pass
			elif np.ndim(field)==1:
				if not np.size(field)==fieldsize[0]:
					md = md.checkmessage(options.getfieldvalue('message',"field {} size should be {}".format(fieldname,fieldsize[0])))
			else:
				try:
					 exec("md.{}=field[:,0]".format(fieldname))
					 print('{} had a bad dimension, we fixed it but you should check it'.format(fieldname))
				except IndexError:
					md = md.checkmessage(options.getfieldvalue('message',"field {} should have {} dimension".format(fieldname,len(fieldsize))))
		elif len(fieldsize) == 2:
			if   np.isnan(fieldsize[0]):
				if not np.size(field,1)==fieldsize[1]:
					md = md.checkmessage(options.getfieldvalue('message',"field '%s' should have %d columns" % (fieldname,fieldsize[1])))
			elif np.isnan(fieldsize[1]):
				if not np.size(field,0)==fieldsize[0]:
					md = md.checkmessage(options.getfieldvalue('message',"field '%s' should have %d lines" % (fieldname,fieldsize[0])))
			elif fieldsize[1]==1:
				if (not np.size(field,0)==fieldsize[0]):
					md = md.checkmessage(options.getfieldvalue('message',"field '%s' size should be %d x %d" % (fieldname,fieldsize[0],fieldsize[1])))
			else:
				if (not np.size(field,0)==fieldsize[0]) or (not np.size(field,1)==fieldsize[1]):
					md = md.checkmessage(options.getfieldvalue('message',"field '%s' size should be %d x %d" % (fieldname,fieldsize[0],fieldsize[1])))
	
	#Check numel
	if options.exist('numel'):
		fieldnumel=options.getfieldvalue('numel')
		if np.size(field) not in fieldnumel:
			if   len(fieldnumel)==1:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' size should be %d" % (fieldname,fieldnumel)))
			elif len(fieldnumel)==2:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' size should be %d or %d" % (fieldname,fieldnumel[0],fieldnumel[1])))
			else:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' size should be %s" % (fieldname,fieldnumel)))

	#check NaN
	if options.getfieldvalue('NaN',0):
		if np.any(np.isnan(field)):
			md = md.checkmessage(options.getfieldvalue('message',\
				"NaN values found in field '%s'" % fieldname))

	#check Inf
	if options.getfieldvalue('Inf',0):
		if np.any(np.isinf(field)):
			md = md.checkmessage(options.getfieldvalue('message',\
				"Inf values found in field '%s'" % fieldname))

	#check cell
	if options.getfieldvalue('cell',0):
		if not isinstance(field,(tuple,list,dict)):
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should be a cell" % fieldname))

	#check values
	if options.exist('values'):
		fieldvalues=options.getfieldvalue('values')
		if False in m.ismember(field,fieldvalues):
			if   len(fieldvalues)==1:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' value should be '%s'"  % (fieldname,fieldvalues[0])))
			elif len(fieldvalues)==2:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' values should be '%s' or '%s'"  % (fieldname,fieldvalues[0],fieldvalues[1])))
			else:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' should have values in %s" % (fieldname,fieldvalues)))

	#check greater
	if options.exist('>='):
		lowerbound=options.getfieldvalue('>=')
		if np.any(field<lowerbound):
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should have values above %d" % (fieldname,lowerbound)))
	if options.exist('>'):
		lowerbound=options.getfieldvalue('>')
		if np.any(field<=lowerbound):
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should have values above %d" % (fieldname,lowerbound)))

	#check smaller
	if options.exist('<='):
		upperbound=options.getfieldvalue('<=')
		if np.any(field>upperbound):
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should have values below %d" % (fieldname,upperbound)))
	if options.exist('<'):
		upperbound=options.getfieldvalue('<')
		if np.any(field>=upperbound):
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should have values below %d" % (fieldname,upperbound)))

	#check file
	if options.getfieldvalue('file',0):
		if not os.path.exists(field):
			md = md.checkmessage("file provided in '%s': '%s' does not exist" % (fieldname,field))

	#Check row of strings
	if options.exist('stringrow'):
		if not isinstance(field,list):
			md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' should be a list" %fieldname))

	#Check forcings (size and times)
	if options.getfieldvalue('timeseries',0):
		if   np.size(field,0)==md.mesh.numberofvertices:
			if np.ndim(field)>1 and not np.size(field,1)==1:
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' should have only one column as there are md.mesh.numberofvertices lines" % fieldname))
		elif np.size(field,0)==md.mesh.numberofvertices+1 or np.size(field,0)==2:
			if not all(field[-1,:]==np.sort(field[-1,:])):
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' columns should be sorted chronologically" % fieldname))
			if any(field[-1,0:-1]==field[-1,1:]):
				md = md.checkmessage(options.getfieldvalue('message',\
					"field '%s' columns must not contain duplicate timesteps" % fieldname))
		else:
			md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should have md.mesh.numberofvertices or md.mesh.numberofvertices+1 lines" % fieldname))

	#Check single value forcings (size and times)
	if options.getfieldvalue('singletimeseries',0):
		if np.size(field,0)==2:
			if not all(field[-1,:]==np.sort(field[-1,:])):
				md = md.checkmessage(options.getfieldvalue('message',\
						"field '%s' columns should be sorted chronologically" % fieldname))
			if any(field[-1,0:-1]==field[-1,1:]):
				md = md.checkmessage(options.getfieldvalue('message',\
						"field '%s' columns must not contain duplicate timesteps" % fieldname))
		else:
				md = md.checkmessage(options.getfieldvalue('message',\
				"field '%s' should have 2 lines" % fieldname))

	return md
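
A usage sketch following the option list in the docstring (hypothetical call; 'geometry.thickness' stands in for any model field): require a vertex-wise column vector with no NaN values and strictly positive entries.

#hypothetical checkfield call
md = checkfield(md, 'fieldname', 'geometry.thickness',
                'NaN', 1, 'size', [md.mesh.numberofvertices, 1], '>', 0)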
Example #15
def solve(md, solutionstring, *args):
    """
	SOLVE - apply solution sequence for this model
 
	   Usage:
	      md=solve(md,solutionstring,varargin)
	      where varargin is a list of paired arguments of string OR enums
 
		solution types available comprise:
		 - 'Stressbalance'    or 'sb'
		 - 'Masstransport'    or 'mt'
		 - 'Thermal'          or 'th'
		 - 'Steadystate'      or 'ss'
		 - 'Transient'        or 'tr'
		 - 'Balancethickness' or 'mc'
		 - 'Balancevelocity'  or 'bv'
		 - 'BedSlope'         or 'bsl'
		 - 'SurfaceSlope'     or 'ssl'
		 - 'Hydrology'        or 'hy'
		 - 'DamageEvolution'  or 'da'
		 - 'Gia'              or 'gia'
		 - 'Sealevelrise'     or 'slr'

	   extra options:
	      - loadonly : does not solve, only loads results
	      - checkconsistency : 'yes' or 'no' (default is 'yes'), ensures checks on consistency of model
	      - restart : directory name (relative to the execution directory) where the restart file is located
 
	   Examples:
	      md=solve(md,'Stressbalance');
         md=solve(md,'sb');
	"""

    #recover and process solve options
    if solutionstring.lower() == 'sb' or solutionstring.lower(
    ) == 'stressbalance':
        solutionstring = 'StressbalanceSolution'
    elif solutionstring.lower() == 'mt' or solutionstring.lower(
    ) == 'masstransport':
        solutionstring = 'MasstransportSolution'
    elif solutionstring.lower() == 'th' or solutionstring.lower() == 'thermal':
        solutionstring = 'ThermalSolution'
    elif solutionstring.lower() == 'ss' or solutionstring.lower(
    ) == 'steadystate':
        solutionstring = 'SteadystateSolution'
    elif solutionstring.lower() == 'tr' or solutionstring.lower(
    ) == 'transient':
        solutionstring = 'TransientSolution'
    elif solutionstring.lower() == 'mc' or solutionstring.lower(
    ) == 'balancethickness':
        solutionstring = 'BalancethicknessSolution'
    elif solutionstring.lower() == 'bv' or solutionstring.lower(
    ) == 'balancevelocity':
        solutionstring = 'BalancevelocitySolution'
    elif solutionstring.lower() == 'bsl' or solutionstring.lower(
    ) == 'bedslope':
        solutionstring = 'BedSlopeSolution'
    elif solutionstring.lower() == 'ssl' or solutionstring.lower(
    ) == 'surfaceslope':
        solutionstring = 'SurfaceSlopeSolution'
    elif solutionstring.lower() == 'hy' or solutionstring.lower(
    ) == 'hydrology':
        solutionstring = 'HydrologySolution'
    elif solutionstring.lower() == 'da' or solutionstring.lower(
    ) == 'damageevolution':
        solutionstring = 'DamageEvolutionSolution'
    elif solutionstring.lower() == 'gia':
        solutionstring = 'GiaSolution'
    elif solutionstring.lower() == 'slr' or solutionstring.lower(
    ) == 'sealevelrise':
        solutionstring = 'SealevelriseSolution'
    else:
        raise ValueError("solutionstring '%s' not supported!" % solutionstring)
    options = pairoptions('solutionstring', solutionstring, *args)

    #recover some fields
    md.private.solution = solutionstring
    cluster = md.cluster
    if options.getfieldvalue('batch', 'no') == 'yes':
        batch = 1
    else:
        batch = 0

    #check model consistency
    if options.getfieldvalue('checkconsistency', 'yes') == 'yes':
        print "checking model consistency"
        ismodelselfconsistent(md)

    #First, build a runtime name that is unique
    restart = options.getfieldvalue('restart', '')
    if restart == 1:
        pass  #do nothing
    else:
        if restart:
            md.private.runtimename = restart
        else:
            if options.getfieldvalue('runtimename', True):
                c = datetime.datetime.now()
                md.private.runtimename = "%s-%02i-%02i-%04i-%02i-%02i-%02i-%i" % (
                    md.miscellaneous.name, c.month, c.day, c.year, c.hour,
                    c.minute, c.second, os.getpid())
            else:
                md.private.runtimename = md.miscellaneous.name

    #if running qmu analysis, some preprocessing of dakota files using models
    #fields needs to be carried out.
    if md.qmu.isdakota:
        md = preqmu(md, options)

    #Do we load results only?
    if options.getfieldvalue('loadonly', False):
        md = loadresultsfromcluster(md)
        return md

    #Write all input files
    marshall(md)  # bin file
    md.toolkits.ToolkitsFile(md.miscellaneous.name +
                             '.toolkits')  # toolkits file
    cluster.BuildQueueScript(md.private.runtimename, md.miscellaneous.name,
                             md.private.solution, md.settings.io_gather,
                             md.debug.valgrind, md.debug.gprof,
                             md.qmu.isdakota,
                             md.transient.isoceancoupling)  # queue file

    #Stop here if batch mode
    if options.getfieldvalue('batch', 'no') == 'yes':
        print 'batch mode requested: not launching job interactively'
        print 'launch solution sequence on remote cluster by hand'
        return md

    #Upload all required files:
    modelname = md.miscellaneous.name
    filelist = [
        modelname + '.bin ', modelname + '.toolkits ', modelname + '.queue '
    ]
    if md.qmu.isdakota:
        filelist.append(modelname + '.qmu.in')

    if not restart:
        cluster.UploadQueueJob(md.miscellaneous.name, md.private.runtimename,
                               filelist)

    #Launch job
    cluster.LaunchQueueJob(md.miscellaneous.name, md.private.runtimename,
                           filelist, restart, batch)

    #wait on lock
    if md.settings.waitonlock > 0:
        #we wait for the done file
        islock = waitonlock(md)
        if islock == 0:  #no results to be loaded
            print 'The results must be loaded manually with md=loadresultsfromcluster(md).'
        else:  #load results
            print 'loading results from cluster'
            md = loadresultsfromcluster(md)

    #post processes qmu results if necessary
    if md.qmu.isdakota:
        if not strncmpi(options['keep'], 'y', 1):
            shutil.rmtree('qmu' + str(os.getpid()))

    return md
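
A usage sketch built from the docstring above (hypothetical calls): the short names map onto the full solution strings, and any extra pair options are forwarded to pairoptions.

#hypothetical calls; both select the TransientSolution sequence
md = solve(md, 'Transient')
md = solve(md, 'tr', 'checkconsistency', 'no')  #skip the consistency checks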