Esempio n. 1
0
def FormatToCode(format):  # {{{
    """
    FORMATTOCODE - convert a marshalling format string to its integer code

    This routine takes the format string, and hardcodes it into an integer,
    which is passed along the record, in order to identify the nature of the
    dataset being sent.

    Raises InputError when the format string is not supported.
    """

    #table of supported formats, matched case-insensitively in order
    codes = (('Boolean', 1),
             ('Integer', 2),
             ('Double', 3),
             ('String', 4),
             ('BooleanMat', 5),
             ('IntMat', 6),
             ('DoubleMat', 7),
             ('MatArray', 8),
             ('StringArray', 9),
             ('CompressedMat', 10))

    for name, code in codes:
        if m.strcmpi(format, name):
            return code

    raise InputError(
        'FormatToCode error message: data type not supported yet!')
Esempio n. 2
0
def ComputeHessian(index,x,y,field,type):
	"""
	COMPUTEHESSIAN - compute hessian matrix from a field

	   Compute the hessian matrix of a given field
	   return the three components Hxx Hxy Hyy
	   for each element or each node

	   Usage:
	      hessian=ComputeHessian(index,x,y,field,type)

	   Example:
	      hessian=ComputeHessian(md.mesh.elements,md.mesh.x,md.mesh.y,md.inversion.vel_obs,'node')
	"""

	#some variables
	numberofnodes=np.size(x)
	numberofelements=np.size(index,axis=0)

	#some checks: field must be defined on either nodes or elements
	if np.size(field)!=numberofnodes and np.size(field)!=numberofelements:
		raise TypeError("ComputeHessian error message: the given field size not supported yet")
	if not m.strcmpi(type,'node') and not m.strcmpi(type,'element'):
		raise TypeError("ComputeHessian error message: only 'node' or 'element' type supported yet")

	#initialization
	#line: column-major flattening of the element connectivity, used as the row
	#indices of the sparse accumulators below (one entry per element vertex)
	line=index.reshape(-1,order='F')
	linesize=3*numberofelements

	#get areas and nodal functions coefficients N(x,y)=alpha x + beta y + gamma 
	[alpha,beta,dum]=GetNodalFunctionsCoeff(index,x,y)
	areas=GetAreas(index,x,y)

	#compute weights that hold the volume of all the element holding the node i
	#(each element's area is accumulated onto its three vertices)
	weights=m.sparse(line,np.ones((linesize,1)),np.tile(areas.reshape(-1,),(3,1)),numberofnodes,1)

	#compute field on nodes if on elements
	#(area-weighted average of the elements surrounding each node)
	if np.size(field,axis=0)==numberofelements:
		field=m.sparse(line,np.ones((linesize,1)),np.tile(areas*field,(3,1)),numberofnodes,1)/weights

	#Compute gradient for each element
	#(index holds 1-based vertex ids, hence the -1 when indexing nodal arrays)
	grad_elx=np.sum(field[index-1,0]*alpha,axis=1) 
	grad_ely=np.sum(field[index-1,0]*beta,axis=1)

	#Compute gradient for each node (average of the elements around)
	gradx=m.sparse(line,np.ones((linesize,1)),np.tile((areas*grad_elx).reshape(-1,),(3,1)),numberofnodes,1)
	grady=m.sparse(line,np.ones((linesize,1)),np.tile((areas*grad_ely).reshape(-1,),(3,1)),numberofnodes,1)
	gradx=gradx/weights
	grady=grady/weights

	#Compute hessian for each element; columns are [Hxx, Hxy, Hyy]
	hessian=np.vstack((np.sum(gradx[index-1,0]*alpha,axis=1).reshape(-1,),np.sum(grady[index-1,0]*alpha,axis=1).reshape(-1,),np.sum(grady[index-1,0]*beta,axis=1).reshape(-1,))).T

	if m.strcmpi(type,'node'):
		#Compute Hessian on the nodes (average of the elements around)
		hessian=np.hstack((m.sparse(line,np.ones((linesize,1)),np.tile((areas*hessian[:,0]).reshape(-1,),(3,1)),numberofnodes,1)/weights,
											 m.sparse(line,np.ones((linesize,1)),np.tile((areas*hessian[:,1]).reshape(-1,),(3,1)),numberofnodes,1)/weights,
											 m.sparse(line,np.ones((linesize,1)),np.tile((areas*hessian[:,2]).reshape(-1,),(3,1)),numberofnodes,1)/weights ))

	return hessian
Esempio n. 3
0
def displayunit(offset, name, characterization, comment):  # {{{
    """Format one field line as "<offset><name>: <characterization> [-- <comment>]"."""

    #truncate overly long names (23 chars max, ellipsis included)
    if len(name) > 23:
        name = name[:20] + "..."

    #empty or NaN characterizations are displayed as N/A
    if (m.strcmp(characterization, "''")
            or m.strcmp(characterization, '""')
            or m.strcmpi(characterization, 'nan')):
        characterization = "N/A"

    #truncate overly long characterizations (15 chars max, ellipsis included)
    if len(characterization) > 15:
        characterization = characterization[:12] + "..."

    #assemble the display string
    if not comment:
        return "%s%-23s: %-15s" % (offset, name, characterization)

    if isinstance(comment, (str, unicode)):
        return "%s%-23s: %-15s -- %s" % (offset, name, characterization,
                                         comment)

    if isinstance(comment, list):
        #first comment goes on the field line, every comment gets its own line
        lines = "%s%-23s: %-15s -- %s" % (offset, name, characterization,
                                          comment[0])
        for commenti in comment:
            lines += "\n%s%-23s  %-15s    %s" % (offset, '', '', commenti)
        return lines

    raise RuntimeError(
        "fielddisplay error message: format for comment not supported yet"
    )
Esempio n. 4
0
File: qmu.py Progetto: pf4d/issm
    def checkconsistency(self, md, solution, analyses):  # {{{
        """Check qmu settings for consistency; problems are reported via md.checkmessage."""

        #nothing to check unless dakota is requested
        if not md.qmu.isdakota:
            return

        #library mode requires serial evaluation
        if md.qmu.params.evaluation_concurrency != 1:
            md.checkmessage(
                "concurrency should be set to 1 when running dakota in library mode"
            )

        #partition vector must cover every vertex, be indexed from 0 and stay below npart
        if md.qmu.partition:
            if numpy.size(md.qmu.partition) != md.mesh.numberofvertices:
                md.checkmessage(
                    "user supplied partition for qmu analysis should have size md.mesh.numberofvertices x 1"
                )
            if min(md.qmu.partition) != 0:
                md.checkmessage("partition vector not indexed from 0 on")
            if max(md.qmu.partition) >= md.qmu.numberofpartitions:
                md.checkmessage(
                    "for qmu analysis, partitioning vector cannot go over npart, number of partition areas"
                )

        #parallel (remote cluster) runs must wait for the lock file
        if not m.strcmpi(md.cluster.name, 'none'):
            if not md.settings.waitonlock:
                md.checkmessage(
                    "waitonlock should be activated when running qmu in parallel mode!"
                )

        return md
Esempio n. 5
0
def waitonlock(md):
    """
	WAITONLOCK - wait for a file
 
	   This routine will return when a file named 'filename' is written to disk.
	   If the time limit given in input is exceeded, return 0
 
	   Usage:
	      flag=waitonlock(md)
	"""

    #Get filename (lock file) and options
    executionpath = md.cluster.executionpath
    cluster = md.cluster.name
    login = md.cluster.login
    port = md.cluster.port
    timelimit = md.settings.waitonlock
    filename = os.path.join(executionpath, md.private.runtimename,
                            md.miscellaneous.name + '.lock')

    #waitonlock will work if the lock is on the same machine only:
    if not m.strcmpi(gethostname(), cluster):

        print 'solution launched on remote cluster. log in to detect job completion.'
        choice = raw_input('Is the job successfully completed? (y/n) ')
        if not m.strcmp(choice, 'y'):
            print 'Results not loaded... exiting'
            flag = 0
        else:
            flag = 1

    #job is running on the same machine
    else:

        if 'interactive' in vars(md.cluster) and md.cluster.interactive:
            #We are in interactive mode, no need to check for job completion
            flag = 1
            return flag
        #initialize time and file presence test flag
        etime = 0
        ispresent = 0
        print "waiting for '%s' hold on... (Ctrl+C to exit)" % filename

        #loop till file .lock exist or time is up
        while ispresent == 0 and etime < timelimit:
            ispresent = os.path.exists(filename)
            time.sleep(1)
            etime += 1 / 60

        #build output
        if etime > timelimit:
            print 'Time limit exceeded. Increase md.settings.waitonlock'
            print 'The results must be loaded manually with md=loadresultsfromcluster(md).'
            raise RuntimeError(
                'waitonlock error message: time limit exceeded.')
            flag = 0
        else:
            flag = 1

    return flag
Esempio n. 6
0
def issmdir():
    """
    ISSMDIR - Get ISSM_DIR environment variable

       Usage:
          ISSM_DIR=issmdir()

       Raises RuntimeError when the variable is unset or empty.
    """

    #read the platform-appropriate variable; default to '' so an unset variable
    #falls through to the explicit error below instead of raising KeyError
    if not m.ispc():
        ISSM_DIR = os.environ.get('ISSM_DIR', '')
    else:
        ISSM_DIR = os.environ.get('ISSM_DIR_WIN', '')
        #shave off a trailing path separator (guard against an empty value)
        if ISSM_DIR and (m.strcmpi(ISSM_DIR[-1], '/')
                         or m.strcmpi(ISSM_DIR[-1], '\\')):
            ISSM_DIR = ISSM_DIR[:-1]

    if not ISSM_DIR:
        raise RuntimeError(
            "issmdir error message: 'ISSM_DIR' environment variable is empty! You should define ISSM_DIR in your .cshrc or .bashrc!"
        )

    return ISSM_DIR
Esempio n. 7
0
def issmscpout(host, path, login, port, packages):
    """
    ISSMSCPOUT send packages to a host, using scp on unix, and pscp on windows

       usage: issmscpout(host,path,packages)
    """

    #get hostname
    hostname = gethostname()

    #if hostname and host are the same, do a simple copy (symlink)
    if m.strcmpi(host, hostname):
        for package in packages:
            here = os.getcwd()
            os.chdir(path)
            try:
                #remove any pre-existing file so the symlink can be created
                os.remove(package)
            except OSError:
                pass
            subprocess.call('ln -s %s %s' %
                            (os.path.join(here, package), path),
                            shell=True)
            os.chdir(here)
    else:
        if m.ispc():
            #use the putty project pscp.exe: it should be in the path.

            #get ISSM_DIR variable
            if 'ISSM_DIR_WIN' in os.environ:
                ISSM_DIR = os.environ['ISSM_DIR_WIN'][1:-2]
            else:
                raise OSError(
                    "issmscpout error message: could not find ISSM_DIR_WIN environment variable."
                )

            username = input('Username: (quoted string) ')
            key = input('Key: (quoted string) ')

            for package in packages:
                try:
                    subprocess.check_call(
                        '%s/externalpackages/ssh/pscp.exe -l "%s" -pw "%s" %s %s:%s'
                        % (ISSM_DIR, username, key, package, host, path),
                        shell=True)
                except CalledProcessError:
                    #BUGFIX: the original re-raised CalledProcessError with a
                    #single string argument, which is an invalid constructor
                    #call (it needs returncode and cmd) and masked the real
                    #failure with a TypeError
                    raise RuntimeError(
                        "issmscpout error message: could not call putty pscp.")

        else:
            #just use standard unix scp
            #create string of packages being sent
            string = ''
            for package in packages:
                string += ' ' + package
            string += ' '

            #a port number means an ssh tunnel through localhost
            if port:
                subprocess.call('scp -P %d %s %s@localhost:%s' %
                                (port, string, login, path),
                                shell=True)
            else:
                subprocess.call('scp %s %s@%s:%s' %
                                (string, login, host, path),
                                shell=True)
Esempio n. 8
0
def WriteData(fid, **kwargs):
    """
    WRITEDATA - write model field in binary file

       Writes one record (enum id, record length, data code, payload) to the
       open binary file handle fid.

       Usage:
          WriteData(fid,**kwargs)
    """

    #process options
    options = pairoptions.pairoptions(**kwargs)

    #Get data properties
    if options.exist('object'):
        #This is an object field, construct enum and data
        obj = options.getfieldvalue('object')
        fieldname = options.getfieldvalue('fieldname')
        classname = options.getfieldvalue(
            'class',
            str(type(obj)).rsplit('.')[-1].split("'")[0])
        if options.exist('enum'):
            enum = options.getfieldvalue('enum')
        else:
            enum = BuildEnum(classname + '_' + fieldname)
        data = getattr(obj, fieldname)
    else:
        #No processing required
        data = options.getfieldvalue('data')
        enum = options.getfieldvalue('enum')
    format = options.getfieldvalue('format')
    mattype = options.getfieldvalue('mattype', 0)  #only required for matrices
    timeserieslength = options.getfieldvalue('timeserieslength', -1)

    #Process sparse matrices
    #	if issparse(data),
    #		data=full(data);
    #	end

    #Scale data if necessary (the last row of a time series holds the time
    #stamps and is not scaled here)
    if options.exist('scale'):
        scale = options.getfieldvalue('scale')
        if numpy.size(data) > 1:
            if numpy.size(data, 0) == timeserieslength:
                data = numpy.array(data)
                data[0:-1, :] = scale * data[0:-1, :]
            else:
                data = scale * data
        else:
            data = scale * data
    if numpy.size(data) > 1:
        if numpy.size(data, 0) == timeserieslength:
            #convert the time-stamp row from years to seconds
            yts = 365.0 * 24.0 * 3600.0
            data[-1, :] = yts * data[-1, :]

    #Step 1: write the enum to identify this record uniquely
    fid.write(struct.pack('i', enum))

    #Step 2: write the data itself.
    if m.strcmpi(format, 'Boolean'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 bool (disguised as an int)+code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack(
            'i', int(data)))  #send an int, not easy to send a bool
        # }}}

    elif m.strcmpi(format, 'Integer'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 integer + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', data))
        # }}}

    elif m.strcmpi(format, 'Double'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % EnumToString(enum)[0])

        #first write length of record
        fid.write(struct.pack('i', 8 + 4))  #1 double+code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write double
        fid.write(struct.pack('d', data))
        # }}}

    elif m.strcmpi(format, 'String'):  # {{{
        #first write length of record
        fid.write(struct.pack('i',
                              len(data) + 4 + 4))  #string + string size + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write string
        fid.write(struct.pack('i', len(data)))
        fid.write(struct.pack('%ds' % len(data), data))
        # }}}

    elif m.strcmpi(format, 'BooleanMat'):  # {{{

        #normalize scalars/sequences/1-D arrays to a 2-D column matrix
        if isinstance(data, bool):
            data = numpy.array([data])
        elif isinstance(data, (list, tuple)):
            data = numpy.array(data).reshape(-1, 1)
        if numpy.ndim(data) == 1:
            if numpy.size(data):
                data = data.reshape(numpy.size(data), 1)
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if s[0] == 1 and s[1] == 1 and math.isnan(data[0][0]):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack(
            'i', 4 + 4 + 8 * s[0] * s[1] + 4 +
            4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        fid.write(struct.pack('i', s[0]))
        fid.write(struct.pack('i', s[1]))
        for i in range(s[0]):
            for j in range(s[1]):
                fid.write(struct.pack('d', float(
                    data[i]
                    [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'IntMat'):  # {{{

        #normalize scalars/sequences/1-D arrays to a 2-D column matrix
        if isinstance(data, int):
            data = numpy.array([data])
        elif isinstance(data, (list, tuple)):
            data = numpy.array(data).reshape(-1, 1)
        if numpy.ndim(data) == 1:
            if numpy.size(data):
                data = data.reshape(numpy.size(data), 1)
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if s[0] == 1 and s[1] == 1 and math.isnan(data[0][0]):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack(
            'i', 4 + 4 + 8 * s[0] * s[1] + 4 +
            4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        fid.write(struct.pack('i', s[0]))
        fid.write(struct.pack('i', s[1]))
        for i in range(s[0]):
            for j in range(s[1]):
                fid.write(struct.pack('d', float(
                    data[i]
                    [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'DoubleMat'):  # {{{

        #normalize scalars/sequences/1-D arrays to a 2-D column matrix
        if isinstance(data, (bool, int, float)):
            data = numpy.array([data])
        elif isinstance(data, (list, tuple)):
            data = numpy.array(data).reshape(-1, 1)
        if numpy.ndim(data) == 1:
            if numpy.size(data):
                data = data.reshape(numpy.size(data), 1)
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if s[0] == 1 and s[1] == 1 and math.isnan(data[0][0]):
            s = (0, 0)

        #first write length of record; the record length field is a signed
        #32-bit int, so it cannot exceed 2^31
        recordlength = 4 + 4 + 8 * s[0] * s[1] + 4 + 4
        #2 integers (32 bits) + the double matrix + code + matrix type
        if recordlength > 2**31:
            raise ValueError(
                'field %s cannot be marshalled because it is larger than 2^31 bytes!'
                % EnumToString(enum)[0])

        fid.write(
            struct.pack('i', recordlength)
        )  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        fid.write(struct.pack('i', s[0]))
        fid.write(struct.pack('i', s[1]))
        for i in range(s[0]):
            for j in range(s[1]):
                fid.write(struct.pack('d', float(
                    data[i]
                    [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'MatArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #number of records + code
        for matrix in data:
            if isinstance(matrix, (bool, int, float)):
                matrix = numpy.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = numpy.array(matrix).reshape(-1, 1)
            if numpy.ndim(matrix) == 1:
                if numpy.size(matrix):
                    matrix = matrix.reshape(numpy.size(matrix), 1)
                else:
                    matrix = matrix.reshape(0, 0)

            s = matrix.shape
            recordlength += 4 * 2 + s[0] * s[
                1] * 8  #row and col of matrix + matrix of doubles

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #write data, first number of records
        fid.write(struct.pack('i', len(data)))

        #write each matrix:
        for matrix in data:
            if isinstance(matrix, (bool, int, float)):
                matrix = numpy.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = numpy.array(matrix).reshape(-1, 1)
            if numpy.ndim(matrix) == 1:
                matrix = matrix.reshape(numpy.size(matrix), 1)

            s = matrix.shape
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in range(s[0]):
                for j in range(s[1]):
                    fid.write(struct.pack('d', float(matrix[i][j])))
        # }}}

    elif m.strcmpi(format, 'StringArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #for length of array + code
        for string in data:
            recordlength += 4 + len(string)  #for each string

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write length of string array
        fid.write(struct.pack('i', len(data)))

        #now write the strings
        for string in data:
            fid.write(struct.pack('i', len(string)))
            fid.write(struct.pack('%ds' % len(string), string))
        # }}}

    else:  # {{{
        #BUGFIX: 'format' is a string, so the original '%d' placeholder made
        #this error path raise a TypeError instead of the intended message
        raise TypeError(
            'WriteData error message: data type: %s not supported yet! (%s)' %
            (format, EnumToString(enum)[0]))
Esempio n. 9
0
def WriteData(fid, prefix, *args):
    """
    WRITEDATA - write model field in binary file

       Writes one record (name, record length, data code, payload) to the
       open binary file handle fid. 'prefix' is the dotted path used to build
       the default record name (prefix + '.' + fieldname).

       Usage:
          WriteData(fid,prefix,*args)
    """

    #process options
    options = pairoptions.pairoptions(*args)

    #Get data properties
    if options.exist('object'):
        #This is an object field, construct enum and data
        obj = options.getfieldvalue('object')
        fieldname = options.getfieldvalue('fieldname')
        classname = options.getfieldvalue(
            'class',
            str(type(obj)).rsplit('.')[-1].split("'")[0])
        name = options.getfieldvalue('name', prefix + '.' + fieldname)
        if options.exist('data'):
            data = options.getfieldvalue('data')
        else:
            data = getattr(obj, fieldname)
    else:
        #No processing required
        data = options.getfieldvalue('data')
        name = options.getfieldvalue('name')

    format = options.getfieldvalue('format')
    mattype = options.getfieldvalue('mattype', 0)  #only required for matrices
    timeserieslength = options.getfieldvalue('timeserieslength', -1)

    #Process sparse matrices
    #	if issparse(data),
    #		data=full(data);
    #	end

    #Scale data if necessary (the last row of a time series holds the time
    #stamps and is not scaled here)
    if options.exist('scale'):
        scale = options.getfieldvalue('scale')
        if np.size(data) > 1:
            if np.size(data, 0) == timeserieslength:
                data = np.array(data)
                data[0:-1, :] = scale * data[0:-1, :]
            else:
                data = scale * data
        else:
            data = scale * data
    if np.size(data) > 1:
        if np.size(data, 0) == timeserieslength:
            #convert the time-stamp row using the caller-supplied year-to-second factor
            yts = options.getfieldvalue('yts')
            data[-1, :] = yts * data[-1, :]

    #Step 1: write the record name to identify this record uniquely
    fid.write(struct.pack('i', len(name)))
    fid.write(struct.pack('%ds' % len(name), name))

    #Step 2: write the data itself.
    if m.strcmpi(format, 'Boolean'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 bool (disguised as an int)+code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack(
            'i', int(data)))  #send an int, not easy to send a bool
        # }}}

    elif m.strcmpi(format, 'Integer'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 4 + 4))  #1 integer + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write integer
        fid.write(struct.pack('i', data))
        # }}}

    elif m.strcmpi(format, 'Double'):  # {{{
        #		if len(data) !=1:
        #			raise ValueError('field %s cannot be marshalled as it has more than one element!' % name[0])

        #first write length of record
        fid.write(struct.pack('i', 8 + 4))  #1 double+code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write double
        fid.write(struct.pack('d', data))
        # }}}

    elif m.strcmpi(format, 'String'):  # {{{
        #first write length of record
        fid.write(struct.pack('i',
                              len(data) + 4 + 4))  #string + string size + code

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write string
        fid.write(struct.pack('i', len(data)))
        fid.write(struct.pack('%ds' % len(data), data))
        # }}}

    elif m.strcmpi(format, 'BooleanMat'):  # {{{

        #normalize scalars and sequences to numpy arrays (1-D kept 1-D)
        if isinstance(data, bool):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 2 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack(
            'i', 4 + 4 + 8 * np.product(s) + 4 +
            4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(
                    data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(
                        data[i]
                        [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'IntMat'):  # {{{

        #normalize scalars and sequences to numpy arrays (1-D kept 1-D)
        if isinstance(data, (int, long)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 2 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)

        #first write length of record
        fid.write(struct.pack(
            'i', 4 + 4 + 8 * np.product(s) + 4 +
            4))  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(
                    data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(
                        data[i]
                        [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'DoubleMat'):  # {{{

        #normalize scalars and sequences to numpy arrays (1-D kept 1-D)
        if isinstance(data, (bool, int, long, float)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        #if matrix = NaN, then do not write anything
        if np.ndim(data) == 1 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)

        #first write length of record; the record length field is a signed
        #32-bit int, so it cannot exceed 2^31
        #BUGFIX: was 4**31, and '% enum' referenced an undefined name
        recordlength = 4 + 4 + 8 * np.product(s) + 4 + 4
        #2 integers (32 bits) + the double matrix + code + matrix type
        if recordlength > 2**31:
            raise ValueError(
                'field %s cannot be marshalled because it is larger than 2^31 bytes!'
                % name)

        fid.write(
            struct.pack('i', recordlength)
        )  #2 integers (32 bits) + the double matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            for i in xrange(s[0]):
                fid.write(struct.pack('d', float(
                    data[i])))  #get to the "c" convention, hence the transpose
        else:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            for i in xrange(s[0]):
                for j in xrange(s[1]):
                    fid.write(struct.pack('d', float(
                        data[i]
                        [j])))  #get to the "c" convention, hence the transpose
        # }}}

    elif m.strcmpi(format, 'CompressedMat'):  # {{{

        #normalize scalars and sequences to numpy arrays (1-D kept 1-D)
        if isinstance(data, (bool, int, long, float)):
            data = np.array([data])
        elif isinstance(data, (list, tuple)):
            data = np.array(data).reshape(-1, )
        if np.ndim(data) == 1:
            if np.size(data):
                data = data.reshape(np.size(data), )
            else:
                data = data.reshape(0, 0)

        #Get size
        s = data.shape
        if np.ndim(data) == 1:
            n2 = 1
        else:
            n2 = s[1]

        #if matrix = NaN, then do not write anything
        #NOTE(review): with s=(0,0) the A.min() below runs on an empty slice
        #and would raise — presumably this path is never hit in practice; verify
        if np.ndim(data) == 1 and np.product(s) == 1 and np.all(
                np.isnan(data)):
            s = (0, 0)
            n2 = 0

        #first write length of record; the record length field is a signed
        #32-bit int, so it cannot exceed 2^31
        #BUGFIX: was 4**31, and '% enum' referenced an undefined name
        recordlength = 4 + 4 + 8 + 8 + 1 * (
            s[0] - 1
        ) * n2 + 8 * n2 + 4 + 4  #2 integers (32 bits) + the matrix + code + matrix type
        if recordlength > 2**31:
            raise ValueError(
                'field %s cannot be marshalled because it is larger than 2^31 bytes!'
                % name)

        fid.write(struct.pack('i', recordlength)
                  )  #2 integers (32 bits) + the matrix + code + matrix type

        #write data code and matrix type:
        fid.write(struct.pack('i', FormatToCode(format)))
        fid.write(struct.pack('i', mattype))

        #Write offset and range (all rows but the last are quantized to 8 bits)
        A = data[0:s[0] - 1]
        offsetA = A.min()
        rangeA = A.max() - offsetA

        if rangeA == 0:
            A = A * 0
        else:
            A = (A - offsetA) / rangeA * 255.

        #now write matrix
        if np.ndim(data) == 1:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', 1))
            fid.write(struct.pack('d', float(offsetA)))
            fid.write(struct.pack('d', float(rangeA)))
            for i in xrange(s[0] - 1):
                fid.write(struct.pack('B', int(A[i])))

            fid.write(struct.pack('d', float(
                data[s[0] -
                     1])))  #get to the "c" convention, hence the transpose

        elif np.product(s) > 0:
            fid.write(struct.pack('i', s[0]))
            fid.write(struct.pack('i', s[1]))
            fid.write(struct.pack('d', float(offsetA)))
            fid.write(struct.pack('d', float(rangeA)))
            for i in xrange(s[0] - 1):
                for j in xrange(s[1]):
                    fid.write(struct.pack('B', int(
                        A[i]
                        [j])))  #get to the "c" convention, hence the transpose

            for j in xrange(s[1]):
                fid.write(struct.pack('d', float(data[s[0] - 1][j])))

        # }}}

    elif m.strcmpi(format, 'MatArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #number of records + code
        for matrix in data:
            if isinstance(matrix, (bool, int, long, float)):
                matrix = np.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = np.array(matrix).reshape(-1, )
            if np.ndim(matrix) == 1:
                if np.size(matrix):
                    matrix = matrix.reshape(np.size(matrix), )
                else:
                    matrix = matrix.reshape(0, 0)

            s = matrix.shape
            recordlength += 4 * 2 + np.product(
                s) * 8  #row and col of matrix + matrix of doubles

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #write data, first number of records
        fid.write(struct.pack('i', len(data)))

        #write each matrix:
        for matrix in data:
            if isinstance(matrix, (bool, int, long, float)):
                matrix = np.array([matrix])
            elif isinstance(matrix, (list, tuple)):
                matrix = np.array(matrix).reshape(-1, )
            if np.ndim(matrix) == 1:
                matrix = matrix.reshape(np.size(matrix), )

            s = matrix.shape
            #BUGFIX: was 'np.ndim(data)', which tested the whole list instead
            #of the current matrix and crashed (IndexError on s[1]) for 1-D
            #matrices
            if np.ndim(matrix) == 1:
                fid.write(struct.pack('i', s[0]))
                fid.write(struct.pack('i', 1))
                for i in xrange(s[0]):
                    fid.write(
                        struct.pack('d', float(matrix[i]))
                    )  #get to the "c" convention, hence the transpose
            else:
                fid.write(struct.pack('i', s[0]))
                fid.write(struct.pack('i', s[1]))
                for i in xrange(s[0]):
                    for j in xrange(s[1]):
                        fid.write(struct.pack('d', float(matrix[i][j])))
        # }}}

    elif m.strcmpi(format, 'StringArray'):  # {{{

        #first get length of record
        recordlength = 4 + 4  #for length of array + code
        for string in data:
            recordlength += 4 + len(string)  #for each string

        #write length of record
        fid.write(struct.pack('i', recordlength))

        #write data code:
        fid.write(struct.pack('i', FormatToCode(format)))

        #now write length of string array
        fid.write(struct.pack('i', len(data)))

        #now write the strings
        for string in data:
            fid.write(struct.pack('i', len(string)))
            fid.write(struct.pack('%ds' % len(string), string))
        # }}}

    else:  # {{{
        #BUGFIX: '% enum' referenced an undefined name in this version; use
        #the record name instead
        raise TypeError(
            'WriteData error message: data type: %d not supported yet! (%s)' %
            (format, name))
Esempio n. 10
0
File: issmssh.py Progetto: pf4d/issm
def issmssh(host, login, port, command):
    """
	ISSMSSH - wrapper for OS independent ssh command.
 
	   usage: 
	      issmssh(host,command)
	"""

    # Resolve our own hostname: a command aimed at this machine is run directly.
    hostname = gethostname()

    if m.strcmpi(host, hostname):
        subprocess.call(command, shell=True)
    elif m.ispc():
        # Windows: rely on the putty project plink.exe, expected on the path.
        # ISSM_DIR_WIN is mandatory to locate the bundled ssh tools.
        if 'ISSM_DIR_WIN' not in os.environ:
            raise OSError(
                "issmssh error message: could not find ISSM_DIR_WIN environment variable."
            )
        ISSM_DIR = os.environ['ISSM_DIR_WIN'][1:-2]

        username = input('Username: (quoted string) ')
        key = input('Key: (quoted string) ')

        subprocess.call(
            '%s/externalpackages/ssh/plink.exe -ssh -l "%s" -pw "%s" %s "%s"'
            % (ISSM_DIR, username, key, host, command),
            shell=True)
    elif port:
        # NOTE: with a port given the connection targets localhost, presumably
        # through an already-established ssh tunnel -- confirm with callers.
        subprocess.call('ssh -l %s -p %d localhost "%s"' %
                        (login, port, command),
                        shell=True)
    else:
        # plain unix ssh straight to the remote host
        subprocess.call('ssh -l %s %s "%s"' % (login, host, command),
                        shell=True)

    # The following code was added to fix:
    # "IOError: [Errno 35] Resource temporarily unavailable"
    # on the Mac when trying to display md after the solution.
    # (from http://code.google.com/p/robotframework/issues/detail?id=995)

    # Make FreeBSD use blocking I/O like other platforms
    import sys
    import fcntl
    from os import O_NONBLOCK

    for stream in (sys.stdin, sys.stdout):
        descriptor = stream.fileno()
        state = fcntl.fcntl(descriptor, fcntl.F_GETFL)
        fcntl.fcntl(descriptor, fcntl.F_SETFL, state & ~O_NONBLOCK)
Example 11
0
def FlagElements(md, region):
    """
	FLAGELEMENTS - flag the elements in an region

	   The region can be given with an exp file, a list of elements or vertices

	   Usage: 
	      flag=FlagElements(md,region);

	   Example:
	      flag=FlagElements(md,'all');
	      flag=FlagElements(md,'');
	      flag=FlagElements(md,'Domain.exp');
	      flag=FlagElements(md,'~Domain.exp');
	"""

    if isinstance(region, (str, unicode)):
        if not region:
            # empty string: flag nothing
            flag = np.zeros(md.mesh.numberofelements, bool)
            invert = 0
        elif m.strcmpi(region, 'all'):
            # 'all': flag every element
            flag = np.ones(md.mesh.numberofelements, bool)
            invert = 0
        else:
            #make sure that we actually don't want the elements outside the domain outline!
            if m.strcmpi(region[0], '~'):
                region = region[1:]
                invert = 1
            else:
                invert = 0

            #does the region domain outline exist or do we have to look for xlim,ylim in basinzoom?
            if not os.path.exists(region):
                if len(region) > 3 and not m.strcmp(region[-4:], '.exp'):
                    # bug fix: the message previously lacked a %s placeholder,
                    # so the % operator itself raised a TypeError at raise time
                    raise IOError("Error: File '%s' not found!" % region)
                raise RuntimeError(
                    "FlagElements.py calling basinzoom.py is not complete.")
                # unreachable scaffold kept for the future basinzoom hookup
                xlim, ylim = basinzoom('basin', region)
                flag_nodes = p.logical_and_n(md.mesh.x < xlim[1],
                                             md.mesh.x > xlim[0],
                                             md.mesh.y < ylim[1],
                                             md.mesh.y > ylim[0])
                flag = np.prod(flag_nodes[md.mesh.elements],
                               axis=1).astype(bool)
            else:
                #ok, flag elements inside the contour described by the exp file
                flag = ContourToMesh(md.mesh.elements[:,
                                                      0:3].copy(), md.mesh.x,
                                     md.mesh.y, region, 'element', 1)
                flag = flag.astype(bool)

        if invert:
            flag = np.logical_not(flag)

    elif isinstance(region, np.ndarray) or isinstance(region, bool):
        # array region: either one flag per element (used as-is), or one per
        # vertex (an element is kept only when all of its vertices are flagged)
        if np.size(region, 0) == md.mesh.numberofelements:
            flag = region
        elif np.size(region, 0) == md.mesh.numberofvertices:
            flag = (np.sum(region[md.mesh.elements - 1] > 0,
                           axis=1) == np.size(md.mesh.elements, 1))
        else:
            raise TypeError(
                "Flaglist for region must be of same size as number of elements in model."
            )

    else:
        raise TypeError("Invalid region option")

    return flag
Example 12
0
def issmscpin(host, login, port, path, packages):
    """
	ISSMSCPIN get packages from host, using scp on unix, and pscp on windows
 
	   usage: issmscpin(host,packages,path)
	"""

    #first get hostname
    hostname = gethostname()

    #first be sure packages are not in the current directory, this could conflict with pscp on windows.
    #remove warnings in case the files do not exist
    for package in packages:
        try:
            os.remove(package)
        except OSError:
            pass  # best effort: the file simply was not there

    #if hostname and host are the same, do a simple copy
    if m.strcmpi(hostname, host):

        for package in packages:
            try:
                shutil.copy(os.path.join(path, package),
                            os.getcwd())  #keep going, even if success=0
            except OSError:
                pass

    else:

        if m.ispc():
            #use the putty project pscp.exe: it should be in the path.

            #get ISSM_DIR variable
            if 'ISSM_DIR_WIN' in os.environ:
                ISSM_DIR = os.environ['ISSM_DIR_WIN'][1:-2]
            else:
                raise OSError(
                    "issmscpin error message: could not find ISSM_DIR_WIN environment variable."
                )

            username = raw_input('Username: (quoted string) ')
            key = raw_input('Key: (quoted string) ')

            for package in packages:
                try:
                    subprocess.check_call(
                        '%s/externalpackages/ssh/pscp.exe -l "%s" -pw "%s" %s:%s %s'
                        % (ISSM_DIR, username, key, host,
                           os.path.join(path, package), os.getcwd()),
                        shell=True)
                except CalledProcessError:
                    # bug fix: CalledProcessError cannot be constructed from a
                    # message alone (it needs returncode and cmd), so the old
                    # re-raise crashed with a TypeError; raise OSError instead,
                    # consistent with the *nix branch below.
                    raise OSError(
                        "issmscpin error message: could not call putty pscp.")

        else:
            #just use standard unix scp
            #string to copy multiple files using scp:
            string = '\{' + ','.join([str(x) for x in packages]) + '\}'

            if port:
                # a port implies going through localhost (presumably a tunnel)
                subprocess.call(
                    'scp -P %d %s@localhost:%s %s/. ' %
                    (port, login, os.path.join(path, string), os.getcwd()),
                    shell=True)
            else:
                subprocess.call(
                    'scp %s@%s:%s %s/.' %
                    (login, host, os.path.join(path, string), os.getcwd()),
                    shell=True)

            #check scp worked
            for package in packages:
                if not os.path.exists(os.path.join('.', package)):
                    raise OSError(
                        "issmscpin error message: could not call scp on *nix system."
                    )
Example 13
0
def solve(md, solutionenum, **kwargs):
    """
	SOLVE - apply solution sequence for this model
 
	   Usage:
	      md=solve(md,solutionenum,varargin)
	      where varargin is a list of paired arguments of string OR enums
 
	   solution types available comprise:
	      - StressbalanceSolutionEnum
	      - MasstransportSolutionEnum
	      - ThermalSolutionEnum
	      - SteadystateSolutionEnum
	      - TransientSolutionEnum
	      - BalancethicknessSolutionEnum
	      - BedSlopeSolutionEnum
	      - SurfaceSlopeSolutionEnum
	      - HydrologySolutionEnum
	      - FlaimSolutionEnum
 
	   extra options:
	      - loadonly : does not solve. only load results
		  - checkconsistency : 'yes' or 'no' (default is 'yes'), ensures checks on consistency of model
		  - restart: 'directory name (relative to the execution directory) where the restart file is located.
 
	   Examples:
	      md=solve(md,StressbalanceSolutionEnum);
	"""

    #recover and process solve options
    # the enum's string name must end in 'Solution' (e.g. StressbalanceSolution)
    if EnumToString(solutionenum)[0][-8:] != 'Solution':
        raise ValueError("solutionenum '%s' not supported!" %
                         EnumToString(solutionenum)[0])
    options = pairoptions(solutionenum=solutionenum, **kwargs)

    #recover some fields
    md.private.solution = solutionenum
    cluster = md.cluster

    #check model consistency
    if m.strcmpi(options.getfieldvalue('checkconsistency', 'yes'), 'yes'):
        print("checking model consistency")
        if solutionenum == FlaimSolutionEnum():
            # flaim runs its own, narrower set of consistency checks
            md.private.isconsistent = True
            md.mesh.checkconsistency(md, solutionenum)
            md.flaim.checkconsistency(md, solutionenum)
            if not md.private.isconsistent:
                raise RuntimeError("Model not consistent, see messages above.")
        else:
            ismodelselfconsistent(md)

    #First, build a runtime name that is unique
    # NOTE(review): restart == 1 keeps the existing runtime name untouched,
    # while a non-empty string reuses it as the runtime directory name --
    # confirm this mixed int/string convention against callers.
    restart = options.getfieldvalue('restart', '')
    if restart == 1:
        pass  #do nothing
    else:
        if restart:
            md.private.runtimename = restart
        else:
            if options.getfieldvalue('runtimename', True):
                # unique name: model name + timestamp + pid
                c = datetime.datetime.now()
                md.private.runtimename = "%s-%02i-%02i-%04i-%02i-%02i-%02i-%i" % (
                    md.miscellaneous.name, c.month, c.day, c.year, c.hour,
                    c.minute, c.second, os.getpid())
            else:
                md.private.runtimename = md.miscellaneous.name

    #if running qmu analysis, some preprocessing of dakota files using models
    #fields needs to be carried out.
    if md.qmu.isdakota:
        md = preqmu(md, options)

    #flaim analysis
    # flaim short-circuits: it is solved locally, no cluster job is launched
    if solutionenum == FlaimSolutionEnum():
        md = flaim_sol(md, options)
        [md.private.solution] = EnumToString(solutionenum)
        return md

    #Do we load results only?
    if options.getfieldvalue('loadonly', False):
        md = loadresultsfromcluster(md)
        return md

    #Write all input files
    marshall(md)  # bin file
    md.toolkits.ToolkitsFile(md.miscellaneous.name +
                             '.toolkits')  # toolkits file
    cluster.BuildQueueScript(md.private.runtimename, md.miscellaneous.name,
                             md.private.solution, md.settings.io_gather,
                             md.debug.valgrind, md.debug.gprof,
                             md.qmu.isdakota)  # queue file

    #Stop here if batch mode
    if m.strcmpi(options.getfieldvalue('batch', 'no'), 'yes'):
        print('batch mode requested: not launching job interactively')
        print('launch solution sequence on remote cluster by hand')
        return md

    #Upload all required files:
    # NOTE(review): the trailing spaces in the filenames below look deliberate
    # (list later joined into a command line?) -- verify before touching
    modelname = md.miscellaneous.name
    filelist = [
        modelname + '.bin ', modelname + '.toolkits ', modelname + '.queue '
    ]
    if md.qmu.isdakota:
        filelist.append(modelname + '.qmu.in')

    if not restart:
        cluster.UploadQueueJob(md.miscellaneous.name, md.private.runtimename,
                               filelist)

    #Launch job
    cluster.LaunchQueueJob(md.miscellaneous.name, md.private.runtimename,
                           filelist, restart)

    #wait on lock
    if md.settings.waitonlock > 0:
        #we wait for the done file
        islock = waitonlock(md)
        if islock == 0:  #no results to be loaded
            print(
                'The results must be loaded manually with md=loadresultsfromcluster(md).'
            )
        else:  #load results
            print('loading results from cluster')
            md = loadresultsfromcluster(md)

    #post processes qmu results if necessary
    if md.qmu.isdakota:
        # NOTE(review): options['keep'] subscripting and the bare strncmpi call
        # look suspect -- elsewhere this file uses options.getfieldvalue(...)
        # and m.strncmpi; verify pairoptions supports indexing before relying
        # on this branch.
        if not strncmpi(options['keep'], 'y', 1):
            shutil.rmtree('qmu' + str(os.getpid()))

    return md