def get_conditions(self):
    """ Get the mechanical boundary conditions

    Returns
    -------
    List of conditions as:
        [underworld.conditions._conditions.DirichletCondition,
         underworld.conditions._conditions.NeumannCondition]
    or:
        [underworld.conditions._conditions.DirichletCondition]
    """

    Model = self.Model

    # Reinitialise the Neumann condition indices
    self._neumann_indices = []

    for _ in range(Model.mesh.dim):
        self._neumann_indices.append(Model.mesh.specialSets["Empty"])

    for set_ in self.order_wall_conditions:
        (condition, nodes) = self._wall_indexSets[set_]
        self._apply_conditions_nodes(condition, nodes)

    if self.nodeSets:
        for (condition, nodes) in self.nodeSets:
            self._apply_conditions_nodes(condition, nodes)

    self.neumann_conditions = None
    _neumann_indices = []

    # Replace empty sets with None
    for val in self._neumann_indices:
        if val.data.size > 0:
            _neumann_indices.append(val)
        else:
            _neumann_indices.append(None)
    self._neumann_indices = tuple(_neumann_indices)

    # Only create a Neumann condition if we have a stress condition
    # somewhere, on any of the procs.
    local_procs_has_neumann = np.zeros(uw.nProcs())
    global_procs_has_neumann = np.zeros(uw.nProcs())

    if self._neumann_indices != tuple([None] * Model.mesh.dim):
        local_procs_has_neumann[uw.rank()] = 1

    comm.Allreduce(local_procs_has_neumann, global_procs_has_neumann)
    comm.Barrier()

    if any(global_procs_has_neumann):
        self.neumann_conditions = uw.conditions.NeumannCondition(
            fn_flux=Model.tractionField,
            variable=Model.velocityField,
            indexSetsPerDof=self._neumann_indices)

    return self.neumann_conditions
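# A minimal standalone sketch (using mpi4py directly; the names here are
# illustrative, not Underworld API) of the collective pattern used above:
# each rank contributes a 0/1 flag and an Allreduce sums the flags into a
# buffer visible on every rank, so all procs agree on whether *any* rank
# holds a Neumann condition. The Allreduce must be called on all ranks,
# otherwise the code deadlocks.
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
local_flag = np.zeros(comm.Get_size())
global_flag = np.zeros(comm.Get_size())

has_local_condition = False  # hypothetical per-rank test
if has_local_condition:
    local_flag[comm.Get_rank()] = 1

comm.Allreduce(local_flag, global_flag)  # element-wise sum across ranks
anyone_has_condition = bool(global_flag.any())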
def PostGAEvent(category, action, label=None, value=None):
    """ Posts an Event Tracking message to Google Analytics.

    Full parameter reference may be found here:
    https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#ev

    Note, this function will return quietly on any errors.

    Parameters
    ----------
    category: str
        Textual name for event category.
    action: str
        Textual name for event action.
    label: str
        Optional label for event.
    value: non-negative integer
        Optional value for event.
    """
    try:
        connection = httplib.HTTPSConnection('www.google-analytics.com')
        form_fields = {
            "v": "1",                 # Version.
            "aip": "1",               # Enable IP anonymizing.
            "tid": GA_TRACKING_ID,    # Tracking ID / Web property / Property ID.
            "ds": "app",              # Data Source.
            "cid": GA_CLIENT_ID,      # Anonymous Client ID.
            "t": "event",             # Event hit type.
            "an": "underworld2",      # Application name.
            "av": uw.__version__,     # Application version.
            "ec": category,           # Event Category. Required.
            "ea": action,             # Event Action. Required.
            "el": label,              # Event label.
            "ev": value,              # Event value.
            "cm2": uw.nProcs(),       # Number of processes used. Stored into custom metric 2.
            "cd5": str(uw.nProcs()),  # Number of processes used. Stored into custom dimension 5. Not sure if necessary.
        }

        import os
        # add user id if set
        if "UW_USER_ID" in os.environ:
            form_fields["uid"] = os.environ["UW_USER_ID"]
            form_fields["cd4"] = os.environ["UW_USER_ID"]
        if "UW_MACHINE" in os.environ:
            form_fields["cd6"] = os.environ["UW_MACHINE"]

        params = urllib.urlencode(form_fields)
        connection.connect()
        connection.request(
            'POST', '/collect?%s' % params, '',
            {"Content-Type": "application/x-www-form-urlencoded"})
    except Exception:
        pass
def PostGAEvent( category, action, label=None, value=None ):
    """ Posts an Event Tracking message to Google Analytics.

    Full parameter reference may be found here:
    https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#ev

    Note, this function will return quietly on any errors.

    Parameters
    ----------
    category: str
        Textual name for event category.
    action: str
        Textual name for event action.
    label: str (optional)
        Optional label for event.
    value: non-negative integer (optional)
        Optional value for event.
    """
    try:
        connection = httplib.HTTPSConnection('www.google-analytics.com')
        form_fields = {
            "v"  : "1",              # Version.
            "aip": "1",              # Enable IP anonymizing.
            "tid": GA_TRACKING_ID,   # Tracking ID / Web property / Property ID.
            "ds" : "app",            # Data Source.
            "cid": GA_CLIENT_ID,     # Anonymous Client ID.
            "t"  : "event",          # Event hit type.
            "an" : "underworld2",    # Application name.
            "av" : uw.__version__,   # Application version.
            "ec" : category,         # Event Category. Required.
            "ea" : action,           # Event Action. Required.
            "el" : label,            # Event label.
            "ev" : value,            # Event value.
            "cm2": uw.nProcs(),      # Number of processes used. Stored into custom metric 2.
            "cd5": str(uw.nProcs()), # Number of processes used. Stored into custom dimension 5. Not sure if necessary.
        }
        import os
        # add user id if set
        if "UW_USER_ID" in os.environ:
            form_fields["uid"] = os.environ["UW_USER_ID"]
            form_fields["cd4"] = os.environ["UW_USER_ID"]

        params = urllib.urlencode(form_fields)
        connection.connect()
        connection.request('POST', '/collect?%s' % params, '',
                           { "Content-Type": "application/x-www-form-urlencoded" })
    except Exception:
        pass
def compute_signed_distance_v2(self, coords):
    # make sure this is called by all procs, including those
    # for which self is empty
    self.swarm.shadow_particles_fetch()

    if self.empty:
        return np.empty((0, 1)), np.empty(0, dtype="int")

    if uw.nProcs() == 1:
        fdirector = self.director.data
    else:
        fdirector = np.concatenate((self.director.data,
                                    self.director.data_shadow))

    # We need the neighbour indices p as well as the distances d.
    d, p = self.kdtree.query(coords, distance_upper_bound=self.thickness)

    fpts = np.where(~np.isinf(d))[0]

    vector = coords[fpts] - self.kdtree.data[p[fpts]]
    director = fdirector[p[fpts]]

    signed_distance = np.zeros((coords.shape[0], 1))
    # row-wise dot product of offset vectors with the directors
    sd = np.einsum('ij,ij->i', vector, director)
    signed_distance[fpts, 0] = sd[:]

    return signed_distance, fpts
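# A small self-contained sketch (plain scipy/numpy, no Underworld required)
# of the kdtree.query(..., distance_upper_bound=...) semantics that the
# functions above rely on: query points with no neighbour within the bound
# come back with d == inf and an index equal to the number of tree points,
# so d must be masked with the isinf filter before p is used to index any
# data array.
import numpy as np
from scipy.spatial import cKDTree

tree_points = np.array([[0.0, 0.0], [1.0, 0.0]])
tree = cKDTree(tree_points)

query_points = np.array([[0.1, 0.0], [5.0, 5.0]])
d, p = tree.query(query_points, distance_upper_bound=0.5)
# d -> [0.1, inf]; p -> [0, 2]  (2 == len(tree_points) flags "no neighbour")

fpts = np.where(~np.isinf(d))[0]  # only these rows of p are valid indices
nearest = tree_points[p[fpts]]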
def compute_normals_v2(self, coords):
    # make sure this is called by all procs, including those
    # for which self is empty
    self.swarm.shadow_particles_fetch()

    if self.empty:
        return np.empty((0, 2)), np.empty(0, dtype="int")

    d, p = self.kdtree.query(coords, distance_upper_bound=self.thickness)

    fpts = np.where(~np.isinf(d))[0]

    if uw.nProcs() == 1:
        fdirector = self.director.data
    else:
        fdirector = np.concatenate((self.director.data,
                                    self.director.data_shadow))

    # zero outside the region of interest
    director = np.zeros_like(coords)
    director[fpts] = fdirector[p[fpts]]

    return director, fpts
def compute_normals(self, coords, thickness=None):
    # make sure this is called by all procs, including those
    # for which self is empty
    self.swarm.shadow_particles_fetch()

    if thickness is None:
        thickness = self.thickness / 2.0

    if self.empty:
        return np.empty((0, 2)), np.empty(0, dtype="int")

    d, p = self.kdtree.query(coords, distance_upper_bound=thickness)

    fpts = np.where(~np.isinf(d))[0]

    # Gather director data: local only, shadow only, or both,
    # depending on what this proc holds.
    if uw.nProcs() == 1 or self.director.data_shadow.shape[0] == 0:
        fdirector = self.director.data
    elif self.director.data.shape[0] == 0:
        fdirector = self.director.data_shadow
    else:
        fdirector = np.concatenate(
            (self.director.data, self.director.data_shadow))

    # zero outside the region of interest
    director = np.zeros_like(coords)
    director[fpts] = fdirector[p[fpts]]

    return director, fpts
def compute_signed_distance(self, coords, distance=None):
    # make sure this is called by all procs, including those
    # which are empty; this can be important for parallel runs
    #self.swarm.shadow_particles_fetch()

    # Always need to call self.data on all procs
    all_particle_coords = self.data

    if not distance:
        distance = self.thickness / 2.0

    # This hands back an empty array with the right shape
    # to be used as an empty mask
    if self.empty:
        return np.empty((0, 1)), np.empty(0, dtype="int")

    # There are a number of cases to consider (there are probably more
    # compact ways to do this). Serial is trivial. For parallel, there may
    # be data on the local processor, in the shadow zone, or any
    # combination of the two. As long as fdirector has the same shape as
    # self.kdtree.data, this will work.
    if uw.nProcs() == 1 or self.director.data_shadow.shape[0] == 0:
        fdirector = self.director.data
    elif self.director.data.shape[0] == 0:
        fdirector = self.director.data_shadow
    else:
        # in this case both are non-empty
        fdirector = np.concatenate(
            (self.director.data, self.director.data_shadow))

    d, p = self.kdtree.query(coords, distance_upper_bound=distance)

    fpts = np.where(~np.isinf(d))[0]

    # This is a bit sneaky: p[fpts] may be longer than fdirector, but as a
    # fancy index it expands the result, so
    # (fdirector[p[fpts]]).shape == vector.shape.
    director = fdirector[p[fpts]]
    vector = coords[fpts] - all_particle_coords[p[fpts]]

    signed_distance = np.empty((coords.shape[0], 1))
    signed_distance[...] = np.inf

    # row-wise dot product
    sd = np.einsum('ij,ij->i', vector, director)
    signed_distance[fpts, 0] = sd[:]

    return signed_distance, fpts
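# A tiny numeric check (pure numpy) of the np.einsum('ij,ij->i', ...) idiom
# used for the row-wise dot product above: each row of `vector` is dotted
# with the matching row of `director`, giving one signed projection per
# point. The sign indicates which side of the interface a point sits on.
import numpy as np

vector = np.array([[1.0, 0.0], [0.0, 2.0]])      # offsets to nearest particle
director = np.array([[1.0, 0.0], [0.0, -1.0]])   # interface normals there
sd = np.einsum('ij,ij->i', vector, director)
# sd -> [1., -2.]: positive on the director side, negative on the other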
def compute_signed_proximity(self, coords, sign='positive'):
    """
    Build a mask of values for points within the influence zone.
    """
    if self.empty:
        return np.empty((0, 1)), np.empty(0, dtype="int")

    d, p = self.kdtree.query(coords, distance_upper_bound=self.thickness)

    fpts = np.where(~np.isinf(d))[0]

    # get the initial proximity on both sides
    self.swarm.shadow_particles_fetch()

    if uw.nProcs() == 1:
        fdirector = self.director.data
    else:
        fdirector = np.concatenate((self.director.data,
                                    self.director.data_shadow))

    director = fdirector[p[fpts]]
    vector = coords[fpts] - self.kdtree.data[p[fpts]]

    # sentinel value outside the influence zone
    signed_distance = np.empty((coords.shape[0], 1))
    signed_distance[...] = -999999.

    sd = np.einsum('ij,ij->i', vector, director)
    signed_distance[fpts, 0] = sd[:]

    # the 'signed' proximity
    if sign == 'positive':
        fpts = fpts[sd > 0.]
    elif sign == 'negative':
        fpts = fpts[sd < 0.]
    else:
        raise ValueError("sign must be one of 'positive', 'negative'")

    proximity = np.zeros((coords.shape[0], 1))
    proximity[fpts] = self.ID

    return proximity, fpts
def _sendData():
    # disable collection of data if requested
    if "UW_NO_USAGE_METRICS" not in os.environ:
        # get platform info
        import platform
        label = platform.system()
        label += "__" + platform.release()

        # check if running in docker
        import os.path
        if os.path.isfile("/.dockerinit"):
            label += "__docker"

        # send info asynchronously
        import threading
        thread = threading.Thread(target=_net.PostGAEvent,
                                  args=("runtime", "import", label,
                                        underworld.nProcs()))
        thread.daemon = True
        thread.start()
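# Usage note: judging from the check above, usage metrics can be disabled by
# defining UW_NO_USAGE_METRICS in the environment before underworld is
# imported; only the variable's presence is tested, not its value.
import os
os.environ["UW_NO_USAGE_METRICS"] = "1"  # any value works
import underworld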
def init_stokes_system(self):
    conditions = self._velocityBCs
    self._set_density()

    self.buoyancyFn = self.densityFn * self.gravity

    if any([material.viscosity for material in self.materials]):
        self._set_viscosity()

        stokes = uw.systems.Stokes(velocityField=self.velocity,
                                   pressureField=self.pressure,
                                   conditions=conditions,
                                   fn_viscosity=self.viscosityFn,
                                   fn_bodyforce=self.buoyancyFn)

        self.solver = uw.systems.Solver(stokes)

        # "mumps" is a parallel direct solve; "lu" is a serial direct solve
        if uw.nProcs() > 1:
            self.solver.set_inner_method("mumps")
        else:
            self.solver.set_inner_method("lu")
        self.solver.set_penalty(1e6)
def compute_signed_distance(self, coords, distance=None):
    # make sure this is called by all procs, including those
    # which are empty; this can be important for parallel runs
    self.swarm.shadow_particles_fetch()

    if not distance:
        distance = self.thickness

    if self.empty:
        return np.empty((0, 1)), np.empty(0, dtype="int")

    # Gather director data: local only, shadow only, or both,
    # depending on what this proc holds.
    if uw.nProcs() == 1 or self.director.data_shadow.shape[0] == 0:
        fdirector = self.director.data
    elif self.director.data.shape[0] == 0:
        fdirector = self.director.data_shadow
    else:
        fdirector = np.concatenate((self.director.data,
                                    self.director.data_shadow))

    d, p = self.kdtree.query(coords, distance_upper_bound=distance)

    fpts = np.where(~np.isinf(d))[0]

    director = fdirector[p[fpts]]
    vector = coords[fpts] - self.kdtree.data[p[fpts]]

    signed_distance = np.empty((coords.shape[0], 1))
    signed_distance[...] = np.inf

    sd = np.einsum('ij,ij->i', vector, director)
    signed_distance[fpts, 0] = sd[:]

    return signed_distance, fpts
def compute_signed_distance(self, coords, distance=None):
    # make sure this is called by all procs, including those
    # which are empty
    if not distance:
        distance = self.thickness

    if self.empty:
        return np.empty((0, 1)), np.empty(0, dtype="int")

    # Gather director data: local only, shadow only, or both,
    # depending on what this proc holds.
    if uw.nProcs() == 1 or self.director.data_shadow.shape[0] == 0:
        fdirector = self.director.data
    elif self.director.data.shape[0] == 0:
        fdirector = self.director.data_shadow
    else:
        fdirector = np.concatenate((self.director.data,
                                    self.director.data_shadow))

    d, p = self.kdtree.query(coords, distance_upper_bound=distance)

    fpts = np.where(~np.isinf(d))[0]

    director = fdirector[p[fpts]]
    vector = coords[fpts] - self.kdtree.data[p[fpts]]

    signed_distance = np.empty((coords.shape[0], 1))
    signed_distance[...] = np.inf

    sd = np.einsum('ij,ij->i', vector, director)
    signed_distance[fpts, 0] = sd[:]

    return signed_distance, fpts
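# The local/shadow concatenation above is repeated in several of these
# functions. A hypothetical helper (a sketch, not existing Underworld API)
# could factor out the three cases: shadow data empty, local data empty, or
# both populated. Keeping the result the same length as self.kdtree.data is
# what makes the later fancy indexing with p[fpts] safe.
import numpy as np

def _gather_director(director, nprocs):
    """Return local plus shadow director data as a single array."""
    if nprocs == 1 or director.data_shadow.shape[0] == 0:
        return director.data
    if director.data.shape[0] == 0:
        return director.data_shadow
    return np.concatenate((director.data, director.data_shadow))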
#
# In this example, no buoyancy forces are considered. However, to establish
# an appropriate pressure gradient in the material, it would normally be
# useful to map density from material properties and create a buoyancy force.

# In[43]:

stokes = uw.systems.Stokes(velocityField=velocityField,
                           pressureField=pressureField,
                           conditions=velocityBCs,
                           fn_viscosity=backgroundViscosityFn,
                           _fn_viscosity2=secondViscosityFn,
                           _fn_director=directorVector)

solver = uw.systems.Solver(stokes)

# Use the "lu" direct solve and a large penalty if running in serial;
# "mumps" is a good parallel alternative to "lu".
if uw.nProcs() == 1:
    solver.set_inner_method("lu")
else:
    solver.set_inner_method("mumps")

solver.set_penalty(1.0e7)
solver.options.scr.ksp_type = "cg"
solver.options.scr.ksp_rtol = 1.0e-4

# In[44]:

#solver.solve(nonLinearIterate=True, nonLinearMaxIterations=20)
def PostGAEvent( category, action, label=None, value=None ):
    """ Posts an Event Tracking message to Google Analytics.

    Currently, Underworld2 only dispatches a GA event when the underworld
    module is imported. This is effected by calling this function from
    underworld/__init__.py.

    Google Analytics uses the client id (GA_CLIENT_ID) to determine unique
    users. In Underworld, we generate a random string for this id and record
    it in _uwid.py (the value is available via the uw._id attribute). If the
    file (_uwid.py) exists, it is not recreated, so generally it will only be
    created the first time you build underworld. As this is a 'per build'
    identifier, it means that all users of a particular docker image will be
    identified as the same GA user. Likewise, all users of a particular HPC
    Underworld module will also be identified as the same GA user. Note that
    users are able to set the UW_USER_ID environment variable, which
    overrides the randomly generated string, though this is probably of
    limited use.

    Regarding HPC usage, it seems that the compute nodes on most machines
    are closed to external network access, and hence GA analytics will not
    be dispatched successfully. Unfortunately this means that most high proc
    count simulations will not be captured in GA data.

    GA also reports on the number of 'sessions'. A single session in GA is
    considered to be usage where consecutive events occur within 30 minutes
    of each other. So if you `import underworld` 5 times every 29 minutes,
    it will count as a single session. However, if you `import underworld`
    5 times every 31 minutes, it will count as 5 sessions.

    Full GA parameter reference may be found here:
    https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters#ev

    Note, this function will return quietly on any errors.

    Parameters
    ----------
    category: str
        Textual name for event category.
    action: str
        Textual name for event action.
    label: str
        Optional label for event.
    value: non-negative integer
        Optional value for event.

    We add the following test here to ensure we detect when we're running
    from doctests, so that we avoid dispatching metrics.

    >>> print('Running in doctest? {}'.format(uw._in_doctest()))
    Running in doctest? True

    """
    try:
        connection = httplib.HTTPSConnection('www.google-analytics.com')
        form_fields = {
            "v"  : "1",              # Version.
            "aip": "1",              # Enable IP anonymizing.
            "tid": GA_TRACKING_ID,   # Tracking ID / Web property / Property ID.
            "ds" : "app",            # Data Source.
            "cid": GA_CLIENT_ID,     # Anonymous Client ID.
            "t"  : "event",          # Event hit type.
            "an" : "underworld2",    # Application name.
            "av" : uw.__version__,   # Application version.
            "ec" : category,         # Event Category. Required.
            "ea" : action,           # Event Action. Required.
            "el" : label,            # Event label.
            "ev" : value,            # Event value.
            "cm2": uw.nProcs(),      # Number of processes used. Stored into custom metric 2.
            "cd5": str(uw.nProcs()), # Number of processes used. Stored into custom dimension 5. Not sure if necessary.
        }
        import os
        # add user id if set
        if "UW_USER_ID" in os.environ:
            form_fields["uid"] = os.environ["UW_USER_ID"]
            form_fields["cd4"] = os.environ["UW_USER_ID"]
        if "UW_MACHINE" in os.environ:
            form_fields["cd6"] = os.environ["UW_MACHINE"]

        params = urllib.urlencode(form_fields)
        connection.connect()
        connection.request('POST', '/collect?%s' % params, '',
                           { "Content-Type": "application/x-www-form-urlencoded" })
    except Exception:
        pass
# also create one to write particle element counts
dset_counts = f.create_dataset('counts', (mesh.elementsGlobal,), dtype='i')

# get counts
el_index, counts = np.unique(origOwningEl.data[:, 0], return_counts=True)
for element_gId, el_count in zip(el_index, counts):
    dset_counts[element_gId] = el_count

if len(origCreatingProc.data_shadow) != 0:
    raise RuntimeError("The shadow data should be empty at this stage, but isn't. Hmm...")

# get shadow particles!!
swarm.shadow_particles_fetch()
if len(origCreatingProc.data_shadow) == 0 and (uw.nProcs() > 1):
    raise RuntimeError("The shadow data should be populated at this stage, but isn't. Hmm...")

# now check that communicated particles contain the required data.
# first create local numpy copies of the primary data in memory,
# as h5py has limitations in the way you can index its arrays
dset_numpy_data = np.array(dset_data)
if not (dset_numpy_data[origCreatingProc.data_shadow[:, 0],
                        origParticleIndex.data_shadow[:, 0]] == randomNumber.data_shadow[:, 0]).all():
    raise RuntimeError("Shadow particle data does not appear to be correct.")

# also check that we have the correct particle counts
# get counts
el_index, counts = np.unique(origOwningEl.data_shadow[:, 0], return_counts=True)
# again create a copy for ease of indexing
dset_numpy_counts = np.array(dset_counts)
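# A numpy-only illustration of the counting idiom above: np.unique with
# return_counts=True pairs each owning-element id with the number of
# particles that landed in it, ready to be scattered into the global
# 'counts' dataset.
import numpy as np

owning_el = np.array([3, 1, 3, 3, 7])  # hypothetical per-particle element ids
el_index, counts = np.unique(owning_el, return_counts=True)
# el_index -> [1, 3, 7]; counts -> [1, 3, 1]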
# \end{equation}
#
# The yield strength described above needs to be evaluated on the fly at the
# particles (integration points). It therefore needs to be a function composed
# of mesh variables, swarm variables, constants, and mathematical operations.

# In[19]:

# Create a filtered random signal on the mesh
np.random.seed(22)
randomField = uw.mesh.MeshVariable(mesh=mesh, nodeDofCount=1)
randomField.data[:, 0] = np.random.rand(randomField.data.shape[0])

# this only works in serial at the moment
if uw.nProcs() == 1:
    from scipy.ndimage.filters import gaussian_filter
    rfdata = randomField.data.copy()
    rfdata = rfdata.reshape(2 * mesh.elementRes[1] + 1,
                            2 * mesh.elementRes[0] + 1)
    filt = gaussian_filter(rfdata, sigma=md.pertSig)
    randomField.data[:, 0] = filt.reshape(randomField.data.shape[0])

    # normalise the filtered signal
    randomField.data[:, 0] -= randomField.data[:].min()
    randomField.data[:, 0] /= randomField.data[:].max()

# In[20]:
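# A minimal serial sketch (plain numpy/scipy, no mesh required) of the
# smoothing pattern above: filter a random field on a regular grid, then
# rescale it into [0, 1]. The grid shape used in the cell above
# (2*elementRes + 1 nodes per axis) is assumed to match the mesh variable;
# here any shape works.
import numpy as np
from scipy.ndimage import gaussian_filter

np.random.seed(22)
grid = np.random.rand(33, 65)        # e.g. a (2*16+1) x (2*32+1) nodal grid
smoothed = gaussian_filter(grid, sigma=2.0)

# normalise the filtered signal to [0, 1]
smoothed -= smoothed.min()
smoothed /= smoothed.max()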
# also create one to write particle element counts
dset_counts = f.create_dataset('counts', (mesh.elementsGlobal, ), dtype='i')

# get counts
el_index, counts = np.unique(origOwningEl.data[:, 0], return_counts=True)
for element_gId, el_count in zip(el_index, counts):
    dset_counts[element_gId] = el_count

if len(origCreatingProc.data_shadow) != 0:
    raise RuntimeError(
        "The shadow data should be empty at this stage, but isn't. Hmm...")

# get shadow particles!!
swarm.shadow_particles_fetch()
if len(origCreatingProc.data_shadow) == 0 and (uw.nProcs() > 1):
    raise RuntimeError(
        "The shadow data should be populated at this stage, but isn't. Hmm...")

# now check that communicated particles contain the required data.
# first create local numpy copies of the primary data in memory,
# as h5py has limitations in the way you can index its arrays
dset_numpy_data = np.array(dset_data)
if not (dset_numpy_data[origCreatingProc.data_shadow[:, 0],
                        origParticleIndex.data_shadow[:, 0]]
        == randomNumber.data_shadow[:, 0]).all():
    raise RuntimeError("Shadow particle data does not appear to be correct.")

# also check that we have the correct particle counts
# get counts
el_index, counts = np.unique(origOwningEl.data_shadow[:, 0],
                             return_counts=True)
# This value seems to provide good results for a square mesh:
# we end up with at least one node in every element lying on the interface.
ds = 0.35 * (mesh.maxCoord[1] - mesh.minCoord[1]) / mesh.elementRes[1]

# We do this in two parts, as signed distance and the kdtree work a bit differently.
dN, pIgnore = marker.compute_signed_distance(mesh.data, distance=ds)
nearbyNodesMask = np.where(np.abs(dN) < ds)[0]
signedDists = dN[nearbyNodesMask]

dIgnore, pN = marker.kdtree.query(mesh.data, distance_upper_bound=ds)
nearbyNodesMask2 = np.where(dIgnore != np.inf)
markerIndexes = pN[nearbyNodesMask2]

# Gather director data: local only, shadow only, or both,
# depending on what this proc holds.
if uw.nProcs() == 1 or marker.director.data_shadow.shape[0] == 0:
    allDirector = marker.director.data
elif marker.director.data.shape[0] == 0:
    allDirector = marker.director.data_shadow
else:
    allDirector = np.concatenate(
        (marker.director.data, marker.director.data_shadow))

nodeDs = allDirector[markerIndexes] * signedDists
nodeAdjust = mesh.data[nearbyNodesMask] - nodeDs

# Hacky test to make sure no node displacement is greater than ds.
assert np.allclose(np.floor(np.linalg.norm(nodeDs / ds, axis=1)), 0.)
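# A quick standalone check (pure numpy) of the displacement-bound test above:
# norm(nodeDs)/ds < 1 for every adjusted node means floor() of each ratio is
# 0, so allclose(..., 0.) passes exactly when no node has moved farther
# than ds.
import numpy as np

ds = 0.1
nodeDs = np.array([[0.05, 0.0], [0.0, -0.08]])  # hypothetical displacements
ratios = np.linalg.norm(nodeDs / ds, axis=1)    # -> [0.5, 0.8]
assert np.allclose(np.floor(ratios), 0.)        # all displacements < ds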