def add_polyline(
    self,
    name,
    points,
    ply_id=None,
    epsilon=None,
    ply_type=None,
    mat_group=None,
    point_vector=None,
    closed=False,
    decimals=4,
):
    """
    Add a polyline to the gli.

    Parameters
    ----------
    name : str
        Name of the new polyline.
    points : ndarray
        Array with the polyline points. Either of shape (n, 3) to add new
        points by their coordinates, or a 1D array of point IDs or point
        names referring to existing points.
    ply_id : int or None, optional
        Default: None
    epsilon : float or None, optional
        Default: None
    ply_type : int or None, optional
        Default: None
    mat_group : int or None, optional
        Default: None
    point_vector : str or None, optional
        Default: None
    closed : bool, optional
        If the polyline shall be closed, the first point will be added
        again as the last point. Default: False
    decimals : int, optional
        Number of decimal places to round the added points to (default: 4).
        If decimals is negative, it specifies the number of positions to
        the left of the decimal point.
        This will not round the new points, it is just for comparison with
        the already present points to guarantee uniqueness.
    """
    points = np.asanyarray(points)
    name = str(name)
    # keep a copy of the current state to restore it if the polyline is invalid
    safe_dict = self()
    if name in self.POLYLINE_NAMES:
        print("gli.add_polyline: Polyline-name already present!")
        return
    # add by point IDs
    if (
        np.issubdtype(points.dtype, np.integer)
        and points.ndim == 1
        and points.shape[0] >= 2
        and np.min(points) >= 0
        and np.max(points) < self.POINT_NO
    ):
        if closed:
            points = np.hstack((points, points[0]))
        new_ply = {
            "NAME": name,
            "POINTS": points,
            "ID": ply_id,
            "EPSILON": epsilon,
            "TYPE": ply_type,
            "MAT_GROUP": mat_group,
            "POINT_VECTOR": point_vector,
        }
    # add by point names
    elif (
        is_str_array(points)
        and points.ndim == 1
        and points.shape[0] >= 2
        and all(str(pnt) in self.POINT_NAMES for pnt in points)
    ):
        if closed:
            points = np.hstack((points, points[0]))
        # translate the given point names to point IDs
        # see: https://stackoverflow.com/a/32191125/6696397
        points = np.array(
            [np.where(self.POINT_NAMES == str(pnt))[0][0] for pnt in points],
            dtype=int,
        )
        new_ply = {
            "NAME": name,
            "POINTS": points,
            "ID": ply_id,
            "EPSILON": epsilon,
            "TYPE": ply_type,
            "MAT_GROUP": mat_group,
            "POINT_VECTOR": point_vector,
        }
    # add by coordinates
    elif points.ndim == 2 and points.shape[0] >= 2 and points.shape[1] == 3:
        if closed:
            points = np.vstack((points, points[0]))
        # add only the unique new points and map the polyline to their IDs
        unique_pnt, __, ixr = unique_rows(points, decimals=decimals)
        new_pos = self.add_points(unique_pnt, decimals=decimals)
        new_points = replace(ixr, np.arange(unique_pnt.shape[0]), new_pos)
        new_ply = {
            "NAME": name,
            "POINTS": new_points,
            "ID": ply_id,
            "EPSILON": epsilon,
            "TYPE": ply_type,
            "MAT_GROUP": mat_group,
            "POINT_VECTOR": point_vector,
        }
    else:
        print("gli.add_polyline: Polyline-points not valid!")
        return
    # add the new polyline and restore the old state if it is not valid
    self.__dict["polylines"].append(new_ply)
    if not check_polyline(new_ply, self.POINT_NO, verbose=False):
        print("gli.add_polyline: Polyline not valid!")
        self.__dict = safe_dict
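A minimal usage sketch for the coordinate branch of add_polyline. It assumes these methods belong to an ogs5py GLI file instance, here called "gli", and the polyline name "rectangle" is made up for illustration; the coordinates are routed through add_points, so the stored polyline ends up holding point IDs.

import numpy as np

rect = np.array(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
)
# add a closed rectangle by coordinates; the four corners are added to the
# point list first and the polyline then refers to their IDs
gli.add_polyline("rectangle", rect, closed=True)
# the same polyline could also be given by existing point IDs, e.g.
# gli.add_polyline("rect_by_id", [0, 1, 2, 3], closed=True)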
def combine(mesh_1, mesh_2, decimals=4, fast=True):
    """
    Combine mesh_1 and mesh_2 into one single mesh.

    The node list will be updated to eliminate duplicates.
    Element intersections are not checked.

    Parameters
    ----------
    mesh_1, mesh_2 : dict
        dictionaries that contain one '#FEM_MSH' block each
        with the following information
            mesh_data : dict
                containing optional information about
                - AXISYMMETRY (bool)
                - CROSS_SECTION (bool)
                - PCS_TYPE (str)
                - GEO_TYPE (str)
                - GEO_NAME (str)
                - LAYER (int)
            nodes : ndarray
                Array with all node positions
            elements : dict
                contains array of nodes for elements sorted by element types
            material_id : dict
                contains material ids for each element sorted by element types
            element_id : dict
                contains element ids for each element sorted by element types
    decimals : int, optional
        Number of decimal places to round the nodes to (default: 4).
        This will not round the output, it is just for comparison of the
        node vectors.
    fast : bool, optional
        If fast is True, the vector comparison is executed by a decimal
        comparison. If fast is False, all pairwise distances are calculated.
        Default: True

    Returns
    -------
    out : dict
        dictionary containing one '#FEM_MSH' block of the mesh file
        with the following information
            mesh_data : dict
                taken from mesh_1
            nodes : ndarray
                Array with all unique node positions
            elements : dict
                contains array of nodes for elements sorted by element types
            material_id : dict
                contains material ids for each element sorted by element types
            element_id : dict
                contains element ids for each element sorted by element types
    """
    # random shift to prevent numerical errors from decimal rounding
    shift = np.random.rand(3)
    shift_mesh(mesh_1, shift)
    shift_mesh(mesh_2, shift)
    # combine the node lists and make them unique
    nodes, __, ixr = unique_rows(
        np.vstack((mesh_1["nodes"], mesh_2["nodes"])),
        decimals=decimals,
        fast=fast,
    )
    node_id_repl = range(len(ixr))
    node_offset = mesh_1["nodes"].shape[0]
    elements = {}
    material_id = {}
    element_id = {}
    offset = no_of_elements(mesh_1)
    # combine the element lists and replace the new node IDs
    for elem in ELEM_NAMES:
        if elem not in mesh_1["elements"] and elem not in mesh_2["elements"]:
            continue
        # element type only present in mesh_1
        elif elem in mesh_1["elements"] and elem not in mesh_2["elements"]:
            tmp = dcp(mesh_1["elements"][elem])
            elements[elem] = replace(tmp, node_id_repl, ixr)
            material_id[elem] = mesh_1["material_id"][elem]
            element_id[elem] = mesh_1["element_id"][elem]
        # element type only present in mesh_2
        elif elem not in mesh_1["elements"] and elem in mesh_2["elements"]:
            tmp = mesh_2["elements"][elem] + node_offset
            elements[elem] = replace(tmp, node_id_repl, ixr)
            material_id[elem] = mesh_2["material_id"][elem]
            element_id[elem] = mesh_2["element_id"][elem] + offset
        # element type present in both meshes
        else:
            tmp = np.vstack((
                mesh_1["elements"][elem],
                mesh_2["elements"][elem] + node_offset,
            ))
            elements[elem] = replace(tmp, node_id_repl, ixr)
            material_id[elem] = np.hstack((
                mesh_1["material_id"][elem],
                mesh_2["material_id"][elem],
            ))
            element_id[elem] = np.hstack((
                mesh_1["element_id"][elem],
                mesh_2["element_id"][elem] + offset,
            ))
    # create the output dict
    out = {
        "mesh_data": mesh_1["mesh_data"],
        "nodes": nodes,
        "elements": elements,
        "material_id": material_id,
        "element_id": element_id,
    }
    # shift the meshes back to their original position
    shift_mesh(out, -shift)
    shift_mesh(mesh_1, -shift)
    shift_mesh(mesh_2, -shift)
    return out
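A small sketch of how combine can be used, built only from the dict structure documented above. The two hand-made single-element meshes share one node, so the merged node list drops the duplicate and the second mesh's element connectivity is remapped. The element-type key "line" is an assumption based on typical OGS element names (ELEM_NAMES); the rest uses nothing beyond this module.

import numpy as np

mesh_a = {
    "mesh_data": {},
    "nodes": np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]),
    "elements": {"line": np.array([[0, 1]])},
    "material_id": {"line": np.array([0])},
    "element_id": {"line": np.array([0])},
}
mesh_b = {
    "mesh_data": {},
    "nodes": np.array([[1.0, 0.0, 0.0], [2.0, 0.0, 0.0]]),
    "elements": {"line": np.array([[0, 1]])},
    "material_id": {"line": np.array([0])},
    "element_id": {"line": np.array([0])},
}
merged = combine(mesh_a, mesh_b)
# merged["nodes"] now holds 3 unique nodes instead of 4, and the line
# elements of mesh_b reference the remapped node IDs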
def add_points(self, points, names=None, md=None, decimals=4):
    """
    Add a list of points (ndarray with shape (n, 3)).

    Keeps the point list unique. If an added named point is already
    present, it will be renamed to the new name. The same applies to md.
    The point lists of the polylines will be updated accordingly.

    Parameters
    ----------
    points : ndarray
        Array with new points.
    names : ndarray of str or None, optional
        Array containing the names. If None, all new points are unnamed.
        Default: None
    md : ndarray of float or None, optional
        Array containing the material density. If None, all new points
        will have unspecified md. Default: None
    decimals : int, optional
        Number of decimal places to round the added points to (default: 4).
        If decimals is negative, it specifies the number of positions to
        the left of the decimal point.
        This will not round the new points, it is just for comparison with
        the already present points to guarantee uniqueness.

    Returns
    -------
    new_pos : ndarray
        Array with the IDs of the added points in the point list of the gli.
    """
    # check if the given points, names and MDs are valid
    points = np.array(points, dtype=float, ndmin=2)
    if names is not None:
        names = np.array(names, dtype=object, ndmin=1)
        if points.shape[0] != names.shape[0]:
            print("gli.add_points: Given names are not valid!")
            return np.zeros(0)
    if md is not None:
        md = np.array(md, dtype=float, ndmin=1)
        if points.shape[0] != md.shape[0]:
            print("gli.add_points: Given MDs are not valid!")
            return np.zeros(0)
    if points.shape[1] != 3:
        print("gli.add_points: Given points are not valid!")
        return np.zeros(0)
    # check if the given points are unique among themselves
    check_points, __, __ = unique_rows(points, decimals=decimals)
    if check_points.shape[0] != points.shape[0]:
        print("gli.add_points: Given points are not unique!")
        return np.zeros(0)
    # workaround, if no points are present yet
    if self.POINT_NO == 0:
        self.POINTS = np.empty((0, 3), dtype=float)
        self.POINT_NAMES = np.empty(0, dtype=object)
        self.POINT_MD = np.empty(0, dtype=float)
    # combine the old and new points and make them unique
    new_points = np.vstack((self.POINTS, points))
    new_points, __, ixr = unique_rows(new_points, decimals=decimals)
    old_pos = ixr[:self.POINT_NO]
    new_pos = ixr[self.POINT_NO:]
    # set the new names
    new_names = np.array(new_points.shape[0] * [""], dtype=object)
    new_names[old_pos] = self.POINT_NAMES
    if names is not None:
        new_names[new_pos] = names
    # set the new MDs (unspecified MDs are stored as -inf)
    new_md = -np.inf * np.ones(new_points.shape[0], dtype=float)
    new_md[old_pos] = self.POINT_MD
    if md is not None:
        new_md[new_pos] = md
    # reset the point IDs within the polylines
    for ply_i in range(self.POLYLINE_NO):
        self.__dict["polylines"][ply_i]["POINTS"] = replace(
            self.__dict["polylines"][ply_i]["POINTS"],
            np.arange(self.POINT_NO),
            old_pos,
        )
    # set the new points
    self.POINTS = new_points
    self.POINT_NAMES = new_names
    self.POINT_MD = new_md
    # return the new positions of the added points
    return new_pos
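A hedged usage sketch for add_points, again assuming an ogs5py GLI instance named "gli"; the point names "origin" and "center" are made up for illustration.

import numpy as np

new_ids = gli.add_points(
    np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.0]]),
    names=["origin", "center"],
)
# new_ids holds the positions of the two points in gli.POINTS;
# a point that coincides (within "decimals") with an existing one is not
# duplicated, it is only renamed if a new name is given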