def __init__(self, mesh):
    guicommon.Object.__init__(self, mesh)
    self.hasWarpTargets = False

    self.MIN_AGE = 1.0
    self.MAX_AGE = 90.0
    self.MID_AGE = 25.0

    self.mesh.setCameraProjection(0)
    self.mesh.setPickable(True)
    self.setShadeless(0)
    self.setCull(1)
    self.meshData = self.mesh

    self.maskFaces()

    self._resetProxies()

    self.targetsDetailStack = {}  # All details targets applied, with their values
    self.symmetryModeEnabled = False

    self.setDefaultValues()

    self.bodyZones = ['l-eye', 'r-eye', 'jaw', 'nose', 'mouth', 'head', 'neck',
                      'torso', 'hip', 'pelvis', 'r-upperarm', 'l-upperarm',
                      'r-lowerarm', 'l-lowerarm', 'l-hand', 'r-hand',
                      'r-upperleg', 'l-upperleg', 'r-lowerleg', 'l-lowerleg',
                      'l-foot', 'r-foot', 'ear']

    self.material = material.fromFile(getSysDataPath('skins/default.mhmat'))
    self._defaultMaterial = material.Material().copyFrom(self.material)

    self._modifiers = dict()
    self._modifier_varMapping = dict()          # Maps macro variable to the modifier group that modifies it
    self._modifier_dependencyMapping = dict()   # Maps a macro variable to all the modifiers that depend on it
    self._modifier_groups = dict()
    self._modifier_type_cache = dict()

    self.blockEthnicUpdates = False             # When set to True, changes to race are not normalized automatically

    animation.AnimatedMesh.__init__(self, skel=None, mesh=self.meshData, vertexToBoneMapping=None)

    # Make sure that shadow vertices are copied
    self.refreshStaticMeshes()
def input_mesh(mesh_file):
    with open(mesh_file) as f:
        lines = f.readlines()

    nnode, nelem, nmaterial, dof = [int(s) for s in lines[0].split()]
    irec = 1

    nodes = [None] * nnode
    for inode in range(nnode):
        items = lines[inode + irec].split()     # from the 2nd line of mesh.in onward
        id = int(items[0])
        xyz = np.array([float(s) for s in items[1:3]])      # two components of the node coordinates
        freedom = np.array([int(s) for s in items[3:]])
        nodes[inode] = node.Node(id, xyz, freedom)
    irec += nnode       # advance irec to 1 + nnode

    elements = [None] * nelem
    for ielem in range(nelem):
        items = lines[ielem + irec].split()     # from line 1 + nnode of mesh.in onward
        id = int(items[0])
        style = items[1]
        material_id = int(items[2])
        inode = np.array([int(s) for s in items[3:]])
        elements[ielem] = element.Element(id, style, material_id, inode)
    irec += nelem

    materials = [None] * nmaterial
    for imaterial in range(nmaterial):
        items = lines[imaterial + irec].split()
        id = int(items[0])
        style = items[1]
        param = np.array([float(s) for s in items[2:]])
        materials[imaterial] = material.Material(id, style, param)

    return fem.Fem(dof, nodes, elements, materials)
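A minimal sketch of the whitespace-delimited layout that input_mesh() appears to expect, inferred from how the parser above slices each record; the style names and numeric values below are placeholders, not taken from any real input file.

# mesh.in layout (hypothetical example, inferred from input_mesh):
#   line 1:                 nnode nelem nmaterial dof
#   next nnode lines:       node_id  x  y  dof_flag ...
#   next nelem lines:       elem_id  style  material_id  node_id ...
#   next nmaterial lines:   material_id  style  param ...
#
# 2 1 1 2
# 0   0.0 0.0   1 1
# 1   1.0 0.0   1 1
# 0   line2  0   0 1
# 0   vs  1.0 0.25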
def __init__(self, file, type, human):
    log.debug("Loading proxy file: %s.", file)
    import makehuman

    name = os.path.splitext(os.path.basename(file))[0]
    self.name = name.capitalize().replace(" ", "_")
    self.type = type
    self.object = None
    self.human = human
    if not human:
        raise RuntimeError("Proxy constructor expects a valid human object.")
    self.file = file
    if file:
        self.mtime = os.path.getmtime(file)
    else:
        self.mtime = None
    self.uuid = None
    self.basemesh = makehuman.getBasemeshVersion()
    self.tags = []

    self.ref_vIdxs = None       # (Vidx1, Vidx2, Vidx3) list with references to human vertex indices, indexed by proxy vert
    self.weights = None         # (w1, w2, w3) list, with weights per human vertex (mapped by ref_vIdxs), indexed by proxy vert
    self.vertWeights = {}       # (proxy-vert, weight) list for each parent vert (reverse mapping of self.weights, indexed by human vertex)
    self.offsets = None         # (x, y, z) list of vertex offsets, indexed by proxy vert

    self.tmatrix = TMatrix()    # Offset transformation matrix. Replaces scale

    self.z_depth = -1           # Render order depth for the proxy object. Also used to determine which proxy object should mask others (delete faces)
    self.max_pole = None        # Signifies the maximum number of faces per vertex on the mesh topology. Set to None for default.

    self.uvLayers = {}

    self.material = material.Material(self.name)

    self._obj_file = None
    self._vertexgroup_file = None   # TODO document, is this still used?
    self.vertexGroups = None
    self._material_file = None

    self.deleteVerts = np.zeros(len(human.meshData.coord), bool)
def parse_f(self, args):
    if (len(self.tex_coords) > 1) and (len(self.normals) == 1):
        # does the spec allow for texture coordinates without normals?
        # if we allow this condition, the user will get a black screen
        # which is really confusing
        raise PywavefrontException('Found texture coordinates, but no normals')

    if self.mesh is None:
        self.mesh = mesh.Mesh()
        self.wavefront.add_mesh(self.mesh)
    if self.material is None:
        self.material = material.Material()
    self.mesh.add_material(self.material)

    # For fan triangulation, remember first and latest vertices
    v1 = None
    vlast = None
    points = []
    for i, v in enumerate(args[0:]):
        v_index, t_index, n_index = \
            (list(map(int, [j or 0 for j in v.split('/')])) + [0, 0])[:3]
        if v_index < 0:
            v_index += len(self.vertices) - 1
        if t_index < 0:
            t_index += len(self.tex_coords) - 1
        if n_index < 0:
            n_index += len(self.normals) - 1
        vertex = self.tex_coords[t_index] + \
                 self.normals[n_index] + \
                 self.vertices[v_index]
        if i >= 3:
            # Triangulate
            self.material.vertices += v1 + vlast
        self.material.vertices += vertex
        if i == 0:
            v1 = vertex
        vlast = vertex
def __init__(self):
    depth = 0.25
    min_radius = self.__class__.min_radius
    inner_endcap_radius = 0.34
    self.maxeta = max_eta(inner_endcap_radius, self.__class__.min_z, self.__class__.max_z)
    nX0 = 23        # CLIC CDR, page 70, value for CLIC_ILD
    nLambdaI = 1    # ibid
    depth = self.__class__.max_z - self.__class__.min_z
    X0 = depth / nX0
    lambdaI = depth / nLambdaI
    volume = VolumeCylinder('ecal', self.__class__.max_radius, self.__class__.max_z,
                            self.__class__.min_radius, self.__class__.min_z)
    mat = material.Material('ECAL', X0, lambdaI)
    # todo: recompute
    self.eta_junction = volume.inner.eta_junction()
    # cooking up thresholds. a HG calo must be quite sensitive
    self.emin = {'barrel': 0.2, 'endcap': 0.2}
    # CLIC CDR p.123
    self.eres = {'barrel': [0.167, 0.0, 0.011]}
    super(ECAL, self).__init__('ecal', volume, mat)
def __init__(self):
    depth = 0.25
    min_radius = self.__class__.min_radius
    min_z = self.__class__.min_z
    inner_endcap_radius = 0.25
    self.maxeta = -math.log(math.tan(math.atan(inner_endcap_radius / 1.7) / 2.))
    nX0 = 23        # CLIC CDR, page 70, value for CLIC_ILD
    nLambdaI = 1    # ibid
    outer_radius = min_radius + depth
    outer_z = min_z + depth
    X0 = depth / nX0
    lambdaI = depth / nLambdaI
    volume = VolumeCylinder('ecal', outer_radius, outer_z, min_radius, min_z)
    mat = material.Material('ECAL', X0, lambdaI)
    # todo: recompute
    self.eta_junction = volume.inner.eta_junction()
    # cooking up thresholds. a HG calo must be quite sensitive
    self.emin = {'barrel': 0.2, 'endcap': 0.2}
    # CLIC CDR p.123
    self.eres = {'barrel': [0.167, 0.0, 0.011]}
    super(ECAL, self).__init__('ecal', volume, mat)
def __init__(self, settings=None):
    self.settings = settings
    self.human = G.app.selectedHuman
    self.macroModifierValues = dict()
    self.appliedTargets = dict(self.human.targetsDetailStack)
    self.skin = material.Material().copyFrom(self.human.material)
    self.hair = mhapi.assets.getEquippedHair()
    self.eyebrows = mhapi.assets.getEquippedEyebrows()
    self.eyelashes = mhapi.assets.getEquippedEyelashes()
    self.clothes = mhapi.assets.getEquippedClothes()
    self._fillMacroModifierValues()
    self.modifierInfo = ModifierInfo()

    if settings is not None:
        self._randomizeMacros()
        if settings.getValue("materials", "randomizeSkinMaterials"):
            self._randomizeSkin()
        self._randomizeProxies()
        self._randomizeDetails()
def __init__(self):
    volume = VolumeCylinder('hcal', 3.33, 2.348, 2.02, 2.348)
    mat = material.Material('HCAL', 1.74e-3, 0.17)
    self.eta_crack = 3.
    self.eres = [0.5, 0., 0.]
    super(HCAL, self).__init__('hcal', volume, mat)
def __init__(self, filename):
    self.filename = filename
    self.done = False
    self.triangles = []
    scene = pyassimp.load(self.filename, pyassimp.postprocess.aiProcess_Triangulate)
    if scene:
        for index, mesh in enumerate(scene.meshes):
            for num in range(len(mesh.faces)):
                face = mesh.faces[num]
                if face.size == 3:
                    vertex = [glm.vec3(), glm.vec3(), glm.vec3()]
                    vertex[0].x = mesh.vertices[face[0], 0]
                    vertex[0].y = mesh.vertices[face[0], 1]
                    vertex[0].z = mesh.vertices[face[0], 2]
                    vertex[1].x = mesh.vertices[face[1], 0]
                    vertex[1].y = mesh.vertices[face[1], 1]
                    vertex[1].z = mesh.vertices[face[1], 2]
                    vertex[2].x = mesh.vertices[face[2], 0]
                    vertex[2].y = mesh.vertices[face[2], 1]
                    vertex[2].z = mesh.vertices[face[2], 2]
                    self.triangles.append(
                        triangle.Triangle(glm.vec3(vertex[0].x, vertex[0].y, vertex[0].z),
                                          glm.vec3(vertex[1].x, vertex[1].y, vertex[1].z),
                                          glm.vec3(vertex[2].x, vertex[2].y, vertex[2].z),
                                          material.Material()))
        self.done = True
def __init__(self):
    self.data_path = os.path.join(os.path.dirname(__file__), "materials")
    self.file_path = None
    self.tmp_material = material.Material()
    self.loaded_materials = []
def parse_material(node, two_sided=False):
    node_id = None
    if 'id' in node.attrib:
        node_id = node.attrib['id']
    if node.attrib['type'] == 'diffuse':
        diffuse_reflectance = Variable(torch.from_numpy(
            np.array([0.5, 0.5, 0.5], dtype=np.float32)))
        diffuse_uv_scale = Variable(torch.from_numpy(
            np.array([1.0, 1.0], dtype=np.float32)))
        specular_reflectance = Variable(torch.from_numpy(
            np.array([0.0, 0.0, 0.0], dtype=np.float32)))
        specular_uv_scale = Variable(torch.from_numpy(
            np.array([1.0, 1.0], dtype=np.float32)))
        roughness = Variable(torch.from_numpy(
            np.array([1.0], dtype=np.float32)))
        for child in node:
            if child.attrib['name'] == 'reflectance':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            diffuse_reflectance = Variable(torch.from_numpy(
                                image.imread(grandchild.attrib['value'])))
                        elif grandchild.attrib['name'] == 'uscale':
                            diffuse_uv_scale.data[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            diffuse_uv_scale.data[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    diffuse_reflectance = Variable(torch.from_numpy(
                        parse_vector(child.attrib['value'])))
            elif child.attrib['name'] == 'specular':
                if child.tag == 'texture':
                    for grandchild in child:
                        if grandchild.attrib['name'] == 'filename':
                            specular_reflectance = Variable(torch.from_numpy(
                                image.imread(grandchild.attrib['value'])))
                        elif grandchild.attrib['name'] == 'uscale':
                            specular_uv_scale.data[0] = float(grandchild.attrib['value'])
                        elif grandchild.attrib['name'] == 'vscale':
                            specular_uv_scale.data[1] = float(grandchild.attrib['value'])
                elif child.tag == 'rgb':
                    specular_reflectance = Variable(torch.from_numpy(
                        parse_vector(child.attrib['value'])))
            elif child.attrib['name'] == 'roughness':
                # torch.from_numpy() expects an ndarray, so wrap the scalar value
                roughness = Variable(torch.from_numpy(
                    np.array([float(child.attrib['value'])], dtype=np.float32)))
        return (node_id,
                material.Material(diffuse_reflectance,
                                  diffuse_uv_scale=diffuse_uv_scale,
                                  specular_reflectance=specular_reflectance,
                                  specular_uv_scale=specular_uv_scale,
                                  roughness=roughness,
                                  two_sided=two_sided))
    elif node.attrib['type'] == 'twosided':
        ret = parse_material(node[0], True)
        return (node_id, ret[1])
#
# Constants, dimensions, etc. for the solver
# Expect this file to change greatly over the course of its development

import node
import mesh
import quadrature
import material
import calculator
import plot1d
import numpy as np

FIXED_SOURCE = 1.0  # TODO: scale by 1, 2, 4pi?

# Define the constituent materials
fuel_mat = material.Material(groups=1)
fuel_mat.macro_xs = {'scatter': np.array([1.0]),
                     'absorption': np.array([0.1])}
mod_mat = material.Material(groups=1)
mod_mat.macro_xs = {'absorption': np.array([0.1]),
                    'scatter': np.array([10.0])}

# Cell dimensions
PITCH = 0.6  # cm; pin pitch
WIDTH = 0.4  # cm; length of one side of the square fuel pin


class Pincell1D(mesh.Mesh1D):
    """Mesh for a one-dimensional pincell with 3 regions:
    mod, fuel, and mod

    Parameters:
                        (e1_len * e2_len)).view([-1, 1])
    normals = normals / length(normals).view([-1, 1])
    return normals


resolution = [256, 256]
cam = camera.Camera(position=np.array([0, 3, -6], dtype=np.float32),
                    look_at=np.array([0, 0, 0], dtype=np.float32),
                    up=np.array([0, 1, 0], dtype=np.float32),
                    cam_to_world=None,
                    fov=45.0,
                    clip_near=0.01,
                    clip_far=10000.0,
                    resolution=resolution)

mat_grey = material.Material(diffuse_reflectance=torch.from_numpy(
    np.array([0.5, 0.5, 0.5], dtype=np.float32)))
mat_black = material.Material(diffuse_reflectance=torch.from_numpy(
    np.array([0.0, 0.0, 0.0], dtype=np.float32)))
materials = [mat_grey, mat_black]

# plane_vertices, plane_indices = generate_plane([32, 32])
# shape_plane = shape.Shape(plane_vertices, plane_indices, None, None, 0)
indices, vertices, uvs, normals = load_obj.load_obj('results/heightfield_gan/model.obj')
indices = Variable(torch.from_numpy(indices.astype(np.int64)))
vertices = Variable(torch.from_numpy(vertices))
normals = compute_vertex_normal(vertices, indices)
shape_plane = shape.Shape(vertices, indices, None, normals, 0)

light_vertices = Variable(torch.from_numpy(
    np.array([[-0.1, 50, -0.1], [-0.1, 50, 0.1], [0.1, 50, -0.1], [0.1, 50, 0.1]], dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 2, 1], [1, 2, 3]], dtype=np.int32))
Iw = 1.00748
Iww = 0.796819
Aw = 0.247913

# Read the model file
# =================================================================
xlsx = pd.ExcelFile('Data.xlsx')
nod = xlsx.parse('Node', index_col=0)
ele = xlsx.parse('Element', index_col=0)
cons = xlsx.parse('Constraint', index_col=0)
nLoad = xlsx.parse('nLoad', index_col=0)
eLoad = xlsx.parse('eLoad', index_col=0)

# Define material properties and section geometry
# =================================================================
mat = material.Material(E, mu, gamma, alpha)
sec = section.Section(A, I, Aw, Sw, Iw, Iww)

# Compute the element stiffness matrices in local coordinates
# =================================================================
beam = element.ShearLag(mat, sec, nod, ele)
Ke = beam.stiffness()

# Compute the degrees of freedom and the element location vector
# =================================================================
n_phi = 1  # number of shear-lag degrees of freedom per node
DOF = np.array(range(3 + n_phi))
DOF = core.getDOF(cons, nod.shape[0])
LOC = core.getLOC(DOF, ele)

# Compute the element stiffness matrices in global coordinates
# =================================================================
def loadBinaryProxy(path, human, type):
    log.debug("Loading binary proxy %s.", path)

    npzfile = np.load(path)
    #if type is None:
    #    proxyType = npzfile['proxyType'].tostring()
    #else:
    proxyType = type

    proxy = Proxy(path, proxyType, human)

    proxy.name = npzfile['name'].tostring()
    proxy.uuid = npzfile['uuid'].tostring()
    proxy.basemesh = npzfile['basemesh'].tostring()

    if 'description' in npzfile:
        proxy.description = npzfile['description'].tostring()

    if 'version' in npzfile:
        proxy.version = int(npzfile['version'])

    if 'lic_str' in npzfile and 'lic_idx' in npzfile:
        proxy.license.fromNumpyString(npzfile['lic_str'], npzfile['lic_idx'])

    proxy.tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))

    if 'z_depth' in npzfile:
        proxy.z_depth = int(npzfile['z_depth'])

    if 'max_pole' in npzfile:
        proxy.max_pole = int(npzfile['max_pole'])

    num_refverts = int(npzfile['num_refverts'])

    if num_refverts == 3:
        proxy.ref_vIdxs = npzfile['ref_vIdxs']
        proxy.offsets = npzfile['offsets']
        proxy.weights = npzfile['weights']
    else:
        num_refs = npzfile['ref_vIdxs'].shape[0]
        proxy.ref_vIdxs = np.zeros((num_refs, 3), dtype=np.uint32)
        proxy.ref_vIdxs[:, 0] = npzfile['ref_vIdxs']
        proxy.offsets = np.zeros((num_refs, 3), dtype=np.float32)
        proxy.weights = np.zeros((num_refs, 3), dtype=np.float32)
        proxy.weights[:, 0] = npzfile['weights']

    if "deleteVerts" in npzfile:
        proxy.deleteVerts = npzfile['deleteVerts']

    # Reconstruct reverse vertex (and weights) mapping
    proxy._reloadReverseMapping()

    proxy.tmatrix = TMatrix()

    proxy.uvLayers = {}
    for uvIdx, uvName in enumerate(_unpackStringList(npzfile['uvLayers_str'], npzfile['uvLayers_idx'])):
        proxy.uvLayers[uvIdx] = uvName

    proxy.material = material.Material(proxy.name)
    if 'material_file' in npzfile:
        proxy._material_file = npzfile['material_file'].tostring()
    if proxy.material_file:
        proxy.material.fromFile(proxy.material_file)

    proxy._obj_file = npzfile['obj_file'].tostring()

    if 'vertexBoneWeights_file' in npzfile:
        proxy._vertexBoneWeights_file = npzfile['vertexBoneWeights_file'].tostring()
        if proxy.vertexBoneWeights_file:
            proxy.vertexBoneWeights = VertexBoneWeights.fromFile(proxy.vertexBoneWeights_file)

    if proxy.z_depth == -1:
        log.warning('Proxy file %s does not specify a Z depth. Using 50.', path)
        proxy.z_depth = 50

    return proxy
def Render(settings):
    progress = Progress.begin()

    if not mh.hasRenderToRenderbuffer():
        settings['dimensions'] = (G.windowWidth, G.windowHeight)

    if settings['lightmapSSS']:
        progress(0, 0.05, "Storing data")
        import material
        human = G.app.selectedHuman
        materialBackup = material.Material(human.material)

        progress(0.05, 0.1, "Projecting lightmaps")
        diffuse = imgop.Image(data=human.material.diffuseTexture)
        lmap = projection.mapSceneLighting(settings['scene'], border=human.material.sssRScale)
        progress(0.1, 0.4, "Applying medium scattering")
        lmapG = imgop.blurred(lmap, human.material.sssGScale, 13)
        progress(0.4, 0.7, "Applying high scattering")
        lmapR = imgop.blurred(lmap, human.material.sssRScale, 13)
        lmap = imgop.compose([lmapR, lmapG, lmap])
        if not diffuse.isEmpty:
            progress(0.7, 0.8, "Combining textures")
            lmap = imgop.resized(lmap, diffuse.width, diffuse.height)
            progress(0.8, 0.9)
            lmap = imgop.multiply(lmap, diffuse)
        lmap.sourcePath = "Internal_Renderer_Lightmap_SSS_Texture"

        progress(0.9, 0.95, "Setting up renderer")
        human.material.diffuseTexture = lmap
        human.mesh.configureShading(diffuse=True)
        human.mesh.shadeless = True
        progress(0.95, 0.98, None)
    else:
        progress(0, 0.99, None)

    if not mh.hasRenderToRenderbuffer():
        # Limited fallback mode, read from screen buffer
        log.message("Fallback render: grab screen")
        img = mh.grabScreen(0, 0, G.windowWidth, G.windowHeight)
        alphaImg = None
    else:
        # Render to framebuffer object
        renderprog = Progress()
        renderprog(0, 0.99 - 0.59 * settings['AA'], "Rendering")
        width, height = settings['dimensions']
        log.message("Rendering at %sx%s", width, height)
        if settings['AA']:
            width = width * 2
            height = height * 2
        img = mh.renderToBuffer(width, height)
        alphaImg = mh.renderAlphaMask(width, height)
        img = imgop.addAlpha(img, imgop.getChannel(alphaImg, 0))

        if settings['AA']:
            renderprog(0.4, 0.99, "AntiAliasing")
            # Resize to 50% using Qt image class
            qtImg = img.toQImage()
            del img
            # Bilinear filtered resize for anti-aliasing
            scaledImg = qtImg.scaled(width / 2, height / 2,
                                     transformMode=gui.QtCore.Qt.SmoothTransformation)
            del qtImg
            img = scaledImg
            #img = image.Image(scaledImg)    # Convert back to MH image
            #del scaledImg
        renderprog.finish()

    if settings['lightmapSSS']:
        progress(0.98, 0.99, "Restoring data")
        human.material = materialBackup

    progress(1, None, 'Rendering complete')

    gui3d.app.getCategory('Rendering').getTaskByName('Viewer').setImage(img)
    mh.changeTask('Rendering', 'Viewer')
    gui3d.app.statusPersist('Rendering complete.')
    data['influencesPerVertex'] = influencesPerVertex
    assert len(data['ref_vIdxs']) == len(data['weights'])
    assert len(data['ref_vIdxs']) > 0
    assert len(data['offsets']) > 0
    assert len(data['skinIndices']) == len(data['skinIndices'])
    assert len(data['skinIndices']) > 0
    assert len(data['skinIndices']) % data['influencesPerVertex'] == 0

    # load alternative materials
    materials = []
    if prxy.material_file:
        for material_file in Path(prxy.material_file).dirname().glob('*.mhmat'):
            material_name = str(Path(material_file).basename().splitext()[0])
            mat = material.Material(material_name)
            mat.fromFile(material_file)
            mtl = parse_mtl(material_to_mtl(mat, texdir=os.path.dirname(outfile)))
            mtl = mtl[mtl.keys()[0]]
            mtl['name'] = material_name
            materials.append(mtl)
    data["materials"] = materials

    json.dump(data, open(outfile, 'w'), cls=NP_MH_Encoder, separators=(',', ':'))

    # copy thumbnail
    thumbnail = Path(infile.replace('.obj', '.thumb'))
    if thumbnail.isfile():
        copyAndCompress(thumbnail, outfile.replace('.json', '.thumb.png'))
    for thumbnail in [p.replace('.mhmat', '.thumb') for p in Path(prxy.material_file).dirname().glob('*.mhmat')]:
resolution = [256, 256]
position = Variable(torch.from_numpy(np.array([0, 0, -5], dtype=np.float32)))
look_at = Variable(torch.from_numpy(np.array([0, 0, 0], dtype=np.float32)))
up = Variable(torch.from_numpy(np.array([0, 1, 0], dtype=np.float32)))
fov = Variable(torch.from_numpy(np.array([45.0], dtype=np.float32)))
clip_near = Variable(torch.from_numpy(np.array([0.01], dtype=np.float32)))
clip_far = Variable(torch.from_numpy(np.array([10000.0], dtype=np.float32)))
cam = camera.Camera(position=position,
                    look_at=look_at,
                    up=up,
                    cam_to_world=None,
                    fov=fov,
                    clip_near=clip_near,
                    clip_far=clip_far,
                    resolution=resolution)

mat_grey = material.Material(
    diffuse_reflectance=torch.from_numpy(np.array([0.5, 0.5, 0.5], dtype=np.float32)))
mat_checker_board = material.Material(
    diffuse_reflectance=torch.from_numpy(image.imread('test/results/test_texture/checker_board.exr')))
materials = [mat_grey, mat_checker_board]

vertices = Variable(torch.from_numpy(
    np.array([[-1.0, -1.0, 0.0],
              [-1.0,  1.0, 0.0],
              [ 1.0, -1.0, 0.0],
              [ 1.0,  1.0, 0.0]], dtype=np.float32)))
indices = torch.from_numpy(np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
uvs = torch.from_numpy(
    np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=np.float32))
shape_plane = shape.Shape(vertices, indices, uvs, None, 1)

light_vertices = Variable(torch.from_numpy(
    np.array([[-1, -1, -7], [1, -1, -7], [-1, 1, -7], [1, 1, -7]], dtype=np.float32)))
light_indices = torch.from_numpy(
    np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
shape_light = shape.Shape(light_vertices, light_indices, None, None, 0)
def loadBinaryProxy(path, human, type):
    log.debug("Loading binary proxy %s.", path)

    npzfile = np.load(path)
    #if type is None:
    #    proxyType = npzfile['proxyType'].tostring()
    #else:
    proxyType = type

    proxy = Proxy(path, proxyType, human)

    proxy.name = npzfile['name'].tostring()
    proxy.uuid = npzfile['uuid'].tostring()
    proxy.basemesh = npzfile['basemesh'].tostring()

    proxy.tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))

    if 'z_depth' in npzfile:
        proxy.z_depth = int(npzfile['z_depth'])

    if 'max_pole' in npzfile:
        proxy.max_pole = int(npzfile['max_pole'])

    proxy.num_refverts = int(npzfile['num_refverts'])

    if proxy.num_refverts == 3:
        proxy.ref_vIdxs = npzfile['ref_vIdxs']
        proxy.offsets = npzfile['offsets']
        proxy.weights = npzfile['weights']
    else:
        num_refs = npzfile['ref_vIdxs'].shape[0]
        proxy.ref_vIdxs = np.zeros((num_refs, 3), dtype=np.uint32)
        proxy.ref_vIdxs[:, 0] = npzfile['ref_vIdxs']
        proxy.offsets = np.zeros((num_refs, 3), dtype=np.float32)
        proxy.weights = np.zeros((num_refs, 3), dtype=np.float32)
        proxy.weights[:, 0] = npzfile['weights']

    if "deleteVerts" in npzfile:
        proxy.deleteVerts = npzfile['deleteVerts']

    # Reconstruct reverse vertex (and weights) mapping
    proxy._reloadReverseMapping()

    proxy.tmatrix = TMatrix()

    proxy.uvLayers = {}
    for uvIdx, uvName in enumerate(_unpackStringList(npzfile['uvLayers_str'], npzfile['uvLayers_idx'])):
        proxy.uvLayers[uvIdx] = uvName

    proxy.material = material.Material(proxy.name)
    if 'material_file' in npzfile:
        proxy._material_file = npzfile['material_file'].tostring()
    if proxy.material_file:
        proxy.material.fromFile(proxy.material_file)

    proxy._obj_file = npzfile['obj_file'].tostring()

    if 'vertexgroup_file' in npzfile:
        proxy._vertexgroup_file = npzfile['vertexgroup_file'].tostring()
        if proxy.vertexgroup_file:
            proxy.vertexGroups = io_json.loadJson(proxy.vertexgroup_file)

    # Just set the defaults for these, no idea if they are still relevant
    proxy.wire = False
    proxy.cage = False
    proxy.modifiers = []
    proxy.shapekeys = []

    if proxy.z_depth == -1:
        log.warning('Proxy file %s does not specify a Z depth. Using 50.', path)
        proxy.z_depth = 50

    return proxy
def __init__(self):
    volume = VolumeCylinder('hcal', 2.9, 3.6, 1.9, 2.6)
    mat = material.Material('HCAL', None, 0.17)
    super(HCAL, self).__init__('ecal', volume, mat)
def Render(settings):
    progress = Progress.begin()

    if not mh.hasRenderToRenderbuffer():
        settings['dimensions'] = (G.windowWidth, G.windowHeight)

    if settings['lightmapSSS']:
        progress(0, 0.05, "Storing data")
        import material
        human = G.app.selectedHuman
        materialBackup = material.Material(human.material)

        progress(0.05, 0.1, "Projecting lightmaps")
        diffuse = imgop.Image(data=human.material.diffuseTexture)
        lmap = projection.mapSceneLighting(settings['scene'], border=human.material.sssRScale)
        progress(0.1, 0.4, "Applying medium scattering")
        lmapG = imgop.blurred(lmap, human.material.sssGScale, 13)
        progress(0.4, 0.7, "Applying high scattering")
        lmapR = imgop.blurred(lmap, human.material.sssRScale, 13)
        lmap = imgop.compose([lmapR, lmapG, lmap])
        if not diffuse.isEmpty:
            progress(0.7, 0.8, "Combining textures")
            lmap = imgop.resized(lmap, diffuse.width, diffuse.height, filter=image.FILTER_BILINEAR)
            progress(0.8, 0.9)
            lmap = imgop.multiply(lmap, diffuse)
        lmap.sourcePath = "Internal_Renderer_Lightmap_SSS_Texture"

        progress(0.9, 0.95, "Setting up renderer")
        human.material.diffuseTexture = lmap
        human.configureShading(diffuse=True)
        human.shadeless = True
        progress(0.95, 0.98, None)
    else:
        progress(0, 0.99, None)

    if not mh.hasRenderToRenderbuffer():
        # Limited fallback mode, read from screen buffer
        log.message("Fallback render: grab screen")
        img = mh.grabScreen(0, 0, G.windowWidth, G.windowHeight)
        alphaImg = None
    else:
        # Render to framebuffer object
        renderprog = Progress()
        renderprog(0, 0.99 - 0.59 * settings['AA'], "Rendering")
        width, height = settings['dimensions']
        log.message("Rendering at %sx%s", width, height)
        if settings['AA']:
            width = width * 2
            height = height * 2
        img = mh.renderToBuffer(width, height)
        alphaImg = mh.renderAlphaMask(width, height)
        img = imgop.addAlpha(img, imgop.getChannel(alphaImg, 0))

        if settings['AA']:
            renderprog(0.4, 0.99, "AntiAliasing")
            # Resize to 50% using bi-linear filtering
            img = img.resized(width / 2, height / 2, filter=image.FILTER_BILINEAR)
            # TODO still haven't figured out where components get swapped, but this hack appears to be necessary
            img.data[:, :, :] = img.data[:, :, (2, 1, 0, 3)]
        renderprog.finish()

    if settings['lightmapSSS']:
        progress(0.98, 0.99, "Restoring data")
        human.material = materialBackup

    progress(1, None, 'Rendering complete')

    gui3d.app.getCategory('Rendering').getTaskByName('Viewer').setImage(img)
    mh.changeTask('Rendering', 'Viewer')
    gui3d.app.statusPersist('Rendering complete')
def loadBinaryProxy(path, human, type):
    log.debug("Loading binary proxy %s.", path)

    npzfile = np.load(path)
    #if type is None:
    #    proxyType = npzfile['proxyType'].tostring()
    #else:
    proxyType = type

    proxy = Proxy(path, proxyType, human)

    proxy.name = npzfile['name'].tostring()
    proxy.uuid = npzfile['uuid'].tostring()
    proxy.basemesh = npzfile['basemesh'].tostring()

    if 'description' in npzfile:
        proxy.description = npzfile['description'].tostring()

    if 'version' in npzfile:
        proxy.version = int(npzfile['version'])

    if 'lic_str' in npzfile and 'lic_idx' in npzfile:
        proxy.license.fromNumpyString(npzfile['lic_str'], npzfile['lic_idx'])

    proxy.tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))

    if 'z_depth' in npzfile:
        proxy.z_depth = int(npzfile['z_depth'])

    if 'max_pole' in npzfile:
        proxy.max_pole = int(npzfile['max_pole'])

    if 'special_pose_str' in npzfile:
        special_poses = _unpackStringList(npzfile['special_pose_str'], npzfile['special_pose_idx'])
        for idx in range(0, len(special_poses), 2):
            proxy.special_pose[special_poses[idx]] = special_poses[idx+1]

    num_refverts = int(npzfile['num_refverts'])

    if num_refverts > 1:  # 3 or 4
        proxy.ref_vIdxs = npzfile['ref_vIdxs']
        proxy.weights = npzfile['weights']
        if 'offsets' in npzfile:
            proxy.offsets = npzfile['offsets']
        else:
            if proxy.new_fitting:
                proxy.offsets = None
            else:
                num_refs = npzfile['ref_vIdxs'].shape[0]
                proxy.offsets = np.zeros((num_refs, 3), dtype=np.float32)
    else:  # 1 refvert
        num_refs = npzfile['ref_vIdxs'].shape[0]
        proxy.ref_vIdxs = np.zeros((num_refs, 3), dtype=np.uint32)
        proxy.ref_vIdxs[:, 0] = npzfile['ref_vIdxs']
        proxy.offsets = np.zeros((num_refs, 3), dtype=np.float32)
        proxy.weights = np.zeros((num_refs, 3), dtype=np.float32)
        proxy.weights[:, 0] = npzfile['weights']

    if "deleteVerts" in npzfile:
        proxy.deleteVerts = npzfile['deleteVerts']

    # Reconstruct reverse vertex (and weights) mapping
    proxy._reloadReverseMapping()

    if proxy.new_fitting:
        # Create alias
        proxy.deltas = proxy.weights

    # TODO we could skip this for new-style proxies
    proxy.tmatrix.fromNumpyStruct(npzfile)

    proxy.uvLayers = {}
    for uvIdx, uvName in enumerate(_unpackStringList(npzfile['uvLayers_str'], npzfile['uvLayers_idx'])):
        proxy.uvLayers[uvIdx] = uvName

    proxy.material = material.Material(proxy.name)
    if 'material_file' in npzfile:
        proxy._material_file = npzfile['material_file'].tostring()
    if proxy.material_file:
        proxy.material.fromFile(proxy.material_file)

    proxy._obj_file = npzfile['obj_file'].tostring()

    if 'vertexBoneWeights_file' in npzfile:
        proxy._vertexBoneWeights_file = npzfile['vertexBoneWeights_file'].tostring()
        if proxy.vertexBoneWeights_file:
            from animation import VertexBoneWeights
            proxy.vertexBoneWeights = VertexBoneWeights.fromFile(proxy.vertexBoneWeights_file)

    if proxy.z_depth == -1:
        log.warning('Proxy file %s does not specify a Z depth. Using 50.', path)
        proxy.z_depth = 50

    return proxy
import light
import shape
import transform
import numpy as np

dtype = torch.FloatTensor
resolution = [256, 256]
cam = camera.Camera(position=np.array([0, 2, -5], dtype=np.float32),
                    look_at=np.array([0, 0, 0], dtype=np.float32),
                    up=np.array([0, 1, 0], dtype=np.float32),
                    cam_to_world=None,
                    fov=45.0,
                    clip_near=0.01,
                    clip_far=10000.0,
                    resolution=resolution)

mat_grey = material.Material(
    albedo=torch.from_numpy(np.array([0.5, 0.5, 0.5], dtype=np.float32)))
mat_red = material.Material(
    albedo=torch.from_numpy(np.array([0.9, 0.15, 0.15], dtype=np.float32)))
mat_green = material.Material(
    albedo=torch.from_numpy(np.array([0.15, 0.9, 0.15], dtype=np.float32)))
mat_black = material.Material(
    albedo=torch.from_numpy(np.array([0.0, 0.0, 0.0], dtype=np.float32)))
materials = [mat_grey, mat_red, mat_green, mat_black]

floor_vertices = Variable(torch.from_numpy(
    np.array([[-2.0, 0.0, -2.0], [-2.0, 0.0, 2.0], [2.0, 0.0, -2.0], [2.0, 0.0, 2.0]], dtype=np.float32)))
floor_indices = torch.from_numpy(
    np.array([[0, 1, 2], [1, 3, 2]], dtype=np.int32))
shape_floor = shape.Shape(floor_vertices, floor_indices, None, None, 0)

red_reflector_vertices = Variable(torch.from_numpy(
    np.array([[-4.0, 4.0, 2.0], [-4.0, 8.0, 2.0], [0.0, 4.0, 2.0], [0.0, 8.0, 2.0]], dtype=np.float32)))
red_reflector_indices = torch.from_numpy(
RHO_MOD = 0.7  # g/cm^3

# One-group cross sections
SIGMA_S_U238 = 11.29  # b
SIGMA_S_O16 = 3.888   # b
SIGMA_S_H1 = 20.47    # b
SIGMA_A_H1 = 1.0      # b
# --> fuel is pure scattering; only absorption is hydrogen

# Define the nuclides
u238 = material.Nuclide(238, {"nu-scatter": SIGMA_S_U238})
o16 = material.Nuclide(16, {"nu-scatter": SIGMA_S_O16})
h1 = material.Nuclide(1, {"nu-scatter": SIGMA_S_H1,
                          "absorption": SIGMA_A_H1})

# Define the constituent materials
fuel_mat = material.Material().fromNuclides([u238], RHO_FUEL, name="Fuel")
fuel_mat.macro_xs["transport"] = sum(fuel_mat.macro_xs.values())
mod_mat = material.Material().fromNuclides([o16, h1, h1], RHO_MOD, name="Moderator")
mod_mat.macro_xs["transport"] = sum(mod_mat.macro_xs.values())

# Cell dimensions
PITCH = 1.25  # cm; pin pitch
WIDTH = 0.80  # cm; length of one side of the square fuel pin
BOUNDARIES = ["periodic"]*4


class Pincell2D(mesh.Mesh2D):
    """Mesh for a two-dimensional pincell with 3 regions:
    mod, fuel, and mod

    Parameters:
class MaterialEnum(Enum):
    iron = material.Material(1, 1, 1)
    steel = material.Material(1.5, 1.5, 1.5)
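A brief usage note, assuming material.Material here is a plain value object as constructed above: each enum member wraps one Material instance, which is reached through the standard Enum .value attribute.

# Hypothetical lookup sketch using the MaterialEnum defined above.
chosen = MaterialEnum.steel
print(chosen.name)      # "steel"
mat = chosen.value      # the material.Material(1.5, 1.5, 1.5) instance
assert MaterialEnum["iron"] is MaterialEnum.iron    # members can also be recovered by name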
import geometry as gm
import simulator as simu
import material as mt
import time


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


if __name__ == '__main__':
    cell0 = gm.Cell(2, -80, -50, 1000, -1000, 1000, -1000)
    cell1 = gm.Cell(1, -50, 50, 1000, -1000, 1000, -1000)
    cell2 = gm.Cell(2, 50, 80, 1000, -1000, 1000, -1000)
    mat1 = mt.Material(1, 0.7, 0.0092, 0.010)
    mat2 = mt.Material(2, 0.9, 0.002, 0)
    cell_list = [cell0, cell1, cell2]
    geo = gm.Geometry(cell_list)
    mat_list = [mat1, mat2]
    mode1 = simu.Mode(100, 50, 30, False)
    runner = simu.Simulate(geo, mat_list, mode1)
    t1 = time.time()
    runner.run()
    t2 = time.time() - t1
    print_hi(f'succeed, run in {t2} seconds')
def __init__(self):
    volume = VolumeCylinder('ecal', 1.55, 2.25, 1.30, 2.)
    mat = material.Material('ECAL', 8.9e-3, 0.)  # lambda_I = 0
    self.eta_crack = 1.5
    self.emin = 2.
    super(ECAL, self).__init__('ecal', volume, mat)
def __init__(self, data=None):
    # Initialize everything:
    self.im = Image.open("stars64.bmp")
    self.d = np.array(self.im).reshape(-1, 1)
    self.im.close()

    # Class variables
    self.width = 800
    self.height = 800
    self.aspect = self.width / self.height
    self.angulo = 0
    self.window = 0

    # ** Load materials **
    self.materiales = data["materiales"]
    self.NUM_MATERIALES = len(self.materiales)
    for i in range(self.NUM_MATERIALES):
        self.materialesCargados.append(material.Material(self.materiales[i]["luzambiente"],
                                                         self.materiales[i]["luzspecular"],
                                                         self.materiales[i]["luzdifusa"],
                                                         self.materiales[i]["brillo"]))
        print("&Material", i, "cargado desde JSON")

    # ** Load celestial bodies **
    numLunas = 0
    self.planetas = data["planetas"]
    self.NUM_ASTROS = len(self.planetas)
    # Load the data into models and store them in a list called astros
    for i in range(self.NUM_ASTROS):
        if self.planetas[i]["l"] == "n":
            numLunas = 0
            self.astros.append(model.Modelo(self.planetas[i], self.materialesCargados[i]))
            print("&Planeta", i, "cargado desde JSON")
        elif self.planetas[i]["l"] == "l":
            numLunas += 1
            self.astros[i - numLunas].addLuna(model.Modelo(self.planetas[i], self.materialesCargados[i]))
            #self.lunas.append(model.Modelo(self.planetas[i], self.materialesCargados[i]))
            print("&Luna", numLunas, "del planeta", self.astros[i - numLunas].nombre, "cargado desde JSON")

    # ** Load cameras **
    self.camaras = data["camaras"]
    self.numCamaras = len(self.camaras)
    # Load the cameras from data into Camera_Frustum objects
    for i in range(self.numCamaras):
        self.camarasCargadas.append(cf.Camera_Frustum(self.camaras[i]["ejex"], self.camaras[i]["ejey"],
                                                      self.camaras[i]["ejez"], self.camaras[i]["centrox"],
                                                      self.camaras[i]["centroy"], self.camaras[i]["centroz"],
                                                      self.camaras[i]["upx"], self.camaras[i]["upy"],
                                                      self.camaras[i]["upz"]))
        print("&Camara", i, "cargada desde JSON")

    # ** Load spotlights **
    self.focos = data["focos"]
    self.NUM_FOCOS = len(self.focos)
    for i in range(self.NUM_FOCOS):
        self.focosCargados.append(foco.Foco(self.focos[i]["brillo"], self.focos[i]["luzdifusa"],
                                            self.focos[i]["luzambiente"], self.focos[i]["luzspecular"],
                                            self.focos[i]["posicion"]))
        print("&Foco", i, "cargado desde JSON")

    # Size of the axes and of the Z offset.
    self.tamanio = 0
    self.z0 = 0

    # Factor for the model size.
    self.escalaGeneral = 0.013
    self.multiplicadorVelocidad = 15

    # Rotation of the models.
    self.alpha = 0
    self.beta = 0

    # Variables for mouse handling.
    self.xold = 0
    self.yold = 0
    self.zoom = 1.0

    # Views of the planetary system.
    # modelo.tipoVista iForma
    self.iDibujo = 3
    self.iFondo = 0
    self.iForma = 6
    self.iCamara = 10
def __init__(self, obj, after):
    super(MaterialAction, self).__init__("Change material of %s" % obj.mesh.name)
    self.obj = obj
    self.before = material.Material().copyFrom(obj.material)
    self.after = after
    mg["mod"]["D"] = mod_diffusion
    mg["mod"]["transport"] = 1/(3*mod_diffusion)
    mg["mod"]["absorption"] = np.array([0.0010, 0.0300])
    s11, s22 = mg["mod"]["transport"] - mg["mod"]["absorption"]
    s12 = 0.0500
    mod_scatter_matrix = np.array([[s11, 0],
                                   [s12, s22]])
    mg["mod"]["nu-scatter"] = mod_scatter_matrix
    scatter = mod_scatter_matrix.sum(axis=0)
    mg["mod"]["total"] = mg["mod"]["transport"]
else:
    raise NotImplementedError("{} groups".format(G))

mod_mat = material.Material(name="Moderator", groups=G)
mod_mat.macro_xs = mg["mod"]
fuel_mat = material.Material(name="Fuel, 3.1%", groups=G)
fuel_mat.macro_xs = mg["fuel"]

if G == 1:
    # debug
    fuel_mat.macro_xs["nu-fission"] = 2.1*fuel_mat.macro_xs["absorption"]  # debug; force kinf to 2.1
    fuel_mat.macro_xs["total"] = fuel_mat.macro_xs["transport"]
    # analytically calculate kinf from the 1-group xs
    kinf = float(mg["fuel"]["nu-fission"]/mg["fuel"]["absorption"])
elif G == 2:
    # fudge the numbers for testing
    '''
    mg["fuel"]["nu-fission"][:] = 2.2*mg["fuel"]["absorption"][:]  # debug: force kinf to 2.2