def init_system(self, options, n=1000, L=50, dim=2, min_dist=0.001): globals.options = util.dict2obj(**options) #globals.options = lambda:None; #globals.options.mode = 'gpu' print "running in gpu mode by default! --Ge" #initialize systems, within a box with edge L self.system = hmd.init.create_random(N=n, name='A', min_dist=min_dist, box=hmd.data.boxdim(dimensions=dim, L=L))
def __init__(self, image_size=224):
    """Set up default rasterization settings for this renderer.

    Args:
        image_size: side length, in pixels, of the square output image.
            All of these defaults can be overridden by passing keyword
            arguments to the forward function.
    """
    super().__init__()
    defaults = {
        'image_size': image_size,
        'blur_radius': 0.0,
        'faces_per_pixel': 1,
        'bin_size': None,
        'max_faces_per_bin': None,
        'perspective_correct': False,
    }
    # Wrap as an object so settings are reachable via attribute access.
    self.raster_settings = util.dict2obj(defaults)
def face_recon_FLAME(params_path, crop_size=256, img_size=224, device='cuda',
                     mesh_file='/data/yunfan.liu/Data_Preparation_Face_Swapping_Reenactment/3DMM_models/FLAME_Fitting/data/head_template_mesh.obj'):
    """Reconstruct and render a face image from saved FLAME fitting parameters.

    Args:
        params_path: path to an .npy file holding a pickled dict of fitted
            FLAME parameters with keys 'shape', 'exp', 'pose', 'tex', 'lit'
            and 'verts'.
        crop_size: unused in this function body; kept for interface
            compatibility with callers. TODO(review): confirm it can be used
            or retired.
        img_size: output render resolution in pixels.
        device: torch device string the models and tensors are moved to.
        mesh_file: path to the FLAME head template mesh (.obj). Previously
            hard-coded; now a keyword with the same value as its default, so
            existing callers are unaffected.

    Returns:
        The renderer's 'images' output tensor for the reconstructed face.
    """
    global config
    # Convert only once: a second call must not re-wrap the already-converted
    # global config object (util.dict2obj expects a dict).
    if isinstance(config, dict):
        config = util.dict2obj(config)
    flame = FLAME(config).to(device)
    flametex = FLAMETex(config).to(device)
    render = Renderer(img_size, obj_filename=mesh_file).to(device)

    # NOTE(review): allow_pickle executes arbitrary code on untrusted files --
    # only load parameter archives from trusted sources.
    # Unpack the 0-d object array once instead of calling .item() per key.
    params = np.load(params_path, allow_pickle=True).item()

    def _as_tensor(key):
        # Pull one saved array out of the dict as a float tensor on `device`.
        return torch.from_numpy(params[key]).float().to(device)

    shape = _as_tensor('shape')
    exp = _as_tensor('exp')
    pose = _as_tensor('pose')
    tex = _as_tensor('tex')
    light = _as_tensor('lit')
    verts = _as_tensor('verts')  # trans_vertices

    vertices, _, _ = flame(shape_params=shape, expression_params=exp, pose_params=pose)
    albedos = flametex(tex) / 255.
    img_render = render(vertices, verts, albedos, light)['images']
    return img_render
"fil_off": -0.5, "fil_freq": 113e3, "fil_duration": 40e-3, "fil_delay": .01, "pulses": 150} labbrickParams = {} alazarConfig = {'clock_edge': 'rising', 'trigger_delay': 0, 'ch1_filter': False, 'ch1_enabled': True, 'samplesPerRecord': 5056, 'bufferCount': 10, 'trigger_edge1': 'rising', 'trigger_edge2': 'rising', 'ch2_range': 1, 'clock_source': 'reference', 'trigger_level2': 1.0, 'trigger_level1': 1.0, 'ch2_coupling': 'DC', 'trigger_coupling': 'DC', 'ch2_filter': False, 'trigger_operation': 'or', 'ch1_coupling': 'DC', 'trigger_source2': 'disabled', 'trigger_source1': 'external', 'recordsPerBuffer': 1, 'sample_rate': 5000, 'timeout': 3000, 'ch1_range': 1, 'ch2_enabled': True, 'recordsPerAcquisition': 1} aConfig = util.dict2obj(**alazarConfig) ehe = eHeExperiment(expt_path, prefix, alazarConfig, fridgeParams, filamentParams, newDataFile=True) print ehe.filename ehe.note('start experiment. ') def na_monit(): ehe.na.take_one("monit") ehe.sample = lambda: None; ehe.sample.freqNoE = 8.012e9 ehe.sample.freqWithE = 8023438335.47; ehe.clear_nwa_plotter() ehe.clear_na_plotter()
# NOTE(review): this chunk opens mid-call (`lights=lightcode)` closes a render
# invocation that begins before the visible region) and fuses together the
# tail of a textured-render method, the complete `get_flame_faces` accessor,
# and the script's `__main__` block -- all on one physical line, so it is left
# byte-identical below.
# The `__main__` block builds the FLAME configuration dict (model/texture
# paths, parameter dimensionalities, image sizes), wraps it with
# util.dict2obj, and instantiates the render_utils helper.
lights=lightcode) textured_images, normals = rendering_results[ 'images'], rendering_results['normals'] normal_images = self.render.render_normal(trans_verts, normals) return textured_images, normal_images def get_flame_faces(self): return self.flame_faces if __name__ == '__main__': config = { # FLAME 'flame_model_path': './data/model.pkl', # acquire it from FLAME project page 'flame_lmk_embedding_path': './data/landmark_embedding.npy', 'tex_space_path': './data/FLAME_texture.npz', # acquire it from FLAME project page 'camera_params': 3, 'shape_params': 100, 'expression_params': 50, 'pose_params': 6, 'tex_params': 50, 'use_face_contour': True, 'cropped_size': 256, 'batch_size': 1, 'image_size': 224, } config = util.dict2obj(config) gif_helper = render_utils(config)
# NOTE(review): Python 2 simulation-driver preamble, collapsed onto one line
# and cut off inside a trailing `try:` whose body lies beyond this chunk --
# left byte-identical below.
# Flow: extend sys.path to the slab library and experiment directories,
# import util, build the HOOMD-style `options` dict (mode 'gpu'), exit early
# if the `jobs` list (defined elsewhere) is empty, otherwise wrap the first
# job as `config`, construct eheSimulation (honoring an optional
# `config.newFile`), record k and C0 back into the job dict, and resume from
# the last cached stack.
# NOTE(review): `C0 = config.resV * 0.06 * config.resV_correction` assumes the
# job dict carries `resV` and `resV_correction` -- verify against the job file
# schema.
import sys sys.path.append("/mnt/s/_Lib/python/Projects/slab") #sys.path.append("/mnt/s/_Data/140312 - EonHe M007v5 Trident/analysis") sys.path.append("/mnt/s/_Data/140312 - EonHe M007v5 Trident/experiment_M007v5_trident") import util as util options = {'ny': None, 'linear': False, 'notice_level': 2, 'min_cpu': False, 'shared_msg_file': None, 'ignore_display': False, 'msg_file': None, 'nx': None, 'nrank': None, 'nz': None, 'user': [], 'onelevel': False, 'gpu': None, 'gpu_error_checking': False, 'mode': 'gpu', 'autotuner_enable': True, 'autotuner_period': 50000} if jobs.__len__() == 0: print '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' print "job file is empty. simulation is completed!" print '-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-' sys.exit('now exit') current_job = jobs[0] config = util.dict2obj(**current_job) from ehe_simulation import eheSimulation if hasattr(config, 'newFile'): eheSim = eheSimulation(__file__, newFile=config.newFile) else: eheSim = eheSimulation(__file__) k = 0.0014 current_job['k'] = k C0 = config.resV * 0.06 * config.resV_correction current_job['C0'] = C0 eheSim.cache.find_last_stack() print eheSim.cache.current_stack try: