def _path_resolve(self):
    self.test_data = {}
    patient_name = None
    count_next_flag = False
    info_next_flag = False
    for line in self.path_list:
        if line == '\n':
            continue
        line = line.strip()
        if count_next_flag:
            # the line after a patient header carries the slice count, e.g. "slice_count: 42"
            self.test_data[patient_name]['slice_count'] = int(line.split(':')[1])
            count_next_flag = False
            info_next_flag = True
            continue
        if info_next_flag:
            # the line after the slice count is the path to the nii info file
            self.test_data[patient_name]['nii_info'] = loadInfo(line)
            info_next_flag = False
            continue
        if line[0] == '[':
            # a header line such as "[patient:NAME]" starts a new patient block
            patient_name = line.split(':')[1].replace(']', '')
            self.test_data[patient_name] = {}
            self.test_data[patient_name]['npzy_paths'] = []
            count_next_flag = True
            continue
        # any other line is an npz path belonging to the current patient
        self.test_data[patient_name]['npzy_paths'].append(line)
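# A minimal sketch of the path-list file format that _path_resolve expects,
# inferred from the parsing logic above (the header keyword and field names
# shown here are assumptions, not confirmed by the source):
#
#   [patient:case_001]
#   slice_count: 42
#   /data/case_001/info.nii
#   /data/case_001/slice_000.npz
#   /data/case_001/slice_001.npz
#   ...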
def viewSample(sample, frame=None):
    # Load sample info
    info_path = os.path.join(PATH_SRC, sample, 'info.mat')
    info = loadInfo(info_path)
    # Init scene
    init(sample, info)
    if frame is None:
        loadSampleSequence(sample, info)
    else:
        loadSampleFrame(sample, frame, info)
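# Hedged usage sketch for viewSample; 'sample_000' is a hypothetical sample
# directory name under PATH_SRC, not taken from the source:
#
#   viewSample('sample_000')            # load the whole animated sequence
#   viewSample('sample_000', frame=10)  # load a single frame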
def install(package_names: set):
    makeCacheDir()
    if path.isdir(installdir):
        rmtree(installdir)
    mkdir(installdir)
    turn_to_packages = set()
    already_installed = listInstalledPackages()[0]
    # build dependency tree for all packages
    try:
        for package in package_names:
            if not path.isdir(f"{installdir}{package}"):
                buildDepTree(package)
            if package in already_installed:
                info = loadInfo(f"{libdir}{package}/Info.json")
                if info["Type"] != "Package":
                    turn_to_packages.add(package)
    except DependencyError as e:
        printException(e)
    # remove already installed packages from the to-install list
    package_names = {x[:-5] for x in listdir(installdir) if x.endswith(".json")}
    package_names = {x for x in package_names if x not in already_installed}
    if package_names:
        print("Following packages will be installed:")
        printPackages(package_names)
        if choice():
            for package in package_names:
                installPackage(package)
    if turn_to_packages:
        print("Following packages are already installed as dependencies and will be turned into packages:")
        printPackages(turn_to_packages)
        if choice():
            for package in turn_to_packages:
                info = loadInfo(f"{libdir}{package}/Info.json")
                info["Type"] = "Package"
                writeInfo(info, f"{libdir}{package}/Info.json")
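# Hedged usage sketch for install; the package names are hypothetical. The
# function resolves the dependency tree first, then prompts via choice()
# before actually installing anything:
#
#   install({"http", "json"})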
def checkForUnusedPackages(ignore: set):
    # lists unused dependencies, files to remove and all packages that are a dependency
    global dependencies, used_packages, infos
    dependencies, used_packages, infos = set(), set(), {}
    installed_packages, to_remove = listInstalledPackages()
    installed_packages = installed_packages - ignore
    # collect all infos of installed packages
    for package in installed_packages:
        infos[package] = loadInfo(f"{libdir}{package}/Info.json")
        if infos[package]["Type"] == "Package":
            listDependencies(package)
    return installed_packages - used_packages, to_remove, dependencies
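# Hedged usage sketch showing how the three return values of
# checkForUnusedPackages are meant to be unpacked:
#
#   unused, to_remove, deps = checkForUnusedPackages(ignore=set())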
def installPackage(package_name: str):
    print(f"Installing {package_name}")
    info = loadInfo(f"{installdir}{package_name}.json")
    # download archive
    downloadFile(f"{main_repository}{package_name}/Versions/{info['Version']}.tar.gz",
                 f"{libdir}{package_name}.tar.gz")
    # make directory for the package and extract it
    extractTar(f"{libdir}{package_name}.tar.gz", libdir + package_name, remove_tar=True)
    # replace optimized json file
    replace(f"{installdir}{package_name}.json", f"{libdir}{package_name}/Info.json")
    if not packageExists(package_name):
        printWarning(f"Package '{package_name}' does not seem to be valid.")
    # compile library
    system(f"{jacdir}Binaries/jacmake {libdir}{package_name}")
    rmtree(f"{libdir}{package_name}/Sources")
def buildDepTree(package_name: str, dependency=False):
    # check if file has already been processed (circular dependencies)
    if path.isfile(f"{installdir}{package_name}.json"):
        return
    # download info file
    try:
        downloadFile(f"{main_repository}{package_name}/Latest.json",
                     f"{installdir}{package_name}.json")
    except HTTPError:
        raise DependencyError(f"Package '{package_name}' does not exist.")
    # load info file
    try:
        info = loadInfo(f"{installdir}{package_name}.json")
    except decoder.JSONDecodeError:
        raise DependencyError(f"Package '{package_name}' is damaged and therefore cannot be downloaded!")
    # verify info file
    if not verifyPackageJson(info, installed=False):
        raise DependencyError(f"Package '{package_name}' is incomplete and therefore cannot be downloaded!")
    # get current jaclang version and supported one
    supported_version = [int(x) for x in info["Supported Version"].split(".")]
    current_version = popen(f"{jacdir}Binaries/jaclang --version").read().split(" ")[1]
    current_version = [int(x) for x in current_version.split(".")[:-1]]
    # check if package supports current jaclang version
    if current_version[0] != supported_version[0] or current_version[1] < supported_version[1]:
        raise DependencyError(f"Package '{package_name}' is not compatible with your current version of jaclang!")
    del info["Supported Version"]
    info["Type"] = "Dependency" if dependency else "Package"
    # recurse into dependencies before writing this package's info file
    for dependency_ in info["Dependencies"]:
        buildDepTree(dependency_, dependency=True)
    writeInfo(info, f"{installdir}{package_name}.json")
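# A minimal sketch of the Info.json / Latest.json fields this installer reads
# (the field names come from the code above; the example values are
# assumptions):
#
#   {
#       "Version": "1.2.0",
#       "Supported Version": "0.3",
#       "Type": "Package",
#       "Dependencies": ["some_dependency"]
#   }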
def dump_amass2pytroch(datasets, amass_dir, out_posepath, logger=None, rnd_seed=100, keep_rate=0.01):
    '''
    Select frames from each mocap sequence and save individual data features
    like pose and shape per frame in pytorch pt files.

    :param datasets: the names of the datasets to process
    :param amass_dir: directory of downloaded amass npz files. should be in this structure: path/datasets/subjects/*_poses.npz
    :param out_posepath: the path for the final pose.pt file
    :param logger: an instance of human_body_prior.tools.omni_tools.log2file
    :param rnd_seed: seed for the numpy random number generator
    :return: number of datapoints dumped using the out_posepath address pattern
    '''
    import glob

    np.random.seed(rnd_seed)

    makepath(out_posepath, isfile=True)

    if logger is None:
        starttime = datetime.now().replace(microsecond=0)
        log_name = datetime.strftime(starttime, '%Y%m%d_%H%M')
        logger = log2file(out_posepath.replace('pose.pt', '%s.log' % log_name))
    logger('Creating pytorch dataset at %s' % out_posepath)

    data_pose = []
    data_betas = []
    data_gender = []
    data_trans = []
    data_idx = []
    data_frame = []
    data_tightness = []
    data_outfit = []

    for ds_name in datasets:
        npz_fnames = glob.glob(os.path.join(amass_dir, ds_name, '*/info.mat'))
        logger('randomly selecting data points from %s.' % ds_name)

        for npz_fname in tqdm(npz_fnames):
            try:
                cdata = loadInfo(npz_fname)
                cdata['idx'] = int(npz_fname.split("/")[-2])
            except Exception:
                logger('Could not read %s! skipping..' % npz_fname)
                continue

            cdata['poses'] = cdata['poses'].T
            cdata['trans'] = cdata['trans'].T

            # encode each garment slot as a non-zero fabric index
            outfit_arr = np.zeros(len(outfit_types))
            for key in cdata['outfit'].keys():
                outfit_arr[outfit_types.index(key)] = fabric_types.index(cdata['outfit'][key]['fabric']) + 1

            if len(cdata['poses'].shape) < 2:
                continue

            N = len(cdata['poses'])
            cdata_ids = np.arange(N)
            np.random.shuffle(cdata_ids)
            if len(cdata_ids) < 1:
                continue

            # per-frame features
            data_frame.extend(np.array(cdata_ids).astype(np.int32))
            data_idx.extend(np.array([cdata['idx'] for _ in cdata_ids]))
            data_pose.extend(cdata['poses'][cdata_ids].astype(np.float32))
            data_trans.extend(cdata['trans'][cdata_ids].astype(np.float32))
            # per-sequence features, repeated for every selected frame
            data_betas.extend(np.repeat(cdata['shape'][np.newaxis].astype(np.float32), repeats=len(cdata_ids), axis=0))
            data_gender.extend(np.array([cdata['gender'] for _ in cdata_ids]))
            data_tightness.extend(np.repeat(cdata['tightness'][np.newaxis].astype(np.float32), repeats=len(cdata_ids), axis=0))
            data_outfit.extend(np.repeat(outfit_arr[np.newaxis].astype(np.int32), repeats=len(cdata_ids), axis=0))

    assert len(data_pose) != 0

    torch.save(torch.tensor(np.asarray(data_pose, np.float32)), out_posepath)
    torch.save(torch.tensor(np.asarray(data_betas, np.float32)), out_posepath.replace('pose.pt', 'betas.pt'))
    torch.save(torch.tensor(np.asarray(data_trans, np.float32)), out_posepath.replace('pose.pt', 'trans.pt'))
    torch.save(torch.tensor(np.asarray(data_gender, np.int32)), out_posepath.replace('pose.pt', 'gender.pt'))
    torch.save(torch.tensor(np.asarray(data_frame, np.int32)), out_posepath.replace('pose.pt', 'frame.pt'))
    torch.save(torch.tensor(np.asarray(data_idx, np.int32)), out_posepath.replace('pose.pt', 'idx.pt'))
    torch.save(torch.tensor(np.asarray(data_tightness, np.float32)), out_posepath.replace('pose.pt', 'tightness.pt'))
    torch.save(torch.tensor(np.asarray(data_outfit, np.int32)), out_posepath.replace('pose.pt', 'outfit.pt'))

    return len(data_pose)
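# Hedged usage sketch for dump_amass2pytroch; the dataset name and paths are
# hypothetical, and the expected directory layout follows the docstring. The
# sibling .pt files (betas.pt, trans.pt, ...) are derived from out_posepath:
#
#   n = dump_amass2pytroch(datasets=['dataset_a'],
#                          amass_dir='/path/to/amass',
#                          out_posepath='/path/to/out/pose.pt')
#   print('dumped %d datapoints' % n)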
def read_info(self, sample):
    info_path = os.path.join(self.SRC, sample, 'info')
    return loadInfo(info_path)