# NOTE(review): this file appears to be several unrelated scripts pasted together
# with line breaks stripped; this block is the first pasted script, reformatted.
import os  # NOTE(review): used below (os.path.join/exists) but missing from the pasted text

from collections import defaultdict

from utils.landmarks import load_landmark_model, perform_cmr_landmark_detection
from utils.cfg import load_config
from utils.transforms import get_segmentation_transforms
from utils.inference import center_crop, pad_if_needed
from utils.vis import compute_bullseye_sector_mask_for_slice

# Paths and constants for this landmark-detection export run.
LANDMARK_MODELPATH = "E:/Data/T1T2_models/CMR_landmark_network_RO_352_E1_352_sax_with_T1_T1T2_LGE_PerfPD_LossMultiSoftProb_KLD_Dice_Pytorch_1.5.1_2020-08-13_20200813_181146.pts"
LABEL_DIRS = [r"E:\Data\T1T2_peter_test_james", r"E:\Data\T1T2_peter_test_hui"]
CONFIG = "./experiments/026.yaml"
FOV = 256
WRITE_PNGS = True

# Load config
cfg, model_dir = load_config(CONFIG)
sequences = cfg['export']['sequences']
label_classes = cfg['export']['label_classes']
gaussian_sigma = cfg['export']['gaussian_sigma']
n_channels_keep_img = len(cfg['export']['sequences'])  # May have exported more channels to make PNG
n_channels_keep_lab = len(cfg['export']['label_classes'])

# landmark model
landmark_model = load_landmark_model(LANDMARK_MODELPATH)

_, transforms_test = get_segmentation_transforms(cfg)

# One output directory per input label directory, keyed by its basename.
for label_dir in LABEL_DIRS:
    out_dir = os.path.join("./data/", os.path.basename(label_dir))
    if not os.path.exists(out_dir):
        # NOTE(review): the paste is truncated at the `if` above; creating the
        # directory is the conventional body — confirm against the original script.
        os.makedirs(out_dir)
# NOTE(review): reformatted from the second pasted line — the entry point of a
# multiview synthetic-data inference/evaluation script. The tail of the block
# is missing from the paste (see note at the bottom).
if __name__ == '__main__':
    # parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='data/test_03_temp/multiview_data')
    parser.add_argument('--output_folder', type=str, default='results/test_03_temp')
    args = parser.parse_args()

    # prepare model and data
    device = torch.device(0)
    config = cfg.load_config('experiments/syn_data/multiview_data_2_alg.yaml')

    model = AlgebraicTriangulationNet(config, device=device).to(device)
    state_dict = torch.load(config.model.checkpoint)
    # Strip the "module." prefix that torch.nn.DataParallel prepends to
    # checkpoint keys so the bare model can load them.
    for key in list(state_dict.keys()):
        new_key = key.replace("module.", "")
        state_dict[new_key] = state_dict.pop(key)
    model.load_state_dict(state_dict, strict=True)

    print("Loading data ...")
    dataset = MultiView_SynData(args.data_path, invalid_joints=(9, 16), bbox=[80, 0, 560, 480], ori_form=1)
    dataloader = datasets_utils.syndata_loader(dataset, batch_size=4)
    # NOTE(review): source truncated here — the remainder of the main block
    # (inference loop / result writing to args.output_folder) is missing.
if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( '--config', type=str, default="experiments/human36m/train/human36m_alg_17jnts.yaml") parser.add_argument('--resume', action='store_true') parser.add_argument('--logdir', type=str, default="./logs") parser.add_argument('--gamma', type=float, default=10.0) parser.add_argument('--resume_log', type=str, default="") args = parser.parse_args() config = cfg.load_config(args.config) assert config.dataset.type in ("syndata", "human36m", "mpii") device = torch.device(int(config.gpu_id)) # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") print(device) model = AlgebraicTriangulationNet(config, device=device) model = torch.nn.DataParallel(model, device_ids=[int(config.gpu_id)]) if config.model.init_weights: print("Initializing model weights..") if args.resume: last_epoch = int(
# NOTE(review): reformatted from the fourth pasted line — a script that exports
# labels (via utils.export.export_label) for all files except a blacklist.
import os
import multiprocessing
from glob import glob

from tqdm import tqdm

from utils.cfg import load_config
from utils.export import export_label

CONFIG = "../experiments/026.yaml"
EXCLUDED_FILES_PATH = "data/blacklist.txt"

# Load config
cfg, vis_dir, model_dir = load_config(CONFIG)
npy_dir = cfg['export']['npydir']
output_dir = os.path.join(cfg['data']['pngdir'])
sequences = cfg['export']['sequences']
label_classes = cfg['export']['label_classes']
gaussian_sigma = cfg['export']['gaussian_sigma']
frmt = cfg['export']['format']

# Excluded files
with open(EXCLUDED_FILES_PATH) as f:
    excluded_files = f.read().splitlines()


def export_label_helper(labelpath):
    """Export a single label file using the module-level export settings.

    Thin single-argument wrapper so the export can be mapped over paths
    (e.g. with multiprocessing.Pool.map, which passes one argument).
    """
    export_label(labelpath, frmt, sequences, label_classes, output_dir, gaussian_sigma)