import torch
from segmentation_models_pytorch import FPN


def load_model_fpn(_model_weights, is_inference=False):
    """Build a 4-class FPN on top of `unet_encoder` (encoder name defined elsewhere
    in this script) and optionally load a fine-tuned checkpoint."""
    print("Using weights {}".format(_model_weights))
    if _model_weights == "imagenet":
        # Fresh model with an ImageNet-pretrained encoder.
        model = FPN(unet_encoder, encoder_weights="imagenet", classes=4, activation=None)
        if is_inference:
            model.eval()
        return model
    else:
        # Randomly initialised encoder; weights come from a checkpoint if one is given.
        model = FPN(unet_encoder, encoder_weights=None, classes=4, activation=None)
        if is_inference:
            model.eval()
        if _model_weights is not None:
            device = torch.device("cuda")
            model.to(device)
            state = torch.load(_model_weights)  # map_location=lambda storage, loc: storage
            model.load_state_dict(state["state_dict"])
            # new_state_dict = OrderedDict()
            # for k, v in state['state_dict'].items():
            #     if k in model.state_dict():
            #         new_state_dict[k] = v
            # model.load_state_dict(new_state_dict)
        return model
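# --- Usage sketch (not from the original kernel) -------------------------------
# Minimal example of calling load_model_fpn, assuming `unet_encoder` is an encoder
# name accepted by segmentation_models_pytorch; the checkpoint path below is
# purely illustrative, not a real artifact of this project.
unet_encoder = "se_resnext50_32x4d"  # assumed value; set elsewhere in the real kernel

train_model = load_model_fpn("imagenet")  # ImageNet-initialised model, train mode
infer_model = load_model_fpn(
    "../input/checkpoints/fpn_fold0.pth",  # hypothetical checkpoint path
    is_inference=True,                     # eval mode, checkpoint weights loaded
)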
# Checkpoints for the models used at inference time.
# `model_senet` and `device` are defined earlier in the kernel.
state = torch.load(
    '../input/senetmodels/senext50_30_epochs_high_threshold.pth',
    map_location=lambda storage, loc: storage)
model_senet.load_state_dict(state["state_dict"])

# FPN with an se_resnext50_32x4d encoder, loaded from a fold-0 checkpoint (dice 0.944).
model_fpn91lb = FPN(encoder_name="se_resnext50_32x4d", classes=4, activation=None, encoder_weights=None)
model_fpn91lb.to(device)
model_fpn91lb.eval()
# state = torch.load('../input/fpnseresnext/model_se_resnext50_32x4d_fold_0_epoch_7_dice_0.935771107673645.pth', map_location=lambda storage, loc: storage)
state = torch.load(
    '../input/fpnse50dice944/model_se_resnext50_32x4d_fold_0_epoch_26_dice_0.94392.pth',
    map_location=lambda storage, loc: storage)
model_fpn91lb.load_state_dict(state["state_dict"])

# Same architecture, loaded from a checkpoint fine-tuned on pseudo-labels.
model_fpn91lb_pseudo = FPN(encoder_name="se_resnext50_32x4d", classes=4, activation=None, encoder_weights=None)
model_fpn91lb_pseudo.to(device)
model_fpn91lb_pseudo.eval()
state = torch.load(
    '../input/942-finetuned-on-pseudo-to9399/pseudo_fpn_se_resnext50_32x4d_fold_0_epoch_22_dice_0.944/pseudo_fpn_se_resnext50_32x4d_fold_0_epoch_22_dice_0.9446276426315308.pth',
    map_location=lambda storage, loc: storage)
model_fpn91lb_pseudo.load_state_dict(state["state_dict"])

ENCODER = 'se_resnext50_32x4d'
ENCODER_WEIGHTS = 'imagenet'
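# --- Ensembling sketch (assumption, not the kernel's actual blending code) -----
# One plausible way to combine the three loaded models: average their per-pixel
# sigmoid probabilities. Any weights or thresholds used by the real kernel are
# not shown in this excerpt; the unweighted mean below is illustrative only.
def ensemble_probs(batch, models=(model_senet, model_fpn91lb, model_fpn91lb_pseudo)):
    batch = batch.to(device)
    with torch.no_grad():
        probs = [torch.sigmoid(m(batch)) for m in models]  # each: (N, 4, H, W)
    return torch.stack(probs, dim=0).mean(dim=0)           # unweighted mean over models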
import os

import numpy as np
import pandas as pd
import SimpleITK
import torch
import torch.nn as nn
from torch.utils.data import DataLoader


def predict_valid():
    """Run the fold-wise FPN checkpoints over their validation patients and write
    the predicted masks out as NIfTI volumes (one per patient).

    Relies on valid_aug, extract_slice, TestDataset, predict, and `device`
    defined elsewhere in the project."""
    inputdir = "/data/Thoracic_OAR/"
    transform = valid_aug(image_size=512)
    # nii_files = glob.glob(inputdir + "/*/data.nii.gz")
    folds = [0, 1, 2, 3, 4]
    for fold in folds:
        print(fold)
        outdir = "/data/Thoracic_OAR_predict/FPN-seresnext50/"
        log_dir = f"/logs/ss_miccai/FPN-se_resnext50_32x4d-fold-{fold}"
        # model = VNet(
        #     encoder_name='se_resnext50_32x4d',
        #     encoder_weights=None,
        #     classes=7,
        #     # activation='sigmoid',
        #     group_norm=False,
        #     center='none',
        #     attention_type='scse',
        #     reslink=True,
        #     multi_task=False,
        # )
        model = FPN(encoder_name='se_resnext50_32x4d', encoder_weights=None, classes=7)

        # Load the best checkpoint for this fold and wrap the model for multi-GPU inference.
        ckp = os.path.join(log_dir, "checkpoints/best.pth")
        checkpoint = torch.load(ckp)
        model.load_state_dict(checkpoint['model_state_dict'])
        model = nn.DataParallel(model)
        model = model.to(device)

        df = pd.read_csv(f'./csv/5folds/valid_{fold}.csv')
        patient_ids = df.patient_id.unique()
        for patient_id in patient_ids:
            print(patient_id)
            nii_file = f"{inputdir}/{patient_id}/data.nii.gz"
            image_slices, ct_image = extract_slice(nii_file)
            dataset = TestDataset(image_slices, transform)
            dataloader = DataLoader(dataset=dataset, num_workers=4, batch_size=8, drop_last=False)

            # Predict per-slice class probabilities, then take the per-pixel argmax.
            pred_mask, pred_logits = predict(model, dataloader)
            pred_mask = np.argmax(pred_mask, axis=1).astype(np.uint8)

            # Copy the CT geometry onto the predicted mask before saving.
            pred_mask = SimpleITK.GetImageFromArray(pred_mask)
            pred_mask.SetDirection(ct_image.GetDirection())
            pred_mask.SetOrigin(ct_image.GetOrigin())
            pred_mask.SetSpacing(ct_image.GetSpacing())

            patient_dir = f"{outdir}/{patient_id}"
            os.makedirs(patient_dir, exist_ok=True)
            patient_pred = f"{patient_dir}/predict.nii.gz"
            SimpleITK.WriteImage(pred_mask, patient_pred)
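# --- Sketch of the predict() helper (assumed; not defined in this excerpt) -----
# predict_valid() expects predict() to return per-slice class probabilities
# (argmax'ed over axis 1 by the caller) plus the raw logits. A minimal version
# consistent with that usage, assuming TestDataset yields image tensors and that
# `device` is the global CUDA device; the project's real helper may differ.
def predict(model, dataloader):
    model.eval()
    all_probs, all_logits = [], []
    with torch.no_grad():
        for images in dataloader:
            images = images.to(device)
            logits = model(images)                              # (N, 7, H, W)
            all_logits.append(logits.cpu().numpy())
            all_probs.append(torch.softmax(logits, dim=1).cpu().numpy())
    return np.concatenate(all_probs, axis=0), np.concatenate(all_logits, axis=0)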