"""Evaluation entry point: patch-merge dynamic-UNet inference over whole oral slides.

Builds the model from the patch-merge config, wraps it in a Runner (which loads
weights via create_model_load_weights), assembles the slide-level test dataset,
and writes slide inference results to cfg.test_output_path.
"""
import os
import json

from models.segmentor.dynamicUNet import Unet
from dataset.transformer import TransformerVal
from dataset.dataset import OralSlide, collate
from helper.helper_unet import SlideInference, create_model_load_weights
from helper.runner import Runner
from configs.config_patch_merge_unet import Config

# Single-process evaluation; no distributed data parallel.
distributed = False

cfg = Config(mode='patch-merge', train=False)
model = Unet(classes=cfg.n_class, encoder_name=cfg.encoder, **cfg.model_cfg)
runner = Runner(cfg, model, create_model_load_weights, distributed=distributed)

###################################
print("preparing datasets......")
slideset_cfg = cfg.testset_cfg
# Sort so the slide order is deterministic across runs and filesystems.
slide_list = sorted(os.listdir(slideset_cfg["img_dir"]))
transformer = TransformerVal()  # validation-time transform: no augmentation
dataset = OralSlide(
    slide_list,
    slideset_cfg["img_dir"],
    slideset_cfg["meta_file"],
    slide_mask_dir=slideset_cfg["mask_dir"],
    label=slideset_cfg['label'],
    transform=transformer,
)

runner.eval_slide(dataset, SlideInference, cfg.test_output_path)
###################################
# Training setup: build train/val datasets and choose the loss criterion.
print("preparing datasets......")
batch_size = cfg.batch_size
num_workers = cfg.num_workers
trainset_cfg = cfg.trainset_cfg
valset_cfg = cfg.valset_cfg

# NOTE(review): Transformer, OralDataset, nn, SymmetricCrossEntropyLoss and
# FocalLoss are not imported in this chunk — presumably brought into scope
# elsewhere in the file; confirm before relying on this section in isolation.
transformer_train = Transformer()  # training transform (augmentation)
dataset_train = OralDataset(
    trainset_cfg["img_dir"],
    trainset_cfg["mask_dir"],
    trainset_cfg["meta_file"],
    label=trainset_cfg["label"],
    transform=transformer_train,
)
transformer_val = TransformerVal()  # validation transform (no augmentation)
dataset_val = OralDataset(
    valset_cfg["img_dir"],
    valset_cfg["mask_dir"],
    valset_cfg["meta_file"],
    label=valset_cfg["label"],
    transform=transformer_val,
)

# Select the training criterion from config; parameters for each loss live
# under cfg.loss_cfg keyed by the loss name.
if cfg.loss == "ce":
    criterion = nn.CrossEntropyLoss(reduction='mean')
elif cfg.loss == "sce":
    criterion = SymmetricCrossEntropyLoss(
        alpha=cfg.loss_cfg['sce']['alpha'],
        beta=cfg.loss_cfg['sce']['beta'],
        num_classes=cfg.n_class,
    )
    # criterion4 = NormalizedSymmetricCrossEntropyLoss(alpha=cfg.alpha, beta=cfg.beta, num_classes=cfg.n_class)
elif cfg.loss == "focal":
    criterion = FocalLoss(gamma=cfg.loss_cfg['focal']['gamma'])
else:
    # Fail fast on an unknown loss name instead of a confusing NameError on
    # `criterion` further down the training script.
    raise ValueError(f"unsupported loss: {cfg.loss!r}")