Example #1
def Test():
    print('********************load data********************')
    dataloader_test = get_test_dataloader(batch_size=config['BATCH_SIZE'],
                                          shuffle=False,
                                          num_workers=8)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        model = CXRNet(num_classes=N_CLASSES, is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'best_model_CXRNet.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(checkpoint)  #strict=False
        print("=> loaded Image model checkpoint: " + CKPT_PATH)
        model.eval()  #switch the classifier to evaluation mode for testing
        torch.backends.cudnn.benchmark = True  # improve train speed slightly

        model_unet = UNet(n_channels=3, n_classes=1).cuda()  #initialize model
        CKPT_PATH = config['CKPT_PATH'] + 'best_unet.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model_unet.load_state_dict(checkpoint)  #strict=False
            print("=> loaded well-trained unet model checkpoint: " + CKPT_PATH)
        model_unet.eval()
    else:
        print('No required model')
        return  #over
    print('******************** load model succeed!********************')

    print('******* begin testing!*********')
    gt = torch.FloatTensor().cuda()
    pred = torch.FloatTensor().cuda()
    with torch.autograd.no_grad():
        for batch_idx, (image, label) in enumerate(dataloader_test):
            gt = torch.cat((gt, label.cuda()), 0)
            var_image = torch.autograd.Variable(image).cuda()
            var_label = torch.autograd.Variable(label).cuda()
            var_mask = model_unet(var_image)
            var_output = model(var_image, var_mask)  #forward
            pred = torch.cat((pred, var_output.data), 0)
            sys.stdout.write('\r testing process: = {}'.format(batch_idx + 1))
            sys.stdout.flush()

    #for evaluation
    AUROC_all = compute_AUCs(gt, pred)
    AUROC_avg = np.array(AUROC_all).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_all[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))
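
compute_AUCs is called by every example here but never shown. Below is a minimal sketch, assuming gt and pred are [N, N_CLASSES] tensors of binary targets and sigmoid scores; the optional third argument mirrors the calls in Examples #5 and #7, and the body is an assumption, not the original helper.

import numpy as np
from sklearn.metrics import roc_auc_score

def compute_AUCs(gt, pred, n_classes=None):
    """Per-class AUROC for multi-label predictions (sketch).
    gt, pred: tensors of shape [N, C] with targets in {0,1} and scores in [0,1]."""
    gt_np = gt.cpu().numpy()
    pred_np = pred.cpu().numpy()
    n_classes = n_classes if n_classes is not None else gt_np.shape[1]
    aurocs = []
    for c in range(n_classes):
        try:
            aurocs.append(roc_auc_score(gt_np[:, c], pred_np[:, c]))
        except ValueError:  # class has only one label value in this split
            aurocs.append(float('nan'))
    return aurocs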
Example #2
def Train():
    print('********************load data********************')
    dataloader_train, dataloader_val = get_train_val_dataloader(
        batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        model = CXRNet(num_classes=N_CLASSES,
                       is_pre_trained=True).cuda()  #initialize model
        optimizer_model = optim.Adam(model.parameters(),
                                     lr=1e-3,
                                     betas=(0.9, 0.999),
                                     eps=1e-08,
                                     weight_decay=1e-5)
        lr_scheduler_model = lr_scheduler.StepLR(optimizer_model,
                                                 step_size=10,
                                                 gamma=1)
        torch.backends.cudnn.benchmark = True  # improve train speed slightly
        bce_criterion = nn.BCELoss()  #define binary cross-entropy loss
        #mse_criterion = nn.MSELoss() #define regression loss

        model_unet = UNet(n_channels=3, n_classes=1).cuda()  #initialize model
        CKPT_PATH = config['CKPT_PATH'] + 'best_unet.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model_unet.load_state_dict(checkpoint)  #strict=False
            print("=> loaded well-trained unet model checkpoint: " + CKPT_PATH)
        model_unet.eval()
    else:
        print('No required model')
        return  #over
    print('********************load model succeed!********************')

    print('********************begin training!********************')
    AUROC_best = 0.50
    for epoch in range(config['MAX_EPOCHS']):
        since = time.time()
        print('Epoch {}/{}'.format(epoch + 1, config['MAX_EPOCHS']))
        print('-' * 10)
        train_loss = []
        model.train()  #set model to training mode
        with torch.autograd.enable_grad():
            for batch_idx, (image, label) in enumerate(dataloader_train):
                optimizer_model.zero_grad()
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                var_mask = model_unet(var_image)
                var_output = model(var_image, var_mask)  #forward
                loss_tensor = bce_criterion(var_output, var_label)
                loss_tensor.backward()
                optimizer_model.step()
                train_loss.append(loss_tensor.item())
                sys.stdout.write(
                    '\r Epoch: {} / Step: {} : train BCE loss = {}'.format(
                        epoch + 1, batch_idx + 1,
                        float('%0.6f' % loss_tensor.item())))
                sys.stdout.flush()
        lr_scheduler_model.step()  #about lr and gamma
        print("\r Eopch: %5d train loss = %.6f" %
              (epoch + 1, np.mean(train_loss)))

        model.eval()  #turn to test mode
        val_loss = []
        gt = torch.FloatTensor().cuda()
        pred = torch.FloatTensor().cuda()
        with torch.autograd.no_grad():
            for batch_idx, (image, label) in enumerate(dataloader_val):
                gt = torch.cat((gt, label.cuda()), 0)
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                var_mask = model_unet(var_image)
                var_output = model(var_image, var_mask)  #forward
                loss_tensor = bce_criterion(var_output, var_label)
                pred = torch.cat((pred, var_output.data), 0)
                val_loss.append(loss_tensor.item())
                sys.stdout.write(
                    '\r Epoch: {} / Step: {} : validation loss = {}'.format(
                        epoch + 1, batch_idx + 1,
                        float('%0.6f' % loss_tensor.item())))
                sys.stdout.flush()
        #evaluation
        AUROCs_avg = np.array(compute_AUCs(gt, pred)).mean()
        logger.info(
            "\r Epoch: %5d validation loss = %.6f, Validation AUROC image=%.4f"
            % (epoch + 1, np.mean(val_loss), AUROCs_avg))

        #save checkpoint
        if AUROC_best < AUROCs_avg:
            AUROC_best = AUROCs_avg
            torch.save(
                model.state_dict(), config['CKPT_PATH'] +
                'best_model_CXRNet.pkl')  #Saving torch.nn.DataParallel Models
            print(' Epoch: {} model has been saved!'.format(epoch + 1))

        time_elapsed = time.time() - since
        print('Training epoch: {} completed in {:.0f}m {:.0f}s'.format(
            epoch + 1, time_elapsed // 60, time_elapsed % 60))
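
These Train()/Test() functions rely on module-level names (args, config, logger, the data loaders, the class constants) defined elsewhere in the original script. Below is a minimal driver sketch using only the flag names and config keys that appear above; every concrete value is an assumption.

import argparse
import logging

parser = argparse.ArgumentParser(description='CXR classification')
parser.add_argument('--model', type=str, default='CXRNet', help='model to train/test')
args = parser.parse_args()

config = {
    'BATCH_SIZE': 32,        # assumed value
    'MAX_EPOCHS': 20,        # assumed value
    'CKPT_PATH': './ckpt/',  # directory holding the *.pkl checkpoints
}

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    Train()
    Test()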
Example #3
def Test():
    print('********************load data********************')
    dataloader_test = get_test_dataloader(batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        model_img = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True, is_roi=False).cuda()
        CKPT_PATH = config['CKPT_PATH']  +'img_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_img.load_state_dict(checkpoint) #strict=False
        print("=> loaded Image model checkpoint: "+CKPT_PATH)

        model_roi = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True, is_roi=True).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'roi_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_roi.load_state_dict(checkpoint) #strict=False
        print("=> loaded ROI model checkpoint: "+CKPT_PATH)

        model_fusion = FusionClassifier(input_size=2048, output_size=N_CLASSES).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'fusion_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_fusion.load_state_dict(checkpoint) #strict=False
        print("=> loaded Fusion model checkpoint: "+CKPT_PATH)

        roigen = ROIGenerator() #region generator

    else: 
        print('No required model')
        return #over
    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    print('******************** load model succeed!********************')

    print('******* begin testing!*********')
    gt = torch.FloatTensor().cuda()
    pred_img = torch.FloatTensor().cuda()
    pred_roi = torch.FloatTensor().cuda()
    pred_fusion = torch.FloatTensor().cuda()
    # switch to evaluate mode
    model_img.eval() #turn to test mode
    model_roi.eval()
    model_fusion.eval()
    cudnn.benchmark = True
    with torch.autograd.no_grad():
        for batch_idx, (image, label) in enumerate(dataloader_test):
            gt = torch.cat((gt, label.cuda()), 0)
            #image-level
            var_image = torch.autograd.Variable(image).cuda()
            #var_label = torch.autograd.Variable(label).cuda()
            conv_fea_img, fc_fea_img, out_img = model_img(var_image)#forward
            pred_img = torch.cat((pred_img, out_img.data), 0)
            #ROI-level
            #-----predicted label---------------
            shape_l, shape_c = out_img.size()[0], out_img.size()[1]
            pdlabel = torch.FloatTensor(shape_l, shape_c).zero_()
            for i in range(shape_l):
                for j in range(shape_c):
                    if out_img[i, j] > classes_threshold_common[j]:
                        pdlabel[i, j] = 1.0
            #-----predicted label---------------
            cls_weights = list(model_img.parameters())
            weight_softmax = np.squeeze(cls_weights[-5].data.cpu().numpy())
            roi = roigen.ROIGeneration(image, conv_fea_img, weight_softmax, pdlabel.numpy())
            var_roi = torch.autograd.Variable(roi).cuda()
            _, fc_fea_roi, out_roi = model_roi(var_roi)
            pred_roi = torch.cat((pred_roi, out_roi.data), 0)
            #Fusion
            fc_fea_fusion = torch.cat((fc_fea_img,fc_fea_roi), 1)
            var_fusion = torch.autograd.Variable(fc_fea_fusion).cuda()
            out_fusion = model_fusion(var_fusion)
            pred_fusion = torch.cat((pred_fusion, out_fusion.data), 0)
            sys.stdout.write('\r testing process: = {}'.format(batch_idx+1))
            sys.stdout.flush()

    #for evaluation
    AUROC_img = compute_AUCs(gt, pred_img)
    AUROC_avg = np.array(AUROC_img).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_img[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))

    AUROC_roi = compute_AUCs(gt, pred_roi)
    AUROC_avg = np.array(AUROC_roi).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_roi[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))

    AUROC_fusion = compute_AUCs(gt, pred_fusion)
    AUROC_avg = np.array(AUROC_fusion).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_fusion[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))

    #Evaluating the threshold of prediction
    thresholds = compute_ROCCurve(gt, pred_fusion)
    print(thresholds)
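
compute_ROCCurve is not shown either. Since this example prints its return value as candidate prediction thresholds, one plausible sketch picks an operating threshold per class from the ROC curve via Youden's J statistic; this is an assumption about the original helper (Example #7 calls a variant with extra arguments for plotting).

import numpy as np
from sklearn.metrics import roc_curve

def compute_ROCCurve(gt, pred):
    """Return one threshold per class, chosen where TPR - FPR is maximal (sketch)."""
    gt_np = gt.cpu().numpy()
    pred_np = pred.cpu().numpy()
    thresholds = []
    for c in range(gt_np.shape[1]):
        fpr, tpr, thr = roc_curve(gt_np[:, c], pred_np[:, c])
        thresholds.append(float(thr[np.argmax(tpr - fpr)]))
    return thresholds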
Example #4
def Train():
    print('********************load data********************')
    dataloader_train = get_train_dataloader(batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
    dataloader_val = get_validation_dataloader(batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
    #dataloader_train, dataloader_val = get_train_val_dataloader(batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8, split_ratio=0.1)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        model_img = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True, is_roi=False).cuda()#initialize model 
        #model_img = nn.DataParallel(model_img).cuda()  # make model available multi GPU cores training
        optimizer_img = optim.Adam(model_img.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        lr_scheduler_img = lr_scheduler.StepLR(optimizer_img, step_size = 10, gamma = 1)

        roigen = ROIGenerator()

        model_roi = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True, is_roi=True).cuda()
        #model_roi = nn.DataParallel(model_roi).cuda()
        optimizer_roi = optim.Adam(model_roi.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        lr_scheduler_roi = lr_scheduler.StepLR(optimizer_roi, step_size = 10, gamma = 1)

        model_fusion = FusionClassifier(input_size=2048, output_size=N_CLASSES).cuda()
        #model_fusion = nn.DataParallel(model_fusion).cuda()
        optimizer_fusion = optim.Adam(model_fusion.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        lr_scheduler_fusion = lr_scheduler.StepLR(optimizer_fusion, step_size = 10, gamma = 1)
    else: 
        print('No required model')
        return #over

    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    bce_criterion = nn.BCELoss() #define binary cross-entropy loss
    print('********************load model succeed!********************')

    print('********************begin training!********************')
    AUROC_best = 0.50
    for epoch in range(config['MAX_EPOCHS']):
        since = time.time()
        print('Epoch {}/{}'.format(epoch+1 , config['MAX_EPOCHS']))
        print('-' * 10)
        model_img.train()  #set model to training mode
        model_roi.train()
        model_fusion.train()
        train_loss = []
        with torch.autograd.enable_grad():
            for batch_idx, (image, label) in enumerate(dataloader_train):
                optimizer_img.zero_grad()
                optimizer_roi.zero_grad() 
                optimizer_fusion.zero_grad() 
                #image-level
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                conv_fea_img, fc_fea_img, out_img = model_img(var_image)#forward
                loss_img = bce_criterion(out_img, var_label)
                #ROI-level
                cls_weights = list(model_img.parameters())
                weight_softmax = np.squeeze(cls_weights[-5].data.cpu().numpy())
                roi = roigen.ROIGeneration(image, conv_fea_img, weight_softmax, label.numpy())
                var_roi = torch.autograd.Variable(roi).cuda()
                _, fc_fea_roi, out_roi = model_roi(var_roi)
                loss_roi = bce_criterion(out_roi, var_label) 
                #Fusion
                fc_fea_fusion = torch.cat((fc_fea_img,fc_fea_roi), 1)
                var_fusion = torch.autograd.Variable(fc_fea_fusion).cuda()
                out_fusion = model_fusion(var_fusion)
                loss_fusion = bce_criterion(out_fusion, var_label) 
                #backward and update parameters 
                loss_tensor = 0.7*loss_img + 0.2*loss_roi + 0.1*loss_fusion
                loss_tensor.backward() 
                optimizer_img.step() 
                optimizer_roi.step()
                optimizer_fusion.step() 
                train_loss.append(loss_tensor.item())
                #print([x.grad for x in optimizer.param_groups[0]['params']])
                sys.stdout.write('\r Epoch: {} / Step: {} : image loss ={}, roi loss ={}, fusion loss = {}, train loss = {}'
                                .format(epoch+1, batch_idx+1, float('%0.6f'%loss_img.item()), float('%0.6f'%loss_roi.item()),
                                float('%0.6f'%loss_fusion.item()), float('%0.6f'%loss_tensor.item()) ))
                sys.stdout.flush()        
        lr_scheduler_img.step()  #about lr and gamma
        lr_scheduler_roi.step()
        lr_scheduler_fusion.step()
        print("\r Eopch: %5d train loss = %.6f" % (epoch + 1, np.mean(train_loss))) 

        model_img.eval() #turn to test mode
        model_roi.eval()
        model_fusion.eval()
        loss_img_all, loss_roi_all, loss_fusion_all = [], [], []
        val_loss = []
        gt = torch.FloatTensor().cuda()
        pred_img = torch.FloatTensor().cuda()
        pred_roi = torch.FloatTensor().cuda()
        pred_fusion = torch.FloatTensor().cuda()
        with torch.autograd.no_grad():
            for batch_idx, (image, label) in enumerate(dataloader_val):
                gt = torch.cat((gt, label.cuda()), 0)
                #image-level
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                conv_fea_img, fc_fea_img, out_img = model_img(var_image)#forward
                loss_img = bce_criterion(out_img, var_label) 
                pred_img = torch.cat((pred_img, out_img.data), 0)
                #ROI-level
                cls_weights = list(model_img.parameters())
                weight_softmax = np.squeeze(cls_weights[-5].data.cpu().numpy())
                roi = roigen.ROIGeneration(image, conv_fea_img, weight_softmax, label.numpy())
                var_roi = torch.autograd.Variable(roi).cuda()
                _, fc_fea_roi, out_roi = model_roi(var_roi)
                loss_roi = bce_criterion(out_roi, var_label) 
                pred_roi = torch.cat((pred_roi, out_roi.data), 0)
                #Fusion
                fc_fea_fusion = torch.cat((fc_fea_img,fc_fea_roi), 1)
                var_fusion = torch.autograd.Variable(fc_fea_fusion).cuda()
                out_fusion = model_fusion(var_fusion)
                loss_fusion = bce_criterion(out_fusion, var_label) 
                pred_fusion = torch.cat((pred_fusion, out_fusion.data), 0)
                #loss
                loss_tensor = 0.7*loss_img + 0.2*loss_roi + 0.1*loss_fusion
                val_loss.append(loss_tensor.item())
                sys.stdout.write('\r Epoch: {} / Step: {} : image loss ={}, roi loss ={}, fusion loss = {}, validation loss = {}'
                                .format(epoch+1, batch_idx+1, float('%0.6f'%loss_img.item()), float('%0.6f'%loss_roi.item()),
                                float('%0.6f'%loss_fusion.item()), float('%0.6f'%loss_tensor.item()) ))
                sys.stdout.flush()
                
                loss_img_all.append(loss_img.item())
                loss_roi_all.append(loss_roi.item())
                loss_fusion_all.append(loss_fusion.item())
        #evaluation       
        AUROCs_img = np.array(compute_AUCs(gt, pred_img)).mean()
        AUROCs_roi = np.array(compute_AUCs(gt, pred_roi)).mean()
        AUROCs_fusion = np.array(compute_AUCs(gt, pred_fusion)).mean()
        print("\r Eopch: %5d validation loss = %.6f, Validataion AUROC image=%.4f roi=%.4f fusion=%.4f" 
              % (epoch + 1, np.mean(val_loss), AUROCs_img, AUROCs_roi, AUROCs_fusion)) 

        logger.info("\r Eopch: %5d validation loss = %.4f, image loss = %.4f,  roi loss =%.4f fusion loss =%.4f" 
                     % (epoch + 1, np.mean(val_loss), np.mean(loss_img_all), np.mean(loss_roi_all), np.mean(loss_fusion_all))) 
        #save checkpoint
        if AUROC_best < AUROCs_fusion:
            AUROC_best = AUROCs_fusion
            #torch.save(model.module.state_dict(), CKPT_PATH)
            torch.save(model_img.state_dict(), config['CKPT_PATH'] +  'img_model.pkl') #Saving torch.nn.DataParallel Models
            torch.save(model_roi.state_dict(), config['CKPT_PATH'] + 'roi_model.pkl')
            torch.save(model_fusion.state_dict(), config['CKPT_PATH'] + 'fusion_model.pkl')
            print(' Epoch: {} model has been saved!'.format(epoch+1))
    
        time_elapsed = time.time() - since
        print('Training epoch: {} completed in {:.0f}m {:.0f}s'.format(epoch+1, time_elapsed // 60 , time_elapsed % 60))
Example #5
def Train():
    print('********************load data********************')
    if args.dataset == 'NIHCXR':
        dataloader_train = get_train_dataloader_NIH(
            batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
        dataloader_val = get_test_dataloader_NIH(
            batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    elif args.dataset == 'VinCXR':
        dataloader_train = get_train_dataloader_VIN(
            batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
        dataloader_val = get_val_dataloader_VIN(
            batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    else:
        print('No required dataset')
        return
    print('********************load data succeed!********************')

    print('********************load model********************')
    if args.model == 'CXRNet' and args.dataset == 'NIHCXR':
        N_CLASSES = len(CLASS_NAMES_NIH)
        model = CXRNet(num_classes=N_CLASSES,
                       is_pre_trained=True)  #initialize model
        CKPT_PATH = config[
            'CKPT_PATH'] + args.model + '_' + args.dataset + '_best.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model.load_state_dict(checkpoint)  #strict=False
            print(
                "=> Loaded well-trained CXRNet model checkpoint of NIH-CXR dataset: "
                + CKPT_PATH)
    elif args.model == 'CXRNet' and args.dataset == 'VinCXR':
        N_CLASSES = len(CLASS_NAMES_Vin)
        model = CXRNet(num_classes=N_CLASSES,
                       is_pre_trained=True)  #initialize model
        #model = se_densenet121(t_num_classes=N_CLASSES, pretrained=True)#initialize model
        CKPT_PATH = config[
            'CKPT_PATH'] + args.model + '_' + args.dataset + '_best.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model.load_state_dict(checkpoint)  #strict=False
            print(
                "=> Loaded well-trained CXRNet model checkpoint of Vin-CXR dataset: "
                + CKPT_PATH)
    else:
        print('No required model')
        return  #over
    model = nn.DataParallel(
        model).cuda()  # make model available multi GPU cores training
    optimizer_model = optim.Adam(model.parameters(),
                                 lr=1e-3,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=1e-5)
    lr_scheduler_model = lr_scheduler.StepLR(optimizer_model,
                                             step_size=10,
                                             gamma=1)
    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    bce_criterion = nn.BCELoss()  #define binary cross-entropy loss
    print('********************load model succeed!********************')

    print('********************begin training!********************')
    AUROC_best = 0.50
    for epoch in range(config['MAX_EPOCHS']):
        since = time.time()
        print('Epoch {}/{}'.format(epoch + 1, config['MAX_EPOCHS']))
        print('-' * 10)
        model.train()  #set model to training mode
        train_loss = []
        with torch.autograd.enable_grad():
            for batch_idx, (image, label, box) in enumerate(dataloader_train):
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()

                optimizer_model.zero_grad()
                _, var_output = model(var_image)
                loss_tensor = bce_criterion(var_output, var_label)  #backward
                loss_tensor.backward()
                optimizer_model.step()  ##update parameters

                sys.stdout.write(
                    '\r Epoch: {} / Step: {} : train loss = {}'.format(
                        epoch + 1, batch_idx + 1,
                        float('%0.6f' % loss_tensor.item())))
                sys.stdout.flush()
                train_loss.append(loss_tensor.item())
        lr_scheduler_model.step()  #about lr and gamma
        print("\r Eopch: %5d train loss = %.6f" %
              (epoch + 1, np.mean(train_loss)))

        model.eval()  #turn to test mode
        val_loss = []
        gt = torch.FloatTensor().cuda()
        pred = torch.FloatTensor().cuda()
        with torch.autograd.no_grad():
            for batch_idx, (image, label, box) in enumerate(dataloader_val):
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                _, var_output = model(var_image)  #forward
                loss_tensor = bce_criterion(var_output, var_label)  #backward
                sys.stdout.write(
                    '\r Epoch: {} / Step: {} : validation loss = {}'.format(
                        epoch + 1, batch_idx + 1,
                        float('%0.6f' % loss_tensor.item())))
                sys.stdout.flush()
                val_loss.append(loss_tensor.item())
                gt = torch.cat((gt, label.cuda()), 0)
                pred = torch.cat((pred, var_output.data), 0)
        AUROCs = compute_AUCs(gt, pred, N_CLASSES)
        AUROC_avg = np.array(AUROCs).mean()
        logger.info(
            "\r Epoch: %5d validation loss = %.6f, Validation AUROC = %.4f" %
            (epoch + 1, np.mean(val_loss), AUROC_avg))

        AUROC_avg = Test()
        if AUROC_best < AUROC_avg:
            AUROC_best = AUROC_avg
            CKPT_PATH = config[
                'CKPT_PATH'] + args.model + '_' + args.dataset + '_best.pkl'
            torch.save(model.module.state_dict(),
                       CKPT_PATH)  #Saving torch.nn.DataParallel Models
            print(' Epoch: {} model has been saved!'.format(epoch + 1))

        time_elapsed = time.time() - since
        print('Training epoch: {} completed in {:.0f}m {:.0f}s'.format(
            epoch + 1, time_elapsed // 60, time_elapsed % 60))
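
Example #5 wraps the network in nn.DataParallel and saves model.module.state_dict(); DataParallel prefixes every parameter key with "module.", so saving the inner module keeps the checkpoint loadable by the plain CXRNet built in Example #7. A small sketch of that pairing:

import torch
import torch.nn as nn

def save_checkpoint(model, path):
    # unwrap DataParallel (if used) so checkpoint keys carry no "module." prefix
    state = model.module.state_dict() if isinstance(model, nn.DataParallel) else model.state_dict()
    torch.save(state, path)

def load_checkpoint(model, path):
    # works for a plain module because the keys were saved without the prefix
    model.load_state_dict(torch.load(path, map_location='cpu'))
    return model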
Example #6
def Test():
    print('********************load data********************')
    dataloader_test = get_test_dataloader(batch_size=config['BATCH_SIZE'],
                                          shuffle=False,
                                          num_workers=8)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        model_img = ImageClassifier(num_classes=N_CLASSES,
                                    is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'img_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_img.load_state_dict(checkpoint)  #strict=False
        print("=> loaded Image model checkpoint: " + CKPT_PATH)

        model_roi = RegionClassifier(num_classes=N_CLASSES,
                                     is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'roi_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_roi.load_state_dict(checkpoint)  #strict=False
        print("=> loaded ROI model checkpoint: " + CKPT_PATH)

        model_fusion = FusionClassifier(input_size=2048,
                                        output_size=N_CLASSES).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'fusion_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_fusion.load_state_dict(checkpoint)  #strict=False
        print("=> loaded Fusion model checkpoint: " + CKPT_PATH)

    else:
        print('No required model')
        return  #over
    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    print('******************** load model succeed!********************')

    print('******* begin testing!*********')
    gt = torch.FloatTensor().cuda()
    pred_img = torch.FloatTensor().cuda()
    pred_roi = torch.FloatTensor().cuda()
    pred_fusion = torch.FloatTensor().cuda()
    # switch to evaluate mode
    model_img.eval()  #turn to test mode
    model_roi.eval()
    model_fusion.eval()
    cudnn.benchmark = True
    with torch.autograd.no_grad():
        for batch_idx, (image, label) in enumerate(dataloader_test):
            gt = torch.cat((gt, label.cuda()), 0)
            var_image = torch.autograd.Variable(image).cuda()
            #image-level
            fc_fea_img, out_img = model_img(var_image)  #forward
            pred_img = torch.cat((pred_img, out_img.data), 0)
            #ROI-level
            fc_fea_roi, out_roi = model_roi(var_image)
            pred_roi = torch.cat((pred_roi, out_roi.data), 0)
            #Fusion
            fc_fea_fusion = torch.cat((fc_fea_img, fc_fea_roi), 1)
            var_fusion = torch.autograd.Variable(fc_fea_fusion).cuda()
            out_fusion = model_fusion(var_fusion)
            pred_fusion = torch.cat((pred_fusion, out_fusion.data), 0)
            sys.stdout.write('\r testing process: = {}'.format(batch_idx + 1))
            sys.stdout.flush()

    #for evaluation
    AUROC_img = compute_AUCs(gt, pred_img)
    AUROC_avg = np.array(AUROC_img).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_img[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))

    AUROC_roi = compute_AUCs(gt, pred_roi)
    AUROC_avg = np.array(AUROC_roi).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_roi[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))

    AUROC_fusion = compute_AUCs(gt, pred_fusion)
    AUROC_avg = np.array(AUROC_fusion).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i],
                                                 AUROC_fusion[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))
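
FusionClassifier is constructed with input_size=2048, i.e. the two concatenated fc feature vectors, and its output is fed straight into nn.BCELoss during training (Example #4), which implies a sigmoid output. A sketch under those assumptions follows; the original module may well be deeper.

import torch.nn as nn

class FusionClassifier(nn.Module):
    """Single fully connected layer over fused image + ROI features (sketch)."""
    def __init__(self, input_size, output_size):
        super().__init__()
        self.fc = nn.Linear(input_size, output_size)
        self.sigmoid = nn.Sigmoid()  # probabilities, as required by nn.BCELoss

    def forward(self, x):
        return self.sigmoid(self.fc(x))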
Example #7
def Test():
    print('********************load data********************')
    if args.testset == 'NIHCXR':
        dataloader_test = get_test_dataloader_NIH(
            batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    elif args.testset == 'CVTECXR':
        dataloader_test = get_test_dataloader_CVTE(
            batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    elif args.testset == 'VinCXR':
        dataloader_test = get_val_dataloader_VIN(
            batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    else:
        print('No required dataset')
        return
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet' and args.dataset == 'NIHCXR':
        CLASS_NAMES = CLASS_NAMES_NIH
        N_CLASSES = len(CLASS_NAMES_NIH)
        model = CXRNet(num_classes=N_CLASSES,
                       is_pre_trained=True).cuda()  #initialize model
        CKPT_PATH = config[
            'CKPT_PATH'] + args.model + '_' + args.dataset + '_best.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model.load_state_dict(checkpoint)  #strict=False
            print(
                "=> Loaded well-trained CXRNet model checkpoint of NIH-CXR dataset: "
                + CKPT_PATH)
    elif args.model == 'CXRNet' and args.dataset == 'VinCXR':
        CLASS_NAMES = CLASS_NAMES_Vin
        N_CLASSES = len(CLASS_NAMES_Vin)
        model = CXRNet(num_classes=N_CLASSES,
                       is_pre_trained=True).cuda()  #initialize model
        #model = se_densenet121(t_num_classes=N_CLASSES, pretrained=True).cuda()#initialize model
        CKPT_PATH = config[
            'CKPT_PATH'] + args.model + '_' + args.dataset + '_best.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model.load_state_dict(checkpoint)  #strict=False
            print(
                "=> Loaded well-trained CXRNet model checkpoint of Vin-CXR dataset: "
                + CKPT_PATH)

    else:
        print('No required model')
        return  #over
    model.eval()
    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    print('******************** load model succeed!********************')

    print('******* begin testing!*********')
    gt = torch.FloatTensor().cuda()
    pred = torch.FloatTensor().cuda()
    name_list = []
    with torch.autograd.no_grad():
        for batch_idx, (image, label, name) in enumerate(dataloader_test):
            var_image = torch.autograd.Variable(image).cuda()
            var_label = torch.autograd.Variable(label).cuda()
            _, var_output = model(var_image)  #forward
            gt = torch.cat((gt, label.cuda()), 0)
            pred = torch.cat((pred, var_output.data), 0)
            name_list.extend(name)
            sys.stdout.write('\r testing process: = {}'.format(batch_idx + 1))
            sys.stdout.flush()
    #evaluation
    if args.testset == 'VinCXR' or args.testset == 'NIHCXR':
        AUROCs = compute_AUCs(gt, pred, N_CLASSES)
        AUROC_avg = np.array(AUROCs).mean()
        for i in range(N_CLASSES):
            print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i],
                                                     AUROCs[i]))
        print('The average AUROC is {:.4f}'.format(AUROC_avg))
        compute_ROCCurve(gt, pred, N_CLASSES, CLASS_NAMES,
                         args.dataset)  #plot ROC Curve
    elif args.testset == 'CVTECXR':
        gt_np = gt.cpu().numpy()
        pred_np = pred.cpu().numpy()
        AUROCs = roc_auc_score(1 - gt_np, pred_np[:, -1])
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[-1], AUROCs))

        pred_np_ad = np.where(pred_np[:, -1] > config['PROB'], 0,
                              1)  #normal=0, abnormal=1
        pred_np = np.where(pred_np[:, :-1] > 1 - config['PROB'], 1,
                           0).sum(axis=1)
        pred_np_ad = np.logical_or(pred_np_ad, pred_np)
        #F1 = 2 * (precision * recall) / (precision + recall)
        f1score = f1_score(gt_np, pred_np_ad, average='micro')
        print('\r F1 Score = {:.4f}'.format(f1score))
        #sensitivity and specificity
        tn, fp, fn, tp = confusion_matrix(gt_np, pred_np_ad).ravel()
        sen = tp / (tp + fn)
        spe = tn / (tn + fp)
        print('\rSensitivity = {:.4f} and specificity = {:.4f}'.format(
            sen, spe))

        #result = pd.concat([pd.DataFrame(np.array(name_list)),pd.DataFrame(gt_np), pd.DataFrame(pred_np_ad)], axis=1)
        #result.to_csv(config['log_path']+'disan.csv', index=False, header=False, sep=',')

        return AUROCs

    else:
        print('No dataset need to evaluate')

    return 0.0
Example #8
def Train():
    print('********************load data********************')
    dataloader_train = get_train_dataloader(batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
    dataloader_val = get_validation_dataloader(batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8)
    #dataloader_train, dataloader_val = get_train_val_dataloader(batch_size=config['BATCH_SIZE'], shuffle=True, num_workers=8, split_ratio=0.1)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        #for left_lung
        model_unet_left = UNet(n_channels=3, n_classes=1).cuda()#initialize model 
        CKPT_PATH = config['CKPT_PATH'] +  'best_unet_left.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model_unet_left.load_state_dict(checkpoint) #strict=False
            print("=> loaded well-trained unet model checkpoint: "+CKPT_PATH)
        model_unet_left.eval()

        model_left = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True).cuda()#initialize model 
        optimizer_left = optim.Adam(model_left.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        lr_scheduler_left = lr_scheduler.StepLR(optimizer_left, step_size = 10, gamma = 1)
        #for right lung
        model_unet_right = UNet(n_channels=3, n_classes=1).cuda()#initialize model 
        CKPT_PATH = config['CKPT_PATH'] +  'best_unet_right.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model_unet_right.load_state_dict(checkpoint) #strict=False
            print("=> loaded well-trained unet model checkpoint: "+CKPT_PATH)
        model_unet_right.eval()

        model_right = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True).cuda()#initialize model 
        optimizer_right = optim.Adam(model_right.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        lr_scheduler_right = lr_scheduler.StepLR(optimizer_right, step_size = 10, gamma = 1)
        #for heart
        model_unet_heart = UNet(n_channels=3, n_classes=1).cuda()#initialize model 
        CKPT_PATH = config['CKPT_PATH'] +  'best_unet_heart.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model_unet_heart.load_state_dict(checkpoint) #strict=False
            print("=> loaded well-trained unet model checkpoint: "+CKPT_PATH)
        model_unet_heart.eval()

        model_heart = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True).cuda()#initialize model 
        optimizer_heart = optim.Adam(model_heart.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        lr_scheduler_heart = lr_scheduler.StepLR(optimizer_heart, step_size = 10, gamma = 1)
    else: 
        print('No required model')
        return #over

    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    bce_criterion = nn.BCELoss() #define binary cross-entropy loss
    print('********************load model succeed!********************')

    print('********************begin training!********************')
    AUROC_best = 0.50
    for epoch in range(config['MAX_EPOCHS']):
        since = time.time()
        print('Epoch {}/{}'.format(epoch+1 , config['MAX_EPOCHS']))
        print('-' * 10)
        model_left.train()  #set model to training mode
        model_right.train()
        model_heart.train()
        train_loss = []
        with torch.autograd.enable_grad():
            for batch_idx, (image, label) in enumerate(dataloader_train):
                optimizer_left.zero_grad()
                optimizer_right.zero_grad() 
                optimizer_heart.zero_grad() 
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                #for left lung
                mask = model_unet_left(var_image)
                roi = ROIGeneration(image, mask)
                var_roi = torch.autograd.Variable(roi).cuda()
                out_left = model_left(var_roi)#forward
                loss_left = bce_criterion(out_left, var_label)
                loss_left.backward()
                optimizer_left.step()
                #for right lung
                mask = model_unet_right(var_image)
                roi = ROIGeneration(image, mask)
                var_roi = torch.autograd.Variable(roi).cuda()
                out_right = model_right(var_roi)#forward
                loss_right = bce_criterion(out_right, var_label)
                loss_right.backward()
                optimizer_right.step()
                #for heart
                mask = model_unet_heart(var_image)
                roi = ROIGeneration(image, mask)
                var_roi = torch.autograd.Variable(roi).cuda()
                out_heart = model_heart(var_roi)#forward
                loss_heart = bce_criterion(out_heart, var_label)
                loss_heart.backward()
                optimizer_heart.step()
                #loss sum 
                loss_tensor = loss_left + loss_right + loss_heart
                train_loss.append(loss_tensor.item())
                #print([x.grad for x in optimizer.param_groups[0]['params']])
                sys.stdout.write('\r Epoch: {} / Step: {} : train loss = {}'.format(epoch+1, batch_idx+1, float('%0.6f'%loss_tensor.item()) ))
                sys.stdout.flush()        
        lr_scheduler_left.step()  #about lr and gamma
        lr_scheduler_right.step()
        lr_scheduler_heart.step()
        print("\r Eopch: %5d train loss = %.6f" % (epoch + 1, np.mean(train_loss))) 

        model_left.eval() #turn to test mode
        model_right.eval()
        model_heart.eval()
        val_loss = []
        gt = torch.FloatTensor().cuda()
        preds = torch.FloatTensor().cuda()
        with torch.autograd.no_grad():
            for batch_idx, (image, label) in enumerate(dataloader_val):
                pred = torch.FloatTensor().cuda()
                gt = torch.cat((gt, label.cuda()), 0)
                var_image = torch.autograd.Variable(image).cuda()
                var_label = torch.autograd.Variable(label).cuda()
                #for left lung
                mask = model_unet_left(var_image)
                roi = ROIGeneration(image, mask)
                var_roi = torch.autograd.Variable(roi).cuda()
                out_left = model_left(var_roi)#forward
                pred = torch.cat((pred, out_left.data.unsqueeze(0)), 0)
                #for right lung
                mask = model_unet_right(var_image)
                roi = ROIGeneration(image, mask)
                var_roi = torch.autograd.Variable(roi).cuda()
                out_right = model_right(var_roi)#forward
                pred = torch.cat((pred, out_right.data.unsqueeze(0)), 0)
                #for heart
                mask = model_unet_heart(var_image)
                roi = ROIGeneration(image, mask)
                var_roi = torch.autograd.Variable(roi).cuda()
                out_heart = model_heart(var_roi)#forward
                pred = torch.cat((pred, out_heart.data.unsqueeze(0)), 0)
                #prediction
                pred = torch.max(pred, 0)[0] #torch.mean
                preds = torch.cat((preds, pred.data), 0)
                loss_tensor = bce_criterion(pred, var_label)
                val_loss.append(loss_tensor.item())
                sys.stdout.write('\r Epoch: {} / Step: {} : validation loss = {}'.format(epoch+1, batch_idx+1, float('%0.6f'%loss_tensor.item()) ))
                sys.stdout.flush()
        #evaluation       
        AUROCs_avg = np.array(compute_AUCs(gt, preds)).mean()
        print("\r Eopch: %5d validation loss = %.6f, average AUROC=%.4f"% (epoch + 1, np.mean(val_loss), AUROCs_avg)) 

        #save checkpoint
        if AUROC_best < AUROCs_avg:
            AUROC_best = AUROCs_avg
            torch.save(model_left.state_dict(), config['CKPT_PATH'] + 'left_model.pkl')
            torch.save(model_right.state_dict(), config['CKPT_PATH'] + 'right_model.pkl')
            torch.save(model_heart.state_dict(), config['CKPT_PATH'] + 'heart_model.pkl')
            print(' Epoch: {} model has been saved!'.format(epoch+1))
    
        time_elapsed = time.time() - since
        print('Training epoch: {} completed in {:.0f}m {:.0f}s'.format(epoch+1, time_elapsed // 60 , time_elapsed % 60))
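
ROIGeneration as used here (a plain function, unlike the ROIGenerator class of Examples #3 and #4) is not defined in these snippets. Example #11 below inlines what looks like the same idea: binarize the UNet mask, crop the bounding box of the foreground, and resize the crop back to the network input size. A sketch along those lines, with the crop size as an assumed parameter:

import cv2
import numpy as np
import torch

def ROIGeneration(image, mask, crop_size=224):
    """Crop each image to the bounding box of its predicted mask (sketch).
    image: CPU tensor [B, 3, H, W]; mask: tensor [B, 1, H, W] of probabilities."""
    mask_np = mask.ge(0.5).float().squeeze(1).cpu().numpy()  # binarize to [B, H, W]
    patches = []
    for i in range(mask_np.shape[0]):
        ind = np.argwhere(mask_np[i] != 0)
        if len(ind) > 0:
            minh, minw = ind[:, 0].min(), ind[:, 1].min()
            maxh, maxw = ind[:, 0].max(), ind[:, 1].max()
            crop = image[i].permute(1, 2, 0).numpy()[minh:maxh, minw:maxw, :]
            crop = cv2.resize(crop, (crop_size, crop_size))
            patches.append(torch.from_numpy(crop).permute(2, 0, 1))
        else:
            patches.append(image[i])  # empty mask: fall back to the full image
    return torch.stack(patches, 0)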
Example #9
def Test():
    print('********************load data********************')
    dataloader_test = get_test_dataloader(batch_size=config['BATCH_SIZE'], shuffle=False, num_workers=8)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        #for left
        model_left = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH']  +'left_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_left.load_state_dict(checkpoint) #strict=False
        print("=> loaded left model checkpoint: "+CKPT_PATH)
        model_left.eval()

        model_unet_left = UNet(n_channels=3, n_classes=1).cuda()#initialize model 
        CKPT_PATH = config['CKPT_PATH'] +  'best_unet_left.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_unet_left.load_state_dict(checkpoint) #strict=False
        print("=> loaded well-trained unet model checkpoint: "+CKPT_PATH)
        model_unet_left.eval()

        #for right
        model_right = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH']  +'right_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_right.load_state_dict(checkpoint) #strict=False
        print("=> loaded right model checkpoint: "+CKPT_PATH)
        model_right.eval()

        model_unet_right = UNet(n_channels=3, n_classes=1).cuda()#initialize model 
        CKPT_PATH = config['CKPT_PATH'] +  'best_unet_right.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_unet_right.load_state_dict(checkpoint) #strict=False
        print("=> loaded well-trained unet model checkpoint: "+CKPT_PATH)
        model_unet_right.eval()

        #for heart
        model_heart = CXRClassifier(num_classes=N_CLASSES, is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH']  +'heart_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_heart.load_state_dict(checkpoint) #strict=False
        print("=> loaded heart model checkpoint: "+CKPT_PATH)
        model_heart.eval()

        model_unet_heart = UNet(n_channels=3, n_classes=1).cuda()#initialize model 
        CKPT_PATH = config['CKPT_PATH'] +  'best_unet_heart.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model_unet_heart.load_state_dict(checkpoint) #strict=False
        print("=> loaded well-trained unet model checkpoint: "+CKPT_PATH)
        model_unet_heart.eval()
        
    else: 
        print('No required model')
        return #over
    torch.backends.cudnn.benchmark = True  # improve train speed slightly
        
    print('******* begin testing!*********')
    gt = torch.FloatTensor().cuda()
    preds = torch.FloatTensor().cuda()
    # all classifiers and U-Nets were already switched to evaluate mode above
    with torch.autograd.no_grad():
        for batch_idx, (image, label) in enumerate(dataloader_test):
            gt = torch.cat((gt, label.cuda()), 0)
            pred = torch.FloatTensor().cuda()
            var_image = torch.autograd.Variable(image).cuda()
            var_label = torch.autograd.Variable(label).cuda()
            #for left lung
            mask = model_unet_left(var_image)
            roi = ROIGeneration(image, mask)
            var_roi = torch.autograd.Variable(roi).cuda()
            out_left = model_left(var_roi)#forward
            pred = torch.cat((pred, out_left.data.unsqueeze(0)), 0)
            #for right lung
            mask = model_unet_right(var_image)
            roi = ROIGeneration(image, mask)
            var_roi = torch.autograd.Variable(roi).cuda()
            out_right = model_right(var_roi)#forward
            pred = torch.cat((pred, out_right.data.unsqueeze(0)), 0)
            #for heart
            mask = model_unet_heart(var_image)
            roi = ROIGeneration(image, mask)
            var_roi = torch.autograd.Variable(roi).cuda()
            out_heart = model_heart(var_roi)#forward
            pred = torch.cat((pred, out_heart.data.unsqueeze(0)), 0)
            #prediction
            pred = torch.max(pred, 0)[0] #torch.mean
            preds = torch.cat((preds, pred.data), 0)
            sys.stdout.write('\r testing process: = {}'.format(batch_idx+1))
            sys.stdout.flush()

    #for evaluation
    AUROC_img = compute_AUCs(gt, preds)
    AUROC_avg = np.array(AUROC_img).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_img[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))
Example #10
def Test():
    print('********************load data********************')
    dataloader_train = get_train_dataloader(batch_size=BATCH_SIZE,
                                            shuffle=False,
                                            num_workers=0)
    dataloader_test = get_test_dataloader(batch_size=BATCH_SIZE,
                                          shuffle=False,
                                          num_workers=0)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'YNet':
        model = YNet(n_classes=2, n_masks=3, code_size=64).cuda()
        CKPT_PATH = '/data/pycode/YNet/model/best_model.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(checkpoint)  #strict=False
        print("=> loaded Image model checkpoint: " + CKPT_PATH)
    else:
        print('No required model')
        return  #over
    torch.backends.cudnn.benchmark = True  # improve train speed slightly
    model.eval()  #turn to test mode
    print('******************** load model succeed!********************')

    print('******* begin indexing!*********')
    tr_label = torch.FloatTensor().cuda()
    #tr_mask = torch.LongTensor().cuda()
    tr_hash = torch.FloatTensor().cuda()
    with torch.autograd.no_grad():
        for batch_idx, (image, mask, label) in enumerate(dataloader_train):
            tr_label = torch.cat((tr_label, label.cuda()), 0)
            #tr_mask = torch.cat((tr_mask, mask.cuda()), 0)
            var_image = torch.autograd.Variable(image).cuda()
            h_feat, _, _, _, _ = model(var_image)
            tr_hash = torch.cat((tr_hash, h_feat.data), 0)
            sys.stdout.write(
                '\r train indexing process: = {}'.format(batch_idx + 1))
            sys.stdout.flush()

    te_label = torch.FloatTensor().cuda()
    te_mask = torch.LongTensor().cuda()
    te_hash = torch.FloatTensor().cuda()
    te_mask_pd = torch.LongTensor().cuda()
    te_label_pd = torch.LongTensor().cuda()
    with torch.autograd.no_grad():
        for batch_idx, (image, mask, label) in enumerate(dataloader_test):
            te_label = torch.cat((te_label, label.cuda()), 0)
            te_mask = torch.cat((te_mask, mask.cuda()), 0)
            var_image = torch.autograd.Variable(image).cuda()
            h_feat, _, s_mask, _, c_cls = model(var_image)
            te_hash = torch.cat((te_hash, h_feat.data), 0)
            s_mask = F.log_softmax(s_mask, dim=1)
            s_mask = s_mask.max(1, keepdim=True)[1]
            te_mask_pd = torch.cat((te_mask_pd, s_mask.data), 0)
            c_cls = F.log_softmax(c_cls, dim=1)
            c_cls = c_cls.max(1, keepdim=True)[1]
            te_label_pd = torch.cat((te_label_pd, c_cls.data), 0)
            sys.stdout.write(
                '\r test indexing process: = {}'.format(batch_idx + 1))
            sys.stdout.flush()

    print('******* begin evaluating!*********')
    #retrieval performance
    sim_mat = cosine_similarity(te_hash.cpu().numpy(), tr_hash.cpu().numpy())
    te_label = te_label.cpu().numpy().tolist()
    tr_label = tr_label.cpu().numpy().tolist()
    for topk in [5, 10, 20, 50]:
        mAPs = []  #mean average precision
        for i in range(sim_mat.shape[0]):
            #idxs = heapq.nlargest(topk, sim_mat[i,:])
            idxs, vals = zip(*heapq.nlargest(
                topk, enumerate(sim_mat[i, :].tolist()), key=lambda x: x[1]))
            num_pos = 0
            rank_pos = 0
            mAP = []
            for j in idxs:
                rank_pos = rank_pos + 1
                if tr_label[j] == te_label[i]:  #hit
                    num_pos = num_pos + 1
                    mAP.append(num_pos / rank_pos)
            if np.sum(mAP) != 0:
                mAPs.append(np.mean(mAP))
            else:
                mAPs.append(0)
        print("mAP@{}={:.4f}".format(topk, np.mean(mAPs)))
    #segmentation performance
    mIoU = []
    te_mask = te_mask.cpu().numpy()
    te_mask_pd = te_mask_pd.cpu().numpy()
    for i in range(len(te_mask)):
        iou_score = te_mask[i] == te_mask_pd[i]
        mIoU.append(np.mean(iou_score))
    print("mIoU={:.4f}".format(np.mean(mIoU)))
    #classification performance
    #te_label = te_label.cpu().numpy()
    te_label_pd = te_label_pd.cpu().numpy()
    print('Accuracy: %.6f' % accuracy_score(te_label, te_label_pd))
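
Note that the mIoU loop above averages a per-pixel equality mask, which is pixel accuracy rather than intersection-over-union. If true mean IoU over the mask classes is wanted, a sketch could look like the following (n_classes=3 matches the YNet constructor above; te_mask_pd would need its channel dimension squeezed to match te_mask's shape):

import numpy as np

def mean_iou(gt_masks, pred_masks, n_classes=3):
    """Mean IoU over classes for integer-coded masks of identical shape (sketch)."""
    ious = []
    for c in range(n_classes):
        gt_c = (gt_masks == c)
        pd_c = (pred_masks == c)
        union = np.logical_or(gt_c, pd_c).sum()
        if union == 0:
            continue  # class absent from both prediction and ground truth
        ious.append(np.logical_and(gt_c, pd_c).sum() / union)
    return float(np.mean(ious)) if ious else 0.0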
Example #11
def Test():
    print('********************load data********************')
    dataloader_test = get_test_dataloader(batch_size=config['BATCH_SIZE'],
                                          shuffle=False,
                                          num_workers=8)
    print('********************load data succeed!********************')

    print('********************load model********************')
    # initialize and load the model
    if args.model == 'CXRNet':
        model = CXRNet(num_classes=N_CLASSES, is_pre_trained=True).cuda()
        CKPT_PATH = config['CKPT_PATH'] + 'best_model_CXRNet.pkl'
        checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(checkpoint)  #strict=False
        print("=> loaded Image model checkpoint: " + CKPT_PATH)
        model.eval()  #switch the classifier to evaluation mode for testing
        torch.backends.cudnn.benchmark = True  # improve train speed slightly

        model_unet = UNet(n_channels=3, n_classes=1).cuda()  #initialize model
        CKPT_PATH = config['CKPT_PATH'] + 'best_unet.pkl'
        if os.path.exists(CKPT_PATH):
            checkpoint = torch.load(CKPT_PATH)
            model_unet.load_state_dict(checkpoint)  #strict=False
            print("=> loaded well-trained unet model checkpoint: " + CKPT_PATH)
        model_unet.eval()
    else:
        print('No required model')
        return  #over
    print('******************** load model succeed!********************')

    print('******* begin testing!*********')
    gt = torch.FloatTensor().cuda()
    pred = torch.FloatTensor().cuda()
    with torch.autograd.no_grad():
        for batch_idx, (image, label) in enumerate(dataloader_test):
            gt = torch.cat((gt, label.cuda()), 0)
            var_image = torch.autograd.Variable(image).cuda()
            var_label = torch.autograd.Variable(label).cuda()

            var_mask = model_unet(var_image)
            var_mask = var_mask.ge(0.5).float()  #0,1 binarization
            mask_np = var_mask.squeeze().cpu().numpy()  #bz*224*224
            patchs = torch.FloatTensor()
            for i in range(0, mask_np.shape[0]):
                mask = mask_np[i]
                ind = np.argwhere(mask != 0)
                if len(ind) > 0:
                    minh = min(ind[:, 0])
                    minw = min(ind[:, 1])
                    maxh = max(ind[:, 0])
                    maxw = max(ind[:, 1])

                    image_crop = image[i].permute(
                        1, 2, 0).squeeze().numpy()  #224*224*3
                    image_crop = image_crop[minh:maxh, minw:maxw, :]
                    image_crop = cv2.resize(
                        image_crop, (config['TRAN_CROP'], config['TRAN_CROP']))
                    image_crop = torch.FloatTensor(image_crop).permute(
                        2, 0, 1).unsqueeze(0)  #HWC -> 1*3*224*224
                    patchs = torch.cat((patchs, image_crop), 0)
                else:
                    image_crop = image[i].unsqueeze(0)
                    patchs = torch.cat((patchs, image_crop), 0)

            var_patchs = torch.autograd.Variable(patchs).cuda()
            var_output = model(var_patchs)  #forward
            pred = torch.cat((pred, var_output.data), 0)
            sys.stdout.write('\r testing process: = {}'.format(batch_idx + 1))
            sys.stdout.flush()

    #for evaluation
    AUROC_all = compute_AUCs(gt, pred)
    AUROC_avg = np.array(AUROC_all).mean()
    for i in range(N_CLASSES):
        print('The AUROC of {} is {:.4f}'.format(CLASS_NAMES[i], AUROC_all[i]))
    print('The average AUROC is {:.4f}'.format(AUROC_avg))