Example #1
    def __init__(self, pathModel, nnArchitecture, nnClassCount, transCrop):

        #---- Initialize the network
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, True).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        self.model = model.module.densenet121.features
        self.model.eval()

        #---- Initialize the weights
        self.weights = list(self.model.parameters())[-2]

        #---- Initialize the image transform - resize + normalize
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)

        self.transformSequence = transforms.Compose(transformList)
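
The constructor stores the DenseNet feature extractor, the last-layer weights, and the transform, which is everything a class-activation heatmap needs. A minimal generate() sketch in the same style (an assumption modeled on the public CheXNet heatmap code, not shown in this snippet; it presumes PIL.Image, cv2, and numpy as np are imported alongside torch):

    def generate(self, pathImageFile, pathOutputFile, transCrop):
        #---- Load the image and apply the stored resize + normalize transform
        imageData = Image.open(pathImageFile).convert('RGB')
        imageData = self.transformSequence(imageData).unsqueeze_(0).cuda()

        #---- Forward pass through the feature extractor only
        with torch.no_grad():
            output = self.model(imageData)

        #---- Weight each feature map by the stored weights and sum into a heatmap
        heatmap = None
        for i in range(len(self.weights)):
            fmap = output[0, i, :, :]
            heatmap = self.weights[i] * fmap if heatmap is None \
                else heatmap + self.weights[i] * fmap

        #---- Blend the normalized heatmap over the original image
        npHeatmap = heatmap.cpu().data.numpy()
        imgOriginal = cv2.resize(cv2.imread(pathImageFile, 1), (transCrop, transCrop))
        cam = cv2.resize(npHeatmap / np.max(npHeatmap), (transCrop, transCrop))
        img = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET) * 0.5 + imgOriginal
        cv2.imwrite(pathOutputFile, img)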
Example #2
def get_params(example_index):
    """
        Gets used variables for almost all visualizations, like the image, model etc.

    Args:
        example_index (int): Image id to use from examples

    returns:
        original_image (numpy arr): Original image read from the file
        prep_img (numpy_arr): Processed image
        target_class (int): Target class for the image
        file_name_to_export (string): File name to export the visualizations
        pretrained_model(Pytorch model): Model to use for the operations
    """
    # Pick one of the examples
    example_list = [['../input_images/snake.jpg', 56],
                    ['../input_images/cat_dog.png', 243],
                    ['../input_images/spider.png', 72],
                    ['../input_images/pneumonia_1.png', 6]]
    selected_example = example_index
    img_path = example_list[selected_example][0]
    target_class = example_list[selected_example][1]
    file_name_to_export = img_path[img_path.rfind('/') + 1:img_path.rfind('.')]
    # Read image
    original_image = cv2.imread(img_path, 1)
    # Process image
    prep_img = preprocess_image(original_image)
    # Define model
    model = DenseNet121(14, True)
    pretrained_model = torch.load(pathModel,
                                  map_location=lambda storage, loc: storage)
    model.load_state_dict(pretrained_model['state_dict'], strict=False)
    return (original_image, prep_img, target_class, file_name_to_export,
            model.densenet121)
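
A usage sketch for get_params; index 0 selects the snake example, and the expected values come from the example_list above:

# Hypothetical call: pick example 0 ('../input_images/snake.jpg', class 56).
(original_image, prep_img, target_class,
 file_name_to_export, pretrained_model) = get_params(0)

print(file_name_to_export)   # 'snake'
print(target_class)          # 56
pretrained_model.eval()      # DenseNet-121 backbone ready for visualization passes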
Example #3
def download(nnArchitecture, nnIsTrained, nnClassCount):

    #-------------------- SETTINGS: NETWORK ARCHITECTURE
    if nnArchitecture == 'DENSE-NET-121':
        model = DenseNet121(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'DENSE-NET-169':
        model = DenseNet169(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'DENSE-NET-201':
        model = DenseNet201(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'RES-NET-18':
        model = ResNet18(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'RES-NET-50':
        model = ResNet50(nnClassCount, nnIsTrained)

    model = torch.nn.DataParallel(model)
Example #4
    def __init__(self, pathModel, nnArchitecture, nnClassCount, transCrop):
       
        #---- Initialize the network
        if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, True)
        elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, True).cuda()
          
        model = torch.nn.DataParallel(model)
        modelCheckpoint = torch.load(pathModel, map_location='cpu')
        state_dict = modelCheckpoint['state_dict']
        remove_data_parallel = False # Change if you don't want to use nn.DataParallel(model)
        print('=> remapping state dict keys')
        pattern = re.compile(r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            new_key = match.group(1) + match.group(2) if match else key
            new_key = new_key[7:] if remove_data_parallel else new_key
            state_dict[new_key] = state_dict[key]
            # Delete old key only if modified.
            if match or remove_data_parallel: 
                del state_dict[key]
        print('=> done remapping keys')
        # if os.path.isfile(CKPT_PATH):
        #     print("=> loading checkpoint")
        #     checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(state_dict)
        print("=> loaded checkpoint") 
        #addition by claire
        # self.modelP = model
        # self.model = model.module.densenet121.features
        # self.model.eval()
        self.feature_extractor = model.module.densenet121.features
        self.feature_extractor.eval()
        self.classifier = model.module.densenet121.classifier
        
        self.weights = list(self.classifier.parameters())[-2].cpu().data.numpy()
        self.bias = list(self.classifier.parameters())[-1].cpu().data.numpy()

        #---- Initialize the image transform - resize + normalize
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)      
        
        self.transformSequence = transforms.Compose(transformList)
Example #5
    def loadModel(nnArchitecture, nnClassCount, nnIsTrained):

        if nnArchitecture == 'RES-NET-18':
            model = Resnet18(nnClassCount, nnIsTrained)
            print("Resnet18 Loaded")

        elif nnArchitecture == 'RES-NET-34':
            model = Resnet34(nnClassCount, nnIsTrained)
            print("Resnet34 Loaded")

        elif nnArchitecture == 'RES-NET-50':
            model = Resnet50(nnClassCount, nnIsTrained)
            print("Resnet50 Loaded")

        elif nnArchitecture == 'RES-NET-101':
            model = Resnet101(nnClassCount, nnIsTrained)
            print("Resnet101 Loaded")

        elif nnArchitecture == 'RES-NET-152':
            model = Resnet152(nnClassCount, nnIsTrained)
            print("Resnet152 loaded")

        elif nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained)
            print("DenseNet121 loaded")

        elif nnArchitecture == 'DENSE-NET-161':
            model = DenseNet161(nnClassCount, nnIsTrained)
            print("DenseNet161 loaded")

        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained)
            print("DenseNet169 loaded")

        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained)
            print("DENSENET201 loaded")

        elif nnArchitecture == 'INCEPTION-V3':
            model = InceptionV3(nnClassCount, nnIsTrained)
            print("InceptionV3 loaded")

        else:
            print("No model loaded")

        return model
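
The if/elif ladder above can be collapsed into a lookup table; an equivalent sketch (assuming the same model classes are importable) that also fails loudly instead of returning an unbound name when the architecture string is unknown:

# A table-driven equivalent of loadModel (a sketch, same behavior assumed).
ARCHITECTURES = {
    'RES-NET-18': Resnet18, 'RES-NET-34': Resnet34,
    'RES-NET-50': Resnet50, 'RES-NET-101': Resnet101,
    'RES-NET-152': Resnet152, 'DENSE-NET-121': DenseNet121,
    'DENSE-NET-161': DenseNet161, 'DENSE-NET-169': DenseNet169,
    'DENSE-NET-201': DenseNet201, 'INCEPTION-V3': InceptionV3,
}

def loadModel(nnArchitecture, nnClassCount, nnIsTrained):
    try:
        modelClass = ARCHITECTURES[nnArchitecture]
    except KeyError:
        raise ValueError('Unknown architecture: ' + nnArchitecture)
    print(modelClass.__name__ + ' loaded')
    return modelClass(nnClassCount, nnIsTrained)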
Example #6
    def __init__(self, pathModel, nnArchitecture, nnClassCount, transCrop):

        #---- Initialize the network
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, True).cuda()

        model = torch.nn.DataParallel(model).cuda()

        print("=> loading checkpoint")
        modelCheckpoint = torch.load(pathModel)
        # https://github.com/KaiyangZhou/deep-person-reid/issues/23
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        state_dict = modelCheckpoint['state_dict']
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
        print("=> loaded checkpoint")

        self.model = model.module.densenet121.features
        self.model.eval()

        #---- Initialize the weights
        self.weights = list(self.model.parameters())[-2]

        #---- Initialize the image transform - resize + normalize
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)

        self.transformSequence = transforms.Compose(transformList)
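
The regex above compensates for a torchvision rename: old DenseNet checkpoints use keys like 'norm.1.weight' inside each denselayer, while newer versions expect 'norm1.weight'. A self-contained demonstration of what the remap does to one key:

import re

pattern = re.compile(
    r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')

old_key = 'module.densenet121.features.denseblock1.denselayer1.norm.1.weight'
res = pattern.match(old_key)
print(res.group(1) + res.group(2))
# -> module.densenet121.features.denseblock1.denselayer1.norm1.weight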
Example #7
class ChexnetTrainer():

    #---- Train the densenet network
    #---- pathDirData - path to the directory that contains images
    #---- pathFileTrain - path to the file that contains image paths and label pairs (training set)
    #---- pathFileVal - path to the file that contains image path and label pairs (validation set)
    #---- nnArchitecture - model architecture 'DENSE-NET-121', 'DENSE-NET-169' or 'DENSE-NET-201'
    #---- nnIsTrained - if True, uses pre-trained version of the network (pre-trained on imagenet)
    #---- nnClassCount - number of output classes
    #---- trBatchSize - batch size
    #---- trMaxEpoch - number of epochs
    #---- transResize - size of the image to scale down to (not used in current implementation)
    #---- transCrop - size of the cropped image
    #---- launchTimestamp - date/time, used to assign unique name for the checkpoint file
    #---- checkpoint - if not None loads the model and continues training

    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint):

        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'resnet':
            model = ResNeXt(14).cuda()
        elif nnArchitecture == 'dcsnnet':
            model = DCSNNet(14).cuda()

        model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(
            pathImageDirectory=pathDirData,
            pathDatasetFile=pathFileTrain,
            transform=transformSequence)
        datasetVal = DatasetGenerator(
            pathImageDirectory=pathDirData,
            pathDatasetFile=pathFileVal,
            transform=transformSequence)

        dataLoaderTrain = DataLoader(
            dataset=datasetTrain,
            batch_size=trBatchSize,
            shuffle=True,
            num_workers=24,
            pin_memory=True)
        dataLoaderVal = DataLoader(
            dataset=datasetVal,
            batch_size=trBatchSize,
            shuffle=False,
            num_workers=24,
            pin_memory=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(
            model.parameters(),
            lr=0.0001,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=1e-5)
        scheduler = ReduceLROnPlateau(
            optimizer, factor=0.1, patience=5, mode='min')

        #-------------------- SETTINGS: LOSS
        loss = torch.nn.BCELoss(size_average=True)

        #---- Load checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])

        #---- TRAIN THE NETWORK

        lossMIN = 100000

        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.data[0])

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save({
                    'epoch': epochID + 1,
                    'state_dict': model.state_dict(),
                    'best_loss': lossMIN,
                    'optimizer': optimizer.state_dict()
                }, 'm-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss= ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss= ' + str(lossVal))

    #--------------------------------------------------------------------------------

    def epochTrain(model, dataLoader, optimizer, scheduler, epochMax,
                   classCount, loss):

        model.train()

        for batchID, (input, target) in enumerate(dataLoader):

            target = target.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7

            varInput = torch.autograd.Variable(input)
            varTarget = torch.autograd.Variable(target)
            varOutput = model(varInput)

            lossvalue = loss(varOutput, varTarget)

            optimizer.zero_grad()
            lossvalue.backward()
            optimizer.step()

    #--------------------------------------------------------------------------------

    def epochVal(model, dataLoader, optimizer, scheduler, epochMax, classCount,
                 loss):
        model.eval()

        lossVal = 0
        lossValNorm = 0

        losstensorMean = 0

        for i, (input, target) in enumerate(dataLoader):

            target = target.cuda(non_blocking=True)  # 'async' became a reserved word in Python 3.7

            varInput = torch.autograd.Variable(input, volatile=True)
            varTarget = torch.autograd.Variable(target, volatile=True)
            varOutput = model(varInput)

            losstensor = loss(varOutput, varTarget)
            losstensorMean += losstensor

            lossVal += losstensor.data[0]
            lossValNorm += 1

        outLoss = lossVal / lossValNorm
        losstensorMean = losstensorMean / lossValNorm

        return outLoss, losstensorMean

    #--------------------------------------------------------------------------------

    #---- Computes area under ROC curve
    #---- dataGT - ground truth data
    #---- dataPRED - predicted data
    #---- classCount - number of classes

    def computeAUROC(dataGT, dataPRED, classCount):

        outAUROC = []

        datanpGT = dataGT.cpu().numpy()
        datanpPRED = dataPRED.cpu().numpy()

        for i in range(classCount):
            outAUROC.append(roc_auc_score(datanpGT[:, i], datanpPRED[:, i]))

        return outAUROC

    #--------------------------------------------------------------------------------

    #---- Test the trained network
    #---- pathDirData - path to the directory that contains images
    #---- pathFileTrain - path to the file that contains image paths and label pairs (training set)
    #---- pathFileVal - path to the file that contains image path and label pairs (validation set)
    #---- nnArchitecture - model architecture 'DENSE-NET-121', 'DENSE-NET-169' or 'DENSE-NET-201'
    #---- nnIsTrained - if True, uses pre-trained version of the network (pre-trained on imagenet)
    #---- nnClassCount - number of output classes
    #---- trBatchSize - batch size
    #---- trMaxEpoch - number of epochs
    #---- transResize - size of the image to scale down to (not used in current implementation)
    #---- transCrop - size of the cropped image
    #---- launchTimestamp - date/time, used to assign unique name for the checkpoint file
    #---- checkpoint - if not None loads the model and continues training

    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'resnet':
            model = ResNeXt(10).cuda()
        elif nnArchitecture == 'dcsnnet':
            model = DCSNNet(10).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        # The checkpoint was saved from a DataParallel-wrapped model, so its
        # 'module.'-prefixed keys already match the wrapped model above; a second
        # load with the prefixes stripped would no longer match and is not needed.
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        )
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(
            pathImageDirectory=pathDirData,
            pathDatasetFile=pathFileTest,
            transform=transformSequence)
        dataLoaderTest = DataLoader(
            dataset=datasetTest,
            batch_size=trBatchSize,
            num_workers=8,
            shuffle=False,
            pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        for i, (input, target) in enumerate(dataLoaderTest):

            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(
                input.view(-1, c, h, w).cuda(), volatile=True)

            out = model(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)

            outPRED = torch.cat((outPRED, outMean.data), 0)

            print(outPRED)

        np.savetxt("np.txt", outGT.cpu().numpy())
        np.savetxt("np2.txt", outPRED.cpu().numpy())

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED,
                                                      nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
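
A minimal invocation sketch for ChexnetTrainer.train; every path and size here is a placeholder, not a value taken from the original code:

import time

timestampLaunch = time.strftime("%d%m%Y") + '-' + time.strftime("%H%M%S")
ChexnetTrainer.train(pathDirData='./database',            # image directory (placeholder)
                     pathFileTrain='./dataset/train_1.txt',
                     pathFileVal='./dataset/val_1.txt',
                     nnArchitecture='DENSE-NET-121',
                     nnIsTrained=True,
                     nnClassCount=14,
                     trBatchSize=16,
                     trMaxEpoch=100,
                     transResize=256,
                     transCrop=224,
                     launchTimestamp=timestampLaunch,
                     checkpoint=None)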
Example #8
    def test(self, pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet-18':
            model = ResNet18(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet-50':
            model = ResNet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet18_lh':
            model = MyModels.ResNet18_lh(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet18_fpn':
            model = MyModels.ResNet18_fpn(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet50_fpn':
            model = MyModels.ResNet50_fpn(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'senet50_fpn':
            model = MyModels.senet50_fpn(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'senet50_sm':
            model = MyModels.senet50_sm(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'multi_model':
            # model1 = se_resnet50(1000, pretrained='imagenet').cuda()
            # kernelCount = model1.last_linear.in_features
            # model1.last_linear = nn.Sequential(nn.Linear(kernelCount, nnClassCount), nn.Sigmoid())
            # model1.avg_pool = nn.AvgPool2d(8, stride=1)

            model1 = MyModels.senet50_sm(nnClassCount, nnIsTrained).cuda()
            model2 = MyModels.senet50_fpn(nnClassCount, nnIsTrained).cuda()

        if nnArchitecture != 'multi_model':
            model = torch.nn.DataParallel(model).cuda()
            modelCheckpoint = torch.load(pathModel)
            model.load_state_dict(modelCheckpoint['state_dict'])
        else:
            model1 = torch.nn.DataParallel(model1).cuda()
            model2 = torch.nn.DataParallel(model2).cuda()
            modelCheckpoint1 = torch.load(pathModel[0])
            model1.load_state_dict(modelCheckpoint1['state_dict'])

            modelCheckpoint2 = torch.load(pathModel[1])
            model2.load_state_dict(modelCheckpoint2['state_dict'])

            model = MyModels.multi_model(model1, model2)

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=8,
                                    shuffle=False,
                                    pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        for i, (input, target) in enumerate(dataLoaderTest):

            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda(),
                                               volatile=True)

            out = model(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)

            outPRED = torch.cat((outPRED, outMean.data), 0)

#        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
        aurocIndividual = self.computeAUROC(outGT, outPRED, nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
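
MyModels.multi_model is referenced above but not shown; a plausible minimal stand-in (an assumption, not the author's implementation) would average the two submodels' outputs:

import torch.nn as nn

class multi_model(nn.Module):
    """Hypothetical stand-in for MyModels.multi_model."""
    def __init__(self, model1, model2):
        super(multi_model, self).__init__()
        self.model1 = model1
        self.model2 = model2

    def forward(self, x):
        # Both submodels end in a sigmoid in this codebase,
        # so the mean of their outputs stays in [0, 1].
        return (self.model1(x) + self.model2(x)) / 2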
Example #9
    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = ['Normal', 'Pneumonia']

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)

        #-------------------- Extra step added to remap/remove the mismatched keys
        state_dict = modelCheckpoint['state_dict']
        remove_data_parallel = False  # Change if you don't want to use nn.DataParallel(model)

        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            new_key = match.group(1) + match.group(2) if match else key
            new_key = new_key[7:] if remove_data_parallel else new_key
            state_dict[new_key] = state_dict[key]
            # Delete the old key only if it was modified.
            if match or remove_data_parallel:
                del state_dict[key]
        model.load_state_dict(state_dict)

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=8,
                                    shuffle=False,
                                    pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        for i, (input, target) in enumerate(dataLoaderTest):

            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda(),
                                               volatile=True)

            out = model(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)

            outPRED = torch.cat((outPRED, outMean.data), 0)

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED,
                                                      nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return


#--------------------------------------------------------------------------------
Example #10
    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model, device_ids=[0, 1]).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=8,
                                    shuffle=False,
                                    pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()
        totallen = len(dataLoaderTest)
        t = time.time()
        for i, (input, target) in enumerate(dataLoaderTest):
            #status=(totallen-i)/(i+1)*(time.time()-t)#seconds
            #hours = status // 3600
            #minutes = (status // 60) % 60
            progress(i + 1, totallen, t)
            target = target.cuda()

            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda(),
                                               volatile=True)

            out = model(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)

            outPRED = torch.cat((outPRED, outMean.data), 0)

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED,
                                                      nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
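
progress() is called in the loop above but not defined in the snippet; a minimal compatible helper (its signature inferred from the call progress(i + 1, totallen, t)) might look like this:

import sys
import time

def progress(step, total, t_start):
    # Hypothetical progress printer: elapsed time so far gives a naive ETA.
    elapsed = time.time() - t_start
    eta = (total - step) * elapsed / step
    sys.stdout.write('\rBatch {}/{}  ETA {:02d}:{:02d}'.format(
        step, total, int(eta // 60), int(eta % 60)))
    sys.stdout.flush()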
Example #11
    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint):

        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        # model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS

        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        transform_only_aug = transforms.Compose([XRaysPolicy()])
        transform_with_aug = transforms.Compose([
            XRaysPolicy(),
            transforms.RandomResizedCrop(transCrop),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(), normalize
        ])
        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(pathDirData,
                                        pathFileTrain,
                                        transform=transform_with_aug)
        datasetTrainUnsup = DatasetGenerator(pathDirData,
                                             pathFileTrain,
                                             transform=transformSequence,
                                             transform_aug=transform_only_aug)
        datasetVal = DatasetGenerator(pathDirData,
                                      pathFileVal,
                                      transform=transformSequence)

        dataLoaderTrain = DataLoader(dataset=datasetTrain,
                                     batch_size=trBatchSize,
                                     shuffle=True,
                                     num_workers=4,
                                     pin_memory=True)
        dataLoaderUnsup = DataLoader(dataset=datasetTrainUnsup,
                                     batch_size=trBatchSize,
                                     shuffle=True,
                                     num_workers=4,
                                     pin_memory=True)
        dataLoaderVal = DataLoader(dataset=datasetVal,
                                   batch_size=trBatchSize,
                                   shuffle=False,
                                   num_workers=4,
                                   pin_memory=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(model.parameters(),
                               lr=0.0001,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)

        # optimizer = optim.SGD(
        # filter(
        #     lambda p: p.requires_grad,
        #     model.parameters()),
        # lr=0.01,
        # momentum=0.9,
        # weight_decay=5e-4)
        scheduler = ReduceLROnPlateau(optimizer,
                                      factor=0.1,
                                      patience=5,
                                      mode='min')

        #-------------------- SETTINGS: LOSS
        loss = torch.nn.BCELoss(size_average=True)

        #---- Load checkpoint
        start_epoch = 0
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])
            start_epoch = modelCheckpoint['epoch']

        #---- TRAIN THE NETWORK

        lossMIN = 100000
        max_auroc_mean = -1000

        for epochID in range(start_epoch, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss, dataLoaderUnsup)
            lossVal, losstensor, aurocMean = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.item())
            if aurocMean > max_auroc_mean:
                max_auroc_mean = aurocMean
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] aurocMean= ' + str(aurocMean))
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': model.state_dict(),
                        'max_auroc_mean': max_auroc_mean,
                        'optimizer': optimizer.state_dict()
                    }, 'm-' + launchTimestamp + '_best_auroc.pth.tar')
            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict()
                    }, 'm-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss= ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss= ' + str(lossVal))
Example #12
    def __init__(self, pathModel, nnArchitecture, nnClassCount, transCrop):

        #---- Initialize the network
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, True).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, True).cuda()

        model = torch.nn.DataParallel(model).cuda()
        modelCheckpoint = torch.load(pathModel)
        state_dict = modelCheckpoint['state_dict']
        remove_data_parallel = False  # Change if you don't want to use nn.DataParallel(model)
        print('=> remapping state dict keys')
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            new_key = match.group(1) + match.group(2) if match else key
            new_key = new_key[7:] if remove_data_parallel else new_key
            state_dict[new_key] = state_dict[key]
            # Delete old key only if modified.
            if match or remove_data_parallel:
                del state_dict[key]
        print('=> done remapping keys')
        # if os.path.isfile(CKPT_PATH):
        #     print("=> loading checkpoint")
        #     checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(state_dict)
        print("=> loaded checkpoint")
        self.model2 = model
        self.model = model.module.densenet121.features
        self.model.eval()

        #---- Initialize the weights
        self.weights = list(self.model.parameters())[-2]

        #---- Initialize the image transform - resize + normalize
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)

        self.transformSequence = transforms.Compose(transformList)

        #added from chexnettrainer
        #-------------------- SETTINGS: DATASET BUILDERS
        transformList2 = []
        transResize = 256
        transformList2.append(transforms.Resize(transResize))
        transformList2.append(transforms.TenCrop(224))
        transformList2.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList2.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        self.transformSequence2 = transforms.Compose(transformList2)
Example #13
    def test(self, pathDirData, pathFileTest, pathModel_1, pathModel_2, nnArchitecture, nnClassCount, nnIsTrained, trBatchSize,
             transResize, transCrop):

        CLASS_NAMES = ['Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',
                       'Pneumothorax', 'Consolidation', 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening',
                       'Hernia']

        cudnn.benchmark = True

        # -------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        model_1 = ResNet18(nnClassCount, nnIsTrained).cuda()
        model_2 = DenseNet121(nnClassCount, nnIsTrained).cuda()

        model_1 = torch.nn.DataParallel(model_1).cuda()
        model_2 = torch.nn.DataParallel(model_2).cuda()

        modelCheckpoint_1 = torch.load(pathModel_1)
        modelCheckpoint_2 = torch.load(pathModel_2)
        model_1.load_state_dict(modelCheckpoint_1['state_dict'])
        model_2.load_state_dict(modelCheckpoint_2['state_dict'])

        # -------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

        # -------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=trBatchSize, num_workers=8, shuffle=False,
                                    pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model_1.eval()

        for i, (input, target) in enumerate(dataLoaderTest):
            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda(), volatile=True)

            out = model_1(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)
            outPRED = torch.cat((outPRED, outMean.data), 0)

        #       aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
        aurocIndividual = self.computeAUROC(outGT, outPRED, nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        model_2.eval()

        for i, (input, target) in enumerate(dataLoaderTest):
            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda(), volatile=True)

            out = model_2(varInput)
            outMean = out.view(bs, n_crops, -1).mean(1)
            outPRED = torch.cat((outPRED, outMean.data), 0)

        #       aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
        aurocIndividual = self.computeAUROC(outGT, outPRED, nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
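
Note that the second loop keeps appending to the same outGT/outPRED tensors, so the second AUROC is computed over the pooled predictions of both models rather than over model_2 alone. A sketch (same loaders and helpers as above assumed) that scores each model on its own buffers instead:

        for name, net in (('ResNet18', model_1), ('DenseNet121', model_2)):
            net.eval()
            gt = torch.FloatTensor().cuda()
            pred = torch.FloatTensor().cuda()
            for input, target in dataLoaderTest:
                gt = torch.cat((gt, target.cuda()), 0)
                bs, n_crops, c, h, w = input.size()
                with torch.no_grad():
                    out = net(input.view(-1, c, h, w).cuda())
                pred = torch.cat((pred, out.view(bs, n_crops, -1).mean(1).data), 0)
            auroc = self.computeAUROC(gt, pred, nnClassCount)
            print(name, 'AUROC mean', np.array(auroc).mean())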
Example #14
pathModel = 'ResNet-50-m-17052018-210358.pth.tar'

nnArchitecture = 'ResNet-50'
nnClassCount = 14

transCrop = 224

features_blobs = []


def hook_feature(module, input, output):
    features_blobs.append(output)


if nnArchitecture == 'DENSE-NET-121':
    model = DenseNet121(nnClassCount, True).cuda()
elif nnArchitecture == 'DENSE-NET-169':
    model = DenseNet169(nnClassCount, True).cuda()
elif nnArchitecture == 'DENSE-NET-201':
    model = DenseNet201(nnClassCount, True).cuda()
elif nnArchitecture == 'ResNet-50':
    model = ResNet50(nnClassCount, True).cuda()

model = torch.nn.DataParallel(model).cuda()

modelCheckpoint = torch.load(pathModel)
model.load_state_dict(modelCheckpoint['state_dict'])
model = model.module.resnet50
model.eval()

#---- Initialize the image transform - resize + normalize
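
The snippet breaks off at the transform comment; following the identical resize + normalize pattern used throughout these examples, a plausible completion (including a hypothetical hook registration, since hook_feature is defined above but never attached in the code shown):

# Completion sketch based on the pattern used in the other examples.
normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                 [0.229, 0.224, 0.225])
transformList = []
transformList.append(transforms.Resize(transCrop))
transformList.append(transforms.ToTensor())
transformList.append(normalize)
transformSequence = transforms.Compose(transformList)

# Hypothetical: attach the hook to the last conv stage so features_blobs fills up.
model.layer4.register_forward_hook(hook_feature)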
Example #15
    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint):

        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(pathImageDirectory=pathDirData,
                                        pathDatasetFile=pathFileTrain,
                                        transform=transformSequence)
        datasetVal = DatasetGenerator(pathImageDirectory=pathDirData,
                                      pathDatasetFile=pathFileVal,
                                      transform=transformSequence)

        #--WeightedRandomSampler
        # targets =[]
        # for batchID, (input, target) in enumerate (datasetTrain):
        #     # print(target)
        #     targets.append(target)

        # # print('\n', targets[10][0])
        # class_sample_count = np.array([3993, 249]) # 0, 1
        # weight = 1. / class_sample_count
        # samples_weight = np.array([weight[int(t[0].numpy())] for t in targets])
        # samples_weight = torch.from_numpy(samples_weight)
        # sampler = torch.utils.data.WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'), len(samples_weight))

        #--End of WeightedRandomSampler

        # uncomment this to apply sampler
        # dataLoaderTrain = DataLoader(dataset=datasetTrain, batch_size=trBatchSize, shuffle=False,  \
        #     num_workers=24, \
        #     sampler=sampler, pin_memory=True)
        dataLoaderTrain = DataLoader(dataset=datasetTrain,
                                     batch_size=trBatchSize,
                                     shuffle=True,
                                     num_workers=24,
                                     pin_memory=True)
        dataLoaderVal = DataLoader(dataset=datasetVal,
                                   batch_size=trBatchSize,
                                   shuffle=False,
                                   num_workers=24,
                                   pin_memory=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(model.parameters(),
                               lr=0.0001,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)
        scheduler = ReduceLROnPlateau(optimizer,
                                      factor=0.1,
                                      patience=5,
                                      mode='min')

        #-------------------- SETTINGS: LOSS
        # loss = ChexnetTrainer.weighted_BCELoss()
        loss = torch.nn.BCELoss(size_average=True)

        #---- Load checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])

        #---- TRAIN THE NETWORK

        lossMIN = 100000

        AOC = 0
        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime
            # ChexnetTrainer.epochTrain (model, dataLoaderTrain, optimizer, scheduler, trMaxEpoch, nnClassCount)
            # lossVal, losstensor = ChexnetTrainer.epochVal (model, dataLoaderVal, optimizer, scheduler, trMaxEpoch, nnClassCount)

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.item())

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict()
                    }, 'm-' + str(epochID + 1) + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss= ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss= ' + str(lossVal))
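
The commented-out block in the middle of this example sketches class-balanced sampling; a self-contained working version (datasetTrain and trBatchSize as in the method above; the 3993/249 counts come from the original comment, the rest is an assumption) that would replace the dataLoaderTrain construction:

import numpy as np
import torch
from torch.utils.data import DataLoader, WeightedRandomSampler

# Weight each sample inversely to its class frequency (binary case from the comment).
targets = [int(target[0]) for (input, target) in datasetTrain]
class_sample_count = np.array([3993, 249])          # counts for class 0, class 1
weight = 1. / class_sample_count
samples_weight = torch.from_numpy(
    np.array([weight[t] for t in targets])).double()
sampler = WeightedRandomSampler(samples_weight, len(samples_weight))

dataLoaderTrain = DataLoader(dataset=datasetTrain,
                             batch_size=trBatchSize,
                             shuffle=False,          # the sampler replaces shuffling
                             num_workers=24,
                             sampler=sampler,
                             pin_memory=True)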
Example #16
    def test(pathDirData,
             pathFileTest,
             pathModel,
             nnArchitecture,
             nnClassCount,
             nnIsTrained,
             trBatchSize,
             transResize,
             transCrop,
             launchTimeStamp,
             predict_output=''):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]
        #CLASS_NAMES = ['WORST', 'BEST']

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=8,
                                    shuffle=False,
                                    pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()
        print("\nTesting...\n")
        total = len(dataLoaderTest)
        for i, (input, target) in enumerate(dataLoaderTest):
            sys.stdout.write('\r')
            sys.stdout.write("\rSteps: {}/{}".format(i + 1, total))
            sys.stdout.flush()
            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()
            with torch.no_grad():
                varInput = torch.autograd.Variable(
                    input.view(-1, c, h, w).cuda())

                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)

                outPRED = torch.cat((outPRED, outMean.data), 0)

        #ChexnetTrainer.splitResult(outGT, outPRED, datasetTest.listImagePaths)
        ChexnetTrainer.predict(outGT, outPRED, datasetTest.listImagePaths,
                               predict_output)
        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED,
                                                      nnClassCount)

        aurocMean = np.array(aurocIndividual).mean()

        print('\nAUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        return
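
ChexnetTrainer.predict is called above but not shown; a hypothetical minimal version that writes one comma-separated probability row per test image:

    def predict(outGT, outPRED, listImagePaths, predict_output):
        # Hypothetical sketch: dump per-image predicted probabilities to a CSV.
        preds = outPRED.cpu().numpy()
        outPath = predict_output if predict_output else 'predictions.csv'
        with open(outPath, 'w') as f:
            for path, row in zip(listImagePaths, preds):
                f.write(path + ',' + ','.join('%.4f' % p for p in row) + '\n')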
Example #17
    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint):

        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(pathImageDirectory=pathDirData,
                                        pathDatasetFile=pathFileTrain,
                                        transform=transformSequence)
        datasetVal = DatasetGenerator(pathImageDirectory=pathDirData,
                                      pathDatasetFile=pathFileVal,
                                      transform=transformSequence)

        dataLoaderTrain = DataLoader(dataset=datasetTrain,
                                     batch_size=trBatchSize,
                                     shuffle=True,
                                     pin_memory=True)  # num_workers=N
        dataLoaderVal = DataLoader(dataset=datasetVal,
                                   batch_size=trBatchSize,
                                   shuffle=False,
                                   pin_memory=True)  # num_workers=N

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(model.parameters(),
                               lr=0.0001,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)
        scheduler = ReduceLROnPlateau(optimizer,
                                      factor=0.1,
                                      patience=5,
                                      mode='min')

        #-------------------- SETTINGS: LOSS
        loss = torch.nn.BCELoss(reduction='mean')

        #---- Load checkpoint
        if checkpoint is not None:
            print("=> loading checkpoint")
            modelCheckpoint = torch.load(checkpoint)
            # Error when loading the DenseNet model: missing key(s) in state_dict.
            # https://github.com/KaiyangZhou/deep-person-reid/issues/23
            # The error is caused by a key mismatch: '.' is no longer allowed
            # in module names, so layers once named 'norm.1', 'conv.1', etc.
            # (as in old _DenseLayer checkpoints, including those in
            # model_urls) are now named 'norm1', 'conv1' in newer torchvision.
            # This pattern finds the dotted keys so they can be renamed.
            # https://github.com/pytorch/vision/blob/50b2f910490a731c4cd50db5813b291860f02237/torchvision/models/densenet.py#L28
            pattern = re.compile(
                r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
            )
            state_dict = modelCheckpoint['state_dict']
            for key in list(state_dict.keys()):
                res = pattern.match(key)
                if res:
                    new_key = res.group(1) + res.group(2)
                    state_dict[new_key] = state_dict[key]
                    del state_dict[key]
            model.load_state_dict(state_dict)
            optimizer.load_state_dict(modelCheckpoint['optimizer'])
            print("=> loaded checkpoint")

        #---- TRAIN THE NETWORK

        lossMIN = 100000

        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.item())

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict()
                    }, 'm-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss= ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss= ' + str(lossVal))
Example 18
    def predict(pathDirData, pathFileTest, pathModel, nnArchitecture,
                nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
                launchTimeStamp):

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        # https://github.com/KaiyangZhou/deep-person-reid/issues/23
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        state_dict = modelCheckpoint['state_dict']
        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    shuffle=False,
                                    pin_memory=True)  #num_workers=N

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        for i, (input, target) in enumerate(dataLoaderTest):

            target = target.cuda()
            outGT = torch.cat((outGT, target), 0)

            bs, n_crops, c, h, w = input.size()

            # volatile=True was removed in PyTorch 0.4; torch.no_grad() replaces it
            with torch.no_grad():
                varInput = input.view(-1, c, h, w).cuda()
                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)

            outPRED = torch.cat((outPRED, outMean.data), 0)

        pneumonia_probas = outPRED.cpu().numpy()[
            :, CLASS_NAMES.index('Pneumonia')].tolist()

        return pneumonia_probas
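
A hypothetical caller for the probabilities returned above; the paths, sizes, and 0.5 cutoff below are placeholders, not values from the source:

# Hypothetical usage; every argument value here is an illustrative placeholder.
probas = ChexnetTrainer.predict(pathDirData='./database',
                                pathFileTest='./dataset/test.txt',
                                pathModel='./m-checkpoint.pth.tar',
                                nnArchitecture='DENSE-NET-121',
                                nnClassCount=14, nnIsTrained=True,
                                trBatchSize=16, transResize=256, transCrop=224,
                                launchTimeStamp='')
labels = [int(p > 0.5) for p in probas]   # the 0.5 cutoff is an assumption
print(sum(labels), 'of', len(labels), 'images flagged for Pneumonia')
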
Example 19
    def __init__(self, pathModel_1, pathModel_2, transCrop, pathInputImage,
                 pathOutputFile):

        checkpoint_1 = torch.load(pathModel_1,
                                  map_location=lambda storage, loc: storage)

        model = DenseNet121(2, False).cpu()
        model = torch.nn.DataParallel(model).cpu()
        model.load_state_dict(checkpoint_1['state_dict'], strict=False)

        self.model = model.module.densenet121.features
        self.model.eval()

        #---- Initialize the weights
        self.weights = list(self.model.parameters())[-2]

        normalize = transforms.Normalize([0.4914, 0.4822, 0.4465],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.Resize((1152, 896), interpolation=2))
        #transformList.append(transforms.Resize(transCrop))
        #transformList.append(transforms.CenterCrop(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)

        #print(pathInputImage)
        self.transformSequence = transforms.Compose(transformList)
        img = Image.open(pathInputImage)
        img = img.convert('RGB')

        img = self.transformSequence(img)
        img = img.unsqueeze(0)
        device = torch.device("cpu")
        img = img.to(device)

        input = torch.autograd.Variable(img)

        self.model.cpu()
        output = self.model(input.cpu())
        #---- Generate heatmap
        heatmap = None
        #print(torch.max(self.weights))

        for i in range(0, len(self.weights)):
            featureMap = output[0, i, :, :]
            if i == 0: heatmap = self.weights[i] * featureMap
            else: heatmap += self.weights[i] * featureMap

        #---- Blend original and heatmap
        npHeatmap = heatmap.cpu().data.numpy()

        imgOriginal = cv2.imread(pathInputImage, 1)
        imgOriginal = cv2.resize(imgOriginal, (transCrop, transCrop),
                                 interpolation=cv2.INTER_LINEAR)

        cam = npHeatmap / np.max(npHeatmap)

        cam = cv2.resize(cam, (transCrop, transCrop))
        heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
        img = heatmap * 0.5 + imgOriginal
        print(pathOutputFile)
        cv2.imwrite(pathOutputFile, img)

        checkpoint_2 = torch.load(pathModel_2,
                                  map_location=lambda storage, loc: storage)
        model2 = DenseNet121(2, False).cpu()
        model2 = torch.nn.DataParallel(model2).cpu()
        model2.load_state_dict(checkpoint_2['state_dict'], strict=False)
        model2.eval()

        transformList2 = []
        transformList2.append(transforms.ToTensor())

        transformSequence2 = transforms.Compose(transformList2)
        img2 = Image.open(pathOutputFile)

        img2 = transformSequence2(img2)
        img2 = img2.unsqueeze(0)
        device = torch.device("cpu")
        img2 = img2.to(device)

        classes = ('NORMAL', 'Mammo Mass')

        print('\n')
        print(pathInputImage)
        print('\n')
        with torch.no_grad():
            py = model2(img2)
            _, predicted = torch.max(py, 1)  # get the predicted class index
            classIndex_ = predicted[0]
            print('prediction scores', py)
            print(classes[int(classIndex_)])
        print('\n')
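
The per-channel accumulation loop above can be collapsed into one tensor contraction; a behavior-equivalent sketch, assuming self.weights is a 1-D tensor of per-channel weights and output has shape (1, C, H, W):

import torch

def class_activation_map(weights, features):
    # weights: (C,) channel weights; features: (1, C, H, W) conv output.
    # One-line equivalent of the explicit accumulation loop in the example above.
    return torch.einsum('c,chw->hw', weights, features[0])
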
Example 20
    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        # print(torch.cuda.get_device_name(0))
        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        # cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        # if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        # elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        # elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained)
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained)
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained)

        model = torch.nn.DataParallel(model)
        # model = DenseNet121(N_CLASSES).cuda()
        # model = torch.nn.DataParallel(model).cuda()
        modelCheckpoint = torch.load(pathModel, map_location='cpu')
        state_dict = modelCheckpoint['state_dict']
        remove_data_parallel = False  # Change if you don't want to use nn.DataParallel(model)
        print('starting this')
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )
        for key in list(state_dict.keys()):
            match = pattern.match(key)
            new_key = match.group(1) + match.group(2) if match else key
            new_key = new_key[7:] if remove_data_parallel else new_key
            state_dict[new_key] = state_dict[key]
            # Delete old key only if modified.
            if match or remove_data_parallel:
                del state_dict[key]
        print('done this')
        # if os.path.isfile(CKPT_PATH):
        #     print("=> loading checkpoint")
        #     checkpoint = torch.load(CKPT_PATH)
        model.load_state_dict(modelCheckpoint['state_dict'])
        print("=> loaded checkpoint")
        # modelCheckpoint = torch.load(pathModel)
        # model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)
        print('transformed')
        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=4,
                                    shuffle=False,
                                    pin_memory=True)
        print('made dataset')
        # outGT = torch.FloatTensor().cuda()
        # outPRED = torch.FloatTensor().cuda()
        outGT = torch.FloatTensor()
        outPRED = torch.FloatTensor()
        print('pred')
        model.eval()
        print('eval')
        totalPredictions = []
        totalTruth = []
        with torch.no_grad():
            for i, (input, target) in enumerate(dataLoaderTest):
                if i % 5 == 0:
                    print(i)
                # target = target.cuda()
                outGT = torch.cat((outGT, target), 0)

                bs, n_crops, c, h, w = input.size()
                # varInput = torch.autograd.Variable(input.view(-1, c, h, w).cuda())
                varInput = torch.autograd.Variable(input.view(-1, c, h, w))

                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)
                predicted = outMean.data.tolist()  #16x14
                truth = target.data.tolist()  #16x14
                totalPredictions.extend(predicted)
                totalTruth.extend(truth)

                # ---- Disabled deferral analysis for classes 2, 7 and 9:
                # keep a prediction only when it falls outside the deferral
                # band (t, 1-t), track the deferred fraction per batch,
                # score the kept predictions with roc_auc_score, and write
                # the t2/t7/t9 predictions, AUROCs and deferred percentages
                # to text files.

        # ---- Disabled AUROC summary: accumulate outPRED via
        # torch.cat((outPRED, outMean.data), 0) inside the loop, then run
        # ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount) and
        # print the mean and per-class scores.
        with open('predictedProbs.txt', 'w') as f:
            for listy in totalPredictions:
                for item in listy:
                    f.write("%s\n" % item)
        with open('labels.txt', 'w') as f:
            for listy in totalTruth:
                for item in listy:
                    f.write("%s\n" % item)
        return
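
The files written above hold one value per line, 14 per image (one per CLASS_NAMES entry); a small sketch of reading them back into (N, 14) arrays:

import numpy as np

# Hedged reader for the flat files written above.
probs = np.loadtxt('predictedProbs.txt').reshape(-1, 14)
truth = np.loadtxt('labels.txt').reshape(-1, 14)
print(probs.shape, truth.shape)
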
Example 21
    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint, pathModel):

        # model
        if nnArchitecture == 'DenseNet121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'VGG16':
            model = VGG16(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet50':
            model = ResNet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'GoogLeNet':
            model = GoogLeNet(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'AlexNet':
            model = AlexNet(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        # data preprocessing
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        # load dataset
        datasetTrain = DatasetGenerator(pathImageDirectory=pathDirData,
                                        pathDatasetFile=pathFileTrain,
                                        transform=transformSequence)
        datasetVal = DatasetGenerator(pathImageDirectory=pathDirData,
                                      pathDatasetFile=pathFileVal,
                                      transform=transformSequence)

        dataLoaderTrain = DataLoader(dataset=datasetTrain,
                                     batch_size=trBatchSize,
                                     shuffle=True,
                                     num_workers=24,
                                     pin_memory=True)
        dataLoaderVal = DataLoader(dataset=datasetVal,
                                   batch_size=trBatchSize,
                                   shuffle=False,
                                   num_workers=24,
                                   pin_memory=True)

        # optimizer
        optimizer = optim.Adam(model.parameters(),
                               lr=0.0001,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)
        scheduler = ReduceLROnPlateau(optimizer,
                                      factor=0.1,
                                      patience=5,
                                      mode='min')

        # loss
        loss = torch.nn.BCELoss(reduction='mean')

        # checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])

        # training

        lossMIN = 100000

        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.item())

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict()
                    }, pathModel)
                print('Epoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss = ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss = ' + str(lossVal))
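
DatasetGenerator is imported from elsewhere in all of these examples; a minimal sketch of the interface they assume, where each dataset-file line is presumed to hold an image path followed by space-separated 0/1 labels (the file format is an assumption):

import os
import torch
from PIL import Image
from torch.utils.data import Dataset

class DatasetGenerator(Dataset):
    """Hedged sketch of the dataset the examples above rely on."""

    def __init__(self, pathImageDirectory, pathDatasetFile, transform):
        self.listImagePaths = []
        self.listImageLabels = []
        self.transform = transform
        with open(pathDatasetFile) as f:
            for line in f:
                items = line.split()
                if not items:
                    continue
                self.listImagePaths.append(
                    os.path.join(pathImageDirectory, items[0]))
                self.listImageLabels.append([int(x) for x in items[1:]])

    def __getitem__(self, index):
        image = Image.open(self.listImagePaths[index]).convert('RGB')
        label = torch.FloatTensor(self.listImageLabels[index])
        return self.transform(image), label

    def __len__(self):
        return len(self.listImagePaths)
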
Example 22
    def test (images, labels, pathModel, nnArchitecture, nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop, launchTimeStamp):
        
        
        CLASS_NAMES = [ 'A', 'B', 'C', 'D', 'E', '']
        
        cudnn.benchmark = True
        
        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        
        model = torch.nn.DataParallel(model).cuda() 
        
        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        
        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])))
        transformSequence=transforms.Compose(transformList)
        
        datasetTest = ImageGenerator(images=images, labels=labels, transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=trBatchSize, num_workers=8, shuffle=False, pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()
       
        model.eval()
        
        # volatile=True was removed in PyTorch 0.4; torch.no_grad() replaces it
        with torch.no_grad():
            for i, (input, target) in enumerate(dataLoaderTest):

                #target = target.cuda()
                #outGT = torch.cat((outGT, target), 0)

                bs, n_crops, c, h, w = input.size()

                varInput = input.view(-1, c, h, w).cuda()

                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)

                outPRED = torch.cat((outPRED, outMean.data), 0)

        '''
        aurocIndividual, cm = DensenetTrainer.computeAUROC(outGT, outPRED, nnClassCount, datasetTest)
        aurocMean = np.array(aurocIndividual).mean()

        print ('AUROC mean ', aurocMean)

        for i in range (0, len(aurocIndividual)):
            print (CLASS_NAMES[i], ' ', aurocIndividual[i])

        print(cm)
        '''
        
     
        return outPRED
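
The view/mean pair used for ten-crop evaluation in these examples folds the crop dimension into the batch for the forward pass, then averages the ten predictions per image; a self-contained shape check with illustrative sizes:

import torch

# Illustrative sizes: batch of 4 images, 10 crops of 3x224x224 each, 14 classes.
bs, n_crops, c, h, w = 4, 10, 3, 224, 224
input = torch.randn(bs, n_crops, c, h, w)
flat = input.view(-1, c, h, w)            # (40, 3, 224, 224) fed to the model
out = torch.randn(flat.size(0), 14)       # stand-in for model(flat)
outMean = out.view(bs, n_crops, -1).mean(1)
print(outMean.shape)                      # torch.Size([4, 14])
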
Example 23
    def test (pathDirData, pathFileTest, pathModel, nnArchitecture, nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop, launchTimeStamp):


        CLASS_NAMES = [ 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',
                'Pneumothorax', 'Consolidation', 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia']

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'RES-NET-18': model = ResNet18(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'RES-NET-50': model = ResNet50(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(os.path.join("models", pathModel))
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        # transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])))
        transformSequence=transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileTest, transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=trBatchSize, num_workers=8, shuffle=False, pin_memory=True)

        # outGT = torch.FloatTensor().cuda()
        # outPRED = torch.FloatTensor().cuda()

        outGT = []
        outPRED = []

        model.eval()

        with torch.no_grad():
            for i, (input, target) in enumerate(dataLoaderTest):

                # outGT = torch.cat((outGT, target), 0)
                outGT.append(target.numpy())

                bs, n_crops, c, h, w = input.size()

                out = model(input.view(-1, c, h, w).cuda())
                outMean = out.cpu().numpy().reshape(bs, n_crops, -1).mean(axis=1)

                # outPRED = torch.cat((outPRED, outMean.data), 0)
                outPRED.append(outMean)
                del out

        outGT = np.concatenate(outGT, axis=0)
        outPRED = np.concatenate(outPRED, axis=0)

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        modelTested = pathModel.split("/")[-1].replace(".pth.tar", "")
        modelResultsPath = os.path.join("results", modelTested + ".txt")

        with open(modelResultsPath, "w") as f:
            print ('Architecture: ', nnArchitecture, file=f)
            print ('AUROC mean ', aurocMean, file=f)
            for i in range (0, len(aurocIndividual)):
                print (CLASS_NAMES[i], ' ', aurocIndividual[i], file=f)

        print ('Architecture: ', nnArchitecture)
        print ('AUROC mean ', aurocMean)
        for i in range (0, len(aurocIndividual)):
            print (CLASS_NAMES[i], ' ', aurocIndividual[i])


        return
Example 24
def main(nnClassCount, nnIsTrained, IRID_stats=True):
    nnClassCount = nclasses
    nnArchitectureList = [{
        'name': 'densenet201',
        'model': DenseNet201(nnClassCount, nnIsTrained)
    }, {
        'name': 'densenet169',
        'model': DenseNet169(nnClassCount, nnIsTrained)
    }, {
        'name': 'densenet161',
        'model': DenseNet161(nnClassCount, nnIsTrained)
    }, {
        'name': 'densenet121',
        'model': DenseNet121(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet152',
        'model': ResNet152(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet101',
        'model': ResNet101(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet50',
        'model': ResNet50(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet34',
        'model': ResNet34(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet18',
        'model': ResNet18(nnClassCount, nnIsTrained)
    }]
    #runTest()
    for nnArchitecture in nnArchitectureList:
        runTrain(expert=False,
                 nnArchitecture=nnArchitecture,
                 IRID_stats=IRID_stats)

    nnClassCount = nclasses_expert
    nnArchitectureList = [{
        'name': 'densenet201',
        'model': DenseNet201(nnClassCount, nnIsTrained)
    }, {
        'name': 'densenet169',
        'model': DenseNet169(nnClassCount, nnIsTrained)
    }, {
        'name': 'densenet161',
        'model': DenseNet161(nnClassCount, nnIsTrained)
    }, {
        'name': 'densenet121',
        'model': DenseNet121(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet152',
        'model': ResNet152(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet101',
        'model': ResNet101(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet50',
        'model': ResNet50(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet34',
        'model': ResNet34(nnClassCount, nnIsTrained)
    }, {
        'name': 'resnet18',
        'model': ResNet18(nnClassCount, nnIsTrained)
    }]

    for nnArchitecture in nnArchitectureList:
        print("Expert model training....")
        runTrain(expert=True,
                 nnArchitecture=nnArchitecture,
                 IRID_stats=IRID_stats)
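
runTrain is not shown in this snippet; a hypothetical stub of the interface main() assumes, where each nnArchitecture entry is one of the {'name', 'model'} dicts built above:

# Hypothetical stub matching the calls in main(); not the original function.
def runTrain(expert, nnArchitecture, IRID_stats):
    model = nnArchitecture['model']
    print('Training', nnArchitecture['name'],
          'expert' if expert else 'base',
          'with IRID stats' if IRID_stats else 'with ImageNet stats')
    # ... build loaders with the chosen normalization and train `model` ...
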
Example 25
def load_model (nnArchitecture, nnIsTrained, 
                nnInChanCount, nnClassCount, gpu = True):
    
    # torchvision constructors take pretrained=..., not (classCount, isTrained)
    if nnArchitecture == 'resnet18':
        model = models.resnet18(pretrained=nnIsTrained)
        num_ftrs = model.fc.in_features
        model.fc = torch.nn.Linear(num_ftrs, nnClassCount)
        model.classifier = torch.nn.Sequential(torch.nn.ReLU())
    elif nnArchitecture == 'resnet34':
        model = models.resnet34(pretrained=nnIsTrained)
        num_ftrs = model.fc.in_features
        model.fc = torch.nn.Linear(num_ftrs, nnClassCount)
        model.classifier = torch.nn.Sequential(torch.nn.ReLU())
    elif nnArchitecture == 'resnet50':
        model = models.resnet50(pretrained=nnIsTrained)
        num_ftrs = model.fc.in_features
        model.fc = torch.nn.Linear(num_ftrs, nnClassCount)
        model.classifier = torch.nn.Sequential(torch.nn.ReLU())
    elif nnArchitecture == 'alexnet': model = models.alexnet(pretrained=nnIsTrained)
    elif nnArchitecture == 'vgg19': model = models.vgg19(pretrained=nnIsTrained)
    elif nnArchitecture == 'DENSE-NET-121': 
        from DensenetModels import DenseNet121 
        model = DenseNet121(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'DENSE-NET-169': 
        from DensenetModels import DenseNet169
        model = DenseNet169(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'DENSE-NET-201': 
        from DensenetModels import DenseNet201
        model = DenseNet201(nnClassCount, nnIsTrained)
    elif nnArchitecture == 'mine': model = User_defined_model.SimpleCNN()
    elif nnArchitecture == "RakibNET": model = User_defined_model.RakibNET()
    
    #model.classifier._modules['6'] = torch.nn.Linear(4096, nnClassCount)

    # # let's make our model work with channels we want
    # trained_kernel = model.conv1.weight
    # new_conv = torch.nn.Conv2d(nnInChanCount, 64, kernel_size=7, stride=2, padding=3, bias=False)
    # with torch.no_grad():
    #     new_conv.weight[:,:] = torch.stack([torch.mean(trained_kernel, 1)]*nnInChanCount, dim=1)
    # model.conv1 = new_conv

    print('-' * 100)
    # for idx, m in enumerate(model.modules()):
    #     print("{} is {}".format(idx, m))
    # print('-' * 100)
    
    if gpu:
        model = model.cuda()

    # Make all weights trainable
    for param in model.parameters():
        param.requires_grad = True
    
    # Print Trainable and Non-Trainable Parameters
    print('-' * 100)
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    total_trainable_params = sum(
        p.numel() for p in model.parameters() if p.requires_grad)
    print(f'{total_trainable_params:,} training parameters.')
    
    return model
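
A hypothetical smoke test for load_model; the input size is illustrative and gpu=False keeps it runnable on CPU:

import torch

# Hypothetical usage; 14 classes and a 224x224 input are illustrative choices.
model = load_model('resnet18', nnIsTrained=False, nnInChanCount=3,
                   nnClassCount=14, gpu=False)
model.eval()
with torch.no_grad():
    out = model(torch.randn(1, 3, 224, 224))
print(out.shape)   # expected: torch.Size([1, 14])
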
Example 26
    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture,
              nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize,
              transCrop, launchTimestamp, checkpoint):

        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'RES-NET-152':
            model = ResNet152(nnClassCount, nnIsTrained).cuda()

        model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(pathImageDirectory=pathDirData,
                                        pathDatasetFile=pathFileTrain,
                                        transform=transformSequence)
        datasetVal = DatasetGenerator(pathImageDirectory=pathDirData,
                                      pathDatasetFile=pathFileVal,
                                      transform=transformSequence)

        dataLoaderTrain = DataLoader(dataset=datasetTrain,
                                     batch_size=trBatchSize,
                                     shuffle=True,
                                     num_workers=24,
                                     pin_memory=True)
        dataLoaderVal = DataLoader(dataset=datasetVal,
                                   batch_size=trBatchSize,
                                   shuffle=False,
                                   num_workers=24,
                                   pin_memory=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(model.parameters(),
                               lr=0.0000001,
                               betas=(0.9, 0.999),
                               eps=1e-08,
                               weight_decay=1e-5)
        scheduler = ReduceLROnPlateau(optimizer,
                                      factor=0.1,
                                      patience=5,
                                      mode='min')

        #-------------------- SETTINGS: LOSS
        loss = torch.nn.BCELoss(reduction='mean')

        #---- Load checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])

        #---- TRAIN THE NETWORK

        lossMIN = 1000

        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer,
                                      scheduler, trMaxEpoch, nnClassCount,
                                      loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(
                model, dataLoaderVal, optimizer, scheduler, trMaxEpoch,
                nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(lossVal)

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save(
                    {
                        'epoch': epochID + 1,
                        'state_dict': model.state_dict(),
                        'best_loss': lossMIN,
                        'optimizer': optimizer.state_dict()
                    }, 'm-' + launchTimestamp + '.pth.tar')
                print('\nEpoch [' + str(epochID + 1) + '] [save] [' +
                      timestampEND + '] loss= ' + str(lossVal))
            else:
                print('\nEpoch [' + str(epochID + 1) + '] [----] [' +
                      timestampEND + '] loss= ' + str(lossVal))
Example 27
    def __init__(self, pathModel, transCrop, pathInputImage, pathOutputFile):

        checkpoint = torch.load(pathModel,
                                map_location=lambda storage, loc: storage)
        model = DenseNet121(2, False).cpu()
        model = torch.nn.DataParallel(model).cpu()
        model.load_state_dict(checkpoint['state_dict'])

        self.model = model.module.densenet121.features
        self.model.eval()

        # for param in model.parameters():
        #     print(param.data)
        #     break

        #summary(model,(3,1000, 1000))
        #---- Initialize the weights
        self.weights = list(self.model.parameters())[-2]

        normalize = transforms.Normalize([0.4914, 0.4822, 0.4465],
                                         [0.229, 0.224, 0.225])
        img = Image.open(pathInputImage)
        img = img.convert('RGB')
        img_width, img_height = img.size

        transformList = []
        #transformList.append(transforms.Resize((3584,2816),interpolation=2))
        #transformList.append(transforms.CenterCrop((3584,2816)))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)

        #print(pathInputImage)
        self.transformSequence = transforms.Compose(transformList)

        # img.show()

        img = self.transformSequence(img)
        img = img.unsqueeze(0)
        device = torch.device("cpu")
        img = img.to(device)

        #classes = ('NORMAL', 'Mammo Calcification')

        # print('\n')
        # print(pathInputImage)
        # print('\n')
        # with torch.no_grad():
        #     py = model(img)
        #     _, predicted = torch.max(py, 1)  # get the predicted class index
        #     classIndex_ = predicted[0]
        #     print('prediction scores', py)
        #     print(classes[int(classIndex_)])
        # print('\n')

        # ---- Load image, transform, convert

        #imageData = Image.open(pathImageFile).convert('RGB')
        #imageData = self.transformSequence(imageData)
        #imageData = imageData.unsqueeze_(0)

        input = torch.autograd.Variable(img)

        self.model.cpu()
        output = self.model(input.cpu())
        #---- Generate heatmap
        heatmap = None
        #print(torch.max(self.weights))

        for i in range(0, len(self.weights)):
            featureMap = output[0, i, :, :]
            if i == 0: heatmap = self.weights[i] * featureMap
            else: heatmap += self.weights[i] * featureMap

        #---- Blend original and heatmap
        npHeatmap = heatmap.cpu().data.numpy()

        imgOriginal = cv2.imread(pathInputImage, 1)
        imgOriginal = cv2.resize(imgOriginal, (transCrop, transCrop),
                                 interpolation=cv2.INTER_LINEAR)

        cam = npHeatmap / np.max(npHeatmap)

        cam = cv2.resize(cam, (transCrop, transCrop))
        heatmap = cv2.applyColorMap(np.uint8(255 * cam), cv2.COLORMAP_JET)
        img = heatmap * 0.5 + imgOriginal
        print(pathOutputFile)
        cv2.imwrite(pathOutputFile, img)
Example 28
    def train(self, pathDirData, pathFileTrain, pathFileVal, nnArchitecture, nnIsTrained, nnClassCount, trBatchSize,
              trMaxEpoch, transResize, transCrop, launchTimestamp, checkpoint):
        # -------------------- SETTINGS: NETWORK ARCHITECTURE

        model_1 = ResNet18(nnClassCount, nnIsTrained).cuda()
        model_2 = DenseNet121(nnClassCount, nnIsTrained).cuda()

        model_1 = torch.nn.DataParallel(model_1).cuda()
        model_2 = torch.nn.DataParallel(model_2).cuda()

        # -------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence = transforms.Compose(transformList)

        # -------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileTrain,
                                        transform=transformSequence)
        datasetVal = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileVal,
                                      transform=transformSequence)

        dataLoaderTrain = DataLoader(dataset=datasetTrain, batch_size=trBatchSize, shuffle=True, num_workers=24,
                                     pin_memory=True)
        dataLoaderVal = DataLoader(dataset=datasetVal, batch_size=trBatchSize, shuffle=False, num_workers=24,
                                   pin_memory=True)

        # -------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer_1 = optim.Adam(model_1.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        scheduler_1 = ReduceLROnPlateau(optimizer_1, factor=0.1, patience=5, mode='min')

        optimizer_2 = optim.Adam(model_2.parameters(), lr=0.0001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        scheduler_2 = ReduceLROnPlateau(optimizer_2, factor=0.1, patience=5, mode='min')
        # -------------------- SETTINGS: LOSS
        loss_CE = torch.nn.BCELoss(reduction='sum')
        loss_KLD = torch.nn.KLDivLoss(reduction='sum')

        # ---- Load checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model_1.load_state_dict(modelCheckpoint['state_dict'])
            optimizer_1.load_state_dict(modelCheckpoint['optimizer_1'])
            optimizer_2.load_state_dict(modelCheckpoint['optimizer_2'])
        # ---- TRAIN THE NETWORK

        lossMIN_1 = 100000
        lossMIN_2 = 100000

        for epochID in range(0, trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            self.epochTrain(model_1, model_2, dataLoaderTrain, optimizer_1, optimizer_2, scheduler_1, scheduler_2, trMaxEpoch, nnClassCount, loss_CE, loss_KLD)

            lossVal_1, losstensor_1 = self.epochVal(model_1, model_2, dataLoaderVal, optimizer_1, scheduler_1, trMaxEpoch, nnClassCount, loss_CE, loss_KLD)
            lossVal_2, losstensor_2 = self.epochVal(model_2, model_1, dataLoaderVal, optimizer_2, scheduler_2, trMaxEpoch, nnClassCount, loss_CE, loss_KLD)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler_1.step(losstensor_1.item())
            scheduler_2.step(losstensor_2.item())
            if lossVal_1 < lossMIN_1:
                lossMIN_1 = lossVal_1
                torch.save({'epoch': epochID + 1, 'state_dict': model_1.state_dict(), 'best_loss': lossMIN_1,
                            'optimizer': optimizer_1.state_dict()}, 'm1-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' + timestampEND + '] loss_1= ' + str(lossVal_1))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' + timestampEND + '] loss_1= ' + str(lossVal_1))
            if epochID == trMaxEpoch - 1:
                torch.save({'epoch': epochID + 1, 'state_dict': model_1.state_dict(), 'best_loss': lossMIN_1,
                            'optimizer': optimizer_1.state_dict()}, 'm1-final-checkpoint' + '.pth.tar')


            if lossVal_2 < lossMIN_2:
                lossMIN_2 = lossVal_2
                torch.save({'epoch': epochID + 1, 'state_dict': model_2.state_dict(), 'best_loss': lossMIN_2,
                            'optimizer': optimizer_2.state_dict()}, 'm2-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' + timestampEND + '] loss_2= ' + str(lossVal_2))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' + timestampEND + '] loss_2= ' + str(lossVal_2))
            if epochID == trMaxEpoch - 1:
                torch.save({'epoch': epochID + 1, 'state_dict': model_2.state_dict(), 'best_loss': lossMIN_1,
                            'optimizer': optimizer_2.state_dict()}, 'm2-final-checkpoint' + '.pth.tar')
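
epochTrain here trains the two networks mutually but is not shown; a hedged sketch of the per-batch loss this setup implies, supervised BCE plus a KL term pulling each network toward its (detached) peer in the spirit of deep mutual learning (the unweighted sum and the clamping are assumptions):

import torch

def mutual_loss(out_self, out_peer, target, loss_CE, loss_KLD):
    # Hedged sketch, not the original code: out_self/out_peer are sigmoid
    # outputs; the peer's predictions are treated as constants via detach().
    eps = 1e-7
    p_self = out_self.clamp(eps, 1 - eps)
    p_peer = out_peer.detach().clamp(eps, 1 - eps)
    # KLDivLoss expects log-probabilities as input, probabilities as target.
    return loss_CE(out_self, target) + loss_KLD(p_self.log(), p_peer)
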
Example 29
    def test(pathDirData, pathFileTest, pathModel, nnArchitecture,
             nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop,
             launchTimeStamp):

        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
            'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
        ]

        cudnn.benchmark = True

        # model

        if nnArchitecture == 'DenseNet121':
            model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'VGG16':
            model = VGG16(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'ResNet50':
            model = ResNet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'GoogLeNet':
            model = GoogLeNet(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'AlexNet':
            model = AlexNet(nnClassCount, nnIsTrained).cuda()

        import re
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$'
        )

        model = torch.nn.DataParallel(model).cuda()
        modelCheckpoint = torch.load(pathModel)
        state_dict = modelCheckpoint['state_dict']

        for key in list(state_dict.keys()):
            res = pattern.match(key)
            if res:
                new_key = res.group(1) + res.group(2)
                state_dict[new_key] = state_dict[key]
                del state_dict[key]
        model.load_state_dict(state_dict)
        print("Loaded Checkpoint")

        # data preprocessing
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])

        # load dataset

        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(
            transforms.Lambda(lambda crops: torch.stack(
                [normalize(crop) for crop in crops])))
        transformSequence = transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData,
                                       pathDatasetFile=pathFileTest,
                                       transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest,
                                    batch_size=trBatchSize,
                                    num_workers=8,
                                    shuffle=False,
                                    pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()
        with torch.no_grad():
            for i, (input, target) in enumerate(dataLoaderTest):
                #print (i, input, target)
                target = target.cuda()
                outGT = torch.cat((outGT, target), 0)
                #print("GT", outGT)
                bs, n_crops, c, h, w = input.size()
                varInput = input.view(-1, c, h, w).cuda()

                out = model(varInput)
                outMean = out.view(bs, n_crops, -1).mean(1)
                outPRED = torch.cat((outPRED, outMean.data), 0)

                #print("PRED", outPRED)
        aurocIndividual, fpr, tpr, thresholds = ChexnetTrainer.computeAUROC(
            outGT, outPRED, nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(0, len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])

        np.save('result/visualization/roc_auc.npy', aurocIndividual)
        np.save('result/visualization/fpr.npy', fpr)
        np.save('result/visualization/tpr.npy', tpr)
        np.save('result/visualization/threshold.npy', thresholds)


#--------------------------------------------------------------------------------