Example #1
def get_SENet(net_type):
    if net_type == "senet50":
        model = senet.se_resnet50(pretrained=True)
    elif net_type == "senet101":
        model = senet.se_resnet101(pretrained=True)
    else:
        raise ValueError("unknown SE-ResNet type: " + net_type)

    return model
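A minimal usage sketch (hypothetical; assumes the senet module used above is importable):

# Hypothetical usage of get_SENet.
model = get_SENet("senet50")   # pretrained SE-ResNet-50
model.eval()                   # switch to inference mode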
Example #2
    def __init__(self, backbone1, backbone2, drop, pretrained=True):
        super(MultiModalNet, self).__init__()

        self.visit_model = DPN26()
        if backbone1 == 'se_resnext101_32x4d':
            self.img_encoder = se_resnext101_32x4d(9, None)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'se_resnext50_32x4d':
            self.img_encoder = se_resnext50_32x4d(9, None)

            print(
                "load pretrained model from /home/zxw/2019BaiduXJTU/se_resnext50_32x4d-a260b3a4.pth"
            )
            state_dict = torch.load(
                '/home/zxw/2019BaiduXJTU/se_resnext50_32x4d-a260b3a4.pth')

            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            self.img_encoder.load_state_dict(state_dict, strict=False)

            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'se_resnext26_32x4d':
            self.img_encoder = se_resnext26_32x4d(9, None)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'multiscale_se_resnext':
            self.img_encoder = multiscale_se_resnext(9)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'multiscale_se_resnext_cat':
            self.img_encoder = multiscale_se_resnext(9)
            self.img_fc = nn.Linear(1024, 256)

        elif backbone1 == 'multiscale_se_resnext_HR':
            self.img_encoder = multiscale_se_resnext_HR(9)
            self.img_fc = nn.Linear(2048, 256)

        elif backbone1 == 'se_resnet50':
            self.img_encoder = se_resnet50(9, None)
            print(
                "load pretrained model from /home/zxw/2019BaiduXJTU/se_resnet50-ce0d4300.pth"
            )
            state_dict = torch.load(
                '/home/zxw/2019BaiduXJTU/se_resnet50-ce0d4300.pth')

            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            self.img_encoder.load_state_dict(state_dict, strict=False)

            self.img_fc = nn.Linear(2048, 256)

        else:
            raise ValueError("unknown backbone1: " + backbone1)  # otherwise self.img_encoder would be undefined

        self.dropout = nn.Dropout(0.5)
        self.cls = nn.Linear(512, 9)
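The constructor only builds the encoders and heads; a plausible forward pass is sketched below. This is a minimal sketch, not the original code: it assumes the image encoder exposes a `features` map with 2048 channels, that DPN26 returns a 256-d visit feature (so the concatenated 512-d vector matches self.cls), and that torch.nn.functional is imported as F.

    # Hypothetical forward pass; everything below is an assumption, not the source.
    def forward(self, x_img, x_visit):
        x = self.img_encoder.features(x_img)          # assumed (B, 2048, H, W) feature map
        x = F.adaptive_avg_pool2d(x, 1).flatten(1)    # (B, 2048)
        img_feat = self.img_fc(x)                     # (B, 256)
        visit_feat = self.visit_model(x_visit)        # (B, 256), assumed DPN26 output
        feat = torch.cat([img_feat, visit_feat], 1)   # (B, 512)
        return self.cls(self.dropout(feat))           # (B, 9) logits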
Example #3
    def __init__(self,
                 num_classes,
                 loss={'xent'},
                 stride=2,
                 droprate=0.5,
                 **kwargs):
        super(ResNet50TA, self).__init__()
        self.loss = loss

        model_ft = senet.se_resnet50(pretrained=True)

        model_ft.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.model = model_ft

        self.classifier = ClassBlock(2048 + 1024, num_classes, droprate)
        self.att_gen = 'softmax'  # method for attention generation: softmax or sigmoid
        self.feat_dim = 1024 + 2048  # feature dimension
        self.middle_dim = 256  # middle layer dimension

        self.attention_conv = nn.Conv2d(
            2048 + 1024, self.middle_dim,
            [1, 1])  # 7, 4 corresponds to 224, 112 input image size

        self.attention_tconv = nn.Conv1d(self.middle_dim, 1, 3, padding=1)
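The attention layers are defined but never applied in this excerpt. Below is a hedged sketch of the softmax temporal-attention forward pass they imply, for a clip input of shape (b, t, c, h, w); the self.model.features accessor, the 3072-channel per-frame map, and the F alias for torch.nn.functional are assumptions.

    # Hypothetical forward pass illustrating softmax temporal attention.
    def forward(self, x):
        b, t = x.size(0), x.size(1)
        x = x.view(b * t, *x.size()[2:])
        fmap = self.model.features(x)                         # assumed (b*t, 3072, h, w)
        a = F.relu(self.attention_conv(fmap))                 # (b*t, 256, h, w)
        a = a.view(b, t, self.middle_dim, -1).mean(-1)        # spatial average -> (b, t, 256)
        a = F.relu(self.attention_tconv(a.permute(0, 2, 1)))  # temporal conv -> (b, 1, t)
        a = F.softmax(a.view(b, t), dim=1)                    # attention weights over frames
        f = F.adaptive_avg_pool2d(fmap, 1).view(b, t, -1)     # (b, t, 3072)
        f = (f * a.unsqueeze(-1)).sum(1)                      # attention-weighted pooling
        return self.classifier(f)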
Example #4
def infer():
    args = get_args()
    model = se_resnet50(9, None)
    #model = nn.DataParallel(model,device_ids=[int(i) for i in args.gpus.split(',') ])
    #model = nn.DataParallel(model, device_ids=[0])

    model.load_state_dict(torch.load(args.model_path))
    model.cuda()
    model.eval()

    testdata = testDataset(args, cfg)
    testloader = DataLoader(testdata, batch_size=1, num_workers=2)

    ans = []
    with torch.no_grad():
        for img, name in tqdm.tqdm(testloader):
            img = img.cuda()  # Variable() is unnecessary inside torch.no_grad()
            output = model(img)
            # +1 maps the 0-based class index to the dataset's 1-based CategoryID.
            pred = output.argmax(dim=1).item() + 1
            # NB: str.strip('.jpg') would strip any of the characters '.', 'j', 'p', 'g'
            # from both ends of the name; replace() removes only the extension.
            ans.append([str(name[0]).replace('.jpg', ''), "00" + str(pred)])

    result = pd.DataFrame(ans, columns=['AreaID', 'CategoryID'])
    result.sort_values(by=['AreaID'], inplace=True)
    result.to_csv('submit.csv', index=False, header=False, sep='\t')
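For reference, the file written above is tab-separated with no header, so it can be read back for a quick sanity check:

# Illustrative check of the submission file produced above.
check = pd.read_csv('submit.csv', sep='\t', header=None,
                    names=['AreaID', 'CategoryID'], dtype=str)
print(check.head())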
Example #5
        # Head of the truncated call, reconstructed by symmetry with the 'accu'
        # call below; the 'loss' tag and the eval_loss name are assumptions.
        writer.add_scalars('loss', {
            'train': train_loss,
            'val': eval_loss
        }, epoch + 1)
        writer.add_scalars('accu', {
            'train': train_acc,
            'val': eval_acc
        }, epoch + 1)
        train_loss = 0.0
        train_acc = 0.0
        scheduler.step()

if __name__ == '__main__':
    create_dir(cfg['checkpoint_dir'])

    # if not os.path.exists(cfg['val_path']):
    #     split_dataset(cfg)

    #img_mean_std(cfg)
    args = get_args()

    model = se_resnet50(9, None)
    optimizer = optim.SGD(
        filter(lambda p: p.requires_grad, model.parameters()),
        momentum=0.9, lr=cfg['base_lr'], weight_decay=0.001
    )
    scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=cfg['gamma'])
    #scheduler = lr_scheduler.ReduceLROnPlateau(optimizer,mode='min',factor=0.2,patience=3,verbose=True,)

    #summary(model,(3,224,224))

    train(model, optimizer, scheduler, cfg, args)
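The body of train() is not shown; the sketch below is a hypothetical skeleton consistent with the logging fragment at the top of this example. trainDataset, evaluate, and the cfg keys are placeholders, and the writer is assumed to be torch.utils.tensorboard.SummaryWriter.

from torch.utils.tensorboard import SummaryWriter

def train(model, optimizer, scheduler, cfg, args):
    # Hypothetical skeleton; dataset, evaluation helper, and cfg keys are assumptions.
    writer = SummaryWriter(cfg['checkpoint_dir'])
    criterion = nn.CrossEntropyLoss()
    trainloader = DataLoader(trainDataset(args, cfg),
                             batch_size=cfg['batch_size'], shuffle=True, num_workers=2)
    model.cuda().train()
    for epoch in range(cfg['max_epoch']):
        train_loss, train_acc = 0.0, 0.0
        for img, label in trainloader:
            img, label = img.cuda(), label.cuda()
            optimizer.zero_grad()
            output = model(img)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            train_acc += (output.argmax(1) == label).float().mean().item()
        train_loss /= len(trainloader)
        train_acc /= len(trainloader)
        eval_loss, eval_acc = evaluate(model, criterion, cfg)  # hypothetical helper
        writer.add_scalars('loss', {'train': train_loss, 'val': eval_loss}, epoch + 1)
        writer.add_scalars('accu', {'train': train_acc, 'val': eval_acc}, epoch + 1)
        scheduler.step()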

Example #6
    for images, fea, fea2, fnames in testloader:
        preds = torch.sigmoid(
            model(images.cuda(), fea.cuda(), fea2.cuda()).detach())
        test_outputs.append(preds.cpu().numpy())
        test_fnames.extend(fnames)

    test_preds = pd.DataFrame(data=np.concatenate(test_outputs),
                              index=test_fnames,
                              columns=map(str, range(34)))
    test_preds = test_preds.groupby(level=0).mean()
    testres.append(test_preds)
    print(2)
test2_se_resnet101 = (testres[0] + testres[1] + testres[2] + testres[3] +
                      testres[4]) / 5

model = Model(se_resnet50(num_classes=34, pretrained=None)).cuda()
testres = []
for foldNum in range(5):
    testData = TestData(subA,
                        'testA',
                        Feat_RR,
                        transform=transforms.Compose([ToTensor()]))
    testloader = DataLoader(testData, batch_size=64, shuffle=False)
    model.load_state_dict(
        torch.load('se_resnet50/weight_best_%s.pt' % str(foldNum)))
    model.cuda()
    model.eval()
    test_outputs = []
    test_fnames = []
    for images, fea, fea2, fnames in testloader:
        preds = torch.sigmoid(
            model(images.cuda(), fea.cuda(), fea2.cuda()).detach())
Example #7
    def train(pathDirData, pathFileTrain, pathFileVal, nnArchitecture, nnIsTrained, nnClassCount, trBatchSize, trMaxEpoch, transResize, transCrop, launchTimestamp, checkpoint):


        #-------------------- SETTINGS: NETWORK ARCHITECTURE
        if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'RES-NET-50': model = ResNet50(nnClassCount, nnIsTrained).cuda()
        #elif nnArchitecture == 'SE-RES-NET-50': model = SE_ResNet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'SE-RES-NET-50': model = senet.se_resnet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'SE-NET-154': model = senet.senet154(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'SE-DENSE-NET-121': model = SE_DenseNet121(nnClassCount, nnIsTrained).cuda()
        else: raise ValueError('unknown architecture: ' + nnArchitecture)  # otherwise model would be undefined below

        model = torch.nn.DataParallel(model).cuda()

        #-------------------- SETTINGS: DATA TRANSFORMS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

        transformList = []
        transformList.append(transforms.RandomResizedCrop(transCrop))
        transformList.append(transforms.RandomHorizontalFlip())
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)
        transformSequence=transforms.Compose(transformList)

        #-------------------- SETTINGS: DATASET BUILDERS
        datasetTrain = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileTrain, transform=transformSequence)
        datasetVal =   DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileVal, transform=transformSequence)

        dataLoaderTrain = DataLoader(dataset=datasetTrain, batch_size=trBatchSize, shuffle=True,  num_workers=24, pin_memory=True)
        dataLoaderVal = DataLoader(dataset=datasetVal, batch_size=trBatchSize, shuffle=False, num_workers=24, pin_memory=True)

        #-------------------- SETTINGS: OPTIMIZER & SCHEDULER
        optimizer = optim.Adam(model.parameters(), lr=0.00001, betas=(0.9, 0.999), eps=1e-08, weight_decay=1e-5)
        # optimizer = optim.SGD(model.parameters(), lr=0.00001, momentum=0.9, weight_decay=1e-5)

        scheduler = ReduceLROnPlateau(optimizer, factor=0.1, patience=5, mode='min')

        #-------------------- SETTINGS: LOSS
        loss = torch.nn.BCELoss()  # size_average is deprecated; mean reduction is the default

        #---- Load checkpoint
        if checkpoint is not None:
            modelCheckpoint = torch.load(checkpoint)
            model.load_state_dict(modelCheckpoint['state_dict'])
            optimizer.load_state_dict(modelCheckpoint['optimizer'])


        #---- TRAIN THE NETWORK

        lossMIN = 100000

        for epochID in range(trMaxEpoch):

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampSTART = timestampDate + '-' + timestampTime

            ChexnetTrainer.epochTrain(model, dataLoaderTrain, optimizer, scheduler, trMaxEpoch, nnClassCount, loss)
            lossVal, losstensor = ChexnetTrainer.epochVal(model, dataLoaderVal, optimizer, scheduler, trMaxEpoch, nnClassCount, loss)

            timestampTime = time.strftime("%H%M%S")
            timestampDate = time.strftime("%d%m%Y")
            timestampEND = timestampDate + '-' + timestampTime

            scheduler.step(losstensor.item())  # .data[0] indexing fails on modern PyTorch

            if lossVal < lossMIN:
                lossMIN = lossVal
                torch.save({'epoch': epochID + 1, 'state_dict': model.state_dict(), 'best_loss': lossMIN, 'optimizer': optimizer.state_dict()}, 'm-' + launchTimestamp + '.pth.tar')
                print('Epoch [' + str(epochID + 1) + '] [save] [' + timestampEND + '] loss= ' + str(lossVal))
            else:
                print('Epoch [' + str(epochID + 1) + '] [----] [' + timestampEND + '] loss= ' + str(lossVal))
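epochTrain and epochVal are called but not shown; the sketch below is a plausible epochTrain matching the call above, not the original body. BCELoss expects probabilities, so the model is assumed to end in a sigmoid.

    # Hypothetical body of ChexnetTrainer.epochTrain.
    def epochTrain(model, dataLoader, optimizer, scheduler, epochMax, classCount, loss):
        model.train()
        for input, target in dataLoader:
            target = target.cuda(non_blocking=True)
            output = model(input.cuda())      # assumed to end in a sigmoid
            lossvalue = loss(output, target)
            optimizer.zero_grad()
            lossvalue.backward()
            optimizer.step()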
Example #8
    def test(pathDirData, pathFileTest, pathModel, nnArchitecture, nnClassCount, nnIsTrained, trBatchSize, transResize, transCrop, launchTimeStamp):


        CLASS_NAMES = [ 'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass', 'Nodule', 'Pneumonia',
                'Pneumothorax', 'Consolidation', 'Edema', 'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia']

        cudnn.benchmark = True

        #-------------------- SETTINGS: NETWORK ARCHITECTURE, MODEL LOAD
        if nnArchitecture == 'DENSE-NET-121': model = DenseNet121(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-169': model = DenseNet169(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'DENSE-NET-201': model = DenseNet201(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'RES-NET-50': model = ResNet50(nnClassCount, nnIsTrained).cuda()
        #elif nnArchitecture == 'SE-RES-NET-50': model = SE_ResNet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'SE-RES-NET-50': model = senet.se_resnet50(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'SE-NET-154': model = senet.senet154(nnClassCount, nnIsTrained).cuda()
        elif nnArchitecture == 'SE-DENSE-NET-121': model = SE_DenseNet121(nnClassCount, nnIsTrained).cuda()
        else: raise ValueError('unknown architecture: ' + nnArchitecture)  # otherwise model would be undefined below

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['state_dict'])

        #-------------------- SETTINGS: DATA TRANSFORMS, TEN CROPS
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

        #-------------------- SETTINGS: DATASET BUILDERS
        transformList = []
        transformList.append(transforms.Resize(transResize))
        transformList.append(transforms.TenCrop(transCrop))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
        transformList.append(transforms.Lambda(lambda crops: torch.stack([normalize(crop) for crop in crops])))
        transformSequence=transforms.Compose(transformList)

        datasetTest = DatasetGenerator(pathImageDirectory=pathDirData, pathDatasetFile=pathFileTest, transform=transformSequence)
        dataLoaderTest = DataLoader(dataset=datasetTest, batch_size=trBatchSize, num_workers=8, shuffle=False, pin_memory=True)

        outGT = torch.FloatTensor().cuda()
        outPRED = torch.FloatTensor().cuda()

        model.eval()

        with torch.no_grad():  # replaces the deprecated volatile=True flag
            for i, (input, target) in enumerate(dataLoaderTest):

                target = target.cuda()
                outGT = torch.cat((outGT, target), 0)

                bs, n_crops, c, h, w = input.size()

                out = model(input.view(-1, c, h, w).cuda())
                outMean = out.view(bs, n_crops, -1).mean(1)

                outPRED = torch.cat((outPRED, outMean.data), 0)

        aurocIndividual = ChexnetTrainer.computeAUROC(outGT, outPRED, nnClassCount)
        aurocMean = np.array(aurocIndividual).mean()

        print('AUROC mean ', aurocMean)

        for i in range(len(aurocIndividual)):
            print(CLASS_NAMES[i], ' ', aurocIndividual[i])


        return
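computeAUROC is not shown either; below is a plausible implementation with scikit-learn, matching the call signature above (hypothetical, not the original body):

    # Hypothetical body of ChexnetTrainer.computeAUROC: per-class AUROC.
    def computeAUROC(dataGT, dataPRED, classCount):
        from sklearn.metrics import roc_auc_score
        npGT = dataGT.cpu().numpy()
        npPRED = dataPRED.cpu().numpy()
        return [roc_auc_score(npGT[:, i], npPRED[:, i]) for i in range(classCount)]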