def get_prediction(image_bytes):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    num_classes = 2
    images = get_tensor(image_bytes).to(device)
    model = DenseNet121(num_classes).to(device)
    model.load_state_dict(
        torch.load("/content/pneumonia_model.pth", map_location=device))
    model.eval()

    # Hook the last dense layer so its activations are kept for the CAM.
    activated_features = SaveFeatures(
        model._modules.get('densenet121').features.denseblock4.denselayer16)
    prediction = model(images)
    pred_probabilities = F.softmax(prediction, dim=1).data.squeeze()
    activated_features.remove()

    # Class-activation map: weight the hooked features by the classifier
    # weights of the predicted class.
    weight_softmax_params = list(
        model._modules.get('densenet121').classifier[0].parameters())
    weight_softmax = np.squeeze(weight_softmax_params[0].cpu().data.numpy())
    class_idx = topk(pred_probabilities, 1)[1].int()
    overlay = getCAM(activated_features.features, weight_softmax, class_idx)

    # Resize the overlay to the input resolution before returning it.
    return class_idx, skimage.transform.resize(overlay[0], images.shape[2:4])
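SaveFeatures and getCAM are imported from the project's extras module (see Example #10); their definitions are not shown on this page. A minimal sketch of what a typical forward-hook CAM implementation looks like, assuming batch size 1 and class_idx as a plain int:

import numpy as np

class SaveFeatures:
    """Stores a layer's activations via a forward hook."""
    def __init__(self, module):
        self.hook = module.register_forward_hook(self.hook_fn)

    def hook_fn(self, module, input, output):
        self.features = output.detach().cpu().numpy()

    def remove(self):
        self.hook.remove()

def getCAM(feature_conv, weight_fc, class_idx):
    # feature_conv: (1, nc, h, w) hooked activations
    # weight_fc:    (num_classes, nc) classifier weights
    _, nc, h, w = feature_conv.shape
    cam = weight_fc[class_idx].dot(feature_conv.reshape(nc, h * w))
    cam = cam.reshape(h, w)
    cam = cam - cam.min()
    cam = cam / cam.max()  # normalize to [0, 1]
    return [cam]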
Example #2
def get_config(lr=None, bs=None, opt=None, mm=None, wd=None):
    # Sample any hyperparameter that was not passed in explicitly.
    if lr is None:
        lr = np.random.normal(0.15, 1e-3)
        while lr < 0:
            lr = np.random.normal(0.15, 1e-3)
    if bs is None:
        bs = random.choice(BATCHES)
    if opt is None:
        opt = random.choice(OPTIMS)
    if mm is None:
        mm = np.random.normal(0.9, 1e-2)
    if wd is None:
        wd = np.random.normal(5e-6, 1e-7)
    model = DenseNet121()
    optimiser = None
    # Make optimiser
    if opt == 'Adam':
        optimiser = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=wd)
    elif opt == 'SGD':
        optimiser = torch.optim.SGD(model.parameters(),
                                    lr=lr,
                                    weight_decay=wd,
                                    momentum=mm)
    elif opt == 'RMSprop':
        optimiser = torch.optim.RMSprop(model.parameters(),
                                        lr=lr,
                                        weight_decay=wd,
                                        momentum=mm)

    return HP_Config(bs, lr, optimiser, opt, mm, wd, model)
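A possible random-search driver for get_config; BATCHES, OPTIMS, and the HP_Config container are assumed to be defined alongside it, and the field names below are guesses:

BATCHES = [16, 32, 64, 128]            # assumed search space
OPTIMS = ['Adam', 'SGD', 'RMSprop']

# Draw ten random configurations for a random search.
configs = [get_config() for _ in range(10)]
for cfg in configs:
    print(cfg.bs, cfg.lr, cfg.opt)     # HP_Config field names are assumptions

Example #3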
def get_result(threshold=[0.2]):
    submit = pd.read_csv(config['sample_submission'])

    if not os.path.exists("./predict_result2/"):
        os.mkdir("./predict_result2")

    state_dict = torch.load(config['load_model_path'])

    epoch = ntpath.basename(config['load_model_path'])

    net = DenseNet121()
    print(config['load_model_path'])
    net.load_state_dict(state_dict['state_dict'])  #load the model
    net.cuda()
    net.eval()
    for thresh in threshold:
        predicted = []
        draw_predict = []
        for name in tqdm(submit['Id']):

            path = os.path.join(config['path_to_test'], name)
            image1 = load_image(path,
                                (config['SIZE'], config['SIZE'], 3)) / 255.
            image2 = np.fliplr(image1)
            image3 = np.flipud(image1)
            image4 = np.rot90(image1)
            image5 = np.rot90(image4)
            image6 = np.rot90(image5)

            images = np.stack([image1, image2, image3, image4, image5,
                               image6]).transpose(0, 3, 1, 2).copy()
            images = torch.from_numpy(images)
            with torch.no_grad():
                score_predict = net(images.float().cuda())
            score_predict = torch.sigmoid(score_predict)  # sigmoid activation
            score_predict = score_predict.mean(dim=0).cpu().numpy()  # to numpy
            draw_predict.append(score_predict)
            label_predict = np.arange(28)[score_predict >= thresh]

            # If no class clears the threshold, fall back to the top-2 scores.
            if label_predict.size == 0:
                print("no label above threshold", score_predict)
                label_predict = np.argsort(score_predict)[-2:]
                print(label_predict)

            str_predict_label = ' '.join(str(l) for l in label_predict)
            predicted.append(str_predict_label)

        submit['Predicted'] = predicted
        # Save the scores for all images, not just the last one.
        np.save(
            './predict_result2/draw_predict_DenseNet121_512_' + str(thresh) +
            '_' + str(epoch) + '.npy', np.asarray(draw_predict))
        submit.to_csv("./predict_result2/" + str(epoch) + ".csv", index=False)
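The loop above averages sigmoid scores over six augmented views (identity, two flips, three rotations). A self-contained illustration of the same stacking and averaging, with stand-in data:

import numpy as np
import torch

image = np.random.rand(512, 512, 3)        # stand-in for load_image(...) / 255.
views = [image, np.fliplr(image), np.flipud(image)]
views += [np.rot90(image, k) for k in (1, 2, 3)]

batch = np.stack(views).transpose(0, 3, 1, 2).copy()   # (6, 3, 512, 512)
scores = torch.sigmoid(torch.randn(6, 28))             # stand-in for net(batch)
mean_score = scores.mean(dim=0)                        # one 28-class score vector

Example #4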
def main():
    torch.manual_seed(0)
    train_dataset_info = read_train_dataset_info()
    indexes = np.arange(train_dataset_info.shape[0])
    np.random.shuffle(indexes)
    train_indexes, valid_indexes = train_test_split(indexes, test_size=0.1,
                                                    random_state=8)
    train_dataset = DataLoad_protein(train_dataset_info[train_indexes],
                                     config['batch_size'],
                                     (config['SIZE'], config['SIZE'], 4),
                                     augument=True)
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_size=config['batch_size'],
        pin_memory=True,
        num_workers=config['num_workers'],
    )
    net = DenseNet121(pretrained=True, start_first="start_training")

    start_epoch = config['start_epoch']

    if config['resume'] is not None:
        print("load the model", config['resume'])
        model = torch.load(config['resume'])
        start_epoch = model['epoch']
        net.load_state_dict(model['state_dict'])

    net.cuda()
    # Snapshot one backbone weight and the two head weights so we can check
    # after training which of them were actually updated.
    first_conv = net.state_dict()[
        'base_model.features.denseblock3.denselayer22.conv2.weight'].clone()
    second_classifier = net.state_dict()['S.conv1.weight'].clone()
    second_separ = net.state_dict()['classifier.weight'].clone()

    loss = BCEWithLogitsLoss()
    # Only parameters with requires_grad=True are optimised.
    opt = torch.optim.Adam(
        filter(lambda p: p.requires_grad, net.parameters()),
        lr=1e-3,
    )

    for epoch in range(start_epoch + 1, start_epoch + 6):
        print("epoch is", epoch)
        train(train_loader, net, loss, epoch, opt, config['save_freq'],
              config['save_dir'])

    # Compare against the snapshots: torch.equal prints True when a tensor
    # did not change, i.e. the layer stayed frozen.
    first_conv_after = net.state_dict()[
        'base_model.features.denseblock3.denselayer22.conv2.weight'].clone()
    second_classifier_after = net.state_dict()['S.conv1.weight'].clone()
    second_separ_after = net.state_dict()['classifier.weight'].clone()
    print(net.state_dict()[
        'base_model.features.denseblock3.denselayer22.conv2.weight'])
    print("the training parameters", net.state_dict()['S.conv1.weight'],
          net.state_dict()['classifier.weight'])
    print(torch.equal(first_conv_after, first_conv))
    print(torch.equal(second_classifier, second_classifier_after))
    print(torch.equal(second_separ, second_separ_after))
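The filter(lambda p: p.requires_grad, ...) above only optimises unfrozen parameters, and the torch.equal checks verify which tensors actually changed. A minimal sketch of the freezing pattern this relies on (the 'base_model.' prefix is taken from the state_dict keys above):

# Freeze the pretrained backbone; train only the new heads.
for name, param in net.named_parameters():
    if name.startswith('base_model.'):
        param.requires_grad = False

opt = torch.optim.Adam(
    filter(lambda p: p.requires_grad, net.parameters()), lr=1e-3)

Example #5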
def build_network(train_dataset, val_dataset):

    train_dataset = DataLoad_protein(train_dataset,
                                     config['batch_size'],
                                     (config['SIZE'], config['SIZE'], 4),
                                     augument=True)
    val_dataset = DataLoad_protein(val_dataset,
                                   config['batch_size'],
                                   (config['SIZE'], config['SIZE'], 4),
                                   augument=False)

    train_loader = DataLoader(
        dataset=train_dataset,
        shuffle=True,
        batch_size=config['batch_size'],
        pin_memory=True,
        num_workers=config['num_workers'],
    )
    validation_loader = DataLoader(dataset=val_dataset,
                                   shuffle=True,
                                   batch_size=3,
                                   pin_memory=True,
                                   num_workers=config['num_workers'])

    net = DenseNet121(pretrained=True)

    start_epoch = config['start_epoch']

    print("load the fine model", config['finetune_model'])
    fine_model = torch.load(config['finetune_model'])
    net.load_state_dict(fine_model['state_dict'])

    if config['resume'] is not None:
        model = torch.load(config['resume'])
        start_epoch = model['epoch']
        net.load_state_dict(model['state_dict'])

    opt = torch.optim.Adam(net.parameters(), lr=config['lr'])

    loss = BCEWithLogitsLoss()

    return train_loader, validation_loader, net, loss, opt, start_epoch
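A plausible call site for build_network, assuming the read_train_dataset_info and train_test_split helpers used in the previous example:

info = read_train_dataset_info()
train_info, val_info = train_test_split(info, test_size=0.1, random_state=8)
train_loader, val_loader, net, loss, opt, start_epoch = build_network(
    train_info, val_info)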
Example #6
def main():
    """
    Script entrypoint
    """
    t_start = datetime.now()
    header = ["Start Time", "End Time", "Duration (s)"]
    row = [t_start.strftime(DEFAULT_DATE_TIME_FORMAT)]

    dnn = DenseNet121()

    # show class indices
    print('****************')
    for cls, idx in dnn.train_batches.class_indices.items():
        print('Class #{} = {}'.format(idx, cls))
    print('****************')

    print(dnn.model.summary())

    dnn.train(t_start,
              epochs=dnn.num_epochs,
              batch_size=dnn.batch_size,
              training=dnn.train_batches,
              validation=dnn.valid_batches)

    # save the full model with an 'old' suffix, then weights and architecture separately
    dnn.model.save(dnn.file_weights + 'old')

    dnn.model.save_weights(dnn.file_weights)
    with open(dnn.file_architecture, 'w') as f:
        f.write(dnn.model.to_json())

    t_end = datetime.now()
    difference_in_seconds = get_difference_in_seconds(t_start, t_end)

    row.append(t_end.strftime(DEFAULT_DATE_TIME_FORMAT))
    row.append(str(difference_in_seconds))

    append_row_to_csv(complete_run_timing_file, header)
    append_row_to_csv(complete_run_timing_file, row)
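get_difference_in_seconds and append_row_to_csv are project utilities; minimal versions consistent with how they are called here:

import csv

def get_difference_in_seconds(t_start, t_end):
    # both arguments are datetime objects
    return (t_end - t_start).total_seconds()

def append_row_to_csv(path, row):
    with open(path, 'a', newline='') as f:
        csv.writer(f).writerow(row)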
Example #7
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # initialize and load the model
    model = DenseNet121(N_CLASSES).to(device)

    if torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model).to(device)

    if os.path.isfile(args.model_path):
        model.load_state_dict(torch.load(args.model_path, map_location=device))
        print('model state has loaded')
    else:
        print('=> model state file not found')

    model.eval()
    dummy_input = torch.randn(args.batch_size, 3, 224, 224).to(device)
    torch_out = model(dummy_input)
    # export the underlying module, not the DataParallel wrapper
    export_model = model.module if isinstance(model, torch.nn.DataParallel) else model
    torch.onnx.export(export_model,
                      dummy_input,
                      'model/densenet121.onnx',
                      verbose=False)
    print('ONNX model exported.')
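A quick sanity check of the exported graph with onnxruntime (assuming it is installed), comparing against the PyTorch output computed above:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('model/densenet121.onnx')
input_name = sess.get_inputs()[0].name
onnx_out = sess.run(None, {input_name: dummy_input.cpu().numpy()})[0]
np.testing.assert_allclose(torch_out.detach().cpu().numpy(), onnx_out,
                           rtol=1e-3, atol=1e-5)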
Example #8
def main(args):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('Using %s device.' % device)

    # initialize and load the model
    net = DenseNet121(N_CLASSES).to(device)

    if torch.cuda.device_count() > 1:
        net = torch.nn.DataParallel(net).to(device)

    net.load_state_dict(torch.load(args.model_path, map_location=device))
    print('model state has loaded')

    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])

    test_dataset = ChestXrayDataSet(
        data_dir=DATA_DIR,
        image_list_file=TEST_IMAGE_LIST,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.TenCrop(224),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(
                lambda crops: torch.stack([normalize(crop) for crop in crops]))
        ]))
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=args.batch_size,
                             shuffle=False)

    # initialize the ground truth and output tensor
    gt = torch.FloatTensor().to(device)
    pred = torch.FloatTensor().to(device)

    # switch to evaluate mode
    net.eval()

    for index, (data, target) in enumerate(test_loader):
        start_time = timeit.default_timer()

        target = target.to(device)
        bs, n_crops, c, h, w = data.size()
        data = data.view(-1, c, h, w).to(device)

        with torch.no_grad():
            output = net(data)

        output_mean = output.view(bs, n_crops, -1).mean(1)

        gt = torch.cat((gt, target))
        pred = torch.cat((pred, output_mean))

        print('\rbatch %03d/%03d %6.3fsec' %
              (index, len(test_loader), (timeit.default_timer() - start_time)),
              end='')

    AUCs = []
    for i in range(N_CLASSES):
        AUCs.append(roc_auc_score(gt.cpu()[:, i], pred.cpu()[:, i]))
    print('The average AUC is %6.3f' % np.mean(AUCs))

    for i in range(N_CLASSES):
        print('The AUC of %s is %6.3f' % (CLASS_NAMES[i], AUCs[i]))
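The view/mean pair above folds the ten crops into the batch dimension and averages their predictions per image. The shape bookkeeping, with stand-in tensors:

import torch

bs, n_crops, c, h, w = 4, 10, 3, 224, 224
data = torch.randn(bs, n_crops, c, h, w)
flat = data.view(-1, c, h, w)                        # (40, 3, 224, 224)
logits = torch.randn(flat.size(0), 14)               # stand-in for net(flat)
output_mean = logits.view(bs, n_crops, -1).mean(1)   # (4, 14)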
Example #9
def main():
    cfg = Config()

    # Redirect logs to both console and file.
    if cfg.log_to_file:
        ReDirectSTD(cfg.stdout_file, 'stdout', False)
        ReDirectSTD(cfg.stderr_file, 'stderr', False)

    # Lazily create SummaryWriter
    writer = None

    TVT, TMO = set_devices(cfg.sys_device_ids)

    if cfg.seed is not None:
        set_seed(cfg.seed)

    # Dump the configurations to log.
    import pprint
    print('-' * 60)
    print('cfg.__dict__')
    pprint.pprint(cfg.__dict__)
    print('-' * 60)

    ###########
    # Dataset #
    ###########

    if not cfg.only_test:
        train_set = create_dataset(**cfg.train_set_kwargs)
        # The combined dataset does not provide val set currently.
        val_set = None if (cfg.dataset == 'combined' or
                           cfg.model_type != 'resnet50') else create_dataset(
                               **cfg.val_set_kwargs)

    ###########
    # Models  #
    ###########
    if cfg.add_softmax_loss:
        model = DenseNet121_classifier(751)
    else:
        if cfg.model_type == 'resnet50':
            model = Model(last_conv_stride=cfg.last_conv_stride)
        elif cfg.model_type == 'densenet121':
            model = DenseNet121()
        elif cfg.model_type == 'preActResnet50':
            model = PreActResNet50()
        elif cfg.model_type == 'resnet50mid':
            model = resnet50mid()

    # Model wrapper
    model_w = DataParallel(model)

    #############################
    # Criteria and Optimizers   #
    #############################

    tri_loss = TripletLoss(margin=cfg.margin)

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.base_lr,
                           weight_decay=cfg.weight_decay)

    # Bind them together to simplify the following usage.
    modules_optims = [model, optimizer]

    # May Transfer Models and Optims to Specified Device. Transferring optimizer
    # is to cope with the case when you load the checkpoint to a new device.
    TMO(modules_optims)
    # Softmax (cross-entropy) loss with label smoothing
    criterion_softmax = CrossEntropyLabelSmooth(751)

    ########
    # Test #
    ########

    def validate():
        if val_set.extract_feat_func is None:
            val_set.set_feat_func(ExtractFeature(model_w, TVT))
        print('\n=========> Test on validation set <=========\n')
        mAP, cmc_scores, _, _ = val_set.eval(
            normalize_feat=cfg.normalize_feature,
            to_re_rank=False,
            verbose=False)
        print()
        return mAP, cmc_scores[0]

    ############
    # Training #
    ############

    start_ep = 0
    for ep in range(start_ep, cfg.total_epochs):

        # Adjust Learning Rate
        if cfg.lr_decay_type == 'exp':
            adjust_lr_exp(optimizer, cfg.base_lr, ep + 1, cfg.total_epochs,
                          cfg.exp_decay_at_epoch)
        else:
            adjust_lr_staircase(optimizer, cfg.base_lr, ep + 1,
                                cfg.staircase_decay_at_epochs,
                                cfg.staircase_decay_multiply_factor)

        may_set_mode(modules_optims, 'train')

        # For recording precision, satisfying margin, etc
        prec_meter = AverageMeter()
        sm_meter = AverageMeter()
        dist_ap_meter = AverageMeter()
        dist_an_meter = AverageMeter()
        loss_meter = AverageMeter()

        ep_st = time.time()
        step = 0
        epoch_done = False
        while not epoch_done:

            step += 1
            step_st = time.time()

            ims, im_names, labels, mirrored, epoch_done = \
                train_set.next_batch()

            ims_var = Variable(TVT(torch.from_numpy(ims).float()))
            labels_t = TVT(torch.from_numpy(labels).long())

            if cfg.add_softmax_loss:
                feat, v = model_w(ims_var)
            else:
                feat = model_w(ims_var)

            loss, p_inds, n_inds, dist_ap, dist_an, dist_mat = global_loss(
                tri_loss,
                feat,
                labels_t,
                normalize_feature=cfg.normalize_feature)
            if cfg.add_softmax_loss:
                softmax_loss = criterion_softmax(v, labels_t)
                loss = ((1 - cfg.softmax_loss_weight) * loss
                        + cfg.softmax_loss_weight * softmax_loss)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            ############
            # Step Log #
            ############

            # precision
            prec = (dist_an > dist_ap).data.float().mean()
            # the proportion of triplets that satisfy margin
            sm = (dist_an > dist_ap + cfg.margin).data.float().mean()
            # average (anchor, positive) distance
            d_ap = dist_ap.data.mean()
            # average (anchor, negative) distance
            d_an = dist_an.data.mean()

            prec_meter.update(prec)
            sm_meter.update(sm)
            dist_ap_meter.update(d_ap)
            dist_an_meter.update(d_an)
            loss_meter.update(to_scalar(loss))

            if step % cfg.steps_per_log == 0:
                time_log = '\tStep {}/Ep {}, {:.2f}s'.format(
                    step,
                    ep + 1,
                    time.time() - step_st,
                )

                tri_log = (', prec {:.2%}, sm {:.2%}, '
                           'd_ap {:.4f}, d_an {:.4f}, '
                           'loss {:.4f}'.format(
                               prec_meter.val,
                               sm_meter.val,
                               dist_ap_meter.val,
                               dist_an_meter.val,
                               loss_meter.val,
                           ))

                log = time_log + tri_log
                print(log)

        #############
        # Epoch Log #
        #############

        time_log = 'Ep {}, {:.2f}s'.format(ep + 1, time.time() - ep_st)

        tri_log = (', prec {:.2%}, sm {:.2%}, '
                   'd_ap {:.4f}, d_an {:.4f}, '
                   'loss {:.4f}'.format(
                       prec_meter.avg,
                       sm_meter.avg,
                       dist_ap_meter.avg,
                       dist_an_meter.avg,
                       loss_meter.avg,
                   ))

        log = time_log + tri_log
        print(log)

        ##########################
        # Test on Validation Set #
        ##########################

        mAP, Rank1 = 0, 0
        if ((ep + 1) % cfg.epochs_per_val == 0) and (val_set is not None):
            mAP, Rank1 = validate()

        # save ckpt
        if cfg.log_to_file:
            save_weights(modules_optims[0], cfg.ckpt_file)
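TripletLoss and global_loss come from the re-ID codebase; at their core is the standard margin-based triplet objective. A minimal sketch:

import torch
import torch.nn.functional as F

def triplet_loss(anchor, positive, negative, margin=0.3):
    # pull (anchor, positive) together, push (anchor, negative) apart
    d_ap = F.pairwise_distance(anchor, positive)
    d_an = F.pairwise_distance(anchor, negative)
    return torch.clamp(d_ap - d_an + margin, min=0).mean()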
Example #10
import torch
import torch.nn as nn

from model import DenseNet121
from extras import SaveFeatures, getCAM

# device to train on
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# The two classes here are pneumonia and non-pneumonia.
# Hyperparameters.
num_classes = 2
num_epochs = 1000
batch_size = 64
learning_rate = 0.001

data_dir = "/content/drive/My Drive/Projects/Pneumonia detection/chest_xray/chest_xray/"
train_loader, val_loader, test_loader = loadTrainData(data_dir)  # project helper
model = DenseNet121(num_classes).to(device)  # pass pretrained=True for a pretrained model
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

"""##Model"""



# Training process
total_step = len(train_loader)
for epoch in range(num_epochs):
  for i, (images, labels) in enumerate(train_loader):
    # batch pair of images and labels from the data loader
    images = images.to(device)
    labels = labels.to(device)

    # the original snippet is truncated here; a standard step would be:
    outputs = model(images)
    loss = criterion(outputs, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
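The snippet ends mid-script; a hedged sketch of the evaluation pass that would normally follow, reusing model, device, and the test_loader created above:

model.eval()
correct, total = 0, 0
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        preds = model(images).argmax(dim=1)
        correct += (preds == labels).sum().item()
        total += labels.size(0)
print('Test accuracy: {:.2%}'.format(correct / total))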
Example #11
def pad_to_square(im):  # assumed signature; the original def line is missing
    im = im[:, :, None]
    # Pad the shorter side so the image becomes square.
    if im.shape[0] > im.shape[1]:
        padding = (int((im.shape[0] - im.shape[1]) / 2), 0)
    else:
        padding = (0, int((im.shape[1] - im.shape[0]) / 2))

    data_transforms = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Pad(padding, fill=0),
    ])
    pim = data_transforms(im)
    return pim


densenet = DenseNet121(14)
# densenet = addDropout(densenet, p=0)
saved_model_path = "../Models/model.pth.tar"
densenet.load_state_dict(load_dictionary(saved_model_path, map_location='cpu'))
if torch.cuda.is_available():
    densenet = densenet.cuda()

name = "Exp_64_512_0.00001_RandomLabel_4.0"
#Path to the experiment (it would be the github)
ExpDir = "/media/vince/MILA/ChestXrays/ALI/model/" + name
ExpDir = "/network/home/frappivi/ChestXrays/ALI/model/" + name

isize = 64
LS = 512  # latent space size
ColorsNumber = 1  # number of channels (always 1 for X-ray)
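Example #12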
def main_worker(local_rank, ngpus, args):
    best_prec1 = .0

    dist.init_process_group(backend=args.dist_backend,
                            init_method=args.dist_url)

    print(f'local_rank: {local_rank}\n')

    torch.cuda.set_device(local_rank)

    # IMPORTANT: we need to set the random seed in each process so that the models are initialized with the same weights
    # Reference: https://yangkky.github.io/2019/07/08/distributed-pytorch-tutorial.html
    # torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    # build model
    densenet = DenseNet121(in_channels=3, growth_rate=args.growth_rate, \
                           compression_rate=args.compression_rate, \
                           num_classes=args.num_classes)

    densenet.cuda(local_rank)

    densenet = nn.parallel.DistributedDataParallel(densenet, \
                                                    device_ids=[local_rank], \
                                                    output_device=local_rank)

    # Reference: https://github.com/pytorch/examples/blob/master/imagenet/main.py
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(args.image_width),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])
    trainset = torchvision.datasets.ImageFolder(root=os.path.join(
        args.dataset_root, 'train'),
                                                transform=train_transform)

    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(args.image_width),
        transforms.ToTensor(), normalize
    ])
    valset = torchvision.datasets.ImageFolder(root=os.path.join(
        args.dataset_root, 'val'),
                                              transform=val_transform)

    # num_replicas: int, Number of processes participating in distributed training. By default, world_size is retrieved from the current distributed group.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, num_replicas=ngpus)
    batch_size = args.batch_size // ngpus
    num_workers = args.num_workers // ngpus

    train_data = DataLoader(
        trainset,
        batch_size=batch_size,
        shuffle=False,  # when sampler is specified, shuffle should be False
        num_workers=num_workers,
        pin_memory=True,
        sampler=train_sampler)

    val_data = DataLoader(valset,
                          batch_size=batch_size,
                          shuffle=False,
                          num_workers=num_workers,
                          pin_memory=True)

    criterion = nn.CrossEntropyLoss().cuda(local_rank)
    optimizer = optim.SGD(densenet.parameters(), lr=args.lr, momentum=args.momentum, \
                            weight_decay=args.weight_decay)

    # Reference: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/2
    # cudnn.benchmark lets cudnn pick the fastest algorithms when input
    # sizes do not change across iterations.
    cudnn.benchmark = True

    for epoch in range(args.epochs):

        train_sampler.set_epoch(epoch)

        adjust_lr(args, optimizer, epoch)

        losses, top1, top5 = train(densenet, train_data, criterion, optimizer,
                                   epoch, local_rank, args)

        if args.tensorboard:
            log_value('train_loss', losses.avg, epoch)
            log_value('top1_acc', top1.avg, epoch)
            log_value('top5_acc', top5.avg, epoch)

        # validate the model every epoch
        prec1 = validate(args, val_data, densenet, criterion, epoch)

        is_best = prec1.avg > best_prec1
        best_prec1 = max(prec1.avg, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': densenet.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict()
            }, is_best)
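main_worker takes (local_rank, ngpus, args), which matches the torch.multiprocessing.spawn convention of passing the process index first. A plausible launcher, with argument parsing elided and parse_args hypothetical:

import torch
import torch.multiprocessing as mp

if __name__ == '__main__':
    args = parse_args()                 # hypothetical argument parser
    ngpus = torch.cuda.device_count()
    # spawn passes the process index (local_rank) as the first argument
    mp.spawn(main_worker, nprocs=ngpus, args=(ngpus, args))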
Example #13
def main():
    N_CLASSES = 14
    CLASS_NAMES = ['Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration',
                   'Mass', 'Nodule', 'Pneumonia', 'Pneumothorax',
                   'Consolidation', 'Edema', 'Emphysema', 'Fibrosis',
                   'Pleural_Thickening', 'Hernia']

    # initialize model and load checkpoint
    device = utils.get_device()
    model = DenseNet121(N_CLASSES).to(device)
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint['model_state_dict'])

    # initialize test loader
    test_dataset = ChestXrayDataSet(data_dir=args.path_to_images,
                                    image_list_file=args.test_list,
                                    transform=transforms_test)
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size,
                            shuffle=False, num_workers=args.num_workers, pin_memory=True)

    # initialize the ground truth and prediction tensors
    gt = torch.FloatTensor().cuda()
    pred = torch.FloatTensor().cuda()

    # switch to evaluate mode
    model.eval()
    with torch.no_grad():
        for i, (inp, target) in enumerate(test_loader):
            target = target.cuda()
            gt = torch.cat((gt, target), 0)
            bs, c, h, w = inp.size()
            input_var = inp.view(-1, c, h, w).cuda()  # Variable is deprecated
            output = model(input_var)
            output_mean = output.view(bs, -1)
            pred = torch.cat((pred, output_mean.data), 0)

    gt_np = gt.cpu().numpy()
    pred_np = sigmoid(pred.cpu().numpy())

    # per-class label and prediction columns
    Y_t = [gt_np[:, i] for i in range(N_CLASSES)]
    Y_pred = [pred_np[:, i] for i in range(N_CLASSES)]

    AUCs = []  # AUC for each class
    for i in range(N_CLASSES):
        AUCs.append(roc_auc_score(Y_t[i], Y_pred[i]))

    matrices = []  # confusion matrix for each class
    for i in range(N_CLASSES):
        matrix = confusion_matrix(Y_t[i], np.asarray(Y_pred[i]) > 0.6)
        matrices.append(matrix)

    
    class_names = ['no disease', 'disease']
    fig = plt.figure(figsize=(20, 20))
    for i in range(N_CLASSES):
        plt.subplot(4, 4, i + 1)
        df_cm = pd.DataFrame(
            matrices[i], index=class_names, columns=class_names)
        sns.heatmap(df_cm, annot=True, fmt="d").set_title(CLASS_NAMES[i])
        plt.ylabel('True label')
        plt.xlabel('Predicted label')

    plt.show()
    fig.savefig(os.path.join(args.test_outdir, 'confusion_matrix.pdf'))

    fig, axes2d = plt.subplots(nrows=2, ncols=7, sharex=True, sharey=True,
                               figsize=(12, 4))

    for i, row in enumerate(axes2d):
        for j, cell in enumerate(row):
            # map the 2x7 grid onto the 14 classes
            if i == 0:
                x = i + j
            else:
                x = 13 - i * j

            fpr, tpr, threshold = roc_curve(Y_t[x], Y_pred[x])
            roc_auc = auc(fpr, tpr)

            cell.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
            cell.legend(loc='lower right', handlelength=0, handletextpad=0,
                        frameon=False, prop={'size': 8})
            cell.plot([0, 1], [0, 1], 'r--')
            plt.xlim([0, 1])
            plt.ylim([0, 1])
            cell.set_title(CLASS_NAMES[x], fontsize=10)

            if i == len(axes2d) - 1:
                cell.set_xlabel('False positive rate')
            if j == 0:
                cell.set_ylabel('True positive rate')
    fig.tight_layout(pad=1.0)
    plt.show()
    fig.savefig(os.path.join(args.test_outdir, 'roc_auc.pdf'))
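sigmoid above is applied to a numpy array, so it is presumably a project helper rather than torch.sigmoid; a minimal version:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))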
Example #14
def main():
    args = get_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    SEED = 42
    utils.set_global_seed(SEED)
    utils.prepare_cudnn(deterministic=True)
    num_classes = 14

    # define datasets
    train_dataset = ChestXrayDataSet(
        data_dir=args.path_to_images,
        image_list_file=args.train_list,
        transform=transforms_train,
    )

    val_dataset = ChestXrayDataSet(
        data_dir=args.path_to_images,
        image_list_file=args.val_list,
        transform=transforms_val,
    )

    loaders = {
        'train':
        DataLoader(train_dataset,
                   batch_size=args.batch_size,
                   shuffle=True,
                   num_workers=args.num_workers),
        'valid':
        DataLoader(val_dataset,
                   batch_size=2,
                   shuffle=False,
                   num_workers=args.num_workers)
    }

    logdir = args.log_dir  # where model weights and logs are stored

    # define model
    model = DenseNet121(num_classes)
    if len(args.gpus) > 1:
        model = nn.DataParallel(model)
    device = utils.get_device()
    runner = SupervisedRunner(device=device)

    optimizer = RAdam(model.parameters(), lr=args.lr, weight_decay=0.0003)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     factor=0.25,
                                                     patience=2)

    weights = torch.Tensor(
        [10, 100, 30, 8, 40, 40, 330, 140, 35, 155, 110, 250, 155,
         200]).to(device)
    criterion = BCEWithLogitsLoss(pos_weight=weights)

    class_names = [
        'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
        'Nodule', 'Pneumonia', 'Pneumothorax', 'Consolidation', 'Edema',
        'Emphysema', 'Fibrosis', 'Pleural_Thickening', 'Hernia'
    ]

    runner.train(
        model=model,
        logdir=logdir,
        criterion=criterion,
        optimizer=optimizer,
        scheduler=scheduler,
        loaders=loaders,
        num_epochs=args.epochs,

        # We can specify the callbacks list for the experiment;
        # For this task, we will check AUC and accuracy
        callbacks=[
            AUCCallback(
                input_key="targets",
                output_key='logits',
                prefix='auc',
                class_names=class_names,
                num_classes=num_classes,
                activation='Sigmoid',
            ),
            AccuracyCallback(
                input_key="targets",
                output_key="logits",
                prefix="accuracy",
                accuracy_args=[1],
                num_classes=14,
                threshold=0.5,
                activation='Sigmoid',
            ),
        ],
        main_metric='auc/_mean',
        minimize_metric=False,
        verbose=True,
    )
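The per-class pos_weight vector up-weights positive examples of the rarer classes in the multi-label BCE. A small demonstration of the effect:

import torch
from torch.nn import BCEWithLogitsLoss

logit = torch.tensor([[0.0]])           # sigmoid(0) = 0.5
target = torch.tensor([[1.0]])          # a positive example

plain = BCEWithLogitsLoss()(logit, target)
weighted = BCEWithLogitsLoss(pos_weight=torch.tensor([10.0]))(logit, target)
print(plain.item(), weighted.item())    # the weighted loss is exactly 10x larger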
Example #15
from sklearn.metrics import roc_auc_score
from torchvision import transforms

from model import DenseNet121  # assumed project import, as in the other examples

small_set = False

num_classes = 14
class_names = labels = [
    'Atelectasis', 'Consolidation', 'Infiltration', 'Pneumothorax', 'Edema',
    'Emphysema', 'Fibrosis', 'Effusion', 'Pneumonia', 'Pleural_Thickening',
    'Cardiomegaly', 'Mass', 'Nodule', 'Hernia'
]
data_dir = 'data_entry_labels.csv'
batch_size = 32
max_epoch = 50

print('Loading Model')
model = DenseNet121(num_classes).cuda()

normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])

ts = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    normalize,
])

if small_set:
    train_list = 'train_list_small.txt'
    val_list = 'val_list_small.txt'
else:
    train_list = 'train_list.txt'
    val_list = 'val_list.txt'  # snippet truncated here; completed by symmetry
Example #16
# (snippet truncated above) camera ids and labels extracted from the query set
query_cam, query_label = get_id(query_path)

######################################################################
# Load Collected data Trained model
print('-------test-----------')

###########
# Models  #
###########
if opt.add_softmax_loss:
    model_structure = DenseNet121_classifier(751)
else:
    if opt.model_type == 'resnet50':
        model_structure = Model(last_conv_stride=opt.last_conv_stride)
    elif opt.model_type == 'densenet121':
        model_structure = DenseNet121()
    elif opt.model_type == 'preActResnet50':
        model_structure = PreActResNet50()
    elif opt.model_type == 'resnet50mid':
        model_structure = resnet50mid()

print('Model selected: ', opt.model_type)

model = load_network(model_structure)

# Remove the final fc layer and classifier layer
if not opt.add_softmax_loss:
    model.model.fc = nn.Sequential()
    model.classifier = nn.Sequential()

# Change to test mode
model = model.eval()
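Example #17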
def main_worker(gpu, args):
    """
    @param: gpu - index of the gpu on a single node, here its range is [0, args.gpus-1]
    """

    # IMPORTANT: we need to set the random seed in each process so that the models are initialized with the same weights
    # Reference: https://yangkky.github.io/2019/07/08/distributed-pytorch-tutorial.html
    # torch.cuda.manual_seed(args.seed)

    # for distributed training, rank needs to be global rank among all processes
    rank = args.node_rank * args.gpus + gpu

    dist.init_process_group(backend=args.dist_backend, \
                            init_method=args.dist_url, \
                            world_size=args.world_size, \
                            rank=rank)

    # build model
    densenet = DenseNet121(in_channels=3, growth_rate=args.growth_rate, \
                           compression_rate=args.compression_rate, \
                           num_classes=args.num_classes)

    # torch.cuda.device(gpu)

    # densenet.cuda(gpu)
    densenet.cuda()

    # densenet = nn.parallel.DistributedDataParallel(densenet, device_ids=[gpu])
    densenet = nn.parallel.DistributedDataParallel(densenet)

    # Reference: https://github.com/pytorch/examples/blob/master/imagenet/main.py
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(), normalize
    ])

    trainset = torchvision.datasets.ImageFolder(root=os.path.join(
        args.dataset_root, 'train'),
                                                transform=train_transform)

    val_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(), normalize
    ])

    valset = torchvision.datasets.ImageFolder(root=os.path.join(
        args.dataset_root, 'val'),
                                              transform=val_transform)

    train_sampler = torch.utils.data.distributed.DistributedSampler(
        trainset, num_replicas=args.world_size, rank=rank)

    args.batch_size = int(args.batch_size / args.gpus)
    args.num_workers = int(args.num_workers / args.gpus)

    train_data = torch.utils.data.DataLoader(
        trainset,
        batch_size=args.batch_size,
        shuffle=False,  # when sampler is specified, shuffle should be False
        num_workers=args.num_workers,
        pin_memory=True,
        sampler=train_sampler)

    val_data = torch.utils.data.DataLoader(valset,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=args.num_workers,
                                           pin_memory=True)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(densenet.parameters(), lr=args.lr, momentum=args.momentum, \
                            weight_decay=args.weight_decay)

    global best_prec1

    # Reference: https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/2
    # cudnn.benchmark lets cudnn pick the fastest algorithms when input
    # sizes do not change across iterations.
    cudnn.benchmark = True

    for epoch in range(args.epochs):

        train_sampler.set_epoch(epoch)

        adjust_lr(args, optimizer, epoch)

        # assumes train() returns the meters used below, as in the earlier
        # distributed example
        losses, top1, top5 = train(densenet, train_data, criterion, optimizer,
                                   epoch, args)

        if args.tensorboard:
            log_value('train_loss', losses.avg, epoch)
            log_value('train_acc', top1.avg, epoch)

        # validate the model every epoch
        prec1 = validate(args, val_data, densenet, criterion, epoch)

        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': densenet.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict()
            }, is_best)