Example #1
def test(**kwargs):
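    """Run a trained classifier over the MURA test set, write one
    (image path, positive-class probability) row per image to opt.result_file,
    and report Cohen's kappa."""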
    opt.parse(kwargs)

    # configure model
    # model = DenseNet169(num_classes=2)
    # model = CustomDenseNet169(num_classes=2)
    # model = ResNet152(num_classes=2)
    model = getattr(models, opt.model)()
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        model.cuda()

    model.eval()

    # data
    test_data = MURA_Dataset(opt.data_root,
                             opt.test_image_paths,
                             train=False,
                             test=True)
    test_dataloader = DataLoader(test_data,
                                 batch_size=opt.batch_size,
                                 shuffle=False,
                                 num_workers=opt.num_workers)

    results = []
    # confusion_matrix = meter.ConfusionMeter(2)
    # s = t.nn.Softmax()

    for ii, (data, label, body_part, path) in tqdm(enumerate(test_dataloader)):
        input = Variable(data, volatile=True)
        # body_part = Variable(body_part, volatile=True)
        if opt.use_gpu:
            input = input.cuda()
            # body_part = body_part.cuda()
        if opt.model.startswith('MultiBranch'):
            score = model(input, body_part)
        else:
            score = model(input)

        # confusion_matrix.add(s(Variable(score.data.squeeze())).data, label.type(t.LongTensor))

        probability = t.nn.functional.softmax(score)[:, 0].data.tolist()

        # each row holds the image path and the probability of the positive class
        batch_results = [(path_, probability_)
                         for path_, probability_ in zip(path, probability)]

        results += batch_results

    # cm_value = confusion_matrix.value()
    # accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / (cm_value.sum())

    # print('confusion matrix: ')
    # print(cm_value)
    # print(f'accuracy: {accuracy}')

    write_csv(results, opt.result_file)

    calculate_cohen_kappa()
Example #2
def show(**kwargs):
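    """Iterate over the MURA test set and display each image with OpenCV;
    model inference is commented out in this variant."""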
    opt.parse(kwargs)

    # configure model
    # model = DenseNet169(num_classes=2)
    # model = CustomDenseNet169(num_classes=2)
    # model = ResNet152(num_classes=2)
    # model = getattr(models, opt.model)()
    # if opt.load_model_path:
    #     model.load(opt.load_model_path)
    # if opt.use_gpu:
    #     model.cuda()
    #
    # model.eval()

    # data
    test_data = MURA_Dataset(opt.data_root,
                             opt.test_image_paths,
                             train=False,
                             test=True)
    test_dataloader = DataLoader(test_data,
                                 batch_size=opt.batch_size,
                                 shuffle=False,
                                 num_workers=opt.num_workers)

    results = []
    # confusion_matrix = meter.ConfusionMeter(2)
    # s = t.nn.Softmax()

    for ii, (data, label, path, body_part) in tqdm(enumerate(test_dataloader)):
        input = Variable(data, volatile=True)
        # body_part = Variable(body_part, volatile=True)
        # if opt.use_gpu:
        #     input = input.cuda()
        #     # body_part = body_part.cuda()
        # if opt.model.startswith('MultiBranch'):
        #     score = model(input, body_part)
        # else:
        #     score = model(input)
        for i in range(data.shape[0]):
            img_data = data.cpu().numpy()[i]
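            # convert from CHW (PyTorch layout) to HWC (OpenCV layout) for display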
            img_data = np.transpose(img_data, (1, 2, 0))
            # img_data = cv2.cvtColor(img_data, cv2.COLOR_BGR2RGB)
            cv2.imshow('image', img_data)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
Example #3
def ensemble_test(**kwargs):
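    """Average the positive-class probabilities of several trained models over
    the MURA test set, write the per-image results to opt.result_file, and
    report Cohen's kappa."""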
    opt.parse(kwargs)

    # configure model
    model_hub = []
    for i in range(len(opt.ensemble_model_types)):
        model = getattr(models, opt.ensemble_model_types[i])()
        if opt.ensemble_model_paths[i]:
            model.load(opt.ensemble_model_paths[i])
        if opt.use_gpu:
            model.cuda()
        model.eval()
        model_hub.append(model)

    # data
    test_data = MURA_Dataset(opt.data_root,
                             opt.test_image_paths,
                             train=False,
                             test=True)
    test_dataloader = DataLoader(test_data,
                                 batch_size=opt.batch_size,
                                 shuffle=False,
                                 num_workers=opt.num_workers)

    results = []
    # confusion_matrix = meter.ConfusionMeter(2)
    # s = t.nn.Softmax()

    for ii, (data, label, path) in tqdm(enumerate(test_dataloader)):
        input = Variable(data, volatile=True)
        if opt.use_gpu:
            input = input.cuda()

        probability_hub = []
        for model in model_hub:
            score = model(input)
            # confusion_matrix.add(s(Variable(score.data.squeeze())).data, label.type(t.LongTensor))
            probability = t.nn.functional.softmax(score)[:, 0].data.tolist()
            probability_hub.append(probability)

        # print(probability_hub)

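        # for each image in the batch, average the positive-class probability
        # across all models in the ensemble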
        prob = [
            np.mean([x[i] for x in probability_hub])
            for i in range(len(probability_hub[0]))
        ]

        # each row holds the image path and the probability of the positive class
        batch_results = [(path_, probability_)
                         for path_, probability_ in zip(path, prob)]

        results += batch_results

    # cm_value = confusion_matrix.value()
    # accuracy = 100. * (cm_value[0][0] + cm_value[1][1]) / (cm_value.sum())

    # print('confusion matrix: ')
    # print(cm_value)
    # print(f'accuracy: {accuracy}')

    write_csv(results, opt.result_file)

    calculate_cohen_kappa()
Example #4
def train(**kwargs):
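    """Train a classifier on the MURA training split with weighted
    cross-entropy loss and Adam, save a checkpoint and validate after every
    epoch, and decay the learning rate when the epoch's average training loss
    exceeds the previous epoch's."""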
    opt.parse(kwargs)
    if opt.use_visdom:
        vis = Visualizer(opt.env)

    # step 1: configure model
    # model = densenet169(pretrained=True)
    # model = DenseNet169(num_classes=2)
    # model = ResNet152(num_classes=2)
    model = getattr(models, opt.model)()
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        print('CUDA MODEL!')
        model.cuda()

    model.train()

    # step 2: data
    train_data = MURA_Dataset(opt.data_root,
                              opt.train_image_paths,
                              train=True,
                              test=False)
    val_data = MURA_Dataset(opt.data_root,
                            opt.test_image_paths,
                            train=False,
                            test=False)

    print('Training images:', len(train_data), 'Validation images:',
          len(val_data))

    train_dataloader = DataLoader(train_data,
                                  opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)
    val_dataloader = DataLoader(val_data,
                                batch_size=opt.batch_size,
                                shuffle=False,
                                num_workers=opt.num_workers)

    # step 3: criterion and optimizer
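    # A and N are presumably the per-class image counts of the MURA training
    # split (21,935 + 14,873 = 36,808 images); giving each class the other
    # class's share as its weight up-weights the minority class.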
    A = 21935
    N = 14873
    weight = t.Tensor([A / (A + N), N / (A + N)])
    if opt.use_gpu:
        weight = weight.cuda()

    criterion = t.nn.CrossEntropyLoss(weight=weight)
    # criterion = FocalLoss(alpha=weight, class_num=2)
    lr = opt.lr
    optimizer = t.optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=opt.weight_decay)

    # step 4: meters
    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(2)
    previous_loss = 1e10

    # step 5: train

    if not os.path.exists(os.path.join('checkpoints', model.model_name)):
        os.mkdir(os.path.join('checkpoints', model.model_name))
    prefix = time.strftime('%m%d')
    if not os.path.exists(os.path.join('checkpoints', model.model_name,
                                       prefix)):
        os.mkdir(os.path.join('checkpoints', model.model_name, prefix))

    s = t.nn.Softmax()
    for epoch in range(opt.max_epoch):

        loss_meter.reset()
        confusion_matrix.reset()

        for ii, (data, label, _,
                 body_part) in tqdm(enumerate(train_dataloader)):

            # train model
            input = Variable(data)
            target = Variable(label)
            # body_part = Variable(body_part)
            if opt.use_gpu:
                input = input.cuda()
                target = target.cuda()
                # body_part = body_part.cuda()

            optimizer.zero_grad()
            if opt.model.startswith('MultiBranch'):
                score = model(input, body_part)
            else:
                score = model(input)
            loss = criterion(score, target)
            loss.backward()
            optimizer.step()

            # meters update and visualize
            loss_meter.add(loss.data[0])
            confusion_matrix.add(s(Variable(score.data)).data, target.data)

            if ii % opt.print_freq == opt.print_freq - 1:
                if opt.use_visdom:
                    vis.plot('loss', loss_meter.value()[0])
                # print('loss', loss_meter.value()[0])

                # debug
                if os.path.exists(opt.debug_file):
                    import ipdb
                    ipdb.set_trace()

        ck_name = f'epoch_{epoch}_{str(opt)}.pth'
        model.save(
            os.path.join('checkpoints', model.model_name, prefix, ck_name))
        # model.save()

        # validate and visualize
        val_cm, val_accuracy, val_loss = val(model, val_dataloader)

        cm = confusion_matrix.value()

        if opt.use_visdom:
            vis.plot('val_accuracy', val_accuracy)
            vis.log(
                "epoch:{epoch},lr:{lr},loss:{loss},train_cm:{train_cm},val_cm:{val_cm},train_acc:{train_acc}, "
                "val_acc:{val_acc}".format(
                    epoch=epoch,
                    loss=loss_meter.value()[0],
                    val_cm=str(val_cm.value()),
                    train_cm=str(confusion_matrix.value()),
                    lr=lr,
                    train_acc=str(100. * (cm[0][0] + cm[1][1]) / (cm.sum())),
                    val_acc=str(100. *
                                (val_cm.value()[0][0] + val_cm.value()[1][1]) /
                                (val_cm.value().sum()))))
        print('val_accuracy: ', val_accuracy)
        print(
            "epoch:{epoch},lr:{lr},loss:{loss},train_cm:{train_cm},val_cm:{val_cm},train_acc:{train_acc}, "
            "val_acc:{val_acc}".format(
                epoch=epoch,
                loss=loss_meter.value()[0],
                val_cm=str(val_cm.value()),
                train_cm=str(confusion_matrix.value()),
                lr=lr,
                train_acc=100. * (cm[0][0] + cm[1][1]) / (cm.sum()),
                val_acc=100. * (val_cm.value()[0][0] + val_cm.value()[1][1]) /
                (val_cm.value().sum())))

        # update learning rate
        if loss_meter.value()[0] > previous_loss:
            # if val_loss > previous_loss:
            lr = lr * opt.lr_decay
            # second way of lowering the learning rate: optimizer state such as momentum is preserved
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        # previous_loss = val_loss
        previous_loss = loss_meter.value()[0]