Example #1
def test(opt, net, data=None):
    """
  Test script for Split model
  Args:
    opt(dic): Options
    net(torch.model): Split model instance
    data(dataloader): Dataloader or None, if load data with configuration in opt.
  Return:
    total_loss(torch.tensor): The total loss of the dataset
    accuracy(torch.tensor): the accuracy of the dataset
  """
    if not data:
        with open(opt.json_dir, 'r') as f:
            labels = json.load(f)
        dir_img = opt.img_dir

        test_set = ImageDataset(dir_img, labels, opt.featureW, scale=opt.scale)
        test_loader = DataLoader(test_set,
                                 batch_size=opt.batch_size,
                                 shuffle=False)  # evaluation does not need shuffling
    else:
        test_loader = data

    loss_func = bce_loss

    # single evaluation pass over the dataset
    net.eval()
    epoch_loss = 0
    correct_count = 0
    count = 0
    times = 1
    for i, b in enumerate(test_loader):
        with torch.no_grad():
            img, label = b
            if opt.gpu:
                img = img.cuda()
                label = [x.cuda() for x in label]
            pred_label = net(img)
            loss = loss_func(pred_label, label, [0.1, 0.25, 1])
            epoch_loss += loss.item()  # accumulate a float, not a graph-bearing tensor
            # count element-wise matches between the thresholded predictions
            # and the row/column targets
            correct_count += (
                torch.sum((pred_label[0] > 0.5).type(torch.IntTensor) ==
                          label[0][0].repeat(times, 1).type(torch.IntTensor)).item() +
                torch.sum((pred_label[1] > 0.5).type(torch.IntTensor) ==
                          label[1][0].repeat(times, 1).type(torch.IntTensor)).item())
            count += (label[0].view(-1).size(0) + label[1].view(-1).size(0)) * times
    accuracy = correct_count / count
    total_loss = epoch_loss / (i + 1)
    print('Validation finished! Loss: {0} , Accuracy: {1}'.format(
        total_loss, accuracy))
    return total_loss, accuracy
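A minimal usage sketch (not part of the original listing): the opt object below is a stand-in whose field names mirror the attributes test() reads; all values are illustrative placeholders.

from types import SimpleNamespace

# Hypothetical options object; the field names come from the lookups inside
# test() above, the values are placeholders only.
opt = SimpleNamespace(json_dir='labels/test.json', img_dir='images/test/',
                      featureW=8, scale=0.25, batch_size=1, gpu=False)

# Assuming `net` is an already-trained Split model:
# loss, acc = test(opt, net)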
Example #2
def torch_loader(**kwargs):
    # Despite its name, this builds a PaddlePaddle DataLoader, not a PyTorch one.
    from dataset.dataset import ImageDataset
    from paddle.io import DataLoader
    dataset = ImageDataset(data_list=kwargs['file_list'],
                           input_size=kwargs['input_size'],
                           max_char_per_line=kwargs['max_char_per_line'],
                           mean_color=kwargs['mean_color'],
                           label_dict=kwargs['label_dict'],
                           mode=kwargs['mode'])
    train_reader = DataLoader(dataset,
                              places=kwargs['place'],
                              num_workers=0,
                              batch_size=kwargs['batch_size'],
                              drop_last=True,
                              shuffle=True)
    return train_reader
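A hedged invocation sketch: torch_loader is driven entirely by keyword arguments, so a call might look like the following. Only the key names are taken from the function body; every value is a placeholder.

loader = torch_loader(file_list='data/train_list.txt',
                      input_size=(3, 48, 512),  # placeholder shape
                      max_char_per_line=25,
                      mean_color=127.5,
                      label_dict={'a': 0, 'b': 1},
                      mode='train',
                      place=None,               # let paddle pick the default place
                      batch_size=16)
for batch in loader:
    pass  # consume batches here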
def train(opt, net):
    """
  Train the split model
  Args:
    opt(dic): Options
    net(torch.model): Split model instance
  """
    with open(opt.json_dir, 'r') as f:
        labels = json.load(f)
    dir_img = opt.img_dir

    with open(opt.val_json, 'r') as f:
        val_labels = json.load(f)
    val_img_dir = opt.val_img_dir

    train_set = ImageDataset(dir_img, labels, opt.featureW, scale=opt.scale)
    train_loader = DataLoader(train_set,
                              batch_size=opt.batch_size,
                              shuffle=True)

    val_set = ImageDataset(val_img_dir,
                           val_labels,
                           opt.featureW,
                           scale=opt.scale)
    val_loader = DataLoader(val_set, batch_size=opt.batch_size, shuffle=False)

    print('Data loaded!')

    loss_func = bce_loss
    optimizer = optim.Adam(net.parameters(), lr=opt.lr, weight_decay=0.001)
    best_accuracy = 0
    for epoch in range(opt.epochs):
        print('epoch:{}'.format(epoch + 1))
        net.train()
        epoch_loss = 0
        correct_count = 0
        count = 0
        for i, b in enumerate(train_loader):
            img, label = b
            if opt.gpu:
                img = img.cuda()
                label = [x.cuda() for x in label]
            pred_label = net(img)
            loss = loss_func(pred_label, label, [0.1, 0.25, 1])
            epoch_loss += loss.item()  # accumulate a float, not a graph-bearing tensor
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            times = 1
            # count element-wise matches between the thresholded predictions
            # and the row/column targets
            correct_count += (
                torch.sum((pred_label[0] > 0.5).type(torch.IntTensor) ==
                          label[0][0].repeat(times, 1).type(torch.IntTensor)).item() +
                torch.sum((pred_label[1] > 0.5).type(torch.IntTensor) ==
                          label[1][0].repeat(times, 1).type(torch.IntTensor)).item())
            count += (label[0].view(-1).size(0) + label[1].view(-1).size(0)) * times
        accuracy = correct_count / count
        print('Epoch finished! Loss: {0} , Accuracy: {1}'.format(
            epoch_loss / (i + 1), accuracy))
        val_loss, val_acc = test(opt, net, val_loader)
        # keep only checkpoints that improve validation accuracy
        if val_acc > best_accuracy:
            best_accuracy = val_acc
            torch.save(net.state_dict(),
                       os.path.join(opt.saved_dir, 'CP{}.pth'.format(epoch + 1)))
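The bce_loss used above is defined elsewhere in the project and is not shown in this listing. Purely as an illustration of what a stage-weighted binary cross-entropy could look like, under the assumption that predictions and targets come as per-stage sequences (the real signature and tensor layout may differ):

import torch.nn.functional as F

def weighted_bce_loss(preds, targets, stage_weights):
    # Illustrative stand-in only, not the project's bce_loss.
    # preds / targets: sequences of probability tensors, one per stage;
    # stage_weights: one scalar per stage, e.g. [0.1, 0.25, 1].
    total = 0.
    for p, t, w in zip(preds, targets, stage_weights):
        total = total + w * F.binary_cross_entropy(p, t)
    return total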
def test(opt, net, data=None):
    """
  Test script for Merge model
  Args:
    opt(dic): Options
    net(torch.model): Merge model instance
    data(dataloader): Dataloader or None, if load data with configuration in opt.
  Return:
    total_loss(torch.tensor): The total loss of the dataset
    precision(torch.tensor): Precision (TP / TP + FP)
    recall(torch.tensor): Recall (TP / TP + FN)
    f1(torch.tensor): f1 score (2 * precision * recall / (precision + recall))
  """
    if not data:
        with open(opt.json_dir, 'r') as f:
            labels = json.load(f)
        dir_img = opt.img_dir

        test_set = ImageDataset(dir_img,
                                labels,
                                opt.featureW,
                                scale=opt.scale,
                                mode='merge')
        test_loader = DataLoader(test_set,
                                 batch_size=opt.batch_size,
                                 shuffle=False)
    else:
        test_loader = data

    loss_func = merge_loss

    # single evaluation pass over the dataset
    net.eval()
    epoch_loss = 0
    number_batchs = 0
    total_tp = 0
    total_tn = 0
    total_fp = 0
    total_fn = 0
    for b in test_loader:
        with torch.no_grad():
            img, label, arc = b
            if opt.gpu:
                img = img.cuda()
                label = [x.cuda() for x in label]
            pred_label = net(img, arc)
            loss, D, R = loss_func(pred_label, label, 10.)
            epoch_loss += loss.item()

            # Accumulate confusion counts over the two probability maps D
            # and R returned by the loss, assuming binary {0, 1} targets.
            for prob, target in ((D, label[0]), (R, label[1])):
                pred_pos = prob.view(-1) > 0.5
                true_pos = target.view(-1) > 0.5
                tp = torch.sum(pred_pos & true_pos).item()
                tn = torch.sum(~pred_pos & ~true_pos).item()
                fn = torch.sum(true_pos).item() - tp
                fp = torch.sum(~true_pos).item() - tn
                total_tp += tp
                total_tn += tn
                total_fn += fn
                total_fp += fp
            number_batchs += 1
    total_loss = epoch_loss / number_batchs
    precision = total_tp / (total_tp + total_fp)
    recall = total_tp / (total_tp + total_fn)
    f1 = 2 * precision * recall / (precision + recall)
    print(
        'Validation finished! Loss: {0} ; Precision: {1} ; Recall: {2} ; F1 Score: {3}'
        .format(total_loss, precision, recall, f1))
    return total_loss, precision, recall, f1
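To make the masked counting above concrete, here is a tiny self-contained check of the tp/tn/fn/fp arithmetic on toy tensors (values chosen by hand, not from the project):

import torch

prob = torch.tensor([0.9, 0.2, 0.7, 0.1])    # predicted merge probabilities
target = torch.tensor([1., 0., 0., 1.])      # binary ground truth

pred_pos = prob > 0.5
true_pos = target > 0.5
tp = torch.sum(pred_pos & true_pos).item()    # 1 (index 0)
tn = torch.sum(~pred_pos & ~true_pos).item()  # 1 (index 1)
fn = torch.sum(true_pos).item() - tp          # 1 (index 3)
fp = torch.sum(~true_pos).item() - tn         # 1 (index 2)
print(tp, tn, fn, fp)  # 1 1 1 1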
def train(opt, net):
    """
  Train the merge model
  Args:
    opt(dic): Options
    net(torch.model): Merge model instance
  """
    # load labels
    with open(opt.json_dir, 'r') as f:
        labels = json.load(f)
    dir_img = opt.img_dir

    with open(opt.val_json, 'r') as f:
        val_labels = json.load(f)
    val_img_dir = opt.val_img_dir

    train_set = ImageDataset(dir_img,
                             labels,
                             opt.featureW,
                             scale=opt.scale,
                             mode='merge')
    train_loader = DataLoader(train_set,
                              batch_size=opt.batch_size,
                              shuffle=True)

    val_set = ImageDataset(val_img_dir,
                           val_labels,
                           opt.featureW,
                           scale=opt.scale,
                           mode='merge')
    val_loader = DataLoader(val_set, batch_size=opt.batch_size, shuffle=False)

    print('Data loaded!')

    # defines loss function
    loss_func = merge_loss
    optimizer = optim.Adam(net.parameters(), lr=opt.lr, weight_decay=0.001)
    best_f1 = 0
    for epoch in range(opt.epochs):
        print('epoch:{}'.format(epoch + 1))
        net.train()
        epoch_loss = 0
        number_batchs = 0
        for i, b in enumerate(train_loader):
            img, label, arc = b
            if opt.gpu:
                img = img.cuda()
                label = [x.cuda() for x in label]
            pred_label = net(img, arc)
            loss, _, _ = loss_func(pred_label, label, 10.)
            # skip batches whose loss carries no gradient (e.g. a batch that
            # contributes no trainable terms), since backward() would fail
            if loss.requires_grad:
                epoch_loss += loss.item()
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                number_batchs += 1

        print('Epoch finished! Loss: {0} '.format(epoch_loss / number_batchs))
        val_loss, precision, recall, f1 = test(opt, net, val_loader)
        # save the model if the current f1 score beats the best so far
        if f1 > best_f1:
            best_f1 = f1
            torch.save(net.state_dict(),
                       os.path.join(opt.saved_dir, 'CP{}.pth'.format(epoch + 1)))
        # write training information of current epoch to the log file
        with open(os.path.join(opt.saved_dir, 'log.txt'), 'a') as f:
            f.write(
                'Epoch {0}, val loss : {1}, precision : {2}, recall : {3}, f1 score : {4}\n'
                '   train loss : {5}\n\n'.format(epoch + 1, val_loss, precision,
                                                 recall, f1,
                                                 epoch_loss / number_batchs))
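The `if loss.requires_grad:` guard in the training loop above is worth a small demonstration: a loss built purely from constants carries no gradient, and calling backward() on it would raise an error, so such batches are skipped.

import torch

x = torch.tensor(0.)          # plain constant
print(x.requires_grad)        # False -> the training loop above skips it
y = torch.zeros(1, requires_grad=True).sum()
print(y.requires_grad)        # True  -> safe to call y.backward()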
Example #6
def eval_function(args, model):
    '''
    Evaluate the CNN model on the test set and make predictions.

    Parameters
    ----------
    args: configuration options
    model: the model to test
    '''
    curve_path = "ROC Curves/"
    if args.num_classes == 1:
        CLASS_NAMES = ['Disease']
    elif args.dataset == 'NIH':
        CLASS_NAMES = [
            'Atelectasis', 'Cardiomegaly', 'Effusion', 'Infiltration', 'Mass',
            'Nodule', 'Pneumonia', 'Pneumothorax'
        ]
    elif args.dataset == 'ChesXpert':
        CLASS_NAMES = [
            'Atelectasis', 'Edema', 'Cardiomegaly', 'Consolidation',
            'Pleural Effusion'
        ]
    else:
        # note: a bare `assert "Wrong dataset"` never fails, so raise instead
        raise ValueError('Unknown dataset: {}'.format(args.dataset))
    trans = transforms.Compose([
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    current_location = os.getcwd()
    data_root_dir = os.path.join(current_location, 'dataset')
    if args.multi_label:
        if args.dataset == 'NIH':
            datasets = CXRDataset(data_root_dir,
                                  dataset_type='test',
                                  Num_classes=args.num_classes,
                                  img_size=args.img_size,
                                  transform=trans)
        elif args.dataset == 'ChesXpert':
            datasets = ImageDataset(args.data_root_dir,
                                    dataset_type='valid',
                                    Num_classes=args.num_classes,
                                    transform=trans)

    else:
        datasets = CXRDatasetBinary(data_root_dir,
                                    dataset_type='val',
                                    img_size=args.img_size,
                                    transform=trans)

    dataloader = DataLoader(datasets,
                            batch_size=args.batch_size,
                            shuffle=False,  # keep evaluation deterministic
                            num_workers=args.num_workers)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    print("Beginning evaluation ")
    N_CLASSES = args.num_classes
    print("Test dataset loaded")
    cudnn.benchmark = True

    # initialize the ground-truth and prediction tensors,
    # each of final shape (num_examples, N_CLASSES)
    gt = torch.FloatTensor().to(device)
    pred = torch.FloatTensor().to(device)
    print("testing...")
    test_length = len(datasets)
    print("total test examples: " + str(test_length))
    print("total batches: " + str(int(test_length / args.batch_size)))

    for i, (inputs, target,
            weight) in tqdm(enumerate(dataloader),
                            total=int(test_length / args.batch_size)):
        target = target.to(device)
        inputs = inputs.to(device)
        gt = torch.cat((gt, target), 0)
        with torch.no_grad():
            if args.global_pool == "PCAM":
                output, _ = model(inputs)
                output = torch.sigmoid(output)
            else:
                output = model(inputs)
        pred = torch.cat((pred, output.data), 0)

    AUROCs, roc_curves, mean_TPR, mean_TNR, mean_PPV, mean_F1, F1_dict, PPV_dict, TNR_dict, TPR_dict, mean_Hamming_loss \
        = compute_stats(gt, pred, args)

    AUROC_avg = np.array(AUROCs).mean()
    print('The average AUROC is {AUROC_avg:.3f}'.format(AUROC_avg=AUROC_avg))
    for i in range(N_CLASSES):
        print('The AUROC of {} is {}'.format(CLASS_NAMES[i], AUROCs[i]))
    print("Mean hamming loss is {}".format(mean_Hamming_loss))
    print("Micro-averaging Precison is {} ".format(mean_PPV))
    print("Micro-averaging Recall or Sensitivity is {} ".format(mean_TPR))
    print("Micro-averaging Specificity is {} ".format(mean_TNR))
    print("Micro-averaging F1 score is {} ".format(mean_F1))

    for i in range(N_CLASSES):
        print('The Precision of {} is {}'.format(CLASS_NAMES[i],
                                                 PPV_dict[CLASS_NAMES[i]]))
    for i in range(N_CLASSES):
        print('The Recall or Sensitivity of {} is {}'.format(
            CLASS_NAMES[i], TPR_dict[CLASS_NAMES[i]]))
    for i in range(N_CLASSES):
        print('The Specificity of {} is {}'.format(CLASS_NAMES[i],
                                                   TNR_dict[CLASS_NAMES[i]]))
    for i in range(N_CLASSES):
        print('The F1 score of {} is {}'.format(CLASS_NAMES[i],
                                                F1_dict[CLASS_NAMES[i]]))

    for i in range(N_CLASSES):
        fpr, tpr, thresholds = roc_curves[i]
        plt.plot([0, 1], [0, 1], linestyle="--")
        plt.plot(fpr, tpr, label="model")
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate")
        plt.title("ROC CURVE: " + CLASS_NAMES[i])
        plt.savefig(curve_path + model_name(args) + CLASS_NAMES[i] + ".png")
        plt.clf()

    for i in range(N_CLASSES):
        fpr, tpr, thresholds = roc_curves[i]
        plt.plot([0, 1], [0, 1], linestyle="--")
        plt.plot(fpr, tpr, label=CLASS_NAMES[i])
    plt.xlabel("False Positive Rate")
    plt.ylabel("True Positive Rate")
    plt.legend()
    plt.title("ROC CURVE")
    plt.savefig(curve_path + model_name(args) + ".png")
    plt.clf()
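compute_stats is project-specific and not shown in this listing. Purely as a rough sketch of what its per-class ROC/AUROC portion could look like with scikit-learn (illustrative only, not the project's implementation):

import numpy as np
from sklearn.metrics import roc_auc_score, roc_curve

def per_class_roc(gt, pred):
    # gt, pred: tensors of shape (num_examples, num_classes) on any device;
    # assumes both label values occur in every column, else roc_auc_score raises.
    gt_np = gt.cpu().numpy()
    pred_np = pred.cpu().numpy()
    aurocs, curves = [], []
    for c in range(gt_np.shape[1]):
        aurocs.append(roc_auc_score(gt_np[:, c], pred_np[:, c]))
        curves.append(roc_curve(gt_np[:, c], pred_np[:, c]))  # (fpr, tpr, thresholds)
    return aurocs, curves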
Example #7
def train(**kwargs):
    opt = Config()
    opt._parse(kwargs)

    transform = tf.Compose([
        tf.Resize(int(1.12 * opt.image_size), Image.BICUBIC),
        tf.RandomCrop(opt.image_size),
        tf.RandomHorizontalFlip(),
        tf.ToTensor(),
        tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    '''
    Image.NEAREST: low quality
    Image.BILINEAR: bilinear interpolation
    Image.BICUBIC: bicubic spline interpolation
    Image.ANTIALIAS: high quality
    '''
    # load the data
    train_data = ImageDataset(opt.dataroot, transforms=transform, istrain=True)
    train_loader = DataLoader(train_data,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers)
    # instantiate the networks
    G_A2B = CycleGan.generator()
    G_B2A = CycleGan.generator()

    D_A = CycleGan.discriminator()
    D_B = CycleGan.discriminator()

    if t.cuda.is_available():
        G_A2B.cuda()
        G_B2A.cuda()
        D_A.cuda()
        D_B.cuda()

    # initialize the network weights
    G_A2B.weight_init()
    G_B2A.weight_init()
    D_A.weight_init()
    D_B.weight_init()

    # define the loss functions
    criterion_GAN = t.nn.MSELoss()
    criterion_Cycle = t.nn.L1Loss()
    criterion_identity = t.nn.L1Loss()

    # define the optimizers
    optimizer_G = t.optim.Adam(itertools.chain(G_A2B.parameters(),
                                               G_B2A.parameters()),
                               lr=opt.lr,
                               betas=(opt.betas, 0.999))
    optimizer_D = t.optim.Adam(itertools.chain(D_A.parameters(),
                                               D_B.parameters()),
                               lr=opt.lr,
                               betas=(opt.betas, 0.999))

    # schedulers for dynamic learning-rate decay
    lr_schedule_G = t.optim.lr_scheduler.LambdaLR(optimizer_G,
                                                  lr_lambda=LambdaLR(
                                                      opt.max_epoch, 0,
                                                      opt.decay_epoch).step)
    lr_schedule_D = t.optim.lr_scheduler.LambdaLR(optimizer_D,
                                                  lr_lambda=LambdaLR(
                                                      opt.max_epoch, 0,
                                                      opt.decay_epoch).step)

    # input buffers and real/fake target labels
    Tensor = t.cuda.FloatTensor if t.cuda.is_available() else t.Tensor
    input_A = Tensor(opt.batch_size, 3, opt.image_size, opt.image_size)
    input_B = Tensor(opt.batch_size, 3, opt.image_size, opt.image_size)
    device = 'cuda' if t.cuda.is_available() else 'cpu'
    target_real = t.ones(opt.batch_size, 1, device=device)
    target_fake = t.zeros(opt.batch_size, 1, device=device)

    fake_A_buffer = ReplayBuffer()
    fake_B_buffer = ReplayBuffer()

    # set up visdom for visualization
    vis = Visualizer(env=opt.env, port=15024)

    # average meters for tracking the losses
    lossG_A2B_meter = meter.AverageValueMeter()
    lossG_B2A_meter = meter.AverageValueMeter()
    lossG_identity_meter = meter.AverageValueMeter()
    lossG_cycle_meter = meter.AverageValueMeter()
    lossD_B_meter = meter.AverageValueMeter()
    lossD_A_meter = meter.AverageValueMeter()

    # start training
    lam = 10
    for epoch in range(opt.max_epoch):
        lossD_A_meter.reset()
        lossD_B_meter.reset()
        lossG_cycle_meter.reset()
        lossG_identity_meter.reset()
        lossG_B2A_meter.reset()
        lossG_A2B_meter.reset()
        for i, batch in tqdm.tqdm(enumerate(train_loader)):

            # copy the batch into the pre-allocated buffers; input_A/input_B
            # already live on the right device, so no extra .cuda() is needed
            real_A = input_A.copy_(batch['A'])
            real_B = input_B.copy_(batch['B'])

            # train the generators: G_A2B and G_B2A
            optimizer_G.zero_grad()

            # identity loss
            # G_A2B(B)=B if B is real
            same_B = G_A2B(real_B)
            loss_identity_B = criterion_identity(same_B, real_B) * 0.5 * lam
            # the same as above
            same_A = G_B2A(real_A)
            loss_identity_A = criterion_identity(same_A, real_A) * 0.5 * lam
            lossG_identity_meter.add(loss_identity_A.item() +
                                     loss_identity_B.item())

            # GAN loss
            fake_B = G_A2B(real_A)
            prob_fakeB = D_B(fake_B)
            loss_GAN_A2B = criterion_GAN(prob_fakeB, target_real)
            lossG_A2B_meter.add(loss_GAN_A2B.item())

            fake_A = G_B2A(real_B)
            prob_fakeA = D_A(fake_A)
            loss_GAN_B2A = criterion_GAN(prob_fakeA, target_real)
            lossG_B2A_meter.add(loss_GAN_B2A.item())
            # Cycle loss
            recoverA = G_B2A(fake_B)
            loss_cycle_ABA = criterion_Cycle(recoverA, real_A) * lam

            recoverB = G_A2B(fake_A)
            loss_cycle_BAB = criterion_Cycle(recoverB, real_B) * lam
            lossG_cycle_meter.add(loss_cycle_BAB.item() +
                                  loss_cycle_ABA.item())
            # total loss
            loss_G = loss_identity_A + loss_identity_B + loss_GAN_A2B + loss_GAN_B2A + loss_cycle_ABA + loss_cycle_BAB
            loss_G.backward()
            optimizer_G.step()

            # train the discriminators
            optimizer_D.zero_grad()

            # real loss
            pred_real_B = D_B(real_B)
            loss_D_real_B = criterion_GAN(pred_real_B, target_real)

            # fake loss, with fakes drawn from the replay buffer
            fake_B_new = fake_B_buffer.push_and_pop(fake_B)
            pred_fake_B = D_B(fake_B_new)
            loss_D_fake_B = criterion_GAN(pred_fake_B, target_fake)
            loss_total_B = (loss_D_real_B + loss_D_fake_B) * 0.5
            lossD_B_meter.add(loss_total_B.item())
            loss_total_B.backward()

            # real loss
            pred_real_A = D_A(real_A)
            loss_D_real_A = criterion_GAN(pred_real_A, target_real)

            # fake loss, with fakes drawn from the replay buffer
            fake_A_new = fake_A_buffer.push_and_pop(fake_A)
            pred_fake_A = D_A(fake_A_new)
            loss_D_fake_A = criterion_GAN(pred_fake_A, target_fake)
            loss_total_A = (loss_D_fake_A + loss_D_real_A) * 0.5
            lossD_A_meter.add(loss_total_A.item())
            loss_total_A.backward()

            optimizer_D.step()
            # periodically plot losses and show sample images
            if (i + 1) % opt.plot_every == 0:
                vis.plot('lossG_A2B', lossG_A2B_meter.value()[0])
                vis.plot('lossG_B2A', lossG_B2A_meter.value()[0])
                vis.plot('lossG_identity', lossG_identity_meter.value()[0])
                vis.plot('lossG_cycle', lossG_cycle_meter.value()[0])
                vis.plot('lossD_B', lossD_B_meter.value()[0])
                vis.plot('lossD_A', lossD_A_meter.value()[0])
                vis.img('real_A', real_A.data.cpu()[0] * 0.5 + 0.5)
                vis.img('fake_B', fake_B.data.cpu()[0] * 0.5 + 0.5)
                vis.img('real_B', real_B.data.cpu()[0] * 0.5 + 0.5)
                vis.img('fake_A', fake_A.data.cpu()[0] * 0.5 + 0.5)
        # update the learning rates
        lr_schedule_G.step()
        lr_schedule_D.step()

        # save the models
        if (epoch + 1) % opt.savemode_every == 0:
            t.save(
                G_A2B.state_dict(), 'checkpoints/%s_%s_G_A2B.pth' %
                (epoch, time.strftime('%m%d_%H:%M%S')))
            t.save(
                G_B2A.state_dict(), 'checkpoints/%s_%s_G_B2A.pth' %
                (epoch, time.strftime('%m%d_%H:%M%S')))
            t.save(
                D_A.state_dict(), 'checkpoints/%s_%s_D_A.pth' %
                (epoch, time.strftime('%m%d_%H:%M%S')))
            t.save(
                D_B.state_dict(), 'checkpoints/%s_%s_D_B.pth' %
                (epoch, time.strftime('%m%d_%H:%M%S')))
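The LambdaLR helper passed to the schedulers above is defined elsewhere in this project. A hedged sketch of the standard CycleGAN-style linear decay rule it presumably implements (class name and signature assumed, matching the LambdaLR(opt.max_epoch, 0, opt.decay_epoch).step call above):

class LinearDecayLR:
    """Assumed stand-in for the project's LambdaLR helper: hold the base
    learning rate until decay_start, then decay linearly to zero."""

    def __init__(self, n_epochs, offset, decay_start):
        assert (n_epochs - decay_start) > 0, 'decay must start before the end'
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start = decay_start

    def step(self, epoch):
        # multiplicative factor consumed by torch.optim.lr_scheduler.LambdaLR
        return 1.0 - max(0, epoch + self.offset - self.decay_start) / float(
            self.n_epochs - self.decay_start)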