Example 1
def test(test_loader, net, criterion, gpu):
    print('\nTest_net...')
    start = time.time()
    net.eval()
    losses = AverageMeter()
    total_correct = 0
    mtr = meter.AUCMeter()

    with torch.no_grad():
        for i, (imgs, marks, labels) in enumerate(test_loader):
            if gpu:
                imgs = imgs.cuda()
                labels = labels.cuda()

            outputs = net(imgs)  # network output
            # _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)

            losses.update(loss.item(), imgs.size(0))
            smax_probs = nn.Softmax(dim=1)(outputs)  # per-class probabilities
            mtr.add(smax_probs.data[:, 1], labels.cpu())  # accumulate for AUC

            pred = torch.max(smax_probs, 1)[1]  # predicted class labels
            correct = pred.eq(labels.view_as(pred)).sum()
            total_correct += correct

    accuracy = 100. * float(total_correct) / float(len(test_loader.dataset))
    mtr_values = mtr.value()
    end = time.time()
    print(
        f'Time:{int((end - start)//60)}:{int((end - start)%60)}\nTest set: Average loss: {losses.avg:.4f}, Accuracy: {total_correct}/{len(test_loader.dataset)} ({accuracy:.2f}%), AUC: {mtr_values[0]:.4f}\n'
    )

    return losses.avg, accuracy, mtr_values
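
The function above captures the core AUCMeter contract: feed add() a 1-D tensor of positive-class probabilities plus the binary targets, then read value() back as an (AUC, TPR, FPR) triple. A minimal, self-contained sketch with synthetic data (assuming torch and torchnet are installed):

import torch
from torchnet import meter

mtr = meter.AUCMeter()
scores = torch.tensor([0.1, 0.9, 0.3, 0.8, 0.2, 0.7])  # P(class == 1) per sample
targets = torch.tensor([0, 1, 0, 1, 0, 1])             # binary ground truth
mtr.add(scores, targets)                               # accumulates; call once per batch
auc, tpr, fpr = mtr.value()                            # AUC scalar plus ROC coordinates
print(f'AUC: {auc:.4f}')                               # perfectly separated scores give 1.0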
Example 2
 def metrics(self, testloader, accuracy = True, auc = False, conf_matrix = False):
     am = meter.AUCMeter()
     cm = meter.ConfusionMeter(2)
     correct = 0
     total = 0
     for data in testloader:
         x, y = data
         y_ = self.model(Variable(x))
         _, predicted = torch.max(y_.data, 1)

         cm.add(y_.data, y)
         am.add(y_.data[:, 1].clone(), y)

         total += y.size(0)
         correct += (predicted == y).sum()
     print(correct, total)
     if accuracy:
         print("Accuracy for the model is", round(correct/float(total)*100, 4), correct, "/", total)
     
     if auc:
         print("Area under ROC curve for the given model is", round(am.value()[0],4))
     
     if conf_matrix:
         print ("Confusion Matrix for the given model is\n", cm.value())
Example 3
def val(model, dataloader):
    model.eval()
    confusion_matrix = meter.ConfusionMeter(opt.num_class)
    auc_meter = meter.AUCMeter()
    for ii, data in enumerate(dataloader):
        input, label = data
        val_input = Variable(input)
        val_label = Variable(label.long())
        if opt.use_gpu:
            val_input = val_input.cuda()
            val_label = val_label.cuda()

        with t.no_grad():
            score = model(val_input)
            confusion_matrix.add(score.data.squeeze(), label)
            if opt.num_class == 2:
                auc_meter.add(score.data[:, 1], label)

    model.train()

    cm_value = confusion_matrix.value()
    accuracy = 1. * np.trace(cm_value) / (cm_value.sum())

    if opt.num_class == 2:
        return confusion_matrix, accuracy, auc_meter.value()[0]
    else:
        return confusion_matrix, accuracy
Example 4
def test(model, dataloader, num_workers, batch_size, resultpath):
    print("num test = {}".format(len(dataloader.dataset)))
    """
    测试指标:
    1、 准确率(Accuracy): 模型预测正确样本数占总样本数的比例。test_acc
    2、 各个类的精度: 模型对各个类别的预测准确率。
    3、 AUC
    4、 混淆矩阵: 用于计算各种指标(包括灵敏性,特异性等)
    """
    # 整个测试数据集的准确率
    test_acc = meter.ClassErrorMeter(topk=[1], accuracy=True)
    # 每一类的精度
    test_ap = meter.APMeter()
    # AUC指标,AUC要求输入样本预测为正例的概率
    """根据我的数据集文件命名,0表示阴性,1表示阳性(即1表示正例)"""
    test_auc = meter.AUCMeter()
    # 混淆矩阵
    test_conf = meter.ConfusionMeter(k=2, normalized=False)

    result_writer = ResultsWriter(str(resultpath), overwrite=False)

    with torch.no_grad():

        for inputs, labels in tqdm(dataloader, desc="Test"):

            # inputs [B, C, H, W]
            inputs = inputs.cuda() if torch.cuda.is_available() else inputs
            # labels [B, num_classes]
            labels = labels.cuda() if torch.cuda.is_available() else labels

            # outputs[B,numclasses]
            outputs = model(inputs)

            # update metrics
            pred_proc = F.softmax(outputs.detach(), dim=1)
            test_acc.add(pred_proc, labels.detach())
            test_ap.add(pred_proc, labels.detach())
            # take column 1 of the output: the probability of the positive class, i.e. 1 (diseased)
            test_auc.add(pred_proc[:, 1], labels.detach())
            test_conf.add(pred_proc, labels.detach())

    # save results so that evaluate.py can compute metrics and draw plots from them
    result_writer.update(
        "test", {
            "acc": test_acc.value(),
            "ap": test_ap.value(),
            "test_auc": test_auc.value()[0],
            "test_tpr": test_auc.value()[1],
            "test_fpr": test_auc.value()[2],
            "test_conf": test_conf.value()
        })

    return test_acc, test_ap, test_auc
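
The docstring above names sensitivity and specificity as metrics derived from the confusion matrix. As an illustration only (hypothetical counts, not output of the code above), they fall directly out of the 2x2 matrix that test_conf.value() returns, with rows indexing the true class:

import numpy as np

conf = np.array([[50, 10],    # row 0 (true negative class): [TN, FP]
                 [5, 35]])    # row 1 (true positive class): [FN, TP]
tn, fp, fn, tp = conf[0, 0], conf[0, 1], conf[1, 0], conf[1, 1]
sensitivity = tp / (tp + fn)  # true positive rate (recall on the diseased class)
specificity = tn / (tn + fp)  # true negative rate
print(f'sensitivity={sensitivity:.3f}, specificity={specificity:.3f}')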
Example 5
    def __init__(self, name=None, n_classes=2):
        self.name = name
        self.n_classes = n_classes
        self.path = os.path.join('log', name)
        self.conf_mtr = meter.ConfusionMeter(n_classes)
        self.auc_mtr = meter.AUCMeter()
        self.err_mtr = meter.ClassErrorMeter(topk=[1], accuracy=True)
        saveMkdir(self.path)

        self.fp = open(os.path.join(self.path, 'res.log'), 'w')
        self.y_scores = np.array([], dtype=np.float32).reshape(0, 1)
        self.y_true = np.array([], dtype=np.float32).reshape(0, 1)
Example 6
def batch_handler(mode, epoch_metrics, dict_, model, criterion, optimizer,
                  logger, args):
    if len(epoch_metrics) == 0:  # first time metric for epoch, initialization
        epoch_metrics = {
            "loss": meter.AverageValueMeter(),
            "auc": meter.AUCMeter()
        }

    target = dict_.pop("target")
    bs = len(target)

    if torch.cuda.is_available():
        # `async=True` is a SyntaxError on Python 3.7+; `non_blocking` is the current kwarg
        input_var = {
            key: torch.autograd.Variable(value.cuda(non_blocking=True),
                                         requires_grad=False)
            for key, value in dict_.items()
        }
    else:
        input_var = {
            key: torch.autograd.Variable(value, requires_grad=False)
            for key, value in dict_.items()
        }

    if torch.cuda.is_available():
        target = target.cuda(non_blocking=True)
    target_var = torch.autograd.Variable(target, requires_grad=False)

    # compute output
    output = model(input_var)
    # @TODO: BCE loss issue
    output = output.squeeze(1)

    loss = criterion(output, target_var)
    loss_ = loss.item()  # indexing a 0-d array with [0] fails on current numpy
    epoch_metrics["loss"].add(loss_)
    logger.add_scalar("loss", loss_, args.step)

    epoch_metrics["auc"].add(output.data, target)
    logger.add_scalar("auc", epoch_metrics["auc"].value()[0], args.step)

    if mode == "train":
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    epoch_metrics["batch_size"] = bs

    return epoch_metrics
Example 7
 def metrics_val(self, testloader):
     am = meter.AUCMeter()
     cm = meter.ConfusionMeter(2)
     correct = 0
     total = 0
     for data in testloader:
         x, y = data
         y_ = self.model(Variable(x))
         _, predicted = torch.max(y_.data, 1)
         cm.add(y_.data, y)
         am.add(y_.data[:, 1].clone(), y)
         total += y.size(0)
         correct += (predicted == y).sum()

     cor_tot = str(correct) + "/" + str(total)

     return round(correct / float(total) * 100, 4), cor_tot, round(am.value()[0], 4), cm.value()
Example 8
    def testAUCMeter(self):
        mtr = meter.AUCMeter()

        test_size = 1000
        mtr.add(torch.rand(test_size), torch.zeros(test_size))
        mtr.add(torch.rand(test_size), torch.Tensor(test_size).fill_(1))

        val, tpr, fpr = mtr.value()
        self.assertTrue(math.fabs(val - 0.5) < 0.1, msg="AUC Meter fails")

        mtr.reset()
        mtr.add(torch.Tensor(test_size).fill_(0), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.1), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.2), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.3), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(0.4), torch.zeros(test_size))
        mtr.add(torch.Tensor(test_size).fill_(1), torch.Tensor(test_size).fill_(1))
        val, tpr, fpr = mtr.value()

        self.assertEqual(val, 1.0, msg="AUC Meter fails")
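
Beyond the scalar the test asserts on, the tpr and fpr arrays that value() returns trace the ROC curve itself and can be plotted directly. A quick sketch (assuming matplotlib is installed, with mtr as in the test above):

import matplotlib.pyplot as plt

val, tpr, fpr = mtr.value()
plt.plot(fpr, tpr, label=f'AUC = {val:.3f}')
plt.plot([0, 1], [0, 1], linestyle='--')  # chance diagonal
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()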
Example 9
def train(train_loader, net, criterion, optimizer, gpu, epoch):
    print('\nTrain_net...')
    start = time.time()
    net.train()
    losses = AverageMeter()
    total_correct = 0
    mtr = meter.AUCMeter()
    for i, (imgs, marks, labels) in enumerate(train_loader):
        if gpu:
            imgs = imgs.cuda()
            labels = labels.cuda()

        outputs = net(imgs)  # network output
        # _, preds = torch.max(outputs, 1)
        loss = criterion(outputs, labels)

        losses.update(loss.item(), imgs.size(0))
        smax_probs = nn.Softmax(dim=1)(outputs)  # per-class probabilities
        mtr.add(smax_probs.data[:, 1], labels.cpu())  # accumulate for AUC

        pred = torch.max(smax_probs, 1)[1]  # predicted class labels
        correct = pred.eq(labels.view_as(pred)).sum()
        total_correct += correct

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        print(
            f'Train Epoch: {epoch + 1} [{(i + 1) * len(imgs)}/{len(train_loader.dataset)} ({100 * (i + 1) / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}'
        )

    end = time.time()

    accuracy = 100. * float(total_correct) / float(len(train_loader.dataset))
    mtr_values = mtr.value()
    print(
        f'Time:{int((end - start)//60)}:{int((end - start)%60)}\nTrain set: Average loss: {losses.avg:.4f}, Accuracy: {total_correct}/{len(train_loader.dataset)} ({accuracy:.2f}%), AUC: {mtr_values[0]:.4f}'
    )

    return losses.avg, accuracy, mtr_values
Example 10
                                                           batch_size=30)
    a = custom_model(model, loss_fn)
    a.model.apply(init_weights)
    a.train(trainloader, testloader, validloader, optimizer, 30, plot=True)
    cross_val_models.append(a.model.state_dict())
    accuracy, ct, auc, cm = a.metrics_val(testloader)
    if accuracy > best_accuracy:
        best_accuracy_model = a.model.state_dict()
        best_accuracy = accuracy
    cross_val_accu.append(accuracy)
    print("Accuracy:", accuracy, ct)

print("Average Accuracy:", sum(cross_val_accu) / len(cross_val_accu))

#cross_val_models = pkl.load(open("cross_models_best", "rb"))
am = meter.AUCMeter()
cm = meter.ConfusionMeter(2)
correct = 0
total = 0
Y_ = []
a = custom_model(model, loss_fn)
for data in testloader:
    Y_ = []
    x, y = data
    #    a.model.load_state_dict(cross_val_models)
    #    y_ = a.model(Variable(x))
    for mod in cross_val_models:
        a.model.load_state_dict(mod)
        Y_.append(a.model(Variable(x)))

    y_ = Y_[0]
Example 11
def test_2class(**kwargs):
    config.parse(kwargs)

    # ============================================= Prepare Data =============================================
    test_data = ContextVB_Dataset(config.test_paths, phase='test', num_classes=config.num_classes, useRGB=config.useRGB,
                                  usetrans=config.usetrans, padding=config.padding, balance=config.data_balance)
    test_dataloader = DataLoader(test_data, batch_size=config.batch_size, shuffle=False, num_workers=config.num_workers)
    test_dist = test_data.dist()

    print('Test Images:', len(test_data))

    # ============================================= Prepare Model ============================================
    model = ContextNet(num_classes=config.num_classes)
    print(model)

    if config.load_model_path:
        model.load(config.load_model_path)
        print('Model has been loaded!')
    else:
        print("Don't load model")
    if config.use_gpu:
        model.cuda()
    if config.parallel:
        model = torch.nn.DataParallel(model, device_ids=list(range(config.num_of_gpu)))
    model.eval()

    # =========================================== Prepare Metrics =====================================
    test_cm = meter.ConfusionMeter(config.num_classes)
    test_AUC = meter.AUCMeter()
    softmax = functional.softmax
    results = []
    y_true, y_scores = [], []

    # =========================================== Test ============================================
    for i, (image, label, image_path) in tqdm(enumerate(test_dataloader)):
        # ******************* prepare input and go through the model *******************
        if config.use_gpu:
            last_image, cur_image, next_image = image[0].cuda(), image[1].cuda(), image[2].cuda()
            last_label, cur_label, next_label = label[0].cuda(), label[1].cuda(), label[2].cuda()
        else:
            last_image, cur_image, next_image = image[0], image[1], image[2]
            last_label, cur_label, next_label = label[0], label[1], label[2]

        last_image.requires_grad = False
        cur_image.requires_grad = False
        next_image.requires_grad = False
        last_label.requires_grad = False
        cur_label.requires_grad = False
        next_label.requires_grad = False

        # score = model(last_image, cur_image, next_image)
        score, diff1, diff2 = model(last_image, cur_image, next_image)

        # *************************** confusion matrix and AUC *************************
        test_cm.add(softmax(score, dim=1).data, cur_label.data)
        positive_score = np.array([item[1] for item in softmax(score, dim=1).data.cpu().numpy().tolist()])
        test_AUC.add(positive_score, cur_label.data)  # AUC and ROC via torchnet

        y_true.extend(cur_label.data.cpu().numpy().tolist())  # collected for AUC and ROC via sklearn
        y_scores.extend(positive_score.tolist())

        # ******************************** record prediction results ******************************
        for l, p, ip in zip(cur_label.detach(), softmax(score, dim=1).detach(), image_path):
            if p[1] < 0.5:
                results.append((ip, int(l), 0, round(float(p[0]), 4), round(float(p[1]), 4)))
            else:
                results.append((ip, int(l), 1, round(float(p[0]), 4), round(float(p[1]), 4)))

    # ************************** TPR, FPR, AUC ******************************
    SKL_FPR, SKL_TPR, SKL_Thresholds = roc_curve(y_true, y_scores)
    SKL_AUC = roc_auc_score(np.array(y_true), np.array(y_scores), average='weighted')

    TNet_AUC, TNet_TPR, TNet_FPR = test_AUC.value()

    # ******************** Best SE, SP, Thresh, Matrix ***********************
    best_index = np.argmax(SKL_TPR - SKL_FPR, axis=0)
    best_SE, best_SP, best_T = SKL_TPR[best_index], 1 - SKL_FPR[best_index], SKL_Thresholds[best_index]
    best_confusion_matrix = [[int(round(test_dist['0'] * best_SP)), int(round(test_dist['0'] * (1 - best_SP)))],
                             [int(round(test_dist['1'] * (1 - best_SE))), int(round(test_dist['1'] * best_SE))]]

    # *********************** accuracy and sensitivity ***********************
    test_accuracy = 100. * sum([test_cm.value()[c][c] for c in range(config.num_classes)]) / np.sum(test_cm.value())
    test_se = [100. * test_cm.value()[i][i] / np.sum(test_cm.value()[i]) for i in range(config.num_classes)]

    # ================================ Save and Print Prediction Results ===========================
    if config.result_file:
        write_csv(os.path.join('results', config.result_file), tag=['path', 'label', 'predict', 'p1', 'p2'], content=results)

    draw_ROC(tpr=SKL_TPR, fpr=SKL_FPR, best_index=best_index, tangent=True, save_path=os.path.join('results', config.load_model_path.split('/')[-1][:-4] + "_ROC.png"))

    print('test_acc:', test_accuracy)
    print('test_avgse:', round(np.average(test_se), 4), 'train_se0:', round(test_se[0], 4), 'train_se1:', round(test_se[1], 4))
    print('SKL_AUC:', SKL_AUC, 'TNet_AUC:', TNet_AUC)
    print('Best_SE:', best_SE, 'Best_SP:', best_SP, 'Best_Threshold:', best_T)
    print('test_cm:')
    print(best_confusion_matrix)
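
The best_index selection above is Youden's J statistic: the ROC point maximizing TPR - FPR picks the threshold that jointly favors sensitivity and specificity. A standalone sketch with synthetic scores (assuming scikit-learn is installed):

import numpy as np
from sklearn.metrics import roc_curve

y_true = np.array([0, 0, 1, 1, 0, 1])
y_scores = np.array([0.1, 0.4, 0.35, 0.8, 0.2, 0.9])
fpr, tpr, thresholds = roc_curve(y_true, y_scores)
best = np.argmax(tpr - fpr)               # Youden's J = TPR - FPR
print('threshold:', thresholds[best],
      'SE:', tpr[best], 'SP:', 1 - fpr[best])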
Example 12
def train(**kwargs):
    config.parse(kwargs)
    vis = Visualizer(port=2333, env=config.env)
    vis.log('Use config:')
    for k, v in config.__class__.__dict__.items():
        if not k.startswith('__'):
            vis.log(f"{k}: {getattr(config, k)}")

    # prepare data
    train_data = VB_Dataset(config.train_paths,
                            phase='train',
                            useRGB=config.useRGB,
                            usetrans=config.usetrans,
                            padding=config.padding,
                            balance=config.data_balance)
    val_data = VB_Dataset(config.test_paths,
                          phase='val',
                          useRGB=config.useRGB,
                          usetrans=config.usetrans,
                          padding=config.padding,
                          balance=False)
    print('Training Images:', len(train_data), 'Validation Images:',
          len(val_data))
    dist = train_data.dist()
    print('Train Data Distribution:', dist, 'Val Data Distribution:',
          val_data.dist())

    train_dataloader = DataLoader(train_data,
                                  batch_size=config.batch_size,
                                  shuffle=True,
                                  num_workers=config.num_workers)
    val_dataloader = DataLoader(val_data,
                                batch_size=config.batch_size,
                                shuffle=False,
                                num_workers=config.num_workers)

    # prepare model
    # model = ResNet18(num_classes=config.num_classes)
    # model = Vgg16(num_classes=config.num_classes)
    # model = densenet_collapse(num_classes=config.num_classes)
    model = ShallowVgg(num_classes=config.num_classes)
    print(model)

    if config.load_model_path:
        model.load(config.load_model_path)
    if config.use_gpu:
        model.cuda()
    if config.parallel:
        model = torch.nn.DataParallel(
            model, device_ids=list(range(config.num_of_gpu)))

    # criterion and optimizer
    # weight = torch.Tensor([1/dist['0'], 1/dist['1'], 1/dist['2'], 1/dist['3']])
    # weight = torch.Tensor([1/dist['0'], 1/dist['1']])
    # weight = torch.Tensor([dist['1'], dist['0']])
    # weight = torch.Tensor([1, 10])
    # vis.log(f'loss weight: {weight}')
    # print('loss weight:', weight)
    # weight = weight.cuda()

    # criterion = torch.nn.CrossEntropyLoss()
    criterion = LabelSmoothing(size=config.num_classes, smoothing=0.1)
    # criterion = torch.nn.CrossEntropyLoss(weight=weight)
    # criterion = FocalLoss(gamma=4, alpha=None)

    lr = config.lr
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 weight_decay=config.weight_decay)

    # metric
    softmax = functional.softmax
    log_softmax = functional.log_softmax
    loss_meter = meter.AverageValueMeter()
    epoch_loss = meter.AverageValueMeter()
    train_cm = meter.ConfusionMeter(config.num_classes)
    train_AUC = meter.AUCMeter()

    previous_avgse = 0
    # previous_AUC = 0
    if config.parallel:
        save_model_dir = config.save_model_dir if config.save_model_dir else model.module.model_name
        save_model_name = config.save_model_name if config.save_model_name else model.module.model_name + '_best_model.pth'
    else:
        save_model_dir = config.save_model_dir if config.save_model_dir else model.model_name
        save_model_name = config.save_model_name if config.save_model_name else model.model_name + '_best_model.pth'
    save_epoch = 1  # records the epoch whose model performs best on the validation set
    # process_record = {'epoch_loss': [],  # curves recorded during training, for plotting later
    #                   'train_avgse': [], 'train_se0': [], 'train_se1': [], 'train_se2': [], 'train_se3': [],
    #                   'val_avgse': [], 'val_se0': [], 'val_se1': [], 'val_se2': [], 'val_se3': []}
    process_record = {
        'epoch_loss': [],  # curves recorded during training, for plotting later
        'train_avgse': [],
        'train_se0': [],
        'train_se1': [],
        'val_avgse': [],
        'val_se0': [],
        'val_se1': [],
        'train_AUC': [],
        'val_AUC': []
    }

    # train
    for epoch in range(config.max_epoch):
        print(
            f"epoch: [{epoch+1}/{config.max_epoch}] {config.save_model_name[:-4]} =================================="
        )
        epoch_loss.reset()
        train_cm.reset()
        train_AUC.reset()

        # train
        model.train()
        for i, (image, label, image_path) in tqdm(enumerate(train_dataloader)):
            loss_meter.reset()

            # prepare input
            if config.use_gpu:
                image = image.cuda()
                label = label.cuda()

            # go through the model
            score = model(image)

            # backpropagate
            optimizer.zero_grad()
            # loss = criterion(score, label)
            loss = criterion(log_softmax(score, dim=1), label)
            loss.backward()
            optimizer.step()

            loss_meter.add(loss.item())
            epoch_loss.add(loss.item())
            train_cm.add(softmax(score, dim=1).data, label.data)
            positive_score = np.array([
                item[1]
                for item in softmax(score, dim=1).data.cpu().numpy().tolist()
            ])
            train_AUC.add(positive_score, label.data)

            if (i + 1) % config.print_freq == 0:
                vis.plot('loss', loss_meter.value()[0])

        # print result
        # train_se = [100. * train_cm.value()[0][0] / (train_cm.value()[0][0] + train_cm.value()[0][1] + train_cm.value()[0][2] + train_cm.value()[0][3]),
        #             100. * train_cm.value()[1][1] / (train_cm.value()[1][0] + train_cm.value()[1][1] + train_cm.value()[1][2] + train_cm.value()[1][3]),
        #             100. * train_cm.value()[2][2] / (train_cm.value()[2][0] + train_cm.value()[2][1] + train_cm.value()[2][2] + train_cm.value()[2][3]),
        #             100. * train_cm.value()[3][3] / (train_cm.value()[3][0] + train_cm.value()[3][1] + train_cm.value()[3][2] + train_cm.value()[3][3])]
        train_se = [
            100. * train_cm.value()[0][0] /
            (train_cm.value()[0][0] + train_cm.value()[0][1]),
            100. * train_cm.value()[1][1] /
            (train_cm.value()[1][0] + train_cm.value()[1][1])
        ]

        # validate
        model.eval()
        if (epoch + 1) % 1 == 0:
            val_cm, val_se, val_accuracy, val_AUC = val_2class(
                model, val_dataloader)

            if np.average(
                    val_se) > previous_avgse:  # save the model when average sensitivity on the validation set improves
                # if val_AUC.value()[0] > previous_AUC:  # save the model when the validation AUC improves
                if config.parallel:
                    if not os.path.exists(
                            os.path.join('checkpoints', save_model_dir,
                                         save_model_name.split('.')[0])):
                        os.makedirs(
                            os.path.join('checkpoints', save_model_dir,
                                         save_model_name.split('.')[0]))
                    model.module.save(
                        os.path.join('checkpoints', save_model_dir,
                                     save_model_name.split('.')[0],
                                     save_model_name))
                else:
                    if not os.path.exists(
                            os.path.join('checkpoints', save_model_dir,
                                         save_model_name.split('.')[0])):
                        os.makedirs(
                            os.path.join('checkpoints', save_model_dir,
                                         save_model_name.split('.')[0]))
                    model.save(
                        os.path.join('checkpoints', save_model_dir,
                                     save_model_name.split('.')[0],
                                     save_model_name))
                previous_avgse = np.average(val_se)
                # previous_AUC = val_AUC.value()[0]
                save_epoch = epoch + 1

            process_record['epoch_loss'].append(epoch_loss.value()[0])
            process_record['train_avgse'].append(np.average(train_se))
            process_record['train_se0'].append(train_se[0])
            process_record['train_se1'].append(train_se[1])
            # process_record['train_se2'].append(train_se[2])
            # process_record['train_se3'].append(train_se[3])
            process_record['train_AUC'].append(train_AUC.value()[0])
            process_record['val_avgse'].append(np.average(val_se))
            process_record['val_se0'].append(val_se[0])
            process_record['val_se1'].append(val_se[1])
            # process_record['val_se2'].append(val_se[2])
            # process_record['val_se3'].append(val_se[3])
            process_record['val_AUC'].append(val_AUC.value()[0])

            # vis.plot_many({'epoch_loss': epoch_loss.value()[0],
            #                'train_avgse': np.average(train_se), 'train_se0': train_se[0], 'train_se1': train_se[1], 'train_se2': train_se[2], 'train_se3': train_se[3],
            #                'val_avgse': np.average(val_se), 'val_se0': val_se[0], 'val_se1': val_se[1], 'val_se2': val_se[2], 'val_se3': val_se[3]})
            # vis.log(f"epoch: [{epoch+1}/{config.max_epoch}] =========================================")
            # vis.log(f"lr: {optimizer.param_groups[0]['lr']}, loss: {round(loss_meter.value()[0], 5)}")
            # vis.log(f"train_avgse: {round(np.average(train_se), 4)}, train_se0: {round(train_se[0], 4)}, train_se1: {round(train_se[1], 4)}, train_se2: {round(train_se[2], 4)}, train_se3: {round(train_se[3], 4)},")
            # vis.log(f"val_avgse: {round(np.average(val_se), 4)}, val_se0: {round(val_se[0], 4)}, val_se1: {round(val_se[1], 4)}, val_se2: {round(val_se[2], 4)}, val_se3: {round(val_se[3], 4)}")
            # vis.log(f'train_cm: {train_cm.value()}')
            # vis.log(f'val_cm: {val_cm.value()}')
            # print("lr:", optimizer.param_groups[0]['lr'], "loss:", round(epoch_loss.value()[0], 5))
            # print('train_avgse:', round(np.average(train_se), 4), 'train_se0:', round(train_se[0], 4), 'train_se1:', round(train_se[1], 4), 'train_se2:', round(train_se[2], 4), 'train_se3:', round(train_se[3], 4))
            # print('val_avgse:', round(np.average(val_se), 4), 'val_se0:', round(val_se[0], 4), 'val_se1:', round(val_se[1], 4), 'val_se2:', round(val_se[2], 4), 'val_se3:', round(val_se[3], 4))
            # print('train_cm:')
            # print(train_cm.value())
            # print('val_cm:')
            # print(val_cm.value())

            vis.plot_many({
                'epoch_loss': epoch_loss.value()[0],
                'train_avgse': np.average(train_se),
                'train_se0': train_se[0],
                'train_se1': train_se[1],
                'val_avgse': np.average(val_se),
                'val_se0': val_se[0],
                'val_se1': val_se[1],
                'train_AUC': train_AUC.value()[0],
                'val_AUC': val_AUC.value()[0]
            })
            vis.log(
                f"epoch: [{epoch + 1}/{config.max_epoch}] ========================================="
            )
            vis.log(
                f"lr: {optimizer.param_groups[0]['lr']}, loss: {round(loss_meter.value()[0], 5)}"
            )
            vis.log(
                f"train_avgse: {round(np.average(train_se), 4)}, train_se0: {round(train_se[0], 4)}, train_se1: {round(train_se[1], 4)}"
            )
            vis.log(
                f"val_avgse: {round(np.average(val_se), 4)}, val_se0: {round(val_se[0], 4)}, val_se1: {round(val_se[1], 4)}"
            )
            vis.log(f'train_AUC: {train_AUC.value()[0]}')
            vis.log(f'val_AUC: {val_AUC.value()[0]}')
            vis.log(f'train_cm: {train_cm.value()}')
            vis.log(f'val_cm: {val_cm.value()}')
            print("lr:", optimizer.param_groups[0]['lr'], "loss:",
                  round(epoch_loss.value()[0], 5))
            print('train_avgse:', round(np.average(train_se), 4), 'train_se0:',
                  round(train_se[0], 4), 'train_se1:', round(train_se[1], 4))
            print('val_avgse:', round(np.average(val_se), 4), 'val_se0:',
                  round(val_se[0], 4), 'val_se1:', round(val_se[1], 4))
            print('train_AUC:',
                  train_AUC.value()[0], 'val_AUC:',
                  val_AUC.value()[0])
            print('train_cm:')
            print(train_cm.value())
            print('val_cm:')
            print(val_cm.value())

            if os.path.exists(
                    os.path.join('checkpoints', save_model_dir,
                                 save_model_name.split('.')[0])):
                write_json(file=os.path.join('checkpoints', save_model_dir,
                                             save_model_name.split('.')[0],
                                             'process_record.json'),
                           content=process_record)

        # if (epoch+1) % 5 == 0:
        #     lr = lr * config.lr_decay
        #     for param_group in optimizer.param_groups:
        #         param_group['lr'] = lr

    vis.log(f"Best Epoch: {save_epoch}")
    print("Best Epoch:", save_epoch)
Example 13
def train(**kwargs):
    # torch.manual_seed(100) # 10, 100, 666,
    opt.parse(kwargs)
    vis = Visualizer(opt.env)

    # step1: configure model
    model = getattr(models, opt.model)()
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        model.cuda()

    # step2: load data
    if os.path.isfile(opt.train_features_path) and\
       os.path.isfile(opt.train_targets_path):
        print "load train dataset from file"
        features = torch.load(opt.train_features_path)
        # features[features == float('Inf')] = 0  # for errors
        targets = torch.load(opt.train_targets_path) * 5  # !!!!!!!!!!!!!!!!!
        train_data = torch.utils.data.TensorDataset(features, targets)
        train_dataloader = DataLoader(train_data, opt.batch_size,
                                      shuffle=True,
                                      num_workers=opt.num_workers)
    else:
        train_data = STSDataset(opt.train_data_path, opt)
        train_dataloader = DataLoader(train_data, opt.batch_size,
                                      shuffle=True,
                                      num_workers=opt.num_workers)
        torch.save(train_data.X, opt.train_features_path)
        torch.save(train_data.y, opt.train_targets_path)

    if os.path.isfile(opt.test_features_path) and\
       os.path.isfile(opt.test_targets_path):
        print "load test dataset from file"
        features = torch.load(opt.test_features_path)
        # features[features == float('Inf')] = 0  # for errors
        targets = torch.load(opt.test_targets_path) * 5  # !!!!!!!!!!!!!!!!!
        test_data = torch.utils.data.TensorDataset(features, targets)
        test_dataloader = DataLoader(test_data, opt.batch_size,
                                     shuffle=False,
                                     num_workers=opt.num_workers)
    else:
        test_data = STSDataset(opt.test_data_path, opt)
        test_dataloader = DataLoader(test_data, opt.batch_size,
                                     shuffle=True,
                                     num_workers=opt.num_workers)
        torch.save(test_data.X, opt.test_features_path)
        torch.save(test_data.y, opt.test_targets_path)

    # step3: set criterion and optimizer
    # criterion = torch.nn.MarginRankingLoss()
    criterion = torch.nn.BCELoss()
    lr = opt.lr
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
                                 # weight_decay=opt.weight_decay)
    # optimizer = torch.optim.RMSprop(model.parameters(), lr=lr)
    #                                 weight_decay=opt.weight_decay)
    # optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)

    # step4: set meters
    # note: despite the name, this meter accumulates AUC over the epoch, not the loss
    loss_meter = meter.AUCMeter()
    # loss_meter = meter.ClassErrorMeter()
    previous_loss = 1e100

    # train
    testf1 = []
    test_f_a_l = []
    for epoch in range(opt.max_epoch):
        loss_meter.reset()

        for ii, (data, label) in enumerate(train_dataloader):
            # train model on a batch data
            input = Variable(data)
            target = Variable(torch.FloatTensor(label.numpy()))
            if opt.use_gpu:
                input = input.cuda()
                target = target.cuda()
            optimizer.zero_grad()
            score = model(input)
            loss = criterion(score, target)
            # print score

            loss.backward()
            optimizer.step()

            # update meters and visualize
            loss_meter.add(score.data, target.data)

            # if ii % opt.print_freq == opt.print_freq - 1:
            #     vis.plot('loss', loss_meter.value()[0])

        # save model for each epoch
        # model.save()

        # validate and visualize
        train_ce, train_acc, train_f1 = val(model, train_dataloader)
        test_ce, test_acc, test_f1 = val(model, test_dataloader)
        testf1.append(test_acc)  # note: stores accuracy despite the name
        test_f_a_l.append([test_f1, test_acc, test_ce])
        print('epoch: %d' % epoch)
        print('test acc, f1: ' + str(test_acc) + ' , ' + str(test_f1))

        # vis.plot_many({"train_ce": train_ce,
        #                "test_ce": test_ce})  # !!!!!!!!!!!!!!!!!
        # vis.plot_many({"train_acc": train_acc,
        #                "test_acc": test_acc})
        # vis.plot_many({"train_f1": train_f1,
        #                "test_f1": test_f1})
        # vis.log("epoch:{epoch}, lr:{lr}, loss:{loss}, \
        #          train_ce:{train_ce}, train_acc:{train_acc}, \
        #          test_ce:{test_ce}, test_acc:{test_acc}".format(
        #     epoch=epoch, lr=lr, loss=loss_meter.value(),
        #     train_ce=str(train_ce), train_acc=str(train_acc),
        #     test_ce=str(test_ce), test_acc=str(test_acc)))

        # update learning rate
        if loss_meter.value()[0] > previous_loss:  # value() returns (auc, tpr, fpr); compare the AUC scalar
            lr = lr * opt.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        previous_loss = loss_meter.value()[0]
    x1 = max(testf1)
    ind = testf1.index(x1)
    return test_f_a_l[ind], ind
Example 14
def train(**kwargs):
    opt.parse(kwargs)
    vis = Visualizer(opt.env)

    model = getattr(models, opt.models)(opt.num_class)
    if opt.load_model_path:
        model.load(opt.load_model_path)
    if opt.use_gpu:
        model.cuda()

    train_data = Echocardiography(opt.train_data_root, train=True)
    val_data = Echocardiography(opt.train_data_root, train=False)
    train_dataloader = DataLoader(train_data,
                                  opt.batch_size,
                                  shuffle=True,
                                  num_workers=opt.num_workers)
    val_dataloader = DataLoader(val_data,
                                opt.batch_size,
                                shuffle=True,
                                num_workers=opt.num_workers)

    criterion = nn.CrossEntropyLoss()
    lr = opt.lr
    optimizer = t.optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=opt.weight_decay)

    loss_meter = meter.AverageValueMeter()
    confusion_matrix = meter.ConfusionMeter(opt.num_class)
    auc_meter = meter.AUCMeter()

    previous_loss = 100

    for epoch in range(opt.max_epoch):
        loss_meter.reset()
        confusion_matrix.reset()
        if opt.num_class == 2:
            auc_meter.reset()

        for ii, (data, label) in enumerate(train_dataloader):
            input = Variable(data)
            target = Variable(label.long())
            if opt.use_gpu:
                input = input.cuda()
                target = target.cuda()
            optimizer.zero_grad()
            score = model(input)
            loss = criterion(score, target)
            loss.backward()
            optimizer.step()

            loss_meter.add(loss.item())  # AverageValueMeter expects a scalar
            confusion_matrix.add(score.data, target.data)
            if opt.num_class == 2:
                auc_meter.add(score.data[:, 1], target.data)

            if ii % opt.print_freq == opt.print_freq - 1:
                vis.plot('loss', loss_meter.value()[0])
                if opt.num_class == 2:
                    vis.plot('auc', auc_meter.value()[0])

        val_results = val(model, val_dataloader)  # run validation once and unpack
        val_cm, val_accuracy = val_results[:2]
        if opt.num_class == 2:
            val_auc = val_results[2]
            vis.plot('val_auc', val_auc)
        vis.plot('val_accuracy', val_accuracy)
        vis.log(
            "epoch:{epoch}, lr:{lr}, loss:{loss}, train_cm:{train_cm}, val_cm:{val_cm}"
            .format(epoch=epoch,
                    lr=lr,
                    loss=loss_meter.value()[0],
                    train_cm=str(confusion_matrix.value()),
                    val_cm=str(val_cm.value())))
        model.save()
        if loss_meter.value()[0] > previous_loss:
            lr = lr * opt.lr_decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr

        previous_loss = loss_meter.value()[0]