Code Example #1
File: train_org.py Project: jdariasl/COVIDNET
def train(args, model, trainloader, optimizer, epoch):
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metrics = Metrics('')
    metrics.reset()
    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()

        output = model(input_data)

        loss = criterion(output, target)
        loss.backward()

        optimizer.step()
        correct, total, acc = accuracy(output, target)

        num_samples = batch_idx * args.batch_size + 1
        metrics.update({
            'correct': correct,
            'total': total,
            'loss': loss.item(),
            'accuracy': acc
        })
        print_stats(args, epoch, num_samples, trainloader, metrics)

    print_summary(args, epoch, num_samples, metrics, mode="Training")
    return metrics
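These examples all assume import torch and import torch.nn as nn, plus project-local helpers (accuracy, Metrics, print_stats, print_summary) that the snippets do not include. As a minimal sketch of the contract accuracy must satisfy here, returning (correct, total, acc) for a batch of logits (hypothetical; the project's version may differ):

import torch

def accuracy(output, target):
    """Top-1 batch accuracy, returning (correct, total, acc)."""
    with torch.no_grad():
        pred = torch.argmax(output, dim=1)       # predicted class per sample
        correct = (pred == target).sum().item()  # correctly classified samples
        total = target.size(0)                   # batch size
        return correct, total, correct / total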
Code Example #2
File: train_class.py Project: regfish7/COVIDNet
    def validation(self, args, model, testloader, epoch):
        model.eval()
        criterion = nn.CrossEntropyLoss(reduction='mean')

        metrics = Metrics()
        metrics.reset()
        with torch.no_grad():
            for batch_idx, input_tensors in enumerate(testloader):

                input_data, target = input_tensors
                if (args.cuda):
                    input_data = input_data.cuda()
                    target = target.cuda()

                output = model(input_data)

                loss = criterion(output, target)

                correct, total, acc = accuracy(output, target)
                num_samples = batch_idx * args.batch_size + 1

                metrics.update({
                    'correct': correct,
                    'total': total,
                    'loss': loss.item(),
                    'accuracy': acc
                })
                print_stats(args, epoch, num_samples, testloader, metrics)

        print_summary(args, epoch, num_samples, metrics, mode="Validation")
        return metrics
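The Metrics tracker is likewise external; a minimal stand-in consistent with the reset() / update(dict) calls above could look like this (an assumption, not the project's actual class):

class Metrics:
    """Accumulates per-batch values and exposes running averages."""
    def __init__(self, name=''):
        self.name = name
        self.reset()

    def reset(self):
        self._sums, self._count = {}, 0

    def update(self, values):
        self._count += 1
        for key, value in values.items():
            self._sums[key] = self._sums.get(key, 0.0) + value

    def avg(self, key):
        return self._sums[key] / max(self._count, 1)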
Code Example #3
File: train_utils.py Project: iliasprc/COVIDNet
def validation(args, model, testloader, epoch, writer):
    model.eval()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metric_ftns = [
        'loss', 'correct', 'total', 'accuracy', 'ppv', 'sensitivity'
    ]
    val_metrics = MetricTracker(*metric_ftns, writer=writer, mode='val')
    val_metrics.reset()
    confusion_matrix = torch.zeros(args.class_dict, args.class_dict)
    with torch.no_grad():
        for batch_idx, input_tensors in enumerate(testloader):

            input_data, target = input_tensors
            if (args.cuda):
                input_data = input_data.cuda()
                target = target.cuda()

            output = model(input_data)

            loss = criterion(output, target)

            correct, total, acc = accuracy(output, target)
            num_samples = batch_idx * args.batch_size + 1
            _, pred = torch.max(output, 1)
            for t, p in zip(target.cpu().view(-1), pred.cpu().view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            val_metrics.update_all_metrics(
                {
                    'correct': correct,
                    'total': total,
                    'loss': loss.item(),
                    'accuracy': acc
                },
                writer_step=(epoch - 1) * len(testloader) + batch_idx)

    print_summary(args, epoch, num_samples, val_metrics, mode="Validation")
    s = sensitivity(confusion_matrix.numpy())
    ppv = positive_predictive_value(confusion_matrix.numpy())
    print(f" s {s} ,ppv {ppv}")
    val_metrics.update('sensitivity',
                       s,
                       writer_step=(epoch - 1) * len(testloader) + batch_idx)
    val_metrics.update('ppv',
                       ppv,
                       writer_step=(epoch - 1) * len(testloader) + batch_idx)
    print('Confusion Matrix\n{}'.format(confusion_matrix.cpu().numpy()))
    return val_metrics, confusion_matrix
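Since the confusion matrix is accumulated as confusion_matrix[true, pred] += 1, the sensitivity and positive_predictive_value helpers presumably reduce to row- and column-normalized diagonals. A sketch under that assumption:

import numpy as np

def sensitivity(cm, eps=1e-12):
    """Per-class recall: TP / (TP + FN), diagonal over row sums."""
    return np.diag(cm) / (cm.sum(axis=1) + eps)

def positive_predictive_value(cm, eps=1e-12):
    """Per-class precision: TP / (TP + FP), diagonal over column sums."""
    return np.diag(cm) / (cm.sum(axis=0) + eps)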
Code Example #4
def train(args, model, trainloader, optimizer, epoch):

    start_time = time.time()
    model.train()

    train_metrics = MetricTracker(*METRICS_TRACKED, mode='train')
    w2 = torch.Tensor([1.0, 1.0, 1.5])

    if (args.cuda):
        model.cuda()
        w2 = w2.cuda()

    train_metrics.reset()
    # sanity-check counters
    counter_batches = 0
    counter_covid = 0

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        counter_batches += 1

        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()

        output = model(input_data)

        loss, counter = weighted_loss(output, target, w2)
        counter_covid += counter
        loss.backward()

        optimizer.step()
        correct, total, acc = accuracy(output, target)
        precision_mean, recall_mean = precision_score(output, target)

        num_samples = batch_idx * args.batch_size + 1
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc,
                'precision_mean': precision_mean,
                'recall_mean': recall_mean
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
    print("--- %s seconds ---" % (time.time() - start_time))
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics
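weighted_loss is not defined in the snippet. Given the weights w2 = [1.0, 1.0, 1.5] and the counter_covid bookkeeping, a plausible reading is a weighted cross-entropy that also counts occurrences of the up-weighted class; assuming index 2 is the COVID-19 class:

import torch
import torch.nn.functional as F

COVID_CLASS = 2  # assumption: the up-weighted third class is COVID-19

def weighted_loss(output, target, weight):
    loss = F.cross_entropy(output, target, weight=weight)  # mean weighted CE
    counter = (target == COVID_CLASS).sum().item()         # COVID samples in batch
    return loss, counter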
Code Example #5
def train(args, model, trainloader, optimizer, epoch, class_weight):
    model.train()
    criterion = nn.CrossEntropyLoss(weight=class_weight, reduction='mean')

    metrics = Metrics('')
    metrics.reset()
    #-------------------------------------------------------
    # This freezes the layers of the pretrained network
    #for m in model.modules():
    #    if isinstance(m, nn.BatchNorm2d):
    #        m.train()
    #        m.weight.requires_grad = False
    #        m.bias.requires_grad = False
    #-----------------------------------------------------

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()
        #print(input_data.shape)
        output = model(input_data)
        #print(output.shape)
        #print(target.shape)
        #loss = focal_loss(output, target)
        if args.model == 'CovidNet_DenseNet':
            output = output[-1]

        loss = crossentropy_loss(output, target, weight=class_weight)
        loss.backward()
        optimizer.step()
        correct, total, acc = accuracy(output, target)

        num_samples = batch_idx * args.batch_size + 1
        _, output_class = output.max(1)
        #print(output_class)
        #print(target)
        bacc = balanced_accuracy_score(target.cpu().detach().numpy(),
                                       output_class.cpu().detach().numpy())
        metrics.update({
            'correct': correct,
            'total': total,
            'loss': loss.item(),
            'accuracy': acc,
            'bacc': bacc
        })
        print_stats(args, epoch, num_samples, trainloader, metrics)

    print_summary(args, epoch, num_samples, metrics, mode="Training")
    return metrics
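Here balanced_accuracy_score is presumably sklearn.metrics.balanced_accuracy_score, and crossentropy_loss a thin functional wrapper; a guess at the latter:

import torch.nn.functional as F

def crossentropy_loss(output, target, weight=None):
    # mean-reduced cross-entropy with optional per-class weights
    return F.cross_entropy(output, target, weight=weight)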
Code Example #6
def validation(args, model, testloader, epoch):

    model.eval()

    val_metrics = MetricTracker(*METRICS_TRACKED, mode='val')
    val_metrics.reset()
    w2 = torch.Tensor([1.0, 1.0, 1.5])  # w_full = torch.Tensor([1.456, 1.0, 15.71])

    if (args.cuda):
        w2 = w2.cuda()

    confusion_matrix = torch.zeros(args.classes, args.classes)

    with torch.no_grad():
        for batch_idx, input_tensors in enumerate(testloader):

            input_data, target = input_tensors

            if (args.cuda):
                input_data = input_data.cuda()
                target = target.cuda()

            output = model(input_data)

            loss, counter = weighted_loss(output, target, w2)
            correct, total, acc = accuracy(output, target)
            precision_mean, recall_mean = precision_score(output, target)

            num_samples = batch_idx * args.batch_size + 1
            _, preds = torch.max(output, 1)

            for t, p in zip(target.cpu().view(-1), preds.cpu().view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            val_metrics.update_all_metrics(
                {
                    'correct': correct,
                    'total': total,
                    'loss': loss.item(),
                    'accuracy': acc,
                    'precision_mean': precision_mean,
                    'recall_mean': recall_mean
                },
                writer_step=(epoch - 1) * len(testloader) + batch_idx)

    print_summary(args, epoch, num_samples, val_metrics, mode="Validation")
    print('Confusion Matrix\n {}'.format(confusion_matrix.cpu().numpy()))

    return val_metrics, confusion_matrix
Code Example #7
def validation(args, model, testloader, epoch, class_weight):
    model.eval()

    #-------------------------------------------------------
    # This freezes the layers of the pretrained network
    #for m in model.modules():
    #    if isinstance(m, nn.BatchNorm2d):
    #        m.train()
    #        m.weight.requires_grad = False
    #        m.bias.requires_grad = False
    #-----------------------------------------------------

    criterion = nn.CrossEntropyLoss(reduction='mean')

    metrics = Metrics('')
    metrics.reset()
    confusion_matrix = torch.zeros(args.classes, args.classes)
    with torch.no_grad():
        for batch_idx, input_tensors in enumerate(testloader):

            input_data, target = input_tensors
            if (args.cuda):
                input_data = input_data.cuda()
                target = target.cuda()
            #print(input_data.shape)
            output = model(input_data)
            if args.model == 'CovidNet_DenseNet':
                output = output[-1]
            #loss = focal_loss(output, target)
            loss = crossentropy_loss(output, target, weight=class_weight)

            correct, total, acc = accuracy(output, target)
            num_samples = batch_idx * args.batch_size + 1
            _, preds = torch.max(output, 1)
            bacc = balanced_accuracy_score(target.cpu().detach().numpy(),
                                           preds.cpu().detach().numpy())
            for t, p in zip(target.cpu().view(-1), preds.cpu().view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            metrics.update({
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc,
                'bacc': bacc
            })
            #print_stats(args, epoch, num_samples, testloader, metrics)

    print_summary(args, epoch, num_samples, metrics, mode="Validation")
    return metrics, confusion_matrix
Code Example #8
def inference(args, model, val_loader, epoch, writer, device, writer_step):

    # Run Inference on val set
    val_metrics, cm = val(args, model, val_loader, epoch, writer, device)
    val_metrics.write_tb(writer_step)

    # Print a summary message
    print_summary(args, epoch, val_metrics)

    # Print the confusion matrix
    print('Confusion Matrix\n{}'.format(cm.cpu().numpy()))

    val_score = val_metrics.avg('sens') * val_metrics.avg('ppv')

    return val_score, cm
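The product of mean sensitivity and mean PPV collapses validation quality into one scalar, which is convenient for checkpoint selection. A hypothetical caller (the loaders, epoch count, and checkpoint path are placeholders):

best_score = 0.0
for epoch in range(1, args.epochs + 1):
    train(args, model, train_loader, optimizer, epoch)
    val_score, cm = inference(args, model, val_loader, epoch, writer, device, epoch)
    if val_score > best_score:  # keep the checkpoint with the best sens * ppv
        best_score = val_score
        torch.save(model.state_dict(), 'best_model.pth')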
Code Example #9
File: train_utils.py Project: iliasprc/COVIDNet
def train(args, model, trainloader, optimizer, epoch, writer, log):
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metric_ftns = [
        'loss', 'correct', 'total', 'accuracy', 'ppv', 'sensitivity'
    ]
    train_metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    train_metrics.reset()
    confusion_matrix = torch.zeros(args.class_dict, args.class_dict)

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()

        output = model(input_data)

        loss = criterion(output, target)
        loss.backward()

        optimizer.step()
        correct, total, acc = accuracy(output, target)
        pred = torch.argmax(output, dim=1)

        num_samples = batch_idx * args.batch_size + 1
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
        for t, p in zip(target.cpu().view(-1), pred.cpu().view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
    s = sensitivity(confusion_matrix.numpy())
    ppv = positive_predictive_value(confusion_matrix.numpy())
    print(f" s {s} ,ppv {ppv}")
    # train_metrics.update('sensitivity', s, writer_step=(epoch - 1) * len(trainloader) + batch_idx)
    # train_metrics.update('ppv', ppv, writer_step=(epoch - 1) * len(trainloader) + batch_idx)
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics
Code Example #10
File: train_org.py Project: jdariasl/COVIDNET
def validation(args, model, testloader, epoch):
    model.eval()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metrics = Metrics('')
    metrics.reset()
    confusion_matrix = torch.zeros(args.classes, args.classes)  # class count assumed to live in args.classes
    with torch.no_grad():
        for batch_idx, input_tensors in enumerate(testloader):

            input_data, target = input_tensors
            if (args.cuda):
                input_data = input_data.cuda()
                target = target.cuda()

            output = model(input_data)

            #loss = criterion(output, target)
            loss = focal_loss(output, target)

            correct, total, acc = accuracy(output, target)
            num_samples = batch_idx * args.batch_size + 1

            _, preds = torch.max(output, 1)
            for t, p in zip(target.cpu().view(-1), preds.cpu().view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            metrics.update({
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc
            })

            #metrics.update({'correct': correct, 'total': total, 'loss': loss.item(), 'accuracy': acc})
            #print_stats(args, epoch, num_samples, testloader, metrics)

    print_summary(args, epoch, num_samples, metrics, mode="Validation")
    return metrics
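focal_loss is used in place of the unused criterion but is not defined in the snippet; the standard multi-class focal loss it presumably implements can be sketched as:

import torch.nn.functional as F

def focal_loss(output, target, gamma=2.0):
    """Mean of -(1 - p_t)**gamma * log(p_t) over the batch (Lin et al., 2017)."""
    log_pt = F.log_softmax(output, dim=1).gather(1, target.unsqueeze(1)).squeeze(1)
    pt = log_pt.exp()
    return (-(1.0 - pt) ** gamma * log_pt).mean()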