Example #1
def train(args, model, trainloader, optimizer, epoch):
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metrics = Metrics('')
    metrics.reset()
    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()

        output = model(input_data)

        loss = criterion(output, target)
        loss.backward()

        optimizer.step()
        correct, total, acc = accuracy(output, target)

        num_samples = batch_idx * args.batch_size + 1
        metrics.update({
            'correct': correct,
            'total': total,
            'loss': loss.item(),
            'accuracy': acc
        })
        print_stats(args, epoch, num_samples, trainloader, metrics)

    print_summary(args, epoch, num_samples, metrics, mode="Training")
    return metrics
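All of these examples call an accuracy(output, target) helper that returns the number of correct predictions, the batch size, and the batch accuracy. The helper itself is not shown in the source; a minimal sketch of a compatible implementation could look like this (whether accuracy is reported as a percentage or a fraction is an assumption):

import torch

def accuracy(output, target):
    # Assumed helper: returns (correct, total, accuracy) for one batch.
    with torch.no_grad():
        pred = output.argmax(dim=1)
        correct = (pred == target).sum().item()
        total = target.size(0)
        acc = 100.0 * correct / total  # percentage; the real helper may use a 0-1 fraction
    return correct, total, acc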
Example #2
    def validation(self, args, model, testloader, epoch):
        model.eval()
        criterion = nn.CrossEntropyLoss(reduction='mean')

        metrics = Metrics()
        metrics.reset()
        with torch.no_grad():
            for batch_idx, input_tensors in enumerate(testloader):

                input_data, target = input_tensors
                if (args.cuda):
                    input_data = input_data.cuda()
                    target = target.cuda()

                output = model(input_data)

                loss = criterion(output, target)

                correct, total, acc = accuracy(output, target)
                num_samples = batch_idx * args.batch_size + 1

                metrics.update({
                    'correct': correct,
                    'total': total,
                    'loss': loss.item(),
                    'accuracy': acc
                })
                print_stats(args, epoch, num_samples, testloader, metrics)

        print_summary(args, epoch, num_samples, metrics, mode="Validation")
        return metrics
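Examples #1 and #2 both rely on a small Metrics container with reset() and update() that accumulates the per-batch values later consumed by print_stats and print_summary. The class is not part of the source; a minimal sketch of something compatible (names and averaging behaviour are assumptions) is:

class Metrics:
    # Assumed accumulator; the real class in the source may differ.

    def __init__(self, path=''):
        self.path = path
        self.reset()

    def reset(self):
        self.data = {'correct': 0, 'total': 0, 'loss': 0.0, 'accuracy': 0.0}
        self.num_batches = 0

    def update(self, values):
        # Keep running sums; averaged quantities can be divided by num_batches later.
        self.num_batches += 1
        for key, value in values.items():
            self.data[key] = self.data.get(key, 0) + value

    def avg(self, key):
        return self.data[key] / max(self.num_batches, 1)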
Example #3
def train(model, args, device, writer, optimizer, data_loader, epoch):

    # Set train mode
    model.train()

    criterion = nn.CrossEntropyLoss(reduction='mean')
    metric_ftns = ['loss', 'correct', 'total', 'accuracy', 'sens', 'ppv']
    metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    metrics.reset()

    cm = torch.zeros(args.classes, args.classes)

    for batch_idx, input_tensors in enumerate(data_loader):

        input_data, target = input_tensors[0].to(device), input_tensors[1].to(
            device)

        # Forward
        output = model(input_data)
        loss = criterion(output, target)

        correct, total, acc = accuracy(output, target)
        update_confusion_matrix(cm, output, target)
        metrics.update_all_metrics({
            'correct': correct,
            'total': total,
            'loss': loss.item(),
            'accuracy': acc
        })

        # Backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Save TB stats
        writer_step = (epoch - 1) * len(data_loader) + batch_idx
        if ((batch_idx + 1) % args.log_interval == 0):

            # Calculate confusion for this bucket
            ppv, sens = update_confusion_calc(cm)
            metrics.update_all_metrics({'sens': sens, 'ppv': ppv})
            cm = torch.zeros(args.classes, args.classes)

            metrics.write_tb(writer_step)

            num_samples = batch_idx * args.batch_size
            print_stats(args, epoch, num_samples, data_loader, metrics)

    return metrics, writer_step
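Example #3 accumulates a confusion matrix across batches and, every log_interval batches, derives precision (ppv) and sensitivity from it before resetting the matrix. The two helpers it calls are not shown in the source; a rough sketch of compatible implementations (matrix layout and macro-averaging are assumptions) is:

import torch

def update_confusion_matrix(cm, output, target):
    # Assumed layout: rows are true classes, columns are predicted classes.
    pred = output.argmax(dim=1)
    for t, p in zip(target.view(-1), pred.view(-1)):
        cm[t.long(), p.long()] += 1
    return cm

def update_confusion_calc(cm):
    # Macro-averaged precision (ppv) and sensitivity (recall) from the confusion matrix.
    tp = cm.diag()
    ppv = (tp / cm.sum(dim=0).clamp(min=1)).mean().item()
    sens = (tp / cm.sum(dim=1).clamp(min=1)).mean().item()
    return ppv, sens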
Example #4
def train(args, model, trainloader, optimizer, epoch):

    start_time = time.time()
    model.train()

    train_metrics = MetricTracker(*METRICS_TRACKED, mode='train')
    w2 = torch.Tensor([1.0, 1.0, 1.5])

    if (args.cuda):
        model.cuda()
        w2 = w2.cuda()

    train_metrics.reset()
    # JUST FOR CHECK
    counter_batches = 0
    counter_covid = 0

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        counter_batches += 1

        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()

        output = model(input_data)

        loss, counter = weighted_loss(output, target, w2)
        counter_covid += counter
        loss.backward()

        optimizer.step()
        correct, total, acc = accuracy(output, target)
        precision_mean, recall_mean = precision_score(output, target)

        num_samples = batch_idx * args.batch_size + 1
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc,
                'precision_mean': precision_mean,
                'recall_mean': recall_mean
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
    print("--- %s seconds ---" % (time.time() - start_time))
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics
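Example #4 replaces the plain criterion with a weighted_loss helper that returns both the loss and a counter, which the caller accumulates into counter_covid. A possible implementation consistent with the call site is sketched below; the counting logic and the choice of class index 2 as the up-weighted (COVID) class are assumptions, not taken from the source:

import torch.nn.functional as F

def weighted_loss(output, target, class_weights, covid_class=2):
    # Assumed helper: class-weighted cross-entropy plus a count of samples
    # labelled with the up-weighted class in this batch.
    loss = F.cross_entropy(output, target, weight=class_weights)
    counter = (target == covid_class).sum().item()
    return loss, counter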
Example #5
def train(args, model, trainloader, optimizer, epoch, class_weight):
    model.train()
    criterion = nn.CrossEntropyLoss(weight=class_weight, reduction='mean')

    metrics = Metrics('')
    metrics.reset()
    #-------------------------------------------------------
    # This is to freeze the layers of the pretrained network
    #for m in model.modules():
    #    if isinstance(m, nn.BatchNorm2d):
    #        m.train()
    #        m.weight.requires_grad = False
    #        m.bias.requires_grad = False
    #-----------------------------------------------------

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()
        #print(input_data.shape)
        output = model(input_data)
        #print(output.shape)
        #print(target.shape)
        #loss = focal_loss(output, target)
        if args.model == 'CovidNet_DenseNet':
            output = output[-1]

        loss = crossentropy_loss(output, target, weight=class_weight)
        loss.backward()
        optimizer.step()
        correct, total, acc = accuracy(output, target)

        num_samples = batch_idx * args.batch_size + 1
        _, output_class = output.max(1)
        #print(output_class)
        #print(target)
        bacc = balanced_accuracy_score(target.cpu().detach().numpy(),
                                       output_class.cpu().detach().numpy())
        metrics.update({
            'correct': correct,
            'total': total,
            'loss': loss.item(),
            'accuracy': acc,
            'bacc': bacc
        })
        print_stats(args, epoch, num_samples, trainloader, metrics)

    print_summary(args, epoch, num_samples, metrics, mode="Training")
    return metrics
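Note that Example #5 builds a weighted criterion at the top but never uses it; the loss actually comes from a standalone crossentropy_loss helper. That helper is not shown in the source; a minimal sketch, assuming it simply wraps the functional cross-entropy with per-class weights, is:

import torch.nn.functional as F

def crossentropy_loss(output, target, weight=None):
    # Assumed helper: functional equivalent of nn.CrossEntropyLoss(weight=weight, reduction='mean').
    return F.cross_entropy(output, target, weight=weight, reduction='mean')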
Example #6
def train(args, model, trainloader, optimizer, epoch, writer, log):
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')

    metric_ftns = [
        'loss', 'correct', 'total', 'accuracy', 'ppv', 'sensitivity'
    ]
    train_metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    train_metrics.reset()
    confusion_matrix = torch.zeros(args.class_dict, args.class_dict)

    for batch_idx, input_tensors in enumerate(trainloader):
        optimizer.zero_grad()
        input_data, target = input_tensors
        if (args.cuda):
            input_data = input_data.cuda()
            target = target.cuda()

        output = model(input_data)

        loss = criterion(output, target)
        loss.backward()

        optimizer.step()
        correct, total, acc = accuracy(output, target)
        pred = torch.argmax(output, dim=1)

        num_samples = batch_idx * args.batch_size + 1
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc
            },
            writer_step=(epoch - 1) * len(trainloader) + batch_idx)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
        for t, p in zip(target.cpu().view(-1), pred.cpu().view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
    s = sensitivity(confusion_matrix.numpy())
    ppv = positive_predictive_value(confusion_matrix.numpy())
    print(f" s {s} ,ppv {ppv}")
    # train_metrics.update('sensitivity', s, writer_step=(epoch - 1) * len(trainloader) + batch_idx)
    # train_metrics.update('ppv', ppv, writer_step=(epoch - 1) * len(trainloader) + batch_idx)
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics
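Example #6 accumulates the confusion matrix over the whole epoch and only then computes sensitivity and PPV from its NumPy form. The two helpers are not included in the source; a minimal per-class sketch (macro averaging or other reductions would be additional assumptions) is:

import numpy as np

def sensitivity(cm):
    # Assumed helper: per-class recall, true positives over row sums (true-class counts).
    tp = np.diag(cm)
    return tp / np.maximum(cm.sum(axis=1), 1)

def positive_predictive_value(cm):
    # Assumed helper: per-class precision, true positives over column sums (predicted-class counts).
    tp = np.diag(cm)
    return tp / np.maximum(cm.sum(axis=0), 1)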