Ejemplo n.º 1
0
def test(model, params, test_dataset_loader, criterion):
    """Evaluate ``model`` on the test loader, streaming loss/accuracy to tqdm.

    Runs in eval mode under ``torch.no_grad()``; metrics are accumulated in a
    ``MetricMonitor`` and shown in the progress-bar description.
    """
    metric_monitor = MetricMonitor()
    model.eval()
    progress = tqdm(test_dataset_loader)
    with torch.no_grad():
        for step, batch in enumerate(progress, start=1):
            images, target = batch
            # Move inputs to the configured device; float() matches model dtype.
            images = images.to(params["device"], non_blocking=True).float()
            target = target.to(params["device"], non_blocking=True)
            output = model(images)
            loss = criterion(output, target)
            batch_accuracy = accuracy(output, target)
            metric_monitor.update("Loss", loss.item())
            metric_monitor.update("Accuracy", batch_accuracy)
            progress.set_description(f"Test. {metric_monitor}")
Ejemplo n.º 2
0
def evaluate(model, tokenizer, params, valid_examples):
    """Evaluate ``model`` on ``valid_examples`` and compute standard metrics.

    Args:
        model: the model handed to ``predict``.
        tokenizer: tokenizer handed to ``predict``.
        params: dict; only ``'eval_batch_size'`` is read here (for logging).
        valid_examples: sequence of examples, each with an integer-like
            ``.label`` attribute.

    Returns:
        Tuple ``(result, prob_preds)`` where ``result`` maps metric names
        (loss, accuracy, F1, Matthews corrcoef) to values and ``prob_preds``
        is whatever ``predict`` returned.
    """
    print("***** Running evaluation *****")
    print("Num examples: ", len(valid_examples))
    print("Batch size:   ", params['eval_batch_size'])

    prob_preds = predict(model, tokenizer, params, valid_examples)
    # Fixed: the previous enumerate() produced an index that was never used.
    true_labels = np.array([int(example.label) for example in valid_examples])
    result = {
        'eval_loss': metrics.log_loss(true_labels, prob_preds),
        'eval_accuracy': metrics.accuracy(true_labels, prob_preds),
        'eval_f1_score': metrics.f1_score(true_labels, prob_preds),
        'eval_matthews_corrcoef': metrics.matthews_corrcoef(true_labels, prob_preds)
    }
    return result, prob_preds
Ejemplo n.º 3
0
    def train(epoch):
        """Run one distributed (Horovod) training epoch with sub-batching.

        Each loader batch is split into sub-batches of ``FLAGS.BATCH_SIZE``;
        every sub-batch does its own forward/backward so gradients accumulate
        in place, then one ``optimizer.step()`` applies them across all ranks.
        """
        model.train()
        # Adjust the LR for this epoch and keep the value for progress/logging.
        lr = adjust_learning_rate(FLAGS, optimizer, epoch)
        # Re-seed the distributed sampler so shuffling differs per epoch.
        train_sampler.set_epoch(epoch)
        train_loss = Metric('train_loss')
        train_accuracy = Metric('train_accuracy')

        with tqdm.tqdm(total=len(train_loader),
                       desc='Train Epoch     #{}'.format(epoch + 1),
                       disable=not verbose) as t:
            for batch_idx, (data, target) in enumerate(train_loader):
                if FLAGS.CUDA:
                    data, target = data.cuda(), target.cuda()
                # Gradients accumulate over the inner sub-batch loop below,
                # so zero them once per outer batch.
                optimizer.zero_grad()
                # Split data into sub-batches of size batch_size
                for i in range(0, len(data), FLAGS.BATCH_SIZE):
                    data_batch = data[i:i + FLAGS.BATCH_SIZE]
                    target_batch = target[i:i + FLAGS.BATCH_SIZE]
                    output = model(data_batch)
                    train_accuracy.update(accuracy(output, target_batch))
                    loss = F.cross_entropy(output, target_batch)
                    train_loss.update(loss)
                    # Average gradients among sub-batches
                    # NOTE(review): div_ mutates `loss` in place AFTER it was
                    # passed to train_loss.update(); assumes Metric copies or
                    # reduces the tensor immediately — confirm against Metric.
                    loss.div_(math.ceil(float(len(data)) / FLAGS.BATCH_SIZE))
                    loss.backward()
                    # Log one sample input image per epoch, from rank 0 only.
                    if i == 0 and hvd.rank() == 0:
                        train_summary_writer.add_image("input",
                                                       transforms.denormalize(data[0],
                                                                              mean=FLAGS.DATA_MEAN,
                                                                              std=FLAGS.DATA_STD),
                                                       epoch)
                # Gradient is applied across all ranks
                optimizer.step()
                t.set_postfix({'loss': train_loss.avg.item(),
                               'accuracy': 100. * train_accuracy.avg.item(),
                               'lr': lr})
                t.update(1)

        # Only rank 0 writes TensorBoard scalars to avoid duplicate logs.
        if hvd.rank() == 0:
            train_summary_writer.add_scalar('info/lr', lr, epoch)
            train_summary_writer.add_scalar('info/loss', train_loss.avg, epoch)
            train_summary_writer.add_scalar('metric/accuracy', train_accuracy.avg, epoch)
Ejemplo n.º 4
0
    def test():
        """Evaluate the model over ``test_loader`` and print final accuracy."""
        model.eval()
        valid_loss = Metric('valid_loss')
        valid_accuracy = Metric('valid_accuracy')

        # Enter the progress bar first, then disable autograd for inference.
        with tqdm.tqdm(total=len(test_loader),
                       desc='Test Model',
                       disable=not verbose) as bar, torch.no_grad():
            for data, target in test_loader:
                if FLAGS.CUDA:
                    data = data.cuda()
                    target = target.cuda()
                logits = model(data)

                valid_loss.update(F.cross_entropy(logits, target))
                valid_accuracy.update(accuracy(logits, target))
                bar.set_postfix({
                    'loss': valid_loss.avg.item(),
                    'accuracy': 100. * valid_accuracy.avg.item()
                })
                bar.update(1)
        print("test result: {:.2f}".format(valid_accuracy.avg * 100))
Ejemplo n.º 5
0
    def validate(epoch):
        """Run validation for ``epoch`` and log metrics from rank 0."""
        model.eval()
        valid_loss = Metric('valid_loss')
        valid_accuracy = Metric('valid_accuracy')

        with tqdm.tqdm(total=len(valid_loader),
                       desc='Validate Epoch  #{}'.format(epoch + 1),
                       disable=not verbose) as progress:
            with torch.no_grad():
                for inputs, labels in valid_loader:
                    if FLAGS.CUDA:
                        inputs, labels = inputs.cuda(), labels.cuda()
                    preds = model(inputs)

                    valid_loss.update(F.cross_entropy(preds, labels))
                    valid_accuracy.update(accuracy(preds, labels))
                    progress.set_postfix({'loss': valid_loss.avg.item(),
                                          'accuracy': 100. * valid_accuracy.avg.item()})
                    progress.update(1)

        # Only rank 0 writes TensorBoard scalars to avoid duplicate logs.
        if hvd.rank() == 0:
            valid_summary_writer.add_scalar('info/loss', valid_loss.avg, epoch)
            valid_summary_writer.add_scalar('metric/accuracy', valid_accuracy.avg, epoch)