Code Example #1
def evaluate(model, data_loader, device, device_ids, distributed, num_classes,
             log_freq=1000, title=None, header='Test:'):
    model.to(device)
    if distributed:
        model = DistributedDataParallel(model, device_ids=device_ids)
    elif device.type.startswith('cuda'):
        model = DataParallel(model, device_ids=device_ids)

    if title is not None:
        logger.info(title)

    model.eval()
    metric_logger = MetricLogger(delimiter='  ')
    seg_evaluator = SegEvaluator(num_classes)
    for sample_batch, targets in metric_logger.log_every(data_loader, log_freq, header):
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        model_time = time.time()
        outputs = model(sample_batch)
        model_time = time.time() - model_time
        outputs = outputs['out']
        evaluator_time = time.time()
        seg_evaluator.update(targets.flatten(), outputs.argmax(1).flatten())
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    seg_evaluator.reduce_from_all_processes()
    logger.info(seg_evaluator)
    return seg_evaluator
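
The SegEvaluator used above is defined outside this snippet. A minimal confusion-matrix sketch, modeled on the ConfusionMatrix helper in torchvision's segmentation reference scripts (the class body, methods, and reduction step below are assumptions, not the snippet's actual definition):

import torch
import torch.distributed as dist

class SegEvaluator:
    """Confusion-matrix evaluator sketch: update() takes flattened target and
    prediction label tensors, as in the loop above."""
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.mat = torch.zeros((num_classes, num_classes), dtype=torch.int64)

    def update(self, flat_targets, flat_preds):
        n = self.num_classes
        valid = (flat_targets >= 0) & (flat_targets < n)  # skip ignore labels
        inds = n * flat_targets[valid].to(torch.int64) + flat_preds[valid]
        self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n).cpu()

    def reduce_from_all_processes(self):
        # Sum confusion matrices across workers in a distributed run; no-op otherwise.
        if dist.is_available() and dist.is_initialized():
            mat = self.mat.cuda()
            dist.all_reduce(mat)
            self.mat = mat.cpu()

    def __str__(self):
        h = self.mat.float()
        global_acc = torch.diag(h).sum() / h.sum()
        iou = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h))
        return 'global acc: {:.2f}, mean IoU: {:.2f}'.format(
            global_acc.item() * 100, iou.mean().item() * 100)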
Code Example #2
def evaluate(model, data_loader, device, device_ids, distributed, log_freq=1000, title=None, header='Test:'):
    model.to(device)
    if distributed:
        model = DistributedDataParallel(model, device_ids=device_ids)
    elif device.type.startswith('cuda'):
        model = DataParallel(model, device_ids=device_ids)

    if title is not None:
        logger.info(title)

    model.eval()
    metric_logger = MetricLogger(delimiter='  ')
    for image, target in metric_logger.log_every(data_loader, log_freq, header):
        image = image.to(device, non_blocking=True)
        target = target.to(device, non_blocking=True)
        output = model(image)
        acc1, acc5 = compute_accuracy(output, target, topk=(1, 5))
        # FIXME need to take into account that the datasets
        # could have been padded in distributed setup
        batch_size = image.shape[0]
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    top1_accuracy = metric_logger.acc1.global_avg
    top5_accuracy = metric_logger.acc5.global_avg
    logger.info(' * Acc@1 {:.4f}\tAcc@5 {:.4f}\n'.format(top1_accuracy, top5_accuracy))
    return top1_accuracy
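
The compute_accuracy helper is not shown above. A standard top-k accuracy implementation in the style of torchvision's reference scripts would look like the following sketch (the actual helper behind these snippets may differ):

import torch

def compute_accuracy(output, target, topk=(1,)):
    """Compute top-k accuracies (in percent) for the given logits and labels."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)
        # Indices of the k highest-scoring classes per sample, shape (maxk, B).
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        results = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0)
            results.append(correct_k.mul_(100.0 / batch_size))
        return results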
Code Example #3
def evaluate(model,
             data_loader,
             device,
             device_ids,
             distributed,
             num_classes,
             log_freq=1000,
             title=None,
             header='Test:'):
    model.to(device)
    if distributed:
        model = DistributedDataParallel(model, device_ids=device_ids)
    elif device.type.startswith('cuda'):
        model = DataParallel(model, device_ids=device_ids)

    if title is not None:
        logger.info(title)

    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    model.eval()
    metric_logger = MetricLogger(delimiter='  ')
    seg_evaluator = SegEvaluator(num_classes)
    for sample_batch, targets in metric_logger.log_every(
            data_loader, log_freq, header):
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(sample_batch)
        model_time = time.time() - model_time
        outputs = outputs['out']
        evaluator_time = time.time()
        seg_evaluator.update(targets.flatten(), outputs.argmax(1).flatten())
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    seg_evaluator.reduce_from_all_processes()
    avg_stats_str = 'Averaged stats: {}'.format(seg_evaluator)
    logger.info(avg_stats_str)
    torch.set_num_threads(n_threads)
    return seg_evaluator
Code Example #4
def train_one_epoch(training_box, epoch, log_freq):
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('sample/s',
                            SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for sample_batch in \
            metric_logger.log_every(training_box.train_data_loader, log_freq, header):
        start_time = time.time()
        loss = training_box(sample_batch, targets=None, supp_dict=None)
        training_box.update_params(loss)
        batch_size = len(sample_batch)
        metric_logger.update(loss=loss.item(),
                             lr=training_box.optimizer.param_groups[0]['lr'])
        metric_logger.meters['sample/s'].update(batch_size /
                                                (time.time() - start_time))
Code Example #5
def train_one_epoch(training_box, device, epoch, log_freq):
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s', SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for sample_batch, targets, supp_dict in \
            metric_logger.log_every(training_box.train_data_loader, log_freq, header):
        start_time = time.time()
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        loss = training_box(sample_batch, targets, supp_dict)
        training_box.update_params(loss)
        batch_size = sample_batch.shape[0]
        metric_logger.update(loss=loss.item(), lr=training_box.optimizer.param_groups[0]['lr'])
        metric_logger.meters['img/s'].update(batch_size / (time.time() - start_time))
        if (torch.isnan(loss) or torch.isinf(loss)) and is_main_process():
            raise ValueError('The training loop was broken due to loss = {}'.format(loss))
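
The training_box and distillation_box objects in examples #4-#7 come from the surrounding project (torchdistill builds them from YAML configs). The loops only depend on a narrow interface: the box is callable and returns a loss tensor, and it exposes update_params, optimizer, and train_data_loader. A hypothetical stand-in that satisfies the same interface for example #5's loop (every name here is illustrative, not the project's real class):

import torch

class SimpleTrainingBox:
    """Minimal object exposing the members the loop above touches."""
    def __init__(self, model, train_data_loader, lr=0.1):
        self.model = model
        self.train_data_loader = train_data_loader
        self.criterion = torch.nn.CrossEntropyLoss()
        self.optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    def __call__(self, sample_batch, targets, supp_dict):
        # supp_dict is unused in this sketch; a real box may forward it.
        return self.criterion(self.model(sample_batch), targets)

    def update_params(self, loss):
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()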
Code Example #6
def distill_one_epoch(distillation_box, device, epoch, log_freq):
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s',
                            SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for sample_batch, targets, supp_dict in \
            metric_logger.log_every(distillation_box.train_data_loader, log_freq, header):
        start_time = time.time()
        sample_batch, targets = sample_batch.to(device), targets.to(device)
        loss = distillation_box(sample_batch, targets, supp_dict)
        distillation_box.update_params(loss)
        batch_size = sample_batch.shape[0]
        metric_logger.update(
            loss=loss.item(),
            lr=distillation_box.optimizer.param_groups[0]['lr'])
        metric_logger.meters['img/s'].update(batch_size /
                                             (time.time() - start_time))
Code Example #7
def train_one_epoch(distillation_box, device, epoch, log_freq):
    metric_logger = MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value}'))
    metric_logger.add_meter('img/s',
                            SmoothedValue(window_size=10, fmt='{value}'))
    header = 'Epoch: [{}]'.format(epoch)
    for sample_batch, targets, supp_dict in \
            metric_logger.log_every(distillation_box.train_data_loader, log_freq, header):
        start_time = time.time()
        sample_batch = list(image.to(device) for image in sample_batch)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        supp_dict = default_collate(supp_dict)
        loss = distillation_box(sample_batch, targets, supp_dict)
        distillation_box.update_params(loss)
        batch_size = len(sample_batch)
        metric_logger.update(
            loss=loss.item(),
            lr=distillation_box.optimizer.param_groups[0]['lr'])
        metric_logger.meters['img/s'].update(batch_size /
                                             (time.time() - start_time))
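
In example #7 the data loader yields supp_dict as a list of per-sample dictionaries (detection loaders typically collate batches into lists), so default_collate is used to re-batch it into a single dictionary of stacked tensors. Assuming PyTorch 1.11+, where default_collate is exported from torch.utils.data:

import torch
from torch.utils.data import default_collate

# A list of per-sample dicts becomes one dict of batched tensors.
batched = default_collate([{'aux': torch.tensor(1.0)},
                           {'aux': torch.tensor(2.0)}])
print(batched)  # {'aux': tensor([1., 2.])}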
Code Example #8
def evaluate(model,
             data_loader,
             device,
             device_ids,
             distributed,
             log_freq=1000,
             title=None,
             header='Test:'):
    model.to(device)
    if distributed:
        model = DistributedDataParallel(model, device_ids=device_ids)

    if title is not None:
        logger.info(title)

    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)

    # Replace built-in print function with logger.info to log summary printed by pycocotools
    builtin_print = __builtin__.print
    __builtin__.print = log_info

    cpu_device = torch.device('cpu')
    model.eval()
    metric_logger = MetricLogger(delimiter='  ')
    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    for sample_batch, targets in metric_logger.log_every(
            data_loader, log_freq, header):
        sample_batch = list(image.to(device) for image in sample_batch)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(sample_batch)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target['image_id'].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    avg_stats_str = 'Averaged stats: {}'.format(metric_logger)
    logger.info(avg_stats_str)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()

    # Revert print function
    __builtin__.print = builtin_print

    torch.set_num_threads(n_threads)
    return coco_evaluator
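
The get_iou_types helper is also defined outside this snippet. A sketch following the torchvision detection reference scripts, which pick COCO IoU types from the (possibly DataParallel-wrapped) model class; the function used by this snippet may differ:

import torchvision
from torch.nn import DataParallel
from torch.nn.parallel import DistributedDataParallel

def get_iou_types(model):
    # Unwrap (Distributed)DataParallel to inspect the underlying model class.
    model_without_ddp = model.module \
        if isinstance(model, (DataParallel, DistributedDataParallel)) else model
    iou_types = ['bbox']
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append('segm')
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append('keypoints')
    return iou_types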