Code example #1
File: test_accumulation.py Project: OBITORASU/ignite
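These snippets are excerpts from pytest test modules and omit their imports. A minimal import block they appear to assume (a sketch; the exact import paths in the project may differ slightly):

import logging
import os

import numpy as np
import pytest
import torch
from torch.nn import Linear
from torch.optim import SGD

import ignite.distributed as idist
from ignite.engine import Events, create_supervised_evaluator
from ignite.metrics import VariableAccumulation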
def _test_apex_average(device, amp_mode, opt_level):
    assert amp_mode == "apex"
    assert device == "cuda"

    model = Linear(1, 1)

    if device:
        model.to(device)

    model.weight.data.zero_()
    model.bias.data.zero_()
    optimizer = SGD(model.parameters(), 0.1)

    from apex import amp

    model, optimizer = amp.initialize(model, optimizer, opt_level=opt_level)

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100).float().to(device)

    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true)
Code example #2
File: test_accumulation.py Project: OBITORASU/ignite
def test_variable_accumulation_wrong_inputs():

    with pytest.raises(TypeError, match=r"Argument op should be a callable"):
        VariableAccumulation(1)

    with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
        mean_acc = VariableAccumulation(lambda a, x: a + x)
        mean_acc.update((1, 2))

    with pytest.raises(TypeError, match=r"Output should be a number or torch.Tensor,"):
        mean_acc = VariableAccumulation(lambda a, x: a + x)
        mean_acc.update("a")
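For contrast, a short sketch (using the same assumed API) of inputs that update() does accept, namely plain numbers and torch.Tensor values:

mean_acc = VariableAccumulation(lambda a, x: a + x)
mean_acc.update(1.0)                # plain Python numbers are accepted
mean_acc.update(torch.tensor(2.0))  # 0-dim tensors are accepted too
total, n = mean_acc.compute()       # total == tensor(3.), n == 2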
Code example #3
File: test_accumulation.py Project: OBITORASU/ignite
def _test_distrib_accumulator_device(device):

    metric_devices = [torch.device("cpu")]
    if device.type != "xla":
        metric_devices.append(idist.device())
    for metric_device in metric_devices:

        m = VariableAccumulation(lambda a, x: x, device=metric_device)
        assert m._device == metric_device
        assert (
            m.accumulator.device == metric_device
        ), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"

        m.update(torch.tensor(1, device=device))
        assert (
            m.accumulator.device == metric_device
        ), f"{type(m.accumulator.device)}:{m.accumulator.device} vs {type(metric_device)}:{metric_device}"
Code example #4
def inference(
        cfg,
        model,
        val_loader
):
    device = cfg.MODEL.DEVICE

    logger = logging.getLogger("template_model.inference")
    logger.info("Start inferencing")
    evaluator = create_supervised_evaluator(
        model,
        # metrics={'accuracy': Accuracy()},
        device=device,
    )

    def concat_(a, x):
        # the accumulator starts out as a 0-dim tensor; broadcasting
        # a + x.cpu() turns the first update into a copy of x
        if a.dim() == 0:
            return a + x.cpu()
        return torch.cat((a, x.cpu()))

    pred_accumulator = VariableAccumulation(op=concat_, output_transform=lambda x: x[0])
    pred_accumulator.attach(evaluator, 'pred_accumulator')

    label_accumulator = VariableAccumulation(op=concat_, output_transform=lambda x: x[1])
    label_accumulator.attach(evaluator, 'label_accumulator')

    # adding handlers using `evaluator.on` decorator API
    @evaluator.on(Events.EPOCH_COMPLETED)
    def compute_metrics(engine):
        preds = pred_accumulator.compute()[0]
        labels = label_accumulator.compute()[0]

        np.save(os.path.join(cfg.OUTPUT_DIR, 'preds.npy'), preds)
        np.save(os.path.join(cfg.OUTPUT_DIR, 'labels.npy'), labels)
        print(preds.size())
        # now do whatever you would like with computed metrics

    # adding handlers using `evaluator.on` decorator API
    # @evaluator.on(Events.EPOCH_COMPLETED)
    # def print_validation_results(engine):
    #     metrics = evaluator.state.metrics
    #     avg_acc = metrics['accuracy']
    #     logger.info("Validation Results - Accuracy: {:.3f}".format(avg_acc))

    @evaluator.on(Events.ITERATION_COMPLETED)
    def log_iter(engine):
        # 1-based iteration index within the current epoch
        # (avoid shadowing the built-in iter())
        iteration = (engine.state.iteration - 1) % len(val_loader) + 1
        logger.info("Iteration[{}/{}]".format(iteration, len(val_loader)))

    evaluator.run(val_loader)
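A hypothetical invocation of the function above; the cfg layout here is an assumption, since the snippet only reads cfg.MODEL.DEVICE and cfg.OUTPUT_DIR:

from types import SimpleNamespace

cfg = SimpleNamespace(
    MODEL=SimpleNamespace(DEVICE="cpu"),
    OUTPUT_DIR=".",
)
inference(cfg, model, val_loader)  # model: an nn.Module, val_loader: a DataLoader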
Code example #5
def test_variable_accumulation_mean_variable():

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100)

    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true)

    mean_var = VariableAccumulation(lambda a, x: a + x)
    y_true = torch.rand(100, 10)
    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.numpy() == pytest.approx(y_true.sum(dim=0).numpy())
    assert n == len(y_true)

    mean_var = VariableAccumulation(lambda a, x: a + x.sum(dim=0))
    # iterate over batches of 16 samples
    y_true = torch.rand(8, 16, 10)
    for y in y_true:
        mean_var.update(y)

    a, n = mean_var.compute()
    assert a.numpy() == pytest.approx(
        y_true.reshape(-1, 10).sum(dim=0).numpy())
    assert n == y_true.shape[0] * y_true.shape[1]
Code example #6
def _test_distrib_variable_accumulation(device):

    mean_var = VariableAccumulation(lambda a, x: a + x, device=device)
    y_true = torch.rand(100, device=device, dtype=torch.float64)

    for y in y_true:
        mean_var.update(y)

    y_true = idist.all_reduce(y_true)
    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true) * idist.get_world_size()
    # check that calling compute() a second time gives the same result
    a, n = mean_var.compute()
    assert a.item() == pytest.approx(y_true.sum().item())
    assert n == len(y_true) * idist.get_world_size()

    mean_var = VariableAccumulation(lambda a, x: a + x, device=device)
    y_true = torch.rand(50, 10, device=device, dtype=torch.float64)

    for y in y_true:
        mean_var.update(y)

    y_true = idist.all_reduce(y_true)
    a, n = mean_var.compute()
    assert n == len(y_true) * idist.get_world_size()
    np.testing.assert_almost_equal(a.cpu().numpy(),
                                   y_true.sum(dim=0).cpu().numpy(),
                                   decimal=4)
    a, n = mean_var.compute()
    assert n == len(y_true) * idist.get_world_size()
    np.testing.assert_almost_equal(a.cpu().numpy(),
                                   y_true.sum(dim=0).cpu().numpy(),
                                   decimal=4)
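As a closing sketch (not part of the source tests), the (accumulated value, count) pair returned by compute() turns into a running mean directly; ignite's higher-level Average metric is built on the same accumulation:

mean_var = VariableAccumulation(lambda a, x: a + x)
for y in torch.rand(100):
    mean_var.update(y)

total, n = mean_var.compute()
mean = total / n  # same idea as ignite.metrics.Average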