Example 1
def test_model_backward(model_name, batch_size):
    """Run a single forward pass with each model"""
    model = create_model(model_name, pretrained=False, num_classes=42)
    num_params = sum([x.numel() for x in model.parameters()])
    model.eval()

    input_size = model.default_cfg['input_size']
    if not is_model_default_key(model_name, 'fixed_input_size'):
        min_input_size = get_model_default_value(model_name, 'min_input_size')
        if min_input_size is not None:
            input_size = min_input_size
        else:
            if any([x > MAX_BWD_SIZE for x in input_size]):
                # cap backward test at 128 * 128 to keep resource usage down
                input_size = tuple([min(x, MAX_BWD_SIZE) for x in input_size])

    inputs = torch.randn((batch_size, *input_size))
    outputs = model(inputs)
    outputs.mean().backward()
    for n, x in model.named_parameters():
        assert x.grad is not None, f'No gradient for {n}'
    num_grad = sum(
        [x.grad.numel() for x in model.parameters() if x.grad is not None])

    assert outputs.shape[-1] == 42
    assert num_params == num_grad, 'Some parameters are missing gradients'
    assert not torch.isnan(outputs).any(), 'Output included NaNs'
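For context, here is a minimal sketch of the scaffolding this test implies. The import path, the use of list_models, and the batch size are assumptions inferred from the names in the body, not taken from the original file; MAX_BWD_SIZE = 128 follows from the 128 x 128 cap mentioned in the comment above.

import pytest
import torch
# Assumed import path: older timm releases export these helpers at the top level.
from timm import create_model, list_models, is_model_default_key, get_model_default_value

MAX_BWD_SIZE = 128  # backward-pass inputs capped at 128 x 128 (see comment above)

# Hypothetical parametrization: run the test over every registered model
# with a small, illustrative batch size.
@pytest.mark.parametrize('model_name', list_models())
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward(model_name, batch_size):
    ...  # body as in Example 1 above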
Example 2
def test_model_forward(model_name, batch_size):
    """Run a single forward pass with each model"""
    model = create_model(model_name, pretrained=False)
    model.eval()

    input_size = model.default_cfg['input_size']
    if any([x > MAX_FWD_SIZE for x in input_size]):
        if is_model_default_key(model_name, 'fixed_input_size'):
            pytest.skip("Fixed input size model > limit.")
        # cap forward test at max res 384 * 384 to keep resource usage down
        input_size = tuple([min(x, MAX_FWD_SIZE) for x in input_size])
    inputs = torch.randn((batch_size, *input_size))
    outputs = model(inputs)

    assert outputs.shape[0] == batch_size
    assert not torch.isnan(outputs).any(), 'Output included NaNs'
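The core of this forward check can also be reproduced standalone for a single model. A minimal sketch, assuming timm is installed; the model name and batch size are chosen purely for illustration:

import torch
from timm import create_model

model = create_model('resnet18', pretrained=False)   # illustrative model choice
model.eval()

batch_size = 2                                        # illustrative batch size
input_size = model.default_cfg['input_size']          # (channels, height, width)
inputs = torch.randn((batch_size, *input_size))
with torch.no_grad():                                 # forward only, no gradients needed
    outputs = model(inputs)

assert outputs.shape[0] == batch_size                 # one row of outputs per input
assert not torch.isnan(outputs).any(), 'Output included NaNs'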