def test_model_backward(model_name, batch_size):
    """Run a single forward + backward pass with each model"""
    model = create_model(model_name, pretrained=False, num_classes=42)
    num_params = sum([x.numel() for x in model.parameters()])
    model.eval()

    input_size = model.default_cfg['input_size']
    if not is_model_default_key(model_name, 'fixed_input_size'):
        min_input_size = get_model_default_value(model_name, 'min_input_size')
        if min_input_size is not None:
            input_size = min_input_size
        else:
            if any([x > MAX_BWD_SIZE for x in input_size]):
                # cap backward test at MAX_BWD_SIZE (128 x 128) to keep resource usage down
                input_size = tuple([min(x, MAX_BWD_SIZE) for x in input_size])

    inputs = torch.randn((batch_size, *input_size))
    outputs = model(inputs)
    outputs.mean().backward()
    for n, x in model.named_parameters():
        assert x.grad is not None, f'No gradient for {n}'
    num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])

    assert outputs.shape[-1] == 42
    assert num_params == num_grad, 'Some parameters are missing gradients'
    assert not torch.isnan(outputs).any(), 'Output included NaNs'
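
# Hedged sketch, not part of the original suite: the gradient-coverage check
# above factored into a reusable helper. Assumes ``model`` has just had
# ``.backward()`` called on a scalar derived from its output.
def _assert_all_params_have_grad(model):
    for n, p in model.named_parameters():
        assert p.grad is not None, f'No gradient for {n}'
    num_params = sum(p.numel() for p in model.parameters())
    num_grad = sum(p.grad.numel() for p in model.parameters() if p.grad is not None)
    assert num_params == num_grad, 'Some parameters are missing gradients'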
def test_model_forward_torchscript(model_name, batch_size):
    """Run a single forward pass with each model"""
    with set_scriptable(True):
        model = create_model(model_name, pretrained=False)
    model.eval()

    if has_model_default_key(model_name, 'fixed_input_size'):
        input_size = get_model_default_value(model_name, 'input_size')
    elif has_model_default_key(model_name, 'min_input_size'):
        input_size = get_model_default_value(model_name, 'min_input_size')
    else:
        # jit compile is already a bit slow and we've tested normal res already...
        input_size = (3, 128, 128)

    model = torch.jit.script(model)
    outputs = model(torch.randn((batch_size, *input_size)))

    assert outputs.shape[0] == batch_size
    assert not torch.isnan(outputs).any(), 'Output included NaNs'
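
# Hedged usage sketch (illustrative only): a standalone scriptability smoke
# check for a single model, mirroring the test above. The model name and
# fallback input size are example values, not suite configuration.
def _check_scriptable(model_name='resnet18', input_size=(3, 128, 128)):
    with set_scriptable(True):
        model = create_model(model_name, pretrained=False)
    model.eval()
    scripted = torch.jit.script(model)
    return scripted(torch.randn((1, *input_size)))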
def test_model_forward_features(model_name, batch_size):
    """Run a single forward pass with each model in feature extraction mode"""
    model = create_model(model_name, pretrained=False, features_only=True)
    model.eval()
    expected_channels = model.feature_info.channels()
    # all models here should have at least 4 feature levels by default, some 5 or 6
    assert len(expected_channels) >= 4

    if has_model_default_key(model_name, 'fixed_input_size'):
        input_size = get_model_default_value(model_name, 'input_size')
    elif has_model_default_key(model_name, 'min_input_size'):
        input_size = get_model_default_value(model_name, 'min_input_size')
    else:
        # use a small default resolution to keep test resource usage down
        input_size = (3, 96, 96)

    outputs = model(torch.randn((batch_size, *input_size)))
    assert len(expected_channels) == len(outputs)
    for e, o in zip(expected_channels, outputs):
        assert e == o.shape[1]
        assert o.shape[0] == batch_size
        assert not torch.isnan(o).any()
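
# Hedged sketch (illustrative only): inspecting the feature pyramid metadata
# the assertions above rely on. ``feature_info.reduction()`` is timm's
# per-level stride list; the model name and example values are assumptions.
def _describe_feature_levels(model_name='resnet50'):
    model = create_model(model_name, pretrained=False, features_only=True)
    # e.g. for resnet50: [(64, 2), (256, 4), (512, 8), (1024, 16), (2048, 32)]
    return list(zip(model.feature_info.channels(), model.feature_info.reduction()))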
def _get_input_size(model=None, model_name='', target=None):
    """Resolve a test input size for a model, honoring fixed/min input size
    hints and capping each dimension at ``target`` when the default exceeds it."""
    if model is None:
        assert model_name, "One of model or model_name must be provided"
        input_size = get_model_default_value(model_name, 'input_size')
        fixed_input_size = get_model_default_value(model_name, 'fixed_input_size')
        min_input_size = get_model_default_value(model_name, 'min_input_size')
    else:
        default_cfg = model.default_cfg
        input_size = default_cfg['input_size']
        fixed_input_size = default_cfg.get('fixed_input_size', None)
        min_input_size = default_cfg.get('min_input_size', None)
    assert input_size is not None

    if fixed_input_size:
        return input_size

    if min_input_size:
        if target and max(input_size) > target:
            input_size = min_input_size
    else:
        if target and max(input_size) > target:
            input_size = tuple([min(x, target) for x in input_size])

    return input_size
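
# Hedged usage sketch (illustrative only): how a forward test could consume
# ``_get_input_size``. The model name, batch size, and ``target`` cap are
# example values; the suite's actual parametrization is not shown here.
def _example_forward(model_name='resnet18', batch_size=2, target=128):
    model = create_model(model_name, pretrained=False)
    model.eval()
    input_size = _get_input_size(model=model, target=target)
    outputs = model(torch.randn((batch_size, *input_size)))
    assert outputs.shape[0] == batch_size
    return outputs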