Example #1
def _alloc_storage(data: torch.Tensor, size: torch.Size) -> None:
    """Allocate storage for a tensor."""
    if data.storage().size() == size.numel():  # no need to reallocate
        return
    assert (data.storage().size() == 0
            ), "The tensor storage should have been resized to 0."
    data.storage().resize_(size.numel())  # type: ignore[attr-defined]
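A minimal illustration of the storage-resizing call guarded above (plain torch, nothing else assumed):

import torch

# A tensor whose storage was freed down to zero can be re-allocated in place:
t = torch.empty(0)
assert t.storage().size() == 0
t.storage().resize_(torch.Size((2, 3)).numel())
print(t.storage().size())  # 6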
Example #2
def basic_checks(posterior, N: int = int(5e4)):
    """Makes some basic checks to ensure the distribution is well defined.

    Args:
        posterior: Variational posterior object to check. Of type `VIPosterior`. No
            typing due to circular imports.
        N: Number of samples that are checked.
    """
    prior = posterior._prior
    assert prior is not None, "Posterior has no `._prior` attribute."
    prior_samples = prior.sample(Size((N, )))
    samples = posterior.sample(Size((N, )))
    assert torch.isfinite(samples).all(), "Some of the samples are not finite."
    try:
        _ = prior.support
        has_support = True
    except (NotImplementedError, AttributeError):
        has_support = False
    if has_support:
        assert (
            prior.support.check(samples)  # type: ignore
        ).all(), "Some of the samples are not within the prior support."
    assert torch.isfinite(posterior.log_prob(samples)).all(), \
        "The log probability is not finite for some samples."
    assert torch.isfinite(posterior.log_prob(prior_samples)).all(), \
        "The log probability is not finite for some prior samples."
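The `prior.support` probe above follows the standard torch.distributions constraint API; a minimal standalone sketch (assuming only torch):

import torch
from torch.distributions import Uniform

prior = Uniform(torch.zeros(2), torch.ones(2))
samples = prior.sample(torch.Size((100, )))
inside = prior.support.check(samples)  # boolean tensor, True where samples lie in the support
assert inside.all()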
Example #3
def test_default_box_custom_anchors():
    kwargs = {
        'image_size': 160,
        'steps': [8, 16, 32],
        'aspect_ratios': [2, 3],  ## since we directly specify anchors, ar will be ignored
        'anchors': [
            [[0.2, 0.2], [0.2, 0.1], [0.1, 0.2]],  ## for 20x20
            [[0.5, 0.5], [0.5, 0.2], [0.2, 0.5]],  ## for 10x10
            [[0.9, 0.9], [0.9, 0.5], [0.5, 0.9]],  ## for 5x5
        ],
        'variance': [0.1, 0.2],
        'clip': 1,
    }
    default_box = DefaultBox(**kwargs)
    boxes = default_box()
    assert torch.all(eq(default_box.feature_maps, tensor([20, 10, 5])))
    assert default_box.anchors.dim() == 3
    assert default_box.anchors.size() == Size([3, 3, 2])
    assert boxes.dim() == 2
    """
    2+2 -> cxywh
    """
    assert boxes.size() == Size([(20 * 20 + 10 * 10 + 5 * 5) * 3, 2 + 2])
Example #4
def get_traverse():
    return [
        (torch.nn.Linear(3, 3),
         lambda x: [x],
         None),
        (BayesianModule(3, 3, Normal(0, 1)),
         lambda x: [type(x.weight_prior)],
         [Normal]),
        (NormalLinear(3, 3, Normal(0, 1)),
         lambda x: [x.bias is not None],
         [True]),
        (NormalLinear(3, 3, False, Normal(0, 1)),
         lambda x: [x.bias is not None],
         [False]),
        (ComposableBNN(3, 4,
                       NormalLinear(3, 4, Normal(0, 1))),
         lambda x: [x.bias is not None],
         [True]),
        (ComposableBNN(3, 4,
                       torch.nn.Sequential(
                           NormalLinear(3, 4, Normal(0, 1)),
                           NormalLinear(4, 2, Normal(0, 1)),
                           NormalLinear(2, 1, Normal(0, 1)))),
         lambda x: [x.weight.shape],
         [Size([4, 3]), Size([2, 4]), Size([1, 2])])
    ]
Example #5
def test_darknet53_retinaface():
    """
    training
    """
    retinaface = RetinaFace(image_size=640, backbone='darknet53', pyramid_channels=256)
    bounding_boxes, classifications, landmarks = retinaface(torch.rand(1, 3, 640, 640))
    n_anchors = 2
    assert retinaface.head.n_anchors == n_anchors
    feature_maps = [80, 40, 20]
    grid_size = [f ** 2 for f in feature_maps]
    feature_shape = [g * n_anchors for g in grid_size]
    prediction_channels = sum(feature_shape)
    assert landmarks.dim() == 3
    assert bounding_boxes.dim() == 3
    assert classifications.dim() == 3
    assert landmarks.size() == Size([1, prediction_channels, 10])
    assert bounding_boxes.size() == Size([1, prediction_channels, 4])
    assert classifications.size() == Size([1, prediction_channels, 2])

    """
    eval
    """
    retinaface.eval()
    predictions = retinaface(torch.rand(1, 3, 640, 640))
    assert predictions.dim() == 3
    assert predictions.size() == Size([1, prediction_channels, 16])
Example #6
    def check_transform_forward(self, model):
        train_y = randn(2, 10, 4, 5, device=self.device)
        train_y_var = rand(2, 10, 4, 5, device=self.device)

        output, output_var = model.outcome_transform.forward(train_y)
        self.assertEqual(output.shape, Size((2, 10, 4, 5)))
        self.assertEqual(output_var, None)

        output, output_var = model.outcome_transform.forward(train_y, train_y_var)
        self.assertEqual(output.shape, Size((2, 10, 4, 5)))
        self.assertEqual(output_var.shape, Size((2, 10, 4, 5)))
Example #7
    def check_transform_untransform(self, model):
        output, output_var = model.outcome_transform.untransform(
            randn(2, 2, 4, 5, device=self.device))
        self.assertEqual(output.shape, Size((2, 2, 4, 5)))
        self.assertEqual(output_var, None)

        output, output_var = model.outcome_transform.untransform(
            randn(2, 2, 4, 5, device=self.device),
            rand(2, 2, 4, 5, device=self.device),
        )
        self.assertEqual(output.shape, Size((2, 2, 4, 5)))
        self.assertEqual(output_var.shape, Size((2, 2, 4, 5)))
Example #8
def test_default_box_simple():
    """
    tests for a single aspect ratio
    note: in the SSD paper, a default box of scale sqrt(s_k * s_{k+1}) is added on top
        of the aspect-ratio-1 box; in other words, if your aspect ratios include 1, an
        'augmented' aspect ratio is added, so the total is len(aspect_ratios) + 1
    """
    kwargs = {
        'image_size': 160,
        'steps': [8, 16, 32],
        'aspect_ratios': [1],
        'variance': [0.1, 0.2],
        'clip': 1,
    }
    default_box = DefaultBox(**kwargs)
    boxes = default_box()
    assert torch.all(eq(default_box.feature_maps, tensor([20, 10, 5])))
    assert default_box.anchors.dim() == 3
    """
    3 -> number of feature maps
    2 -> n aspect ratio (+1)
    2 -> width and height value
    """
    assert default_box.anchors.size() == Size([3, 2, 2])
    assert boxes.dim() == 2
    """
    (20*20+10*10+5*5)*2 -> number of grid cells * number of aspect ratios
    2+2 -> cxywh
    """
    assert boxes.size() == Size([(20 * 20 + 10 * 10 + 5 * 5) * 2, 2 + 2])
    kwargs = {
        'image_size': 160,
        'steps': [8, 16, 32],
        'aspect_ratios': [2, 3],
        'variance': [0.1, 0.2],
        'clip': 1,
    }
    default_box = DefaultBox(**kwargs)
    boxes = default_box()
    assert torch.all(eq(default_box.feature_maps, tensor([20, 10, 5])))
    assert default_box.anchors.dim() == 3
    """
    3 -> number of feature maps
    4 -> n aspect ratio * 2 (for 1/2 and 1/3)
    2 -> width and height value
    """
    assert default_box.anchors.size() == Size([3, 4, 2])
    assert boxes.dim() == 2
    """
    (20*20+10*10+5*5)*4 -> number of grid cells * number of aspect ratios
    2+2 -> cxywh
    """
    assert boxes.size() == Size([(20 * 20 + 10 * 10 + 5 * 5) * 4, 2 + 2])
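The box counts asserted above follow directly from the strides; a small sketch of that arithmetic (the names here are illustrative, not part of DefaultBox):

image_size, steps, n_anchors = 160, [8, 16, 32], 4
feature_maps = [image_size // s for s in steps]         # [20, 10, 5]
n_boxes = sum(f * f for f in feature_maps) * n_anchors  # grid cells * anchors per cell
assert n_boxes == (20 * 20 + 10 * 10 + 5 * 5) * 4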
Example #9
def build_embedding(input_size: torch.Size,
                    arch: Optional[str] = None,
                    **kwargs) -> tuple:
    flatten = nn.Flatten(-len(input_size))

    if arch == 'MLP':
        net = amnre.MLP(input_size.numel(), **kwargs)
        return nn.Sequential(flatten, net), net.output_size
    elif arch == 'ResNet':
        net = amnre.ResNet(input_size.numel(), **kwargs)
        return nn.Sequential(flatten, net), net.output_size
    else:
        return flatten, input_size.numel()
Example #10
def psis_diagnostics(
        potential_function: Callable,
        q: Distribution,
        proposal: Optional[Distribution] = None,
        N: int = int(5e4),
) -> float:
    r"""This will evaluate the posteriors quality by investingating its importance
    weights. If q is a perfect posterior approximation then $q(\theta) \propto
    p(\theta, x_o)$ thus $\log w(\theta) = \log \frac{p(\theta, x_o)}{\log q(\theta)} =
    \log p(x_o)$ is constant. This function will fit a Generalized Paretto
    distribution to the tails of w. The shape parameter k serves as metric as detailed
    in [1]. In short it is related to the variance of a importance sampling estimate,
    especially for k > 1 the variance will be infinite.

    NOTE: In our experience this metric does distinguish "very bad" from "ok", but
    becomes less sensitive to distinguish "ok" from "good".

    Args:
        potential_function: Potential function of target.
        q: Variational distribution; should be proportional to the potential_function.
        proposal: Proposal for samples. Typically this is q.
        N: Number of samples involved in the test.

    Returns:
        float: Quality metric

    Reference:
        [1] _Yes, but Did It Work?: Evaluating Variational Inference_, Yuling Yao, Aki
        Vehtari, Daniel Simpson, Andrew Gelman, 2018, https://arxiv.org/abs/1802.02538

    """
    M = int(min(N / 5, 3 * np.sqrt(N)))
    with torch.no_grad():
        if proposal is None:
            samples = q.sample(Size((N, )))
        else:
            samples = proposal.sample(Size((N, )))
        log_q = q.log_prob(samples)
        log_potential = potential_function(samples)
        logweights = log_potential - log_q
        logweights = logweights[torch.isfinite(logweights)]
        logweights_max = logweights.max()
        weights = torch.exp(logweights -
                            logweights_max)  # subtracting the max only affects the scale
        vals, _ = weights.sort()
        largest_weights = vals[-M:]
        k, _ = gpdfit(largest_weights)
    return k
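A hypothetical smoke test, assuming `psis_diagnostics` and its `gpdfit` dependency are importable; the distributions and expected magnitudes are illustrative only:

import torch
from torch.distributions import Normal

target = Normal(torch.tensor(0.0), torch.tensor(1.2))  # slightly wider target
q = Normal(torch.tensor(0.0), torch.tensor(1.0))       # variational approximation
potential_fn = lambda theta: target.log_prob(theta)    # unnormalized is fine too
k = psis_diagnostics(potential_fn, q, N=int(5e4))
print(f"Pareto k: {k:.3f}")  # a small k indicates reliable importance weights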
Example #11
    def add_coordinates(self, layer_index, coordinates, lr, avg=1):
        param_coords = []
        gradients = []
        s = []

        # extract coordinate-gradient pairs and combine gradients at the same coordinate
        for coord in coordinates:
            cd = coord[0][1:]
            coord[1] /= avg

            if cd in param_coords:
                gradients[param_coords.index(cd)] += coord[1]
            else:
                param_coords.append(cd)
                gradients.append(coord[1])

        # get corresponding parameters
        params = [p for p in self.parameters()]

        # create coordinate/index tensor i, and value tensor v
        try:
            grads = FloatTensor(gradients)
            shape = list(params[layer_index].size())

            if len(shape) > 1:
                # update parameters with gradients at particular coordinates
                grads = sparse.FloatTensor(
                    LongTensor(param_coords).t(), grads,
                    Size(shape)).to_dense()

            params[layer_index].data.add_(-lr * grads)
        except Exception as e:
            self.log.exception("Unexpected exception! %s", e)
Example #12
    def sample_shape(self) -> Size:
        sample_models = Size([])
        sample_parameters = Size([])
        if len(self._parameters) > 0:
            sample_parameters = max(
                [
                    parameter.shape[:-1]
                    for parameter in self._parameters.values()
                ],
                key=len,
            )
        if len(self._models) > 0:
            sample_models = max(
                [model.sample_shape for model in self._models.values()],
                key=len)
        return max(sample_models, sample_parameters, key=len)
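The broadcasting rule above simply keeps the longest shape; in isolation:

from torch import Size

shapes = [Size([]), Size([5]), Size([2, 3])]
print(max(shapes, key=len))  # torch.Size([2, 3])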
Example #13
    def test_fc_densenet(self):
        densenet = FCDenseNet(in_channels=self.rgb_channels,
                              out_channels=self.num_classes,
                              initial_num_features=24,
                              dropout=0.2,
                              down_dense_growth_rates=8,
                              down_dense_bottleneck_ratios=None,
                              down_dense_num_layers=(4, 5, 7),
                              down_transition_compression_factors=1.0,
                              middle_dense_growth_rate=8,
                              middle_dense_bottleneck=None,
                              middle_dense_num_layers=10,
                              up_dense_growth_rates=8,
                              up_dense_bottleneck_ratios=None,
                              up_dense_num_layers=(7, 5, 4))

        print(densenet)
        print('Layers:', count_conv2d(densenet))
        print('Parameters:', count_parameters(densenet))

        logits = densenet(self.images)
        print('Logits:', logits.shape)
        self.assertEqual(
            logits.shape,
            Size((self.batch_size, self.num_classes, self.H, self.W)))
Example #14
    def test_transforms(self):
        train_x = rand(10, 3, device=self.device)
        train_y = randn(10, 4, 5, device=self.device)

        # test handling of Standardize
        with self.assertWarns(RuntimeWarning):
            model = HigherOrderGP(train_X=train_x,
                                  train_Y=train_y,
                                  outcome_transform=Standardize(m=5))
        self.assertIsInstance(model.outcome_transform, FlattenedStandardize)
        self.assertEqual(model.outcome_transform.output_shape,
                         train_y.shape[1:])
        self.assertEqual(model.outcome_transform.batch_shape, Size())

        model = HigherOrderGP(
            train_X=train_x,
            train_Y=train_y,
            input_transform=Normalize(d=3),
            outcome_transform=FlattenedStandardize(train_y.shape[1:]),
        )
        mll = ExactMarginalLogLikelihood(model.likelihood, model)
        fit_gpytorch_torch(mll, options={"maxiter": 1, "disp": False})

        test_x = rand(2, 5, 3, device=self.device)
        test_y = randn(2, 5, 4, 5, device=self.device)
        posterior = model.posterior(test_x)
        self.assertIsInstance(posterior, TransformedPosterior)

        conditioned_model = model.condition_on_observations(test_x, test_y)
        self.assertIsInstance(conditioned_model, HigherOrderGP)

        self.check_transform_forward(model)
        self.check_transform_untransform(model)
Example #15
    def forward(self, input: Tensor, score_threshold: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        assert input.dim() == 3
        assert input.size(0) == 1  ## single batch for now
        assert input.size(2) == (4 + self.n_classes)
        assert score_threshold.dim() == 1
        assert score_threshold.size() == Size([1])
        bounding_boxes = input[..., 0:4]
        classifications = input[..., 4:]
        classifications = F.softmax(classifications, dim=-1).squeeze(0)
        class_conf, class_pred = classifications.max(1, keepdim=True)
        ## take objects only
        object_indices = torch.gt(class_pred, 0)
        object_indices = object_indices.squeeze(1).nonzero().squeeze(1)
        class_conf = class_conf.index_select(0, object_indices)
        class_pred = class_pred.index_select(0, object_indices)
        bounding_boxes = decode(bounding_boxes.squeeze(0), self.priors, variances=self.variance)
        bounding_boxes = bounding_boxes.index_select(0, object_indices)
        ## keep confidences greater than the threshold
        indices = torch.gt(class_conf, score_threshold)
        indices = indices.squeeze(1).nonzero().squeeze(1)
        bounding_boxes = bounding_boxes.index_select(0, indices)
        class_conf = class_conf.index_select(0, indices)
        class_pred = class_pred.index_select(0, indices)
        class_pred = class_pred - 1
        ## final detection tensor
        detections = torch.cat((bounding_boxes, class_conf, class_pred.float()), dim=-1)
        return bounding_boxes, class_conf.squeeze(1), class_pred.squeeze(1), detections.unsqueeze(0)
Example #16
    def test_fantasize(self):
        manual_seed(0)
        test_x = rand(2, 5, 1, device=self.device)
        sampler = IIDNormalSampler(num_samples=32).to(self.device)

        _ = self.model.posterior(test_x)
        fantasy_model = self.model.fantasize(test_x, sampler=sampler)
        self.assertIsInstance(fantasy_model, HigherOrderGP)
        self.assertEqual(fantasy_model.train_inputs[0].shape[:2], Size((32, 2)))
Example #17
def test_intersect_simple():
    box_a = [0.45, 0.45, 0.55, 0.55]
    box_b = [0.45, 0.45, 0.50, 0.50]
    box_a = tensor(box_a).unsqueeze(0)
    box_b = tensor(box_b).unsqueeze(0)
    intersection = ssd_utils.intersect(box_a, box_b)
    assert intersection.dim() == 2
    assert intersection.size() == Size([1, 1])
    assert allclose(intersection, tensor([0.05 * 0.05]))
Example #18
def proportional_to_joint_diagnostics(
        potential_function: Callable,
        q: Distribution,
        proposal: Optional[Distribution] = None,
        N: int = int(5e4),
) -> float:
    r"""This will evaluate the posteriors quality by investingating its importance
    weights. If q is a perfect posterior approximation then $q(\theta) \propto
    p(\theta, x_o)$. Thus we should be able to fit a line to $(q(\theta),
    p(\theta, x_o))$, whereas the slope will be proportional to the normalizing
    constant. The quality of a linear fit is hence a direct metric for the quality of q.
    We use R2 statistic.

    NOTE: In our experience this metric does distinguish "good" from "ok", but
    becomes less sensitive to distinguish "very bad" from "ok".

    Args:
        potential_function: Potential function of target.
        q: Variational distribution; should be proportional to the potential_function.
        proposal: Proposal for samples. Typically this is q.
        N: Number of samples involved in the test.

    Returns:
        float: Quality metric

    """

    with torch.no_grad():
        if proposal is None:
            samples = q.sample(Size((N, )))
        else:
            samples = proposal.sample(Size((N, )))
        log_q = q.log_prob(samples)
        log_potential = potential_function(samples)

        X = log_q.exp().unsqueeze(-1)
        Y = log_potential.exp().unsqueeze(-1)
        w = torch.linalg.solve(X.T @ X, X.T @ Y)  # Linear regression

        residuals = Y - w * X
        var_res = torch.sum(residuals**2)
        var_tot = torch.sum((Y - Y.mean())**2)
        r2 = 1 - var_res / var_tot  # R2 statistic to evaluate fit
    return r2.item()
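To see the no-intercept regression and R2 computation in isolation, a synthetic sketch (plain torch, illustrative values):

import torch

x = torch.linspace(0.1, 1.0, 50).unsqueeze(-1)
y = 3.0 * x + 0.01 * torch.randn_like(x)
w = torch.linalg.solve(x.T @ x, x.T @ y)  # least-squares slope
residuals = y - w * x
r2 = 1 - (residuals**2).sum() / ((y - y.mean())**2).sum()
print(w.item(), r2.item())  # slope near 3, R2 near 1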
Example #19
    def __init__(
            self,
            in_features: int,
            out_features: int,
            bias: Optional[bool] = True,
            initialization: Optional[Initialization] = DEFAULT_UNIFORM,
            prior: Optional[Parameter] = DEFAULT_SCALED_GAUSSIAN_MIXTURE
    ) -> None:
        """Initialization

        Arguments:
            in_features (int): input number of features
            out_features (int): output number of features

        Keyword Arguments:
            bias (bool): presence of bias in the layer {default: True}
            initialization (Optional[Initialization]): initialization callback
                for the gaussian parameters {default: DEFAULT_UNIFORM}
            prior (Optional[Parameter]): prior of the weight
                {default: DEFAULT_SCALED_GAUSSIAN_MIXTURE}
        """
        super(Linear, self).__init__()
        self.in_features, self.out_features = in_features, out_features
        self.initialization = initialization

        size = Size((self.out_features, self.in_features))
        self.weight = Gaussian(size, self.initialization)
        self.weight_prior = prior

        if bias:
            size = Size((self.out_features, ))
            self.bias = Gaussian(size, self.initialization)
            self.bias_prior = prior
        else:
            self.bias = NoneParameter()
            self.bias_prior = NoneParameter()

#         self.log_prior = 0.0
#         self.log_variational_posterior = 0.0
        self.register_parameter(
            "log_prior", nn.Parameter(torch.tensor(0.), requires_grad=False))
        self.register_parameter(
            "log_variational_posterior",
            nn.Parameter(torch.tensor(0.), requires_grad=False))
Example #20
    def _map_mask_to_tensor(
        self,
        grouped_mask: Tensor,
        original_tensor_shape: torch.Size,
        tensor_idx: Optional[int] = None,
    ) -> Tensor:
        """
        :param grouped_mask: A binary mask the size of a tensor from group_tensor
        :param original_tensor_shape: Shape of the original tensor grouped_mask
            derives from
        :param tensor_idx: optional index this tensor was passed into a tensor
            list for mask creation
        :return: The values from grouped_mask mapped to a tensor of size
            original_tensor_shape
        """
        # expand so every element has a corresponding value in the original tensor
        block_mask = grouped_mask.expand(-1, 4).contiguous()

        # adjust for permuted shape if necessary
        original_tensor_shape = list(original_tensor_shape)
        if len(original_tensor_shape) > 2:
            original_tensor_shape.append(original_tensor_shape[1])
            del original_tensor_shape[1]

        # adjust for padding if necessary
        remainder = original_tensor_shape[-1] % 4
        if remainder != 0:
            original_tensor_shape[-1] += 4 - remainder

        # set to original shape
        block_mask = block_mask.reshape(original_tensor_shape)

        # remove padding if necessary
        if remainder != 0:
            pad_num = 4 - remainder
            block_mask = block_mask[..., :-pad_num]

        # repermute mask if necessary
        if len(original_tensor_shape) > 2:
            permute_val = list(range(len(original_tensor_shape)))
            del permute_val[-1]
            permute_val.insert(1, len(permute_val))
            block_mask = block_mask.permute(*permute_val)
        return block_mask
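A toy version of the expand/pad/reshape round-trip above, for a 1-D weight of length 6 (so one padded block), using nothing beyond torch:

import torch

grouped_mask = torch.tensor([[1.0], [0.0]])           # one value per block of four
block_mask = grouped_mask.expand(-1, 4).contiguous()  # (2, 4): value repeated per block
padded = block_mask.reshape(8)                        # padded original shape (6 -> 8)
print(padded[:-2])                                    # drop padding: [1., 1., 1., 1., 0., 0.]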
Example #21
def test_jaccard_simple():
    box_a = [0.45, 0.45, 0.55, 0.55]
    box_b = [0.45, 0.45, 0.50, 0.50]
    box_a = tensor(box_a).unsqueeze(0)
    box_b = tensor(box_b).unsqueeze(0)
    ious = ssd_utils.jaccard(box_a, box_b)
    assert ious.dim() == 2
    assert ious.size() == Size([1, 1])
    assert allclose(
        ious, tensor([0.05 * 0.05 / (0.1 * 0.1 + 0.05 * 0.05 - 0.05 * 0.05)]))
Example #22
def construct_base_samples(
    batch_shape: torch.Size,
    output_shape: torch.Size,
    sample_shape: torch.Size,
    qmc: bool = True,
    seed: Optional[int] = None,
    device: Optional[torch.device] = None,
    dtype: Optional[torch.dtype] = None,
) -> Tensor:
    r"""Construct base samples from a multi-variate standard normal N(0, I_qo).

    Args:
        batch_shape: The batch shape of the base samples to generate. Typically,
            this is used with each dimension of size 1, so as to eliminate
            sampling variance across batches.
        output_shape: The output shape (`q x o`) of the base samples to generate.
        sample_shape: The sample shape of the samples to draw.
        qmc: If True, use quasi-MC sampling (instead of iid draws).
        seed: If provided, use as a seed for the RNG.
        device: The desired device of the base samples.
        dtype: The desired dtype of the base samples.

    Returns:
        A `sample_shape x batch_shape x output_shape` dimensional tensor of base
        samples, drawn from a N(0, I_qo) distribution (using QMC if `qmc=True`).
        Here `output_shape = q x o`.

    Example:
        >>> batch_shape = torch.Size([2])
        >>> output_shape = torch.Size([3])
        >>> sample_shape = torch.Size([10])
        >>> samples = construct_base_samples(batch_shape, output_shape, sample_shape)
    """
    base_sample_shape = batch_shape + output_shape
    output_dim = output_shape.numel()
    if qmc and output_dim <= SobolEngine.MAXDIM:
        n = (sample_shape + batch_shape).numel()
        base_samples = draw_sobol_normal_samples(d=output_dim,
                                                 n=n,
                                                 device=device,
                                                 dtype=dtype,
                                                 seed=seed)
        base_samples = base_samples.view(sample_shape + base_sample_shape)
    else:
        if qmc and output_dim > SobolEngine.MAXDIM:
            warnings.warn(
                f"Number of output elements (q*d={output_dim}) greater than "
                f"maximum supported by qmc ({SobolEngine.MAXDIM}). "
                "Using iid sampling instead.",
                SamplingWarning,
            )
        with manual_seed(seed=seed):
            base_samples = torch.randn(sample_shape + base_sample_shape,
                                       device=device,
                                       dtype=dtype)
    return base_samples
Example #23
    def __init__(
        self, output_shape: Size, batch_shape: Optional[Size] = None, min_stdv: float = 1e-8
    ):
        if batch_shape is None:
            batch_shape = Size()

        super(FlattenedStandardize, self).__init__(
            m=1, outputs=None, batch_shape=batch_shape, min_stdv=min_stdv
        )

        self.output_shape = output_shape
        self.batch_shape = batch_shape
Example #24
def test_match_targets():
    box_kwargs = {
        'image_size': 160,
        'steps': [8],
        'aspect_ratios': [1],
        'variance': [0.1, 0.2],
        'clip': 1,
    }
    default_box = DefaultBox(**box_kwargs)
    priors = default_box()
    """
    simulate single gt at the center
    """
    targets = [[0.45, 0.45, 0.55, 0.55, 1]]
    targets = tensor(targets).unsqueeze(0)
    n_batch = 1
    gt_loc = targets[0][:, :-1]
    gt_cls = targets[0][:, -1]
    """
    find best default box for each targets
    """
    best_prior_overlap, best_prior_idx, best_truth_overlap, best_truth_idx = ssd_utils.match_targets(
        gt_loc, priors, 0)
    """
    """
    n_anchors = 2  ## when using ar 1, an augmented ar is automatically added! see ssd paper
    feature_maps = [20]  ## stride 8 at 160 img_size
    assert best_prior_overlap.dim() == 1
    assert best_prior_idx.dim() == 1
    assert best_prior_overlap.size() == Size([1])
    assert best_prior_idx.size() == Size([1])
    """
    best_truth_overlap holds ious between priors to gt, dimension : 1, shape : 1, grid_size * n_anchors
    """
    assert best_truth_overlap.dim() == 1
    assert best_truth_idx.dim() == 1
    assert best_truth_idx.size() == Size([(feature_maps[0]**2) * n_anchors])
    assert best_truth_overlap.size() == Size([(feature_maps[0]**2) * n_anchors
                                              ])
    """
Example #25
def test_jaccard_batched():
    box_a = [[0.45, 0.45, 0.55, 0.55], [0.6, 0.35, 0.8, 0.65],
             [0.85, 0.15, 0.95, 0.45]]
    box_b = [[0.45, 0.45, 0.50, 0.50]]
    box_a = tensor(box_a)
    box_b = tensor(box_b)
    ious = ssd_utils.jaccard(box_a, box_b)
    assert ious.dim() == 2
    assert ious.size() == Size([3, 1])
    assert allclose(
        ious,
        tensor([
            [0.0025 / (0.1 * 0.1)],  # box_a[0] with box_b
            [0.0],  # box_a[1] with box_b
            [0.0],  # box_a[2] with box_b
        ]))
    ## setting 2
    box_a = [[0.45, 0.45, 0.55, 0.55], [0.6, 0.35, 0.8, 0.65],
             [0.85, 0.15, 0.95, 0.45]]
    box_b = [
        [0.45, 0.45, 0.50, 0.50],
        [0.65, 0.45, 0.80, 0.60],
        [0.90, 0.20, 0.95, 0.50],
        [0.45, 0.45, 0.55, 0.55],
    ]
    box_a = tensor(box_a)
    box_b = tensor(box_b)
    ious = ssd_utils.jaccard(box_a, box_b)
    assert ious.dim() == 2
    assert ious.size() == Size([3, 4])
    assert allclose(
        ious,
        tensor([
            [0.25, 0.0, 0.0, 1.0],  # box_a[0] with box_b
            [0.0, 0.15 * 0.15 / (0.2 * 0.3), 0.0, 0.0],  # box_a[1] with box_b
            [
                0.0, 0.0, 0.05 * 0.25 / (0.1 * 0.3 + 0.05 * 0.3 - 0.05 * 0.25),
                0.0
            ],  # box_a[2] with box_b
        ]))
Example #26
def get_example_shape(data: dict) -> Size:
    name = data['name']
    if name == 'reference':
        ds = ReferenceDataset(**data['training'])
        x, _ = ds[0]
        return x.shape
    if name == 'video':
        train = data['training']
        return Size((3, train['height'], train['width']))
    if name == 'batch-video':
        l = data['loader']
        return Size((l['num_frames'], 3, l['height'], l['width']))
    if name == 'grasp-and-lift-eeg':
        size = data['training']['num_samples']
        lod = data['training'].get('lod', 0)
        divisor = 2 ** lod
        size = size // divisor
        size = (32, size)
        return Size(size)
    if name in ['rsna-intracranial', 'deeplesion', 'cq500']:
        lod = data['training'].get('lod', 0)
        divisor = 2 ** lod
        size = 512 // divisor
        size = (1, size, size)
        return Size(size)
    if name == 'forrestgump':
        alignment = data['training'].get('alignment', 'raw')
        if alignment is None or alignment == 'raw':
            return Size((1, 32, 160, 160))
        elif alignment == 'linear':
            # not implemented; shape would be (1, 32, 160, 160)
            raise NotImplementedError
        elif alignment == 'nonlinear':
            return Size((1, 48, 132, 175))
        else:
            raise ValueError(f"unknown alignment '{alignment}'")
    if name not in dataset_dims:
        raise ValueError(f'unknown dataset "{name}"')
    return Size(dataset_dims[name])
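For instance, a 'video' config resolves to a CHW shape:

cfg = {'name': 'video', 'training': {'height': 128, 'width': 160}}
print(get_example_shape(cfg))  # torch.Size([3, 128, 160])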
Example #27
    def test_initialize_latents(self):
        manual_seed(0)

        train_x = rand(10, 1, device=self.device)
        train_y = randn(10, 3, 5, device=self.device)

        for latent_dim_sizes in [[1, 1], [2, 3]]:
            for latent_init in ["gp", "default"]:
                self.model = HigherOrderGP(
                    train_x,
                    train_y,
                    num_latent_dims=latent_dim_sizes,
                    latent_init=latent_init,
                )
                self.assertEqual(
                    self.model.latent_parameters[0].shape,
                    Size((3, latent_dim_sizes[0])),
                )
                self.assertEqual(
                    self.model.latent_parameters[1].shape,
                    Size((5, latent_dim_sizes[1])),
                )
Example #28
def test_vgg16_fpn_ssd():
    strides = [8, 16, 32, 64, 128]
    feature_maps = [80, 40, 20, 10, 5]
    grids = [s**2 for s in feature_maps]
    n_predictions = sum([g * 2 for g in grids])  # 2 = n_anchors per grid cell
    ssd = FPNSSD(image_size=640,
                 n_classes=4,
                 pyramid_channels=256,
                 backbone='vgg16')
    assert ssd.head.n_anchors == 2
    bounding_boxes, classifications = ssd(torch.rand(1, 3, 640, 640))
    assert bounding_boxes.dim() == 3
    assert classifications.dim() == 3
    assert bounding_boxes.size() == Size([1, n_predictions, 4])
    assert classifications.size() == Size([1, n_predictions, 4 + 1])
    """
    eval
    """
    ssd.eval()
    predictions = ssd(torch.rand(1, 3, 640, 640))
    assert predictions.dim() == 3
    assert predictions.size() == Size([1, n_predictions, 9])
Example #29
    def test_more_img_requests_than_available(self):

        # Test enough imgs in each dir
        # for the desired number of imgs
        # to display. Write summaries to a tmp
        # file underneath this script's dir, which
        # will be removed later.
        plotter = TensorBoardPlotter()
        grid = plotter.write_img_grid(self.writer,
                                      self.data_root,
                                      num_imgs=40,
                                      unittesting=False)
        self.assertEqual(grid.shape, Size([3, 430, 3290]))
Example #30
def test_intersect_batched():
    box_a = [[0.45, 0.45, 0.55, 0.55], [0.6, 0.35, 0.8, 0.65],
             [0.85, 0.15, 0.95, 0.45]]
    box_b = [[0.45, 0.45, 0.50, 0.50]]
    box_a = tensor(box_a)
    box_b = tensor(box_b)
    intersection = ssd_utils.intersect(box_a, box_b)
    assert intersection.dim() == 2
    assert intersection.size() == Size([3, 1])
    assert allclose(
        intersection,
        tensor([
            [0.0025],  # box_a[0] with box_b
            [0.0],  # box_a[1] with box_b
            [0.0],  # box_a[2] with box_b
        ]))
    ## setting 2
    box_a = [[0.45, 0.45, 0.55, 0.55], [0.6, 0.35, 0.8, 0.65],
             [0.85, 0.15, 0.95, 0.45]]
    box_b = [
        [0.45, 0.45, 0.50, 0.50],
        [0.65, 0.45, 0.80, 0.60],
        [0.90, 0.20, 0.95, 0.50],
        [0.45, 0.45, 0.55, 0.55],
    ]
    box_a = tensor(box_a)
    box_b = tensor(box_b)
    intersection = ssd_utils.intersect(box_a, box_b)
    assert intersection.dim() == 2

    assert intersection.size() == Size([3, 4])
    assert allclose(
        intersection,
        tensor([
            [0.0025, 0.0, 0.0, 0.01],  # box_a[0] with box_b
            [0.0, 0.15 * 0.15, 0.0, 0.0],  # box_a[1] with box_b
            [0.0, 0.0, 0.05 * 0.25, 0.0],  # box_a[2] with box_b
        ]))
Example #31
def _make_linear_constraints(
    indices: Tensor,
    coefficients: Tensor,
    rhs: float,
    shapeX: torch.Size,
    eq: bool = False,
) -> List[ScipyConstraintDict]:
    r"""Create linear constraints to be used by `scipy.minimize`.

    Encodes constraints of the form
    `\sum_i (coefficients[i] * X[..., indices[i]]) ? rhs`
    where `?` can be designated either as `>=` by setting `eq=False`, or as
    `=` by setting `eq=True`.

    If indices is one-dimensional, the constraints are broadcasted across all
    elements of the q-batch. If indices is two-dimensional (NOT YET SUPPORTED),
    then constraints are applied across elements of a q-batch. In either case,
    constraints are created for all t-batches.

    Args:
        indices: A single-dimensional tensor of torch.long dtype, containing the
            indices of the dimensions of the feature space that occur in the
            linear constraint.
        coefficients: A single-dimensional tensor of coefficients with the same
            number of elements as `indices`.
        rhs: The right hand side of the constraint.
        shapeX: The shape of the torch tensor to construct the constraints for
            (i.e. `b x q x d`). Must have three dimensions.
        eq: If True, return an equality constraint, o/w return an inequality
            constraint (indicated by "eq" / "ineq" value of the `type` key).

    Returns:
        A list of constraint dictionaries with the following keys

        - "type": Indicates the type of the constraint ("eq" if `eq=True`, "ineq" o/w)
        - "fun": A callable evaluating the constraint value on `x`, a flattened
            version of the input tensor `X`, returning a scalar.
        - "jac": A callable evaluating the constraint's Jacobian on `x`, a flattened
            version of the input tensor `X`, returning a numpy array.
    """
    if len(shapeX) != 3:
        raise UnsupportedError("`shapeX` must be `b x q x d`")
    d = shapeX[-1]
    n = shapeX.numel()
    constraints: List[ScipyConstraintDict] = []
    coeffs = _arrayify(coefficients)
    ctype = "eq" if eq else "ineq"
    if indices[-1].max() > d - 1:
        raise RuntimeError(f"Index out of bounds for {d}-dim parameter tensor")
    # indices has two dimensions (potential constraints across q-batch elements)
    if indices.dim() == 2:
        raise NotImplementedError(
            "Constraints across elements of q-batches not yet supported"
        )
    elif indices.dim() > 2:
        raise UnsupportedError(
            "Linear constraints supported only on individual candidates and "
            "across q-batches, not across general batch shapes."
        )
    elif indices.dim() == 1:
        # indices is one-dim - broadcast constraints across q-batches and t-batches
        offsets = [shapeX[i:].numel() for i in range(1, len(shapeX))]
        for i in range(shapeX[0]):
            for j in range(shapeX[1]):
                idxr = (i * offsets[0] + j * offsets[1] + indices).tolist()
                fun = partial(
                    eval_lin_constraint, flat_idxr=idxr, coeffs=coeffs, rhs=rhs
                )
                jac = partial(lin_constraint_jac, flat_idxr=idxr, coeffs=coeffs, n=n)
                constraints.append({"type": ctype, "fun": fun, "jac": jac})
    return constraints
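A hand-rolled sketch of the flat-index arithmetic used for one broadcasted constraint, assuming `eval_lin_constraint` returns the signed residual `coeffs @ x[idxr] - rhs` (numpy only, illustrative values):

import numpy as np

b, q, d = 2, 3, 4
indices, coeffs, rhs = [0, 2], np.array([1.0, -1.0]), 0.5
x = np.arange(b * q * d, dtype=float)  # scipy sees the flattened candidate tensor
i, j = 1, 2                            # one (t-batch, q-batch) element
idxr = [i * q * d + j * d + k for k in indices]
value = coeffs @ x[idxr] - rhs         # what the "fun" callable evaluates
print(idxr, value)                     # [20, 22], 20*1.0 - 22*1.0 - 0.5 = -2.5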