Example #1
def test_integer_index(size: int, data: st.SearchStrategy):
    index = data.draw(integer_index(size), label="index")
    x = np.empty((size,))
    o = x[index]  # raises if invalid index
    assert isinstance(
        o, Real
    ), "An integer index should produce a number from a 1D array"
Example #2
    def test_nan_in_grad(
        self,
        inputs: ndarray,
        alpha: float,
        gamma: float,
        dtype: torch.dtype,
        data: st.SearchStrategy,
    ):
        """ Ensures, across a wide range of inputs, that the focal loss gradient is not nan. """
        targets = data.draw(
            hnp.arrays(
                dtype=int,
                shape=(inputs.shape[0], ),
                elements=st.integers(0, inputs.shape[1] - 1),
            ),
            label="targets",
        )

        inputs = tensor(inputs, dtype=dtype, requires_grad=True)
        targets = tensor(targets, dtype=torch.long)
        loss = softmax_focal_loss(inputs, targets, alpha=alpha, gamma=gamma)

        loss.backward()
        assert not np.any(np.isnan(inputs.grad.numpy())), "focal loss gradient is NaN"
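One common way to keep this gradient finite is to work in log-space; a sketch of such a numerically-stable formulation (an assumption on my part, not necessarily the implementation under test):

import torch
import torch.nn.functional as F

def softmax_focal_loss_sketch(inputs, targets, alpha=1.0, gamma=2.0):
    # log_softmax avoids computing log(softmax(x)) directly, which
    # underflows to log(0) = -inf for very negative logits
    log_pc = F.log_softmax(inputs, dim=1)[range(len(targets)), targets]
    return (-alpha * (1 - log_pc.exp()) ** gamma * log_pc).mean()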
Example #3
def test_broadcast_compat_shape(
    shape: Tuple[int, ...],
    allow_singleton: bool,
    min_dim: int,
    min_side: int,
    data: st.SearchStrategy,
):
    """ Ensures that the `broadcastable_shape` strategy:
        - produces broadcastable shapes
        - respects input parameters"""
    max_side = data.draw(st.integers(min_side, min_side + 5), label="max side")
    max_dim = data.draw(
        st.integers(min_dim, max(min_dim, len(shape) + 3)), label="max dim"
    )
    compat_shape = data.draw(
        broadcastable_shape(
            shape=shape,
            allow_singleton=allow_singleton,
            min_dim=min_dim,
            max_dim=max_dim,
            min_side=min_side,
            max_side=max_side,
        ),
        label="broadcastable_shape",
    )
    assert (
        min_dim <= len(compat_shape) <= max_dim
    ), "a shape of inappropriate dimensionality was generated by the strategy"

    a = np.empty(shape)
    b = np.empty(compat_shape)
    np.broadcast(a, b)  # error if drawn shape for b is not broadcast-compatible

    if not allow_singleton:
        small_dim = min(a.ndim, b.ndim)
        if small_dim:
            assert (
                shape[-small_dim:] == compat_shape[-small_dim:]
            ), "singleton dimensions were included by the strategy"

    if len(compat_shape) > len(shape):
        n = len(compat_shape) - len(shape)
        for side in compat_shape[:n]:
            assert (
                min_side <= side <= max_side
            ), "out-of-bound sides were generated by the strategy"
Example #4
def test_choices(seq: List[int], replace: bool, data: st.SearchStrategy):
    """ Ensures that the `choices` strategy:
        - draws from the provided sequence
        - respects input parameters"""
    upper = len(seq) + 10 if replace and seq else len(seq)
    size = data.draw(st.integers(0, upper), label="size")
    chosen = data.draw(choices(seq, size=size, replace=replace), label="choices")
    assert set(chosen) <= set(seq), (
        "choices contains elements that do not belong to `seq`"
    )
    assert len(chosen) == size, "the number of choices does not match `size`"

    if not replace and len(set(seq)) == len(seq):
        unique_choices = sorted(set(chosen))
        assert unique_choices == sorted(chosen), (
            "`choices` with `replace=False` draws elements with replacement"
        )
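A rough stand-in for such a `choices` strategy, built from Hypothesis primitives (hypothetical; assumes a non-empty `seq`):

import hypothesis.strategies as st

def choices_sketch(seq, size, replace=True):
    # with replacement: `size` independent picks; without: a truncated permutation
    if replace:
        return st.lists(st.sampled_from(seq), min_size=size, max_size=size)
    return st.permutations(seq).map(lambda perm: perm[:size])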
Example #5
def test_basic_index(shape: Tuple[int, ...], data: st.SearchStrategy):
    min_dim = data.draw(st.integers(0, len(shape) + 2), label="min_dim")
    max_dim = data.draw(st.integers(min_dim, min_dim + len(shape)), label="max_dim")
    index = data.draw(
        basic_index(shape=shape, min_dim=min_dim, max_dim=max_dim), label="index"
    )
    x = np.zeros(shape, dtype=int)
    o = x[index]  # raises if invalid index

    note("`x[index]`: {}".format(o))
    if o.size and o.ndim > 0:
        assert np.shares_memory(x, o), (
            "The basic index should produce a view of the original array."
        )
    assert min_dim <= o.ndim <= max_dim, (
        "The input dimensionality constraints were not obeyed"
    )
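The `np.shares_memory` check above relies on a NumPy guarantee: basic indexing (integers, slices, ellipsis, newaxis) returns a view, while advanced indexing returns a copy. For example:

import numpy as np

x = np.zeros((3, 4), dtype=int)
assert np.shares_memory(x, x[1:, ::2])     # basic index: a view
assert not np.shares_memory(x, x[[0, 1]])  # advanced index: a copy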
Example #6
    def choose_metrics(self, num_train_metrics: int, num_test_metrics: int,
                       data: st.SearchStrategy):
        assume(num_train_metrics + num_test_metrics > 0)
        self.train_metric_names = ["metric-a", "metric-b",
                                   "metric-c"][:num_train_metrics]

        self.test_metric_names = ["metric-a", "metric-b",
                                  "metric-c"][:num_test_metrics]
        train_colors = data.draw(
            st.lists(
                cst.matplotlib_colors(),
                min_size=num_train_metrics,
                max_size=num_train_metrics,
            ),
            label="train_colors",
        )

        test_colors = data.draw(
            st.lists(
                cst.matplotlib_colors(),
                min_size=num_test_metrics,
                max_size=num_test_metrics,
            ),
            label="test_colors",
        )

        metrics = OrderedDict((n, dict()) for n in sorted(
            set(self.train_metric_names + self.test_metric_names)))

        for metric, color in zip(self.train_metric_names, train_colors):
            metrics[metric]["train"] = color

        for metric, color in zip(self.test_metric_names, test_colors):
            metrics[metric]["test"] = color

        self.plotter = LivePlot(
            metrics,
            max_fraction_spent_plotting=data.draw(
                st.floats(0, 1), label="max_fraction_spent_plotting"),
            last_n_batches=data.draw(st.none() | st.integers(1, 100),
                                     label="last_n_batches"),
        )
        self.logger = LiveLogger()

        note("Train metric names: {}".format(self.train_metric_names))
        note("Test metric names: {}".format(self.test_metric_names))
Example #7
    def set_test_batch(self, batch_size: int, data: SearchStrategy):
        self.test_batch_set = True

        batch = {
            name: data.draw(st.floats(-1, 1), label=name)
            for name in self.test_metric_names
        }
        self.logger.set_test_batch(metrics=batch, batch_size=batch_size)
        self.plotter.set_test_batch(metrics=batch, batch_size=batch_size)
Example #8
def test_logsumexp(data: st.SearchStrategy, x: np.ndarray, keepdims: bool):
    axes = data.draw(valid_axes(ndim=x.ndim), label="axes")
    mygrad_result = logsumexp(x, axis=axes, keepdims=keepdims)
    scipy_result = special.logsumexp(x, axis=axes, keepdims=keepdims)
    assert_array_equal(
        mygrad_result,
        scipy_result,
        err_msg="mygrad's implementation of logsumexp does "
        "not match that of scipy's",
    )
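For context, a logsumexp implementation typically relies on the standard max-shift trick to stay numerically stable; a minimal NumPy sketch:

import numpy as np

def logsumexp_sketch(x, axis=None, keepdims=False):
    # shift by the max so that exp() never overflows
    m = np.max(x, axis=axis, keepdims=True)
    out = m + np.log(np.sum(np.exp(x - m), axis=axis, keepdims=True))
    return out if keepdims else np.squeeze(out, axis=axis)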
Example #9
def test_comparison_ops(
    op: str, x: np.ndarray, x_constant: bool, y_constant: bool, data: st.SearchStrategy
):
    y = data.draw(hnp.arrays(shape=x.shape, dtype=x.dtype, elements=st.floats(-10, 10)))
    x = Tensor(x, constant=x_constant)
    y = Tensor(y, constant=y_constant)
    assert hasattr(Tensor, op), "`Tensor` is missing the attribute {}".format(op)
    tensor_out = getattr(Tensor, op)(x, y)
    array_out = getattr(np.ndarray, op)(x.data, y.data)
    assert_equal(actual=tensor_out, desired=array_out)
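The `getattr` pattern used above works because comparison operators dispatch to dunder methods of the same name, e.g.:

import numpy as np

a, b = np.arange(3), np.ones(3)
assert np.array_equal(getattr(np.ndarray, "__lt__")(a, b), a < b)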
Example #10
    def set_test_batch(self, batch_size: int, data: SearchStrategy):
        self.test_batch_set = True
        batch = {
            metric.name: data.draw(
                st.floats(-1, 1) | st.floats(-1, 1).map(np.array), label=metric.name
            )
            for metric in self.test_metrics
        }
        self.logger.set_test_batch(metrics=batch, batch_size=batch_size)

        for metric in self.test_metrics:
            metric.add_datapoint(batch[metric.name], weighting=batch_size)
Example #11
    def test_matches_simple_implementation(
        self,
        inputs: ndarray,
        alpha: float,
        gamma: float,
        dtype: torch.dtype,
        data: st.SearchStrategy,
    ):
        """ Ensures that focal loss matches a naive-implementation over the domain where numerical
        stability is not an issue. """
        targets = data.draw(
            hnp.arrays(
                dtype=int,
                shape=(inputs.shape[0], ),
                elements=st.integers(0, inputs.shape[1] - 1),
            ),
            label="targets",
        )

        inputs1 = tensor(inputs, dtype=dtype, requires_grad=True)
        inputs2 = tensor(inputs, dtype=dtype, requires_grad=True)
        targets = tensor(targets, dtype=torch.int64)

        # numerically-stable focal loss
        loss = softmax_focal_loss(inputs1, targets, alpha=alpha, gamma=gamma)
        loss.backward()

        # naive focal loss
        probs = F.softmax(inputs2, dim=1)
        pc = probs[(range(len(targets)), targets)]
        naive_loss = (-alpha * (1 - pc)**gamma * torch.log(pc)).mean()
        naive_loss.backward()

        assert_allclose(
            actual=loss.detach().numpy(),
            desired=naive_loss.detach().numpy(),
            atol=1e-5,
            rtol=1e-5,
            err_msg="focal loss does not match naive implementation on "
            "numerically-stable domain",
        )
        assert_allclose(
            actual=inputs1.grad.numpy(),
            desired=inputs2.grad.numpy(),
            atol=1e-5,
            rtol=1e-5,
            err_msg="focal loss gradient does not match that of naive loss on "
            "numerically-stable domain",
        )
Example #12
def test_slice_index(size: int, data: st.SearchStrategy):
    index = data.draw(slice_index(size), label="index")
    x = np.empty((size,))
    o = x[index]  # raises if invalid index
    assert isinstance(o, np.ndarray) and o.ndim == 1, (
        "A slice index should produce a 1D array from a 1D array"
    )
    if o.size:
        assert np.shares_memory(o, x), "A slice should produce a view of `x`"

    if index.start is not None:
        assert -size <= index.start <= size

    if index.stop is not None:
        assert -size <= index.stop <= size
Example #13
    def test_shapes(self, boxes: ndarray, truth: ndarray,
                    data: st.SearchStrategy):
        """ Ensure the shape returned by generate_targets is correct, even in edge cases producing empty arrays. """
        # cumsum ensures we don't hit 0-width or 0-height boxes
        boxes = boxes.cumsum(axis=1)
        truth = truth.cumsum(axis=1)
        N = boxes.shape[0]
        K = truth.shape[0]
        labels = data.draw(hnp.arrays(dtype=int, shape=(K, )))
        cls, reg = generate_targets(boxes, truth, labels, 0.5, 0.4)

        msg = "generate_targets failed to produce classification targets of the correct shape"
        assert cls.shape == (N, ), msg

        msg = "generate_targets failed to produce regression targets of the correct shape"
        assert reg.shape == (N, 4), msg
Example #14
    def test_matches_crossentropy(self, inputs: ndarray, alpha: float,
                                  dtype: torch.dtype, data: st.SearchStrategy):
        """ Ensures that focal loss w/ gamma=0 matches softmax cross-entropy (scaled by alpha). """
        targets = data.draw(
            hnp.arrays(
                dtype=int,
                shape=(inputs.shape[0], ),
                elements=st.integers(0, inputs.shape[1] - 1),
            ),
            label="targets",
        )

        inputs = tensor(inputs, dtype=dtype)
        targets = tensor(targets, dtype=torch.long)
        assert_allclose(
            desired=alpha * F.cross_entropy(inputs, targets),
            actual=softmax_focal_loss(inputs, targets, alpha=alpha, gamma=0.0),
            atol=1e-6,
            rtol=1e-6,
            err_msg="Focal loss with gamma=0 fails to match cross-entropy loss.",
        )
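As a quick sanity check of the identity being tested: with gamma=0 the modulating factor (1 - p_c)**gamma collapses to 1, leaving alpha times the standard cross-entropy:

import torch
import torch.nn.functional as F

x = torch.randn(4, 3)
y = torch.tensor([0, 2, 1, 1])
log_pc = F.log_softmax(x, dim=1)[range(4), y]
assert torch.allclose(-log_pc.mean(), F.cross_entropy(x, y))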