Example #1
import numpy as np
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
from hypothesis import given
from numpy.testing import assert_allclose
from pytest import raises

from mygrad import Tensor, squeeze

# project-local test helpers; these import paths are assumed, as the
# snippet does not show its imports
from tests.custom_strategies import valid_axes
from tests.utils.numerical_gradient import numerical_gradient_full


# hypothesis driver for the test; the strategy bounds here are assumed,
# since the original decorator is not shown in this snippet
@given(
    x=hnp.arrays(shape=hnp.array_shapes(), dtype=float, elements=st.floats(-10, 10)),
    data=st.data(),
)
def test_squeeze(x, data):
    axes = data.draw(valid_axes(x.ndim), label="axes")
    x_arr = Tensor(np.copy(x))
    x_arr2 = Tensor(np.copy(x))

    def f(x):
        # numpy reference implementation, used for the numerical gradient
        return np.squeeze(x, axes)

    # if numpy rejects these axes, mygrad's squeeze must raise as well
    try:
        numpy_out = np.squeeze(x, axes)
    except ValueError:
        with raises(ValueError):
            squeeze(x_arr, axes, constant=False)
        return

    # forward pass: exercise both the function and the tensor-method forms
    o = squeeze(x_arr, axes, constant=False)
    o_method = x_arr2.squeeze(axes)
    assert_allclose(o.data, numpy_out)
    assert_allclose(o_method.data, numpy_out)

    # draw an upstream gradient to backpropagate through each output
    grad = data.draw(
        hnp.arrays(shape=o.shape, dtype=float, elements=st.floats(1, 10), unique=True),
        label="grad",
    )
    o.backward(grad)
    o_method.backward(grad)

    # check both autodiff gradients against the numerical gradient of `f`
    (dx,) = numerical_gradient_full(f, x, back_grad=grad)

    assert_allclose(x_arr.grad, dx)
    assert_allclose(x_arr2.grad, dx)
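
For orientation, here is a minimal sketch of the behavior under test, assuming mygrad's numpy-mirroring squeeze API: the forward pass drops the requested size-1 axes, and the backward pass restores them, so the gradient always matches the input's shape.

import numpy as np
import mygrad as mg

x = mg.Tensor(np.arange(3.0).reshape(1, 3, 1))
y = mg.squeeze(x, (0, 2))      # forward: (1, 3, 1) -> (3,)
y.backward(np.array([1.0, 2.0, 3.0]))
print(x.grad.shape)            # (1, 3, 1): the gradient is "unsqueezed" to x's shape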
Example #2
        dgood = []  # not shown in the snippet, but needed for the appends below
        dbad = []
        wcaption = []
        # gather ResNet features for the matching/mismatched images and the
        # embedding of the true caption for each (good, caption, bad) triple
        for x in batch:
            dgoodtemp = iv.get_resnet_vector(x[0], resnet=resnet)
            dbadtemp = iv.get_resnet_vector(x[2], resnet=resnet)
            wcaptiontemp = text_embeds[x[1]]

            dgood.append(dgoodtemp)
            dbad.append(dbadtemp)
            wcaption.append(wcaptiontemp)
        wcaption = np.array(wcaption)
        # embed both image batches into the caption space
        wgood = model(np.array(dgood))
        wbad = model(np.array(dbad))
        # row-wise dot products via batched matmul:
        # (N, 1, D) @ (N, D, 1) -> (N, 1, 1), squeezed down to (N,)
        sgood = wcaption[:, np.newaxis] @ wgood[:, :, np.newaxis]
        sbad = wcaption[:, np.newaxis] @ wbad[:, :, np.newaxis]
        sgood = mg.squeeze(sgood)
        sbad = mg.squeeze(sbad)
        loss = la.loss(sgood, sbad)
        accuracy = la.acc(sgood, sbad)
        # backpropagate, update the parameters, then clear the gradients
        loss.backward()
        optim.step()
        loss.null_gradients()

        if batch_rate % 100 == 0:
            plotter.set_train_batch(
                {"loss": loss.item(), "accuracy": accuracy},
                batch_size=batch_size,
            )

    if epoch_rate % plot_rate == 0 and epoch_rate > 0:
        ...  # epoch-level plotting body is truncated in this snippet
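
The la.loss and la.acc helpers are not shown here. For this good/bad scoring setup, a margin ranking loss is a common choice; the sketch below assumes that design, and its names and margin value are placeholders rather than the original module's API.

import numpy as np
import mygrad as mg

def loss(sgood, sbad, margin=0.1):
    # hinge on the score gap: a triple contributes zero loss only once the
    # matching image outscores the mismatched one by at least `margin`
    return mg.mean(mg.maximum(0.0, margin - (sgood - sbad)))

def acc(sgood, sbad):
    # fraction of triples where the matching image scores above the mismatch
    return np.mean(sgood.data > sbad.data)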