Example #1
def test_slice():
    a = Tensor.normal(0, 1, (30, 40, 20, 10), requires_grad=True)
    a_torch = create_identical_torch_tensor(a)

    b = a[10:20, :, :12, 5:]
    b_torch = a_torch[10:20, :, :12, 5:]

    b_torch.sum().backward()
    b.backward()

    check_val_and_grad(a, a_torch)
    check_val_and_grad(b, b_torch)
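
The slice test only requires that backward scatter the incoming gradient into a zero tensor of the original shape. A minimal sketch, assuming a mytorch-style Function base class (the class name and ctx attributes are illustrative, not the tested library's actual API):

class Slice(Function):
    @staticmethod
    def forward(ctx, x, indices):
        # Remember the input shape and index object for backward.
        ctx.shape = x.shape
        ctx.indices = indices
        return Tensor(x.data[indices], requires_grad=x.requires_grad)

    @staticmethod
    def backward(ctx, grad_output):
        # The gradient is zero everywhere except the sliced region.
        grad = np.zeros(ctx.shape)
        grad[ctx.indices] = grad_output.data
        return Tensor(grad)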
Example #2
def test_sqrt():
    a = Tensor.normal(30, 1, (5, 5), requires_grad=True)
    a_torch = create_identical_torch_tensor(a)

    b = a.sqrt()
    b_torch = a_torch.sqrt()

    b_torch.sum().backward()
    b.backward()

    check_val_and_grad(a, a_torch)
    check_val_and_grad(b, b_torch)
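
The rule under test is the derivative d/dx sqrt(x) = 1 / (2 * sqrt(x)). A numpy-only sketch of the two passes (the function names are assumptions for illustration):

def sqrt_forward(x: np.ndarray) -> np.ndarray:
    return np.sqrt(x)

def sqrt_backward(grad_output: np.ndarray, out: np.ndarray) -> np.ndarray:
    # Reusing the saved forward output: d/dx sqrt(x) = 1 / (2 * sqrt(x)).
    return grad_output / (2.0 * out)

Note that the test samples from normal(30, 1), which keeps the inputs safely positive so sqrt stays differentiable.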
Example #3
def test_pow_exp_neg():
    a = Tensor.normal(30, 1, (20, 20), requires_grad=True)
    a_torch = create_identical_torch_tensor(a)

    b = a**(-2)
    b_torch = a_torch**(-2)

    b_torch.sum().backward()
    b.backward()

    check_val_and_grad(a, a_torch)
    check_val_and_grad(b, b_torch)
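
This exercises the power rule d/dx x**n = n * x**(n-1); for n = -2 the derivative is -2 / x**3. A hypothetical numpy sketch of the backward step:

def pow_backward(grad_output: np.ndarray, x: np.ndarray, n: float) -> np.ndarray:
    # Power rule: d/dx x**n = n * x**(n-1).
    return grad_output * n * x ** (n - 1)

As in the sqrt test, sampling from normal(30, 1) keeps the inputs well away from zero, where a negative power would blow up.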
Example #4
def test_max():
    a = Tensor.normal(0, 1, (30, 40, 20, 10), requires_grad=True)
    a_torch = create_identical_torch_tensor(a)

    b = a.max(axis=3).max(axis=1)
    b_torch, _ = a_torch.max(axis=3)
    b_torch, _ = b_torch.max(axis=1)

    b_torch.sum().backward()
    b.backward()

    check_val_and_grad(a, a_torch)
    check_val_and_grad(b, b_torch)
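
Reducing with max routes the gradient only to the positions that attained the maximum along the reduced axis. A minimal numpy sketch of that routing (the helper name is illustrative):

def max_backward(grad_output: np.ndarray, x: np.ndarray, axis: int) -> np.ndarray:
    # Mask is 1 exactly where x equals its per-axis maximum.
    mask = (x == x.max(axis=axis, keepdims=True)).astype(x.dtype)
    # Re-insert the reduced axis so grad_output broadcasts against the mask.
    return mask * np.expand_dims(grad_output, axis)

With ties this spreads gradient over every maximizer, whereas PyTorch picks a single index; for continuous samples like the ones above, ties occur with probability zero.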
Example #5
def init_weights(shape: tuple,
                 weight_initialization: str,
                 fan_mode: str = "fan_in",
                 **kwargs) -> Tensor:
    assert isinstance(shape, tuple), \
        f"Shape must be a tuple. Got {type(shape).__name__}."
    assert fan_mode in ("fan_in", "fan_out"), \
        "Wrong fan mode. Only fan_in and fan_out are supported."

    out_features, in_features = shape[0], shape[1]
    fan = in_features if fan_mode == "fan_in" else out_features

    if weight_initialization == "kaiming_normal":
        std = np.sqrt(1.0 / fan)
        return Tensor.normal(0.0, std, shape, **kwargs)

    elif weight_initialization == "kaiming_uniform":
        bound = np.sqrt(3.0 / fan)
        weight = np.random.uniform(-bound, bound, shape)
        return Tensor(weight, **kwargs)

    elif weight_initialization == "glorot_normal":
        std = np.sqrt(2.0 / (in_features + out_features))
        return Tensor.normal(0.0, std, shape, **kwargs)

    elif weight_initialization == "glorot_uniform":
        bound = np.sqrt(6.0 / (in_features + out_features))
        weight = np.random.uniform(-bound, bound, shape)
        return Tensor(weight, **kwargs)

    else:
        raise ValueError("Unknown weight initialization method. "
                         "Only Glorot and Kaiming are available.")
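
As a quick sanity check on the formulas: a uniform(-b, b) draw has standard deviation b / sqrt(3), so kaiming_uniform weights should come out with std close to sqrt(1 / fan_in). A short illustration, assuming the Tensor exposes its numpy buffer as .data (the shape is arbitrary):

w = init_weights((256, 784), "kaiming_uniform", requires_grad=True)
print(w.data.std(), np.sqrt(1.0 / 784))  # the two values should roughly agree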
Example #6
def test_flatten_forward():
    np.random.seed(SEED)
    inp = Tensor.normal(0, 1, (8, 30, 3), requires_grad=True)
    inp_torch = create_identical_torch_tensor(inp)

    model = nnn.Flatten()
    torch_model = nn.Flatten()

    y = model(inp)
    y_torch = torch_model(inp_torch)

    y_torch.sum().backward()
    y.backward()

    check_val_and_grad(y, y_torch)
    check_val_and_grad(inp, inp_torch)
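
Flatten is a pure reshape: forward collapses all non-batch dimensions and backward reshapes the gradient back to the input shape, so no values change in either direction. A numpy sketch (function names assumed):

def flatten_forward(x: np.ndarray) -> np.ndarray:
    # (batch, d1, d2, ...) -> (batch, d1 * d2 * ...)
    return x.reshape(x.shape[0], -1)

def flatten_backward(grad_output: np.ndarray, input_shape: tuple) -> np.ndarray:
    # Undo the reshape.
    return grad_output.reshape(input_shape)

In the test above, the (8, 30, 3) input flattens to (8, 90).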
Example #7
def test_batchnorm2d_backward():
    np.random.seed(SEED)
    model = nnn.Sequential(nnn.Conv2d(3, 10, 3, 1), nnn.BatchNorm2d(10),
                           nnn.ReLU())
    pytorch_model = get_same_pytorch_mlp(model)

    shape = (16, 3, 20, 20)
    x = Tensor.normal(0, 1, shape)
    x_torch = create_identical_torch_tensor(x).double()

    y = model(x)
    y_torch = pytorch_model(x_torch)

    y.backward()
    y_torch.sum().backward()

    check_val_and_grad(y, y_torch)
    check_val_and_grad(x, x_torch)
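
For context, BatchNorm2d in training mode normalizes each channel over the batch and spatial axes. A forward-only numpy sketch of the standard formulation (parameter names are illustrative, and running statistics are omitted):

def batchnorm2d_forward(x: np.ndarray, gamma: np.ndarray, beta: np.ndarray,
                        eps: float = 1e-5) -> np.ndarray:
    # x: (N, C, H, W); statistics are computed per channel.
    mean = x.mean(axis=(0, 2, 3), keepdims=True)
    var = x.var(axis=(0, 2, 3), keepdims=True)
    x_hat = (x - mean) / np.sqrt(var + eps)
    # gamma and beta broadcast as (1, C, 1, 1).
    return gamma.reshape(1, -1, 1, 1) * x_hat + beta.reshape(1, -1, 1, 1)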
Example #8
def test_cross_entropy():
    num_classes = 10
    batch_size = 64

    predicted = Tensor.normal(5, 2, (batch_size, num_classes), requires_grad=True)
    target = Tensor.randint(0, 9, (batch_size, ))
    pred_torch, target_torch = create_identical_torch_tensor(predicted, target)

    target_torch = target_torch.type(torch.LongTensor)

    loss = cross_entropy(predicted, target)
    loss_torch = torch.nn.functional.cross_entropy(pred_torch, target_torch)

    loss_torch.sum().backward()
    loss.backward()

    check_val_and_grad(loss, loss_torch)
    check_val_and_grad(predicted, pred_torch)
    check_val_and_grad(target, target_torch)
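
The loss under test is presumably log-softmax followed by the mean negative log-likelihood, matching the default mean reduction of torch.nn.functional.cross_entropy. A numerically stable numpy reference (a sketch of the standard formula, not necessarily this library's implementation):

def cross_entropy_ref(logits: np.ndarray, target: np.ndarray) -> float:
    # Stable log-softmax: subtract the per-row max before exponentiating.
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # Mean negative log-likelihood of the true classes.
    rows = np.arange(logits.shape[0])
    return float(-log_probs[rows, target.astype(int)].mean())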