Example #1
def multiple_modules(regularization: str, name: str):
    # Look up the regularization wrapper by name and apply it to a small shape-inferred MLP.
    return getattr(tl, regularization)(
        torch.nn.Sequential(tl.Linear(40), tl.ReLU(), tl.Linear(20), tl.ReLU(),
                            tl.Linear(10)),
        weight_decay=1000000,
        name=name,
    )
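A hedged usage sketch for this fixture: the arguments "L2" and "l2_penalty" are hypothetical (any `tl` attribute with a `(module, weight_decay=..., name=...)` signature would do), and it assumes `tl.build` recurses into the wrapper's children the same way it handles the containers in Example #5.
import torch
import torchlayers as tl

wrapped = multiple_modules("L2", "l2_penalty")  # "L2" and "l2_penalty" are hypothetical arguments
built = tl.build(wrapped, torch.randn(8, 32))   # shapes inferred from the example batch
print(built(torch.randn(8, 32)).shape)          # expected: torch.Size([8, 10])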
Example #2
def model():
    return tl.Sequential(
        tl.Conv(64),
        tl.BatchNorm(),
        tl.ReLU(),
        tl.Conv(128),
        tl.BatchNorm(),
        tl.ReLU(),
        tl.Conv(256),
        tl.GlobalMaxPool(),
        tl.Linear(64),
        tl.BatchNorm(),
        tl.Linear(10),
    )
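A minimal build-and-run sketch for this model; the 3-channel 28×28 input shape is an assumption, and the batch size of 2 keeps the BatchNorm layers valid during the build-time forward pass (compare Example #5).
import torch
import torchlayers as tl

net = tl.build(model(), torch.randn(2, 3, 28, 28))  # Conv/BatchNorm inferred as their 2D variants
logits = net(torch.randn(16, 3, 28, 28))
print(logits.shape)  # expected: torch.Size([16, 10]), one score per class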
Example #3
def model():
    return torchlayers.Sequential(
        torchlayers.Conv(64),
        torchlayers.BatchNorm(),
        torchlayers.ReLU(),
        torchlayers.Conv(128),
        torchlayers.BatchNorm(),
        torchlayers.ReLU(),
        torchlayers.Conv(256),
        torchlayers.GlobalMaxPool(),
        torchlayers.Linear(64),
        torchlayers.BatchNorm(),
        torchlayers.Linear(10),
    )
Example #4
def test_save():
    inputs = torch.randn(16, 32)
    temp = pathlib.Path(tempfile.gettempdir())

    layer = tl.build(tl.Linear(64), inputs)
    output = layer(inputs)
    torch.save(layer, temp / "linear_model.pt")

    new_layer = torch.load(temp / "linear_model.pt")
    new_output = new_layer(inputs)
    assert torch.allclose(output, new_output)
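Since the built layer behaves like any other torch.nn module, the usual state_dict workflow should also apply; a hypothetical companion test (not from the repository), assuming the same imports as above and that rebuilding with the same example input yields identical parameter keys.
def test_save_state_dict():
    inputs = torch.randn(16, 32)
    temp = pathlib.Path(tempfile.gettempdir())

    layer = tl.build(tl.Linear(64), inputs)
    torch.save(layer.state_dict(), temp / "linear_state.pt")

    restored = tl.build(tl.Linear(64), inputs)  # rebuild the same architecture
    restored.load_state_dict(torch.load(temp / "linear_state.pt"))
    assert torch.allclose(layer(inputs), restored(inputs))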
Example #5
def test_text_cnn():
    model = torch.nn.Sequential(
        torchlayers.Conv(64),  # specify ONLY out_channels
        torch.nn.ReLU(),  # use torch.nn wherever you wish
        torchlayers.BatchNorm(),  # BatchNormNd inferred from input
        torchlayers.Conv(128),  # Default kernel_size equal to 3
        torchlayers.ReLU(),
        torchlayers.Conv(256, kernel_size=11),  # "same" padding as default
        torchlayers.GlobalMaxPool(),  # Known from Keras
        torchlayers.Linear(10),  # Output for 10 classes
    )

    torchlayers.build(model, torch.randn(2, 300, 1))
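The value returned by `torchlayers.build` is the network to use afterwards (compare Example #4); a hypothetical, trimmed-down variant of the test above that also exercises a forward pass.
def test_text_cnn_forward():  # hypothetical variant, not from the repository
    model = torch.nn.Sequential(
        torchlayers.Conv(64),
        torch.nn.ReLU(),
        torchlayers.GlobalMaxPool(),
        torchlayers.Linear(10),
    )
    built = torchlayers.build(model, torch.randn(2, 300, 1))  # 1D layers inferred from the 3D input
    assert built(torch.randn(4, 300, 1)).shape == (4, 10)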
Example #6
def model():
    return torchlayers.Sequential(
        torchlayers.Conv(64),
        torchlayers.ReLU(),
        torchlayers.MaxPool(),
        torchlayers.Residual(
            torchlayers.Sequential(
                torchlayers.Conv(64, groups=16),
                torchlayers.ReLU(),
                torchlayers.BatchNorm(),
                torchlayers.Conv(64, groups=16),
                torchlayers.ChannelShuffle(groups=16),  # ShuffleNet-style shuffle across the 16 groups
                torchlayers.ReLU(),
            )
        ),
        torchlayers.SqueezeExcitation(),  # squeeze-and-excitation channel attention
        torchlayers.Sequential(
            torchlayers.Dropout(),
            torchlayers.Conv(128),
            torchlayers.ReLU(),
            torchlayers.InstanceNorm(),
        ),
        torchlayers.Poly(  # PolyNet-style higher-order residual combination
            torchlayers.WayPoly(
                torchlayers.Fire(128),
                torchlayers.Fire(128),
                torchlayers.Fire(128),
                torchlayers.Fire(128),
            ),
            order=2,
        ),
        torchlayers.AvgPool(),
        torchlayers.StochasticDepth(torchlayers.Fire(128, hidden_channels=64)),  # may be skipped at random during training
        torchlayers.ReLU(),
        torchlayers.GlobalAvgPool(),
        torchlayers.Linear(64),
    )
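A hedged build sketch for this larger model, assuming a 3-channel 32×32 input batch (batch size 2 so the normalization layers see more than one sample during the build-time forward pass).
import torch
import torchlayers

net = torchlayers.build(model(), torch.randn(2, 3, 32, 32))
features = net(torch.randn(2, 3, 32, 32))
print(features.shape)  # expected: torch.Size([2, 64]) from the final Linear(64)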
Example #7
def classification_model():
    return tl.Sequential(
        tl.Conv(64),
        tl.ReLU(),
        tl.MaxPool(),
        tl.Residual(
            tl.Sequential(
                tl.Conv(64, groups=16),
                tl.ReLU(),
                tl.GroupNorm(num_groups=4),
                tl.Conv(64, groups=16),
                tl.ChannelShuffle(groups=16),
                tl.ReLU(),
            )
        ),
        tl.SqueezeExcitation(),
        tl.Sequential(
            tl.Dropout(),
            tl.Conv(128),
            tl.ReLU(),
            tl.InstanceNorm(),
        ),
        tl.Poly(
            tl.WayPoly(
                tl.Fire(128),
                tl.Fire(128),
                tl.Fire(128),
                tl.Fire(128),
            ),
            order=2,
        ),
        tl.AvgPool(),
        tl.StochasticDepth(tl.Fire(128, hidden_channels=64)),
        tl.ReLU(),
        tl.GlobalAvgPool(),
        tl.Linear(64),
    )
Example #8
def test_last_dimension_linear():
    module = tl.Linear(64)
    # in_features (32) is inferred from the last dimension of the example input
    module = tl.build(module, torch.randn(1, 3, 32, 32))
    # the built layer transforms only the last dimension, whatever the leading shape
    assert module(torch.randn(2, 6, 24, 32)).shape == (2, 6, 24, 64)
Example #9
def single_module(regularization: str, name: str):
    # As in Example #1, but the named wrapper regularizes a single shape-inferred Linear layer.
    return getattr(tl, regularization)(tl.Linear(10),
                                       weight_decay=1000000,
                                       name=name)