import torch

import torchlayers
import torchlayers as tl  # examples below use both the full name and the tl alias


def multiple_modules(regularization: str, name: str):
    return getattr(tl, regularization)(
        torch.nn.Sequential(
            tl.Linear(40),
            tl.ReLU(),
            tl.Linear(20),
            tl.ReLU(),
            tl.Linear(10),
        ),
        weight_decay=1000000,
        name=name,
    )

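A minimal usage sketch for the helper above. The regularizer name "L2" and the sample input shape are assumptions for illustration; the torchlayers.build call with a sample batch mirrors the text CNN example further down.

import torch
import torchlayers as tl

# "L2" is an assumption: any regularizer class reachable via getattr(tl, ...)
# that accepts (module, weight_decay=..., name=...) would work here.
regularized = multiple_modules("L2", name="l2_regularization")
# Linear in_features are inferred at build time from the sample batch;
# the 64-feature input is an arbitrary choice for this sketch.
network = tl.build(regularized, torch.randn(2, 64))
print(network(torch.randn(2, 64)).shape)  # torch.Size([2, 10])
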
def model():
    return torchlayers.Sequential(
        torchlayers.Conv(64),
        torchlayers.ReLU(),
        torchlayers.MaxPool(),
        torchlayers.Conv(128),
        torchlayers.ReLU(),
        torchlayers.MaxPool(),
        torchlayers.Conv(256),
        torchlayers.ReLU(),
        torchlayers.Flatten(),
    )

def model():
    return tl.Sequential(
        tl.Conv(64),
        tl.ReLU(),
        tl.MaxPool(),
        tl.BatchNorm(),
        tl.Conv(128),
        tl.ReLU(),
        tl.MaxPool(),
        tl.Conv(256),
        tl.ReLU(),
        tl.Reshape(-1),
    )

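Either variant above can be instantiated with torchlayers.build, which infers every in_channels from a sample batch (the same pattern as the text CNN test below); the 3 x 32 x 32 input shape is just an assumption for this sketch.

import torch
import torchlayers as tl

network = tl.build(model(), torch.randn(1, 3, 32, 32))  # assumed CIFAR-like input
features = network(torch.randn(1, 3, 32, 32))
# With "same" convolutions and two 2x2 poolings, 32 x 32 ends up as
# 256 x 8 x 8 before flattening, i.e. a 16384-dimensional vector.
print(features.shape)  # torch.Size([1, 16384])
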
def model():
    return tl.Sequential(
        tl.Conv(64),
        tl.BatchNorm(),
        tl.ReLU(),
        tl.Conv(128),
        tl.BatchNorm(),
        tl.ReLU(),
        tl.Conv(256),
        tl.GlobalMaxPool(),
        tl.Linear(64),
        tl.BatchNorm(),
        tl.Linear(10),
    )

def model():
    return torchlayers.Sequential(
        torchlayers.Conv(64),
        torchlayers.BatchNorm(),
        torchlayers.ReLU(),
        torchlayers.Conv(128),
        torchlayers.BatchNorm(),
        torchlayers.ReLU(),
        torchlayers.Conv(256),
        torchlayers.GlobalMaxPool(),
        torchlayers.Linear(64),
        torchlayers.BatchNorm(),
        torchlayers.Linear(10),
    )

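The same build pattern applies to this classifier; since GlobalMaxPool collapses the spatial dimensions before the Linear layers, the (assumed) input resolution only needs to be large enough to survive the convolutions.

import torch
import torchlayers

classifier = torchlayers.build(model(), torch.randn(1, 3, 28, 28))  # input shape assumed
logits = classifier(torch.randn(1, 3, 28, 28))
print(logits.shape)  # torch.Size([1, 10]) -- one logit per class
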
def test_text_cnn():
    model = torch.nn.Sequential(
        torchlayers.Conv(64),  # specify ONLY out_channels
        torch.nn.ReLU(),  # use torch.nn wherever you wish
        torchlayers.BatchNorm(),  # BatchNormNd inferred from input
        torchlayers.Conv(128),  # default kernel_size equal to 3
        torchlayers.ReLU(),
        torchlayers.Conv(256, kernel_size=11),  # "same" padding as default
        torchlayers.GlobalMaxPool(),  # known from Keras
        torchlayers.Linear(10),  # output for 10 classes
    )
    torchlayers.build(model, torch.randn(2, 300, 1))

def model():
    return torchlayers.Sequential(
        torchlayers.Conv(64),
        torchlayers.ReLU(),
        torchlayers.MaxPool(),
        torchlayers.Residual(
            torchlayers.Sequential(
                torchlayers.Conv(64, groups=16),
                torchlayers.ReLU(),
                torchlayers.BatchNorm(),
                torchlayers.Conv(64, groups=16),
                torchlayers.ChannelShuffle(groups=16),
                torchlayers.ReLU(),
            )
        ),
        torchlayers.SqueezeExcitation(),
        torchlayers.Sequential(
            torchlayers.Dropout(),
            torchlayers.Conv(128),
            torchlayers.ReLU(),
            torchlayers.InstanceNorm(),
        ),
        torchlayers.Poly(
            torchlayers.WayPoly(
                torchlayers.Fire(128),
                torchlayers.Fire(128),
                torchlayers.Fire(128),
                torchlayers.Fire(128),
            ),
            order=2,
        ),
        torchlayers.AvgPool(),
        torchlayers.StochasticDepth(torchlayers.Fire(128, hidden_channels=64)),
        torchlayers.ReLU(),
        torchlayers.GlobalAvgPool(),
        torchlayers.Linear(64),
    )

def classification_model():
    return tl.Sequential(
        tl.Conv(64),
        tl.ReLU(),
        tl.MaxPool(),
        tl.Residual(
            tl.Sequential(
                tl.Conv(64, groups=16),
                tl.ReLU(),
                tl.GroupNorm(num_groups=4),
                tl.Conv(64, groups=16),
                tl.ChannelShuffle(groups=16),
                tl.ReLU(),
            )
        ),
        tl.SqueezeExcitation(),
        tl.Sequential(
            tl.Dropout(),
            tl.Conv(128),
            tl.ReLU(),
            tl.InstanceNorm(),
        ),
        tl.Poly(
            tl.WayPoly(
                tl.Fire(128),
                tl.Fire(128),
                tl.Fire(128),
                tl.Fire(128),
            ),
            order=2,
        ),
        tl.AvgPool(),
        tl.StochasticDepth(tl.Fire(128, hidden_channels=64)),
        tl.ReLU(),
        tl.GlobalAvgPool(),
        tl.Linear(64),
    )

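A build-and-run sketch for either variant of this model; the input resolution is an assumption, and since the network contains Dropout and StochasticDepth it should be switched to eval mode for deterministic inference.

import torch
import torchlayers as tl

network = tl.build(classification_model(), torch.randn(1, 3, 32, 32))  # shape assumed
network.eval()  # disable Dropout / StochasticDepth randomness for inference
with torch.no_grad():
    print(network(torch.randn(1, 3, 32, 32)).shape)  # torch.Size([1, 64])
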
class AutoEncoder(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = torchlayers.Sequential(
            torchlayers.Conv(64, kernel_size=7),
            torchlayers.activations.Swish(),  # direct access to the .activations module
            torchlayers.InvertedResidualBottleneck(squeeze_excitation=False),
            torchlayers.AvgPool(),  # shape 64 x 128 x 128, kernel_size=2 by default
            torchlayers.HardSwish(),  # accessible directly through torchlayers
            torchlayers.SeparableConv(128),  # up number of channels to 128
            torchlayers.InvertedResidualBottleneck(),  # default with squeeze excitation
            torch.nn.ReLU(),
            torchlayers.AvgPool(),  # shape 128 x 64 x 64, kernel_size=2 by default
            torchlayers.DepthwiseConv(256),  # DepthwiseConv easier to use
            # Pass input thrice through the same weights like in PolyNet
            torchlayers.Poly(torchlayers.InvertedResidualBottleneck(), order=3),
            torchlayers.ReLU(),  # all torch.nn can be accessed via torchlayers
            torchlayers.MaxPool(),  # shape 256 x 32 x 32
            torchlayers.Fire(out_channels=512),  # shape 512 x 32 x 32
            torchlayers.SqueezeExcitation(hidden=64),
            torchlayers.InvertedResidualBottleneck(),
            torchlayers.MaxPool(),  # shape 512 x 16 x 16
            torchlayers.InvertedResidualBottleneck(squeeze_excitation=False),
            torchlayers.Dropout(),  # default p=0.5 and Dropout2d for images
            # Randomly skip the last two layers with 0.5 probability
            torchlayers.StochasticDepth(
                torch.nn.Sequential(
                    torchlayers.InvertedResidualBottleneck(squeeze_excitation=False),
                    torchlayers.InvertedResidualBottleneck(squeeze_excitation=False),
                ),
                p=0.5,
            ),
            torchlayers.AvgPool(),  # shape 512 x 8 x 8
        )

        # The decoder is simpler and more repetitive
        self.decoder = torchlayers.Sequential(
            torchlayers.Poly(torchlayers.InvertedResidualBottleneck(), order=2),
            # Has ICNR initialization by default as well
            torchlayers.ConvPixelShuffle(out_channels=512, upscale_factor=2),
            # shape 512 x 16 x 16 after PixelShuffle
            torchlayers.Poly(torchlayers.InvertedResidualBottleneck(), order=3),
            torchlayers.ConvPixelShuffle(out_channels=256, upscale_factor=2),
            torchlayers.StandardNormalNoise(),  # add Gaussian noise
            # shape 256 x 32 x 32
            torchlayers.Poly(torchlayers.InvertedResidualBottleneck(), order=3),
            torchlayers.ConvPixelShuffle(out_channels=128, upscale_factor=2),
            # shape 128 x 64 x 64
            torchlayers.Poly(torchlayers.InvertedResidualBottleneck(), order=4),
            torchlayers.ConvPixelShuffle(out_channels=64, upscale_factor=2),
            # shape 64 x 128 x 128
            torchlayers.InvertedResidualBottleneck(),
            torchlayers.Conv(256),
            torchlayers.Swish(),
            torchlayers.BatchNorm(),
            torchlayers.ConvPixelShuffle(out_channels=32, upscale_factor=2),
            # shape 32 x 256 x 256
            torchlayers.Conv(16),
            torchlayers.Swish(),
            torchlayers.Conv(3),  # shape 3 x 256 x 256
        )

    def forward(self, inputs):
        return self.decoder(self.encoder(inputs))

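Putting it together: given the shape comments in the encoder (64 x 128 x 128 after the first AvgPool), the autoencoder appears to expect 3 x 256 x 256 inputs and reconstruct images of the same size, so a hedged end-to-end sketch could look like this (batch size and the AutoEncoder class name follow the snippet above).

import torch
import torchlayers

# build runs shape inference through the whole module using the sample batch
autoencoder = torchlayers.build(AutoEncoder(), torch.randn(1, 3, 256, 256))
with torch.no_grad():
    reconstruction = autoencoder(torch.randn(1, 3, 256, 256))
print(reconstruction.shape)  # torch.Size([1, 3, 256, 256])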