Example #1
import flexflow.torch.fx as fx
import torch.nn as nn
import torch

class Transpose(nn.Module):
    def __init__(self):
        super(Transpose, self).__init__()

    def forward(self, x):
        x = torch.transpose(x, -1, -2)
        return x

model = nn.Sequential(Transpose(), nn.Flatten(229 * 229 * 3, 10))
fx.torch_to_flexflow(model, "transpose.ff")
Example #2

import flexflow.torch.fx as fx
import torch.nn as nn
import torch


class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        # The original listing began mid-class; the layer definitions above
        # self.flat1 are reconstructed (shapes assumed) to match the forward
        # pass below.
        self.conv1 = nn.Conv2d(3, 32, 3, 1)
        self.conv2 = nn.Conv2d(64, 64, 3, 1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.conv3 = nn.Conv2d(64, 64, 3, 1)
        self.conv4 = nn.Conv2d(64, 64, 3, 1)
        self.pool2 = nn.MaxPool2d(2, 2)
        self.flat1 = nn.Flatten()
        self.linear1 = nn.Linear(512, 512)
        self.linear2 = nn.Linear(512, 10)
        self.relu = nn.ReLU()

    def forward(self, input1, input2):
        y1 = self.conv1(input1)
        y1 = self.relu(y1)
        y2 = self.conv1(input2)
        y2 = self.relu(y2)
        y = torch.cat((y1, y2), 1)
        (y1, y2) = torch.split(y, 1)
        y = torch.cat((y1, y2), 1)
        y = self.conv2(y)
        y = self.relu(y)
        y = self.pool1(y)
        y = self.conv3(y)
        y = self.relu(y)
        y = self.conv4(y)
        y = self.relu(y)
        y = self.pool2(y)
        y = self.flat1(y)
        y = self.linear1(y)
        y = self.relu(y)
        yo = self.linear2(y)
        return (yo, y)


model = CNN()
fx.torch_to_flexflow(model, "cnn.ff")
Example #3
import flexflow.torch.fx as fx
import torch.nn as nn


class Addition(nn.Module):
    def __init__(self, scalar):
        super(Addition, self).__init__()
        self.scalar = scalar

    def forward(self, x):
        x = x + self.scalar
        return x


model = nn.Sequential(Addition(2.0), nn.Flatten(229 * 229 * 3, 10))
fx.torch_to_flexflow(model, "scalar_addition.ff")
Example #4

import torch
import flexflow.torch.fx as fx
from typing import Any
# The original listing began mid-file; these imports are assumed so the
# functions below are self-contained (torchvision's private `_resnet` helper
# and its signature can vary by version).
from torchvision.models.resnet import ResNet, Bottleneck, _resnet, resnet152


def wide_resnet50_2(pretrained: bool = False,
                    progress: bool = True,
                    **kwargs: Any) -> ResNet:
    r"""Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels,
    which is twice as large in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained,
                   progress, **kwargs)


def wide_resnet101_2(pretrained: bool = False,
                     progress: bool = True,
                     **kwargs: Any) -> ResNet:
    r"""Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.

    The model is the same as ResNet except for the bottleneck number of channels,
    which is twice as large in every block. The number of channels in outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, and in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    kwargs['width_per_group'] = 64 * 2
    return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained,
                   progress, **kwargs)
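
To make the width doubling described in the docstrings concrete, the following quick check (a sketch, assuming torchvision's standard `Bottleneck` attribute naming, where `conv2` is the 3x3 convolution and `conv3` the outer 1x1) prints the channel counts of the last block in `layer4`:

from torchvision.models import resnet50, wide_resnet50_2

std = resnet50()
wide = wide_resnet50_2()
# 3x3 bottleneck convolution of the last block: width doubles.
print(std.layer4[-1].conv2.out_channels)   # 512
print(wide.layer4[-1].conv2.out_channels)  # 1024
# Outer 1x1 convolution: output width unchanged.
print(std.layer4[-1].conv3.out_channels)   # 2048
print(wide.layer4[-1].conv3.out_channels)  # 2048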


input = torch.randn(64, 3, 224, 224)
model = resnet152()
fx.torch_to_flexflow(model, "resnet152.ff")
Example #5

import flexflow.torch.fx as fx
import torch.nn as nn

model = nn.Sequential(nn.Identity(), nn.Flatten(229 * 229 * 3, 10))
fx.torch_to_flexflow(model, "identity.ff")
Example #6

import flexflow.torch.fx as fx
import torch.nn as nn
import torch


class CustomParam(nn.Module):
    def __init__(self):
        super(CustomParam, self).__init__()
        self.mat = nn.Parameter(torch.zeros(1, 1))

    def forward(self, x):
        x = torch.matmul(self.mat, x)
        return x


model = nn.Sequential(CustomParam())
fx.torch_to_flexflow(model, "customParam.ff")
Example #7

import flexflow.torch.fx as fx
import torch.nn as nn
import torch


class Division(nn.Module):
    def __init__(self, scalar):
        super(Division, self).__init__()
        self.scalar = scalar

    def forward(self, x):
        x = x // self.scalar
        return x


model = nn.Sequential(Division(2.0), nn.Flatten(229 * 229 * 3, 10))
fx.torch_to_flexflow(model, "scalar.ff")
Example #8

import flexflow.torch.fx as fx
import torch.nn as nn

model = nn.Sequential(nn.GELU(), nn.Flatten(229 * 229 * 3, 10))
fx.torch_to_flexflow(model, "gelu.ff")
Example #9
import torch.nn as nn
import flexflow.torch.fx as fx


class MLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(784, 512)
        self.linear2 = nn.Linear(512, 512)
        self.linear3 = nn.Linear(512, 10)
        self.relu = nn.ReLU()

    def forward(self, x):
        y = self.linear1(x)
        y = self.relu(y)
        y = self.linear2(y)
        y = self.relu(y)
        y = self.linear3(y)
        return y


model = MLP()
fx.torch_to_flexflow(model, "mlp.ff")
Example #10

import torch.nn as nn
import torchvision.models as models
import flexflow.torch.fx as fx

# alexnet = models.alexnet()
# fx.torch_to_flexflow(alexnet, "alexnet.ff")
#
# vgg16 = models.vgg16()
# fx.torch_to_flexflow(vgg16, "vgg16.ff")
#
# squeezenet = models.squeezenet1_0()
# fx.torch_to_flexflow(squeezenet, "squeezenet.ff")

# densenet = models.densenet161()
# fx.torch_to_flexflow(densenet, "densenet.ff")

# inception = models.inception_v3()
# fx.torch_to_flexflow(inception, "inception.ff")

googlenet = models.googlenet()
fx.torch_to_flexflow(googlenet, "googlenet.ff")

# shufflenet = models.shufflenet_v2_x1_0()
# fx.torch_to_flexflow(shufflenet, "shufflenet.ff")

# mobilenet = models.mobilenet_v2()
# fx.torch_to_flexflow(mobilenet, "mobilenet.ff")
Example #11

import torch
import torch.nn as nn
import flexflow.torch.fx as fx


class AlexNet(nn.Module):
    def __init__(self, num_classes: int = 1000) -> None:
        super(AlexNet, self).__init__()
        # The original listing began mid-module; the opening of this
        # nn.Sequential is reconstructed from the standard AlexNet stem.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
            nn.Softmax(dim=1),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.features(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x


model = AlexNet(num_classes=10)
fx.torch_to_flexflow(model, "alexnet.ff")
Example #12
import timm.models.vision_transformer as vit
import flexflow.torch.fx as fx
import torch.nn as nn

model = vit.vit_base_patch32_224_in21k()
model = nn.Sequential(model, nn.Flatten(), nn.Linear(21843, 1000))
fx.torch_to_flexflow(model, "vit_base_patch32_224_in21k.ff")
Example #13

import classy_vision.models.regnet as rgn
import flexflow.torch.fx as fx
import torch.nn as nn

model = rgn.RegNetX32gf()
model = nn.Sequential(model, nn.Flatten(), nn.Linear(2520 * 7 * 7, 1000))
fx.torch_to_flexflow(model, "regnetX32gf.ff")