Example #1
    def __init__(self,
                 in_features,
                 out_features,
                 dropout=0.0,
                 activation=None,
                 residual=False,
                 norm=None,
                 bias=True,
                 **kwargs):
        super(GCNLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        if dropout > 0:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None
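        # Optional residual branch: a linear projection so the input can be added to the output.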
        if residual:
            self.residual = nn.Linear(in_features, out_features)
        else:
            self.residual = None

        if activation == "relu":
            self.act = nn.ReLU()
        else:
            self.act = None

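        # Optional normalization applied to the transformed features.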
        if norm is not None:
            if norm == "batchnorm":
                self.norm = nn.BatchNorm1d(out_features)
            elif norm == "layernorm":
                self.norm = nn.LayerNorm(out_features)
            else:
                raise NotImplementedError
        else:
            self.norm = None

        self.reset_parameters()
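The snippet above shows only the constructor; the graph propagation itself and the reset_parameters() method it calls live elsewhere in the original class. A hypothetical usage sketch, assuming the full GCNLayer definition and its imports are available (the argument values below are illustrative, not taken from the original project):

# Hypothetical usage; assumes the complete GCNLayer class from the original project.
layer = GCNLayer(in_features=16,
                 out_features=32,
                 dropout=0.5,
                 activation="relu",
                 residual=True,
                 norm="layernorm")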
Example #2
    def __init__(self,
                 *,
                 image_size,
                 patch_size,
                 num_classes,
                 dim,
                 depth,
                 heads,
                 mlp_dim,
                 transformer=None,
                 channels=3,
                 dropout=0.,
                 emb_dropout=0.):
        super(ViT, self).__init__()
        assert image_size % patch_size == 0, 'image size must be divisible by the patch size'
        num_patches = (image_size // patch_size)**2
        patch_dim = channels * patch_size**2
        assert num_patches > MIN_NUM_PATCHES, f'number of patches ({num_patches}) is too small'

        self.patch_size = patch_size

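        # Position embeddings for the patch tokens plus the prepended class token.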
        self.pos_embedding = jt.random((1, num_patches + 1, dim),
                                       dtype='float32')
        self.patch_to_embedding = nn.Linear(patch_dim, dim)
        self.cls_token = jt.random((1, 1, dim), dtype='float32')
        self.dropout = nn.Dropout(emb_dropout)

        if transformer is None:
            self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout)
        else:
            self.transformer = transformer

        self.to_cls_token = nn.Identity()

        self.mlp_head = nn.Sequential(nn.LayerNorm(dim),
                                      nn.Linear(dim, mlp_dim), nn.GELU(),
                                      nn.Dropout(dropout),
                                      nn.Linear(mlp_dim, num_classes))
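MIN_NUM_PATCHES and the Transformer class are defined elsewhere in the same source file, and the patch embedding and classification head are applied in the (unshown) execute method. A hypothetical instantiation, with illustrative hyperparameters that are not taken from the original code:

# Hypothetical instantiation; the hyperparameter values are illustrative only
# and assume MIN_NUM_PATCHES and Transformer from the original file are available.
model = ViT(image_size=224,
            patch_size=16,
            num_classes=1000,
            dim=512,
            depth=6,
            heads=8,
            mlp_dim=1024,
            dropout=0.1,
            emb_dropout=0.1)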
Example #3
    def test_batchnorm(self):
        # ***************************************************************
        # Test BatchNorm Layer
        # ***************************************************************
        arr = np.random.randn(16, 10, 224, 224)
        check_equal_with_istrain(arr, jnn.BatchNorm(10, is_train=True),
                                 tnn.BatchNorm2d(10))

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.BatchNorm2d(10)

            def forward(self, x):
                return self.layer(x)

        model = Model()
        model.eval()
        check_equal_with_istrain(arr, jnn.BatchNorm(10, is_train=False), model,
                                 False)

        # ***************************************************************
        # Test InstanceNorm2d Layer
        # ***************************************************************
        arr = np.random.randn(16, 10, 224, 224)
        check_equal_without_istrain(arr, jnn.InstanceNorm2d(10, is_train=True),
                                    tnn.InstanceNorm2d(10))

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.InstanceNorm2d(10)

            def forward(self, x):
                return self.layer(x)

        model = Model()
        model.eval()
        check_equal_without_istrain(arr, jnn.InstanceNorm2d(10,
                                                            is_train=False),
                                    model)

        # ***************************************************************
        # Test BatchNorm1d Layer
        # ***************************************************************
        arr = np.random.randn(16, 10)
        check_equal_with_istrain(arr, jnn.BatchNorm1d(10, is_train=True),
                                 tnn.BatchNorm1d(10), 1e-3)

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.BatchNorm1d(10)

            def forward(self, x):
                return self.layer(x)

        model = Model()
        model.eval()
        check_equal_with_istrain(arr, jnn.BatchNorm1d(10, is_train=False),
                                 model, False)

        # ***************************************************************
        # Test GroupNorm Layer
        # ***************************************************************
        arr = np.random.randn(16, 10, 224, 224)

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.GroupNorm(2, 10)

            def forward(self, x):
                return self.layer(x)

        model = Model()
        model.eval()
        check_equal_with_istrain(arr, jnn.GroupNorm(2, 10, is_train=False),
                                 model, False, False)

        # ***************************************************************
        # Test LayerNorm Layer
        # ***************************************************************
        arr = np.random.randn(16, 10, 224, 224)

        class Model(tnn.Module):
            def __init__(self):
                super(Model, self).__init__()
                self.layer = tnn.LayerNorm(224)

            def forward(self, x):
                return self.layer(x)

        model = Model()
        model.eval()
        check_equal_with_istrain(arr, jnn.LayerNorm(224), model, False, False)
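The helpers check_equal_with_istrain and check_equal_without_istrain are defined elsewhere in the Jittor test file and are not shown here. As a rough sketch of the comparison pattern they implement, one could write something like the following; the name, signature, and tolerance handling are assumptions, not the actual test utilities:

import numpy as np
import jittor as jt
import torch

def check_equal_sketch(arr, jittor_layer, torch_layer, atol=1e-5):
    # Feed the same NumPy input through a Jittor layer and a PyTorch layer,
    # then compare the two outputs element-wise.
    x = arr.astype('float32')
    j_out = jittor_layer(jt.array(x)).numpy()
    t_out = torch_layer(torch.from_numpy(x)).detach().numpy()
    assert np.allclose(j_out, t_out, atol=atol)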
Example #4
    def __init__(self, dim, fn):
        super(PreNorm, self).__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn
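    # Note: only the constructor appears in the original snippet. In ViT-style
    # code, PreNorm normalizes its input and then delegates to the wrapped
    # module; the execute method below shows that common pattern as an
    # assumption, not as the original source.
    def execute(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)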