def __init__(
        self,
        in_channels,
        out_channels,
        dim,
        kernel_size,
        hidden_channels=None,
        dilation=1,
        bias=True,
        **kwargs,
    ):
        super(XConv, self).__init__()

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.kwargs = kwargs

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        # mlp1 lifts each neighbor's D-dimensional (relative) position
        # to C_delta point-wise features.
        self.mlp1 = S(
            L(dim, C_delta),
            ELU(),
            BN(C_delta),
            L(C_delta, C_delta),
            ELU(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        # mlp2 learns the K x K "X-transformation" matrix from the
        # D * K flattened neighbor positions of each point.
        self.mlp2 = S(
            L(D * K, K**2),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        # Depthwise-separable convolution over the transformed neighborhood:
        # a grouped Conv1d followed by a pointwise Linear; the multiplier
        # keeps the depthwise stage wide enough to reach C_out.
        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
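The single-letter aliases are not shown in this snippet. A plausible preamble, assuming this is the torch_geometric.nn.XConv implementation (S = Sequential, L = Linear, BN = BatchNorm1d, plus a small Reshape helper), would be:

from math import ceil

import torch
from torch.nn import Sequential as S, Linear as L, BatchNorm1d as BN
from torch.nn import ELU, Conv1d


class Reshape(torch.nn.Module):
    # Reshape the input tensor to the shape given at construction time.
    def __init__(self, *shape):
        super().__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(*self.shape)

Under the same assumption, and with torch-cluster installed, a minimal usage sketch (sizes are illustrative) is:

from torch_geometric.nn import XConv

conv = XConv(in_channels=8, out_channels=32, dim=3, kernel_size=16)
x, pos = torch.randn(1024, 8), torch.randn(1024, 3)
out = conv(x, pos)  # -> [1024, 32] per-point features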
Example #2
    def __init__(self, in_channels, out_channels, dim, kernel_size,
                 hidden_channels=None, dilation=1, bias=True,
                 BiLinear=BiLinear, BiConv1d=BiConv1d, ifFirst=False,
                 **kwargs):
        super(BiXConv, self).__init__()

        if knn_graph is None:
            raise ImportError('`XConv` requires `torch-cluster`.')

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.kwargs = kwargs

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        # Keep the first layer full-precision when this is the network's
        # first block; binarize it otherwise.
        if ifFirst:
            Lin1 = Lin
        else:
            Lin1 = BiLinear

        self.mlp1 = S(
            Lin1(dim, C_delta),
            Hardtanh(),
            BN(C_delta),
            BiLinear(C_delta, C_delta),
            Hardtanh(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            Lin1(D * K, K**2),
            Hardtanh(),
            BN(K**2),
            Reshape(-1, K, K),
            BiConv1d(K, K**2, K, groups=K),
            Hardtanh(),
            BN(K**2),
            Reshape(-1, K, K),
            BiConv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            BiConv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            BiLinear(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
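BiXConv receives BiLinear and BiConv1d as constructor arguments, so their definitions are outside this snippet. As a rough sketch of what such a layer could look like, here is a weight-binarized Linear with a straight-through estimator; the name BiLinearSketch and the exact binarization scheme are assumptions, not the original code:

import torch
import torch.nn.functional as F


class BinarizeSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, w):
        return torch.sign(w)  # binarize weights to {-1, 0, +1}

    @staticmethod
    def backward(ctx, grad_output):
        return grad_output  # straight-through: gradients pass unchanged


class BiLinearSketch(torch.nn.Linear):
    def forward(self, x):
        # Binarized weights in the forward pass; the optimizer still
        # updates the underlying full-precision weights.
        return F.linear(x, BinarizeSTE.apply(self.weight), self.bias)

The switch from ELU to Hardtanh fits this setting: Hardtanh clips activations to [-1, 1], the usual companion to binarized layers.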
Example #3
    def __init__(self, in_channels: int, out_channels: int, dim: int,
                 kernel_size: int, hidden_channels: Optional[int] = None,
                 dilation: int = 1, bias: bool = True, num_workers: int = 1):
        super(XConv, self).__init__()

        if knn_graph is None:
            raise ImportError('`XConv` requires `torch-cluster`.')

        self.in_channels = in_channels
        if hidden_channels is None:
            hidden_channels = in_channels // 4
        assert hidden_channels > 0
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.dim = dim
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.num_workers = num_workers

        C_in, C_delta, C_out = in_channels, hidden_channels, out_channels
        D, K = dim, kernel_size

        self.mlp1 = S(
            L(dim, C_delta),
            ELU(),
            BN(C_delta),
            L(C_delta, C_delta),
            ELU(),
            BN(C_delta),
            Reshape(-1, K, C_delta),
        )

        self.mlp2 = S(
            L(D * K, K**2),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            ELU(),
            BN(K**2),
            Reshape(-1, K, K),
            Conv1d(K, K**2, K, groups=K),
            BN(K**2),
            Reshape(-1, K, K),
        )

        C_in = C_in + C_delta
        depth_multiplier = int(ceil(C_out / C_in))
        self.conv = S(
            Conv1d(C_in, C_in * depth_multiplier, K, groups=C_in),
            Reshape(-1, C_in * depth_multiplier),
            L(C_in * depth_multiplier, C_out, bias=bias),
        )

        self.reset_parameters()
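The `knn_graph is None` guard above implies an optional import. A preamble consistent with the torch_geometric pattern (an assumption; the original imports are not shown) is:

try:
    from torch_cluster import knn_graph
except ImportError:
    knn_graph = None

This keeps torch-cluster an optional dependency: the module fails only when XConv is constructed, not at import time.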
Example #4

    def __init__(self):
        super(PointModel, self).__init__()
        # self.stepWeights = GNNFixedK()
        # self.dropout = torch.nn.Dropout(p=0.25)
        self.k_size = 12
        self.layer1 = S(L(7, 32), ReLU(), L(32, 16))
        self.layerg = S(L(19, 32), ReLU(), L(32, 8))
        self.layer2 = S(L(24, 32), ReLU(), L(32, 16))
        self.layerg2 = S(L(16, 32), ReLU(), L(32, 8))
        self.layer3 = S(L(24, 32), ReLU(), L(32, 16))
        self.layerg3 = S(L(16, 32), ReLU(), L(32, 12))
        self.layer4 = S(L(27, 64), ReLU(), L(64, 1))
        self.layer5 = S(L(13, 64), ReLU(), L(64, 3))
        self.reset_parameters()
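PointModel differs from GNNFixedK below only in layer1's input width (7 vs. 8 features) and in the extra layer5, which maps 13 features to 3 outputs per point.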
Example #5
    def __init__(self):
        super(GNNFixedK, self).__init__()

        self.layer1 = S(L(8, 32), ReLU(), L(32, 16))
        self.layerg = S(L(19, 32), ReLU(), L(32, 8))
        self.layer2 = S(L(24, 32), ReLU(), L(32, 16))
        self.layerg2 = S(L(16, 32), ReLU(), L(32, 8))
        self.layer3 = S(L(24, 32), ReLU(), L(32, 16))
        self.layerg3 = S(L(16, 32), ReLU(), L(32, 12))
        self.layer4 = S(L(27, 64), ReLU(), L(64, 1))
        self.reset_parameters()
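Every block in PointModel and GNNFixedK is the same two-layer MLP pattern. A quick shape check of the first block (input sizes are assumptions read off the layer definitions):

import torch
from torch.nn import Sequential as S, Linear as L, ReLU

layer1 = S(L(8, 32), ReLU(), L(32, 16))
x = torch.randn(100, 8)  # 100 points, 8 input features each
out = layer1(x)          # -> [100, 16]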
Example #6
    def __init__(self):
        super(PneumoniaFakeModel, self).__init__()

        # Input tensor size: [3, 3, 256, 256] (N, C, H, W)

        self.conv1 = Conv2d(in_channels=3,
                            out_channels=12,
                            kernel_size=3,
                            stride=1,
                            padding=1)
        self.fc2 = L(256, 50)
        self.fc3 = L(50, 2)

        self.pool1 = MaxPool2d(kernel_size=2)  # halve the spatial size
        self.relu2 = R()  # R: presumably ReLU
        self.sig = S()  # S takes no submodules here, so presumably Sigmoid
        self.bn1 = BatchNorm2d(num_features=12)
        self.pool2 = MaxPool2d(kernel_size=2)  # halve the spatial size again
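A quick shape check for the layers above, assuming the noted input size and a forward pass of conv1 -> bn1 -> relu2 -> pool1 -> pool2 (the actual forward() is not shown):

# [3, 3, 256, 256] -> conv1 (k=3, s=1, p=1) -> [3, 12, 256, 256]
# -> pool1 (k=2) -> [3, 12, 128, 128] -> pool2 (k=2) -> [3, 12, 64, 64]
# Flattening gives 12 * 64 * 64 = 49152 features per image, so a
# reduction (e.g. adaptive pooling) would be needed before fc2,
# which expects 256 input features.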