Esempio n. 1
0
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, \
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        """Set up a 2-D transposed-convolution layer.

        Normalizes all size arguments to (h, w) tuples, precomputes the
        padding of the equivalent forward convolution, validates
        output_padding, and initializes weight/bias parameters.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernel_size, stride, padding, output_padding, dilation:
                int or (h, w) tuple.
            groups: must be 1 — grouped transposed conv is not supported.
            bias: add a learnable bias when True.
        """
        self.in_channels = in_channels
        self.out_channels = out_channels

        # NOTE: attribute is `group` (singular); kept as-is for backward
        # compatibility with existing readers of this attribute.
        self.group = groups
        assert groups == 1, "Group conv not supported yet."

        # Normalize scalar size arguments to (h, w) tuples.
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation)
        self.padding = padding if isinstance(padding, tuple) else (padding, padding)
        # Padding of the equivalent forward conv: dilation*(kernel-1) - padding.
        self.real_padding = (self.dilation[0] * (self.kernel_size[0] - 1) - self.padding[0],
            self.dilation[1] * (self.kernel_size[1] - 1) - self.padding[1])
        self.output_padding = output_padding if isinstance(output_padding, tuple) else (output_padding, output_padding)
        assert self.output_padding[0] < max(self.stride[0], self.dilation[0]) and \
            self.output_padding[1] < max(self.stride[1], self.dilation[1]), \
            "output padding must be smaller than max(stride, dilation)"

        # Transposed-conv weight layout: (in_channels, out_channels, Kh, Kw).
        self.weight = init.invariant_uniform((in_channels, out_channels) + self.kernel_size, dtype="float")
        if bias:
            # Bias ~ U(-1/sqrt(fan), 1/sqrt(fan)), fan taken over all weight
            # dims except the first.
            fan = 1
            for i in self.weight.shape[1:]:
                fan *= i
            bound = 1 / math.sqrt(fan)
            self.bias = init.uniform([out_channels], dtype="float", low=-bound, high=bound)
        else:
            self.bias = None
Esempio n. 2
0
def linear(x, n):
    """Apply a fully-connected layer of width *n* to *x*.

    Creates (or reuses) a weight variable of shape [n, in_features] with
    invariant-uniform init and a bias of shape [n] with bound-uniform init,
    then returns x @ W + b.
    """
    weight = jt.make_var([n, x.shape[-1]],
                         init=lambda *shape: init.invariant_uniform(*shape))
    # Transpose to (in_features, n) via reindex so matmul lines up with x.
    weight = weight.reindex([weight.shape[1], weight.shape[0]], ["i1", "i0"])
    limit = 1.0 / math.sqrt(weight.shape[0])
    bias = jt.make_var([n], init=lambda *shape: init.uniform(*shape, -limit, limit))
    return jt.matmul(x, weight) + bias
Esempio n. 3
0
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        """Set up a 2-D convolution layer.

        Normalizes all size arguments to (h, w) tuples, validates the
        groups constraint, and initializes weight/bias parameters.

        Args:
            in_channels: input channels; must be divisible by groups.
            out_channels: output channels; must be divisible by groups.
            kernel_size, stride, padding, dilation: int or (h, w) tuple.
            groups: number of convolution groups.
            bias: add a learnable bias when True.
        """
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize scalar size arguments to (h, w) tuples.
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.padding = padding if isinstance(padding, tuple) else (padding, padding)
        self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation)
        # Fix: the groups assignment and its asserts were duplicated; run once.
        self.groups = groups
        assert in_channels % groups == 0, 'in_channels must be divisible by groups'
        assert out_channels % groups == 0, 'out_channels must be divisible by groups'
        Kh, Kw = self.kernel_size

        self.weight = init.invariant_uniform([out_channels, in_channels//groups, Kh, Kw], dtype="float")
        if bias:
            # Bias ~ U(-1/sqrt(fan), 1/sqrt(fan)), fan taken over all weight
            # dims except the first.
            fan = 1
            for i in self.weight.shape[1:]:
                fan *= i
            bound = 1 / math.sqrt(fan)
            self.bias = init.uniform([out_channels], dtype="float", low=-bound, high=bound)
        else:
            self.bias = None
Esempio n. 4
0
 def __init__(self, in_features, out_features, bias=True):
     """Set up a fully-connected layer: weight (out, in) plus optional bias."""
     self.in_features = in_features
     self.out_features = out_features
     # Invariant-uniform weight initialization.
     self.weight = init.invariant_uniform((out_features, in_features),
                                          "float32")
     # Bias ~ U(-1/sqrt(in_features), 1/sqrt(in_features)) when enabled.
     limit = 1.0 / math.sqrt(in_features)
     self.bias = None if not bias else init.uniform(
         (out_features, ), "float32", -limit, limit)
Esempio n. 5
0
def main():
    """Smoke-test PointCNN on a random (16, 1024, 3) input."""
    points = init.invariant_uniform([16, 1024, 3], dtype='float')
    # The model receives the same tensor twice as a pair — presumably
    # (features, coordinates); confirm against PointCNN.__call__.
    inputs = (points, points)
    net = PointCNN()
    result = net(inputs)
    _ = result.data  # force lazy evaluation
    print(result.shape)
Esempio n. 6
0
File: nn.py Project: waTeim/jittor
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, \
                 padding=0, output_padding=0, groups=1, bias=True, dilation=1):
        """Set up a 2-D transposed-convolution layer.

        Normalizes all size arguments to (h, w) tuples, precomputes the
        padding of the equivalent forward convolution, validates
        output_padding, and initializes weight/bias parameters.

        Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            kernel_size, stride, padding, output_padding, dilation:
                int or (h, w) tuple.
            groups: must be 1 — grouped transposed conv is not supported.
            bias: add a learnable bias when True.
        """
        self.in_channels = in_channels
        self.out_channels = out_channels

        # NOTE: attribute is `group` (singular); kept as-is for backward
        # compatibility with existing readers of this attribute.
        self.group = groups
        assert groups == 1, "Group conv not supported yet."

        # Normalize scalar size arguments to (h, w) tuples.
        self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size)
        self.stride = stride if isinstance(stride, tuple) else (stride, stride)
        self.dilation = dilation if isinstance(dilation, tuple) else (dilation, dilation)
        self.padding = padding if isinstance(padding, tuple) else (padding, padding)
        # Padding of the equivalent forward conv: dilation*(kernel-1) - padding.
        self.real_padding = (self.dilation[0] * (self.kernel_size[0] - 1) - self.padding[0],
            self.dilation[1] * (self.kernel_size[1] - 1) - self.padding[1])
        self.output_padding = output_padding if isinstance(output_padding, tuple) else (output_padding, output_padding)
        # Fix: validate output_padding (was missing here).
        assert self.output_padding[0] < max(self.stride[0], self.dilation[0]) and \
            self.output_padding[1] < max(self.stride[1], self.dilation[1]), \
            "output padding must be smaller than max(stride, dilation)"

        # Transposed-conv weight layout: (in_channels, out_channels, Kh, Kw).
        self.weight = init.invariant_uniform((in_channels, out_channels) + self.kernel_size, dtype="float")
        if bias:
            # Fix: bound the bias by 1/sqrt(fan) instead of the fixed [-1, 1],
            # matching the Conv/ConvTranspose initialization used elsewhere.
            fan = 1
            for i in self.weight.shape[1:]:
                fan *= i
            bound = 1 / math.sqrt(fan)
            self.bias = init.uniform([out_channels], dtype="float", low=-bound, high=bound)
        else:
            self.bias = None
Esempio n. 7
0
        # NOTE(review): this is the tail of a forward pass whose `def` line is
        # not visible in this fragment — out1, label and N come from the
        # missing prefix; confirm against the full method.
        out2 = self.relu(self.bn2(self.conv2(out1)))
        out3 = self.relu(self.bn3(self.conv3(out2)))

        # Predict a feature-space transform and apply it to out3
        # (transpose so bmm multiplies per-point feature rows).
        trans_feat = self.fstn(out3)
        x = out3.transpose(0, 2, 1)
        net_transformed = nn.bmm(x, trans_feat)
        net_transformed = net_transformed.transpose(0, 2, 1)

        out4 = self.relu(self.bn4(self.conv4(net_transformed)))
        out5 = self.bn5(self.conv5(out4))
        # jt.argmax returns (index, value); [1] keeps the max values —
        # a global max-pool over axis 2.
        out_max = jt.argmax(out5, 2, keepdims=True)[1]
        out_max = out_max.view(-1, 2048)

        # Append the class label (presumably a 16-way one-hot; verify against
        # the caller), then broadcast the global feature to every point.
        out_max = concat((out_max, label), 1)
        expand = out_max.view(-1, 2048 + 16, 1).repeat(1, 1, N)
        # Skip-connect all intermediate per-point features with the global one.
        concat_feature = concat([expand, out1, out2, out3, out4, out5], 1)
        net = self.relu(self.bns1(self.convs1(concat_feature)))
        net = self.relu(self.bns2(self.convs2(net)))
        net = self.relu(self.bns3(self.convs3(net)))
        net = self.convs4(net)
        return net


if __name__ == '__main__':
    from jittor import init
    # Smoke test: random (batch=16) point cloud plus a 16-dim label vector.
    points = init.invariant_uniform([16, 3, 1024], dtype='float')
    labels = init.invariant_uniform([16, 16], dtype='float')
    net = PointNet_partseg()
    result = net(points, labels)
    print(result.size())