Example #1
    def __init__(self, Rs_in, mul, lmax, Rs_out, layers=3):
        super().__init__()

        Rs = self.Rs_in = rs.simplify(Rs_in)
        self.Rs_out = rs.simplify(Rs_out)

        def make_act(p_val, p_arg, act):
            # one copy of every l up to lmax, with parity p_val * p_arg**l
            Rs = [(1, l, p_val * p_arg**l) for l in range(lmax + 1)]
            return S2Activation(Rs, act, res=20 * (lmax + 1))

        # the even-valued signal (p_val=+1) goes through swish, the odd-valued
        # one (p_val=-1) through tanh, which is odd and therefore preserves parity
        self.act1, self.act2 = make_act(1, -1, swish), make_act(-1, -1, tanh)
        self.mul = mul

        self.layers = []

        for _ in range(layers):
            # note: this local Rs_out shadows the constructor argument,
            # which was already stored as self.Rs_out above
            Rs_out = mul * (self.act1.Rs_in + self.act2.Rs_in)
            lin = LearnableTensorSquare(Rs,
                                        Rs_out,
                                        linear=True,
                                        allow_zero_outputs=True)

            # s2 nonlinearity
            Rs = mul * (self.act1.Rs_out + self.act2.Rs_out)

            self.layers += [lin]

        self.layers = torch.nn.ModuleList(self.layers)

        self.tail = LearnableTensorSquare(Rs, self.Rs_out)
Example #2
    def __init__(self, Rs_in, mul, lmax, Rs_out, layers=3):
        super().__init__()

        Rs = self.Rs_in = rs.simplify(Rs_in)
        self.Rs_out = rs.simplify(Rs_out)
        # S2 activation acting on one copy of every l up to lmax
        # (no parity handling in this variant)
        self.act = S2Activation(list(range(lmax + 1)),
                                swish,
                                res=20 * (lmax + 1))

        self.layers = []

        for _ in range(layers):
            lin = LearnableTensorSquare(Rs,
                                        mul * self.act.Rs_in,
                                        linear=True,
                                        allow_zero_outputs=True)

            # s2 nonlinearity
            Rs = mul * self.act.Rs_out

            self.layers += [lin]

        self.layers = torch.nn.ModuleList(self.layers)

        self.tail = LearnableTensorSquare(Rs, self.Rs_out)
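
Examples #1 and #2 show only the constructor of the enclosing network class; the class name itself is not part of the excerpt. Assuming such a class, a minimal instantiation sketch would look like the following (the name `S2Net` and the Rs values are placeholders, not taken from the library):

net = S2Net(
    Rs_in=[0, 1],   # input: one scalar (l=0) and one vector (l=1) channel
    mul=16,         # multiplicity of the internal features
    lmax=3,         # maximum degree used by the S2 activations
    Rs_out=[0],     # output: a single scalar channel
    layers=3,
)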
Example #3
    def __init__(self, Rs_in, mul, lmax, Rs_out, size=5, layers=3):
        super().__init__()

        Rs = rs.simplify(Rs_in)
        Rs_out = rs.simplify(Rs_out)
        Rs_act = list(range(lmax + 1))

        self.mul = mul
        self.layers = []

        for _ in range(layers):
            conv = ImageConvolution(Rs,
                                    mul * Rs_act,
                                    size,
                                    lmax=lmax,
                                    fuzzy_pixels=True,
                                    padding=size // 2)

            # s2 nonlinearity
            act = S2Activation(Rs_act, swish, res=60)
            Rs = mul * act.Rs_out

            # low-pass (anti-aliasing) filter followed by stride-2 downsampling
            pool = LowPassFilter(scale=2.0, stride=2)

            self.layers += [torch.nn.ModuleList([conv, act, pool])]

        self.layers = torch.nn.ModuleList(self.layers)
        self.tail = LearnableTensorSquare(Rs, Rs_out)
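
Example #3 stacks `layers` blocks of equivariant image convolution, S2 activation and low-pass pooling; because the pooling uses stride 2, each block halves the spatial resolution of the grid. A hypothetical instantiation (the class name `ImageS2Net` and all values below are placeholders, not taken from the excerpt):

net = ImageS2Net(
    Rs_in=[0],   # e.g. one scalar channel per grid point
    mul=8,       # multiplicity of the hidden features
    lmax=2,      # maximum degree used by the S2 activation
    Rs_out=[0],  # scalar output
    size=5,      # spatial kernel size of each ImageConvolution
    layers=3,    # the grid is downsampled by 2 in every layer
)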
Example #4
def test_learnable_tensor_square_normalization():
    Rs_in = [1, 2, 3, 4]
    Rs_out = [0, 2, 4, 5]

    m = LearnableTensorSquare(Rs_in, Rs_out)
    y = m(rs.randn(1000, Rs_in))

    assert y.var().log10().abs() < 1.5, y.var().item()
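
The test draws 1000 normally distributed samples and checks that the output variance stays within one and a half orders of magnitude of 1, i.e. that the layer is roughly variance-preserving under its default normalization. The bounds the assertion accepts:

# |log10(var)| < 1.5  is equivalent to  10**-1.5 < var < 10**1.5
low, high = 10 ** -1.5, 10 ** 1.5
print(f"accepted output variance: {low:.3f} .. {high:.1f}")  # ≈ 0.032 .. 31.6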
Example #5
def make_layer(Rs_in, Rs_out):
    # closure: feature_product, lmax, K, convolution, swish and sigmoid are
    # taken from the enclosing scope
    if feature_product:
        tr1 = rs.TransposeToMulL(Rs_in)
        lts = LearnableTensorSquare(tr1.Rs_out,
                                    list(range(lmax + 1)),
                                    allow_change_output=True)
        tr2 = torch.nn.Flatten(2)
        Rs = tr1.mul * lts.Rs_out
        act = GatedBlock(Rs_out, swish, sigmoid)
        conv = convolution(K(Rs, act.Rs_in))
        return torch.nn.ModuleList(
            [torch.nn.Sequential(tr1, lts, tr2), conv, act])
    else:
        act = GatedBlock(Rs_out, swish, sigmoid)
        conv = convolution(K(Rs_in, act.Rs_in))
        return torch.nn.ModuleList([conv, act])
Example #6
    def __init__(self,
                 Rs_in,
                 mul,
                 Rs_out,
                 lmax,
                 layers=3,
                 max_radius=1.0,
                 number_of_basis=3,
                 radial_layers=3,
                 feature_product=False,
                 kernel=Kernel,
                 convolution=Convolution,
                 min_radius=0.0):
        super().__init__()

        # radial part of the kernel: an MLP of width 100 and `radial_layers`
        # layers on a Gaussian basis spanning [min_radius, max_radius]
        R = partial(GaussianRadialModel,
                    max_radius=max_radius,
                    number_of_basis=number_of_basis,
                    h=100,
                    L=radial_layers,
                    act=swish,
                    min_radius=min_radius)
        # angular part: spherical harmonics selected up to lmax
        K = partial(kernel,
                    RadialModel=R,
                    selection_rule=partial(o3.selection_rule_in_out_sh,
                                           lmax=lmax))

        modules = []

        Rs = Rs_in
        for _ in range(layers):
            # gated parity nonlinearity: l=0 channels are activated directly,
            # l>0 channels are multiplied by learned sigmoid gates
            scalars = [(mul, l, p)
                       for mul, l, p in [(mul, 0, +1), (mul, 0, -1)]
                       if rs.haslinearpath(Rs, l, p)]
            act_scalars = [(mul, swish if p == 1 else tanh)
                           for mul, l, p in scalars]

            nonscalars = [(mul, l, p) for l in range(1, lmax + 1)
                          for p in [+1, -1] if rs.haslinearpath(Rs, l, p)]
            gates = [(rs.mul_dim(nonscalars), 0, +1)]
            act_gates = [(-1, sigmoid)]

            act = GatedBlockParity(scalars, act_scalars, gates, act_gates,
                                   nonscalars)
            conv = convolution(K(Rs, act.Rs_in))

            if feature_product:
                tr1 = rs.TransposeToMulL(act.Rs_out)
                lts = LearnableTensorSquare(tr1.Rs_out,
                                            [(1, l, p) for l in range(lmax + 1)
                                             for p in [-1, 1]],
                                            allow_change_output=True)
                tr2 = torch.nn.Flatten(2)
                act = torch.nn.Sequential(act, tr1, lts, tr2)
                Rs = tr1.mul * lts.Rs_out
            else:
                Rs = act.Rs_out

            block = torch.nn.ModuleList([conv, act])
            modules.append(block)

        self.layers = torch.nn.ModuleList(modules)

        # final convolution maps to the requested output representation;
        # allow_unused_inputs tolerates input channels with no path to the output
        K = partial(K, allow_unused_inputs=True)
        self.layers.append(convolution(K(Rs, Rs_out)))
        self.feature_product = feature_product
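
Example #6 takes the kernel and convolution as constructor arguments, so the same network definition can be reused with different convolution backends. A hypothetical instantiation (the class name `GatedParityNet` and the Rs values are placeholders; the Rs entries use the (mul, l, parity) tuples that the constructor itself relies on):

net = GatedParityNet(
    Rs_in=[(1, 0, +1)],   # one even scalar per point
    mul=16,               # multiplicity of the hidden features
    Rs_out=[(1, 0, +1)],  # one even scalar out
    lmax=2,
    layers=3,
    max_radius=1.5,
    feature_product=False,
    # kernel=Kernel and convolution=Convolution are the defaults and can be
    # replaced by other implementations with the same call signature
)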