Example #1
    def __init__(self,
                 conv_configs,
                 in_channels,
                 out_channels,
                 split_in_channels=False,
                 reflective=False):
        super().__init__()
        assert (out_channels % len(conv_configs) == 0)
        self.conv_configs = conv_configs
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Each conv produces an equal share of the outputs; by default every conv sees all input channels.
        self.in_channels_per_conv = in_channels
        self.out_channels_per_conv = out_channels // len(conv_configs)
        self.split_in_channels = split_in_channels
        self.reflective = reflective

        if split_in_channels:
            # Optionally split the input channels evenly across the convs instead of giving each the full input.
            assert (in_channels % len(conv_configs) == 0)
            self.in_channels_per_conv = in_channels // len(conv_configs)

        self.convs = nn.ModuleList()
        for config in conv_configs:
            # Copy the config so the caller's dict is not mutated, and build the conv without padding.
            config = config.copy()
            config['padding'] = 0
            conv = nn.Conv1d(self.in_channels_per_conv,
                             self.out_channels_per_conv, **config)
            fill_weights_normal(conv.weight)
            fill_bias_zero(conv.bias)
            self.convs.append(conv)
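
To make the channel bookkeeping in Example #1 concrete, here is a minimal sketch of the arithmetic; the config values below are illustrative, not taken from the source:

conv_configs = [{'kernel_size': 3}, {'kernel_size': 5}]     # hypothetical configs
in_channels, out_channels = 8, 32                           # hypothetical sizes
out_channels_per_conv = out_channels // len(conv_configs)   # each conv emits 16 channels
in_channels_per_conv = in_channels // len(conv_configs)     # each conv sees 4 channels when split_in_channels=True
print(out_channels_per_conv, in_channels_per_conv)          # -> 16 4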
Example #2
    def __init__(self, n_features, n_time):
        super().__init__()
        # 1x1 convolution that expands a single input channel into n_features channels, without a bias.
        self.weight_conv = nn.Conv1d(1, n_features, 1, bias=False)
        self.n_features = n_features
        self.n_time = n_time
        fill_weights_normal(self.weight_conv.weight)
Example #3
    def __init__(self, n_classes, n_features, bias):
        # Initialize the parent nn.Linear directly: in_features=n_classes, out_features=n_features.
        nn.Linear.__init__(self, n_classes, n_features, bias=bias)
        fill_weights_normal(self.weight)
        fill_bias_zero(self.bias)
Example #4
    def __init__(self, n_mappings, n_features):
        # The parent nn.Linear maps n_mappings inputs to n_features * 2 outputs.
        nn.Linear.__init__(self, n_mappings, n_features * 2, bias=True)
        self.n_mappings = n_mappings
        self.n_features = n_features
        fill_weights_normal(self.weight)
        fill_bias_zero(self.bias)
Example #5
    def reset_parameters(self):
        # Reinitialize only when the layer has learnable affine parameters.
        if self.affine:
            fill_weights_normal(self.weight)
            fill_bias_zero(self.bias)
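
Every example above relies on fill_weights_normal and fill_bias_zero, which are not shown in this listing. The sketch below is an assumption about what such helpers typically do (normal-distributed weights, zeroed biases); the std value and the None guards are guesses, not the original implementation.

import torch.nn as nn

def fill_weights_normal(weight, std=0.02):
    # Assumed behavior: draw each weight from a zero-mean normal distribution (std is a guess).
    if weight is not None:
        nn.init.normal_(weight, mean=0.0, std=std)

def fill_bias_zero(bias):
    # Assumed behavior: zero the bias; the None check tolerates layers created with bias=False.
    if bias is not None:
        nn.init.zeros_(bias)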