def __init__(self, conv_configs, in_channels, out_channels, split_in_channels=False, reflective=False):
    """Build a bank of parallel Conv1d layers, one per entry in *conv_configs*.

    Args:
        conv_configs: list of kwarg dicts for nn.Conv1d (e.g. kernel_size,
            dilation). Any 'padding' entry is overridden to 0 — padding is
            presumably applied manually elsewhere (see `reflective`) — TODO confirm.
        in_channels: total number of input channels.
        out_channels: total number of output channels; must be divisible by
            len(conv_configs).
        split_in_channels: if True, each conv receives an equal slice of the
            input channels instead of all of them; in_channels must then also
            be divisible by len(conv_configs).
        reflective: stored flag; not used in this block — presumably selects
            reflection padding in forward(), verify against caller.
    """
    super().__init__()
    assert (out_channels % len(conv_configs) == 0)
    self.conv_configs = conv_configs
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.in_channels_per_conv = in_channels
    # Divisibility is asserted above, so floor division is exact. The original
    # used true division (`/`), producing floats that then had to be wrapped in
    # int() at the Conv1d call; storing ints also fixes the attributes for any
    # caller that uses them as sizes or indices.
    self.out_channels_per_conv = out_channels // len(conv_configs)
    self.split_in_channels = split_in_channels
    self.reflective = reflective
    if split_in_channels:
        assert (in_channels % len(conv_configs) == 0)
        self.in_channels_per_conv = in_channels // len(conv_configs)
    self.convs = nn.ModuleList()
    for config in conv_configs:
        config = config.copy()  # never mutate the caller's config dicts
        config['padding'] = 0   # padding is handled outside these convs
        conv = nn.Conv1d(self.in_channels_per_conv, self.out_channels_per_conv, **config)
        fill_weights_normal(conv.weight)
        fill_bias_zero(conv.bias)
        self.convs.append(conv)
def __init__(self, n_mappings, n_features):
    """Linear projection from *n_mappings* inputs to 2 * n_features outputs
    (presumably one scale/shift pair per feature — confirm against caller).
    """
    doubled = n_features * 2
    nn.Linear.__init__(self, n_mappings, doubled, bias=True)
    self.n_mappings = n_mappings
    self.n_features = n_features
    # Project-standard initialization of the linear parameters.
    fill_weights_normal(self.weight)
    fill_bias_zero(self.bias)
def __init__(self, n_features):
    """Learnable per-feature bias of shape (1, n_features, 1), broadcastable
    over (batch, features, time)-shaped tensors — TODO confirm layout at call site.
    """
    super().__init__()
    # torch.empty replaces the legacy torch.Tensor(*sizes) size-constructor;
    # both allocate uninitialized storage, which fill_bias_zero then zeroes.
    self.bias = torch.nn.Parameter(torch.empty(1, n_features, 1))
    fill_bias_zero(self.bias)
def __init__(self, n_classes, n_features, bias):
    """Linear layer from n_classes inputs to n_features outputs with
    project-standard initialization.

    Args:
        n_classes: number of input features.
        n_features: number of output features.
        bias: whether the layer carries a bias term.
    """
    nn.Linear.__init__(self, n_classes, n_features, bias=bias)
    fill_weights_normal(self.weight)
    # nn.Linear sets self.bias to None when bias=False; the original passed
    # that None straight into fill_bias_zero. Guard so the no-bias case is
    # safe; behavior is unchanged whenever a bias exists.
    if self.bias is not None:
        fill_bias_zero(self.bias)
def reset_parameters(self):
    """Re-initialize the affine parameters; a no-op when affine is disabled."""
    if not self.affine:
        return
    fill_weights_normal(self.weight)
    fill_bias_zero(self.bias)